Merge branch 'akpm' (patches from Andrew)

Merge first patch-bomb from Andrew Morton:

 - some misc things

 - ocfs2 updates

 - about half of MM

 - checkpatch updates

 - autofs4 update

* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (120 commits)
  autofs4: fix string.h include in auto_dev-ioctl.h
  autofs4: use pr_xxx() macros directly for logging
  autofs4: change log print macros to not insert newline
  autofs4: make autofs log prints consistent
  autofs4: fix some white space errors
  autofs4: fix invalid ioctl return in autofs4_root_ioctl_unlocked()
  autofs4: fix coding style line length in autofs4_wait()
  autofs4: fix coding style problem in autofs4_get_set_timeout()
  autofs4: coding style fixes
  autofs: show pipe inode in mount options
  kallsyms: add support for relative offsets in kallsyms address table
  kallsyms: don't overload absolute symbol type for percpu symbols
  x86: kallsyms: disable absolute percpu symbols on !SMP
  checkpatch: fix another left brace warning
  checkpatch: improve UNSPECIFIED_INT test for bare signed/unsigned uses
  checkpatch: warn on bare unsigned or signed declarations without int
  checkpatch: exclude asm volatile from complex macro check
  mm: memcontrol: drop unnecessary lru locking from mem_cgroup_migrate()
  mm: migrate: consolidate mem_cgroup_migrate() calls
  mm/compaction: speed up pageblock_pfn_to_page() when zone is contiguous
  ...
diff --git a/Documentation/devicetree/bindings/arm/altera/socfpga-eccmgr.txt b/Documentation/devicetree/bindings/arm/altera/socfpga-eccmgr.txt
new file mode 100644
index 0000000..885f93d
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/altera/socfpga-eccmgr.txt
@@ -0,0 +1,49 @@
+Altera SoCFPGA ECC Manager
+This driver uses the EDAC framework to implement the SoCFPGA ECC Manager.
+The ECC Manager counts and corrects single-bit errors and counts/handles
+double-bit errors, which are uncorrectable.
+
+Required Properties:
+- compatible : Should be "altr,socfpga-ecc-manager"
+- #address-cells: must be 1
+- #size-cells: must be 1
+- ranges : standard definition, should translate from local addresses
+
+Subcomponents:
+
+L2 Cache ECC
+Required Properties:
+- compatible : Should be "altr,socfpga-l2-ecc"
+- reg : Address and size for ECC error interrupt clear registers.
+- interrupts : Should be single bit error interrupt, then double bit error
+	interrupt. Note the rising edge type.
+
+On Chip RAM ECC
+Required Properties:
+- compatible : Should be "altr,socfpga-ocram-ecc"
+- reg : Address and size for ECC error interrupt clear registers.
+- iram : phandle to On-Chip RAM definition.
+- interrupts : Should be single bit error interrupt, then double bit error
+	interrupt. Note the rising edge type.
+
+Example:
+
+	eccmgr: eccmgr@ffd08140 {
+		compatible = "altr,socfpga-ecc-manager";
+		#address-cells = <1>;
+		#size-cells = <1>;
+		ranges;
+
+		l2-ecc@ffd08140 {
+			compatible = "altr,socfpga-l2-ecc";
+			reg = <0xffd08140 0x4>;
+			interrupts = <0 36 1>, <0 37 1>;
+		};
+
+		ocram-ecc@ffd08144 {
+			compatible = "altr,socfpga-ocram-ecc";
+			reg = <0xffd08144 0x4>;
+			iram = <&ocram>;
+			interrupts = <0 178 1>, <0 179 1>;
+		};
+	};
diff --git a/Documentation/devicetree/bindings/edac/apm-xgene-edac.txt b/Documentation/devicetree/bindings/edac/apm-xgene-edac.txt
index 78e2a31..1006b04 100644
--- a/Documentation/devicetree/bindings/edac/apm-xgene-edac.txt
+++ b/Documentation/devicetree/bindings/edac/apm-xgene-edac.txt
@@ -16,6 +16,10 @@
 - regmap-mcba		: Regmap of the MCB-A (memory bridge) resource.
 - regmap-mcbb		: Regmap of the MCB-B (memory bridge) resource.
 - regmap-efuse		: Regmap of the PMD efuse resource.
+- regmap-rb		: Regmap of the register bus resource. This property
+			  is optional only for backward compatibility. If the
+			  RB error conditions are not cleared, the RB will
+			  continuously generate interrupts.
 - reg			: First resource shall be the CPU bus (PCP) resource.
 - interrupts            : Interrupt-specifier for MCU, PMD, L3, or SoC error
 			  IRQ(s).
@@ -64,6 +68,11 @@
 		reg = <0x0 0x1054a000 0x0 0x20>;
 	};
 
+	rb: rb@7e000000 {
+		compatible = "apm,xgene-rb", "syscon";
+		reg = <0x0 0x7e000000 0x0 0x10>;
+	};
+
 	edac@78800000 {
 		compatible = "apm,xgene-edac";
 		#address-cells = <2>;
@@ -73,6 +82,7 @@
 		regmap-mcba = <&mcba>;
 		regmap-mcbb = <&mcbb>;
 		regmap-efuse = <&efuse>;
+		regmap-rb = <&rb>;
 		reg = <0x0 0x78800000 0x0 0x100>;
 		interrupts = <0x0 0x20 0x4>,
 			     <0x0 0x21 0x4>,
diff --git a/Documentation/devicetree/bindings/gpio/microchip,pic32-gpio.txt b/Documentation/devicetree/bindings/gpio/microchip,pic32-gpio.txt
new file mode 100644
index 0000000..ef37528
--- /dev/null
+++ b/Documentation/devicetree/bindings/gpio/microchip,pic32-gpio.txt
@@ -0,0 +1,49 @@
+* Microchip PIC32 GPIO devices (PIO).
+
+Required properties:
+ - compatible: "microchip,pic32mzda-gpio"
+ - reg: Base address and length for the device.
+ - interrupts: The port interrupt shared by all pins.
+ - gpio-controller: Marks the port as GPIO controller.
+ - #gpio-cells: Two. The first cell is the pin number and
+   the second cell is used to specify the gpio polarity as defined in
+   <dt-bindings/gpio/gpio.h>:
+      0 = GPIO_ACTIVE_HIGH
+      1 = GPIO_ACTIVE_LOW
+      2 = GPIO_OPEN_DRAIN
+ - interrupt-controller: Marks the device node as an interrupt controller.
+ - #interrupt-cells: Two. The first cell is the GPIO number and second cell
+   is used to specify the trigger type as defined in
+   <dt-bindings/interrupt-controller/irq.h>:
+      IRQ_TYPE_EDGE_RISING
+      IRQ_TYPE_EDGE_FALLING
+      IRQ_TYPE_EDGE_BOTH
+ - clocks: Clock specifier (see clock bindings for details).
+ - microchip,gpio-bank: Specifies which bank a controller owns.
+ - gpio-ranges: Interaction with the PINCTRL subsystem.
+
+Example:
+
+/* PORTA */
+gpio0: gpio0@1f860000 {
+	compatible = "microchip,pic32mzda-gpio";
+	reg = <0x1f860000 0x100>;
+	interrupts = <118 IRQ_TYPE_LEVEL_HIGH>;
+	#gpio-cells = <2>;
+	gpio-controller;
+	interrupt-controller;
+	#interrupt-cells = <2>;
+	clocks = <&PBCLK4>;
+	microchip,gpio-bank = <0>;
+	gpio-ranges = <&pic32_pinctrl 0 0 16>;
+};
+
+keys {
+	...
+
+	button@sw1 {
+		label = "ESC";
+		linux,code = <1>;
+		gpios = <&gpio0 12 0>;
+	};
+};
diff --git a/Documentation/devicetree/bindings/hwmon/nsa320-mcu.txt b/Documentation/devicetree/bindings/hwmon/nsa320-mcu.txt
new file mode 100644
index 0000000..0863e06
--- /dev/null
+++ b/Documentation/devicetree/bindings/hwmon/nsa320-mcu.txt
@@ -0,0 +1,20 @@
+Bindings for the fan / temperature monitor microcontroller used on
+the Zyxel NSA 320 and several subsequent models.
+
+Required properties:
+- compatible	: "zyxel,nsa320-mcu"
+- data-gpios	: The GPIO pin connected to the data line on the MCU
+- clk-gpios	: The GPIO pin connected to the clock line on the MCU
+- act-gpios	: The GPIO pin connected to the active line on the MCU
+
+Example:
+
+	hwmon {
+		compatible = "zyxel,nsa320-mcu";
+		pinctrl-0 = <&pmx_mcu_data &pmx_mcu_clk &pmx_mcu_act>;
+		pinctrl-names = "default";
+
+		data-gpios = <&gpio0 14 GPIO_ACTIVE_HIGH>;
+		clk-gpios = <&gpio0 16 GPIO_ACTIVE_HIGH>;
+		act-gpios = <&gpio0 17 GPIO_ACTIVE_LOW>;
+	};
diff --git a/Documentation/devicetree/bindings/hwmon/ntc_thermistor.txt b/Documentation/devicetree/bindings/hwmon/ntc_thermistor.txt
index a04a80f..c3b9c4c 100644
--- a/Documentation/devicetree/bindings/hwmon/ntc_thermistor.txt
+++ b/Documentation/devicetree/bindings/hwmon/ntc_thermistor.txt
@@ -10,6 +10,7 @@
 	"murata,ncp03wb473"
 	"murata,ncp15wl333"
 	"murata,ncp03wf104"
+	"murata,ncp15xh103"
 
 /* Usage of vendor name "ntc" is deprecated */
 <DEPRECATED>	"ntc,ncp15wb473"
diff --git a/Documentation/devicetree/bindings/iio/iio-bindings.txt b/Documentation/devicetree/bindings/iio/iio-bindings.txt
index 0b447d9..68d6f8c 100644
--- a/Documentation/devicetree/bindings/iio/iio-bindings.txt
+++ b/Documentation/devicetree/bindings/iio/iio-bindings.txt
@@ -82,7 +82,7 @@
 
 	...
 
-	iio_hwmon {
+	iio-hwmon {
 		compatible = "iio-hwmon";
 		io-channels = <&adc 0>, <&adc 1>, <&adc 2>,
 			<&adc 3>, <&adc 4>, <&adc 5>,
diff --git a/Documentation/devicetree/bindings/interrupt-controller/al,alpine-msix.txt b/Documentation/devicetree/bindings/interrupt-controller/al,alpine-msix.txt
new file mode 100644
index 0000000..f6f1c14
--- /dev/null
+++ b/Documentation/devicetree/bindings/interrupt-controller/al,alpine-msix.txt
@@ -0,0 +1,26 @@
+Alpine MSIX controller
+
+See arm,gic-v3.txt for SPI and MSI definitions.
+
+Required properties:
+
+- compatible: should be "al,alpine-msix"
+- reg: physical base address and size of the registers
+- interrupt-parent: specifies the parent interrupt controller.
+- interrupt-controller: identifies the node as an interrupt controller
+- msi-controller: identifies the node as a PCI Message Signaled Interrupt
+		  controller
+- al,msi-base-spi: SPI base of the MSI frame
+- al,msi-num-spis: number of SPIs assigned to the MSI frame, relative to SPI0
+
+Example:
+
+msix: msix {
+	compatible = "al,alpine-msix";
+	reg = <0x0 0xfbe00000 0x0 0x100000>;
+	interrupt-parent = <&gic>;
+	interrupt-controller;
+	msi-controller;
+	al,msi-base-spi = <160>;
+	al,msi-num-spis = <160>;
+};
diff --git a/Documentation/devicetree/bindings/interrupt-controller/arm,gic.txt b/Documentation/devicetree/bindings/interrupt-controller/arm,gic.txt
index 5a1cb4b..793c20f 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/arm,gic.txt
+++ b/Documentation/devicetree/bindings/interrupt-controller/arm,gic.txt
@@ -16,6 +16,7 @@
 	"arm,cortex-a15-gic"
 	"arm,cortex-a7-gic"
 	"arm,cortex-a9-gic"
+	"arm,eb11mp-gic"
 	"arm,gic-400"
 	"arm,pl390"
 	"arm,tc11mp-gic"
diff --git a/Documentation/devicetree/bindings/interrupt-controller/marvell,odmi-controller.txt b/Documentation/devicetree/bindings/interrupt-controller/marvell,odmi-controller.txt
new file mode 100644
index 0000000..8af0a8e
--- /dev/null
+++ b/Documentation/devicetree/bindings/interrupt-controller/marvell,odmi-controller.txt
@@ -0,0 +1,44 @@
+
+* Marvell ODMI for MSI support
+
+Some Marvell SoCs have an On-Die Message Interrupt (ODMI) controller
+which can be used by on-board peripherals for MSI interrupts.
+
+Required properties:
+
+- compatible           : The value here should contain:
+
+    "marvell,ap806-odmi-controller", "marvell,odmi-controller".
+
+- interrupt-controller : Identifies the node as an interrupt controller.
+
+- msi-controller       : Identifies the node as an MSI controller.
+
+- marvell,odmi-frames  : Number of ODMI frames available. Each frame
+                         provides a number of events.
+
+- reg                  : List of register definitions, one for each
+                         ODMI frame.
+
+- marvell,spi-base     : List of GIC base SPI interrupts, one for each
+                         ODMI frame. Those SPI interrupts are 0-based,
+                         i.e. marvell,spi-base = <128> will use SPI #96.
+                         See Documentation/devicetree/bindings/interrupt-controller/arm,gic.txt
+                         for details about the GIC Device Tree binding.
+
+- interrupt-parent     : Reference to the parent interrupt controller.
+
+Example:
+
+	odmi: odmi@300000 {
+		compatible = "marvell,ap806-odmi-controller",
+			     "marvell,odmi-controller";
+		interrupt-controller;
+		msi-controller;
+		marvell,odmi-frames = <4>;
+		reg = <0x300000 0x4000>,
+		      <0x304000 0x4000>,
+		      <0x308000 0x4000>,
+		      <0x30C000 0x4000>;
+		marvell,spi-base = <128>, <136>, <144>, <152>;
+	};
diff --git a/Documentation/devicetree/bindings/interrupt-controller/mips-gic.txt b/Documentation/devicetree/bindings/interrupt-controller/mips-gic.txt
index aae4c38..1735953 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/mips-gic.txt
+++ b/Documentation/devicetree/bindings/interrupt-controller/mips-gic.txt
@@ -23,6 +23,12 @@
 - mti,reserved-cpu-vectors : Specifies the list of CPU interrupt vectors
   to which the GIC may not route interrupts.  Valid values are 2 - 7.
   This property is ignored if the CPU is started in EIC mode.
+- mti,reserved-ipi-vectors : Specifies the range of GIC interrupts that are
+  reserved for IPIs.
+  It accepts two values: the first is the starting interrupt and the second is
+  the size of the reserved range.
+  If not specified, the driver will allocate the last (2 * number of VPEs in
+  the system) interrupts.
 
 Required properties for timer sub-node:
 - compatible : Should be "mti,gic-timer".
@@ -44,6 +50,7 @@
 		#interrupt-cells = <3>;
 
 		mti,reserved-cpu-vectors = <7>;
+		mti,reserved-ipi-vectors = <40 8>;
 
 		timer {
 			compatible = "mti,gic-timer";
diff --git a/Documentation/devicetree/bindings/interrupt-controller/sigma,smp8642-intc.txt b/Documentation/devicetree/bindings/interrupt-controller/sigma,smp8642-intc.txt
new file mode 100644
index 0000000..1f441fa
--- /dev/null
+++ b/Documentation/devicetree/bindings/interrupt-controller/sigma,smp8642-intc.txt
@@ -0,0 +1,49 @@
+Sigma Designs SMP86xx/SMP87xx secondary interrupt controller
+
+Required properties:
+- compatible: should be "sigma,smp8642-intc"
+- reg: physical address of MMIO region
+- ranges: address space mapping of child nodes
+- interrupt-parent: phandle of parent interrupt controller
+- interrupt-controller: boolean
+- #address-cells: should be <1>
+- #size-cells: should be <1>
+
+One child node per control block with properties:
+- reg: address of registers for this control block
+- interrupt-controller: boolean
+- #interrupt-cells: should be <2>, interrupt index and flags per interrupts.txt
+- interrupts: interrupt spec of primary interrupt controller
+
+Example:
+
+interrupt-controller@6e000 {
+	compatible = "sigma,smp8642-intc";
+	reg = <0x6e000 0x400>;
+	ranges = <0x0 0x6e000 0x400>;
+	interrupt-parent = <&gic>;
+	interrupt-controller;
+	#address-cells = <1>;
+	#size-cells = <1>;
+
+	irq0: interrupt-controller@0 {
+		reg = <0x000 0x100>;
+		interrupt-controller;
+		#interrupt-cells = <2>;
+		interrupts = <GIC_SPI 2 IRQ_TYPE_LEVEL_HIGH>;
+	};
+
+	irq1: interrupt-controller@100 {
+		reg = <0x100 0x100>;
+		interrupt-controller;
+		#interrupt-cells = <2>;
+		interrupts = <GIC_SPI 3 IRQ_TYPE_LEVEL_HIGH>;
+	};
+
+	irq2: interrupt-controller@300 {
+		reg = <0x300 0x100>;
+		interrupt-controller;
+		#interrupt-cells = <2>;
+		interrupts = <GIC_SPI 4 IRQ_TYPE_LEVEL_HIGH>;
+	};
+};
diff --git a/Documentation/devicetree/bindings/leds/leds-is31fl32xx.txt b/Documentation/devicetree/bindings/leds/leds-is31fl32xx.txt
new file mode 100644
index 0000000..926c211
--- /dev/null
+++ b/Documentation/devicetree/bindings/leds/leds-is31fl32xx.txt
@@ -0,0 +1,52 @@
+Binding for ISSI IS31FL32xx and Si-En SN32xx LED Drivers
+
+The IS31FL32xx/SN32xx family of LED drivers are I2C devices with multiple
+constant-current channels, each with independent 256-level PWM control.
+Each LED is represented as a sub-node of the device.
+
+Required properties:
+- compatible: one of
+	issi,is31fl3236
+	issi,is31fl3235
+	issi,is31fl3218
+	issi,is31fl3216
+	si-en,sn3218
+	si-en,sn3216
+- reg: I2C slave address
+- #address-cells : must be 1
+- #size-cells : must be 0
+
+LED sub-node properties:
+- reg : LED channel number (1..N)
+- label :  (optional)
+  see Documentation/devicetree/bindings/leds/common.txt
+- linux,default-trigger :  (optional)
+  see Documentation/devicetree/bindings/leds/common.txt
+
+
+Example:
+
+is31fl3236: led-controller@3c {
+	compatible = "issi,is31fl3236";
+	reg = <0x3c>;
+	#address-cells = <1>;
+	#size-cells = <0>;
+
+	led@1 {
+		reg = <1>;
+		label = "EB:blue:usr0";
+	};
+	led@2 {
+		reg = <2>;
+		label = "EB:blue:usr1";
+	};
+	...
+	led@36 {
+		reg = <36>;
+		label = "EB:blue:usr35";
+	};
+};
+
+For more product information please see the links below:
+http://www.issi.com/US/product-analog-fxled-driver.shtml
+http://www.si-en.com/product.asp?parentid=890
diff --git a/Documentation/devicetree/bindings/pinctrl/allwinner,sunxi-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/allwinner,sunxi-pinctrl.txt
index 9213b27..6961722 100644
--- a/Documentation/devicetree/bindings/pinctrl/allwinner,sunxi-pinctrl.txt
+++ b/Documentation/devicetree/bindings/pinctrl/allwinner,sunxi-pinctrl.txt
@@ -21,6 +21,8 @@
   "allwinner,sun9i-a80-r-pinctrl"
   "allwinner,sun8i-a83t-pinctrl"
   "allwinner,sun8i-h3-pinctrl"
+  "allwinner,sun8i-h3-r-pinctrl"
+  "allwinner,sun50i-a64-pinctrl"
 
 - reg: Should contain the register physical address and length for the
   pin controller.
diff --git a/Documentation/devicetree/bindings/pinctrl/brcm,ns2-pinmux.txt b/Documentation/devicetree/bindings/pinctrl/brcm,ns2-pinmux.txt
new file mode 100644
index 0000000..e295dda
--- /dev/null
+++ b/Documentation/devicetree/bindings/pinctrl/brcm,ns2-pinmux.txt
@@ -0,0 +1,102 @@
+Broadcom Northstar2 IOMUX Controller
+
+The Northstar2 IOMUX controller supports group based mux configuration. There
+are some individual pins that support modifying the pinconf parameters.
+
+Required properties:
+
+- compatible:
+    Must be "brcm,ns2-pinmux"
+
+- reg:
+    Define the base and range of the I/O address space that contains the
+    Northstar2 IOMUX and pin configuration registers.
+
+Properties in sub nodes:
+
+- function:
+    The mux function to select
+
+- groups:
+    The list of groups to select with a given function
+
+- pins:
+    List of pin names to change configuration
+
+The generic properties bias-disable, bias-pull-down, bias-pull-up,
+drive-strength, slew-rate, input-enable, input-disable are supported
+for some individual pins listed at the end.
+
+For more details, refer to
+Documentation/devicetree/bindings/pinctrl/pinctrl-bindings.txt
+
+For example:
+
+	pinctrl: pinctrl@6501d130 {
+		compatible = "brcm,ns2-pinmux";
+		reg = <0x6501d130 0x08>,
+		      <0x660a0028 0x04>,
+		      <0x660009b0 0x40>;
+
+		pinctrl-names = "default";
+		pinctrl-0 = <&nand_sel &uart3_rx &sdio0_d4>;
+
+		/* Select nand function */
+		nand_sel: nand_sel {
+			function = "nand";
+			groups = "nand_grp";
+		};
+
+		/* Pull up the uart3 rx pin */
+		uart3_rx: uart3_rx {
+			pins = "uart3_sin";
+			bias-pull-up;
+		};
+
+		/* Set the drive strength of sdio d4 pin */
+		sdio0_d4: sdio0_d4 {
+			pins = "sdio0_data4";
+			drive-strength = <8>;
+		};
+	};
+
+List of supported functions and groups in Northstar2:
+
+"nand": "nand_grp"
+
+"nor": "nor_data_grp", "nor_adv_grp", "nor_addr_0_3_grp", "nor_addr_4_5_grp",
+	"nor_addr_6_7_grp", "nor_addr_8_9_grp", "nor_addr_10_11_grp",
+	"nor_addr_12_15_grp"
+
+"gpio": "gpio_0_1_grp", "gpio_2_5_grp", "gpio_6_7_grp", "gpio_8_9_grp",
+	"gpio_10_11_grp", "gpio_12_13_grp", "gpio_14_17_grp", "gpio_18_19_grp",
+	"gpio_20_21_grp", "gpio_22_23_grp", "gpio_24_25_grp", "gpio_26_27_grp",
+	"gpio_28_29_grp", "gpio_30_31_grp"
+
+"pcie": "pcie_ab1_clk_wak_grp", "pcie_a3_clk_wak_grp", "pcie_b3_clk_wak_grp",
+	"pcie_b2_clk_wak_grp", "pcie_a2_clk_wak_grp"
+
+"uart0": "uart0_modem_grp", "uart0_rts_cts_grp", "uart0_in_out_grp"
+
+"uart1": "uart1_ext_clk_grp", "uart1_dcd_dsr_grp", "uart1_ri_dtr_grp",
+	"uart1_rts_cts_grp", "uart1_in_out_grp"
+
+"uart2": "uart2_rts_cts_grp"
+
+"pwm": "pwm_0_grp", "pwm_1_grp", "pwm_2_grp", "pwm_3_grp"
+
+
+List of pins that support pinconf parameters:
+
+"qspi_wp", "qspi_hold", "qspi_cs", "qspi_sck", "uart3_sin", "uart3_sout",
+"qspi_mosi", "qspi_miso", "spi0_fss", "spi0_rxd", "spi0_txd", "spi0_sck",
+"spi1_fss", "spi1_rxd", "spi1_txd", "spi1_sck", "sdio0_data7",
+"sdio0_emmc_rst", "sdio0_led_on", "sdio0_wp", "sdio0_data3", "sdio0_data4",
+"sdio0_data5", "sdio0_data6", "sdio0_cmd", "sdio0_data0", "sdio0_data1",
+"sdio0_data2", "sdio1_led_on", "sdio1_wp", "sdio0_cd_l", "sdio0_clk",
+"sdio1_data5", "sdio1_data6", "sdio1_data7", "sdio1_emmc_rst", "sdio1_data1",
+"sdio1_data2", "sdio1_data3", "sdio1_data4", "sdio1_cd_l", "sdio1_clk",
+"sdio1_cmd", "sdio1_data0", "ext_mdio_0", "ext_mdc_0", "usb3_p1_vbus_ppc",
+"usb3_p1_overcurrent", "usb3_p0_vbus_ppc", "usb3_p0_overcurrent",
+"usb2_presence_indication", "usb2_vbus_present", "usb2_vbus_ppc",
+"usb2_overcurrent", "sata_led1", "sata_led0"
diff --git a/Documentation/devicetree/bindings/pinctrl/microchip,pic32-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/microchip,pic32-pinctrl.txt
new file mode 100644
index 0000000..4b5efa5
--- /dev/null
+++ b/Documentation/devicetree/bindings/pinctrl/microchip,pic32-pinctrl.txt
@@ -0,0 +1,60 @@
+* Microchip PIC32 Pin Controller
+
+Please refer to pinctrl-bindings.txt, ../gpio/gpio.txt, and
+../interrupt-controller/interrupts.txt for generic information regarding
+pin controller, GPIO, and interrupt bindings.
+
+PIC32 'pin configuration node' is a node of a group of pins which can be
+used for a specific device or function. This node represents the configuration
+of pins, an optional function, and optional mux-related settings.
+
+Required properties for pin controller node:
+ - compatible: "microchip,pic32mada-pinctrl"
+ - reg: Address range of the pinctrl registers.
+ - clocks: Clock specifier (see clock bindings for details)
+
+Required properties for pin configuration sub-nodes:
+ - pins: List of pins to which the configuration applies.
+
+Optional properties for pin configuration sub-nodes:
+----------------------------------------------------
+ - function: Mux function for the specified pins.
+ - bias-pull-up: Enable weak pull-up.
+ - bias-pull-down: Enable weak pull-down.
+ - input-enable: Set the pin as an input.
+ - output-low: Set the pin as an output level low.
+ - output-high: Set the pin as an output level high.
+ - microchip,digital: Enable digital I/O.
+ - microchip,analog: Enable analog I/O.
+
+Example:
+
+pic32_pinctrl: pinctrl@1f801400 {
+	#address-cells = <1>;
+	#size-cells = <1>;
+	compatible = "microchip,pic32mzda-pinctrl";
+	reg = <0x1f801400 0x400>;
+	clocks = <&PBCLK1>;
+
+	pinctrl_uart2: pinctrl_uart2 {
+		uart2-tx {
+			pins = "G9";
+			function = "U2TX";
+			microchip,digital;
+			output-low;
+		};
+		uart2-rx {
+			pins = "B0";
+			function = "U2RX";
+			microchip,digital;
+			input-enable;
+		};
+	};
+};
+
+uart2: serial@1f822200 {
+	compatible = "microchip,pic32mzda-uart";
+	reg = <0x1f822200 0x50>;
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_uart2>;
+};
diff --git a/Documentation/devicetree/bindings/pinctrl/nxp,lpc1850-scu.txt b/Documentation/devicetree/bindings/pinctrl/nxp,lpc1850-scu.txt
index df0309c..bd8b0c6 100644
--- a/Documentation/devicetree/bindings/pinctrl/nxp,lpc1850-scu.txt
+++ b/Documentation/devicetree/bindings/pinctrl/nxp,lpc1850-scu.txt
@@ -22,6 +22,10 @@
  - input-schmitt-disable
  - slew-rate
 
+NXP specific properties:
+ - nxp,gpio-pin-interrupt : Assign pin to gpio pin interrupt controller
+			    irq number 0 to 7. See example below.
+
 Not all pins support all properties so either refer to the NXP 1850/4350
 user manual or the pin table in the pinctrl-lpc18xx driver for supported
 pin properties.
@@ -54,4 +58,14 @@
 			bias-disable;
 		};
 	};
+
+	gpio_joystick_pins: gpio-joystick-pins {
+		gpio_joystick_1_cfg {
+			pins =  "p9_0";
+			function = "gpio";
+			nxp,gpio-pin-interrupt = <0>;
+			input-enable;
+			bias-disable;
+		};
+	};
 };
diff --git a/Documentation/devicetree/bindings/pinctrl/pinctrl-mt65xx.txt b/Documentation/devicetree/bindings/pinctrl/pinctrl-mt65xx.txt
index 9ffb0b2..17631d0 100644
--- a/Documentation/devicetree/bindings/pinctrl/pinctrl-mt65xx.txt
+++ b/Documentation/devicetree/bindings/pinctrl/pinctrl-mt65xx.txt
@@ -6,6 +6,7 @@
 - compatible: value should be one of the following.
 	"mediatek,mt2701-pinctrl", compatible with mt2701 pinctrl.
 	"mediatek,mt6397-pinctrl", compatible with mt6397 pinctrl.
+	"mediatek,mt7623-pinctrl", compatible with mt7623 pinctrl.
 	"mediatek,mt8127-pinctrl", compatible with mt8127 pinctrl.
 	"mediatek,mt8135-pinctrl", compatible with mt8135 pinctrl.
 	"mediatek,mt8173-pinctrl", compatible with mt8173 pinctrl.
diff --git a/Documentation/devicetree/bindings/pinctrl/qcom,ipq4019-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/qcom,ipq4019-pinctrl.txt
new file mode 100644
index 0000000..cfb8500
--- /dev/null
+++ b/Documentation/devicetree/bindings/pinctrl/qcom,ipq4019-pinctrl.txt
@@ -0,0 +1,74 @@
+Qualcomm Atheros IPQ4019 TLMM block
+
+This is the Top Level Mode Multiplexor block found on the Qualcomm IPQ4019
+platform; it provides pinctrl, pinmux, pinconf, and gpiolib facilities.
+
+Required properties:
+- compatible: "qcom,ipq4019-pinctrl"
+- reg: Should be the base address and length of the TLMM block.
+- interrupts: Should be the parent IRQ of the TLMM block.
+- interrupt-controller: Marks the device node as an interrupt controller.
+- #interrupt-cells: Should be two.
+- gpio-controller: Marks the device node as a GPIO controller.
+- #gpio-cells : Should be two.
+                The first cell is the gpio pin number and the
+                second cell is used for optional parameters.
+
+Please refer to ../gpio/gpio.txt and ../interrupt-controller/interrupts.txt for
+a general description of GPIO and interrupt bindings.
+
+Please refer to pinctrl-bindings.txt in this directory for details of the
+common pinctrl bindings used by client devices, including the meaning of the
+phrase "pin configuration node".
+
+The pin configuration nodes act as a container for an arbitrary number of
+subnodes. Each of these subnodes represents some desired configuration for a
+pin, a group, or a list of pins or groups. This configuration can include the
+mux function to select on those pin(s)/group(s), and various pin configuration
+parameters, such as pull-up, drive strength, etc.
+
+The name of each subnode is not important; all subnodes should be enumerated
+and processed purely based on their content.
+
+Each subnode only affects those parameters that are explicitly listed. In
+other words, a subnode that lists a mux function but no pin configuration
+parameters implies no information about any pin configuration parameters.
+Similarly, a pin subnode that describes a pullup parameter implies no
+information about e.g. the mux function.
+
+
+The following generic properties as defined in pinctrl-bindings.txt are valid
+to specify in a pin configuration subnode:
+ pins, function, bias-disable, bias-pull-down, bias-pull-up, drive-strength.
+
+Non-empty subnodes must specify the 'pins' property.
+Note that not all properties are valid for all pins.
+
+
+Valid values for pins are:
+  gpio0-gpio99
+    Supports mux, bias and drive-strength
+
+Valid values for function are:
+gpio, blsp_uart1, blsp_i2c0, blsp_i2c1, blsp_uart0, blsp_spi1, blsp_spi0
+
+Example:
+
+	tlmm: pinctrl@1000000 {
+		compatible = "qcom,ipq4019-pinctrl";
+		reg = <0x1000000 0x300000>;
+
+		gpio-controller;
+		#gpio-cells = <2>;
+		interrupt-controller;
+		#interrupt-cells = <2>;
+		interrupts = <0 208 0>;
+
+		serial_pins: serial_pinmux {
+			mux {
+				pins = "gpio60", "gpio61";
+				function = "blsp_uart0";
+				bias-disable;
+			};
+		};
+	};
diff --git a/Documentation/devicetree/bindings/pinctrl/rockchip,pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/rockchip,pinctrl.txt
index 0cd701b..c68b955 100644
--- a/Documentation/devicetree/bindings/pinctrl/rockchip,pinctrl.txt
+++ b/Documentation/devicetree/bindings/pinctrl/rockchip,pinctrl.txt
@@ -22,7 +22,7 @@
   - compatible: one of "rockchip,rk2928-pinctrl", "rockchip,rk3066a-pinctrl"
 		       "rockchip,rk3066b-pinctrl", "rockchip,rk3188-pinctrl"
 		       "rockchip,rk3228-pinctrl", "rockchip,rk3288-pinctrl"
-		       "rockchip,rk3368-pinctrl"
+		       "rockchip,rk3368-pinctrl", "rockchip,rk3399-pinctrl"
   - rockchip,grf: phandle referencing a syscon providing the
 	 "general register files"
 
diff --git a/Documentation/devicetree/bindings/pinctrl/st,stm32-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/st,stm32-pinctrl.txt
new file mode 100644
index 0000000..7b4800c
--- /dev/null
+++ b/Documentation/devicetree/bindings/pinctrl/st,stm32-pinctrl.txt
@@ -0,0 +1,126 @@
+* STM32 GPIO and Pin Mux/Config controller
+
+STMicroelectronics' STM32 MCUs integrate a GPIO and pin mux/config hardware
+controller. It controls the input/output settings on the available pins and
+also provides ability to multiplex and configure the output of various on-chip
+controllers onto these pads.
+
+Pin controller node:
+Required properties:
+ - compatible: value should be one of the following:
+   (a) "st,stm32f429-pinctrl"
+ - #address-cells: The value of this property must be 1
+ - #size-cells	: The value of this property must be 1
+ - ranges	: defines the mapping from the pin controller node (parent) to
+   the gpio-bank nodes (children).
+ - pins-are-numbered: Specifies that the subnodes use a numbered pinmux to
+   specify pins.
+
+GPIO controller/bank node:
+Required properties:
+ - gpio-controller : Indicates this device is a GPIO controller
+ - #gpio-cells	  : Should be two.
+			The first cell is the pin number
+			The second one is the polarity:
+				- 0 for active high
+				- 1 for active low
+ - reg		  : The gpio address range, relative to the pinctrl range
+ - clocks	  : clock that drives this bank
+ - st,bank-name	  : Should be a name string for this bank as specified in
+   the datasheet
+
+Optional properties:
+ - resets	  : Reference to the reset controller
+
+Example:
+#include <dt-bindings/pinctrl/stm32f429-pinfunc.h>
+...
+
+	pin-controller {
+		#address-cells = <1>;
+		#size-cells = <1>;
+		compatible = "st,stm32f429-pinctrl";
+		ranges = <0 0x40020000 0x3000>;
+		pins-are-numbered;
+
+		gpioa: gpio@40020000 {
+			gpio-controller;
+			#gpio-cells = <2>;
+			reg = <0x0 0x400>;
+			resets = <&reset_ahb1 0>;
+			st,bank-name = "GPIOA";
+		};
+		...
+		pin-functions nodes follow...
+	};
+
+Contents of function subnode:
+----------------------------------
+Subnode format
+A pinctrl node should contain at least one subnode representing the
+pinctrl group available on the machine. Each subnode will list the
+pins it needs, and how they should be configured, with regard to muxer
+configuration, pullups, drive, output high/low and output speed.
+
+    node {
+	pinmux = <PIN_NUMBER_PINMUX>;
+	GENERIC_PINCONFIG;
+    };
+
+Required properties:
+- pinmux: integer array, represents gpio pin number and mux setting.
+  Supported pin number and mux varies for different SoCs, and are defined in
+  dt-bindings/pinctrl/<soc>-pinfunc.h directly.
+  These defines are calculated as:
+    ((port * 16 + line) << 8) | function
+  With:
+    - port: The gpio port index (PA = 0, PB = 1, ..., PK = 11)
+    - line: The line offset within the port (PA0 = 0, PA1 = 1, ..., PA15 = 15)
+    - function: The function number, can be:
+      * 0 : GPIO
+      * 1 : Alternate Function 0
+      * 2 : Alternate Function 1
+      * 3 : Alternate Function 2
+      * ...
+      * 16 : Alternate Function 15
+      * 17 : Analog
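+  As an illustrative example of this encoding (assuming the numbering above),
+  PA9 configured for Alternate Function 7 (USART1_TX on the STM32F429) is
+  encoded as ((0 * 16 + 9) << 8) | 8 = 0x908, which corresponds to the
+  STM32F429_PA9_FUNC_USART1_TX define used in the example below.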
+
+Optional properties:
+- GENERIC_PINCONFIG: is the generic pinconfig options to use.
+  Available options are:
+   - bias-disable,
+   - bias-pull-down,
+   - bias-pull-up,
+   - drive-push-pull,
+   - drive-open-drain,
+   - output-low
+   - output-high
+   - slew-rate = <x>, with x being:
+       < 0 > : Low speed
+       < 1 > : Medium speed
+       < 2 > : Fast speed
+       < 3 > : High speed
+
+Example:
+
+pin-controller {
+...
+	usart1_pins_a: usart1@0 {
+		pins1 {
+			pinmux = <STM32F429_PA9_FUNC_USART1_TX>;
+			bias-disable;
+			drive-push-pull;
+			slew-rate = <0>;
+		};
+		pins2 {
+			pinmux = <STM32F429_PA10_FUNC_USART1_RX>;
+			bias-disable;
+		};
+	};
+};
+
+&usart1 {
+	pinctrl-0 = <&usart1_pins_a>;
+	pinctrl-names = "default";
+	status = "okay";
+};
diff --git a/Documentation/devicetree/bindings/regmap/regmap.txt b/Documentation/devicetree/bindings/regmap/regmap.txt
index b494f8b..e98a9652 100644
--- a/Documentation/devicetree/bindings/regmap/regmap.txt
+++ b/Documentation/devicetree/bindings/regmap/regmap.txt
@@ -5,15 +5,18 @@
 ---------------------------------------------------
 1         BE         'big-endian'
 2         LE         'little-endian'
+3         Native     'native-endian'
 
 For one device driver, which will run in different scenarios above
 on different SoCs using the devicetree, we need one way to simplify
 this.
 
-Required properties:
-- {big,little}-endian: these are boolean properties, if absent
-  meaning that the CPU and the Device are in the same endianness mode,
-  these properties are for register values and all the buffers only.
+Optional properties:
+- {big,little,native}-endian: these are boolean properties, if absent
+  then the implementation will choose a default based on the device
+  being controlled.  These properties are for register values and all
+  the buffers only.  Native endian means that the CPU and device have
+  the same endianness.
 
 Examples:
 Scenario 1 : CPU in LE mode & device in LE mode.
diff --git a/Documentation/devicetree/bindings/regulator/act8945a-regulator.txt b/Documentation/devicetree/bindings/regulator/act8945a-regulator.txt
new file mode 100644
index 0000000..5c80a77
--- /dev/null
+++ b/Documentation/devicetree/bindings/regulator/act8945a-regulator.txt
@@ -0,0 +1,80 @@
+Device-Tree bindings for regulators of Active-semi ACT8945A Multi-Function Device
+
+Required properties:
+ - compatible: "active-semi,act8945a", please refer to ../mfd/act8945a.txt.
+
+Optional properties:
+- active-semi,vsel-high: Indicates if the VSEL pin is set to logic-high.
+  If this property is missing, assume the VSEL pin is set to logic-low.
+
+Optional input supply properties:
+  - vp1-supply: The input supply for REG_DCDC1
+  - vp2-supply: The input supply for REG_DCDC2
+  - vp3-supply: The input supply for REG_DCDC3
+  - inl45-supply: The input supply for REG_LDO1 and REG_LDO2
+  - inl67-supply: The input supply for REG_LDO3 and REG_LDO4
+
+Any standard regulator properties can be used to configure each regulator.
+
+The valid names for regulators are:
+	REG_DCDC1, REG_DCDC2, REG_DCDC3, REG_LDO1, REG_LDO2, REG_LDO3, REG_LDO4.
+
+Example:
+	pmic@5b {
+		compatible = "active-semi,act8945a";
+		reg = <0x5b>;
+		status = "okay";
+
+		active-semi,vsel-high;
+
+		regulators {
+			vdd_1v35_reg: REG_DCDC1 {
+				regulator-name = "VDD_1V35";
+				regulator-min-microvolt = <1350000>;
+				regulator-max-microvolt = <1350000>;
+				regulator-always-on;
+			};
+
+			vdd_1v2_reg: REG_DCDC2 {
+				regulator-name = "VDD_1V2";
+				regulator-min-microvolt = <1100000>;
+				regulator-max-microvolt = <1300000>;
+				regulator-always-on;
+			};
+
+			vdd_3v3_reg: REG_DCDC3 {
+				regulator-name = "VDD_3V3";
+				regulator-min-microvolt = <3300000>;
+				regulator-max-microvolt = <3300000>;
+				regulator-always-on;
+			};
+
+			vdd_fuse_reg: REG_LDO1 {
+				regulator-name = "VDD_FUSE";
+				regulator-min-microvolt = <2500000>;
+				regulator-max-microvolt = <2500000>;
+				regulator-always-on;
+			};
+
+			vdd_3v3_lp_reg: REG_LDO2 {
+				regulator-name = "VDD_3V3_LP";
+				regulator-min-microvolt = <3300000>;
+				regulator-max-microvolt = <3300000>;
+				regulator-always-on;
+			};
+
+			vdd_led_reg: REG_LDO3 {
+				regulator-name = "VDD_LED";
+				regulator-min-microvolt = <3300000>;
+				regulator-max-microvolt = <3300000>;
+				regulator-always-on;
+			};
+
+			vdd_sdhc_1v8_reg: REG_LDO4 {
+				regulator-name = "VDD_SDHC_1V8";
+				regulator-min-microvolt = <1800000>;
+				regulator-max-microvolt = <1800000>;
+				regulator-always-on;
+			};
+		};
+	};
diff --git a/Documentation/devicetree/bindings/regulator/hisilicon,hi655x-regulator.txt b/Documentation/devicetree/bindings/regulator/hisilicon,hi655x-regulator.txt
new file mode 100644
index 0000000..14cfdc5
--- /dev/null
+++ b/Documentation/devicetree/bindings/regulator/hisilicon,hi655x-regulator.txt
@@ -0,0 +1,29 @@
+Hisilicon Hi655x Voltage regulators
+
+Note:
+The Hi655x regulator control is managed by the Hi655x PMIC,
+so the regulator nodes must be child nodes of the Hi655x
+PMIC node.
+
+The driver uses the regulator core framework, so please also
+refer to the bindings in regulator.txt.
+
+The valid names for regulators are:
+
+LDO2_2V8 LDO7_SDIO LDO10_2V85 LDO13_1V8 LDO14_2V8
+LDO15_1V8 LDO17_2V5 LDO19_3V0 LDO21_1V8 LDO22_1V2
+
+Example:
+        pmic: pmic@f8000000 {
+                compatible = "hisilicon,hi655x-pmic";
+		...
+		regulators {
+			ldo2: LDO2@a21 {
+				regulator-name = "LDO2_2V8";
+				regulator-min-microvolt = <2500000>;
+				regulator-max-microvolt = <3200000>;
+				regulator-enable-ramp-delay = <120>;
+			};
+			...
+		};
+	};
diff --git a/Documentation/devicetree/bindings/regulator/lp872x.txt b/Documentation/devicetree/bindings/regulator/lp872x.txt
index 7818318..ca58a68 100644
--- a/Documentation/devicetree/bindings/regulator/lp872x.txt
+++ b/Documentation/devicetree/bindings/regulator/lp872x.txt
@@ -28,6 +28,7 @@
   - ti,dvs-gpio: GPIO specifier for external DVS pin control of LP872x devices.
   - ti,dvs-vsel: DVS selector. 0 = SEL_V1, 1 = SEL_V2.
   - ti,dvs-state: initial DVS pin state. 0 = DVS_LOW, 1 = DVS_HIGH.
+  - enable-gpios: GPIO specifier for EN pin control of LP872x devices.
 
   Sub nodes for regulator_init_data
     LP8720 has maximum 6 nodes. (child name: ldo1 ~ 5 and buck)
diff --git a/Documentation/devicetree/bindings/regulator/max77802.txt b/Documentation/devicetree/bindings/regulator/max77802.txt
index 09d796e..879e98d 100644
--- a/Documentation/devicetree/bindings/regulator/max77802.txt
+++ b/Documentation/devicetree/bindings/regulator/max77802.txt
@@ -60,7 +60,7 @@
 	1: Normal regulator voltage output mode.
 	3: Low Power which reduces the quiescent current down to only 1uA
 
-The list of valid modes are defined in the dt-bindings/clock/maxim,max77802.h
+The valid modes list is defined in the dt-bindings/regulator/maxim,max77802.h
 header and can be included by device tree source files.
 
 The standard "regulator-mode" property can only be used for regulators that
diff --git a/Documentation/devicetree/bindings/regulator/regulator-max77620.txt b/Documentation/devicetree/bindings/regulator/regulator-max77620.txt
new file mode 100644
index 0000000..b3c8ca6
--- /dev/null
+++ b/Documentation/devicetree/bindings/regulator/regulator-max77620.txt
@@ -0,0 +1,200 @@
+Regulator DT binding for MAX77620 Power management IC from Maxim Semiconductor.
+
+The device has multiple DCDCs (sd[0-3]) and LDOs (ldo[0-8]). The input supplies
+of these regulators are defined under the parent device node.
+The regulator properties are defined in child nodes under the
+sub-node "regulators", which is itself a child node of the device node.
+
+Please refer to the file <Documentation/devicetree/bindings/regulator/regulator.txt>
+for the common regulator bindings used by clients.
+
+The following are properties of the parent node related to the regulators.
+
+Optional properties:
+-------------------
+The input supplies of the regulators are optional properties on the
+parent device node. They are provided through the following
+properties:
+in-sd0-supply:		Input supply for SD0, INA-SD0 or INB-SD0 pins.
+in-sd1-supply:		Input supply for SD1.
+in-sd2-supply:		Input supply for SD2.
+in-sd3-supply:		Input supply for SD3.
+in-ldo0-1-supply:	Input supply for LDO0 and LDO1.
+in-ldo2-supply:		Input supply for LDO2.
+in-ldo3-5-supply:	Input supply for LDO3 and LDO5
+in-ldo4-6-supply:	Input supply for LDO4 and LDO6.
+in-ldo7-8-supply:	Input supply for LDO7 and LDO8.
+
+Optional sub nodes for regulators under "regulators" subnode:
+------------------------------------------------------------
+The subnode name is the name of the regulator and must be one of:
+	sd[0-3], ldo[0-8]
+
+Each sub-node should contain the constraints and initialization
+information for that regulator. Each of these
+nodes is described using the standard binding for regulators found at
+<Documentation/devicetree/bindings/regulator/regulator.txt>.
+
+There are also additional properties for the SDs/LDOs. These properties are
+required to configure the FPS configuration parameters of the SDs and LDOs.
+Please refer to <devicetree/bindings/mfd/max77620.txt> for more details on the
+Flexible Power Sequence (FPS).
+The additional properties are:
+
+- maxim,active-fps-source:		FPS source for the regulators to get
+					enabled/disabled when the system is in
+					the active state.  Valid values are:
+					- MAX77620_FPS_SRC_0,
+						FPS source is FPS0.
+					- MAX77620_FPS_SRC_1,
+						FPS source is FPS1.
+					- MAX77620_FPS_SRC_2,
+						FPS source is FPS2.
+					- MAX77620_FPS_SRC_NONE,
+						Regulator is not controlled
+						by FPS events and it gets
+						enabled/disabled by register
+						access.
+					Absence of this property will leave
+					the FPS configuration register for that
+					regulator at its default configuration.
+
+- maxim,active-fps-power-up-slot:	Sequencing event slot number on which
+					the regulator gets enabled when the
+					master FPS input event is set to HIGH.
+					Valid values are 0 to 7.
+					This is applicable if FPS source is
+					selected as FPS0, FPS1 or FPS2.
+
+- maxim,active-fps-power-down-slot:	Sequencing event slot number on which
+					the regulator gets disabled when the
+					master FPS input event is set to LOW.
+					Valid values are 0 to 7.
+					This is applicable if FPS source is
+					selected as FPS0, FPS1 or FPS2.
+
+- maxim,suspend-fps-source:		This is the same as the property
+					"maxim,active-fps-source" but the value
+					gets configured when the system enters
+					the suspend state.
+
+- maxim,suspend-fps-power-up-slot:	This is the same as the property
+					"maxim,active-fps-power-up-slot" but
+					this value gets configured into the FPS
+					configuration register when the system
+					enters suspend.
+					This is applicable if the suspend state
+					FPS source is selected as FPS0, FPS1 or
+					FPS2.
+
+- maxim,suspend-fps-power-down-slot:	This is the same as the property
+					"maxim,active-fps-power-down-slot" but
+					this value gets configured into the FPS
+					configuration register when the system
+					enters suspend.
+					This is applicable if the suspend state
+					FPS source is selected as FPS0, FPS1 or
+					FPS2.
+
+Example:
+--------
+#include <dt-bindings/mfd/max77620.h>
+...
+max77620@3c {
+	in-ldo0-1-supply = <&max77620_sd2>;
+	in-ldo7-8-supply = <&max77620_sd2>;
+	regulators {
+		sd0 {
+			regulator-name = "vdd-core";
+			regulator-min-microvolt = <600000>;
+			regulator-max-microvolt = <1400000>;
+			regulator-boot-on;
+			regulator-always-on;
+			maxim,active-fps-source = <MAX77620_FPS_SRC_1>;
+		};
+
+		sd1 {
+			regulator-name = "vddio-ddr";
+			regulator-min-microvolt = <1200000>;
+			regulator-max-microvolt = <1200000>;
+			regulator-always-on;
+			regulator-boot-on;
+			maxim,active-fps-source = <MAX77620_FPS_SRC_0>;
+		};
+
+		sd2 {
+			regulator-name = "vdd-pre-reg";
+			regulator-min-microvolt = <1350000>;
+			regulator-max-microvolt = <1350000>;
+		};
+
+		sd3 {
+			regulator-name = "vdd-1v8";
+			regulator-min-microvolt = <1800000>;
+			regulator-max-microvolt = <1800000>;
+			regulator-always-on;
+			regulator-boot-on;
+		};
+
+		ldo0 {
+			regulator-name = "avdd-sys";
+			regulator-min-microvolt = <1200000>;
+			regulator-max-microvolt = <1200000>;
+			regulator-always-on;
+			regulator-boot-on;
+		};
+
+		ldo1 {
+			regulator-name = "vdd-pex";
+			regulator-min-microvolt = <1050000>;
+			regulator-max-microvolt = <1050000>;
+		};
+
+		ldo2 {
+			regulator-name = "vddio-sdmmc3";
+			regulator-min-microvolt = <1800000>;
+			regulator-max-microvolt = <3300000>;
+		};
+
+		ldo3 {
+			regulator-name = "vdd-cam-hv";
+			regulator-min-microvolt = <2800000>;
+			regulator-max-microvolt = <2800000>;
+		};
+
+		ldo4 {
+			regulator-name = "vdd-rtc";
+			regulator-min-microvolt = <1250000>;
+			regulator-max-microvolt = <1250000>;
+			regulator-always-on;
+			regulator-boot-on;
+		};
+
+		ldo5 {
+			regulator-name = "avdd-ts-hv";
+			regulator-min-microvolt = <3000000>;
+			regulator-max-microvolt = <3000000>;
+		};
+
+		ldo6 {
+			regulator-name = "vdd-ts";
+			regulator-min-microvolt = <1800000>;
+			regulator-max-microvolt = <1800000>;
+			regulator-always-on;
+			regulator-boot-on;
+		};
+
+		ldo7 {
+			regulator-name = "vdd-gen-pll-edp";
+			regulator-min-microvolt = <1050000>;
+			regulator-max-microvolt = <1050000>;
+			regulator-always-on;
+			regulator-boot-on;
+		};
+
+		ldo8 {
+			regulator-name = "vdd-hdmi-dp";
+			regulator-min-microvolt = <1050000>;
+			regulator-max-microvolt = <1050000>;
+		};
+	};
+};
diff --git a/Documentation/devicetree/bindings/regulator/regulator.txt b/Documentation/devicetree/bindings/regulator/regulator.txt
index 1d112fc..ecfc593 100644
--- a/Documentation/devicetree/bindings/regulator/regulator.txt
+++ b/Documentation/devicetree/bindings/regulator/regulator.txt
@@ -44,6 +44,11 @@
   any consumer request.
 - regulator-pull-down: Enable pull down resistor when the regulator is disabled.
 - regulator-over-current-protection: Enable over current protection.
+- regulator-active-discharge: tristate, enable/disable active discharge of
+  regulators. The values are:
+	0: Disable active discharge.
+	1: Enable active discharge.
+	Absence of this property will leave configuration to default.
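+	A minimal illustrative fragment (the node name "ldo1" is a placeholder,
+	not defined by this binding) enabling active discharge could look like:
+		ldo1 {
+			regulator-active-discharge = <1>;
+		};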
 
 Deprecated properties:
 - regulator-compatible: If a regulator chip contains multiple
diff --git a/Documentation/devicetree/bindings/rtc/alphascale,asm9260-rtc.txt b/Documentation/devicetree/bindings/rtc/alphascale,asm9260-rtc.txt
new file mode 100644
index 0000000..76ebca5
--- /dev/null
+++ b/Documentation/devicetree/bindings/rtc/alphascale,asm9260-rtc.txt
@@ -0,0 +1,19 @@
+* Alphascale asm9260 SoC Real Time Clock
+
+Required properties:
+- compatible: Should be "alphascale,asm9260-rtc"
+- reg: Physical base address of the controller and length
+       of memory mapped region.
+- interrupts: IRQ line for the RTC.
+- clocks: Reference to the clock entry.
+- clock-names: should contain:
+  * "ahb" for the SoC RTC clock
+
+Example:
+rtc0: rtc@800a0000 {
+	compatible = "alphascale,asm9260-rtc";
+	reg = <0x800a0000 0x100>;
+	clocks = <&acc CLKID_AHB_RTC>;
+	clock-names = "ahb";
+	interrupts = <2>;
+};
diff --git a/Documentation/devicetree/bindings/rtc/epson,rx6110.txt b/Documentation/devicetree/bindings/rtc/epson,rx6110.txt
new file mode 100644
index 0000000..3dc313e
--- /dev/null
+++ b/Documentation/devicetree/bindings/rtc/epson,rx6110.txt
@@ -0,0 +1,39 @@
+Epson RX6110 Real Time Clock
+============================
+
+The Epson RX6110 can be used with SPI or I2C busses. The kind of
+bus depends on the SPISEL pin and cannot be configured via software.
+
+I2C mode
+--------
+
+Required properties:
+  - compatible: should be: "epson,rx6110"
+  - reg : the I2C address of the device for I2C
+
+Example:
+
+	rtc: rtc@32 {
+		compatible = "epson,rx6110";
+		reg = <0x32>;
+	};
+
+SPI mode
+--------
+
+Required properties:
+  - compatible: should be: "epson,rx6110"
+  - reg: chip select number
+  - spi-cs-high: RX6110 needs chipselect high
+  - spi-cpha: RX6110 works with SPI shifted clock phase
+  - spi-cpol: RX6110 works with SPI inverse clock polarity
+
+Example:
+
+	rtc: rtc@3 {
+		compatible = "epson,rx6110";
+		reg = <3>;
+		spi-cs-high;
+		spi-cpha;
+		spi-cpol;
+	};
diff --git a/Documentation/devicetree/bindings/rtc/maxim,ds3231.txt b/Documentation/devicetree/bindings/rtc/maxim,ds3231.txt
new file mode 100644
index 0000000..ddef330
--- /dev/null
+++ b/Documentation/devicetree/bindings/rtc/maxim,ds3231.txt
@@ -0,0 +1,37 @@
+* Maxim DS3231 Real Time Clock
+
+Required properties:
+see: Documentation/devicetree/bindings/i2c/trivial-devices.txt
+
+Optional properties:
+- #clock-cells: Should be 1.
+- clock-output-names:
+  overwrite the default clock names "ds3231_clk_sqw" and "ds3231_clk_32khz".
+
+Each clock is assigned an identifier and client nodes can use this identifier
+to specify the clock which they consume. Following indices are allowed:
+    - 0: square-wave output on the SQW pin
+    - 1: square-wave output on the 32kHz pin
+
+- interrupts: rtc alarm/event interrupt. When this property is selected,
+  the clock on the SQW pin cannot be used.
+
+Example:
+
+ds3231: ds3231@68 {
+	compatible = "maxim,ds3231";
+	reg = <0x68>;
+	#clock-cells = <1>;
+};
+
+device1 {
+...
+	clocks = <&ds3231 0>;
+...
+};
+
+device2 {
+...
+	clocks = <&ds3231 1>;
+...
+};
diff --git a/Documentation/devicetree/bindings/rtc/microchip,pic32-rtc.txt b/Documentation/devicetree/bindings/rtc/microchip,pic32-rtc.txt
new file mode 100644
index 0000000..180b714
--- /dev/null
+++ b/Documentation/devicetree/bindings/rtc/microchip,pic32-rtc.txt
@@ -0,0 +1,21 @@
+* Microchip PIC32 Real Time Clock and Calendar
+
+The RTCC keeps time in hours, minutes, and seconds, with a resolution of one
+half second. It provides a calendar in weekday, date, month, and year. It also
+provides a configurable alarm.
+
+Required properties:
+- compatible: should be: "microchip,pic32mzda-rtc"
+- reg: physical base address of the controller and length of memory mapped
+    region.
+- interrupts: RTC alarm/event interrupt
+- clocks: clock phandle
+
+Example:
+
+	rtc: rtc@1f8c0000 {
+		compatible = "microchip,pic32mzda-rtc";
+		reg = <0x1f8c0000 0x60>;
+		interrupts = <166 IRQ_TYPE_EDGE_RISING>;
+		clocks = <&PBCLK6>;
+	};
diff --git a/Documentation/devicetree/bindings/sound/cs4271.txt b/Documentation/devicetree/bindings/sound/cs4271.txt
index e2cd1d7..6e699ce 100644
--- a/Documentation/devicetree/bindings/sound/cs4271.txt
+++ b/Documentation/devicetree/bindings/sound/cs4271.txt
@@ -33,12 +33,19 @@
 	Note that this is not needed in case the clocks are stable
 	throughout the entire runtime of the codec.
 
+ - vd-supply:	Digital power
+ - vl-supply:	Logic power
+ - va-supply:	Analog power
+
 Examples:
 
 	codec_i2c: cs4271@10 {
 		compatible = "cirrus,cs4271";
 		reg = <0x10>;
 		reset-gpio = <&gpio 23 0>;
+		vd-supply = <&vdd_3v3_reg>;
+		vl-supply = <&vdd_3v3_reg>;
+		va-supply = <&vdd_3v3_reg>;
 	};
 
 	codec_spi: cs4271@0 {
diff --git a/Documentation/devicetree/bindings/spi/adi,axi-spi-engine.txt b/Documentation/devicetree/bindings/spi/adi,axi-spi-engine.txt
new file mode 100644
index 0000000..8a18d71
--- /dev/null
+++ b/Documentation/devicetree/bindings/spi/adi,axi-spi-engine.txt
@@ -0,0 +1,31 @@
+Analog Devices AXI SPI Engine controller Device Tree Bindings
+
+Required properties:
+- compatible		: Must be "adi,axi-spi-engine-1.00.a"
+- reg			: Physical base address and size of the register map.
+- interrupts		: Property with a value describing the interrupt
+			  number.
+- clock-names		: List of input clock names - "s_axi_aclk", "spi_clk"
+- clocks		: Clock phandles and specifiers (See clock bindings for
+			  details on clock-names and clocks).
+- #address-cells	: Must be <1>
+- #size-cells		: Must be <0>
+
+Optional subnodes:
+	Subnodes are used to represent the SPI slave devices connected to the SPI
+	master. They follow the generic SPI bindings as outlined in spi-bus.txt.
+
+Example:
+
+    spi@44a00000 {
+		compatible = "adi,axi-spi-engine-1.00.a";
+		reg = <0x44a00000 0x1000>;
+		interrupts = <0 56 4>;
+		clocks = <&clkc 15 &clkc 15>;
+		clock-names = "s_axi_aclk", "spi_clk";
+
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		/* SPI devices */
+    };
diff --git a/Documentation/devicetree/bindings/spi/icpdas-lp8841-spi-rtc.txt b/Documentation/devicetree/bindings/spi/icpdas-lp8841-spi-rtc.txt
new file mode 100644
index 0000000..852b651
--- /dev/null
+++ b/Documentation/devicetree/bindings/spi/icpdas-lp8841-spi-rtc.txt
@@ -0,0 +1,54 @@
+* ICP DAS LP-8841 SPI Controller for RTC
+
+The ICP DAS LP-8841 contains a DS-1302 RTC. The RTC is connected to an I/O
+memory register, which acts as an SPI master device.
+
+The device uses the standard MicroWire half-duplex transfer timing.
+Master output is set on low clock and sensed by the RTC on the rising
+edge. Master input is set by the RTC on the trailing edge and is sensed
+by the master on low clock.
+
+Required properties:
+
+- #address-cells: should be 1
+
+- #size-cells: should be 0
+
+- compatible: should be "icpdas,lp8841-spi-rtc"
+
+- reg: should provide IO memory address
+
+Requirements for SPI slave nodes:
+
+- There can be only one slave device.
+
+- The SPI slave node should claim the following flags, which are
+  required by the SPI controller.
+
+  - spi-3wire: The master itself has only 3 wires. It cannot work in
+    full-duplex mode.
+
+  - spi-cs-high: DS-1302 has an active-high chip select line. The master
+    doesn't support active low.
+
+  - spi-lsb-first: DS-1302 requires least-significant-bit-first
+    transfers. The master only supports this type of bit ordering.
+
+
+Example:
+
+spi@901c {
+	#address-cells = <1>;
+	#size-cells = <0>;
+	compatible = "icpdas,lp8841-spi-rtc";
+	reg = <0x901c 0x1>;
+
+	rtc@0 {
+		compatible = "maxim,ds1302";
+		reg = <0>;
+		spi-max-frequency = <500000>;
+		spi-3wire;
+		spi-lsb-first;
+		spi-cs-high;
+	};
+};
diff --git a/Documentation/devicetree/bindings/spi/spi-rockchip.txt b/Documentation/devicetree/bindings/spi/spi-rockchip.txt
index 0c491bd..1b14d69 100644
--- a/Documentation/devicetree/bindings/spi/spi-rockchip.txt
+++ b/Documentation/devicetree/bindings/spi/spi-rockchip.txt
@@ -9,6 +9,7 @@
     "rockchip,rk3066-spi" for rk3066.
     "rockchip,rk3188-spi", "rockchip,rk3066-spi" for rk3188.
     "rockchip,rk3288-spi", "rockchip,rk3066-spi" for rk3288.
+    "rockchip,rk3399-spi", "rockchip,rk3066-spi" for rk3399.
 - reg: physical base address of the controller and length of memory mapped
        region.
 - interrupts: The interrupt number to the cpu. The interrupt specifier format
diff --git a/Documentation/devicetree/bindings/spi/spi-xilinx.txt b/Documentation/devicetree/bindings/spi/spi-xilinx.txt
new file mode 100644
index 0000000..c7b7856
--- /dev/null
+++ b/Documentation/devicetree/bindings/spi/spi-xilinx.txt
@@ -0,0 +1,22 @@
+Xilinx SPI controller Device Tree Bindings
+-------------------------------------------------
+
+Required properties:
+- compatible		: Should be "xlnx,xps-spi-2.00.a" or "xlnx,xps-spi-2.00.b"
+- reg			: Physical base address and size of SPI registers map.
+- interrupts		: Property with a value describing the interrupt
+			  number.
+- interrupt-parent	: Must be core interrupt controller
+
+Optional properties:
+- xlnx,num-ss-bits	: Number of chip selects used.
+
+Example:
+	axi_quad_spi@41e00000 {
+			compatible = "xlnx,xps-spi-2.00.a";
+			interrupt-parent = <&intc>;
+			interrupts = <0 31 1>;
+			reg = <0x41e00000 0x10000>;
+			xlnx,num-ss-bits = <0x1>;
+	};
+
diff --git a/Documentation/devicetree/bindings/vendor-prefixes.txt b/Documentation/devicetree/bindings/vendor-prefixes.txt
index 72e2c5a..dd72e05 100644
--- a/Documentation/devicetree/bindings/vendor-prefixes.txt
+++ b/Documentation/devicetree/bindings/vendor-prefixes.txt
@@ -120,6 +120,7 @@
 invensense	InvenSense Inc.
 isee	ISEE 2007 S.L.
 isil	Intersil
+issi	Integrated Silicon Solutions Inc.
 jedec	JEDEC Solid State Technology Association
 karo	Ka-Ro electronics GmbH
 keymile	Keymile GmbH
@@ -204,6 +205,7 @@
 semtech	Semtech Corporation
 sgx	SGX Sensortech
 sharp	Sharp Corporation
+si-en	Si-En Technology Ltd.
 sigma	Sigma Designs, Inc.
 sil	Silicon Image
 silabs	Silicon Laboratories
diff --git a/Documentation/hwmon/adm1275 b/Documentation/hwmon/adm1275
index d697229..791bc0b 100644
--- a/Documentation/hwmon/adm1275
+++ b/Documentation/hwmon/adm1275
@@ -14,6 +14,10 @@
     Prefix: 'adm1276'
     Addresses scanned: -
     Datasheet: www.analog.com/static/imported-files/data_sheets/ADM1276.pdf
+  * Analog Devices ADM1278
+    Prefix: 'adm1278'
+    Addresses scanned: -
+    Datasheet: www.analog.com/static/imported-files/data_sheets/ADM1278.pdf
   * Analog Devices ADM1293/ADM1294
     Prefix: 'adm1293', 'adm1294'
     Addresses scanned: -
@@ -25,13 +29,15 @@
 Description
 -----------
 
-This driver supports hardware montoring for Analog Devices ADM1075, ADM1275,
-ADM1276, ADM1293, and ADM1294 Hot-Swap Controller and Digital Power Monitors.
+This driver supports hardware monitoring for Analog Devices ADM1075, ADM1275,
+ADM1276, ADM1278, ADM1293, and ADM1294 Hot-Swap Controller and Digital
+Power Monitors.
 
-ADM1075, ADM1275, ADM1276, ADM1293, and ADM1294 are hot-swap controllers that
-allow a circuit board to be removed from or inserted into a live backplane.
-They also feature current and voltage readback via an integrated 12
-bit analog-to-digital converter (ADC), accessed using a PMBus interface.
+ADM1075, ADM1275, ADM1276, ADM1278, ADM1293, and ADM1294 are hot-swap
+controllers that allow a circuit board to be removed from or inserted into
+a live backplane. They also feature current and voltage readback via an
+integrated 12 bit analog-to-digital converter (ADC), accessed using a
+PMBus interface.
 
 The driver is a client driver to the core PMBus driver. Please see
 Documentation/hwmon/pmbus for details on PMBus client drivers.
@@ -96,3 +102,14 @@
 
 			Power attributes are supported on ADM1075, ADM1276,
 			ADM1293, and ADM1294.
+
+temp1_input		Chip temperature.
+			Temperature attributes are only available on ADM1278.
+temp1_max		Maximum chip temperature.
+temp1_max_alarm		Temperature alarm.
+temp1_crit		Critical chip temperature.
+temp1_crit_alarm	Critical temperature high alarm.
+temp1_highest		Highest observed temperature.
+temp1_reset_history	Write any value to reset history.
+
+			Temperature attributes are supported on ADM1278.
diff --git a/Documentation/hwmon/lm25066 b/Documentation/hwmon/lm25066
index b34c3de..2cb20eb 100644
--- a/Documentation/hwmon/lm25066
+++ b/Documentation/hwmon/lm25066
@@ -36,7 +36,7 @@
 Description
 -----------
 
-This driver supports hardware montoring for National Semiconductor / TI LM25056,
+This driver supports hardware monitoring for National Semiconductor / TI LM25056,
 LM25063, LM25066, LM5064, and LM5066 Power Management, Monitoring, Control, and
 Protection ICs.
 
diff --git a/Documentation/hwmon/ltc2990 b/Documentation/hwmon/ltc2990
new file mode 100644
index 0000000..c25211e
--- /dev/null
+++ b/Documentation/hwmon/ltc2990
@@ -0,0 +1,43 @@
+Kernel driver ltc2990
+=====================
+
+Supported chips:
+  * Linear Technology LTC2990
+    Prefix: 'ltc2990'
+    Addresses scanned: -
+    Datasheet: http://www.linear.com/product/ltc2990
+
+Author: Mike Looijmans <mike.looijmans@topic.nl>
+
+
+Description
+-----------
+
+LTC2990 is a Quad I2C Voltage, Current and Temperature Monitor.
+The chip's inputs can measure 4 voltages, or two inputs together (1+2 and 3+4)
+can be combined to measure a differential voltage, which is typically used to
+measure current through a series resistor, or a temperature.
+
+This driver currently uses the 2x differential mode only. In order to support
+other modes, the driver will need to be expanded.
+
+
+Usage Notes
+-----------
+
+This driver does not probe for LTC2990 devices. You will have to instantiate
+devices explicitly.
+
+
+Sysfs attributes
+----------------
+
+The "curr*_input" measurements actually report the voltage drop across the
+input pins in microvolts. This is equivalent to the current through a 1mOhm
+sense resistor. Divide the reported value by the actual sense resistor value
+in mOhm to get the actual value.
+
+in0_input     Voltage at Vcc pin in millivolts (range 2.5V to 5V)
+temp1_input   Internal chip temperature in millidegrees Celsius
+curr1_input   Current in mA across v1-v2 assuming a 1mOhm sense resistor.
+curr2_input   Current in mA across v3-v4 assuming a 1mOhm sense resistor.
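+
+A minimal user-space sketch of this conversion (the hwmon path and the
+5 mOhm sense resistor below are assumptions; adjust both to the actual
+board):
+
+#include <stdio.h>
+
+int main(void)
+{
+	const char *path = "/sys/class/hwmon/hwmon0/curr1_input"; /* assumed */
+	double r_sense_mohm = 5.0;	/* the board's real sense resistor */
+	long reported_ma;
+	FILE *f = fopen(path, "r");
+
+	if (!f || fscanf(f, "%ld", &reported_ma) != 1) {
+		perror(path);
+		return 1;
+	}
+	fclose(f);
+
+	/* Reported value assumes 1 mOhm; divide by the real value in mOhm. */
+	printf("actual current: %.3f mA\n", reported_ma / r_sense_mohm);
+	return 0;
+}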
diff --git a/Documentation/hwmon/max16064 b/Documentation/hwmon/max16064
index d59cc78..265370f 100644
--- a/Documentation/hwmon/max16064
+++ b/Documentation/hwmon/max16064
@@ -13,7 +13,7 @@
 Description
 -----------
 
-This driver supports hardware montoring for Maxim MAX16064 Quad Power-Supply
+This driver supports hardware monitoring for Maxim MAX16064 Quad Power-Supply
 Controller with Active-Voltage Output Control and PMBus Interface.
 
 The driver is a client driver to the core PMBus driver.
diff --git a/Documentation/hwmon/max34440 b/Documentation/hwmon/max34440
index 37cbf47..f5b1fca 100644
--- a/Documentation/hwmon/max34440
+++ b/Documentation/hwmon/max34440
@@ -33,7 +33,7 @@
 Description
 -----------
 
-This driver supports hardware montoring for Maxim MAX34440 PMBus 6-Channel
+This driver supports hardware monitoring for Maxim MAX34440 PMBus 6-Channel
 Power-Supply Manager, MAX34441 PMBus 5-Channel Power-Supply Manager
 and Intelligent Fan Controller, and MAX34446 PMBus Power-Supply Data Logger.
 It also supports the MAX34460 and MAX34461 PMBus Voltage Monitor & Sequencers.
diff --git a/Documentation/hwmon/max8688 b/Documentation/hwmon/max8688
index e780786..ca233be 100644
--- a/Documentation/hwmon/max8688
+++ b/Documentation/hwmon/max8688
@@ -13,7 +13,7 @@
 Description
 -----------
 
-This driver supports hardware montoring for Maxim MAX8688 Digital Power-Supply
+This driver supports hardware monitoring for Maxim MAX8688 Digital Power-Supply
 Controller/Monitor with PMBus Interface.
 
 The driver is a client driver to the core PMBus driver. Please see
diff --git a/Documentation/hwmon/nsa320 b/Documentation/hwmon/nsa320
new file mode 100644
index 0000000..fdbd694
--- /dev/null
+++ b/Documentation/hwmon/nsa320
@@ -0,0 +1,53 @@
+Kernel driver nsa320_hwmon
+==========================
+
+Supported chips:
+  * Holtek HT46R065 microcontroller with onboard firmware that configures
+	it to act as a hardware monitor.
+    Prefix: 'nsa320'
+    Addresses scanned: none
+    Datasheet: Not available, driver was reverse engineered based upon the
+	Zyxel kernel source
+
+Author:
+  Adam Baker <linux@baker-net.org.uk>
+
+Description
+-----------
+
+This chip is known to be used in the Zyxel NSA320 and NSA325 NAS Units and
+also in some variants of the NSA310 but the driver has only been tested
+on the NSA320. In all of these devices it is connected to the same 3 GPIO
+lines which are used to provide chip select, clock and data lines. The
+interface behaves similarly to SPI but at much lower speeds than are normally
+used for SPI.
+
+Following each chip select pulse the chip will generate a single 32 bit word
+that contains 0x55 as a marker to indicate that data is being read correctly,
+followed by an 8 bit fan speed in 100s of RPM and a 16 bit temperature in
+tenths of a degree.
+
+
+sysfs-Interface
+---------------
+
+temp1_input - temperature input
+fan1_input - fan speed
+
+Notes
+-----
+
+The access timings used in the driver are the same as used in the Zyxel
+provided kernel. Testing has shown that if the delay between chip select and
+the first clock pulse is reduced from 100 ms to just under 10 ms then the chip
+will not produce any output. If the duration of either phase of the clock
+is reduced from 100 us to less than 15 us then data pulses are likely to be
+read twice corrupting the output. The above analysis is based upon a sample
+of one unit but suggests that the Zyxel provided delay values include a
+reasonable tolerance.
+
+The driver will not check for updated values more often than once a second.
+This is because reading data from the device takes a relatively long time,
+and a single read returns both the temperature and the fan speed. Since the
+most likely reason for two accesses in quick succession is to read both of
+these values, avoiding a second read delay is desirable.
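+
+As an illustration of the word format described above, a small decode
+sketch follows. The exact bit layout (marker position and byte order) is
+an assumption here; the driver source is authoritative.
+
+#include <stdint.h>
+#include <stdio.h>
+
+struct nsa320_sample {
+	unsigned int fan_rpm;	/* fan speed in RPM */
+	int temp_mdeg;		/* temperature in millidegrees C */
+};
+
+static int nsa320_decode(uint32_t word, struct nsa320_sample *s)
+{
+	/* Assumed layout: [31:24] marker, [23:16] fan, [15:0] temperature. */
+	if (((word >> 24) & 0xff) != 0x55)
+		return -1;	/* marker missing: bad read */
+
+	s->fan_rpm = ((word >> 16) & 0xff) * 100;	/* 100s of RPM */
+	s->temp_mdeg = (word & 0xffff) * 100;		/* tenths of a degree */
+	return 0;
+}
+
+int main(void)
+{
+	struct nsa320_sample s;
+	uint32_t word = 0x550f00fa;	/* example: fan 1500 RPM, temp 25.0 C */
+
+	if (nsa320_decode(word, &s) == 0)
+		printf("fan %u RPM, temp %d.%03d C\n",
+		       s.fan_rpm, s.temp_mdeg / 1000, s.temp_mdeg % 1000);
+	return 0;
+}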
diff --git a/Documentation/hwmon/ntc_thermistor b/Documentation/hwmon/ntc_thermistor
index 1d4cc84..8b9ff23 100644
--- a/Documentation/hwmon/ntc_thermistor
+++ b/Documentation/hwmon/ntc_thermistor
@@ -3,9 +3,9 @@
 
 Supported thermistors from Murata:
 * Murata NTC Thermistors NCP15WB473, NCP18WB473, NCP21WB473, NCP03WB473,
-  NCP15WL333, NCP03WF104
+  NCP15WL333, NCP03WF104, NCP15XH103
   Prefixes: 'ncp15wb473', 'ncp18wb473', 'ncp21wb473', 'ncp03wb473',
-  'ncp15wl333', 'ncp03wf104'
+  'ncp15wl333', 'ncp03wf104', 'ncp15xh103'
   Datasheet: Publicly available at Murata
 
 Supported thermistors from EPCOS:
diff --git a/Documentation/hwmon/pmbus b/Documentation/hwmon/pmbus
index b397675..dfd9c65 100644
--- a/Documentation/hwmon/pmbus
+++ b/Documentation/hwmon/pmbus
@@ -43,7 +43,7 @@
 Description
 -----------
 
-This driver supports hardware montoring for various PMBus compliant devices.
+This driver supports hardware monitoring for various PMBus compliant devices.
 It supports voltage, current, power, and temperature sensors as supported
 by the device.
 
diff --git a/Documentation/hwmon/zl6100 b/Documentation/hwmon/zl6100
index 33908a4..477a94b 100644
--- a/Documentation/hwmon/zl6100
+++ b/Documentation/hwmon/zl6100
@@ -60,7 +60,7 @@
 Description
 -----------
 
-This driver supports hardware montoring for Intersil / Zilker Labs ZL6100 and
+This driver supports hardware monitoring for Intersil / Zilker Labs ZL6100 and
 compatible digital DC-DC controllers.
 
 The driver is a client driver to the core PMBus driver. Please see
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 8e5abd6..5b47acb 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -666,7 +666,7 @@
 
 	clearcpuid=BITNUM [X86]
 			Disable CPUID feature X for the kernel. See
-			arch/x86/include/asm/cpufeature.h for the valid bit
+			arch/x86/include/asm/cpufeatures.h for the valid bit
 			numbers. Note the Linux specific bits are not necessarily
 			stable over kernel options, but the vendor specific
 			ones should be.
@@ -1687,6 +1687,15 @@
 	ip=		[IP_PNP]
 			See Documentation/filesystems/nfs/nfsroot.txt.
 
+	irqaffinity=	[SMP] Set the default irq affinity mask
+			Format:
+			<cpu number>,...,<cpu number>
+			or
+			<cpu number>-<cpu number>
+			(must be a positive range in ascending order)
+			or a mixture
+			<cpu number>,...,<cpu number>-<cpu number>
+
 	irqfixup	[HW]
 			When an interrupt is not handled search all handlers
 			for it. Intended to get systems with badly broken
@@ -2576,6 +2585,8 @@
 
 	nointroute	[IA-64]
 
+	noinvpcid	[X86] Disable the INVPCID cpu feature.
+
 	nojitter	[IA-64] Disables jitter checking for ITC timers.
 
 	no-kvmclock	[X86,KVM] Disable paravirtualized KVM clock driver
diff --git a/Documentation/ptp/testptp.c b/Documentation/ptp/testptp.c
index 6c6247a..d99012f 100644
--- a/Documentation/ptp/testptp.c
+++ b/Documentation/ptp/testptp.c
@@ -277,13 +277,15 @@
 			       "  %d external time stamp channels\n"
 			       "  %d programmable periodic signals\n"
 			       "  %d pulse per second\n"
-			       "  %d programmable pins\n",
+			       "  %d programmable pins\n"
+			       "  %d cross timestamping\n",
 			       caps.max_adj,
 			       caps.n_alarm,
 			       caps.n_ext_ts,
 			       caps.n_per_out,
 			       caps.pps,
-			       caps.n_pins);
+			       caps.n_pins,
+			       caps.cross_timestamping);
 		}
 	}
 
diff --git a/Documentation/rtc.txt b/Documentation/rtc.txt
index 8446f1e..ddc3660 100644
--- a/Documentation/rtc.txt
+++ b/Documentation/rtc.txt
@@ -157,6 +157,12 @@
 		 the epoch by default, or if there's a leading +, seconds in the
 		 future, or if there is a leading +=, seconds ahead of the current
 		 alarm.
+offset:		 The amount by which the rtc clock has been adjusted in firmware.
+		 Visible only if the driver supports clock offset adjustment.
+		 The unit is parts per billion, i.e. the number of clock ticks
+		 which are added to or removed from the rtc's base clock per
+		 billion ticks. A positive value makes a day pass more slowly,
+		 and a negative value makes a day pass more quickly.
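+		 For example, an offset of 11574 shifts the clock by about
+		 11574 * 86400 / 10^9, i.e. roughly one second per day
+		 (slowing it down, per the sign convention above).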
 
 IOCTL INTERFACE
 ---------------
diff --git a/Documentation/virtual/kvm/api.txt b/Documentation/virtual/kvm/api.txt
index 07e4cdf..4d0542c 100644
--- a/Documentation/virtual/kvm/api.txt
+++ b/Documentation/virtual/kvm/api.txt
@@ -2507,8 +2507,9 @@
 
 4.80 KVM_SET_DEVICE_ATTR/KVM_GET_DEVICE_ATTR
 
-Capability: KVM_CAP_DEVICE_CTRL, KVM_CAP_VM_ATTRIBUTES for vm device
-Type: device ioctl, vm ioctl
+Capability: KVM_CAP_DEVICE_CTRL, KVM_CAP_VM_ATTRIBUTES for vm device,
+  KVM_CAP_VCPU_ATTRIBUTES for vcpu device
+Type: device ioctl, vm ioctl, vcpu ioctl
 Parameters: struct kvm_device_attr
 Returns: 0 on success, -1 on error
 Errors:
@@ -2533,8 +2534,9 @@
 
 4.81 KVM_HAS_DEVICE_ATTR
 
-Capability: KVM_CAP_DEVICE_CTRL, KVM_CAP_VM_ATTRIBUTES for vm device
-Type: device ioctl, vm ioctl
+Capability: KVM_CAP_DEVICE_CTRL, KVM_CAP_VM_ATTRIBUTES for vm device,
+  KVM_CAP_VCPU_ATTRIBUTES for vcpu device
+Type: device ioctl, vm ioctl, vcpu ioctl
 Parameters: struct kvm_device_attr
 Returns: 0 on success, -1 on error
 Errors:
@@ -2577,6 +2579,8 @@
 	  Depends on KVM_CAP_ARM_EL1_32BIT (arm64 only).
 	- KVM_ARM_VCPU_PSCI_0_2: Emulate PSCI v0.2 for the CPU.
 	  Depends on KVM_CAP_ARM_PSCI_0_2.
+	- KVM_ARM_VCPU_PMU_V3: Emulate PMUv3 for the CPU.
+	  Depends on KVM_CAP_ARM_PMU_V3.
 
 
 4.83 KVM_ARM_PREFERRED_TARGET
@@ -3035,6 +3039,87 @@
 
 Queues an SMI on the thread's vcpu.
 
+4.97 KVM_CAP_PPC_MULTITCE
+
+Capability: KVM_CAP_PPC_MULTITCE
+Architectures: ppc
+Type: vm
+
+This capability means the kernel is capable of handling hypercalls
+H_PUT_TCE_INDIRECT and H_STUFF_TCE without passing them to user space.
+This significantly accelerates DMA operations for PPC KVM guests.
+User space should expect that its handlers for these hypercalls
+are not going to be called if user space previously registered LIOBN
+in KVM (via KVM_CREATE_SPAPR_TCE or similar calls).
+
+In order to enable H_PUT_TCE_INDIRECT and H_STUFF_TCE use in the guest,
+user space might have to advertise it for the guest. For example, an
+IBM pSeries (sPAPR) guest starts using them if "hcall-multi-tce" is
+present in the "ibm,hypertas-functions" device-tree property.
+
+The hypercalls mentioned above may or may not be processed successfully
+in the kernel-based fast path. If they cannot be handled by the kernel,
+they will be passed on to user space, so user space still has to have
+an implementation for these despite the in-kernel acceleration.
+
+This capability is always enabled.
+
+4.98 KVM_CREATE_SPAPR_TCE_64
+
+Capability: KVM_CAP_SPAPR_TCE_64
+Architectures: powerpc
+Type: vm ioctl
+Parameters: struct kvm_create_spapr_tce_64 (in)
+Returns: file descriptor for manipulating the created TCE table
+
+This is an extension of KVM_CAP_SPAPR_TCE, which only supports 32bit
+windows; see 4.62 KVM_CREATE_SPAPR_TCE.
+
+This capability uses an extended struct in the ioctl interface:
+
+/* for KVM_CAP_SPAPR_TCE_64 */
+struct kvm_create_spapr_tce_64 {
+	__u64 liobn;
+	__u32 page_shift;
+	__u32 flags;
+	__u64 offset;	/* in pages */
+	__u64 size;	/* in pages */
+};
+
+The aim of this extension is to support an additional, bigger DMA window
+with a variable page size.
+KVM_CREATE_SPAPR_TCE_64 receives a 64bit window size, an IOMMU page shift
+and a bus offset of the corresponding DMA window; @size and @offset are
+numbers of IOMMU pages.
+
+@flags are not used at the moment.
+
+The rest of functionality is identical to KVM_CREATE_SPAPR_TCE.
+
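+A minimal user-space sketch of invoking this ioctl; all field values are
+arbitrary examples and error handling is trimmed:
+
+#include <fcntl.h>
+#include <stdio.h>
+#include <sys/ioctl.h>
+#include <linux/kvm.h>
+
+int main(void)
+{
+	int kvm = open("/dev/kvm", O_RDWR);
+	int vm = ioctl(kvm, KVM_CREATE_VM, 0);
+	struct kvm_create_spapr_tce_64 args = {
+		.liobn = 0x80000001,	/* example LIOBN */
+		.page_shift = 16,	/* 64K IOMMU pages */
+		.flags = 0,		/* not used at the moment */
+		.offset = 1ULL << 16,	/* bus offset, in IOMMU pages */
+		.size = 1ULL << 16,	/* window size, in IOMMU pages */
+	};
+	int tce_fd = ioctl(vm, KVM_CREATE_SPAPR_TCE_64, &args);
+
+	if (tce_fd < 0)
+		perror("KVM_CREATE_SPAPR_TCE_64");
+	return tce_fd < 0;
+}
+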
+4.99 KVM_REINJECT_CONTROL
+
+Capability: KVM_CAP_REINJECT_CONTROL
+Architectures: x86
+Type: vm ioctl
+Parameters: struct kvm_reinject_control (in)
+Returns: 0 on success,
+         -EFAULT if struct kvm_reinject_control cannot be read,
+         -ENXIO if KVM_CREATE_PIT or KVM_CREATE_PIT2 didn't succeed earlier.
+
+i8254 (PIT) has two modes, reinject and !reinject.  The default is reinject,
+where KVM queues elapsed i8254 ticks and monitors completion of interrupt from
+vector(s) that i8254 injects.  Reinject mode dequeues a tick and injects its
+interrupt whenever there isn't a pending interrupt from i8254.
+!reinject mode injects an interrupt as soon as a tick arrives.
+
+struct kvm_reinject_control {
+	__u8 pit_reinject;
+	__u8 reserved[31];
+};
+
+pit_reinject = 0 (!reinject mode) is recommended, unless running an old
+operating system that uses the PIT for timing (e.g. Linux 2.4.x).
+
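+A minimal sketch of switching to !reinject mode, assuming a vm file
+descriptor on which KVM_CREATE_PIT2 has already succeeded:
+
+#include <string.h>
+#include <sys/ioctl.h>
+#include <linux/kvm.h>
+
+static int pit_disable_reinject(int vm_fd)
+{
+	struct kvm_reinject_control control;
+
+	memset(&control, 0, sizeof(control));
+	control.pit_reinject = 0;	/* !reinject mode */
+
+	return ioctl(vm_fd, KVM_REINJECT_CONTROL, &control);
+}
+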
 5. The kvm_run structure
 ------------------------
 
@@ -3339,6 +3424,7 @@
 
 		struct kvm_hyperv_exit {
 #define KVM_EXIT_HYPERV_SYNIC          1
+#define KVM_EXIT_HYPERV_HCALL          2
 			__u32 type;
 			union {
 				struct {
@@ -3347,6 +3433,11 @@
 					__u64 evt_page;
 					__u64 msg_page;
 				} synic;
+				struct {
+					__u64 input;
+					__u64 result;
+					__u64 params[2];
+				} hcall;
 			} u;
 		};
 		/* KVM_EXIT_HYPERV */
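+
+A sketch of the corresponding user-space handling: on a KVM_EXIT_HYPERV
+exit of type KVM_EXIT_HYPERV_HCALL, read the input and parameters, store
+the result, and resume with KVM_RUN. handle_hv_hcall() below is a
+placeholder, not a KVM interface.
+
+#include <linux/kvm.h>
+
+/* placeholder for a real Hyper-V hypercall emulator */
+static __u64 handle_hv_hcall(__u64 input, const __u64 params[2])
+{
+	(void)input;
+	(void)params;
+	return 0;	/* success status */
+}
+
+static void handle_hyperv_exit(struct kvm_run *run)
+{
+	if (run->exit_reason == KVM_EXIT_HYPERV &&
+	    run->hyperv.type == KVM_EXIT_HYPERV_HCALL)
+		run->hyperv.u.hcall.result =
+			handle_hv_hcall(run->hyperv.u.hcall.input,
+					run->hyperv.u.hcall.params);
+}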
diff --git a/Documentation/virtual/kvm/devices/s390_flic.txt b/Documentation/virtual/kvm/devices/s390_flic.txt
index d1ad9d5..e3e314c 100644
--- a/Documentation/virtual/kvm/devices/s390_flic.txt
+++ b/Documentation/virtual/kvm/devices/s390_flic.txt
@@ -88,6 +88,8 @@
       perform a gmap translation for the guest address provided in addr,
       pin a userspace page for the translated address and add it to the
       list of mappings
+      Note: A new mapping will be created unconditionally; therefore,
+            the calling code should avoid making duplicate mappings.
 
     KVM_S390_IO_ADAPTER_UNMAP
       release a userspace page for the translated address specified in addr
diff --git a/Documentation/virtual/kvm/devices/vcpu.txt b/Documentation/virtual/kvm/devices/vcpu.txt
new file mode 100644
index 0000000..c041658
--- /dev/null
+++ b/Documentation/virtual/kvm/devices/vcpu.txt
@@ -0,0 +1,33 @@
+Generic vcpu interface
+======================
+
+The virtual cpu "device" also accepts the ioctls KVM_SET_DEVICE_ATTR,
+KVM_GET_DEVICE_ATTR, and KVM_HAS_DEVICE_ATTR. The interface uses the same struct
+kvm_device_attr as other devices, but targets VCPU-wide settings and controls.
+
+The groups and attributes per virtual cpu, if any, are architecture specific.
+
+1. GROUP: KVM_ARM_VCPU_PMU_V3_CTRL
+Architectures: ARM64
+
+1.1. ATTRIBUTE: KVM_ARM_VCPU_PMU_V3_IRQ
+Parameters: in kvm_device_attr.addr the address for the PMU overflow interrupt
+            is a pointer to an int
+Returns: -EBUSY: The PMU overflow interrupt is already set
+         -ENXIO: The overflow interrupt is not set when attempting to get it
+         -ENODEV: PMUv3 not supported
+         -EINVAL: Invalid PMU overflow interrupt number supplied
+
+A value describing the PMUv3 (Performance Monitor Unit v3) overflow interrupt
+number for this vcpu. This interrupt could be a PPI or SPI, but the interrupt
+type must be the same for each vcpu. As a PPI, the interrupt number is the same for
+all vcpus, while as an SPI it must be a separate number per vcpu.
+
+1.2 ATTRIBUTE: KVM_ARM_VCPU_PMU_V3_INIT
+Parameters: no additional parameter in kvm_device_attr.addr
+Returns: -ENODEV: PMUv3 not supported
+         -ENXIO: PMUv3 not properly configured as required prior to calling this
+                 attribute
+         -EBUSY: PMUv3 already initialized
+
+Request the initialization of the PMUv3.
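+
+A minimal user-space sketch (arm64 only) of using these two attributes;
+the PPI number 23 is an arbitrary example:
+
+#include <stdint.h>
+#include <sys/ioctl.h>
+#include <linux/kvm.h>
+
+static int vcpu_enable_pmu(int vcpu_fd)
+{
+	int irq = 23;			/* example PPI number */
+	struct kvm_device_attr attr = {
+		.group	= KVM_ARM_VCPU_PMU_V3_CTRL,
+		.attr	= KVM_ARM_VCPU_PMU_V3_IRQ,
+		.addr	= (uint64_t)(uintptr_t)&irq,	/* pointer to an int */
+	};
+
+	if (ioctl(vcpu_fd, KVM_SET_DEVICE_ATTR, &attr))
+		return -1;		/* see the error codes above */
+
+	attr.attr = KVM_ARM_VCPU_PMU_V3_INIT;
+	attr.addr = 0;			/* no additional parameter */
+	return ioctl(vcpu_fd, KVM_SET_DEVICE_ATTR, &attr);
+}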
diff --git a/Documentation/virtual/kvm/devices/vm.txt b/Documentation/virtual/kvm/devices/vm.txt
index f083a16..a9ea877 100644
--- a/Documentation/virtual/kvm/devices/vm.txt
+++ b/Documentation/virtual/kvm/devices/vm.txt
@@ -84,3 +84,55 @@
 	    -EFAULT if the given address is not accessible from kernel space
 	    -ENOMEM if not enough memory is available to process the ioctl
 	    0 in case of success
+
+3. GROUP: KVM_S390_VM_TOD
+Architectures: s390
+
+3.1. ATTRIBUTE: KVM_S390_VM_TOD_HIGH
+
+Allows user space to set/get the TOD clock extension (u8).
+
+Parameters: address of a buffer in user space to store the data (u8) to
+Returns:    -EFAULT if the given address is not accessible from kernel space
+	    -EINVAL if setting the TOD clock extension to != 0 is not supported
+
+3.2. ATTRIBUTE: KVM_S390_VM_TOD_LOW
+
+Allows user space to set/get bits 0-63 of the TOD clock register as defined in
+the POP (u64).
+
+Parameters: address of a buffer in user space to store the data (u64) to
+Returns:    -EFAULT if the given address is not accessible from kernel space
+
+4. GROUP: KVM_S390_VM_CRYPTO
+Architectures: s390
+
+4.1. ATTRIBUTE: KVM_S390_VM_CRYPTO_ENABLE_AES_KW (w/o)
+
+Allows user space to enable aes key wrapping, including generating a new
+wrapping key.
+
+Parameters: none
+Returns:    0
+
+4.2. ATTRIBUTE: KVM_S390_VM_CRYPTO_ENABLE_DEA_KW (w/o)
+
+Allows user space to enable dea key wrapping, including generating a new
+wrapping key.
+
+Parameters: none
+Returns:    0
+
+4.3. ATTRIBUTE: KVM_S390_VM_CRYPTO_DISABLE_AES_KW (w/o)
+
+Allows user space to disable aes key wrapping, clearing the wrapping key.
+
+Parameters: none
+Returns:    0
+
+4.4. ATTRIBUTE: KVM_S390_VM_CRYPTO_DISABLE_DEA_KW (w/o)
+
+Allows user space to disable dea key wrapping, clearing the wrapping key.
+
+Parameters: none
+Returns:    0
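+
+A minimal user-space sketch (s390 only) of setting the low TOD word via
+group 3; the TOD value passed in is an arbitrary example. The crypto
+attributes in group 4 are set the same way, with group KVM_S390_VM_CRYPTO
+and addr left at 0.
+
+#include <stdint.h>
+#include <sys/ioctl.h>
+#include <linux/kvm.h>
+
+static int vm_set_tod_low(int vm_fd, uint64_t tod)
+{
+	struct kvm_device_attr attr = {
+		.group	= KVM_S390_VM_TOD,
+		.attr	= KVM_S390_VM_TOD_LOW,
+		.addr	= (uint64_t)(uintptr_t)&tod,	/* buffer holding the u64 */
+	};
+
+	return ioctl(vm_fd, KVM_SET_DEVICE_ATTR, &attr);
+}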
diff --git a/Documentation/virtual/kvm/mmu.txt b/Documentation/virtual/kvm/mmu.txt
index c817310..481b6a9 100644
--- a/Documentation/virtual/kvm/mmu.txt
+++ b/Documentation/virtual/kvm/mmu.txt
@@ -392,11 +392,11 @@
   write-protected pages
 - the guest page must be wholly contained by a single memory slot
 
-To check the last two conditions, the mmu maintains a ->write_count set of
+To check the last two conditions, the mmu maintains a ->disallow_lpage set of
 arrays for each memory slot and large page size.  Every write protected page
-causes its write_count to be incremented, thus preventing instantiation of
+causes its disallow_lpage to be incremented, thus preventing instantiation of
 a large spte.  The frames at the end of an unaligned memory slot have
-artificially inflated ->write_counts so they can never be instantiated.
+artificially inflated ->disallow_lpages so they can never be instantiated.
 
 Zapping all pages (page generation count)
 =========================================
diff --git a/Documentation/x86/early-microcode.txt b/Documentation/x86/early-microcode.txt
index d62bea6..c956d99 100644
--- a/Documentation/x86/early-microcode.txt
+++ b/Documentation/x86/early-microcode.txt
@@ -40,3 +40,28 @@
 find . | cpio -o -H newc >../ucode.cpio
 cd ..
 cat ucode.cpio /boot/initrd-3.5.0.img >/boot/initrd-3.5.0.ucode.img
+
+Builtin microcode
+=================
+
+We can also load builtin microcode supplied through the regular firmware
+builtin method CONFIG_FIRMWARE_IN_KERNEL. Here's an example:
+
+CONFIG_FIRMWARE_IN_KERNEL=y
+CONFIG_EXTRA_FIRMWARE="intel-ucode/06-3a-09 amd-ucode/microcode_amd_fam15h.bin"
+CONFIG_EXTRA_FIRMWARE_DIR="/lib/firmware"
+
+This basically means you have the following tree structure locally:
+
+/lib/firmware/
+|-- amd-ucode
+...
+|   |-- microcode_amd_fam15h.bin
+...
+|-- intel-ucode
+...
+|   |-- 06-3a-09
+...
+
+so that the build system can find those files and integrate them into
+the final kernel image. The early loader finds them and applies them.
diff --git a/Documentation/x86/x86_64/boot-options.txt b/Documentation/x86/x86_64/boot-options.txt
index 68ed311..0965a71 100644
--- a/Documentation/x86/x86_64/boot-options.txt
+++ b/Documentation/x86/x86_64/boot-options.txt
@@ -60,6 +60,8 @@
 		threshold to 1. Enabling this may make memory predictive failure
 		analysis less effective if the bios sets thresholds for memory
 		errors since we will not see details for all errors.
+   mce=recovery
+		Force-enable recoverable machine check code paths
 
    nomce (for compatibility with i386): same as mce=off
 
diff --git a/MAINTAINERS b/MAINTAINERS
index 2061ea7..860e306 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -2422,6 +2422,7 @@
 F:	arch/mips/include/asm/mach-bmips/*
 F:	arch/mips/kernel/*bmips*
 F:	arch/mips/boot/dts/brcm/bcm*.dts*
+F:	drivers/irqchip/irq-bcm63*
 F:	drivers/irqchip/irq-bcm7*
 F:	drivers/irqchip/irq-brcmstb*
 F:	include/linux/bcm963xx_nvram.h
@@ -6909,7 +6910,7 @@
 M:	Javier Martinez Canillas <javier@osg.samsung.com>
 L:	linux-kernel@vger.kernel.org
 S:	Supported
-F:	drivers/*/*max77802.c
+F:	drivers/*/*max77802*.c
 F:	Documentation/devicetree/bindings/*/*max77802.txt
 F:	include/dt-bindings/*/*max77802.h
 
@@ -6919,7 +6920,7 @@
 L:	linux-kernel@vger.kernel.org
 S:	Supported
 F:	drivers/*/max14577.c
-F:	drivers/*/max77686.c
+F:	drivers/*/max77686*.c
 F:	drivers/*/max77693.c
 F:	drivers/extcon/extcon-max14577.c
 F:	drivers/extcon/extcon-max77693.c
diff --git a/arch/alpha/kernel/smp.c b/arch/alpha/kernel/smp.c
index 2f24447f..46bf263 100644
--- a/arch/alpha/kernel/smp.c
+++ b/arch/alpha/kernel/smp.c
@@ -168,7 +168,7 @@
 	      cpuid, current, current->active_mm));
 
 	preempt_disable();
-	cpu_startup_entry(CPUHP_ONLINE);
+	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
 }
 
 /* Wait until hwrpb->txrdy is clear for cpu.  Return -1 on timeout.  */
diff --git a/arch/arc/kernel/smp.c b/arch/arc/kernel/smp.c
index 424e937..4cb3add 100644
--- a/arch/arc/kernel/smp.c
+++ b/arch/arc/kernel/smp.c
@@ -142,7 +142,7 @@
 
 	local_irq_enable();
 	preempt_disable();
-	cpu_startup_entry(CPUHP_ONLINE);
+	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
 }
 
 /*
diff --git a/arch/arm/boot/dts/imx23.dtsi b/arch/arm/boot/dts/imx23.dtsi
index 1c6c075..302d116 100644
--- a/arch/arm/boot/dts/imx23.dtsi
+++ b/arch/arm/boot/dts/imx23.dtsi
@@ -569,7 +569,7 @@
 		};
 	};
 
-	iio_hwmon {
+	iio-hwmon {
 		compatible = "iio-hwmon";
 		io-channels = <&lradc 8>;
 	};
diff --git a/arch/arm/boot/dts/imx28.dtsi b/arch/arm/boot/dts/imx28.dtsi
index fae7b90..f637ec9 100644
--- a/arch/arm/boot/dts/imx28.dtsi
+++ b/arch/arm/boot/dts/imx28.dtsi
@@ -1256,7 +1256,7 @@
 		};
 	};
 
-	iio_hwmon {
+	iio-hwmon {
 		compatible = "iio-hwmon";
 		io-channels = <&lradc 8>;
 	};
diff --git a/arch/arm/boot/dts/mt2701-pinfunc.h b/arch/arm/boot/dts/mt2701-pinfunc.h
new file mode 100644
index 0000000..e24ebc8
--- /dev/null
+++ b/arch/arm/boot/dts/mt2701-pinfunc.h
@@ -0,0 +1,735 @@
+/*
+ * Copyright (c) 2015 MediaTek Inc.
+ * Author: Biao Huang <biao.huang@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __DTS_MT2701_PINFUNC_H
+#define __DTS_MT2701_PINFUNC_H
+
+#include <dt-bindings/pinctrl/mt65xx.h>
+
+#define MT2701_PIN_0_PWRAP_SPI0_MI__FUNC_GPIO0 (MTK_PIN_NO(0) | 0)
+#define MT2701_PIN_0_PWRAP_SPI0_MI__FUNC_PWRAP_SPIDO (MTK_PIN_NO(0) | 1)
+#define MT2701_PIN_0_PWRAP_SPI0_MI__FUNC_PWRAP_SPIDI (MTK_PIN_NO(0) | 2)
+
+#define MT2701_PIN_1_PWRAP_SPI0_MO__FUNC_GPIO1 (MTK_PIN_NO(1) | 0)
+#define MT2701_PIN_1_PWRAP_SPI0_MO__FUNC_PWRAP_SPIDI (MTK_PIN_NO(1) | 1)
+#define MT2701_PIN_1_PWRAP_SPI0_MO__FUNC_PWRAP_SPIDO (MTK_PIN_NO(1) | 2)
+
+#define MT2701_PIN_2_PWRAP_INT__FUNC_GPIO2 (MTK_PIN_NO(2) | 0)
+#define MT2701_PIN_2_PWRAP_INT__FUNC_PWRAP_INT (MTK_PIN_NO(2) | 1)
+
+#define MT2701_PIN_3_PWRAP_SPI0_CK__FUNC_GPIO3 (MTK_PIN_NO(3) | 0)
+#define MT2701_PIN_3_PWRAP_SPI0_CK__FUNC_PWRAP_SPICK_I (MTK_PIN_NO(3) | 1)
+
+#define MT2701_PIN_4_PWRAP_SPI0_CSN__FUNC_GPIO4 (MTK_PIN_NO(4) | 0)
+#define MT2701_PIN_4_PWRAP_SPI0_CSN__FUNC_PWRAP_SPICS_B_I (MTK_PIN_NO(4) | 1)
+
+#define MT2701_PIN_5_PWRAP_SPI0_CK2__FUNC_GPIO5 (MTK_PIN_NO(5) | 0)
+#define MT2701_PIN_5_PWRAP_SPI0_CK2__FUNC_PWRAP_SPICK2_I (MTK_PIN_NO(5) | 1)
+#define MT2701_PIN_5_PWRAP_SPI0_CK2__FUNC_ANT_SEL1 (MTK_PIN_NO(5) | 5)
+
+#define MT2701_PIN_6_PWRAP_SPI0_CSN2__FUNC_GPIO6 (MTK_PIN_NO(6) | 0)
+#define MT2701_PIN_6_PWRAP_SPI0_CSN2__FUNC_PWRAP_SPICS2_B_I (MTK_PIN_NO(6) | 1)
+#define MT2701_PIN_6_PWRAP_SPI0_CSN2__FUNC_ANT_SEL0 (MTK_PIN_NO(6) | 5)
+#define MT2701_PIN_6_PWRAP_SPI0_CSN2__FUNC_DBG_MON_A_0 (MTK_PIN_NO(6) | 7)
+
+#define MT2701_PIN_7_SPI1_CSN__FUNC_GPIO7 (MTK_PIN_NO(7) | 0)
+#define MT2701_PIN_7_SPI1_CSN__FUNC_SPI1_CS (MTK_PIN_NO(7) | 1)
+#define MT2701_PIN_7_SPI1_CSN__FUNC_KCOL0 (MTK_PIN_NO(7) | 4)
+#define MT2701_PIN_7_SPI1_CSN__FUNC_DBG_MON_B_12 (MTK_PIN_NO(7) | 7)
+
+#define MT2701_PIN_8_SPI1_MI__FUNC_GPIO8 (MTK_PIN_NO(8) | 0)
+#define MT2701_PIN_8_SPI1_MI__FUNC_SPI1_MI (MTK_PIN_NO(8) | 1)
+#define MT2701_PIN_8_SPI1_MI__FUNC_SPI1_MO (MTK_PIN_NO(8) | 2)
+#define MT2701_PIN_8_SPI1_MI__FUNC_KCOL1 (MTK_PIN_NO(8) | 4)
+#define MT2701_PIN_8_SPI1_MI__FUNC_DBG_MON_B_13 (MTK_PIN_NO(8) | 7)
+
+#define MT2701_PIN_9_SPI1_MO__FUNC_GPIO9 (MTK_PIN_NO(9) | 0)
+#define MT2701_PIN_9_SPI1_MO__FUNC_SPI1_MO (MTK_PIN_NO(9) | 1)
+#define MT2701_PIN_9_SPI1_MO__FUNC_SPI1_MI (MTK_PIN_NO(9) | 2)
+#define MT2701_PIN_9_SPI1_MO__FUNC_EXT_FRAME_SYNC (MTK_PIN_NO(9) | 3)
+#define MT2701_PIN_9_SPI1_MO__FUNC_KCOL2 (MTK_PIN_NO(9) | 4)
+#define MT2701_PIN_9_SPI1_MO__FUNC_DBG_MON_B_14 (MTK_PIN_NO(9) | 7)
+
+#define MT2701_PIN_10_RTC32K_CK__FUNC_GPIO10 (MTK_PIN_NO(10) | 0)
+#define MT2701_PIN_10_RTC32K_CK__FUNC_RTC32K_CK (MTK_PIN_NO(10) | 1)
+
+#define MT2701_PIN_11_WATCHDOG__FUNC_GPIO11 (MTK_PIN_NO(11) | 0)
+#define MT2701_PIN_11_WATCHDOG__FUNC_WATCHDOG (MTK_PIN_NO(11) | 1)
+
+#define MT2701_PIN_12_SRCLKENA__FUNC_GPIO12 (MTK_PIN_NO(12) | 0)
+#define MT2701_PIN_12_SRCLKENA__FUNC_SRCLKENA (MTK_PIN_NO(12) | 1)
+
+#define MT2701_PIN_13_SRCLKENAI__FUNC_GPIO13 (MTK_PIN_NO(13) | 0)
+#define MT2701_PIN_13_SRCLKENAI__FUNC_SRCLKENAI (MTK_PIN_NO(13) | 1)
+
+#define MT2701_PIN_14_URXD2__FUNC_GPIO14 (MTK_PIN_NO(14) | 0)
+#define MT2701_PIN_14_URXD2__FUNC_URXD2 (MTK_PIN_NO(14) | 1)
+#define MT2701_PIN_14_URXD2__FUNC_UTXD2 (MTK_PIN_NO(14) | 2)
+#define MT2701_PIN_14_URXD2__FUNC_SRCCLKENAI2 (MTK_PIN_NO(14) | 5)
+#define MT2701_PIN_14_URXD2__FUNC_DBG_MON_B_30 (MTK_PIN_NO(14) | 7)
+
+#define MT2701_PIN_15_UTXD2__FUNC_GPIO15 (MTK_PIN_NO(15) | 0)
+#define MT2701_PIN_15_UTXD2__FUNC_UTXD2 (MTK_PIN_NO(15) | 1)
+#define MT2701_PIN_15_UTXD2__FUNC_URXD2 (MTK_PIN_NO(15) | 2)
+#define MT2701_PIN_15_UTXD2__FUNC_DBG_MON_B_31 (MTK_PIN_NO(15) | 7)
+
+#define MT2701_PIN_18_PCM_CLK__FUNC_GPIO18 (MTK_PIN_NO(18) | 0)
+#define MT2701_PIN_18_PCM_CLK__FUNC_PCM_CLK0 (MTK_PIN_NO(18) | 1)
+#define MT2701_PIN_18_PCM_CLK__FUNC_MRG_CLK (MTK_PIN_NO(18) | 2)
+#define MT2701_PIN_18_PCM_CLK__FUNC_MM_TEST_CK (MTK_PIN_NO(18) | 4)
+#define MT2701_PIN_18_PCM_CLK__FUNC_CONN_DSP_JCK (MTK_PIN_NO(18) | 5)
+#define MT2701_PIN_18_PCM_CLK__FUNC_WCN_PCM_CLKO (MTK_PIN_NO(18) | 6)
+#define MT2701_PIN_18_PCM_CLK__FUNC_DBG_MON_A_3 (MTK_PIN_NO(18) | 7)
+
+#define MT2701_PIN_19_PCM_SYNC__FUNC_GPIO19 (MTK_PIN_NO(19) | 0)
+#define MT2701_PIN_19_PCM_SYNC__FUNC_PCM_SYNC (MTK_PIN_NO(19) | 1)
+#define MT2701_PIN_19_PCM_SYNC__FUNC_MRG_SYNC (MTK_PIN_NO(19) | 2)
+#define MT2701_PIN_19_PCM_SYNC__FUNC_CONN_DSP_JINTP (MTK_PIN_NO(19) | 5)
+#define MT2701_PIN_19_PCM_SYNC__FUNC_WCN_PCM_SYNC (MTK_PIN_NO(19) | 6)
+#define MT2701_PIN_19_PCM_SYNC__FUNC_DBG_MON_A_5 (MTK_PIN_NO(19) | 7)
+
+#define MT2701_PIN_20_PCM_RX__FUNC_GPIO20 (MTK_PIN_NO(20) | 0)
+#define MT2701_PIN_20_PCM_RX__FUNC_PCM_RX (MTK_PIN_NO(20) | 1)
+#define MT2701_PIN_20_PCM_RX__FUNC_MRG_RX (MTK_PIN_NO(20) | 2)
+#define MT2701_PIN_20_PCM_RX__FUNC_MRG_TX (MTK_PIN_NO(20) | 3)
+#define MT2701_PIN_20_PCM_RX__FUNC_PCM_TX (MTK_PIN_NO(20) | 4)
+#define MT2701_PIN_20_PCM_RX__FUNC_CONN_DSP_JDI (MTK_PIN_NO(20) | 5)
+#define MT2701_PIN_20_PCM_RX__FUNC_WCN_PCM_RX (MTK_PIN_NO(20) | 6)
+#define MT2701_PIN_20_PCM_RX__FUNC_DBG_MON_A_4 (MTK_PIN_NO(20) | 7)
+
+#define MT2701_PIN_21_PCM_TX__FUNC_GPIO21 (MTK_PIN_NO(21) | 0)
+#define MT2701_PIN_21_PCM_TX__FUNC_PCM_TX (MTK_PIN_NO(21) | 1)
+#define MT2701_PIN_21_PCM_TX__FUNC_MRG_TX (MTK_PIN_NO(21) | 2)
+#define MT2701_PIN_21_PCM_TX__FUNC_MRG_RX (MTK_PIN_NO(21) | 3)
+#define MT2701_PIN_21_PCM_TX__FUNC_PCM_RX (MTK_PIN_NO(21) | 4)
+#define MT2701_PIN_21_PCM_TX__FUNC_CONN_DSP_JMS (MTK_PIN_NO(21) | 5)
+#define MT2701_PIN_21_PCM_TX__FUNC_WCN_PCM_TX (MTK_PIN_NO(21) | 6)
+#define MT2701_PIN_21_PCM_TX__FUNC_DBG_MON_A_2 (MTK_PIN_NO(21) | 7)
+
+#define MT2701_PIN_22_EINT0__FUNC_GPIO22 (MTK_PIN_NO(22) | 0)
+#define MT2701_PIN_22_EINT0__FUNC_UCTS0 (MTK_PIN_NO(22) | 1)
+#define MT2701_PIN_22_EINT0__FUNC_KCOL3 (MTK_PIN_NO(22) | 3)
+#define MT2701_PIN_22_EINT0__FUNC_CONN_DSP_JDO (MTK_PIN_NO(22) | 4)
+#define MT2701_PIN_22_EINT0__FUNC_EXT_FRAME_SYNC (MTK_PIN_NO(22) | 5)
+#define MT2701_PIN_22_EINT0__FUNC_DBG_MON_A_30 (MTK_PIN_NO(22) | 7)
+#define MT2701_PIN_22_EINT0__FUNC_PCIE0_PERST_N (MTK_PIN_NO(22) | 10)
+
+#define MT2701_PIN_23_EINT1__FUNC_GPIO23 (MTK_PIN_NO(23) | 0)
+#define MT2701_PIN_23_EINT1__FUNC_URTS0 (MTK_PIN_NO(23) | 1)
+#define MT2701_PIN_23_EINT1__FUNC_KCOL2 (MTK_PIN_NO(23) | 3)
+#define MT2701_PIN_23_EINT1__FUNC_CONN_MCU_TDO (MTK_PIN_NO(23) | 4)
+#define MT2701_PIN_23_EINT1__FUNC_EXT_FRAME_SYNC (MTK_PIN_NO(23) | 5)
+#define MT2701_PIN_23_EINT1__FUNC_DBG_MON_A_29 (MTK_PIN_NO(23) | 7)
+#define MT2701_PIN_23_EINT1__FUNC_PCIE1_PERST_N (MTK_PIN_NO(23) | 10)
+
+#define MT2701_PIN_24_EINT2__FUNC_GPIO24 (MTK_PIN_NO(24) | 0)
+#define MT2701_PIN_24_EINT2__FUNC_UCTS1 (MTK_PIN_NO(24) | 1)
+#define MT2701_PIN_24_EINT2__FUNC_KCOL1 (MTK_PIN_NO(24) | 3)
+#define MT2701_PIN_24_EINT2__FUNC_CONN_MCU_DBGACK_N (MTK_PIN_NO(24) | 4)
+#define MT2701_PIN_24_EINT2__FUNC_DBG_MON_A_28 (MTK_PIN_NO(24) | 7)
+#define MT2701_PIN_24_EINT2__FUNC_PCIE2_PERST_N (MTK_PIN_NO(24) | 10)
+
+#define MT2701_PIN_25_EINT3__FUNC_GPIO25 (MTK_PIN_NO(25) | 0)
+#define MT2701_PIN_25_EINT3__FUNC_URTS1 (MTK_PIN_NO(25) | 1)
+#define MT2701_PIN_25_EINT3__FUNC_KCOL0 (MTK_PIN_NO(25) | 3)
+#define MT2701_PIN_25_EINT3__FUNC_CONN_MCU_DBGI_N (MTK_PIN_NO(25) | 4)
+#define MT2701_PIN_25_EINT3__FUNC_DBG_MON_A_27 (MTK_PIN_NO(25) | 7)
+
+#define MT2701_PIN_26_EINT4__FUNC_GPIO26 (MTK_PIN_NO(26) | 0)
+#define MT2701_PIN_26_EINT4__FUNC_UCTS3 (MTK_PIN_NO(26) | 1)
+#define MT2701_PIN_26_EINT4__FUNC_DRV_VBUS_P1 (MTK_PIN_NO(26) | 2)
+#define MT2701_PIN_26_EINT4__FUNC_KROW3 (MTK_PIN_NO(26) | 3)
+#define MT2701_PIN_26_EINT4__FUNC_CONN_MCU_TCK0 (MTK_PIN_NO(26) | 4)
+#define MT2701_PIN_26_EINT4__FUNC_CONN_MCU_AICE_JCKC (MTK_PIN_NO(26) | 5)
+#define MT2701_PIN_26_EINT4__FUNC_PCIE2_WAKE_N (MTK_PIN_NO(26) | 6)
+#define MT2701_PIN_26_EINT4__FUNC_DBG_MON_A_26 (MTK_PIN_NO(26) | 7)
+
+#define MT2701_PIN_27_EINT5__FUNC_GPIO27 (MTK_PIN_NO(27) | 0)
+#define MT2701_PIN_27_EINT5__FUNC_URTS3 (MTK_PIN_NO(27) | 1)
+#define MT2701_PIN_27_EINT5__FUNC_IDDIG_P1 (MTK_PIN_NO(27) | 2)
+#define MT2701_PIN_27_EINT5__FUNC_KROW2 (MTK_PIN_NO(27) | 3)
+#define MT2701_PIN_27_EINT5__FUNC_CONN_MCU_TDI (MTK_PIN_NO(27) | 4)
+#define MT2701_PIN_27_EINT5__FUNC_PCIE1_WAKE_N (MTK_PIN_NO(27) | 6)
+#define MT2701_PIN_27_EINT5__FUNC_DBG_MON_A_25 (MTK_PIN_NO(27) | 7)
+
+#define MT2701_PIN_28_EINT6__FUNC_GPIO28 (MTK_PIN_NO(28) | 0)
+#define MT2701_PIN_28_EINT6__FUNC_DRV_VBUS (MTK_PIN_NO(28) | 1)
+#define MT2701_PIN_28_EINT6__FUNC_KROW1 (MTK_PIN_NO(28) | 3)
+#define MT2701_PIN_28_EINT6__FUNC_CONN_MCU_TRST_B (MTK_PIN_NO(28) | 4)
+#define MT2701_PIN_28_EINT6__FUNC_PCIE0_WAKE_N (MTK_PIN_NO(28) | 6)
+#define MT2701_PIN_28_EINT6__FUNC_DBG_MON_A_24 (MTK_PIN_NO(28) | 7)
+
+#define MT2701_PIN_29_EINT7__FUNC_GPIO29 (MTK_PIN_NO(29) | 0)
+#define MT2701_PIN_29_EINT7__FUNC_IDDIG (MTK_PIN_NO(29) | 1)
+#define MT2701_PIN_29_EINT7__FUNC_MSDC1_WP (MTK_PIN_NO(29) | 2)
+#define MT2701_PIN_29_EINT7__FUNC_KROW0 (MTK_PIN_NO(29) | 3)
+#define MT2701_PIN_29_EINT7__FUNC_CONN_MCU_TMS (MTK_PIN_NO(29) | 4)
+#define MT2701_PIN_29_EINT7__FUNC_CONN_MCU_AICE_JMSC (MTK_PIN_NO(29) | 5)
+#define MT2701_PIN_29_EINT7__FUNC_DBG_MON_A_23 (MTK_PIN_NO(29) | 7)
+#define MT2701_PIN_29_EINT7__FUNC_PCIE2_PERST_N (MTK_PIN_NO(29) | 14)
+
+#define MT2701_PIN_33_I2S1_DATA__FUNC_GPIO33 (MTK_PIN_NO(33) | 0)
+#define MT2701_PIN_33_I2S1_DATA__FUNC_I2S1_DATA (MTK_PIN_NO(33) | 1)
+#define MT2701_PIN_33_I2S1_DATA__FUNC_I2S1_DATA_BYPS (MTK_PIN_NO(33) | 2)
+#define MT2701_PIN_33_I2S1_DATA__FUNC_PCM_TX (MTK_PIN_NO(33) | 3)
+#define MT2701_PIN_33_I2S1_DATA__FUNC_IMG_TEST_CK (MTK_PIN_NO(33) | 4)
+#define MT2701_PIN_33_I2S1_DATA__FUNC_G1_RXD0 (MTK_PIN_NO(33) | 5)
+#define MT2701_PIN_33_I2S1_DATA__FUNC_WCN_PCM_TX (MTK_PIN_NO(33) | 6)
+#define MT2701_PIN_33_I2S1_DATA__FUNC_DBG_MON_B_8 (MTK_PIN_NO(33) | 7)
+
+#define MT2701_PIN_34_I2S1_DATA_IN__FUNC_GPIO34 (MTK_PIN_NO(34) | 0)
+#define MT2701_PIN_34_I2S1_DATA_IN__FUNC_I2S1_DATA_IN (MTK_PIN_NO(34) | 1)
+#define MT2701_PIN_34_I2S1_DATA_IN__FUNC_PCM_RX (MTK_PIN_NO(34) | 3)
+#define MT2701_PIN_34_I2S1_DATA_IN__FUNC_VDEC_TEST_CK (MTK_PIN_NO(34) | 4)
+#define MT2701_PIN_34_I2S1_DATA_IN__FUNC_G1_RXD1 (MTK_PIN_NO(34) | 5)
+#define MT2701_PIN_34_I2S1_DATA_IN__FUNC_WCN_PCM_RX (MTK_PIN_NO(34) | 6)
+#define MT2701_PIN_34_I2S1_DATA_IN__FUNC_DBG_MON_B_7 (MTK_PIN_NO(34) | 7)
+
+#define MT2701_PIN_35_I2S1_BCK__FUNC_GPIO35 (MTK_PIN_NO(35) | 0)
+#define MT2701_PIN_35_I2S1_BCK__FUNC_I2S1_BCK (MTK_PIN_NO(35) | 1)
+#define MT2701_PIN_35_I2S1_BCK__FUNC_PCM_CLK0 (MTK_PIN_NO(35) | 3)
+#define MT2701_PIN_35_I2S1_BCK__FUNC_G1_RXD2 (MTK_PIN_NO(35) | 5)
+#define MT2701_PIN_35_I2S1_BCK__FUNC_WCN_PCM_CLKO (MTK_PIN_NO(35) | 6)
+#define MT2701_PIN_35_I2S1_BCK__FUNC_DBG_MON_B_9 (MTK_PIN_NO(35) | 7)
+
+#define MT2701_PIN_36_I2S1_LRCK__FUNC_GPIO36 (MTK_PIN_NO(36) | 0)
+#define MT2701_PIN_36_I2S1_LRCK__FUNC_I2S1_LRCK (MTK_PIN_NO(36) | 1)
+#define MT2701_PIN_36_I2S1_LRCK__FUNC_PCM_SYNC (MTK_PIN_NO(36) | 3)
+#define MT2701_PIN_36_I2S1_LRCK__FUNC_G1_RXD3 (MTK_PIN_NO(36) | 5)
+#define MT2701_PIN_36_I2S1_LRCK__FUNC_WCN_PCM_SYNC (MTK_PIN_NO(36) | 6)
+#define MT2701_PIN_36_I2S1_LRCK__FUNC_DBG_MON_B_10 (MTK_PIN_NO(36) | 7)
+
+#define MT2701_PIN_37_I2S1_MCLK__FUNC_GPIO37 (MTK_PIN_NO(37) | 0)
+#define MT2701_PIN_37_I2S1_MCLK__FUNC_I2S1_MCLK (MTK_PIN_NO(37) | 1)
+#define MT2701_PIN_37_I2S1_MCLK__FUNC_G1_RXDV (MTK_PIN_NO(37) | 5)
+#define MT2701_PIN_37_I2S1_MCLK__FUNC_DBG_MON_B_11 (MTK_PIN_NO(37) | 7)
+
+#define MT2701_PIN_39_JTMS__FUNC_GPIO39 (MTK_PIN_NO(39) | 0)
+#define MT2701_PIN_39_JTMS__FUNC_JTMS (MTK_PIN_NO(39) | 1)
+#define MT2701_PIN_39_JTMS__FUNC_CONN_MCU_TMS (MTK_PIN_NO(39) | 2)
+#define MT2701_PIN_39_JTMS__FUNC_CONN_MCU_AICE_JMSC (MTK_PIN_NO(39) | 3)
+#define MT2701_PIN_39_JTMS__FUNC_DFD_TMS_XI (MTK_PIN_NO(39) | 4)
+
+#define MT2701_PIN_40_JTCK__FUNC_GPIO40 (MTK_PIN_NO(40) | 0)
+#define MT2701_PIN_40_JTCK__FUNC_JTCK (MTK_PIN_NO(40) | 1)
+#define MT2701_PIN_40_JTCK__FUNC_CONN_MCU_TCK1 (MTK_PIN_NO(40) | 2)
+#define MT2701_PIN_40_JTCK__FUNC_CONN_MCU_AICE_JCKC (MTK_PIN_NO(40) | 3)
+#define MT2701_PIN_40_JTCK__FUNC_DFD_TCK_XI (MTK_PIN_NO(40) | 4)
+
+#define MT2701_PIN_41_JTDI__FUNC_GPIO41 (MTK_PIN_NO(41) | 0)
+#define MT2701_PIN_41_JTDI__FUNC_JTDI (MTK_PIN_NO(41) | 1)
+#define MT2701_PIN_41_JTDI__FUNC_CONN_MCU_TDI (MTK_PIN_NO(41) | 2)
+#define MT2701_PIN_41_JTDI__FUNC_DFD_TDI_XI (MTK_PIN_NO(41) | 4)
+
+#define MT2701_PIN_42_JTDO__FUNC_GPIO42 (MTK_PIN_NO(42) | 0)
+#define MT2701_PIN_42_JTDO__FUNC_JTDO (MTK_PIN_NO(42) | 1)
+#define MT2701_PIN_42_JTDO__FUNC_CONN_MCU_TDO (MTK_PIN_NO(42) | 2)
+#define MT2701_PIN_42_JTDO__FUNC_DFD_TDO (MTK_PIN_NO(42) | 4)
+
+#define MT2701_PIN_43_NCLE__FUNC_GPIO43 (MTK_PIN_NO(43) | 0)
+#define MT2701_PIN_43_NCLE__FUNC_NCLE (MTK_PIN_NO(43) | 1)
+#define MT2701_PIN_43_NCLE__FUNC_EXT_XCS2 (MTK_PIN_NO(43) | 2)
+
+#define MT2701_PIN_44_NCEB1__FUNC_GPIO44 (MTK_PIN_NO(44) | 0)
+#define MT2701_PIN_44_NCEB1__FUNC_NCEB1 (MTK_PIN_NO(44) | 1)
+#define MT2701_PIN_44_NCEB1__FUNC_IDDIG (MTK_PIN_NO(44) | 2)
+
+#define MT2701_PIN_45_NCEB0__FUNC_GPIO45 (MTK_PIN_NO(45) | 0)
+#define MT2701_PIN_45_NCEB0__FUNC_NCEB0 (MTK_PIN_NO(45) | 1)
+#define MT2701_PIN_45_NCEB0__FUNC_DRV_VBUS (MTK_PIN_NO(45) | 2)
+
+#define MT2701_PIN_46_IR__FUNC_GPIO46 (MTK_PIN_NO(46) | 0)
+#define MT2701_PIN_46_IR__FUNC_IR (MTK_PIN_NO(46) | 1)
+
+#define MT2701_PIN_47_NREB__FUNC_GPIO47 (MTK_PIN_NO(47) | 0)
+#define MT2701_PIN_47_NREB__FUNC_NREB (MTK_PIN_NO(47) | 1)
+#define MT2701_PIN_47_NREB__FUNC_IDDIG_P1 (MTK_PIN_NO(47) | 2)
+
+#define MT2701_PIN_48_NRNB__FUNC_GPIO48 (MTK_PIN_NO(48) | 0)
+#define MT2701_PIN_48_NRNB__FUNC_NRNB (MTK_PIN_NO(48) | 1)
+#define MT2701_PIN_48_NRNB__FUNC_DRV_VBUS_P1 (MTK_PIN_NO(48) | 2)
+
+#define MT2701_PIN_49_I2S0_DATA__FUNC_GPIO49 (MTK_PIN_NO(49) | 0)
+#define MT2701_PIN_49_I2S0_DATA__FUNC_I2S0_DATA (MTK_PIN_NO(49) | 1)
+#define MT2701_PIN_49_I2S0_DATA__FUNC_I2S0_DATA_BYPS (MTK_PIN_NO(49) | 2)
+#define MT2701_PIN_49_I2S0_DATA__FUNC_PCM_TX (MTK_PIN_NO(49) | 3)
+#define MT2701_PIN_49_I2S0_DATA__FUNC_WCN_I2S_DO (MTK_PIN_NO(49) | 6)
+#define MT2701_PIN_49_I2S0_DATA__FUNC_DBG_MON_B_3 (MTK_PIN_NO(49) | 7)
+
+#define MT2701_PIN_53_SPI0_CSN__FUNC_GPIO53 (MTK_PIN_NO(53) | 0)
+#define MT2701_PIN_53_SPI0_CSN__FUNC_SPI0_CS (MTK_PIN_NO(53) | 1)
+#define MT2701_PIN_53_SPI0_CSN__FUNC_SPDIF (MTK_PIN_NO(53) | 3)
+#define MT2701_PIN_53_SPI0_CSN__FUNC_ADC_CK (MTK_PIN_NO(53) | 4)
+#define MT2701_PIN_53_SPI0_CSN__FUNC_PWM1 (MTK_PIN_NO(53) | 5)
+#define MT2701_PIN_53_SPI0_CSN__FUNC_DBG_MON_A_7 (MTK_PIN_NO(53) | 7)
+
+#define MT2701_PIN_54_SPI0_CK__FUNC_GPIO54 (MTK_PIN_NO(54) | 0)
+#define MT2701_PIN_54_SPI0_CK__FUNC_SPI0_CK (MTK_PIN_NO(54) | 1)
+#define MT2701_PIN_54_SPI0_CK__FUNC_SPDIF_IN1 (MTK_PIN_NO(54) | 3)
+#define MT2701_PIN_54_SPI0_CK__FUNC_ADC_DAT_IN (MTK_PIN_NO(54) | 4)
+#define MT2701_PIN_54_SPI0_CK__FUNC_DBG_MON_A_10 (MTK_PIN_NO(54) | 7)
+
+#define MT2701_PIN_55_SPI0_MI__FUNC_GPIO55 (MTK_PIN_NO(55) | 0)
+#define MT2701_PIN_55_SPI0_MI__FUNC_SPI0_MI (MTK_PIN_NO(55) | 1)
+#define MT2701_PIN_55_SPI0_MI__FUNC_SPI0_MO (MTK_PIN_NO(55) | 2)
+#define MT2701_PIN_55_SPI0_MI__FUNC_MSDC1_WP (MTK_PIN_NO(55) | 3)
+#define MT2701_PIN_55_SPI0_MI__FUNC_ADC_WS (MTK_PIN_NO(55) | 4)
+#define MT2701_PIN_55_SPI0_MI__FUNC_PWM2 (MTK_PIN_NO(55) | 5)
+#define MT2701_PIN_55_SPI0_MI__FUNC_DBG_MON_A_8 (MTK_PIN_NO(55) | 7)
+
+#define MT2701_PIN_56_SPI0_MO__FUNC_GPIO56 (MTK_PIN_NO(56) | 0)
+#define MT2701_PIN_56_SPI0_MO__FUNC_SPI0_MO (MTK_PIN_NO(56) | 1)
+#define MT2701_PIN_56_SPI0_MO__FUNC_SPI0_MI (MTK_PIN_NO(56) | 2)
+#define MT2701_PIN_56_SPI0_MO__FUNC_SPDIF_IN0 (MTK_PIN_NO(56) | 3)
+#define MT2701_PIN_56_SPI0_MO__FUNC_DBG_MON_A_9 (MTK_PIN_NO(56) | 7)
+
+#define MT2701_PIN_57_SDA1__FUNC_GPIO57 (MTK_PIN_NO(57) | 0)
+#define MT2701_PIN_57_SDA1__FUNC_SDA1 (MTK_PIN_NO(57) | 1)
+
+#define MT2701_PIN_58_SCL1__FUNC_GPIO58 (MTK_PIN_NO(58) | 0)
+#define MT2701_PIN_58_SCL1__FUNC_SCL1 (MTK_PIN_NO(58) | 1)
+
+#define MT2701_PIN_72_I2S0_DATA_IN__FUNC_GPIO72 (MTK_PIN_NO(72) | 0)
+#define MT2701_PIN_72_I2S0_DATA_IN__FUNC_I2S0_DATA_IN (MTK_PIN_NO(72) | 1)
+#define MT2701_PIN_72_I2S0_DATA_IN__FUNC_PCM_RX (MTK_PIN_NO(72) | 3)
+#define MT2701_PIN_72_I2S0_DATA_IN__FUNC_PWM0 (MTK_PIN_NO(72) | 4)
+#define MT2701_PIN_72_I2S0_DATA_IN__FUNC_DISP_PWM (MTK_PIN_NO(72) | 5)
+#define MT2701_PIN_72_I2S0_DATA_IN__FUNC_WCN_I2S_DI (MTK_PIN_NO(72) | 6)
+#define MT2701_PIN_72_I2S0_DATA_IN__FUNC_DBG_MON_B_2 (MTK_PIN_NO(72) | 7)
+
+#define MT2701_PIN_73_I2S0_LRCK__FUNC_GPIO73 (MTK_PIN_NO(73) | 0)
+#define MT2701_PIN_73_I2S0_LRCK__FUNC_I2S0_LRCK (MTK_PIN_NO(73) | 1)
+#define MT2701_PIN_73_I2S0_LRCK__FUNC_PCM_SYNC (MTK_PIN_NO(73) | 3)
+#define MT2701_PIN_73_I2S0_LRCK__FUNC_WCN_I2S_LRCK (MTK_PIN_NO(73) | 6)
+#define MT2701_PIN_73_I2S0_LRCK__FUNC_DBG_MON_B_5 (MTK_PIN_NO(73) | 7)
+
+#define MT2701_PIN_74_I2S0_BCK__FUNC_GPIO74 (MTK_PIN_NO(74) | 0)
+#define MT2701_PIN_74_I2S0_BCK__FUNC_I2S0_BCK (MTK_PIN_NO(74) | 1)
+#define MT2701_PIN_74_I2S0_BCK__FUNC_PCM_CLK0 (MTK_PIN_NO(74) | 3)
+#define MT2701_PIN_74_I2S0_BCK__FUNC_WCN_I2S_BCK (MTK_PIN_NO(74) | 6)
+#define MT2701_PIN_74_I2S0_BCK__FUNC_DBG_MON_B_4 (MTK_PIN_NO(74) | 7)
+
+#define MT2701_PIN_75_SDA0__FUNC_GPIO75 (MTK_PIN_NO(75) | 0)
+#define MT2701_PIN_75_SDA0__FUNC_SDA0 (MTK_PIN_NO(75) | 1)
+
+#define MT2701_PIN_76_SCL0__FUNC_GPIO76 (MTK_PIN_NO(76) | 0)
+#define MT2701_PIN_76_SCL0__FUNC_SCL0 (MTK_PIN_NO(76) | 1)
+
+#define MT2701_PIN_77_SDA2__FUNC_GPIO77 (MTK_PIN_NO(77) | 0)
+#define MT2701_PIN_77_SDA2__FUNC_SDA2 (MTK_PIN_NO(77) | 1)
+
+#define MT2701_PIN_78_SCL2__FUNC_GPIO78 (MTK_PIN_NO(78) | 0)
+#define MT2701_PIN_78_SCL2__FUNC_SCL2 (MTK_PIN_NO(78) | 1)
+
+#define MT2701_PIN_79_URXD0__FUNC_GPIO79 (MTK_PIN_NO(79) | 0)
+#define MT2701_PIN_79_URXD0__FUNC_URXD0 (MTK_PIN_NO(79) | 1)
+#define MT2701_PIN_79_URXD0__FUNC_UTXD0 (MTK_PIN_NO(79) | 2)
+#define MT2701_PIN_79_URXD0__FUNC_ (MTK_PIN_NO(79) | 5)
+
+#define MT2701_PIN_80_UTXD0__FUNC_GPIO80 (MTK_PIN_NO(80) | 0)
+#define MT2701_PIN_80_UTXD0__FUNC_UTXD0 (MTK_PIN_NO(80) | 1)
+#define MT2701_PIN_80_UTXD0__FUNC_URXD0 (MTK_PIN_NO(80) | 2)
+
+#define MT2701_PIN_81_URXD1__FUNC_GPIO81 (MTK_PIN_NO(81) | 0)
+#define MT2701_PIN_81_URXD1__FUNC_URXD1 (MTK_PIN_NO(81) | 1)
+#define MT2701_PIN_81_URXD1__FUNC_UTXD1 (MTK_PIN_NO(81) | 2)
+
+#define MT2701_PIN_82_UTXD1__FUNC_GPIO82 (MTK_PIN_NO(82) | 0)
+#define MT2701_PIN_82_UTXD1__FUNC_UTXD1 (MTK_PIN_NO(82) | 1)
+#define MT2701_PIN_82_UTXD1__FUNC_URXD1 (MTK_PIN_NO(82) | 2)
+
+#define MT2701_PIN_83_LCM_RST__FUNC_GPIO83 (MTK_PIN_NO(83) | 0)
+#define MT2701_PIN_83_LCM_RST__FUNC_LCM_RST (MTK_PIN_NO(83) | 1)
+#define MT2701_PIN_83_LCM_RST__FUNC_VDAC_CK_XI (MTK_PIN_NO(83) | 2)
+#define MT2701_PIN_83_LCM_RST__FUNC_DBG_MON_B_1 (MTK_PIN_NO(83) | 7)
+
+#define MT2701_PIN_84_DSI_TE__FUNC_GPIO84 (MTK_PIN_NO(84) | 0)
+#define MT2701_PIN_84_DSI_TE__FUNC_DSI_TE (MTK_PIN_NO(84) | 1)
+#define MT2701_PIN_84_DSI_TE__FUNC_DBG_MON_B_0 (MTK_PIN_NO(84) | 7)
+
+#define MT2701_PIN_91_TDN3__FUNC_GPI91 (MTK_PIN_NO(91) | 0)
+#define MT2701_PIN_91_TDN3__FUNC_TDN3 (MTK_PIN_NO(91) | 1)
+
+#define MT2701_PIN_92_TDP3__FUNC_GPI92 (MTK_PIN_NO(92) | 0)
+#define MT2701_PIN_92_TDP3__FUNC_TDP3 (MTK_PIN_NO(92) | 1)
+
+#define MT2701_PIN_93_TDN2__FUNC_GPI93 (MTK_PIN_NO(93) | 0)
+#define MT2701_PIN_93_TDN2__FUNC_TDN2 (MTK_PIN_NO(93) | 1)
+
+#define MT2701_PIN_94_TDP2__FUNC_GPI94 (MTK_PIN_NO(94) | 0)
+#define MT2701_PIN_94_TDP2__FUNC_TDP2 (MTK_PIN_NO(94) | 1)
+
+#define MT2701_PIN_95_TCN__FUNC_GPI95 (MTK_PIN_NO(95) | 0)
+#define MT2701_PIN_95_TCN__FUNC_TCN (MTK_PIN_NO(95) | 1)
+
+#define MT2701_PIN_96_TCP__FUNC_GPI96 (MTK_PIN_NO(96) | 0)
+#define MT2701_PIN_96_TCP__FUNC_TCP (MTK_PIN_NO(96) | 1)
+
+#define MT2701_PIN_97_TDN1__FUNC_GPI97 (MTK_PIN_NO(97) | 0)
+#define MT2701_PIN_97_TDN1__FUNC_TDN1 (MTK_PIN_NO(97) | 1)
+
+#define MT2701_PIN_98_TDP1__FUNC_GPI98 (MTK_PIN_NO(98) | 0)
+#define MT2701_PIN_98_TDP1__FUNC_TDP1 (MTK_PIN_NO(98) | 1)
+
+#define MT2701_PIN_99_TDN0__FUNC_GPI99 (MTK_PIN_NO(99) | 0)
+#define MT2701_PIN_99_TDN0__FUNC_TDN0 (MTK_PIN_NO(99) | 1)
+
+#define MT2701_PIN_100_TDP0__FUNC_GPI100 (MTK_PIN_NO(100) | 0)
+#define MT2701_PIN_100_TDP0__FUNC_TDP0 (MTK_PIN_NO(100) | 1)
+
+#define MT2701_PIN_101_SPI2_CSN__FUNC_GPIO101 (MTK_PIN_NO(101) | 0)
+#define MT2701_PIN_101_SPI2_CSN__FUNC_SPI2_CS (MTK_PIN_NO(101) | 1)
+#define MT2701_PIN_101_SPI2_CSN__FUNC_SCL3 (MTK_PIN_NO(101) | 3)
+#define MT2701_PIN_101_SPI2_CSN__FUNC_KROW0 (MTK_PIN_NO(101) | 4)
+
+#define MT2701_PIN_102_SPI2_MI__FUNC_GPIO102 (MTK_PIN_NO(102) | 0)
+#define MT2701_PIN_102_SPI2_MI__FUNC_SPI2_MI (MTK_PIN_NO(102) | 1)
+#define MT2701_PIN_102_SPI2_MI__FUNC_SPI2_MO (MTK_PIN_NO(102) | 2)
+#define MT2701_PIN_102_SPI2_MI__FUNC_SDA3 (MTK_PIN_NO(102) | 3)
+#define MT2701_PIN_102_SPI2_MI__FUNC_KROW1 (MTK_PIN_NO(102) | 4)
+
+#define MT2701_PIN_103_SPI2_MO__FUNC_GPIO103 (MTK_PIN_NO(103) | 0)
+#define MT2701_PIN_103_SPI2_MO__FUNC_SPI2_MO (MTK_PIN_NO(103) | 1)
+#define MT2701_PIN_103_SPI2_MO__FUNC_SPI2_MI (MTK_PIN_NO(103) | 2)
+#define MT2701_PIN_103_SPI2_MO__FUNC_SCL3 (MTK_PIN_NO(103) | 3)
+#define MT2701_PIN_103_SPI2_MO__FUNC_KROW2 (MTK_PIN_NO(103) | 4)
+
+#define MT2701_PIN_104_SPI2_CLK__FUNC_GPIO104 (MTK_PIN_NO(104) | 0)
+#define MT2701_PIN_104_SPI2_CLK__FUNC_SPI2_CK (MTK_PIN_NO(104) | 1)
+#define MT2701_PIN_104_SPI2_CLK__FUNC_SDA3 (MTK_PIN_NO(104) | 3)
+#define MT2701_PIN_104_SPI2_CLK__FUNC_KROW3 (MTK_PIN_NO(104) | 4)
+
+#define MT2701_PIN_105_MSDC1_CMD__FUNC_GPIO105 (MTK_PIN_NO(105) | 0)
+#define MT2701_PIN_105_MSDC1_CMD__FUNC_MSDC1_CMD (MTK_PIN_NO(105) | 1)
+#define MT2701_PIN_105_MSDC1_CMD__FUNC_ANT_SEL0 (MTK_PIN_NO(105) | 2)
+#define MT2701_PIN_105_MSDC1_CMD__FUNC_SDA1 (MTK_PIN_NO(105) | 3)
+#define MT2701_PIN_105_MSDC1_CMD__FUNC_I2SOUT_BCK (MTK_PIN_NO(105) | 6)
+#define MT2701_PIN_105_MSDC1_CMD__FUNC_DBG_MON_B_27 (MTK_PIN_NO(105) | 7)
+
+#define MT2701_PIN_106_MSDC1_CLK__FUNC_GPIO106 (MTK_PIN_NO(106) | 0)
+#define MT2701_PIN_106_MSDC1_CLK__FUNC_MSDC1_CLK (MTK_PIN_NO(106) | 1)
+#define MT2701_PIN_106_MSDC1_CLK__FUNC_ANT_SEL1 (MTK_PIN_NO(106) | 2)
+#define MT2701_PIN_106_MSDC1_CLK__FUNC_SCL1 (MTK_PIN_NO(106) | 3)
+#define MT2701_PIN_106_MSDC1_CLK__FUNC_I2SOUT_LRCK (MTK_PIN_NO(106) | 6)
+#define MT2701_PIN_106_MSDC1_CLK__FUNC_DBG_MON_B_28 (MTK_PIN_NO(106) | 7)
+
+#define MT2701_PIN_107_MSDC1_DAT0__FUNC_GPIO107 (MTK_PIN_NO(107) | 0)
+#define MT2701_PIN_107_MSDC1_DAT0__FUNC_MSDC1_DAT0 (MTK_PIN_NO(107) | 1)
+#define MT2701_PIN_107_MSDC1_DAT0__FUNC_ANT_SEL2 (MTK_PIN_NO(107) | 2)
+#define MT2701_PIN_107_MSDC1_DAT0__FUNC_UTXD0 (MTK_PIN_NO(107) | 5)
+#define MT2701_PIN_107_MSDC1_DAT0__FUNC_I2SOUT_DATA_OUT (MTK_PIN_NO(107) | 6)
+#define MT2701_PIN_107_MSDC1_DAT0__FUNC_DBG_MON_B_26 (MTK_PIN_NO(107) | 7)
+
+#define MT2701_PIN_108_MSDC1_DAT1__FUNC_GPIO108 (MTK_PIN_NO(108) | 0)
+#define MT2701_PIN_108_MSDC1_DAT1__FUNC_MSDC1_DAT1 (MTK_PIN_NO(108) | 1)
+#define MT2701_PIN_108_MSDC1_DAT1__FUNC_ANT_SEL3 (MTK_PIN_NO(108) | 2)
+#define MT2701_PIN_108_MSDC1_DAT1__FUNC_PWM0 (MTK_PIN_NO(108) | 3)
+#define MT2701_PIN_108_MSDC1_DAT1__FUNC_URXD0 (MTK_PIN_NO(108) | 5)
+#define MT2701_PIN_108_MSDC1_DAT1__FUNC_PWM1 (MTK_PIN_NO(108) | 6)
+#define MT2701_PIN_108_MSDC1_DAT1__FUNC_DBG_MON_B_25 (MTK_PIN_NO(108) | 7)
+
+#define MT2701_PIN_109_MSDC1_DAT2__FUNC_GPIO109 (MTK_PIN_NO(109) | 0)
+#define MT2701_PIN_109_MSDC1_DAT2__FUNC_MSDC1_DAT2 (MTK_PIN_NO(109) | 1)
+#define MT2701_PIN_109_MSDC1_DAT2__FUNC_ANT_SEL4 (MTK_PIN_NO(109) | 2)
+#define MT2701_PIN_109_MSDC1_DAT2__FUNC_SDA2 (MTK_PIN_NO(109) | 3)
+#define MT2701_PIN_109_MSDC1_DAT2__FUNC_UTXD1 (MTK_PIN_NO(109) | 5)
+#define MT2701_PIN_109_MSDC1_DAT2__FUNC_PWM2 (MTK_PIN_NO(109) | 6)
+#define MT2701_PIN_109_MSDC1_DAT2__FUNC_DBG_MON_B_24 (MTK_PIN_NO(109) | 7)
+
+#define MT2701_PIN_110_MSDC1_DAT3__FUNC_GPIO110 (MTK_PIN_NO(110) | 0)
+#define MT2701_PIN_110_MSDC1_DAT3__FUNC_MSDC1_DAT3 (MTK_PIN_NO(110) | 1)
+#define MT2701_PIN_110_MSDC1_DAT3__FUNC_ANT_SEL5 (MTK_PIN_NO(110) | 2)
+#define MT2701_PIN_110_MSDC1_DAT3__FUNC_SCL2 (MTK_PIN_NO(110) | 3)
+#define MT2701_PIN_110_MSDC1_DAT3__FUNC_EXT_FRAME_SYNC (MTK_PIN_NO(110) | 4)
+#define MT2701_PIN_110_MSDC1_DAT3__FUNC_URXD1 (MTK_PIN_NO(110) | 5)
+#define MT2701_PIN_110_MSDC1_DAT3__FUNC_PWM3 (MTK_PIN_NO(110) | 6)
+#define MT2701_PIN_110_MSDC1_DAT3__FUNC_DBG_MON_B_23 (MTK_PIN_NO(110) | 7)
+
+#define MT2701_PIN_111_MSDC0_DAT7__FUNC_GPIO111 (MTK_PIN_NO(111) | 0)
+#define MT2701_PIN_111_MSDC0_DAT7__FUNC_MSDC0_DAT7 (MTK_PIN_NO(111) | 1)
+#define MT2701_PIN_111_MSDC0_DAT7__FUNC_NLD7 (MTK_PIN_NO(111) | 4)
+
+#define MT2701_PIN_112_MSDC0_DAT6__FUNC_GPIO112 (MTK_PIN_NO(112) | 0)
+#define MT2701_PIN_112_MSDC0_DAT6__FUNC_MSDC0_DAT6 (MTK_PIN_NO(112) | 1)
+#define MT2701_PIN_112_MSDC0_DAT6__FUNC_NLD6 (MTK_PIN_NO(112) | 4)
+
+#define MT2701_PIN_113_MSDC0_DAT5__FUNC_GPIO113 (MTK_PIN_NO(113) | 0)
+#define MT2701_PIN_113_MSDC0_DAT5__FUNC_MSDC0_DAT5 (MTK_PIN_NO(113) | 1)
+#define MT2701_PIN_113_MSDC0_DAT5__FUNC_NLD5 (MTK_PIN_NO(113) | 4)
+
+#define MT2701_PIN_114_MSDC0_DAT4__FUNC_GPIO114 (MTK_PIN_NO(114) | 0)
+#define MT2701_PIN_114_MSDC0_DAT4__FUNC_MSDC0_DAT4 (MTK_PIN_NO(114) | 1)
+#define MT2701_PIN_114_MSDC0_DAT4__FUNC_NLD4 (MTK_PIN_NO(114) | 4)
+
+#define MT2701_PIN_115_MSDC0_RSTB__FUNC_GPIO115 (MTK_PIN_NO(115) | 0)
+#define MT2701_PIN_115_MSDC0_RSTB__FUNC_MSDC0_RSTB (MTK_PIN_NO(115) | 1)
+#define MT2701_PIN_115_MSDC0_RSTB__FUNC_NLD8 (MTK_PIN_NO(115) | 4)
+
+#define MT2701_PIN_116_MSDC0_CMD__FUNC_GPIO116 (MTK_PIN_NO(116) | 0)
+#define MT2701_PIN_116_MSDC0_CMD__FUNC_MSDC0_CMD (MTK_PIN_NO(116) | 1)
+#define MT2701_PIN_116_MSDC0_CMD__FUNC_NALE (MTK_PIN_NO(116) | 4)
+
+#define MT2701_PIN_117_MSDC0_CLK__FUNC_GPIO117 (MTK_PIN_NO(117) | 0)
+#define MT2701_PIN_117_MSDC0_CLK__FUNC_MSDC0_CLK (MTK_PIN_NO(117) | 1)
+#define MT2701_PIN_117_MSDC0_CLK__FUNC_NWEB (MTK_PIN_NO(117) | 4)
+
+#define MT2701_PIN_118_MSDC0_DAT3__FUNC_GPIO118 (MTK_PIN_NO(118) | 0)
+#define MT2701_PIN_118_MSDC0_DAT3__FUNC_MSDC0_DAT3 (MTK_PIN_NO(118) | 1)
+#define MT2701_PIN_118_MSDC0_DAT3__FUNC_NLD3 (MTK_PIN_NO(118) | 4)
+
+#define MT2701_PIN_119_MSDC0_DAT2__FUNC_GPIO119 (MTK_PIN_NO(119) | 0)
+#define MT2701_PIN_119_MSDC0_DAT2__FUNC_MSDC0_DAT2 (MTK_PIN_NO(119) | 1)
+#define MT2701_PIN_119_MSDC0_DAT2__FUNC_NLD2 (MTK_PIN_NO(119) | 4)
+
+#define MT2701_PIN_120_MSDC0_DAT1__FUNC_GPIO120 (MTK_PIN_NO(120) | 0)
+#define MT2701_PIN_120_MSDC0_DAT1__FUNC_MSDC0_DAT1 (MTK_PIN_NO(120) | 1)
+#define MT2701_PIN_120_MSDC0_DAT1__FUNC_NLD1 (MTK_PIN_NO(120) | 4)
+
+#define MT2701_PIN_121_MSDC0_DAT0__FUNC_GPIO121 (MTK_PIN_NO(121) | 0)
+#define MT2701_PIN_121_MSDC0_DAT0__FUNC_MSDC0_DAT0 (MTK_PIN_NO(121) | 1)
+#define MT2701_PIN_121_MSDC0_DAT0__FUNC_NLD0 (MTK_PIN_NO(121) | 4)
+#define MT2701_PIN_121_MSDC0_DAT0__FUNC_WATCHDOG (MTK_PIN_NO(121) | 5)
+
+#define MT2701_PIN_122_CEC__FUNC_GPIO122 (MTK_PIN_NO(122) | 0)
+#define MT2701_PIN_122_CEC__FUNC_CEC (MTK_PIN_NO(122) | 1)
+#define MT2701_PIN_122_CEC__FUNC_SDA2 (MTK_PIN_NO(122) | 4)
+#define MT2701_PIN_122_CEC__FUNC_URXD0 (MTK_PIN_NO(122) | 5)
+
+#define MT2701_PIN_123_HTPLG__FUNC_GPIO123 (MTK_PIN_NO(123) | 0)
+#define MT2701_PIN_123_HTPLG__FUNC_HTPLG (MTK_PIN_NO(123) | 1)
+#define MT2701_PIN_123_HTPLG__FUNC_SCL2 (MTK_PIN_NO(123) | 4)
+#define MT2701_PIN_123_HTPLG__FUNC_UTXD0 (MTK_PIN_NO(123) | 5)
+
+#define MT2701_PIN_124_HDMISCK__FUNC_GPIO124 (MTK_PIN_NO(124) | 0)
+#define MT2701_PIN_124_HDMISCK__FUNC_HDMISCK (MTK_PIN_NO(124) | 1)
+#define MT2701_PIN_124_HDMISCK__FUNC_SDA1 (MTK_PIN_NO(124) | 4)
+#define MT2701_PIN_124_HDMISCK__FUNC_PWM3 (MTK_PIN_NO(124) | 5)
+
+#define MT2701_PIN_125_HDMISD__FUNC_GPIO125 (MTK_PIN_NO(125) | 0)
+#define MT2701_PIN_125_HDMISD__FUNC_HDMISD (MTK_PIN_NO(125) | 1)
+#define MT2701_PIN_125_HDMISD__FUNC_SCL1 (MTK_PIN_NO(125) | 4)
+#define MT2701_PIN_125_HDMISD__FUNC_PWM4 (MTK_PIN_NO(125) | 5)
+
+#define MT2701_PIN_126_I2S0_MCLK__FUNC_GPIO126 (MTK_PIN_NO(126) | 0)
+#define MT2701_PIN_126_I2S0_MCLK__FUNC_I2S0_MCLK (MTK_PIN_NO(126) | 1)
+#define MT2701_PIN_126_I2S0_MCLK__FUNC_WCN_I2S_MCLK (MTK_PIN_NO(126) | 6)
+#define MT2701_PIN_126_I2S0_MCLK__FUNC_DBG_MON_B_6 (MTK_PIN_NO(126) | 7)
+
+#define MT2701_PIN_199_SPI1_CLK__FUNC_GPIO199 (MTK_PIN_NO(199) | 0)
+#define MT2701_PIN_199_SPI1_CLK__FUNC_SPI1_CK (MTK_PIN_NO(199) | 1)
+#define MT2701_PIN_199_SPI1_CLK__FUNC_EXT_FRAME_SYNC (MTK_PIN_NO(199) | 3)
+#define MT2701_PIN_199_SPI1_CLK__FUNC_KCOL3 (MTK_PIN_NO(199) | 4)
+#define MT2701_PIN_199_SPI1_CLK__FUNC_DBG_MON_B_15 (MTK_PIN_NO(199) | 7)
+
+#define MT2701_PIN_200_SPDIF_OUT__FUNC_GPIO200 (MTK_PIN_NO(200) | 0)
+#define MT2701_PIN_200_SPDIF_OUT__FUNC_SPDIF_OUT (MTK_PIN_NO(200) | 1)
+#define MT2701_PIN_200_SPDIF_OUT__FUNC_G1_TXD3 (MTK_PIN_NO(200) | 5)
+#define MT2701_PIN_200_SPDIF_OUT__FUNC_URXD2 (MTK_PIN_NO(200) | 6)
+#define MT2701_PIN_200_SPDIF_OUT__FUNC_DBG_MON_B_16 (MTK_PIN_NO(200) | 7)
+
+#define MT2701_PIN_201_SPDIF_IN0__FUNC_GPIO201 (MTK_PIN_NO(201) | 0)
+#define MT2701_PIN_201_SPDIF_IN0__FUNC_SPDIF_IN0 (MTK_PIN_NO(201) | 1)
+#define MT2701_PIN_201_SPDIF_IN0__FUNC_G1_TXEN (MTK_PIN_NO(201) | 5)
+#define MT2701_PIN_201_SPDIF_IN0__FUNC_UTXD2 (MTK_PIN_NO(201) | 6)
+#define MT2701_PIN_201_SPDIF_IN0__FUNC_DBG_MON_B_17 (MTK_PIN_NO(201) | 7)
+
+#define MT2701_PIN_202_SPDIF_IN1__FUNC_GPIO202 (MTK_PIN_NO(202) | 0)
+#define MT2701_PIN_202_SPDIF_IN1__FUNC_SPDIF_IN1 (MTK_PIN_NO(202) | 1)
+
+#define MT2701_PIN_203_PWM0__FUNC_GPIO203 (MTK_PIN_NO(203) | 0)
+#define MT2701_PIN_203_PWM0__FUNC_PWM0 (MTK_PIN_NO(203) | 1)
+#define MT2701_PIN_203_PWM0__FUNC_DISP_PWM (MTK_PIN_NO(203) | 2)
+#define MT2701_PIN_203_PWM0__FUNC_G1_TXD2 (MTK_PIN_NO(203) | 5)
+#define MT2701_PIN_203_PWM0__FUNC_DBG_MON_B_18 (MTK_PIN_NO(203) | 7)
+#define MT2701_PIN_203_PWM0__FUNC_I2S2_DATA (MTK_PIN_NO(203) | 9)
+
+#define MT2701_PIN_204_PWM1__FUNC_GPIO204 (MTK_PIN_NO(204) | 0)
+#define MT2701_PIN_204_PWM1__FUNC_PWM1 (MTK_PIN_NO(204) | 1)
+#define MT2701_PIN_204_PWM1__FUNC_CLKM3 (MTK_PIN_NO(204) | 2)
+#define MT2701_PIN_204_PWM1__FUNC_G1_TXD1 (MTK_PIN_NO(204) | 5)
+#define MT2701_PIN_204_PWM1__FUNC_DBG_MON_B_19 (MTK_PIN_NO(204) | 7)
+#define MT2701_PIN_204_PWM1__FUNC_I2S3_DATA (MTK_PIN_NO(204) | 9)
+
+#define MT2701_PIN_205_PWM2__FUNC_GPIO205 (MTK_PIN_NO(205) | 0)
+#define MT2701_PIN_205_PWM2__FUNC_PWM2 (MTK_PIN_NO(205) | 1)
+#define MT2701_PIN_205_PWM2__FUNC_CLKM2 (MTK_PIN_NO(205) | 2)
+#define MT2701_PIN_205_PWM2__FUNC_G1_TXD0 (MTK_PIN_NO(205) | 5)
+#define MT2701_PIN_205_PWM2__FUNC_DBG_MON_B_20 (MTK_PIN_NO(205) | 7)
+
+#define MT2701_PIN_206_PWM3__FUNC_GPIO206 (MTK_PIN_NO(206) | 0)
+#define MT2701_PIN_206_PWM3__FUNC_PWM3 (MTK_PIN_NO(206) | 1)
+#define MT2701_PIN_206_PWM3__FUNC_CLKM1 (MTK_PIN_NO(206) | 2)
+#define MT2701_PIN_206_PWM3__FUNC_EXT_FRAME_SYNC (MTK_PIN_NO(206) | 3)
+#define MT2701_PIN_206_PWM3__FUNC_G1_TXC (MTK_PIN_NO(206) | 5)
+#define MT2701_PIN_206_PWM3__FUNC_DBG_MON_B_21 (MTK_PIN_NO(206) | 7)
+
+#define MT2701_PIN_207_PWM4__FUNC_GPIO207 (MTK_PIN_NO(207) | 0)
+#define MT2701_PIN_207_PWM4__FUNC_PWM4 (MTK_PIN_NO(207) | 1)
+#define MT2701_PIN_207_PWM4__FUNC_CLKM0 (MTK_PIN_NO(207) | 2)
+#define MT2701_PIN_207_PWM4__FUNC_EXT_FRAME_SYNC (MTK_PIN_NO(207) | 3)
+#define MT2701_PIN_207_PWM4__FUNC_G1_RXC (MTK_PIN_NO(207) | 5)
+#define MT2701_PIN_207_PWM4__FUNC_DBG_MON_B_22 (MTK_PIN_NO(207) | 7)
+
+#define MT2701_PIN_208_AUD_EXT_CK1__FUNC_GPIO208 (MTK_PIN_NO(208) | 0)
+#define MT2701_PIN_208_AUD_EXT_CK1__FUNC_AUD_EXT_CK1 (MTK_PIN_NO(208) | 1)
+#define MT2701_PIN_208_AUD_EXT_CK1__FUNC_PWM0 (MTK_PIN_NO(208) | 2)
+#define MT2701_PIN_208_AUD_EXT_CK1__FUNC_ANT_SEL5 (MTK_PIN_NO(208) | 4)
+#define MT2701_PIN_208_AUD_EXT_CK1__FUNC_DISP_PWM (MTK_PIN_NO(208) | 5)
+#define MT2701_PIN_208_AUD_EXT_CK1__FUNC_DBG_MON_A_31 (MTK_PIN_NO(208) | 7)
+#define MT2701_PIN_208_AUD_EXT_CK1__FUNC_PCIE0_PERST_N (MTK_PIN_NO(208) | 11)
+
+#define MT2701_PIN_209_AUD_EXT_CK2__FUNC_GPIO209 (MTK_PIN_NO(209) | 0)
+#define MT2701_PIN_209_AUD_EXT_CK2__FUNC_AUD_EXT_CK2 (MTK_PIN_NO(209) | 1)
+#define MT2701_PIN_209_AUD_EXT_CK2__FUNC_MSDC1_WP (MTK_PIN_NO(209) | 2)
+#define MT2701_PIN_209_AUD_EXT_CK2__FUNC_PWM1 (MTK_PIN_NO(209) | 5)
+#define MT2701_PIN_209_AUD_EXT_CK2__FUNC_DBG_MON_A_32 (MTK_PIN_NO(209) | 7)
+#define MT2701_PIN_209_AUD_EXT_CK2__FUNC_PCIE1_PERST_N (MTK_PIN_NO(209) | 11)
+
+#define MT2701_PIN_236_EXT_SDIO3__FUNC_GPIO236 (MTK_PIN_NO(236) | 0)
+#define MT2701_PIN_236_EXT_SDIO3__FUNC_EXT_SDIO3 (MTK_PIN_NO(236) | 1)
+#define MT2701_PIN_236_EXT_SDIO3__FUNC_IDDIG (MTK_PIN_NO(236) | 2)
+#define MT2701_PIN_236_EXT_SDIO3__FUNC_DBG_MON_A_1 (MTK_PIN_NO(236) | 7)
+
+#define MT2701_PIN_237_EXT_SDIO2__FUNC_GPIO237 (MTK_PIN_NO(237) | 0)
+#define MT2701_PIN_237_EXT_SDIO2__FUNC_EXT_SDIO2 (MTK_PIN_NO(237) | 1)
+#define MT2701_PIN_237_EXT_SDIO2__FUNC_DRV_VBUS (MTK_PIN_NO(237) | 2)
+
+#define MT2701_PIN_238_EXT_SDIO1__FUNC_GPIO238 (MTK_PIN_NO(238) | 0)
+#define MT2701_PIN_238_EXT_SDIO1__FUNC_EXT_SDIO1 (MTK_PIN_NO(238) | 1)
+#define MT2701_PIN_238_EXT_SDIO1__FUNC_IDDIG_P1 (MTK_PIN_NO(238) | 2)
+
+#define MT2701_PIN_239_EXT_SDIO0__FUNC_GPIO239 (MTK_PIN_NO(239) | 0)
+#define MT2701_PIN_239_EXT_SDIO0__FUNC_EXT_SDIO0 (MTK_PIN_NO(239) | 1)
+#define MT2701_PIN_239_EXT_SDIO0__FUNC_DRV_VBUS_P1 (MTK_PIN_NO(239) | 2)
+
+#define MT2701_PIN_240_EXT_XCS__FUNC_GPIO240 (MTK_PIN_NO(240) | 0)
+#define MT2701_PIN_240_EXT_XCS__FUNC_EXT_XCS (MTK_PIN_NO(240) | 1)
+
+#define MT2701_PIN_241_EXT_SCK__FUNC_GPIO241 (MTK_PIN_NO(241) | 0)
+#define MT2701_PIN_241_EXT_SCK__FUNC_EXT_SCK (MTK_PIN_NO(241) | 1)
+
+#define MT2701_PIN_242_URTS2__FUNC_GPIO242 (MTK_PIN_NO(242) | 0)
+#define MT2701_PIN_242_URTS2__FUNC_URTS2 (MTK_PIN_NO(242) | 1)
+#define MT2701_PIN_242_URTS2__FUNC_UTXD3 (MTK_PIN_NO(242) | 2)
+#define MT2701_PIN_242_URTS2__FUNC_URXD3 (MTK_PIN_NO(242) | 3)
+#define MT2701_PIN_242_URTS2__FUNC_SCL1 (MTK_PIN_NO(242) | 4)
+#define MT2701_PIN_242_URTS2__FUNC_DBG_MON_B_32 (MTK_PIN_NO(242) | 7)
+
+#define MT2701_PIN_243_UCTS2__FUNC_GPIO243 (MTK_PIN_NO(243) | 0)
+#define MT2701_PIN_243_UCTS2__FUNC_UCTS2 (MTK_PIN_NO(243) | 1)
+#define MT2701_PIN_243_UCTS2__FUNC_URXD3 (MTK_PIN_NO(243) | 2)
+#define MT2701_PIN_243_UCTS2__FUNC_UTXD3 (MTK_PIN_NO(243) | 3)
+#define MT2701_PIN_243_UCTS2__FUNC_SDA1 (MTK_PIN_NO(243) | 4)
+#define MT2701_PIN_243_UCTS2__FUNC_DBG_MON_A_6 (MTK_PIN_NO(243) | 7)
+
+#define MT2701_PIN_244_HDMI_SDA_RX__FUNC_GPIO244 (MTK_PIN_NO(244) | 0)
+#define MT2701_PIN_244_HDMI_SDA_RX__FUNC_HDMI_SDA_RX (MTK_PIN_NO(244) | 1)
+
+#define MT2701_PIN_245_HDMI_SCL_RX__FUNC_GPIO245 (MTK_PIN_NO(245) | 0)
+#define MT2701_PIN_245_HDMI_SCL_RX__FUNC_HDMI_SCL_RX (MTK_PIN_NO(245) | 1)
+
+#define MT2701_PIN_246_MHL_SENCE__FUNC_GPIO246 (MTK_PIN_NO(246) | 0)
+
+#define MT2701_PIN_247_HDMI_HPD_CBUS_RX__FUNC_GPIO247 (MTK_PIN_NO(247) | 0)
+#define MT2701_PIN_247_HDMI_HPD_CBUS_RX__FUNC_HDMI_HPD_RX (MTK_PIN_NO(247) | 1)
+
+#define MT2701_PIN_248_HDMI_TESTOUTP_RX__FUNC_GPIO248 (MTK_PIN_NO(248) | 0)
+#define MT2701_PIN_248_HDMI_TESTOUTP_RX__FUNC_HDMI_TESTOUTP_RX (MTK_PIN_NO(248) | 1)
+
+#define MT2701_PIN_249_MSDC0E_RSTB__FUNC_MSDC0E_RSTB (MTK_PIN_NO(249) | 9)
+
+#define MT2701_PIN_250_MSDC0E_DAT7__FUNC_MSDC3_DAT7 (MTK_PIN_NO(250) | 9)
+#define MT2701_PIN_250_MSDC0E_DAT7__FUNC_PCIE0_CLKREQ_N (MTK_PIN_NO(250) | 14)
+
+#define MT2701_PIN_251_MSDC0E_DAT6__FUNC_MSDC3_DAT6 (MTK_PIN_NO(251) | 9)
+#define MT2701_PIN_251_MSDC0E_DAT6__FUNC_PCIE0_WAKE_N (MTK_PIN_NO(251) | 14)
+
+#define MT2701_PIN_252_MSDC0E_DAT5__FUNC_MSDC3_DAT5 (MTK_PIN_NO(252) | 9)
+#define MT2701_PIN_252_MSDC0E_DAT5__FUNC_PCIE1_CLKREQ_N (MTK_PIN_NO(252) | 14)
+
+#define MT2701_PIN_253_MSDC0E_DAT4__FUNC_MSDC3_DAT4 (MTK_PIN_NO(253) | 9)
+#define MT2701_PIN_253_MSDC0E_DAT4__FUNC_PCIE1_WAKE_N (MTK_PIN_NO(253) | 14)
+
+#define MT2701_PIN_254_MSDC0E_DAT3__FUNC_MSDC3_DAT3 (MTK_PIN_NO(254) | 9)
+#define MT2701_PIN_254_MSDC0E_DAT3__FUNC_PCIE2_CLKREQ_N (MTK_PIN_NO(254) | 14)
+
+#define MT2701_PIN_255_MSDC0E_DAT2__FUNC_MSDC3_DAT2 (MTK_PIN_NO(255) | 9)
+#define MT2701_PIN_255_MSDC0E_DAT2__FUNC_PCIE2_WAKE_N (MTK_PIN_NO(255) | 14)
+
+#define MT2701_PIN_256_MSDC0E_DAT1__FUNC_MSDC3_DAT1 (MTK_PIN_NO(256) | 9)
+
+#define MT2701_PIN_257_MSDC0E_DAT0__FUNC_MSDC3_DAT0 (MTK_PIN_NO(257) | 9)
+
+#define MT2701_PIN_258_MSDC0E_CMD__FUNC_MSDC3_CMD (MTK_PIN_NO(258) | 9)
+
+#define MT2701_PIN_259_MSDC0E_CLK__FUNC_MSDC3_CLK (MTK_PIN_NO(259) | 9)
+
+#define MT2701_PIN_260_MSDC0E_DSL__FUNC_MSDC3_DSL (MTK_PIN_NO(260) | 9)
+
+#define MT2701_PIN_261_MSDC1_INS__FUNC_GPIO261 (MTK_PIN_NO(261) | 0)
+#define MT2701_PIN_261_MSDC1_INS__FUNC_MSDC1_INS (MTK_PIN_NO(261) | 1)
+#define MT2701_PIN_261_MSDC1_INS__FUNC_DBG_MON_B_29 (MTK_PIN_NO(261) | 7)
+
+#define MT2701_PIN_262_G2_TXEN__FUNC_GPIO262 (MTK_PIN_NO(262) | 0)
+#define MT2701_PIN_262_G2_TXEN__FUNC_G2_TXEN (MTK_PIN_NO(262) | 1)
+
+#define MT2701_PIN_263_G2_TXD3__FUNC_GPIO263 (MTK_PIN_NO(263) | 0)
+#define MT2701_PIN_263_G2_TXD3__FUNC_G2_TXD3 (MTK_PIN_NO(263) | 1)
+#define MT2701_PIN_263_G2_TXD3__FUNC_ANT_SEL5 (MTK_PIN_NO(263) | 6)
+
+#define MT2701_PIN_264_G2_TXD2__FUNC_GPIO264 (MTK_PIN_NO(264) | 0)
+#define MT2701_PIN_264_G2_TXD2__FUNC_G2_TXD2 (MTK_PIN_NO(264) | 1)
+#define MT2701_PIN_264_G2_TXD2__FUNC_ANT_SEL4 (MTK_PIN_NO(264) | 6)
+
+#define MT2701_PIN_265_G2_TXD1__FUNC_GPIO265 (MTK_PIN_NO(265) | 0)
+#define MT2701_PIN_265_G2_TXD1__FUNC_G2_TXD1 (MTK_PIN_NO(265) | 1)
+#define MT2701_PIN_265_G2_TXD1__FUNC_ANT_SEL3 (MTK_PIN_NO(265) | 6)
+
+#define MT2701_PIN_266_G2_TXD0__FUNC_GPIO266 (MTK_PIN_NO(266) | 0)
+#define MT2701_PIN_266_G2_TXD0__FUNC_G2_TXD0 (MTK_PIN_NO(266) | 1)
+#define MT2701_PIN_266_G2_TXD0__FUNC_ANT_SEL2 (MTK_PIN_NO(266) | 6)
+
+#define MT2701_PIN_267_G2_TXC__FUNC_GPIO267 (MTK_PIN_NO(267) | 0)
+#define MT2701_PIN_267_G2_TXC__FUNC_G2_TXC (MTK_PIN_NO(267) | 1)
+
+#define MT2701_PIN_268_G2_RXC__FUNC_GPIO268 (MTK_PIN_NO(268) | 0)
+#define MT2701_PIN_268_G2_RXC__FUNC_G2_RXC (MTK_PIN_NO(268) | 1)
+
+#define MT2701_PIN_269_G2_RXD0__FUNC_GPIO269 (MTK_PIN_NO(269) | 0)
+#define MT2701_PIN_269_G2_RXD0__FUNC_G2_RXD0 (MTK_PIN_NO(269) | 1)
+
+#define MT2701_PIN_270_G2_RXD1__FUNC_GPIO270 (MTK_PIN_NO(270) | 0)
+#define MT2701_PIN_270_G2_RXD1__FUNC_G2_RXD1 (MTK_PIN_NO(270) | 1)
+
+#define MT2701_PIN_271_G2_RXD2__FUNC_GPIO271 (MTK_PIN_NO(271) | 0)
+#define MT2701_PIN_271_G2_RXD2__FUNC_G2_RXD2 (MTK_PIN_NO(271) | 1)
+
+#define MT2701_PIN_272_G2_RXD3__FUNC_GPIO272 (MTK_PIN_NO(272) | 0)
+#define MT2701_PIN_272_G2_RXD3__FUNC_G2_RXD3 (MTK_PIN_NO(272) | 1)
+
+#define MT2701_PIN_274_G2_RXDV__FUNC_GPIO274 (MTK_PIN_NO(274) | 0)
+#define MT2701_PIN_274_G2_RXDV__FUNC_G2_RXDV (MTK_PIN_NO(274) | 1)
+
+#define MT2701_PIN_275_MDC__FUNC_GPIO275 (MTK_PIN_NO(275) | 0)
+#define MT2701_PIN_275_MDC__FUNC_MDC (MTK_PIN_NO(275) | 1)
+#define MT2701_PIN_275_MDC__FUNC_ANT_SEL0 (MTK_PIN_NO(275) | 6)
+
+#define MT2701_PIN_276_MDIO__FUNC_GPIO276 (MTK_PIN_NO(276) | 0)
+#define MT2701_PIN_276_MDIO__FUNC_MDIO (MTK_PIN_NO(276) | 1)
+#define MT2701_PIN_276_MDIO__FUNC_ANT_SEL1 (MTK_PIN_NO(276) | 6)
+
+#define MT2701_PIN_278_JTAG_RESET__FUNC_GPIO278 (MTK_PIN_NO(278) | 0)
+#define MT2701_PIN_278_JTAG_RESET__FUNC_JTAG_RESET (MTK_PIN_NO(278) | 1)
+
+#endif /* __DTS_MT2701_PINFUNC_H */
diff --git a/arch/arm/boot/dts/socfpga.dtsi b/arch/arm/boot/dts/socfpga.dtsi
index 3ed4abd..15cbc74 100644
--- a/arch/arm/boot/dts/socfpga.dtsi
+++ b/arch/arm/boot/dts/socfpga.dtsi
@@ -656,6 +656,26 @@
 			status = "disabled";
 		};
 
+		eccmgr: eccmgr@ffd08140 {
+			compatible = "altr,socfpga-ecc-manager";
+			#address-cells = <1>;
+			#size-cells = <1>;
+			ranges;
+
+			l2-ecc@ffd08140 {
+				compatible = "altr,socfpga-l2-ecc";
+				reg = <0xffd08140 0x4>;
+				interrupts = <0 36 1>, <0 37 1>;
+			};
+
+			ocram-ecc@ffd08144 {
+				compatible = "altr,socfpga-ocram-ecc";
+				reg = <0xffd08144 0x4>;
+				iram = <&ocram>;
+				interrupts = <0 178 1>, <0 179 1>;
+			};
+		};
+
 		L2: l2-cache@fffef000 {
 			compatible = "arm,pl310-cache";
 			reg = <0xfffef000 0x1000>;
diff --git a/arch/arm/boot/dts/vfxxx.dtsi b/arch/arm/boot/dts/vfxxx.dtsi
index a9ceb5b..4539f8d 100644
--- a/arch/arm/boot/dts/vfxxx.dtsi
+++ b/arch/arm/boot/dts/vfxxx.dtsi
@@ -629,5 +629,10 @@
 				status = "disabled";
 			};
 		};
+
+		iio-hwmon {
+			compatible = "iio-hwmon";
+			io-channels = <&adc0 16>, <&adc1 16>;
+		};
 	};
 };
diff --git a/arch/arm/include/asm/kvm_asm.h b/arch/arm/include/asm/kvm_asm.h
index 194c91b..15d58b4 100644
--- a/arch/arm/include/asm/kvm_asm.h
+++ b/arch/arm/include/asm/kvm_asm.h
@@ -19,38 +19,7 @@
 #ifndef __ARM_KVM_ASM_H__
 #define __ARM_KVM_ASM_H__
 
-/* 0 is reserved as an invalid value. */
-#define c0_MPIDR	1	/* MultiProcessor ID Register */
-#define c0_CSSELR	2	/* Cache Size Selection Register */
-#define c1_SCTLR	3	/* System Control Register */
-#define c1_ACTLR	4	/* Auxiliary Control Register */
-#define c1_CPACR	5	/* Coprocessor Access Control */
-#define c2_TTBR0	6	/* Translation Table Base Register 0 */
-#define c2_TTBR0_high	7	/* TTBR0 top 32 bits */
-#define c2_TTBR1	8	/* Translation Table Base Register 1 */
-#define c2_TTBR1_high	9	/* TTBR1 top 32 bits */
-#define c2_TTBCR	10	/* Translation Table Base Control R. */
-#define c3_DACR		11	/* Domain Access Control Register */
-#define c5_DFSR		12	/* Data Fault Status Register */
-#define c5_IFSR		13	/* Instruction Fault Status Register */
-#define c5_ADFSR	14	/* Auxilary Data Fault Status R */
-#define c5_AIFSR	15	/* Auxilary Instrunction Fault Status R */
-#define c6_DFAR		16	/* Data Fault Address Register */
-#define c6_IFAR		17	/* Instruction Fault Address Register */
-#define c7_PAR		18	/* Physical Address Register */
-#define c7_PAR_high	19	/* PAR top 32 bits */
-#define c9_L2CTLR	20	/* Cortex A15/A7 L2 Control Register */
-#define c10_PRRR	21	/* Primary Region Remap Register */
-#define c10_NMRR	22	/* Normal Memory Remap Register */
-#define c12_VBAR	23	/* Vector Base Address Register */
-#define c13_CID		24	/* Context ID Register */
-#define c13_TID_URW	25	/* Thread ID, User R/W */
-#define c13_TID_URO	26	/* Thread ID, User R/O */
-#define c13_TID_PRIV	27	/* Thread ID, Privileged */
-#define c14_CNTKCTL	28	/* Timer Control Register (PL1) */
-#define c10_AMAIR0	29	/* Auxilary Memory Attribute Indirection Reg0 */
-#define c10_AMAIR1	30	/* Auxilary Memory Attribute Indirection Reg1 */
-#define NR_CP15_REGS	31	/* Number of regs (incl. invalid) */
+#include <asm/virt.h>
 
 #define ARM_EXCEPTION_RESET	  0
 #define ARM_EXCEPTION_UNDEFINED   1
@@ -86,19 +55,15 @@
 extern char __kvm_hyp_init[];
 extern char __kvm_hyp_init_end[];
 
-extern char __kvm_hyp_exit[];
-extern char __kvm_hyp_exit_end[];
-
 extern char __kvm_hyp_vector[];
 
-extern char __kvm_hyp_code_start[];
-extern char __kvm_hyp_code_end[];
-
 extern void __kvm_flush_vm_context(void);
 extern void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa);
 extern void __kvm_tlb_flush_vmid(struct kvm *kvm);
 
 extern int __kvm_vcpu_run(struct kvm_vcpu *vcpu);
+
+extern void __init_stage2_translation(void);
 #endif
 
 #endif /* __ARM_KVM_ASM_H__ */
diff --git a/arch/arm/include/asm/kvm_emulate.h b/arch/arm/include/asm/kvm_emulate.h
index 3095df0..ee5328f 100644
--- a/arch/arm/include/asm/kvm_emulate.h
+++ b/arch/arm/include/asm/kvm_emulate.h
@@ -68,12 +68,12 @@
 
 static inline unsigned long *vcpu_pc(struct kvm_vcpu *vcpu)
 {
-	return &vcpu->arch.regs.usr_regs.ARM_pc;
+	return &vcpu->arch.ctxt.gp_regs.usr_regs.ARM_pc;
 }
 
 static inline unsigned long *vcpu_cpsr(struct kvm_vcpu *vcpu)
 {
-	return &vcpu->arch.regs.usr_regs.ARM_cpsr;
+	return &vcpu->arch.ctxt.gp_regs.usr_regs.ARM_cpsr;
 }
 
 static inline void vcpu_set_thumb(struct kvm_vcpu *vcpu)
@@ -83,13 +83,13 @@
 
 static inline bool mode_has_spsr(struct kvm_vcpu *vcpu)
 {
-	unsigned long cpsr_mode = vcpu->arch.regs.usr_regs.ARM_cpsr & MODE_MASK;
+	unsigned long cpsr_mode = vcpu->arch.ctxt.gp_regs.usr_regs.ARM_cpsr & MODE_MASK;
 	return (cpsr_mode > USR_MODE && cpsr_mode < SYSTEM_MODE);
 }
 
 static inline bool vcpu_mode_priv(struct kvm_vcpu *vcpu)
 {
-	unsigned long cpsr_mode = vcpu->arch.regs.usr_regs.ARM_cpsr & MODE_MASK;
+	unsigned long cpsr_mode = vcpu->arch.ctxt.gp_regs.usr_regs.ARM_cpsr & MODE_MASK;
 	return cpsr_mode > USR_MODE;
 }
 
@@ -108,11 +108,6 @@
 	return ((phys_addr_t)vcpu->arch.fault.hpfar & HPFAR_MASK) << 8;
 }
 
-static inline unsigned long kvm_vcpu_get_hyp_pc(struct kvm_vcpu *vcpu)
-{
-	return vcpu->arch.fault.hyp_pc;
-}
-
 static inline bool kvm_vcpu_dabt_isvalid(struct kvm_vcpu *vcpu)
 {
 	return kvm_vcpu_get_hsr(vcpu) & HSR_ISV;
@@ -143,6 +138,11 @@
 	return kvm_vcpu_get_hsr(vcpu) & HSR_DABT_S1PTW;
 }
 
+static inline bool kvm_vcpu_dabt_is_cm(struct kvm_vcpu *vcpu)
+{
+	return !!(kvm_vcpu_get_hsr(vcpu) & HSR_DABT_CM);
+}
+
 /* Get Access Size from a data abort */
 static inline int kvm_vcpu_dabt_get_as(struct kvm_vcpu *vcpu)
 {
@@ -192,7 +192,7 @@
 
 static inline unsigned long kvm_vcpu_get_mpidr_aff(struct kvm_vcpu *vcpu)
 {
-	return vcpu->arch.cp15[c0_MPIDR] & MPIDR_HWID_BITMASK;
+	return vcpu_cp15(vcpu, c0_MPIDR) & MPIDR_HWID_BITMASK;
 }
 
 static inline void kvm_vcpu_set_be(struct kvm_vcpu *vcpu)
diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h
index f9f2779..3850701 100644
--- a/arch/arm/include/asm/kvm_host.h
+++ b/arch/arm/include/asm/kvm_host.h
@@ -85,20 +85,61 @@
 	u32 hsr;		/* Hyp Syndrome Register */
 	u32 hxfar;		/* Hyp Data/Inst. Fault Address Register */
 	u32 hpfar;		/* Hyp IPA Fault Address Register */
-	u32 hyp_pc;		/* PC when exception was taken from Hyp mode */
 };
 
-typedef struct vfp_hard_struct kvm_cpu_context_t;
+/*
+ * 0 is reserved as an invalid value.
+ * Order should be kept in sync with the save/restore code.
+ */
+enum vcpu_sysreg {
+	__INVALID_SYSREG__,
+	c0_MPIDR,		/* MultiProcessor ID Register */
+	c0_CSSELR,		/* Cache Size Selection Register */
+	c1_SCTLR,		/* System Control Register */
+	c1_ACTLR,		/* Auxiliary Control Register */
+	c1_CPACR,		/* Coprocessor Access Control */
+	c2_TTBR0,		/* Translation Table Base Register 0 */
+	c2_TTBR0_high,		/* TTBR0 top 32 bits */
+	c2_TTBR1,		/* Translation Table Base Register 1 */
+	c2_TTBR1_high,		/* TTBR1 top 32 bits */
+	c2_TTBCR,		/* Translation Table Base Control R. */
+	c3_DACR,		/* Domain Access Control Register */
+	c5_DFSR,		/* Data Fault Status Register */
+	c5_IFSR,		/* Instruction Fault Status Register */
+	c5_ADFSR,		/* Auxiliary Data Fault Status R */
+	c5_AIFSR,		/* Auxiliary Instruction Fault Status R */
+	c6_DFAR,		/* Data Fault Address Register */
+	c6_IFAR,		/* Instruction Fault Address Register */
+	c7_PAR,			/* Physical Address Register */
+	c7_PAR_high,		/* PAR top 32 bits */
+	c9_L2CTLR,		/* Cortex A15/A7 L2 Control Register */
+	c10_PRRR,		/* Primary Region Remap Register */
+	c10_NMRR,		/* Normal Memory Remap Register */
+	c12_VBAR,		/* Vector Base Address Register */
+	c13_CID,		/* Context ID Register */
+	c13_TID_URW,		/* Thread ID, User R/W */
+	c13_TID_URO,		/* Thread ID, User R/O */
+	c13_TID_PRIV,		/* Thread ID, Privileged */
+	c14_CNTKCTL,		/* Timer Control Register (PL1) */
+	c10_AMAIR0,		/* Auxiliary Memory Attribute Indirection Reg0 */
+	c10_AMAIR1,		/* Auxiliary Memory Attribute Indirection Reg1 */
+	NR_CP15_REGS		/* Number of regs (incl. invalid) */
+};
+
+struct kvm_cpu_context {
+	struct kvm_regs	gp_regs;
+	struct vfp_hard_struct vfp;
+	u32 cp15[NR_CP15_REGS];
+};
+
+typedef struct kvm_cpu_context kvm_cpu_context_t;
 
 struct kvm_vcpu_arch {
-	struct kvm_regs regs;
+	struct kvm_cpu_context ctxt;
 
 	int target; /* Processor target */
 	DECLARE_BITMAP(features, KVM_VCPU_MAX_FEATURES);
 
-	/* System control coprocessor (cp15) */
-	u32 cp15[NR_CP15_REGS];
-
 	/* The CPU type we expose to the VM */
 	u32 midr;
 
@@ -111,9 +152,6 @@
 	/* Exception Information */
 	struct kvm_vcpu_fault_info fault;
 
-	/* Floating point registers (VFP and Advanced SIMD/NEON) */
-	struct vfp_hard_struct vfp_guest;
-
 	/* Host FP context */
 	kvm_cpu_context_t *host_cpu_context;
 
@@ -158,12 +196,14 @@
 	u64 exits;
 };
 
+#define vcpu_cp15(v,r)	(v)->arch.ctxt.cp15[r]
+
 int kvm_vcpu_preferred_target(struct kvm_vcpu_init *init);
 unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu);
 int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices);
 int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);
 int kvm_arm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);
-u64 kvm_call_hyp(void *hypfn, ...);
+unsigned long kvm_call_hyp(void *hypfn, ...);
 void force_vm_exit(const cpumask_t *mask);
 
 #define KVM_ARCH_WANT_MMU_NOTIFIER
@@ -220,6 +260,11 @@
 	kvm_call_hyp((void*)hyp_stack_ptr, vector_ptr, pgd_ptr);
 }
 
+static inline void __cpu_init_stage2(void)
+{
+	kvm_call_hyp(__init_stage2_translation);
+}
+
 static inline int kvm_arch_dev_ioctl_check_extension(long ext)
 {
 	return 0;
@@ -242,5 +287,20 @@
 static inline void kvm_arm_setup_debug(struct kvm_vcpu *vcpu) {}
 static inline void kvm_arm_clear_debug(struct kvm_vcpu *vcpu) {}
 static inline void kvm_arm_reset_debug_ptr(struct kvm_vcpu *vcpu) {}
+static inline int kvm_arm_vcpu_arch_set_attr(struct kvm_vcpu *vcpu,
+					     struct kvm_device_attr *attr)
+{
+	return -ENXIO;
+}
+static inline int kvm_arm_vcpu_arch_get_attr(struct kvm_vcpu *vcpu,
+					     struct kvm_device_attr *attr)
+{
+	return -ENXIO;
+}
+static inline int kvm_arm_vcpu_arch_has_attr(struct kvm_vcpu *vcpu,
+					     struct kvm_device_attr *attr)
+{
+	return -ENXIO;
+}
 
 #endif /* __ARM_KVM_HOST_H__ */
diff --git a/arch/arm/include/asm/kvm_hyp.h b/arch/arm/include/asm/kvm_hyp.h
new file mode 100644
index 0000000..f0e8607
--- /dev/null
+++ b/arch/arm/include/asm/kvm_hyp.h
@@ -0,0 +1,139 @@
+/*
+ * Copyright (C) 2015 - ARM Ltd
+ * Author: Marc Zyngier <marc.zyngier@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __ARM_KVM_HYP_H__
+#define __ARM_KVM_HYP_H__
+
+#include <linux/compiler.h>
+#include <linux/kvm_host.h>
+#include <asm/kvm_mmu.h>
+#include <asm/vfp.h>
+
+#define __hyp_text __section(.hyp.text) notrace
+
+#define kern_hyp_va(v) (v)
+#define hyp_kern_va(v) (v)
+
+#define __ACCESS_CP15(CRn, Op1, CRm, Op2)	\
+	"mrc", "mcr", __stringify(p15, Op1, %0, CRn, CRm, Op2), u32
+#define __ACCESS_CP15_64(Op1, CRm)		\
+	"mrrc", "mcrr", __stringify(p15, Op1, %Q0, %R0, CRm), u64
+#define __ACCESS_VFP(CRn)			\
+	"mrc", "mcr", __stringify(p10, 7, %0, CRn, cr0, 0), u32
+
+#define __write_sysreg(v, r, w, c, t)	asm volatile(w " " c : : "r" ((t)(v)))
+#define write_sysreg(v, ...)		__write_sysreg(v, __VA_ARGS__)
+
+#define __read_sysreg(r, w, c, t) ({				\
+	t __val;						\
+	asm volatile(r " " c : "=r" (__val));			\
+	__val;							\
+})
+#define read_sysreg(...)		__read_sysreg(__VA_ARGS__)
+
+#define write_special(v, r)					\
+	asm volatile("msr " __stringify(r) ", %0" : : "r" (v))
+#define read_special(r) ({					\
+	u32 __val;						\
+	asm volatile("mrs %0, " __stringify(r) : "=r" (__val));	\
+	__val;							\
+})
+
+#define TTBR0		__ACCESS_CP15_64(0, c2)
+#define TTBR1		__ACCESS_CP15_64(1, c2)
+#define VTTBR		__ACCESS_CP15_64(6, c2)
+#define PAR		__ACCESS_CP15_64(0, c7)
+#define CNTV_CVAL	__ACCESS_CP15_64(3, c14)
+#define CNTVOFF		__ACCESS_CP15_64(4, c14)
+
+#define MIDR		__ACCESS_CP15(c0, 0, c0, 0)
+#define CSSELR		__ACCESS_CP15(c0, 2, c0, 0)
+#define VPIDR		__ACCESS_CP15(c0, 4, c0, 0)
+#define VMPIDR		__ACCESS_CP15(c0, 4, c0, 5)
+#define SCTLR		__ACCESS_CP15(c1, 0, c0, 0)
+#define CPACR		__ACCESS_CP15(c1, 0, c0, 2)
+#define HCR		__ACCESS_CP15(c1, 4, c1, 0)
+#define HDCR		__ACCESS_CP15(c1, 4, c1, 1)
+#define HCPTR		__ACCESS_CP15(c1, 4, c1, 2)
+#define HSTR		__ACCESS_CP15(c1, 4, c1, 3)
+#define TTBCR		__ACCESS_CP15(c2, 0, c0, 2)
+#define HTCR		__ACCESS_CP15(c2, 4, c0, 2)
+#define VTCR		__ACCESS_CP15(c2, 4, c1, 2)
+#define DACR		__ACCESS_CP15(c3, 0, c0, 0)
+#define DFSR		__ACCESS_CP15(c5, 0, c0, 0)
+#define IFSR		__ACCESS_CP15(c5, 0, c0, 1)
+#define ADFSR		__ACCESS_CP15(c5, 0, c1, 0)
+#define AIFSR		__ACCESS_CP15(c5, 0, c1, 1)
+#define HSR		__ACCESS_CP15(c5, 4, c2, 0)
+#define DFAR		__ACCESS_CP15(c6, 0, c0, 0)
+#define IFAR		__ACCESS_CP15(c6, 0, c0, 2)
+#define HDFAR		__ACCESS_CP15(c6, 4, c0, 0)
+#define HIFAR		__ACCESS_CP15(c6, 4, c0, 2)
+#define HPFAR		__ACCESS_CP15(c6, 4, c0, 4)
+#define ICIALLUIS	__ACCESS_CP15(c7, 0, c1, 0)
+#define ATS1CPR		__ACCESS_CP15(c7, 0, c8, 0)
+#define TLBIALLIS	__ACCESS_CP15(c8, 0, c3, 0)
+#define TLBIALLNSNHIS	__ACCESS_CP15(c8, 4, c3, 4)
+#define PRRR		__ACCESS_CP15(c10, 0, c2, 0)
+#define NMRR		__ACCESS_CP15(c10, 0, c2, 1)
+#define AMAIR0		__ACCESS_CP15(c10, 0, c3, 0)
+#define AMAIR1		__ACCESS_CP15(c10, 0, c3, 1)
+#define VBAR		__ACCESS_CP15(c12, 0, c0, 0)
+#define CID		__ACCESS_CP15(c13, 0, c0, 1)
+#define TID_URW		__ACCESS_CP15(c13, 0, c0, 2)
+#define TID_URO		__ACCESS_CP15(c13, 0, c0, 3)
+#define TID_PRIV	__ACCESS_CP15(c13, 0, c0, 4)
+#define HTPIDR		__ACCESS_CP15(c13, 4, c0, 2)
+#define CNTKCTL		__ACCESS_CP15(c14, 0, c1, 0)
+#define CNTV_CTL	__ACCESS_CP15(c14, 0, c3, 1)
+#define CNTHCTL		__ACCESS_CP15(c14, 4, c1, 0)
+
+#define VFP_FPEXC	__ACCESS_VFP(FPEXC)
+
+/* AArch64 compatibility macros, only for the timer so far */
+#define read_sysreg_el0(r)		read_sysreg(r##_el0)
+#define write_sysreg_el0(v, r)		write_sysreg(v, r##_el0)
+
+#define cntv_ctl_el0			CNTV_CTL
+#define cntv_cval_el0			CNTV_CVAL
+#define cntvoff_el2			CNTVOFF
+#define cnthctl_el2			CNTHCTL
+
+void __timer_save_state(struct kvm_vcpu *vcpu);
+void __timer_restore_state(struct kvm_vcpu *vcpu);
+
+void __vgic_v2_save_state(struct kvm_vcpu *vcpu);
+void __vgic_v2_restore_state(struct kvm_vcpu *vcpu);
+
+void __sysreg_save_state(struct kvm_cpu_context *ctxt);
+void __sysreg_restore_state(struct kvm_cpu_context *ctxt);
+
+void asmlinkage __vfp_save_state(struct vfp_hard_struct *vfp);
+void asmlinkage __vfp_restore_state(struct vfp_hard_struct *vfp);
+static inline bool __vfp_enabled(void)
+{
+	return !(read_sysreg(HCPTR) & (HCPTR_TCP(11) | HCPTR_TCP(10)));
+}
+
+void __hyp_text __banked_save_state(struct kvm_cpu_context *ctxt);
+void __hyp_text __banked_restore_state(struct kvm_cpu_context *ctxt);
+
+int asmlinkage __guest_enter(struct kvm_vcpu *vcpu,
+			     struct kvm_cpu_context *host);
+int asmlinkage __hyp_do_panic(const char *, int, u32);
+
+#endif /* __ARM_KVM_HYP_H__ */
diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h
index a520b79..da44be9 100644
--- a/arch/arm/include/asm/kvm_mmu.h
+++ b/arch/arm/include/asm/kvm_mmu.h
@@ -179,7 +179,7 @@
 
 static inline bool vcpu_has_cache_enabled(struct kvm_vcpu *vcpu)
 {
-	return (vcpu->arch.cp15[c1_SCTLR] & 0b101) == 0b101;
+	return (vcpu_cp15(vcpu, c1_SCTLR) & 0b101) == 0b101;
 }
 
 static inline void __coherent_cache_guest_page(struct kvm_vcpu *vcpu,
diff --git a/arch/arm/include/asm/virt.h b/arch/arm/include/asm/virt.h
index 4371f45..d4ceaf5 100644
--- a/arch/arm/include/asm/virt.h
+++ b/arch/arm/include/asm/virt.h
@@ -74,6 +74,15 @@
 {
 	return !!(__boot_cpu_mode & BOOT_CPU_MODE_MISMATCH);
 }
+
+static inline bool is_kernel_in_hyp_mode(void)
+{
+	return false;
+}
+
+/* The section containing the hypervisor text */
+extern char __hyp_text_start[];
+extern char __hyp_text_end[];
 #endif
 
 #endif /* __ASSEMBLY__ */
diff --git a/arch/arm/kernel/asm-offsets.c b/arch/arm/kernel/asm-offsets.c
index 871b826..27d0581 100644
--- a/arch/arm/kernel/asm-offsets.c
+++ b/arch/arm/kernel/asm-offsets.c
@@ -170,41 +170,11 @@
   DEFINE(CACHE_WRITEBACK_GRANULE, __CACHE_WRITEBACK_GRANULE);
   BLANK();
 #ifdef CONFIG_KVM_ARM_HOST
-  DEFINE(VCPU_KVM,		offsetof(struct kvm_vcpu, kvm));
-  DEFINE(VCPU_MIDR,		offsetof(struct kvm_vcpu, arch.midr));
-  DEFINE(VCPU_CP15,		offsetof(struct kvm_vcpu, arch.cp15));
-  DEFINE(VCPU_VFP_GUEST,	offsetof(struct kvm_vcpu, arch.vfp_guest));
-  DEFINE(VCPU_VFP_HOST,		offsetof(struct kvm_vcpu, arch.host_cpu_context));
-  DEFINE(VCPU_REGS,		offsetof(struct kvm_vcpu, arch.regs));
-  DEFINE(VCPU_USR_REGS,		offsetof(struct kvm_vcpu, arch.regs.usr_regs));
-  DEFINE(VCPU_SVC_REGS,		offsetof(struct kvm_vcpu, arch.regs.svc_regs));
-  DEFINE(VCPU_ABT_REGS,		offsetof(struct kvm_vcpu, arch.regs.abt_regs));
-  DEFINE(VCPU_UND_REGS,		offsetof(struct kvm_vcpu, arch.regs.und_regs));
-  DEFINE(VCPU_IRQ_REGS,		offsetof(struct kvm_vcpu, arch.regs.irq_regs));
-  DEFINE(VCPU_FIQ_REGS,		offsetof(struct kvm_vcpu, arch.regs.fiq_regs));
-  DEFINE(VCPU_PC,		offsetof(struct kvm_vcpu, arch.regs.usr_regs.ARM_pc));
-  DEFINE(VCPU_CPSR,		offsetof(struct kvm_vcpu, arch.regs.usr_regs.ARM_cpsr));
-  DEFINE(VCPU_HCR,		offsetof(struct kvm_vcpu, arch.hcr));
-  DEFINE(VCPU_IRQ_LINES,	offsetof(struct kvm_vcpu, arch.irq_lines));
-  DEFINE(VCPU_HSR,		offsetof(struct kvm_vcpu, arch.fault.hsr));
-  DEFINE(VCPU_HxFAR,		offsetof(struct kvm_vcpu, arch.fault.hxfar));
-  DEFINE(VCPU_HPFAR,		offsetof(struct kvm_vcpu, arch.fault.hpfar));
-  DEFINE(VCPU_HYP_PC,		offsetof(struct kvm_vcpu, arch.fault.hyp_pc));
-  DEFINE(VCPU_VGIC_CPU,		offsetof(struct kvm_vcpu, arch.vgic_cpu));
-  DEFINE(VGIC_V2_CPU_HCR,	offsetof(struct vgic_cpu, vgic_v2.vgic_hcr));
-  DEFINE(VGIC_V2_CPU_VMCR,	offsetof(struct vgic_cpu, vgic_v2.vgic_vmcr));
-  DEFINE(VGIC_V2_CPU_MISR,	offsetof(struct vgic_cpu, vgic_v2.vgic_misr));
-  DEFINE(VGIC_V2_CPU_EISR,	offsetof(struct vgic_cpu, vgic_v2.vgic_eisr));
-  DEFINE(VGIC_V2_CPU_ELRSR,	offsetof(struct vgic_cpu, vgic_v2.vgic_elrsr));
-  DEFINE(VGIC_V2_CPU_APR,	offsetof(struct vgic_cpu, vgic_v2.vgic_apr));
-  DEFINE(VGIC_V2_CPU_LR,	offsetof(struct vgic_cpu, vgic_v2.vgic_lr));
-  DEFINE(VGIC_CPU_NR_LR,	offsetof(struct vgic_cpu, nr_lr));
-  DEFINE(VCPU_TIMER_CNTV_CTL,	offsetof(struct kvm_vcpu, arch.timer_cpu.cntv_ctl));
-  DEFINE(VCPU_TIMER_CNTV_CVAL,	offsetof(struct kvm_vcpu, arch.timer_cpu.cntv_cval));
-  DEFINE(KVM_TIMER_CNTVOFF,	offsetof(struct kvm, arch.timer.cntvoff));
-  DEFINE(KVM_TIMER_ENABLED,	offsetof(struct kvm, arch.timer.enabled));
-  DEFINE(KVM_VGIC_VCTRL,	offsetof(struct kvm, arch.vgic.vctrl_base));
-  DEFINE(KVM_VTTBR,		offsetof(struct kvm, arch.vttbr));
+  DEFINE(VCPU_GUEST_CTXT,	offsetof(struct kvm_vcpu, arch.ctxt));
+  DEFINE(VCPU_HOST_CTXT,	offsetof(struct kvm_vcpu, arch.host_cpu_context));
+  DEFINE(CPU_CTXT_VFP,		offsetof(struct kvm_cpu_context, vfp));
+  DEFINE(CPU_CTXT_GP_REGS,	offsetof(struct kvm_cpu_context, gp_regs));
+  DEFINE(GP_REGS_USR,		offsetof(struct kvm_regs, usr_regs));
 #endif
   BLANK();
 #ifdef CONFIG_VDSO
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index 37312f6..baee702 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -409,7 +409,7 @@
 	/*
 	 * OK, it's off to the idle thread for us
 	 */
-	cpu_startup_entry(CPUHP_ONLINE);
+	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
 }
 
 void __init smp_cpus_done(unsigned int max_cpus)
diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
index 8b60fde..b4139cb 100644
--- a/arch/arm/kernel/vmlinux.lds.S
+++ b/arch/arm/kernel/vmlinux.lds.S
@@ -18,6 +18,11 @@
 	*(.proc.info.init)						\
 	VMLINUX_SYMBOL(__proc_info_end) = .;
 
+#define HYPERVISOR_TEXT							\
+	VMLINUX_SYMBOL(__hyp_text_start) = .;				\
+	*(.hyp.text)							\
+	VMLINUX_SYMBOL(__hyp_text_end) = .;
+
 #define IDMAP_TEXT							\
 	ALIGN_FUNCTION();						\
 	VMLINUX_SYMBOL(__idmap_text_start) = .;				\
@@ -108,6 +113,7 @@
 			TEXT_TEXT
 			SCHED_TEXT
 			LOCK_TEXT
+			HYPERVISOR_TEXT
 			KPROBES_TEXT
 			*(.gnu.warning)
 			*(.glue_7)
diff --git a/arch/arm/kvm/Makefile b/arch/arm/kvm/Makefile
index c5eef02c..eb1bf43 100644
--- a/arch/arm/kvm/Makefile
+++ b/arch/arm/kvm/Makefile
@@ -17,6 +17,7 @@
 KVM := ../../../virt/kvm
 kvm-arm-y = $(KVM)/kvm_main.o $(KVM)/coalesced_mmio.o $(KVM)/eventfd.o $(KVM)/vfio.o
 
+obj-$(CONFIG_KVM_ARM_HOST) += hyp/
 obj-y += kvm-arm.o init.o interrupts.o
 obj-y += arm.o handle_exit.o guest.o mmu.o emulate.o reset.o
 obj-y += coproc.o coproc_a15.o coproc_a7.o mmio.o psci.o perf.o
diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
index 08e49c4..76552b5 100644
--- a/arch/arm/kvm/arm.c
+++ b/arch/arm/kvm/arm.c
@@ -28,6 +28,7 @@
 #include <linux/sched.h>
 #include <linux/kvm.h>
 #include <trace/events/kvm.h>
+#include <kvm/arm_pmu.h>
 
 #define CREATE_TRACE_POINTS
 #include "trace.h"
@@ -265,6 +266,7 @@
 	kvm_mmu_free_memory_caches(vcpu);
 	kvm_timer_vcpu_terminate(vcpu);
 	kvm_vgic_vcpu_destroy(vcpu);
+	kvm_pmu_vcpu_destroy(vcpu);
 	kmem_cache_free(kvm_vcpu_cache, vcpu);
 }
 
@@ -320,6 +322,7 @@
 	vcpu->cpu = -1;
 
 	kvm_arm_set_running_vcpu(NULL);
+	kvm_timer_vcpu_put(vcpu);
 }
 
 int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
@@ -577,6 +580,7 @@
 		 * non-preemptible context.
 		 */
 		preempt_disable();
+		kvm_pmu_flush_hwstate(vcpu);
 		kvm_timer_flush_hwstate(vcpu);
 		kvm_vgic_flush_hwstate(vcpu);
 
@@ -593,6 +597,7 @@
 		if (ret <= 0 || need_new_vmid_gen(vcpu->kvm) ||
 			vcpu->arch.power_off || vcpu->arch.pause) {
 			local_irq_enable();
+			kvm_pmu_sync_hwstate(vcpu);
 			kvm_timer_sync_hwstate(vcpu);
 			kvm_vgic_sync_hwstate(vcpu);
 			preempt_enable();
@@ -642,10 +647,11 @@
 		trace_kvm_exit(ret, kvm_vcpu_trap_get_class(vcpu), *vcpu_pc(vcpu));
 
 		/*
-		 * We must sync the timer state before the vgic state so that
-		 * the vgic can properly sample the updated state of the
+		 * We must sync the PMU and timer state before the vgic state so
+		 * that the vgic can properly sample the updated state of the
 		 * interrupt line.
 		 */
+		kvm_pmu_sync_hwstate(vcpu);
 		kvm_timer_sync_hwstate(vcpu);
 
 		kvm_vgic_sync_hwstate(vcpu);
@@ -823,11 +829,54 @@
 	return 0;
 }
 
+static int kvm_arm_vcpu_set_attr(struct kvm_vcpu *vcpu,
+				 struct kvm_device_attr *attr)
+{
+	int ret = -ENXIO;
+
+	switch (attr->group) {
+	default:
+		ret = kvm_arm_vcpu_arch_set_attr(vcpu, attr);
+		break;
+	}
+
+	return ret;
+}
+
+static int kvm_arm_vcpu_get_attr(struct kvm_vcpu *vcpu,
+				 struct kvm_device_attr *attr)
+{
+	int ret = -ENXIO;
+
+	switch (attr->group) {
+	default:
+		ret = kvm_arm_vcpu_arch_get_attr(vcpu, attr);
+		break;
+	}
+
+	return ret;
+}
+
+static int kvm_arm_vcpu_has_attr(struct kvm_vcpu *vcpu,
+				 struct kvm_device_attr *attr)
+{
+	int ret = -ENXIO;
+
+	switch (attr->group) {
+	default:
+		ret = kvm_arm_vcpu_arch_has_attr(vcpu, attr);
+		break;
+	}
+
+	return ret;
+}
+
 long kvm_arch_vcpu_ioctl(struct file *filp,
 			 unsigned int ioctl, unsigned long arg)
 {
 	struct kvm_vcpu *vcpu = filp->private_data;
 	void __user *argp = (void __user *)arg;
+	struct kvm_device_attr attr;
 
 	switch (ioctl) {
 	case KVM_ARM_VCPU_INIT: {
@@ -870,6 +919,21 @@
 			return -E2BIG;
 		return kvm_arm_copy_reg_indices(vcpu, user_list->reg);
 	}
+	case KVM_SET_DEVICE_ATTR: {
+		if (copy_from_user(&attr, argp, sizeof(attr)))
+			return -EFAULT;
+		return kvm_arm_vcpu_set_attr(vcpu, &attr);
+	}
+	case KVM_GET_DEVICE_ATTR: {
+		if (copy_from_user(&attr, argp, sizeof(attr)))
+			return -EFAULT;
+		return kvm_arm_vcpu_get_attr(vcpu, &attr);
+	}
+	case KVM_HAS_DEVICE_ATTR: {
+		if (copy_from_user(&attr, argp, sizeof(attr)))
+			return -EFAULT;
+		return kvm_arm_vcpu_has_attr(vcpu, &attr);
+	}
 	default:
 		return -EINVAL;
 	}
@@ -967,6 +1031,11 @@
 	}
 }
 
+static void cpu_init_stage2(void *dummy)
+{
+	__cpu_init_stage2();
+}
+
 static void cpu_init_hyp_mode(void *dummy)
 {
 	phys_addr_t boot_pgd_ptr;
@@ -985,6 +1054,7 @@
 	vector_ptr = (unsigned long)__kvm_hyp_vector;
 
 	__cpu_init_hyp_mode(boot_pgd_ptr, pgd_ptr, hyp_stack_ptr, vector_ptr);
+	__cpu_init_stage2();
 
 	kvm_arm_init_debug();
 }
@@ -1035,6 +1105,82 @@
 }
 #endif
 
+static void teardown_common_resources(void)
+{
+	free_percpu(kvm_host_cpu_state);
+}
+
+static int init_common_resources(void)
+{
+	kvm_host_cpu_state = alloc_percpu(kvm_cpu_context_t);
+	if (!kvm_host_cpu_state) {
+		kvm_err("Cannot allocate host CPU state\n");
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+static int init_subsystems(void)
+{
+	int err;
+
+	/*
+	 * Init HYP view of VGIC
+	 */
+	err = kvm_vgic_hyp_init();
+	switch (err) {
+	case 0:
+		vgic_present = true;
+		break;
+	case -ENODEV:
+	case -ENXIO:
+		vgic_present = false;
+		break;
+	default:
+		return err;
+	}
+
+	/*
+	 * Init HYP architected timer support
+	 */
+	err = kvm_timer_hyp_init();
+	if (err)
+		return err;
+
+	kvm_perf_init();
+	kvm_coproc_table_init();
+
+	return 0;
+}
+
+static void teardown_hyp_mode(void)
+{
+	int cpu;
+
+	if (is_kernel_in_hyp_mode())
+		return;
+
+	free_hyp_pgds();
+	for_each_possible_cpu(cpu)
+		free_page(per_cpu(kvm_arm_hyp_stack_page, cpu));
+}
+
+static int init_vhe_mode(void)
+{
+	/*
+	 * Execute the init code on each CPU.
+	 */
+	on_each_cpu(cpu_init_stage2, NULL, 1);
+
+	/* set size of VMID supported by CPU */
+	kvm_vmid_bits = kvm_get_vmid_bits();
+	kvm_info("%d-bit VMID\n", kvm_vmid_bits);
+
+	kvm_info("VHE mode initialized successfully\n");
+	return 0;
+}
+
 /**
  * Inits Hyp-mode on all online CPUs
  */
@@ -1065,7 +1211,7 @@
 		stack_page = __get_free_page(GFP_KERNEL);
 		if (!stack_page) {
 			err = -ENOMEM;
-			goto out_free_stack_pages;
+			goto out_err;
 		}
 
 		per_cpu(kvm_arm_hyp_stack_page, cpu) = stack_page;
@@ -1074,16 +1220,16 @@
 	/*
 	 * Map the Hyp-code called directly from the host
 	 */
-	err = create_hyp_mappings(__kvm_hyp_code_start, __kvm_hyp_code_end);
+	err = create_hyp_mappings(__hyp_text_start, __hyp_text_end);
 	if (err) {
 		kvm_err("Cannot map world-switch code\n");
-		goto out_free_mappings;
+		goto out_err;
 	}
 
 	err = create_hyp_mappings(__start_rodata, __end_rodata);
 	if (err) {
 		kvm_err("Cannot map rodata section\n");
-		goto out_free_mappings;
+		goto out_err;
 	}
 
 	/*
@@ -1095,20 +1241,10 @@
 
 		if (err) {
 			kvm_err("Cannot map hyp stack\n");
-			goto out_free_mappings;
+			goto out_err;
 		}
 	}
 
-	/*
-	 * Map the host CPU structures
-	 */
-	kvm_host_cpu_state = alloc_percpu(kvm_cpu_context_t);
-	if (!kvm_host_cpu_state) {
-		err = -ENOMEM;
-		kvm_err("Cannot allocate host CPU state\n");
-		goto out_free_mappings;
-	}
-
 	for_each_possible_cpu(cpu) {
 		kvm_cpu_context_t *cpu_ctxt;
 
@@ -1117,7 +1253,7 @@
 
 		if (err) {
 			kvm_err("Cannot map host CPU state: %d\n", err);
-			goto out_free_context;
+			goto out_err;
 		}
 	}
 
@@ -1126,34 +1262,22 @@
 	 */
 	on_each_cpu(cpu_init_hyp_mode, NULL, 1);
 
-	/*
-	 * Init HYP view of VGIC
-	 */
-	err = kvm_vgic_hyp_init();
-	switch (err) {
-	case 0:
-		vgic_present = true;
-		break;
-	case -ENODEV:
-	case -ENXIO:
-		vgic_present = false;
-		break;
-	default:
-		goto out_free_context;
-	}
-
-	/*
-	 * Init HYP architected timer support
-	 */
-	err = kvm_timer_hyp_init();
-	if (err)
-		goto out_free_context;
-
 #ifndef CONFIG_HOTPLUG_CPU
 	free_boot_hyp_pgd();
 #endif
 
-	kvm_perf_init();
+	cpu_notifier_register_begin();
+
+	err = __register_cpu_notifier(&hyp_init_cpu_nb);
+
+	cpu_notifier_register_done();
+
+	if (err) {
+		kvm_err("Cannot register HYP init CPU notifier (%d)\n", err);
+		goto out_err;
+	}
+
+	hyp_cpu_pm_init();
 
 	/* set size of VMID supported by CPU */
 	kvm_vmid_bits = kvm_get_vmid_bits();
@@ -1162,14 +1286,9 @@
 	kvm_info("Hyp mode initialized successfully\n");
 
 	return 0;
-out_free_context:
-	free_percpu(kvm_host_cpu_state);
-out_free_mappings:
-	free_hyp_pgds();
-out_free_stack_pages:
-	for_each_possible_cpu(cpu)
-		free_page(per_cpu(kvm_arm_hyp_stack_page, cpu));
+
 out_err:
+	teardown_hyp_mode();
 	kvm_err("error initializing Hyp mode: %d\n", err);
 	return err;
 }
@@ -1213,26 +1332,27 @@
 		}
 	}
 
-	cpu_notifier_register_begin();
+	err = init_common_resources();
+	if (err)
+		return err;
 
-	err = init_hyp_mode();
+	if (is_kernel_in_hyp_mode())
+		err = init_vhe_mode();
+	else
+		err = init_hyp_mode();
 	if (err)
 		goto out_err;
 
-	err = __register_cpu_notifier(&hyp_init_cpu_nb);
-	if (err) {
-		kvm_err("Cannot register HYP init CPU notifier (%d)\n", err);
-		goto out_err;
-	}
+	err = init_subsystems();
+	if (err)
+		goto out_hyp;
 
-	cpu_notifier_register_done();
-
-	hyp_cpu_pm_init();
-
-	kvm_coproc_table_init();
 	return 0;
+
+out_hyp:
+	teardown_hyp_mode();
 out_err:
-	cpu_notifier_register_done();
+	teardown_common_resources();
 	return err;
 }
 
diff --git a/arch/arm/kvm/coproc.c b/arch/arm/kvm/coproc.c
index f3d88dc..1bb2b79 100644
--- a/arch/arm/kvm/coproc.c
+++ b/arch/arm/kvm/coproc.c
@@ -16,6 +16,8 @@
  * along with this program; if not, write to the Free Software
  * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
  */
+
+#include <linux/bsearch.h>
 #include <linux/mm.h>
 #include <linux/kvm_host.h>
 #include <linux/uaccess.h>
@@ -54,8 +56,8 @@
 				       const struct coproc_reg *r,
 				       u64 val)
 {
-	vcpu->arch.cp15[r->reg] = val & 0xffffffff;
-	vcpu->arch.cp15[r->reg + 1] = val >> 32;
+	vcpu_cp15(vcpu, r->reg) = val & 0xffffffff;
+	vcpu_cp15(vcpu, r->reg + 1) = val >> 32;
 }
 
 static inline u64 vcpu_cp15_reg64_get(struct kvm_vcpu *vcpu,
@@ -63,9 +65,9 @@
 {
 	u64 val;
 
-	val = vcpu->arch.cp15[r->reg + 1];
+	val = vcpu_cp15(vcpu, r->reg + 1);
 	val = val << 32;
-	val = val | vcpu->arch.cp15[r->reg];
+	val = val | vcpu_cp15(vcpu, r->reg);
 	return val;
 }
 
@@ -104,7 +106,7 @@
 	 * vcpu_id, but we read the 'U' bit from the underlying
 	 * hardware directly.
 	 */
-	vcpu->arch.cp15[c0_MPIDR] = ((read_cpuid_mpidr() & MPIDR_SMP_BITMASK) |
+	vcpu_cp15(vcpu, c0_MPIDR) = ((read_cpuid_mpidr() & MPIDR_SMP_BITMASK) |
 				     ((vcpu->vcpu_id >> 2) << MPIDR_LEVEL_BITS) |
 				     (vcpu->vcpu_id & 3));
 }
@@ -117,7 +119,7 @@
 	if (p->is_write)
 		return ignore_write(vcpu, p);
 
-	*vcpu_reg(vcpu, p->Rt1) = vcpu->arch.cp15[c1_ACTLR];
+	*vcpu_reg(vcpu, p->Rt1) = vcpu_cp15(vcpu, c1_ACTLR);
 	return true;
 }
 
@@ -139,7 +141,7 @@
 	if (p->is_write)
 		return ignore_write(vcpu, p);
 
-	*vcpu_reg(vcpu, p->Rt1) = vcpu->arch.cp15[c9_L2CTLR];
+	*vcpu_reg(vcpu, p->Rt1) = vcpu_cp15(vcpu, c9_L2CTLR);
 	return true;
 }
 
@@ -156,7 +158,7 @@
 	ncores = min(ncores, 3U);
 	l2ctlr |= (ncores & 3) << 24;
 
-	vcpu->arch.cp15[c9_L2CTLR] = l2ctlr;
+	vcpu_cp15(vcpu, c9_L2CTLR) = l2ctlr;
 }
 
 static void reset_actlr(struct kvm_vcpu *vcpu, const struct coproc_reg *r)
@@ -171,7 +173,7 @@
 	else
 		actlr &= ~(1U << 6);
 
-	vcpu->arch.cp15[c1_ACTLR] = actlr;
+	vcpu_cp15(vcpu, c1_ACTLR) = actlr;
 }
 
 /*
@@ -218,9 +220,9 @@
 
 	BUG_ON(!p->is_write);
 
-	vcpu->arch.cp15[r->reg] = *vcpu_reg(vcpu, p->Rt1);
+	vcpu_cp15(vcpu, r->reg) = *vcpu_reg(vcpu, p->Rt1);
 	if (p->is_64bit)
-		vcpu->arch.cp15[r->reg + 1] = *vcpu_reg(vcpu, p->Rt2);
+		vcpu_cp15(vcpu, r->reg + 1) = *vcpu_reg(vcpu, p->Rt2);
 
 	kvm_toggle_cache(vcpu, was_enabled);
 	return true;
@@ -381,17 +383,26 @@
 	{ CRn(15), CRm( 0), Op1( 4), Op2( 0), is32, access_cbar},
 };
 
+static int check_reg_table(const struct coproc_reg *table, unsigned int n)
+{
+	unsigned int i;
+
+	for (i = 1; i < n; i++) {
+		if (cmp_reg(&table[i-1], &table[i]) >= 0) {
+			kvm_err("reg table %p out of order (%d)\n", table, i - 1);
+			return 1;
+		}
+	}
+
+	return 0;
+}
+
 /* Target specific emulation tables */
 static struct kvm_coproc_target_table *target_tables[KVM_ARM_NUM_TARGETS];
 
 void kvm_register_target_coproc_table(struct kvm_coproc_target_table *table)
 {
-	unsigned int i;
-
-	for (i = 1; i < table->num; i++)
-		BUG_ON(cmp_reg(&table->table[i-1],
-			       &table->table[i]) >= 0);
-
+	BUG_ON(check_reg_table(table->table, table->num));
 	target_tables[table->target] = table;
 }
 
@@ -405,29 +416,32 @@
 	return table->table;
 }
 
+#define reg_to_match_value(x)						\
+	({								\
+		unsigned long val;					\
+		val  = (x)->CRn << 11;					\
+		val |= (x)->CRm << 7;					\
+		val |= (x)->Op1 << 4;					\
+		val |= (x)->Op2 << 1;					\
+		val |= !(x)->is_64bit;					\
+		val;							\
+	 })
+
+static int match_reg(const void *key, const void *elt)
+{
+	const unsigned long pval = (unsigned long)key;
+	const struct coproc_reg *r = elt;
+
+	return pval - reg_to_match_value(r);
+}
+
 static const struct coproc_reg *find_reg(const struct coproc_params *params,
 					 const struct coproc_reg table[],
 					 unsigned int num)
 {
-	unsigned int i;
+	unsigned long pval = reg_to_match_value(params);
 
-	for (i = 0; i < num; i++) {
-		const struct coproc_reg *r = &table[i];
-
-		if (params->is_64bit != r->is_64)
-			continue;
-		if (params->CRn != r->CRn)
-			continue;
-		if (params->CRm != r->CRm)
-			continue;
-		if (params->Op1 != r->Op1)
-			continue;
-		if (params->Op2 != r->Op2)
-			continue;
-
-		return r;
-	}
-	return NULL;
+	return bsearch((void *)pval, table, num, sizeof(table[0]), match_reg);
 }
 
 static int emulate_cp15(struct kvm_vcpu *vcpu,
@@ -645,6 +659,9 @@
 	{ CRn( 0), CRm( 0), Op1( 0), Op2( 3), is32, NULL, get_TLBTR },
 	{ CRn( 0), CRm( 0), Op1( 0), Op2( 6), is32, NULL, get_REVIDR },
 
+	{ CRn( 0), CRm( 0), Op1( 1), Op2( 1), is32, NULL, get_CLIDR },
+	{ CRn( 0), CRm( 0), Op1( 1), Op2( 7), is32, NULL, get_AIDR },
+
 	{ CRn( 0), CRm( 1), Op1( 0), Op2( 0), is32, NULL, get_ID_PFR0 },
 	{ CRn( 0), CRm( 1), Op1( 0), Op2( 1), is32, NULL, get_ID_PFR1 },
 	{ CRn( 0), CRm( 1), Op1( 0), Op2( 2), is32, NULL, get_ID_DFR0 },
@@ -660,9 +677,6 @@
 	{ CRn( 0), CRm( 2), Op1( 0), Op2( 3), is32, NULL, get_ID_ISAR3 },
 	{ CRn( 0), CRm( 2), Op1( 0), Op2( 4), is32, NULL, get_ID_ISAR4 },
 	{ CRn( 0), CRm( 2), Op1( 0), Op2( 5), is32, NULL, get_ID_ISAR5 },
-
-	{ CRn( 0), CRm( 0), Op1( 1), Op2( 1), is32, NULL, get_CLIDR },
-	{ CRn( 0), CRm( 0), Op1( 1), Op2( 7), is32, NULL, get_AIDR },
 };
 
 /*
@@ -901,7 +915,7 @@
 	if (vfpid < num_fp_regs()) {
 		if (KVM_REG_SIZE(id) != 8)
 			return -ENOENT;
-		return reg_to_user(uaddr, &vcpu->arch.vfp_guest.fpregs[vfpid],
+		return reg_to_user(uaddr, &vcpu->arch.ctxt.vfp.fpregs[vfpid],
 				   id);
 	}
 
@@ -911,13 +925,13 @@
 
 	switch (vfpid) {
 	case KVM_REG_ARM_VFP_FPEXC:
-		return reg_to_user(uaddr, &vcpu->arch.vfp_guest.fpexc, id);
+		return reg_to_user(uaddr, &vcpu->arch.ctxt.vfp.fpexc, id);
 	case KVM_REG_ARM_VFP_FPSCR:
-		return reg_to_user(uaddr, &vcpu->arch.vfp_guest.fpscr, id);
+		return reg_to_user(uaddr, &vcpu->arch.ctxt.vfp.fpscr, id);
 	case KVM_REG_ARM_VFP_FPINST:
-		return reg_to_user(uaddr, &vcpu->arch.vfp_guest.fpinst, id);
+		return reg_to_user(uaddr, &vcpu->arch.ctxt.vfp.fpinst, id);
 	case KVM_REG_ARM_VFP_FPINST2:
-		return reg_to_user(uaddr, &vcpu->arch.vfp_guest.fpinst2, id);
+		return reg_to_user(uaddr, &vcpu->arch.ctxt.vfp.fpinst2, id);
 	case KVM_REG_ARM_VFP_MVFR0:
 		val = fmrx(MVFR0);
 		return reg_to_user(uaddr, &val, id);
@@ -945,7 +959,7 @@
 	if (vfpid < num_fp_regs()) {
 		if (KVM_REG_SIZE(id) != 8)
 			return -ENOENT;
-		return reg_from_user(&vcpu->arch.vfp_guest.fpregs[vfpid],
+		return reg_from_user(&vcpu->arch.ctxt.vfp.fpregs[vfpid],
 				     uaddr, id);
 	}
 
@@ -955,13 +969,13 @@
 
 	switch (vfpid) {
 	case KVM_REG_ARM_VFP_FPEXC:
-		return reg_from_user(&vcpu->arch.vfp_guest.fpexc, uaddr, id);
+		return reg_from_user(&vcpu->arch.ctxt.vfp.fpexc, uaddr, id);
 	case KVM_REG_ARM_VFP_FPSCR:
-		return reg_from_user(&vcpu->arch.vfp_guest.fpscr, uaddr, id);
+		return reg_from_user(&vcpu->arch.ctxt.vfp.fpscr, uaddr, id);
 	case KVM_REG_ARM_VFP_FPINST:
-		return reg_from_user(&vcpu->arch.vfp_guest.fpinst, uaddr, id);
+		return reg_from_user(&vcpu->arch.ctxt.vfp.fpinst, uaddr, id);
 	case KVM_REG_ARM_VFP_FPINST2:
-		return reg_from_user(&vcpu->arch.vfp_guest.fpinst2, uaddr, id);
+		return reg_from_user(&vcpu->arch.ctxt.vfp.fpinst2, uaddr, id);
 	/* These are invariant. */
 	case KVM_REG_ARM_VFP_MVFR0:
 		if (reg_from_user(&val, uaddr, id))
@@ -1030,7 +1044,7 @@
 		val = vcpu_cp15_reg64_get(vcpu, r);
 		ret = reg_to_user(uaddr, &val, reg->id);
 	} else if (KVM_REG_SIZE(reg->id) == 4) {
-		ret = reg_to_user(uaddr, &vcpu->arch.cp15[r->reg], reg->id);
+		ret = reg_to_user(uaddr, &vcpu_cp15(vcpu, r->reg), reg->id);
 	}
 
 	return ret;
@@ -1060,7 +1074,7 @@
 		if (!ret)
 			vcpu_cp15_reg64_set(vcpu, r, val);
 	} else if (KVM_REG_SIZE(reg->id) == 4) {
-		ret = reg_from_user(&vcpu->arch.cp15[r->reg], uaddr, reg->id);
+		ret = reg_from_user(&vcpu_cp15(vcpu, r->reg), uaddr, reg->id);
 	}
 
 	return ret;
@@ -1096,7 +1110,7 @@
 static u64 cp15_to_index(const struct coproc_reg *reg)
 {
 	u64 val = KVM_REG_ARM | (15 << KVM_REG_ARM_COPROC_SHIFT);
-	if (reg->is_64) {
+	if (reg->is_64bit) {
 		val |= KVM_REG_SIZE_U64;
 		val |= (reg->Op1 << KVM_REG_ARM_OPC1_SHIFT);
 		/*
@@ -1210,8 +1224,8 @@
 	unsigned int i;
 
 	/* Make sure tables are unique and in order. */
-	for (i = 1; i < ARRAY_SIZE(cp15_regs); i++)
-		BUG_ON(cmp_reg(&cp15_regs[i-1], &cp15_regs[i]) >= 0);
+	BUG_ON(check_reg_table(cp15_regs, ARRAY_SIZE(cp15_regs)));
+	BUG_ON(check_reg_table(invariant_cp15, ARRAY_SIZE(invariant_cp15)));
 
 	/* We abuse the reset function to overwrite the table itself. */
 	for (i = 0; i < ARRAY_SIZE(invariant_cp15); i++)
@@ -1248,7 +1262,7 @@
 	const struct coproc_reg *table;
 
 	/* Catch someone adding a register without putting in reset entry. */
-	memset(vcpu->arch.cp15, 0x42, sizeof(vcpu->arch.cp15));
+	memset(vcpu->arch.ctxt.cp15, 0x42, sizeof(vcpu->arch.ctxt.cp15));
 
 	/* Generic chip reset first (so target could override). */
 	reset_coproc_regs(vcpu, cp15_regs, ARRAY_SIZE(cp15_regs));
@@ -1257,6 +1271,6 @@
 	reset_coproc_regs(vcpu, table, num);
 
 	for (num = 1; num < NR_CP15_REGS; num++)
-		if (vcpu->arch.cp15[num] == 0x42424242)
-			panic("Didn't reset vcpu->arch.cp15[%zi]", num);
+		if (vcpu_cp15(vcpu, num) == 0x42424242)
+			panic("Didn't reset vcpu_cp15(vcpu, %zi)", num);
 }
diff --git a/arch/arm/kvm/coproc.h b/arch/arm/kvm/coproc.h
index 88d24a3..eef1759 100644
--- a/arch/arm/kvm/coproc.h
+++ b/arch/arm/kvm/coproc.h
@@ -37,7 +37,7 @@
 	unsigned long Op1;
 	unsigned long Op2;
 
-	bool is_64;
+	bool is_64bit;
 
 	/* Trapped access from guest, if non-NULL. */
 	bool (*access)(struct kvm_vcpu *,
@@ -47,7 +47,7 @@
 	/* Initialization for vcpu. */
 	void (*reset)(struct kvm_vcpu *, const struct coproc_reg *);
 
-	/* Index into vcpu->arch.cp15[], or 0 if we don't need to save it. */
+	/* Index into vcpu_cp15(vcpu, ...), or 0 if we don't need to save it. */
 	unsigned long reg;
 
 	/* Value (usually reset value) */
@@ -104,25 +104,25 @@
 				 const struct coproc_reg *r)
 {
 	BUG_ON(!r->reg);
-	BUG_ON(r->reg >= ARRAY_SIZE(vcpu->arch.cp15));
-	vcpu->arch.cp15[r->reg] = 0xdecafbad;
+	BUG_ON(r->reg >= ARRAY_SIZE(vcpu->arch.ctxt.cp15));
+	vcpu_cp15(vcpu, r->reg) = 0xdecafbad;
 }
 
 static inline void reset_val(struct kvm_vcpu *vcpu, const struct coproc_reg *r)
 {
 	BUG_ON(!r->reg);
-	BUG_ON(r->reg >= ARRAY_SIZE(vcpu->arch.cp15));
-	vcpu->arch.cp15[r->reg] = r->val;
+	BUG_ON(r->reg >= ARRAY_SIZE(vcpu->arch.ctxt.cp15));
+	vcpu_cp15(vcpu, r->reg) = r->val;
 }
 
 static inline void reset_unknown64(struct kvm_vcpu *vcpu,
 				   const struct coproc_reg *r)
 {
 	BUG_ON(!r->reg);
-	BUG_ON(r->reg + 1 >= ARRAY_SIZE(vcpu->arch.cp15));
+	BUG_ON(r->reg + 1 >= ARRAY_SIZE(vcpu->arch.ctxt.cp15));
 
-	vcpu->arch.cp15[r->reg] = 0xdecafbad;
-	vcpu->arch.cp15[r->reg+1] = 0xd0c0ffee;
+	vcpu_cp15(vcpu, r->reg) = 0xdecafbad;
+	vcpu_cp15(vcpu, r->reg+1) = 0xd0c0ffee;
 }
 
 static inline int cmp_reg(const struct coproc_reg *i1,
@@ -141,7 +141,7 @@
 		return i1->Op1 - i2->Op1;
 	if (i1->Op2 != i2->Op2)
 		return i1->Op2 - i2->Op2;
-	return i2->is_64 - i1->is_64;
+	return i2->is_64bit - i1->is_64bit;
 }
 
 
@@ -150,8 +150,8 @@
 #define CRm64(_x)       .CRn = _x, .CRm = 0
 #define Op1(_x) 	.Op1 = _x
 #define Op2(_x) 	.Op2 = _x
-#define is64		.is_64 = true
-#define is32		.is_64 = false
+#define is64		.is_64bit = true
+#define is32		.is_64bit = false
 
 bool access_vm_reg(struct kvm_vcpu *vcpu,
 		   const struct coproc_params *p,
diff --git a/arch/arm/kvm/emulate.c b/arch/arm/kvm/emulate.c
index dc99159..a494def 100644
--- a/arch/arm/kvm/emulate.c
+++ b/arch/arm/kvm/emulate.c
@@ -112,7 +112,7 @@
  */
 unsigned long *vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num)
 {
-	unsigned long *reg_array = (unsigned long *)&vcpu->arch.regs;
+	unsigned long *reg_array = (unsigned long *)&vcpu->arch.ctxt.gp_regs;
 	unsigned long mode = *vcpu_cpsr(vcpu) & MODE_MASK;
 
 	switch (mode) {
@@ -147,15 +147,15 @@
 	unsigned long mode = *vcpu_cpsr(vcpu) & MODE_MASK;
 	switch (mode) {
 	case SVC_MODE:
-		return &vcpu->arch.regs.KVM_ARM_SVC_spsr;
+		return &vcpu->arch.ctxt.gp_regs.KVM_ARM_SVC_spsr;
 	case ABT_MODE:
-		return &vcpu->arch.regs.KVM_ARM_ABT_spsr;
+		return &vcpu->arch.ctxt.gp_regs.KVM_ARM_ABT_spsr;
 	case UND_MODE:
-		return &vcpu->arch.regs.KVM_ARM_UND_spsr;
+		return &vcpu->arch.ctxt.gp_regs.KVM_ARM_UND_spsr;
 	case IRQ_MODE:
-		return &vcpu->arch.regs.KVM_ARM_IRQ_spsr;
+		return &vcpu->arch.ctxt.gp_regs.KVM_ARM_IRQ_spsr;
 	case FIQ_MODE:
-		return &vcpu->arch.regs.KVM_ARM_FIQ_spsr;
+		return &vcpu->arch.ctxt.gp_regs.KVM_ARM_FIQ_spsr;
 	default:
 		BUG();
 	}
@@ -266,8 +266,8 @@
 
 static u32 exc_vector_base(struct kvm_vcpu *vcpu)
 {
-	u32 sctlr = vcpu->arch.cp15[c1_SCTLR];
-	u32 vbar = vcpu->arch.cp15[c12_VBAR];
+	u32 sctlr = vcpu_cp15(vcpu, c1_SCTLR);
+	u32 vbar = vcpu_cp15(vcpu, c12_VBAR);
 
 	if (sctlr & SCTLR_V)
 		return 0xffff0000;
@@ -282,7 +282,7 @@
 static void kvm_update_psr(struct kvm_vcpu *vcpu, unsigned long mode)
 {
 	unsigned long cpsr = *vcpu_cpsr(vcpu);
-	u32 sctlr = vcpu->arch.cp15[c1_SCTLR];
+	u32 sctlr = vcpu_cp15(vcpu, c1_SCTLR);
 
 	*vcpu_cpsr(vcpu) = (cpsr & ~MODE_MASK) | mode;
 
@@ -357,22 +357,22 @@
 
 	if (is_pabt) {
 		/* Set IFAR and IFSR */
-		vcpu->arch.cp15[c6_IFAR] = addr;
-		is_lpae = (vcpu->arch.cp15[c2_TTBCR] >> 31);
+		vcpu_cp15(vcpu, c6_IFAR) = addr;
+		is_lpae = (vcpu_cp15(vcpu, c2_TTBCR) >> 31);
 		/* Always give debug fault for now - should give guest a clue */
 		if (is_lpae)
-			vcpu->arch.cp15[c5_IFSR] = 1 << 9 | 0x22;
+			vcpu_cp15(vcpu, c5_IFSR) = 1 << 9 | 0x22;
 		else
-			vcpu->arch.cp15[c5_IFSR] = 2;
+			vcpu_cp15(vcpu, c5_IFSR) = 2;
 	} else { /* !iabt */
 		/* Set DFAR and DFSR */
-		vcpu->arch.cp15[c6_DFAR] = addr;
-		is_lpae = (vcpu->arch.cp15[c2_TTBCR] >> 31);
+		vcpu_cp15(vcpu, c6_DFAR) = addr;
+		is_lpae = (vcpu_cp15(vcpu, c2_TTBCR) >> 31);
 		/* Always give debug fault for now - should give guest a clue */
 		if (is_lpae)
-			vcpu->arch.cp15[c5_DFSR] = 1 << 9 | 0x22;
+			vcpu_cp15(vcpu, c5_DFSR) = 1 << 9 | 0x22;
 		else
-			vcpu->arch.cp15[c5_DFSR] = 2;
+			vcpu_cp15(vcpu, c5_DFSR) = 2;
 	}
 
 }
diff --git a/arch/arm/kvm/guest.c b/arch/arm/kvm/guest.c
index 99361f1..9093ed0 100644
--- a/arch/arm/kvm/guest.c
+++ b/arch/arm/kvm/guest.c
@@ -25,7 +25,6 @@
 #include <asm/cputype.h>
 #include <asm/uaccess.h>
 #include <asm/kvm.h>
-#include <asm/kvm_asm.h>
 #include <asm/kvm_emulate.h>
 #include <asm/kvm_coproc.h>
 
@@ -55,7 +54,7 @@
 static int get_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
 {
 	u32 __user *uaddr = (u32 __user *)(long)reg->addr;
-	struct kvm_regs *regs = &vcpu->arch.regs;
+	struct kvm_regs *regs = &vcpu->arch.ctxt.gp_regs;
 	u64 off;
 
 	if (KVM_REG_SIZE(reg->id) != 4)
@@ -72,7 +71,7 @@
 static int set_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
 {
 	u32 __user *uaddr = (u32 __user *)(long)reg->addr;
-	struct kvm_regs *regs = &vcpu->arch.regs;
+	struct kvm_regs *regs = &vcpu->arch.ctxt.gp_regs;
 	u64 off, val;
 
 	if (KVM_REG_SIZE(reg->id) != 4)
diff --git a/arch/arm/kvm/handle_exit.c b/arch/arm/kvm/handle_exit.c
index 3ede90d..3f1ef0d 100644
--- a/arch/arm/kvm/handle_exit.c
+++ b/arch/arm/kvm/handle_exit.c
@@ -147,13 +147,6 @@
 	switch (exception_index) {
 	case ARM_EXCEPTION_IRQ:
 		return 1;
-	case ARM_EXCEPTION_UNDEFINED:
-		kvm_err("Undefined exception in Hyp mode at: %#08lx\n",
-			kvm_vcpu_get_hyp_pc(vcpu));
-		BUG();
-		panic("KVM: Hypervisor undefined exception!\n");
-	case ARM_EXCEPTION_DATA_ABORT:
-	case ARM_EXCEPTION_PREF_ABORT:
 	case ARM_EXCEPTION_HVC:
 		/*
 		 * See ARM ARM B1.14.1: "Hyp traps on instructions
diff --git a/arch/arm/kvm/hyp/Makefile b/arch/arm/kvm/hyp/Makefile
new file mode 100644
index 0000000..8dfa5f7
--- /dev/null
+++ b/arch/arm/kvm/hyp/Makefile
@@ -0,0 +1,17 @@
+#
+# Makefile for Kernel-based Virtual Machine module, HYP part
+#
+
+KVM=../../../../virt/kvm
+
+obj-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/hyp/vgic-v2-sr.o
+obj-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/hyp/timer-sr.o
+
+obj-$(CONFIG_KVM_ARM_HOST) += tlb.o
+obj-$(CONFIG_KVM_ARM_HOST) += cp15-sr.o
+obj-$(CONFIG_KVM_ARM_HOST) += vfp.o
+obj-$(CONFIG_KVM_ARM_HOST) += banked-sr.o
+obj-$(CONFIG_KVM_ARM_HOST) += entry.o
+obj-$(CONFIG_KVM_ARM_HOST) += hyp-entry.o
+obj-$(CONFIG_KVM_ARM_HOST) += switch.o
+obj-$(CONFIG_KVM_ARM_HOST) += s2-setup.o
diff --git a/arch/arm/kvm/hyp/banked-sr.c b/arch/arm/kvm/hyp/banked-sr.c
new file mode 100644
index 0000000..111bda8
--- /dev/null
+++ b/arch/arm/kvm/hyp/banked-sr.c
@@ -0,0 +1,77 @@
+/*
+ * Original code:
+ * Copyright (C) 2012 - Virtual Open Systems and Columbia University
+ * Author: Christoffer Dall <c.dall@virtualopensystems.com>
+ *
+ * Mostly rewritten in C by Marc Zyngier <marc.zyngier@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <asm/kvm_hyp.h>
+
+__asm__(".arch_extension     virt");
+
+void __hyp_text __banked_save_state(struct kvm_cpu_context *ctxt)
+{
+	ctxt->gp_regs.usr_regs.ARM_sp	= read_special(SP_usr);
+	ctxt->gp_regs.usr_regs.ARM_pc	= read_special(ELR_hyp);
+	ctxt->gp_regs.usr_regs.ARM_cpsr	= read_special(SPSR);
+	ctxt->gp_regs.KVM_ARM_SVC_sp	= read_special(SP_svc);
+	ctxt->gp_regs.KVM_ARM_SVC_lr	= read_special(LR_svc);
+	ctxt->gp_regs.KVM_ARM_SVC_spsr	= read_special(SPSR_svc);
+	ctxt->gp_regs.KVM_ARM_ABT_sp	= read_special(SP_abt);
+	ctxt->gp_regs.KVM_ARM_ABT_lr	= read_special(LR_abt);
+	ctxt->gp_regs.KVM_ARM_ABT_spsr	= read_special(SPSR_abt);
+	ctxt->gp_regs.KVM_ARM_UND_sp	= read_special(SP_und);
+	ctxt->gp_regs.KVM_ARM_UND_lr	= read_special(LR_und);
+	ctxt->gp_regs.KVM_ARM_UND_spsr	= read_special(SPSR_und);
+	ctxt->gp_regs.KVM_ARM_IRQ_sp	= read_special(SP_irq);
+	ctxt->gp_regs.KVM_ARM_IRQ_lr	= read_special(LR_irq);
+	ctxt->gp_regs.KVM_ARM_IRQ_spsr	= read_special(SPSR_irq);
+	ctxt->gp_regs.KVM_ARM_FIQ_r8	= read_special(R8_fiq);
+	ctxt->gp_regs.KVM_ARM_FIQ_r9	= read_special(R9_fiq);
+	ctxt->gp_regs.KVM_ARM_FIQ_r10	= read_special(R10_fiq);
+	ctxt->gp_regs.KVM_ARM_FIQ_fp	= read_special(R11_fiq);
+	ctxt->gp_regs.KVM_ARM_FIQ_ip	= read_special(R12_fiq);
+	ctxt->gp_regs.KVM_ARM_FIQ_sp	= read_special(SP_fiq);
+	ctxt->gp_regs.KVM_ARM_FIQ_lr	= read_special(LR_fiq);
+	ctxt->gp_regs.KVM_ARM_FIQ_spsr	= read_special(SPSR_fiq);
+}
+
+void __hyp_text __banked_restore_state(struct kvm_cpu_context *ctxt)
+{
+	write_special(ctxt->gp_regs.usr_regs.ARM_sp,	SP_usr);
+	write_special(ctxt->gp_regs.usr_regs.ARM_pc,	ELR_hyp);
+	write_special(ctxt->gp_regs.usr_regs.ARM_cpsr,	SPSR_cxsf);
+	write_special(ctxt->gp_regs.KVM_ARM_SVC_sp,	SP_svc);
+	write_special(ctxt->gp_regs.KVM_ARM_SVC_lr,	LR_svc);
+	write_special(ctxt->gp_regs.KVM_ARM_SVC_spsr,	SPSR_svc);
+	write_special(ctxt->gp_regs.KVM_ARM_ABT_sp,	SP_abt);
+	write_special(ctxt->gp_regs.KVM_ARM_ABT_lr,	LR_abt);
+	write_special(ctxt->gp_regs.KVM_ARM_ABT_spsr,	SPSR_abt);
+	write_special(ctxt->gp_regs.KVM_ARM_UND_sp,	SP_und);
+	write_special(ctxt->gp_regs.KVM_ARM_UND_lr,	LR_und);
+	write_special(ctxt->gp_regs.KVM_ARM_UND_spsr,	SPSR_und);
+	write_special(ctxt->gp_regs.KVM_ARM_IRQ_sp,	SP_irq);
+	write_special(ctxt->gp_regs.KVM_ARM_IRQ_lr,	LR_irq);
+	write_special(ctxt->gp_regs.KVM_ARM_IRQ_spsr,	SPSR_irq);
+	write_special(ctxt->gp_regs.KVM_ARM_FIQ_r8,	R8_fiq);
+	write_special(ctxt->gp_regs.KVM_ARM_FIQ_r9,	R9_fiq);
+	write_special(ctxt->gp_regs.KVM_ARM_FIQ_r10,	R10_fiq);
+	write_special(ctxt->gp_regs.KVM_ARM_FIQ_fp,	R11_fiq);
+	write_special(ctxt->gp_regs.KVM_ARM_FIQ_ip,	R12_fiq);
+	write_special(ctxt->gp_regs.KVM_ARM_FIQ_sp,	SP_fiq);
+	write_special(ctxt->gp_regs.KVM_ARM_FIQ_lr,	LR_fiq);
+	write_special(ctxt->gp_regs.KVM_ARM_FIQ_spsr,	SPSR_fiq);
+}
diff --git a/arch/arm/kvm/hyp/cp15-sr.c b/arch/arm/kvm/hyp/cp15-sr.c
new file mode 100644
index 0000000..c478281
--- /dev/null
+++ b/arch/arm/kvm/hyp/cp15-sr.c
@@ -0,0 +1,84 @@
+/*
+ * Original code:
+ * Copyright (C) 2012 - Virtual Open Systems and Columbia University
+ * Author: Christoffer Dall <c.dall@virtualopensystems.com>
+ *
+ * Mostly rewritten in C by Marc Zyngier <marc.zyngier@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <asm/kvm_hyp.h>
+
+static u64 *cp15_64(struct kvm_cpu_context *ctxt, int idx)
+{
+	return (u64 *)(ctxt->cp15 + idx);
+}
+
+void __hyp_text __sysreg_save_state(struct kvm_cpu_context *ctxt)
+{
+	ctxt->cp15[c0_MPIDR]		= read_sysreg(VMPIDR);
+	ctxt->cp15[c0_CSSELR]		= read_sysreg(CSSELR);
+	ctxt->cp15[c1_SCTLR]		= read_sysreg(SCTLR);
+	ctxt->cp15[c1_CPACR]		= read_sysreg(CPACR);
+	*cp15_64(ctxt, c2_TTBR0)	= read_sysreg(TTBR0);
+	*cp15_64(ctxt, c2_TTBR1)	= read_sysreg(TTBR1);
+	ctxt->cp15[c2_TTBCR]		= read_sysreg(TTBCR);
+	ctxt->cp15[c3_DACR]		= read_sysreg(DACR);
+	ctxt->cp15[c5_DFSR]		= read_sysreg(DFSR);
+	ctxt->cp15[c5_IFSR]		= read_sysreg(IFSR);
+	ctxt->cp15[c5_ADFSR]		= read_sysreg(ADFSR);
+	ctxt->cp15[c5_AIFSR]		= read_sysreg(AIFSR);
+	ctxt->cp15[c6_DFAR]		= read_sysreg(DFAR);
+	ctxt->cp15[c6_IFAR]		= read_sysreg(IFAR);
+	*cp15_64(ctxt, c7_PAR)		= read_sysreg(PAR);
+	ctxt->cp15[c10_PRRR]		= read_sysreg(PRRR);
+	ctxt->cp15[c10_NMRR]		= read_sysreg(NMRR);
+	ctxt->cp15[c10_AMAIR0]		= read_sysreg(AMAIR0);
+	ctxt->cp15[c10_AMAIR1]		= read_sysreg(AMAIR1);
+	ctxt->cp15[c12_VBAR]		= read_sysreg(VBAR);
+	ctxt->cp15[c13_CID]		= read_sysreg(CID);
+	ctxt->cp15[c13_TID_URW]		= read_sysreg(TID_URW);
+	ctxt->cp15[c13_TID_URO]		= read_sysreg(TID_URO);
+	ctxt->cp15[c13_TID_PRIV]	= read_sysreg(TID_PRIV);
+	ctxt->cp15[c14_CNTKCTL]		= read_sysreg(CNTKCTL);
+}
+
+void __hyp_text __sysreg_restore_state(struct kvm_cpu_context *ctxt)
+{
+	write_sysreg(ctxt->cp15[c0_MPIDR],	VMPIDR);
+	write_sysreg(ctxt->cp15[c0_CSSELR],	CSSELR);
+	write_sysreg(ctxt->cp15[c1_SCTLR],	SCTLR);
+	write_sysreg(ctxt->cp15[c1_CPACR],	CPACR);
+	write_sysreg(*cp15_64(ctxt, c2_TTBR0),	TTBR0);
+	write_sysreg(*cp15_64(ctxt, c2_TTBR1),	TTBR1);
+	write_sysreg(ctxt->cp15[c2_TTBCR],	TTBCR);
+	write_sysreg(ctxt->cp15[c3_DACR],	DACR);
+	write_sysreg(ctxt->cp15[c5_DFSR],	DFSR);
+	write_sysreg(ctxt->cp15[c5_IFSR],	IFSR);
+	write_sysreg(ctxt->cp15[c5_ADFSR],	ADFSR);
+	write_sysreg(ctxt->cp15[c5_AIFSR],	AIFSR);
+	write_sysreg(ctxt->cp15[c6_DFAR],	DFAR);
+	write_sysreg(ctxt->cp15[c6_IFAR],	IFAR);
+	write_sysreg(*cp15_64(ctxt, c7_PAR),	PAR);
+	write_sysreg(ctxt->cp15[c10_PRRR],	PRRR);
+	write_sysreg(ctxt->cp15[c10_NMRR],	NMRR);
+	write_sysreg(ctxt->cp15[c10_AMAIR0],	AMAIR0);
+	write_sysreg(ctxt->cp15[c10_AMAIR1],	AMAIR1);
+	write_sysreg(ctxt->cp15[c12_VBAR],	VBAR);
+	write_sysreg(ctxt->cp15[c13_CID],	CID);
+	write_sysreg(ctxt->cp15[c13_TID_URW],	TID_URW);
+	write_sysreg(ctxt->cp15[c13_TID_URO],	TID_URO);
+	write_sysreg(ctxt->cp15[c13_TID_PRIV],	TID_PRIV);
+	write_sysreg(ctxt->cp15[c14_CNTKCTL],	CNTKCTL);
+}
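
The read_sysreg()/write_sysreg() accessors play the same game for cp15: each symbolic name carries a coprocessor encoding, and a helper macro turns it into an mrc/mcr (or mrrc/mcrr for the 64-bit TTBR0/TTBR1/PAR) instruction. A rough sketch of that pattern, showing only the SCTLR encoding and making no claim to match the exact asm/kvm_hyp.h definitions:

	#include <linux/types.h>

	/* Illustrative cp15 accessor pattern; names and encodings are a sketch. */
	#define __ACCESS_CP15(CRn, Op1, CRm, Op2)				\
		"mrc", "mcr", "p15, " #Op1 ", %0, " #CRn ", " #CRm ", " #Op2, u32

	#define SCTLR	__ACCESS_CP15(c1, 0, c0, 0)	/* example encoding */

	#define __read_sysreg(rd, wr, enc, type) ({		\
		type __val;					\
		asm volatile(rd " " enc : "=r" (__val));	\
		__val;						\
	})
	#define read_sysreg(...)	__read_sysreg(__VA_ARGS__)

	#define __write_sysreg(v, rd, wr, enc, type)		\
		asm volatile(wr " " enc : : "r" ((type)(v)))
	#define write_sysreg(v, ...)	__write_sysreg(v, __VA_ARGS__)

With that shape, read_sysreg(SCTLR) expands to "mrc p15, 0, %0, c1, c0, 0" and write_sysreg(val, SCTLR) to the matching mcr, which is what lets the context switch above read as plain C assignments.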
diff --git a/arch/arm/kvm/hyp/entry.S b/arch/arm/kvm/hyp/entry.S
new file mode 100644
index 0000000..21c2388
--- /dev/null
+++ b/arch/arm/kvm/hyp/entry.S
@@ -0,0 +1,101 @@
+/*
+ * Copyright (C) 2016 - ARM Ltd
+ * Author: Marc Zyngier <marc.zyngier@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#include <linux/linkage.h>
+#include <asm/asm-offsets.h>
+#include <asm/kvm_arm.h>
+
+	.arch_extension     virt
+
+	.text
+	.pushsection	.hyp.text, "ax"
+
+#define USR_REGS_OFFSET		(CPU_CTXT_GP_REGS + GP_REGS_USR)
+
+/* int __guest_enter(struct kvm_vcpu *vcpu, struct kvm_cpu_context *host) */
+ENTRY(__guest_enter)
+	@ Save host registers
+	add	r1, r1, #(USR_REGS_OFFSET + S_R4)
+	stm	r1!, {r4-r12}
+	str	lr, [r1, #4]	@ Skip SP_usr (already saved)
+
+	@ Restore guest registers
+	add	r0, r0,  #(VCPU_GUEST_CTXT + USR_REGS_OFFSET + S_R0)
+	ldr	lr, [r0, #S_LR]
+	ldm	r0, {r0-r12}
+
+	clrex
+	eret
+ENDPROC(__guest_enter)
+
+ENTRY(__guest_exit)
+	/*
+	 * return convention:
+	 * guest r0, r1, r2 saved on the stack
+	 * r0: vcpu pointer
+	 * r1: exception code
+	 */
+
+	add	r2, r0, #(VCPU_GUEST_CTXT + USR_REGS_OFFSET + S_R3)
+	stm	r2!, {r3-r12}
+	str	lr, [r2, #4]
+	add	r2, r0, #(VCPU_GUEST_CTXT + USR_REGS_OFFSET + S_R0)
+	pop	{r3, r4, r5}		@ r0, r1, r2
+	stm	r2, {r3-r5}
+
+	ldr	r0, [r0, #VCPU_HOST_CTXT]
+	add	r0, r0, #(USR_REGS_OFFSET + S_R4)
+	ldm	r0!, {r4-r12}
+	ldr	lr, [r0, #4]
+
+	mov	r0, r1
+	bx	lr
+ENDPROC(__guest_exit)
+
+/*
+ * If VFPv3 support is not available, then we will not switch the VFP
+ * registers; however, cp10 and cp11 accesses will still trap and fall back
+ * to the regular coprocessor emulation code, which currently will
+ * inject an undefined exception to the guest.
+ */
+#ifdef CONFIG_VFPv3
+ENTRY(__vfp_guest_restore)
+	push	{r3, r4, lr}
+
+	@ NEON/VFP used.  Turn on VFP access.
+	mrc	p15, 4, r1, c1, c1, 2		@ HCPTR
+	bic	r1, r1, #(HCPTR_TCP(10) | HCPTR_TCP(11))
+	mcr	p15, 4, r1, c1, c1, 2		@ HCPTR
+	isb
+
+	@ Switch VFP/NEON hardware state to the guest's
+	mov	r4, r0
+	ldr	r0, [r0, #VCPU_HOST_CTXT]
+	add	r0, r0, #CPU_CTXT_VFP
+	bl	__vfp_save_state
+	add	r0, r4, #(VCPU_GUEST_CTXT + CPU_CTXT_VFP)
+	bl	__vfp_restore_state
+
+	pop	{r3, r4, lr}
+	pop	{r0, r1, r2}
+	clrex
+	eret
+ENDPROC(__vfp_guest_restore)
+#endif
+
+	.popsection
+
diff --git a/arch/arm/kvm/hyp/hyp-entry.S b/arch/arm/kvm/hyp/hyp-entry.S
new file mode 100644
index 0000000..7809138
--- /dev/null
+++ b/arch/arm/kvm/hyp/hyp-entry.S
@@ -0,0 +1,169 @@
+/*
+ * Copyright (C) 2012 - Virtual Open Systems and Columbia University
+ * Author: Christoffer Dall <c.dall@virtualopensystems.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#include <linux/linkage.h>
+#include <asm/kvm_arm.h>
+#include <asm/kvm_asm.h>
+
+	.arch_extension     virt
+
+	.text
+	.pushsection	.hyp.text, "ax"
+
+.macro load_vcpu	reg
+	mrc	p15, 4, \reg, c13, c0, 2	@ HTPIDR
+.endm
+
+/********************************************************************
+ * Hypervisor exception vector and handlers
+ *
+ *
+ * The KVM/ARM Hypervisor ABI is defined as follows:
+ *
+ * Entry to Hyp mode from the host kernel will happen _only_ when an HVC
+ * instruction is issued since all traps are disabled when running the host
+ * kernel as per the Hyp-mode initialization at boot time.
+ *
+ * HVC instructions cause a trap to the vector page + offset 0x14 (see hyp_hvc
+ * below) when the HVC instruction is called from SVC mode (i.e. a guest or the
+ * host kernel) and they cause a trap to the vector page + offset 0x8 when HVC
+ * instructions are called from within Hyp-mode.
+ *
+ * Hyp-ABI: Calling HYP-mode functions from host (in SVC mode):
+ *    Switching to Hyp mode is done through a simple HVC #0 instruction. The
+ *    exception vector code will check that the HVC comes from VMID==0.
+ *    - r0 contains a pointer to a HYP function
+ *    - r1, r2, and r3 contain arguments to the above function.
+ *    - The HYP function will be called with its arguments in r0, r1 and r2.
+ *    On HYP function return, we return directly to SVC.
+ *
+ * Note that the above is used to execute code in Hyp-mode from a host-kernel
+ * point of view, and is a different concept from performing a world-switch and
+ * executing guest code SVC mode (with a VMID != 0).
+ */
+
+	.align 5
+__kvm_hyp_vector:
+	.global __kvm_hyp_vector
+
+	@ Hyp-mode exception vector
+	W(b)	hyp_reset
+	W(b)	hyp_undef
+	W(b)	hyp_svc
+	W(b)	hyp_pabt
+	W(b)	hyp_dabt
+	W(b)	hyp_hvc
+	W(b)	hyp_irq
+	W(b)	hyp_fiq
+
+.macro invalid_vector label, cause
+	.align
+\label:	mov	r0, #\cause
+	b	__hyp_panic
+.endm
+
+	invalid_vector	hyp_reset	ARM_EXCEPTION_RESET
+	invalid_vector	hyp_undef	ARM_EXCEPTION_UNDEFINED
+	invalid_vector	hyp_svc		ARM_EXCEPTION_SOFTWARE
+	invalid_vector	hyp_pabt	ARM_EXCEPTION_PREF_ABORT
+	invalid_vector	hyp_dabt	ARM_EXCEPTION_DATA_ABORT
+	invalid_vector	hyp_fiq		ARM_EXCEPTION_FIQ
+
+ENTRY(__hyp_do_panic)
+	mrs	lr, cpsr
+	bic	lr, lr, #MODE_MASK
+	orr	lr, lr, #SVC_MODE
+THUMB(	orr	lr, lr, #PSR_T_BIT	)
+	msr	spsr_cxsf, lr
+	ldr	lr, =panic
+	msr	ELR_hyp, lr
+	ldr	lr, =kvm_call_hyp
+	clrex
+	eret
+ENDPROC(__hyp_do_panic)
+
+hyp_hvc:
+	/*
+	 * Getting here is either because of a trap from a guest,
+	 * or from executing HVC from the host kernel, which means
+	 * "do something in Hyp mode".
+	 */
+	push	{r0, r1, r2}
+
+	@ Check syndrome register
+	mrc	p15, 4, r1, c5, c2, 0	@ HSR
+	lsr	r0, r1, #HSR_EC_SHIFT
+	cmp	r0, #HSR_EC_HVC
+	bne	guest_trap		@ Not HVC instr.
+
+	/*
+	 * Let's check if the HVC came from VMID 0 and allow simple
+	 * switch to Hyp mode
+	 */
+	mrrc    p15, 6, r0, r2, c2
+	lsr     r2, r2, #16
+	and     r2, r2, #0xff
+	cmp     r2, #0
+	bne	guest_trap		@ Guest called HVC
+
+	/*
+	 * Getting here means the host called HVC; we shift the parameters
+	 * and branch to the Hyp function.

+	 */
+	pop	{r0, r1, r2}
+
+	/* Check for __hyp_get_vectors */
+	cmp	r0, #-1
+	mrceq	p15, 4, r0, c12, c0, 0	@ get HVBAR
+	beq	1f
+
+	push	{lr}
+
+	mov	lr, r0
+	mov	r0, r1
+	mov	r1, r2
+	mov	r2, r3
+
+THUMB(	orr	lr, #1)
+	blx	lr			@ Call the HYP function
+
+	pop	{lr}
+1:	eret
+
+guest_trap:
+	load_vcpu r0			@ Load VCPU pointer to r0
+
+#ifdef CONFIG_VFPv3
+	@ Check for a VFP access
+	lsr	r1, r1, #HSR_EC_SHIFT
+	cmp	r1, #HSR_EC_CP_0_13
+	beq	__vfp_guest_restore
+#endif
+
+	mov	r1, #ARM_EXCEPTION_HVC
+	b	__guest_exit
+
+hyp_irq:
+	push	{r0, r1, r2}
+	mov	r1, #ARM_EXCEPTION_IRQ
+	load_vcpu r0			@ Load VCPU pointer to r0
+	b	__guest_exit
+
+	.ltorg
+
+	.popsection
diff --git a/arch/arm/kvm/hyp/s2-setup.c b/arch/arm/kvm/hyp/s2-setup.c
new file mode 100644
index 0000000..7be39af
--- /dev/null
+++ b/arch/arm/kvm/hyp/s2-setup.c
@@ -0,0 +1,33 @@
+/*
+ * Copyright (C) 2016 - ARM Ltd
+ * Author: Marc Zyngier <marc.zyngier@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/types.h>
+#include <asm/kvm_arm.h>
+#include <asm/kvm_asm.h>
+#include <asm/kvm_hyp.h>
+
+void __hyp_text __init_stage2_translation(void)
+{
+	u64 val;
+
+	val = read_sysreg(VTCR) & ~VTCR_MASK;
+
+	val |= read_sysreg(HTCR) & VTCR_HTCR_SH;
+	val |= KVM_VTCR_SL0 | KVM_VTCR_T0SZ | KVM_VTCR_S;
+
+	write_sysreg(val, VTCR);
+}
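
This replaces the VTCR programming that the series removes from arch/arm/kvm/init.S (see the hunk further down), turning stage-2 setup into a HYP-callable C function. The expected host-side hook is something along these lines; the wrapper name is an assumption, since the actual call site is not part of this hunk:

	/* Hypothetical host-side wrapper; the real call site is not shown here. */
	static inline void __cpu_init_stage2(void)
	{
		kvm_call_hyp(__init_stage2_translation);
	}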
diff --git a/arch/arm/kvm/hyp/switch.c b/arch/arm/kvm/hyp/switch.c
new file mode 100644
index 0000000..b13caa9
--- /dev/null
+++ b/arch/arm/kvm/hyp/switch.c
@@ -0,0 +1,232 @@
+/*
+ * Copyright (C) 2015 - ARM Ltd
+ * Author: Marc Zyngier <marc.zyngier@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <asm/kvm_asm.h>
+#include <asm/kvm_hyp.h>
+
+__asm__(".arch_extension     virt");
+
+/*
+ * Activate the traps, saving the host's fpexc register before
+ * overwriting it. We'll restore it on VM exit.
+ */
+static void __hyp_text __activate_traps(struct kvm_vcpu *vcpu, u32 *fpexc_host)
+{
+	u32 val;
+
+	/*
+	 * We are about to set HCPTR.TCP10/11 to trap all floating point
+	 * register accesses to HYP; however, the ARM ARM clearly states that
+	 * traps are only taken to HYP if the operation would not otherwise
+	 * trap to SVC.  Therefore, always make sure that for 32-bit guests,
+	 * we set FPEXC.EN to prevent traps to SVC, when setting the TCP bits.
+	 */
+	val = read_sysreg(VFP_FPEXC);
+	*fpexc_host = val;
+	if (!(val & FPEXC_EN)) {
+		write_sysreg(val | FPEXC_EN, VFP_FPEXC);
+		isb();
+	}
+
+	write_sysreg(vcpu->arch.hcr | vcpu->arch.irq_lines, HCR);
+	/* Trap on AArch32 cp15 c15 accesses (EL1 or EL0) */
+	write_sysreg(HSTR_T(15), HSTR);
+	write_sysreg(HCPTR_TTA | HCPTR_TCP(10) | HCPTR_TCP(11), HCPTR);
+	val = read_sysreg(HDCR);
+	write_sysreg(val | HDCR_TPM | HDCR_TPMCR, HDCR);
+}
+
+static void __hyp_text __deactivate_traps(struct kvm_vcpu *vcpu)
+{
+	u32 val;
+
+	write_sysreg(0, HCR);
+	write_sysreg(0, HSTR);
+	val = read_sysreg(HDCR);
+	write_sysreg(val & ~(HDCR_TPM | HDCR_TPMCR), HDCR);
+	write_sysreg(0, HCPTR);
+}
+
+static void __hyp_text __activate_vm(struct kvm_vcpu *vcpu)
+{
+	struct kvm *kvm = kern_hyp_va(vcpu->kvm);
+	write_sysreg(kvm->arch.vttbr, VTTBR);
+	write_sysreg(vcpu->arch.midr, VPIDR);
+}
+
+static void __hyp_text __deactivate_vm(struct kvm_vcpu *vcpu)
+{
+	write_sysreg(0, VTTBR);
+	write_sysreg(read_sysreg(MIDR), VPIDR);
+}
+
+static void __hyp_text __vgic_save_state(struct kvm_vcpu *vcpu)
+{
+	__vgic_v2_save_state(vcpu);
+}
+
+static void __hyp_text __vgic_restore_state(struct kvm_vcpu *vcpu)
+{
+	__vgic_v2_restore_state(vcpu);
+}
+
+static bool __hyp_text __populate_fault_info(struct kvm_vcpu *vcpu)
+{
+	u32 hsr = read_sysreg(HSR);
+	u8 ec = hsr >> HSR_EC_SHIFT;
+	u32 hpfar, far;
+
+	vcpu->arch.fault.hsr = hsr;
+
+	if (ec == HSR_EC_IABT)
+		far = read_sysreg(HIFAR);
+	else if (ec == HSR_EC_DABT)
+		far = read_sysreg(HDFAR);
+	else
+		return true;
+
+	/*
+	 * B3.13.5 Reporting exceptions taken to the Non-secure PL2 mode:
+	 *
+	 * Abort on the stage 2 translation for a memory access from a
+	 * Non-secure PL1 or PL0 mode:
+	 *
+	 * For any Access flag fault or Translation fault, and also for any
+	 * Permission fault on the stage 2 translation of a memory access
+	 * made as part of a translation table walk for a stage 1 translation,
+	 * the HPFAR holds the IPA that caused the fault. Otherwise, the HPFAR
+	 * is UNKNOWN.
+	 */
+	if (!(hsr & HSR_DABT_S1PTW) && (hsr & HSR_FSC_TYPE) == FSC_PERM) {
+		u64 par, tmp;
+
+		par = read_sysreg(PAR);
+		write_sysreg(far, ATS1CPR);
+		isb();
+
+		tmp = read_sysreg(PAR);
+		write_sysreg(par, PAR);
+
+		if (unlikely(tmp & 1))
+			return false; /* Translation failed, back to guest */
+
+		hpfar = ((tmp >> 12) & ((1UL << 28) - 1)) << 4;
+	} else {
+		hpfar = read_sysreg(HPFAR);
+	}
+
+	vcpu->arch.fault.hxfar = far;
+	vcpu->arch.fault.hpfar = hpfar;
+	return true;
+}
+
+static int __hyp_text __guest_run(struct kvm_vcpu *vcpu)
+{
+	struct kvm_cpu_context *host_ctxt;
+	struct kvm_cpu_context *guest_ctxt;
+	bool fp_enabled;
+	u64 exit_code;
+	u32 fpexc;
+
+	vcpu = kern_hyp_va(vcpu);
+	write_sysreg(vcpu, HTPIDR);
+
+	host_ctxt = kern_hyp_va(vcpu->arch.host_cpu_context);
+	guest_ctxt = &vcpu->arch.ctxt;
+
+	__sysreg_save_state(host_ctxt);
+	__banked_save_state(host_ctxt);
+
+	__activate_traps(vcpu, &fpexc);
+	__activate_vm(vcpu);
+
+	__vgic_restore_state(vcpu);
+	__timer_restore_state(vcpu);
+
+	__sysreg_restore_state(guest_ctxt);
+	__banked_restore_state(guest_ctxt);
+
+	/* Jump in the fire! */
+again:
+	exit_code = __guest_enter(vcpu, host_ctxt);
+	/* And we're baaack! */
+
+	if (exit_code == ARM_EXCEPTION_HVC && !__populate_fault_info(vcpu))
+		goto again;
+
+	fp_enabled = __vfp_enabled();
+
+	__banked_save_state(guest_ctxt);
+	__sysreg_save_state(guest_ctxt);
+	__timer_save_state(vcpu);
+	__vgic_save_state(vcpu);
+
+	__deactivate_traps(vcpu);
+	__deactivate_vm(vcpu);
+
+	__banked_restore_state(host_ctxt);
+	__sysreg_restore_state(host_ctxt);
+
+	if (fp_enabled) {
+		__vfp_save_state(&guest_ctxt->vfp);
+		__vfp_restore_state(&host_ctxt->vfp);
+	}
+
+	write_sysreg(fpexc, VFP_FPEXC);
+
+	return exit_code;
+}
+
+__alias(__guest_run) int __kvm_vcpu_run(struct kvm_vcpu *vcpu);
+
+static const char * const __hyp_panic_string[] = {
+	[ARM_EXCEPTION_RESET]      = "\nHYP panic: RST   PC:%08x CPSR:%08x",
+	[ARM_EXCEPTION_UNDEFINED]  = "\nHYP panic: UNDEF PC:%08x CPSR:%08x",
+	[ARM_EXCEPTION_SOFTWARE]   = "\nHYP panic: SVC   PC:%08x CPSR:%08x",
+	[ARM_EXCEPTION_PREF_ABORT] = "\nHYP panic: PABRT PC:%08x CPSR:%08x",
+	[ARM_EXCEPTION_DATA_ABORT] = "\nHYP panic: DABRT PC:%08x ADDR:%08x",
+	[ARM_EXCEPTION_IRQ]        = "\nHYP panic: IRQ   PC:%08x CPSR:%08x",
+	[ARM_EXCEPTION_FIQ]        = "\nHYP panic: FIQ   PC:%08x CPSR:%08x",
+	[ARM_EXCEPTION_HVC]        = "\nHYP panic: HVC   PC:%08x CPSR:%08x",
+};
+
+void __hyp_text __noreturn __hyp_panic(int cause)
+{
+	u32 elr = read_special(ELR_hyp);
+	u32 val;
+
+	if (cause == ARM_EXCEPTION_DATA_ABORT)
+		val = read_sysreg(HDFAR);
+	else
+		val = read_special(SPSR);
+
+	if (read_sysreg(VTTBR)) {
+		struct kvm_vcpu *vcpu;
+		struct kvm_cpu_context *host_ctxt;
+
+		vcpu = (struct kvm_vcpu *)read_sysreg(HTPIDR);
+		host_ctxt = kern_hyp_va(vcpu->arch.host_cpu_context);
+		__deactivate_traps(vcpu);
+		__deactivate_vm(vcpu);
+		__sysreg_restore_state(host_ctxt);
+	}
+
+	/* Call panic for real */
+	__hyp_do_panic(__hyp_panic_string[cause], elr, val);
+
+	unreachable();
+}
diff --git a/arch/arm/kvm/hyp/tlb.c b/arch/arm/kvm/hyp/tlb.c
new file mode 100644
index 0000000..a263600
--- /dev/null
+++ b/arch/arm/kvm/hyp/tlb.c
@@ -0,0 +1,70 @@
+/*
+ * Original code:
+ * Copyright (C) 2012 - Virtual Open Systems and Columbia University
+ * Author: Christoffer Dall <c.dall@virtualopensystems.com>
+ *
+ * Mostly rewritten in C by Marc Zyngier <marc.zyngier@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <asm/kvm_hyp.h>
+
+/**
+ * Flush per-VMID TLBs
+ *
+ * __kvm_tlb_flush_vmid(struct kvm *kvm);
+ *
+ * We rely on the hardware to broadcast the TLB invalidation to all CPUs
+ * inside the inner-shareable domain (which is the case for all v7
+ * implementations).  If we come across a non-IS SMP implementation, we'll
+ * have to use an IPI based mechanism. Until then, we stick to the simple
+ * hardware assisted version.
+ *
+ * As v7 does not support flushing per IPA, just nuke the whole TLB
+ * instead, ignoring the ipa value.
+ */
+static void __hyp_text __tlb_flush_vmid(struct kvm *kvm)
+{
+	dsb(ishst);
+
+	/* Switch to requested VMID */
+	kvm = kern_hyp_va(kvm);
+	write_sysreg(kvm->arch.vttbr, VTTBR);
+	isb();
+
+	write_sysreg(0, TLBIALLIS);
+	dsb(ish);
+	isb();
+
+	write_sysreg(0, VTTBR);
+}
+
+__alias(__tlb_flush_vmid) void __kvm_tlb_flush_vmid(struct kvm *kvm);
+
+static void __hyp_text __tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
+{
+	__tlb_flush_vmid(kvm);
+}
+
+__alias(__tlb_flush_vmid_ipa) void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm,
+							    phys_addr_t ipa);
+
+static void __hyp_text __tlb_flush_vm_context(void)
+{
+	write_sysreg(0, TLBIALLNSNHIS);
+	write_sysreg(0, ICIALLUIS);
+	dsb(ish);
+}
+
+__alias(__tlb_flush_vm_context) void __kvm_flush_vm_context(void);
diff --git a/arch/arm/kvm/hyp/vfp.S b/arch/arm/kvm/hyp/vfp.S
new file mode 100644
index 0000000..7c297e8
--- /dev/null
+++ b/arch/arm/kvm/hyp/vfp.S
@@ -0,0 +1,68 @@
+/*
+ * Copyright (C) 2012 - Virtual Open Systems and Columbia University
+ * Author: Christoffer Dall <c.dall@virtualopensystems.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/linkage.h>
+#include <asm/vfpmacros.h>
+
+	.text
+	.pushsection	.hyp.text, "ax"
+
+/* void __vfp_save_state(struct vfp_hard_struct *vfp); */
+ENTRY(__vfp_save_state)
+	push	{r4, r5}
+	VFPFMRX	r1, FPEXC
+
+	@ Make sure VFP is *really* enabled so we can touch the registers.
+	orr	r5, r1, #FPEXC_EN
+	tst	r5, #FPEXC_EX		@ Check for VFP Subarchitecture
+	bic	r5, r5, #FPEXC_EX	@ FPEXC_EX disable
+	VFPFMXR	FPEXC, r5
+	isb
+
+	VFPFMRX	r2, FPSCR
+	beq	1f
+
+	@ If FPEXC_EX is 0, then FPINST/FPINST2 reads are unpredictable, so
+	@ we only need to save them if FPEXC_EX is set.
+	VFPFMRX r3, FPINST
+	tst	r5, #FPEXC_FP2V
+	VFPFMRX r4, FPINST2, ne		@ vmrsne
+1:
+	VFPFSTMIA r0, r5		@ Save VFP registers
+	stm	r0, {r1-r4}		@ Save FPEXC, FPSCR, FPINST, FPINST2
+	pop	{r4, r5}
+	bx	lr
+ENDPROC(__vfp_save_state)
+
+/* void __vfp_restore_state(struct vfp_hard_struct *vfp);
+ * Assume FPEXC_EN is on and FPEXC_EX is off */
+ENTRY(__vfp_restore_state)
+	VFPFLDMIA r0, r1		@ Load VFP registers
+	ldm	r0, {r0-r3}		@ Load FPEXC, FPSCR, FPINST, FPINST2
+
+	VFPFMXR FPSCR, r1
+	tst	r0, #FPEXC_EX		@ Check for VFP Subarchitecture
+	beq	1f
+	VFPFMXR FPINST, r2
+	tst	r0, #FPEXC_FP2V
+	VFPFMXR FPINST2, r3, ne
+1:
+	VFPFMXR FPEXC, r0		@ FPEXC	(last, in case !EN)
+	bx	lr
+ENDPROC(__vfp_restore_state)
+
+	.popsection
diff --git a/arch/arm/kvm/init.S b/arch/arm/kvm/init.S
index 3988e72..1f9ae17 100644
--- a/arch/arm/kvm/init.S
+++ b/arch/arm/kvm/init.S
@@ -84,14 +84,6 @@
 	orr	r0, r0, r1
 	mcr	p15, 4, r0, c2, c0, 2	@ HTCR
 
-	mrc	p15, 4, r1, c2, c1, 2	@ VTCR
-	ldr	r2, =VTCR_MASK
-	bic	r1, r1, r2
-	bic	r0, r0, #(~VTCR_HTCR_SH)	@ clear non-reusable HTCR bits
-	orr	r1, r0, r1
-	orr	r1, r1, #(KVM_VTCR_SL0 | KVM_VTCR_T0SZ | KVM_VTCR_S)
-	mcr	p15, 4, r1, c2, c1, 2	@ VTCR
-
 	@ Use the same memory attributes for hyp. accesses as the kernel
 	@ (copy MAIRx to HMAIRx).
 	mrc	p15, 0, r0, c10, c2, 0
diff --git a/arch/arm/kvm/interrupts.S b/arch/arm/kvm/interrupts.S
index 900ef6d..b1bd316 100644
--- a/arch/arm/kvm/interrupts.S
+++ b/arch/arm/kvm/interrupts.S
@@ -17,211 +17,14 @@
  */
 
 #include <linux/linkage.h>
-#include <linux/const.h>
-#include <asm/unified.h>
-#include <asm/page.h>
-#include <asm/ptrace.h>
-#include <asm/asm-offsets.h>
-#include <asm/kvm_asm.h>
-#include <asm/kvm_arm.h>
-#include <asm/vfpmacros.h>
-#include "interrupts_head.S"
 
 	.text
 
-__kvm_hyp_code_start:
-	.globl __kvm_hyp_code_start
-
-/********************************************************************
- * Flush per-VMID TLBs
- *
- * void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa);
- *
- * We rely on the hardware to broadcast the TLB invalidation to all CPUs
- * inside the inner-shareable domain (which is the case for all v7
- * implementations).  If we come across a non-IS SMP implementation, we'll
- * have to use an IPI based mechanism. Until then, we stick to the simple
- * hardware assisted version.
- *
- * As v7 does not support flushing per IPA, just nuke the whole TLB
- * instead, ignoring the ipa value.
- */
-ENTRY(__kvm_tlb_flush_vmid_ipa)
-	push	{r2, r3}
-
-	dsb	ishst
-	add	r0, r0, #KVM_VTTBR
-	ldrd	r2, r3, [r0]
-	mcrr	p15, 6, rr_lo_hi(r2, r3), c2	@ Write VTTBR
-	isb
-	mcr     p15, 0, r0, c8, c3, 0	@ TLBIALLIS (rt ignored)
-	dsb	ish
-	isb
-	mov	r2, #0
-	mov	r3, #0
-	mcrr	p15, 6, r2, r3, c2	@ Back to VMID #0
-	isb				@ Not necessary if followed by eret
-
-	pop	{r2, r3}
-	bx	lr
-ENDPROC(__kvm_tlb_flush_vmid_ipa)
-
-/**
- * void __kvm_tlb_flush_vmid(struct kvm *kvm) - Flush per-VMID TLBs
- *
- * Reuses __kvm_tlb_flush_vmid_ipa() for ARMv7, without passing address
- * parameter
- */
-
-ENTRY(__kvm_tlb_flush_vmid)
-	b	__kvm_tlb_flush_vmid_ipa
-ENDPROC(__kvm_tlb_flush_vmid)
-
-/********************************************************************
- * Flush TLBs and instruction caches of all CPUs inside the inner-shareable
- * domain, for all VMIDs
- *
- * void __kvm_flush_vm_context(void);
- */
-ENTRY(__kvm_flush_vm_context)
-	mov	r0, #0			@ rn parameter for c15 flushes is SBZ
-
-	/* Invalidate NS Non-Hyp TLB Inner Shareable (TLBIALLNSNHIS) */
-	mcr     p15, 4, r0, c8, c3, 4
-	/* Invalidate instruction caches Inner Shareable (ICIALLUIS) */
-	mcr     p15, 0, r0, c7, c1, 0
-	dsb	ish
-	isb				@ Not necessary if followed by eret
-
-	bx	lr
-ENDPROC(__kvm_flush_vm_context)
-
-
-/********************************************************************
- *  Hypervisor world-switch code
- *
- *
- * int __kvm_vcpu_run(struct kvm_vcpu *vcpu)
- */
-ENTRY(__kvm_vcpu_run)
-	@ Save the vcpu pointer
-	mcr	p15, 4, vcpu, c13, c0, 2	@ HTPIDR
-
-	save_host_regs
-
-	restore_vgic_state
-	restore_timer_state
-
-	@ Store hardware CP15 state and load guest state
-	read_cp15_state store_to_vcpu = 0
-	write_cp15_state read_from_vcpu = 1
-
-	@ If the host kernel has not been configured with VFPv3 support,
-	@ then it is safer if we deny guests from using it as well.
-#ifdef CONFIG_VFPv3
-	@ Set FPEXC_EN so the guest doesn't trap floating point instructions
-	VFPFMRX r2, FPEXC		@ VMRS
-	push	{r2}
-	orr	r2, r2, #FPEXC_EN
-	VFPFMXR FPEXC, r2		@ VMSR
-#endif
-
-	@ Configure Hyp-role
-	configure_hyp_role vmentry
-
-	@ Trap coprocessor CRx accesses
-	set_hstr vmentry
-	set_hcptr vmentry, (HCPTR_TTA | HCPTR_TCP(10) | HCPTR_TCP(11))
-	set_hdcr vmentry
-
-	@ Write configured ID register into MIDR alias
-	ldr	r1, [vcpu, #VCPU_MIDR]
-	mcr	p15, 4, r1, c0, c0, 0
-
-	@ Write guest view of MPIDR into VMPIDR
-	ldr	r1, [vcpu, #CP15_OFFSET(c0_MPIDR)]
-	mcr	p15, 4, r1, c0, c0, 5
-
-	@ Set up guest memory translation
-	ldr	r1, [vcpu, #VCPU_KVM]
-	add	r1, r1, #KVM_VTTBR
-	ldrd	r2, r3, [r1]
-	mcrr	p15, 6, rr_lo_hi(r2, r3), c2	@ Write VTTBR
-
-	@ We're all done, just restore the GPRs and go to the guest
-	restore_guest_regs
-	clrex				@ Clear exclusive monitor
-	eret
-
-__kvm_vcpu_return:
-	/*
-	 * return convention:
-	 * guest r0, r1, r2 saved on the stack
-	 * r0: vcpu pointer
-	 * r1: exception code
-	 */
-	save_guest_regs
-
-	@ Set VMID == 0
-	mov	r2, #0
-	mov	r3, #0
-	mcrr	p15, 6, r2, r3, c2	@ Write VTTBR
-
-	@ Don't trap coprocessor accesses for host kernel
-	set_hstr vmexit
-	set_hdcr vmexit
-	set_hcptr vmexit, (HCPTR_TTA | HCPTR_TCP(10) | HCPTR_TCP(11)), after_vfp_restore
-
-#ifdef CONFIG_VFPv3
-	@ Switch VFP/NEON hardware state to the host's
-	add	r7, vcpu, #VCPU_VFP_GUEST
-	store_vfp_state r7
-	add	r7, vcpu, #VCPU_VFP_HOST
-	ldr	r7, [r7]
-	restore_vfp_state r7
-
-after_vfp_restore:
-	@ Restore FPEXC_EN which we clobbered on entry
-	pop	{r2}
-	VFPFMXR FPEXC, r2
-#else
-after_vfp_restore:
-#endif
-
-	@ Reset Hyp-role
-	configure_hyp_role vmexit
-
-	@ Let host read hardware MIDR
-	mrc	p15, 0, r2, c0, c0, 0
-	mcr	p15, 4, r2, c0, c0, 0
-
-	@ Back to hardware MPIDR
-	mrc	p15, 0, r2, c0, c0, 5
-	mcr	p15, 4, r2, c0, c0, 5
-
-	@ Store guest CP15 state and restore host state
-	read_cp15_state store_to_vcpu = 1
-	write_cp15_state read_from_vcpu = 0
-
-	save_timer_state
-	save_vgic_state
-
-	restore_host_regs
-	clrex				@ Clear exclusive monitor
-#ifndef CONFIG_CPU_ENDIAN_BE8
-	mov	r0, r1			@ Return the return code
-	mov	r1, #0			@ Clear upper bits in return value
-#else
-	@ r1 already has return code
-	mov	r0, #0			@ Clear upper bits in return value
-#endif /* CONFIG_CPU_ENDIAN_BE8 */
-	bx	lr			@ return to IOCTL
-
 /********************************************************************
  *  Call function in Hyp mode
  *
  *
- * u64 kvm_call_hyp(void *hypfn, ...);
+ * unsigned long kvm_call_hyp(void *hypfn, ...);
  *
  * This is not really a variadic function in the classic C-way and care must
  * be taken when calling this to ensure parameters are passed in registers
@@ -232,7 +35,7 @@
  * passed as r0, r1, and r2 (a maximum of 3 arguments in addition to the
  * function pointer can be passed).  The function being called must be mapped
  * in Hyp mode (see init_hyp_mode in arch/arm/kvm/arm.c).  Return values are
- * passed in r0 and r1.
+ * passed in r0 (strictly 32bit).
  *
  * A function pointer with a value of 0xffffffff has a special meaning,
  * and is used to implement __hyp_get_vectors in the same way as in
@@ -246,281 +49,4 @@
 ENTRY(kvm_call_hyp)
 	hvc	#0
 	bx	lr
-
-/********************************************************************
- * Hypervisor exception vector and handlers
- *
- *
- * The KVM/ARM Hypervisor ABI is defined as follows:
- *
- * Entry to Hyp mode from the host kernel will happen _only_ when an HVC
- * instruction is issued since all traps are disabled when running the host
- * kernel as per the Hyp-mode initialization at boot time.
- *
- * HVC instructions cause a trap to the vector page + offset 0x14 (see hyp_hvc
- * below) when the HVC instruction is called from SVC mode (i.e. a guest or the
- * host kernel) and they cause a trap to the vector page + offset 0x8 when HVC
- * instructions are called from within Hyp-mode.
- *
- * Hyp-ABI: Calling HYP-mode functions from host (in SVC mode):
- *    Switching to Hyp mode is done through a simple HVC #0 instruction. The
- *    exception vector code will check that the HVC comes from VMID==0 and if
- *    so will push the necessary state (SPSR, lr_usr) on the Hyp stack.
- *    - r0 contains a pointer to a HYP function
- *    - r1, r2, and r3 contain arguments to the above function.
- *    - The HYP function will be called with its arguments in r0, r1 and r2.
- *    On HYP function return, we return directly to SVC.
- *
- * Note that the above is used to execute code in Hyp-mode from a host-kernel
- * point of view, and is a different concept from performing a world-switch and
- * executing guest code SVC mode (with a VMID != 0).
- */
-
-/* Handle undef, svc, pabt, or dabt by crashing with a user notice */
-.macro bad_exception exception_code, panic_str
-	push	{r0-r2}
-	mrrc	p15, 6, r0, r1, c2	@ Read VTTBR
-	lsr	r1, r1, #16
-	ands	r1, r1, #0xff
-	beq	99f
-
-	load_vcpu			@ Load VCPU pointer
-	.if \exception_code == ARM_EXCEPTION_DATA_ABORT
-	mrc	p15, 4, r2, c5, c2, 0	@ HSR
-	mrc	p15, 4, r1, c6, c0, 0	@ HDFAR
-	str	r2, [vcpu, #VCPU_HSR]
-	str	r1, [vcpu, #VCPU_HxFAR]
-	.endif
-	.if \exception_code == ARM_EXCEPTION_PREF_ABORT
-	mrc	p15, 4, r2, c5, c2, 0	@ HSR
-	mrc	p15, 4, r1, c6, c0, 2	@ HIFAR
-	str	r2, [vcpu, #VCPU_HSR]
-	str	r1, [vcpu, #VCPU_HxFAR]
-	.endif
-	mov	r1, #\exception_code
-	b	__kvm_vcpu_return
-
-	@ We were in the host already. Let's craft a panic-ing return to SVC.
-99:	mrs	r2, cpsr
-	bic	r2, r2, #MODE_MASK
-	orr	r2, r2, #SVC_MODE
-THUMB(	orr	r2, r2, #PSR_T_BIT	)
-	msr	spsr_cxsf, r2
-	mrs	r1, ELR_hyp
-	ldr	r2, =panic
-	msr	ELR_hyp, r2
-	ldr	r0, =\panic_str
-	clrex				@ Clear exclusive monitor
-	eret
-.endm
-
-	.text
-
-	.align 5
-__kvm_hyp_vector:
-	.globl __kvm_hyp_vector
-
-	@ Hyp-mode exception vector
-	W(b)	hyp_reset
-	W(b)	hyp_undef
-	W(b)	hyp_svc
-	W(b)	hyp_pabt
-	W(b)	hyp_dabt
-	W(b)	hyp_hvc
-	W(b)	hyp_irq
-	W(b)	hyp_fiq
-
-	.align
-hyp_reset:
-	b	hyp_reset
-
-	.align
-hyp_undef:
-	bad_exception ARM_EXCEPTION_UNDEFINED, und_die_str
-
-	.align
-hyp_svc:
-	bad_exception ARM_EXCEPTION_HVC, svc_die_str
-
-	.align
-hyp_pabt:
-	bad_exception ARM_EXCEPTION_PREF_ABORT, pabt_die_str
-
-	.align
-hyp_dabt:
-	bad_exception ARM_EXCEPTION_DATA_ABORT, dabt_die_str
-
-	.align
-hyp_hvc:
-	/*
-	 * Getting here is either becuase of a trap from a guest or from calling
-	 * HVC from the host kernel, which means "switch to Hyp mode".
-	 */
-	push	{r0, r1, r2}
-
-	@ Check syndrome register
-	mrc	p15, 4, r1, c5, c2, 0	@ HSR
-	lsr	r0, r1, #HSR_EC_SHIFT
-	cmp	r0, #HSR_EC_HVC
-	bne	guest_trap		@ Not HVC instr.
-
-	/*
-	 * Let's check if the HVC came from VMID 0 and allow simple
-	 * switch to Hyp mode
-	 */
-	mrrc    p15, 6, r0, r2, c2
-	lsr     r2, r2, #16
-	and     r2, r2, #0xff
-	cmp     r2, #0
-	bne	guest_trap		@ Guest called HVC
-
-	/*
-	 * Getting here means host called HVC, we shift parameters and branch
-	 * to Hyp function.
-	 */
-	pop	{r0, r1, r2}
-
-	/* Check for __hyp_get_vectors */
-	cmp	r0, #-1
-	mrceq	p15, 4, r0, c12, c0, 0	@ get HVBAR
-	beq	1f
-
-	push	{lr}
-	mrs	lr, SPSR
-	push	{lr}
-
-	mov	lr, r0
-	mov	r0, r1
-	mov	r1, r2
-	mov	r2, r3
-
-THUMB(	orr	lr, #1)
-	blx	lr			@ Call the HYP function
-
-	pop	{lr}
-	msr	SPSR_csxf, lr
-	pop	{lr}
-1:	eret
-
-guest_trap:
-	load_vcpu			@ Load VCPU pointer to r0
-	str	r1, [vcpu, #VCPU_HSR]
-
-	@ Check if we need the fault information
-	lsr	r1, r1, #HSR_EC_SHIFT
-#ifdef CONFIG_VFPv3
-	cmp	r1, #HSR_EC_CP_0_13
-	beq	switch_to_guest_vfp
-#endif
-	cmp	r1, #HSR_EC_IABT
-	mrceq	p15, 4, r2, c6, c0, 2	@ HIFAR
-	beq	2f
-	cmp	r1, #HSR_EC_DABT
-	bne	1f
-	mrc	p15, 4, r2, c6, c0, 0	@ HDFAR
-
-2:	str	r2, [vcpu, #VCPU_HxFAR]
-
-	/*
-	 * B3.13.5 Reporting exceptions taken to the Non-secure PL2 mode:
-	 *
-	 * Abort on the stage 2 translation for a memory access from a
-	 * Non-secure PL1 or PL0 mode:
-	 *
-	 * For any Access flag fault or Translation fault, and also for any
-	 * Permission fault on the stage 2 translation of a memory access
-	 * made as part of a translation table walk for a stage 1 translation,
-	 * the HPFAR holds the IPA that caused the fault. Otherwise, the HPFAR
-	 * is UNKNOWN.
-	 */
-
-	/* Check for permission fault, and S1PTW */
-	mrc	p15, 4, r1, c5, c2, 0	@ HSR
-	and	r0, r1, #HSR_FSC_TYPE
-	cmp	r0, #FSC_PERM
-	tsteq	r1, #(1 << 7)		@ S1PTW
-	mrcne	p15, 4, r2, c6, c0, 4	@ HPFAR
-	bne	3f
-
-	/* Preserve PAR */
-	mrrc	p15, 0, r0, r1, c7	@ PAR
-	push	{r0, r1}
-
-	/* Resolve IPA using the xFAR */
-	mcr	p15, 0, r2, c7, c8, 0	@ ATS1CPR
-	isb
-	mrrc	p15, 0, r0, r1, c7	@ PAR
-	tst	r0, #1
-	bne	4f			@ Failed translation
-	ubfx	r2, r0, #12, #20
-	lsl	r2, r2, #4
-	orr	r2, r2, r1, lsl #24
-
-	/* Restore PAR */
-	pop	{r0, r1}
-	mcrr	p15, 0, r0, r1, c7	@ PAR
-
-3:	load_vcpu			@ Load VCPU pointer to r0
-	str	r2, [r0, #VCPU_HPFAR]
-
-1:	mov	r1, #ARM_EXCEPTION_HVC
-	b	__kvm_vcpu_return
-
-4:	pop	{r0, r1}		@ Failed translation, return to guest
-	mcrr	p15, 0, r0, r1, c7	@ PAR
-	clrex
-	pop	{r0, r1, r2}
-	eret
-
-/*
- * If VFPv3 support is not available, then we will not switch the VFP
- * registers; however cp10 and cp11 accesses will still trap and fallback
- * to the regular coprocessor emulation code, which currently will
- * inject an undefined exception to the guest.
- */
-#ifdef CONFIG_VFPv3
-switch_to_guest_vfp:
-	push	{r3-r7}
-
-	@ NEON/VFP used.  Turn on VFP access.
-	set_hcptr vmtrap, (HCPTR_TCP(10) | HCPTR_TCP(11))
-
-	@ Switch VFP/NEON hardware state to the guest's
-	add	r7, r0, #VCPU_VFP_HOST
-	ldr	r7, [r7]
-	store_vfp_state r7
-	add	r7, r0, #VCPU_VFP_GUEST
-	restore_vfp_state r7
-
-	pop	{r3-r7}
-	pop	{r0-r2}
-	clrex
-	eret
-#endif
-
-	.align
-hyp_irq:
-	push	{r0, r1, r2}
-	mov	r1, #ARM_EXCEPTION_IRQ
-	load_vcpu			@ Load VCPU pointer to r0
-	b	__kvm_vcpu_return
-
-	.align
-hyp_fiq:
-	b	hyp_fiq
-
-	.ltorg
-
-__kvm_hyp_code_end:
-	.globl	__kvm_hyp_code_end
-
-	.section ".rodata"
-
-und_die_str:
-	.ascii	"unexpected undefined exception in Hyp mode at: %#08x\n"
-pabt_die_str:
-	.ascii	"unexpected prefetch abort in Hyp mode at: %#08x\n"
-dabt_die_str:
-	.ascii	"unexpected data abort in Hyp mode at: %#08x\n"
-svc_die_str:
-	.ascii	"unexpected HVC/SVC trap in Hyp mode at: %#08x\n"
+ENDPROC(kvm_call_hyp)
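
Given the convention documented above (function pointer in r0, at most three arguments in r1-r3, 32-bit return value in r0), call sites are expected to look roughly like the following; the concrete callers live in arch/arm/kvm/arm.c and mmu.c rather than in this hunk:

	/* Illustrative call sites only; argument marshalling follows the comment above. */
	exit_code = kvm_call_hyp(__kvm_vcpu_run, vcpu);		/* one argument  */
	kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, kvm, ipa);	/* two arguments */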
diff --git a/arch/arm/kvm/interrupts_head.S b/arch/arm/kvm/interrupts_head.S
deleted file mode 100644
index 51a5950..0000000
--- a/arch/arm/kvm/interrupts_head.S
+++ /dev/null
@@ -1,648 +0,0 @@
-#include <linux/irqchip/arm-gic.h>
-#include <asm/assembler.h>
-
-#define VCPU_USR_REG(_reg_nr)	(VCPU_USR_REGS + (_reg_nr * 4))
-#define VCPU_USR_SP		(VCPU_USR_REG(13))
-#define VCPU_USR_LR		(VCPU_USR_REG(14))
-#define CP15_OFFSET(_cp15_reg_idx) (VCPU_CP15 + (_cp15_reg_idx * 4))
-
-/*
- * Many of these macros need to access the VCPU structure, which is always
- * held in r0. These macros should never clobber r1, as it is used to hold the
- * exception code on the return path (except of course the macro that switches
- * all the registers before the final jump to the VM).
- */
-vcpu	.req	r0		@ vcpu pointer always in r0
-
-/* Clobbers {r2-r6} */
-.macro store_vfp_state vfp_base
-	@ The VFPFMRX and VFPFMXR macros are the VMRS and VMSR instructions
-	VFPFMRX	r2, FPEXC
-	@ Make sure VFP is enabled so we can touch the registers.
-	orr	r6, r2, #FPEXC_EN
-	VFPFMXR	FPEXC, r6
-
-	VFPFMRX	r3, FPSCR
-	tst	r2, #FPEXC_EX		@ Check for VFP Subarchitecture
-	beq	1f
-	@ If FPEXC_EX is 0, then FPINST/FPINST2 reads are upredictable, so
-	@ we only need to save them if FPEXC_EX is set.
-	VFPFMRX r4, FPINST
-	tst	r2, #FPEXC_FP2V
-	VFPFMRX r5, FPINST2, ne		@ vmrsne
-	bic	r6, r2, #FPEXC_EX	@ FPEXC_EX disable
-	VFPFMXR	FPEXC, r6
-1:
-	VFPFSTMIA \vfp_base, r6		@ Save VFP registers
-	stm	\vfp_base, {r2-r5}	@ Save FPEXC, FPSCR, FPINST, FPINST2
-.endm
-
-/* Assume FPEXC_EN is on and FPEXC_EX is off, clobbers {r2-r6} */
-.macro restore_vfp_state vfp_base
-	VFPFLDMIA \vfp_base, r6		@ Load VFP registers
-	ldm	\vfp_base, {r2-r5}	@ Load FPEXC, FPSCR, FPINST, FPINST2
-
-	VFPFMXR FPSCR, r3
-	tst	r2, #FPEXC_EX		@ Check for VFP Subarchitecture
-	beq	1f
-	VFPFMXR FPINST, r4
-	tst	r2, #FPEXC_FP2V
-	VFPFMXR FPINST2, r5, ne
-1:
-	VFPFMXR FPEXC, r2	@ FPEXC	(last, in case !EN)
-.endm
-
-/* These are simply for the macros to work - value don't have meaning */
-.equ usr, 0
-.equ svc, 1
-.equ abt, 2
-.equ und, 3
-.equ irq, 4
-.equ fiq, 5
-
-.macro push_host_regs_mode mode
-	mrs	r2, SP_\mode
-	mrs	r3, LR_\mode
-	mrs	r4, SPSR_\mode
-	push	{r2, r3, r4}
-.endm
-
-/*
- * Store all host persistent registers on the stack.
- * Clobbers all registers, in all modes, except r0 and r1.
- */
-.macro save_host_regs
-	/* Hyp regs. Only ELR_hyp (SPSR_hyp already saved) */
-	mrs	r2, ELR_hyp
-	push	{r2}
-
-	/* usr regs */
-	push	{r4-r12}	@ r0-r3 are always clobbered
-	mrs	r2, SP_usr
-	mov	r3, lr
-	push	{r2, r3}
-
-	push_host_regs_mode svc
-	push_host_regs_mode abt
-	push_host_regs_mode und
-	push_host_regs_mode irq
-
-	/* fiq regs */
-	mrs	r2, r8_fiq
-	mrs	r3, r9_fiq
-	mrs	r4, r10_fiq
-	mrs	r5, r11_fiq
-	mrs	r6, r12_fiq
-	mrs	r7, SP_fiq
-	mrs	r8, LR_fiq
-	mrs	r9, SPSR_fiq
-	push	{r2-r9}
-.endm
-
-.macro pop_host_regs_mode mode
-	pop	{r2, r3, r4}
-	msr	SP_\mode, r2
-	msr	LR_\mode, r3
-	msr	SPSR_\mode, r4
-.endm
-
-/*
- * Restore all host registers from the stack.
- * Clobbers all registers, in all modes, except r0 and r1.
- */
-.macro restore_host_regs
-	pop	{r2-r9}
-	msr	r8_fiq, r2
-	msr	r9_fiq, r3
-	msr	r10_fiq, r4
-	msr	r11_fiq, r5
-	msr	r12_fiq, r6
-	msr	SP_fiq, r7
-	msr	LR_fiq, r8
-	msr	SPSR_fiq, r9
-
-	pop_host_regs_mode irq
-	pop_host_regs_mode und
-	pop_host_regs_mode abt
-	pop_host_regs_mode svc
-
-	pop	{r2, r3}
-	msr	SP_usr, r2
-	mov	lr, r3
-	pop	{r4-r12}
-
-	pop	{r2}
-	msr	ELR_hyp, r2
-.endm
-
-/*
- * Restore SP, LR and SPSR for a given mode. offset is the offset of
- * this mode's registers from the VCPU base.
- *
- * Assumes vcpu pointer in vcpu reg
- *
- * Clobbers r1, r2, r3, r4.
- */
-.macro restore_guest_regs_mode mode, offset
-	add	r1, vcpu, \offset
-	ldm	r1, {r2, r3, r4}
-	msr	SP_\mode, r2
-	msr	LR_\mode, r3
-	msr	SPSR_\mode, r4
-.endm
-
-/*
- * Restore all guest registers from the vcpu struct.
- *
- * Assumes vcpu pointer in vcpu reg
- *
- * Clobbers *all* registers.
- */
-.macro restore_guest_regs
-	restore_guest_regs_mode svc, #VCPU_SVC_REGS
-	restore_guest_regs_mode abt, #VCPU_ABT_REGS
-	restore_guest_regs_mode und, #VCPU_UND_REGS
-	restore_guest_regs_mode irq, #VCPU_IRQ_REGS
-
-	add	r1, vcpu, #VCPU_FIQ_REGS
-	ldm	r1, {r2-r9}
-	msr	r8_fiq, r2
-	msr	r9_fiq, r3
-	msr	r10_fiq, r4
-	msr	r11_fiq, r5
-	msr	r12_fiq, r6
-	msr	SP_fiq, r7
-	msr	LR_fiq, r8
-	msr	SPSR_fiq, r9
-
-	@ Load return state
-	ldr	r2, [vcpu, #VCPU_PC]
-	ldr	r3, [vcpu, #VCPU_CPSR]
-	msr	ELR_hyp, r2
-	msr	SPSR_cxsf, r3
-
-	@ Load user registers
-	ldr	r2, [vcpu, #VCPU_USR_SP]
-	ldr	r3, [vcpu, #VCPU_USR_LR]
-	msr	SP_usr, r2
-	mov	lr, r3
-	add	vcpu, vcpu, #(VCPU_USR_REGS)
-	ldm	vcpu, {r0-r12}
-.endm
-
-/*
- * Save SP, LR and SPSR for a given mode. offset is the offset of
- * this mode's registers from the VCPU base.
- *
- * Assumes vcpu pointer in vcpu reg
- *
- * Clobbers r2, r3, r4, r5.
- */
-.macro save_guest_regs_mode mode, offset
-	add	r2, vcpu, \offset
-	mrs	r3, SP_\mode
-	mrs	r4, LR_\mode
-	mrs	r5, SPSR_\mode
-	stm	r2, {r3, r4, r5}
-.endm
-
-/*
- * Save all guest registers to the vcpu struct
- * Expects guest's r0, r1, r2 on the stack.
- *
- * Assumes vcpu pointer in vcpu reg
- *
- * Clobbers r2, r3, r4, r5.
- */
-.macro save_guest_regs
-	@ Store usr registers
-	add	r2, vcpu, #VCPU_USR_REG(3)
-	stm	r2, {r3-r12}
-	add	r2, vcpu, #VCPU_USR_REG(0)
-	pop	{r3, r4, r5}		@ r0, r1, r2
-	stm	r2, {r3, r4, r5}
-	mrs	r2, SP_usr
-	mov	r3, lr
-	str	r2, [vcpu, #VCPU_USR_SP]
-	str	r3, [vcpu, #VCPU_USR_LR]
-
-	@ Store return state
-	mrs	r2, ELR_hyp
-	mrs	r3, spsr
-	str	r2, [vcpu, #VCPU_PC]
-	str	r3, [vcpu, #VCPU_CPSR]
-
-	@ Store other guest registers
-	save_guest_regs_mode svc, #VCPU_SVC_REGS
-	save_guest_regs_mode abt, #VCPU_ABT_REGS
-	save_guest_regs_mode und, #VCPU_UND_REGS
-	save_guest_regs_mode irq, #VCPU_IRQ_REGS
-.endm
-
-/* Reads cp15 registers from hardware and stores them in memory
- * @store_to_vcpu: If 0, registers are written in-order to the stack,
- * 		   otherwise to the VCPU struct pointed to by vcpup
- *
- * Assumes vcpu pointer in vcpu reg
- *
- * Clobbers r2 - r12
- */
-.macro read_cp15_state store_to_vcpu
-	mrc	p15, 0, r2, c1, c0, 0	@ SCTLR
-	mrc	p15, 0, r3, c1, c0, 2	@ CPACR
-	mrc	p15, 0, r4, c2, c0, 2	@ TTBCR
-	mrc	p15, 0, r5, c3, c0, 0	@ DACR
-	mrrc	p15, 0, r6, r7, c2	@ TTBR 0
-	mrrc	p15, 1, r8, r9, c2	@ TTBR 1
-	mrc	p15, 0, r10, c10, c2, 0	@ PRRR
-	mrc	p15, 0, r11, c10, c2, 1	@ NMRR
-	mrc	p15, 2, r12, c0, c0, 0	@ CSSELR
-
-	.if \store_to_vcpu == 0
-	push	{r2-r12}		@ Push CP15 registers
-	.else
-	str	r2, [vcpu, #CP15_OFFSET(c1_SCTLR)]
-	str	r3, [vcpu, #CP15_OFFSET(c1_CPACR)]
-	str	r4, [vcpu, #CP15_OFFSET(c2_TTBCR)]
-	str	r5, [vcpu, #CP15_OFFSET(c3_DACR)]
-	add	r2, vcpu, #CP15_OFFSET(c2_TTBR0)
-	strd	r6, r7, [r2]
-	add	r2, vcpu, #CP15_OFFSET(c2_TTBR1)
-	strd	r8, r9, [r2]
-	str	r10, [vcpu, #CP15_OFFSET(c10_PRRR)]
-	str	r11, [vcpu, #CP15_OFFSET(c10_NMRR)]
-	str	r12, [vcpu, #CP15_OFFSET(c0_CSSELR)]
-	.endif
-
-	mrc	p15, 0, r2, c13, c0, 1	@ CID
-	mrc	p15, 0, r3, c13, c0, 2	@ TID_URW
-	mrc	p15, 0, r4, c13, c0, 3	@ TID_URO
-	mrc	p15, 0, r5, c13, c0, 4	@ TID_PRIV
-	mrc	p15, 0, r6, c5, c0, 0	@ DFSR
-	mrc	p15, 0, r7, c5, c0, 1	@ IFSR
-	mrc	p15, 0, r8, c5, c1, 0	@ ADFSR
-	mrc	p15, 0, r9, c5, c1, 1	@ AIFSR
-	mrc	p15, 0, r10, c6, c0, 0	@ DFAR
-	mrc	p15, 0, r11, c6, c0, 2	@ IFAR
-	mrc	p15, 0, r12, c12, c0, 0	@ VBAR
-
-	.if \store_to_vcpu == 0
-	push	{r2-r12}		@ Push CP15 registers
-	.else
-	str	r2, [vcpu, #CP15_OFFSET(c13_CID)]
-	str	r3, [vcpu, #CP15_OFFSET(c13_TID_URW)]
-	str	r4, [vcpu, #CP15_OFFSET(c13_TID_URO)]
-	str	r5, [vcpu, #CP15_OFFSET(c13_TID_PRIV)]
-	str	r6, [vcpu, #CP15_OFFSET(c5_DFSR)]
-	str	r7, [vcpu, #CP15_OFFSET(c5_IFSR)]
-	str	r8, [vcpu, #CP15_OFFSET(c5_ADFSR)]
-	str	r9, [vcpu, #CP15_OFFSET(c5_AIFSR)]
-	str	r10, [vcpu, #CP15_OFFSET(c6_DFAR)]
-	str	r11, [vcpu, #CP15_OFFSET(c6_IFAR)]
-	str	r12, [vcpu, #CP15_OFFSET(c12_VBAR)]
-	.endif
-
-	mrc	p15, 0, r2, c14, c1, 0	@ CNTKCTL
-	mrrc	p15, 0, r4, r5, c7	@ PAR
-	mrc	p15, 0, r6, c10, c3, 0	@ AMAIR0
-	mrc	p15, 0, r7, c10, c3, 1	@ AMAIR1
-
-	.if \store_to_vcpu == 0
-	push	{r2,r4-r7}
-	.else
-	str	r2, [vcpu, #CP15_OFFSET(c14_CNTKCTL)]
-	add	r12, vcpu, #CP15_OFFSET(c7_PAR)
-	strd	r4, r5, [r12]
-	str	r6, [vcpu, #CP15_OFFSET(c10_AMAIR0)]
-	str	r7, [vcpu, #CP15_OFFSET(c10_AMAIR1)]
-	.endif
-.endm
-
-/*
- * Reads cp15 registers from memory and writes them to hardware
- * @read_from_vcpu: If 0, registers are read in-order from the stack,
- *		    otherwise from the VCPU struct pointed to by vcpup
- *
- * Assumes vcpu pointer in vcpu reg
- */
-.macro write_cp15_state read_from_vcpu
-	.if \read_from_vcpu == 0
-	pop	{r2,r4-r7}
-	.else
-	ldr	r2, [vcpu, #CP15_OFFSET(c14_CNTKCTL)]
-	add	r12, vcpu, #CP15_OFFSET(c7_PAR)
-	ldrd	r4, r5, [r12]
-	ldr	r6, [vcpu, #CP15_OFFSET(c10_AMAIR0)]
-	ldr	r7, [vcpu, #CP15_OFFSET(c10_AMAIR1)]
-	.endif
-
-	mcr	p15, 0, r2, c14, c1, 0	@ CNTKCTL
-	mcrr	p15, 0, r4, r5, c7	@ PAR
-	mcr	p15, 0, r6, c10, c3, 0	@ AMAIR0
-	mcr	p15, 0, r7, c10, c3, 1	@ AMAIR1
-
-	.if \read_from_vcpu == 0
-	pop	{r2-r12}
-	.else
-	ldr	r2, [vcpu, #CP15_OFFSET(c13_CID)]
-	ldr	r3, [vcpu, #CP15_OFFSET(c13_TID_URW)]
-	ldr	r4, [vcpu, #CP15_OFFSET(c13_TID_URO)]
-	ldr	r5, [vcpu, #CP15_OFFSET(c13_TID_PRIV)]
-	ldr	r6, [vcpu, #CP15_OFFSET(c5_DFSR)]
-	ldr	r7, [vcpu, #CP15_OFFSET(c5_IFSR)]
-	ldr	r8, [vcpu, #CP15_OFFSET(c5_ADFSR)]
-	ldr	r9, [vcpu, #CP15_OFFSET(c5_AIFSR)]
-	ldr	r10, [vcpu, #CP15_OFFSET(c6_DFAR)]
-	ldr	r11, [vcpu, #CP15_OFFSET(c6_IFAR)]
-	ldr	r12, [vcpu, #CP15_OFFSET(c12_VBAR)]
-	.endif
-
-	mcr	p15, 0, r2, c13, c0, 1	@ CID
-	mcr	p15, 0, r3, c13, c0, 2	@ TID_URW
-	mcr	p15, 0, r4, c13, c0, 3	@ TID_URO
-	mcr	p15, 0, r5, c13, c0, 4	@ TID_PRIV
-	mcr	p15, 0, r6, c5, c0, 0	@ DFSR
-	mcr	p15, 0, r7, c5, c0, 1	@ IFSR
-	mcr	p15, 0, r8, c5, c1, 0	@ ADFSR
-	mcr	p15, 0, r9, c5, c1, 1	@ AIFSR
-	mcr	p15, 0, r10, c6, c0, 0	@ DFAR
-	mcr	p15, 0, r11, c6, c0, 2	@ IFAR
-	mcr	p15, 0, r12, c12, c0, 0	@ VBAR
-
-	.if \read_from_vcpu == 0
-	pop	{r2-r12}
-	.else
-	ldr	r2, [vcpu, #CP15_OFFSET(c1_SCTLR)]
-	ldr	r3, [vcpu, #CP15_OFFSET(c1_CPACR)]
-	ldr	r4, [vcpu, #CP15_OFFSET(c2_TTBCR)]
-	ldr	r5, [vcpu, #CP15_OFFSET(c3_DACR)]
-	add	r12, vcpu, #CP15_OFFSET(c2_TTBR0)
-	ldrd	r6, r7, [r12]
-	add	r12, vcpu, #CP15_OFFSET(c2_TTBR1)
-	ldrd	r8, r9, [r12]
-	ldr	r10, [vcpu, #CP15_OFFSET(c10_PRRR)]
-	ldr	r11, [vcpu, #CP15_OFFSET(c10_NMRR)]
-	ldr	r12, [vcpu, #CP15_OFFSET(c0_CSSELR)]
-	.endif
-
-	mcr	p15, 0, r2, c1, c0, 0	@ SCTLR
-	mcr	p15, 0, r3, c1, c0, 2	@ CPACR
-	mcr	p15, 0, r4, c2, c0, 2	@ TTBCR
-	mcr	p15, 0, r5, c3, c0, 0	@ DACR
-	mcrr	p15, 0, r6, r7, c2	@ TTBR 0
-	mcrr	p15, 1, r8, r9, c2	@ TTBR 1
-	mcr	p15, 0, r10, c10, c2, 0	@ PRRR
-	mcr	p15, 0, r11, c10, c2, 1	@ NMRR
-	mcr	p15, 2, r12, c0, c0, 0	@ CSSELR
-.endm
-
-/*
- * Save the VGIC CPU state into memory
- *
- * Assumes vcpu pointer in vcpu reg
- */
-.macro save_vgic_state
-	/* Get VGIC VCTRL base into r2 */
-	ldr	r2, [vcpu, #VCPU_KVM]
-	ldr	r2, [r2, #KVM_VGIC_VCTRL]
-	cmp	r2, #0
-	beq	2f
-
-	/* Compute the address of struct vgic_cpu */
-	add	r11, vcpu, #VCPU_VGIC_CPU
-
-	/* Save all interesting registers */
-	ldr	r4, [r2, #GICH_VMCR]
-	ldr	r5, [r2, #GICH_MISR]
-	ldr	r6, [r2, #GICH_EISR0]
-	ldr	r7, [r2, #GICH_EISR1]
-	ldr	r8, [r2, #GICH_ELRSR0]
-	ldr	r9, [r2, #GICH_ELRSR1]
-	ldr	r10, [r2, #GICH_APR]
-ARM_BE8(rev	r4, r4	)
-ARM_BE8(rev	r5, r5	)
-ARM_BE8(rev	r6, r6	)
-ARM_BE8(rev	r7, r7	)
-ARM_BE8(rev	r8, r8	)
-ARM_BE8(rev	r9, r9	)
-ARM_BE8(rev	r10, r10	)
-
-	str	r4, [r11, #VGIC_V2_CPU_VMCR]
-	str	r5, [r11, #VGIC_V2_CPU_MISR]
-#ifdef CONFIG_CPU_ENDIAN_BE8
-	str	r6, [r11, #(VGIC_V2_CPU_EISR + 4)]
-	str	r7, [r11, #VGIC_V2_CPU_EISR]
-	str	r8, [r11, #(VGIC_V2_CPU_ELRSR + 4)]
-	str	r9, [r11, #VGIC_V2_CPU_ELRSR]
-#else
-	str	r6, [r11, #VGIC_V2_CPU_EISR]
-	str	r7, [r11, #(VGIC_V2_CPU_EISR + 4)]
-	str	r8, [r11, #VGIC_V2_CPU_ELRSR]
-	str	r9, [r11, #(VGIC_V2_CPU_ELRSR + 4)]
-#endif
-	str	r10, [r11, #VGIC_V2_CPU_APR]
-
-	/* Clear GICH_HCR */
-	mov	r5, #0
-	str	r5, [r2, #GICH_HCR]
-
-	/* Save list registers */
-	add	r2, r2, #GICH_LR0
-	add	r3, r11, #VGIC_V2_CPU_LR
-	ldr	r4, [r11, #VGIC_CPU_NR_LR]
-1:	ldr	r6, [r2], #4
-ARM_BE8(rev	r6, r6	)
-	str	r6, [r3], #4
-	subs	r4, r4, #1
-	bne	1b
-2:
-.endm
-
-/*
- * Restore the VGIC CPU state from memory
- *
- * Assumes vcpu pointer in vcpu reg
- */
-.macro restore_vgic_state
-	/* Get VGIC VCTRL base into r2 */
-	ldr	r2, [vcpu, #VCPU_KVM]
-	ldr	r2, [r2, #KVM_VGIC_VCTRL]
-	cmp	r2, #0
-	beq	2f
-
-	/* Compute the address of struct vgic_cpu */
-	add	r11, vcpu, #VCPU_VGIC_CPU
-
-	/* We only restore a minimal set of registers */
-	ldr	r3, [r11, #VGIC_V2_CPU_HCR]
-	ldr	r4, [r11, #VGIC_V2_CPU_VMCR]
-	ldr	r8, [r11, #VGIC_V2_CPU_APR]
-ARM_BE8(rev	r3, r3	)
-ARM_BE8(rev	r4, r4	)
-ARM_BE8(rev	r8, r8	)
-
-	str	r3, [r2, #GICH_HCR]
-	str	r4, [r2, #GICH_VMCR]
-	str	r8, [r2, #GICH_APR]
-
-	/* Restore list registers */
-	add	r2, r2, #GICH_LR0
-	add	r3, r11, #VGIC_V2_CPU_LR
-	ldr	r4, [r11, #VGIC_CPU_NR_LR]
-1:	ldr	r6, [r3], #4
-ARM_BE8(rev	r6, r6  )
-	str	r6, [r2], #4
-	subs	r4, r4, #1
-	bne	1b
-2:
-.endm
-
-#define CNTHCTL_PL1PCTEN	(1 << 0)
-#define CNTHCTL_PL1PCEN		(1 << 1)
-
-/*
- * Save the timer state onto the VCPU and allow physical timer/counter access
- * for the host.
- *
- * Assumes vcpu pointer in vcpu reg
- * Clobbers r2-r5
- */
-.macro save_timer_state
-	ldr	r4, [vcpu, #VCPU_KVM]
-	ldr	r2, [r4, #KVM_TIMER_ENABLED]
-	cmp	r2, #0
-	beq	1f
-
-	mrc	p15, 0, r2, c14, c3, 1	@ CNTV_CTL
-	str	r2, [vcpu, #VCPU_TIMER_CNTV_CTL]
-
-	isb
-
-	mrrc	p15, 3, rr_lo_hi(r2, r3), c14	@ CNTV_CVAL
-	ldr	r4, =VCPU_TIMER_CNTV_CVAL
-	add	r5, vcpu, r4
-	strd	r2, r3, [r5]
-
-	@ Ensure host CNTVCT == CNTPCT
-	mov	r2, #0
-	mcrr	p15, 4, r2, r2, c14	@ CNTVOFF
-
-1:
-	mov	r2, #0			@ Clear ENABLE
-	mcr	p15, 0, r2, c14, c3, 1	@ CNTV_CTL
-
-	@ Allow physical timer/counter access for the host
-	mrc	p15, 4, r2, c14, c1, 0	@ CNTHCTL
-	orr	r2, r2, #(CNTHCTL_PL1PCEN | CNTHCTL_PL1PCTEN)
-	mcr	p15, 4, r2, c14, c1, 0	@ CNTHCTL
-.endm
-
-/*
- * Load the timer state from the VCPU and deny physical timer/counter access
- * for the host.
- *
- * Assumes vcpu pointer in vcpu reg
- * Clobbers r2-r5
- */
-.macro restore_timer_state
-	@ Disallow physical timer access for the guest
-	@ Physical counter access is allowed
-	mrc	p15, 4, r2, c14, c1, 0	@ CNTHCTL
-	orr	r2, r2, #CNTHCTL_PL1PCTEN
-	bic	r2, r2, #CNTHCTL_PL1PCEN
-	mcr	p15, 4, r2, c14, c1, 0	@ CNTHCTL
-
-	ldr	r4, [vcpu, #VCPU_KVM]
-	ldr	r2, [r4, #KVM_TIMER_ENABLED]
-	cmp	r2, #0
-	beq	1f
-
-	ldr	r2, [r4, #KVM_TIMER_CNTVOFF]
-	ldr	r3, [r4, #(KVM_TIMER_CNTVOFF + 4)]
-	mcrr	p15, 4, rr_lo_hi(r2, r3), c14	@ CNTVOFF
-
-	ldr	r4, =VCPU_TIMER_CNTV_CVAL
-	add	r5, vcpu, r4
-	ldrd	r2, r3, [r5]
-	mcrr	p15, 3, rr_lo_hi(r2, r3), c14	@ CNTV_CVAL
-	isb
-
-	ldr	r2, [vcpu, #VCPU_TIMER_CNTV_CTL]
-	and	r2, r2, #3
-	mcr	p15, 0, r2, c14, c3, 1	@ CNTV_CTL
-1:
-.endm
-
-.equ vmentry,	0
-.equ vmexit,	1
-
-/* Configures the HSTR (Hyp System Trap Register) on entry/return
- * (hardware reset value is 0) */
-.macro set_hstr operation
-	mrc	p15, 4, r2, c1, c1, 3
-	ldr	r3, =HSTR_T(15)
-	.if \operation == vmentry
-	orr	r2, r2, r3		@ Trap CR{15}
-	.else
-	bic	r2, r2, r3		@ Don't trap any CRx accesses
-	.endif
-	mcr	p15, 4, r2, c1, c1, 3
-.endm
-
-/* Configures the HCPTR (Hyp Coprocessor Trap Register) on entry/return
- * (hardware reset value is 0). Keep previous value in r2.
- * An ISB is emited on vmexit/vmtrap, but executed on vmexit only if
- * VFP wasn't already enabled (always executed on vmtrap).
- * If a label is specified with vmexit, it is branched to if VFP wasn't
- * enabled.
- */
-.macro set_hcptr operation, mask, label = none
-	mrc	p15, 4, r2, c1, c1, 2
-	ldr	r3, =\mask
-	.if \operation == vmentry
-	orr	r3, r2, r3		@ Trap coproc-accesses defined in mask
-	.else
-	bic	r3, r2, r3		@ Don't trap defined coproc-accesses
-	.endif
-	mcr	p15, 4, r3, c1, c1, 2
-	.if \operation != vmentry
-	.if \operation == vmexit
-	tst	r2, #(HCPTR_TCP(10) | HCPTR_TCP(11))
-	beq	1f
-	.endif
-	isb
-	.if \label != none
-	b	\label
-	.endif
-1:
-	.endif
-.endm
-
-/* Configures the HDCR (Hyp Debug Configuration Register) on entry/return
- * (hardware reset value is 0) */
-.macro set_hdcr operation
-	mrc	p15, 4, r2, c1, c1, 1
-	ldr	r3, =(HDCR_TPM|HDCR_TPMCR)
-	.if \operation == vmentry
-	orr	r2, r2, r3		@ Trap some perfmon accesses
-	.else
-	bic	r2, r2, r3		@ Don't trap any perfmon accesses
-	.endif
-	mcr	p15, 4, r2, c1, c1, 1
-.endm
-
-/* Enable/Disable: stage-2 trans., trap interrupts, trap wfi, trap smc */
-.macro configure_hyp_role operation
-	.if \operation == vmentry
-	ldr	r2, [vcpu, #VCPU_HCR]
-	ldr	r3, [vcpu, #VCPU_IRQ_LINES]
-	orr	r2, r2, r3
-	.else
-	mov	r2, #0
-	.endif
-	mcr	p15, 4, r2, c1, c1, 0	@ HCR
-.endm
-
-.macro load_vcpu
-	mrc	p15, 4, vcpu, c13, c0, 2	@ HTPIDR
-.endm
diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
index aba61fd..58dbd5c 100644
--- a/arch/arm/kvm/mmu.c
+++ b/arch/arm/kvm/mmu.c
@@ -28,6 +28,7 @@
 #include <asm/kvm_mmio.h>
 #include <asm/kvm_asm.h>
 #include <asm/kvm_emulate.h>
+#include <asm/virt.h>
 
 #include "trace.h"
 
@@ -598,6 +599,9 @@
 	unsigned long start = KERN_TO_HYP((unsigned long)from);
 	unsigned long end = KERN_TO_HYP((unsigned long)to);
 
+	if (is_kernel_in_hyp_mode())
+		return 0;
+
 	start = start & PAGE_MASK;
 	end = PAGE_ALIGN(end);
 
@@ -630,6 +634,9 @@
 	unsigned long start = KERN_TO_HYP((unsigned long)from);
 	unsigned long end = KERN_TO_HYP((unsigned long)to);
 
+	if (is_kernel_in_hyp_mode())
+		return 0;
+
 	/* Check for a valid kernel IO mapping */
 	if (!is_vmalloc_addr(from) || !is_vmalloc_addr(to - 1))
 		return -EINVAL;
@@ -1431,6 +1438,22 @@
 		}
 
 		/*
+		 * Check for a cache maintenance operation. Since we
+		 * ended up here, we know it is outside of any memory
+		 * slot. But we can't find out if that is for a device,
+		 * or if the guest is just being stupid. The only thing
+		 * we know for sure is that this range cannot be cached.
+		 *
+		 * So let's assume that the guest is just being
+		 * cautious, and skip the instruction.
+		 */
+		if (kvm_vcpu_dabt_is_cm(vcpu)) {
+			kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
+			ret = 1;
+			goto out_unlock;
+		}
+
+		/*
 		 * The IPA is reported as [MAX:12], so we need to
 		 * complement it with the bottom 12 bits from the
 		 * faulting VA. This is always 12 bits, irrespective
diff --git a/arch/arm/kvm/reset.c b/arch/arm/kvm/reset.c
index eeb8585..0048b5a 100644
--- a/arch/arm/kvm/reset.c
+++ b/arch/arm/kvm/reset.c
@@ -71,7 +71,7 @@
 	}
 
 	/* Reset core registers */
-	memcpy(&vcpu->arch.regs, reset_regs, sizeof(vcpu->arch.regs));
+	memcpy(&vcpu->arch.ctxt.gp_regs, reset_regs, sizeof(vcpu->arch.ctxt.gp_regs));
 
 	/* Reset CP15 registers */
 	kvm_reset_coprocs(vcpu);
diff --git a/arch/arm/mach-mvebu/Kconfig b/arch/arm/mach-mvebu/Kconfig
index 64e3d2c..b003e3a 100644
--- a/arch/arm/mach-mvebu/Kconfig
+++ b/arch/arm/mach-mvebu/Kconfig
@@ -3,7 +3,6 @@
 	depends on ARCH_MULTI_V7 || ARCH_MULTI_V5
 	select ARCH_SUPPORTS_BIG_ENDIAN
 	select CLKSRC_MMIO
-	select GENERIC_IRQ_CHIP
 	select PINCTRL
 	select PLAT_ORION
 	select SOC_BUS
@@ -29,6 +28,7 @@
 	bool "Marvell Armada 370 boards"
 	depends on ARCH_MULTI_V7
 	select ARMADA_370_CLK
+	select ARMADA_370_XP_IRQ
 	select CPU_PJ4B
 	select MACH_MVEBU_V7
 	select PINCTRL_ARMADA_370
@@ -39,6 +39,7 @@
 config MACH_ARMADA_375
 	bool "Marvell Armada 375 boards"
 	depends on ARCH_MULTI_V7
+	select ARMADA_370_XP_IRQ
 	select ARM_ERRATA_720789
 	select ARM_ERRATA_753970
 	select ARM_GIC
@@ -58,6 +59,7 @@
 	select ARM_ERRATA_720789
 	select ARM_ERRATA_753970
 	select ARM_GIC
+	select ARMADA_370_XP_IRQ
 	select ARMADA_38X_CLK
 	select HAVE_ARM_SCU
 	select HAVE_ARM_TWD if SMP
@@ -72,6 +74,7 @@
 	bool "Marvell Armada 39x boards"
 	depends on ARCH_MULTI_V7
 	select ARM_GIC
+	select ARMADA_370_XP_IRQ
 	select ARMADA_39X_CLK
 	select CACHE_L2X0
 	select HAVE_ARM_SCU
@@ -86,6 +89,7 @@
 config MACH_ARMADA_XP
 	bool "Marvell Armada XP boards"
 	depends on ARCH_MULTI_V7
+	select ARMADA_370_XP_IRQ
 	select ARMADA_XP_CLK
 	select CPU_PJ4B
 	select MACH_MVEBU_V7
diff --git a/arch/arm/mach-socfpga/Makefile b/arch/arm/mach-socfpga/Makefile
index b8f9e23..ed15db1 100644
--- a/arch/arm/mach-socfpga/Makefile
+++ b/arch/arm/mach-socfpga/Makefile
@@ -5,3 +5,5 @@
 obj-y					:= socfpga.o
 obj-$(CONFIG_SMP)	+= headsmp.o platsmp.o
 obj-$(CONFIG_SOCFPGA_SUSPEND)	+= pm.o self-refresh.o
+obj-$(CONFIG_EDAC_ALTERA_L2C)	+= l2_cache.o
+obj-$(CONFIG_EDAC_ALTERA_OCRAM)	+= ocram.o
diff --git a/arch/arm/mach-socfpga/core.h b/arch/arm/mach-socfpga/core.h
index 5bc6ea8..575195b 100644
--- a/arch/arm/mach-socfpga/core.h
+++ b/arch/arm/mach-socfpga/core.h
@@ -36,6 +36,8 @@
 
 extern void socfpga_init_clocks(void);
 extern void socfpga_sysmgr_init(void);
+void socfpga_init_l2_ecc(void);
+void socfpga_init_ocram_ecc(void);
 
 extern void __iomem *sys_manager_base_addr;
 extern void __iomem *rst_manager_base_addr;
diff --git a/arch/arm/mach-socfpga/l2_cache.c b/arch/arm/mach-socfpga/l2_cache.c
new file mode 100644
index 0000000..e3907ab
--- /dev/null
+++ b/arch/arm/mach-socfpga/l2_cache.c
@@ -0,0 +1,41 @@
+/*
+ * Copyright Altera Corporation (C) 2016. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+#include <linux/io.h>
+#include <linux/of_platform.h>
+#include <linux/of_address.h>
+
+void socfpga_init_l2_ecc(void)
+{
+	struct device_node *np;
+	void __iomem *mapped_l2_edac_addr;
+
+	np = of_find_compatible_node(NULL, NULL, "altr,socfpga-l2-ecc");
+	if (!np) {
+		pr_err("Unable to find socfpga-l2-ecc in dtb\n");
+		return;
+	}
+
+	mapped_l2_edac_addr = of_iomap(np, 0);
+	of_node_put(np);
+	if (!mapped_l2_edac_addr) {
+		pr_err("Unable to find L2 ECC mapping in dtb\n");
+		return;
+	}
+
+	/* Enable ECC */
+	writel(0x01, mapped_l2_edac_addr);
+	iounmap(mapped_l2_edac_addr);
+}
diff --git a/arch/arm/mach-socfpga/ocram.c b/arch/arm/mach-socfpga/ocram.c
new file mode 100644
index 0000000..60ec643
--- /dev/null
+++ b/arch/arm/mach-socfpga/ocram.c
@@ -0,0 +1,49 @@
+/*
+ * Copyright Altera Corporation (C) 2016. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+#include <linux/io.h>
+#include <linux/genalloc.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+
+#define ALTR_OCRAM_CLEAR_ECC          0x00000018
+#define ALTR_OCRAM_ECC_EN             0x00000019
+
+void socfpga_init_ocram_ecc(void)
+{
+	struct device_node *np;
+	void __iomem *mapped_ocr_edac_addr;
+
+	/* Find the OCRAM EDAC device tree node */
+	np = of_find_compatible_node(NULL, NULL, "altr,socfpga-ocram-ecc");
+	if (!np) {
+		pr_err("Unable to find socfpga-ocram-ecc\n");
+		return;
+	}
+
+	mapped_ocr_edac_addr = of_iomap(np, 0);
+	of_node_put(np);
+	if (!mapped_ocr_edac_addr) {
+		pr_err("Unable to map OCRAM ecc regs.\n");
+		return;
+	}
+
+	/* Clear any pending OCRAM ECC interrupts, then enable ECC */
+	writel(ALTR_OCRAM_CLEAR_ECC, mapped_ocr_edac_addr);
+	writel(ALTR_OCRAM_ECC_EN, mapped_ocr_edac_addr);
+
+	iounmap(mapped_ocr_edac_addr);
+}
diff --git a/arch/arm/mach-socfpga/socfpga.c b/arch/arm/mach-socfpga/socfpga.c
index a1c0efa..7e0aad2 100644
--- a/arch/arm/mach-socfpga/socfpga.c
+++ b/arch/arm/mach-socfpga/socfpga.c
@@ -59,6 +59,11 @@
 {
 	irqchip_init();
 	socfpga_sysmgr_init();
+	if (IS_ENABLED(CONFIG_EDAC_ALTERA_L2C))
+		socfpga_init_l2_ecc();
+
+	if (IS_ENABLED(CONFIG_EDAC_ALTERA_OCRAM))
+		socfpga_init_ocram_ecc();
 }
 
 static void socfpga_cyclone5_restart(enum reboot_mode mode, const char *cmd)
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 8cc6228..cf118d9 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -750,6 +750,19 @@
 	  not support these instructions and requires the kernel to be
 	  built with binutils >= 2.25.
 
+config ARM64_VHE
+	bool "Enable support for Virtualization Host Extensions (VHE)"
+	default y
+	help
+	  Virtualization Host Extensions (VHE) allow the kernel to run
+	  directly at EL2 (instead of EL1) on processors that support
+	  it. This leads to better performance for KVM, as it reduces
+	  the cost of the world switch.
+
+	  Selecting this option allows the VHE feature to be detected
+	  at runtime, and does not affect processors that do not
+	  implement this feature.
+
 endmenu
 
 endmenu
diff --git a/arch/arm64/boot/dts/apm/apm-storm.dtsi b/arch/arm64/boot/dts/apm/apm-storm.dtsi
index fe30f76..3e40bd4 100644
--- a/arch/arm64/boot/dts/apm/apm-storm.dtsi
+++ b/arch/arm64/boot/dts/apm/apm-storm.dtsi
@@ -493,6 +493,11 @@
 			reg = <0x0 0x1054a000 0x0 0x20>;
 		};
 
+		rb: rb@7e000000 {
+			compatible = "apm,xgene-rb", "syscon";
+			reg = <0x0 0x7e000000 0x0 0x10>;
+		};
+
 		edac@78800000 {
 			compatible = "apm,xgene-edac";
 			#address-cells = <2>;
@@ -502,6 +507,7 @@
 			regmap-mcba = <&mcba>;
 			regmap-mcbb = <&mcbb>;
 			regmap-efuse = <&efuse>;
+			regmap-rb = <&rb>;
 			reg = <0x0 0x78800000 0x0 0x100>;
 			interrupts = <0x0 0x20 0x4>,
 				     <0x0 0x21 0x4>,
diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
index 8f271b8..a5c769b 100644
--- a/arch/arm64/include/asm/cpufeature.h
+++ b/arch/arm64/include/asm/cpufeature.h
@@ -30,8 +30,12 @@
 #define ARM64_HAS_LSE_ATOMICS			5
 #define ARM64_WORKAROUND_CAVIUM_23154		6
 #define ARM64_WORKAROUND_834220			7
+/* #define ARM64_HAS_NO_HW_PREFETCH		8 */
+/* #define ARM64_HAS_UAO			9 */
+/* #define ARM64_ALT_PAN_NOT_UAO		10 */
+#define ARM64_HAS_VIRT_HOST_EXTN		11
 
-#define ARM64_NCAPS				8
+#define ARM64_NCAPS				12
 
 #ifndef __ASSEMBLY__
 
diff --git a/arch/arm64/include/asm/hw_breakpoint.h b/arch/arm64/include/asm/hw_breakpoint.h
index 9732908..115ea2a 100644
--- a/arch/arm64/include/asm/hw_breakpoint.h
+++ b/arch/arm64/include/asm/hw_breakpoint.h
@@ -18,6 +18,7 @@
 
 #include <asm/cputype.h>
 #include <asm/cpufeature.h>
+#include <asm/virt.h>
 
 #ifdef __KERNEL__
 
@@ -35,10 +36,21 @@
 	struct arch_hw_breakpoint_ctrl ctrl;
 };
 
+/* Privilege Levels */
+#define AARCH64_BREAKPOINT_EL1	1
+#define AARCH64_BREAKPOINT_EL0	2
+
+#define DBG_HMC_HYP		(1 << 13)
+
 static inline u32 encode_ctrl_reg(struct arch_hw_breakpoint_ctrl ctrl)
 {
-	return (ctrl.len << 5) | (ctrl.type << 3) | (ctrl.privilege << 1) |
+	u32 val = (ctrl.len << 5) | (ctrl.type << 3) | (ctrl.privilege << 1) |
 		ctrl.enabled;
+
+	if (is_kernel_in_hyp_mode() && ctrl.privilege == AARCH64_BREAKPOINT_EL1)
+		val |= DBG_HMC_HYP;
+
+	return val;
 }
 
 static inline void decode_ctrl_reg(u32 reg,
@@ -61,10 +73,6 @@
 #define ARM_BREAKPOINT_STORE	2
 #define AARCH64_ESR_ACCESS_MASK	(1 << 6)
 
-/* Privilege Levels */
-#define AARCH64_BREAKPOINT_EL1	1
-#define AARCH64_BREAKPOINT_EL0	2
-
 /* Lengths */
 #define ARM_BREAKPOINT_LEN_1	0x1
 #define ARM_BREAKPOINT_LEN_2	0x3
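
Note on the DBG_HMC_HYP change above: with VHE, "kernel" breakpoints are
programmed for a kernel that actually executes at EL2, so the higher-mode
control (HMC) bit has to be set for them to fire. A minimal standalone sketch
of the resulting encoding (the len/type values are illustrative):

    /* len = 0xf (4-byte byte-address select), type = execute (0),
     * privilege = EL1, enabled = 1 */
    u32 val = (0xf << 5) | (0 << 3) | (AARCH64_BREAKPOINT_EL1 << 1) | 1; /* 0x1e3 */

    if (is_kernel_in_hyp_mode())    /* kernel runs at EL2 (VHE) */
        val |= DBG_HMC_HYP;         /* bit 13 set -> 0x21e3 */
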
diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h
index d201d4b..b56a0a8 100644
--- a/arch/arm64/include/asm/kvm_arm.h
+++ b/arch/arm64/include/asm/kvm_arm.h
@@ -23,6 +23,7 @@
 #include <asm/types.h>
 
 /* Hyp Configuration Register (HCR) bits */
+#define HCR_E2H		(UL(1) << 34)
 #define HCR_ID		(UL(1) << 33)
 #define HCR_CD		(UL(1) << 32)
 #define HCR_RW_SHIFT	31
@@ -81,7 +82,7 @@
 			 HCR_AMO | HCR_SWIO | HCR_TIDCP | HCR_RW)
 #define HCR_VIRT_EXCP_MASK (HCR_VA | HCR_VI | HCR_VF)
 #define HCR_INT_OVERRIDE   (HCR_FMO | HCR_IMO)
-
+#define HCR_HOST_VHE_FLAGS (HCR_RW | HCR_TGE | HCR_E2H)
 
 /* Hyp System Control Register (SCTLR_EL2) bits */
 #define SCTLR_EL2_EE	(1 << 25)
@@ -216,4 +217,7 @@
 	ECN(SOFTSTP_CUR), ECN(WATCHPT_LOW), ECN(WATCHPT_CUR), \
 	ECN(BKPT32), ECN(VECTOR32), ECN(BRK64)
 
+#define CPACR_EL1_FPEN		(3 << 20)
+#define CPACR_EL1_TTA		(1 << 28)
+
 #endif /* __ARM64_KVM_ARM_H__ */
diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h
index 52b777b..2d02ba6 100644
--- a/arch/arm64/include/asm/kvm_asm.h
+++ b/arch/arm64/include/asm/kvm_asm.h
@@ -35,9 +35,6 @@
 
 extern char __kvm_hyp_vector[];
 
-#define	__kvm_hyp_code_start	__hyp_text_start
-#define	__kvm_hyp_code_end	__hyp_text_end
-
 extern void __kvm_flush_vm_context(void);
 extern void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa);
 extern void __kvm_tlb_flush_vmid(struct kvm *kvm);
@@ -45,9 +42,12 @@
 extern int __kvm_vcpu_run(struct kvm_vcpu *vcpu);
 
 extern u64 __vgic_v3_get_ich_vtr_el2(void);
+extern void __vgic_v3_init_lrs(void);
 
 extern u32 __kvm_get_mdcr_el2(void);
 
+extern void __init_stage2_translation(void);
+
 #endif
 
 #endif /* __ARM_KVM_ASM_H__ */
diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
index 779a587..40bc168 100644
--- a/arch/arm64/include/asm/kvm_emulate.h
+++ b/arch/arm64/include/asm/kvm_emulate.h
@@ -29,6 +29,7 @@
 #include <asm/kvm_mmio.h>
 #include <asm/ptrace.h>
 #include <asm/cputype.h>
+#include <asm/virt.h>
 
 unsigned long *vcpu_reg32(const struct kvm_vcpu *vcpu, u8 reg_num);
 unsigned long *vcpu_spsr32(const struct kvm_vcpu *vcpu);
@@ -43,6 +44,8 @@
 static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu)
 {
 	vcpu->arch.hcr_el2 = HCR_GUEST_FLAGS;
+	if (is_kernel_in_hyp_mode())
+		vcpu->arch.hcr_el2 |= HCR_E2H;
 	if (test_bit(KVM_ARM_VCPU_EL1_32BIT, vcpu->arch.features))
 		vcpu->arch.hcr_el2 &= ~HCR_RW;
 }
@@ -189,6 +192,11 @@
 	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_S1PTW);
 }
 
+static inline bool kvm_vcpu_dabt_is_cm(const struct kvm_vcpu *vcpu)
+{
+	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_CM);
+}
+
 static inline int kvm_vcpu_dabt_get_as(const struct kvm_vcpu *vcpu)
 {
 	return 1 << ((kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SAS) >> ESR_ELx_SAS_SHIFT);
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index 689d4c9..71fa6fe 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -25,7 +25,9 @@
 #include <linux/types.h>
 #include <linux/kvm_types.h>
 #include <asm/kvm.h>
+#include <asm/kvm_asm.h>
 #include <asm/kvm_mmio.h>
+#include <asm/kvm_perf_event.h>
 
 #define __KVM_HAVE_ARCH_INTC_INITIALIZED
 
@@ -36,10 +38,11 @@
 
 #include <kvm/arm_vgic.h>
 #include <kvm/arm_arch_timer.h>
+#include <kvm/arm_pmu.h>
 
 #define KVM_MAX_VCPUS VGIC_V3_MAX_CPUS
 
-#define KVM_VCPU_MAX_FEATURES 3
+#define KVM_VCPU_MAX_FEATURES 4
 
 int __attribute_const__ kvm_target_cpu(void);
 int kvm_reset_vcpu(struct kvm_vcpu *vcpu);
@@ -114,6 +117,21 @@
 	MDSCR_EL1,	/* Monitor Debug System Control Register */
 	MDCCINT_EL1,	/* Monitor Debug Comms Channel Interrupt Enable Reg */
 
+	/* Performance Monitors Registers */
+	PMCR_EL0,	/* Control Register */
+	PMSELR_EL0,	/* Event Counter Selection Register */
+	PMEVCNTR0_EL0,	/* Event Counter Register (0-30) */
+	PMEVCNTR30_EL0 = PMEVCNTR0_EL0 + 30,
+	PMCCNTR_EL0,	/* Cycle Counter Register */
+	PMEVTYPER0_EL0,	/* Event Type Register (0-30) */
+	PMEVTYPER30_EL0 = PMEVTYPER0_EL0 + 30,
+	PMCCFILTR_EL0,	/* Cycle Count Filter Register */
+	PMCNTENSET_EL0,	/* Count Enable Set Register */
+	PMINTENSET_EL1,	/* Interrupt Enable Set Register */
+	PMOVSSET_EL0,	/* Overflow Flag Status Set Register */
+	PMSWINC_EL0,	/* Software Increment Register */
+	PMUSERENR_EL0,	/* User Enable Register */
+
 	/* 32bit specific registers. Keep them at the end of the range */
 	DACR32_EL2,	/* Domain Access Control Register */
 	IFSR32_EL2,	/* Instruction Fault Status Register */
@@ -211,6 +229,7 @@
 	/* VGIC state */
 	struct vgic_cpu vgic_cpu;
 	struct arch_timer_cpu timer_cpu;
+	struct kvm_pmu pmu;
 
 	/*
 	 * Anything that is not used directly from assembly code goes
@@ -342,5 +361,18 @@
 void kvm_arm_setup_debug(struct kvm_vcpu *vcpu);
 void kvm_arm_clear_debug(struct kvm_vcpu *vcpu);
 void kvm_arm_reset_debug_ptr(struct kvm_vcpu *vcpu);
+int kvm_arm_vcpu_arch_set_attr(struct kvm_vcpu *vcpu,
+			       struct kvm_device_attr *attr);
+int kvm_arm_vcpu_arch_get_attr(struct kvm_vcpu *vcpu,
+			       struct kvm_device_attr *attr);
+int kvm_arm_vcpu_arch_has_attr(struct kvm_vcpu *vcpu,
+			       struct kvm_device_attr *attr);
+
+/* #define kvm_call_hyp(f, ...) __kvm_call_hyp(kvm_ksym_ref(f), ##__VA_ARGS__) */
+
+static inline void __cpu_init_stage2(void)
+{
+	kvm_call_hyp(__init_stage2_translation);
+}
 
 #endif /* __ARM64_KVM_HOST_H__ */
diff --git a/arch/arm64/include/asm/kvm_hyp.h b/arch/arm64/include/asm/kvm_hyp.h
new file mode 100644
index 0000000..a46b019
--- /dev/null
+++ b/arch/arm64/include/asm/kvm_hyp.h
@@ -0,0 +1,181 @@
+/*
+ * Copyright (C) 2015 - ARM Ltd
+ * Author: Marc Zyngier <marc.zyngier@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __ARM64_KVM_HYP_H__
+#define __ARM64_KVM_HYP_H__
+
+#include <linux/compiler.h>
+#include <linux/kvm_host.h>
+#include <asm/kvm_mmu.h>
+#include <asm/kvm_perf_event.h>
+#include <asm/sysreg.h>
+
+#define __hyp_text __section(.hyp.text) notrace
+
+static inline unsigned long __kern_hyp_va(unsigned long v)
+{
+	asm volatile(ALTERNATIVE("and %0, %0, %1",
+				 "nop",
+				 ARM64_HAS_VIRT_HOST_EXTN)
+		     : "+r" (v) : "i" (HYP_PAGE_OFFSET_MASK));
+	return v;
+}
+
+#define kern_hyp_va(v) (typeof(v))(__kern_hyp_va((unsigned long)(v)))
+
+static inline unsigned long __hyp_kern_va(unsigned long v)
+{
+	u64 offset = PAGE_OFFSET - HYP_PAGE_OFFSET;
+	asm volatile(ALTERNATIVE("add %0, %0, %1",
+				 "nop",
+				 ARM64_HAS_VIRT_HOST_EXTN)
+		     : "+r" (v) : "r" (offset));
+	return v;
+}
+
+#define hyp_kern_va(v) (typeof(v))(__hyp_kern_va((unsigned long)(v)))
+
+#define read_sysreg_elx(r,nvh,vh)					\
+	({								\
+		u64 reg;						\
+		asm volatile(ALTERNATIVE("mrs %0, " __stringify(r##nvh),\
+					 "mrs_s %0, " __stringify(r##vh),\
+					 ARM64_HAS_VIRT_HOST_EXTN)	\
+			     : "=r" (reg));				\
+		reg;							\
+	})
+
+#define write_sysreg_elx(v,r,nvh,vh)					\
+	do {								\
+		u64 __val = (u64)(v);					\
+		asm volatile(ALTERNATIVE("msr " __stringify(r##nvh) ", %x0",\
+					 "msr_s " __stringify(r##vh) ", %x0",\
+					 ARM64_HAS_VIRT_HOST_EXTN)	\
+					 : : "rZ" (__val));		\
+	} while (0)
+
+/*
+ * Unified accessors for registers that have a different encoding
+ * between VHE and non-VHE. They must be specified without their "ELx"
+ * encoding.
+ */
+#define read_sysreg_el2(r)						\
+	({								\
+		u64 reg;						\
+		asm volatile(ALTERNATIVE("mrs %0, " __stringify(r##_EL2),\
+					 "mrs %0, " __stringify(r##_EL1),\
+					 ARM64_HAS_VIRT_HOST_EXTN)	\
+			     : "=r" (reg));				\
+		reg;							\
+	})
+
+#define write_sysreg_el2(v,r)						\
+	do {								\
+		u64 __val = (u64)(v);					\
+		asm volatile(ALTERNATIVE("msr " __stringify(r##_EL2) ", %x0",\
+					 "msr " __stringify(r##_EL1) ", %x0",\
+					 ARM64_HAS_VIRT_HOST_EXTN)	\
+					 : : "rZ" (__val));		\
+	} while (0)
+
+#define read_sysreg_el0(r)	read_sysreg_elx(r, _EL0, _EL02)
+#define write_sysreg_el0(v,r)	write_sysreg_elx(v, r, _EL0, _EL02)
+#define read_sysreg_el1(r)	read_sysreg_elx(r, _EL1, _EL12)
+#define write_sysreg_el1(v,r)	write_sysreg_elx(v, r, _EL1, _EL12)
+
+/* The VHE specific system registers and their encoding */
+#define sctlr_EL12              sys_reg(3, 5, 1, 0, 0)
+#define cpacr_EL12              sys_reg(3, 5, 1, 0, 2)
+#define ttbr0_EL12              sys_reg(3, 5, 2, 0, 0)
+#define ttbr1_EL12              sys_reg(3, 5, 2, 0, 1)
+#define tcr_EL12                sys_reg(3, 5, 2, 0, 2)
+#define afsr0_EL12              sys_reg(3, 5, 5, 1, 0)
+#define afsr1_EL12              sys_reg(3, 5, 5, 1, 1)
+#define esr_EL12                sys_reg(3, 5, 5, 2, 0)
+#define far_EL12                sys_reg(3, 5, 6, 0, 0)
+#define mair_EL12               sys_reg(3, 5, 10, 2, 0)
+#define amair_EL12              sys_reg(3, 5, 10, 3, 0)
+#define vbar_EL12               sys_reg(3, 5, 12, 0, 0)
+#define contextidr_EL12         sys_reg(3, 5, 13, 0, 1)
+#define cntkctl_EL12            sys_reg(3, 5, 14, 1, 0)
+#define cntp_tval_EL02          sys_reg(3, 5, 14, 2, 0)
+#define cntp_ctl_EL02           sys_reg(3, 5, 14, 2, 1)
+#define cntp_cval_EL02          sys_reg(3, 5, 14, 2, 2)
+#define cntv_tval_EL02          sys_reg(3, 5, 14, 3, 0)
+#define cntv_ctl_EL02           sys_reg(3, 5, 14, 3, 1)
+#define cntv_cval_EL02          sys_reg(3, 5, 14, 3, 2)
+#define spsr_EL12               sys_reg(3, 5, 4, 0, 0)
+#define elr_EL12                sys_reg(3, 5, 4, 0, 1)
+
+/**
+ * hyp_alternate_select - Generates patchable code sequences that are
+ * used to switch between two implementations of a function, depending
+ * on the availability of a feature.
+ *
+ * @fname: a symbol name that will be defined as a function returning a
+ * function pointer whose type will match @orig and @alt
+ * @orig: A pointer to the default function, as returned by @fname when
+ * @cond doesn't hold
+ * @alt: A pointer to the alternate function, as returned by @fname
+ * when @cond holds
+ * @cond: a CPU feature (as described in asm/cpufeature.h)
+ */
+#define hyp_alternate_select(fname, orig, alt, cond)			\
+typeof(orig) * __hyp_text fname(void)					\
+{									\
+	typeof(alt) *val = orig;					\
+	asm volatile(ALTERNATIVE("nop		\n",			\
+				 "mov	%0, %1	\n",			\
+				 cond)					\
+		     : "+r" (val) : "r" (alt));				\
+	return val;							\
+}
+
+void __vgic_v2_save_state(struct kvm_vcpu *vcpu);
+void __vgic_v2_restore_state(struct kvm_vcpu *vcpu);
+
+void __vgic_v3_save_state(struct kvm_vcpu *vcpu);
+void __vgic_v3_restore_state(struct kvm_vcpu *vcpu);
+
+void __timer_save_state(struct kvm_vcpu *vcpu);
+void __timer_restore_state(struct kvm_vcpu *vcpu);
+
+void __sysreg_save_host_state(struct kvm_cpu_context *ctxt);
+void __sysreg_restore_host_state(struct kvm_cpu_context *ctxt);
+void __sysreg_save_guest_state(struct kvm_cpu_context *ctxt);
+void __sysreg_restore_guest_state(struct kvm_cpu_context *ctxt);
+void __sysreg32_save_state(struct kvm_vcpu *vcpu);
+void __sysreg32_restore_state(struct kvm_vcpu *vcpu);
+
+void __debug_save_state(struct kvm_vcpu *vcpu,
+			struct kvm_guest_debug_arch *dbg,
+			struct kvm_cpu_context *ctxt);
+void __debug_restore_state(struct kvm_vcpu *vcpu,
+			   struct kvm_guest_debug_arch *dbg,
+			   struct kvm_cpu_context *ctxt);
+void __debug_cond_save_host_state(struct kvm_vcpu *vcpu);
+void __debug_cond_restore_host_state(struct kvm_vcpu *vcpu);
+
+void __fpsimd_save_state(struct user_fpsimd_state *fp_regs);
+void __fpsimd_restore_state(struct user_fpsimd_state *fp_regs);
+bool __fpsimd_enabled(void);
+
+u64 __guest_enter(struct kvm_vcpu *vcpu, struct kvm_cpu_context *host_ctxt);
+void __noreturn __hyp_do_panic(unsigned long, ...);
+
+#endif /* __ARM64_KVM_HYP_H__ */
+
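
Note on hyp_alternate_select(): the macro defines a function that returns a
function pointer, which is why call sites use the fname()() double-call form.
Ignoring the runtime patching, it behaves like this plain-C sketch (have_vhe
is a stand-in for the ARM64_HAS_VIRT_HOST_EXTN condition, not a real symbol):

    static bool __check_vhe(void)  { return true;  }
    static bool __check_nvhe(void) { return false; }

    static bool (*__check_mode(void))(void)
    {
        return have_vhe ? __check_vhe : __check_nvhe;
    }

    /* usage: */
    bool vhe = __check_mode()();
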
diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
index 7364339..9a9318a 100644
--- a/arch/arm64/include/asm/kvm_mmu.h
+++ b/arch/arm64/include/asm/kvm_mmu.h
@@ -23,13 +23,16 @@
 #include <asm/cpufeature.h>
 
 /*
- * As we only have the TTBR0_EL2 register, we cannot express
+ * As ARMv8.0 only has the TTBR0_EL2 register, we cannot express
  * "negative" addresses. This makes it impossible to directly share
  * mappings with the kernel.
  *
  * Instead, give the HYP mode its own VA region at a fixed offset from
  * the kernel by just masking the top bits (which are all ones for a
  * kernel address).
+ *
+ * ARMv8.1 (using VHE) does have a TTBR1_EL2, and doesn't use these
+ * macros (the entire kernel runs at EL2).
  */
 #define HYP_PAGE_OFFSET_SHIFT	VA_BITS
 #define HYP_PAGE_OFFSET_MASK	((UL(1) << HYP_PAGE_OFFSET_SHIFT) - 1)
@@ -56,12 +59,19 @@
 
 #ifdef __ASSEMBLY__
 
+#include <asm/alternative.h>
+#include <asm/cpufeature.h>
+
 /*
  * Convert a kernel VA into a HYP VA.
  * reg: VA to be converted.
  */
 .macro kern_hyp_va	reg
+alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
 	and	\reg, \reg, #HYP_PAGE_OFFSET_MASK
+alternative_else
+	nop
+alternative_endif
 .endm
 
 #else
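
Note on the non-VHE kern_hyp_va masking: with VA_BITS == 48,
HYP_PAGE_OFFSET_MASK is 0x0000ffffffffffff, so the conversion simply strips
the sign-extended top bits of a kernel address. A worked example (addresses
are illustrative):

    unsigned long kva = 0xffff000012345678UL;
    unsigned long hyp = kva & HYP_PAGE_OFFSET_MASK;  /* 0x0000000012345678 */

Under VHE the alternative patches the "and" into a "nop", so the address is
used unchanged, since the entire kernel already runs at EL2.
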
diff --git a/arch/arm64/include/asm/kvm_perf_event.h b/arch/arm64/include/asm/kvm_perf_event.h
new file mode 100644
index 0000000..c18fdeb
--- /dev/null
+++ b/arch/arm64/include/asm/kvm_perf_event.h
@@ -0,0 +1,68 @@
+/*
+ * Copyright (C) 2012 ARM Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __ASM_KVM_PERF_EVENT_H
+#define __ASM_KVM_PERF_EVENT_H
+
+#define	ARMV8_PMU_MAX_COUNTERS	32
+#define	ARMV8_PMU_COUNTER_MASK	(ARMV8_PMU_MAX_COUNTERS - 1)
+
+/*
+ * Per-CPU PMCR: config reg
+ */
+#define ARMV8_PMU_PMCR_E	(1 << 0) /* Enable all counters */
+#define ARMV8_PMU_PMCR_P	(1 << 1) /* Reset all counters */
+#define ARMV8_PMU_PMCR_C	(1 << 2) /* Cycle counter reset */
+#define ARMV8_PMU_PMCR_D	(1 << 3) /* CCNT counts every 64th cpu cycle */
+#define ARMV8_PMU_PMCR_X	(1 << 4) /* Export to ETM */
+#define ARMV8_PMU_PMCR_DP	(1 << 5) /* Disable CCNT if non-invasive debug*/
+/* Determines which bit of PMCCNTR_EL0 generates an overflow */
+#define ARMV8_PMU_PMCR_LC	(1 << 6)
+#define	ARMV8_PMU_PMCR_N_SHIFT	11	 /* Number of counters supported */
+#define	ARMV8_PMU_PMCR_N_MASK	0x1f
+#define	ARMV8_PMU_PMCR_MASK	0x7f	 /* Mask for writable bits */
+
+/*
+ * PMOVSR: counters overflow flag status reg
+ */
+#define	ARMV8_PMU_OVSR_MASK		0xffffffff	/* Mask for writable bits */
+#define	ARMV8_PMU_OVERFLOWED_MASK	ARMV8_PMU_OVSR_MASK
+
+/*
+ * PMXEVTYPER: Event selection reg
+ */
+#define	ARMV8_PMU_EVTYPE_MASK	0xc80003ff	/* Mask for writable bits */
+#define	ARMV8_PMU_EVTYPE_EVENT	0x3ff		/* Mask for EVENT bits */
+
+#define ARMV8_PMU_EVTYPE_EVENT_SW_INCR	0	/* Software increment event */
+
+/*
+ * Event filters for PMUv3
+ */
+#define	ARMV8_PMU_EXCLUDE_EL1	(1 << 31)
+#define	ARMV8_PMU_EXCLUDE_EL0	(1 << 30)
+#define	ARMV8_PMU_INCLUDE_EL2	(1 << 27)
+
+/*
+ * PMUSERENR: user enable reg
+ */
+#define ARMV8_PMU_USERENR_MASK	0xf		/* Mask for writable bits */
+#define ARMV8_PMU_USERENR_EN	(1 << 0) /* PMU regs can be accessed at EL0 */
+#define ARMV8_PMU_USERENR_SW	(1 << 1) /* PMSWINC can be written at EL0 */
+#define ARMV8_PMU_USERENR_CR	(1 << 2) /* Cycle counter can be read at EL0 */
+#define ARMV8_PMU_USERENR_ER	(1 << 3) /* Event counter can be read at EL0 */
+
+#endif
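
Note: these masks are combined when programming a PMEVTYPERn_EL0 event. A
short sketch (0x11 is assumed here to be the CPU cycles event number; the
filter choice is illustrative):

    /* Count event 0x11 at EL0 only: exclude EL1, do not include EL2 */
    u64 evtyper = (0x11 & ARMV8_PMU_EVTYPE_EVENT) | ARMV8_PMU_EXCLUDE_EL1;

    /* Never let reserved bits through */
    evtyper &= ARMV8_PMU_EVTYPE_MASK;
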
diff --git a/arch/arm64/include/asm/virt.h b/arch/arm64/include/asm/virt.h
index 7a5df52..9f22dd6 100644
--- a/arch/arm64/include/asm/virt.h
+++ b/arch/arm64/include/asm/virt.h
@@ -23,6 +23,8 @@
 
 #ifndef __ASSEMBLY__
 
+#include <asm/ptrace.h>
+
 /*
  * __boot_cpu_mode records what mode CPUs were booted in.
  * A correctly-implemented bootloader must start all CPUs in the same mode:
@@ -50,6 +52,14 @@
 	return __boot_cpu_mode[0] != __boot_cpu_mode[1];
 }
 
+static inline bool is_kernel_in_hyp_mode(void)
+{
+	u64 el;
+
+	asm("mrs %0, CurrentEL" : "=r" (el));
+	return el == CurrentEL_EL2;
+}
+
 /* The section containing the hypervisor text */
 extern char __hyp_text_start[];
 extern char __hyp_text_end[];
diff --git a/arch/arm64/include/uapi/asm/kvm.h b/arch/arm64/include/uapi/asm/kvm.h
index 2d4ca4b..f209ea1 100644
--- a/arch/arm64/include/uapi/asm/kvm.h
+++ b/arch/arm64/include/uapi/asm/kvm.h
@@ -94,6 +94,7 @@
 #define KVM_ARM_VCPU_POWER_OFF		0 /* CPU is started in OFF state */
 #define KVM_ARM_VCPU_EL1_32BIT		1 /* CPU running a 32bit VM */
 #define KVM_ARM_VCPU_PSCI_0_2		2 /* CPU uses PSCI v0.2 */
+#define KVM_ARM_VCPU_PMU_V3		3 /* Support guest PMUv3 */
 
 struct kvm_vcpu_init {
 	__u32 target;
@@ -204,6 +205,11 @@
 #define KVM_DEV_ARM_VGIC_GRP_CTRL	4
 #define   KVM_DEV_ARM_VGIC_CTRL_INIT	0
 
+/* Device Control API on vcpu fd */
+#define KVM_ARM_VCPU_PMU_V3_CTRL	0
+#define   KVM_ARM_VCPU_PMU_V3_IRQ	0
+#define   KVM_ARM_VCPU_PMU_V3_INIT	1
+
 /* KVM_IRQ_LINE irq field index values */
 #define KVM_ARM_IRQ_TYPE_SHIFT		24
 #define KVM_ARM_IRQ_TYPE_MASK		0xff
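
Note: the new KVM_ARM_VCPU_PMU_V3_CTRL group is driven through the existing
device-control API, but on a vcpu file descriptor. A hedged sketch of the
expected userspace sequence, after KVM_ARM_VCPU_INIT with the
KVM_ARM_VCPU_PMU_V3 feature bit set (vcpu_fd and the interrupt number are
assumptions; error handling omitted):

    #include <linux/kvm.h>
    #include <sys/ioctl.h>

    int irq = 23;                           /* PPI used for PMU overflow */
    struct kvm_device_attr attr = {
        .group = KVM_ARM_VCPU_PMU_V3_CTRL,
        .attr  = KVM_ARM_VCPU_PMU_V3_IRQ,
        .addr  = (__u64)(unsigned long)&irq,
    };

    ioctl(vcpu_fd, KVM_SET_DEVICE_ATTR, &attr);   /* set the IRQ */

    attr.attr = KVM_ARM_VCPU_PMU_V3_INIT;
    attr.addr = 0;
    ioctl(vcpu_fd, KVM_SET_DEVICE_ATTR, &attr);   /* then initialize */
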
diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c
index fffa4ac6..b0ab4e9 100644
--- a/arch/arm64/kernel/asm-offsets.c
+++ b/arch/arm64/kernel/asm-offsets.c
@@ -110,9 +110,6 @@
   DEFINE(CPU_USER_PT_REGS,	offsetof(struct kvm_regs, regs));
   DEFINE(CPU_FP_REGS,		offsetof(struct kvm_regs, fp_regs));
   DEFINE(VCPU_FPEXC32_EL2,	offsetof(struct kvm_vcpu, arch.ctxt.sys_regs[FPEXC32_EL2]));
-  DEFINE(VCPU_ESR_EL2,		offsetof(struct kvm_vcpu, arch.fault.esr_el2));
-  DEFINE(VCPU_FAR_EL2,		offsetof(struct kvm_vcpu, arch.fault.far_el2));
-  DEFINE(VCPU_HPFAR_EL2,	offsetof(struct kvm_vcpu, arch.fault.hpfar_el2));
   DEFINE(VCPU_HOST_CONTEXT,	offsetof(struct kvm_vcpu, arch.host_cpu_context));
 #endif
 #ifdef CONFIG_CPU_PM
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index 5c90aa4..ba74519 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -26,6 +26,7 @@
 #include <asm/cpu_ops.h>
 #include <asm/processor.h>
 #include <asm/sysreg.h>
+#include <asm/virt.h>
 
 unsigned long elf_hwcap __read_mostly;
 EXPORT_SYMBOL_GPL(elf_hwcap);
@@ -621,6 +622,11 @@
 	return has_sre;
 }
 
+static bool runs_at_el2(const struct arm64_cpu_capabilities *entry)
+{
+	return is_kernel_in_hyp_mode();
+}
+
 static const struct arm64_cpu_capabilities arm64_features[] = {
 	{
 		.desc = "GIC system register CPU interface",
@@ -651,6 +657,11 @@
 		.min_field_value = 2,
 	},
 #endif /* CONFIG_AS_LSE && CONFIG_ARM64_LSE_ATOMICS */
+	{
+		.desc = "Virtualization Host Extensions",
+		.capability = ARM64_HAS_VIRT_HOST_EXTN,
+		.matches = runs_at_el2,
+	},
 	{},
 };
 
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index 917d981..6f2f377 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -30,6 +30,7 @@
 #include <asm/cache.h>
 #include <asm/cputype.h>
 #include <asm/kernel-pgtable.h>
+#include <asm/kvm_arm.h>
 #include <asm/memory.h>
 #include <asm/pgtable-hwdef.h>
 #include <asm/pgtable.h>
@@ -464,9 +465,27 @@
 	isb
 	ret
 
+2:
+#ifdef CONFIG_ARM64_VHE
+	/*
+	 * Check for VHE being present. For the rest of the EL2 setup,
+	 * x2 being non-zero indicates that we do have VHE, and that the
+	 * kernel is intended to run at EL2.
+	 */
+	mrs	x2, id_aa64mmfr1_el1
+	ubfx	x2, x2, #8, #4
+#else
+	mov	x2, xzr
+#endif
+
 	/* Hyp configuration. */
-2:	mov	x0, #(1 << 31)			// 64-bit EL1
+	mov	x0, #HCR_RW			// 64-bit EL1
+	cbz	x2, set_hcr
+	orr	x0, x0, #HCR_TGE		// Enable Host Extensions
+	orr	x0, x0, #HCR_E2H
+set_hcr:
 	msr	hcr_el2, x0
+	isb
 
 	/* Generic timers. */
 	mrs	x0, cnthctl_el2
@@ -526,6 +545,13 @@
 	/* Stage-2 translation */
 	msr	vttbr_el2, xzr
 
+	cbz	x2, install_el2_stub
+
+	mov	w20, #BOOT_CPU_MODE_EL2		// This CPU booted in EL2
+	isb
+	ret
+
+install_el2_stub:
 	/* Hypervisor stub */
 	adrp	x0, __hyp_stub_vectors
 	add	x0, x0, #:lo12:__hyp_stub_vectors
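
Note: a C rendering of the EL2 setup above may help when reading the
assembly (the accessor is illustrative; the HCR_* bits come from kvm_arm.h
earlier in this series, HCR_TGE being bit 27):

    u64 mmfr1 = read_cpuid(ID_AA64MMFR1_EL1);
    bool vhe  = ((mmfr1 >> 8) & 0xf) != 0;   /* ID_AA64MMFR1_EL1.VH */

    u64 hcr = HCR_RW;                        /* 64-bit EL1 */
    if (vhe)
        hcr |= HCR_TGE | HCR_E2H;            /* host runs at EL2 */
    /* msr hcr_el2, hcr ; isb */

    /* Non-VHE CPUs then fall through to installing the hyp stub; VHE CPUs
     * record BOOT_CPU_MODE_EL2 and return, staying at EL2. */
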
diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c
index f7ab14c..1b52269 100644
--- a/arch/arm64/kernel/perf_event.c
+++ b/arch/arm64/kernel/perf_event.c
@@ -20,6 +20,7 @@
  */
 
 #include <asm/irq_regs.h>
+#include <asm/virt.h>
 
 #include <linux/of.h>
 #include <linux/perf/arm_pmu.h>
@@ -691,9 +692,12 @@
 
 	if (attr->exclude_idle)
 		return -EPERM;
+	if (is_kernel_in_hyp_mode() &&
+	    attr->exclude_kernel != attr->exclude_hv)
+		return -EINVAL;
 	if (attr->exclude_user)
 		config_base |= ARMV8_EXCLUDE_EL0;
-	if (attr->exclude_kernel)
+	if (!is_kernel_in_hyp_mode() && attr->exclude_kernel)
 		config_base |= ARMV8_EXCLUDE_EL1;
 	if (!attr->exclude_hv)
 		config_base |= ARMV8_INCLUDE_EL2;
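
Note: under VHE the kernel and the hypervisor are the same privilege level
(EL2), so a counter request must treat exclude_kernel and exclude_hv
identically. A sketch of the two cases as seen through perf_event_open():

    struct perf_event_attr attr = {
        .type   = PERF_TYPE_HARDWARE,
        .config = PERF_COUNT_HW_CPU_CYCLES,
    };

    attr.exclude_kernel = 1;
    attr.exclude_hv     = 0;    /* rejected with -EINVAL on a VHE host */

    attr.exclude_kernel = 1;
    attr.exclude_hv     = 1;    /* accepted: EL2 (the kernel) is filtered */
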
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index b1adc51..4607657 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -195,7 +195,7 @@
 	/*
 	 * OK, it's off to the idle thread for us
 	 */
-	cpu_startup_entry(CPUHP_ONLINE);
+	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
 }
 
 #ifdef CONFIG_HOTPLUG_CPU
diff --git a/arch/arm64/kvm/Kconfig b/arch/arm64/kvm/Kconfig
index a5272c0..de7450d 100644
--- a/arch/arm64/kvm/Kconfig
+++ b/arch/arm64/kvm/Kconfig
@@ -36,6 +36,7 @@
 	select HAVE_KVM_EVENTFD
 	select HAVE_KVM_IRQFD
 	select KVM_ARM_VGIC_V3
+	select KVM_ARM_PMU if HW_PERF_EVENTS
 	---help---
 	  Support hosting virtualized guest machines.
 	  We don't support KVM with 16K page tables yet, due to the multiple
@@ -48,6 +49,12 @@
 	---help---
 	  Provides host support for ARM processors.
 
+config KVM_ARM_PMU
+	bool
+	---help---
+	  Adds support for a virtual Performance Monitoring Unit (PMU) in
+	  virtual machines.
+
 source drivers/vhost/Kconfig
 
 endif # VIRTUALIZATION
diff --git a/arch/arm64/kvm/Makefile b/arch/arm64/kvm/Makefile
index caee9ee..122cff4 100644
--- a/arch/arm64/kvm/Makefile
+++ b/arch/arm64/kvm/Makefile
@@ -26,3 +26,4 @@
 kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic-v3.o
 kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic-v3-emul.o
 kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/arch_timer.o
+kvm-$(CONFIG_KVM_ARM_PMU) += $(KVM)/arm/pmu.o
diff --git a/arch/arm64/kvm/guest.c b/arch/arm64/kvm/guest.c
index 9e54ad7..32fad75 100644
--- a/arch/arm64/kvm/guest.c
+++ b/arch/arm64/kvm/guest.c
@@ -380,3 +380,54 @@
 	}
 	return 0;
 }
+
+int kvm_arm_vcpu_arch_set_attr(struct kvm_vcpu *vcpu,
+			       struct kvm_device_attr *attr)
+{
+	int ret;
+
+	switch (attr->group) {
+	case KVM_ARM_VCPU_PMU_V3_CTRL:
+		ret = kvm_arm_pmu_v3_set_attr(vcpu, attr);
+		break;
+	default:
+		ret = -ENXIO;
+		break;
+	}
+
+	return ret;
+}
+
+int kvm_arm_vcpu_arch_get_attr(struct kvm_vcpu *vcpu,
+			       struct kvm_device_attr *attr)
+{
+	int ret;
+
+	switch (attr->group) {
+	case KVM_ARM_VCPU_PMU_V3_CTRL:
+		ret = kvm_arm_pmu_v3_get_attr(vcpu, attr);
+		break;
+	default:
+		ret = -ENXIO;
+		break;
+	}
+
+	return ret;
+}
+
+int kvm_arm_vcpu_arch_has_attr(struct kvm_vcpu *vcpu,
+			       struct kvm_device_attr *attr)
+{
+	int ret;
+
+	switch (attr->group) {
+	case KVM_ARM_VCPU_PMU_V3_CTRL:
+		ret = kvm_arm_pmu_v3_has_attr(vcpu, attr);
+		break;
+	default:
+		ret = -ENXIO;
+		break;
+	}
+
+	return ret;
+}
diff --git a/arch/arm64/kvm/hyp-init.S b/arch/arm64/kvm/hyp-init.S
index d073b5a..7d8747c 100644
--- a/arch/arm64/kvm/hyp-init.S
+++ b/arch/arm64/kvm/hyp-init.S
@@ -87,26 +87,13 @@
 #endif
 	/*
 	 * Read the PARange bits from ID_AA64MMFR0_EL1 and set the PS bits in
-	 * TCR_EL2 and VTCR_EL2.
+	 * TCR_EL2.
 	 */
 	mrs	x5, ID_AA64MMFR0_EL1
 	bfi	x4, x5, #16, #3
 
 	msr	tcr_el2, x4
 
-	ldr	x4, =VTCR_EL2_FLAGS
-	bfi	x4, x5, #16, #3
-	/*
-	 * Read the VMIDBits bits from ID_AA64MMFR1_EL1 and set the VS bit in
-	 * VTCR_EL2.
-	 */
-	mrs	x5, ID_AA64MMFR1_EL1
-	ubfx	x5, x5, #5, #1
-	lsl	x5, x5, #VTCR_EL2_VS
-	orr	x4, x4, x5
-
-	msr	vtcr_el2, x4
-
 	mrs	x4, mair_el1
 	msr	mair_el2, x4
 	isb
diff --git a/arch/arm64/kvm/hyp.S b/arch/arm64/kvm/hyp.S
index 0ccdcbb..0689a74 100644
--- a/arch/arm64/kvm/hyp.S
+++ b/arch/arm64/kvm/hyp.S
@@ -17,7 +17,9 @@
 
 #include <linux/linkage.h>
 
+#include <asm/alternative.h>
 #include <asm/assembler.h>
+#include <asm/cpufeature.h>
 
 /*
  * u64 kvm_call_hyp(void *hypfn, ...);
@@ -38,6 +40,11 @@
  * arch/arm64/kernel/hyp_stub.S.
  */
 ENTRY(kvm_call_hyp)
+alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
 	hvc	#0
 	ret
+alternative_else
+	b	__vhe_hyp_call
+	nop
+alternative_endif
 ENDPROC(kvm_call_hyp)
diff --git a/arch/arm64/kvm/hyp/Makefile b/arch/arm64/kvm/hyp/Makefile
index 826032b..b6a8fc5 100644
--- a/arch/arm64/kvm/hyp/Makefile
+++ b/arch/arm64/kvm/hyp/Makefile
@@ -2,9 +2,12 @@
 # Makefile for Kernel-based Virtual Machine module, HYP part
 #
 
-obj-$(CONFIG_KVM_ARM_HOST) += vgic-v2-sr.o
+KVM=../../../../virt/kvm
+
+obj-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/hyp/vgic-v2-sr.o
+obj-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/hyp/timer-sr.o
+
 obj-$(CONFIG_KVM_ARM_HOST) += vgic-v3-sr.o
-obj-$(CONFIG_KVM_ARM_HOST) += timer-sr.o
 obj-$(CONFIG_KVM_ARM_HOST) += sysreg-sr.o
 obj-$(CONFIG_KVM_ARM_HOST) += debug-sr.o
 obj-$(CONFIG_KVM_ARM_HOST) += entry.o
@@ -12,3 +15,4 @@
 obj-$(CONFIG_KVM_ARM_HOST) += fpsimd.o
 obj-$(CONFIG_KVM_ARM_HOST) += tlb.o
 obj-$(CONFIG_KVM_ARM_HOST) += hyp-entry.o
+obj-$(CONFIG_KVM_ARM_HOST) += s2-setup.o
diff --git a/arch/arm64/kvm/hyp/debug-sr.c b/arch/arm64/kvm/hyp/debug-sr.c
index c9c1e97..053cf8b 100644
--- a/arch/arm64/kvm/hyp/debug-sr.c
+++ b/arch/arm64/kvm/hyp/debug-sr.c
@@ -19,9 +19,7 @@
 #include <linux/kvm_host.h>
 
 #include <asm/kvm_asm.h>
-#include <asm/kvm_mmu.h>
-
-#include "hyp.h"
+#include <asm/kvm_hyp.h>
 
 #define read_debug(r,n)		read_sysreg(r##n##_el1)
 #define write_debug(v,r,n)	write_sysreg(v, r##n##_el1)
diff --git a/arch/arm64/kvm/hyp/entry.S b/arch/arm64/kvm/hyp/entry.S
index fd0fbe9..ce9e5e5 100644
--- a/arch/arm64/kvm/hyp/entry.S
+++ b/arch/arm64/kvm/hyp/entry.S
@@ -130,9 +130,15 @@
 ENTRY(__fpsimd_guest_restore)
 	stp	x4, lr, [sp, #-16]!
 
+alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
 	mrs	x2, cptr_el2
 	bic	x2, x2, #CPTR_EL2_TFP
 	msr	cptr_el2, x2
+alternative_else
+	mrs	x2, cpacr_el1
+	orr	x2, x2, #CPACR_EL1_FPEN
+	msr	cpacr_el1, x2
+alternative_endif
 	isb
 
 	mrs	x3, tpidr_el2
diff --git a/arch/arm64/kvm/hyp/hyp-entry.S b/arch/arm64/kvm/hyp/hyp-entry.S
index 93e8d983..3488894 100644
--- a/arch/arm64/kvm/hyp/hyp-entry.S
+++ b/arch/arm64/kvm/hyp/hyp-entry.S
@@ -19,7 +19,6 @@
 
 #include <asm/alternative.h>
 #include <asm/assembler.h>
-#include <asm/asm-offsets.h>
 #include <asm/cpufeature.h>
 #include <asm/kvm_arm.h>
 #include <asm/kvm_asm.h>
@@ -38,10 +37,42 @@
 	ldp	x0, x1, [sp], #16
 .endm
 
+.macro do_el2_call
+	/*
+	 * Shuffle the parameters before calling the function
+	 * pointed to in x0. Assumes parameters in x[1,2,3].
+	 */
+	sub	sp, sp, #16
+	str	lr, [sp]
+	mov	lr, x0
+	mov	x0, x1
+	mov	x1, x2
+	mov	x2, x3
+	blr	lr
+	ldr	lr, [sp]
+	add	sp, sp, #16
+.endm
+
+ENTRY(__vhe_hyp_call)
+	do_el2_call
+	/*
+	 * We used to rely on having an exception return to get
+	 * an implicit isb. In the E2H case, we don't have it anymore.
+	 * rather than changing all the leaf functions, just do it here
+	 * before returning to the rest of the kernel.
+	 */
+	isb
+	ret
+ENDPROC(__vhe_hyp_call)
+
 el1_sync:				// Guest trapped into EL2
 	save_x0_to_x3
 
+alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
 	mrs	x1, esr_el2
+alternative_else
+	mrs	x1, esr_el1
+alternative_endif
 	lsr	x2, x1, #ESR_ELx_EC_SHIFT
 
 	cmp	x2, #ESR_ELx_EC_HVC64
@@ -58,19 +89,13 @@
 	mrs	x0, vbar_el2
 	b	2f
 
-1:	stp	lr, xzr, [sp, #-16]!
-
+1:
 	/*
-	 * Compute the function address in EL2, and shuffle the parameters.
+	 * Perform the EL2 call
 	 */
 	kern_hyp_va	x0
-	mov	lr, x0
-	mov	x0, x1
-	mov	x1, x2
-	mov	x2, x3
-	blr	lr
+	do_el2_call
 
-	ldp	lr, xzr, [sp], #16
 2:	eret
 
 el1_trap:
@@ -83,72 +108,10 @@
 	cmp	x2, #ESR_ELx_EC_FP_ASIMD
 	b.eq	__fpsimd_guest_restore
 
-	cmp	x2, #ESR_ELx_EC_DABT_LOW
-	mov	x0, #ESR_ELx_EC_IABT_LOW
-	ccmp	x2, x0, #4, ne
-	b.ne	1f		// Not an abort we care about
-
-	/* This is an abort. Check for permission fault */
-alternative_if_not ARM64_WORKAROUND_834220
-	and	x2, x1, #ESR_ELx_FSC_TYPE
-	cmp	x2, #FSC_PERM
-	b.ne	1f		// Not a permission fault
-alternative_else
-	nop			// Use the permission fault path to
-	nop			// check for a valid S1 translation,
-	nop			// regardless of the ESR value.
-alternative_endif
-
-	/*
-	 * Check for Stage-1 page table walk, which is guaranteed
-	 * to give a valid HPFAR_EL2.
-	 */
-	tbnz	x1, #7, 1f	// S1PTW is set
-
-	/* Preserve PAR_EL1 */
-	mrs	x3, par_el1
-	stp	x3, xzr, [sp, #-16]!
-
-	/*
-	 * Permission fault, HPFAR_EL2 is invalid.
-	 * Resolve the IPA the hard way using the guest VA.
-	 * Stage-1 translation already validated the memory access rights.
-	 * As such, we can use the EL1 translation regime, and don't have
-	 * to distinguish between EL0 and EL1 access.
-	 */
-	mrs	x2, far_el2
-	at	s1e1r, x2
-	isb
-
-	/* Read result */
-	mrs	x3, par_el1
-	ldp	x0, xzr, [sp], #16	// Restore PAR_EL1 from the stack
-	msr	par_el1, x0
-	tbnz	x3, #0, 3f		// Bail out if we failed the translation
-	ubfx	x3, x3, #12, #36	// Extract IPA
-	lsl	x3, x3, #4		// and present it like HPFAR
-	b	2f
-
-1:	mrs	x3, hpfar_el2
-	mrs	x2, far_el2
-
-2:	mrs	x0, tpidr_el2
-	str	w1, [x0, #VCPU_ESR_EL2]
-	str	x2, [x0, #VCPU_FAR_EL2]
-	str	x3, [x0, #VCPU_HPFAR_EL2]
-
+	mrs	x0, tpidr_el2
 	mov	x1, #ARM_EXCEPTION_TRAP
 	b	__guest_exit
 
-	/*
-	 * Translation failed. Just return to the guest and
-	 * let it fault again. Another CPU is probably playing
-	 * behind our back.
-	 */
-3:	restore_x0_to_x3
-
-	eret
-
 el1_irq:
 	save_x0_to_x3
 	mrs	x0, tpidr_el2
diff --git a/arch/arm64/kvm/hyp/hyp.h b/arch/arm64/kvm/hyp/hyp.h
deleted file mode 100644
index fb27517..0000000
--- a/arch/arm64/kvm/hyp/hyp.h
+++ /dev/null
@@ -1,90 +0,0 @@
-/*
- * Copyright (C) 2015 - ARM Ltd
- * Author: Marc Zyngier <marc.zyngier@arm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program.  If not, see <http://www.gnu.org/licenses/>.
- */
-
-#ifndef __ARM64_KVM_HYP_H__
-#define __ARM64_KVM_HYP_H__
-
-#include <linux/compiler.h>
-#include <linux/kvm_host.h>
-#include <asm/kvm_mmu.h>
-#include <asm/sysreg.h>
-
-#define __hyp_text __section(.hyp.text) notrace
-
-#define kern_hyp_va(v) (typeof(v))((unsigned long)(v) & HYP_PAGE_OFFSET_MASK)
-#define hyp_kern_va(v) (typeof(v))((unsigned long)(v) - HYP_PAGE_OFFSET \
-						      + PAGE_OFFSET)
-
-/**
- * hyp_alternate_select - Generates patchable code sequences that are
- * used to switch between two implementations of a function, depending
- * on the availability of a feature.
- *
- * @fname: a symbol name that will be defined as a function returning a
- * function pointer whose type will match @orig and @alt
- * @orig: A pointer to the default function, as returned by @fname when
- * @cond doesn't hold
- * @alt: A pointer to the alternate function, as returned by @fname
- * when @cond holds
- * @cond: a CPU feature (as described in asm/cpufeature.h)
- */
-#define hyp_alternate_select(fname, orig, alt, cond)			\
-typeof(orig) * __hyp_text fname(void)					\
-{									\
-	typeof(alt) *val = orig;					\
-	asm volatile(ALTERNATIVE("nop		\n",			\
-				 "mov	%0, %1	\n",			\
-				 cond)					\
-		     : "+r" (val) : "r" (alt));				\
-	return val;							\
-}
-
-void __vgic_v2_save_state(struct kvm_vcpu *vcpu);
-void __vgic_v2_restore_state(struct kvm_vcpu *vcpu);
-
-void __vgic_v3_save_state(struct kvm_vcpu *vcpu);
-void __vgic_v3_restore_state(struct kvm_vcpu *vcpu);
-
-void __timer_save_state(struct kvm_vcpu *vcpu);
-void __timer_restore_state(struct kvm_vcpu *vcpu);
-
-void __sysreg_save_state(struct kvm_cpu_context *ctxt);
-void __sysreg_restore_state(struct kvm_cpu_context *ctxt);
-void __sysreg32_save_state(struct kvm_vcpu *vcpu);
-void __sysreg32_restore_state(struct kvm_vcpu *vcpu);
-
-void __debug_save_state(struct kvm_vcpu *vcpu,
-			struct kvm_guest_debug_arch *dbg,
-			struct kvm_cpu_context *ctxt);
-void __debug_restore_state(struct kvm_vcpu *vcpu,
-			   struct kvm_guest_debug_arch *dbg,
-			   struct kvm_cpu_context *ctxt);
-void __debug_cond_save_host_state(struct kvm_vcpu *vcpu);
-void __debug_cond_restore_host_state(struct kvm_vcpu *vcpu);
-
-void __fpsimd_save_state(struct user_fpsimd_state *fp_regs);
-void __fpsimd_restore_state(struct user_fpsimd_state *fp_regs);
-static inline bool __fpsimd_enabled(void)
-{
-	return !(read_sysreg(cptr_el2) & CPTR_EL2_TFP);
-}
-
-u64 __guest_enter(struct kvm_vcpu *vcpu, struct kvm_cpu_context *host_ctxt);
-void __noreturn __hyp_do_panic(unsigned long, ...);
-
-#endif /* __ARM64_KVM_HYP_H__ */
-
diff --git a/arch/arm64/kvm/hyp/s2-setup.c b/arch/arm64/kvm/hyp/s2-setup.c
new file mode 100644
index 0000000..bfc54fd
--- /dev/null
+++ b/arch/arm64/kvm/hyp/s2-setup.c
@@ -0,0 +1,43 @@
+/*
+ * Copyright (C) 2016 - ARM Ltd
+ * Author: Marc Zyngier <marc.zyngier@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/types.h>
+#include <asm/kvm_arm.h>
+#include <asm/kvm_asm.h>
+#include <asm/kvm_hyp.h>
+
+void __hyp_text __init_stage2_translation(void)
+{
+	u64 val = VTCR_EL2_FLAGS;
+	u64 tmp;
+
+	/*
+	 * Read the PARange bits from ID_AA64MMFR0_EL1 and set the PS
+	 * bits in VTCR_EL2. Amusingly, the PARange is 4 bits, while
+	 * PS is only 3. Fortunately, bit 19 is RES0 in VTCR_EL2...
+	 */
+	val |= (read_sysreg(id_aa64mmfr0_el1) & 7) << 16;
+
+	/*
+	 * Read the VMIDBits bits from ID_AA64MMFR1_EL1 and set the VS
+	 * bit in VTCR_EL2.
+	 */
+	tmp = (read_sysreg(id_aa64mmfr1_el1) >> 4) & 0xf;
+	val |= (tmp == 2) ? VTCR_EL2_VS : 0;
+
+	write_sysreg(val, vtcr_el2);
+}
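
Note: a worked example of the PS encoding above, for a CPU reporting
PARange == 0b0101 (48-bit PA) in ID_AA64MMFR0_EL1[3:0]:

    u64 val = VTCR_EL2_FLAGS;
    val |= (0x5 & 7) << 16;     /* PS = 0b101 at VTCR_EL2[18:16], i.e. 0x50000 */

A VMIDBits field of 2 in ID_AA64MMFR1_EL1 additionally sets the VS bit,
selecting 16-bit VMIDs.
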
diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c
index f0e7bdf..437cfad 100644
--- a/arch/arm64/kvm/hyp/switch.c
+++ b/arch/arm64/kvm/hyp/switch.c
@@ -15,7 +15,53 @@
  * along with this program.  If not, see <http://www.gnu.org/licenses/>.
  */
 
-#include "hyp.h"
+#include <linux/types.h>
+#include <asm/kvm_asm.h>
+#include <asm/kvm_hyp.h>
+
+static bool __hyp_text __fpsimd_enabled_nvhe(void)
+{
+	return !(read_sysreg(cptr_el2) & CPTR_EL2_TFP);
+}
+
+static bool __hyp_text __fpsimd_enabled_vhe(void)
+{
+	return !!(read_sysreg(cpacr_el1) & CPACR_EL1_FPEN);
+}
+
+static hyp_alternate_select(__fpsimd_is_enabled,
+			    __fpsimd_enabled_nvhe, __fpsimd_enabled_vhe,
+			    ARM64_HAS_VIRT_HOST_EXTN);
+
+bool __hyp_text __fpsimd_enabled(void)
+{
+	return __fpsimd_is_enabled()();
+}
+
+static void __hyp_text __activate_traps_vhe(void)
+{
+	u64 val;
+
+	val = read_sysreg(cpacr_el1);
+	val |= CPACR_EL1_TTA;
+	val &= ~CPACR_EL1_FPEN;
+	write_sysreg(val, cpacr_el1);
+
+	write_sysreg(__kvm_hyp_vector, vbar_el1);
+}
+
+static void __hyp_text __activate_traps_nvhe(void)
+{
+	u64 val;
+
+	val = CPTR_EL2_DEFAULT;
+	val |= CPTR_EL2_TTA | CPTR_EL2_TFP;
+	write_sysreg(val, cptr_el2);
+}
+
+static hyp_alternate_select(__activate_traps_arch,
+			    __activate_traps_nvhe, __activate_traps_vhe,
+			    ARM64_HAS_VIRT_HOST_EXTN);
 
 static void __hyp_text __activate_traps(struct kvm_vcpu *vcpu)
 {
@@ -36,20 +82,37 @@
 	write_sysreg(val, hcr_el2);
 	/* Trap on AArch32 cp15 c15 accesses (EL1 or EL0) */
 	write_sysreg(1 << 15, hstr_el2);
-
-	val = CPTR_EL2_DEFAULT;
-	val |= CPTR_EL2_TTA | CPTR_EL2_TFP;
-	write_sysreg(val, cptr_el2);
-
+	/* Make sure we trap PMU access from EL0 to EL2 */
+	write_sysreg(ARMV8_PMU_USERENR_MASK, pmuserenr_el0);
 	write_sysreg(vcpu->arch.mdcr_el2, mdcr_el2);
+	__activate_traps_arch()();
 }
 
+static void __hyp_text __deactivate_traps_vhe(void)
+{
+	extern char vectors[];	/* kernel exception vectors */
+
+	write_sysreg(HCR_HOST_VHE_FLAGS, hcr_el2);
+	write_sysreg(CPACR_EL1_FPEN, cpacr_el1);
+	write_sysreg(vectors, vbar_el1);
+}
+
+static void __hyp_text __deactivate_traps_nvhe(void)
+{
+	write_sysreg(HCR_RW, hcr_el2);
+	write_sysreg(CPTR_EL2_DEFAULT, cptr_el2);
+}
+
+static hyp_alternate_select(__deactivate_traps_arch,
+			    __deactivate_traps_nvhe, __deactivate_traps_vhe,
+			    ARM64_HAS_VIRT_HOST_EXTN);
+
 static void __hyp_text __deactivate_traps(struct kvm_vcpu *vcpu)
 {
-	write_sysreg(HCR_RW, hcr_el2);
+	__deactivate_traps_arch()();
 	write_sysreg(0, hstr_el2);
 	write_sysreg(read_sysreg(mdcr_el2) & MDCR_EL2_HPMN_MASK, mdcr_el2);
-	write_sysreg(CPTR_EL2_DEFAULT, cptr_el2);
+	write_sysreg(0, pmuserenr_el0);
 }
 
 static void __hyp_text __activate_vm(struct kvm_vcpu *vcpu)
@@ -89,6 +152,86 @@
 	__vgic_call_restore_state()(vcpu);
 }
 
+static bool __hyp_text __true_value(void)
+{
+	return true;
+}
+
+static bool __hyp_text __false_value(void)
+{
+	return false;
+}
+
+static hyp_alternate_select(__check_arm_834220,
+			    __false_value, __true_value,
+			    ARM64_WORKAROUND_834220);
+
+static bool __hyp_text __translate_far_to_hpfar(u64 far, u64 *hpfar)
+{
+	u64 par, tmp;
+
+	/*
+	 * Resolve the IPA the hard way using the guest VA.
+	 *
+	 * Stage-1 translation already validated the memory access
+	 * rights. As such, we can use the EL1 translation regime, and
+	 * don't have to distinguish between EL0 and EL1 access.
+	 *
+	 * We do need to save/restore PAR_EL1 though, as we haven't
+	 * saved the guest context yet, and we may return early...
+	 */
+	par = read_sysreg(par_el1);
+	asm volatile("at s1e1r, %0" : : "r" (far));
+	isb();
+
+	tmp = read_sysreg(par_el1);
+	write_sysreg(par, par_el1);
+
+	if (unlikely(tmp & 1))
+		return false; /* Translation failed, back to guest */
+
+	/* Convert PAR to HPFAR format */
+	*hpfar = ((tmp >> 12) & ((1UL << 36) - 1)) << 4;
+	return true;
+}
+
+static bool __hyp_text __populate_fault_info(struct kvm_vcpu *vcpu)
+{
+	u64 esr = read_sysreg_el2(esr);
+	u8 ec = esr >> ESR_ELx_EC_SHIFT;
+	u64 hpfar, far;
+
+	vcpu->arch.fault.esr_el2 = esr;
+
+	if (ec != ESR_ELx_EC_DABT_LOW && ec != ESR_ELx_EC_IABT_LOW)
+		return true;
+
+	far = read_sysreg_el2(far);
+
+	/*
+	 * The HPFAR can be invalid if the stage 2 fault did not
+	 * happen during a stage 1 page table walk (the ESR_EL2.S1PTW
+	 * bit is clear) and one of the two following cases is true:
+	 *   1. The fault was due to a permission fault
+	 *   2. The processor carries errata 834220
+	 *
+	 * Therefore, for all non S1PTW faults where we either have a
+	 * permission fault or the errata workaround is enabled, we
+	 * resolve the IPA using the AT instruction.
+	 */
+	if (!(esr & ESR_ELx_S1PTW) &&
+	    (__check_arm_834220()() || (esr & ESR_ELx_FSC_TYPE) == FSC_PERM)) {
+		if (!__translate_far_to_hpfar(far, &hpfar))
+			return false;
+	} else {
+		hpfar = read_sysreg(hpfar_el2);
+	}
+
+	vcpu->arch.fault.far_el2 = far;
+	vcpu->arch.fault.hpfar_el2 = hpfar;
+	return true;
+}
+
 static int __hyp_text __guest_run(struct kvm_vcpu *vcpu)
 {
 	struct kvm_cpu_context *host_ctxt;
@@ -102,7 +245,7 @@
 	host_ctxt = kern_hyp_va(vcpu->arch.host_cpu_context);
 	guest_ctxt = &vcpu->arch.ctxt;
 
-	__sysreg_save_state(host_ctxt);
+	__sysreg_save_host_state(host_ctxt);
 	__debug_cond_save_host_state(vcpu);
 
 	__activate_traps(vcpu);
@@ -116,16 +259,20 @@
 	 * to Cortex-A57 erratum #852523.
 	 */
 	__sysreg32_restore_state(vcpu);
-	__sysreg_restore_state(guest_ctxt);
+	__sysreg_restore_guest_state(guest_ctxt);
 	__debug_restore_state(vcpu, kern_hyp_va(vcpu->arch.debug_ptr), guest_ctxt);
 
 	/* Jump in the fire! */
+again:
 	exit_code = __guest_enter(vcpu, host_ctxt);
 	/* And we're baaack! */
 
+	if (exit_code == ARM_EXCEPTION_TRAP && !__populate_fault_info(vcpu))
+		goto again;
+
 	fp_enabled = __fpsimd_enabled();
 
-	__sysreg_save_state(guest_ctxt);
+	__sysreg_save_guest_state(guest_ctxt);
 	__sysreg32_save_state(vcpu);
 	__timer_save_state(vcpu);
 	__vgic_save_state(vcpu);
@@ -133,7 +280,7 @@
 	__deactivate_traps(vcpu);
 	__deactivate_vm(vcpu);
 
-	__sysreg_restore_state(host_ctxt);
+	__sysreg_restore_host_state(host_ctxt);
 
 	if (fp_enabled) {
 		__fpsimd_save_state(&guest_ctxt->gp_regs.fp_regs);
@@ -150,11 +297,34 @@
 
 static const char __hyp_panic_string[] = "HYP panic:\nPS:%08llx PC:%016llx ESR:%08llx\nFAR:%016llx HPFAR:%016llx PAR:%016llx\nVCPU:%p\n";
 
-void __hyp_text __noreturn __hyp_panic(void)
+static void __hyp_text __hyp_call_panic_nvhe(u64 spsr, u64 elr, u64 par)
 {
 	unsigned long str_va = (unsigned long)__hyp_panic_string;
-	u64 spsr = read_sysreg(spsr_el2);
-	u64 elr = read_sysreg(elr_el2);
+
+	__hyp_do_panic(hyp_kern_va(str_va),
+		       spsr,  elr,
+		       read_sysreg(esr_el2),   read_sysreg_el2(far),
+		       read_sysreg(hpfar_el2), par,
+		       (void *)read_sysreg(tpidr_el2));
+}
+
+static void __hyp_text __hyp_call_panic_vhe(u64 spsr, u64 elr, u64 par)
+{
+	panic(__hyp_panic_string,
+	      spsr,  elr,
+	      read_sysreg_el2(esr),   read_sysreg_el2(far),
+	      read_sysreg(hpfar_el2), par,
+	      (void *)read_sysreg(tpidr_el2));
+}
+
+static hyp_alternate_select(__hyp_call_panic,
+			    __hyp_call_panic_nvhe, __hyp_call_panic_vhe,
+			    ARM64_HAS_VIRT_HOST_EXTN);
+
+void __hyp_text __noreturn __hyp_panic(void)
+{
+	u64 spsr = read_sysreg_el2(spsr);
+	u64 elr = read_sysreg_el2(elr);
 	u64 par = read_sysreg(par_el1);
 
 	if (read_sysreg(vttbr_el2)) {
@@ -165,15 +335,11 @@
 		host_ctxt = kern_hyp_va(vcpu->arch.host_cpu_context);
 		__deactivate_traps(vcpu);
 		__deactivate_vm(vcpu);
-		__sysreg_restore_state(host_ctxt);
+		__sysreg_restore_host_state(host_ctxt);
 	}
 
 	/* Call panic for real */
-	__hyp_do_panic(hyp_kern_va(str_va),
-		       spsr,  elr,
-		       read_sysreg(esr_el2),   read_sysreg(far_el2),
-		       read_sysreg(hpfar_el2), par,
-		       (void *)read_sysreg(tpidr_el2));
+	__hyp_call_panic()(spsr, elr, par);
 
 	unreachable();
 }
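
Note on __translate_far_to_hpfar() above: the PAR-to-HPFAR conversion just
repositions PA[47:12] from PAR_EL1[47:12] into the HPFAR_EL2 FIPA field at
bits [39:4]. A worked example (the address is made up and the PAR attribute
bits are ignored):

    u64 par   = 0x8123456000UL;  /* successful translation, PA[47:12] */
    u64 hpfar = ((par >> 12) & ((1UL << 36) - 1)) << 4;   /* 0x81234560 */
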
diff --git a/arch/arm64/kvm/hyp/sysreg-sr.c b/arch/arm64/kvm/hyp/sysreg-sr.c
index 42563098..0f7c40e 100644
--- a/arch/arm64/kvm/hyp/sysreg-sr.c
+++ b/arch/arm64/kvm/hyp/sysreg-sr.c
@@ -19,75 +19,122 @@
 #include <linux/kvm_host.h>
 
 #include <asm/kvm_asm.h>
-#include <asm/kvm_mmu.h>
+#include <asm/kvm_hyp.h>
 
-#include "hyp.h"
+/* Yes, this does nothing, on purpose */
+static void __hyp_text __sysreg_do_nothing(struct kvm_cpu_context *ctxt) { }
 
-/* ctxt is already in the HYP VA space */
-void __hyp_text __sysreg_save_state(struct kvm_cpu_context *ctxt)
+/*
+ * Non-VHE: Both host and guest must save everything.
+ *
+ * VHE: Host must save tpidr*_el[01], actlr_el1, sp0, pc, pstate, and
+ * guest must save everything.
+ */
+
+static void __hyp_text __sysreg_save_common_state(struct kvm_cpu_context *ctxt)
 {
-	ctxt->sys_regs[MPIDR_EL1]	= read_sysreg(vmpidr_el2);
-	ctxt->sys_regs[CSSELR_EL1]	= read_sysreg(csselr_el1);
-	ctxt->sys_regs[SCTLR_EL1]	= read_sysreg(sctlr_el1);
 	ctxt->sys_regs[ACTLR_EL1]	= read_sysreg(actlr_el1);
-	ctxt->sys_regs[CPACR_EL1]	= read_sysreg(cpacr_el1);
-	ctxt->sys_regs[TTBR0_EL1]	= read_sysreg(ttbr0_el1);
-	ctxt->sys_regs[TTBR1_EL1]	= read_sysreg(ttbr1_el1);
-	ctxt->sys_regs[TCR_EL1]		= read_sysreg(tcr_el1);
-	ctxt->sys_regs[ESR_EL1]		= read_sysreg(esr_el1);
-	ctxt->sys_regs[AFSR0_EL1]	= read_sysreg(afsr0_el1);
-	ctxt->sys_regs[AFSR1_EL1]	= read_sysreg(afsr1_el1);
-	ctxt->sys_regs[FAR_EL1]		= read_sysreg(far_el1);
-	ctxt->sys_regs[MAIR_EL1]	= read_sysreg(mair_el1);
-	ctxt->sys_regs[VBAR_EL1]	= read_sysreg(vbar_el1);
-	ctxt->sys_regs[CONTEXTIDR_EL1]	= read_sysreg(contextidr_el1);
 	ctxt->sys_regs[TPIDR_EL0]	= read_sysreg(tpidr_el0);
 	ctxt->sys_regs[TPIDRRO_EL0]	= read_sysreg(tpidrro_el0);
 	ctxt->sys_regs[TPIDR_EL1]	= read_sysreg(tpidr_el1);
-	ctxt->sys_regs[AMAIR_EL1]	= read_sysreg(amair_el1);
-	ctxt->sys_regs[CNTKCTL_EL1]	= read_sysreg(cntkctl_el1);
+	ctxt->gp_regs.regs.sp		= read_sysreg(sp_el0);
+	ctxt->gp_regs.regs.pc		= read_sysreg_el2(elr);
+	ctxt->gp_regs.regs.pstate	= read_sysreg_el2(spsr);
+}
+
+static void __hyp_text __sysreg_save_state(struct kvm_cpu_context *ctxt)
+{
+	ctxt->sys_regs[MPIDR_EL1]	= read_sysreg(vmpidr_el2);
+	ctxt->sys_regs[CSSELR_EL1]	= read_sysreg(csselr_el1);
+	ctxt->sys_regs[SCTLR_EL1]	= read_sysreg_el1(sctlr);
+	ctxt->sys_regs[CPACR_EL1]	= read_sysreg_el1(cpacr);
+	ctxt->sys_regs[TTBR0_EL1]	= read_sysreg_el1(ttbr0);
+	ctxt->sys_regs[TTBR1_EL1]	= read_sysreg_el1(ttbr1);
+	ctxt->sys_regs[TCR_EL1]		= read_sysreg_el1(tcr);
+	ctxt->sys_regs[ESR_EL1]		= read_sysreg_el1(esr);
+	ctxt->sys_regs[AFSR0_EL1]	= read_sysreg_el1(afsr0);
+	ctxt->sys_regs[AFSR1_EL1]	= read_sysreg_el1(afsr1);
+	ctxt->sys_regs[FAR_EL1]		= read_sysreg_el1(far);
+	ctxt->sys_regs[MAIR_EL1]	= read_sysreg_el1(mair);
+	ctxt->sys_regs[VBAR_EL1]	= read_sysreg_el1(vbar);
+	ctxt->sys_regs[CONTEXTIDR_EL1]	= read_sysreg_el1(contextidr);
+	ctxt->sys_regs[AMAIR_EL1]	= read_sysreg_el1(amair);
+	ctxt->sys_regs[CNTKCTL_EL1]	= read_sysreg_el1(cntkctl);
 	ctxt->sys_regs[PAR_EL1]		= read_sysreg(par_el1);
 	ctxt->sys_regs[MDSCR_EL1]	= read_sysreg(mdscr_el1);
 
-	ctxt->gp_regs.regs.sp		= read_sysreg(sp_el0);
-	ctxt->gp_regs.regs.pc		= read_sysreg(elr_el2);
-	ctxt->gp_regs.regs.pstate	= read_sysreg(spsr_el2);
 	ctxt->gp_regs.sp_el1		= read_sysreg(sp_el1);
-	ctxt->gp_regs.elr_el1		= read_sysreg(elr_el1);
-	ctxt->gp_regs.spsr[KVM_SPSR_EL1]= read_sysreg(spsr_el1);
+	ctxt->gp_regs.elr_el1		= read_sysreg_el1(elr);
+	ctxt->gp_regs.spsr[KVM_SPSR_EL1]= read_sysreg_el1(spsr);
 }
 
-void __hyp_text __sysreg_restore_state(struct kvm_cpu_context *ctxt)
+static hyp_alternate_select(__sysreg_call_save_host_state,
+			    __sysreg_save_state, __sysreg_do_nothing,
+			    ARM64_HAS_VIRT_HOST_EXTN);
+
+void __hyp_text __sysreg_save_host_state(struct kvm_cpu_context *ctxt)
 {
-	write_sysreg(ctxt->sys_regs[MPIDR_EL1],	  vmpidr_el2);
-	write_sysreg(ctxt->sys_regs[CSSELR_EL1],  csselr_el1);
-	write_sysreg(ctxt->sys_regs[SCTLR_EL1],	  sctlr_el1);
+	__sysreg_call_save_host_state()(ctxt);
+	__sysreg_save_common_state(ctxt);
+}
+
+void __hyp_text __sysreg_save_guest_state(struct kvm_cpu_context *ctxt)
+{
+	__sysreg_save_state(ctxt);
+	__sysreg_save_common_state(ctxt);
+}
+
+static void __hyp_text __sysreg_restore_common_state(struct kvm_cpu_context *ctxt)
+{
 	write_sysreg(ctxt->sys_regs[ACTLR_EL1],	  actlr_el1);
-	write_sysreg(ctxt->sys_regs[CPACR_EL1],	  cpacr_el1);
-	write_sysreg(ctxt->sys_regs[TTBR0_EL1],	  ttbr0_el1);
-	write_sysreg(ctxt->sys_regs[TTBR1_EL1],	  ttbr1_el1);
-	write_sysreg(ctxt->sys_regs[TCR_EL1],	  tcr_el1);
-	write_sysreg(ctxt->sys_regs[ESR_EL1],	  esr_el1);
-	write_sysreg(ctxt->sys_regs[AFSR0_EL1],	  afsr0_el1);
-	write_sysreg(ctxt->sys_regs[AFSR1_EL1],	  afsr1_el1);
-	write_sysreg(ctxt->sys_regs[FAR_EL1],	  far_el1);
-	write_sysreg(ctxt->sys_regs[MAIR_EL1],	  mair_el1);
-	write_sysreg(ctxt->sys_regs[VBAR_EL1],	  vbar_el1);
-	write_sysreg(ctxt->sys_regs[CONTEXTIDR_EL1], contextidr_el1);
 	write_sysreg(ctxt->sys_regs[TPIDR_EL0],	  tpidr_el0);
 	write_sysreg(ctxt->sys_regs[TPIDRRO_EL0], tpidrro_el0);
 	write_sysreg(ctxt->sys_regs[TPIDR_EL1],	  tpidr_el1);
-	write_sysreg(ctxt->sys_regs[AMAIR_EL1],	  amair_el1);
-	write_sysreg(ctxt->sys_regs[CNTKCTL_EL1], cntkctl_el1);
-	write_sysreg(ctxt->sys_regs[PAR_EL1],	  par_el1);
-	write_sysreg(ctxt->sys_regs[MDSCR_EL1],	  mdscr_el1);
+	write_sysreg(ctxt->gp_regs.regs.sp,	  sp_el0);
+	write_sysreg_el2(ctxt->gp_regs.regs.pc,	  elr);
+	write_sysreg_el2(ctxt->gp_regs.regs.pstate, spsr);
+}
 
-	write_sysreg(ctxt->gp_regs.regs.sp,	sp_el0);
-	write_sysreg(ctxt->gp_regs.regs.pc,	elr_el2);
-	write_sysreg(ctxt->gp_regs.regs.pstate,	spsr_el2);
-	write_sysreg(ctxt->gp_regs.sp_el1,	sp_el1);
-	write_sysreg(ctxt->gp_regs.elr_el1,	elr_el1);
-	write_sysreg(ctxt->gp_regs.spsr[KVM_SPSR_EL1], spsr_el1);
+static void __hyp_text __sysreg_restore_state(struct kvm_cpu_context *ctxt)
+{
+	write_sysreg(ctxt->sys_regs[MPIDR_EL1],		vmpidr_el2);
+	write_sysreg(ctxt->sys_regs[CSSELR_EL1],	csselr_el1);
+	write_sysreg_el1(ctxt->sys_regs[SCTLR_EL1],	sctlr);
+	write_sysreg_el1(ctxt->sys_regs[CPACR_EL1],	cpacr);
+	write_sysreg_el1(ctxt->sys_regs[TTBR0_EL1],	ttbr0);
+	write_sysreg_el1(ctxt->sys_regs[TTBR1_EL1],	ttbr1);
+	write_sysreg_el1(ctxt->sys_regs[TCR_EL1],	tcr);
+	write_sysreg_el1(ctxt->sys_regs[ESR_EL1],	esr);
+	write_sysreg_el1(ctxt->sys_regs[AFSR0_EL1],	afsr0);
+	write_sysreg_el1(ctxt->sys_regs[AFSR1_EL1],	afsr1);
+	write_sysreg_el1(ctxt->sys_regs[FAR_EL1],	far);
+	write_sysreg_el1(ctxt->sys_regs[MAIR_EL1],	mair);
+	write_sysreg_el1(ctxt->sys_regs[VBAR_EL1],	vbar);
+	write_sysreg_el1(ctxt->sys_regs[CONTEXTIDR_EL1],contextidr);
+	write_sysreg_el1(ctxt->sys_regs[AMAIR_EL1],	amair);
+	write_sysreg_el1(ctxt->sys_regs[CNTKCTL_EL1], 	cntkctl);
+	write_sysreg(ctxt->sys_regs[PAR_EL1],		par_el1);
+	write_sysreg(ctxt->sys_regs[MDSCR_EL1],		mdscr_el1);
+
+	write_sysreg(ctxt->gp_regs.sp_el1,		sp_el1);
+	write_sysreg_el1(ctxt->gp_regs.elr_el1,		elr);
+	write_sysreg_el1(ctxt->gp_regs.spsr[KVM_SPSR_EL1],spsr);
+}
+
+static hyp_alternate_select(__sysreg_call_restore_host_state,
+			    __sysreg_restore_state, __sysreg_do_nothing,
+			    ARM64_HAS_VIRT_HOST_EXTN);
+
+void __hyp_text __sysreg_restore_host_state(struct kvm_cpu_context *ctxt)
+{
+	__sysreg_call_restore_host_state()(ctxt);
+	__sysreg_restore_common_state(ctxt);
+}
+
+void __hyp_text __sysreg_restore_guest_state(struct kvm_cpu_context *ctxt)
+{
+	__sysreg_restore_state(ctxt);
+	__sysreg_restore_common_state(ctxt);
 }
 
 void __hyp_text __sysreg32_save_state(struct kvm_vcpu *vcpu)
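Editor's note: the __sysreg_save_host_state()/__sysreg_restore_host_state() split above relies on hyp_alternate_select() resolving to either the full EL1 save/restore or a no-op, keyed on ARM64_HAS_VIRT_HOST_EXTN (VHE) — the idea being that a VHE host, running at EL2, does not need its EL1 context shuffled around a guest run. The following is a minimal userspace sketch of that selection idea only, modelled with a plain function pointer instead of the kernel's runtime-patched hyp_alternate_select() macro; struct cpu_context, save_full() and save_nothing() are hypothetical stand-ins, not kernel symbols.

#include <stdbool.h>
#include <stdio.h>

struct cpu_context { unsigned long sys_regs[4]; };

static void save_full(struct cpu_context *ctxt)
{
	/* Stand-in for reading SCTLR_EL1, TCR_EL1, ... into the context. */
	ctxt->sys_regs[0] = 0x30d00800UL;
	printf("full EL1 state saved\n");
}

static void save_nothing(struct cpu_context *ctxt)
{
	/* With VHE the host lives at EL2; its EL1 state is not in use. */
	(void)ctxt;
	printf("nothing to save\n");
}

static void (*save_host_state)(struct cpu_context *);

int main(void)
{
	bool has_vhe = true;	/* would come from a CPU feature check */
	struct cpu_context host = { { 0 } };

	save_host_state = has_vhe ? save_nothing : save_full;
	save_host_state(&host);
	return 0;
}

The kernel, of course, patches the call site once at boot rather than taking an indirect call on every world switch; the function pointer here only models the choice, not the mechanism.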
diff --git a/arch/arm64/kvm/hyp/tlb.c b/arch/arm64/kvm/hyp/tlb.c
index 2a7e0d8..be8177c 100644
--- a/arch/arm64/kvm/hyp/tlb.c
+++ b/arch/arm64/kvm/hyp/tlb.c
@@ -15,7 +15,7 @@
  * along with this program.  If not, see <http://www.gnu.org/licenses/>.
  */
 
-#include "hyp.h"
+#include <asm/kvm_hyp.h>
 
 static void __hyp_text __tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
 {
diff --git a/arch/arm64/kvm/hyp/vgic-v2-sr.c b/arch/arm64/kvm/hyp/vgic-v2-sr.c
deleted file mode 100644
index e717612..0000000
--- a/arch/arm64/kvm/hyp/vgic-v2-sr.c
+++ /dev/null
@@ -1,84 +0,0 @@
-/*
- * Copyright (C) 2012-2015 - ARM Ltd
- * Author: Marc Zyngier <marc.zyngier@arm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program.  If not, see <http://www.gnu.org/licenses/>.
- */
-
-#include <linux/compiler.h>
-#include <linux/irqchip/arm-gic.h>
-#include <linux/kvm_host.h>
-
-#include <asm/kvm_mmu.h>
-
-#include "hyp.h"
-
-/* vcpu is already in the HYP VA space */
-void __hyp_text __vgic_v2_save_state(struct kvm_vcpu *vcpu)
-{
-	struct kvm *kvm = kern_hyp_va(vcpu->kvm);
-	struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;
-	struct vgic_dist *vgic = &kvm->arch.vgic;
-	void __iomem *base = kern_hyp_va(vgic->vctrl_base);
-	u32 eisr0, eisr1, elrsr0, elrsr1;
-	int i, nr_lr;
-
-	if (!base)
-		return;
-
-	nr_lr = vcpu->arch.vgic_cpu.nr_lr;
-	cpu_if->vgic_vmcr = readl_relaxed(base + GICH_VMCR);
-	cpu_if->vgic_misr = readl_relaxed(base + GICH_MISR);
-	eisr0  = readl_relaxed(base + GICH_EISR0);
-	elrsr0 = readl_relaxed(base + GICH_ELRSR0);
-	if (unlikely(nr_lr > 32)) {
-		eisr1  = readl_relaxed(base + GICH_EISR1);
-		elrsr1 = readl_relaxed(base + GICH_ELRSR1);
-	} else {
-		eisr1 = elrsr1 = 0;
-	}
-#ifdef CONFIG_CPU_BIG_ENDIAN
-	cpu_if->vgic_eisr  = ((u64)eisr0 << 32) | eisr1;
-	cpu_if->vgic_elrsr = ((u64)elrsr0 << 32) | elrsr1;
-#else
-	cpu_if->vgic_eisr  = ((u64)eisr1 << 32) | eisr0;
-	cpu_if->vgic_elrsr = ((u64)elrsr1 << 32) | elrsr0;
-#endif
-	cpu_if->vgic_apr    = readl_relaxed(base + GICH_APR);
-
-	writel_relaxed(0, base + GICH_HCR);
-
-	for (i = 0; i < nr_lr; i++)
-		cpu_if->vgic_lr[i] = readl_relaxed(base + GICH_LR0 + (i * 4));
-}
-
-/* vcpu is already in the HYP VA space */
-void __hyp_text __vgic_v2_restore_state(struct kvm_vcpu *vcpu)
-{
-	struct kvm *kvm = kern_hyp_va(vcpu->kvm);
-	struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;
-	struct vgic_dist *vgic = &kvm->arch.vgic;
-	void __iomem *base = kern_hyp_va(vgic->vctrl_base);
-	int i, nr_lr;
-
-	if (!base)
-		return;
-
-	writel_relaxed(cpu_if->vgic_hcr, base + GICH_HCR);
-	writel_relaxed(cpu_if->vgic_vmcr, base + GICH_VMCR);
-	writel_relaxed(cpu_if->vgic_apr, base + GICH_APR);
-
-	nr_lr = vcpu->arch.vgic_cpu.nr_lr;
-	for (i = 0; i < nr_lr; i++)
-		writel_relaxed(cpu_if->vgic_lr[i], base + GICH_LR0 + (i * 4));
-}
diff --git a/arch/arm64/kvm/hyp/vgic-v3-sr.c b/arch/arm64/kvm/hyp/vgic-v3-sr.c
index 5dd2a26..fff7cd4 100644
--- a/arch/arm64/kvm/hyp/vgic-v3-sr.c
+++ b/arch/arm64/kvm/hyp/vgic-v3-sr.c
@@ -19,9 +19,7 @@
 #include <linux/irqchip/arm-gic-v3.h>
 #include <linux/kvm_host.h>
 
-#include <asm/kvm_mmu.h>
-
-#include "hyp.h"
+#include <asm/kvm_hyp.h>
 
 #define vtr_to_max_lr_idx(v)		((v) & 0xf)
 #define vtr_to_nr_pri_bits(v)		(((u32)(v) >> 29) + 1)
@@ -39,12 +37,133 @@
 		asm volatile("msr_s " __stringify(r) ", %0" : : "r" (__val));\
 	} while (0)
 
-/* vcpu is already in the HYP VA space */
+static u64 __hyp_text __gic_v3_get_lr(unsigned int lr)
+{
+	switch (lr & 0xf) {
+	case 0:
+		return read_gicreg(ICH_LR0_EL2);
+	case 1:
+		return read_gicreg(ICH_LR1_EL2);
+	case 2:
+		return read_gicreg(ICH_LR2_EL2);
+	case 3:
+		return read_gicreg(ICH_LR3_EL2);
+	case 4:
+		return read_gicreg(ICH_LR4_EL2);
+	case 5:
+		return read_gicreg(ICH_LR5_EL2);
+	case 6:
+		return read_gicreg(ICH_LR6_EL2);
+	case 7:
+		return read_gicreg(ICH_LR7_EL2);
+	case 8:
+		return read_gicreg(ICH_LR8_EL2);
+	case 9:
+		return read_gicreg(ICH_LR9_EL2);
+	case 10:
+		return read_gicreg(ICH_LR10_EL2);
+	case 11:
+		return read_gicreg(ICH_LR11_EL2);
+	case 12:
+		return read_gicreg(ICH_LR12_EL2);
+	case 13:
+		return read_gicreg(ICH_LR13_EL2);
+	case 14:
+		return read_gicreg(ICH_LR14_EL2);
+	case 15:
+		return read_gicreg(ICH_LR15_EL2);
+	}
+
+	unreachable();
+}
+
+static void __hyp_text __gic_v3_set_lr(u64 val, int lr)
+{
+	switch (lr & 0xf) {
+	case 0:
+		write_gicreg(val, ICH_LR0_EL2);
+		break;
+	case 1:
+		write_gicreg(val, ICH_LR1_EL2);
+		break;
+	case 2:
+		write_gicreg(val, ICH_LR2_EL2);
+		break;
+	case 3:
+		write_gicreg(val, ICH_LR3_EL2);
+		break;
+	case 4:
+		write_gicreg(val, ICH_LR4_EL2);
+		break;
+	case 5:
+		write_gicreg(val, ICH_LR5_EL2);
+		break;
+	case 6:
+		write_gicreg(val, ICH_LR6_EL2);
+		break;
+	case 7:
+		write_gicreg(val, ICH_LR7_EL2);
+		break;
+	case 8:
+		write_gicreg(val, ICH_LR8_EL2);
+		break;
+	case 9:
+		write_gicreg(val, ICH_LR9_EL2);
+		break;
+	case 10:
+		write_gicreg(val, ICH_LR10_EL2);
+		break;
+	case 11:
+		write_gicreg(val, ICH_LR11_EL2);
+		break;
+	case 12:
+		write_gicreg(val, ICH_LR12_EL2);
+		break;
+	case 13:
+		write_gicreg(val, ICH_LR13_EL2);
+		break;
+	case 14:
+		write_gicreg(val, ICH_LR14_EL2);
+		break;
+	case 15:
+		write_gicreg(val, ICH_LR15_EL2);
+		break;
+	}
+}
+
+static void __hyp_text save_maint_int_state(struct kvm_vcpu *vcpu, int nr_lr)
+{
+	struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
+	int i;
+	bool expect_mi;
+
+	expect_mi = !!(cpu_if->vgic_hcr & ICH_HCR_UIE);
+
+	for (i = 0; i < nr_lr; i++) {
+		if (!(vcpu->arch.vgic_cpu.live_lrs & (1UL << i)))
+				continue;
+
+		expect_mi |= (!(cpu_if->vgic_lr[i] & ICH_LR_HW) &&
+			      (cpu_if->vgic_lr[i] & ICH_LR_EOI));
+	}
+
+	if (expect_mi) {
+		cpu_if->vgic_misr  = read_gicreg(ICH_MISR_EL2);
+
+		if (cpu_if->vgic_misr & ICH_MISR_EOI)
+			cpu_if->vgic_eisr = read_gicreg(ICH_EISR_EL2);
+		else
+			cpu_if->vgic_eisr = 0;
+	} else {
+		cpu_if->vgic_misr = 0;
+		cpu_if->vgic_eisr = 0;
+	}
+}
+
 void __hyp_text __vgic_v3_save_state(struct kvm_vcpu *vcpu)
 {
 	struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
 	u64 val;
-	u32 max_lr_idx, nr_pri_bits;
 
 	/*
 	 * Make sure stores to the GIC via the memory mapped interface
@@ -53,68 +172,66 @@
 	dsb(st);
 
 	cpu_if->vgic_vmcr  = read_gicreg(ICH_VMCR_EL2);
-	cpu_if->vgic_misr  = read_gicreg(ICH_MISR_EL2);
-	cpu_if->vgic_eisr  = read_gicreg(ICH_EISR_EL2);
-	cpu_if->vgic_elrsr = read_gicreg(ICH_ELSR_EL2);
 
-	write_gicreg(0, ICH_HCR_EL2);
-	val = read_gicreg(ICH_VTR_EL2);
-	max_lr_idx = vtr_to_max_lr_idx(val);
-	nr_pri_bits = vtr_to_nr_pri_bits(val);
+	if (vcpu->arch.vgic_cpu.live_lrs) {
+		int i;
+		u32 max_lr_idx, nr_pri_bits;
 
-	switch (max_lr_idx) {
-	case 15:
-		cpu_if->vgic_lr[VGIC_V3_LR_INDEX(15)] = read_gicreg(ICH_LR15_EL2);
-	case 14:
-		cpu_if->vgic_lr[VGIC_V3_LR_INDEX(14)] = read_gicreg(ICH_LR14_EL2);
-	case 13:
-		cpu_if->vgic_lr[VGIC_V3_LR_INDEX(13)] = read_gicreg(ICH_LR13_EL2);
-	case 12:
-		cpu_if->vgic_lr[VGIC_V3_LR_INDEX(12)] = read_gicreg(ICH_LR12_EL2);
-	case 11:
-		cpu_if->vgic_lr[VGIC_V3_LR_INDEX(11)] = read_gicreg(ICH_LR11_EL2);
-	case 10:
-		cpu_if->vgic_lr[VGIC_V3_LR_INDEX(10)] = read_gicreg(ICH_LR10_EL2);
-	case 9:
-		cpu_if->vgic_lr[VGIC_V3_LR_INDEX(9)] = read_gicreg(ICH_LR9_EL2);
-	case 8:
-		cpu_if->vgic_lr[VGIC_V3_LR_INDEX(8)] = read_gicreg(ICH_LR8_EL2);
-	case 7:
-		cpu_if->vgic_lr[VGIC_V3_LR_INDEX(7)] = read_gicreg(ICH_LR7_EL2);
-	case 6:
-		cpu_if->vgic_lr[VGIC_V3_LR_INDEX(6)] = read_gicreg(ICH_LR6_EL2);
-	case 5:
-		cpu_if->vgic_lr[VGIC_V3_LR_INDEX(5)] = read_gicreg(ICH_LR5_EL2);
-	case 4:
-		cpu_if->vgic_lr[VGIC_V3_LR_INDEX(4)] = read_gicreg(ICH_LR4_EL2);
-	case 3:
-		cpu_if->vgic_lr[VGIC_V3_LR_INDEX(3)] = read_gicreg(ICH_LR3_EL2);
-	case 2:
-		cpu_if->vgic_lr[VGIC_V3_LR_INDEX(2)] = read_gicreg(ICH_LR2_EL2);
-	case 1:
-		cpu_if->vgic_lr[VGIC_V3_LR_INDEX(1)] = read_gicreg(ICH_LR1_EL2);
-	case 0:
-		cpu_if->vgic_lr[VGIC_V3_LR_INDEX(0)] = read_gicreg(ICH_LR0_EL2);
-	}
+		cpu_if->vgic_elrsr = read_gicreg(ICH_ELSR_EL2);
 
-	switch (nr_pri_bits) {
-	case 7:
-		cpu_if->vgic_ap0r[3] = read_gicreg(ICH_AP0R3_EL2);
-		cpu_if->vgic_ap0r[2] = read_gicreg(ICH_AP0R2_EL2);
-	case 6:
-		cpu_if->vgic_ap0r[1] = read_gicreg(ICH_AP0R1_EL2);
-	default:
-		cpu_if->vgic_ap0r[0] = read_gicreg(ICH_AP0R0_EL2);
-	}
+		write_gicreg(0, ICH_HCR_EL2);
+		val = read_gicreg(ICH_VTR_EL2);
+		max_lr_idx = vtr_to_max_lr_idx(val);
+		nr_pri_bits = vtr_to_nr_pri_bits(val);
 
-	switch (nr_pri_bits) {
-	case 7:
-		cpu_if->vgic_ap1r[3] = read_gicreg(ICH_AP1R3_EL2);
-		cpu_if->vgic_ap1r[2] = read_gicreg(ICH_AP1R2_EL2);
-	case 6:
-		cpu_if->vgic_ap1r[1] = read_gicreg(ICH_AP1R1_EL2);
-	default:
-		cpu_if->vgic_ap1r[0] = read_gicreg(ICH_AP1R0_EL2);
+		save_maint_int_state(vcpu, max_lr_idx + 1);
+
+		for (i = 0; i <= max_lr_idx; i++) {
+			if (!(vcpu->arch.vgic_cpu.live_lrs & (1UL << i)))
+				continue;
+
+			if (cpu_if->vgic_elrsr & (1 << i)) {
+				cpu_if->vgic_lr[i] &= ~ICH_LR_STATE;
+				continue;
+			}
+
+			cpu_if->vgic_lr[i] = __gic_v3_get_lr(i);
+			__gic_v3_set_lr(0, i);
+		}
+
+		switch (nr_pri_bits) {
+		case 7:
+			cpu_if->vgic_ap0r[3] = read_gicreg(ICH_AP0R3_EL2);
+			cpu_if->vgic_ap0r[2] = read_gicreg(ICH_AP0R2_EL2);
+		case 6:
+			cpu_if->vgic_ap0r[1] = read_gicreg(ICH_AP0R1_EL2);
+		default:
+			cpu_if->vgic_ap0r[0] = read_gicreg(ICH_AP0R0_EL2);
+		}
+
+		switch (nr_pri_bits) {
+		case 7:
+			cpu_if->vgic_ap1r[3] = read_gicreg(ICH_AP1R3_EL2);
+			cpu_if->vgic_ap1r[2] = read_gicreg(ICH_AP1R2_EL2);
+		case 6:
+			cpu_if->vgic_ap1r[1] = read_gicreg(ICH_AP1R1_EL2);
+		default:
+			cpu_if->vgic_ap1r[0] = read_gicreg(ICH_AP1R0_EL2);
+		}
+
+		vcpu->arch.vgic_cpu.live_lrs = 0;
+	} else {
+		cpu_if->vgic_misr  = 0;
+		cpu_if->vgic_eisr  = 0;
+		cpu_if->vgic_elrsr = 0xffff;
+		cpu_if->vgic_ap0r[0] = 0;
+		cpu_if->vgic_ap0r[1] = 0;
+		cpu_if->vgic_ap0r[2] = 0;
+		cpu_if->vgic_ap0r[3] = 0;
+		cpu_if->vgic_ap1r[0] = 0;
+		cpu_if->vgic_ap1r[1] = 0;
+		cpu_if->vgic_ap1r[2] = 0;
+		cpu_if->vgic_ap1r[3] = 0;
 	}
 
 	val = read_gicreg(ICC_SRE_EL2);
@@ -128,6 +245,8 @@
 	struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
 	u64 val;
 	u32 max_lr_idx, nr_pri_bits;
+	u16 live_lrs = 0;
+	int i;
 
 	/*
 	 * VFIQEn is RES1 if ICC_SRE_EL1.SRE is 1. This causes a
@@ -140,66 +259,46 @@
 	write_gicreg(cpu_if->vgic_sre, ICC_SRE_EL1);
 	isb();
 
-	write_gicreg(cpu_if->vgic_hcr, ICH_HCR_EL2);
-	write_gicreg(cpu_if->vgic_vmcr, ICH_VMCR_EL2);
-
 	val = read_gicreg(ICH_VTR_EL2);
 	max_lr_idx = vtr_to_max_lr_idx(val);
 	nr_pri_bits = vtr_to_nr_pri_bits(val);
 
-	switch (nr_pri_bits) {
-	case 7:
-		 write_gicreg(cpu_if->vgic_ap0r[3], ICH_AP0R3_EL2);
-		 write_gicreg(cpu_if->vgic_ap0r[2], ICH_AP0R2_EL2);
-	case 6:
-		 write_gicreg(cpu_if->vgic_ap0r[1], ICH_AP0R1_EL2);
-	default:
-		 write_gicreg(cpu_if->vgic_ap0r[0], ICH_AP0R0_EL2);
+	for (i = 0; i <= max_lr_idx; i++) {
+		if (cpu_if->vgic_lr[i] & ICH_LR_STATE)
+			live_lrs |= (1 << i);
 	}
 
-	switch (nr_pri_bits) {
-	case 7:
-		 write_gicreg(cpu_if->vgic_ap1r[3], ICH_AP1R3_EL2);
-		 write_gicreg(cpu_if->vgic_ap1r[2], ICH_AP1R2_EL2);
-	case 6:
-		 write_gicreg(cpu_if->vgic_ap1r[1], ICH_AP1R1_EL2);
-	default:
-		 write_gicreg(cpu_if->vgic_ap1r[0], ICH_AP1R0_EL2);
-	}
+	write_gicreg(cpu_if->vgic_vmcr, ICH_VMCR_EL2);
 
-	switch (max_lr_idx) {
-	case 15:
-		write_gicreg(cpu_if->vgic_lr[VGIC_V3_LR_INDEX(15)], ICH_LR15_EL2);
-	case 14:
-		write_gicreg(cpu_if->vgic_lr[VGIC_V3_LR_INDEX(14)], ICH_LR14_EL2);
-	case 13:
-		write_gicreg(cpu_if->vgic_lr[VGIC_V3_LR_INDEX(13)], ICH_LR13_EL2);
-	case 12:
-		write_gicreg(cpu_if->vgic_lr[VGIC_V3_LR_INDEX(12)], ICH_LR12_EL2);
-	case 11:
-		write_gicreg(cpu_if->vgic_lr[VGIC_V3_LR_INDEX(11)], ICH_LR11_EL2);
-	case 10:
-		write_gicreg(cpu_if->vgic_lr[VGIC_V3_LR_INDEX(10)], ICH_LR10_EL2);
-	case 9:
-		write_gicreg(cpu_if->vgic_lr[VGIC_V3_LR_INDEX(9)], ICH_LR9_EL2);
-	case 8:
-		write_gicreg(cpu_if->vgic_lr[VGIC_V3_LR_INDEX(8)], ICH_LR8_EL2);
-	case 7:
-		write_gicreg(cpu_if->vgic_lr[VGIC_V3_LR_INDEX(7)], ICH_LR7_EL2);
-	case 6:
-		write_gicreg(cpu_if->vgic_lr[VGIC_V3_LR_INDEX(6)], ICH_LR6_EL2);
-	case 5:
-		write_gicreg(cpu_if->vgic_lr[VGIC_V3_LR_INDEX(5)], ICH_LR5_EL2);
-	case 4:
-		write_gicreg(cpu_if->vgic_lr[VGIC_V3_LR_INDEX(4)], ICH_LR4_EL2);
-	case 3:
-		write_gicreg(cpu_if->vgic_lr[VGIC_V3_LR_INDEX(3)], ICH_LR3_EL2);
-	case 2:
-		write_gicreg(cpu_if->vgic_lr[VGIC_V3_LR_INDEX(2)], ICH_LR2_EL2);
-	case 1:
-		write_gicreg(cpu_if->vgic_lr[VGIC_V3_LR_INDEX(1)], ICH_LR1_EL2);
-	case 0:
-		write_gicreg(cpu_if->vgic_lr[VGIC_V3_LR_INDEX(0)], ICH_LR0_EL2);
+	if (live_lrs) {
+		write_gicreg(cpu_if->vgic_hcr, ICH_HCR_EL2);
+
+		switch (nr_pri_bits) {
+		case 7:
+			write_gicreg(cpu_if->vgic_ap0r[3], ICH_AP0R3_EL2);
+			write_gicreg(cpu_if->vgic_ap0r[2], ICH_AP0R2_EL2);
+		case 6:
+			write_gicreg(cpu_if->vgic_ap0r[1], ICH_AP0R1_EL2);
+		default:
+			write_gicreg(cpu_if->vgic_ap0r[0], ICH_AP0R0_EL2);
+		}
+
+		switch (nr_pri_bits) {
+		case 7:
+			write_gicreg(cpu_if->vgic_ap1r[3], ICH_AP1R3_EL2);
+			write_gicreg(cpu_if->vgic_ap1r[2], ICH_AP1R2_EL2);
+		case 6:
+			write_gicreg(cpu_if->vgic_ap1r[1], ICH_AP1R1_EL2);
+		default:
+			write_gicreg(cpu_if->vgic_ap1r[0], ICH_AP1R0_EL2);
+		}
+
+		for (i = 0; i <= max_lr_idx; i++) {
+			if (!(live_lrs & (1 << i)))
+				continue;
+
+			__gic_v3_set_lr(cpu_if->vgic_lr[i], i);
+		}
 	}
 
 	/*
@@ -209,6 +308,7 @@
 	 */
 	isb();
 	dsb(sy);
+	vcpu->arch.vgic_cpu.live_lrs = live_lrs;
 
 	/*
 	 * Prevent the guest from touching the GIC system registers if
@@ -220,6 +320,15 @@
 	}
 }
 
+void __hyp_text __vgic_v3_init_lrs(void)
+{
+	int max_lr_idx = vtr_to_max_lr_idx(read_gicreg(ICH_VTR_EL2));
+	int i;
+
+	for (i = 0; i <= max_lr_idx; i++)
+		__gic_v3_set_lr(0, i);
+}
+
 static u64 __hyp_text __vgic_v3_read_ich_vtr_el2(void)
 {
 	return read_gicreg(ICH_VTR_EL2);
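Editor's note: the reworked vgic-v3 save/restore above avoids touching list registers that carry no state, tracking them in a live_lrs bitmask built from the ICH_LR state bits. A hedged sketch of just that bookkeeping follows; LR_STATE_MASK and the lr[] array are illustrative stand-ins (in the kernel the mask is ICH_LR_STATE and the values come from ICH_LRn_EL2).

#include <stdint.h>
#include <stdio.h>

#define LR_STATE_MASK	(3ULL << 62)	/* pending/active state bits of an LR */

static uint16_t compute_live_lrs(const uint64_t *lr, int max_lr_idx)
{
	uint16_t live = 0;
	int i;

	/* Only LRs with a non-empty state need to be written back to hardware. */
	for (i = 0; i <= max_lr_idx; i++)
		if (lr[i] & LR_STATE_MASK)
			live |= 1U << i;

	return live;
}

int main(void)
{
	uint64_t lr[4] = { 0, 1ULL << 62, 0, 2ULL << 62 };

	/* LRs 1 and 3 carry state, so this prints 0xa. */
	printf("live_lrs = 0x%x\n", (unsigned int)compute_live_lrs(lr, 3));
	return 0;
}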
diff --git a/arch/arm64/kvm/reset.c b/arch/arm64/kvm/reset.c
index f34745c..9677bf0 100644
--- a/arch/arm64/kvm/reset.c
+++ b/arch/arm64/kvm/reset.c
@@ -77,7 +77,11 @@
 	case KVM_CAP_GUEST_DEBUG_HW_WPS:
 		r = get_num_wrps();
 		break;
+	case KVM_CAP_ARM_PMU_V3:
+		r = kvm_arm_support_pmu_v3();
+		break;
 	case KVM_CAP_SET_GUEST_DEBUG:
+	case KVM_CAP_VCPU_ATTRIBUTES:
 		r = 1;
 		break;
 	default:
@@ -120,6 +124,9 @@
 	/* Reset system registers */
 	kvm_reset_sys_regs(vcpu);
 
+	/* Reset PMU */
+	kvm_pmu_vcpu_reset(vcpu);
+
 	/* Reset timer */
 	return kvm_timer_vcpu_reset(vcpu, cpu_vtimer_irq);
 }
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index 2e90371..61ba591 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -20,6 +20,7 @@
  * along with this program.  If not, see <http://www.gnu.org/licenses/>.
  */
 
+#include <linux/bsearch.h>
 #include <linux/kvm_host.h>
 #include <linux/mm.h>
 #include <linux/uaccess.h>
@@ -34,6 +35,7 @@
 #include <asm/kvm_emulate.h>
 #include <asm/kvm_host.h>
 #include <asm/kvm_mmu.h>
+#include <asm/perf_event.h>
 
 #include <trace/events/kvm.h>
 
@@ -439,6 +441,344 @@
 	vcpu_sys_reg(vcpu, MPIDR_EL1) = (1ULL << 31) | mpidr;
 }
 
+static void reset_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
+{
+	u64 pmcr, val;
+
+	asm volatile("mrs %0, pmcr_el0\n" : "=r" (pmcr));
+	/* Writable bits of PMCR_EL0 (ARMV8_PMU_PMCR_MASK) are reset to UNKNOWN,
+	 * except PMCR.E, which resets to zero.
+	 */
+	val = ((pmcr & ~ARMV8_PMU_PMCR_MASK)
+	       | (ARMV8_PMU_PMCR_MASK & 0xdecafbad)) & (~ARMV8_PMU_PMCR_E);
+	vcpu_sys_reg(vcpu, PMCR_EL0) = val;
+}
+
+static bool pmu_access_el0_disabled(struct kvm_vcpu *vcpu)
+{
+	u64 reg = vcpu_sys_reg(vcpu, PMUSERENR_EL0);
+
+	return !((reg & ARMV8_PMU_USERENR_EN) || vcpu_mode_priv(vcpu));
+}
+
+static bool pmu_write_swinc_el0_disabled(struct kvm_vcpu *vcpu)
+{
+	u64 reg = vcpu_sys_reg(vcpu, PMUSERENR_EL0);
+
+	return !((reg & (ARMV8_PMU_USERENR_SW | ARMV8_PMU_USERENR_EN))
+		 || vcpu_mode_priv(vcpu));
+}
+
+static bool pmu_access_cycle_counter_el0_disabled(struct kvm_vcpu *vcpu)
+{
+	u64 reg = vcpu_sys_reg(vcpu, PMUSERENR_EL0);
+
+	return !((reg & (ARMV8_PMU_USERENR_CR | ARMV8_PMU_USERENR_EN))
+		 || vcpu_mode_priv(vcpu));
+}
+
+static bool pmu_access_event_counter_el0_disabled(struct kvm_vcpu *vcpu)
+{
+	u64 reg = vcpu_sys_reg(vcpu, PMUSERENR_EL0);
+
+	return !((reg & (ARMV8_PMU_USERENR_ER | ARMV8_PMU_USERENR_EN))
+		 || vcpu_mode_priv(vcpu));
+}
+
+static bool access_pmcr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
+			const struct sys_reg_desc *r)
+{
+	u64 val;
+
+	if (!kvm_arm_pmu_v3_ready(vcpu))
+		return trap_raz_wi(vcpu, p, r);
+
+	if (pmu_access_el0_disabled(vcpu))
+		return false;
+
+	if (p->is_write) {
+		/* Only update writeable bits of PMCR */
+		val = vcpu_sys_reg(vcpu, PMCR_EL0);
+		val &= ~ARMV8_PMU_PMCR_MASK;
+		val |= p->regval & ARMV8_PMU_PMCR_MASK;
+		vcpu_sys_reg(vcpu, PMCR_EL0) = val;
+		kvm_pmu_handle_pmcr(vcpu, val);
+	} else {
+		/* PMCR.P & PMCR.C are RAZ */
+		val = vcpu_sys_reg(vcpu, PMCR_EL0)
+		      & ~(ARMV8_PMU_PMCR_P | ARMV8_PMU_PMCR_C);
+		p->regval = val;
+	}
+
+	return true;
+}
+
+static bool access_pmselr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
+			  const struct sys_reg_desc *r)
+{
+	if (!kvm_arm_pmu_v3_ready(vcpu))
+		return trap_raz_wi(vcpu, p, r);
+
+	if (pmu_access_event_counter_el0_disabled(vcpu))
+		return false;
+
+	if (p->is_write)
+		vcpu_sys_reg(vcpu, PMSELR_EL0) = p->regval;
+	else
+		/* return PMSELR.SEL field */
+		p->regval = vcpu_sys_reg(vcpu, PMSELR_EL0)
+			    & ARMV8_PMU_COUNTER_MASK;
+
+	return true;
+}
+
+static bool access_pmceid(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
+			  const struct sys_reg_desc *r)
+{
+	u64 pmceid;
+
+	if (!kvm_arm_pmu_v3_ready(vcpu))
+		return trap_raz_wi(vcpu, p, r);
+
+	BUG_ON(p->is_write);
+
+	if (pmu_access_el0_disabled(vcpu))
+		return false;
+
+	if (!(p->Op2 & 1))
+		asm volatile("mrs %0, pmceid0_el0\n" : "=r" (pmceid));
+	else
+		asm volatile("mrs %0, pmceid1_el0\n" : "=r" (pmceid));
+
+	p->regval = pmceid;
+
+	return true;
+}
+
+static bool pmu_counter_idx_valid(struct kvm_vcpu *vcpu, u64 idx)
+{
+	u64 pmcr, val;
+
+	pmcr = vcpu_sys_reg(vcpu, PMCR_EL0);
+	val = (pmcr >> ARMV8_PMU_PMCR_N_SHIFT) & ARMV8_PMU_PMCR_N_MASK;
+	if (idx >= val && idx != ARMV8_PMU_CYCLE_IDX)
+		return false;
+
+	return true;
+}
+
+static bool access_pmu_evcntr(struct kvm_vcpu *vcpu,
+			      struct sys_reg_params *p,
+			      const struct sys_reg_desc *r)
+{
+	u64 idx;
+
+	if (!kvm_arm_pmu_v3_ready(vcpu))
+		return trap_raz_wi(vcpu, p, r);
+
+	if (r->CRn == 9 && r->CRm == 13) {
+		if (r->Op2 == 2) {
+			/* PMXEVCNTR_EL0 */
+			if (pmu_access_event_counter_el0_disabled(vcpu))
+				return false;
+
+			idx = vcpu_sys_reg(vcpu, PMSELR_EL0)
+			      & ARMV8_PMU_COUNTER_MASK;
+		} else if (r->Op2 == 0) {
+			/* PMCCNTR_EL0 */
+			if (pmu_access_cycle_counter_el0_disabled(vcpu))
+				return false;
+
+			idx = ARMV8_PMU_CYCLE_IDX;
+		} else {
+			BUG();
+		}
+	} else if (r->CRn == 14 && (r->CRm & 12) == 8) {
+		/* PMEVCNTRn_EL0 */
+		if (pmu_access_event_counter_el0_disabled(vcpu))
+			return false;
+
+		idx = ((r->CRm & 3) << 3) | (r->Op2 & 7);
+	} else {
+		BUG();
+	}
+
+	if (!pmu_counter_idx_valid(vcpu, idx))
+		return false;
+
+	if (p->is_write) {
+		if (pmu_access_el0_disabled(vcpu))
+			return false;
+
+		kvm_pmu_set_counter_value(vcpu, idx, p->regval);
+	} else {
+		p->regval = kvm_pmu_get_counter_value(vcpu, idx);
+	}
+
+	return true;
+}
+
+static bool access_pmu_evtyper(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
+			       const struct sys_reg_desc *r)
+{
+	u64 idx, reg;
+
+	if (!kvm_arm_pmu_v3_ready(vcpu))
+		return trap_raz_wi(vcpu, p, r);
+
+	if (pmu_access_el0_disabled(vcpu))
+		return false;
+
+	if (r->CRn == 9 && r->CRm == 13 && r->Op2 == 1) {
+		/* PMXEVTYPER_EL0 */
+		idx = vcpu_sys_reg(vcpu, PMSELR_EL0) & ARMV8_PMU_COUNTER_MASK;
+		reg = PMEVTYPER0_EL0 + idx;
+	} else if (r->CRn == 14 && (r->CRm & 12) == 12) {
+		idx = ((r->CRm & 3) << 3) | (r->Op2 & 7);
+		if (idx == ARMV8_PMU_CYCLE_IDX)
+			reg = PMCCFILTR_EL0;
+		else
+			/* PMEVTYPERn_EL0 */
+			reg = PMEVTYPER0_EL0 + idx;
+	} else {
+		BUG();
+	}
+
+	if (!pmu_counter_idx_valid(vcpu, idx))
+		return false;
+
+	if (p->is_write) {
+		kvm_pmu_set_counter_event_type(vcpu, p->regval, idx);
+		vcpu_sys_reg(vcpu, reg) = p->regval & ARMV8_PMU_EVTYPE_MASK;
+	} else {
+		p->regval = vcpu_sys_reg(vcpu, reg) & ARMV8_PMU_EVTYPE_MASK;
+	}
+
+	return true;
+}
+
+static bool access_pmcnten(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
+			   const struct sys_reg_desc *r)
+{
+	u64 val, mask;
+
+	if (!kvm_arm_pmu_v3_ready(vcpu))
+		return trap_raz_wi(vcpu, p, r);
+
+	if (pmu_access_el0_disabled(vcpu))
+		return false;
+
+	mask = kvm_pmu_valid_counter_mask(vcpu);
+	if (p->is_write) {
+		val = p->regval & mask;
+		if (r->Op2 & 0x1) {
+			/* accessing PMCNTENSET_EL0 */
+			vcpu_sys_reg(vcpu, PMCNTENSET_EL0) |= val;
+			kvm_pmu_enable_counter(vcpu, val);
+		} else {
+			/* accessing PMCNTENCLR_EL0 */
+			vcpu_sys_reg(vcpu, PMCNTENSET_EL0) &= ~val;
+			kvm_pmu_disable_counter(vcpu, val);
+		}
+	} else {
+		p->regval = vcpu_sys_reg(vcpu, PMCNTENSET_EL0) & mask;
+	}
+
+	return true;
+}
+
+static bool access_pminten(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
+			   const struct sys_reg_desc *r)
+{
+	u64 mask = kvm_pmu_valid_counter_mask(vcpu);
+
+	if (!kvm_arm_pmu_v3_ready(vcpu))
+		return trap_raz_wi(vcpu, p, r);
+
+	if (!vcpu_mode_priv(vcpu))
+		return false;
+
+	if (p->is_write) {
+		u64 val = p->regval & mask;
+
+		if (r->Op2 & 0x1)
+			/* accessing PMINTENSET_EL1 */
+			vcpu_sys_reg(vcpu, PMINTENSET_EL1) |= val;
+		else
+			/* accessing PMINTENCLR_EL1 */
+			vcpu_sys_reg(vcpu, PMINTENSET_EL1) &= ~val;
+	} else {
+		p->regval = vcpu_sys_reg(vcpu, PMINTENSET_EL1) & mask;
+	}
+
+	return true;
+}
+
+static bool access_pmovs(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
+			 const struct sys_reg_desc *r)
+{
+	u64 mask = kvm_pmu_valid_counter_mask(vcpu);
+
+	if (!kvm_arm_pmu_v3_ready(vcpu))
+		return trap_raz_wi(vcpu, p, r);
+
+	if (pmu_access_el0_disabled(vcpu))
+		return false;
+
+	if (p->is_write) {
+		if (r->CRm & 0x2)
+			/* accessing PMOVSSET_EL0 */
+			kvm_pmu_overflow_set(vcpu, p->regval & mask);
+		else
+			/* accessing PMOVSCLR_EL0 */
+			vcpu_sys_reg(vcpu, PMOVSSET_EL0) &= ~(p->regval & mask);
+	} else {
+		p->regval = vcpu_sys_reg(vcpu, PMOVSSET_EL0) & mask;
+	}
+
+	return true;
+}
+
+static bool access_pmswinc(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
+			   const struct sys_reg_desc *r)
+{
+	u64 mask;
+
+	if (!kvm_arm_pmu_v3_ready(vcpu))
+		return trap_raz_wi(vcpu, p, r);
+
+	if (pmu_write_swinc_el0_disabled(vcpu))
+		return false;
+
+	if (p->is_write) {
+		mask = kvm_pmu_valid_counter_mask(vcpu);
+		kvm_pmu_software_increment(vcpu, p->regval & mask);
+		return true;
+	}
+
+	return false;
+}
+
+static bool access_pmuserenr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
+			     const struct sys_reg_desc *r)
+{
+	if (!kvm_arm_pmu_v3_ready(vcpu))
+		return trap_raz_wi(vcpu, p, r);
+
+	if (p->is_write) {
+		if (!vcpu_mode_priv(vcpu))
+			return false;
+
+		vcpu_sys_reg(vcpu, PMUSERENR_EL0) = p->regval
+						    & ARMV8_PMU_USERENR_MASK;
+	} else {
+		p->regval = vcpu_sys_reg(vcpu, PMUSERENR_EL0)
+			    & ARMV8_PMU_USERENR_MASK;
+	}
+
+	return true;
+}
+
 /* Silly macro to expand the DBG{BCR,BVR,WVR,WCR}n_EL1 registers in one go */
 #define DBG_BCR_BVR_WCR_WVR_EL1(n)					\
 	/* DBGBVRn_EL1 */						\
@@ -454,6 +794,20 @@
 	{ Op0(0b10), Op1(0b000), CRn(0b0000), CRm((n)), Op2(0b111),	\
 	  trap_wcr, reset_wcr, n, 0,  get_wcr, set_wcr }
 
+/* Macro to expand the PMEVCNTRn_EL0 register */
+#define PMU_PMEVCNTR_EL0(n)						\
+	/* PMEVCNTRn_EL0 */						\
+	{ Op0(0b11), Op1(0b011), CRn(0b1110),				\
+	  CRm((0b1000 | (((n) >> 3) & 0x3))), Op2(((n) & 0x7)),		\
+	  access_pmu_evcntr, reset_unknown, (PMEVCNTR0_EL0 + n), }
+
+/* Macro to expand the PMEVTYPERn_EL0 register */
+#define PMU_PMEVTYPER_EL0(n)						\
+	/* PMEVTYPERn_EL0 */						\
+	{ Op0(0b11), Op1(0b011), CRn(0b1110),				\
+	  CRm((0b1100 | (((n) >> 3) & 0x3))), Op2(((n) & 0x7)),		\
+	  access_pmu_evtyper, reset_unknown, (PMEVTYPER0_EL0 + n), }
+
 /*
  * Architected system registers.
  * Important: Must be sorted ascending by Op0, Op1, CRn, CRm, Op2
@@ -583,10 +937,10 @@
 
 	/* PMINTENSET_EL1 */
 	{ Op0(0b11), Op1(0b000), CRn(0b1001), CRm(0b1110), Op2(0b001),
-	  trap_raz_wi },
+	  access_pminten, reset_unknown, PMINTENSET_EL1 },
 	/* PMINTENCLR_EL1 */
 	{ Op0(0b11), Op1(0b000), CRn(0b1001), CRm(0b1110), Op2(0b010),
-	  trap_raz_wi },
+	  access_pminten, NULL, PMINTENSET_EL1 },
 
 	/* MAIR_EL1 */
 	{ Op0(0b11), Op1(0b000), CRn(0b1010), CRm(0b0010), Op2(0b000),
@@ -623,43 +977,46 @@
 
 	/* PMCR_EL0 */
 	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b000),
-	  trap_raz_wi },
+	  access_pmcr, reset_pmcr, },
 	/* PMCNTENSET_EL0 */
 	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b001),
-	  trap_raz_wi },
+	  access_pmcnten, reset_unknown, PMCNTENSET_EL0 },
 	/* PMCNTENCLR_EL0 */
 	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b010),
-	  trap_raz_wi },
+	  access_pmcnten, NULL, PMCNTENSET_EL0 },
 	/* PMOVSCLR_EL0 */
 	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b011),
-	  trap_raz_wi },
+	  access_pmovs, NULL, PMOVSSET_EL0 },
 	/* PMSWINC_EL0 */
 	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b100),
-	  trap_raz_wi },
+	  access_pmswinc, reset_unknown, PMSWINC_EL0 },
 	/* PMSELR_EL0 */
 	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b101),
-	  trap_raz_wi },
+	  access_pmselr, reset_unknown, PMSELR_EL0 },
 	/* PMCEID0_EL0 */
 	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b110),
-	  trap_raz_wi },
+	  access_pmceid },
 	/* PMCEID1_EL0 */
 	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b111),
-	  trap_raz_wi },
+	  access_pmceid },
 	/* PMCCNTR_EL0 */
 	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1101), Op2(0b000),
-	  trap_raz_wi },
+	  access_pmu_evcntr, reset_unknown, PMCCNTR_EL0 },
 	/* PMXEVTYPER_EL0 */
 	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1101), Op2(0b001),
-	  trap_raz_wi },
+	  access_pmu_evtyper },
 	/* PMXEVCNTR_EL0 */
 	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1101), Op2(0b010),
-	  trap_raz_wi },
-	/* PMUSERENR_EL0 */
+	  access_pmu_evcntr },
+	/* PMUSERENR_EL0
+	 * This register resets as unknown in 64bit mode while it resets as zero
+	 * in 32bit mode. Here we choose to reset it as zero for consistency.
+	 */
 	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1110), Op2(0b000),
-	  trap_raz_wi },
+	  access_pmuserenr, reset_val, PMUSERENR_EL0, 0 },
 	/* PMOVSSET_EL0 */
 	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1110), Op2(0b011),
-	  trap_raz_wi },
+	  access_pmovs, reset_unknown, PMOVSSET_EL0 },
 
 	/* TPIDR_EL0 */
 	{ Op0(0b11), Op1(0b011), CRn(0b1101), CRm(0b0000), Op2(0b010),
@@ -668,6 +1025,77 @@
 	{ Op0(0b11), Op1(0b011), CRn(0b1101), CRm(0b0000), Op2(0b011),
 	  NULL, reset_unknown, TPIDRRO_EL0 },
 
+	/* PMEVCNTRn_EL0 */
+	PMU_PMEVCNTR_EL0(0),
+	PMU_PMEVCNTR_EL0(1),
+	PMU_PMEVCNTR_EL0(2),
+	PMU_PMEVCNTR_EL0(3),
+	PMU_PMEVCNTR_EL0(4),
+	PMU_PMEVCNTR_EL0(5),
+	PMU_PMEVCNTR_EL0(6),
+	PMU_PMEVCNTR_EL0(7),
+	PMU_PMEVCNTR_EL0(8),
+	PMU_PMEVCNTR_EL0(9),
+	PMU_PMEVCNTR_EL0(10),
+	PMU_PMEVCNTR_EL0(11),
+	PMU_PMEVCNTR_EL0(12),
+	PMU_PMEVCNTR_EL0(13),
+	PMU_PMEVCNTR_EL0(14),
+	PMU_PMEVCNTR_EL0(15),
+	PMU_PMEVCNTR_EL0(16),
+	PMU_PMEVCNTR_EL0(17),
+	PMU_PMEVCNTR_EL0(18),
+	PMU_PMEVCNTR_EL0(19),
+	PMU_PMEVCNTR_EL0(20),
+	PMU_PMEVCNTR_EL0(21),
+	PMU_PMEVCNTR_EL0(22),
+	PMU_PMEVCNTR_EL0(23),
+	PMU_PMEVCNTR_EL0(24),
+	PMU_PMEVCNTR_EL0(25),
+	PMU_PMEVCNTR_EL0(26),
+	PMU_PMEVCNTR_EL0(27),
+	PMU_PMEVCNTR_EL0(28),
+	PMU_PMEVCNTR_EL0(29),
+	PMU_PMEVCNTR_EL0(30),
+	/* PMEVTYPERn_EL0 */
+	PMU_PMEVTYPER_EL0(0),
+	PMU_PMEVTYPER_EL0(1),
+	PMU_PMEVTYPER_EL0(2),
+	PMU_PMEVTYPER_EL0(3),
+	PMU_PMEVTYPER_EL0(4),
+	PMU_PMEVTYPER_EL0(5),
+	PMU_PMEVTYPER_EL0(6),
+	PMU_PMEVTYPER_EL0(7),
+	PMU_PMEVTYPER_EL0(8),
+	PMU_PMEVTYPER_EL0(9),
+	PMU_PMEVTYPER_EL0(10),
+	PMU_PMEVTYPER_EL0(11),
+	PMU_PMEVTYPER_EL0(12),
+	PMU_PMEVTYPER_EL0(13),
+	PMU_PMEVTYPER_EL0(14),
+	PMU_PMEVTYPER_EL0(15),
+	PMU_PMEVTYPER_EL0(16),
+	PMU_PMEVTYPER_EL0(17),
+	PMU_PMEVTYPER_EL0(18),
+	PMU_PMEVTYPER_EL0(19),
+	PMU_PMEVTYPER_EL0(20),
+	PMU_PMEVTYPER_EL0(21),
+	PMU_PMEVTYPER_EL0(22),
+	PMU_PMEVTYPER_EL0(23),
+	PMU_PMEVTYPER_EL0(24),
+	PMU_PMEVTYPER_EL0(25),
+	PMU_PMEVTYPER_EL0(26),
+	PMU_PMEVTYPER_EL0(27),
+	PMU_PMEVTYPER_EL0(28),
+	PMU_PMEVTYPER_EL0(29),
+	PMU_PMEVTYPER_EL0(30),
+	/* PMCCFILTR_EL0
+	 * This register resets as unknown in 64bit mode while it resets as zero
+	 * in 32bit mode. Here we choose to reset it as zero for consistency.
+	 */
+	{ Op0(0b11), Op1(0b011), CRn(0b1110), CRm(0b1111), Op2(0b111),
+	  access_pmu_evtyper, reset_val, PMCCFILTR_EL0, 0 },
+
 	/* DACR32_EL2 */
 	{ Op0(0b11), Op1(0b100), CRn(0b0011), CRm(0b0000), Op2(0b000),
 	  NULL, reset_unknown, DACR32_EL2 },
@@ -857,6 +1285,20 @@
 	{ Op1( 0), CRm( 2), .access = trap_raz_wi },
 };
 
+/* Macro to expand the PMEVCNTRn register */
+#define PMU_PMEVCNTR(n)							\
+	/* PMEVCNTRn */							\
+	{ Op1(0), CRn(0b1110),						\
+	  CRm((0b1000 | (((n) >> 3) & 0x3))), Op2(((n) & 0x7)),		\
+	  access_pmu_evcntr }
+
+/* Macro to expand the PMEVTYPERn register */
+#define PMU_PMEVTYPER(n)						\
+	/* PMEVTYPERn */						\
+	{ Op1(0), CRn(0b1110),						\
+	  CRm((0b1100 | (((n) >> 3) & 0x3))), Op2(((n) & 0x7)),		\
+	  access_pmu_evtyper }
+
 /*
  * Trapped cp15 registers. TTBR0/TTBR1 get a double encoding,
  * depending on the way they are accessed (as a 32bit or a 64bit
@@ -885,19 +1327,21 @@
 	{ Op1( 0), CRn( 7), CRm(14), Op2( 2), access_dcsw },
 
 	/* PMU */
-	{ Op1( 0), CRn( 9), CRm(12), Op2( 0), trap_raz_wi },
-	{ Op1( 0), CRn( 9), CRm(12), Op2( 1), trap_raz_wi },
-	{ Op1( 0), CRn( 9), CRm(12), Op2( 2), trap_raz_wi },
-	{ Op1( 0), CRn( 9), CRm(12), Op2( 3), trap_raz_wi },
-	{ Op1( 0), CRn( 9), CRm(12), Op2( 5), trap_raz_wi },
-	{ Op1( 0), CRn( 9), CRm(12), Op2( 6), trap_raz_wi },
-	{ Op1( 0), CRn( 9), CRm(12), Op2( 7), trap_raz_wi },
-	{ Op1( 0), CRn( 9), CRm(13), Op2( 0), trap_raz_wi },
-	{ Op1( 0), CRn( 9), CRm(13), Op2( 1), trap_raz_wi },
-	{ Op1( 0), CRn( 9), CRm(13), Op2( 2), trap_raz_wi },
-	{ Op1( 0), CRn( 9), CRm(14), Op2( 0), trap_raz_wi },
-	{ Op1( 0), CRn( 9), CRm(14), Op2( 1), trap_raz_wi },
-	{ Op1( 0), CRn( 9), CRm(14), Op2( 2), trap_raz_wi },
+	{ Op1( 0), CRn( 9), CRm(12), Op2( 0), access_pmcr },
+	{ Op1( 0), CRn( 9), CRm(12), Op2( 1), access_pmcnten },
+	{ Op1( 0), CRn( 9), CRm(12), Op2( 2), access_pmcnten },
+	{ Op1( 0), CRn( 9), CRm(12), Op2( 3), access_pmovs },
+	{ Op1( 0), CRn( 9), CRm(12), Op2( 4), access_pmswinc },
+	{ Op1( 0), CRn( 9), CRm(12), Op2( 5), access_pmselr },
+	{ Op1( 0), CRn( 9), CRm(12), Op2( 6), access_pmceid },
+	{ Op1( 0), CRn( 9), CRm(12), Op2( 7), access_pmceid },
+	{ Op1( 0), CRn( 9), CRm(13), Op2( 0), access_pmu_evcntr },
+	{ Op1( 0), CRn( 9), CRm(13), Op2( 1), access_pmu_evtyper },
+	{ Op1( 0), CRn( 9), CRm(13), Op2( 2), access_pmu_evcntr },
+	{ Op1( 0), CRn( 9), CRm(14), Op2( 0), access_pmuserenr },
+	{ Op1( 0), CRn( 9), CRm(14), Op2( 1), access_pminten },
+	{ Op1( 0), CRn( 9), CRm(14), Op2( 2), access_pminten },
+	{ Op1( 0), CRn( 9), CRm(14), Op2( 3), access_pmovs },
 
 	{ Op1( 0), CRn(10), CRm( 2), Op2( 0), access_vm_reg, NULL, c10_PRRR },
 	{ Op1( 0), CRn(10), CRm( 2), Op2( 1), access_vm_reg, NULL, c10_NMRR },
@@ -908,10 +1352,78 @@
 	{ Op1( 0), CRn(12), CRm(12), Op2( 5), trap_raz_wi },
 
 	{ Op1( 0), CRn(13), CRm( 0), Op2( 1), access_vm_reg, NULL, c13_CID },
+
+	/* PMEVCNTRn */
+	PMU_PMEVCNTR(0),
+	PMU_PMEVCNTR(1),
+	PMU_PMEVCNTR(2),
+	PMU_PMEVCNTR(3),
+	PMU_PMEVCNTR(4),
+	PMU_PMEVCNTR(5),
+	PMU_PMEVCNTR(6),
+	PMU_PMEVCNTR(7),
+	PMU_PMEVCNTR(8),
+	PMU_PMEVCNTR(9),
+	PMU_PMEVCNTR(10),
+	PMU_PMEVCNTR(11),
+	PMU_PMEVCNTR(12),
+	PMU_PMEVCNTR(13),
+	PMU_PMEVCNTR(14),
+	PMU_PMEVCNTR(15),
+	PMU_PMEVCNTR(16),
+	PMU_PMEVCNTR(17),
+	PMU_PMEVCNTR(18),
+	PMU_PMEVCNTR(19),
+	PMU_PMEVCNTR(20),
+	PMU_PMEVCNTR(21),
+	PMU_PMEVCNTR(22),
+	PMU_PMEVCNTR(23),
+	PMU_PMEVCNTR(24),
+	PMU_PMEVCNTR(25),
+	PMU_PMEVCNTR(26),
+	PMU_PMEVCNTR(27),
+	PMU_PMEVCNTR(28),
+	PMU_PMEVCNTR(29),
+	PMU_PMEVCNTR(30),
+	/* PMEVTYPERn */
+	PMU_PMEVTYPER(0),
+	PMU_PMEVTYPER(1),
+	PMU_PMEVTYPER(2),
+	PMU_PMEVTYPER(3),
+	PMU_PMEVTYPER(4),
+	PMU_PMEVTYPER(5),
+	PMU_PMEVTYPER(6),
+	PMU_PMEVTYPER(7),
+	PMU_PMEVTYPER(8),
+	PMU_PMEVTYPER(9),
+	PMU_PMEVTYPER(10),
+	PMU_PMEVTYPER(11),
+	PMU_PMEVTYPER(12),
+	PMU_PMEVTYPER(13),
+	PMU_PMEVTYPER(14),
+	PMU_PMEVTYPER(15),
+	PMU_PMEVTYPER(16),
+	PMU_PMEVTYPER(17),
+	PMU_PMEVTYPER(18),
+	PMU_PMEVTYPER(19),
+	PMU_PMEVTYPER(20),
+	PMU_PMEVTYPER(21),
+	PMU_PMEVTYPER(22),
+	PMU_PMEVTYPER(23),
+	PMU_PMEVTYPER(24),
+	PMU_PMEVTYPER(25),
+	PMU_PMEVTYPER(26),
+	PMU_PMEVTYPER(27),
+	PMU_PMEVTYPER(28),
+	PMU_PMEVTYPER(29),
+	PMU_PMEVTYPER(30),
+	/* PMCCFILTR */
+	{ Op1(0), CRn(14), CRm(15), Op2(7), access_pmu_evtyper },
 };
 
 static const struct sys_reg_desc cp15_64_regs[] = {
 	{ Op1( 0), CRn( 0), CRm( 2), Op2( 0), access_vm_reg, NULL, c2_TTBR0 },
+	{ Op1( 0), CRn( 0), CRm( 9), Op2( 0), access_pmu_evcntr },
 	{ Op1( 0), CRn( 0), CRm(12), Op2( 0), access_gic_sgi },
 	{ Op1( 1), CRn( 0), CRm( 2), Op2( 0), access_vm_reg, NULL, c2_TTBR1 },
 };
@@ -942,29 +1454,32 @@
 	}
 }
 
+#define reg_to_match_value(x)						\
+	({								\
+		unsigned long val;					\
+		val  = (x)->Op0 << 14;					\
+		val |= (x)->Op1 << 11;					\
+		val |= (x)->CRn << 7;					\
+		val |= (x)->CRm << 3;					\
+		val |= (x)->Op2;					\
+		val;							\
+	 })
+
+static int match_sys_reg(const void *key, const void *elt)
+{
+	const unsigned long pval = (unsigned long)key;
+	const struct sys_reg_desc *r = elt;
+
+	return pval - reg_to_match_value(r);
+}
+
 static const struct sys_reg_desc *find_reg(const struct sys_reg_params *params,
 					 const struct sys_reg_desc table[],
 					 unsigned int num)
 {
-	unsigned int i;
+	unsigned long pval = reg_to_match_value(params);
 
-	for (i = 0; i < num; i++) {
-		const struct sys_reg_desc *r = &table[i];
-
-		if (params->Op0 != r->Op0)
-			continue;
-		if (params->Op1 != r->Op1)
-			continue;
-		if (params->CRn != r->CRn)
-			continue;
-		if (params->CRm != r->CRm)
-			continue;
-		if (params->Op2 != r->Op2)
-			continue;
-
-		return r;
-	}
-	return NULL;
+	return bsearch((void *)pval, table, num, sizeof(table[0]), match_sys_reg);
 }
 
 int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu, struct kvm_run *run)
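Editor's note: the find_reg() change above works because the sys_reg_desc tables are sorted ascending by (Op0, Op1, CRn, CRm, Op2), so packing those fields into one integer yields a key that preserves that order and can be handed to bsearch(). A small self-contained sketch of the packing idea (struct enc, enc_to_key() and the example encodings are illustrative, not kernel definitions):

#include <stdio.h>
#include <stdlib.h>

struct enc { unsigned int op0, op1, crn, crm, op2; };

static unsigned long enc_to_key(const struct enc *e)
{
	/* Same field widths/positions as the kernel's reg_to_match_value(). */
	return ((unsigned long)e->op0 << 14) |
	       ((unsigned long)e->op1 << 11) |
	       ((unsigned long)e->crn << 7)  |
	       ((unsigned long)e->crm << 3)  |
	       e->op2;
}

static int cmp_key(const void *key, const void *elt)
{
	unsigned long k = (unsigned long)key;
	unsigned long v = enc_to_key(elt);

	return (k > v) - (k < v);
}

int main(void)
{
	/* Must be sorted ascending by (Op0, Op1, CRn, CRm, Op2). */
	struct enc table[] = {
		{ 3, 0, 1,  0, 0 },	/* e.g. SCTLR_EL1 */
		{ 3, 0, 2,  0, 0 },	/* e.g. TTBR0_EL1 */
		{ 3, 3, 9, 12, 0 },	/* e.g. PMCR_EL0  */
	};
	struct enc want = { 3, 3, 9, 12, 0 };
	struct enc *hit;

	hit = bsearch((void *)enc_to_key(&want), table,
		      sizeof(table) / sizeof(table[0]),
		      sizeof(table[0]), cmp_key);
	printf("%s\n", hit ? "found" : "not found");
	return 0;
}

The kernel comparator simply returns pval - reg_to_match_value(r); the three-way comparison used here encodes the same ordering while sidestepping any narrowing of the difference into an int.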
diff --git a/arch/avr32/include/asm/cmpxchg.h b/arch/avr32/include/asm/cmpxchg.h
index 366bbea..572739b 100644
--- a/arch/avr32/include/asm/cmpxchg.h
+++ b/arch/avr32/include/asm/cmpxchg.h
@@ -57,7 +57,7 @@
 		"	brne	1b\n"
 		"2:\n"
 		: [ret] "=&r"(ret), [m] "=m"(*m)
-		: "m"(m), [old] "ir"(old), [new] "r"(new)
+		: "m"(m), [old] "Ks21r"(old), [new] "r"(new)
 		: "memory", "cc");
 	return ret;
 }
diff --git a/arch/avr32/include/uapi/asm/unistd.h b/arch/avr32/include/uapi/asm/unistd.h
index b60132b..60c0f3a 100644
--- a/arch/avr32/include/uapi/asm/unistd.h
+++ b/arch/avr32/include/uapi/asm/unistd.h
@@ -337,5 +337,6 @@
 #define __NR_userfaultfd	322
 #define __NR_membarrier		323
 #define __NR_mlock2		324
+#define __NR_copy_file_range    325
 
 #endif /* _UAPI__ASM_AVR32_UNISTD_H */
diff --git a/arch/avr32/kernel/syscall-stubs.S b/arch/avr32/kernel/syscall-stubs.S
index f9c68fa..cb39915 100644
--- a/arch/avr32/kernel/syscall-stubs.S
+++ b/arch/avr32/kernel/syscall-stubs.S
@@ -124,3 +124,12 @@
 	call	sys_process_vm_writev
 	sub	sp, -4
 	popm	pc
+
+	.global __sys_copy_file_range
+	.type	__sys_copy_file_range,@function
+__sys_copy_file_range:
+	pushm	lr
+	st.w	--sp, ARG6
+	call	sys_copy_file_range
+	sub	sp, -4
+	popm	pc
diff --git a/arch/avr32/kernel/syscall_table.S b/arch/avr32/kernel/syscall_table.S
index 1915a443..64d71a7 100644
--- a/arch/avr32/kernel/syscall_table.S
+++ b/arch/avr32/kernel/syscall_table.S
@@ -338,4 +338,5 @@
 	.long	sys_userfaultfd
 	.long	sys_membarrier
 	.long	sys_mlock2
+	.long   __sys_copy_file_range   /* 325 */
 	.long	sys_ni_syscall		/* r8 is saturated at nr_syscalls */
diff --git a/arch/blackfin/mach-common/smp.c b/arch/blackfin/mach-common/smp.c
index 0030e21..23c4ef5 100644
--- a/arch/blackfin/mach-common/smp.c
+++ b/arch/blackfin/mach-common/smp.c
@@ -333,7 +333,7 @@
 
 	/* We are done with local CPU inits, unblock the boot CPU. */
 	set_cpu_online(cpu, true);
-	cpu_startup_entry(CPUHP_ONLINE);
+	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
 }
 
 void __init smp_prepare_boot_cpu(void)
diff --git a/arch/hexagon/kernel/smp.c b/arch/hexagon/kernel/smp.c
index ff759f2..983bae7d2 100644
--- a/arch/hexagon/kernel/smp.c
+++ b/arch/hexagon/kernel/smp.c
@@ -180,7 +180,7 @@
 
 	local_irq_enable();
 
-	cpu_startup_entry(CPUHP_ONLINE);
+	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
 }
 
 
diff --git a/arch/ia64/kernel/smpboot.c b/arch/ia64/kernel/smpboot.c
index 0e76fad..74fe317 100644
--- a/arch/ia64/kernel/smpboot.c
+++ b/arch/ia64/kernel/smpboot.c
@@ -454,7 +454,7 @@
 	preempt_disable();
 	smp_callin();
 
-	cpu_startup_entry(CPUHP_ONLINE);
+	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
 	return 0;
 }
 
diff --git a/arch/m32r/kernel/smpboot.c b/arch/m32r/kernel/smpboot.c
index a468467..f98d2f6 100644
--- a/arch/m32r/kernel/smpboot.c
+++ b/arch/m32r/kernel/smpboot.c
@@ -432,7 +432,7 @@
 	 */
 	local_flush_tlb_all();
 
-	cpu_startup_entry(CPUHP_ONLINE);
+	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
 	return 0;
 }
 
diff --git a/arch/m68k/include/asm/MC68328.h b/arch/m68k/include/asm/MC68328.h
index 4ebf098..1a8080c 100644
--- a/arch/m68k/include/asm/MC68328.h
+++ b/arch/m68k/include/asm/MC68328.h
@@ -798,7 +798,7 @@
 
 /**********
  *
- * 0xFFFFF7xx -- Serial Periferial Interface Slave (SPIS)
+ * 0xFFFFF7xx -- Serial Peripheral Interface Slave (SPIS)
  *
  **********/
 
@@ -824,7 +824,7 @@
 
 /**********
  *
- * 0xFFFFF8xx -- Serial Periferial Interface Master (SPIM)
+ * 0xFFFFF8xx -- Serial Peripheral Interface Master (SPIM)
  *
  **********/
 
@@ -904,7 +904,7 @@
 
 #define UBAUD_PRESCALER_MASK	0x003f	/* Actual divisor is 65 - PRESCALER */
 #define UBAUD_PRESCALER_SHIFT	0
-#define UBAUD_DIVIDE_MASK	0x0700	/* Baud Rate freq. divizor */
+#define UBAUD_DIVIDE_MASK	0x0700	/* Baud Rate freq. divisor */
 #define UBAUD_DIVIDE_SHIFT	8
 #define UBAUD_BAUD_SRC		0x0800	/* Baud Rate Source */
 #define UBAUD_GPIOSRC		0x1000	/* GPIO source */
diff --git a/arch/m68k/include/asm/MC68EZ328.h b/arch/m68k/include/asm/MC68EZ328.h
index d1bde58..fedac87 100644
--- a/arch/m68k/include/asm/MC68EZ328.h
+++ b/arch/m68k/include/asm/MC68EZ328.h
@@ -631,7 +631,7 @@
 
 /**********
  *
- * 0xFFFFF8xx -- Serial Periferial Interface Master (SPIM)
+ * 0xFFFFF8xx -- Serial Peripheral Interface Master (SPIM)
  *
  **********/
 
@@ -712,7 +712,7 @@
 
 #define UBAUD_PRESCALER_MASK	0x003f	/* Actual divisor is 65 - PRESCALER */
 #define UBAUD_PRESCALER_SHIFT	0
-#define UBAUD_DIVIDE_MASK	0x0700	/* Baud Rate freq. divizor */
+#define UBAUD_DIVIDE_MASK	0x0700	/* Baud Rate freq. divisor */
 #define UBAUD_DIVIDE_SHIFT	8
 #define UBAUD_BAUD_SRC		0x0800	/* Baud Rate Source */
 #define UBAUD_UCLKDIR		0x2000	/* UCLK Direction */
@@ -1160,7 +1160,7 @@
 #define DRAMMC_COL10		0x0080	/* Col address bit for MD10 PA11/PA0  */
 #define DRAMMC_COL9		0x0040	/* Col address bit for MD9  PA10/PA0  */
 #define DRAMMC_COL8		0x0020	/* Col address bit for MD8  PA9/PA0   */
-#define DRAMMC_REF_MASK		0x001f	/* Reresh Cycle */
+#define DRAMMC_REF_MASK		0x001f	/* Refresh Cycle */
 #define DRAMMC_REF_SHIFT	0
 
 /*
diff --git a/arch/m68k/include/asm/MC68VZ328.h b/arch/m68k/include/asm/MC68VZ328.h
index 6bd1bf1..34a51b2 100644
--- a/arch/m68k/include/asm/MC68VZ328.h
+++ b/arch/m68k/include/asm/MC68VZ328.h
@@ -724,7 +724,7 @@
 
 /**********
  *
- * 0xFFFFF8xx -- Serial Periferial Interface Master (SPIM)
+ * 0xFFFFF8xx -- Serial Peripheral Interface Master (SPIM)
  *
  **********/
 
@@ -806,7 +806,7 @@
 
 #define UBAUD_PRESCALER_MASK	0x003f	/* Actual divisor is 65 - PRESCALER */
 #define UBAUD_PRESCALER_SHIFT	0
-#define UBAUD_DIVIDE_MASK	0x0700	/* Baud Rate freq. divizor */
+#define UBAUD_DIVIDE_MASK	0x0700	/* Baud Rate freq. divisor */
 #define UBAUD_DIVIDE_SHIFT	8
 #define UBAUD_BAUD_SRC		0x0800	/* Baud Rate Source */
 #define UBAUD_UCLKDIR		0x2000	/* UCLK Direction */
@@ -1256,7 +1256,7 @@
 #define DRAMMC_COL10		0x0080	/* Col address bit for MD10 PA11/PA0  */
 #define DRAMMC_COL9		0x0040	/* Col address bit for MD9  PA10/PA0  */
 #define DRAMMC_COL8		0x0020	/* Col address bit for MD8  PA9/PA0   */
-#define DRAMMC_REF_MASK		0x001f	/* Reresh Cycle */
+#define DRAMMC_REF_MASK		0x001f	/* Refresh Cycle */
 #define DRAMMC_REF_SHIFT	0
 
 /*
diff --git a/arch/m68k/include/asm/m54xxacr.h b/arch/m68k/include/asm/m54xxacr.h
index 6d13cae..59e1710 100644
--- a/arch/m68k/include/asm/m54xxacr.h
+++ b/arch/m68k/include/asm/m54xxacr.h
@@ -23,8 +23,8 @@
 #define CACR_IEC	0x00008000	/* Enable instruction cache */
 #define CACR_DNFB	0x00002000	/* Inhibited fill buffer */
 #define CACR_IDPI	0x00001000	/* Disable CPUSHL */
-#define CACR_IHLCK	0x00000800	/* Intruction cache half lock */
-#define CACR_IDCM	0x00000400	/* Intruction cache inhibit */
+#define CACR_IHLCK	0x00000800	/* Instruction cache half lock */
+#define CACR_IDCM	0x00000400	/* Instruction cache inhibit */
 #define CACR_ICINVA	0x00000100	/* Invalidate instr cache */
 #define CACR_EUSP	0x00000020	/* Enable separate user a7 */
 
diff --git a/arch/m68k/include/asm/mac_iop.h b/arch/m68k/include/asm/mac_iop.h
index fde874a..42566fd 100644
--- a/arch/m68k/include/asm/mac_iop.h
+++ b/arch/m68k/include/asm/mac_iop.h
@@ -48,7 +48,7 @@
 
 /* IOP message status codes */
 
-#define IOP_MSGSTATUS_UNUSED	0	/* Unusued message structure       */
+#define IOP_MSGSTATUS_UNUSED	0	/* Unused message structure        */
 #define IOP_MSGSTATUS_WAITING	1	/* waiting for channel             */
 #define IOP_MSGSTATUS_SENT	2	/* message sent, awaiting reply    */
 #define IOP_MSGSTATUS_COMPLETE	3	/* message complete and reply rcvd */
diff --git a/arch/m68k/include/asm/mcftimer.h b/arch/m68k/include/asm/mcftimer.h
index 089f0f1..1150e42 100644
--- a/arch/m68k/include/asm/mcftimer.h
+++ b/arch/m68k/include/asm/mcftimer.h
@@ -51,7 +51,7 @@
  *	Bit definitions for the Timer Event Registers (TER).
  */
 #define	MCFTIMER_TER_CAP	0x01		/* Capture event */
-#define	MCFTIMER_TER_REF	0x02		/* Refernece event */
+#define	MCFTIMER_TER_REF	0x02		/* Reference event */
 
 /****************************************************************************/
 #endif	/* mcftimer_h */
diff --git a/arch/m68k/kernel/entry.S b/arch/m68k/kernel/entry.S
index b54ac7a..97cd3ea 100644
--- a/arch/m68k/kernel/entry.S
+++ b/arch/m68k/kernel/entry.S
@@ -71,13 +71,19 @@
 
 ENTRY(sys_sigreturn)
 	SAVE_SWITCH_STACK
+	movel	%sp,%sp@-		  | switch_stack pointer
+	pea	%sp@(SWITCH_STACK_SIZE+4) | pt_regs pointer
 	jbsr	do_sigreturn
+	addql	#8,%sp
 	RESTORE_SWITCH_STACK
 	rts
 
 ENTRY(sys_rt_sigreturn)
 	SAVE_SWITCH_STACK
+	movel	%sp,%sp@-		  | switch_stack pointer
+	pea	%sp@(SWITCH_STACK_SIZE+4) | pt_regs pointer
 	jbsr	do_rt_sigreturn
+	addql	#8,%sp
 	RESTORE_SWITCH_STACK
 	rts
 
diff --git a/arch/m68k/kernel/signal.c b/arch/m68k/kernel/signal.c
index af1c4f3..2dcee3a 100644
--- a/arch/m68k/kernel/signal.c
+++ b/arch/m68k/kernel/signal.c
@@ -737,10 +737,8 @@
 	return 1;
 }
 
-asmlinkage int do_sigreturn(unsigned long __unused)
+asmlinkage int do_sigreturn(struct pt_regs *regs, struct switch_stack *sw)
 {
-	struct switch_stack *sw = (struct switch_stack *) &__unused;
-	struct pt_regs *regs = (struct pt_regs *) (sw + 1);
 	unsigned long usp = rdusp();
 	struct sigframe __user *frame = (struct sigframe __user *)(usp - 4);
 	sigset_t set;
@@ -764,10 +762,8 @@
 	return 0;
 }
 
-asmlinkage int do_rt_sigreturn(unsigned long __unused)
+asmlinkage int do_rt_sigreturn(struct pt_regs *regs, struct switch_stack *sw)
 {
-	struct switch_stack *sw = (struct switch_stack *) &__unused;
-	struct pt_regs *regs = (struct pt_regs *) (sw + 1);
 	unsigned long usp = rdusp();
 	struct rt_sigframe __user *frame = (struct rt_sigframe __user *)(usp - 4);
 	sigset_t set;
diff --git a/arch/m68k/mac/via.c b/arch/m68k/mac/via.c
index ce56e04..920ff63 100644
--- a/arch/m68k/mac/via.c
+++ b/arch/m68k/mac/via.c
@@ -68,7 +68,7 @@
  * interrupt. This limitation also seems to apply to VIA clone logic cores in
  * Quadra-like ASICs. (RBV and OSS machines don't have this limitation.)
  *
- * We used to fake it by configuring the relevent VIA pin as an output
+ * We used to fake it by configuring the relevant VIA pin as an output
  * (to mask the interrupt) or input (to unmask). That scheme did not work on
  * (at least) the Quadra 700. A NuBus card's /NMRQ signal is an open-collector
  * circuit (see Designing Cards and Drivers for Macintosh II and Macintosh SE,
diff --git a/arch/metag/kernel/smp.c b/arch/metag/kernel/smp.c
index c3c6f08..bad1323 100644
--- a/arch/metag/kernel/smp.c
+++ b/arch/metag/kernel/smp.c
@@ -396,7 +396,7 @@
 	/*
 	 * OK, it's off to the idle thread for us
 	 */
-	cpu_startup_entry(CPUHP_ONLINE);
+	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
 }
 
 void __init smp_cpus_done(unsigned int max_cpus)
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index d3da79d..a65eacf 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -151,6 +151,7 @@
 	select CSRC_R4K
 	select SYNC_R4K
 	select COMMON_CLK
+	select BCM6345_L1_IRQ
 	select BCM7038_L1_IRQ
 	select BCM7120_L2_IRQ
 	select BRCMSTB_L2_IRQ
@@ -2169,7 +2170,6 @@
 	select CPU_MIPSR2_IRQ_VI
 	select CPU_MIPSR2_IRQ_EI
 	select SYNC_R4K
-	select MIPS_GIC_IPI if MIPS_GIC
 	select MIPS_MT
 	select SMP
 	select SMP_UP
@@ -2267,7 +2267,6 @@
 config MIPS_CMP
 	bool "MIPS CMP framework support (DEPRECATED)"
 	depends on SYS_SUPPORTS_MIPS_CMP && !CPU_MIPSR6
-	select MIPS_GIC_IPI if MIPS_GIC
 	select SMP
 	select SYNC_R4K
 	select SYS_SUPPORTS_SMP
@@ -2287,7 +2286,6 @@
 	select MIPS_CM
 	select MIPS_CPC
 	select MIPS_CPS_PM if HOTPLUG_CPU
-	select MIPS_GIC_IPI if MIPS_GIC
 	select SMP
 	select SYNC_R4K if (CEVT_R4K || CSRC_R4K)
 	select SYS_SUPPORTS_HOTPLUG_CPU
@@ -2305,10 +2303,6 @@
 	select MIPS_CPC
 	bool
 
-config MIPS_GIC_IPI
-	depends on MIPS_GIC
-	bool
-
 config MIPS_CM
 	bool
 
diff --git a/arch/mips/ath79/irq.c b/arch/mips/ath79/irq.c
index 511c065..2dfff1f 100644
--- a/arch/mips/ath79/irq.c
+++ b/arch/mips/ath79/irq.c
@@ -26,90 +26,6 @@
 #include "common.h"
 #include "machtypes.h"
 
-static void __init ath79_misc_intc_domain_init(
-	struct device_node *node, int irq);
-
-static void ath79_misc_irq_handler(struct irq_desc *desc)
-{
-	struct irq_domain *domain = irq_desc_get_handler_data(desc);
-	void __iomem *base = domain->host_data;
-	u32 pending;
-
-	pending = __raw_readl(base + AR71XX_RESET_REG_MISC_INT_STATUS) &
-		  __raw_readl(base + AR71XX_RESET_REG_MISC_INT_ENABLE);
-
-	if (!pending) {
-		spurious_interrupt();
-		return;
-	}
-
-	while (pending) {
-		int bit = __ffs(pending);
-
-		generic_handle_irq(irq_linear_revmap(domain, bit));
-		pending &= ~BIT(bit);
-	}
-}
-
-static void ar71xx_misc_irq_unmask(struct irq_data *d)
-{
-	void __iomem *base = irq_data_get_irq_chip_data(d);
-	unsigned int irq = d->hwirq;
-	u32 t;
-
-	t = __raw_readl(base + AR71XX_RESET_REG_MISC_INT_ENABLE);
-	__raw_writel(t | (1 << irq), base + AR71XX_RESET_REG_MISC_INT_ENABLE);
-
-	/* flush write */
-	__raw_readl(base + AR71XX_RESET_REG_MISC_INT_ENABLE);
-}
-
-static void ar71xx_misc_irq_mask(struct irq_data *d)
-{
-	void __iomem *base = irq_data_get_irq_chip_data(d);
-	unsigned int irq = d->hwirq;
-	u32 t;
-
-	t = __raw_readl(base + AR71XX_RESET_REG_MISC_INT_ENABLE);
-	__raw_writel(t & ~(1 << irq), base + AR71XX_RESET_REG_MISC_INT_ENABLE);
-
-	/* flush write */
-	__raw_readl(base + AR71XX_RESET_REG_MISC_INT_ENABLE);
-}
-
-static void ar724x_misc_irq_ack(struct irq_data *d)
-{
-	void __iomem *base = irq_data_get_irq_chip_data(d);
-	unsigned int irq = d->hwirq;
-	u32 t;
-
-	t = __raw_readl(base + AR71XX_RESET_REG_MISC_INT_STATUS);
-	__raw_writel(t & ~(1 << irq), base + AR71XX_RESET_REG_MISC_INT_STATUS);
-
-	/* flush write */
-	__raw_readl(base + AR71XX_RESET_REG_MISC_INT_STATUS);
-}
-
-static struct irq_chip ath79_misc_irq_chip = {
-	.name		= "MISC",
-	.irq_unmask	= ar71xx_misc_irq_unmask,
-	.irq_mask	= ar71xx_misc_irq_mask,
-};
-
-static void __init ath79_misc_irq_init(void)
-{
-	if (soc_is_ar71xx() || soc_is_ar913x())
-		ath79_misc_irq_chip.irq_mask_ack = ar71xx_misc_irq_mask;
-	else if (soc_is_ar724x() ||
-		 soc_is_ar933x() ||
-		 soc_is_ar934x() ||
-		 soc_is_qca955x())
-		ath79_misc_irq_chip.irq_ack = ar724x_misc_irq_ack;
-	else
-		BUG();
-
-	ath79_misc_intc_domain_init(NULL, ATH79_CPU_IRQ(6));
-}
 
 static void ar934x_ip2_irq_dispatch(struct irq_desc *desc)
 {
@@ -212,142 +128,12 @@
 	irq_set_chained_handler(ATH79_CPU_IRQ(3), qca955x_ip3_irq_dispatch);
 }
 
-/*
- * The IP2/IP3 lines are tied to a PCI/WMAC/USB device. Drivers for
- * these devices typically allocate coherent DMA memory, however the
- * DMA controller may still have some unsynchronized data in the FIFO.
- * Issue a flush in the handlers to ensure that the driver sees
- * the update.
- *
- * This array map the interrupt lines to the DDR write buffer channels.
- */
-
-static unsigned irq_wb_chan[8] = {
-	-1, -1, -1, -1, -1, -1, -1, -1,
-};
-
-asmlinkage void plat_irq_dispatch(void)
-{
-	unsigned long pending;
-	int irq;
-
-	pending = read_c0_status() & read_c0_cause() & ST0_IM;
-
-	if (!pending) {
-		spurious_interrupt();
-		return;
-	}
-
-	pending >>= CAUSEB_IP;
-	while (pending) {
-		irq = fls(pending) - 1;
-		if (irq < ARRAY_SIZE(irq_wb_chan) && irq_wb_chan[irq] != -1)
-			ath79_ddr_wb_flush(irq_wb_chan[irq]);
-		do_IRQ(MIPS_CPU_IRQ_BASE + irq);
-		pending &= ~BIT(irq);
-	}
-}
-
-static int misc_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hw)
-{
-	irq_set_chip_and_handler(irq, &ath79_misc_irq_chip, handle_level_irq);
-	irq_set_chip_data(irq, d->host_data);
-	return 0;
-}
-
-static const struct irq_domain_ops misc_irq_domain_ops = {
-	.xlate = irq_domain_xlate_onecell,
-	.map = misc_map,
-};
-
-static void __init ath79_misc_intc_domain_init(
-	struct device_node *node, int irq)
-{
-	void __iomem *base = ath79_reset_base;
-	struct irq_domain *domain;
-
-	domain = irq_domain_add_legacy(node, ATH79_MISC_IRQ_COUNT,
-			ATH79_MISC_IRQ_BASE, 0, &misc_irq_domain_ops, base);
-	if (!domain)
-		panic("Failed to add MISC irqdomain");
-
-	/* Disable and clear all interrupts */
-	__raw_writel(0, base + AR71XX_RESET_REG_MISC_INT_ENABLE);
-	__raw_writel(0, base + AR71XX_RESET_REG_MISC_INT_STATUS);
-
-	irq_set_chained_handler_and_data(irq, ath79_misc_irq_handler, domain);
-}
-
-static int __init ath79_misc_intc_of_init(
-	struct device_node *node, struct device_node *parent)
-{
-	int irq;
-
-	irq = irq_of_parse_and_map(node, 0);
-	if (!irq)
-		panic("Failed to get MISC IRQ");
-
-	ath79_misc_intc_domain_init(node, irq);
-	return 0;
-}
-
-static int __init ar7100_misc_intc_of_init(
-	struct device_node *node, struct device_node *parent)
-{
-	ath79_misc_irq_chip.irq_mask_ack = ar71xx_misc_irq_mask;
-	return ath79_misc_intc_of_init(node, parent);
-}
-
-IRQCHIP_DECLARE(ar7100_misc_intc, "qca,ar7100-misc-intc",
-		ar7100_misc_intc_of_init);
-
-static int __init ar7240_misc_intc_of_init(
-	struct device_node *node, struct device_node *parent)
-{
-	ath79_misc_irq_chip.irq_ack = ar724x_misc_irq_ack;
-	return ath79_misc_intc_of_init(node, parent);
-}
-
-IRQCHIP_DECLARE(ar7240_misc_intc, "qca,ar7240-misc-intc",
-		ar7240_misc_intc_of_init);
-
-static int __init ar79_cpu_intc_of_init(
-	struct device_node *node, struct device_node *parent)
-{
-	int err, i, count;
-
-	/* Fill the irq_wb_chan table */
-	count = of_count_phandle_with_args(
-		node, "qca,ddr-wb-channels", "#qca,ddr-wb-channel-cells");
-
-	for (i = 0; i < count; i++) {
-		struct of_phandle_args args;
-		u32 irq = i;
-
-		of_property_read_u32_index(
-			node, "qca,ddr-wb-channel-interrupts", i, &irq);
-		if (irq >= ARRAY_SIZE(irq_wb_chan))
-			continue;
-
-		err = of_parse_phandle_with_args(
-			node, "qca,ddr-wb-channels",
-			"#qca,ddr-wb-channel-cells",
-			i, &args);
-		if (err)
-			return err;
-
-		irq_wb_chan[irq] = args.args[0];
-		pr_info("IRQ: Set flush channel of IRQ%d to %d\n",
-			irq, args.args[0]);
-	}
-
-	return mips_cpu_irq_of_init(node, parent);
-}
-IRQCHIP_DECLARE(ar79_cpu_intc, "qca,ar7100-cpu-intc",
-		ar79_cpu_intc_of_init);
-
 void __init arch_init_irq(void)
 {
+	unsigned irq_wb_chan2 = -1;
+	unsigned irq_wb_chan3 = -1;
+	bool misc_is_ar71xx;
+
 	if (mips_machtype == ATH79_MACH_GENERIC_OF) {
 		irqchip_init();
 		return;
@@ -355,14 +141,26 @@
 
 	if (soc_is_ar71xx() || soc_is_ar724x() ||
 	    soc_is_ar913x() || soc_is_ar933x()) {
-		irq_wb_chan[2] = 3;
-		irq_wb_chan[3] = 2;
+		irq_wb_chan2 = 3;
+		irq_wb_chan3 = 2;
 	} else if (soc_is_ar934x()) {
-		irq_wb_chan[3] = 2;
+		irq_wb_chan3 = 2;
 	}
 
-	mips_cpu_irq_init();
-	ath79_misc_irq_init();
+	ath79_cpu_irq_init(irq_wb_chan2, irq_wb_chan3);
+
+	if (soc_is_ar71xx() || soc_is_ar913x())
+		misc_is_ar71xx = true;
+	else if (soc_is_ar724x() ||
+		 soc_is_ar933x() ||
+		 soc_is_ar934x() ||
+		 soc_is_qca955x())
+		misc_is_ar71xx = false;
+	else
+		BUG();
+	ath79_misc_irq_init(
+		ath79_reset_base + AR71XX_RESET_REG_MISC_INT_STATUS,
+		ATH79_CPU_IRQ(6), ATH79_MISC_IRQ_BASE, misc_is_ar71xx);
 
 	if (soc_is_ar934x())
 		ar934x_ip2_irq_init();
diff --git a/arch/mips/bmips/irq.c b/arch/mips/bmips/irq.c
index e7fc6f934..7efefcf 100644
--- a/arch/mips/bmips/irq.c
+++ b/arch/mips/bmips/irq.c
@@ -15,6 +15,12 @@
 #include <asm/irq_cpu.h>
 #include <asm/time.h>
 
+static const struct of_device_id smp_intc_dt_match[] = {
+	{ .compatible = "brcm,bcm7038-l1-intc" },
+	{ .compatible = "brcm,bcm6345-l1-intc" },
+	{}
+};
+
 unsigned int get_c0_compare_int(void)
 {
 	return CP0_LEGACY_COMPARE_IRQ;
@@ -24,8 +30,8 @@
 {
 	struct device_node *dn;
 
-	/* Only the STB (bcm7038) controller supports SMP IRQ affinity */
-	dn = of_find_compatible_node(NULL, NULL, "brcm,bcm7038-l1-intc");
+	/* Only these controllers support SMP IRQ affinity */
+	dn = of_find_matching_node(NULL, smp_intc_dt_match);
 	if (dn)
 		of_node_put(dn);
 	else
diff --git a/arch/mips/boot/dts/brcm/bcm6328.dtsi b/arch/mips/boot/dts/brcm/bcm6328.dtsi
index d61b161..9d19236 100644
--- a/arch/mips/boot/dts/brcm/bcm6328.dtsi
+++ b/arch/mips/boot/dts/brcm/bcm6328.dtsi
@@ -74,7 +74,7 @@
 		timer: timer@10000040 {
 			compatible = "syscon";
 			reg = <0x10000040 0x2c>;
-			little-endian;
+			native-endian;
 		};
 
 		reboot {
diff --git a/arch/mips/boot/dts/brcm/bcm6368.dtsi b/arch/mips/boot/dts/brcm/bcm6368.dtsi
index 9c8d3fe2..1f6b9b5 100644
--- a/arch/mips/boot/dts/brcm/bcm6368.dtsi
+++ b/arch/mips/boot/dts/brcm/bcm6368.dtsi
@@ -54,7 +54,7 @@
 		periph_cntl: syscon@10000000 {
 			compatible = "syscon";
 			reg = <0x10000000 0x14>;
-			little-endian;
+			native-endian;
 		};
 
 		reboot: syscon-reboot@10000008 {
diff --git a/arch/mips/boot/dts/brcm/bcm7125.dtsi b/arch/mips/boot/dts/brcm/bcm7125.dtsi
index 1a7efa8..3ae1605 100644
--- a/arch/mips/boot/dts/brcm/bcm7125.dtsi
+++ b/arch/mips/boot/dts/brcm/bcm7125.dtsi
@@ -98,7 +98,7 @@
 		sun_top_ctrl: syscon@404000 {
 			compatible = "brcm,bcm7125-sun-top-ctrl", "syscon";
 			reg = <0x404000 0x60c>;
-			little-endian;
+			native-endian;
 		};
 
 		reboot {
diff --git a/arch/mips/boot/dts/brcm/bcm7346.dtsi b/arch/mips/boot/dts/brcm/bcm7346.dtsi
index d4bf52c..be79919 100644
--- a/arch/mips/boot/dts/brcm/bcm7346.dtsi
+++ b/arch/mips/boot/dts/brcm/bcm7346.dtsi
@@ -118,7 +118,7 @@
 		sun_top_ctrl: syscon@404000 {
 			compatible = "brcm,bcm7346-sun-top-ctrl", "syscon";
 			reg = <0x404000 0x51c>;
-			little-endian;
+			native-endian;
 		};
 
 		reboot {
diff --git a/arch/mips/boot/dts/brcm/bcm7358.dtsi b/arch/mips/boot/dts/brcm/bcm7358.dtsi
index 8e25016..060805b 100644
--- a/arch/mips/boot/dts/brcm/bcm7358.dtsi
+++ b/arch/mips/boot/dts/brcm/bcm7358.dtsi
@@ -112,7 +112,7 @@
 		sun_top_ctrl: syscon@404000 {
 			compatible = "brcm,bcm7358-sun-top-ctrl", "syscon";
 			reg = <0x404000 0x51c>;
-			little-endian;
+			native-endian;
 		};
 
 		reboot {
diff --git a/arch/mips/boot/dts/brcm/bcm7360.dtsi b/arch/mips/boot/dts/brcm/bcm7360.dtsi
index 7e5f760..bcdb09b 100644
--- a/arch/mips/boot/dts/brcm/bcm7360.dtsi
+++ b/arch/mips/boot/dts/brcm/bcm7360.dtsi
@@ -112,7 +112,7 @@
 		sun_top_ctrl: syscon@404000 {
 			compatible = "brcm,bcm7360-sun-top-ctrl", "syscon";
 			reg = <0x404000 0x51c>;
-			little-endian;
+			native-endian;
 		};
 
 		reboot {
diff --git a/arch/mips/boot/dts/brcm/bcm7362.dtsi b/arch/mips/boot/dts/brcm/bcm7362.dtsi
index c739ea7..d3b1b76 100644
--- a/arch/mips/boot/dts/brcm/bcm7362.dtsi
+++ b/arch/mips/boot/dts/brcm/bcm7362.dtsi
@@ -118,7 +118,7 @@
 		sun_top_ctrl: syscon@404000 {
 			compatible = "brcm,bcm7362-sun-top-ctrl", "syscon";
 			reg = <0x404000 0x51c>;
-			little-endian;
+			native-endian;
 		};
 
 		reboot {
diff --git a/arch/mips/boot/dts/brcm/bcm7420.dtsi b/arch/mips/boot/dts/brcm/bcm7420.dtsi
index 5f55d0a..3302a1b 100644
--- a/arch/mips/boot/dts/brcm/bcm7420.dtsi
+++ b/arch/mips/boot/dts/brcm/bcm7420.dtsi
@@ -99,7 +99,7 @@
 		sun_top_ctrl: syscon@404000 {
 			compatible = "brcm,bcm7420-sun-top-ctrl", "syscon";
 			reg = <0x404000 0x60c>;
-			little-endian;
+			native-endian;
 		};
 
 		reboot {
diff --git a/arch/mips/boot/dts/brcm/bcm7425.dtsi b/arch/mips/boot/dts/brcm/bcm7425.dtsi
index e24d41a..15b27aa 100644
--- a/arch/mips/boot/dts/brcm/bcm7425.dtsi
+++ b/arch/mips/boot/dts/brcm/bcm7425.dtsi
@@ -100,7 +100,7 @@
 		sun_top_ctrl: syscon@404000 {
 			compatible = "brcm,bcm7425-sun-top-ctrl", "syscon";
 			reg = <0x404000 0x51c>;
-			little-endian;
+			native-endian;
 		};
 
 		reboot {
diff --git a/arch/mips/boot/dts/brcm/bcm7435.dtsi b/arch/mips/boot/dts/brcm/bcm7435.dtsi
index 8b9432c..adb33e3 100644
--- a/arch/mips/boot/dts/brcm/bcm7435.dtsi
+++ b/arch/mips/boot/dts/brcm/bcm7435.dtsi
@@ -114,7 +114,7 @@
 		sun_top_ctrl: syscon@404000 {
 			compatible = "brcm,bcm7425-sun-top-ctrl", "syscon";
 			reg = <0x404000 0x51c>;
-			little-endian;
+			native-endian;
 		};
 
 		reboot {
diff --git a/arch/mips/include/asm/mach-ath79/ath79.h b/arch/mips/include/asm/mach-ath79/ath79.h
index 2b34872..441faa9 100644
--- a/arch/mips/include/asm/mach-ath79/ath79.h
+++ b/arch/mips/include/asm/mach-ath79/ath79.h
@@ -144,4 +144,8 @@
 void ath79_device_reset_set(u32 mask);
 void ath79_device_reset_clear(u32 mask);
 
+void ath79_cpu_irq_init(unsigned irq_wb_chan2, unsigned irq_wb_chan3);
+void ath79_misc_irq_init(void __iomem *regs, int irq,
+			int irq_base, bool is_ar71xx);
+
 #endif /* __ASM_MACH_ATH79_H */
diff --git a/arch/mips/include/asm/smp-ops.h b/arch/mips/include/asm/smp-ops.h
index 6ba1fb8..db7c322 100644
--- a/arch/mips/include/asm/smp-ops.h
+++ b/arch/mips/include/asm/smp-ops.h
@@ -44,8 +44,9 @@
 	mp_ops->smp_setup();
 }
 
-extern void gic_send_ipi_single(int cpu, unsigned int action);
-extern void gic_send_ipi_mask(const struct cpumask *mask, unsigned int action);
+extern void mips_smp_send_ipi_single(int cpu, unsigned int action);
+extern void mips_smp_send_ipi_mask(const struct cpumask *mask,
+				      unsigned int action);
 
 #else /* !CONFIG_SMP */
 
diff --git a/arch/mips/kernel/Makefile b/arch/mips/kernel/Makefile
index 68e2b7d..b0988fd 100644
--- a/arch/mips/kernel/Makefile
+++ b/arch/mips/kernel/Makefile
@@ -52,7 +52,6 @@
 obj-$(CONFIG_MIPS_CMP)		+= smp-cmp.o
 obj-$(CONFIG_MIPS_CPS)		+= smp-cps.o cps-vec.o
 obj-$(CONFIG_MIPS_CPS_NS16550)	+= cps-vec-ns16550.o
-obj-$(CONFIG_MIPS_GIC_IPI)	+= smp-gic.o
 obj-$(CONFIG_MIPS_SPRAM)	+= spram.o
 
 obj-$(CONFIG_MIPS_VPE_LOADER)	+= vpe.o
diff --git a/arch/mips/kernel/smp-cmp.c b/arch/mips/kernel/smp-cmp.c
index d5e0f94..7692334 100644
--- a/arch/mips/kernel/smp-cmp.c
+++ b/arch/mips/kernel/smp-cmp.c
@@ -149,8 +149,8 @@
 }
 
 struct plat_smp_ops cmp_smp_ops = {
-	.send_ipi_single	= gic_send_ipi_single,
-	.send_ipi_mask		= gic_send_ipi_mask,
+	.send_ipi_single	= mips_smp_send_ipi_single,
+	.send_ipi_mask		= mips_smp_send_ipi_mask,
 	.init_secondary		= cmp_init_secondary,
 	.smp_finish		= cmp_smp_finish,
 	.boot_secondary		= cmp_boot_secondary,
diff --git a/arch/mips/kernel/smp-cps.c b/arch/mips/kernel/smp-cps.c
index 2ad4e4c..253e140 100644
--- a/arch/mips/kernel/smp-cps.c
+++ b/arch/mips/kernel/smp-cps.c
@@ -472,8 +472,8 @@
 	.boot_secondary		= cps_boot_secondary,
 	.init_secondary		= cps_init_secondary,
 	.smp_finish		= cps_smp_finish,
-	.send_ipi_single	= gic_send_ipi_single,
-	.send_ipi_mask		= gic_send_ipi_mask,
+	.send_ipi_single	= mips_smp_send_ipi_single,
+	.send_ipi_mask		= mips_smp_send_ipi_mask,
 #ifdef CONFIG_HOTPLUG_CPU
 	.cpu_disable		= cps_cpu_disable,
 	.cpu_die		= cps_cpu_die,
diff --git a/arch/mips/kernel/smp-mt.c b/arch/mips/kernel/smp-mt.c
index 86311a1..4f9570a 100644
--- a/arch/mips/kernel/smp-mt.c
+++ b/arch/mips/kernel/smp-mt.c
@@ -121,7 +121,7 @@
 
 #ifdef CONFIG_MIPS_GIC
 	if (gic_present) {
-		gic_send_ipi_single(cpu, action);
+		mips_smp_send_ipi_single(cpu, action);
 		return;
 	}
 #endif
diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c
index 2b521e0..37708d9 100644
--- a/arch/mips/kernel/smp.c
+++ b/arch/mips/kernel/smp.c
@@ -33,12 +33,16 @@
 #include <linux/cpu.h>
 #include <linux/err.h>
 #include <linux/ftrace.h>
+#include <linux/irqdomain.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
 
 #include <linux/atomic.h>
 #include <asm/cpu.h>
 #include <asm/processor.h>
 #include <asm/idle.h>
 #include <asm/r4k-timer.h>
+#include <asm/mips-cpc.h>
 #include <asm/mmu_context.h>
 #include <asm/time.h>
 #include <asm/setup.h>
@@ -79,6 +83,11 @@
 
 cpumask_t cpu_coherent_mask;
 
+#ifdef CONFIG_GENERIC_IRQ_IPI
+static struct irq_desc *call_desc;
+static struct irq_desc *sched_desc;
+#endif
+
 static inline void set_cpu_sibling_map(int cpu)
 {
 	int i;
@@ -146,6 +155,133 @@
 	mp_ops = ops;
 }
 
+#ifdef CONFIG_GENERIC_IRQ_IPI
+void mips_smp_send_ipi_single(int cpu, unsigned int action)
+{
+	mips_smp_send_ipi_mask(cpumask_of(cpu), action);
+}
+
+void mips_smp_send_ipi_mask(const struct cpumask *mask, unsigned int action)
+{
+	unsigned long flags;
+	unsigned int core;
+	int cpu;
+
+	local_irq_save(flags);
+
+	switch (action) {
+	case SMP_CALL_FUNCTION:
+		__ipi_send_mask(call_desc, mask);
+		break;
+
+	case SMP_RESCHEDULE_YOURSELF:
+		__ipi_send_mask(sched_desc, mask);
+		break;
+
+	default:
+		BUG();
+	}
+
+	if (mips_cpc_present()) {
+		for_each_cpu(cpu, mask) {
+			core = cpu_data[cpu].core;
+
+			if (core == current_cpu_data.core)
+				continue;
+
+			while (!cpumask_test_cpu(cpu, &cpu_coherent_mask)) {
+				mips_cpc_lock_other(core);
+				write_cpc_co_cmd(CPC_Cx_CMD_PWRUP);
+				mips_cpc_unlock_other();
+			}
+		}
+	}
+
+	local_irq_restore(flags);
+}
+
+
+static irqreturn_t ipi_resched_interrupt(int irq, void *dev_id)
+{
+	scheduler_ipi();
+
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t ipi_call_interrupt(int irq, void *dev_id)
+{
+	generic_smp_call_function_interrupt();
+
+	return IRQ_HANDLED;
+}
+
+static struct irqaction irq_resched = {
+	.handler	= ipi_resched_interrupt,
+	.flags		= IRQF_PERCPU,
+	.name		= "IPI resched"
+};
+
+static struct irqaction irq_call = {
+	.handler	= ipi_call_interrupt,
+	.flags		= IRQF_PERCPU,
+	.name		= "IPI call"
+};
+
+static __init void smp_ipi_init_one(unsigned int virq,
+				    struct irqaction *action)
+{
+	int ret;
+
+	irq_set_handler(virq, handle_percpu_irq);
+	ret = setup_irq(virq, action);
+	BUG_ON(ret);
+}
+
+static int __init mips_smp_ipi_init(void)
+{
+	unsigned int call_virq, sched_virq;
+	struct irq_domain *ipidomain;
+	struct device_node *node;
+
+	node = of_irq_find_parent(of_root);
+	ipidomain = irq_find_matching_host(node, DOMAIN_BUS_IPI);
+
+	/*
+	 * Some platforms have half-done DT setups. So if we found an irq node
+	 * but didn't find an ipidomain, try to search for one that is not in
+	 * the DT.
+	 */
+	if (node && !ipidomain)
+		ipidomain = irq_find_matching_host(NULL, DOMAIN_BUS_IPI);
+
+	BUG_ON(!ipidomain);
+
+	call_virq = irq_reserve_ipi(ipidomain, cpu_possible_mask);
+	BUG_ON(!call_virq);
+
+	sched_virq = irq_reserve_ipi(ipidomain, cpu_possible_mask);
+	BUG_ON(!sched_virq);
+
+	if (irq_domain_is_ipi_per_cpu(ipidomain)) {
+		int cpu;
+
+		for_each_cpu(cpu, cpu_possible_mask) {
+			smp_ipi_init_one(call_virq + cpu, &irq_call);
+			smp_ipi_init_one(sched_virq + cpu, &irq_resched);
+		}
+	} else {
+		smp_ipi_init_one(call_virq, &irq_call);
+		smp_ipi_init_one(sched_virq, &irq_resched);
+	}
+
+	call_desc = irq_to_desc(call_virq);
+	sched_desc = irq_to_desc(sched_virq);
+
+	return 0;
+}
+early_initcall(mips_smp_ipi_init);
+#endif
+
 /*
  * First C code run on the secondary CPUs after being started up by
  * the master.
@@ -192,7 +328,7 @@
 	WARN_ON_ONCE(!irqs_disabled());
 	mp_ops->smp_finish();
 
-	cpu_startup_entry(CPUHP_ONLINE);
+	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
 }
 
 static void stop_this_cpu(void *dummy)
diff --git a/arch/mn10300/kernel/smp.c b/arch/mn10300/kernel/smp.c
index f984193..426173c 100644
--- a/arch/mn10300/kernel/smp.c
+++ b/arch/mn10300/kernel/smp.c
@@ -675,7 +675,7 @@
 #ifdef CONFIG_GENERIC_CLOCKEVENTS
 	init_clockevents();
 #endif
-	cpu_startup_entry(CPUHP_ONLINE);
+	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
 	return 0;
 }
 
diff --git a/arch/parisc/kernel/smp.c b/arch/parisc/kernel/smp.c
index 52e8597..c2a9cc5 100644
--- a/arch/parisc/kernel/smp.c
+++ b/arch/parisc/kernel/smp.c
@@ -305,7 +305,7 @@
 
 	local_irq_enable();  /* Interrupts have been off until now */
 
-	cpu_startup_entry(CPUHP_ONLINE);
+	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
 
 	/* NOTREACHED */
 	panic("smp_callin() AAAAaaaaahhhh....\n");
diff --git a/arch/powerpc/include/asm/kvm_book3s_64.h b/arch/powerpc/include/asm/kvm_book3s_64.h
index 2aa79c8..7529aab 100644
--- a/arch/powerpc/include/asm/kvm_book3s_64.h
+++ b/arch/powerpc/include/asm/kvm_book3s_64.h
@@ -33,8 +33,6 @@
 }
 #endif
 
-#define SPAPR_TCE_SHIFT		12
-
 #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
 #define KVM_DEFAULT_HPT_ORDER	24	/* 16MB HPT by default */
 #endif
diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index c98afa5..d7b3431 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -182,7 +182,10 @@
 	struct list_head list;
 	struct kvm *kvm;
 	u64 liobn;
-	u32 window_size;
+	struct rcu_head rcu;
+	u32 page_shift;
+	u64 offset;		/* in pages */
+	u64 size;		/* window size in pages */
 	struct page *pages[0];
 };
 
diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h
index 2241d53..2544eda 100644
--- a/arch/powerpc/include/asm/kvm_ppc.h
+++ b/arch/powerpc/include/asm/kvm_ppc.h
@@ -165,9 +165,25 @@
 extern int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu);
 
 extern long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
-				struct kvm_create_spapr_tce *args);
+				struct kvm_create_spapr_tce_64 *args);
+extern struct kvmppc_spapr_tce_table *kvmppc_find_table(
+		struct kvm_vcpu *vcpu, unsigned long liobn);
+extern long kvmppc_ioba_validate(struct kvmppc_spapr_tce_table *stt,
+		unsigned long ioba, unsigned long npages);
+extern long kvmppc_tce_validate(struct kvmppc_spapr_tce_table *tt,
+		unsigned long tce);
+extern long kvmppc_gpa_to_ua(struct kvm *kvm, unsigned long gpa,
+		unsigned long *ua, unsigned long **prmap);
+extern void kvmppc_tce_put(struct kvmppc_spapr_tce_table *tt,
+		unsigned long idx, unsigned long tce);
 extern long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
 			     unsigned long ioba, unsigned long tce);
+extern long kvmppc_h_put_tce_indirect(struct kvm_vcpu *vcpu,
+		unsigned long liobn, unsigned long ioba,
+		unsigned long tce_list, unsigned long npages);
+extern long kvmppc_h_stuff_tce(struct kvm_vcpu *vcpu,
+		unsigned long liobn, unsigned long ioba,
+		unsigned long tce_value, unsigned long npages);
 extern long kvmppc_h_get_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
 			     unsigned long ioba);
 extern struct page *kvm_alloc_hpt(unsigned long nr_pages);
@@ -437,6 +453,8 @@
 {
 	return vcpu->arch.irq_type == KVMPPC_IRQ_XICS;
 }
+extern void kvmppc_alloc_host_rm_ops(void);
+extern void kvmppc_free_host_rm_ops(void);
 extern void kvmppc_xics_free_icp(struct kvm_vcpu *vcpu);
 extern int kvmppc_xics_create_icp(struct kvm_vcpu *vcpu, unsigned long server);
 extern int kvm_vm_ioctl_xics_irq(struct kvm *kvm, struct kvm_irq_level *args);
@@ -445,7 +463,11 @@
 extern int kvmppc_xics_set_icp(struct kvm_vcpu *vcpu, u64 icpval);
 extern int kvmppc_xics_connect_vcpu(struct kvm_device *dev,
 			struct kvm_vcpu *vcpu, u32 cpu);
+extern void kvmppc_xics_ipi_action(void);
+extern int h_ipi_redirect;
 #else
+static inline void kvmppc_alloc_host_rm_ops(void) {};
+static inline void kvmppc_free_host_rm_ops(void) {};
 static inline int kvmppc_xics_enabled(struct kvm_vcpu *vcpu)
 	{ return 0; }
 static inline void kvmppc_xics_free_icp(struct kvm_vcpu *vcpu) { }
@@ -459,6 +481,33 @@
 	{ return 0; }
 #endif
 
+/*
+ * Host-side operations we want to set up for use while running in
+ * real mode in the guest, operating on the XICS.
+ * Currently only VCPU wakeup is supported.
+ */
+
+union kvmppc_rm_state {
+	unsigned long raw;
+	struct {
+		u32 in_host;
+		u32 rm_action;
+	};
+};
+
+struct kvmppc_host_rm_core {
+	union kvmppc_rm_state rm_state;
+	void *rm_data;
+	char pad[112];
+};
+
+struct kvmppc_host_rm_ops {
+	struct kvmppc_host_rm_core	*rm_core;
+	void		(*vcpu_kick)(struct kvm_vcpu *vcpu);
+};
+
+extern struct kvmppc_host_rm_ops *kvmppc_host_rm_ops_hv;
+
 static inline unsigned long kvmppc_get_epr(struct kvm_vcpu *vcpu)
 {
 #ifdef CONFIG_KVM_BOOKE_HV
diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
index ac9fb11..47897a3 100644
--- a/arch/powerpc/include/asm/pgtable.h
+++ b/arch/powerpc/include/asm/pgtable.h
@@ -78,6 +78,9 @@
 	}
 	return __find_linux_pte_or_hugepte(pgdir, ea, is_thp, shift);
 }
+
+unsigned long vmalloc_to_phys(void *vmalloc_addr);
+
 #endif /* __ASSEMBLY__ */
 
 #endif /* _ASM_POWERPC_PGTABLE_H */
diff --git a/arch/powerpc/include/asm/smp.h b/arch/powerpc/include/asm/smp.h
index 825663c..78083ed 100644
--- a/arch/powerpc/include/asm/smp.h
+++ b/arch/powerpc/include/asm/smp.h
@@ -114,6 +114,9 @@
 #define PPC_MSG_TICK_BROADCAST	2
 #define PPC_MSG_DEBUGGER_BREAK  3
 
+/* This is only used by the powernv kernel */
+#define PPC_MSG_RM_HOST_ACTION	4
+
 /* for irq controllers that have dedicated ipis per message (4) */
 extern int smp_request_message_ipi(int virq, int message);
 extern const char *smp_ipi_name[];
@@ -121,6 +124,7 @@
 /* for irq controllers with only a single ipi */
 extern void smp_muxed_ipi_set_data(int cpu, unsigned long data);
 extern void smp_muxed_ipi_message_pass(int cpu, int msg);
+extern void smp_muxed_ipi_set_message(int cpu, int msg);
 extern irqreturn_t smp_ipi_demux(void);
 
 void smp_init_pSeries(void);
diff --git a/arch/powerpc/include/asm/xics.h b/arch/powerpc/include/asm/xics.h
index 0e25bdb..2546048 100644
--- a/arch/powerpc/include/asm/xics.h
+++ b/arch/powerpc/include/asm/xics.h
@@ -30,6 +30,7 @@
 #ifdef CONFIG_PPC_ICP_NATIVE
 extern int icp_native_init(void);
 extern void icp_native_flush_interrupt(void);
+extern void icp_native_cause_ipi_rm(int cpu);
 #else
 static inline int icp_native_init(void) { return -ENODEV; }
 #endif
diff --git a/arch/powerpc/include/uapi/asm/kvm.h b/arch/powerpc/include/uapi/asm/kvm.h
index ab4d473..c93cf35 100644
--- a/arch/powerpc/include/uapi/asm/kvm.h
+++ b/arch/powerpc/include/uapi/asm/kvm.h
@@ -333,6 +333,15 @@
 	__u32 window_size;
 };
 
+/* for KVM_CAP_SPAPR_TCE_64 */
+struct kvm_create_spapr_tce_64 {
+	__u64 liobn;
+	__u32 page_shift;
+	__u32 flags;
+	__u64 offset;	/* in pages */
+	__u64 size;	/* in pages */
+};
+
 /* for KVM_ALLOCATE_RMA */
 struct kvm_allocate_rma {
 	__u64 rma_size;
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index ec9ec20..b7dea05f 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -206,7 +206,7 @@
 
 #ifdef CONFIG_PPC_SMP_MUXED_IPI
 struct cpu_messages {
-	int messages;			/* current messages */
+	long messages;			/* current messages */
 	unsigned long data;		/* data for cause ipi */
 };
 static DEFINE_PER_CPU_SHARED_ALIGNED(struct cpu_messages, ipi_message);
@@ -218,7 +218,7 @@
 	info->data = data;
 }
 
-void smp_muxed_ipi_message_pass(int cpu, int msg)
+void smp_muxed_ipi_set_message(int cpu, int msg)
 {
 	struct cpu_messages *info = &per_cpu(ipi_message, cpu);
 	char *message = (char *)&info->messages;
@@ -228,6 +228,13 @@
 	 */
 	smp_mb();
 	message[msg] = 1;
+}
+
+void smp_muxed_ipi_message_pass(int cpu, int msg)
+{
+	struct cpu_messages *info = &per_cpu(ipi_message, cpu);
+
+	smp_muxed_ipi_set_message(cpu, msg);
 	/*
 	 * cause_ipi functions are required to include a full barrier
 	 * before doing whatever causes the IPI.
@@ -236,20 +243,31 @@
 }
 
 #ifdef __BIG_ENDIAN__
-#define IPI_MESSAGE(A) (1 << (24 - 8 * (A)))
+#define IPI_MESSAGE(A) (1uL << ((BITS_PER_LONG - 8) - 8 * (A)))
 #else
-#define IPI_MESSAGE(A) (1 << (8 * (A)))
+#define IPI_MESSAGE(A) (1uL << (8 * (A)))
 #endif
 
 irqreturn_t smp_ipi_demux(void)
 {
 	struct cpu_messages *info = this_cpu_ptr(&ipi_message);
-	unsigned int all;
+	unsigned long all;
 
 	mb();	/* order any irq clear */
 
 	do {
 		all = xchg(&info->messages, 0);
+#if defined(CONFIG_KVM_XICS) && defined(CONFIG_KVM_BOOK3S_HV_POSSIBLE)
+		/*
+		 * Must check for PPC_MSG_RM_HOST_ACTION messages
+		 * before PPC_MSG_CALL_FUNCTION messages because when
+		 * a VM is destroyed, we call kick_all_cpus_sync()
+		 * to ensure that any pending PPC_MSG_RM_HOST_ACTION
+		 * messages have completed before we free any VCPUs.
+		 */
+		if (all & IPI_MESSAGE(PPC_MSG_RM_HOST_ACTION))
+			kvmppc_xics_ipi_action();
+#endif
 		if (all & IPI_MESSAGE(PPC_MSG_CALL_FUNCTION))
 			generic_smp_call_function_interrupt();
 		if (all & IPI_MESSAGE(PPC_MSG_RESCHEDULE))
@@ -727,7 +745,7 @@
 
 	local_irq_enable();
 
-	cpu_startup_entry(CPUHP_ONLINE);
+	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
 
 	BUG();
 }
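
As a quick illustration of the IPI message layout used above: the per-CPU `messages` field is widened from int to long so that a fifth message byte (PPC_MSG_RM_HOST_ACTION = 4) fits, and the reworked IPI_MESSAGE() macro has to yield the matching bit mask on both byte orders. The standalone userspace sketch below is not part of the patch; it simply checks that storing 1 into message[msg], as smp_muxed_ipi_set_message() does, sets exactly the bits that IPI_MESSAGE(msg) tests in smp_ipi_demux(). BITS_PER_LONG and the byte order are taken from the build host.

#include <assert.h>
#include <limits.h>
#include <stdio.h>

#define BITS_PER_LONG	(sizeof(long) * CHAR_BIT)

#if defined(__BIG_ENDIAN__)
#define IPI_MESSAGE(A)	(1uL << ((BITS_PER_LONG - 8) - 8 * (A)))
#else
#define IPI_MESSAGE(A)	(1uL << (8 * (A)))
#endif

int main(void)
{
	long messages;
	char *message = (char *)&messages;
	int msg;

	/* one byte per message slot, as in struct cpu_messages */
	for (msg = 0; msg < (int)sizeof(messages); msg++) {
		messages = 0;
		message[msg] = 1;	/* what smp_muxed_ipi_set_message() does */
		assert((unsigned long)messages == IPI_MESSAGE(msg));
		printf("msg %d -> mask 0x%016lx\n", msg, IPI_MESSAGE(msg));
	}
	return 0;
}
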
diff --git a/arch/powerpc/kvm/Makefile b/arch/powerpc/kvm/Makefile
index 0570eef..7f7b6d8 100644
--- a/arch/powerpc/kvm/Makefile
+++ b/arch/powerpc/kvm/Makefile
@@ -8,7 +8,7 @@
 KVM := ../../../virt/kvm
 
 common-objs-y = $(KVM)/kvm_main.o $(KVM)/coalesced_mmio.o \
-		$(KVM)/eventfd.o
+		$(KVM)/eventfd.o $(KVM)/vfio.o
 
 CFLAGS_e500_mmu.o := -I.
 CFLAGS_e500_mmu_host.o := -I.
diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c
index 638c6d9..b34220d 100644
--- a/arch/powerpc/kvm/book3s.c
+++ b/arch/powerpc/kvm/book3s.c
@@ -807,7 +807,7 @@
 {
 
 #ifdef CONFIG_PPC64
-	INIT_LIST_HEAD(&kvm->arch.spapr_tce_tables);
+	INIT_LIST_HEAD_RCU(&kvm->arch.spapr_tce_tables);
 	INIT_LIST_HEAD(&kvm->arch.rtas_tokens);
 #endif
 
diff --git a/arch/powerpc/kvm/book3s_64_vio.c b/arch/powerpc/kvm/book3s_64_vio.c
index 54cf9bc..2c2d103 100644
--- a/arch/powerpc/kvm/book3s_64_vio.c
+++ b/arch/powerpc/kvm/book3s_64_vio.c
@@ -14,6 +14,7 @@
  *
  * Copyright 2010 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
  * Copyright 2011 David Gibson, IBM Corporation <dwg@au1.ibm.com>
+ * Copyright 2016 Alexey Kardashevskiy, IBM Corporation <aik@au1.ibm.com>
  */
 
 #include <linux/types.h>
@@ -36,28 +37,69 @@
 #include <asm/ppc-opcode.h>
 #include <asm/kvm_host.h>
 #include <asm/udbg.h>
+#include <asm/iommu.h>
+#include <asm/tce.h>
 
-#define TCES_PER_PAGE	(PAGE_SIZE / sizeof(u64))
-
-static long kvmppc_stt_npages(unsigned long window_size)
+static unsigned long kvmppc_tce_pages(unsigned long iommu_pages)
 {
-	return ALIGN((window_size >> SPAPR_TCE_SHIFT)
-		     * sizeof(u64), PAGE_SIZE) / PAGE_SIZE;
+	return ALIGN(iommu_pages * sizeof(u64), PAGE_SIZE) / PAGE_SIZE;
 }
 
-static void release_spapr_tce_table(struct kvmppc_spapr_tce_table *stt)
+static unsigned long kvmppc_stt_pages(unsigned long tce_pages)
 {
-	struct kvm *kvm = stt->kvm;
-	int i;
+	unsigned long stt_bytes = sizeof(struct kvmppc_spapr_tce_table) +
+			(tce_pages * sizeof(struct page *));
 
-	mutex_lock(&kvm->lock);
-	list_del(&stt->list);
-	for (i = 0; i < kvmppc_stt_npages(stt->window_size); i++)
+	return tce_pages + ALIGN(stt_bytes, PAGE_SIZE) / PAGE_SIZE;
+}
+
+static long kvmppc_account_memlimit(unsigned long stt_pages, bool inc)
+{
+	long ret = 0;
+
+	if (!current || !current->mm)
+		return ret; /* process exited */
+
+	down_write(&current->mm->mmap_sem);
+
+	if (inc) {
+		unsigned long locked, lock_limit;
+
+		locked = current->mm->locked_vm + stt_pages;
+		lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
+		if (locked > lock_limit && !capable(CAP_IPC_LOCK))
+			ret = -ENOMEM;
+		else
+			current->mm->locked_vm += stt_pages;
+	} else {
+		if (WARN_ON_ONCE(stt_pages > current->mm->locked_vm))
+			stt_pages = current->mm->locked_vm;
+
+		current->mm->locked_vm -= stt_pages;
+	}
+
+	pr_debug("[%d] RLIMIT_MEMLOCK KVM %c%ld %ld/%ld%s\n", current->pid,
+			inc ? '+' : '-',
+			stt_pages << PAGE_SHIFT,
+			current->mm->locked_vm << PAGE_SHIFT,
+			rlimit(RLIMIT_MEMLOCK),
+			ret ? " - exceeded" : "");
+
+	up_write(&current->mm->mmap_sem);
+
+	return ret;
+}
+
+static void release_spapr_tce_table(struct rcu_head *head)
+{
+	struct kvmppc_spapr_tce_table *stt = container_of(head,
+			struct kvmppc_spapr_tce_table, rcu);
+	unsigned long i, npages = kvmppc_tce_pages(stt->size);
+
+	for (i = 0; i < npages; i++)
 		__free_page(stt->pages[i]);
-	kfree(stt);
-	mutex_unlock(&kvm->lock);
 
-	kvm_put_kvm(kvm);
+	kfree(stt);
 }
 
 static int kvm_spapr_tce_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
@@ -65,7 +107,7 @@
 	struct kvmppc_spapr_tce_table *stt = vma->vm_file->private_data;
 	struct page *page;
 
-	if (vmf->pgoff >= kvmppc_stt_npages(stt->window_size))
+	if (vmf->pgoff >= kvmppc_tce_pages(stt->size))
 		return VM_FAULT_SIGBUS;
 
 	page = stt->pages[vmf->pgoff];
@@ -88,7 +130,14 @@
 {
 	struct kvmppc_spapr_tce_table *stt = filp->private_data;
 
-	release_spapr_tce_table(stt);
+	list_del_rcu(&stt->list);
+
+	kvm_put_kvm(stt->kvm);
+
+	kvmppc_account_memlimit(
+		kvmppc_stt_pages(kvmppc_tce_pages(stt->size)), false);
+	call_rcu(&stt->rcu, release_spapr_tce_table);
+
 	return 0;
 }
 
@@ -98,20 +147,29 @@
 };
 
 long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
-				   struct kvm_create_spapr_tce *args)
+				   struct kvm_create_spapr_tce_64 *args)
 {
 	struct kvmppc_spapr_tce_table *stt = NULL;
-	long npages;
+	unsigned long npages, size;
 	int ret = -ENOMEM;
 	int i;
 
+	if (!args->size)
+		return -EINVAL;
+
 	/* Check this LIOBN hasn't been previously allocated */
 	list_for_each_entry(stt, &kvm->arch.spapr_tce_tables, list) {
 		if (stt->liobn == args->liobn)
 			return -EBUSY;
 	}
 
-	npages = kvmppc_stt_npages(args->window_size);
+	size = args->size;
+	npages = kvmppc_tce_pages(size);
+	ret = kvmppc_account_memlimit(kvmppc_stt_pages(npages), true);
+	if (ret) {
+		stt = NULL;
+		goto fail;
+	}
 
 	stt = kzalloc(sizeof(*stt) + npages * sizeof(struct page *),
 		      GFP_KERNEL);
@@ -119,7 +177,9 @@
 		goto fail;
 
 	stt->liobn = args->liobn;
-	stt->window_size = args->window_size;
+	stt->page_shift = args->page_shift;
+	stt->offset = args->offset;
+	stt->size = size;
 	stt->kvm = kvm;
 
 	for (i = 0; i < npages; i++) {
@@ -131,7 +191,7 @@
 	kvm_get_kvm(kvm);
 
 	mutex_lock(&kvm->lock);
-	list_add(&stt->list, &kvm->arch.spapr_tce_tables);
+	list_add_rcu(&stt->list, &kvm->arch.spapr_tce_tables);
 
 	mutex_unlock(&kvm->lock);
 
@@ -148,3 +208,59 @@
 	}
 	return ret;
 }
+
+long kvmppc_h_put_tce_indirect(struct kvm_vcpu *vcpu,
+		unsigned long liobn, unsigned long ioba,
+		unsigned long tce_list, unsigned long npages)
+{
+	struct kvmppc_spapr_tce_table *stt;
+	long i, ret = H_SUCCESS, idx;
+	unsigned long entry, ua = 0;
+	u64 __user *tces, tce;
+
+	stt = kvmppc_find_table(vcpu, liobn);
+	if (!stt)
+		return H_TOO_HARD;
+
+	entry = ioba >> stt->page_shift;
+	/*
+	 * The SPAPR spec says that the maximum size of the list is 512 TCEs,
+	 * so the whole table fits in a single 4K page.
+	 */
+	if (npages > 512)
+		return H_PARAMETER;
+
+	if (tce_list & (SZ_4K - 1))
+		return H_PARAMETER;
+
+	ret = kvmppc_ioba_validate(stt, ioba, npages);
+	if (ret != H_SUCCESS)
+		return ret;
+
+	idx = srcu_read_lock(&vcpu->kvm->srcu);
+	if (kvmppc_gpa_to_ua(vcpu->kvm, tce_list, &ua, NULL)) {
+		ret = H_TOO_HARD;
+		goto unlock_exit;
+	}
+	tces = (u64 __user *) ua;
+
+	for (i = 0; i < npages; ++i) {
+		if (get_user(tce, tces + i)) {
+			ret = H_TOO_HARD;
+			goto unlock_exit;
+		}
+		tce = be64_to_cpu(tce);
+
+		ret = kvmppc_tce_validate(stt, tce);
+		if (ret != H_SUCCESS)
+			goto unlock_exit;
+
+		kvmppc_tce_put(stt, entry + i, tce);
+	}
+
+unlock_exit:
+	srcu_read_unlock(&vcpu->kvm->srcu, idx);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(kvmppc_h_put_tce_indirect);
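
To make the new locked-memory accounting above concrete, the standalone sketch below (not part of the patch) mirrors kvmppc_tce_pages() and kvmppc_stt_pages() and prints the number of pages charged against RLIMIT_MEMLOCK for a sample window. STT_DESC_BYTES is an assumed stand-in for sizeof(struct kvmppc_spapr_tce_table), so the struct-overhead part is only approximate.

#include <stdio.h>

#define PAGE_SIZE	4096UL
#define ALIGN(x, a)	(((x) + (a) - 1) & ~((a) - 1))
#define STT_DESC_BYTES	64UL	/* assumed; stands in for the descriptor struct */

static unsigned long tce_pages(unsigned long iommu_pages)
{
	/* pages needed to hold one u64 TCE per IOMMU page */
	return ALIGN(iommu_pages * sizeof(unsigned long long), PAGE_SIZE) / PAGE_SIZE;
}

static unsigned long stt_pages(unsigned long tce_pgs)
{
	/* TCE pages plus the descriptor and its page-pointer array */
	unsigned long stt_bytes = STT_DESC_BYTES + tce_pgs * sizeof(void *);

	return tce_pgs + ALIGN(stt_bytes, PAGE_SIZE) / PAGE_SIZE;
}

int main(void)
{
	/* e.g. a 2 GB DMA window backed by 4K IOMMU pages */
	unsigned long window_pages = (2UL << 30) / PAGE_SIZE;	/* 524288 TCEs */
	unsigned long tp = tce_pages(window_pages);		/* 1024 pages  */

	printf("TCE pages: %lu, pages charged to RLIMIT_MEMLOCK: %lu\n",
	       tp, stt_pages(tp));
	return 0;
}
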
diff --git a/arch/powerpc/kvm/book3s_64_vio_hv.c b/arch/powerpc/kvm/book3s_64_vio_hv.c
index 89e96b3..44be73e 100644
--- a/arch/powerpc/kvm/book3s_64_vio_hv.c
+++ b/arch/powerpc/kvm/book3s_64_vio_hv.c
@@ -14,6 +14,7 @@
  *
  * Copyright 2010 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
  * Copyright 2011 David Gibson, IBM Corporation <dwg@au1.ibm.com>
+ * Copyright 2016 Alexey Kardashevskiy, IBM Corporation <aik@au1.ibm.com>
  */
 
 #include <linux/types.h>
@@ -30,76 +31,321 @@
 #include <asm/kvm_ppc.h>
 #include <asm/kvm_book3s.h>
 #include <asm/mmu-hash64.h>
+#include <asm/mmu_context.h>
 #include <asm/hvcall.h>
 #include <asm/synch.h>
 #include <asm/ppc-opcode.h>
 #include <asm/kvm_host.h>
 #include <asm/udbg.h>
+#include <asm/iommu.h>
+#include <asm/tce.h>
+#include <asm/iommu.h>
 
 #define TCES_PER_PAGE	(PAGE_SIZE / sizeof(u64))
 
-/* WARNING: This will be called in real-mode on HV KVM and virtual
+/*
+ * Finds a TCE table descriptor by LIOBN.
+ *
+ * WARNING: This will be called in real or virtual mode on HV KVM and virtual
  *          mode on PR KVM
  */
-long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
-		      unsigned long ioba, unsigned long tce)
+struct kvmppc_spapr_tce_table *kvmppc_find_table(struct kvm_vcpu *vcpu,
+		unsigned long liobn)
 {
 	struct kvm *kvm = vcpu->kvm;
 	struct kvmppc_spapr_tce_table *stt;
 
+	list_for_each_entry_lockless(stt, &kvm->arch.spapr_tce_tables, list)
+		if (stt->liobn == liobn)
+			return stt;
+
+	return NULL;
+}
+EXPORT_SYMBOL_GPL(kvmppc_find_table);
+
+/*
+ * Validates IO address.
+ *
+ * WARNING: This will be called in real-mode on HV KVM and virtual
+ *          mode on PR KVM
+ */
+long kvmppc_ioba_validate(struct kvmppc_spapr_tce_table *stt,
+		unsigned long ioba, unsigned long npages)
+{
+	unsigned long mask = (1ULL << stt->page_shift) - 1;
+	unsigned long idx = ioba >> stt->page_shift;
+
+	if ((ioba & mask) || (idx < stt->offset) ||
+			(idx - stt->offset + npages > stt->size) ||
+			(idx + npages < idx))
+		return H_PARAMETER;
+
+	return H_SUCCESS;
+}
+EXPORT_SYMBOL_GPL(kvmppc_ioba_validate);
+
+/*
+ * Validates TCE address.
+ * At the moment only the flags and the page mask are validated.
+ * As the host kernel does not access those addresses (it just puts them
+ * into the table and user space is supposed to process them), we can skip
+ * checking other things (such as whether the TCE is a guest RAM address or
+ * whether the page was actually allocated).
+ *
+ * WARNING: This will be called in real-mode on HV KVM and virtual
+ *          mode on PR KVM
+ */
+long kvmppc_tce_validate(struct kvmppc_spapr_tce_table *stt, unsigned long tce)
+{
+	unsigned long page_mask = ~((1ULL << stt->page_shift) - 1);
+	unsigned long mask = ~(page_mask | TCE_PCI_WRITE | TCE_PCI_READ);
+
+	if (tce & mask)
+		return H_PARAMETER;
+
+	return H_SUCCESS;
+}
+EXPORT_SYMBOL_GPL(kvmppc_tce_validate);
+
+/* Note on the use of page_address() in real mode:
+ *
+ * It is safe to use page_address() in real mode on ppc64 because
+ * page_address() is always defined as lowmem_page_address(),
+ * which returns __va(PFN_PHYS(page_to_pfn(page))), a purely arithmetic
+ * operation that does not access the page struct.
+ *
+ * Theoretically, page_address() could be defined differently,
+ * but then either WANT_PAGE_VIRTUAL or HASHED_PAGE_VIRTUAL
+ * would have to be enabled.
+ * WANT_PAGE_VIRTUAL is never enabled on ppc32/ppc64,
+ * HASHED_PAGE_VIRTUAL could be enabled for ppc32 only and only
+ * if CONFIG_HIGHMEM is defined. As CONFIG_SPARSEMEM_VMEMMAP
+ * is not expected to be enabled on ppc32, page_address()
+ * is safe for ppc32 as well.
+ *
+ * WARNING: This will be called in real-mode on HV KVM and virtual
+ *          mode on PR KVM
+ */
+static u64 *kvmppc_page_address(struct page *page)
+{
+#if defined(HASHED_PAGE_VIRTUAL) || defined(WANT_PAGE_VIRTUAL)
+#error TODO: fix to avoid page_address() here
+#endif
+	return (u64 *) page_address(page);
+}
+
+/*
+ * Handles TCE requests for emulated devices.
+ * Puts guest TCE values into the table and expects user space to convert them.
+ * Called in both real and virtual modes.
+ * Cannot fail, so kvmppc_tce_validate() must be called before it.
+ *
+ * WARNING: This will be called in real-mode on HV KVM and virtual
+ *          mode on PR KVM
+ */
+void kvmppc_tce_put(struct kvmppc_spapr_tce_table *stt,
+		unsigned long idx, unsigned long tce)
+{
+	struct page *page;
+	u64 *tbl;
+
+	idx -= stt->offset;
+	page = stt->pages[idx / TCES_PER_PAGE];
+	tbl = kvmppc_page_address(page);
+
+	tbl[idx % TCES_PER_PAGE] = tce;
+}
+EXPORT_SYMBOL_GPL(kvmppc_tce_put);
+
+long kvmppc_gpa_to_ua(struct kvm *kvm, unsigned long gpa,
+		unsigned long *ua, unsigned long **prmap)
+{
+	unsigned long gfn = gpa >> PAGE_SHIFT;
+	struct kvm_memory_slot *memslot;
+
+	memslot = search_memslots(kvm_memslots(kvm), gfn);
+	if (!memslot)
+		return -EINVAL;
+
+	*ua = __gfn_to_hva_memslot(memslot, gfn) |
+		(gpa & ~(PAGE_MASK | TCE_PCI_READ | TCE_PCI_WRITE));
+
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
+	if (prmap)
+		*prmap = &memslot->arch.rmap[gfn - memslot->base_gfn];
+#endif
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(kvmppc_gpa_to_ua);
+
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
+long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
+		      unsigned long ioba, unsigned long tce)
+{
+	struct kvmppc_spapr_tce_table *stt = kvmppc_find_table(vcpu, liobn);
+	long ret;
+
 	/* udbg_printf("H_PUT_TCE(): liobn=0x%lx ioba=0x%lx, tce=0x%lx\n", */
 	/* 	    liobn, ioba, tce); */
 
-	list_for_each_entry(stt, &kvm->arch.spapr_tce_tables, list) {
-		if (stt->liobn == liobn) {
-			unsigned long idx = ioba >> SPAPR_TCE_SHIFT;
-			struct page *page;
-			u64 *tbl;
+	if (!stt)
+		return H_TOO_HARD;
 
-			/* udbg_printf("H_PUT_TCE: liobn 0x%lx => stt=%p  window_size=0x%x\n", */
-			/* 	    liobn, stt, stt->window_size); */
-			if (ioba >= stt->window_size)
-				return H_PARAMETER;
+	ret = kvmppc_ioba_validate(stt, ioba, 1);
+	if (ret != H_SUCCESS)
+		return ret;
 
-			page = stt->pages[idx / TCES_PER_PAGE];
-			tbl = (u64 *)page_address(page);
+	ret = kvmppc_tce_validate(stt, tce);
+	if (ret != H_SUCCESS)
+		return ret;
 
-			/* FIXME: Need to validate the TCE itself */
-			/* udbg_printf("tce @ %p\n", &tbl[idx % TCES_PER_PAGE]); */
-			tbl[idx % TCES_PER_PAGE] = tce;
-			return H_SUCCESS;
-		}
-	}
+	kvmppc_tce_put(stt, ioba >> stt->page_shift, tce);
 
-	/* Didn't find the liobn, punt it to userspace */
-	return H_TOO_HARD;
+	return H_SUCCESS;
 }
 EXPORT_SYMBOL_GPL(kvmppc_h_put_tce);
 
+static long kvmppc_rm_ua_to_hpa(struct kvm_vcpu *vcpu,
+		unsigned long ua, unsigned long *phpa)
+{
+	pte_t *ptep, pte;
+	unsigned shift = 0;
+
+	ptep = __find_linux_pte_or_hugepte(vcpu->arch.pgdir, ua, NULL, &shift);
+	if (!ptep || !pte_present(*ptep))
+		return -ENXIO;
+	pte = *ptep;
+
+	if (!shift)
+		shift = PAGE_SHIFT;
+
+	/* Avoid handling anything potentially complicated in realmode */
+	if (shift > PAGE_SHIFT)
+		return -EAGAIN;
+
+	if (!pte_young(pte))
+		return -EAGAIN;
+
+	*phpa = (pte_pfn(pte) << PAGE_SHIFT) | (ua & ((1ULL << shift) - 1)) |
+			(ua & ~PAGE_MASK);
+
+	return 0;
+}
+
+long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu,
+		unsigned long liobn, unsigned long ioba,
+		unsigned long tce_list,	unsigned long npages)
+{
+	struct kvmppc_spapr_tce_table *stt;
+	long i, ret = H_SUCCESS;
+	unsigned long tces, entry, ua = 0;
+	unsigned long *rmap = NULL;
+
+	stt = kvmppc_find_table(vcpu, liobn);
+	if (!stt)
+		return H_TOO_HARD;
+
+	entry = ioba >> stt->page_shift;
+	/*
+	 * The spec says that the maximum size of the list is 512 TCEs,
+	 * so the whole table addressed resides in a single 4K page.
+	 */
+	if (npages > 512)
+		return H_PARAMETER;
+
+	if (tce_list & (SZ_4K - 1))
+		return H_PARAMETER;
+
+	ret = kvmppc_ioba_validate(stt, ioba, npages);
+	if (ret != H_SUCCESS)
+		return ret;
+
+	if (kvmppc_gpa_to_ua(vcpu->kvm, tce_list, &ua, &rmap))
+		return H_TOO_HARD;
+
+	rmap = (void *) vmalloc_to_phys(rmap);
+
+	/*
+	 * Synchronize with the MMU notifier callbacks in
+	 * book3s_64_mmu_hv.c (kvm_unmap_hva_hv etc.).
+	 * While we have the rmap lock, code running on other CPUs
+	 * cannot finish unmapping the host real page that backs
+	 * this guest real page, so we are OK to access the host
+	 * real page.
+	 */
+	lock_rmap(rmap);
+	if (kvmppc_rm_ua_to_hpa(vcpu, ua, &tces)) {
+		ret = H_TOO_HARD;
+		goto unlock_exit;
+	}
+
+	for (i = 0; i < npages; ++i) {
+		unsigned long tce = be64_to_cpu(((u64 *)tces)[i]);
+
+		ret = kvmppc_tce_validate(stt, tce);
+		if (ret != H_SUCCESS)
+			goto unlock_exit;
+
+		kvmppc_tce_put(stt, entry + i, tce);
+	}
+
+unlock_exit:
+	unlock_rmap(rmap);
+
+	return ret;
+}
+
+long kvmppc_h_stuff_tce(struct kvm_vcpu *vcpu,
+		unsigned long liobn, unsigned long ioba,
+		unsigned long tce_value, unsigned long npages)
+{
+	struct kvmppc_spapr_tce_table *stt;
+	long i, ret;
+
+	stt = kvmppc_find_table(vcpu, liobn);
+	if (!stt)
+		return H_TOO_HARD;
+
+	ret = kvmppc_ioba_validate(stt, ioba, npages);
+	if (ret != H_SUCCESS)
+		return ret;
+
+	/* Check permission bits only, to allow userspace to poison TCEs for debugging */
+	if (tce_value & (TCE_PCI_WRITE | TCE_PCI_READ))
+		return H_PARAMETER;
+
+	for (i = 0; i < npages; ++i, ioba += (1ULL << stt->page_shift))
+		kvmppc_tce_put(stt, ioba >> stt->page_shift, tce_value);
+
+	return H_SUCCESS;
+}
+EXPORT_SYMBOL_GPL(kvmppc_h_stuff_tce);
+
 long kvmppc_h_get_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
 		      unsigned long ioba)
 {
-	struct kvm *kvm = vcpu->kvm;
-	struct kvmppc_spapr_tce_table *stt;
+	struct kvmppc_spapr_tce_table *stt = kvmppc_find_table(vcpu, liobn);
+	long ret;
+	unsigned long idx;
+	struct page *page;
+	u64 *tbl;
 
-	list_for_each_entry(stt, &kvm->arch.spapr_tce_tables, list) {
-		if (stt->liobn == liobn) {
-			unsigned long idx = ioba >> SPAPR_TCE_SHIFT;
-			struct page *page;
-			u64 *tbl;
+	if (!stt)
+		return H_TOO_HARD;
 
-			if (ioba >= stt->window_size)
-				return H_PARAMETER;
+	ret = kvmppc_ioba_validate(stt, ioba, 1);
+	if (ret != H_SUCCESS)
+		return ret;
 
-			page = stt->pages[idx / TCES_PER_PAGE];
-			tbl = (u64 *)page_address(page);
+	idx = (ioba >> stt->page_shift) - stt->offset;
+	page = stt->pages[idx / TCES_PER_PAGE];
+	tbl = (u64 *)page_address(page);
 
-			vcpu->arch.gpr[4] = tbl[idx % TCES_PER_PAGE];
-			return H_SUCCESS;
-		}
-	}
+	vcpu->arch.gpr[4] = tbl[idx % TCES_PER_PAGE];
 
-	/* Didn't find the liobn, punt it to userspace */
-	return H_TOO_HARD;
+	return H_SUCCESS;
 }
 EXPORT_SYMBOL_GPL(kvmppc_h_get_tce);
+
+#endif /* KVM_BOOK3S_HV_POSSIBLE */
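
The two H_PUT_TCE_INDIRECT guards above (at most 512 entries, 4K-aligned tce_list) are what let the handlers treat the guest's TCE list as residing in a single page: 512 entries of 8 bytes is exactly 4096 bytes, and page alignment rules out straddling a boundary. A small standalone check of that reasoning, not part of the patch:

#include <assert.h>
#include <stdint.h>

#define SZ_4K 0x1000UL

static int tce_list_fits_one_page(unsigned long tce_list, unsigned long npages)
{
	if (npages > 512)		/* H_PARAMETER in the hcall */
		return 0;
	if (tce_list & (SZ_4K - 1))	/* list must be 4K aligned */
		return 0;
	/* first and last byte of the list land in the same 4K page */
	return (tce_list / SZ_4K) ==
	       ((tce_list + npages * sizeof(uint64_t) - 1) / SZ_4K);
}

int main(void)
{
	assert(tce_list_fits_one_page(0x10000, 512));	/* exactly one page */
	assert(!tce_list_fits_one_page(0x10008, 512));	/* unaligned list   */
	assert(!tce_list_fits_one_page(0x10000, 513));	/* too many entries */
	return 0;
}
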
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index f1187bb..84fb4fc 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -81,6 +81,17 @@
 module_param(target_smt_mode, int, S_IRUGO | S_IWUSR);
 MODULE_PARM_DESC(target_smt_mode, "Target threads per core (0 = max)");
 
+#ifdef CONFIG_KVM_XICS
+static struct kernel_param_ops module_param_ops = {
+	.set = param_set_int,
+	.get = param_get_int,
+};
+
+module_param_cb(h_ipi_redirect, &module_param_ops, &h_ipi_redirect,
+							S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(h_ipi_redirect, "Redirect H_IPI wakeup to a free host core");
+#endif
+
 static void kvmppc_end_cede(struct kvm_vcpu *vcpu);
 static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu);
 
@@ -768,7 +779,31 @@
 		if (kvmppc_xics_enabled(vcpu)) {
 			ret = kvmppc_xics_hcall(vcpu, req);
 			break;
-		} /* fallthrough */
+		}
+		return RESUME_HOST;
+	case H_PUT_TCE:
+		ret = kvmppc_h_put_tce(vcpu, kvmppc_get_gpr(vcpu, 4),
+						kvmppc_get_gpr(vcpu, 5),
+						kvmppc_get_gpr(vcpu, 6));
+		if (ret == H_TOO_HARD)
+			return RESUME_HOST;
+		break;
+	case H_PUT_TCE_INDIRECT:
+		ret = kvmppc_h_put_tce_indirect(vcpu, kvmppc_get_gpr(vcpu, 4),
+						kvmppc_get_gpr(vcpu, 5),
+						kvmppc_get_gpr(vcpu, 6),
+						kvmppc_get_gpr(vcpu, 7));
+		if (ret == H_TOO_HARD)
+			return RESUME_HOST;
+		break;
+	case H_STUFF_TCE:
+		ret = kvmppc_h_stuff_tce(vcpu, kvmppc_get_gpr(vcpu, 4),
+						kvmppc_get_gpr(vcpu, 5),
+						kvmppc_get_gpr(vcpu, 6),
+						kvmppc_get_gpr(vcpu, 7));
+		if (ret == H_TOO_HARD)
+			return RESUME_HOST;
+		break;
 	default:
 		return RESUME_HOST;
 	}
@@ -2279,6 +2314,46 @@
 }
 
 /*
+ * Clear core from the list of active host cores as we are about to
+ * enter the guest. Only do this if it is the primary thread of the
+ * core (not if a subcore) that is entering the guest.
+ */
+static inline void kvmppc_clear_host_core(int cpu)
+{
+	int core;
+
+	if (!kvmppc_host_rm_ops_hv || cpu_thread_in_core(cpu))
+		return;
+	/*
+	 * The memory barrier can be omitted here because we will do a smp_wmb()
+	 * later in kvmppc_start_thread, and we only need to ensure that the
+	 * state is visible to other CPUs after we enter the guest.
+	 */
+	core = cpu >> threads_shift;
+	kvmppc_host_rm_ops_hv->rm_core[core].rm_state.in_host = 0;
+}
+
+/*
+ * Advertise this core as an active host core since we exited the guest.
+ * Only need to do this if it is the primary thread of the core that is
+ * exiting.
+ */
+static inline void kvmppc_set_host_core(int cpu)
+{
+	int core;
+
+	if (!kvmppc_host_rm_ops_hv || cpu_thread_in_core(cpu))
+		return;
+
+	/*
+	 * The memory barrier can be omitted here because we do a spin_unlock
+	 * immediately after this, which provides the memory barrier.
+	 */
+	core = cpu >> threads_shift;
+	kvmppc_host_rm_ops_hv->rm_core[core].rm_state.in_host = 1;
+}
+
+/*
  * Run a set of guest threads on a physical core.
  * Called with vc->lock held.
  */
@@ -2390,6 +2465,8 @@
 		}
 	}
 
+	kvmppc_clear_host_core(pcpu);
+
 	/* Start all the threads */
 	active = 0;
 	for (sub = 0; sub < core_info.n_subcores; ++sub) {
@@ -2486,6 +2563,8 @@
 			kvmppc_ipi_thread(pcpu + i);
 	}
 
+	kvmppc_set_host_core(pcpu);
+
 	spin_unlock(&vc->lock);
 
 	/* make sure updates to secondary vcpu structs are visible now */
@@ -2983,6 +3062,114 @@
 	goto out_srcu;
 }
 
+#ifdef CONFIG_KVM_XICS
+static int kvmppc_cpu_notify(struct notifier_block *self, unsigned long action,
+			void *hcpu)
+{
+	unsigned long cpu = (long)hcpu;
+
+	switch (action) {
+	case CPU_UP_PREPARE:
+	case CPU_UP_PREPARE_FROZEN:
+		kvmppc_set_host_core(cpu);
+		break;
+
+#ifdef CONFIG_HOTPLUG_CPU
+	case CPU_DEAD:
+	case CPU_DEAD_FROZEN:
+	case CPU_UP_CANCELED:
+	case CPU_UP_CANCELED_FROZEN:
+		kvmppc_clear_host_core(cpu);
+		break;
+#endif
+	default:
+		break;
+	}
+
+	return NOTIFY_OK;
+}
+
+static struct notifier_block kvmppc_cpu_notifier = {
+	    .notifier_call = kvmppc_cpu_notify,
+};
+
+/*
+ * Allocate a per-core structure for managing state about which cores are
+ * running in the host versus the guest and for exchanging data between
+ * real mode KVM and the CPU running in the host.
+ * This is only done for the first VM.
+ * The allocated structure stays around even after all VMs have stopped.
+ * It is only freed when the kvm-hv module is unloaded.
+ * It's OK for this routine to fail; we just won't support host
+ * core operations like redirecting H_IPI wakeups.
+ */
+void kvmppc_alloc_host_rm_ops(void)
+{
+	struct kvmppc_host_rm_ops *ops;
+	unsigned long l_ops;
+	int cpu, core;
+	int size;
+
+	/* Not the first time here ? */
+	if (kvmppc_host_rm_ops_hv != NULL)
+		return;
+
+	ops = kzalloc(sizeof(struct kvmppc_host_rm_ops), GFP_KERNEL);
+	if (!ops)
+		return;
+
+	size = cpu_nr_cores() * sizeof(struct kvmppc_host_rm_core);
+	ops->rm_core = kzalloc(size, GFP_KERNEL);
+
+	if (!ops->rm_core) {
+		kfree(ops);
+		return;
+	}
+
+	get_online_cpus();
+
+	for (cpu = 0; cpu < nr_cpu_ids; cpu += threads_per_core) {
+		if (!cpu_online(cpu))
+			continue;
+
+		core = cpu >> threads_shift;
+		ops->rm_core[core].rm_state.in_host = 1;
+	}
+
+	ops->vcpu_kick = kvmppc_fast_vcpu_kick_hv;
+
+	/*
+	 * Make the contents of the kvmppc_host_rm_ops structure visible
+	 * to other CPUs before we assign it to the global variable.
+	 * Do an atomic assignment (no locks used here), but if someone
+	 * beats us to it, just free our copy and return.
+	 */
+	smp_wmb();
+	l_ops = (unsigned long) ops;
+
+	if (cmpxchg64((unsigned long *)&kvmppc_host_rm_ops_hv, 0, l_ops)) {
+		put_online_cpus();
+		kfree(ops->rm_core);
+		kfree(ops);
+		return;
+	}
+
+	register_cpu_notifier(&kvmppc_cpu_notifier);
+
+	put_online_cpus();
+}
+
+void kvmppc_free_host_rm_ops(void)
+{
+	if (kvmppc_host_rm_ops_hv) {
+		unregister_cpu_notifier(&kvmppc_cpu_notifier);
+		kfree(kvmppc_host_rm_ops_hv->rm_core);
+		kfree(kvmppc_host_rm_ops_hv);
+		kvmppc_host_rm_ops_hv = NULL;
+	}
+}
+#endif
+
 static int kvmppc_core_init_vm_hv(struct kvm *kvm)
 {
 	unsigned long lpcr, lpid;
@@ -2995,6 +3182,8 @@
 		return -ENOMEM;
 	kvm->arch.lpid = lpid;
 
+	kvmppc_alloc_host_rm_ops();
+
 	/*
 	 * Since we don't flush the TLB when tearing down a VM,
 	 * and this lpid might have previously been used,
@@ -3228,6 +3417,7 @@
 
 static void kvmppc_book3s_exit_hv(void)
 {
+	kvmppc_free_host_rm_ops();
 	kvmppc_hv_ops = NULL;
 }
 
diff --git a/arch/powerpc/kvm/book3s_hv_builtin.c b/arch/powerpc/kvm/book3s_hv_builtin.c
index fd7006b..5f0380d 100644
--- a/arch/powerpc/kvm/book3s_hv_builtin.c
+++ b/arch/powerpc/kvm/book3s_hv_builtin.c
@@ -283,3 +283,6 @@
 			kvmhv_interrupt_vcore(vc, ee);
 	}
 }
+
+struct kvmppc_host_rm_ops *kvmppc_host_rm_ops_hv;
+EXPORT_SYMBOL_GPL(kvmppc_host_rm_ops_hv);
diff --git a/arch/powerpc/kvm/book3s_hv_rm_xics.c b/arch/powerpc/kvm/book3s_hv_rm_xics.c
index 24f5807..980d8a6 100644
--- a/arch/powerpc/kvm/book3s_hv_rm_xics.c
+++ b/arch/powerpc/kvm/book3s_hv_rm_xics.c
@@ -17,12 +17,16 @@
 #include <asm/xics.h>
 #include <asm/debug.h>
 #include <asm/synch.h>
+#include <asm/cputhreads.h>
 #include <asm/ppc-opcode.h>
 
 #include "book3s_xics.h"
 
 #define DEBUG_PASSUP
 
+int h_ipi_redirect = 1;
+EXPORT_SYMBOL(h_ipi_redirect);
+
 static void icp_rm_deliver_irq(struct kvmppc_xics *xics, struct kvmppc_icp *icp,
 			    u32 new_irq);
 
@@ -50,11 +54,84 @@
 
 /* -- ICP routines -- */
 
+#ifdef CONFIG_SMP
+static inline void icp_send_hcore_msg(int hcore, struct kvm_vcpu *vcpu)
+{
+	int hcpu;
+
+	hcpu = hcore << threads_shift;
+	kvmppc_host_rm_ops_hv->rm_core[hcore].rm_data = vcpu;
+	smp_muxed_ipi_set_message(hcpu, PPC_MSG_RM_HOST_ACTION);
+	icp_native_cause_ipi_rm(hcpu);
+}
+#else
+static inline void icp_send_hcore_msg(int hcore, struct kvm_vcpu *vcpu) { }
+#endif
+
+/*
+ * We start the search from our current CPU Id in the core map
+ * and go in a circle until we get back to our ID, looking for a
+ * core that is running in host context and that hasn't already
+ * been targeted for another rm_host_ops action.
+ *
+ * In the future, we could consider using a fairer algorithm (one
+ * that distributes the IPIs better).
+ *
+ * Returns -1 if no available core could be found in the host.
+ * Otherwise, returns a core Id which has been reserved for use.
+ */
+static inline int grab_next_hostcore(int start,
+		struct kvmppc_host_rm_core *rm_core, int max, int action)
+{
+	bool success;
+	int core;
+	union kvmppc_rm_state old, new;
+
+	for (core = start + 1; core < max; core++)  {
+		old = new = READ_ONCE(rm_core[core].rm_state);
+
+		if (!old.in_host || old.rm_action)
+			continue;
+
+		/* Try to grab this host core if not taken already. */
+		new.rm_action = action;
+
+		success = cmpxchg64(&rm_core[core].rm_state.raw,
+						old.raw, new.raw) == old.raw;
+		if (success) {
+			/*
+			 * Make sure that the store to rm_action is made
+			 * visible before we return to the caller (and before
+			 * its subsequent store to rm_data), to synchronize
+			 * with the IPI handler.
+			 */
+			smp_wmb();
+			return core;
+		}
+	}
+
+	return -1;
+}
+
+static inline int find_available_hostcore(int action)
+{
+	int core;
+	int my_core = smp_processor_id() >> threads_shift;
+	struct kvmppc_host_rm_core *rm_core = kvmppc_host_rm_ops_hv->rm_core;
+
+	core = grab_next_hostcore(my_core, rm_core, cpu_nr_cores(), action);
+	if (core == -1)
+		core = grab_next_hostcore(core, rm_core, my_core, action);
+
+	return core;
+}
+
 static void icp_rm_set_vcpu_irq(struct kvm_vcpu *vcpu,
 				struct kvm_vcpu *this_vcpu)
 {
 	struct kvmppc_icp *this_icp = this_vcpu->arch.icp;
 	int cpu;
+	int hcore;
 
 	/* Mark the target VCPU as having an interrupt pending */
 	vcpu->stat.queue_intr++;
@@ -66,11 +143,22 @@
 		return;
 	}
 
-	/* Check if the core is loaded, if not, too hard */
+	/*
+	 * Check if the core is loaded;
+	 * if not, find an available host core to post to in order to wake
+	 * the VCPU; if we can't find one, set up state to eventually return
+	 * H_TOO_HARD.
+	 */
 	cpu = vcpu->arch.thread_cpu;
 	if (cpu < 0 || cpu >= nr_cpu_ids) {
-		this_icp->rm_action |= XICS_RM_KICK_VCPU;
-		this_icp->rm_kick_target = vcpu;
+		hcore = -1;
+		if (kvmppc_host_rm_ops_hv && h_ipi_redirect)
+			hcore = find_available_hostcore(XICS_RM_KICK_VCPU);
+		if (hcore != -1) {
+			icp_send_hcore_msg(hcore, vcpu);
+		} else {
+			this_icp->rm_action |= XICS_RM_KICK_VCPU;
+			this_icp->rm_kick_target = vcpu;
+		}
 		return;
 	}
 
@@ -623,3 +711,40 @@
  bail:
 	return check_too_hard(xics, icp);
 }
+
+/*  --- Non-real mode XICS-related built-in routines ---  */
+
+/*
+ * Host-side operations invoked from real mode KVM
+ */
+static void rm_host_ipi_action(int action, void *data)
+{
+	switch (action) {
+	case XICS_RM_KICK_VCPU:
+		kvmppc_host_rm_ops_hv->vcpu_kick(data);
+		break;
+	default:
+		WARN(1, "Unexpected rm_action=%d data=%p\n", action, data);
+		break;
+	}
+
+}
+
+void kvmppc_xics_ipi_action(void)
+{
+	int core;
+	unsigned int cpu = smp_processor_id();
+	struct kvmppc_host_rm_core *rm_corep;
+
+	core = cpu >> threads_shift;
+	rm_corep = &kvmppc_host_rm_ops_hv->rm_core[core];
+
+	if (rm_corep->rm_data) {
+		rm_host_ipi_action(rm_corep->rm_state.rm_action,
+							rm_corep->rm_data);
+		/* Order these stores against the real mode KVM */
+		rm_corep->rm_data = NULL;
+		smp_wmb();
+		rm_corep->rm_state.rm_action = 0;
+	}
+}
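
The search order that find_available_hostcore() above ends up with can be summarized as: scan upward from the calling core to the last core, then wrap and scan from core 0 up to, but not including, the calling core. The toy sketch below (not part of the patch) prints that probe order; core availability is left out, since in the real code it is the cmpxchg on rm_state in grab_next_hostcore() that actually reserves a core.

#include <stdio.h>

static void print_probe_order(int my_core, int nr_cores)
{
	int core;

	/* first pass: my_core + 1 .. nr_cores - 1 */
	for (core = my_core + 1; core < nr_cores; core++)
		printf("%d ", core);
	/* wrap-around pass: 0 .. my_core - 1 */
	for (core = 0; core < my_core; core++)
		printf("%d ", core);
	printf("\n");
}

int main(void)
{
	print_probe_order(2, 8);	/* prints: 3 4 5 6 7 0 1 */
	print_probe_order(7, 8);	/* prints: 0 1 2 3 4 5 6 */
	return 0;
}
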
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index 25ae2c9..85b32f1 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -2020,8 +2020,8 @@
 	.long	0		/* 0x12c */
 	.long	0		/* 0x130 */
 	.long	DOTSYM(kvmppc_h_set_xdabr) - hcall_real_table
-	.long	0		/* 0x138 */
-	.long	0		/* 0x13c */
+	.long	DOTSYM(kvmppc_h_stuff_tce) - hcall_real_table
+	.long	DOTSYM(kvmppc_rm_h_put_tce_indirect) - hcall_real_table
 	.long	0		/* 0x140 */
 	.long	0		/* 0x144 */
 	.long	0		/* 0x148 */
diff --git a/arch/powerpc/kvm/book3s_pr_papr.c b/arch/powerpc/kvm/book3s_pr_papr.c
index f2c75a1..02176fd 100644
--- a/arch/powerpc/kvm/book3s_pr_papr.c
+++ b/arch/powerpc/kvm/book3s_pr_papr.c
@@ -280,6 +280,37 @@
 	return EMULATE_DONE;
 }
 
+static int kvmppc_h_pr_put_tce_indirect(struct kvm_vcpu *vcpu)
+{
+	unsigned long liobn = kvmppc_get_gpr(vcpu, 4);
+	unsigned long ioba = kvmppc_get_gpr(vcpu, 5);
+	unsigned long tce = kvmppc_get_gpr(vcpu, 6);
+	unsigned long npages = kvmppc_get_gpr(vcpu, 7);
+	long rc;
+
+	rc = kvmppc_h_put_tce_indirect(vcpu, liobn, ioba,
+			tce, npages);
+	if (rc == H_TOO_HARD)
+		return EMULATE_FAIL;
+	kvmppc_set_gpr(vcpu, 3, rc);
+	return EMULATE_DONE;
+}
+
+static int kvmppc_h_pr_stuff_tce(struct kvm_vcpu *vcpu)
+{
+	unsigned long liobn = kvmppc_get_gpr(vcpu, 4);
+	unsigned long ioba = kvmppc_get_gpr(vcpu, 5);
+	unsigned long tce_value = kvmppc_get_gpr(vcpu, 6);
+	unsigned long npages = kvmppc_get_gpr(vcpu, 7);
+	long rc;
+
+	rc = kvmppc_h_stuff_tce(vcpu, liobn, ioba, tce_value, npages);
+	if (rc == H_TOO_HARD)
+		return EMULATE_FAIL;
+	kvmppc_set_gpr(vcpu, 3, rc);
+	return EMULATE_DONE;
+}
+
 static int kvmppc_h_pr_xics_hcall(struct kvm_vcpu *vcpu, u32 cmd)
 {
 	long rc = kvmppc_xics_hcall(vcpu, cmd);
@@ -306,6 +337,10 @@
 		return kvmppc_h_pr_bulk_remove(vcpu);
 	case H_PUT_TCE:
 		return kvmppc_h_pr_put_tce(vcpu);
+	case H_PUT_TCE_INDIRECT:
+		return kvmppc_h_pr_put_tce_indirect(vcpu);
+	case H_STUFF_TCE:
+		return kvmppc_h_pr_stuff_tce(vcpu);
 	case H_CEDE:
 		kvmppc_set_msr_fast(vcpu, kvmppc_get_msr(vcpu) | MSR_EE);
 		kvm_vcpu_block(vcpu);
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index a3b182d..19aa59b 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -33,6 +33,7 @@
 #include <asm/tlbflush.h>
 #include <asm/cputhreads.h>
 #include <asm/irqflags.h>
+#include <asm/iommu.h>
 #include "timing.h"
 #include "irq.h"
 #include "../mm/mmu_decl.h"
@@ -437,6 +438,16 @@
 	unsigned int i;
 	struct kvm_vcpu *vcpu;
 
+#ifdef CONFIG_KVM_XICS
+	/*
+	 * We call kick_all_cpus_sync() to ensure that all
+	 * CPUs have executed any pending IPIs before we
+	 * continue and free the VCPU structures below.
+	 */
+	if (is_kvmppc_hv_enabled(kvm))
+		kick_all_cpus_sync();
+#endif
+
 	kvm_for_each_vcpu(i, vcpu, kvm)
 		kvm_arch_vcpu_free(vcpu);
 
@@ -509,6 +520,7 @@
 
 #ifdef CONFIG_PPC_BOOK3S_64
 	case KVM_CAP_SPAPR_TCE:
+	case KVM_CAP_SPAPR_TCE_64:
 	case KVM_CAP_PPC_ALLOC_HTAB:
 	case KVM_CAP_PPC_RTAS:
 	case KVM_CAP_PPC_FIXUP_HCALL:
@@ -569,6 +581,9 @@
 	case KVM_CAP_PPC_GET_SMMU_INFO:
 		r = 1;
 		break;
+	case KVM_CAP_SPAPR_MULTITCE:
+		r = 1;
+		break;
 #endif
 	default:
 		r = 0;
@@ -1331,13 +1346,34 @@
 		break;
 	}
 #ifdef CONFIG_PPC_BOOK3S_64
+	case KVM_CREATE_SPAPR_TCE_64: {
+		struct kvm_create_spapr_tce_64 create_tce_64;
+
+		r = -EFAULT;
+		if (copy_from_user(&create_tce_64, argp, sizeof(create_tce_64)))
+			goto out;
+		if (create_tce_64.flags) {
+			r = -EINVAL;
+			goto out;
+		}
+		r = kvm_vm_ioctl_create_spapr_tce(kvm, &create_tce_64);
+		goto out;
+	}
 	case KVM_CREATE_SPAPR_TCE: {
 		struct kvm_create_spapr_tce create_tce;
+		struct kvm_create_spapr_tce_64 create_tce_64;
 
 		r = -EFAULT;
 		if (copy_from_user(&create_tce, argp, sizeof(create_tce)))
 			goto out;
-		r = kvm_vm_ioctl_create_spapr_tce(kvm, &create_tce);
+
+		create_tce_64.liobn = create_tce.liobn;
+		create_tce_64.page_shift = IOMMU_PAGE_SHIFT_4K;
+		create_tce_64.offset = 0;
+		create_tce_64.size = create_tce.window_size >>
+				IOMMU_PAGE_SHIFT_4K;
+		create_tce_64.flags = 0;
+		r = kvm_vm_ioctl_create_spapr_tce(kvm, &create_tce_64);
 		goto out;
 	}
 	case KVM_PPC_GET_SMMU_INFO: {
diff --git a/arch/powerpc/mm/pgtable.c b/arch/powerpc/mm/pgtable.c
index 83dfd79..de37ff4 100644
--- a/arch/powerpc/mm/pgtable.c
+++ b/arch/powerpc/mm/pgtable.c
@@ -243,3 +243,11 @@
 }
 #endif /* CONFIG_DEBUG_VM */
 
+unsigned long vmalloc_to_phys(void *va)
+{
+	unsigned long pfn = vmalloc_to_pfn(va);
+
+	BUG_ON(!pfn);
+	return __pa(pfn_to_kaddr(pfn)) + offset_in_page(va);
+}
+EXPORT_SYMBOL_GPL(vmalloc_to_phys);
diff --git a/arch/powerpc/perf/hv-24x7.c b/arch/powerpc/perf/hv-24x7.c
index 9f9dfda..3b09ecf 100644
--- a/arch/powerpc/perf/hv-24x7.c
+++ b/arch/powerpc/perf/hv-24x7.c
@@ -493,14 +493,6 @@
 	}
 }
 
-static unsigned long vmalloc_to_phys(void *v)
-{
-	struct page *p = vmalloc_to_page(v);
-
-	BUG_ON(!p);
-	return page_to_phys(p) + offset_in_page(v);
-}
-
 /* */
 struct event_uniq {
 	struct rb_node node;
diff --git a/arch/powerpc/sysdev/xics/icp-native.c b/arch/powerpc/sysdev/xics/icp-native.c
index eae3265..afdf62f 100644
--- a/arch/powerpc/sysdev/xics/icp-native.c
+++ b/arch/powerpc/sysdev/xics/icp-native.c
@@ -159,6 +159,27 @@
 	icp_native_set_qirr(cpu, IPI_PRIORITY);
 }
 
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
+void icp_native_cause_ipi_rm(int cpu)
+{
+	/*
+	 * Currently not used to send IPIs to another CPU
+	 * on the same core. The only caller is KVM real mode.
+	 * This requires the physical address of the XICS to have
+	 * been saved previously in kvm_hstate in the paca.
+	 */
+	unsigned long xics_phys;
+
+	/*
+	 * Just like the cause_ipi functions, it is required to
+	 * include a full barrier (out8 includes a sync) before
+	 * causing the IPI.
+	 */
+	xics_phys = paca[cpu].kvm_hstate.xics_phys;
+	out_rm8((u8 *)(xics_phys + XICS_MFRR), IPI_PRIORITY);
+}
+#endif
+
 /*
  * Called when an interrupt is received on an off-line CPU to
  * clear the interrupt, so that the CPU can go back to nap mode.
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 3be9c83..683af90 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -254,12 +254,12 @@
 	  older machines.
 
 config MARCH_Z13
-	bool "IBM z13"
+	bool "IBM z13s and z13"
 	select HAVE_MARCH_Z13_FEATURES
 	help
-	  Select this to enable optimizations for IBM z13 (2964 series).
-	  The kernel will be slightly faster but will not work on older
-	  machines.
+	  Select this to enable optimizations for IBM z13s and z13 (2965 and
+	  2964 series). The kernel will be slightly faster but will not work on
+	  older machines.
 
 endchoice
 
diff --git a/arch/s390/include/asm/clp.h b/arch/s390/include/asm/clp.h
index a0e71a5..5687d62 100644
--- a/arch/s390/include/asm/clp.h
+++ b/arch/s390/include/asm/clp.h
@@ -4,14 +4,23 @@
 /* CLP common request & response block size */
 #define CLP_BLK_SIZE			PAGE_SIZE
 
+#define CLP_LPS_BASE	0
+#define CLP_LPS_PCI	2
+
 struct clp_req_hdr {
 	u16 len;
 	u16 cmd;
+	u32 fmt		: 4;
+	u32 reserved1	: 28;
+	u64 reserved2;
 } __packed;
 
 struct clp_rsp_hdr {
 	u16 len;
 	u16 rsp;
+	u32 fmt		: 4;
+	u32 reserved1	: 28;
+	u64 reserved2;
 } __packed;
 
 /* CLP Response Codes */
@@ -25,4 +34,22 @@
 #define CLP_RC_NODATA			0x0080	/* No data available */
 #define CLP_RC_FC_UNKNOWN		0x0100	/* Function code not recognized */
 
+/* Store logical-processor characteristics request */
+struct clp_req_slpc {
+	struct clp_req_hdr hdr;
+} __packed;
+
+struct clp_rsp_slpc {
+	struct clp_rsp_hdr hdr;
+	u32 reserved2[4];
+	u32 lpif[8];
+	u32 reserved3[8];
+	u32 lpic[8];
+} __packed;
+
+struct clp_req_rsp_slpc {
+	struct clp_req_slpc request;
+	struct clp_rsp_slpc response;
+} __packed;
+
 #endif
diff --git a/arch/s390/include/asm/gmap.h b/arch/s390/include/asm/gmap.h
new file mode 100644
index 0000000..d054c1b
--- /dev/null
+++ b/arch/s390/include/asm/gmap.h
@@ -0,0 +1,64 @@
+/*
+ *  KVM guest address space mapping code
+ *
+ *    Copyright IBM Corp. 2007, 2016
+ *    Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
+ */
+
+#ifndef _ASM_S390_GMAP_H
+#define _ASM_S390_GMAP_H
+
+/**
+ * struct gmap_struct - guest address space
+ * @crst_list: list of all crst tables used in the guest address space
+ * @mm: pointer to the parent mm_struct
+ * @guest_to_host: radix tree with guest to host address translation
+ * @host_to_guest: radix tree with pointer to segment table entries
+ * @guest_table_lock: spinlock to protect all entries in the guest page table
+ * @table: pointer to the page directory
+ * @asce: address space control element for gmap page table
+ * @pfault_enabled: defines if pfaults are applicable for the guest
+ */
+struct gmap {
+	struct list_head list;
+	struct list_head crst_list;
+	struct mm_struct *mm;
+	struct radix_tree_root guest_to_host;
+	struct radix_tree_root host_to_guest;
+	spinlock_t guest_table_lock;
+	unsigned long *table;
+	unsigned long asce;
+	unsigned long asce_end;
+	void *private;
+	bool pfault_enabled;
+};
+
+/**
+ * struct gmap_notifier - notify function block for page invalidation
+ * @notifier_call: address of callback function
+ */
+struct gmap_notifier {
+	struct list_head list;
+	void (*notifier_call)(struct gmap *gmap, unsigned long gaddr);
+};
+
+struct gmap *gmap_alloc(struct mm_struct *mm, unsigned long limit);
+void gmap_free(struct gmap *gmap);
+void gmap_enable(struct gmap *gmap);
+void gmap_disable(struct gmap *gmap);
+int gmap_map_segment(struct gmap *gmap, unsigned long from,
+		     unsigned long to, unsigned long len);
+int gmap_unmap_segment(struct gmap *gmap, unsigned long to, unsigned long len);
+unsigned long __gmap_translate(struct gmap *, unsigned long gaddr);
+unsigned long gmap_translate(struct gmap *, unsigned long gaddr);
+int __gmap_link(struct gmap *gmap, unsigned long gaddr, unsigned long vmaddr);
+int gmap_fault(struct gmap *, unsigned long gaddr, unsigned int fault_flags);
+void gmap_discard(struct gmap *, unsigned long from, unsigned long to);
+void __gmap_zap(struct gmap *, unsigned long gaddr);
+void gmap_unlink(struct mm_struct *, unsigned long *table, unsigned long vmaddr);
+
+void gmap_register_ipte_notifier(struct gmap_notifier *);
+void gmap_unregister_ipte_notifier(struct gmap_notifier *);
+int gmap_ipte_notify(struct gmap *, unsigned long start, unsigned long len);
+
+#endif /* _ASM_S390_GMAP_H */
diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h
index b0c8ad0..6da41fa 100644
--- a/arch/s390/include/asm/kvm_host.h
+++ b/arch/s390/include/asm/kvm_host.h
@@ -20,6 +20,7 @@
 #include <linux/kvm_types.h>
 #include <linux/kvm_host.h>
 #include <linux/kvm.h>
+#include <linux/seqlock.h>
 #include <asm/debug.h>
 #include <asm/cpu.h>
 #include <asm/fpu/api.h>
@@ -229,17 +230,11 @@
 	__u8	data[256];
 } __packed;
 
-struct kvm_s390_vregs {
-	__vector128 vrs[32];
-	__u8	reserved200[512];	/* for future vector expansion */
-} __packed;
-
 struct sie_page {
 	struct kvm_s390_sie_block sie_block;
 	__u8 reserved200[1024];		/* 0x0200 */
 	struct kvm_s390_itdb itdb;	/* 0x0600 */
-	__u8 reserved700[1280];		/* 0x0700 */
-	struct kvm_s390_vregs vregs;	/* 0x0c00 */
+	__u8 reserved700[2304];		/* 0x0700 */
 } __packed;
 
 struct kvm_vcpu_stat {
@@ -558,6 +553,15 @@
 	unsigned long pfault_token;
 	unsigned long pfault_select;
 	unsigned long pfault_compare;
+	bool cputm_enabled;
+	/*
+	 * The seqcount protects updates to cputm_start and sie_block.cputm,
+	 * so that we can have non-blocking reads with consistent values.
+	 * Only the owning VCPU thread (vcpu->cpu) is allowed to change these
+	 * values and to start/stop/enable/disable cpu timer accounting.
+	 */
+	seqcount_t cputm_seqcount;
+	__u64 cputm_start;
 };
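
The cputm_seqcount added above follows the usual seqcount pattern: only the owning VCPU thread updates cputm_start and sie_block.cputm (presumably bracketed by write_seqcount_begin()/write_seqcount_end()), while readers simply retry until they observe an unchanged sequence. A minimal read-side sketch, with hypothetical helper and parameter names that are not part of this series:

#include <linux/compiler.h>
#include <linux/seqlock.h>
#include <linux/types.h>

/* Hypothetical reader: take a consistent snapshot of the cpu timer
 * accounting values without blocking the owning VCPU thread. */
static u64 read_cputm_snapshot(seqcount_t *seq, u64 *cputm_start, u64 *cputm)
{
	unsigned int seq_nr;
	u64 start, value;

	do {
		seq_nr = read_seqcount_begin(seq);	/* even sequence, no writer active */
		start = READ_ONCE(*cputm_start);
		value = READ_ONCE(*cputm);
	} while (read_seqcount_retry(seq, seq_nr));	/* raced with the writer, retry */

	return value - start;	/* how the two values combine is illustrative only */
}
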
 
 struct kvm_vm_stat {
@@ -596,15 +600,11 @@
 #define S390_ARCH_FAC_MASK_SIZE_U64 \
 	(S390_ARCH_FAC_MASK_SIZE_BYTE / sizeof(u64))
 
-struct kvm_s390_fac {
-	/* facility list requested by guest */
-	__u64 list[S390_ARCH_FAC_LIST_SIZE_U64];
-	/* facility mask supported by kvm & hosting machine */
-	__u64 mask[S390_ARCH_FAC_LIST_SIZE_U64];
-};
-
 struct kvm_s390_cpu_model {
-	struct kvm_s390_fac *fac;
+	/* facility mask supported by kvm & hosting machine */
+	__u64 fac_mask[S390_ARCH_FAC_LIST_SIZE_U64];
+	/* facility list requested by guest (in dma page) */
+	__u64 *fac_list;
 	struct cpuid cpu_id;
 	unsigned short ibc;
 };
@@ -623,6 +623,16 @@
 	__u8    reserved80[128];                /* 0x0080 */
 };
 
+/*
+ * sie_page2 has to be allocated as DMA because fac_list and crycb need
+ * 31-bit addresses in the sie control block.
+ */
+struct sie_page2 {
+	__u64 fac_list[S390_ARCH_FAC_LIST_SIZE_U64];	/* 0x0000 */
+	struct kvm_s390_crypto_cb crycb;		/* 0x0800 */
+	u8 reserved900[0x1000 - 0x900];			/* 0x0900 */
+} __packed;
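
The DMA constraint spelled out in the comment above exists because the SIE control block holds only 31-bit pointers to fac_list and crycb, so the page must come from memory below 2 GB; on s390 that is what GFP_DMA guarantees. A minimal allocation sketch with a hypothetical helper name, not taken from this series:

#include <linux/gfp.h>

/* Hypothetical: allocate a zeroed page below 2 GB so that the 31-bit
 * pointers to fac_list and crycb in the SIE control block can reach it. */
static struct sie_page2 *alloc_sie_page2(void)
{
	return (struct sie_page2 *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
}

The page would be released again with free_page((unsigned long)sie_page2) when the VM is destroyed.
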
+
 struct kvm_arch{
 	void *sca;
 	int use_esca;
@@ -643,6 +653,7 @@
 	int ipte_lock_count;
 	struct mutex ipte_mutex;
 	spinlock_t start_stop_lock;
+	struct sie_page2 *sie_page2;
 	struct kvm_s390_cpu_model model;
 	struct kvm_s390_crypto crypto;
 	u64 epoch;
diff --git a/arch/s390/include/asm/pci.h b/arch/s390/include/asm/pci.h
index c873e68..f833082d 100644
--- a/arch/s390/include/asm/pci.h
+++ b/arch/s390/include/asm/pci.h
@@ -45,7 +45,7 @@
 	u64 rpcit_ops;
 	u64 dma_rbytes;
 	u64 dma_wbytes;
-} __packed __aligned(16);
+} __packed __aligned(64);
 
 enum zpci_state {
 	ZPCI_FN_STATE_RESERVED,
@@ -66,7 +66,6 @@
 
 /* Private data per function */
 struct zpci_dev {
-	struct pci_dev	*pdev;
 	struct pci_bus	*bus;
 	struct list_head entry;		/* list of all zpci_devices, needed for hotplug, etc. */
 
@@ -192,7 +191,7 @@
 /* Debug */
 int zpci_debug_init(void);
 void zpci_debug_exit(void);
-void zpci_debug_init_device(struct zpci_dev *);
+void zpci_debug_init_device(struct zpci_dev *, const char *);
 void zpci_debug_exit_device(struct zpci_dev *);
 void zpci_debug_info(struct zpci_dev *, struct seq_file *);
 
diff --git a/arch/s390/include/asm/pci_clp.h b/arch/s390/include/asm/pci_clp.h
index dd78f92..e75c64cb 100644
--- a/arch/s390/include/asm/pci_clp.h
+++ b/arch/s390/include/asm/pci_clp.h
@@ -49,9 +49,6 @@
 /* List PCI functions request */
 struct clp_req_list_pci {
 	struct clp_req_hdr hdr;
-	u32 fmt			:  4;	/* cmd request block format */
-	u32			: 28;
-	u64 reserved1;
 	u64 resume_token;
 	u64 reserved2;
 } __packed;
@@ -59,9 +56,6 @@
 /* List PCI functions response */
 struct clp_rsp_list_pci {
 	struct clp_rsp_hdr hdr;
-	u32 fmt			:  4;	/* cmd request block format */
-	u32			: 28;
-	u64 reserved1;
 	u64 resume_token;
 	u32 reserved2;
 	u16 max_fn;
@@ -73,9 +67,6 @@
 /* Query PCI function request */
 struct clp_req_query_pci {
 	struct clp_req_hdr hdr;
-	u32 fmt			:  4;	/* cmd request block format */
-	u32			: 28;
-	u64 reserved1;
 	u32 fh;				/* function handle */
 	u32 reserved2;
 	u64 reserved3;
@@ -84,9 +75,6 @@
 /* Query PCI function response */
 struct clp_rsp_query_pci {
 	struct clp_rsp_hdr hdr;
-	u32 fmt			:  4;	/* cmd request block format */
-	u32			: 28;
-	u64			: 64;
 	u16 vfn;			/* virtual fn number */
 	u16			:  7;
 	u16 util_str_avail	:  1;	/* utility string available? */
@@ -108,21 +96,15 @@
 /* Query PCI function group request */
 struct clp_req_query_pci_grp {
 	struct clp_req_hdr hdr;
-	u32 fmt			:  4;	/* cmd request block format */
-	u32			: 28;
-	u64 reserved1;
-	u32			: 24;
+	u32 reserved2		: 24;
 	u32 pfgid		:  8;	/* function group id */
-	u32 reserved2;
-	u64 reserved3;
+	u32 reserved3;
+	u64 reserved4;
 } __packed;
 
 /* Query PCI function group response */
 struct clp_rsp_query_pci_grp {
 	struct clp_rsp_hdr hdr;
-	u32 fmt			:  4;	/* cmd request block format */
-	u32			: 28;
-	u64 reserved1;
 	u16			:  4;
 	u16 noi			: 12;	/* number of interrupts */
 	u8 version;
@@ -141,9 +123,6 @@
 /* Set PCI function request */
 struct clp_req_set_pci {
 	struct clp_req_hdr hdr;
-	u32 fmt			:  4;	/* cmd request block format */
-	u32			: 28;
-	u64 reserved1;
 	u32 fh;				/* function handle */
 	u16 reserved2;
 	u8 oc;				/* operation controls */
@@ -154,9 +133,6 @@
 /* Set PCI function response */
 struct clp_rsp_set_pci {
 	struct clp_rsp_hdr hdr;
-	u32 fmt			:  4;	/* cmd request block format */
-	u32			: 28;
-	u64 reserved1;
 	u32 fh;				/* function handle */
 	u32 reserved3;
 	u64 reserved4;
diff --git a/arch/s390/include/asm/percpu.h b/arch/s390/include/asm/percpu.h
index 6d6556c..90240df 100644
--- a/arch/s390/include/asm/percpu.h
+++ b/arch/s390/include/asm/percpu.h
@@ -178,7 +178,6 @@
 	ret__;								\
 })
 
-#define this_cpu_cmpxchg_double_4 arch_this_cpu_cmpxchg_double
 #define this_cpu_cmpxchg_double_8 arch_this_cpu_cmpxchg_double
 
 #include <asm-generic/percpu.h>
diff --git a/arch/s390/include/asm/perf_event.h b/arch/s390/include/asm/perf_event.h
index f897ec7..1f7ff85 100644
--- a/arch/s390/include/asm/perf_event.h
+++ b/arch/s390/include/asm/perf_event.h
@@ -21,7 +21,7 @@
 #define PMU_F_ERR_LSDA			0x0200
 #define PMU_F_ERR_MASK			(PMU_F_ERR_IBE|PMU_F_ERR_LSDA)
 
-/* Perf defintions for PMU event attributes in sysfs */
+/* Perf definitions for PMU event attributes in sysfs */
 extern __init const struct attribute_group **cpumf_cf_event_group(void);
 extern ssize_t cpumf_events_sysfs_show(struct device *dev,
 				       struct device_attribute *attr,
diff --git a/arch/s390/include/asm/pgalloc.h b/arch/s390/include/asm/pgalloc.h
index d7cc79f..9b3d9b6 100644
--- a/arch/s390/include/asm/pgalloc.h
+++ b/arch/s390/include/asm/pgalloc.h
@@ -23,10 +23,6 @@
 void page_table_free_rcu(struct mmu_gather *, unsigned long *, unsigned long);
 extern int page_table_allocate_pgste;
 
-int set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
-			  unsigned long key, bool nq);
-unsigned long get_guest_storage_key(struct mm_struct *mm, unsigned long addr);
-
 static inline void clear_table(unsigned long *s, unsigned long val, size_t n)
 {
 	typedef struct { char _[n]; } addrtype;
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index 64ead80..2f66645 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -298,15 +298,15 @@
 
 /*
  * Segment table entry encoding (R = read-only, I = invalid, y = young bit):
- *				dy..R...I...wr
+ *				dy..R...I...rw
  * prot-none, clean, old	00..1...1...00
  * prot-none, clean, young	01..1...1...00
  * prot-none, dirty, old	10..1...1...00
  * prot-none, dirty, young	11..1...1...00
- * read-only, clean, old	00..1...1...01
- * read-only, clean, young	01..1...0...01
- * read-only, dirty, old	10..1...1...01
- * read-only, dirty, young	11..1...0...01
+ * read-only, clean, old	00..1...1...10
+ * read-only, clean, young	01..1...0...10
+ * read-only, dirty, old	10..1...1...10
+ * read-only, dirty, young	11..1...0...10
  * read-write, clean, old	00..1...1...11
  * read-write, clean, young	01..1...0...11
  * read-write, dirty, old	10..0...1...11
@@ -520,15 +520,6 @@
 	return (pmd_val(pmd) & ~_SEGMENT_ENTRY_BITS) != 0;
 }
 
-#define  __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
-extern int pmdp_set_access_flags(struct vm_area_struct *vma,
-				 unsigned long address, pmd_t *pmdp,
-				 pmd_t entry, int dirty);
-
-#define __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
-extern int pmdp_clear_flush_young(struct vm_area_struct *vma,
-				  unsigned long address, pmd_t *pmdp);
-
 #define __HAVE_ARCH_PMD_WRITE
 static inline int pmd_write(pmd_t pmd)
 {
@@ -631,208 +622,6 @@
 	return pmd;
 }
 
-static inline pgste_t pgste_get_lock(pte_t *ptep)
-{
-	unsigned long new = 0;
-#ifdef CONFIG_PGSTE
-	unsigned long old;
-
-	preempt_disable();
-	asm(
-		"	lg	%0,%2\n"
-		"0:	lgr	%1,%0\n"
-		"	nihh	%0,0xff7f\n"	/* clear PCL bit in old */
-		"	oihh	%1,0x0080\n"	/* set PCL bit in new */
-		"	csg	%0,%1,%2\n"
-		"	jl	0b\n"
-		: "=&d" (old), "=&d" (new), "=Q" (ptep[PTRS_PER_PTE])
-		: "Q" (ptep[PTRS_PER_PTE]) : "cc", "memory");
-#endif
-	return __pgste(new);
-}
-
-static inline void pgste_set_unlock(pte_t *ptep, pgste_t pgste)
-{
-#ifdef CONFIG_PGSTE
-	asm(
-		"	nihh	%1,0xff7f\n"	/* clear PCL bit */
-		"	stg	%1,%0\n"
-		: "=Q" (ptep[PTRS_PER_PTE])
-		: "d" (pgste_val(pgste)), "Q" (ptep[PTRS_PER_PTE])
-		: "cc", "memory");
-	preempt_enable();
-#endif
-}
-
-static inline pgste_t pgste_get(pte_t *ptep)
-{
-	unsigned long pgste = 0;
-#ifdef CONFIG_PGSTE
-	pgste = *(unsigned long *)(ptep + PTRS_PER_PTE);
-#endif
-	return __pgste(pgste);
-}
-
-static inline void pgste_set(pte_t *ptep, pgste_t pgste)
-{
-#ifdef CONFIG_PGSTE
-	*(pgste_t *)(ptep + PTRS_PER_PTE) = pgste;
-#endif
-}
-
-static inline pgste_t pgste_update_all(pte_t *ptep, pgste_t pgste,
-				       struct mm_struct *mm)
-{
-#ifdef CONFIG_PGSTE
-	unsigned long address, bits, skey;
-
-	if (!mm_use_skey(mm) || pte_val(*ptep) & _PAGE_INVALID)
-		return pgste;
-	address = pte_val(*ptep) & PAGE_MASK;
-	skey = (unsigned long) page_get_storage_key(address);
-	bits = skey & (_PAGE_CHANGED | _PAGE_REFERENCED);
-	/* Transfer page changed & referenced bit to guest bits in pgste */
-	pgste_val(pgste) |= bits << 48;		/* GR bit & GC bit */
-	/* Copy page access key and fetch protection bit to pgste */
-	pgste_val(pgste) &= ~(PGSTE_ACC_BITS | PGSTE_FP_BIT);
-	pgste_val(pgste) |= (skey & (_PAGE_ACC_BITS | _PAGE_FP_BIT)) << 56;
-#endif
-	return pgste;
-
-}
-
-static inline void pgste_set_key(pte_t *ptep, pgste_t pgste, pte_t entry,
-				 struct mm_struct *mm)
-{
-#ifdef CONFIG_PGSTE
-	unsigned long address;
-	unsigned long nkey;
-
-	if (!mm_use_skey(mm) || pte_val(entry) & _PAGE_INVALID)
-		return;
-	VM_BUG_ON(!(pte_val(*ptep) & _PAGE_INVALID));
-	address = pte_val(entry) & PAGE_MASK;
-	/*
-	 * Set page access key and fetch protection bit from pgste.
-	 * The guest C/R information is still in the PGSTE, set real
-	 * key C/R to 0.
-	 */
-	nkey = (pgste_val(pgste) & (PGSTE_ACC_BITS | PGSTE_FP_BIT)) >> 56;
-	nkey |= (pgste_val(pgste) & (PGSTE_GR_BIT | PGSTE_GC_BIT)) >> 48;
-	page_set_storage_key(address, nkey, 0);
-#endif
-}
-
-static inline pgste_t pgste_set_pte(pte_t *ptep, pgste_t pgste, pte_t entry)
-{
-	if ((pte_val(entry) & _PAGE_PRESENT) &&
-	    (pte_val(entry) & _PAGE_WRITE) &&
-	    !(pte_val(entry) & _PAGE_INVALID)) {
-		if (!MACHINE_HAS_ESOP) {
-			/*
-			 * Without enhanced suppression-on-protection force
-			 * the dirty bit on for all writable ptes.
-			 */
-			pte_val(entry) |= _PAGE_DIRTY;
-			pte_val(entry) &= ~_PAGE_PROTECT;
-		}
-		if (!(pte_val(entry) & _PAGE_PROTECT))
-			/* This pte allows write access, set user-dirty */
-			pgste_val(pgste) |= PGSTE_UC_BIT;
-	}
-	*ptep = entry;
-	return pgste;
-}
-
-/**
- * struct gmap_struct - guest address space
- * @crst_list: list of all crst tables used in the guest address space
- * @mm: pointer to the parent mm_struct
- * @guest_to_host: radix tree with guest to host address translation
- * @host_to_guest: radix tree with pointer to segment table entries
- * @guest_table_lock: spinlock to protect all entries in the guest page table
- * @table: pointer to the page directory
- * @asce: address space control element for gmap page table
- * @pfault_enabled: defines if pfaults are applicable for the guest
- */
-struct gmap {
-	struct list_head list;
-	struct list_head crst_list;
-	struct mm_struct *mm;
-	struct radix_tree_root guest_to_host;
-	struct radix_tree_root host_to_guest;
-	spinlock_t guest_table_lock;
-	unsigned long *table;
-	unsigned long asce;
-	unsigned long asce_end;
-	void *private;
-	bool pfault_enabled;
-};
-
-/**
- * struct gmap_notifier - notify function block for page invalidation
- * @notifier_call: address of callback function
- */
-struct gmap_notifier {
-	struct list_head list;
-	void (*notifier_call)(struct gmap *gmap, unsigned long gaddr);
-};
-
-struct gmap *gmap_alloc(struct mm_struct *mm, unsigned long limit);
-void gmap_free(struct gmap *gmap);
-void gmap_enable(struct gmap *gmap);
-void gmap_disable(struct gmap *gmap);
-int gmap_map_segment(struct gmap *gmap, unsigned long from,
-		     unsigned long to, unsigned long len);
-int gmap_unmap_segment(struct gmap *gmap, unsigned long to, unsigned long len);
-unsigned long __gmap_translate(struct gmap *, unsigned long gaddr);
-unsigned long gmap_translate(struct gmap *, unsigned long gaddr);
-int __gmap_link(struct gmap *gmap, unsigned long gaddr, unsigned long vmaddr);
-int gmap_fault(struct gmap *, unsigned long gaddr, unsigned int fault_flags);
-void gmap_discard(struct gmap *, unsigned long from, unsigned long to);
-void __gmap_zap(struct gmap *, unsigned long gaddr);
-bool gmap_test_and_clear_dirty(unsigned long address, struct gmap *);
-
-
-void gmap_register_ipte_notifier(struct gmap_notifier *);
-void gmap_unregister_ipte_notifier(struct gmap_notifier *);
-int gmap_ipte_notify(struct gmap *, unsigned long start, unsigned long len);
-void gmap_do_ipte_notify(struct mm_struct *, unsigned long addr, pte_t *);
-
-static inline pgste_t pgste_ipte_notify(struct mm_struct *mm,
-					unsigned long addr,
-					pte_t *ptep, pgste_t pgste)
-{
-#ifdef CONFIG_PGSTE
-	if (pgste_val(pgste) & PGSTE_IN_BIT) {
-		pgste_val(pgste) &= ~PGSTE_IN_BIT;
-		gmap_do_ipte_notify(mm, addr, ptep);
-	}
-#endif
-	return pgste;
-}
-
-/*
- * Certain architectures need to do special things when PTEs
- * within a page table are directly modified.  Thus, the following
- * hook is made available.
- */
-static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
-			      pte_t *ptep, pte_t entry)
-{
-	pgste_t pgste;
-
-	if (mm_has_pgste(mm)) {
-		pgste = pgste_get_lock(ptep);
-		pgste_val(pgste) &= ~_PGSTE_GPS_ZERO;
-		pgste_set_key(ptep, pgste, entry, mm);
-		pgste = pgste_set_pte(ptep, pgste, entry);
-		pgste_set_unlock(ptep, pgste);
-	} else {
-		*ptep = entry;
-	}
-}
-
 /*
  * query functions pte_write/pte_dirty/pte_young only work if
  * pte_present() is true. Undefined behaviour if not..
@@ -998,105 +787,6 @@
 	} while (nr != 255);
 }
 
-static inline void ptep_flush_direct(struct mm_struct *mm,
-				     unsigned long address, pte_t *ptep)
-{
-	int active, count;
-
-	if (pte_val(*ptep) & _PAGE_INVALID)
-		return;
-	active = (mm == current->active_mm) ? 1 : 0;
-	count = atomic_add_return(0x10000, &mm->context.attach_count);
-	if (MACHINE_HAS_TLB_LC && (count & 0xffff) <= active &&
-	    cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id())))
-		__ptep_ipte_local(address, ptep);
-	else
-		__ptep_ipte(address, ptep);
-	atomic_sub(0x10000, &mm->context.attach_count);
-}
-
-static inline void ptep_flush_lazy(struct mm_struct *mm,
-				   unsigned long address, pte_t *ptep)
-{
-	int active, count;
-
-	if (pte_val(*ptep) & _PAGE_INVALID)
-		return;
-	active = (mm == current->active_mm) ? 1 : 0;
-	count = atomic_add_return(0x10000, &mm->context.attach_count);
-	if ((count & 0xffff) <= active) {
-		pte_val(*ptep) |= _PAGE_INVALID;
-		mm->context.flush_mm = 1;
-	} else
-		__ptep_ipte(address, ptep);
-	atomic_sub(0x10000, &mm->context.attach_count);
-}
-
-/*
- * Get (and clear) the user dirty bit for a pte.
- */
-static inline int ptep_test_and_clear_user_dirty(struct mm_struct *mm,
-						 unsigned long addr,
-						 pte_t *ptep)
-{
-	pgste_t pgste;
-	pte_t pte;
-	int dirty;
-
-	if (!mm_has_pgste(mm))
-		return 0;
-	pgste = pgste_get_lock(ptep);
-	dirty = !!(pgste_val(pgste) & PGSTE_UC_BIT);
-	pgste_val(pgste) &= ~PGSTE_UC_BIT;
-	pte = *ptep;
-	if (dirty && (pte_val(pte) & _PAGE_PRESENT)) {
-		pgste = pgste_ipte_notify(mm, addr, ptep, pgste);
-		__ptep_ipte(addr, ptep);
-		if (MACHINE_HAS_ESOP || !(pte_val(pte) & _PAGE_WRITE))
-			pte_val(pte) |= _PAGE_PROTECT;
-		else
-			pte_val(pte) |= _PAGE_INVALID;
-		*ptep = pte;
-	}
-	pgste_set_unlock(ptep, pgste);
-	return dirty;
-}
-
-#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
-static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
-					    unsigned long addr, pte_t *ptep)
-{
-	pgste_t pgste;
-	pte_t pte, oldpte;
-	int young;
-
-	if (mm_has_pgste(vma->vm_mm)) {
-		pgste = pgste_get_lock(ptep);
-		pgste = pgste_ipte_notify(vma->vm_mm, addr, ptep, pgste);
-	}
-
-	oldpte = pte = *ptep;
-	ptep_flush_direct(vma->vm_mm, addr, ptep);
-	young = pte_young(pte);
-	pte = pte_mkold(pte);
-
-	if (mm_has_pgste(vma->vm_mm)) {
-		pgste = pgste_update_all(&oldpte, pgste, vma->vm_mm);
-		pgste = pgste_set_pte(ptep, pgste, pte);
-		pgste_set_unlock(ptep, pgste);
-	} else
-		*ptep = pte;
-
-	return young;
-}
-
-#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
-static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
-					 unsigned long address, pte_t *ptep)
-{
-	return ptep_test_and_clear_young(vma, address, ptep);
-}
-
 /*
  * This is hard to understand. ptep_get_and_clear and ptep_clear_flush
  * both clear the TLB for the unmapped pte. The reason is that
@@ -1110,91 +800,42 @@
  * have ptep_get_and_clear do the tlb flush. In exchange flush_tlb_range
  * is a nop.
  */
+pte_t ptep_xchg_direct(struct mm_struct *, unsigned long, pte_t *, pte_t);
+pte_t ptep_xchg_lazy(struct mm_struct *, unsigned long, pte_t *, pte_t);
+
+#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
+static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
+					    unsigned long addr, pte_t *ptep)
+{
+	pte_t pte = *ptep;
+
+	pte = ptep_xchg_direct(vma->vm_mm, addr, ptep, pte_mkold(pte));
+	return pte_young(pte);
+}
+
+#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
+static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
+					 unsigned long address, pte_t *ptep)
+{
+	return ptep_test_and_clear_young(vma, address, ptep);
+}
+
 #define __HAVE_ARCH_PTEP_GET_AND_CLEAR
 static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
-				       unsigned long address, pte_t *ptep)
+				       unsigned long addr, pte_t *ptep)
 {
-	pgste_t pgste;
-	pte_t pte;
-
-	if (mm_has_pgste(mm)) {
-		pgste = pgste_get_lock(ptep);
-		pgste = pgste_ipte_notify(mm, address, ptep, pgste);
-	}
-
-	pte = *ptep;
-	ptep_flush_lazy(mm, address, ptep);
-	pte_val(*ptep) = _PAGE_INVALID;
-
-	if (mm_has_pgste(mm)) {
-		pgste = pgste_update_all(&pte, pgste, mm);
-		pgste_set_unlock(ptep, pgste);
-	}
-	return pte;
+	return ptep_xchg_lazy(mm, addr, ptep, __pte(_PAGE_INVALID));
 }
 
 #define __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
-static inline pte_t ptep_modify_prot_start(struct mm_struct *mm,
-					   unsigned long address,
-					   pte_t *ptep)
-{
-	pgste_t pgste;
-	pte_t pte;
-
-	if (mm_has_pgste(mm)) {
-		pgste = pgste_get_lock(ptep);
-		pgste_ipte_notify(mm, address, ptep, pgste);
-	}
-
-	pte = *ptep;
-	ptep_flush_lazy(mm, address, ptep);
-
-	if (mm_has_pgste(mm)) {
-		pgste = pgste_update_all(&pte, pgste, mm);
-		pgste_set(ptep, pgste);
-	}
-	return pte;
-}
-
-static inline void ptep_modify_prot_commit(struct mm_struct *mm,
-					   unsigned long address,
-					   pte_t *ptep, pte_t pte)
-{
-	pgste_t pgste;
-
-	if (mm_has_pgste(mm)) {
-		pgste = pgste_get(ptep);
-		pgste_set_key(ptep, pgste, pte, mm);
-		pgste = pgste_set_pte(ptep, pgste, pte);
-		pgste_set_unlock(ptep, pgste);
-	} else
-		*ptep = pte;
-}
+pte_t ptep_modify_prot_start(struct mm_struct *, unsigned long, pte_t *);
+void ptep_modify_prot_commit(struct mm_struct *, unsigned long, pte_t *, pte_t);
 
 #define __HAVE_ARCH_PTEP_CLEAR_FLUSH
 static inline pte_t ptep_clear_flush(struct vm_area_struct *vma,
-				     unsigned long address, pte_t *ptep)
+				     unsigned long addr, pte_t *ptep)
 {
-	pgste_t pgste;
-	pte_t pte;
-
-	if (mm_has_pgste(vma->vm_mm)) {
-		pgste = pgste_get_lock(ptep);
-		pgste = pgste_ipte_notify(vma->vm_mm, address, ptep, pgste);
-	}
-
-	pte = *ptep;
-	ptep_flush_direct(vma->vm_mm, address, ptep);
-	pte_val(*ptep) = _PAGE_INVALID;
-
-	if (mm_has_pgste(vma->vm_mm)) {
-		if ((pgste_val(pgste) & _PGSTE_GPS_USAGE_MASK) ==
-		    _PGSTE_GPS_USAGE_UNUSED)
-			pte_val(pte) |= _PAGE_UNUSED;
-		pgste = pgste_update_all(&pte, pgste, vma->vm_mm);
-		pgste_set_unlock(ptep, pgste);
-	}
-	return pte;
+	return ptep_xchg_direct(vma->vm_mm, addr, ptep, __pte(_PAGE_INVALID));
 }
 
 /*
@@ -1206,83 +847,69 @@
  */
 #define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
 static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
-					    unsigned long address,
+					    unsigned long addr,
 					    pte_t *ptep, int full)
 {
-	pgste_t pgste;
-	pte_t pte;
-
-	if (!full && mm_has_pgste(mm)) {
-		pgste = pgste_get_lock(ptep);
-		pgste = pgste_ipte_notify(mm, address, ptep, pgste);
+	if (full) {
+		pte_t pte = *ptep;
+		*ptep = __pte(_PAGE_INVALID);
+		return pte;
 	}
-
-	pte = *ptep;
-	if (!full)
-		ptep_flush_lazy(mm, address, ptep);
-	pte_val(*ptep) = _PAGE_INVALID;
-
-	if (!full && mm_has_pgste(mm)) {
-		pgste = pgste_update_all(&pte, pgste, mm);
-		pgste_set_unlock(ptep, pgste);
-	}
-	return pte;
+	return ptep_xchg_lazy(mm, addr, ptep, __pte(_PAGE_INVALID));
 }
 
 #define __HAVE_ARCH_PTEP_SET_WRPROTECT
-static inline pte_t ptep_set_wrprotect(struct mm_struct *mm,
-				       unsigned long address, pte_t *ptep)
+static inline void ptep_set_wrprotect(struct mm_struct *mm,
+				      unsigned long addr, pte_t *ptep)
 {
-	pgste_t pgste;
 	pte_t pte = *ptep;
 
-	if (pte_write(pte)) {
-		if (mm_has_pgste(mm)) {
-			pgste = pgste_get_lock(ptep);
-			pgste = pgste_ipte_notify(mm, address, ptep, pgste);
-		}
-
-		ptep_flush_lazy(mm, address, ptep);
-		pte = pte_wrprotect(pte);
-
-		if (mm_has_pgste(mm)) {
-			pgste = pgste_set_pte(ptep, pgste, pte);
-			pgste_set_unlock(ptep, pgste);
-		} else
-			*ptep = pte;
-	}
-	return pte;
+	if (pte_write(pte))
+		ptep_xchg_lazy(mm, addr, ptep, pte_wrprotect(pte));
 }
 
 #define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
 static inline int ptep_set_access_flags(struct vm_area_struct *vma,
-					unsigned long address, pte_t *ptep,
+					unsigned long addr, pte_t *ptep,
 					pte_t entry, int dirty)
 {
-	pgste_t pgste;
-	pte_t oldpte;
-
-	oldpte = *ptep;
-	if (pte_same(oldpte, entry))
+	if (pte_same(*ptep, entry))
 		return 0;
-	if (mm_has_pgste(vma->vm_mm)) {
-		pgste = pgste_get_lock(ptep);
-		pgste = pgste_ipte_notify(vma->vm_mm, address, ptep, pgste);
-	}
-
-	ptep_flush_direct(vma->vm_mm, address, ptep);
-
-	if (mm_has_pgste(vma->vm_mm)) {
-		if (pte_val(oldpte) & _PAGE_INVALID)
-			pgste_set_key(ptep, pgste, entry, vma->vm_mm);
-		pgste = pgste_set_pte(ptep, pgste, entry);
-		pgste_set_unlock(ptep, pgste);
-	} else
-		*ptep = entry;
+	ptep_xchg_direct(vma->vm_mm, addr, ptep, entry);
 	return 1;
 }
 
 /*
+ * Additional functions to handle KVM guest page tables
+ */
+void ptep_set_pte_at(struct mm_struct *mm, unsigned long addr,
+		     pte_t *ptep, pte_t entry);
+void ptep_set_notify(struct mm_struct *mm, unsigned long addr, pte_t *ptep);
+void ptep_notify(struct mm_struct *mm, unsigned long addr, pte_t *ptep);
+void ptep_zap_unused(struct mm_struct *mm, unsigned long addr,
+		     pte_t *ptep, int reset);
+void ptep_zap_key(struct mm_struct *mm, unsigned long addr, pte_t *ptep);
+
+bool test_and_clear_guest_dirty(struct mm_struct *mm, unsigned long address);
+int set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
+			  unsigned char key, bool nq);
+unsigned char get_guest_storage_key(struct mm_struct *mm, unsigned long addr);
+
+/*
+ * Certain architectures need to do special things when PTEs
+ * within a page table are directly modified.  Thus, the following
+ * hook is made available.
+ */
+static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
+			      pte_t *ptep, pte_t entry)
+{
+	if (mm_has_pgste(mm))
+		ptep_set_pte_at(mm, addr, ptep, entry);
+	else
+		*ptep = entry;
+}
+
+/*
  * Conversion functions: convert a page and protection to a page entry,
  * and a page entry and page directory to the page they refer to.
  */
@@ -1476,54 +1103,51 @@
 		: "cc" );
 }
 
-static inline void pmdp_flush_direct(struct mm_struct *mm,
-				     unsigned long address, pmd_t *pmdp)
-{
-	int active, count;
-
-	if (pmd_val(*pmdp) & _SEGMENT_ENTRY_INVALID)
-		return;
-	if (!MACHINE_HAS_IDTE) {
-		__pmdp_csp(pmdp);
-		return;
-	}
-	active = (mm == current->active_mm) ? 1 : 0;
-	count = atomic_add_return(0x10000, &mm->context.attach_count);
-	if (MACHINE_HAS_TLB_LC && (count & 0xffff) <= active &&
-	    cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id())))
-		__pmdp_idte_local(address, pmdp);
-	else
-		__pmdp_idte(address, pmdp);
-	atomic_sub(0x10000, &mm->context.attach_count);
-}
-
-static inline void pmdp_flush_lazy(struct mm_struct *mm,
-				   unsigned long address, pmd_t *pmdp)
-{
-	int active, count;
-
-	if (pmd_val(*pmdp) & _SEGMENT_ENTRY_INVALID)
-		return;
-	active = (mm == current->active_mm) ? 1 : 0;
-	count = atomic_add_return(0x10000, &mm->context.attach_count);
-	if ((count & 0xffff) <= active) {
-		pmd_val(*pmdp) |= _SEGMENT_ENTRY_INVALID;
-		mm->context.flush_mm = 1;
-	} else if (MACHINE_HAS_IDTE)
-		__pmdp_idte(address, pmdp);
-	else
-		__pmdp_csp(pmdp);
-	atomic_sub(0x10000, &mm->context.attach_count);
-}
+pmd_t pmdp_xchg_direct(struct mm_struct *, unsigned long, pmd_t *, pmd_t);
+pmd_t pmdp_xchg_lazy(struct mm_struct *, unsigned long, pmd_t *, pmd_t);
 
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 
 #define __HAVE_ARCH_PGTABLE_DEPOSIT
-extern void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
-				       pgtable_t pgtable);
+void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
+				pgtable_t pgtable);
 
 #define __HAVE_ARCH_PGTABLE_WITHDRAW
-extern pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp);
+pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp);
+
+#define  __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
+static inline int pmdp_set_access_flags(struct vm_area_struct *vma,
+					unsigned long addr, pmd_t *pmdp,
+					pmd_t entry, int dirty)
+{
+	VM_BUG_ON(addr & ~HPAGE_MASK);
+
+	entry = pmd_mkyoung(entry);
+	if (dirty)
+		entry = pmd_mkdirty(entry);
+	if (pmd_val(*pmdp) == pmd_val(entry))
+		return 0;
+	pmdp_xchg_direct(vma->vm_mm, addr, pmdp, entry);
+	return 1;
+}
+
+#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
+static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
+					    unsigned long addr, pmd_t *pmdp)
+{
+	pmd_t pmd = *pmdp;
+
+	pmd = pmdp_xchg_direct(vma->vm_mm, addr, pmdp, pmd_mkold(pmd));
+	return pmd_young(pmd);
+}
+
+#define __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
+static inline int pmdp_clear_flush_young(struct vm_area_struct *vma,
+					 unsigned long addr, pmd_t *pmdp)
+{
+	VM_BUG_ON(addr & ~HPAGE_MASK);
+	return pmdp_test_and_clear_young(vma, addr, pmdp);
+}
 
 static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
 			      pmd_t *pmdp, pmd_t entry)
@@ -1539,66 +1163,48 @@
 	return pmd;
 }
 
-#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
-static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
-					    unsigned long address, pmd_t *pmdp)
-{
-	pmd_t pmd;
-
-	pmd = *pmdp;
-	pmdp_flush_direct(vma->vm_mm, address, pmdp);
-	*pmdp = pmd_mkold(pmd);
-	return pmd_young(pmd);
-}
-
 #define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
 static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
-					    unsigned long address, pmd_t *pmdp)
+					    unsigned long addr, pmd_t *pmdp)
 {
-	pmd_t pmd = *pmdp;
-
-	pmdp_flush_direct(mm, address, pmdp);
-	pmd_clear(pmdp);
-	return pmd;
+	return pmdp_xchg_direct(mm, addr, pmdp, __pmd(_SEGMENT_ENTRY_INVALID));
 }
 
 #define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR_FULL
 static inline pmd_t pmdp_huge_get_and_clear_full(struct mm_struct *mm,
-						 unsigned long address,
+						 unsigned long addr,
 						 pmd_t *pmdp, int full)
 {
-	pmd_t pmd = *pmdp;
-
-	if (!full)
-		pmdp_flush_lazy(mm, address, pmdp);
-	pmd_clear(pmdp);
-	return pmd;
+	if (full) {
+		pmd_t pmd = *pmdp;
+		*pmdp = __pmd(_SEGMENT_ENTRY_INVALID);
+		return pmd;
+	}
+	return pmdp_xchg_lazy(mm, addr, pmdp, __pmd(_SEGMENT_ENTRY_INVALID));
 }
 
 #define __HAVE_ARCH_PMDP_HUGE_CLEAR_FLUSH
 static inline pmd_t pmdp_huge_clear_flush(struct vm_area_struct *vma,
-					  unsigned long address, pmd_t *pmdp)
+					  unsigned long addr, pmd_t *pmdp)
 {
-	return pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);
+	return pmdp_huge_get_and_clear(vma->vm_mm, addr, pmdp);
 }
 
 #define __HAVE_ARCH_PMDP_INVALIDATE
 static inline void pmdp_invalidate(struct vm_area_struct *vma,
-				   unsigned long address, pmd_t *pmdp)
+				   unsigned long addr, pmd_t *pmdp)
 {
-	pmdp_flush_direct(vma->vm_mm, address, pmdp);
+	pmdp_xchg_direct(vma->vm_mm, addr, pmdp, __pmd(_SEGMENT_ENTRY_INVALID));
 }
 
 #define __HAVE_ARCH_PMDP_SET_WRPROTECT
 static inline void pmdp_set_wrprotect(struct mm_struct *mm,
-				      unsigned long address, pmd_t *pmdp)
+				      unsigned long addr, pmd_t *pmdp)
 {
 	pmd_t pmd = *pmdp;
 
-	if (pmd_write(pmd)) {
-		pmdp_flush_direct(mm, address, pmdp);
-		set_pmd_at(mm, address, pmdp, pmd_wrprotect(pmd));
-	}
+	if (pmd_write(pmd))
+		pmd = pmdp_xchg_lazy(mm, addr, pmdp, pmd_wrprotect(pmd));
 }
 
 static inline pmd_t pmdp_collapse_flush(struct vm_area_struct *vma,
diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h
index 1c4fe12..d6fd22e 100644
--- a/arch/s390/include/asm/processor.h
+++ b/arch/s390/include/asm/processor.h
@@ -184,6 +184,10 @@
 struct mm_struct;
 struct seq_file;
 
+typedef int (*dump_trace_func_t)(void *data, unsigned long address);
+void dump_trace(dump_trace_func_t func, void *data,
+		struct task_struct *task, unsigned long sp);
+
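
As the callers later in this series (dumpstack.c, stacktrace.c, perf_event.c) show, dump_trace() invokes func once per return address found while walking the panic, async and kernel stacks, and a non-zero return value from func cuts the walk short. A hedged usage sketch with hypothetical names, not part of this patch, using the current_stack_pointer() helper added further down in this header:

/* Hypothetical callback: count the kernel frames seen during the walk. */
static int count_frame(void *data, unsigned long address)
{
	(*(unsigned int *)data)++;
	return 0;	/* keep walking */
}

static unsigned int count_kernel_frames(void)
{
	unsigned int frames = 0;

	dump_trace(count_frame, &frames, NULL, current_stack_pointer());
	return frames;
}
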
 void show_cacheinfo(struct seq_file *m);
 
 /* Free all resources held by a thread. */
@@ -203,6 +207,14 @@
 /* Has task runtime instrumentation enabled ? */
 #define is_ri_task(tsk) (!!(tsk)->thread.ri_cb)
 
+static inline unsigned long current_stack_pointer(void)
+{
+	unsigned long sp;
+
+	asm volatile("la %0,0(15)" : "=a" (sp));
+	return sp;
+}
+
 static inline unsigned short stap(void)
 {
 	unsigned short cpu_address;
diff --git a/arch/s390/include/asm/rwsem.h b/arch/s390/include/asm/rwsem.h
index 4b43ee7..fead491 100644
--- a/arch/s390/include/asm/rwsem.h
+++ b/arch/s390/include/asm/rwsem.h
@@ -31,7 +31,7 @@
  * This should be totally fair - if anything is waiting, a process that wants a
  * lock will go to the back of the queue. When the currently active lock is
  * released, if there's a writer at the front of the queue, then that and only
- * that will be woken up; if there's a bunch of consequtive readers at the
+ * that will be woken up; if there's a bunch of consecutive readers at the
  * front, then they'll all be woken up, but no other readers will be.
  */
 
diff --git a/arch/s390/include/asm/setup.h b/arch/s390/include/asm/setup.h
index 6983722..c0f0efb 100644
--- a/arch/s390/include/asm/setup.h
+++ b/arch/s390/include/asm/setup.h
@@ -101,6 +101,8 @@
 #define pfault_fini()		do { } while (0)
 #endif /* CONFIG_PFAULT */
 
+void report_user_fault(struct pt_regs *regs, long signr, int is_mm_fault);
+
 extern void cmma_init(void);
 
 extern void (*_machine_restart)(char *command);
diff --git a/arch/s390/include/asm/xor.h b/arch/s390/include/asm/xor.h
index c82eb12..c988df7 100644
--- a/arch/s390/include/asm/xor.h
+++ b/arch/s390/include/asm/xor.h
@@ -1 +1,20 @@
-#include <asm-generic/xor.h>
+/*
+ * Optimized xor routines
+ *
+ * Copyright IBM Corp. 2016
+ * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
+ */
+#ifndef _ASM_S390_XOR_H
+#define _ASM_S390_XOR_H
+
+extern struct xor_block_template xor_block_xc;
+
+#undef XOR_TRY_TEMPLATES
+#define XOR_TRY_TEMPLATES				\
+do {							\
+	xor_speed(&xor_block_xc);			\
+} while (0)
+
+#define XOR_SELECT_TEMPLATE(FASTEST)	(&xor_block_xc)
+
+#endif /* _ASM_S390_XOR_H */
diff --git a/arch/s390/include/uapi/asm/clp.h b/arch/s390/include/uapi/asm/clp.h
new file mode 100644
index 0000000..ab72d9d
--- /dev/null
+++ b/arch/s390/include/uapi/asm/clp.h
@@ -0,0 +1,28 @@
+/*
+ * ioctl interface for /dev/clp
+ *
+ * Copyright IBM Corp. 2016
+ * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
+ */
+
+#ifndef _ASM_CLP_H
+#define _ASM_CLP_H
+
+#include <linux/types.h>
+#include <linux/ioctl.h>
+
+struct clp_req {
+	unsigned int c : 1;
+	unsigned int r : 1;
+	unsigned int lps : 6;
+	unsigned int cmd : 8;
+	unsigned int : 16;
+	unsigned int reserved;
+	__u64 data_p;
+};
+
+#define CLP_IOCTL_MAGIC 'c'
+
+#define CLP_SYNC _IOWR(CLP_IOCTL_MAGIC, 0xC1, struct clp_req)
+
+#endif
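
To show how this uapi is intended to be used: a userspace caller fills a struct clp_req describing the CLP command, points data_p at a request/response block, and issues CLP_SYNC on /dev/clp. A hedged sketch (command and lps values are placeholders; the real constants live in the kernel-side clp.h):

#include <fcntl.h>
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <asm/clp.h>	/* struct clp_req, CLP_SYNC */

/* Illustrative only: issue one synchronous CLP request through /dev/clp.
 * req_rsp points to the caller's CLP request/response block. */
static int clp_sync(void *req_rsp, unsigned int cmd, unsigned int lps)
{
	struct clp_req req;
	int fd, rc;

	memset(&req, 0, sizeof(req));
	req.cmd = cmd;		/* CLP command code */
	req.lps = lps;		/* e.g. 2 (CLP_LPS_PCI) for PCI requests */
	req.data_p = (uint64_t)(unsigned long)req_rsp;

	fd = open("/dev/clp", O_RDWR);
	if (fd < 0)
		return -1;
	rc = ioctl(fd, CLP_SYNC, &req);
	close(fd);
	return rc;
}
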
diff --git a/arch/s390/include/uapi/asm/kvm.h b/arch/s390/include/uapi/asm/kvm.h
index fe84bd5..347fe5a 100644
--- a/arch/s390/include/uapi/asm/kvm.h
+++ b/arch/s390/include/uapi/asm/kvm.h
@@ -154,6 +154,7 @@
 #define KVM_SYNC_PFAULT (1UL << 5)
 #define KVM_SYNC_VRS    (1UL << 6)
 #define KVM_SYNC_RICCB  (1UL << 7)
+#define KVM_SYNC_FPRS   (1UL << 8)
 /* definition of registers in kvm_run */
 struct kvm_sync_regs {
 	__u64 prefix;	/* prefix register */
@@ -168,9 +169,12 @@
 	__u64 pft;	/* pfault token [PFAULT] */
 	__u64 pfs;	/* pfault select [PFAULT] */
 	__u64 pfc;	/* pfault compare [PFAULT] */
-	__u64 vrs[32][2];	/* vector registers */
+	union {
+		__u64 vrs[32][2];	/* vector registers (KVM_SYNC_VRS) */
+		__u64 fprs[16];		/* fp registers (KVM_SYNC_FPRS) */
+	};
 	__u8  reserved[512];	/* for future vector expansion */
-	__u32 fpc;	/* only valid with vector registers */
+	__u32 fpc;		/* valid on KVM_SYNC_VRS or KVM_SYNC_FPRS */
 	__u8 padding[52];	/* riccb needs to be 64byte aligned */
 	__u8 riccb[64];		/* runtime instrumentation controls block */
 };
diff --git a/arch/s390/include/uapi/asm/sie.h b/arch/s390/include/uapi/asm/sie.h
index ee69c08..5dbaa72 100644
--- a/arch/s390/include/uapi/asm/sie.h
+++ b/arch/s390/include/uapi/asm/sie.h
@@ -7,6 +7,7 @@
 	{ 0x9c, "DIAG (0x9c) time slice end directed" },	\
 	{ 0x204, "DIAG (0x204) logical-cpu utilization" },	\
 	{ 0x258, "DIAG (0x258) page-reference services" },	\
+	{ 0x288, "DIAG (0x288) watchdog functions" },		\
 	{ 0x308, "DIAG (0x308) ipl functions" },		\
 	{ 0x500, "DIAG (0x500) KVM virtio functions" },		\
 	{ 0x501, "DIAG (0x501) KVM breakpoint" }
diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c
index 53bbc9e..1f95cc1 100644
--- a/arch/s390/kernel/asm-offsets.c
+++ b/arch/s390/kernel/asm-offsets.c
@@ -12,6 +12,7 @@
 #include <asm/idle.h>
 #include <asm/vdso.h>
 #include <asm/pgtable.h>
+#include <asm/gmap.h>
 
 /*
  * Make sure that the compiler is new enough. We want a compiler that
diff --git a/arch/s390/kernel/cpcmd.c b/arch/s390/kernel/cpcmd.c
index 7f76891..7f48e56 100644
--- a/arch/s390/kernel/cpcmd.c
+++ b/arch/s390/kernel/cpcmd.c
@@ -96,8 +96,7 @@
 			(((unsigned long)response + rlen) >> 31)) {
 		lowbuf = kmalloc(rlen, GFP_KERNEL | GFP_DMA);
 		if (!lowbuf) {
-			pr_warning("The cpcmd kernel function failed to "
-				   "allocate a response buffer\n");
+			pr_warn("The cpcmd kernel function failed to allocate a response buffer\n");
 			return -ENOMEM;
 		}
 		spin_lock_irqsave(&cpcmd_lock, flags);
diff --git a/arch/s390/kernel/debug.c b/arch/s390/kernel/debug.c
index c890a55..aa12de7 100644
--- a/arch/s390/kernel/debug.c
+++ b/arch/s390/kernel/debug.c
@@ -699,8 +699,7 @@
 	/* Since debugfs currently does not support uid/gid other than root, */
 	/* we do not allow gid/uid != 0 until we get support for that. */
 	if ((uid != 0) || (gid != 0))
-		pr_warning("Root becomes the owner of all s390dbf files "
-			   "in sysfs\n");
+		pr_warn("Root becomes the owner of all s390dbf files in sysfs\n");
 	BUG_ON(!initialized);
 	mutex_lock(&debug_mutex);
 
@@ -1307,8 +1306,7 @@
 		new_level = debug_get_uint(str);
 	}
 	if(new_level < 0) {
-		pr_warning("%s is not a valid level for a debug "
-			   "feature\n", str);
+		pr_warn("%s is not a valid level for a debug feature\n", str);
 		rc = -EINVAL;
 	} else {
 		debug_set_level(id, new_level);
diff --git a/arch/s390/kernel/dis.c b/arch/s390/kernel/dis.c
index 62973ef..8cb9bfd 100644
--- a/arch/s390/kernel/dis.c
+++ b/arch/s390/kernel/dis.c
@@ -1920,23 +1920,16 @@
 			}
 			if (separator)
 				ptr += sprintf(ptr, "%c", separator);
-			/*
-			 * Use four '%' characters below because of the
-			 * following two conversions:
-			 *
-			 *  1) sprintf: %%%%r -> %%r
-			 *  2) printk : %%r   -> %r
-			 */
 			if (operand->flags & OPERAND_GPR)
-				ptr += sprintf(ptr, "%%%%r%i", value);
+				ptr += sprintf(ptr, "%%r%i", value);
 			else if (operand->flags & OPERAND_FPR)
-				ptr += sprintf(ptr, "%%%%f%i", value);
+				ptr += sprintf(ptr, "%%f%i", value);
 			else if (operand->flags & OPERAND_AR)
-				ptr += sprintf(ptr, "%%%%a%i", value);
+				ptr += sprintf(ptr, "%%a%i", value);
 			else if (operand->flags & OPERAND_CR)
-				ptr += sprintf(ptr, "%%%%c%i", value);
+				ptr += sprintf(ptr, "%%c%i", value);
 			else if (operand->flags & OPERAND_VR)
-				ptr += sprintf(ptr, "%%%%v%i", value);
+				ptr += sprintf(ptr, "%%v%i", value);
 			else if (operand->flags & OPERAND_PCREL)
 				ptr += sprintf(ptr, "%lx", (signed int) value
 								      + addr);
diff --git a/arch/s390/kernel/dumpstack.c b/arch/s390/kernel/dumpstack.c
index 14c1ed3..1b6081c 100644
--- a/arch/s390/kernel/dumpstack.c
+++ b/arch/s390/kernel/dumpstack.c
@@ -20,28 +20,28 @@
 #include <asm/ipl.h>
 
 /*
- * For show_trace we have tree different stack to consider:
+ * For dump_trace we have three different stacks to consider:
  *   - the panic stack which is used if the kernel stack has overflown
  *   - the asynchronous interrupt stack (cpu related)
  *   - the synchronous kernel stack (process related)
- * The stack trace can start at any of the three stack and can potentially
+ * The stack trace can start at any of the three stacks and can potentially
  * touch all of them. The order is: panic stack, async stack, sync stack.
  */
 static unsigned long
-__show_trace(unsigned long sp, unsigned long low, unsigned long high)
+__dump_trace(dump_trace_func_t func, void *data, unsigned long sp,
+	     unsigned long low, unsigned long high)
 {
 	struct stack_frame *sf;
 	struct pt_regs *regs;
-	unsigned long addr;
 
 	while (1) {
 		if (sp < low || sp > high - sizeof(*sf))
 			return sp;
 		sf = (struct stack_frame *) sp;
-		addr = sf->gprs[8];
-		printk("([<%016lx>] %pSR)\n", addr, (void *)addr);
 		/* Follow the backchain. */
 		while (1) {
+			if (func(data, sf->gprs[8]))
+				return sp;
 			low = sp;
 			sp = sf->back_chain;
 			if (!sp)
@@ -49,46 +49,58 @@
 			if (sp <= low || sp > high - sizeof(*sf))
 				return sp;
 			sf = (struct stack_frame *) sp;
-			addr = sf->gprs[8];
-			printk(" [<%016lx>] %pSR\n", addr, (void *)addr);
 		}
 		/* Zero backchain detected, check for interrupt frame. */
 		sp = (unsigned long) (sf + 1);
 		if (sp <= low || sp > high - sizeof(*regs))
 			return sp;
 		regs = (struct pt_regs *) sp;
-		addr = regs->psw.addr;
-		printk(" [<%016lx>] %pSR\n", addr, (void *)addr);
+		if (!user_mode(regs)) {
+			if (func(data, regs->psw.addr))
+				return sp;
+		}
 		low = sp;
 		sp = regs->gprs[15];
 	}
 }
 
-static void show_trace(struct task_struct *task, unsigned long *stack)
+void dump_trace(dump_trace_func_t func, void *data, struct task_struct *task,
+		unsigned long sp)
 {
-	const unsigned long frame_size =
-		STACK_FRAME_OVERHEAD + sizeof(struct pt_regs);
-	register unsigned long __r15 asm ("15");
-	unsigned long sp;
+	unsigned long frame_size;
 
-	sp = (unsigned long) stack;
-	if (!sp)
-		sp = task ? task->thread.ksp : __r15;
-	printk("Call Trace:\n");
+	frame_size = STACK_FRAME_OVERHEAD + sizeof(struct pt_regs);
 #ifdef CONFIG_CHECK_STACK
-	sp = __show_trace(sp,
+	sp = __dump_trace(func, data, sp,
 			  S390_lowcore.panic_stack + frame_size - 4096,
 			  S390_lowcore.panic_stack + frame_size);
 #endif
-	sp = __show_trace(sp,
+	sp = __dump_trace(func, data, sp,
 			  S390_lowcore.async_stack + frame_size - ASYNC_SIZE,
 			  S390_lowcore.async_stack + frame_size);
 	if (task)
-		__show_trace(sp, (unsigned long) task_stack_page(task),
-			     (unsigned long) task_stack_page(task) + THREAD_SIZE);
+		__dump_trace(func, data, sp,
+			     (unsigned long)task_stack_page(task),
+			     (unsigned long)task_stack_page(task) + THREAD_SIZE);
 	else
-		__show_trace(sp, S390_lowcore.thread_info,
+		__dump_trace(func, data, sp,
+			     S390_lowcore.thread_info,
 			     S390_lowcore.thread_info + THREAD_SIZE);
+}
+EXPORT_SYMBOL_GPL(dump_trace);
+
+static int show_address(void *data, unsigned long address)
+{
+	printk("([<%016lx>] %pSR)\n", address, (void *)address);
+	return 0;
+}
+
+static void show_trace(struct task_struct *task, unsigned long sp)
+{
+	if (!sp)
+		sp = task ? task->thread.ksp : current_stack_pointer();
+	printk("Call Trace:\n");
+	dump_trace(show_address, NULL, task, sp);
 	if (!task)
 		task = current;
 	debug_show_held_locks(task);
@@ -96,15 +108,16 @@
 
 void show_stack(struct task_struct *task, unsigned long *sp)
 {
-	register unsigned long *__r15 asm ("15");
 	unsigned long *stack;
 	int i;
 
-	if (!sp)
-		stack = task ? (unsigned long *) task->thread.ksp : __r15;
-	else
-		stack = sp;
-
+	stack = sp;
+	if (!stack) {
+		if (!task)
+			stack = (unsigned long *)current_stack_pointer();
+		else
+			stack = (unsigned long *)task->thread.ksp;
+	}
 	for (i = 0; i < 20; i++) {
 		if (((addr_t) stack & (THREAD_SIZE-1)) == 0)
 			break;
@@ -113,7 +126,7 @@
 		printk("%016lx ", *stack++);
 	}
 	printk("\n");
-	show_trace(task, sp);
+	show_trace(task, (unsigned long)sp);
 }
 
 static void show_last_breaking_event(struct pt_regs *regs)
@@ -122,13 +135,9 @@
 	printk(" [<%016lx>] %pSR\n", regs->args[0], (void *)regs->args[0]);
 }
 
-static inline int mask_bits(struct pt_regs *regs, unsigned long bits)
-{
-	return (regs->psw.mask & bits) / ((~bits + 1) & bits);
-}
-
 void show_registers(struct pt_regs *regs)
 {
+	struct psw_bits *psw = &psw_bits(regs->psw);
 	char *mode;
 
 	mode = user_mode(regs) ? "User" : "Krnl";
@@ -137,13 +146,9 @@
 		printk(" (%pSR)", (void *)regs->psw.addr);
 	printk("\n");
 	printk("           R:%x T:%x IO:%x EX:%x Key:%x M:%x W:%x "
-	       "P:%x AS:%x CC:%x PM:%x", mask_bits(regs, PSW_MASK_PER),
-	       mask_bits(regs, PSW_MASK_DAT), mask_bits(regs, PSW_MASK_IO),
-	       mask_bits(regs, PSW_MASK_EXT), mask_bits(regs, PSW_MASK_KEY),
-	       mask_bits(regs, PSW_MASK_MCHECK), mask_bits(regs, PSW_MASK_WAIT),
-	       mask_bits(regs, PSW_MASK_PSTATE), mask_bits(regs, PSW_MASK_ASC),
-	       mask_bits(regs, PSW_MASK_CC), mask_bits(regs, PSW_MASK_PM));
-	printk(" EA:%x", mask_bits(regs, PSW_MASK_EA | PSW_MASK_BA));
+	       "P:%x AS:%x CC:%x PM:%x", psw->r, psw->t, psw->i, psw->e,
+	       psw->key, psw->m, psw->w, psw->p, psw->as, psw->cc, psw->pm);
+	printk(" RI:%x EA:%x", psw->ri, psw->eaba);
 	printk("\n%s GPRS: %016lx %016lx %016lx %016lx\n", mode,
 	       regs->gprs[0], regs->gprs[1], regs->gprs[2], regs->gprs[3]);
 	printk("           %016lx %016lx %016lx %016lx\n",
@@ -161,7 +166,7 @@
 	show_registers(regs);
 	/* Show stack backtrace if pt_regs is from kernel mode */
 	if (!user_mode(regs))
-		show_trace(NULL, (unsigned long *) regs->gprs[15]);
+		show_trace(NULL, regs->gprs[15]);
 	show_last_breaking_event(regs);
 }
 
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index cd5a191..2d47f9c 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -186,6 +186,7 @@
 	stg	%r5,__LC_THREAD_INFO		# store thread info of next
 	stg	%r15,__LC_KERNEL_STACK		# store end of kernel stack
 	lg	%r15,__THREAD_ksp(%r1)		# load kernel stack of next
+	/* c4 is used in guest detection: arch/s390/kernel/perf_cpum_sf.c */
 	lctl	%c4,%c4,__TASK_pid(%r3)		# load pid to control reg. 4
 	mvc	__LC_CURRENT_PID(4,%r0),__TASK_pid(%r3) # store pid of next
 	lmg	%r6,%r15,__SF_GPRS(%r15)	# load gprs of next task
@@ -1199,114 +1200,12 @@
 	.quad	.Lpsw_idle_lpsw
 
 .Lcleanup_save_fpu_regs:
-	TSTMSK	__LC_CPU_FLAGS,_CIF_FPU
-	bor	%r14
-	clg	%r9,BASED(.Lcleanup_save_fpu_regs_done)
-	jhe	5f
-	clg	%r9,BASED(.Lcleanup_save_fpu_regs_fp)
-	jhe	4f
-	clg	%r9,BASED(.Lcleanup_save_fpu_regs_vx_high)
-	jhe	3f
-	clg	%r9,BASED(.Lcleanup_save_fpu_regs_vx_low)
-	jhe	2f
-	clg	%r9,BASED(.Lcleanup_save_fpu_fpc_end)
-	jhe	1f
-	lg	%r2,__LC_CURRENT
-	aghi	%r2,__TASK_thread
-0:	# Store floating-point controls
-	stfpc	__THREAD_FPU_fpc(%r2)
-1:	# Load register save area and check if VX is active
-	lg	%r3,__THREAD_FPU_regs(%r2)
-	TSTMSK	__LC_MACHINE_FLAGS,MACHINE_FLAG_VX
-	jz	4f			  # no VX -> store FP regs
-2:	# Store vector registers (V0-V15)
-	VSTM	%v0,%v15,0,%r3		  # vstm 0,15,0(3)
-3:	# Store vector registers (V16-V31)
-	VSTM	%v16,%v31,256,%r3	  # vstm 16,31,256(3)
-	j	5f			  # -> done, set CIF_FPU flag
-4:	# Store floating-point registers
-	std	0,0(%r3)
-	std	1,8(%r3)
-	std	2,16(%r3)
-	std	3,24(%r3)
-	std	4,32(%r3)
-	std	5,40(%r3)
-	std	6,48(%r3)
-	std	7,56(%r3)
-	std	8,64(%r3)
-	std	9,72(%r3)
-	std	10,80(%r3)
-	std	11,88(%r3)
-	std	12,96(%r3)
-	std	13,104(%r3)
-	std	14,112(%r3)
-	std	15,120(%r3)
-5:	# Set CIF_FPU flag
-	oi	__LC_CPU_FLAGS+7,_CIF_FPU
-	lg	%r9,48(%r11)		# return from save_fpu_regs
+	larl	%r9,save_fpu_regs
 	br	%r14
-.Lcleanup_save_fpu_fpc_end:
-	.quad	.Lsave_fpu_regs_fpc_end
-.Lcleanup_save_fpu_regs_vx_low:
-	.quad	.Lsave_fpu_regs_vx_low
-.Lcleanup_save_fpu_regs_vx_high:
-	.quad	.Lsave_fpu_regs_vx_high
-.Lcleanup_save_fpu_regs_fp:
-	.quad	.Lsave_fpu_regs_fp
-.Lcleanup_save_fpu_regs_done:
-	.quad	.Lsave_fpu_regs_done
 
 .Lcleanup_load_fpu_regs:
-	TSTMSK	__LC_CPU_FLAGS,_CIF_FPU
-	bnor	%r14
-	clg	%r9,BASED(.Lcleanup_load_fpu_regs_done)
-	jhe	1f
-	clg	%r9,BASED(.Lcleanup_load_fpu_regs_fp)
-	jhe	2f
-	clg	%r9,BASED(.Lcleanup_load_fpu_regs_vx_high)
-	jhe	3f
-	clg	%r9,BASED(.Lcleanup_load_fpu_regs_vx)
-	jhe	4f
-	lg	%r4,__LC_CURRENT
-	aghi	%r4,__TASK_thread
-	lfpc	__THREAD_FPU_fpc(%r4)
-	TSTMSK	__LC_MACHINE_FLAGS,MACHINE_FLAG_VX
-	lg	%r4,__THREAD_FPU_regs(%r4)	# %r4 <- reg save area
-	jz	2f				# -> no VX, load FP regs
-4:	# Load V0 ..V15 registers
-	VLM	%v0,%v15,0,%r4
-3:	# Load V16..V31 registers
-	VLM	%v16,%v31,256,%r4
-	j	1f
-2:	# Load floating-point registers
-	ld	0,0(%r4)
-	ld	1,8(%r4)
-	ld	2,16(%r4)
-	ld	3,24(%r4)
-	ld	4,32(%r4)
-	ld	5,40(%r4)
-	ld	6,48(%r4)
-	ld	7,56(%r4)
-	ld	8,64(%r4)
-	ld	9,72(%r4)
-	ld	10,80(%r4)
-	ld	11,88(%r4)
-	ld	12,96(%r4)
-	ld	13,104(%r4)
-	ld	14,112(%r4)
-	ld	15,120(%r4)
-1:	# Clear CIF_FPU bit
-	ni	__LC_CPU_FLAGS+7,255-_CIF_FPU
-	lg	%r9,48(%r11)		# return from load_fpu_regs
+	larl	%r9,load_fpu_regs
 	br	%r14
-.Lcleanup_load_fpu_regs_vx:
-	.quad	.Lload_fpu_regs_vx
-.Lcleanup_load_fpu_regs_vx_high:
-	.quad	.Lload_fpu_regs_vx_high
-.Lcleanup_load_fpu_regs_fp:
-	.quad	.Lload_fpu_regs_fp
-.Lcleanup_load_fpu_regs_done:
-	.quad	.Lload_fpu_regs_done
 
 /*
  * Integer constants
diff --git a/arch/s390/kernel/irq.c b/arch/s390/kernel/irq.c
index f41d520..c373a1d 100644
--- a/arch/s390/kernel/irq.c
+++ b/arch/s390/kernel/irq.c
@@ -164,8 +164,7 @@
 {
 	unsigned long old, new;
 
-	/* Get current stack pointer. */
-	asm volatile("la %0,0(15)" : "=a" (old));
+	old = current_stack_pointer();
 	/* Check against async. stack address range. */
 	new = S390_lowcore.async_stack;
 	if (((new - old) >> (PAGE_SHIFT + THREAD_ORDER)) != 0) {
diff --git a/arch/s390/kernel/perf_cpum_cf.c b/arch/s390/kernel/perf_cpum_cf.c
index 929c147..58bf457 100644
--- a/arch/s390/kernel/perf_cpum_cf.c
+++ b/arch/s390/kernel/perf_cpum_cf.c
@@ -383,7 +383,7 @@
 
 	/* Validate the counter that is assigned to this event.
 	 * Because the counter facility can use numerous counters at the
-	 * same time without constraints, it is not necessary to explicity
+	 * same time without constraints, it is not necessary to explicitly
 	 * validate event groups (event->group_leader != event).
 	 */
 	err = validate_event(hwc);
diff --git a/arch/s390/kernel/perf_cpum_sf.c b/arch/s390/kernel/perf_cpum_sf.c
index 3d8da1e..1a43474 100644
--- a/arch/s390/kernel/perf_cpum_sf.c
+++ b/arch/s390/kernel/perf_cpum_sf.c
@@ -1022,10 +1022,13 @@
 	/*
 	 * A non-zero guest program parameter indicates a guest
 	 * sample.
-	 * Note that some early samples might be misaccounted to
-	 * the host.
+	 * Note that some early samples or samples from guests without
+	 * lpp usage would be misaccounted to the host. We use the asn
+	 * value as a heuristic to detect most of these guest samples.
+	 * If the value differs from the host hpp value, we assume
+	 * it to be a KVM guest.
 	 */
-	if (sfr->basic.gpp)
+	if (sfr->basic.gpp || sfr->basic.prim_asn != (u16) sfr->basic.hpp)
 		sde_regs->in_guest = 1;
 
 	overflow = 0;
diff --git a/arch/s390/kernel/perf_event.c b/arch/s390/kernel/perf_event.c
index 0943b11..c3e4099 100644
--- a/arch/s390/kernel/perf_event.c
+++ b/arch/s390/kernel/perf_event.c
@@ -222,67 +222,23 @@
 }
 arch_initcall(service_level_perf_register);
 
-/* See also arch/s390/kernel/traps.c */
-static unsigned long __store_trace(struct perf_callchain_entry *entry,
-				   unsigned long sp,
-				   unsigned long low, unsigned long high)
+static int __perf_callchain_kernel(void *data, unsigned long address)
 {
-	struct stack_frame *sf;
-	struct pt_regs *regs;
+	struct perf_callchain_entry *entry = data;
 
-	while (1) {
-		if (sp < low || sp > high - sizeof(*sf))
-			return sp;
-		sf = (struct stack_frame *) sp;
-		perf_callchain_store(entry, sf->gprs[8]);
-		/* Follow the backchain. */
-		while (1) {
-			low = sp;
-			sp = sf->back_chain;
-			if (!sp)
-				break;
-			if (sp <= low || sp > high - sizeof(*sf))
-				return sp;
-			sf = (struct stack_frame *) sp;
-			perf_callchain_store(entry, sf->gprs[8]);
-		}
-		/* Zero backchain detected, check for interrupt frame. */
-		sp = (unsigned long) (sf + 1);
-		if (sp <= low || sp > high - sizeof(*regs))
-			return sp;
-		regs = (struct pt_regs *) sp;
-		perf_callchain_store(entry, sf->gprs[8]);
-		low = sp;
-		sp = regs->gprs[15];
-	}
+	perf_callchain_store(entry, address);
+	return 0;
 }
 
 void perf_callchain_kernel(struct perf_callchain_entry *entry,
 			   struct pt_regs *regs)
 {
-	unsigned long head, frame_size;
-	struct stack_frame *head_sf;
-
 	if (user_mode(regs))
 		return;
-
-	frame_size = STACK_FRAME_OVERHEAD + sizeof(struct pt_regs);
-	head = regs->gprs[15];
-	head_sf = (struct stack_frame *) head;
-
-	if (!head_sf || !head_sf->back_chain)
-		return;
-
-	head = head_sf->back_chain;
-	head = __store_trace(entry, head,
-			     S390_lowcore.async_stack + frame_size - ASYNC_SIZE,
-			     S390_lowcore.async_stack + frame_size);
-
-	__store_trace(entry, head, S390_lowcore.thread_info,
-		      S390_lowcore.thread_info + THREAD_SIZE);
+	dump_trace(__perf_callchain_kernel, entry, NULL, regs->gprs[15]);
 }
 
-/* Perf defintions for PMU event attributes in sysfs */
+/* Perf definitions for PMU event attributes in sysfs */
 ssize_t cpumf_events_sysfs_show(struct device *dev,
 				struct device_attribute *attr, char *page)
 {
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index cedb019..d3f9688 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -327,6 +327,7 @@
 		+ PAGE_SIZE - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs);
 	lc->current_task = (unsigned long) init_thread_union.thread_info.task;
 	lc->thread_info = (unsigned long) &init_thread_union;
+	lc->lpp = LPP_MAGIC;
 	lc->machine_flags = S390_lowcore.machine_flags;
 	lc->stfl_fac_list = S390_lowcore.stfl_fac_list;
 	memcpy(lc->stfle_fac_list, S390_lowcore.stfle_fac_list,
@@ -779,6 +780,7 @@
 		strcpy(elf_platform, "zEC12");
 		break;
 	case 0x2964:
+	case 0x2965:
 		strcpy(elf_platform, "z13");
 		break;
 	}
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 3c65a8e..40a6b4f 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -798,7 +798,7 @@
 	set_cpu_online(smp_processor_id(), true);
 	inc_irq_stat(CPU_RST);
 	local_irq_enable();
-	cpu_startup_entry(CPUHP_ONLINE);
+	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
 }
 
 /* Upping and downing of CPUs */
diff --git a/arch/s390/kernel/stacktrace.c b/arch/s390/kernel/stacktrace.c
index 8f64ebd..44f84b2 100644
--- a/arch/s390/kernel/stacktrace.c
+++ b/arch/s390/kernel/stacktrace.c
@@ -10,78 +10,39 @@
 #include <linux/kallsyms.h>
 #include <linux/module.h>
 
-static unsigned long save_context_stack(struct stack_trace *trace,
-					unsigned long sp,
-					unsigned long low,
-					unsigned long high,
-					int savesched)
+static int __save_address(void *data, unsigned long address, int nosched)
 {
-	struct stack_frame *sf;
-	struct pt_regs *regs;
-	unsigned long addr;
+	struct stack_trace *trace = data;
 
-	while(1) {
-		if (sp < low || sp > high)
-			return sp;
-		sf = (struct stack_frame *)sp;
-		while(1) {
-			addr = sf->gprs[8];
-			if (!trace->skip)
-				trace->entries[trace->nr_entries++] = addr;
-			else
-				trace->skip--;
-			if (trace->nr_entries >= trace->max_entries)
-				return sp;
-			low = sp;
-			sp = sf->back_chain;
-			if (!sp)
-				break;
-			if (sp <= low || sp > high - sizeof(*sf))
-				return sp;
-			sf = (struct stack_frame *)sp;
-		}
-		/* Zero backchain detected, check for interrupt frame. */
-		sp = (unsigned long)(sf + 1);
-		if (sp <= low || sp > high - sizeof(*regs))
-			return sp;
-		regs = (struct pt_regs *)sp;
-		addr = regs->psw.addr;
-		if (savesched || !in_sched_functions(addr)) {
-			if (!trace->skip)
-				trace->entries[trace->nr_entries++] = addr;
-			else
-				trace->skip--;
-		}
-		if (trace->nr_entries >= trace->max_entries)
-			return sp;
-		low = sp;
-		sp = regs->gprs[15];
+	if (nosched && in_sched_functions(address))
+		return 0;
+	if (trace->skip > 0) {
+		trace->skip--;
+		return 0;
 	}
+	if (trace->nr_entries < trace->max_entries) {
+		trace->entries[trace->nr_entries++] = address;
+		return 0;
+	}
+	return 1;
 }
 
-static void __save_stack_trace(struct stack_trace *trace, unsigned long sp)
+static int save_address(void *data, unsigned long address)
 {
-	unsigned long new_sp, frame_size;
+	return __save_address(data, address, 0);
+}
 
-	frame_size = STACK_FRAME_OVERHEAD + sizeof(struct pt_regs);
-	new_sp = save_context_stack(trace, sp,
-			S390_lowcore.panic_stack + frame_size - PAGE_SIZE,
-			S390_lowcore.panic_stack + frame_size, 1);
-	new_sp = save_context_stack(trace, new_sp,
-			S390_lowcore.async_stack + frame_size - ASYNC_SIZE,
-			S390_lowcore.async_stack + frame_size, 1);
-	save_context_stack(trace, new_sp,
-			   S390_lowcore.thread_info,
-			   S390_lowcore.thread_info + THREAD_SIZE, 1);
+static int save_address_nosched(void *data, unsigned long address)
+{
+	return __save_address(data, address, 1);
 }
 
 void save_stack_trace(struct stack_trace *trace)
 {
-	register unsigned long r15 asm ("15");
 	unsigned long sp;
 
-	sp = r15;
-	__save_stack_trace(trace, sp);
+	sp = current_stack_pointer();
+	dump_trace(save_address, trace, NULL, sp);
 	if (trace->nr_entries < trace->max_entries)
 		trace->entries[trace->nr_entries++] = ULONG_MAX;
 }
@@ -89,16 +50,12 @@
 
 void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
 {
-	unsigned long sp, low, high;
+	unsigned long sp;
 
 	sp = tsk->thread.ksp;
-	if (tsk == current) {
-		/* Get current stack pointer. */
-		asm volatile("la %0,0(15)" : "=a" (sp));
-	}
-	low = (unsigned long) task_stack_page(tsk);
-	high = (unsigned long) task_pt_regs(tsk);
-	save_context_stack(trace, sp, low, high, 0);
+	if (tsk == current)
+		sp = current_stack_pointer();
+	dump_trace(save_address_nosched, trace, tsk, sp);
 	if (trace->nr_entries < trace->max_entries)
 		trace->entries[trace->nr_entries++] = ULONG_MAX;
 }
@@ -109,7 +66,7 @@
 	unsigned long sp;
 
 	sp = kernel_stack_pointer(regs);
-	__save_stack_trace(trace, sp);
+	dump_trace(save_address, trace, NULL, sp);
 	if (trace->nr_entries < trace->max_entries)
 		trace->entries[trace->nr_entries++] = ULONG_MAX;
 }
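
The rework above drops the open-coded frame walker in favour of dump_trace() with a small callback: __save_address() only decides whether each address gets recorded. Below is a minimal user-space sketch of that callback contract; the real walker and the in_sched_functions() filter are kernel internals, so a stand-in walker feeding made-up addresses is used instead.

#include <stdio.h>

struct stack_trace {
	unsigned int nr_entries, max_entries;
	unsigned long *entries;
	int skip;
};

/* Callback: record one address; return non-zero to stop the walk. */
static int save_address(void *data, unsigned long address)
{
	struct stack_trace *trace = data;

	if (trace->skip > 0) {
		trace->skip--;
		return 0;
	}
	if (trace->nr_entries < trace->max_entries) {
		trace->entries[trace->nr_entries++] = address;
		return 0;
	}
	return 1;	/* buffer full, abort the walk */
}

/* Stand-in for dump_trace(): feeds fake return addresses to the callback. */
static void walk_stack(int (*fn)(void *, unsigned long), void *data)
{
	static const unsigned long fake[] = { 0x1000, 0x1004, 0x1008, 0x100c };
	unsigned int i;

	for (i = 0; i < sizeof(fake) / sizeof(fake[0]); i++)
		if (fn(data, fake[i]))
			break;
}

int main(void)
{
	unsigned long buf[3];
	struct stack_trace trace = { .max_entries = 3, .entries = buf, .skip = 1 };
	unsigned int i;

	walk_stack(save_address, &trace);
	for (i = 0; i < trace.nr_entries; i++)
		printf("%#lx\n", trace.entries[i]);
	return 0;
}
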
diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c
index 99f84ac31..c4e5f18 100644
--- a/arch/s390/kernel/time.c
+++ b/arch/s390/kernel/time.c
@@ -499,8 +499,7 @@
 		if (etr_port0_online && etr_port1_online)
 			set_bit(CLOCK_SYNC_ETR, &clock_sync_flags);
 	} else if (etr_port0_online || etr_port1_online) {
-		pr_warning("The real or virtual hardware system does "
-			   "not provide an ETR interface\n");
+		pr_warn("The real or virtual hardware system does not provide an ETR interface\n");
 		etr_port0_online = etr_port1_online = 0;
 	}
 }
@@ -1464,8 +1463,7 @@
 	if (rc == 0)
 		set_bit(CLOCK_SYNC_HAS_STP, &clock_sync_flags);
 	else if (stp_online) {
-		pr_warning("The real or virtual hardware system does "
-			   "not provide an STP interface\n");
+		pr_warn("The real or virtual hardware system does not provide an STP interface\n");
 		free_page((unsigned long) stp_page);
 		stp_page = NULL;
 		stp_online = 0;
diff --git a/arch/s390/kernel/traps.c b/arch/s390/kernel/traps.c
index 017eb03d..dd97a3e 100644
--- a/arch/s390/kernel/traps.c
+++ b/arch/s390/kernel/traps.c
@@ -22,8 +22,6 @@
 #include <asm/fpu/api.h>
 #include "entry.h"
 
-int show_unhandled_signals = 1;
-
 static inline void __user *get_trap_ip(struct pt_regs *regs)
 {
 	unsigned long address;
@@ -35,21 +33,6 @@
 	return (void __user *) (address - (regs->int_code >> 16));
 }
 
-static inline void report_user_fault(struct pt_regs *regs, int signr)
-{
-	if ((task_pid_nr(current) > 1) && !show_unhandled_signals)
-		return;
-	if (!unhandled_signal(current, signr))
-		return;
-	if (!printk_ratelimit())
-		return;
-	printk("User process fault: interruption code %04x ilc:%d ",
-	       regs->int_code & 0xffff, regs->int_code >> 17);
-	print_vma_addr("in ", regs->psw.addr);
-	printk("\n");
-	show_regs(regs);
-}
-
 int is_valid_bugaddr(unsigned long addr)
 {
 	return 1;
@@ -65,7 +48,7 @@
 		info.si_code = si_code;
 		info.si_addr = get_trap_ip(regs);
 		force_sig_info(si_signo, &info, current);
-		report_user_fault(regs, si_signo);
+		report_user_fault(regs, si_signo, 0);
         } else {
                 const struct exception_table_entry *fixup;
 		fixup = search_exception_tables(regs->psw.addr);
@@ -111,7 +94,7 @@
 void default_trap_handler(struct pt_regs *regs)
 {
 	if (user_mode(regs)) {
-		report_user_fault(regs, SIGSEGV);
+		report_user_fault(regs, SIGSEGV, 0);
 		do_exit(SIGSEGV);
 	} else
 		die(regs, "Unknown program exception");
diff --git a/arch/s390/kvm/diag.c b/arch/s390/kvm/diag.c
index 05f7de9..1ea4095 100644
--- a/arch/s390/kvm/diag.c
+++ b/arch/s390/kvm/diag.c
@@ -14,6 +14,7 @@
 #include <linux/kvm.h>
 #include <linux/kvm_host.h>
 #include <asm/pgalloc.h>
+#include <asm/gmap.h>
 #include <asm/virtio-ccw.h>
 #include "kvm-s390.h"
 #include "trace.h"
diff --git a/arch/s390/kvm/gaccess.c b/arch/s390/kvm/gaccess.c
index d30db40..66938d2 100644
--- a/arch/s390/kvm/gaccess.c
+++ b/arch/s390/kvm/gaccess.c
@@ -373,7 +373,7 @@
 }
 
 static int ar_translation(struct kvm_vcpu *vcpu, union asce *asce, ar_t ar,
-			  int write)
+			  enum gacc_mode mode)
 {
 	union alet alet;
 	struct ale ale;
@@ -454,7 +454,7 @@
 		}
 	}
 
-	if (ale.fo == 1 && write)
+	if (ale.fo == 1 && mode == GACC_STORE)
 		return PGM_PROTECTION;
 
 	asce->val = aste.asce;
@@ -477,25 +477,28 @@
 };
 
 static int get_vcpu_asce(struct kvm_vcpu *vcpu, union asce *asce,
-			 ar_t ar, int write)
+			 ar_t ar, enum gacc_mode mode)
 {
 	int rc;
-	psw_t *psw = &vcpu->arch.sie_block->gpsw;
+	struct psw_bits psw = psw_bits(vcpu->arch.sie_block->gpsw);
 	struct kvm_s390_pgm_info *pgm = &vcpu->arch.pgm;
 	struct trans_exc_code_bits *tec_bits;
 
 	memset(pgm, 0, sizeof(*pgm));
 	tec_bits = (struct trans_exc_code_bits *)&pgm->trans_exc_code;
-	tec_bits->fsi = write ? FSI_STORE : FSI_FETCH;
-	tec_bits->as = psw_bits(*psw).as;
+	tec_bits->fsi = mode == GACC_STORE ? FSI_STORE : FSI_FETCH;
+	tec_bits->as = psw.as;
 
-	if (!psw_bits(*psw).t) {
+	if (!psw.t) {
 		asce->val = 0;
 		asce->r = 1;
 		return 0;
 	}
 
-	switch (psw_bits(vcpu->arch.sie_block->gpsw).as) {
+	if (mode == GACC_IFETCH)
+		psw.as = psw.as == PSW_AS_HOME ? PSW_AS_HOME : PSW_AS_PRIMARY;
+
+	switch (psw.as) {
 	case PSW_AS_PRIMARY:
 		asce->val = vcpu->arch.sie_block->gcr[1];
 		return 0;
@@ -506,7 +509,7 @@
 		asce->val = vcpu->arch.sie_block->gcr[13];
 		return 0;
 	case PSW_AS_ACCREG:
-		rc = ar_translation(vcpu, asce, ar, write);
+		rc = ar_translation(vcpu, asce, ar, mode);
 		switch (rc) {
 		case PGM_ALEN_TRANSLATION:
 		case PGM_ALE_SEQUENCE:
@@ -538,7 +541,7 @@
  * @gva: guest virtual address
  * @gpa: points to where guest physical (absolute) address should be stored
  * @asce: effective asce
- * @write: indicates if access is a write access
+ * @mode: indicates the access mode to be used
  *
  * Translate a guest virtual address into a guest absolute address by means
  * of dynamic address translation as specified by the architecture.
@@ -554,7 +557,7 @@
  */
 static unsigned long guest_translate(struct kvm_vcpu *vcpu, unsigned long gva,
 				     unsigned long *gpa, const union asce asce,
-				     int write)
+				     enum gacc_mode mode)
 {
 	union vaddress vaddr = {.addr = gva};
 	union raddress raddr = {.addr = gva};
@@ -699,7 +702,7 @@
 real_address:
 	raddr.addr = kvm_s390_real_to_abs(vcpu, raddr.addr);
 absolute_address:
-	if (write && dat_protection)
+	if (mode == GACC_STORE && dat_protection)
 		return PGM_PROTECTION;
 	if (kvm_is_error_gpa(vcpu->kvm, raddr.addr))
 		return PGM_ADDRESSING;
@@ -728,7 +731,7 @@
 
 static int guest_page_range(struct kvm_vcpu *vcpu, unsigned long ga,
 			    unsigned long *pages, unsigned long nr_pages,
-			    const union asce asce, int write)
+			    const union asce asce, enum gacc_mode mode)
 {
 	struct kvm_s390_pgm_info *pgm = &vcpu->arch.pgm;
 	psw_t *psw = &vcpu->arch.sie_block->gpsw;
@@ -740,13 +743,13 @@
 	while (nr_pages) {
 		ga = kvm_s390_logical_to_effective(vcpu, ga);
 		tec_bits->addr = ga >> PAGE_SHIFT;
-		if (write && lap_enabled && is_low_address(ga)) {
+		if (mode == GACC_STORE && lap_enabled && is_low_address(ga)) {
 			pgm->code = PGM_PROTECTION;
 			return pgm->code;
 		}
 		ga &= PAGE_MASK;
 		if (psw_bits(*psw).t) {
-			rc = guest_translate(vcpu, ga, pages, asce, write);
+			rc = guest_translate(vcpu, ga, pages, asce, mode);
 			if (rc < 0)
 				return rc;
 			if (rc == PGM_PROTECTION)
@@ -768,7 +771,7 @@
 }
 
 int access_guest(struct kvm_vcpu *vcpu, unsigned long ga, ar_t ar, void *data,
-		 unsigned long len, int write)
+		 unsigned long len, enum gacc_mode mode)
 {
 	psw_t *psw = &vcpu->arch.sie_block->gpsw;
 	unsigned long _len, nr_pages, gpa, idx;
@@ -780,7 +783,7 @@
 
 	if (!len)
 		return 0;
-	rc = get_vcpu_asce(vcpu, &asce, ar, write);
+	rc = get_vcpu_asce(vcpu, &asce, ar, mode);
 	if (rc)
 		return rc;
 	nr_pages = (((ga & ~PAGE_MASK) + len - 1) >> PAGE_SHIFT) + 1;
@@ -792,11 +795,11 @@
 	need_ipte_lock = psw_bits(*psw).t && !asce.r;
 	if (need_ipte_lock)
 		ipte_lock(vcpu);
-	rc = guest_page_range(vcpu, ga, pages, nr_pages, asce, write);
+	rc = guest_page_range(vcpu, ga, pages, nr_pages, asce, mode);
 	for (idx = 0; idx < nr_pages && !rc; idx++) {
 		gpa = *(pages + idx) + (ga & ~PAGE_MASK);
 		_len = min(PAGE_SIZE - (gpa & ~PAGE_MASK), len);
-		if (write)
+		if (mode == GACC_STORE)
 			rc = kvm_write_guest(vcpu->kvm, gpa, data, _len);
 		else
 			rc = kvm_read_guest(vcpu->kvm, gpa, data, _len);
@@ -812,7 +815,7 @@
 }
 
 int access_guest_real(struct kvm_vcpu *vcpu, unsigned long gra,
-		      void *data, unsigned long len, int write)
+		      void *data, unsigned long len, enum gacc_mode mode)
 {
 	unsigned long _len, gpa;
 	int rc = 0;
@@ -820,7 +823,7 @@
 	while (len && !rc) {
 		gpa = kvm_s390_real_to_abs(vcpu, gra);
 		_len = min(PAGE_SIZE - (gpa & ~PAGE_MASK), len);
-		if (write)
+		if (mode)
 			rc = write_guest_abs(vcpu, gpa, data, _len);
 		else
 			rc = read_guest_abs(vcpu, gpa, data, _len);
@@ -841,7 +844,7 @@
  * has to take care of this.
  */
 int guest_translate_address(struct kvm_vcpu *vcpu, unsigned long gva, ar_t ar,
-			    unsigned long *gpa, int write)
+			    unsigned long *gpa, enum gacc_mode mode)
 {
 	struct kvm_s390_pgm_info *pgm = &vcpu->arch.pgm;
 	psw_t *psw = &vcpu->arch.sie_block->gpsw;
@@ -851,19 +854,19 @@
 
 	gva = kvm_s390_logical_to_effective(vcpu, gva);
 	tec = (struct trans_exc_code_bits *)&pgm->trans_exc_code;
-	rc = get_vcpu_asce(vcpu, &asce, ar, write);
+	rc = get_vcpu_asce(vcpu, &asce, ar, mode);
 	tec->addr = gva >> PAGE_SHIFT;
 	if (rc)
 		return rc;
 	if (is_low_address(gva) && low_address_protection_enabled(vcpu, asce)) {
-		if (write) {
+		if (mode == GACC_STORE) {
 			rc = pgm->code = PGM_PROTECTION;
 			return rc;
 		}
 	}
 
 	if (psw_bits(*psw).t && !asce.r) {	/* Use DAT? */
-		rc = guest_translate(vcpu, gva, gpa, asce, write);
+		rc = guest_translate(vcpu, gva, gpa, asce, mode);
 		if (rc > 0) {
 			if (rc == PGM_PROTECTION)
 				tec->b61 = 1;
@@ -883,7 +886,7 @@
  * check_gva_range - test a range of guest virtual addresses for accessibility
  */
 int check_gva_range(struct kvm_vcpu *vcpu, unsigned long gva, ar_t ar,
-		    unsigned long length, int is_write)
+		    unsigned long length, enum gacc_mode mode)
 {
 	unsigned long gpa;
 	unsigned long currlen;
@@ -892,7 +895,7 @@
 	ipte_lock(vcpu);
 	while (length > 0 && !rc) {
 		currlen = min(length, PAGE_SIZE - (gva % PAGE_SIZE));
-		rc = guest_translate_address(vcpu, gva, ar, &gpa, is_write);
+		rc = guest_translate_address(vcpu, gva, ar, &gpa, mode);
 		gva += currlen;
 		length -= currlen;
 	}
diff --git a/arch/s390/kvm/gaccess.h b/arch/s390/kvm/gaccess.h
index ef03726..df0a79d 100644
--- a/arch/s390/kvm/gaccess.h
+++ b/arch/s390/kvm/gaccess.h
@@ -155,16 +155,22 @@
 	return kvm_read_guest(vcpu->kvm, gpa, data, len);
 }
 
+enum gacc_mode {
+	GACC_FETCH,
+	GACC_STORE,
+	GACC_IFETCH,
+};
+
 int guest_translate_address(struct kvm_vcpu *vcpu, unsigned long gva,
-			    ar_t ar, unsigned long *gpa, int write);
+			    ar_t ar, unsigned long *gpa, enum gacc_mode mode);
 int check_gva_range(struct kvm_vcpu *vcpu, unsigned long gva, ar_t ar,
-		    unsigned long length, int is_write);
+		    unsigned long length, enum gacc_mode mode);
 
 int access_guest(struct kvm_vcpu *vcpu, unsigned long ga, ar_t ar, void *data,
-		 unsigned long len, int write);
+		 unsigned long len, enum gacc_mode mode);
 
 int access_guest_real(struct kvm_vcpu *vcpu, unsigned long gra,
-		      void *data, unsigned long len, int write);
+		      void *data, unsigned long len, enum gacc_mode mode);
 
 /**
  * write_guest - copy data from kernel space to guest space
@@ -215,7 +221,7 @@
 int write_guest(struct kvm_vcpu *vcpu, unsigned long ga, ar_t ar, void *data,
 		unsigned long len)
 {
-	return access_guest(vcpu, ga, ar, data, len, 1);
+	return access_guest(vcpu, ga, ar, data, len, GACC_STORE);
 }
 
 /**
@@ -235,7 +241,27 @@
 int read_guest(struct kvm_vcpu *vcpu, unsigned long ga, ar_t ar, void *data,
 	       unsigned long len)
 {
-	return access_guest(vcpu, ga, ar, data, len, 0);
+	return access_guest(vcpu, ga, ar, data, len, GACC_FETCH);
+}
+
+/**
+ * read_guest_instr - copy instruction data from guest space to kernel space
+ * @vcpu: virtual cpu
+ * @data: destination address in kernel space
+ * @len: number of bytes to copy
+ *
+ * Copy @len bytes from the current psw address (guest space) to @data (kernel
+ * space).
+ *
+ * The behaviour of read_guest_instr is identical to read_guest, except that
+ * instruction data will be read from primary space when the PSW specifies
+ * secondary-space or access-register mode (home-space mode stays home).
+ */
+static inline __must_check
+int read_guest_instr(struct kvm_vcpu *vcpu, void *data, unsigned long len)
+{
+	return access_guest(vcpu, vcpu->arch.sie_block->gpsw.addr, 0, data, len,
+			    GACC_IFETCH);
 }
 
 /**
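
Replacing the write flag with enum gacc_mode lets instruction fetches be told apart from data accesses; per the get_vcpu_asce() hunk above, GACC_IFETCH resolves the access in the primary space unless the PSW is in home-space mode. A small sketch of just that mode selection, with the PSW and ASCE handling reduced to an enum (the PSW_AS_* values mirror the usual s390 encoding and are otherwise only illustrative):

#include <stdio.h>

enum gacc_mode { GACC_FETCH, GACC_STORE, GACC_IFETCH };
enum psw_as { PSW_AS_PRIMARY, PSW_AS_ACCREG, PSW_AS_SECONDARY, PSW_AS_HOME };

/* Pick the address space an access is resolved in, as in get_vcpu_asce(). */
static enum psw_as effective_as(enum psw_as psw_as, enum gacc_mode mode)
{
	if (mode == GACC_IFETCH)
		return psw_as == PSW_AS_HOME ? PSW_AS_HOME : PSW_AS_PRIMARY;
	return psw_as;
}

int main(void)
{
	printf("%d\n", effective_as(PSW_AS_SECONDARY, GACC_IFETCH)); /* 0: primary */
	printf("%d\n", effective_as(PSW_AS_HOME, GACC_IFETCH));      /* 3: home */
	printf("%d\n", effective_as(PSW_AS_SECONDARY, GACC_FETCH));  /* 2: unchanged */
	return 0;
}
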
diff --git a/arch/s390/kvm/guestdbg.c b/arch/s390/kvm/guestdbg.c
index d697312..e8c6843 100644
--- a/arch/s390/kvm/guestdbg.c
+++ b/arch/s390/kvm/guestdbg.c
@@ -17,7 +17,7 @@
 /*
  * Extends the address range given by *start and *stop to include the address
  * range starting with estart and the length len. Takes care of overflowing
- * intervals and tries to minimize the overall intervall size.
+ * intervals and tries to minimize the overall interval size.
  */
 static void extend_address_range(u64 *start, u64 *stop, u64 estart, int len)
 {
@@ -72,7 +72,7 @@
 		return;
 
 	/*
-	 * If the guest is not interrested in branching events, we can savely
+	 * If the guest is not interested in branching events, we can safely
 	 * limit them to the PER address range.
 	 */
 	if (!(*cr9 & PER_EVENT_BRANCH))
diff --git a/arch/s390/kvm/intercept.c b/arch/s390/kvm/intercept.c
index d53c107..2e6b54e 100644
--- a/arch/s390/kvm/intercept.c
+++ b/arch/s390/kvm/intercept.c
@@ -38,17 +38,32 @@
 	[0xeb] = kvm_s390_handle_eb,
 };
 
-void kvm_s390_rewind_psw(struct kvm_vcpu *vcpu, int ilc)
+u8 kvm_s390_get_ilen(struct kvm_vcpu *vcpu)
 {
 	struct kvm_s390_sie_block *sie_block = vcpu->arch.sie_block;
+	u8 ilen = 0;
 
-	/* Use the length of the EXECUTE instruction if necessary */
-	if (sie_block->icptstatus & 1) {
-		ilc = (sie_block->icptstatus >> 4) & 0x6;
-		if (!ilc)
-			ilc = 4;
+	switch (vcpu->arch.sie_block->icptcode) {
+	case ICPT_INST:
+	case ICPT_INSTPROGI:
+	case ICPT_OPEREXC:
+	case ICPT_PARTEXEC:
+	case ICPT_IOINST:
+		/* instruction only stored for these icptcodes */
+		ilen = insn_length(vcpu->arch.sie_block->ipa >> 8);
+		/* Use the length of the EXECUTE instruction if necessary */
+		if (sie_block->icptstatus & 1) {
+			ilen = (sie_block->icptstatus >> 4) & 0x6;
+			if (!ilen)
+				ilen = 4;
+		}
+		break;
+	case ICPT_PROGI:
+		/* bit 1+2 of pgmilc are the ilc, so we directly get ilen */
+		ilen = vcpu->arch.sie_block->pgmilc & 0x6;
+		break;
 	}
-	sie_block->gpsw.addr = __rewind_psw(sie_block->gpsw, ilc);
+	return ilen;
 }
 
 static int handle_noop(struct kvm_vcpu *vcpu)
@@ -121,11 +136,13 @@
 	return -EOPNOTSUPP;
 }
 
-static void __extract_prog_irq(struct kvm_vcpu *vcpu,
-			       struct kvm_s390_pgm_info *pgm_info)
+static int inject_prog_on_prog_intercept(struct kvm_vcpu *vcpu)
 {
-	memset(pgm_info, 0, sizeof(struct kvm_s390_pgm_info));
-	pgm_info->code = vcpu->arch.sie_block->iprcc;
+	struct kvm_s390_pgm_info pgm_info = {
+		.code = vcpu->arch.sie_block->iprcc,
+		/* the PSW has already been rewound */
+		.flags = KVM_S390_PGM_FLAGS_NO_REWIND,
+	};
 
 	switch (vcpu->arch.sie_block->iprcc & ~PGM_PER) {
 	case PGM_AFX_TRANSLATION:
@@ -138,7 +155,7 @@
 	case PGM_PRIMARY_AUTHORITY:
 	case PGM_SECONDARY_AUTHORITY:
 	case PGM_SPACE_SWITCH:
-		pgm_info->trans_exc_code = vcpu->arch.sie_block->tecmc;
+		pgm_info.trans_exc_code = vcpu->arch.sie_block->tecmc;
 		break;
 	case PGM_ALEN_TRANSLATION:
 	case PGM_ALE_SEQUENCE:
@@ -146,7 +163,7 @@
 	case PGM_ASTE_SEQUENCE:
 	case PGM_ASTE_VALIDITY:
 	case PGM_EXTENDED_AUTHORITY:
-		pgm_info->exc_access_id = vcpu->arch.sie_block->eai;
+		pgm_info.exc_access_id = vcpu->arch.sie_block->eai;
 		break;
 	case PGM_ASCE_TYPE:
 	case PGM_PAGE_TRANSLATION:
@@ -154,32 +171,33 @@
 	case PGM_REGION_SECOND_TRANS:
 	case PGM_REGION_THIRD_TRANS:
 	case PGM_SEGMENT_TRANSLATION:
-		pgm_info->trans_exc_code = vcpu->arch.sie_block->tecmc;
-		pgm_info->exc_access_id  = vcpu->arch.sie_block->eai;
-		pgm_info->op_access_id  = vcpu->arch.sie_block->oai;
+		pgm_info.trans_exc_code = vcpu->arch.sie_block->tecmc;
+		pgm_info.exc_access_id  = vcpu->arch.sie_block->eai;
+		pgm_info.op_access_id  = vcpu->arch.sie_block->oai;
 		break;
 	case PGM_MONITOR:
-		pgm_info->mon_class_nr = vcpu->arch.sie_block->mcn;
-		pgm_info->mon_code = vcpu->arch.sie_block->tecmc;
+		pgm_info.mon_class_nr = vcpu->arch.sie_block->mcn;
+		pgm_info.mon_code = vcpu->arch.sie_block->tecmc;
 		break;
 	case PGM_VECTOR_PROCESSING:
 	case PGM_DATA:
-		pgm_info->data_exc_code = vcpu->arch.sie_block->dxc;
+		pgm_info.data_exc_code = vcpu->arch.sie_block->dxc;
 		break;
 	case PGM_PROTECTION:
-		pgm_info->trans_exc_code = vcpu->arch.sie_block->tecmc;
-		pgm_info->exc_access_id  = vcpu->arch.sie_block->eai;
+		pgm_info.trans_exc_code = vcpu->arch.sie_block->tecmc;
+		pgm_info.exc_access_id  = vcpu->arch.sie_block->eai;
 		break;
 	default:
 		break;
 	}
 
 	if (vcpu->arch.sie_block->iprcc & PGM_PER) {
-		pgm_info->per_code = vcpu->arch.sie_block->perc;
-		pgm_info->per_atmid = vcpu->arch.sie_block->peratmid;
-		pgm_info->per_address = vcpu->arch.sie_block->peraddr;
-		pgm_info->per_access_id = vcpu->arch.sie_block->peraid;
+		pgm_info.per_code = vcpu->arch.sie_block->perc;
+		pgm_info.per_atmid = vcpu->arch.sie_block->peratmid;
+		pgm_info.per_address = vcpu->arch.sie_block->peraddr;
+		pgm_info.per_access_id = vcpu->arch.sie_block->peraid;
 	}
+	return kvm_s390_inject_prog_irq(vcpu, &pgm_info);
 }
 
 /*
@@ -208,7 +226,6 @@
 
 static int handle_prog(struct kvm_vcpu *vcpu)
 {
-	struct kvm_s390_pgm_info pgm_info;
 	psw_t psw;
 	int rc;
 
@@ -234,8 +251,7 @@
 	if (rc)
 		return rc;
 
-	__extract_prog_irq(vcpu, &pgm_info);
-	return kvm_s390_inject_prog_irq(vcpu, &pgm_info);
+	return inject_prog_on_prog_intercept(vcpu);
 }
 
 /**
@@ -302,7 +318,7 @@
 
 	/* Make sure that the source is paged-in */
 	rc = guest_translate_address(vcpu, vcpu->run->s.regs.gprs[reg2],
-				     reg2, &srcaddr, 0);
+				     reg2, &srcaddr, GACC_FETCH);
 	if (rc)
 		return kvm_s390_inject_prog_cond(vcpu, rc);
 	rc = kvm_arch_fault_in_page(vcpu, srcaddr, 0);
@@ -311,14 +327,14 @@
 
 	/* Make sure that the destination is paged-in */
 	rc = guest_translate_address(vcpu, vcpu->run->s.regs.gprs[reg1],
-				     reg1, &dstaddr, 1);
+				     reg1, &dstaddr, GACC_STORE);
 	if (rc)
 		return kvm_s390_inject_prog_cond(vcpu, rc);
 	rc = kvm_arch_fault_in_page(vcpu, dstaddr, 1);
 	if (rc != 0)
 		return rc;
 
-	kvm_s390_rewind_psw(vcpu, 4);
+	kvm_s390_retry_instr(vcpu);
 
 	return 0;
 }
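
kvm_s390_get_ilen() takes the instruction length either from insn_length() on the stored opcode or from the ILC bits of pgmilc. On s390 the length is encoded in the two leftmost bits of the first opcode byte: 00 means 2 bytes, 01 and 10 mean 4 bytes, 11 means 6 bytes. A stand-alone sketch of insn_length() built from that rule (the kernel's own helper lives elsewhere and may be implemented differently):

#include <assert.h>

/*
 * Instruction length from the first opcode byte: bits 0-1 select
 * 2, 4 or 6 bytes (00 -> 2, 01/10 -> 4, 11 -> 6).
 */
static unsigned int insn_length(unsigned char code)
{
	switch (code >> 6) {
	case 0:
		return 2;
	case 1:
	case 2:
		return 4;
	default:
		return 6;
	}
}

int main(void)
{
	assert(insn_length(0x0a) == 2);	/* svc: 2-byte format */
	assert(insn_length(0x58) == 4);	/* l:   4-byte RX format */
	assert(insn_length(0xe3) == 6);	/* lg:  6-byte RXY format */
	return 0;
}
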
diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
index 9ffc732..84efc2b 100644
--- a/arch/s390/kvm/interrupt.c
+++ b/arch/s390/kvm/interrupt.c
@@ -23,6 +23,7 @@
 #include <asm/uaccess.h>
 #include <asm/sclp.h>
 #include <asm/isc.h>
+#include <asm/gmap.h>
 #include "kvm-s390.h"
 #include "gaccess.h"
 #include "trace-s390.h"
@@ -182,8 +183,9 @@
 
 static int cpu_timer_irq_pending(struct kvm_vcpu *vcpu)
 {
-	return (vcpu->arch.sie_block->cputm >> 63) &&
-	       cpu_timer_interrupts_enabled(vcpu);
+	if (!cpu_timer_interrupts_enabled(vcpu))
+		return 0;
+	return kvm_s390_get_cpu_timer(vcpu) >> 63;
 }
 
 static inline int is_ioirq(unsigned long irq_type)
@@ -335,23 +337,6 @@
 	set_intercept_indicators_stop(vcpu);
 }
 
-static u16 get_ilc(struct kvm_vcpu *vcpu)
-{
-	switch (vcpu->arch.sie_block->icptcode) {
-	case ICPT_INST:
-	case ICPT_INSTPROGI:
-	case ICPT_OPEREXC:
-	case ICPT_PARTEXEC:
-	case ICPT_IOINST:
-		/* last instruction only stored for these icptcodes */
-		return insn_length(vcpu->arch.sie_block->ipa >> 8);
-	case ICPT_PROGI:
-		return vcpu->arch.sie_block->pgmilc;
-	default:
-		return 0;
-	}
-}
-
 static int __must_check __deliver_cpu_timer(struct kvm_vcpu *vcpu)
 {
 	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
@@ -588,7 +573,7 @@
 	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
 	struct kvm_s390_pgm_info pgm_info;
 	int rc = 0, nullifying = false;
-	u16 ilc = get_ilc(vcpu);
+	u16 ilen;
 
 	spin_lock(&li->lock);
 	pgm_info = li->irq.pgm;
@@ -596,8 +581,9 @@
 	memset(&li->irq.pgm, 0, sizeof(pgm_info));
 	spin_unlock(&li->lock);
 
-	VCPU_EVENT(vcpu, 3, "deliver: program irq code 0x%x, ilc:%d",
-		   pgm_info.code, ilc);
+	ilen = pgm_info.flags & KVM_S390_PGM_FLAGS_ILC_MASK;
+	VCPU_EVENT(vcpu, 3, "deliver: program irq code 0x%x, ilen:%d",
+		   pgm_info.code, ilen);
 	vcpu->stat.deliver_program_int++;
 	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_PROGRAM_INT,
 					 pgm_info.code, 0);
@@ -681,10 +667,11 @@
 				   (u8 *) __LC_PER_ACCESS_ID);
 	}
 
-	if (nullifying && vcpu->arch.sie_block->icptcode == ICPT_INST)
-		kvm_s390_rewind_psw(vcpu, ilc);
+	if (nullifying && !(pgm_info.flags & KVM_S390_PGM_FLAGS_NO_REWIND))
+		kvm_s390_rewind_psw(vcpu, ilen);
 
-	rc |= put_guest_lc(vcpu, ilc, (u16 *) __LC_PGM_ILC);
+	/* bit 1+2 of the target are the ilc, so we can directly use ilen */
+	rc |= put_guest_lc(vcpu, ilen, (u16 *) __LC_PGM_ILC);
 	rc |= put_guest_lc(vcpu, vcpu->arch.sie_block->gbea,
 				 (u64 *) __LC_LAST_BREAK);
 	rc |= put_guest_lc(vcpu, pgm_info.code,
@@ -923,9 +910,35 @@
 	return ckc_irq_pending(vcpu) || cpu_timer_irq_pending(vcpu);
 }
 
+static u64 __calculate_sltime(struct kvm_vcpu *vcpu)
+{
+	u64 now, cputm, sltime = 0;
+
+	if (ckc_interrupts_enabled(vcpu)) {
+		now = kvm_s390_get_tod_clock_fast(vcpu->kvm);
+		sltime = tod_to_ns(vcpu->arch.sie_block->ckc - now);
+		/* already expired or overflow? */
+		if (!sltime || vcpu->arch.sie_block->ckc <= now)
+			return 0;
+		if (cpu_timer_interrupts_enabled(vcpu)) {
+			cputm = kvm_s390_get_cpu_timer(vcpu);
+			/* already expired? */
+			if (cputm >> 63)
+				return 0;
+			return min(sltime, tod_to_ns(cputm));
+		}
+	} else if (cpu_timer_interrupts_enabled(vcpu)) {
+		sltime = kvm_s390_get_cpu_timer(vcpu);
+		/* already expired? */
+		if (sltime >> 63)
+			return 0;
+	}
+	return sltime;
+}
+
 int kvm_s390_handle_wait(struct kvm_vcpu *vcpu)
 {
-	u64 now, sltime;
+	u64 sltime;
 
 	vcpu->stat.exit_wait_state++;
 
@@ -938,22 +951,20 @@
 		return -EOPNOTSUPP; /* disabled wait */
 	}
 
-	if (!ckc_interrupts_enabled(vcpu)) {
+	if (!ckc_interrupts_enabled(vcpu) &&
+	    !cpu_timer_interrupts_enabled(vcpu)) {
 		VCPU_EVENT(vcpu, 3, "%s", "enabled wait w/o timer");
 		__set_cpu_idle(vcpu);
 		goto no_timer;
 	}
 
-	now = kvm_s390_get_tod_clock_fast(vcpu->kvm);
-	sltime = tod_to_ns(vcpu->arch.sie_block->ckc - now);
-
-	/* underflow */
-	if (vcpu->arch.sie_block->ckc < now)
+	sltime = __calculate_sltime(vcpu);
+	if (!sltime)
 		return 0;
 
 	__set_cpu_idle(vcpu);
 	hrtimer_start(&vcpu->arch.ckc_timer, ktime_set (0, sltime) , HRTIMER_MODE_REL);
-	VCPU_EVENT(vcpu, 4, "enabled wait via clock comparator: %llu ns", sltime);
+	VCPU_EVENT(vcpu, 4, "enabled wait: %llu ns", sltime);
 no_timer:
 	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
 	kvm_vcpu_block(vcpu);
@@ -980,18 +991,16 @@
 enum hrtimer_restart kvm_s390_idle_wakeup(struct hrtimer *timer)
 {
 	struct kvm_vcpu *vcpu;
-	u64 now, sltime;
+	u64 sltime;
 
 	vcpu = container_of(timer, struct kvm_vcpu, arch.ckc_timer);
-	now = kvm_s390_get_tod_clock_fast(vcpu->kvm);
-	sltime = tod_to_ns(vcpu->arch.sie_block->ckc - now);
+	sltime = __calculate_sltime(vcpu);
 
 	/*
 	 * If the monotonic clock runs faster than the tod clock we might be
 	 * woken up too early and have to go back to sleep to avoid deadlocks.
 	 */
-	if (vcpu->arch.sie_block->ckc > now &&
-	    hrtimer_forward_now(timer, ns_to_ktime(sltime)))
+	if (sltime && hrtimer_forward_now(timer, ns_to_ktime(sltime)))
 		return HRTIMER_RESTART;
 	kvm_s390_vcpu_wakeup(vcpu);
 	return HRTIMER_NORESTART;
@@ -1059,8 +1068,16 @@
 	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_PROGRAM_INT,
 				   irq->u.pgm.code, 0);
 
+	if (!(irq->u.pgm.flags & KVM_S390_PGM_FLAGS_ILC_VALID)) {
+		/* auto detection if no valid ILC was given */
+		irq->u.pgm.flags &= ~KVM_S390_PGM_FLAGS_ILC_MASK;
+		irq->u.pgm.flags |= kvm_s390_get_ilen(vcpu);
+		irq->u.pgm.flags |= KVM_S390_PGM_FLAGS_ILC_VALID;
+	}
+
 	if (irq->u.pgm.code == PGM_PER) {
 		li->irq.pgm.code |= PGM_PER;
+		li->irq.pgm.flags = irq->u.pgm.flags;
 		/* only modify PER related information */
 		li->irq.pgm.per_address = irq->u.pgm.per_address;
 		li->irq.pgm.per_code = irq->u.pgm.per_code;
@@ -1069,6 +1086,7 @@
 	} else if (!(irq->u.pgm.code & PGM_PER)) {
 		li->irq.pgm.code = (li->irq.pgm.code & PGM_PER) |
 				   irq->u.pgm.code;
+		li->irq.pgm.flags = irq->u.pgm.flags;
 		/* only modify non-PER information */
 		li->irq.pgm.trans_exc_code = irq->u.pgm.trans_exc_code;
 		li->irq.pgm.mon_code = irq->u.pgm.mon_code;
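
__calculate_sltime() has to honour two wake-up sources, the clock comparator and a possibly already expired CPU timer, returning 0 for an immediate wakeup and otherwise the shorter of the two delays. The decision logic in isolation, with the TOD arithmetic replaced by plain deltas (a sketch, not the kernel routine):

#include <stdio.h>
#include <stdint.h>

/*
 * Sleep-time selection: each timer may be disabled; an enabled timer
 * that has already expired forces an immediate wakeup (return 0).
 * ckc_delta is "clock comparator minus current TOD" in ns, 0 if the
 * comparator lies in the past; cpu_timer is the remaining CPU-timer
 * value in ns (negative or zero means expired).
 */
static uint64_t calc_sltime(int ckc_enabled, uint64_t ckc_delta,
			    int cputm_enabled, int64_t cpu_timer)
{
	if (ckc_enabled) {
		if (ckc_delta == 0)
			return 0;		/* already due */
		if (cputm_enabled) {
			if (cpu_timer <= 0)
				return 0;	/* CPU timer already expired */
			return ckc_delta < (uint64_t)cpu_timer ?
				ckc_delta : (uint64_t)cpu_timer;
		}
		return ckc_delta;
	}
	if (cputm_enabled)
		return cpu_timer <= 0 ? 0 : (uint64_t)cpu_timer;
	return 0;	/* no timer armed; caller handles "wait w/o timer" */
}

int main(void)
{
	printf("%llu\n", (unsigned long long)calc_sltime(1, 500, 1, 200)); /* 200 */
	printf("%llu\n", (unsigned long long)calc_sltime(1, 500, 1, -1));  /* 0 */
	printf("%llu\n", (unsigned long long)calc_sltime(0, 0, 1, 300));   /* 300 */
	return 0;
}
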
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 03dfe9c..668c087 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -30,6 +30,7 @@
 #include <asm/lowcore.h>
 #include <asm/etr.h>
 #include <asm/pgtable.h>
+#include <asm/gmap.h>
 #include <asm/nmi.h>
 #include <asm/switch_to.h>
 #include <asm/isc.h>
@@ -158,6 +159,8 @@
 		kvm->arch.epoch -= *delta;
 		kvm_for_each_vcpu(i, vcpu, kvm) {
 			vcpu->arch.sie_block->epoch -= *delta;
+			if (vcpu->arch.cputm_enabled)
+				vcpu->arch.cputm_start += *delta;
 		}
 	}
 	return NOTIFY_OK;
@@ -274,16 +277,17 @@
 	unsigned long address;
 	struct gmap *gmap = kvm->arch.gmap;
 
-	down_read(&gmap->mm->mmap_sem);
 	/* Loop over all guest pages */
 	last_gfn = memslot->base_gfn + memslot->npages;
 	for (cur_gfn = memslot->base_gfn; cur_gfn <= last_gfn; cur_gfn++) {
 		address = gfn_to_hva_memslot(memslot, cur_gfn);
 
-		if (gmap_test_and_clear_dirty(address, gmap))
+		if (test_and_clear_guest_dirty(gmap->mm, address))
 			mark_page_dirty(kvm, cur_gfn);
+		if (fatal_signal_pending(current))
+			return;
+		cond_resched();
 	}
-	up_read(&gmap->mm->mmap_sem);
 }
 
 /* Section: vm related */
@@ -352,8 +356,8 @@
 		if (atomic_read(&kvm->online_vcpus)) {
 			r = -EBUSY;
 		} else if (MACHINE_HAS_VX) {
-			set_kvm_facility(kvm->arch.model.fac->mask, 129);
-			set_kvm_facility(kvm->arch.model.fac->list, 129);
+			set_kvm_facility(kvm->arch.model.fac_mask, 129);
+			set_kvm_facility(kvm->arch.model.fac_list, 129);
 			r = 0;
 		} else
 			r = -EINVAL;
@@ -367,8 +371,8 @@
 		if (atomic_read(&kvm->online_vcpus)) {
 			r = -EBUSY;
 		} else if (test_facility(64)) {
-			set_kvm_facility(kvm->arch.model.fac->mask, 64);
-			set_kvm_facility(kvm->arch.model.fac->list, 64);
+			set_kvm_facility(kvm->arch.model.fac_mask, 64);
+			set_kvm_facility(kvm->arch.model.fac_list, 64);
 			r = 0;
 		}
 		mutex_unlock(&kvm->lock);
@@ -651,7 +655,7 @@
 		memcpy(&kvm->arch.model.cpu_id, &proc->cpuid,
 		       sizeof(struct cpuid));
 		kvm->arch.model.ibc = proc->ibc;
-		memcpy(kvm->arch.model.fac->list, proc->fac_list,
+		memcpy(kvm->arch.model.fac_list, proc->fac_list,
 		       S390_ARCH_FAC_LIST_SIZE_BYTE);
 	} else
 		ret = -EFAULT;
@@ -685,7 +689,8 @@
 	}
 	memcpy(&proc->cpuid, &kvm->arch.model.cpu_id, sizeof(struct cpuid));
 	proc->ibc = kvm->arch.model.ibc;
-	memcpy(&proc->fac_list, kvm->arch.model.fac->list, S390_ARCH_FAC_LIST_SIZE_BYTE);
+	memcpy(&proc->fac_list, kvm->arch.model.fac_list,
+	       S390_ARCH_FAC_LIST_SIZE_BYTE);
 	if (copy_to_user((void __user *)attr->addr, proc, sizeof(*proc)))
 		ret = -EFAULT;
 	kfree(proc);
@@ -705,7 +710,7 @@
 	}
 	get_cpu_id((struct cpuid *) &mach->cpuid);
 	mach->ibc = sclp.ibc;
-	memcpy(&mach->fac_mask, kvm->arch.model.fac->mask,
+	memcpy(&mach->fac_mask, kvm->arch.model.fac_mask,
 	       S390_ARCH_FAC_LIST_SIZE_BYTE);
 	memcpy((unsigned long *)&mach->fac_list, S390_lowcore.stfle_fac_list,
 	       S390_ARCH_FAC_LIST_SIZE_BYTE);
@@ -1082,16 +1087,12 @@
 	cpu_id->version = 0xff;
 }
 
-static int kvm_s390_crypto_init(struct kvm *kvm)
+static void kvm_s390_crypto_init(struct kvm *kvm)
 {
 	if (!test_kvm_facility(kvm, 76))
-		return 0;
+		return;
 
-	kvm->arch.crypto.crycb = kzalloc(sizeof(*kvm->arch.crypto.crycb),
-					 GFP_KERNEL | GFP_DMA);
-	if (!kvm->arch.crypto.crycb)
-		return -ENOMEM;
-
+	kvm->arch.crypto.crycb = &kvm->arch.sie_page2->crycb;
 	kvm_s390_set_crycb_format(kvm);
 
 	/* Enable AES/DEA protected key functions by default */
@@ -1101,8 +1102,6 @@
 			 sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
 	get_random_bytes(kvm->arch.crypto.crycb->dea_wrapping_key_mask,
 			 sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
-
-	return 0;
 }
 
 static void sca_dispose(struct kvm *kvm)
@@ -1156,37 +1155,30 @@
 	if (!kvm->arch.dbf)
 		goto out_err;
 
-	/*
-	 * The architectural maximum amount of facilities is 16 kbit. To store
-	 * this amount, 2 kbyte of memory is required. Thus we need a full
-	 * page to hold the guest facility list (arch.model.fac->list) and the
-	 * facility mask (arch.model.fac->mask). Its address size has to be
-	 * 31 bits and word aligned.
-	 */
-	kvm->arch.model.fac =
-		(struct kvm_s390_fac *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
-	if (!kvm->arch.model.fac)
+	kvm->arch.sie_page2 =
+	     (struct sie_page2 *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
+	if (!kvm->arch.sie_page2)
 		goto out_err;
 
 	/* Populate the facility mask initially. */
-	memcpy(kvm->arch.model.fac->mask, S390_lowcore.stfle_fac_list,
+	memcpy(kvm->arch.model.fac_mask, S390_lowcore.stfle_fac_list,
 	       S390_ARCH_FAC_LIST_SIZE_BYTE);
 	for (i = 0; i < S390_ARCH_FAC_LIST_SIZE_U64; i++) {
 		if (i < kvm_s390_fac_list_mask_size())
-			kvm->arch.model.fac->mask[i] &= kvm_s390_fac_list_mask[i];
+			kvm->arch.model.fac_mask[i] &= kvm_s390_fac_list_mask[i];
 		else
-			kvm->arch.model.fac->mask[i] = 0UL;
+			kvm->arch.model.fac_mask[i] = 0UL;
 	}
 
 	/* Populate the facility list initially. */
-	memcpy(kvm->arch.model.fac->list, kvm->arch.model.fac->mask,
+	kvm->arch.model.fac_list = kvm->arch.sie_page2->fac_list;
+	memcpy(kvm->arch.model.fac_list, kvm->arch.model.fac_mask,
 	       S390_ARCH_FAC_LIST_SIZE_BYTE);
 
 	kvm_s390_get_cpu_id(&kvm->arch.model.cpu_id);
 	kvm->arch.model.ibc = sclp.ibc & 0x0fff;
 
-	if (kvm_s390_crypto_init(kvm) < 0)
-		goto out_err;
+	kvm_s390_crypto_init(kvm);
 
 	spin_lock_init(&kvm->arch.float_int.lock);
 	for (i = 0; i < FIRQ_LIST_COUNT; i++)
@@ -1222,8 +1214,7 @@
 
 	return 0;
 out_err:
-	kfree(kvm->arch.crypto.crycb);
-	free_page((unsigned long)kvm->arch.model.fac);
+	free_page((unsigned long)kvm->arch.sie_page2);
 	debug_unregister(kvm->arch.dbf);
 	sca_dispose(kvm);
 	KVM_EVENT(3, "creation of vm failed: %d", rc);
@@ -1269,10 +1260,9 @@
 void kvm_arch_destroy_vm(struct kvm *kvm)
 {
 	kvm_free_vcpus(kvm);
-	free_page((unsigned long)kvm->arch.model.fac);
 	sca_dispose(kvm);
 	debug_unregister(kvm->arch.dbf);
-	kfree(kvm->arch.crypto.crycb);
+	free_page((unsigned long)kvm->arch.sie_page2);
 	if (!kvm_is_ucontrol(kvm))
 		gmap_free(kvm->arch.gmap);
 	kvm_s390_destroy_adapters(kvm);
@@ -1414,8 +1404,13 @@
 				    KVM_SYNC_PFAULT;
 	if (test_kvm_facility(vcpu->kvm, 64))
 		vcpu->run->kvm_valid_regs |= KVM_SYNC_RICCB;
-	if (test_kvm_facility(vcpu->kvm, 129))
+	/* fprs can be synchronized via vrs, even if the guest has no vx. With
+	 * MACHINE_HAS_VX, (load|store)_fpu_regs() will work with vrs format.
+	 */
+	if (MACHINE_HAS_VX)
 		vcpu->run->kvm_valid_regs |= KVM_SYNC_VRS;
+	else
+		vcpu->run->kvm_valid_regs |= KVM_SYNC_FPRS;
 
 	if (kvm_is_ucontrol(vcpu->kvm))
 		return __kvm_ucontrol_vcpu_init(vcpu);
@@ -1423,6 +1418,93 @@
 	return 0;
 }
 
+/* needs disabled preemption to protect from TOD sync and vcpu_load/put */
+static void __start_cpu_timer_accounting(struct kvm_vcpu *vcpu)
+{
+	WARN_ON_ONCE(vcpu->arch.cputm_start != 0);
+	raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
+	vcpu->arch.cputm_start = get_tod_clock_fast();
+	raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
+}
+
+/* needs disabled preemption to protect from TOD sync and vcpu_load/put */
+static void __stop_cpu_timer_accounting(struct kvm_vcpu *vcpu)
+{
+	WARN_ON_ONCE(vcpu->arch.cputm_start == 0);
+	raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
+	vcpu->arch.sie_block->cputm -= get_tod_clock_fast() - vcpu->arch.cputm_start;
+	vcpu->arch.cputm_start = 0;
+	raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
+}
+
+/* needs disabled preemption to protect from TOD sync and vcpu_load/put */
+static void __enable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
+{
+	WARN_ON_ONCE(vcpu->arch.cputm_enabled);
+	vcpu->arch.cputm_enabled = true;
+	__start_cpu_timer_accounting(vcpu);
+}
+
+/* needs disabled preemption to protect from TOD sync and vcpu_load/put */
+static void __disable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
+{
+	WARN_ON_ONCE(!vcpu->arch.cputm_enabled);
+	__stop_cpu_timer_accounting(vcpu);
+	vcpu->arch.cputm_enabled = false;
+}
+
+static void enable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
+{
+	preempt_disable(); /* protect from TOD sync and vcpu_load/put */
+	__enable_cpu_timer_accounting(vcpu);
+	preempt_enable();
+}
+
+static void disable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
+{
+	preempt_disable(); /* protect from TOD sync and vcpu_load/put */
+	__disable_cpu_timer_accounting(vcpu);
+	preempt_enable();
+}
+
+/* set the cpu timer - may only be called from the VCPU thread itself */
+void kvm_s390_set_cpu_timer(struct kvm_vcpu *vcpu, __u64 cputm)
+{
+	preempt_disable(); /* protect from TOD sync and vcpu_load/put */
+	raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
+	if (vcpu->arch.cputm_enabled)
+		vcpu->arch.cputm_start = get_tod_clock_fast();
+	vcpu->arch.sie_block->cputm = cputm;
+	raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
+	preempt_enable();
+}
+
+/* update and get the cpu timer - can also be called from other VCPU threads */
+__u64 kvm_s390_get_cpu_timer(struct kvm_vcpu *vcpu)
+{
+	unsigned int seq;
+	__u64 value;
+
+	if (unlikely(!vcpu->arch.cputm_enabled))
+		return vcpu->arch.sie_block->cputm;
+
+	preempt_disable(); /* protect from TOD sync and vcpu_load/put */
+	do {
+		seq = raw_read_seqcount(&vcpu->arch.cputm_seqcount);
+		/*
+		 * If the writer would ever execute a read in the critical
+		 * section, e.g. in irq context, we have a deadlock.
+		 */
+		WARN_ON_ONCE((seq & 1) && smp_processor_id() == vcpu->cpu);
+		value = vcpu->arch.sie_block->cputm;
+		/* if cputm_start is 0, accounting is being started/stopped */
+		if (likely(vcpu->arch.cputm_start))
+			value -= get_tod_clock_fast() - vcpu->arch.cputm_start;
+	} while (read_seqcount_retry(&vcpu->arch.cputm_seqcount, seq & ~1));
+	preempt_enable();
+	return value;
+}
+
 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 {
 	/* Save host register state */
@@ -1430,10 +1512,10 @@
 	vcpu->arch.host_fpregs.fpc = current->thread.fpu.fpc;
 	vcpu->arch.host_fpregs.regs = current->thread.fpu.regs;
 
-	/* Depending on MACHINE_HAS_VX, data stored to vrs either
-	 * has vector register or floating point register format.
-	 */
-	current->thread.fpu.regs = vcpu->run->s.regs.vrs;
+	if (MACHINE_HAS_VX)
+		current->thread.fpu.regs = vcpu->run->s.regs.vrs;
+	else
+		current->thread.fpu.regs = vcpu->run->s.regs.fprs;
 	current->thread.fpu.fpc = vcpu->run->s.regs.fpc;
 	if (test_fp_ctl(current->thread.fpu.fpc))
 		/* User space provided an invalid FPC, let's clear it */
@@ -1443,10 +1525,16 @@
 	restore_access_regs(vcpu->run->s.regs.acrs);
 	gmap_enable(vcpu->arch.gmap);
 	atomic_or(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
+	if (vcpu->arch.cputm_enabled && !is_vcpu_idle(vcpu))
+		__start_cpu_timer_accounting(vcpu);
+	vcpu->cpu = cpu;
 }
 
 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
 {
+	vcpu->cpu = -1;
+	if (vcpu->arch.cputm_enabled && !is_vcpu_idle(vcpu))
+		__stop_cpu_timer_accounting(vcpu);
 	atomic_andnot(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
 	gmap_disable(vcpu->arch.gmap);
 
@@ -1468,7 +1556,7 @@
 	vcpu->arch.sie_block->gpsw.mask = 0UL;
 	vcpu->arch.sie_block->gpsw.addr = 0UL;
 	kvm_s390_set_prefix(vcpu, 0);
-	vcpu->arch.sie_block->cputm     = 0UL;
+	kvm_s390_set_cpu_timer(vcpu, 0);
 	vcpu->arch.sie_block->ckc       = 0UL;
 	vcpu->arch.sie_block->todpr     = 0;
 	memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
@@ -1538,7 +1626,8 @@
 
 	vcpu->arch.cpu_id = model->cpu_id;
 	vcpu->arch.sie_block->ibc = model->ibc;
-	vcpu->arch.sie_block->fac = (int) (long) model->fac->list;
+	if (test_kvm_facility(vcpu->kvm, 7))
+		vcpu->arch.sie_block->fac = (u32)(u64) model->fac_list;
 }
 
 int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
@@ -1616,6 +1705,7 @@
 	vcpu->arch.local_int.float_int = &kvm->arch.float_int;
 	vcpu->arch.local_int.wq = &vcpu->wq;
 	vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
+	seqcount_init(&vcpu->arch.cputm_seqcount);
 
 	rc = kvm_vcpu_init(vcpu, kvm, id);
 	if (rc)
@@ -1715,7 +1805,7 @@
 			     (u64 __user *)reg->addr);
 		break;
 	case KVM_REG_S390_CPU_TIMER:
-		r = put_user(vcpu->arch.sie_block->cputm,
+		r = put_user(kvm_s390_get_cpu_timer(vcpu),
 			     (u64 __user *)reg->addr);
 		break;
 	case KVM_REG_S390_CLOCK_COMP:
@@ -1753,6 +1843,7 @@
 					   struct kvm_one_reg *reg)
 {
 	int r = -EINVAL;
+	__u64 val;
 
 	switch (reg->id) {
 	case KVM_REG_S390_TODPR:
@@ -1764,8 +1855,9 @@
 			     (u64 __user *)reg->addr);
 		break;
 	case KVM_REG_S390_CPU_TIMER:
-		r = get_user(vcpu->arch.sie_block->cputm,
-			     (u64 __user *)reg->addr);
+		r = get_user(val, (u64 __user *)reg->addr);
+		if (!r)
+			kvm_s390_set_cpu_timer(vcpu, val);
 		break;
 	case KVM_REG_S390_CLOCK_COMP:
 		r = get_user(vcpu->arch.sie_block->ckc,
@@ -2158,8 +2250,10 @@
 
 static int vcpu_post_run_fault_in_sie(struct kvm_vcpu *vcpu)
 {
-	psw_t *psw = &vcpu->arch.sie_block->gpsw;
-	u8 opcode;
+	struct kvm_s390_pgm_info pgm_info = {
+		.code = PGM_ADDRESSING,
+	};
+	u8 opcode, ilen;
 	int rc;
 
 	VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
@@ -2173,12 +2267,21 @@
 	 * to look up the current opcode to get the length of the instruction
 	 * to be able to forward the PSW.
 	 */
-	rc = read_guest(vcpu, psw->addr, 0, &opcode, 1);
-	if (rc)
-		return kvm_s390_inject_prog_cond(vcpu, rc);
-	psw->addr = __rewind_psw(*psw, -insn_length(opcode));
-
-	return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
+	rc = read_guest_instr(vcpu, &opcode, 1);
+	ilen = insn_length(opcode);
+	if (rc < 0) {
+		return rc;
+	} else if (rc) {
+		/* Instruction-Fetching Exceptions - we can't detect the ilen.
+		 * Forward by arbitrary ilc, injection will take care of
+		 * nullification if necessary.
+		 */
+		pgm_info = vcpu->arch.pgm;
+		ilen = 4;
+	}
+	pgm_info.flags = ilen | KVM_S390_PGM_FLAGS_ILC_VALID;
+	kvm_s390_forward_psw(vcpu, ilen);
+	return kvm_s390_inject_prog_irq(vcpu, &pgm_info);
 }
 
 static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason)
@@ -2244,10 +2347,12 @@
 		 */
 		local_irq_disable();
 		__kvm_guest_enter();
+		__disable_cpu_timer_accounting(vcpu);
 		local_irq_enable();
 		exit_reason = sie64a(vcpu->arch.sie_block,
 				     vcpu->run->s.regs.gprs);
 		local_irq_disable();
+		__enable_cpu_timer_accounting(vcpu);
 		__kvm_guest_exit();
 		local_irq_enable();
 		vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
@@ -2271,7 +2376,7 @@
 		kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
 	}
 	if (kvm_run->kvm_dirty_regs & KVM_SYNC_ARCH0) {
-		vcpu->arch.sie_block->cputm = kvm_run->s.regs.cputm;
+		kvm_s390_set_cpu_timer(vcpu, kvm_run->s.regs.cputm);
 		vcpu->arch.sie_block->ckc = kvm_run->s.regs.ckc;
 		vcpu->arch.sie_block->todpr = kvm_run->s.regs.todpr;
 		vcpu->arch.sie_block->pp = kvm_run->s.regs.pp;
@@ -2293,7 +2398,7 @@
 	kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;
 	kvm_run->s.regs.prefix = kvm_s390_get_prefix(vcpu);
 	memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128);
-	kvm_run->s.regs.cputm = vcpu->arch.sie_block->cputm;
+	kvm_run->s.regs.cputm = kvm_s390_get_cpu_timer(vcpu);
 	kvm_run->s.regs.ckc = vcpu->arch.sie_block->ckc;
 	kvm_run->s.regs.todpr = vcpu->arch.sie_block->todpr;
 	kvm_run->s.regs.pp = vcpu->arch.sie_block->pp;
@@ -2325,6 +2430,7 @@
 	}
 
 	sync_regs(vcpu, kvm_run);
+	enable_cpu_timer_accounting(vcpu);
 
 	might_fault();
 	rc = __vcpu_run(vcpu);
@@ -2344,6 +2450,7 @@
 		rc = 0;
 	}
 
+	disable_cpu_timer_accounting(vcpu);
 	store_regs(vcpu, kvm_run);
 
 	if (vcpu->sigset_active)
@@ -2364,7 +2471,7 @@
 	unsigned char archmode = 1;
 	freg_t fprs[NUM_FPRS];
 	unsigned int px;
-	u64 clkcomp;
+	u64 clkcomp, cputm;
 	int rc;
 
 	px = kvm_s390_get_prefix(vcpu);
@@ -2386,7 +2493,7 @@
 				     fprs, 128);
 	} else {
 		rc = write_guest_abs(vcpu, gpa + __LC_FPREGS_SAVE_AREA,
-				     vcpu->run->s.regs.vrs, 128);
+				     vcpu->run->s.regs.fprs, 128);
 	}
 	rc |= write_guest_abs(vcpu, gpa + __LC_GPREGS_SAVE_AREA,
 			      vcpu->run->s.regs.gprs, 128);
@@ -2398,8 +2505,9 @@
 			      &vcpu->run->s.regs.fpc, 4);
 	rc |= write_guest_abs(vcpu, gpa + __LC_TOD_PROGREG_SAVE_AREA,
 			      &vcpu->arch.sie_block->todpr, 4);
+	cputm = kvm_s390_get_cpu_timer(vcpu);
 	rc |= write_guest_abs(vcpu, gpa + __LC_CPU_TIMER_SAVE_AREA,
-			      &vcpu->arch.sie_block->cputm, 8);
+			      &cputm, 8);
 	clkcomp = vcpu->arch.sie_block->ckc >> 8;
 	rc |= write_guest_abs(vcpu, gpa + __LC_CLOCK_COMP_SAVE_AREA,
 			      &clkcomp, 8);
@@ -2605,7 +2713,8 @@
 	switch (mop->op) {
 	case KVM_S390_MEMOP_LOGICAL_READ:
 		if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
-			r = check_gva_range(vcpu, mop->gaddr, mop->ar, mop->size, false);
+			r = check_gva_range(vcpu, mop->gaddr, mop->ar,
+					    mop->size, GACC_FETCH);
 			break;
 		}
 		r = read_guest(vcpu, mop->gaddr, mop->ar, tmpbuf, mop->size);
@@ -2616,7 +2725,8 @@
 		break;
 	case KVM_S390_MEMOP_LOGICAL_WRITE:
 		if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
-			r = check_gva_range(vcpu, mop->gaddr, mop->ar, mop->size, true);
+			r = check_gva_range(vcpu, mop->gaddr, mop->ar,
+					    mop->size, GACC_STORE);
 			break;
 		}
 		if (copy_from_user(tmpbuf, uaddr, mop->size)) {
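
kvm_s390_get_cpu_timer() lets other threads read a value the VCPU thread may be updating, so writer and readers are coupled through a seqcount: the writer makes the sequence odd around updates and readers retry if the sequence was odd or changed underneath them. A self-contained sketch of that pattern with C11 atomics; the kernel's raw_write_seqcount_*()/read_seqcount_retry() primitives are replaced here by hand-rolled, simplified equivalents.

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

struct cputm {
	atomic_uint seq;	/* odd while a write is in progress */
	uint64_t value;
	uint64_t start;		/* 0 when accounting is stopped */
};

static void cputm_set(struct cputm *t, uint64_t value, uint64_t now)
{
	atomic_fetch_add_explicit(&t->seq, 1, memory_order_release); /* -> odd */
	t->start = now;
	t->value = value;
	atomic_fetch_add_explicit(&t->seq, 1, memory_order_release); /* -> even */
}

static uint64_t cputm_get(struct cputm *t, uint64_t now)
{
	unsigned int seq;
	uint64_t value;

	do {
		seq = atomic_load_explicit(&t->seq, memory_order_acquire);
		value = t->value;
		if (t->start)			/* accounting running? */
			value -= now - t->start;
	} while (seq & 1 ||
		 seq != atomic_load_explicit(&t->seq, memory_order_acquire));
	return value;
}

int main(void)
{
	struct cputm t = { .value = 1000, .start = 100 };

	printf("%llu\n", (unsigned long long)cputm_get(&t, 150)); /* 950 */
	cputm_set(&t, 500, 200);
	printf("%llu\n", (unsigned long long)cputm_get(&t, 260)); /* 440 */
	return 0;
}
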
diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h
index df1abad..8621ab0 100644
--- a/arch/s390/kvm/kvm-s390.h
+++ b/arch/s390/kvm/kvm-s390.h
@@ -19,6 +19,7 @@
 #include <linux/kvm.h>
 #include <linux/kvm_host.h>
 #include <asm/facility.h>
+#include <asm/processor.h>
 
 typedef int (*intercept_handler_t)(struct kvm_vcpu *vcpu);
 
@@ -53,6 +54,11 @@
 	return atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_STOPPED;
 }
 
+static inline int is_vcpu_idle(struct kvm_vcpu *vcpu)
+{
+	return atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_WAIT;
+}
+
 static inline int kvm_is_ucontrol(struct kvm *kvm)
 {
 #ifdef CONFIG_KVM_S390_UCONTROL
@@ -154,8 +160,8 @@
 /* test availability of facility in a kvm instance */
 static inline int test_kvm_facility(struct kvm *kvm, unsigned long nr)
 {
-	return __test_facility(nr, kvm->arch.model.fac->mask) &&
-		__test_facility(nr, kvm->arch.model.fac->list);
+	return __test_facility(nr, kvm->arch.model.fac_mask) &&
+		__test_facility(nr, kvm->arch.model.fac_list);
 }
 
 static inline int set_kvm_facility(u64 *fac_list, unsigned long nr)
@@ -212,8 +218,22 @@
 int kvm_s390_mask_adapter(struct kvm *kvm, unsigned int id, bool masked);
 
 /* implemented in intercept.c */
-void kvm_s390_rewind_psw(struct kvm_vcpu *vcpu, int ilc);
+u8 kvm_s390_get_ilen(struct kvm_vcpu *vcpu);
 int kvm_handle_sie_intercept(struct kvm_vcpu *vcpu);
+static inline void kvm_s390_rewind_psw(struct kvm_vcpu *vcpu, int ilen)
+{
+	struct kvm_s390_sie_block *sie_block = vcpu->arch.sie_block;
+
+	sie_block->gpsw.addr = __rewind_psw(sie_block->gpsw, ilen);
+}
+static inline void kvm_s390_forward_psw(struct kvm_vcpu *vcpu, int ilen)
+{
+	kvm_s390_rewind_psw(vcpu, -ilen);
+}
+static inline void kvm_s390_retry_instr(struct kvm_vcpu *vcpu)
+{
+	kvm_s390_rewind_psw(vcpu, kvm_s390_get_ilen(vcpu));
+}
 
 /* implemented in priv.c */
 int is_valid_psw(psw_t *psw);
@@ -248,6 +268,8 @@
 void kvm_s390_vcpu_unsetup_cmma(struct kvm_vcpu *vcpu);
 unsigned long kvm_s390_fac_list_mask_size(void);
 extern unsigned long kvm_s390_fac_list_mask[];
+void kvm_s390_set_cpu_timer(struct kvm_vcpu *vcpu, __u64 cputm);
+__u64 kvm_s390_get_cpu_timer(struct kvm_vcpu *vcpu);
 
 /* implemented in diag.c */
 int kvm_s390_handle_diag(struct kvm_vcpu *vcpu);
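
The new inline helpers express retry and forward stepping through one rewind primitive: kvm_s390_retry_instr() rewinds by the current ilen, kvm_s390_forward_psw() passes a negative ilen. The rewind itself is assumed to wrap the PSW address within the current addressing mode (24/31/64 bit); the masked arithmetic below illustrates that assumption, since the mask selection from the real PSW mask bits is not part of this hunk.

#include <assert.h>

/* Address masks for the three s390 addressing modes. */
#define MASK_24BIT	((1UL << 24) - 1)
#define MASK_31BIT	((1UL << 31) - 1)
#define MASK_64BIT	(~0UL)

/* Rewind (ilen > 0) or forward (ilen < 0) an address, wrapping in-mode. */
static unsigned long rewind_addr(unsigned long addr, long ilen,
				 unsigned long mask)
{
	return (addr - ilen) & mask;
}

int main(void)
{
	/* retry: step back over a 4-byte instruction */
	assert(rewind_addr(0x20004, 4, MASK_64BIT) == 0x20000);
	/* forward: negative ilen steps ahead, as kvm_s390_forward_psw() does */
	assert(rewind_addr(0x20000, -6, MASK_64BIT) == 0x20006);
	/* wrap-around inside 31-bit mode */
	assert(rewind_addr(0x0, 2, MASK_31BIT) == MASK_31BIT - 1);
	return 0;
}
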
diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c
index ed74e86..0a1591d 100644
--- a/arch/s390/kvm/priv.c
+++ b/arch/s390/kvm/priv.c
@@ -23,6 +23,7 @@
 #include <asm/sysinfo.h>
 #include <asm/pgtable.h>
 #include <asm/pgalloc.h>
+#include <asm/gmap.h>
 #include <asm/io.h>
 #include <asm/ptrace.h>
 #include <asm/compat.h>
@@ -173,7 +174,7 @@
 	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
 		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
 
-	kvm_s390_rewind_psw(vcpu, 4);
+	kvm_s390_retry_instr(vcpu);
 	VCPU_EVENT(vcpu, 4, "%s", "retrying storage key operation");
 	return 0;
 }
@@ -184,7 +185,7 @@
 	if (psw_bits(vcpu->arch.sie_block->gpsw).p)
 		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
 	wait_event(vcpu->kvm->arch.ipte_wq, !ipte_lock_held(vcpu));
-	kvm_s390_rewind_psw(vcpu, 4);
+	kvm_s390_retry_instr(vcpu);
 	VCPU_EVENT(vcpu, 4, "%s", "retrying ipte interlock operation");
 	return 0;
 }
@@ -354,7 +355,7 @@
 	 * We need to shift the lower 32 facility bits (bit 0-31) from a u64
 	 * into a u32 memory representation. They will remain bits 0-31.
 	 */
-	fac = *vcpu->kvm->arch.model.fac->list >> 32;
+	fac = *vcpu->kvm->arch.model.fac_list >> 32;
 	rc = write_guest_lc(vcpu, offsetof(struct lowcore, stfl_fac_list),
 			    &fac, sizeof(fac));
 	if (rc)
@@ -759,8 +760,8 @@
 	if (((vcpu->arch.sie_block->ipb & 0xf0000000) >> 28) > 6)
 		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
 
-	/* Rewind PSW to repeat the ESSA instruction */
-	kvm_s390_rewind_psw(vcpu, 4);
+	/* Retry the ESSA instruction */
+	kvm_s390_retry_instr(vcpu);
 	vcpu->arch.sie_block->cbrlo &= PAGE_MASK;	/* reset nceo */
 	cbrlo = phys_to_virt(vcpu->arch.sie_block->cbrlo);
 	down_read(&gmap->mm->mmap_sem);
@@ -981,11 +982,12 @@
 		return -EOPNOTSUPP;
 	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_DAT)
 		ipte_lock(vcpu);
-	ret = guest_translate_address(vcpu, address1, ar, &gpa, 1);
+	ret = guest_translate_address(vcpu, address1, ar, &gpa, GACC_STORE);
 	if (ret == PGM_PROTECTION) {
 		/* Write protected? Try again with read-only... */
 		cc = 1;
-		ret = guest_translate_address(vcpu, address1, ar, &gpa, 0);
+		ret = guest_translate_address(vcpu, address1, ar, &gpa,
+					      GACC_FETCH);
 	}
 	if (ret) {
 		if (ret == PGM_ADDRESSING || ret == PGM_TRANSLATION_SPEC) {
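
The hunk above probes an address for write access first and, on a protection exception, retries the translation read-only while raising the condition code. The same probe-then-degrade pattern in isolation, against a toy translator (the function names and the read-only rule are invented for the example):

#include <stdio.h>

enum gacc_mode { GACC_FETCH, GACC_STORE, GACC_IFETCH };
#define PGM_PROTECTION 0x04

/* Toy translator: addresses below 'ro_limit' are read-only. */
static int translate(unsigned long addr, enum gacc_mode mode,
		     unsigned long ro_limit, unsigned long *gpa)
{
	if (mode == GACC_STORE && addr < ro_limit)
		return PGM_PROTECTION;
	*gpa = addr;	/* identity mapping for the sketch */
	return 0;
}

/* Probe for write access, degrade to read-only on protection. */
static int probe(unsigned long addr, unsigned long ro_limit,
		 unsigned long *gpa, int *cc)
{
	int ret = translate(addr, GACC_STORE, ro_limit, gpa);

	*cc = 0;
	if (ret == PGM_PROTECTION) {
		*cc = 1;	/* not writable, report read-only */
		ret = translate(addr, GACC_FETCH, ro_limit, gpa);
	}
	return ret;
}

int main(void)
{
	unsigned long gpa;
	int cc, rc;

	rc = probe(0x1000, 0x10000, &gpa, &cc);
	printf("rc=%d cc=%d\n", rc, cc);	/* rc=0 cc=1: read-only */
	rc = probe(0x20000, 0x10000, &gpa, &cc);
	printf("rc=%d cc=%d\n", rc, cc);	/* rc=0 cc=0: writable */
	return 0;
}
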
diff --git a/arch/s390/lib/Makefile b/arch/s390/lib/Makefile
index 0e8fefe..1d1af31 100644
--- a/arch/s390/lib/Makefile
+++ b/arch/s390/lib/Makefile
@@ -3,7 +3,7 @@
 #
 
 lib-y += delay.o string.o uaccess.o find.o
-obj-y += mem.o
+obj-y += mem.o xor.o
 lib-$(CONFIG_SMP) += spinlock.o
 lib-$(CONFIG_KPROBES) += probes.o
 lib-$(CONFIG_UPROBES) += probes.o
diff --git a/arch/s390/lib/xor.c b/arch/s390/lib/xor.c
new file mode 100644
index 0000000..7d94e3e
--- /dev/null
+++ b/arch/s390/lib/xor.c
@@ -0,0 +1,134 @@
+/*
+ * Optimized xor_block operation for RAID4/5
+ *
+ * Copyright IBM Corp. 2016
+ * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
+ */
+
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/raid/xor.h>
+
+static void xor_xc_2(unsigned long bytes, unsigned long *p1, unsigned long *p2)
+{
+	asm volatile(
+		"	larl	1,2f\n"
+		"	aghi	%0,-1\n"
+		"	jm	3f\n"
+		"	srlg	0,%0,8\n"
+		"	ltgr	0,0\n"
+		"	jz	1f\n"
+		"0:	xc	0(256,%1),0(%2)\n"
+		"	la	%1,256(%1)\n"
+		"	la	%2,256(%2)\n"
+		"	brctg	0,0b\n"
+		"1:	ex	%0,0(1)\n"
+		"	j	3f\n"
+		"2:	xc	0(1,%1),0(%2)\n"
+		"3:\n"
+		: : "d" (bytes), "a" (p1), "a" (p2)
+		: "0", "1", "cc", "memory");
+}
+
+static void xor_xc_3(unsigned long bytes, unsigned long *p1, unsigned long *p2,
+		     unsigned long *p3)
+{
+	asm volatile(
+		"	larl	1,2f\n"
+		"	aghi	%0,-1\n"
+		"	jm	3f\n"
+		"	srlg	0,%0,8\n"
+		"	ltgr	0,0\n"
+		"	jz	1f\n"
+		"0:	xc	0(256,%1),0(%2)\n"
+		"	xc	0(256,%1),0(%3)\n"
+		"	la	%1,256(%1)\n"
+		"	la	%2,256(%2)\n"
+		"	la	%3,256(%3)\n"
+		"	brctg	0,0b\n"
+		"1:	ex	%0,0(1)\n"
+		"	ex	%0,6(1)\n"
+		"	j	3f\n"
+		"2:	xc	0(1,%1),0(%2)\n"
+		"	xc	0(1,%1),0(%3)\n"
+		"3:\n"
+		: "+d" (bytes), "+a" (p1), "+a" (p2), "+a" (p3)
+		: : "0", "1", "cc", "memory");
+}
+
+static void xor_xc_4(unsigned long bytes, unsigned long *p1, unsigned long *p2,
+		     unsigned long *p3, unsigned long *p4)
+{
+	asm volatile(
+		"	larl	1,2f\n"
+		"	aghi	%0,-1\n"
+		"	jm	3f\n"
+		"	srlg	0,%0,8\n"
+		"	ltgr	0,0\n"
+		"	jz	1f\n"
+		"0:	xc	0(256,%1),0(%2)\n"
+		"	xc	0(256,%1),0(%3)\n"
+		"	xc	0(256,%1),0(%4)\n"
+		"	la	%1,256(%1)\n"
+		"	la	%2,256(%2)\n"
+		"	la	%3,256(%3)\n"
+		"	la	%4,256(%4)\n"
+		"	brctg	0,0b\n"
+		"1:	ex	%0,0(1)\n"
+		"	ex	%0,6(1)\n"
+		"	ex	%0,12(1)\n"
+		"	j	3f\n"
+		"2:	xc	0(1,%1),0(%2)\n"
+		"	xc	0(1,%1),0(%3)\n"
+		"	xc	0(1,%1),0(%4)\n"
+		"3:\n"
+		: "+d" (bytes), "+a" (p1), "+a" (p2), "+a" (p3), "+a" (p4)
+		: : "0", "1", "cc", "memory");
+}
+
+static void xor_xc_5(unsigned long bytes, unsigned long *p1, unsigned long *p2,
+		     unsigned long *p3, unsigned long *p4, unsigned long *p5)
+{
+	/* Get around a gcc oddity */
+	register unsigned long *reg7 asm ("7") = p5;
+
+	asm volatile(
+		"	larl	1,2f\n"
+		"	aghi	%0,-1\n"
+		"	jm	3f\n"
+		"	srlg	0,%0,8\n"
+		"	ltgr	0,0\n"
+		"	jz	1f\n"
+		"0:	xc	0(256,%1),0(%2)\n"
+		"	xc	0(256,%1),0(%3)\n"
+		"	xc	0(256,%1),0(%4)\n"
+		"	xc	0(256,%1),0(%5)\n"
+		"	la	%1,256(%1)\n"
+		"	la	%2,256(%2)\n"
+		"	la	%3,256(%3)\n"
+		"	la	%4,256(%4)\n"
+		"	la	%5,256(%5)\n"
+		"	brctg	0,0b\n"
+		"1:	ex	%0,0(1)\n"
+		"	ex	%0,6(1)\n"
+		"	ex	%0,12(1)\n"
+		"	ex	%0,18(1)\n"
+		"	j	3f\n"
+		"2:	xc	0(1,%1),0(%2)\n"
+		"	xc	0(1,%1),0(%3)\n"
+		"	xc	0(1,%1),0(%4)\n"
+		"	xc	0(1,%1),0(%5)\n"
+		"3:\n"
+		: "+d" (bytes), "+a" (p1), "+a" (p2), "+a" (p3), "+a" (p4),
+		  "+a" (reg7)
+		: : "0", "1", "cc", "memory");
+}
+
+struct xor_block_template xor_block_xc = {
+	.name = "xc",
+	.do_2 = xor_xc_2,
+	.do_3 = xor_xc_3,
+	.do_4 = xor_xc_4,
+	.do_5 = xor_xc_5,
+};
+EXPORT_SYMBOL(xor_block_xc);
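
The new xor.c fills in the xor_block template with the "xc" storage-to-storage instruction, working in 256-byte chunks and finishing the remainder via "ex". A portable C reference of what xor_xc_2() computes (p1 ^= p2, byte-wise), which can be handy for cross-checking the assembler variants:

#include <assert.h>
#include <string.h>

/* Portable equivalent of xor_xc_2(): p1[i] ^= p2[i] for 'bytes' bytes. */
static void xor_2(unsigned long bytes, unsigned long *p1, unsigned long *p2)
{
	unsigned char *d = (unsigned char *)p1;
	const unsigned char *s = (const unsigned char *)p2;
	unsigned long i;

	for (i = 0; i < bytes; i++)
		d[i] ^= s[i];
}

int main(void)
{
	unsigned long a[4], b[4], orig[4];

	memset(a, 0xa5, sizeof(a));
	memset(b, 0x5a, sizeof(b));
	memcpy(orig, a, sizeof(orig));

	xor_2(sizeof(a), a, b);
	assert(a[0] == (orig[0] ^ b[0]));	/* 0xa5 ^ 0x5a == 0xff per byte */

	xor_2(sizeof(a), a, b);			/* xor twice restores the input */
	assert(memcmp(a, orig, sizeof(a)) == 0);
	return 0;
}
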
diff --git a/arch/s390/mm/Makefile b/arch/s390/mm/Makefile
index 839592c..2ae54ca 100644
--- a/arch/s390/mm/Makefile
+++ b/arch/s390/mm/Makefile
@@ -2,9 +2,11 @@
 # Makefile for the linux s390-specific parts of the memory manager.
 #
 
-obj-y		:= init.o fault.o extmem.o mmap.o vmem.o pgtable.o maccess.o
+obj-y		:= init.o fault.o extmem.o mmap.o vmem.o maccess.o
 obj-y		+= page-states.o gup.o extable.o pageattr.o mem_detect.o
+obj-y		+= pgtable.o pgalloc.o
 
 obj-$(CONFIG_CMM)		+= cmm.o
 obj-$(CONFIG_HUGETLB_PAGE)	+= hugetlbpage.o
 obj-$(CONFIG_S390_PTDUMP)	+= dump_pagetables.o
+obj-$(CONFIG_PGSTE)		+= gmap.o
diff --git a/arch/s390/mm/extmem.c b/arch/s390/mm/extmem.c
index a1bf4ad..02042b6 100644
--- a/arch/s390/mm/extmem.c
+++ b/arch/s390/mm/extmem.c
@@ -265,7 +265,7 @@
 		goto out_free;
 	}
 	if (diag_cc > 1) {
-		pr_warning("Querying a DCSS type failed with rc=%ld\n", vmrc);
+		pr_warn("Querying a DCSS type failed with rc=%ld\n", vmrc);
 		rc = dcss_diag_translate_rc (vmrc);
 		goto out_free;
 	}
@@ -457,8 +457,7 @@
 		goto out_resource;
 	}
 	if (diag_cc > 1) {
-		pr_warning("Loading DCSS %s failed with rc=%ld\n", name,
-			   end_addr);
+		pr_warn("Loading DCSS %s failed with rc=%ld\n", name, end_addr);
 		rc = dcss_diag_translate_rc(end_addr);
 		dcss_diag(&purgeseg_scode, seg->dcss_name,
 				&dummy, &dummy);
@@ -574,8 +573,7 @@
 		goto out_unlock;
 	}
 	if (atomic_read (&seg->ref_count) != 1) {
-		pr_warning("DCSS %s is in use and cannot be reloaded\n",
-			   name);
+		pr_warn("DCSS %s is in use and cannot be reloaded\n", name);
 		rc = -EAGAIN;
 		goto out_unlock;
 	}
@@ -588,8 +586,8 @@
 			seg->res->flags |= IORESOURCE_READONLY;
 
 	if (request_resource(&iomem_resource, seg->res)) {
-		pr_warning("DCSS %s overlaps with used memory resources "
-			   "and cannot be reloaded\n", name);
+		pr_warn("DCSS %s overlaps with used memory resources and cannot be reloaded\n",
+			name);
 		rc = -EBUSY;
 		kfree(seg->res);
 		goto out_del_mem;
@@ -607,8 +605,8 @@
 		goto out_del_res;
 	}
 	if (diag_cc > 1) {
-		pr_warning("Reloading DCSS %s failed with rc=%ld\n", name,
-			   end_addr);
+		pr_warn("Reloading DCSS %s failed with rc=%ld\n",
+			name, end_addr);
 		rc = dcss_diag_translate_rc(end_addr);
 		goto out_del_res;
 	}
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index 791a414..cce577f 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -32,6 +32,7 @@
 #include <asm/asm-offsets.h>
 #include <asm/diag.h>
 #include <asm/pgtable.h>
+#include <asm/gmap.h>
 #include <asm/irq.h>
 #include <asm/mmu_context.h>
 #include <asm/facility.h>
@@ -183,6 +184,8 @@
 {
 	unsigned long asce;
 
+	pr_alert("Failing address: %016lx TEID: %016lx\n",
+		 regs->int_parm_long & __FAIL_ADDR_MASK, regs->int_parm_long);
 	pr_alert("Fault in ");
 	switch (regs->int_parm_long & 3) {
 	case 3:
@@ -218,7 +221,9 @@
 	dump_pagetable(asce, regs->int_parm_long & __FAIL_ADDR_MASK);
 }
 
-static inline void report_user_fault(struct pt_regs *regs, long signr)
+int show_unhandled_signals = 1;
+
+void report_user_fault(struct pt_regs *regs, long signr, int is_mm_fault)
 {
 	if ((task_pid_nr(current) > 1) && !show_unhandled_signals)
 		return;
@@ -230,9 +235,8 @@
 	       regs->int_code & 0xffff, regs->int_code >> 17);
 	print_vma_addr(KERN_CONT "in ", regs->psw.addr);
 	printk(KERN_CONT "\n");
-	printk(KERN_ALERT "failing address: %016lx TEID: %016lx\n",
-	       regs->int_parm_long & __FAIL_ADDR_MASK, regs->int_parm_long);
-	dump_fault_info(regs);
+	if (is_mm_fault)
+		dump_fault_info(regs);
 	show_regs(regs);
 }
 
@@ -244,7 +248,7 @@
 {
 	struct siginfo si;
 
-	report_user_fault(regs, SIGSEGV);
+	report_user_fault(regs, SIGSEGV, 1);
 	si.si_signo = SIGSEGV;
 	si.si_code = si_code;
 	si.si_addr = (void __user *)(regs->int_parm_long & __FAIL_ADDR_MASK);
@@ -272,8 +276,6 @@
 	else
 		printk(KERN_ALERT "Unable to handle kernel paging request"
 		       " in virtual user address space\n");
-	printk(KERN_ALERT "failing address: %016lx TEID: %016lx\n",
-	       regs->int_parm_long & __FAIL_ADDR_MASK, regs->int_parm_long);
 	dump_fault_info(regs);
 	die(regs, "Oops");
 	do_exit(SIGKILL);
diff --git a/arch/s390/mm/gmap.c b/arch/s390/mm/gmap.c
new file mode 100644
index 0000000..69247b4
--- /dev/null
+++ b/arch/s390/mm/gmap.c
@@ -0,0 +1,774 @@
+/*
+ *  KVM guest address space mapping code
+ *
+ *    Copyright IBM Corp. 2007, 2016
+ *    Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/swap.h>
+#include <linux/smp.h>
+#include <linux/spinlock.h>
+#include <linux/slab.h>
+#include <linux/swapops.h>
+#include <linux/ksm.h>
+#include <linux/mman.h>
+
+#include <asm/pgtable.h>
+#include <asm/pgalloc.h>
+#include <asm/gmap.h>
+#include <asm/tlb.h>
+
+/**
+ * gmap_alloc - allocate a guest address space
+ * @mm: pointer to the parent mm_struct
+ * @limit: maximum size of the gmap address space
+ *
+ * Returns a guest address space structure.
+ */
+struct gmap *gmap_alloc(struct mm_struct *mm, unsigned long limit)
+{
+	struct gmap *gmap;
+	struct page *page;
+	unsigned long *table;
+	unsigned long etype, atype;
+
+	if (limit < (1UL << 31)) {
+		limit = (1UL << 31) - 1;
+		atype = _ASCE_TYPE_SEGMENT;
+		etype = _SEGMENT_ENTRY_EMPTY;
+	} else if (limit < (1UL << 42)) {
+		limit = (1UL << 42) - 1;
+		atype = _ASCE_TYPE_REGION3;
+		etype = _REGION3_ENTRY_EMPTY;
+	} else if (limit < (1UL << 53)) {
+		limit = (1UL << 53) - 1;
+		atype = _ASCE_TYPE_REGION2;
+		etype = _REGION2_ENTRY_EMPTY;
+	} else {
+		limit = -1UL;
+		atype = _ASCE_TYPE_REGION1;
+		etype = _REGION1_ENTRY_EMPTY;
+	}
+	gmap = kzalloc(sizeof(struct gmap), GFP_KERNEL);
+	if (!gmap)
+		goto out;
+	INIT_LIST_HEAD(&gmap->crst_list);
+	INIT_RADIX_TREE(&gmap->guest_to_host, GFP_KERNEL);
+	INIT_RADIX_TREE(&gmap->host_to_guest, GFP_ATOMIC);
+	spin_lock_init(&gmap->guest_table_lock);
+	gmap->mm = mm;
+	page = alloc_pages(GFP_KERNEL, 2);
+	if (!page)
+		goto out_free;
+	page->index = 0;
+	list_add(&page->lru, &gmap->crst_list);
+	table = (unsigned long *) page_to_phys(page);
+	crst_table_init(table, etype);
+	gmap->table = table;
+	gmap->asce = atype | _ASCE_TABLE_LENGTH |
+		_ASCE_USER_BITS | __pa(table);
+	gmap->asce_end = limit;
+	down_write(&mm->mmap_sem);
+	list_add(&gmap->list, &mm->context.gmap_list);
+	up_write(&mm->mmap_sem);
+	return gmap;
+
+out_free:
+	kfree(gmap);
+out:
+	return NULL;
+}
+EXPORT_SYMBOL_GPL(gmap_alloc);
+
+static void gmap_flush_tlb(struct gmap *gmap)
+{
+	if (MACHINE_HAS_IDTE)
+		__tlb_flush_asce(gmap->mm, gmap->asce);
+	else
+		__tlb_flush_global();
+}
+
+static void gmap_radix_tree_free(struct radix_tree_root *root)
+{
+	struct radix_tree_iter iter;
+	unsigned long indices[16];
+	unsigned long index;
+	void **slot;
+	int i, nr;
+
+	/* A radix tree is freed by deleting all of its entries */
+	index = 0;
+	do {
+		nr = 0;
+		radix_tree_for_each_slot(slot, root, &iter, index) {
+			indices[nr] = iter.index;
+			if (++nr == 16)
+				break;
+		}
+		for (i = 0; i < nr; i++) {
+			index = indices[i];
+			radix_tree_delete(root, index);
+		}
+	} while (nr > 0);
+}
+
+/**
+ * gmap_free - free a guest address space
+ * @gmap: pointer to the guest address space structure
+ */
+void gmap_free(struct gmap *gmap)
+{
+	struct page *page, *next;
+
+	/* Flush tlb. */
+	if (MACHINE_HAS_IDTE)
+		__tlb_flush_asce(gmap->mm, gmap->asce);
+	else
+		__tlb_flush_global();
+
+	/* Free all segment & region tables. */
+	list_for_each_entry_safe(page, next, &gmap->crst_list, lru)
+		__free_pages(page, 2);
+	gmap_radix_tree_free(&gmap->guest_to_host);
+	gmap_radix_tree_free(&gmap->host_to_guest);
+	down_write(&gmap->mm->mmap_sem);
+	list_del(&gmap->list);
+	up_write(&gmap->mm->mmap_sem);
+	kfree(gmap);
+}
+EXPORT_SYMBOL_GPL(gmap_free);
+
+/**
+ * gmap_enable - switch primary space to the guest address space
+ * @gmap: pointer to the guest address space structure
+ */
+void gmap_enable(struct gmap *gmap)
+{
+	S390_lowcore.gmap = (unsigned long) gmap;
+}
+EXPORT_SYMBOL_GPL(gmap_enable);
+
+/**
+ * gmap_disable - switch back to the standard primary address space
+ * @gmap: pointer to the guest address space structure
+ */
+void gmap_disable(struct gmap *gmap)
+{
+	S390_lowcore.gmap = 0UL;
+}
+EXPORT_SYMBOL_GPL(gmap_disable);
+
+/*
+ * gmap_alloc_table is assumed to be called with mmap_sem held
+ */
+static int gmap_alloc_table(struct gmap *gmap, unsigned long *table,
+			    unsigned long init, unsigned long gaddr)
+{
+	struct page *page;
+	unsigned long *new;
+
+	/* since we don't free the gmap table until gmap_free we can unlock */
+	page = alloc_pages(GFP_KERNEL, 2);
+	if (!page)
+		return -ENOMEM;
+	new = (unsigned long *) page_to_phys(page);
+	crst_table_init(new, init);
+	spin_lock(&gmap->mm->page_table_lock);
+	if (*table & _REGION_ENTRY_INVALID) {
+		list_add(&page->lru, &gmap->crst_list);
+		*table = (unsigned long) new | _REGION_ENTRY_LENGTH |
+			(*table & _REGION_ENTRY_TYPE_MASK);
+		page->index = gaddr;
+		page = NULL;
+	}
+	spin_unlock(&gmap->mm->page_table_lock);
+	if (page)
+		__free_pages(page, 2);
+	return 0;
+}
+
+/**
+ * __gmap_segment_gaddr - find virtual address from segment pointer
+ * @entry: pointer to a segment table entry in the guest address space
+ *
+ * Returns the virtual address in the guest address space for the segment
+ */
+static unsigned long __gmap_segment_gaddr(unsigned long *entry)
+{
+	struct page *page;
+	unsigned long offset, mask;
+
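+	/*
+	 * Comment added for clarity: the segment table page's index field
+	 * (set in gmap_alloc_table) holds the guest address the table covers;
+	 * add this entry's offset within the table.
+	 */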
+	offset = (unsigned long) entry / sizeof(unsigned long);
+	offset = (offset & (PTRS_PER_PMD - 1)) * PMD_SIZE;
+	mask = ~(PTRS_PER_PMD * sizeof(pmd_t) - 1);
+	page = virt_to_page((void *)((unsigned long) entry & mask));
+	return page->index + offset;
+}
+
+/**
+ * __gmap_unlink_by_vmaddr - unlink a single segment via a host address
+ * @gmap: pointer to the guest address space structure
+ * @vmaddr: address in the host process address space
+ *
+ * Returns 1 if a TLB flush is required
+ */
+static int __gmap_unlink_by_vmaddr(struct gmap *gmap, unsigned long vmaddr)
+{
+	unsigned long *entry;
+	int flush = 0;
+
+	spin_lock(&gmap->guest_table_lock);
+	entry = radix_tree_delete(&gmap->host_to_guest, vmaddr >> PMD_SHIFT);
+	if (entry) {
+		flush = (*entry != _SEGMENT_ENTRY_INVALID);
+		*entry = _SEGMENT_ENTRY_INVALID;
+	}
+	spin_unlock(&gmap->guest_table_lock);
+	return flush;
+}
+
+/**
+ * __gmap_unmap_by_gaddr - unmap a single segment via a guest address
+ * @gmap: pointer to the guest address space structure
+ * @gaddr: address in the guest address space
+ *
+ * Returns 1 if a TLB flush is required
+ */
+static int __gmap_unmap_by_gaddr(struct gmap *gmap, unsigned long gaddr)
+{
+	unsigned long vmaddr;
+
+	vmaddr = (unsigned long) radix_tree_delete(&gmap->guest_to_host,
+						   gaddr >> PMD_SHIFT);
+	return vmaddr ? __gmap_unlink_by_vmaddr(gmap, vmaddr) : 0;
+}
+
+/**
+ * gmap_unmap_segment - unmap segment from the guest address space
+ * @gmap: pointer to the guest address space structure
+ * @to: address in the guest address space
+ * @len: length of the memory area to unmap
+ *
+ * Returns 0 if the unmap succeeded, -EINVAL if not.
+ */
+int gmap_unmap_segment(struct gmap *gmap, unsigned long to, unsigned long len)
+{
+	unsigned long off;
+	int flush;
+
+	if ((to | len) & (PMD_SIZE - 1))
+		return -EINVAL;
+	if (len == 0 || to + len < to)
+		return -EINVAL;
+
+	flush = 0;
+	down_write(&gmap->mm->mmap_sem);
+	for (off = 0; off < len; off += PMD_SIZE)
+		flush |= __gmap_unmap_by_gaddr(gmap, to + off);
+	up_write(&gmap->mm->mmap_sem);
+	if (flush)
+		gmap_flush_tlb(gmap);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(gmap_unmap_segment);
+
+/**
+ * gmap_map_segment - map a segment to the guest address space
+ * @gmap: pointer to the guest address space structure
+ * @from: source address in the parent address space
+ * @to: target address in the guest address space
+ * @len: length of the memory area to map
+ *
+ * Returns 0 if the mmap succeeded, -EINVAL or -ENOMEM if not.
+ */
+int gmap_map_segment(struct gmap *gmap, unsigned long from,
+		     unsigned long to, unsigned long len)
+{
+	unsigned long off;
+	int flush;
+
+	if ((from | to | len) & (PMD_SIZE - 1))
+		return -EINVAL;
+	if (len == 0 || from + len < from || to + len < to ||
+	    from + len > TASK_MAX_SIZE || to + len > gmap->asce_end)
+		return -EINVAL;
+
+	flush = 0;
+	down_write(&gmap->mm->mmap_sem);
+	for (off = 0; off < len; off += PMD_SIZE) {
+		/* Remove old translation */
+		flush |= __gmap_unmap_by_gaddr(gmap, to + off);
+		/* Store new translation */
+		if (radix_tree_insert(&gmap->guest_to_host,
+				      (to + off) >> PMD_SHIFT,
+				      (void *) from + off))
+			break;
+	}
+	up_write(&gmap->mm->mmap_sem);
+	if (flush)
+		gmap_flush_tlb(gmap);
+	if (off >= len)
+		return 0;
+	gmap_unmap_segment(gmap, to, len);
+	return -ENOMEM;
+}
+EXPORT_SYMBOL_GPL(gmap_map_segment);
+
+/**
+ * __gmap_translate - translate a guest address to a user space address
+ * @gmap: pointer to guest mapping meta data structure
+ * @gaddr: guest address
+ *
+ * Returns user space address which corresponds to the guest address or
+ * -EFAULT if no such mapping exists.
+ * This function does not establish potentially missing page table entries.
+ * The mmap_sem of the mm that belongs to the address space must be held
+ * when this function gets called.
+ */
+unsigned long __gmap_translate(struct gmap *gmap, unsigned long gaddr)
+{
+	unsigned long vmaddr;
+
+	vmaddr = (unsigned long)
+		radix_tree_lookup(&gmap->guest_to_host, gaddr >> PMD_SHIFT);
+	return vmaddr ? (vmaddr | (gaddr & ~PMD_MASK)) : -EFAULT;
+}
+EXPORT_SYMBOL_GPL(__gmap_translate);
+
+/**
+ * gmap_translate - translate a guest address to a user space address
+ * @gmap: pointer to guest mapping meta data structure
+ * @gaddr: guest address
+ *
+ * Returns user space address which corresponds to the guest address or
+ * -EFAULT if no such mapping exists.
+ * This function does not establish potentially missing page table entries.
+ */
+unsigned long gmap_translate(struct gmap *gmap, unsigned long gaddr)
+{
+	unsigned long rc;
+
+	down_read(&gmap->mm->mmap_sem);
+	rc = __gmap_translate(gmap, gaddr);
+	up_read(&gmap->mm->mmap_sem);
+	return rc;
+}
+EXPORT_SYMBOL_GPL(gmap_translate);
+
+/**
+ * gmap_unlink - disconnect a page table from the gmap shadow tables
+ * @mm: pointer to the parent mm_struct
+ * @table: pointer to the host page table
+ * @vmaddr: vm address associated with the host page table
+ */
+void gmap_unlink(struct mm_struct *mm, unsigned long *table,
+		 unsigned long vmaddr)
+{
+	struct gmap *gmap;
+	int flush;
+
+	list_for_each_entry(gmap, &mm->context.gmap_list, list) {
+		flush = __gmap_unlink_by_vmaddr(gmap, vmaddr);
+		if (flush)
+			gmap_flush_tlb(gmap);
+	}
+}
+
+/**
+ * __gmap_link - set up shadow page tables to connect a host to a guest address
+ * @gmap: pointer to guest mapping meta data structure
+ * @gaddr: guest address
+ * @vmaddr: vm address
+ *
+ * Returns 0 on success, -ENOMEM for out of memory conditions, and -EFAULT
+ * if the vm address is already mapped to a different guest segment.
+ * The mmap_sem of the mm that belongs to the address space must be held
+ * when this function gets called.
+ */
+int __gmap_link(struct gmap *gmap, unsigned long gaddr, unsigned long vmaddr)
+{
+	struct mm_struct *mm;
+	unsigned long *table;
+	spinlock_t *ptl;
+	pgd_t *pgd;
+	pud_t *pud;
+	pmd_t *pmd;
+	int rc;
+
+	/* Create higher level tables in the gmap page table */
+	table = gmap->table;
+	if ((gmap->asce & _ASCE_TYPE_MASK) >= _ASCE_TYPE_REGION1) {
+		table += (gaddr >> 53) & 0x7ff;
+		if ((*table & _REGION_ENTRY_INVALID) &&
+		    gmap_alloc_table(gmap, table, _REGION2_ENTRY_EMPTY,
+				     gaddr & 0xffe0000000000000UL))
+			return -ENOMEM;
+		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
+	}
+	if ((gmap->asce & _ASCE_TYPE_MASK) >= _ASCE_TYPE_REGION2) {
+		table += (gaddr >> 42) & 0x7ff;
+		if ((*table & _REGION_ENTRY_INVALID) &&
+		    gmap_alloc_table(gmap, table, _REGION3_ENTRY_EMPTY,
+				     gaddr & 0xfffffc0000000000UL))
+			return -ENOMEM;
+		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
+	}
+	if ((gmap->asce & _ASCE_TYPE_MASK) >= _ASCE_TYPE_REGION3) {
+		table += (gaddr >> 31) & 0x7ff;
+		if ((*table & _REGION_ENTRY_INVALID) &&
+		    gmap_alloc_table(gmap, table, _SEGMENT_ENTRY_EMPTY,
+				     gaddr & 0xffffffff80000000UL))
+			return -ENOMEM;
+		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
+	}
+	table += (gaddr >> 20) & 0x7ff;
+	/* Walk the parent mm page table */
+	mm = gmap->mm;
+	pgd = pgd_offset(mm, vmaddr);
+	VM_BUG_ON(pgd_none(*pgd));
+	pud = pud_offset(pgd, vmaddr);
+	VM_BUG_ON(pud_none(*pud));
+	pmd = pmd_offset(pud, vmaddr);
+	VM_BUG_ON(pmd_none(*pmd));
+	/* large pmds cannot yet be handled */
+	if (pmd_large(*pmd))
+		return -EFAULT;
+	/* Link gmap segment table entry location to page table. */
+	rc = radix_tree_preload(GFP_KERNEL);
+	if (rc)
+		return rc;
+	ptl = pmd_lock(mm, pmd);
+	spin_lock(&gmap->guest_table_lock);
+	if (*table == _SEGMENT_ENTRY_INVALID) {
+		rc = radix_tree_insert(&gmap->host_to_guest,
+				       vmaddr >> PMD_SHIFT, table);
+		if (!rc)
+			*table = pmd_val(*pmd);
+	} else
+		rc = 0;
+	spin_unlock(&gmap->guest_table_lock);
+	spin_unlock(ptl);
+	radix_tree_preload_end();
+	return rc;
+}
+
+/**
+ * gmap_fault - resolve a fault on a guest address
+ * @gmap: pointer to guest mapping meta data structure
+ * @gaddr: guest address
+ * @fault_flags: flags to pass down to handle_mm_fault()
+ *
+ * Returns 0 on success, -ENOMEM for out of memory conditions, and -EFAULT
+ * if the vm address is already mapped to a different guest segment.
+ */
+int gmap_fault(struct gmap *gmap, unsigned long gaddr,
+	       unsigned int fault_flags)
+{
+	unsigned long vmaddr;
+	int rc;
+	bool unlocked;
+
+	down_read(&gmap->mm->mmap_sem);
+
+retry:
+	unlocked = false;
+	vmaddr = __gmap_translate(gmap, gaddr);
+	if (IS_ERR_VALUE(vmaddr)) {
+		rc = vmaddr;
+		goto out_up;
+	}
+	if (fixup_user_fault(current, gmap->mm, vmaddr, fault_flags,
+			     &unlocked)) {
+		rc = -EFAULT;
+		goto out_up;
+	}
+	/*
+	 * If fixup_user_fault() unlocked the mmap_sem while faulting in the
+	 * page, redo __gmap_translate() to not race with a map/unmap_segment.
+	 */
+	if (unlocked)
+		goto retry;
+
+	rc = __gmap_link(gmap, gaddr, vmaddr);
+out_up:
+	up_read(&gmap->mm->mmap_sem);
+	return rc;
+}
+EXPORT_SYMBOL_GPL(gmap_fault);
+
+/*
+ * this function is assumed to be called with mmap_sem held
+ */
+void __gmap_zap(struct gmap *gmap, unsigned long gaddr)
+{
+	unsigned long vmaddr;
+	spinlock_t *ptl;
+	pte_t *ptep;
+
+	/* Find the vm address for the guest address */
+	vmaddr = (unsigned long) radix_tree_lookup(&gmap->guest_to_host,
+						   gaddr >> PMD_SHIFT);
+	if (vmaddr) {
+		vmaddr |= gaddr & ~PMD_MASK;
+		/* Get pointer to the page table entry */
+		ptep = get_locked_pte(gmap->mm, vmaddr, &ptl);
+		if (likely(ptep))
+			ptep_zap_unused(gmap->mm, vmaddr, ptep, 0);
+		pte_unmap_unlock(ptep, ptl);
+	}
+}
+EXPORT_SYMBOL_GPL(__gmap_zap);
+
+void gmap_discard(struct gmap *gmap, unsigned long from, unsigned long to)
+{
+	unsigned long gaddr, vmaddr, size;
+	struct vm_area_struct *vma;
+
+	down_read(&gmap->mm->mmap_sem);
+	for (gaddr = from; gaddr < to;
+	     gaddr = (gaddr + PMD_SIZE) & PMD_MASK) {
+		/* Find the vm address for the guest address */
+		vmaddr = (unsigned long)
+			radix_tree_lookup(&gmap->guest_to_host,
+					  gaddr >> PMD_SHIFT);
+		if (!vmaddr)
+			continue;
+		vmaddr |= gaddr & ~PMD_MASK;
+		/* Find vma in the parent mm */
+		vma = find_vma(gmap->mm, vmaddr);
+		size = min(to - gaddr, PMD_SIZE - (gaddr & ~PMD_MASK));
+		zap_page_range(vma, vmaddr, size, NULL);
+	}
+	up_read(&gmap->mm->mmap_sem);
+}
+EXPORT_SYMBOL_GPL(gmap_discard);
+
+static LIST_HEAD(gmap_notifier_list);
+static DEFINE_SPINLOCK(gmap_notifier_lock);
+
+/**
+ * gmap_register_ipte_notifier - register a pte invalidation callback
+ * @nb: pointer to the gmap notifier block
+ */
+void gmap_register_ipte_notifier(struct gmap_notifier *nb)
+{
+	spin_lock(&gmap_notifier_lock);
+	list_add(&nb->list, &gmap_notifier_list);
+	spin_unlock(&gmap_notifier_lock);
+}
+EXPORT_SYMBOL_GPL(gmap_register_ipte_notifier);
+
+/**
+ * gmap_unregister_ipte_notifier - remove a pte invalidation callback
+ * @nb: pointer to the gmap notifier block
+ */
+void gmap_unregister_ipte_notifier(struct gmap_notifier *nb)
+{
+	spin_lock(&gmap_notifier_lock);
+	list_del_init(&nb->list);
+	spin_unlock(&gmap_notifier_lock);
+}
+EXPORT_SYMBOL_GPL(gmap_unregister_ipte_notifier);
+
+/**
+ * gmap_ipte_notify - mark a range of ptes for invalidation notification
+ * @gmap: pointer to guest mapping meta data structure
+ * @gaddr: virtual address in the guest address space
+ * @len: size of area
+ *
+ * Returns 0 if, for each page in the given range, a gmap mapping exists and
+ * the invalidation notification could be set. If the gmap mapping is missing
+ * for one or more pages, -EFAULT is returned. If no memory could be allocated,
+ * -ENOMEM is returned. This function establishes missing page table entries.
+ */
+int gmap_ipte_notify(struct gmap *gmap, unsigned long gaddr, unsigned long len)
+{
+	unsigned long addr;
+	spinlock_t *ptl;
+	pte_t *ptep;
+	bool unlocked;
+	int rc = 0;
+
+	if ((gaddr & ~PAGE_MASK) || (len & ~PAGE_MASK))
+		return -EINVAL;
+	down_read(&gmap->mm->mmap_sem);
+	while (len) {
+		unlocked = false;
+		/* Convert gmap address and connect the page tables */
+		addr = __gmap_translate(gmap, gaddr);
+		if (IS_ERR_VALUE(addr)) {
+			rc = addr;
+			break;
+		}
+		/* Get the page mapped */
+		if (fixup_user_fault(current, gmap->mm, addr, FAULT_FLAG_WRITE,
+				     &unlocked)) {
+			rc = -EFAULT;
+			break;
+		}
+		/* The mmap_sem was unlocked while faulting in the page; retry */
+		if (unlocked)
+			continue;
+		rc = __gmap_link(gmap, gaddr, addr);
+		if (rc)
+			break;
+		/* Walk the process page table, lock and get pte pointer */
+		ptep = get_locked_pte(gmap->mm, addr, &ptl);
+		VM_BUG_ON(!ptep);
+		/* Set notification bit in the pgste of the pte */
+		if ((pte_val(*ptep) & (_PAGE_INVALID | _PAGE_PROTECT)) == 0) {
+			ptep_set_notify(gmap->mm, addr, ptep);
+			gaddr += PAGE_SIZE;
+			len -= PAGE_SIZE;
+		}
+		pte_unmap_unlock(ptep, ptl);
+	}
+	up_read(&gmap->mm->mmap_sem);
+	return rc;
+}
+EXPORT_SYMBOL_GPL(gmap_ipte_notify);
+
+/**
+ * ptep_notify - call all invalidation callbacks for a specific pte.
+ * @mm: pointer to the process mm_struct
+ * @vmaddr: virtual address in the process address space
+ * @pte: pointer to the page table entry
+ *
+ * This function is assumed to be called with the page table lock held
+ * for the pte to notify.
+ */
+void ptep_notify(struct mm_struct *mm, unsigned long vmaddr, pte_t *pte)
+{
+	unsigned long offset, gaddr;
+	unsigned long *table;
+	struct gmap_notifier *nb;
+	struct gmap *gmap;
+
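+	/*
+	 * Comment added for clarity: convert the byte offset of the pte
+	 * within its 2K page table into the guest address offset within
+	 * the 1 MB segment.
+	 */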
+	offset = ((unsigned long) pte) & (255 * sizeof(pte_t));
+	offset = offset * (4096 / sizeof(pte_t));
+	spin_lock(&gmap_notifier_lock);
+	list_for_each_entry(gmap, &mm->context.gmap_list, list) {
+		table = radix_tree_lookup(&gmap->host_to_guest,
+					  vmaddr >> PMD_SHIFT);
+		if (!table)
+			continue;
+		gaddr = __gmap_segment_gaddr(table) + offset;
+		list_for_each_entry(nb, &gmap_notifier_list, list)
+			nb->notifier_call(gmap, gaddr);
+	}
+	spin_unlock(&gmap_notifier_lock);
+}
+EXPORT_SYMBOL_GPL(ptep_notify);
+
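+/*
+ * Comment added for clarity: split existing THP mappings and disable THP
+ * for future mappings; __gmap_link() cannot handle large pmds.
+ */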
+static inline void thp_split_mm(struct mm_struct *mm)
+{
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+	struct vm_area_struct *vma;
+	unsigned long addr;
+
+	for (vma = mm->mmap; vma != NULL; vma = vma->vm_next) {
+		for (addr = vma->vm_start;
+		     addr < vma->vm_end;
+		     addr += PAGE_SIZE)
+			follow_page(vma, addr, FOLL_SPLIT);
+		vma->vm_flags &= ~VM_HUGEPAGE;
+		vma->vm_flags |= VM_NOHUGEPAGE;
+	}
+	mm->def_flags |= VM_NOHUGEPAGE;
+#endif
+}
+
+/*
+ * Switch on pgstes for the current userspace process (needed for KVM).
+ */
+int s390_enable_sie(void)
+{
+	struct mm_struct *mm = current->mm;
+
+	/* Do we have pgstes? If yes, we are done */
+	if (mm_has_pgste(mm))
+		return 0;
+	/* Fail if the page tables are 2K */
+	if (!mm_alloc_pgste(mm))
+		return -EINVAL;
+	down_write(&mm->mmap_sem);
+	mm->context.has_pgste = 1;
+	/* split thp mappings and disable thp for future mappings */
+	thp_split_mm(mm);
+	up_write(&mm->mmap_sem);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(s390_enable_sie);
+
+/*
+ * Enable storage key handling from now on and initialize the storage
+ * keys with the default key.
+ */
+static int __s390_enable_skey(pte_t *pte, unsigned long addr,
+			      unsigned long next, struct mm_walk *walk)
+{
+	/*
+	 * Remove all zero page mappings; after establishing a policy to
+	 * forbid zero page mappings, subsequent faults for those pages will
+	 * get fresh anonymous pages.
+	 */
+	if (is_zero_pfn(pte_pfn(*pte)))
+		ptep_xchg_direct(walk->mm, addr, pte, __pte(_PAGE_INVALID));
+	/* Clear storage key */
+	ptep_zap_key(walk->mm, addr, pte);
+	return 0;
+}
+
+int s390_enable_skey(void)
+{
+	struct mm_walk walk = { .pte_entry = __s390_enable_skey };
+	struct mm_struct *mm = current->mm;
+	struct vm_area_struct *vma;
+	int rc = 0;
+
+	down_write(&mm->mmap_sem);
+	if (mm_use_skey(mm))
+		goto out_up;
+
+	mm->context.use_skey = 1;
+	for (vma = mm->mmap; vma; vma = vma->vm_next) {
+		if (ksm_madvise(vma, vma->vm_start, vma->vm_end,
+				MADV_UNMERGEABLE, &vma->vm_flags)) {
+			mm->context.use_skey = 0;
+			rc = -ENOMEM;
+			goto out_up;
+		}
+	}
+	mm->def_flags &= ~VM_MERGEABLE;
+
+	walk.mm = mm;
+	walk_page_range(0, TASK_SIZE, &walk);
+
+out_up:
+	up_write(&mm->mmap_sem);
+	return rc;
+}
+EXPORT_SYMBOL_GPL(s390_enable_skey);
+
+/*
+ * Reset CMMA state, make all pages stable again.
+ */
+static int __s390_reset_cmma(pte_t *pte, unsigned long addr,
+			     unsigned long next, struct mm_walk *walk)
+{
+	ptep_zap_unused(walk->mm, addr, pte, 1);
+	return 0;
+}
+
+void s390_reset_cmma(struct mm_struct *mm)
+{
+	struct mm_walk walk = { .pte_entry = __s390_reset_cmma };
+
+	down_write(&mm->mmap_sem);
+	walk.mm = mm;
+	walk_page_range(0, TASK_SIZE, &walk);
+	up_write(&mm->mmap_sem);
+}
+EXPORT_SYMBOL_GPL(s390_reset_cmma);
diff --git a/arch/s390/mm/hugetlbpage.c b/arch/s390/mm/hugetlbpage.c
index f81096b..1b5e898 100644
--- a/arch/s390/mm/hugetlbpage.c
+++ b/arch/s390/mm/hugetlbpage.c
@@ -105,11 +105,10 @@
 			      unsigned long addr, pte_t *ptep)
 {
 	pmd_t *pmdp = (pmd_t *) ptep;
-	pte_t pte = huge_ptep_get(ptep);
+	pmd_t old;
 
-	pmdp_flush_direct(mm, addr, pmdp);
-	pmd_val(*pmdp) = _SEGMENT_ENTRY_EMPTY;
-	return pte;
+	old = pmdp_xchg_direct(mm, addr, pmdp, __pmd(_SEGMENT_ENTRY_EMPTY));
+	return __pmd_to_pte(old);
 }
 
 pte_t *huge_pte_alloc(struct mm_struct *mm,
diff --git a/arch/s390/mm/pageattr.c b/arch/s390/mm/pageattr.c
index 749c984..f2a5c29 100644
--- a/arch/s390/mm/pageattr.c
+++ b/arch/s390/mm/pageattr.c
@@ -65,19 +65,17 @@
 static void change_page_attr(unsigned long addr, int numpages,
 			     pte_t (*set) (pte_t))
 {
-	pte_t *ptep, pte;
+	pte_t *ptep;
 	int i;
 
 	for (i = 0; i < numpages; i++) {
 		ptep = walk_page_table(addr);
 		if (WARN_ON_ONCE(!ptep))
 			break;
-		pte = *ptep;
-		pte = set(pte);
-		__ptep_ipte(addr, ptep);
-		*ptep = pte;
+		*ptep = set(*ptep);
 		addr += PAGE_SIZE;
 	}
+	__tlb_flush_kernel();
 }
 
 int set_memory_ro(unsigned long addr, int numpages)
diff --git a/arch/s390/mm/pgalloc.c b/arch/s390/mm/pgalloc.c
new file mode 100644
index 0000000..f6c3de2
--- /dev/null
+++ b/arch/s390/mm/pgalloc.c
@@ -0,0 +1,360 @@
+/*
+ *  Page table allocation functions
+ *
+ *    Copyright IBM Corp. 2016
+ *    Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
+ */
+
+#include <linux/mm.h>
+#include <linux/sysctl.h>
+#include <asm/mmu_context.h>
+#include <asm/pgalloc.h>
+#include <asm/gmap.h>
+#include <asm/tlb.h>
+#include <asm/tlbflush.h>
+
+#ifdef CONFIG_PGSTE
+
+static int page_table_allocate_pgste_min = 0;
+static int page_table_allocate_pgste_max = 1;
+int page_table_allocate_pgste = 0;
+EXPORT_SYMBOL(page_table_allocate_pgste);
+
+static struct ctl_table page_table_sysctl[] = {
+	{
+		.procname	= "allocate_pgste",
+		.data		= &page_table_allocate_pgste,
+		.maxlen		= sizeof(int),
+		.mode		= S_IRUGO | S_IWUSR,
+		.proc_handler	= proc_dointvec,
+		.extra1		= &page_table_allocate_pgste_min,
+		.extra2		= &page_table_allocate_pgste_max,
+	},
+	{ }
+};
+
+static struct ctl_table page_table_sysctl_dir[] = {
+	{
+		.procname	= "vm",
+		.maxlen		= 0,
+		.mode		= 0555,
+		.child		= page_table_sysctl,
+	},
+	{ }
+};
+
+static int __init page_table_register_sysctl(void)
+{
+	return register_sysctl_table(page_table_sysctl_dir) ? 0 : -ENOMEM;
+}
+__initcall(page_table_register_sysctl);
+
+#endif /* CONFIG_PGSTE */
+
+unsigned long *crst_table_alloc(struct mm_struct *mm)
+{
+	struct page *page = alloc_pages(GFP_KERNEL, 2);
+
+	if (!page)
+		return NULL;
+	return (unsigned long *) page_to_phys(page);
+}
+
+void crst_table_free(struct mm_struct *mm, unsigned long *table)
+{
+	free_pages((unsigned long) table, 2);
+}
+
+static void __crst_table_upgrade(void *arg)
+{
+	struct mm_struct *mm = arg;
+
+	if (current->active_mm == mm) {
+		clear_user_asce();
+		set_user_asce(mm);
+	}
+	__tlb_flush_local();
+}
+
+int crst_table_upgrade(struct mm_struct *mm, unsigned long limit)
+{
+	unsigned long *table, *pgd;
+	unsigned long entry;
+	int flush;
+
+	BUG_ON(limit > TASK_MAX_SIZE);
+	flush = 0;
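+	/*
+	 * Comment added for clarity: each pass adds one translation level;
+	 * repeat until the address space limit covers the requested limit.
+	 */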
+repeat:
+	table = crst_table_alloc(mm);
+	if (!table)
+		return -ENOMEM;
+	spin_lock_bh(&mm->page_table_lock);
+	if (mm->context.asce_limit < limit) {
+		pgd = (unsigned long *) mm->pgd;
+		if (mm->context.asce_limit <= (1UL << 31)) {
+			entry = _REGION3_ENTRY_EMPTY;
+			mm->context.asce_limit = 1UL << 42;
+			mm->context.asce_bits = _ASCE_TABLE_LENGTH |
+						_ASCE_USER_BITS |
+						_ASCE_TYPE_REGION3;
+		} else {
+			entry = _REGION2_ENTRY_EMPTY;
+			mm->context.asce_limit = 1UL << 53;
+			mm->context.asce_bits = _ASCE_TABLE_LENGTH |
+						_ASCE_USER_BITS |
+						_ASCE_TYPE_REGION2;
+		}
+		crst_table_init(table, entry);
+		pgd_populate(mm, (pgd_t *) table, (pud_t *) pgd);
+		mm->pgd = (pgd_t *) table;
+		mm->task_size = mm->context.asce_limit;
+		table = NULL;
+		flush = 1;
+	}
+	spin_unlock_bh(&mm->page_table_lock);
+	if (table)
+		crst_table_free(mm, table);
+	if (mm->context.asce_limit < limit)
+		goto repeat;
+	if (flush)
+		on_each_cpu(__crst_table_upgrade, mm, 0);
+	return 0;
+}
+
+void crst_table_downgrade(struct mm_struct *mm, unsigned long limit)
+{
+	pgd_t *pgd;
+
+	if (current->active_mm == mm) {
+		clear_user_asce();
+		__tlb_flush_mm(mm);
+	}
+	while (mm->context.asce_limit > limit) {
+		pgd = mm->pgd;
+		switch (pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) {
+		case _REGION_ENTRY_TYPE_R2:
+			mm->context.asce_limit = 1UL << 42;
+			mm->context.asce_bits = _ASCE_TABLE_LENGTH |
+						_ASCE_USER_BITS |
+						_ASCE_TYPE_REGION3;
+			break;
+		case _REGION_ENTRY_TYPE_R3:
+			mm->context.asce_limit = 1UL << 31;
+			mm->context.asce_bits = _ASCE_TABLE_LENGTH |
+						_ASCE_USER_BITS |
+						_ASCE_TYPE_SEGMENT;
+			break;
+		default:
+			BUG();
+		}
+		mm->pgd = (pgd_t *) (pgd_val(*pgd) & _REGION_ENTRY_ORIGIN);
+		mm->task_size = mm->context.asce_limit;
+		crst_table_free(mm, (unsigned long *) pgd);
+	}
+	if (current->active_mm == mm)
+		set_user_asce(mm);
+}
+
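+/*
+ * Comment added for clarity: atomically toggle the given bits in *v and
+ * return the new value. Used to track the state of the 2K page table
+ * fragments in page->_mapcount.
+ */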
+static inline unsigned int atomic_xor_bits(atomic_t *v, unsigned int bits)
+{
+	unsigned int old, new;
+
+	do {
+		old = atomic_read(v);
+		new = old ^ bits;
+	} while (atomic_cmpxchg(v, old, new) != old);
+	return new;
+}
+
+/*
+ * page table entry allocation/free routines.
+ */
+unsigned long *page_table_alloc(struct mm_struct *mm)
+{
+	unsigned long *table;
+	struct page *page;
+	unsigned int mask, bit;
+
+	/* Try to get a fragment of a 4K page as a 2K page table */
+	if (!mm_alloc_pgste(mm)) {
+		table = NULL;
+		spin_lock_bh(&mm->context.list_lock);
+		if (!list_empty(&mm->context.pgtable_list)) {
+			page = list_first_entry(&mm->context.pgtable_list,
+						struct page, lru);
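+			/*
+			 * Comment added for clarity: bits 0-1 of _mapcount
+			 * flag the two 2K halves as allocated, bits 4-5 flag
+			 * them as pending free; a half can be handed out only
+			 * if both of its bits are clear.
+			 */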
+			mask = atomic_read(&page->_mapcount);
+			mask = (mask | (mask >> 4)) & 3;
+			if (mask != 3) {
+				table = (unsigned long *) page_to_phys(page);
+				bit = mask & 1;		/* =1 -> second 2K */
+				if (bit)
+					table += PTRS_PER_PTE;
+				atomic_xor_bits(&page->_mapcount, 1U << bit);
+				list_del(&page->lru);
+			}
+		}
+		spin_unlock_bh(&mm->context.list_lock);
+		if (table)
+			return table;
+	}
+	/* Allocate a fresh page */
+	page = alloc_page(GFP_KERNEL|__GFP_REPEAT);
+	if (!page)
+		return NULL;
+	if (!pgtable_page_ctor(page)) {
+		__free_page(page);
+		return NULL;
+	}
+	/* Initialize page table */
+	table = (unsigned long *) page_to_phys(page);
+	if (mm_alloc_pgste(mm)) {
+		/* Return 4K page table with PGSTEs */
+		atomic_set(&page->_mapcount, 3);
+		clear_table(table, _PAGE_INVALID, PAGE_SIZE/2);
+		clear_table(table + PTRS_PER_PTE, 0, PAGE_SIZE/2);
+	} else {
+		/* Return the first 2K fragment of the page */
+		atomic_set(&page->_mapcount, 1);
+		clear_table(table, _PAGE_INVALID, PAGE_SIZE);
+		spin_lock_bh(&mm->context.list_lock);
+		list_add(&page->lru, &mm->context.pgtable_list);
+		spin_unlock_bh(&mm->context.list_lock);
+	}
+	return table;
+}
+
+void page_table_free(struct mm_struct *mm, unsigned long *table)
+{
+	struct page *page;
+	unsigned int bit, mask;
+
+	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
+	if (!mm_alloc_pgste(mm)) {
+		/* Free 2K page table fragment of a 4K page */
+		bit = (__pa(table) & ~PAGE_MASK)/(PTRS_PER_PTE*sizeof(pte_t));
+		spin_lock_bh(&mm->context.list_lock);
+		mask = atomic_xor_bits(&page->_mapcount, 1U << bit);
+		if (mask & 3)
+			list_add(&page->lru, &mm->context.pgtable_list);
+		else
+			list_del(&page->lru);
+		spin_unlock_bh(&mm->context.list_lock);
+		if (mask != 0)
+			return;
+	}
+
+	pgtable_page_dtor(page);
+	atomic_set(&page->_mapcount, -1);
+	__free_page(page);
+}
+
+void page_table_free_rcu(struct mmu_gather *tlb, unsigned long *table,
+			 unsigned long vmaddr)
+{
+	struct mm_struct *mm;
+	struct page *page;
+	unsigned int bit, mask;
+
+	mm = tlb->mm;
+	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
+	if (mm_alloc_pgste(mm)) {
+		gmap_unlink(mm, table, vmaddr);
+		table = (unsigned long *) (__pa(table) | 3);
+		tlb_remove_table(tlb, table);
+		return;
+	}
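+	/*
+	 * Comment added for clarity: mark the 2K fragment as pending free in
+	 * bits 4-5 of _mapcount and encode which half is freed in the low
+	 * bits of the table pointer for __tlb_remove_table.
+	 */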
+	bit = (__pa(table) & ~PAGE_MASK) / (PTRS_PER_PTE*sizeof(pte_t));
+	spin_lock_bh(&mm->context.list_lock);
+	mask = atomic_xor_bits(&page->_mapcount, 0x11U << bit);
+	if (mask & 3)
+		list_add_tail(&page->lru, &mm->context.pgtable_list);
+	else
+		list_del(&page->lru);
+	spin_unlock_bh(&mm->context.list_lock);
+	table = (unsigned long *) (__pa(table) | (1U << bit));
+	tlb_remove_table(tlb, table);
+}
+
+static void __tlb_remove_table(void *_table)
+{
+	unsigned int mask = (unsigned long) _table & 3;
+	void *table = (void *)((unsigned long) _table ^ mask);
+	struct page *page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
+
+	switch (mask) {
+	case 0:		/* pmd or pud */
+		free_pages((unsigned long) table, 2);
+		break;
+	case 1:		/* lower 2K of a 4K page table */
+	case 2:		/* higher 2K of a 4K page table */
+		if (atomic_xor_bits(&page->_mapcount, mask << 4) != 0)
+			break;
+		/* fallthrough */
+	case 3:		/* 4K page table with pgstes */
+		pgtable_page_dtor(page);
+		atomic_set(&page->_mapcount, -1);
+		__free_page(page);
+		break;
+	}
+}
+
+static void tlb_remove_table_smp_sync(void *arg)
+{
+	/* Simply deliver the interrupt */
+}
+
+static void tlb_remove_table_one(void *table)
+{
+	/*
+	 * This isn't an RCU grace period and hence the page-tables cannot be
+	 * assumed to be actually RCU-freed.
+	 *
+	 * It is however sufficient for software page-table walkers that rely
+	 * on IRQ disabling. See the comment near struct mmu_table_batch.
+	 */
+	smp_call_function(tlb_remove_table_smp_sync, NULL, 1);
+	__tlb_remove_table(table);
+}
+
+static void tlb_remove_table_rcu(struct rcu_head *head)
+{
+	struct mmu_table_batch *batch;
+	int i;
+
+	batch = container_of(head, struct mmu_table_batch, rcu);
+
+	for (i = 0; i < batch->nr; i++)
+		__tlb_remove_table(batch->tables[i]);
+
+	free_page((unsigned long)batch);
+}
+
+void tlb_table_flush(struct mmu_gather *tlb)
+{
+	struct mmu_table_batch **batch = &tlb->batch;
+
+	if (*batch) {
+		call_rcu_sched(&(*batch)->rcu, tlb_remove_table_rcu);
+		*batch = NULL;
+	}
+}
+
+void tlb_remove_table(struct mmu_gather *tlb, void *table)
+{
+	struct mmu_table_batch **batch = &tlb->batch;
+
+	tlb->mm->context.flush_mm = 1;
+	if (*batch == NULL) {
+		*batch = (struct mmu_table_batch *)
+			__get_free_page(GFP_NOWAIT | __GFP_NOWARN);
+		if (*batch == NULL) {
+			__tlb_flush_mm_lazy(tlb->mm);
+			tlb_remove_table_one(table);
+			return;
+		}
+		(*batch)->nr = 0;
+	}
+	(*batch)->tables[(*batch)->nr++] = table;
+	if ((*batch)->nr == MAX_TABLE_BATCH)
+		tlb_flush_mmu(tlb);
+}
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index 5109827..4324b87 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -24,1312 +24,333 @@
 #include <asm/tlbflush.h>
 #include <asm/mmu_context.h>
 
-unsigned long *crst_table_alloc(struct mm_struct *mm)
+static inline pte_t ptep_flush_direct(struct mm_struct *mm,
+				      unsigned long addr, pte_t *ptep)
 {
-	struct page *page = alloc_pages(GFP_KERNEL, 2);
+	int active, count;
+	pte_t old;
 
-	if (!page)
-		return NULL;
-	return (unsigned long *) page_to_phys(page);
-}
-
-void crst_table_free(struct mm_struct *mm, unsigned long *table)
-{
-	free_pages((unsigned long) table, 2);
-}
-
-static void __crst_table_upgrade(void *arg)
-{
-	struct mm_struct *mm = arg;
-
-	if (current->active_mm == mm) {
-		clear_user_asce();
-		set_user_asce(mm);
-	}
-	__tlb_flush_local();
-}
-
-int crst_table_upgrade(struct mm_struct *mm, unsigned long limit)
-{
-	unsigned long *table, *pgd;
-	unsigned long entry;
-	int flush;
-
-	BUG_ON(limit > TASK_MAX_SIZE);
-	flush = 0;
-repeat:
-	table = crst_table_alloc(mm);
-	if (!table)
-		return -ENOMEM;
-	spin_lock_bh(&mm->page_table_lock);
-	if (mm->context.asce_limit < limit) {
-		pgd = (unsigned long *) mm->pgd;
-		if (mm->context.asce_limit <= (1UL << 31)) {
-			entry = _REGION3_ENTRY_EMPTY;
-			mm->context.asce_limit = 1UL << 42;
-			mm->context.asce_bits = _ASCE_TABLE_LENGTH |
-						_ASCE_USER_BITS |
-						_ASCE_TYPE_REGION3;
-		} else {
-			entry = _REGION2_ENTRY_EMPTY;
-			mm->context.asce_limit = 1UL << 53;
-			mm->context.asce_bits = _ASCE_TABLE_LENGTH |
-						_ASCE_USER_BITS |
-						_ASCE_TYPE_REGION2;
-		}
-		crst_table_init(table, entry);
-		pgd_populate(mm, (pgd_t *) table, (pud_t *) pgd);
-		mm->pgd = (pgd_t *) table;
-		mm->task_size = mm->context.asce_limit;
-		table = NULL;
-		flush = 1;
-	}
-	spin_unlock_bh(&mm->page_table_lock);
-	if (table)
-		crst_table_free(mm, table);
-	if (mm->context.asce_limit < limit)
-		goto repeat;
-	if (flush)
-		on_each_cpu(__crst_table_upgrade, mm, 0);
-	return 0;
-}
-
-void crst_table_downgrade(struct mm_struct *mm, unsigned long limit)
-{
-	pgd_t *pgd;
-
-	if (current->active_mm == mm) {
-		clear_user_asce();
-		__tlb_flush_mm(mm);
-	}
-	while (mm->context.asce_limit > limit) {
-		pgd = mm->pgd;
-		switch (pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) {
-		case _REGION_ENTRY_TYPE_R2:
-			mm->context.asce_limit = 1UL << 42;
-			mm->context.asce_bits = _ASCE_TABLE_LENGTH |
-						_ASCE_USER_BITS |
-						_ASCE_TYPE_REGION3;
-			break;
-		case _REGION_ENTRY_TYPE_R3:
-			mm->context.asce_limit = 1UL << 31;
-			mm->context.asce_bits = _ASCE_TABLE_LENGTH |
-						_ASCE_USER_BITS |
-						_ASCE_TYPE_SEGMENT;
-			break;
-		default:
-			BUG();
-		}
-		mm->pgd = (pgd_t *) (pgd_val(*pgd) & _REGION_ENTRY_ORIGIN);
-		mm->task_size = mm->context.asce_limit;
-		crst_table_free(mm, (unsigned long *) pgd);
-	}
-	if (current->active_mm == mm)
-		set_user_asce(mm);
-}
-
-#ifdef CONFIG_PGSTE
-
-/**
- * gmap_alloc - allocate a guest address space
- * @mm: pointer to the parent mm_struct
- * @limit: maximum address of the gmap address space
- *
- * Returns a guest address space structure.
- */
-struct gmap *gmap_alloc(struct mm_struct *mm, unsigned long limit)
-{
-	struct gmap *gmap;
-	struct page *page;
-	unsigned long *table;
-	unsigned long etype, atype;
-
-	if (limit < (1UL << 31)) {
-		limit = (1UL << 31) - 1;
-		atype = _ASCE_TYPE_SEGMENT;
-		etype = _SEGMENT_ENTRY_EMPTY;
-	} else if (limit < (1UL << 42)) {
-		limit = (1UL << 42) - 1;
-		atype = _ASCE_TYPE_REGION3;
-		etype = _REGION3_ENTRY_EMPTY;
-	} else if (limit < (1UL << 53)) {
-		limit = (1UL << 53) - 1;
-		atype = _ASCE_TYPE_REGION2;
-		etype = _REGION2_ENTRY_EMPTY;
-	} else {
-		limit = -1UL;
-		atype = _ASCE_TYPE_REGION1;
-		etype = _REGION1_ENTRY_EMPTY;
-	}
-	gmap = kzalloc(sizeof(struct gmap), GFP_KERNEL);
-	if (!gmap)
-		goto out;
-	INIT_LIST_HEAD(&gmap->crst_list);
-	INIT_RADIX_TREE(&gmap->guest_to_host, GFP_KERNEL);
-	INIT_RADIX_TREE(&gmap->host_to_guest, GFP_ATOMIC);
-	spin_lock_init(&gmap->guest_table_lock);
-	gmap->mm = mm;
-	page = alloc_pages(GFP_KERNEL, 2);
-	if (!page)
-		goto out_free;
-	page->index = 0;
-	list_add(&page->lru, &gmap->crst_list);
-	table = (unsigned long *) page_to_phys(page);
-	crst_table_init(table, etype);
-	gmap->table = table;
-	gmap->asce = atype | _ASCE_TABLE_LENGTH |
-		_ASCE_USER_BITS | __pa(table);
-	gmap->asce_end = limit;
-	down_write(&mm->mmap_sem);
-	list_add(&gmap->list, &mm->context.gmap_list);
-	up_write(&mm->mmap_sem);
-	return gmap;
-
-out_free:
-	kfree(gmap);
-out:
-	return NULL;
-}
-EXPORT_SYMBOL_GPL(gmap_alloc);
-
-static void gmap_flush_tlb(struct gmap *gmap)
-{
-	if (MACHINE_HAS_IDTE)
-		__tlb_flush_asce(gmap->mm, gmap->asce);
+	old = *ptep;
+	if (unlikely(pte_val(old) & _PAGE_INVALID))
+		return old;
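+	/*
+	 * Comment added for clarity: mark a flush in progress in the upper
+	 * half of attach_count; if the mm is attached to this CPU only, the
+	 * cheaper local IPTE suffices.
+	 */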
+	active = (mm == current->active_mm) ? 1 : 0;
+	count = atomic_add_return(0x10000, &mm->context.attach_count);
+	if (MACHINE_HAS_TLB_LC && (count & 0xffff) <= active &&
+	    cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id())))
+		__ptep_ipte_local(addr, ptep);
 	else
-		__tlb_flush_global();
+		__ptep_ipte(addr, ptep);
+	atomic_sub(0x10000, &mm->context.attach_count);
+	return old;
 }
 
-static void gmap_radix_tree_free(struct radix_tree_root *root)
+static inline pte_t ptep_flush_lazy(struct mm_struct *mm,
+				    unsigned long addr, pte_t *ptep)
 {
-	struct radix_tree_iter iter;
-	unsigned long indices[16];
-	unsigned long index;
-	void **slot;
-	int i, nr;
+	int active, count;
+	pte_t old;
 
-	/* A radix tree is freed by deleting all of its entries */
-	index = 0;
-	do {
-		nr = 0;
-		radix_tree_for_each_slot(slot, root, &iter, index) {
-			indices[nr] = iter.index;
-			if (++nr == 16)
-				break;
-		}
-		for (i = 0; i < nr; i++) {
-			index = indices[i];
-			radix_tree_delete(root, index);
-		}
-	} while (nr > 0);
-}
-
-/**
- * gmap_free - free a guest address space
- * @gmap: pointer to the guest address space structure
- */
-void gmap_free(struct gmap *gmap)
-{
-	struct page *page, *next;
-
-	/* Flush tlb. */
-	if (MACHINE_HAS_IDTE)
-		__tlb_flush_asce(gmap->mm, gmap->asce);
-	else
-		__tlb_flush_global();
-
-	/* Free all segment & region tables. */
-	list_for_each_entry_safe(page, next, &gmap->crst_list, lru)
-		__free_pages(page, 2);
-	gmap_radix_tree_free(&gmap->guest_to_host);
-	gmap_radix_tree_free(&gmap->host_to_guest);
-	down_write(&gmap->mm->mmap_sem);
-	list_del(&gmap->list);
-	up_write(&gmap->mm->mmap_sem);
-	kfree(gmap);
-}
-EXPORT_SYMBOL_GPL(gmap_free);
-
-/**
- * gmap_enable - switch primary space to the guest address space
- * @gmap: pointer to the guest address space structure
- */
-void gmap_enable(struct gmap *gmap)
-{
-	S390_lowcore.gmap = (unsigned long) gmap;
-}
-EXPORT_SYMBOL_GPL(gmap_enable);
-
-/**
- * gmap_disable - switch back to the standard primary address space
- * @gmap: pointer to the guest address space structure
- */
-void gmap_disable(struct gmap *gmap)
-{
-	S390_lowcore.gmap = 0UL;
-}
-EXPORT_SYMBOL_GPL(gmap_disable);
-
-/*
- * gmap_alloc_table is assumed to be called with mmap_sem held
- */
-static int gmap_alloc_table(struct gmap *gmap, unsigned long *table,
-			    unsigned long init, unsigned long gaddr)
-{
-	struct page *page;
-	unsigned long *new;
-
-	/* since we dont free the gmap table until gmap_free we can unlock */
-	page = alloc_pages(GFP_KERNEL, 2);
-	if (!page)
-		return -ENOMEM;
-	new = (unsigned long *) page_to_phys(page);
-	crst_table_init(new, init);
-	spin_lock(&gmap->mm->page_table_lock);
-	if (*table & _REGION_ENTRY_INVALID) {
-		list_add(&page->lru, &gmap->crst_list);
-		*table = (unsigned long) new | _REGION_ENTRY_LENGTH |
-			(*table & _REGION_ENTRY_TYPE_MASK);
-		page->index = gaddr;
-		page = NULL;
-	}
-	spin_unlock(&gmap->mm->page_table_lock);
-	if (page)
-		__free_pages(page, 2);
-	return 0;
-}
-
-/**
- * __gmap_segment_gaddr - find virtual address from segment pointer
- * @entry: pointer to a segment table entry in the guest address space
- *
- * Returns the virtual address in the guest address space for the segment
- */
-static unsigned long __gmap_segment_gaddr(unsigned long *entry)
-{
-	struct page *page;
-	unsigned long offset, mask;
-
-	offset = (unsigned long) entry / sizeof(unsigned long);
-	offset = (offset & (PTRS_PER_PMD - 1)) * PMD_SIZE;
-	mask = ~(PTRS_PER_PMD * sizeof(pmd_t) - 1);
-	page = virt_to_page((void *)((unsigned long) entry & mask));
-	return page->index + offset;
-}
-
-/**
- * __gmap_unlink_by_vmaddr - unlink a single segment via a host address
- * @gmap: pointer to the guest address space structure
- * @vmaddr: address in the host process address space
- *
- * Returns 1 if a TLB flush is required
- */
-static int __gmap_unlink_by_vmaddr(struct gmap *gmap, unsigned long vmaddr)
-{
-	unsigned long *entry;
-	int flush = 0;
-
-	spin_lock(&gmap->guest_table_lock);
-	entry = radix_tree_delete(&gmap->host_to_guest, vmaddr >> PMD_SHIFT);
-	if (entry) {
-		flush = (*entry != _SEGMENT_ENTRY_INVALID);
-		*entry = _SEGMENT_ENTRY_INVALID;
-	}
-	spin_unlock(&gmap->guest_table_lock);
-	return flush;
-}
-
-/**
- * __gmap_unmap_by_gaddr - unmap a single segment via a guest address
- * @gmap: pointer to the guest address space structure
- * @gaddr: address in the guest address space
- *
- * Returns 1 if a TLB flush is required
- */
-static int __gmap_unmap_by_gaddr(struct gmap *gmap, unsigned long gaddr)
-{
-	unsigned long vmaddr;
-
-	vmaddr = (unsigned long) radix_tree_delete(&gmap->guest_to_host,
-						   gaddr >> PMD_SHIFT);
-	return vmaddr ? __gmap_unlink_by_vmaddr(gmap, vmaddr) : 0;
-}
-
-/**
- * gmap_unmap_segment - unmap segment from the guest address space
- * @gmap: pointer to the guest address space structure
- * @to: address in the guest address space
- * @len: length of the memory area to unmap
- *
- * Returns 0 if the unmap succeeded, -EINVAL if not.
- */
-int gmap_unmap_segment(struct gmap *gmap, unsigned long to, unsigned long len)
-{
-	unsigned long off;
-	int flush;
-
-	if ((to | len) & (PMD_SIZE - 1))
-		return -EINVAL;
-	if (len == 0 || to + len < to)
-		return -EINVAL;
-
-	flush = 0;
-	down_write(&gmap->mm->mmap_sem);
-	for (off = 0; off < len; off += PMD_SIZE)
-		flush |= __gmap_unmap_by_gaddr(gmap, to + off);
-	up_write(&gmap->mm->mmap_sem);
-	if (flush)
-		gmap_flush_tlb(gmap);
-	return 0;
-}
-EXPORT_SYMBOL_GPL(gmap_unmap_segment);
-
-/**
- * gmap_mmap_segment - map a segment to the guest address space
- * @gmap: pointer to the guest address space structure
- * @from: source address in the parent address space
- * @to: target address in the guest address space
- * @len: length of the memory area to map
- *
- * Returns 0 if the mmap succeeded, -EINVAL or -ENOMEM if not.
- */
-int gmap_map_segment(struct gmap *gmap, unsigned long from,
-		     unsigned long to, unsigned long len)
-{
-	unsigned long off;
-	int flush;
-
-	if ((from | to | len) & (PMD_SIZE - 1))
-		return -EINVAL;
-	if (len == 0 || from + len < from || to + len < to ||
-	    from + len - 1 > TASK_MAX_SIZE || to + len - 1 > gmap->asce_end)
-		return -EINVAL;
-
-	flush = 0;
-	down_write(&gmap->mm->mmap_sem);
-	for (off = 0; off < len; off += PMD_SIZE) {
-		/* Remove old translation */
-		flush |= __gmap_unmap_by_gaddr(gmap, to + off);
-		/* Store new translation */
-		if (radix_tree_insert(&gmap->guest_to_host,
-				      (to + off) >> PMD_SHIFT,
-				      (void *) from + off))
-			break;
-	}
-	up_write(&gmap->mm->mmap_sem);
-	if (flush)
-		gmap_flush_tlb(gmap);
-	if (off >= len)
-		return 0;
-	gmap_unmap_segment(gmap, to, len);
-	return -ENOMEM;
-}
-EXPORT_SYMBOL_GPL(gmap_map_segment);
-
-/**
- * __gmap_translate - translate a guest address to a user space address
- * @gmap: pointer to guest mapping meta data structure
- * @gaddr: guest address
- *
- * Returns user space address which corresponds to the guest address or
- * -EFAULT if no such mapping exists.
- * This function does not establish potentially missing page table entries.
- * The mmap_sem of the mm that belongs to the address space must be held
- * when this function gets called.
- */
-unsigned long __gmap_translate(struct gmap *gmap, unsigned long gaddr)
-{
-	unsigned long vmaddr;
-
-	vmaddr = (unsigned long)
-		radix_tree_lookup(&gmap->guest_to_host, gaddr >> PMD_SHIFT);
-	return vmaddr ? (vmaddr | (gaddr & ~PMD_MASK)) : -EFAULT;
-}
-EXPORT_SYMBOL_GPL(__gmap_translate);
-
-/**
- * gmap_translate - translate a guest address to a user space address
- * @gmap: pointer to guest mapping meta data structure
- * @gaddr: guest address
- *
- * Returns user space address which corresponds to the guest address or
- * -EFAULT if no such mapping exists.
- * This function does not establish potentially missing page table entries.
- */
-unsigned long gmap_translate(struct gmap *gmap, unsigned long gaddr)
-{
-	unsigned long rc;
-
-	down_read(&gmap->mm->mmap_sem);
-	rc = __gmap_translate(gmap, gaddr);
-	up_read(&gmap->mm->mmap_sem);
-	return rc;
-}
-EXPORT_SYMBOL_GPL(gmap_translate);
-
-/**
- * gmap_unlink - disconnect a page table from the gmap shadow tables
- * @gmap: pointer to guest mapping meta data structure
- * @table: pointer to the host page table
- * @vmaddr: vm address associated with the host page table
- */
-static void gmap_unlink(struct mm_struct *mm, unsigned long *table,
-			unsigned long vmaddr)
-{
-	struct gmap *gmap;
-	int flush;
-
-	list_for_each_entry(gmap, &mm->context.gmap_list, list) {
-		flush = __gmap_unlink_by_vmaddr(gmap, vmaddr);
-		if (flush)
-			gmap_flush_tlb(gmap);
-	}
-}
-
-/**
- * gmap_link - set up shadow page tables to connect a host to a guest address
- * @gmap: pointer to guest mapping meta data structure
- * @gaddr: guest address
- * @vmaddr: vm address
- *
- * Returns 0 on success, -ENOMEM for out of memory conditions, and -EFAULT
- * if the vm address is already mapped to a different guest segment.
- * The mmap_sem of the mm that belongs to the address space must be held
- * when this function gets called.
- */
-int __gmap_link(struct gmap *gmap, unsigned long gaddr, unsigned long vmaddr)
-{
-	struct mm_struct *mm;
-	unsigned long *table;
-	spinlock_t *ptl;
-	pgd_t *pgd;
-	pud_t *pud;
-	pmd_t *pmd;
-	int rc;
-
-	/* Create higher level tables in the gmap page table */
-	table = gmap->table;
-	if ((gmap->asce & _ASCE_TYPE_MASK) >= _ASCE_TYPE_REGION1) {
-		table += (gaddr >> 53) & 0x7ff;
-		if ((*table & _REGION_ENTRY_INVALID) &&
-		    gmap_alloc_table(gmap, table, _REGION2_ENTRY_EMPTY,
-				     gaddr & 0xffe0000000000000UL))
-			return -ENOMEM;
-		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
-	}
-	if ((gmap->asce & _ASCE_TYPE_MASK) >= _ASCE_TYPE_REGION2) {
-		table += (gaddr >> 42) & 0x7ff;
-		if ((*table & _REGION_ENTRY_INVALID) &&
-		    gmap_alloc_table(gmap, table, _REGION3_ENTRY_EMPTY,
-				     gaddr & 0xfffffc0000000000UL))
-			return -ENOMEM;
-		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
-	}
-	if ((gmap->asce & _ASCE_TYPE_MASK) >= _ASCE_TYPE_REGION3) {
-		table += (gaddr >> 31) & 0x7ff;
-		if ((*table & _REGION_ENTRY_INVALID) &&
-		    gmap_alloc_table(gmap, table, _SEGMENT_ENTRY_EMPTY,
-				     gaddr & 0xffffffff80000000UL))
-			return -ENOMEM;
-		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
-	}
-	table += (gaddr >> 20) & 0x7ff;
-	/* Walk the parent mm page table */
-	mm = gmap->mm;
-	pgd = pgd_offset(mm, vmaddr);
-	VM_BUG_ON(pgd_none(*pgd));
-	pud = pud_offset(pgd, vmaddr);
-	VM_BUG_ON(pud_none(*pud));
-	pmd = pmd_offset(pud, vmaddr);
-	VM_BUG_ON(pmd_none(*pmd));
-	/* large pmds cannot yet be handled */
-	if (pmd_large(*pmd))
-		return -EFAULT;
-	/* Link gmap segment table entry location to page table. */
-	rc = radix_tree_preload(GFP_KERNEL);
-	if (rc)
-		return rc;
-	ptl = pmd_lock(mm, pmd);
-	spin_lock(&gmap->guest_table_lock);
-	if (*table == _SEGMENT_ENTRY_INVALID) {
-		rc = radix_tree_insert(&gmap->host_to_guest,
-				       vmaddr >> PMD_SHIFT, table);
-		if (!rc)
-			*table = pmd_val(*pmd);
+	old = *ptep;
+	if (unlikely(pte_val(old) & _PAGE_INVALID))
+		return old;
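+	/*
+	 * Comment added for clarity: if no other CPU has the mm attached,
+	 * just mark the pte invalid and let a later flush_mm do the TLB
+	 * flush; otherwise IPTE it now.
+	 */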
+	active = (mm == current->active_mm) ? 1 : 0;
+	count = atomic_add_return(0x10000, &mm->context.attach_count);
+	if ((count & 0xffff) <= active) {
+		pte_val(*ptep) |= _PAGE_INVALID;
+		mm->context.flush_mm = 1;
 	} else
-		rc = 0;
-	spin_unlock(&gmap->guest_table_lock);
-	spin_unlock(ptl);
-	radix_tree_preload_end();
-	return rc;
+		__ptep_ipte(addr, ptep);
+	atomic_sub(0x10000, &mm->context.attach_count);
+	return old;
 }
 
-/**
- * gmap_fault - resolve a fault on a guest address
- * @gmap: pointer to guest mapping meta data structure
- * @gaddr: guest address
- * @fault_flags: flags to pass down to handle_mm_fault()
- *
- * Returns 0 on success, -ENOMEM for out of memory conditions, and -EFAULT
- * if the vm address is already mapped to a different guest segment.
- */
-int gmap_fault(struct gmap *gmap, unsigned long gaddr,
-	       unsigned int fault_flags)
+static inline pgste_t pgste_get_lock(pte_t *ptep)
 {
-	unsigned long vmaddr;
-	int rc;
-	bool unlocked;
+	unsigned long new = 0;
+#ifdef CONFIG_PGSTE
+	unsigned long old;
 
-	down_read(&gmap->mm->mmap_sem);
+	preempt_disable();
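+	/*
+	 * Comment added for clarity: lock the pgste, which is stored
+	 * PTRS_PER_PTE entries after the pte, by setting its PCL bit with
+	 * compare-and-swap.
+	 */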
+	asm(
+		"	lg	%0,%2\n"
+		"0:	lgr	%1,%0\n"
+		"	nihh	%0,0xff7f\n"	/* clear PCL bit in old */
+		"	oihh	%1,0x0080\n"	/* set PCL bit in new */
+		"	csg	%0,%1,%2\n"
+		"	jl	0b\n"
+		: "=&d" (old), "=&d" (new), "=Q" (ptep[PTRS_PER_PTE])
+		: "Q" (ptep[PTRS_PER_PTE]) : "cc", "memory");
+#endif
+	return __pgste(new);
+}
 
-retry:
-	unlocked = false;
-	vmaddr = __gmap_translate(gmap, gaddr);
-	if (IS_ERR_VALUE(vmaddr)) {
-		rc = vmaddr;
-		goto out_up;
-	}
-	if (fixup_user_fault(current, gmap->mm, vmaddr, fault_flags,
-			     &unlocked)) {
-		rc = -EFAULT;
-		goto out_up;
-	}
+static inline void pgste_set_unlock(pte_t *ptep, pgste_t pgste)
+{
+#ifdef CONFIG_PGSTE
+	asm(
+		"	nihh	%1,0xff7f\n"	/* clear PCL bit */
+		"	stg	%1,%0\n"
+		: "=Q" (ptep[PTRS_PER_PTE])
+		: "d" (pgste_val(pgste)), "Q" (ptep[PTRS_PER_PTE])
+		: "cc", "memory");
+	preempt_enable();
+#endif
+}
+
+static inline pgste_t pgste_get(pte_t *ptep)
+{
+	unsigned long pgste = 0;
+#ifdef CONFIG_PGSTE
+	pgste = *(unsigned long *)(ptep + PTRS_PER_PTE);
+#endif
+	return __pgste(pgste);
+}
+
+static inline void pgste_set(pte_t *ptep, pgste_t pgste)
+{
+#ifdef CONFIG_PGSTE
+	*(pgste_t *)(ptep + PTRS_PER_PTE) = pgste;
+#endif
+}
+
+static inline pgste_t pgste_update_all(pte_t pte, pgste_t pgste,
+				       struct mm_struct *mm)
+{
+#ifdef CONFIG_PGSTE
+	unsigned long address, bits, skey;
+
+	if (!mm_use_skey(mm) || pte_val(pte) & _PAGE_INVALID)
+		return pgste;
+	address = pte_val(pte) & PAGE_MASK;
+	skey = (unsigned long) page_get_storage_key(address);
+	bits = skey & (_PAGE_CHANGED | _PAGE_REFERENCED);
+	/* Transfer page changed & referenced bit to guest bits in pgste */
+	pgste_val(pgste) |= bits << 48;		/* GR bit & GC bit */
+	/* Copy page access key and fetch protection bit to pgste */
+	pgste_val(pgste) &= ~(PGSTE_ACC_BITS | PGSTE_FP_BIT);
+	pgste_val(pgste) |= (skey & (_PAGE_ACC_BITS | _PAGE_FP_BIT)) << 56;
+#endif
+	return pgste;
+
+}
+
+static inline void pgste_set_key(pte_t *ptep, pgste_t pgste, pte_t entry,
+				 struct mm_struct *mm)
+{
+#ifdef CONFIG_PGSTE
+	unsigned long address;
+	unsigned long nkey;
+
+	if (!mm_use_skey(mm) || pte_val(entry) & _PAGE_INVALID)
+		return;
+	VM_BUG_ON(!(pte_val(*ptep) & _PAGE_INVALID));
+	address = pte_val(entry) & PAGE_MASK;
 	/*
-	 * In the case that fixup_user_fault unlocked the mmap_sem during
-	 * faultin redo __gmap_translate to not race with a map/unmap_segment.
+	 * Set page access key and fetch protection bit from pgste.
+	 * The guest C/R information is still in the PGSTE, set real
+	 * key C/R to 0.
 	 */
-	if (unlocked)
-		goto retry;
-
-	rc = __gmap_link(gmap, gaddr, vmaddr);
-out_up:
-	up_read(&gmap->mm->mmap_sem);
-	return rc;
-}
-EXPORT_SYMBOL_GPL(gmap_fault);
-
-static void gmap_zap_swap_entry(swp_entry_t entry, struct mm_struct *mm)
-{
-	if (!non_swap_entry(entry))
-		dec_mm_counter(mm, MM_SWAPENTS);
-	else if (is_migration_entry(entry)) {
-		struct page *page = migration_entry_to_page(entry);
-
-		dec_mm_counter(mm, mm_counter(page));
-	}
-	free_swap_and_cache(entry);
+	nkey = (pgste_val(pgste) & (PGSTE_ACC_BITS | PGSTE_FP_BIT)) >> 56;
+	nkey |= (pgste_val(pgste) & (PGSTE_GR_BIT | PGSTE_GC_BIT)) >> 48;
+	page_set_storage_key(address, nkey, 0);
+#endif
 }
 
-/*
- * this function is assumed to be called with mmap_sem held
- */
-void __gmap_zap(struct gmap *gmap, unsigned long gaddr)
+static inline pgste_t pgste_set_pte(pte_t *ptep, pgste_t pgste, pte_t entry)
 {
-	unsigned long vmaddr, ptev, pgstev;
-	pte_t *ptep, pte;
-	spinlock_t *ptl;
-	pgste_t pgste;
-
-	/* Find the vm address for the guest address */
-	vmaddr = (unsigned long) radix_tree_lookup(&gmap->guest_to_host,
-						   gaddr >> PMD_SHIFT);
-	if (!vmaddr)
-		return;
-	vmaddr |= gaddr & ~PMD_MASK;
-	/* Get pointer to the page table entry */
-	ptep = get_locked_pte(gmap->mm, vmaddr, &ptl);
-	if (unlikely(!ptep))
-		return;
-	pte = *ptep;
-	if (!pte_swap(pte))
-		goto out_pte;
-	/* Zap unused and logically-zero pages */
-	pgste = pgste_get_lock(ptep);
-	pgstev = pgste_val(pgste);
-	ptev = pte_val(pte);
-	if (((pgstev & _PGSTE_GPS_USAGE_MASK) == _PGSTE_GPS_USAGE_UNUSED) ||
-	    ((pgstev & _PGSTE_GPS_ZERO) && (ptev & _PAGE_INVALID))) {
-		gmap_zap_swap_entry(pte_to_swp_entry(pte), gmap->mm);
-		pte_clear(gmap->mm, vmaddr, ptep);
-	}
-	pgste_set_unlock(ptep, pgste);
-out_pte:
-	pte_unmap_unlock(ptep, ptl);
-}
-EXPORT_SYMBOL_GPL(__gmap_zap);
-
-void gmap_discard(struct gmap *gmap, unsigned long from, unsigned long to)
-{
-	unsigned long gaddr, vmaddr, size;
-	struct vm_area_struct *vma;
-
-	down_read(&gmap->mm->mmap_sem);
-	for (gaddr = from; gaddr < to;
-	     gaddr = (gaddr + PMD_SIZE) & PMD_MASK) {
-		/* Find the vm address for the guest address */
-		vmaddr = (unsigned long)
-			radix_tree_lookup(&gmap->guest_to_host,
-					  gaddr >> PMD_SHIFT);
-		if (!vmaddr)
-			continue;
-		vmaddr |= gaddr & ~PMD_MASK;
-		/* Find vma in the parent mm */
-		vma = find_vma(gmap->mm, vmaddr);
-		size = min(to - gaddr, PMD_SIZE - (gaddr & ~PMD_MASK));
-		zap_page_range(vma, vmaddr, size, NULL);
-	}
-	up_read(&gmap->mm->mmap_sem);
-}
-EXPORT_SYMBOL_GPL(gmap_discard);
-
-static LIST_HEAD(gmap_notifier_list);
-static DEFINE_SPINLOCK(gmap_notifier_lock);
-
-/**
- * gmap_register_ipte_notifier - register a pte invalidation callback
- * @nb: pointer to the gmap notifier block
- */
-void gmap_register_ipte_notifier(struct gmap_notifier *nb)
-{
-	spin_lock(&gmap_notifier_lock);
-	list_add(&nb->list, &gmap_notifier_list);
-	spin_unlock(&gmap_notifier_lock);
-}
-EXPORT_SYMBOL_GPL(gmap_register_ipte_notifier);
-
-/**
- * gmap_unregister_ipte_notifier - remove a pte invalidation callback
- * @nb: pointer to the gmap notifier block
- */
-void gmap_unregister_ipte_notifier(struct gmap_notifier *nb)
-{
-	spin_lock(&gmap_notifier_lock);
-	list_del_init(&nb->list);
-	spin_unlock(&gmap_notifier_lock);
-}
-EXPORT_SYMBOL_GPL(gmap_unregister_ipte_notifier);
-
-/**
- * gmap_ipte_notify - mark a range of ptes for invalidation notification
- * @gmap: pointer to guest mapping meta data structure
- * @gaddr: virtual address in the guest address space
- * @len: size of area
- *
- * Returns 0 if for each page in the given range a gmap mapping exists and
- * the invalidation notification could be set. If the gmap mapping is missing
- * for one or more pages -EFAULT is returned. If no memory could be allocated
- * -ENOMEM is returned. This function establishes missing page table entries.
- */
-int gmap_ipte_notify(struct gmap *gmap, unsigned long gaddr, unsigned long len)
-{
-	unsigned long addr;
-	spinlock_t *ptl;
-	pte_t *ptep, entry;
-	pgste_t pgste;
-	bool unlocked;
-	int rc = 0;
-
-	if ((gaddr & ~PAGE_MASK) || (len & ~PAGE_MASK))
-		return -EINVAL;
-	down_read(&gmap->mm->mmap_sem);
-	while (len) {
-		unlocked = false;
-		/* Convert gmap address and connect the page tables */
-		addr = __gmap_translate(gmap, gaddr);
-		if (IS_ERR_VALUE(addr)) {
-			rc = addr;
-			break;
+#ifdef CONFIG_PGSTE
+	if ((pte_val(entry) & _PAGE_PRESENT) &&
+	    (pte_val(entry) & _PAGE_WRITE) &&
+	    !(pte_val(entry) & _PAGE_INVALID)) {
+		if (!MACHINE_HAS_ESOP) {
+			/*
+			 * Without enhanced suppression-on-protection force
+			 * the dirty bit on for all writable ptes.
+			 */
+			pte_val(entry) |= _PAGE_DIRTY;
+			pte_val(entry) &= ~_PAGE_PROTECT;
 		}
-		/* Get the page mapped */
-		if (fixup_user_fault(current, gmap->mm, addr, FAULT_FLAG_WRITE,
-				     &unlocked)) {
-			rc = -EFAULT;
-			break;
-		}
-		/* While trying to map mmap_sem got unlocked. Let us retry */
-		if (unlocked)
-			continue;
-		rc = __gmap_link(gmap, gaddr, addr);
-		if (rc)
-			break;
-		/* Walk the process page table, lock and get pte pointer */
-		ptep = get_locked_pte(gmap->mm, addr, &ptl);
-		VM_BUG_ON(!ptep);
-		/* Set notification bit in the pgste of the pte */
-		entry = *ptep;
-		if ((pte_val(entry) & (_PAGE_INVALID | _PAGE_PROTECT)) == 0) {
-			pgste = pgste_get_lock(ptep);
-			pgste_val(pgste) |= PGSTE_IN_BIT;
-			pgste_set_unlock(ptep, pgste);
-			gaddr += PAGE_SIZE;
-			len -= PAGE_SIZE;
-		}
-		pte_unmap_unlock(ptep, ptl);
+		if (!(pte_val(entry) & _PAGE_PROTECT))
+			/* This pte allows write access, set user-dirty */
+			pgste_val(pgste) |= PGSTE_UC_BIT;
 	}
-	up_read(&gmap->mm->mmap_sem);
-	return rc;
+#endif
+	*ptep = entry;
+	return pgste;
 }
-EXPORT_SYMBOL_GPL(gmap_ipte_notify);
 
-/**
- * gmap_do_ipte_notify - call all invalidation callbacks for a specific pte.
- * @mm: pointer to the process mm_struct
- * @addr: virtual address in the process address space
- * @pte: pointer to the page table entry
- *
- * This function is assumed to be called with the page table lock held
- * for the pte to notify.
- */
-void gmap_do_ipte_notify(struct mm_struct *mm, unsigned long vmaddr, pte_t *pte)
+static inline pgste_t pgste_ipte_notify(struct mm_struct *mm,
+					unsigned long addr,
+					pte_t *ptep, pgste_t pgste)
 {
-	unsigned long offset, gaddr;
-	unsigned long *table;
-	struct gmap_notifier *nb;
-	struct gmap *gmap;
-
-	offset = ((unsigned long) pte) & (255 * sizeof(pte_t));
-	offset = offset * (4096 / sizeof(pte_t));
-	spin_lock(&gmap_notifier_lock);
-	list_for_each_entry(gmap, &mm->context.gmap_list, list) {
-		table = radix_tree_lookup(&gmap->host_to_guest,
-					  vmaddr >> PMD_SHIFT);
-		if (!table)
-			continue;
-		gaddr = __gmap_segment_gaddr(table) + offset;
-		list_for_each_entry(nb, &gmap_notifier_list, list)
-			nb->notifier_call(gmap, gaddr);
+#ifdef CONFIG_PGSTE
+	if (pgste_val(pgste) & PGSTE_IN_BIT) {
+		pgste_val(pgste) &= ~PGSTE_IN_BIT;
+		ptep_notify(mm, addr, ptep);
 	}
-	spin_unlock(&gmap_notifier_lock);
+#endif
+	return pgste;
 }
-EXPORT_SYMBOL_GPL(gmap_do_ipte_notify);
 
-int set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
-			  unsigned long key, bool nq)
+static inline pgste_t ptep_xchg_start(struct mm_struct *mm,
+				      unsigned long addr, pte_t *ptep)
 {
-	spinlock_t *ptl;
-	pgste_t old, new;
-	pte_t *ptep;
-	bool unlocked;
+	pgste_t pgste = __pgste(0);
 
-	down_read(&mm->mmap_sem);
-retry:
-	unlocked = false;
-	ptep = get_locked_pte(mm, addr, &ptl);
-	if (unlikely(!ptep)) {
-		up_read(&mm->mmap_sem);
-		return -EFAULT;
+	if (mm_has_pgste(mm)) {
+		pgste = pgste_get_lock(ptep);
+		pgste = pgste_ipte_notify(mm, addr, ptep, pgste);
 	}
-	if (!(pte_val(*ptep) & _PAGE_INVALID) &&
-	     (pte_val(*ptep) & _PAGE_PROTECT)) {
-		pte_unmap_unlock(ptep, ptl);
-		/*
-		 * We do not really care about unlocked. We will retry either
-		 * way. But this allows fixup_user_fault to enable userfaultfd.
-		 */
-		if (fixup_user_fault(current, mm, addr, FAULT_FLAG_WRITE,
-				     &unlocked)) {
-			up_read(&mm->mmap_sem);
-			return -EFAULT;
+	return pgste;
+}
+
+static inline void ptep_xchg_commit(struct mm_struct *mm,
+				    unsigned long addr, pte_t *ptep,
+				    pgste_t pgste, pte_t old, pte_t new)
+{
+	if (mm_has_pgste(mm)) {
+		if (pte_val(old) & _PAGE_INVALID)
+			pgste_set_key(ptep, pgste, new, mm);
+		if (pte_val(new) & _PAGE_INVALID) {
+			pgste = pgste_update_all(old, pgste, mm);
+			if ((pgste_val(pgste) & _PGSTE_GPS_USAGE_MASK) ==
+			    _PGSTE_GPS_USAGE_UNUSED)
+				pte_val(old) |= _PAGE_UNUSED;
 		}
-		goto retry;
-	}
-
-	new = old = pgste_get_lock(ptep);
-	pgste_val(new) &= ~(PGSTE_GR_BIT | PGSTE_GC_BIT |
-			    PGSTE_ACC_BITS | PGSTE_FP_BIT);
-	pgste_val(new) |= (key & (_PAGE_CHANGED | _PAGE_REFERENCED)) << 48;
-	pgste_val(new) |= (key & (_PAGE_ACC_BITS | _PAGE_FP_BIT)) << 56;
-	if (!(pte_val(*ptep) & _PAGE_INVALID)) {
-		unsigned long address, bits, skey;
-
-		address = pte_val(*ptep) & PAGE_MASK;
-		skey = (unsigned long) page_get_storage_key(address);
-		bits = skey & (_PAGE_CHANGED | _PAGE_REFERENCED);
-		skey = key & (_PAGE_ACC_BITS | _PAGE_FP_BIT);
-		/* Set storage key ACC and FP */
-		page_set_storage_key(address, skey, !nq);
-		/* Merge host changed & referenced into pgste  */
-		pgste_val(new) |= bits << 52;
-	}
-	/* changing the guest storage key is considered a change of the page */
-	if ((pgste_val(new) ^ pgste_val(old)) &
-	    (PGSTE_ACC_BITS | PGSTE_FP_BIT | PGSTE_GR_BIT | PGSTE_GC_BIT))
-		pgste_val(new) |= PGSTE_UC_BIT;
-
-	pgste_set_unlock(ptep, new);
-	pte_unmap_unlock(ptep, ptl);
-	up_read(&mm->mmap_sem);
-	return 0;
-}
-EXPORT_SYMBOL(set_guest_storage_key);
-
-unsigned long get_guest_storage_key(struct mm_struct *mm, unsigned long addr)
-{
-	spinlock_t *ptl;
-	pgste_t pgste;
-	pte_t *ptep;
-	uint64_t physaddr;
-	unsigned long key = 0;
-
-	down_read(&mm->mmap_sem);
-	ptep = get_locked_pte(mm, addr, &ptl);
-	if (unlikely(!ptep)) {
-		up_read(&mm->mmap_sem);
-		return -EFAULT;
-	}
-	pgste = pgste_get_lock(ptep);
-
-	if (pte_val(*ptep) & _PAGE_INVALID) {
-		key |= (pgste_val(pgste) & PGSTE_ACC_BITS) >> 56;
-		key |= (pgste_val(pgste) & PGSTE_FP_BIT) >> 56;
-		key |= (pgste_val(pgste) & PGSTE_GR_BIT) >> 48;
-		key |= (pgste_val(pgste) & PGSTE_GC_BIT) >> 48;
+		pgste = pgste_set_pte(ptep, pgste, new);
+		pgste_set_unlock(ptep, pgste);
 	} else {
-		physaddr = pte_val(*ptep) & PAGE_MASK;
-		key = page_get_storage_key(physaddr);
-
-		/* Reflect guest's logical view, not physical */
-		if (pgste_val(pgste) & PGSTE_GR_BIT)
-			key |= _PAGE_REFERENCED;
-		if (pgste_val(pgste) & PGSTE_GC_BIT)
-			key |= _PAGE_CHANGED;
+		*ptep = new;
 	}
-
-	pgste_set_unlock(ptep, pgste);
-	pte_unmap_unlock(ptep, ptl);
-	up_read(&mm->mmap_sem);
-	return key;
-}
-EXPORT_SYMBOL(get_guest_storage_key);
-
-static int page_table_allocate_pgste_min = 0;
-static int page_table_allocate_pgste_max = 1;
-int page_table_allocate_pgste = 0;
-EXPORT_SYMBOL(page_table_allocate_pgste);
-
-static struct ctl_table page_table_sysctl[] = {
-	{
-		.procname	= "allocate_pgste",
-		.data		= &page_table_allocate_pgste,
-		.maxlen		= sizeof(int),
-		.mode		= S_IRUGO | S_IWUSR,
-		.proc_handler	= proc_dointvec,
-		.extra1		= &page_table_allocate_pgste_min,
-		.extra2		= &page_table_allocate_pgste_max,
-	},
-	{ }
-};
-
-static struct ctl_table page_table_sysctl_dir[] = {
-	{
-		.procname	= "vm",
-		.maxlen		= 0,
-		.mode		= 0555,
-		.child		= page_table_sysctl,
-	},
-	{ }
-};
-
-static int __init page_table_register_sysctl(void)
-{
-	return register_sysctl_table(page_table_sysctl_dir) ? 0 : -ENOMEM;
-}
-__initcall(page_table_register_sysctl);
-
-#else /* CONFIG_PGSTE */
-
-static inline void gmap_unlink(struct mm_struct *mm, unsigned long *table,
-			unsigned long vmaddr)
-{
 }
 
-#endif /* CONFIG_PGSTE */
-
-static inline unsigned int atomic_xor_bits(atomic_t *v, unsigned int bits)
+pte_t ptep_xchg_direct(struct mm_struct *mm, unsigned long addr,
+		       pte_t *ptep, pte_t new)
 {
-	unsigned int old, new;
+	pgste_t pgste;
+	pte_t old;
 
-	do {
-		old = atomic_read(v);
-		new = old ^ bits;
-	} while (atomic_cmpxchg(v, old, new) != old);
-	return new;
+	pgste = ptep_xchg_start(mm, addr, ptep);
+	old = ptep_flush_direct(mm, addr, ptep);
+	ptep_xchg_commit(mm, addr, ptep, pgste, old, new);
+	return old;
 }
+EXPORT_SYMBOL(ptep_xchg_direct);
 
-/*
- * page table entry allocation/free routines.
- */
-unsigned long *page_table_alloc(struct mm_struct *mm)
+pte_t ptep_xchg_lazy(struct mm_struct *mm, unsigned long addr,
+		     pte_t *ptep, pte_t new)
 {
-	unsigned long *table;
-	struct page *page;
-	unsigned int mask, bit;
+	pgste_t pgste;
+	pte_t old;
 
-	/* Try to get a fragment of a 4K page as a 2K page table */
-	if (!mm_alloc_pgste(mm)) {
-		table = NULL;
-		spin_lock_bh(&mm->context.list_lock);
-		if (!list_empty(&mm->context.pgtable_list)) {
-			page = list_first_entry(&mm->context.pgtable_list,
-						struct page, lru);
-			mask = atomic_read(&page->_mapcount);
-			mask = (mask | (mask >> 4)) & 3;
-			if (mask != 3) {
-				table = (unsigned long *) page_to_phys(page);
-				bit = mask & 1;		/* =1 -> second 2K */
-				if (bit)
-					table += PTRS_PER_PTE;
-				atomic_xor_bits(&page->_mapcount, 1U << bit);
-				list_del(&page->lru);
-			}
-		}
-		spin_unlock_bh(&mm->context.list_lock);
-		if (table)
-			return table;
+	pgste = ptep_xchg_start(mm, addr, ptep);
+	old = ptep_flush_lazy(mm, addr, ptep);
+	ptep_xchg_commit(mm, addr, ptep, pgste, old, new);
+	return old;
+}
+EXPORT_SYMBOL(ptep_xchg_lazy);
+
+pte_t ptep_modify_prot_start(struct mm_struct *mm, unsigned long addr,
+			     pte_t *ptep)
+{
+	pgste_t pgste;
+	pte_t old;
+
+	pgste = ptep_xchg_start(mm, addr, ptep);
+	old = ptep_flush_lazy(mm, addr, ptep);
+	if (mm_has_pgste(mm)) {
+		pgste = pgste_update_all(old, pgste, mm);
+		pgste_set(ptep, pgste);
 	}
-	/* Allocate a fresh page */
-	page = alloc_page(GFP_KERNEL|__GFP_REPEAT);
-	if (!page)
-		return NULL;
-	if (!pgtable_page_ctor(page)) {
-		__free_page(page);
-		return NULL;
-	}
-	/* Initialize page table */
-	table = (unsigned long *) page_to_phys(page);
-	if (mm_alloc_pgste(mm)) {
-		/* Return 4K page table with PGSTEs */
-		atomic_set(&page->_mapcount, 3);
-		clear_table(table, _PAGE_INVALID, PAGE_SIZE/2);
-		clear_table(table + PTRS_PER_PTE, 0, PAGE_SIZE/2);
+	return old;
+}
+EXPORT_SYMBOL(ptep_modify_prot_start);
+
+void ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr,
+			     pte_t *ptep, pte_t pte)
+{
+	pgste_t pgste;
+
+	if (mm_has_pgste(mm)) {
+		pgste = pgste_get(ptep);
+		pgste_set_key(ptep, pgste, pte, mm);
+		pgste = pgste_set_pte(ptep, pgste, pte);
+		pgste_set_unlock(ptep, pgste);
 	} else {
-		/* Return the first 2K fragment of the page */
-		atomic_set(&page->_mapcount, 1);
-		clear_table(table, _PAGE_INVALID, PAGE_SIZE);
-		spin_lock_bh(&mm->context.list_lock);
-		list_add(&page->lru, &mm->context.pgtable_list);
-		spin_unlock_bh(&mm->context.list_lock);
+		*ptep = pte;
 	}
-	return table;
 }
+EXPORT_SYMBOL(ptep_modify_prot_commit);
 
-void page_table_free(struct mm_struct *mm, unsigned long *table)
+static inline pmd_t pmdp_flush_direct(struct mm_struct *mm,
+				      unsigned long addr, pmd_t *pmdp)
 {
-	struct page *page;
-	unsigned int bit, mask;
+	int active, count;
+	pmd_t old;
 
-	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
-	if (!mm_alloc_pgste(mm)) {
-		/* Free 2K page table fragment of a 4K page */
-		bit = (__pa(table) & ~PAGE_MASK)/(PTRS_PER_PTE*sizeof(pte_t));
-		spin_lock_bh(&mm->context.list_lock);
-		mask = atomic_xor_bits(&page->_mapcount, 1U << bit);
-		if (mask & 3)
-			list_add(&page->lru, &mm->context.pgtable_list);
-		else
-			list_del(&page->lru);
-		spin_unlock_bh(&mm->context.list_lock);
-		if (mask != 0)
-			return;
+	old = *pmdp;
+	if (pmd_val(old) & _SEGMENT_ENTRY_INVALID)
+		return old;
+	if (!MACHINE_HAS_IDTE) {
+		__pmdp_csp(pmdp);
+		return old;
 	}
-
-	pgtable_page_dtor(page);
-	atomic_set(&page->_mapcount, -1);
-	__free_page(page);
-}
-
-void page_table_free_rcu(struct mmu_gather *tlb, unsigned long *table,
-			 unsigned long vmaddr)
-{
-	struct mm_struct *mm;
-	struct page *page;
-	unsigned int bit, mask;
-
-	mm = tlb->mm;
-	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
-	if (mm_alloc_pgste(mm)) {
-		gmap_unlink(mm, table, vmaddr);
-		table = (unsigned long *) (__pa(table) | 3);
-		tlb_remove_table(tlb, table);
-		return;
-	}
-	bit = (__pa(table) & ~PAGE_MASK) / (PTRS_PER_PTE*sizeof(pte_t));
-	spin_lock_bh(&mm->context.list_lock);
-	mask = atomic_xor_bits(&page->_mapcount, 0x11U << bit);
-	if (mask & 3)
-		list_add_tail(&page->lru, &mm->context.pgtable_list);
+	active = (mm == current->active_mm) ? 1 : 0;
+	count = atomic_add_return(0x10000, &mm->context.attach_count);
+	if (MACHINE_HAS_TLB_LC && (count & 0xffff) <= active &&
+	    cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id())))
+		__pmdp_idte_local(addr, pmdp);
 	else
-		list_del(&page->lru);
-	spin_unlock_bh(&mm->context.list_lock);
-	table = (unsigned long *) (__pa(table) | (1U << bit));
-	tlb_remove_table(tlb, table);
+		__pmdp_idte(addr, pmdp);
+	atomic_sub(0x10000, &mm->context.attach_count);
+	return old;
 }
 
-static void __tlb_remove_table(void *_table)
+static inline pmd_t pmdp_flush_lazy(struct mm_struct *mm,
+				    unsigned long addr, pmd_t *pmdp)
 {
-	unsigned int mask = (unsigned long) _table & 3;
-	void *table = (void *)((unsigned long) _table ^ mask);
-	struct page *page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
+	int active, count;
+	pmd_t old;
 
-	switch (mask) {
-	case 0:		/* pmd or pud */
-		free_pages((unsigned long) table, 2);
-		break;
-	case 1:		/* lower 2K of a 4K page table */
-	case 2:		/* higher 2K of a 4K page table */
-		if (atomic_xor_bits(&page->_mapcount, mask << 4) != 0)
-			break;
-		/* fallthrough */
-	case 3:		/* 4K page table with pgstes */
-		pgtable_page_dtor(page);
-		atomic_set(&page->_mapcount, -1);
-		__free_page(page);
-		break;
-	}
+	old = *pmdp;
+	if (pmd_val(old) & _SEGMENT_ENTRY_INVALID)
+		return old;
+	active = (mm == current->active_mm) ? 1 : 0;
+	count = atomic_add_return(0x10000, &mm->context.attach_count);
+	if ((count & 0xffff) <= active) {
+		pmd_val(*pmdp) |= _SEGMENT_ENTRY_INVALID;
+		mm->context.flush_mm = 1;
+	} else if (MACHINE_HAS_IDTE)
+		__pmdp_idte(addr, pmdp);
+	else
+		__pmdp_csp(pmdp);
+	atomic_sub(0x10000, &mm->context.attach_count);
+	return old;
 }
 
-static void tlb_remove_table_smp_sync(void *arg)
+pmd_t pmdp_xchg_direct(struct mm_struct *mm, unsigned long addr,
+		       pmd_t *pmdp, pmd_t new)
 {
-	/* Simply deliver the interrupt */
-}
+	pmd_t old;
 
-static void tlb_remove_table_one(void *table)
+	old = pmdp_flush_direct(mm, addr, pmdp);
+	*pmdp = new;
+	return old;
+}
+EXPORT_SYMBOL(pmdp_xchg_direct);
+
+pmd_t pmdp_xchg_lazy(struct mm_struct *mm, unsigned long addr,
+		     pmd_t *pmdp, pmd_t new)
 {
-	/*
-	 * This isn't an RCU grace period and hence the page-tables cannot be
-	 * assumed to be actually RCU-freed.
-	 *
-	 * It is however sufficient for software page-table walkers that rely
-	 * on IRQ disabling. See the comment near struct mmu_table_batch.
-	 */
-	smp_call_function(tlb_remove_table_smp_sync, NULL, 1);
-	__tlb_remove_table(table);
+	pmd_t old;
+
+	old = pmdp_flush_lazy(mm, addr, pmdp);
+	*pmdp = new;
+	return old;
 }
-
-static void tlb_remove_table_rcu(struct rcu_head *head)
-{
-	struct mmu_table_batch *batch;
-	int i;
-
-	batch = container_of(head, struct mmu_table_batch, rcu);
-
-	for (i = 0; i < batch->nr; i++)
-		__tlb_remove_table(batch->tables[i]);
-
-	free_page((unsigned long)batch);
-}
-
-void tlb_table_flush(struct mmu_gather *tlb)
-{
-	struct mmu_table_batch **batch = &tlb->batch;
-
-	if (*batch) {
-		call_rcu_sched(&(*batch)->rcu, tlb_remove_table_rcu);
-		*batch = NULL;
-	}
-}
-
-void tlb_remove_table(struct mmu_gather *tlb, void *table)
-{
-	struct mmu_table_batch **batch = &tlb->batch;
-
-	tlb->mm->context.flush_mm = 1;
-	if (*batch == NULL) {
-		*batch = (struct mmu_table_batch *)
-			__get_free_page(GFP_NOWAIT | __GFP_NOWARN);
-		if (*batch == NULL) {
-			__tlb_flush_mm_lazy(tlb->mm);
-			tlb_remove_table_one(table);
-			return;
-		}
-		(*batch)->nr = 0;
-	}
-	(*batch)->tables[(*batch)->nr++] = table;
-	if ((*batch)->nr == MAX_TABLE_BATCH)
-		tlb_flush_mmu(tlb);
-}
+EXPORT_SYMBOL(pmdp_xchg_lazy);
 
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
-static inline void thp_split_vma(struct vm_area_struct *vma)
-{
-	unsigned long addr;
-
-	for (addr = vma->vm_start; addr < vma->vm_end; addr += PAGE_SIZE)
-		follow_page(vma, addr, FOLL_SPLIT);
-}
-
-static inline void thp_split_mm(struct mm_struct *mm)
-{
-	struct vm_area_struct *vma;
-
-	for (vma = mm->mmap; vma != NULL; vma = vma->vm_next) {
-		thp_split_vma(vma);
-		vma->vm_flags &= ~VM_HUGEPAGE;
-		vma->vm_flags |= VM_NOHUGEPAGE;
-	}
-	mm->def_flags |= VM_NOHUGEPAGE;
-}
-#else
-static inline void thp_split_mm(struct mm_struct *mm)
-{
-}
-#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
-
-/*
- * switch on pgstes for its userspace process (for kvm)
- */
-int s390_enable_sie(void)
-{
-	struct mm_struct *mm = current->mm;
-
-	/* Do we have pgstes? if yes, we are done */
-	if (mm_has_pgste(mm))
-		return 0;
-	/* Fail if the page tables are 2K */
-	if (!mm_alloc_pgste(mm))
-		return -EINVAL;
-	down_write(&mm->mmap_sem);
-	mm->context.has_pgste = 1;
-	/* split thp mappings and disable thp for future mappings */
-	thp_split_mm(mm);
-	up_write(&mm->mmap_sem);
-	return 0;
-}
-EXPORT_SYMBOL_GPL(s390_enable_sie);
-
-/*
- * Enable storage key handling from now on and initialize the storage
- * keys with the default key.
- */
-static int __s390_enable_skey(pte_t *pte, unsigned long addr,
-			      unsigned long next, struct mm_walk *walk)
-{
-	unsigned long ptev;
-	pgste_t pgste;
-
-	pgste = pgste_get_lock(pte);
-	/*
-	 * Remove all zero page mappings,
-	 * after establishing a policy to forbid zero page mappings
-	 * following faults for that page will get fresh anonymous pages
-	 */
-	if (is_zero_pfn(pte_pfn(*pte))) {
-		ptep_flush_direct(walk->mm, addr, pte);
-		pte_val(*pte) = _PAGE_INVALID;
-	}
-	/* Clear storage key */
-	pgste_val(pgste) &= ~(PGSTE_ACC_BITS | PGSTE_FP_BIT |
-			      PGSTE_GR_BIT | PGSTE_GC_BIT);
-	ptev = pte_val(*pte);
-	if (!(ptev & _PAGE_INVALID) && (ptev & _PAGE_WRITE))
-		page_set_storage_key(ptev & PAGE_MASK, PAGE_DEFAULT_KEY, 1);
-	pgste_set_unlock(pte, pgste);
-	return 0;
-}
-
-int s390_enable_skey(void)
-{
-	struct mm_walk walk = { .pte_entry = __s390_enable_skey };
-	struct mm_struct *mm = current->mm;
-	struct vm_area_struct *vma;
-	int rc = 0;
-
-	down_write(&mm->mmap_sem);
-	if (mm_use_skey(mm))
-		goto out_up;
-
-	mm->context.use_skey = 1;
-	for (vma = mm->mmap; vma; vma = vma->vm_next) {
-		if (ksm_madvise(vma, vma->vm_start, vma->vm_end,
-				MADV_UNMERGEABLE, &vma->vm_flags)) {
-			mm->context.use_skey = 0;
-			rc = -ENOMEM;
-			goto out_up;
-		}
-	}
-	mm->def_flags &= ~VM_MERGEABLE;
-
-	walk.mm = mm;
-	walk_page_range(0, TASK_SIZE, &walk);
-
-out_up:
-	up_write(&mm->mmap_sem);
-	return rc;
-}
-EXPORT_SYMBOL_GPL(s390_enable_skey);
-
-/*
- * Reset CMMA state, make all pages stable again.
- */
-static int __s390_reset_cmma(pte_t *pte, unsigned long addr,
-			     unsigned long next, struct mm_walk *walk)
-{
-	pgste_t pgste;
-
-	pgste = pgste_get_lock(pte);
-	pgste_val(pgste) &= ~_PGSTE_GPS_USAGE_MASK;
-	pgste_set_unlock(pte, pgste);
-	return 0;
-}
-
-void s390_reset_cmma(struct mm_struct *mm)
-{
-	struct mm_walk walk = { .pte_entry = __s390_reset_cmma };
-
-	down_write(&mm->mmap_sem);
-	walk.mm = mm;
-	walk_page_range(0, TASK_SIZE, &walk);
-	up_write(&mm->mmap_sem);
-}
-EXPORT_SYMBOL_GPL(s390_reset_cmma);
-
-/*
- * Test and reset if a guest page is dirty
- */
-bool gmap_test_and_clear_dirty(unsigned long address, struct gmap *gmap)
-{
-	pte_t *pte;
-	spinlock_t *ptl;
-	bool dirty = false;
-
-	pte = get_locked_pte(gmap->mm, address, &ptl);
-	if (unlikely(!pte))
-		return false;
-
-	if (ptep_test_and_clear_user_dirty(gmap->mm, address, pte))
-		dirty = true;
-
-	spin_unlock(ptl);
-	return dirty;
-}
-EXPORT_SYMBOL_GPL(gmap_test_and_clear_dirty);
-
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-int pmdp_clear_flush_young(struct vm_area_struct *vma, unsigned long address,
-			   pmd_t *pmdp)
-{
-	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
-	/* No need to flush TLB
-	 * On s390 reference bits are in storage key and never in TLB */
-	return pmdp_test_and_clear_young(vma, address, pmdp);
-}
-
-int pmdp_set_access_flags(struct vm_area_struct *vma,
-			  unsigned long address, pmd_t *pmdp,
-			  pmd_t entry, int dirty)
-{
-	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
-
-	entry = pmd_mkyoung(entry);
-	if (dirty)
-		entry = pmd_mkdirty(entry);
-	if (pmd_same(*pmdp, entry))
-		return 0;
-	pmdp_invalidate(vma, address, pmdp);
-	set_pmd_at(vma->vm_mm, address, pmdp, entry);
-	return 1;
-}
-
 void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
 				pgtable_t pgtable)
 {
@@ -1369,3 +390,193 @@
 	return pgtable;
 }
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+
+#ifdef CONFIG_PGSTE
+void ptep_set_pte_at(struct mm_struct *mm, unsigned long addr,
+		     pte_t *ptep, pte_t entry)
+{
+	pgste_t pgste;
+
+	/* the mm_has_pgste() check is done in set_pte_at() */
+	pgste = pgste_get_lock(ptep);
+	pgste_val(pgste) &= ~_PGSTE_GPS_ZERO;
+	pgste_set_key(ptep, pgste, entry, mm);
+	pgste = pgste_set_pte(ptep, pgste, entry);
+	pgste_set_unlock(ptep, pgste);
+}
+
+void ptep_set_notify(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
+{
+	pgste_t pgste;
+
+	pgste = pgste_get_lock(ptep);
+	pgste_val(pgste) |= PGSTE_IN_BIT;
+	pgste_set_unlock(ptep, pgste);
+}
+
+static void ptep_zap_swap_entry(struct mm_struct *mm, swp_entry_t entry)
+{
+	if (!non_swap_entry(entry))
+		dec_mm_counter(mm, MM_SWAPENTS);
+	else if (is_migration_entry(entry)) {
+		struct page *page = migration_entry_to_page(entry);
+
+		dec_mm_counter(mm, mm_counter(page));
+	}
+	free_swap_and_cache(entry);
+}
+
+void ptep_zap_unused(struct mm_struct *mm, unsigned long addr,
+		     pte_t *ptep, int reset)
+{
+	unsigned long pgstev;
+	pgste_t pgste;
+	pte_t pte;
+
+	/* Zap unused and logically-zero pages */
+	pgste = pgste_get_lock(ptep);
+	pgstev = pgste_val(pgste);
+	pte = *ptep;
+	if (pte_swap(pte) &&
+	    ((pgstev & _PGSTE_GPS_USAGE_MASK) == _PGSTE_GPS_USAGE_UNUSED ||
+	     (pgstev & _PGSTE_GPS_ZERO))) {
+		ptep_zap_swap_entry(mm, pte_to_swp_entry(pte));
+		pte_clear(mm, addr, ptep);
+	}
+	if (reset)
+		pgste_val(pgste) &= ~_PGSTE_GPS_USAGE_MASK;
+	pgste_set_unlock(ptep, pgste);
+}
+
+void ptep_zap_key(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
+{
+	unsigned long ptev;
+	pgste_t pgste;
+
+	/* Clear storage key */
+	pgste = pgste_get_lock(ptep);
+	pgste_val(pgste) &= ~(PGSTE_ACC_BITS | PGSTE_FP_BIT |
+			      PGSTE_GR_BIT | PGSTE_GC_BIT);
+	ptev = pte_val(*ptep);
+	if (!(ptev & _PAGE_INVALID) && (ptev & _PAGE_WRITE))
+		page_set_storage_key(ptev & PAGE_MASK, PAGE_DEFAULT_KEY, 1);
+	pgste_set_unlock(ptep, pgste);
+}
+
+/*
+ * Test and reset if a guest page is dirty
+ */
+bool test_and_clear_guest_dirty(struct mm_struct *mm, unsigned long addr)
+{
+	spinlock_t *ptl;
+	pgste_t pgste;
+	pte_t *ptep;
+	pte_t pte;
+	bool dirty;
+
+	ptep = get_locked_pte(mm, addr, &ptl);
+	if (unlikely(!ptep))
+		return false;
+
+	pgste = pgste_get_lock(ptep);
+	dirty = !!(pgste_val(pgste) & PGSTE_UC_BIT);
+	pgste_val(pgste) &= ~PGSTE_UC_BIT;
+	pte = *ptep;
+	if (dirty && (pte_val(pte) & _PAGE_PRESENT)) {
+		pgste = pgste_ipte_notify(mm, addr, ptep, pgste);
+		__ptep_ipte(addr, ptep);
+		if (MACHINE_HAS_ESOP || !(pte_val(pte) & _PAGE_WRITE))
+			pte_val(pte) |= _PAGE_PROTECT;
+		else
+			pte_val(pte) |= _PAGE_INVALID;
+		*ptep = pte;
+	}
+	pgste_set_unlock(ptep, pgste);
+
+	spin_unlock(ptl);
+	return dirty;
+}
+EXPORT_SYMBOL_GPL(test_and_clear_guest_dirty);
+
+int set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
+			  unsigned char key, bool nq)
+{
+	unsigned long keyul;
+	spinlock_t *ptl;
+	pgste_t old, new;
+	pte_t *ptep;
+
+	down_read(&mm->mmap_sem);
+	ptep = get_locked_pte(mm, addr, &ptl);
+	if (unlikely(!ptep)) {
+		up_read(&mm->mmap_sem);
+		return -EFAULT;
+	}
+
+	new = old = pgste_get_lock(ptep);
+	pgste_val(new) &= ~(PGSTE_GR_BIT | PGSTE_GC_BIT |
+			    PGSTE_ACC_BITS | PGSTE_FP_BIT);
+	keyul = (unsigned long) key;
+	pgste_val(new) |= (keyul & (_PAGE_CHANGED | _PAGE_REFERENCED)) << 48;
+	pgste_val(new) |= (keyul & (_PAGE_ACC_BITS | _PAGE_FP_BIT)) << 56;
+	if (!(pte_val(*ptep) & _PAGE_INVALID)) {
+		unsigned long address, bits, skey;
+
+		address = pte_val(*ptep) & PAGE_MASK;
+		skey = (unsigned long) page_get_storage_key(address);
+		bits = skey & (_PAGE_CHANGED | _PAGE_REFERENCED);
+		skey = key & (_PAGE_ACC_BITS | _PAGE_FP_BIT);
+		/* Set storage key ACC and FP */
+		page_set_storage_key(address, skey, !nq);
+		/* Merge host changed & referenced into pgste  */
+		pgste_val(new) |= bits << 52;
+	}
+	/* changing the guest storage key is considered a change of the page */
+	if ((pgste_val(new) ^ pgste_val(old)) &
+	    (PGSTE_ACC_BITS | PGSTE_FP_BIT | PGSTE_GR_BIT | PGSTE_GC_BIT))
+		pgste_val(new) |= PGSTE_UC_BIT;
+
+	pgste_set_unlock(ptep, new);
+	pte_unmap_unlock(ptep, ptl);
+	up_read(&mm->mmap_sem);
+	return 0;
+}
+EXPORT_SYMBOL(set_guest_storage_key);
+
+unsigned char get_guest_storage_key(struct mm_struct *mm, unsigned long addr)
+{
+	unsigned char key;
+	spinlock_t *ptl;
+	pgste_t pgste;
+	pte_t *ptep;
+
+	down_read(&mm->mmap_sem);
+	ptep = get_locked_pte(mm, addr, &ptl);
+	if (unlikely(!ptep)) {
+		up_read(&mm->mmap_sem);
+		return -EFAULT;
+	}
+	pgste = pgste_get_lock(ptep);
+
+	if (pte_val(*ptep) & _PAGE_INVALID) {
+		key  = (pgste_val(pgste) & PGSTE_ACC_BITS) >> 56;
+		key |= (pgste_val(pgste) & PGSTE_FP_BIT) >> 56;
+		key |= (pgste_val(pgste) & PGSTE_GR_BIT) >> 48;
+		key |= (pgste_val(pgste) & PGSTE_GC_BIT) >> 48;
+	} else {
+		key = page_get_storage_key(pte_val(*ptep) & PAGE_MASK);
+
+		/* Reflect guest's logical view, not physical */
+		if (pgste_val(pgste) & PGSTE_GR_BIT)
+			key |= _PAGE_REFERENCED;
+		if (pgste_val(pgste) & PGSTE_GC_BIT)
+			key |= _PAGE_CHANGED;
+	}
+
+	pgste_set_unlock(ptep, pgste);
+	pte_unmap_unlock(ptep, ptl);
+	up_read(&mm->mmap_sem);
+	return key;
+}
+EXPORT_SYMBOL(get_guest_storage_key);
+#endif
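
The new get_guest_storage_key() above assembles the architected storage-key byte (access-control bits, fetch protection, reference and change) either purely from the PGSTE when the pte is invalid, or from the real storage key merged with the guest's reference/change view. A minimal userspace sketch of that bit assembly follows; the PGSTE_* values are assumptions chosen so that the >>56 and >>48 shifts land on the usual ACC|F|R|C key layout, with the authoritative definitions living in the s390 headers.

/*
 * Sketch of the key assembly done in get_guest_storage_key() for an
 * invalid pte.  The constants are assumed values that mirror the shifts
 * used above; see arch/s390/include/asm/pgtable.h for the real ones.
 */
#include <stdint.h>
#include <stdio.h>

#define PGSTE_ACC_BITS	0xf000000000000000ULL
#define PGSTE_FP_BIT	0x0800000000000000ULL
#define PGSTE_GR_BIT	0x0004000000000000ULL
#define PGSTE_GC_BIT	0x0002000000000000ULL

static unsigned char key_from_pgste(uint64_t pgste)
{
	unsigned char key;

	key  = (pgste & PGSTE_ACC_BITS) >> 56;	/* ACC: bits 0xf0 of the key */
	key |= (pgste & PGSTE_FP_BIT) >> 56;	/* F:   bit 0x08 */
	key |= (pgste & PGSTE_GR_BIT) >> 48;	/* R:   bit 0x04 */
	key |= (pgste & PGSTE_GC_BIT) >> 48;	/* C:   bit 0x02 */
	return key;
}

int main(void)
{
	/* ACC = 6, fetch protection on, referenced, not changed */
	uint64_t pgste = (0x6ULL << 60) | PGSTE_FP_BIT | PGSTE_GR_BIT;

	printf("key byte = 0x%02x\n", key_from_pgste(pgste));	/* 0x6c */
	return 0;
}
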
diff --git a/arch/s390/oprofile/Makefile b/arch/s390/oprofile/Makefile
index 1bd2301..496e4a7 100644
--- a/arch/s390/oprofile/Makefile
+++ b/arch/s390/oprofile/Makefile
@@ -6,5 +6,5 @@
 		oprofilefs.o oprofile_stats.o  \
 		timer_int.o )
 
-oprofile-y :=	$(DRIVER_OBJS) init.o backtrace.o
+oprofile-y :=	$(DRIVER_OBJS) init.o
 oprofile-y +=	hwsampler.o
diff --git a/arch/s390/oprofile/backtrace.c b/arch/s390/oprofile/backtrace.c
deleted file mode 100644
index 1884e17..0000000
--- a/arch/s390/oprofile/backtrace.c
+++ /dev/null
@@ -1,78 +0,0 @@
-/*
- * S390 Version
- *   Copyright IBM Corp. 2005
- *   Author(s): Andreas Krebbel <Andreas.Krebbel@de.ibm.com>
- */
-
-#include <linux/oprofile.h>
-
-#include <asm/processor.h> /* for struct stack_frame */
-
-static unsigned long
-__show_trace(unsigned int *depth, unsigned long sp,
-	     unsigned long low, unsigned long high)
-{
-	struct stack_frame *sf;
-	struct pt_regs *regs;
-
-	while (*depth) {
-		if (sp < low || sp > high - sizeof(*sf))
-			return sp;
-		sf = (struct stack_frame *) sp;
-		(*depth)--;
-		oprofile_add_trace(sf->gprs[8]);
-
-		/* Follow the backchain.  */
-		while (*depth) {
-			low = sp;
-			sp = sf->back_chain;
-			if (!sp)
-				break;
-			if (sp <= low || sp > high - sizeof(*sf))
-				return sp;
-			sf = (struct stack_frame *) sp;
-			(*depth)--;
-			oprofile_add_trace(sf->gprs[8]);
-
-		}
-
-		if (*depth == 0)
-			break;
-
-		/* Zero backchain detected, check for interrupt frame.  */
-		sp = (unsigned long) (sf + 1);
-		if (sp <= low || sp > high - sizeof(*regs))
-			return sp;
-		regs = (struct pt_regs *) sp;
-		(*depth)--;
-		oprofile_add_trace(sf->gprs[8]);
-		low = sp;
-		sp = regs->gprs[15];
-	}
-	return sp;
-}
-
-void s390_backtrace(struct pt_regs * const regs, unsigned int depth)
-{
-	unsigned long head, frame_size;
-	struct stack_frame* head_sf;
-
-	if (user_mode(regs))
-		return;
-
-	frame_size = STACK_FRAME_OVERHEAD + sizeof(struct pt_regs);
-	head = regs->gprs[15];
-	head_sf = (struct stack_frame*)head;
-
-	if (!head_sf->back_chain)
-		return;
-
-	head = head_sf->back_chain;
-
-	head = __show_trace(&depth, head,
-			    S390_lowcore.async_stack + frame_size - ASYNC_SIZE,
-			    S390_lowcore.async_stack + frame_size);
-
-	__show_trace(&depth, head, S390_lowcore.thread_info,
-		     S390_lowcore.thread_info + THREAD_SIZE);
-}
diff --git a/arch/s390/oprofile/init.c b/arch/s390/oprofile/init.c
index 9cfa2ff..791935a 100644
--- a/arch/s390/oprofile/init.c
+++ b/arch/s390/oprofile/init.c
@@ -20,8 +20,6 @@
 
 #include "../../../drivers/oprofile/oprof.h"
 
-extern void s390_backtrace(struct pt_regs * const regs, unsigned int depth);
-
 #include "hwsampler.h"
 #include "op_counter.h"
 
@@ -456,6 +454,7 @@
 		case 0x2097: case 0x2098: ops->cpu_type = "s390/z10"; break;
 		case 0x2817: case 0x2818: ops->cpu_type = "s390/z196"; break;
 		case 0x2827: case 0x2828: ops->cpu_type = "s390/zEC12"; break;
+		case 0x2964: case 0x2965: ops->cpu_type = "s390/z13"; break;
 		default: return -ENODEV;
 		}
 	}
@@ -494,6 +493,24 @@
 	hwsampler_shutdown();
 }
 
+static int __s390_backtrace(void *data, unsigned long address)
+{
+	unsigned int *depth = data;
+
+	if (*depth == 0)
+		return 1;
+	(*depth)--;
+	oprofile_add_trace(address);
+	return 0;
+}
+
+static void s390_backtrace(struct pt_regs *regs, unsigned int depth)
+{
+	if (user_mode(regs))
+		return;
+	dump_trace(__s390_backtrace, &depth, NULL, regs->gprs[15]);
+}
+
 int __init oprofile_arch_init(struct oprofile_operations *ops)
 {
 	ops->backtrace = s390_backtrace;
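
The rewritten s390_backtrace() above no longer walks stack frames by hand; it hands a small visitor to dump_trace() and stops once the requested depth is used up, the visitor signalling termination by returning nonzero. The sketch below models that depth-limited callback pattern in plain C against a fake address array; walk_addresses() is only a stand-in for the kernel's dump_trace(), and the printf takes the place of oprofile_add_trace().

/*
 * Userspace model of the callback used by the new s390_backtrace():
 * the walker visits every address and stops as soon as the visitor
 * returns nonzero.  walk_addresses() is not a real kernel API.
 */
#include <stddef.h>
#include <stdio.h>

typedef int (*trace_fn)(void *data, unsigned long address);

static void walk_addresses(const unsigned long *addrs, size_t n,
			   trace_fn fn, void *data)
{
	for (size_t i = 0; i < n; i++)
		if (fn(data, addrs[i]))
			break;			/* visitor asked us to stop */
}

/* Mirrors __s390_backtrace(): record addresses until *depth hits zero. */
static int record_address(void *data, unsigned long address)
{
	unsigned int *depth = data;

	if (*depth == 0)
		return 1;			/* depth exhausted, stop */
	(*depth)--;
	printf("trace: %#lx\n", address);
	return 0;
}

int main(void)
{
	unsigned long frames[] = { 0x1000, 0x2000, 0x3000, 0x4000 };
	unsigned int depth = 2;

	walk_addresses(frames, 4, record_address, &depth);	/* two entries */
	return 0;
}
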
diff --git a/arch/s390/pci/pci.c b/arch/s390/pci/pci.c
index 8f19c8f..9fd59a7 100644
--- a/arch/s390/pci/pci.c
+++ b/arch/s390/pci/pci.c
@@ -637,11 +637,9 @@
 
 int pcibios_add_device(struct pci_dev *pdev)
 {
-	struct zpci_dev *zdev = to_zpci(pdev);
 	struct resource *res;
 	int i;
 
-	zdev->pdev = pdev;
 	pdev->dev.groups = zpci_attr_groups;
 	zpci_map_resources(pdev);
 
@@ -664,8 +662,7 @@
 {
 	struct zpci_dev *zdev = to_zpci(pdev);
 
-	zdev->pdev = pdev;
-	zpci_debug_init_device(zdev);
+	zpci_debug_init_device(zdev, dev_name(&pdev->dev));
 	zpci_fmb_enable_device(zdev);
 
 	return pci_enable_resources(pdev, mask);
@@ -677,7 +674,6 @@
 
 	zpci_fmb_disable_device(zdev);
 	zpci_debug_exit_device(zdev);
-	zdev->pdev = NULL;
 }
 
 #ifdef CONFIG_HIBERNATE_CALLBACKS
@@ -864,8 +860,11 @@
 
 static int zpci_mem_init(void)
 {
+	BUILD_BUG_ON(!is_power_of_2(__alignof__(struct zpci_fmb)) ||
+		     __alignof__(struct zpci_fmb) < sizeof(struct zpci_fmb));
+
 	zdev_fmb_cache = kmem_cache_create("PCI_FMB_cache", sizeof(struct zpci_fmb),
-				16, 0, NULL);
+					   __alignof__(struct zpci_fmb), 0, NULL);
 	if (!zdev_fmb_cache)
 		goto error_fmb;
 
diff --git a/arch/s390/pci/pci_clp.c b/arch/s390/pci/pci_clp.c
index d6e411e..21591dd 100644
--- a/arch/s390/pci/pci_clp.c
+++ b/arch/s390/pci/pci_clp.c
@@ -8,13 +8,19 @@
 #define KMSG_COMPONENT "zpci"
 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
 
+#include <linux/compat.h>
 #include <linux/kernel.h>
+#include <linux/miscdevice.h>
 #include <linux/slab.h>
 #include <linux/err.h>
 #include <linux/delay.h>
 #include <linux/pci.h>
+#include <linux/uaccess.h>
 #include <asm/pci_debug.h>
 #include <asm/pci_clp.h>
+#include <asm/compat.h>
+#include <asm/clp.h>
+#include <uapi/asm/clp.h>
 
 static inline void zpci_err_clp(unsigned int rsp, int rc)
 {
@@ -27,21 +33,43 @@
 }
 
 /*
- * Call Logical Processor
- * Retry logic is handled by the caller.
+ * Call Logical Processor with c=1, lps=0 and command 1
+ * to get the bit mask of installed logical processors
  */
-static inline u8 clp_instr(void *data)
+static inline int clp_get_ilp(unsigned long *ilp)
+{
+	unsigned long mask;
+	int cc = 3;
+
+	asm volatile (
+		"	.insn	rrf,0xb9a00000,%[mask],%[cmd],8,0\n"
+		"0:	ipm	%[cc]\n"
+		"	srl	%[cc],28\n"
+		"1:\n"
+		EX_TABLE(0b, 1b)
+		: [cc] "+d" (cc), [mask] "=d" (mask) : [cmd] "a" (1)
+		: "cc");
+	*ilp = mask;
+	return cc;
+}
+
+/*
+ * Call Logical Processor with c=0, the given constant lps and an lpcb request.
+ */
+static inline int clp_req(void *data, unsigned int lps)
 {
 	struct { u8 _[CLP_BLK_SIZE]; } *req = data;
 	u64 ignored;
-	u8 cc;
+	int cc = 3;
 
 	asm volatile (
-		"	.insn	rrf,0xb9a00000,%[ign],%[req],0x0,0x2\n"
-		"	ipm	%[cc]\n"
+		"	.insn	rrf,0xb9a00000,%[ign],%[req],0,%[lps]\n"
+		"0:	ipm	%[cc]\n"
 		"	srl	%[cc],28\n"
-		: [cc] "=d" (cc), [ign] "=d" (ignored), "+m" (*req)
-		: [req] "a" (req)
+		"1:\n"
+		EX_TABLE(0b, 1b)
+		: [cc] "+d" (cc), [ign] "=d" (ignored), "+m" (*req)
+		: [req] "a" (req), [lps] "i" (lps)
 		: "cc");
 	return cc;
 }
@@ -90,7 +118,7 @@
 	rrb->response.hdr.len = sizeof(rrb->response);
 	rrb->request.pfgid = pfgid;
 
-	rc = clp_instr(rrb);
+	rc = clp_req(rrb, CLP_LPS_PCI);
 	if (!rc && rrb->response.hdr.rsp == CLP_RC_OK)
 		clp_store_query_pci_fngrp(zdev, &rrb->response);
 	else {
@@ -143,7 +171,7 @@
 	rrb->response.hdr.len = sizeof(rrb->response);
 	rrb->request.fh = fh;
 
-	rc = clp_instr(rrb);
+	rc = clp_req(rrb, CLP_LPS_PCI);
 	if (!rc && rrb->response.hdr.rsp == CLP_RC_OK) {
 		rc = clp_store_query_pci_fn(zdev, &rrb->response);
 		if (rc)
@@ -214,7 +242,7 @@
 		rrb->request.oc = command;
 		rrb->request.ndas = nr_dma_as;
 
-		rc = clp_instr(rrb);
+		rc = clp_req(rrb, CLP_LPS_PCI);
 		if (rrb->response.hdr.rsp == CLP_RC_SETPCIFN_BUSY) {
 			retries--;
 			if (retries < 0)
@@ -280,7 +308,7 @@
 		rrb->request.resume_token = resume_token;
 
 		/* Get PCI function handle list */
-		rc = clp_instr(rrb);
+		rc = clp_req(rrb, CLP_LPS_PCI);
 		if (rc || rrb->response.hdr.rsp != CLP_RC_OK) {
 			zpci_err("List PCI FN:\n");
 			zpci_err_clp(rrb->response.hdr.rsp, rc);
@@ -391,3 +419,198 @@
 	clp_free_block(rrb);
 	return rc;
 }
+
+static int clp_base_slpc(struct clp_req *req, struct clp_req_rsp_slpc *lpcb)
+{
+	unsigned long limit = PAGE_SIZE - sizeof(lpcb->request);
+
+	if (lpcb->request.hdr.len != sizeof(lpcb->request) ||
+	    lpcb->response.hdr.len > limit)
+		return -EINVAL;
+	return clp_req(lpcb, CLP_LPS_BASE) ? -EOPNOTSUPP : 0;
+}
+
+static int clp_base_command(struct clp_req *req, struct clp_req_hdr *lpcb)
+{
+	switch (lpcb->cmd) {
+	case 0x0001: /* store logical-processor characteristics */
+		return clp_base_slpc(req, (void *) lpcb);
+	default:
+		return -EINVAL;
+	}
+}
+
+static int clp_pci_slpc(struct clp_req *req, struct clp_req_rsp_slpc *lpcb)
+{
+	unsigned long limit = PAGE_SIZE - sizeof(lpcb->request);
+
+	if (lpcb->request.hdr.len != sizeof(lpcb->request) ||
+	    lpcb->response.hdr.len > limit)
+		return -EINVAL;
+	return clp_req(lpcb, CLP_LPS_PCI) ? -EOPNOTSUPP : 0;
+}
+
+static int clp_pci_list(struct clp_req *req, struct clp_req_rsp_list_pci *lpcb)
+{
+	unsigned long limit = PAGE_SIZE - sizeof(lpcb->request);
+
+	if (lpcb->request.hdr.len != sizeof(lpcb->request) ||
+	    lpcb->response.hdr.len > limit)
+		return -EINVAL;
+	if (lpcb->request.reserved2 != 0)
+		return -EINVAL;
+	return clp_req(lpcb, CLP_LPS_PCI) ? -EOPNOTSUPP : 0;
+}
+
+static int clp_pci_query(struct clp_req *req,
+			 struct clp_req_rsp_query_pci *lpcb)
+{
+	unsigned long limit = PAGE_SIZE - sizeof(lpcb->request);
+
+	if (lpcb->request.hdr.len != sizeof(lpcb->request) ||
+	    lpcb->response.hdr.len > limit)
+		return -EINVAL;
+	if (lpcb->request.reserved2 != 0 || lpcb->request.reserved3 != 0)
+		return -EINVAL;
+	return clp_req(lpcb, CLP_LPS_PCI) ? -EOPNOTSUPP : 0;
+}
+
+static int clp_pci_query_grp(struct clp_req *req,
+			     struct clp_req_rsp_query_pci_grp *lpcb)
+{
+	unsigned long limit = PAGE_SIZE - sizeof(lpcb->request);
+
+	if (lpcb->request.hdr.len != sizeof(lpcb->request) ||
+	    lpcb->response.hdr.len > limit)
+		return -EINVAL;
+	if (lpcb->request.reserved2 != 0 || lpcb->request.reserved3 != 0 ||
+	    lpcb->request.reserved4 != 0)
+		return -EINVAL;
+	return clp_req(lpcb, CLP_LPS_PCI) ? -EOPNOTSUPP : 0;
+}
+
+static int clp_pci_command(struct clp_req *req, struct clp_req_hdr *lpcb)
+{
+	switch (lpcb->cmd) {
+	case 0x0001: /* store logical-processor characteristics */
+		return clp_pci_slpc(req, (void *) lpcb);
+	case 0x0002: /* list PCI functions */
+		return clp_pci_list(req, (void *) lpcb);
+	case 0x0003: /* query PCI function */
+		return clp_pci_query(req, (void *) lpcb);
+	case 0x0004: /* query PCI function group */
+		return clp_pci_query_grp(req, (void *) lpcb);
+	default:
+		return -EINVAL;
+	}
+}
+
+static int clp_normal_command(struct clp_req *req)
+{
+	struct clp_req_hdr *lpcb;
+	void __user *uptr;
+	int rc;
+
+	rc = -EINVAL;
+	if (req->lps != 0 && req->lps != 2)
+		goto out;
+
+	rc = -ENOMEM;
+	lpcb = clp_alloc_block(GFP_KERNEL);
+	if (!lpcb)
+		goto out;
+
+	rc = -EFAULT;
+	uptr = (void __force __user *)(unsigned long) req->data_p;
+	if (copy_from_user(lpcb, uptr, PAGE_SIZE) != 0)
+		goto out_free;
+
+	rc = -EINVAL;
+	if (lpcb->fmt != 0 || lpcb->reserved1 != 0 || lpcb->reserved2 != 0)
+		goto out_free;
+
+	switch (req->lps) {
+	case 0:
+		rc = clp_base_command(req, lpcb);
+		break;
+	case 2:
+		rc = clp_pci_command(req, lpcb);
+		break;
+	}
+	if (rc)
+		goto out_free;
+
+	rc = -EFAULT;
+	if (copy_to_user(uptr, lpcb, PAGE_SIZE) != 0)
+		goto out_free;
+
+	rc = 0;
+
+out_free:
+	clp_free_block(lpcb);
+out:
+	return rc;
+}
+
+static int clp_immediate_command(struct clp_req *req)
+{
+	void __user *uptr;
+	unsigned long ilp;
+	int exists;
+
+	if (req->cmd > 1 || clp_get_ilp(&ilp) != 0)
+		return -EINVAL;
+
+	uptr = (void __force __user *)(unsigned long) req->data_p;
+	if (req->cmd == 0) {
+		/* Command code 0: test for a specific processor */
+		exists = test_bit_inv(req->lps, &ilp);
+		return put_user(exists, (int __user *) uptr);
+	}
+	/* Command code 1: return bit mask of installed processors */
+	return put_user(ilp, (unsigned long __user *) uptr);
+}
+
+static long clp_misc_ioctl(struct file *filp, unsigned int cmd,
+			   unsigned long arg)
+{
+	struct clp_req req;
+	void __user *argp;
+
+	if (cmd != CLP_SYNC)
+		return -EINVAL;
+
+	argp = is_compat_task() ? compat_ptr(arg) : (void __user *) arg;
+	if (copy_from_user(&req, argp, sizeof(req)))
+		return -EFAULT;
+	if (req.r != 0)
+		return -EINVAL;
+	return req.c ? clp_immediate_command(&req) : clp_normal_command(&req);
+}
+
+static int clp_misc_release(struct inode *inode, struct file *filp)
+{
+	return 0;
+}
+
+static const struct file_operations clp_misc_fops = {
+	.owner = THIS_MODULE,
+	.open = nonseekable_open,
+	.release = clp_misc_release,
+	.unlocked_ioctl = clp_misc_ioctl,
+	.compat_ioctl = clp_misc_ioctl,
+	.llseek = no_llseek,
+};
+
+static struct miscdevice clp_misc_device = {
+	.minor = MISC_DYNAMIC_MINOR,
+	.name = "clp",
+	.fops = &clp_misc_fops,
+};
+
+static int __init clp_misc_init(void)
+{
+	return misc_register(&clp_misc_device);
+}
+
+device_initcall(clp_misc_init);
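
The misc device added above gives userspace a thin pass-through to the CLP instruction: a single CLP_SYNC ioctl whose struct clp_req either asks for an immediate command (c=1, testing for or listing installed logical processors) or carries a page-sized logical-processor command block (c=0). A hedged usage sketch follows; the field names are taken from the handler above, while the exact uapi layout, the CLP_SYNC number and the /dev/clp node name come from <asm/clp.h> and the misc registration, so treat this as an illustration rather than a reference client.

/*
 * Illustrative user of the new CLP misc device (assuming udev creates
 * /dev/clp for it).  Field names follow the handler in pci_clp.c; the
 * authoritative definitions are in the exported <asm/clp.h>.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <asm/clp.h>		/* struct clp_req, CLP_SYNC */

int main(void)
{
	unsigned long ilp = 0;
	struct clp_req req;
	int fd, rc;

	fd = open("/dev/clp", O_RDWR);
	if (fd < 0) {
		perror("open /dev/clp");
		return 1;
	}

	/* Immediate command 1: fetch the installed-logical-processor mask. */
	memset(&req, 0, sizeof(req));
	req.c = 1;				/* immediate command */
	req.cmd = 1;				/* 1 = return the ILP bit mask */
	req.data_p = (unsigned long) &ilp;	/* kernel put_user()s the mask here */

	rc = ioctl(fd, CLP_SYNC, &req);
	if (rc)
		perror("CLP_SYNC");
	else
		printf("installed LP mask: %#lx\n", ilp);

	close(fd);
	return rc ? 1 : 0;
}
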
diff --git a/arch/s390/pci/pci_debug.c b/arch/s390/pci/pci_debug.c
index 4129b0a..c555de3 100644
--- a/arch/s390/pci/pci_debug.c
+++ b/arch/s390/pci/pci_debug.c
@@ -128,10 +128,9 @@
 	.release = single_release,
 };
 
-void zpci_debug_init_device(struct zpci_dev *zdev)
+void zpci_debug_init_device(struct zpci_dev *zdev, const char *name)
 {
-	zdev->debugfs_dev = debugfs_create_dir(dev_name(&zdev->pdev->dev),
-					       debugfs_root);
+	zdev->debugfs_dev = debugfs_create_dir(name, debugfs_root);
 	if (IS_ERR(zdev->debugfs_dev))
 		zdev->debugfs_dev = NULL;
 
diff --git a/arch/s390/pci/pci_dma.c b/arch/s390/pci/pci_dma.c
index 4638b93..a06ce80 100644
--- a/arch/s390/pci/pci_dma.c
+++ b/arch/s390/pci/pci_dma.c
@@ -217,27 +217,29 @@
 	dma_free_cpu_table(table);
 }
 
-static unsigned long __dma_alloc_iommu(struct zpci_dev *zdev,
+static unsigned long __dma_alloc_iommu(struct device *dev,
 				       unsigned long start, int size)
 {
+	struct zpci_dev *zdev = to_zpci(to_pci_dev(dev));
 	unsigned long boundary_size;
 
-	boundary_size = ALIGN(dma_get_seg_boundary(&zdev->pdev->dev) + 1,
+	boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
 			      PAGE_SIZE) >> PAGE_SHIFT;
 	return iommu_area_alloc(zdev->iommu_bitmap, zdev->iommu_pages,
 				start, size, 0, boundary_size, 0);
 }
 
-static unsigned long dma_alloc_iommu(struct zpci_dev *zdev, int size)
+static unsigned long dma_alloc_iommu(struct device *dev, int size)
 {
+	struct zpci_dev *zdev = to_zpci(to_pci_dev(dev));
 	unsigned long offset, flags;
 	int wrap = 0;
 
 	spin_lock_irqsave(&zdev->iommu_bitmap_lock, flags);
-	offset = __dma_alloc_iommu(zdev, zdev->next_bit, size);
+	offset = __dma_alloc_iommu(dev, zdev->next_bit, size);
 	if (offset == -1) {
 		/* wrap-around */
-		offset = __dma_alloc_iommu(zdev, 0, size);
+		offset = __dma_alloc_iommu(dev, 0, size);
 		wrap = 1;
 	}
 
@@ -251,8 +253,9 @@
 	return offset;
 }
 
-static void dma_free_iommu(struct zpci_dev *zdev, unsigned long offset, int size)
+static void dma_free_iommu(struct device *dev, unsigned long offset, int size)
 {
+	struct zpci_dev *zdev = to_zpci(to_pci_dev(dev));
 	unsigned long flags;
 
 	spin_lock_irqsave(&zdev->iommu_bitmap_lock, flags);
@@ -293,7 +296,7 @@
 
 	/* This rounds up number of pages based on size and offset */
 	nr_pages = iommu_num_pages(pa, size, PAGE_SIZE);
-	iommu_page_index = dma_alloc_iommu(zdev, nr_pages);
+	iommu_page_index = dma_alloc_iommu(dev, nr_pages);
 	if (iommu_page_index == -1) {
 		ret = -ENOSPC;
 		goto out_err;
@@ -319,7 +322,7 @@
 	return dma_addr + (offset & ~PAGE_MASK);
 
 out_free:
-	dma_free_iommu(zdev, iommu_page_index, nr_pages);
+	dma_free_iommu(dev, iommu_page_index, nr_pages);
 out_err:
 	zpci_err("map error:\n");
 	zpci_err_dma(ret, pa);
@@ -346,7 +349,7 @@
 
 	atomic64_add(npages, &zdev->unmapped_pages);
 	iommu_page_index = (dma_addr - zdev->start_dma) >> PAGE_SHIFT;
-	dma_free_iommu(zdev, iommu_page_index, npages);
+	dma_free_iommu(dev, iommu_page_index, npages);
 }
 
 static void *s390_dma_alloc(struct device *dev, size_t size,
diff --git a/arch/s390/pci/pci_event.c b/arch/s390/pci/pci_event.c
index b0e0475..fb2a9a5 100644
--- a/arch/s390/pci/pci_event.c
+++ b/arch/s390/pci/pci_event.c
@@ -46,11 +46,14 @@
 static void __zpci_event_error(struct zpci_ccdf_err *ccdf)
 {
 	struct zpci_dev *zdev = get_zdev_by_fid(ccdf->fid);
-	struct pci_dev *pdev = zdev ? zdev->pdev : NULL;
+	struct pci_dev *pdev = NULL;
 
 	zpci_err("error CCDF:\n");
 	zpci_err_hex(ccdf, sizeof(*ccdf));
 
+	if (zdev)
+		pdev = pci_get_slot(zdev->bus, ZPCI_DEVFN);
+
 	pr_err("%s: Event 0x%x reports an error for PCI function 0x%x\n",
 	       pdev ? pci_name(pdev) : "n/a", ccdf->pec, ccdf->fid);
 
@@ -58,6 +61,7 @@
 		return;
 
 	pdev->error_state = pci_channel_io_perm_failure;
+	pci_dev_put(pdev);
 }
 
 void zpci_event_error(void *data)
@@ -69,9 +73,12 @@
 static void __zpci_event_availability(struct zpci_ccdf_avail *ccdf)
 {
 	struct zpci_dev *zdev = get_zdev_by_fid(ccdf->fid);
-	struct pci_dev *pdev = zdev ? zdev->pdev : NULL;
+	struct pci_dev *pdev = NULL;
 	int ret;
 
+	if (zdev)
+		pdev = pci_get_slot(zdev->bus, ZPCI_DEVFN);
+
 	pr_info("%s: Event 0x%x reconfigured PCI function 0x%x\n",
 		pdev ? pci_name(pdev) : "n/a", ccdf->pec, ccdf->fid);
 	zpci_err("avail CCDF:\n");
@@ -138,6 +145,8 @@
 	default:
 		break;
 	}
+	if (pdev)
+		pci_dev_put(pdev);
 }
 
 void zpci_event_availability(void *data)
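
Both event handlers above stop relying on a cached zdev->pdev pointer and instead take a counted reference with pci_get_slot(), releasing it via pci_dev_put() once the pdev is no longer needed. The discipline is the usual get/put pairing; the sketch below models it with a toy refcounted object in plain C, since pci_get_slot() and pci_dev_put() themselves are kernel APIs and are not reproduced here.

/*
 * Toy model of the get/put pairing introduced in pci_event.c: every
 * successful "get" is balanced by exactly one "put", and the object is
 * freed only when the last reference is dropped.
 */
#include <stdio.h>
#include <stdlib.h>

struct dev {
	int refcount;
	const char *name;
};

static struct dev *dev_get(struct dev *d)
{
	if (d)
		d->refcount++;
	return d;
}

static void dev_put(struct dev *d)
{
	if (d && --d->refcount == 0) {
		printf("freeing %s\n", d->name);
		free(d);
	}
}

int main(void)
{
	struct dev *d = malloc(sizeof(*d));

	d->refcount = 1;			/* reference held by the bus */
	d->name = "0000:00:00.0";

	struct dev *ref = dev_get(d);		/* like pci_get_slot() */
	if (ref)
		printf("handling event for %s\n", ref->name);
	dev_put(ref);				/* like pci_dev_put() */

	dev_put(d);				/* last reference, object freed */
	return 0;
}
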
diff --git a/arch/sh/kernel/smp.c b/arch/sh/kernel/smp.c
index de6be00..13f633a 100644
--- a/arch/sh/kernel/smp.c
+++ b/arch/sh/kernel/smp.c
@@ -203,7 +203,7 @@
 	set_cpu_online(cpu, true);
 	per_cpu(cpu_state, cpu) = CPU_ONLINE;
 
-	cpu_startup_entry(CPUHP_ONLINE);
+	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
 }
 
 extern struct {
diff --git a/arch/sparc/kernel/smp_32.c b/arch/sparc/kernel/smp_32.c
index b3a5d81..fb30e7c 100644
--- a/arch/sparc/kernel/smp_32.c
+++ b/arch/sparc/kernel/smp_32.c
@@ -364,7 +364,7 @@
 	local_irq_enable();
 
 	wmb();
-	cpu_startup_entry(CPUHP_ONLINE);
+	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
 
 	/* We should never reach here! */
 	BUG();
diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c
index 19cd08d..8a6151a 100644
--- a/arch/sparc/kernel/smp_64.c
+++ b/arch/sparc/kernel/smp_64.c
@@ -134,7 +134,7 @@
 
 	local_irq_enable();
 
-	cpu_startup_entry(CPUHP_ONLINE);
+	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
 }
 
 void cpu_panic(void)
diff --git a/arch/tile/kernel/smpboot.c b/arch/tile/kernel/smpboot.c
index 20d52a9..6c0abaa 100644
--- a/arch/tile/kernel/smpboot.c
+++ b/arch/tile/kernel/smpboot.c
@@ -208,7 +208,7 @@
 	/* Set up tile-timer clock-event device on this cpu */
 	setup_tile_timer();
 
-	cpu_startup_entry(CPUHP_ONLINE);
+	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
 }
 
 int __cpu_up(unsigned int cpu, struct task_struct *tidle)
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index b105105..8f2e665 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -1163,22 +1163,23 @@
 	bool "CPU microcode loading support"
 	default y
 	depends on CPU_SUP_AMD || CPU_SUP_INTEL
-	depends on BLK_DEV_INITRD
 	select FW_LOADER
 	---help---
-
 	  If you say Y here, you will be able to update the microcode on
-	  certain Intel and AMD processors. The Intel support is for the
-	  IA32 family, e.g. Pentium Pro, Pentium II, Pentium III, Pentium 4,
-	  Xeon etc. The AMD support is for families 0x10 and later. You will
-	  obviously need the actual microcode binary data itself which is not
-	  shipped with the Linux kernel.
+	  Intel and AMD processors. The Intel support is for the IA32 family,
+	  e.g. Pentium Pro, Pentium II, Pentium III, Pentium 4, Xeon etc. The
+	  AMD support is for families 0x10 and later. You will obviously need
+	  the actual microcode binary data itself which is not shipped with
+	  the Linux kernel.
 
-	  This option selects the general module only, you need to select
-	  at least one vendor specific module as well.
+	  The preferred method to load microcode from a detached initrd is described
+	  in Documentation/x86/early-microcode.txt. For that you need to enable
+	  CONFIG_BLK_DEV_INITRD in order for the loader to be able to scan the
+	  initrd for microcode blobs.
 
-	  To compile this driver as a module, choose M here: the module
-	  will be called microcode.
+	  In addition, you can build the microcode into the kernel. For that you
+	  need to enable FIRMWARE_IN_KERNEL and add the vendor-supplied microcode
+	  to the CONFIG_EXTRA_FIRMWARE config option.
 
 config MICROCODE_INTEL
 	bool "Intel microcode loading support"
diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
index 7816b7b..67eec55 100644
--- a/arch/x86/Kconfig.debug
+++ b/arch/x86/Kconfig.debug
@@ -338,16 +338,6 @@
 
 	  If unsure say N here.
 
-config X86_DEBUG_STATIC_CPU_HAS
-	bool "Debug alternatives"
-	depends on DEBUG_KERNEL
-	---help---
-	  This option causes additional code to be generated which
-	  fails if static_cpu_has() is used before alternatives have
-	  run.
-
-	  If unsure, say N.
-
 config X86_DEBUG_FPU
 	bool "Debug the x86 FPU code"
 	depends on DEBUG_KERNEL
diff --git a/arch/x86/boot/cpuflags.h b/arch/x86/boot/cpuflags.h
index ea97697..4cb404f 100644
--- a/arch/x86/boot/cpuflags.h
+++ b/arch/x86/boot/cpuflags.h
@@ -1,7 +1,7 @@
 #ifndef BOOT_CPUFLAGS_H
 #define BOOT_CPUFLAGS_H
 
-#include <asm/cpufeature.h>
+#include <asm/cpufeatures.h>
 #include <asm/processor-flags.h>
 
 struct cpu_features {
diff --git a/arch/x86/boot/mkcpustr.c b/arch/x86/boot/mkcpustr.c
index 637097e..f72498d 100644
--- a/arch/x86/boot/mkcpustr.c
+++ b/arch/x86/boot/mkcpustr.c
@@ -17,7 +17,7 @@
 
 #include "../include/asm/required-features.h"
 #include "../include/asm/disabled-features.h"
-#include "../include/asm/cpufeature.h"
+#include "../include/asm/cpufeatures.h"
 #include "../kernel/cpu/capflags.c"
 
 int main(void)
diff --git a/arch/x86/boot/tools/build.c b/arch/x86/boot/tools/build.c
index a7661c4..0702d25 100644
--- a/arch/x86/boot/tools/build.c
+++ b/arch/x86/boot/tools/build.c
@@ -49,7 +49,6 @@
 
 /* This must be large enough to hold the entire setup */
 u8 buf[SETUP_SECT_MAX*512];
-int is_big_kernel;
 
 #define PECOFF_RELOC_RESERVE 0x20
 
diff --git a/arch/x86/configs/i386_defconfig b/arch/x86/configs/i386_defconfig
index 028be48..e25a163 100644
--- a/arch/x86/configs/i386_defconfig
+++ b/arch/x86/configs/i386_defconfig
@@ -288,7 +288,7 @@
 CONFIG_NLS_UTF8=y
 CONFIG_PRINTK_TIME=y
 # CONFIG_ENABLE_WARN_DEPRECATED is not set
-CONFIG_FRAME_WARN=2048
+CONFIG_FRAME_WARN=1024
 CONFIG_MAGIC_SYSRQ=y
 # CONFIG_UNUSED_SYMBOLS is not set
 CONFIG_DEBUG_KERNEL=y
diff --git a/arch/x86/crypto/crc32-pclmul_glue.c b/arch/x86/crypto/crc32-pclmul_glue.c
index 07d2c6c..27226df 100644
--- a/arch/x86/crypto/crc32-pclmul_glue.c
+++ b/arch/x86/crypto/crc32-pclmul_glue.c
@@ -33,7 +33,7 @@
 #include <linux/crc32.h>
 #include <crypto/internal/hash.h>
 
-#include <asm/cpufeature.h>
+#include <asm/cpufeatures.h>
 #include <asm/cpu_device_id.h>
 #include <asm/fpu/api.h>
 
diff --git a/arch/x86/crypto/crc32c-intel_glue.c b/arch/x86/crypto/crc32c-intel_glue.c
index 0e98716..0857b1a 100644
--- a/arch/x86/crypto/crc32c-intel_glue.c
+++ b/arch/x86/crypto/crc32c-intel_glue.c
@@ -30,7 +30,7 @@
 #include <linux/kernel.h>
 #include <crypto/internal/hash.h>
 
-#include <asm/cpufeature.h>
+#include <asm/cpufeatures.h>
 #include <asm/cpu_device_id.h>
 #include <asm/fpu/internal.h>
 
diff --git a/arch/x86/crypto/crct10dif-pclmul_glue.c b/arch/x86/crypto/crct10dif-pclmul_glue.c
index a3fcfc9..cd4df93 100644
--- a/arch/x86/crypto/crct10dif-pclmul_glue.c
+++ b/arch/x86/crypto/crct10dif-pclmul_glue.c
@@ -30,7 +30,7 @@
 #include <linux/string.h>
 #include <linux/kernel.h>
 #include <asm/fpu/api.h>
-#include <asm/cpufeature.h>
+#include <asm/cpufeatures.h>
 #include <asm/cpu_device_id.h>
 
 asmlinkage __u16 crc_t10dif_pcl(__u16 crc, const unsigned char *buf,
diff --git a/arch/x86/entry/calling.h b/arch/x86/entry/calling.h
index e32206e..9a9e588 100644
--- a/arch/x86/entry/calling.h
+++ b/arch/x86/entry/calling.h
@@ -201,37 +201,6 @@
 	.byte 0xf1
 	.endm
 
-#else /* CONFIG_X86_64 */
-
-/*
- * For 32bit only simplified versions of SAVE_ALL/RESTORE_ALL. These
- * are different from the entry_32.S versions in not changing the segment
- * registers. So only suitable for in kernel use, not when transitioning
- * from or to user space. The resulting stack frame is not a standard
- * pt_regs frame. The main use case is calling C code from assembler
- * when all the registers need to be preserved.
- */
-
-	.macro SAVE_ALL
-	pushl %eax
-	pushl %ebp
-	pushl %edi
-	pushl %esi
-	pushl %edx
-	pushl %ecx
-	pushl %ebx
-	.endm
-
-	.macro RESTORE_ALL
-	popl %ebx
-	popl %ecx
-	popl %edx
-	popl %esi
-	popl %edi
-	popl %ebp
-	popl %eax
-	.endm
-
 #endif /* CONFIG_X86_64 */
 
 /*
diff --git a/arch/x86/entry/common.c b/arch/x86/entry/common.c
index 0366374..e79d93d 100644
--- a/arch/x86/entry/common.c
+++ b/arch/x86/entry/common.c
@@ -26,6 +26,7 @@
 #include <asm/traps.h>
 #include <asm/vdso.h>
 #include <asm/uaccess.h>
+#include <asm/cpufeature.h>
 
 #define CREATE_TRACE_POINTS
 #include <trace/events/syscalls.h>
@@ -44,6 +45,8 @@
 	CT_WARN_ON(ct_state() != CONTEXT_USER);
 	user_exit();
 }
+#else
+static inline void enter_from_user_mode(void) {}
 #endif
 
 static void do_audit_syscall_entry(struct pt_regs *regs, u32 arch)
@@ -84,17 +87,6 @@
 
 	work = ACCESS_ONCE(ti->flags) & _TIF_WORK_SYSCALL_ENTRY;
 
-#ifdef CONFIG_CONTEXT_TRACKING
-	/*
-	 * If TIF_NOHZ is set, we are required to call user_exit() before
-	 * doing anything that could touch RCU.
-	 */
-	if (work & _TIF_NOHZ) {
-		enter_from_user_mode();
-		work &= ~_TIF_NOHZ;
-	}
-#endif
-
 #ifdef CONFIG_SECCOMP
 	/*
 	 * Do seccomp first -- it should minimize exposure of other
@@ -171,16 +163,6 @@
 	if (IS_ENABLED(CONFIG_DEBUG_ENTRY))
 		BUG_ON(regs != task_pt_regs(current));
 
-	/*
-	 * If we stepped into a sysenter/syscall insn, it trapped in
-	 * kernel mode; do_debug() cleared TF and set TIF_SINGLESTEP.
-	 * If user-mode had set TF itself, then it's still clear from
-	 * do_debug() and we need to set it again to restore the user
-	 * state.  If we entered on the slow path, TF was already set.
-	 */
-	if (work & _TIF_SINGLESTEP)
-		regs->flags |= X86_EFLAGS_TF;
-
 #ifdef CONFIG_SECCOMP
 	/*
 	 * Call seccomp_phase2 before running the other hooks so that
@@ -268,6 +250,7 @@
 /* Called with IRQs disabled. */
 __visible inline void prepare_exit_to_usermode(struct pt_regs *regs)
 {
+	struct thread_info *ti = pt_regs_to_thread_info(regs);
 	u32 cached_flags;
 
 	if (IS_ENABLED(CONFIG_PROVE_LOCKING) && WARN_ON(!irqs_disabled()))
@@ -275,12 +258,22 @@
 
 	lockdep_sys_exit();
 
-	cached_flags =
-		READ_ONCE(pt_regs_to_thread_info(regs)->flags);
+	cached_flags = READ_ONCE(ti->flags);
 
 	if (unlikely(cached_flags & EXIT_TO_USERMODE_LOOP_FLAGS))
 		exit_to_usermode_loop(regs, cached_flags);
 
+#ifdef CONFIG_COMPAT
+	/*
+	 * Compat syscalls set TS_COMPAT.  Make sure we clear it before
+	 * returning to user mode.  We need to clear it *after* signal
+	 * handling, because syscall restart has a fixup for compat
+	 * syscalls.  The fixup is exercised by the ptrace_syscall_32
+	 * selftest.
+	 */
+	ti->status &= ~TS_COMPAT;
+#endif
+
 	user_enter();
 }
 
@@ -332,33 +325,45 @@
 	if (unlikely(cached_flags & SYSCALL_EXIT_WORK_FLAGS))
 		syscall_slow_exit_work(regs, cached_flags);
 
-#ifdef CONFIG_COMPAT
-	/*
-	 * Compat syscalls set TS_COMPAT.  Make sure we clear it before
-	 * returning to user mode.
-	 */
-	ti->status &= ~TS_COMPAT;
-#endif
-
 	local_irq_disable();
 	prepare_exit_to_usermode(regs);
 }
 
+#ifdef CONFIG_X86_64
+__visible void do_syscall_64(struct pt_regs *regs)
+{
+	struct thread_info *ti = pt_regs_to_thread_info(regs);
+	unsigned long nr = regs->orig_ax;
+
+	enter_from_user_mode();
+	local_irq_enable();
+
+	if (READ_ONCE(ti->flags) & _TIF_WORK_SYSCALL_ENTRY)
+		nr = syscall_trace_enter(regs);
+
+	/*
+	 * NB: Native and x32 syscalls are dispatched from the same
+	 * table.  The only functional difference is the x32 bit in
+	 * regs->orig_ax, which changes the behavior of some syscalls.
+	 */
+	if (likely((nr & __SYSCALL_MASK) < NR_syscalls)) {
+		regs->ax = sys_call_table[nr & __SYSCALL_MASK](
+			regs->di, regs->si, regs->dx,
+			regs->r10, regs->r8, regs->r9);
+	}
+
+	syscall_return_slowpath(regs);
+}
+#endif
+
 #if defined(CONFIG_X86_32) || defined(CONFIG_IA32_EMULATION)
 /*
- * Does a 32-bit syscall.  Called with IRQs on and does all entry and
- * exit work and returns with IRQs off.  This function is extremely hot
- * in workloads that use it, and it's usually called from
+ * Does a 32-bit syscall.  Called with IRQs on in CONTEXT_KERNEL.  Does
+ * all entry and exit work and returns with IRQs off.  This function is
+ * extremely hot in workloads that use it, and it's usually called from
  * do_fast_syscall_32, so forcibly inline it to improve performance.
  */
-#ifdef CONFIG_X86_32
-/* 32-bit kernels use a trap gate for INT80, and the asm code calls here. */
-__visible
-#else
-/* 64-bit kernels use do_syscall_32_irqs_off() instead. */
-static
-#endif
-__always_inline void do_syscall_32_irqs_on(struct pt_regs *regs)
+static __always_inline void do_syscall_32_irqs_on(struct pt_regs *regs)
 {
 	struct thread_info *ti = pt_regs_to_thread_info(regs);
 	unsigned int nr = (unsigned int)regs->orig_ax;
@@ -393,14 +398,13 @@
 	syscall_return_slowpath(regs);
 }
 
-#ifdef CONFIG_X86_64
-/* Handles INT80 on 64-bit kernels */
-__visible void do_syscall_32_irqs_off(struct pt_regs *regs)
+/* Handles int $0x80 */
+__visible void do_int80_syscall_32(struct pt_regs *regs)
 {
+	enter_from_user_mode();
 	local_irq_enable();
 	do_syscall_32_irqs_on(regs);
 }
-#endif
 
 /* Returns 0 to return using IRET or 1 to return using SYSEXIT/SYSRETL. */
 __visible long do_fast_syscall_32(struct pt_regs *regs)
@@ -420,12 +424,11 @@
 	 */
 	regs->ip = landing_pad;
 
-	/*
-	 * Fetch EBP from where the vDSO stashed it.
-	 *
-	 * WARNING: We are in CONTEXT_USER and RCU isn't paying attention!
-	 */
+	enter_from_user_mode();
+
 	local_irq_enable();
+
+	/* Fetch EBP from where the vDSO stashed it. */
 	if (
 #ifdef CONFIG_X86_64
 		/*
@@ -443,9 +446,6 @@
 		/* User code screwed up. */
 		local_irq_disable();
 		regs->ax = -EFAULT;
-#ifdef CONFIG_CONTEXT_TRACKING
-		enter_from_user_mode();
-#endif
 		prepare_exit_to_usermode(regs);
 		return 0;	/* Keep it simple: use IRET. */
 	}
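
The new do_syscall_64() above dispatches through sys_call_table after masking off the x32 marker bit and bounds-checking the result against NR_syscalls. That control flow, mask then compare then indexed call through a table of same-signature handlers, is easy to model outside the kernel; the snippet below does so with a toy table and an assumed mask value, purely to illustrate the shape of the dispatch.

/*
 * Toy model of the dispatch in do_syscall_64(): strip the x32 marker
 * bit, bounds-check, then call through a table.  The mask value and
 * the table contents are made up for the example.
 */
#include <stdio.h>

#define X32_BIT		0x40000000UL		/* assumed marker bit */
#define SYSCALL_MASK	(~X32_BIT)		/* mirrors __SYSCALL_MASK */

typedef long (*syscall_fn)(long a1, long a2);

static long toy_read(long a1, long a2)  { return a1 + a2; }
static long toy_write(long a1, long a2) { return a1 * a2; }

static const syscall_fn table[] = { toy_read, toy_write };
#define NR_TOY_SYSCALLS	(sizeof(table) / sizeof(table[0]))

static long dispatch(unsigned long nr, long a1, long a2)
{
	/* Same shape as do_syscall_64(): mask first, then bounds-check. */
	if ((nr & SYSCALL_MASK) < NR_TOY_SYSCALLS)
		return table[nr & SYSCALL_MASK](a1, a2);
	return -38;				/* -ENOSYS */
}

int main(void)
{
	printf("%ld\n", dispatch(1, 6, 7));		/* toy_write -> 42 */
	printf("%ld\n", dispatch(1 | X32_BIT, 6, 7));	/* same slot, x32 bit stripped */
	printf("%ld\n", dispatch(99, 0, 0));		/* out of range -> -38 */
	return 0;
}
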
diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S
index bb3e376..10868aa 100644
--- a/arch/x86/entry/entry_32.S
+++ b/arch/x86/entry/entry_32.S
@@ -40,7 +40,7 @@
 #include <asm/processor-flags.h>
 #include <asm/ftrace.h>
 #include <asm/irq_vectors.h>
-#include <asm/cpufeature.h>
+#include <asm/cpufeatures.h>
 #include <asm/alternative-asm.h>
 #include <asm/asm.h>
 #include <asm/smap.h>
@@ -287,14 +287,64 @@
 END(resume_kernel)
 #endif
 
-	# SYSENTER  call handler stub
+GLOBAL(__begin_SYSENTER_singlestep_region)
+/*
+ * All code from here through __end_SYSENTER_singlestep_region is subject
+ * to being single-stepped if a user program sets TF and executes SYSENTER.
+ * There is absolutely nothing that we can do to prevent this from happening
+ * (thanks Intel!).  To keep our handling of this situation as simple as
+ * possible, we handle TF just like AC and NT, except that our #DB handler
+ * will ignore all of the single-step traps generated in this range.
+ */
+
+#ifdef CONFIG_XEN
+/*
+ * Xen doesn't set %esp to be precisely what the normal SYSENTER
+ * entry point expects, so fix it up before using the normal path.
+ */
+ENTRY(xen_sysenter_target)
+	addl	$5*4, %esp			/* remove xen-provided frame */
+	jmp	sysenter_past_esp
+#endif
+
+/*
+ * 32-bit SYSENTER entry.
+ *
+ * 32-bit system calls through the vDSO's __kernel_vsyscall enter here
+ * if X86_FEATURE_SEP is available.  This is the preferred system call
+ * entry on 32-bit systems.
+ *
+ * The SYSENTER instruction, in principle, should *only* occur in the
+ * vDSO.  In practice, a small number of Android devices were shipped
+ * with a copy of Bionic that inlined a SYSENTER instruction.  This
+ * never happened in any of Google's Bionic versions -- it only happened
+ * in a narrow range of Intel-provided versions.
+ *
+ * SYSENTER loads SS, ESP, CS, and EIP from previously programmed MSRs.
+ * IF and VM in RFLAGS are cleared (IOW: interrupts are off).
+ * SYSENTER does not save anything on the stack,
+ * and does not save old EIP (!!!), ESP, or EFLAGS.
+ *
+ * To avoid losing track of EFLAGS.VM (and thus potentially corrupting
+ * user and/or vm86 state), we explicitly disable the SYSENTER
+ * instruction in vm86 mode by reprogramming the MSRs.
+ *
+ * Arguments:
+ * eax  system call number
+ * ebx  arg1
+ * ecx  arg2
+ * edx  arg3
+ * esi  arg4
+ * edi  arg5
+ * ebp  user stack
+ * 0(%ebp) arg6
+ */
 ENTRY(entry_SYSENTER_32)
 	movl	TSS_sysenter_sp0(%esp), %esp
 sysenter_past_esp:
 	pushl	$__USER_DS		/* pt_regs->ss */
 	pushl	%ebp			/* pt_regs->sp (stashed in bp) */
 	pushfl				/* pt_regs->flags (except IF = 0) */
-	ASM_CLAC			/* Clear AC after saving FLAGS */
 	orl	$X86_EFLAGS_IF, (%esp)	/* Fix IF */
 	pushl	$__USER_CS		/* pt_regs->cs */
 	pushl	$0			/* pt_regs->ip = 0 (placeholder) */
@@ -302,6 +352,29 @@
 	SAVE_ALL pt_regs_ax=$-ENOSYS	/* save rest */
 
 	/*
+	 * SYSENTER doesn't filter flags, so we need to clear NT, AC
+	 * and TF ourselves.  To save a few cycles, we can check whether
+	 * either was set instead of doing an unconditional popfl.
+	 * This needs to happen before enabling interrupts so that
+	 * we don't get preempted with NT set.
+	 *
+	 * If TF is set, we will single-step all the way to here -- do_debug
+	 * will ignore all the traps.  (Yes, this is slow, but so is
+	 * single-stepping in general.  This allows us to avoid having
+	 * more complicated code to handle the case where a user program
+	 * forces us to single-step through the SYSENTER entry code.)
+	 *
+	 * NB.: .Lsysenter_fix_flags is a label with the code under it moved
+	 * out-of-line as an optimization: NT is unlikely to be set in the
+	 * majority of the cases and instead of polluting the I$ unnecessarily,
+	 * we're keeping that code behind a branch which will predict as
+	 * not-taken and therefore its instructions won't be fetched.
+	 */
+	testl	$X86_EFLAGS_NT|X86_EFLAGS_AC|X86_EFLAGS_TF, PT_EFLAGS(%esp)
+	jnz	.Lsysenter_fix_flags
+.Lsysenter_flags_fixed:
+
+	/*
 	 * User mode is traced as though IRQs are on, and SYSENTER
 	 * turned them off.
 	 */
@@ -327,6 +400,15 @@
 	popl	%eax			/* pt_regs->ax */
 
 	/*
+	 * Restore all flags except IF. (We restore IF separately because
+	 * STI gives a one-instruction window in which we won't be interrupted,
+	 * whereas POPF does not.)
+	 */
+	addl	$PT_EFLAGS-PT_DS, %esp	/* point esp at pt_regs->flags */
+	btr	$X86_EFLAGS_IF_BIT, (%esp)
+	popfl
+
+	/*
 	 * Return back to the vDSO, which will pop ecx and edx.
 	 * Don't bother with DS and ES (they already contain __USER_DS).
 	 */
@@ -339,28 +421,63 @@
 .popsection
 	_ASM_EXTABLE(1b, 2b)
 	PTGS_TO_GS_EX
+
+.Lsysenter_fix_flags:
+	pushl	$X86_EFLAGS_FIXED
+	popfl
+	jmp	.Lsysenter_flags_fixed
+GLOBAL(__end_SYSENTER_singlestep_region)
 ENDPROC(entry_SYSENTER_32)
 
-	# system call handler stub
+/*
+ * 32-bit legacy system call entry.
+ *
+ * 32-bit x86 Linux system calls traditionally used the INT $0x80
+ * instruction.  INT $0x80 lands here.
+ *
+ * This entry point can be used by any 32-bit program to perform system calls.
+ * Instances of INT $0x80 can be found inline in various programs and
+ * libraries.  It is also used by the vDSO's __kernel_vsyscall
+ * fallback for hardware that doesn't support a faster entry method.
+ * Restarted 32-bit system calls also fall back to INT $0x80
+ * regardless of what instruction was originally used to do the system
+ * call.  (64-bit programs can use INT $0x80 as well, but they can
+ * only run on 64-bit kernels and therefore land in
+ * entry_INT80_compat.)
+ *
+ * This is considered a slow path.  It is not used by most libc
+ * implementations on modern hardware except during process startup.
+ *
+ * Arguments:
+ * eax  system call number
+ * ebx  arg1
+ * ecx  arg2
+ * edx  arg3
+ * esi  arg4
+ * edi  arg5
+ * ebp  arg6
+ */
 ENTRY(entry_INT80_32)
 	ASM_CLAC
 	pushl	%eax			/* pt_regs->orig_ax */
 	SAVE_ALL pt_regs_ax=$-ENOSYS	/* save rest */
 
 	/*
-	 * User mode is traced as though IRQs are on.  Unlike the 64-bit
-	 * case, INT80 is a trap gate on 32-bit kernels, so interrupts
-	 * are already on (unless user code is messing around with iopl).
+	 * User mode is traced as though IRQs are on, and the interrupt gate
+	 * turned them off.
 	 */
+	TRACE_IRQS_OFF
 
 	movl	%esp, %eax
-	call	do_syscall_32_irqs_on
+	call	do_int80_syscall_32
 .Lsyscall_32_done:
 
 restore_all:
 	TRACE_IRQS_IRET
 restore_all_notrace:
 #ifdef CONFIG_X86_ESPFIX32
+	ALTERNATIVE	"jmp restore_nocheck", "", X86_BUG_ESPFIX
+
 	movl	PT_EFLAGS(%esp), %eax		# mix EFLAGS, SS and CS
 	/*
 	 * Warning: PT_OLDSS(%esp) contains the wrong/random values if we
@@ -387,19 +504,6 @@
 
 #ifdef CONFIG_X86_ESPFIX32
 ldt_ss:
-#ifdef CONFIG_PARAVIRT
-	/*
-	 * The kernel can't run on a non-flat stack if paravirt mode
-	 * is active.  Rather than try to fixup the high bits of
-	 * ESP, bypass this code entirely.  This may break DOSemu
-	 * and/or Wine support in a paravirt VM, although the option
-	 * is still available to implement the setting of the high
-	 * 16-bits in the INTERRUPT_RETURN paravirt-op.
-	 */
-	cmpl	$0, pv_info+PARAVIRT_enabled
-	jne	restore_nocheck
-#endif
-
 /*
  * Setup and switch to ESPFIX stack
  *
@@ -632,14 +736,6 @@
 END(spurious_interrupt_bug)
 
 #ifdef CONFIG_XEN
-/*
- * Xen doesn't set %esp to be precisely what the normal SYSENTER
- * entry point expects, so fix it up before using the normal path.
- */
-ENTRY(xen_sysenter_target)
-	addl	$5*4, %esp			/* remove xen-provided frame */
-	jmp	sysenter_past_esp
-
 ENTRY(xen_hypervisor_callback)
 	pushl	$-1				/* orig_ax = -1 => not a system call */
 	SAVE_ALL
@@ -939,51 +1035,48 @@
 	jmp	ret_from_exception
 END(page_fault)
 
-/*
- * Debug traps and NMI can happen at the one SYSENTER instruction
- * that sets up the real kernel stack. Check here, since we can't
- * allow the wrong stack to be used.
- *
- * "TSS_sysenter_sp0+12" is because the NMI/debug handler will have
- * already pushed 3 words if it hits on the sysenter instruction:
- * eflags, cs and eip.
- *
- * We just load the right stack, and push the three (known) values
- * by hand onto the new stack - while updating the return eip past
- * the instruction that would have done it for sysenter.
- */
-.macro FIX_STACK offset ok label
-	cmpw	$__KERNEL_CS, 4(%esp)
-	jne	\ok
-\label:
-	movl	TSS_sysenter_sp0 + \offset(%esp), %esp
-	pushfl
-	pushl	$__KERNEL_CS
-	pushl	$sysenter_past_esp
-.endm
-
 ENTRY(debug)
+	/*
+	 * #DB can happen at the first instruction of
+	 * entry_SYSENTER_32 or in Xen's SYSENTER prologue.  If this
+	 * happens, then we will be running on a very small stack.  We
+	 * need to detect this condition and switch to the thread
+	 * stack before calling any C code at all.
+	 *
+	 * If you edit this code, keep in mind that NMIs can happen in here.
+	 */
 	ASM_CLAC
-	cmpl	$entry_SYSENTER_32, (%esp)
-	jne	debug_stack_correct
-	FIX_STACK 12, debug_stack_correct, debug_esp_fix_insn
-debug_stack_correct:
 	pushl	$-1				# mark this as an int
 	SAVE_ALL
-	TRACE_IRQS_OFF
 	xorl	%edx, %edx			# error code 0
 	movl	%esp, %eax			# pt_regs pointer
+
+	/* Are we currently on the SYSENTER stack? */
+	PER_CPU(cpu_tss + CPU_TSS_SYSENTER_stack + SIZEOF_SYSENTER_stack, %ecx)
+	subl	%eax, %ecx	/* ecx = (end of SYSENTER_stack) - esp */
+	cmpl	$SIZEOF_SYSENTER_stack, %ecx
+	jb	.Ldebug_from_sysenter_stack
+
+	TRACE_IRQS_OFF
 	call	do_debug
 	jmp	ret_from_exception
+
+.Ldebug_from_sysenter_stack:
+	/* We're on the SYSENTER stack.  Switch off. */
+	movl	%esp, %ebp
+	movl	PER_CPU_VAR(cpu_current_top_of_stack), %esp
+	TRACE_IRQS_OFF
+	call	do_debug
+	movl	%ebp, %esp
+	jmp	ret_from_exception
 END(debug)
 
 /*
- * NMI is doubly nasty. It can happen _while_ we're handling
- * a debug fault, and the debug fault hasn't yet been able to
- * clear up the stack. So we first check whether we got  an
- * NMI on the sysenter entry path, but after that we need to
- * check whether we got an NMI on the debug path where the debug
- * fault happened on the sysenter path.
+ * NMI is doubly nasty.  It can happen on the first instruction of
+ * entry_SYSENTER_32 (just like #DB), but it can also interrupt the beginning
+ * of the #DB handler even if that #DB in turn hit before entry_SYSENTER_32
+ * switched stacks.  We handle both conditions by simply checking whether we
+ * interrupted kernel code running on the SYSENTER stack.
  */
 ENTRY(nmi)
 	ASM_CLAC
@@ -994,41 +1087,32 @@
 	popl	%eax
 	je	nmi_espfix_stack
 #endif
-	cmpl	$entry_SYSENTER_32, (%esp)
-	je	nmi_stack_fixup
-	pushl	%eax
-	movl	%esp, %eax
-	/*
-	 * Do not access memory above the end of our stack page,
-	 * it might not exist.
-	 */
-	andl	$(THREAD_SIZE-1), %eax
-	cmpl	$(THREAD_SIZE-20), %eax
-	popl	%eax
-	jae	nmi_stack_correct
-	cmpl	$entry_SYSENTER_32, 12(%esp)
-	je	nmi_debug_stack_check
-nmi_stack_correct:
-	pushl	%eax
+
+	pushl	%eax				# pt_regs->orig_ax
 	SAVE_ALL
 	xorl	%edx, %edx			# zero error code
 	movl	%esp, %eax			# pt_regs pointer
+
+	/* Are we currently on the SYSENTER stack? */
+	PER_CPU(cpu_tss + CPU_TSS_SYSENTER_stack + SIZEOF_SYSENTER_stack, %ecx)
+	subl	%eax, %ecx	/* ecx = (end of SYSENTER_stack) - esp */
+	cmpl	$SIZEOF_SYSENTER_stack, %ecx
+	jb	.Lnmi_from_sysenter_stack
+
+	/* Not on SYSENTER stack. */
 	call	do_nmi
 	jmp	restore_all_notrace
 
-nmi_stack_fixup:
-	FIX_STACK 12, nmi_stack_correct, 1
-	jmp	nmi_stack_correct
-
-nmi_debug_stack_check:
-	cmpw	$__KERNEL_CS, 16(%esp)
-	jne	nmi_stack_correct
-	cmpl	$debug, (%esp)
-	jb	nmi_stack_correct
-	cmpl	$debug_esp_fix_insn, (%esp)
-	ja	nmi_stack_correct
-	FIX_STACK 24, nmi_stack_correct, 1
-	jmp	nmi_stack_correct
+.Lnmi_from_sysenter_stack:
+	/*
+	 * We're on the SYSENTER stack.  Switch off.  No one (not even debug)
+	 * is using the thread stack right now, so it's safe for us to use it.
+	 */
+	movl	%esp, %ebp
+	movl	PER_CPU_VAR(cpu_current_top_of_stack), %esp
+	call	do_nmi
+	movl	%ebp, %esp
+	jmp	restore_all_notrace
 
 #ifdef CONFIG_X86_ESPFIX32
 nmi_espfix_stack:
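
The #DB and NMI entries above share one idiom for deciding whether they
interrupted code running on the tiny SYSENTER stack: compute
(end of SYSENTER_stack) - %esp and compare it against the stack's size, so a
single unsigned comparison covers both bounds.  A minimal C sketch of that
check, with illustrative names (the real code does it in assembly using the
PER_CPU offsets generated by asm-offsets):

	#include <stdbool.h>

	/*
	 * True when esp points into (stack_end - stack_size, stack_end].
	 * If esp is above stack_end, the subtraction wraps around to a huge
	 * unsigned value and the comparison fails, so both bounds are covered
	 * by one compare-and-branch, just like the cmpl/jb pair above.
	 */
	static bool on_sysenter_stack(unsigned long esp,
				      unsigned long stack_end,
				      unsigned long stack_size)
	{
		return (stack_end - esp) < stack_size;
	}
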
diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
index 9d34d3c..858b555 100644
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -103,6 +103,16 @@
 /*
  * 64-bit SYSCALL instruction entry. Up to 6 arguments in registers.
  *
+ * This is the only entry point used for 64-bit system calls.  The
+ * hardware interface is reasonably well designed and the register to
+ * argument mapping Linux uses fits well with the registers that are
+ * available when SYSCALL is used.
+ *
+ * SYSCALL instructions can be found inlined in libc implementations as
+ * well as some other programs and libraries.  There are also a handful
+ * of SYSCALL instructions in the vDSO, used, for example, as the
+ * fallbacks in clock_gettime() and gettimeofday().
+ *
  * 64-bit SYSCALL saves rip to rcx, clears rflags.RF, then saves rflags to r11,
  * then loads new ss, cs, and rip from previously programmed MSRs.
  * rflags gets masked by a value from another MSR (so CLD and CLAC
@@ -145,17 +155,11 @@
 	movq	%rsp, PER_CPU_VAR(rsp_scratch)
 	movq	PER_CPU_VAR(cpu_current_top_of_stack), %rsp
 
+	TRACE_IRQS_OFF
+
 	/* Construct struct pt_regs on stack */
 	pushq	$__USER_DS			/* pt_regs->ss */
 	pushq	PER_CPU_VAR(rsp_scratch)	/* pt_regs->sp */
-	/*
-	 * Re-enable interrupts.
-	 * We use 'rsp_scratch' as a scratch space, hence irq-off block above
-	 * must execute atomically in the face of possible interrupt-driven
-	 * task preemption. We must enable interrupts only after we're done
-	 * with using rsp_scratch:
-	 */
-	ENABLE_INTERRUPTS(CLBR_NONE)
 	pushq	%r11				/* pt_regs->flags */
 	pushq	$__USER_CS			/* pt_regs->cs */
 	pushq	%rcx				/* pt_regs->ip */
@@ -171,9 +175,21 @@
 	pushq	%r11				/* pt_regs->r11 */
 	sub	$(6*8), %rsp			/* pt_regs->bp, bx, r12-15 not saved */
 
-	testl	$_TIF_WORK_SYSCALL_ENTRY, ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
-	jnz	tracesys
+	/*
+	 * If we need to do entry work or if we guess we'll need to do
+	 * exit work, go straight to the slow path.
+	 */
+	testl	$_TIF_WORK_SYSCALL_ENTRY|_TIF_ALLWORK_MASK, ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
+	jnz	entry_SYSCALL64_slow_path
+
 entry_SYSCALL_64_fastpath:
+	/*
+	 * Easy case: enable interrupts and issue the syscall.  If the syscall
+	 * needs pt_regs, we'll call a stub that disables interrupts again
+	 * and jumps to the slow path.
+	 */
+	TRACE_IRQS_ON
+	ENABLE_INTERRUPTS(CLBR_NONE)
 #if __SYSCALL_MASK == ~0
 	cmpq	$__NR_syscall_max, %rax
 #else
@@ -182,103 +198,56 @@
 #endif
 	ja	1f				/* return -ENOSYS (already in pt_regs->ax) */
 	movq	%r10, %rcx
+
+	/*
+	 * This call instruction is handled specially in stub_ptregs_64.
+	 * It might end up jumping to the slow path.  If it jumps, RAX
+	 * and all argument registers are clobbered.
+	 */
 	call	*sys_call_table(, %rax, 8)
+.Lentry_SYSCALL_64_after_fastpath_call:
+
 	movq	%rax, RAX(%rsp)
 1:
-/*
- * Syscall return path ending with SYSRET (fast path).
- * Has incompletely filled pt_regs.
- */
-	LOCKDEP_SYS_EXIT
+
 	/*
-	 * We do not frame this tiny irq-off block with TRACE_IRQS_OFF/ON,
-	 * it is too small to ever cause noticeable irq latency.
+	 * If we get here, then we know that pt_regs is clean for SYSRET64.
+	 * If we see that no exit work is required (which we are required
+	 * to check with IRQs off), then we can go straight to SYSRET64.
 	 */
 	DISABLE_INTERRUPTS(CLBR_NONE)
-
-	/*
-	 * We must check ti flags with interrupts (or at least preemption)
-	 * off because we must *never* return to userspace without
-	 * processing exit work that is enqueued if we're preempted here.
-	 * In particular, returning to userspace with any of the one-shot
-	 * flags (TIF_NOTIFY_RESUME, TIF_USER_RETURN_NOTIFY, etc) set is
-	 * very bad.
-	 */
+	TRACE_IRQS_OFF
 	testl	$_TIF_ALLWORK_MASK, ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
-	jnz	int_ret_from_sys_call_irqs_off	/* Go to the slow path */
+	jnz	1f
 
-	RESTORE_C_REGS_EXCEPT_RCX_R11
+	LOCKDEP_SYS_EXIT
+	TRACE_IRQS_ON		/* user mode is traced as IRQs on */
 	movq	RIP(%rsp), %rcx
 	movq	EFLAGS(%rsp), %r11
+	RESTORE_C_REGS_EXCEPT_RCX_R11
 	movq	RSP(%rsp), %rsp
-	/*
-	 * 64-bit SYSRET restores rip from rcx,
-	 * rflags from r11 (but RF and VM bits are forced to 0),
-	 * cs and ss are loaded from MSRs.
-	 * Restoration of rflags re-enables interrupts.
-	 *
-	 * NB: On AMD CPUs with the X86_BUG_SYSRET_SS_ATTRS bug, the ss
-	 * descriptor is not reinitialized.  This means that we should
-	 * avoid SYSRET with SS == NULL, which could happen if we schedule,
-	 * exit the kernel, and re-enter using an interrupt vector.  (All
-	 * interrupt entries on x86_64 set SS to NULL.)  We prevent that
-	 * from happening by reloading SS in __switch_to.  (Actually
-	 * detecting the failure in 64-bit userspace is tricky but can be
-	 * done.)
-	 */
 	USERGS_SYSRET64
 
-GLOBAL(int_ret_from_sys_call_irqs_off)
+1:
+	/*
+	 * The fast path looked good when we started, but something changed
+	 * along the way and we need to switch to the slow path.  Calling
+	 * raise(3) will trigger this, for example.  IRQs are off.
+	 */
 	TRACE_IRQS_ON
 	ENABLE_INTERRUPTS(CLBR_NONE)
-	jmp int_ret_from_sys_call
-
-	/* Do syscall entry tracing */
-tracesys:
-	movq	%rsp, %rdi
-	movl	$AUDIT_ARCH_X86_64, %esi
-	call	syscall_trace_enter_phase1
-	test	%rax, %rax
-	jnz	tracesys_phase2			/* if needed, run the slow path */
-	RESTORE_C_REGS_EXCEPT_RAX		/* else restore clobbered regs */
-	movq	ORIG_RAX(%rsp), %rax
-	jmp	entry_SYSCALL_64_fastpath	/* and return to the fast path */
-
-tracesys_phase2:
-	SAVE_EXTRA_REGS
-	movq	%rsp, %rdi
-	movl	$AUDIT_ARCH_X86_64, %esi
-	movq	%rax, %rdx
-	call	syscall_trace_enter_phase2
-
-	/*
-	 * Reload registers from stack in case ptrace changed them.
-	 * We don't reload %rax because syscall_trace_entry_phase2() returned
-	 * the value it wants us to use in the table lookup.
-	 */
-	RESTORE_C_REGS_EXCEPT_RAX
-	RESTORE_EXTRA_REGS
-#if __SYSCALL_MASK == ~0
-	cmpq	$__NR_syscall_max, %rax
-#else
-	andl	$__SYSCALL_MASK, %eax
-	cmpl	$__NR_syscall_max, %eax
-#endif
-	ja	1f				/* return -ENOSYS (already in pt_regs->ax) */
-	movq	%r10, %rcx			/* fixup for C */
-	call	*sys_call_table(, %rax, 8)
-	movq	%rax, RAX(%rsp)
-1:
-	/* Use IRET because user could have changed pt_regs->foo */
-
-/*
- * Syscall return path ending with IRET.
- * Has correct iret frame.
- */
-GLOBAL(int_ret_from_sys_call)
 	SAVE_EXTRA_REGS
 	movq	%rsp, %rdi
 	call	syscall_return_slowpath	/* returns with IRQs disabled */
+	jmp	return_from_SYSCALL_64
+
+entry_SYSCALL64_slow_path:
+	/* IRQs are off. */
+	SAVE_EXTRA_REGS
+	movq	%rsp, %rdi
+	call	do_syscall_64		/* returns with IRQs disabled */
+
+return_from_SYSCALL_64:
 	RESTORE_EXTRA_REGS
 	TRACE_IRQS_IRETQ		/* we're about to change IF */
 
@@ -355,83 +324,45 @@
 	jmp	restore_c_regs_and_iret
 END(entry_SYSCALL_64)
 
-
-	.macro FORK_LIKE func
-ENTRY(stub_\func)
-	SAVE_EXTRA_REGS 8
-	jmp	sys_\func
-END(stub_\func)
-	.endm
-
-	FORK_LIKE  clone
-	FORK_LIKE  fork
-	FORK_LIKE  vfork
-
-ENTRY(stub_execve)
-	call	sys_execve
-return_from_execve:
-	testl	%eax, %eax
-	jz	1f
-	/* exec failed, can use fast SYSRET code path in this case */
-	ret
-1:
-	/* must use IRET code path (pt_regs->cs may have changed) */
-	addq	$8, %rsp
-	ZERO_EXTRA_REGS
-	movq	%rax, RAX(%rsp)
-	jmp	int_ret_from_sys_call
-END(stub_execve)
-/*
- * Remaining execve stubs are only 7 bytes long.
- * ENTRY() often aligns to 16 bytes, which in this case has no benefits.
- */
-	.align	8
-GLOBAL(stub_execveat)
-	call	sys_execveat
-	jmp	return_from_execve
-END(stub_execveat)
-
-#if defined(CONFIG_X86_X32_ABI)
-	.align	8
-GLOBAL(stub_x32_execve)
-	call	compat_sys_execve
-	jmp	return_from_execve
-END(stub_x32_execve)
-	.align	8
-GLOBAL(stub_x32_execveat)
-	call	compat_sys_execveat
-	jmp	return_from_execve
-END(stub_x32_execveat)
-#endif
-
-/*
- * sigreturn is special because it needs to restore all registers on return.
- * This cannot be done with SYSRET, so use the IRET return path instead.
- */
-ENTRY(stub_rt_sigreturn)
+ENTRY(stub_ptregs_64)
 	/*
-	 * SAVE_EXTRA_REGS result is not normally needed:
-	 * sigreturn overwrites all pt_regs->GPREGS.
-	 * But sigreturn can fail (!), and there is no easy way to detect that.
-	 * To make sure RESTORE_EXTRA_REGS doesn't restore garbage on error,
-	 * we SAVE_EXTRA_REGS here.
+	 * Syscalls marked as needing ptregs land here.
+	 * If we are on the fast path, we need to save the extra regs,
+	 * which we achieve by trying again on the slow path.  If we are on
+	 * the slow path, the extra regs are already saved.
+	 *
+	 * RAX stores a pointer to the C function implementing the syscall.
+	 * IRQs are on.
 	 */
-	SAVE_EXTRA_REGS 8
-	call	sys_rt_sigreturn
-return_from_stub:
-	addq	$8, %rsp
-	RESTORE_EXTRA_REGS
-	movq	%rax, RAX(%rsp)
-	jmp	int_ret_from_sys_call
-END(stub_rt_sigreturn)
+	cmpq	$.Lentry_SYSCALL_64_after_fastpath_call, (%rsp)
+	jne	1f
 
-#ifdef CONFIG_X86_X32_ABI
-ENTRY(stub_x32_rt_sigreturn)
-	SAVE_EXTRA_REGS 8
-	call	sys32_x32_rt_sigreturn
-	jmp	return_from_stub
-END(stub_x32_rt_sigreturn)
-#endif
+	/*
+	 * Called from fast path -- disable IRQs again, pop return address
+	 * and jump to slow path
+	 */
+	DISABLE_INTERRUPTS(CLBR_NONE)
+	TRACE_IRQS_OFF
+	popq	%rax
+	jmp	entry_SYSCALL64_slow_path
+
+1:
+	/* Called from the C slow path -- the extra regs are already saved */
+	jmp	*%rax
+END(stub_ptregs_64)
+
+.macro ptregs_stub func
+ENTRY(ptregs_\func)
+	leaq	\func(%rip), %rax
+	jmp	stub_ptregs_64
+END(ptregs_\func)
+.endm
+
+/* Instantiate ptregs_stub for each ptregs-using syscall */
+#define __SYSCALL_64_QUAL_(sym)
+#define __SYSCALL_64_QUAL_ptregs(sym) ptregs_stub sym
+#define __SYSCALL_64(nr, sym, qual) __SYSCALL_64_QUAL_##qual(sym)
+#include <asm/syscalls_64.h>
 
 /*
  * A newly forked process directly context switches into this address.
@@ -439,7 +370,6 @@
  * rdi: prev task we switched from
  */
 ENTRY(ret_from_fork)
-
 	LOCK ; btr $TIF_FORK, TI_flags(%r8)
 
 	pushq	$0x0002
@@ -447,28 +377,32 @@
 
 	call	schedule_tail			/* rdi: 'prev' task parameter */
 
-	RESTORE_EXTRA_REGS
-
 	testb	$3, CS(%rsp)			/* from kernel_thread? */
+	jnz	1f
 
 	/*
-	 * By the time we get here, we have no idea whether our pt_regs,
-	 * ti flags, and ti status came from the 64-bit SYSCALL fast path,
-	 * the slow path, or one of the 32-bit compat paths.
-	 * Use IRET code path to return, since it can safely handle
-	 * all of the above.
+	 * We came from kernel_thread.  This code path is quite twisted, and
+	 * someone should clean it up.
+	 *
+	 * copy_thread_tls stashes the function pointer in RBX and the
+	 * parameter to be passed in RBP.  The called function is permitted
+	 * to call do_execve and thereby jump to user mode.
 	 */
-	jnz	int_ret_from_sys_call
-
-	/*
-	 * We came from kernel_thread
-	 * nb: we depend on RESTORE_EXTRA_REGS above
-	 */
-	movq	%rbp, %rdi
-	call	*%rbx
+	movq	RBP(%rsp), %rdi
+	call	*RBX(%rsp)
 	movl	$0, RAX(%rsp)
-	RESTORE_EXTRA_REGS
-	jmp	int_ret_from_sys_call
+
+	/*
+	 * Fall through as though we're exiting a syscall.  This makes a
+	 * twisted sort of sense if we just called do_execve.
+	 */
+
+1:
+	movq	%rsp, %rdi
+	call	syscall_return_slowpath	/* returns with IRQs disabled */
+	TRACE_IRQS_ON			/* user mode is traced as IRQS on */
+	SWAPGS
+	jmp	restore_regs_and_iret
 END(ret_from_fork)
 
 /*
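
The rewritten entry_SYSCALL_64 above boils down to a simple policy: if any
entry work (ptrace, seccomp, audit) or already-pending exit work is flagged,
go straight to the C slow path; otherwise run the syscall on the fast path
with IRQs on, then re-check the exit-work flags with IRQs off and bail to the
slow exit only if something appeared in the meantime.  A rough C rendering of
that control flow -- a sketch only, with an illustrative function name; the
real fast path stays in assembly so it can finish with SYSRET64, and the
argument marshalling shown follows what the new do_syscall_64() helper does:

	/* illustrative sketch, not a drop-in replacement for the asm */
	void sketch_entry_SYSCALL_64(struct pt_regs *regs)
	{
		struct thread_info *ti = current_thread_info();
		unsigned long nr = regs->orig_ax;

		if (READ_ONCE(ti->flags) &
		    (_TIF_WORK_SYSCALL_ENTRY | _TIF_ALLWORK_MASK)) {
			do_syscall_64(regs);		/* slow path, returns via IRET */
			return;
		}

		local_irq_enable();			/* fast path runs with IRQs on */
		if (nr <= __NR_syscall_max)		/* else ax already holds -ENOSYS */
			regs->ax = sys_call_table[nr](regs->di, regs->si, regs->dx,
						      regs->r10, regs->r8, regs->r9);
		local_irq_disable();			/* exit work must be checked with IRQs off */

		if (READ_ONCE(ti->flags) & _TIF_ALLWORK_MASK) {
			local_irq_enable();
			syscall_return_slowpath(regs);	/* finish via the IRET path */
			return;
		}
		/* no exit work: the assembly returns with SYSRET64 here */
	}
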
diff --git a/arch/x86/entry/entry_64_compat.S b/arch/x86/entry/entry_64_compat.S
index 3c990ee..847f2f0 100644
--- a/arch/x86/entry/entry_64_compat.S
+++ b/arch/x86/entry/entry_64_compat.S
@@ -19,12 +19,21 @@
 	.section .entry.text, "ax"
 
 /*
- * 32-bit SYSENTER instruction entry.
+ * 32-bit SYSENTER entry.
  *
- * SYSENTER loads ss, rsp, cs, and rip from previously programmed MSRs.
- * IF and VM in rflags are cleared (IOW: interrupts are off).
+ * 32-bit system calls through the vDSO's __kernel_vsyscall enter here
+ * on 64-bit kernels running on Intel CPUs.
+ *
+ * The SYSENTER instruction, in principle, should *only* occur in the
+ * vDSO.  In practice, a small number of Android devices were shipped
+ * with a copy of Bionic that inlined a SYSENTER instruction.  This
+ * never happened in any of Google's Bionic versions -- it only happened
+ * in a narrow range of Intel-provided versions.
+ *
+ * SYSENTER loads SS, RSP, CS, and RIP from previously programmed MSRs.
+ * IF and VM in RFLAGS are cleared (IOW: interrupts are off).
  * SYSENTER does not save anything on the stack,
- * and does not save old rip (!!!) and rflags.
+ * and does not save old RIP (!!!), RSP, or RFLAGS.
  *
  * Arguments:
  * eax  system call number
@@ -35,10 +44,6 @@
  * edi  arg5
  * ebp  user stack
  * 0(%ebp) arg6
- *
- * This is purely a fast path. For anything complicated we use the int 0x80
- * path below. We set up a complete hardware stack frame to share code
- * with the int 0x80 path.
  */
 ENTRY(entry_SYSENTER_compat)
 	/* Interrupts are off on entry. */
@@ -66,8 +71,6 @@
 	 */
 	pushfq				/* pt_regs->flags (except IF = 0) */
 	orl	$X86_EFLAGS_IF, (%rsp)	/* Fix saved flags */
-	ASM_CLAC			/* Clear AC after saving FLAGS */
-
 	pushq	$__USER32_CS		/* pt_regs->cs */
 	xorq    %r8,%r8
 	pushq	%r8			/* pt_regs->ip = 0 (placeholder) */
@@ -90,19 +93,25 @@
 	cld
 
 	/*
-	 * Sysenter doesn't filter flags, so we need to clear NT
+	 * SYSENTER doesn't filter flags, so we need to clear NT and AC
 	 * ourselves.  To save a few cycles, we can check whether
-	 * NT was set instead of doing an unconditional popfq.
+	 * either was set instead of doing an unconditional popfq.
 	 * This needs to happen before enabling interrupts so that
 	 * we don't get preempted with NT set.
 	 *
+	 * If TF is set, we will single-step all the way to here -- do_debug
+	 * will ignore all the traps.  (Yes, this is slow, but so is
+	 * single-stepping in general.  This allows us to avoid having
+	 * more complicated code to handle the case where a user program
+	 * forces us to single-step through the SYSENTER entry code.)
+	 *
 	 * NB.: .Lsysenter_fix_flags is a label with the code under it moved
 	 * out-of-line as an optimization: NT is unlikely to be set in the
 	 * majority of the cases and instead of polluting the I$ unnecessarily,
 	 * we're keeping that code behind a branch which will predict as
 	 * not-taken and therefore its instructions won't be fetched.
 	 */
-	testl	$X86_EFLAGS_NT, EFLAGS(%rsp)
+	testl	$X86_EFLAGS_NT|X86_EFLAGS_AC|X86_EFLAGS_TF, EFLAGS(%rsp)
 	jnz	.Lsysenter_fix_flags
 .Lsysenter_flags_fixed:
 
@@ -123,20 +132,42 @@
 	pushq	$X86_EFLAGS_FIXED
 	popfq
 	jmp	.Lsysenter_flags_fixed
+GLOBAL(__end_entry_SYSENTER_compat)
 ENDPROC(entry_SYSENTER_compat)
 
 /*
- * 32-bit SYSCALL instruction entry.
+ * 32-bit SYSCALL entry.
  *
- * 32-bit SYSCALL saves rip to rcx, clears rflags.RF, then saves rflags to r11,
- * then loads new ss, cs, and rip from previously programmed MSRs.
- * rflags gets masked by a value from another MSR (so CLD and CLAC
- * are not needed). SYSCALL does not save anything on the stack
- * and does not change rsp.
+ * 32-bit system calls through the vDSO's __kernel_vsyscall enter here
+ * on 64-bit kernels running on AMD CPUs.
  *
- * Note: rflags saving+masking-with-MSR happens only in Long mode
+ * The SYSCALL instruction, in principle, should *only* occur in the
+ * vDSO.  In practice, it appears that this really is the case.
+ * As evidence:
+ *
+ *  - The calling convention for SYSCALL has changed several times without
+ *    anyone noticing.
+ *
+ *  - Prior to the in-kernel X86_BUG_SYSRET_SS_ATTRS fixup, any user
+ *    task that did SYSCALL without immediately reloading SS would
+ *    randomly crash.
+ *
+ *  - Most programmers do not directly target AMD CPUs, and the 32-bit
+ *    SYSCALL instruction does not exist on Intel CPUs.  Even on AMD
+ *    CPUs, Linux disables the SYSCALL instruction on 32-bit kernels
+ *    because the SYSCALL instruction in legacy/native 32-bit mode (as
+ *    opposed to compat mode) is sufficiently poorly designed as to be
+ *    essentially unusable.
+ *
+ * 32-bit SYSCALL saves RIP to RCX, clears RFLAGS.RF, then saves
+ * RFLAGS to R11, then loads new SS, CS, and RIP from previously
+ * programmed MSRs.  RFLAGS gets masked by a value from another MSR
+ * (so CLD and CLAC are not needed).  SYSCALL does not save anything on
+ * the stack and does not change RSP.
+ *
+ * Note: RFLAGS saving+masking-with-MSR happens only in Long mode
  * (in legacy 32-bit mode, IF, RF and VM bits are cleared and that's it).
- * Don't get confused: rflags saving+masking depends on Long Mode Active bit
+ * Don't get confused: RFLAGS saving+masking depends on Long Mode Active bit
  * (EFER.LMA=1), NOT on bitness of userspace where SYSCALL executes
  * or target CS descriptor's L bit (SYSCALL does not read segment descriptors).
  *
@@ -236,7 +267,21 @@
 END(entry_SYSCALL_compat)
 
 /*
- * Emulated IA32 system calls via int 0x80.
+ * 32-bit legacy system call entry.
+ *
+ * 32-bit x86 Linux system calls traditionally used the INT $0x80
+ * instruction.  INT $0x80 lands here.
+ *
+ * This entry point can be used by 32-bit and 64-bit programs to perform
+ * 32-bit system calls.  Instances of INT $0x80 can be found inline in
+ * various programs and libraries.  It is also used by the vDSO's
+ * __kernel_vsyscall fallback for hardware that doesn't support a faster
+ * entry method.  Restarted 32-bit system calls also fall back to INT
+ * $0x80 regardless of what instruction was originally used to do the
+ * system call.
+ *
+ * This is considered a slow path.  It is not used by most libc
+ * implementations on modern hardware except during process startup.
  *
  * Arguments:
  * eax  system call number
@@ -245,17 +290,8 @@
  * edx  arg3
  * esi  arg4
  * edi  arg5
- * ebp  arg6	(note: not saved in the stack frame, should not be touched)
- *
- * Notes:
- * Uses the same stack frame as the x86-64 version.
- * All registers except eax must be saved (but ptrace may violate that).
- * Arguments are zero extended. For system calls that want sign extension and
- * take long arguments a wrapper is needed. Most calls can just be called
- * directly.
- * Assumes it is only called from user space and entered with interrupts off.
+ * ebp  arg6
  */
-
 ENTRY(entry_INT80_compat)
 	/*
 	 * Interrupts are off on entry.
@@ -300,7 +336,7 @@
 	TRACE_IRQS_OFF
 
 	movq	%rsp, %rdi
-	call	do_syscall_32_irqs_off
+	call	do_int80_syscall_32
 .Lsyscall_32_done:
 
 	/* Go back to user mode. */
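
The NT/AC/TF handling added above (and mirrored in entry_32.S) tests the
EFLAGS image that was pushed at entry and, only when one of those bits is
set, resets the live RFLAGS to X86_EFLAGS_FIXED; the saved copy in pt_regs is
left alone, so the user's flags come back on return, and any single-step
traps raised inside the marked region are ignored by do_debug.  A hedged C
analogy of the out-of-line .Lsysenter_fix_flags branch (illustrative only;
the real fixup is the two-instruction push/popfq sequence):

	if (regs->flags & (X86_EFLAGS_NT | X86_EFLAGS_AC | X86_EFLAGS_TF))
		/* reset live RFLAGS; regs->flags is deliberately untouched */
		native_restore_fl(X86_EFLAGS_FIXED);
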
diff --git a/arch/x86/entry/syscall_32.c b/arch/x86/entry/syscall_32.c
index 9a66498..8f895ee 100644
--- a/arch/x86/entry/syscall_32.c
+++ b/arch/x86/entry/syscall_32.c
@@ -6,17 +6,11 @@
 #include <asm/asm-offsets.h>
 #include <asm/syscall.h>
 
-#ifdef CONFIG_IA32_EMULATION
-#define SYM(sym, compat) compat
-#else
-#define SYM(sym, compat) sym
-#endif
-
-#define __SYSCALL_I386(nr, sym, compat) extern asmlinkage long SYM(sym, compat)(unsigned long, unsigned long, unsigned long, unsigned long, unsigned long, unsigned long) ;
+#define __SYSCALL_I386(nr, sym, qual) extern asmlinkage long sym(unsigned long, unsigned long, unsigned long, unsigned long, unsigned long, unsigned long) ;
 #include <asm/syscalls_32.h>
 #undef __SYSCALL_I386
 
-#define __SYSCALL_I386(nr, sym, compat) [nr] = SYM(sym, compat),
+#define __SYSCALL_I386(nr, sym, qual) [nr] = sym,
 
 extern asmlinkage long sys_ni_syscall(unsigned long, unsigned long, unsigned long, unsigned long, unsigned long, unsigned long);
 
diff --git a/arch/x86/entry/syscall_64.c b/arch/x86/entry/syscall_64.c
index 41283d2..9dbc5ab 100644
--- a/arch/x86/entry/syscall_64.c
+++ b/arch/x86/entry/syscall_64.c
@@ -6,19 +6,14 @@
 #include <asm/asm-offsets.h>
 #include <asm/syscall.h>
 
-#define __SYSCALL_COMMON(nr, sym, compat) __SYSCALL_64(nr, sym, compat)
+#define __SYSCALL_64_QUAL_(sym) sym
+#define __SYSCALL_64_QUAL_ptregs(sym) ptregs_##sym
 
-#ifdef CONFIG_X86_X32_ABI
-# define __SYSCALL_X32(nr, sym, compat) __SYSCALL_64(nr, sym, compat)
-#else
-# define __SYSCALL_X32(nr, sym, compat) /* nothing */
-#endif
-
-#define __SYSCALL_64(nr, sym, compat) extern asmlinkage long sym(unsigned long, unsigned long, unsigned long, unsigned long, unsigned long, unsigned long) ;
+#define __SYSCALL_64(nr, sym, qual) extern asmlinkage long __SYSCALL_64_QUAL_##qual(sym)(unsigned long, unsigned long, unsigned long, unsigned long, unsigned long, unsigned long);
 #include <asm/syscalls_64.h>
 #undef __SYSCALL_64
 
-#define __SYSCALL_64(nr, sym, compat) [nr] = sym,
+#define __SYSCALL_64(nr, sym, qual) [nr] = __SYSCALL_64_QUAL_##qual(sym),
 
 extern long sys_ni_syscall(unsigned long, unsigned long, unsigned long, unsigned long, unsigned long, unsigned long);
 
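The two passes over <asm/syscalls_64.h> above first declare every entry point
and then build the table, with the new qualifier argument choosing between a
plain function and its ptregs_ stub.  As a concrete illustration, the table
line "59 64 execve sys_execve/ptregs" becomes
__SYSCALL_64(59, sys_execve, ptregs) in the generated header, which the
macros above expand to roughly:

	/* first inclusion: prototype of the stub the table will point at */
	extern asmlinkage long ptregs_sys_execve(unsigned long, unsigned long,
		unsigned long, unsigned long, unsigned long, unsigned long);

	/* second inclusion: the designated initializer in sys_call_table[] */
	[59] = ptregs_sys_execve,

An unqualified line such as "12 common brk sys_brk" passes an empty
qualifier, so __SYSCALL_64_QUAL_ leaves plain sys_brk in both places.  The
ptregs_sys_execve stub itself is emitted by the ptregs_stub macro in
entry_64.S, which loads the address of sys_execve into RAX and jumps to
stub_ptregs_64.
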
diff --git a/arch/x86/entry/syscalls/syscall_64.tbl b/arch/x86/entry/syscalls/syscall_64.tbl
index dc1040a..2e5b565 100644
--- a/arch/x86/entry/syscalls/syscall_64.tbl
+++ b/arch/x86/entry/syscalls/syscall_64.tbl
@@ -21,7 +21,7 @@
 12	common	brk			sys_brk
 13	64	rt_sigaction		sys_rt_sigaction
 14	common	rt_sigprocmask		sys_rt_sigprocmask
-15	64	rt_sigreturn		stub_rt_sigreturn
+15	64	rt_sigreturn		sys_rt_sigreturn/ptregs
 16	64	ioctl			sys_ioctl
 17	common	pread64			sys_pread64
 18	common	pwrite64		sys_pwrite64
@@ -62,10 +62,10 @@
 53	common	socketpair		sys_socketpair
 54	64	setsockopt		sys_setsockopt
 55	64	getsockopt		sys_getsockopt
-56	common	clone			stub_clone
-57	common	fork			stub_fork
-58	common	vfork			stub_vfork
-59	64	execve			stub_execve
+56	common	clone			sys_clone/ptregs
+57	common	fork			sys_fork/ptregs
+58	common	vfork			sys_vfork/ptregs
+59	64	execve			sys_execve/ptregs
 60	common	exit			sys_exit
 61	common	wait4			sys_wait4
 62	common	kill			sys_kill
@@ -178,7 +178,7 @@
 169	common	reboot			sys_reboot
 170	common	sethostname		sys_sethostname
 171	common	setdomainname		sys_setdomainname
-172	common	iopl			sys_iopl
+172	common	iopl			sys_iopl/ptregs
 173	common	ioperm			sys_ioperm
 174	64	create_module
 175	common	init_module		sys_init_module
@@ -328,7 +328,7 @@
 319	common	memfd_create		sys_memfd_create
 320	common	kexec_file_load		sys_kexec_file_load
 321	common	bpf			sys_bpf
-322	64	execveat		stub_execveat
+322	64	execveat		sys_execveat/ptregs
 323	common	userfaultfd		sys_userfaultfd
 324	common	membarrier		sys_membarrier
 325	common	mlock2			sys_mlock2
@@ -339,14 +339,14 @@
 # for native 64-bit operation.
 #
 512	x32	rt_sigaction		compat_sys_rt_sigaction
-513	x32	rt_sigreturn		stub_x32_rt_sigreturn
+513	x32	rt_sigreturn		sys32_x32_rt_sigreturn
 514	x32	ioctl			compat_sys_ioctl
 515	x32	readv			compat_sys_readv
 516	x32	writev			compat_sys_writev
 517	x32	recvfrom		compat_sys_recvfrom
 518	x32	sendmsg			compat_sys_sendmsg
 519	x32	recvmsg			compat_sys_recvmsg
-520	x32	execve			stub_x32_execve
+520	x32	execve			compat_sys_execve/ptregs
 521	x32	ptrace			compat_sys_ptrace
 522	x32	rt_sigpending		compat_sys_rt_sigpending
 523	x32	rt_sigtimedwait		compat_sys_rt_sigtimedwait
@@ -371,4 +371,4 @@
 542	x32	getsockopt		compat_sys_getsockopt
 543	x32	io_setup		compat_sys_io_setup
 544	x32	io_submit		compat_sys_io_submit
-545	x32	execveat		stub_x32_execveat
+545	x32	execveat		compat_sys_execveat/ptregs
diff --git a/arch/x86/entry/syscalls/syscalltbl.sh b/arch/x86/entry/syscalls/syscalltbl.sh
index 0e7f8ec..cd3d301 100644
--- a/arch/x86/entry/syscalls/syscalltbl.sh
+++ b/arch/x86/entry/syscalls/syscalltbl.sh
@@ -3,13 +3,63 @@
 in="$1"
 out="$2"
 
+syscall_macro() {
+    abi="$1"
+    nr="$2"
+    entry="$3"
+
+    # Entry can be either just a function name or "function/qualifier"
+    real_entry="${entry%%/*}"
+    qualifier="${entry:${#real_entry}}"		# Strip the function name
+    qualifier="${qualifier:1}"			# Strip the slash, if any
+
+    echo "__SYSCALL_${abi}($nr, $real_entry, $qualifier)"
+}
+
+emit() {
+    abi="$1"
+    nr="$2"
+    entry="$3"
+    compat="$4"
+
+    if [ "$abi" == "64" -a -n "$compat" ]; then
+	echo "a compat entry for a 64-bit syscall makes no sense" >&2
+	exit 1
+    fi
+
+    if [ -z "$compat" ]; then
+	if [ -n "$entry" ]; then
+	    syscall_macro "$abi" "$nr" "$entry"
+	fi
+    else
+	echo "#ifdef CONFIG_X86_32"
+	if [ -n "$entry" ]; then
+	    syscall_macro "$abi" "$nr" "$entry"
+	fi
+	echo "#else"
+	syscall_macro "$abi" "$nr" "$compat"
+	echo "#endif"
+    fi
+}
+
 grep '^[0-9]' "$in" | sort -n | (
     while read nr abi name entry compat; do
 	abi=`echo "$abi" | tr '[a-z]' '[A-Z]'`
-	if [ -n "$compat" ]; then
-	    echo "__SYSCALL_${abi}($nr, $entry, $compat)"
-	elif [ -n "$entry" ]; then
-	    echo "__SYSCALL_${abi}($nr, $entry, $entry)"
+	if [ "$abi" == "COMMON" -o "$abi" == "64" ]; then
+	    # COMMON is the same as 64, except that we don't expect X32
+	    # programs to use it.  Our expectation has nothing to do with
+	    # any generated code, so treat them the same.
+	    emit 64 "$nr" "$entry" "$compat"
+	elif [ "$abi" == "X32" ]; then
+	    # X32 is equivalent to 64 on an X32-compatible kernel.
+	    echo "#ifdef CONFIG_X86_X32_ABI"
+	    emit 64 "$nr" "$entry" "$compat"
+	    echo "#endif"
+	elif [ "$abi" == "I386" ]; then
+	    emit "$abi" "$nr" "$entry" "$compat"
+	else
+	    echo "Unknown abi $abi" >&2
+	    exit 1
 	fi
     done
 ) > "$out"
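
To make the script's behaviour concrete, here is what it would emit -- under
the rules sketched above -- for three of the 64-bit rows visible in this
patch: brk has no qualifier, rt_sigreturn carries /ptregs, and the x32
rt_sigreturn row is wrapped in the X32 config guard:

	__SYSCALL_64(12, sys_brk, )
	__SYSCALL_64(15, sys_rt_sigreturn, ptregs)
	#ifdef CONFIG_X86_X32_ABI
	__SYSCALL_64(513, sys32_x32_rt_sigreturn, )
	#endif
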
diff --git a/arch/x86/entry/vdso/vdso2c.h b/arch/x86/entry/vdso/vdso2c.h
index 3f69326..63a03bb 100644
--- a/arch/x86/entry/vdso/vdso2c.h
+++ b/arch/x86/entry/vdso/vdso2c.h
@@ -150,16 +150,9 @@
 	}
 	fprintf(outfile, "\n};\n\n");
 
-	fprintf(outfile, "static struct page *pages[%lu];\n\n",
-		mapping_size / 4096);
-
 	fprintf(outfile, "const struct vdso_image %s = {\n", name);
 	fprintf(outfile, "\t.data = raw_data,\n");
 	fprintf(outfile, "\t.size = %lu,\n", mapping_size);
-	fprintf(outfile, "\t.text_mapping = {\n");
-	fprintf(outfile, "\t\t.name = \"[vdso]\",\n");
-	fprintf(outfile, "\t\t.pages = pages,\n");
-	fprintf(outfile, "\t},\n");
 	if (alt_sec) {
 		fprintf(outfile, "\t.alt = %lu,\n",
 			(unsigned long)GET_LE(&alt_sec->sh_offset));
diff --git a/arch/x86/entry/vdso/vdso32-setup.c b/arch/x86/entry/vdso/vdso32-setup.c
index 08a317a..7853b53 100644
--- a/arch/x86/entry/vdso/vdso32-setup.c
+++ b/arch/x86/entry/vdso/vdso32-setup.c
@@ -11,7 +11,6 @@
 #include <linux/kernel.h>
 #include <linux/mm_types.h>
 
-#include <asm/cpufeature.h>
 #include <asm/processor.h>
 #include <asm/vdso.h>
 
diff --git a/arch/x86/entry/vdso/vdso32/system_call.S b/arch/x86/entry/vdso/vdso32/system_call.S
index 3a1d929..0109ac6 100644
--- a/arch/x86/entry/vdso/vdso32/system_call.S
+++ b/arch/x86/entry/vdso/vdso32/system_call.S
@@ -3,7 +3,7 @@
 */
 
 #include <asm/dwarf2.h>
-#include <asm/cpufeature.h>
+#include <asm/cpufeatures.h>
 #include <asm/alternative-asm.h>
 
 /*
diff --git a/arch/x86/entry/vdso/vma.c b/arch/x86/entry/vdso/vma.c
index b8f69e2..10f7045 100644
--- a/arch/x86/entry/vdso/vma.c
+++ b/arch/x86/entry/vdso/vma.c
@@ -20,6 +20,7 @@
 #include <asm/page.h>
 #include <asm/hpet.h>
 #include <asm/desc.h>
+#include <asm/cpufeature.h>
 
 #if defined(CONFIG_X86_64)
 unsigned int __read_mostly vdso64_enabled = 1;
@@ -27,13 +28,7 @@
 
 void __init init_vdso_image(const struct vdso_image *image)
 {
-	int i;
-	int npages = (image->size) / PAGE_SIZE;
-
 	BUG_ON(image->size % PAGE_SIZE != 0);
-	for (i = 0; i < npages; i++)
-		image->text_mapping.pages[i] =
-			virt_to_page(image->data + i*PAGE_SIZE);
 
 	apply_alternatives((struct alt_instr *)(image->data + image->alt),
 			   (struct alt_instr *)(image->data + image->alt +
@@ -90,18 +85,87 @@
 #endif
 }
 
+static int vdso_fault(const struct vm_special_mapping *sm,
+		      struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+	const struct vdso_image *image = vma->vm_mm->context.vdso_image;
+
+	if (!image || (vmf->pgoff << PAGE_SHIFT) >= image->size)
+		return VM_FAULT_SIGBUS;
+
+	vmf->page = virt_to_page(image->data + (vmf->pgoff << PAGE_SHIFT));
+	get_page(vmf->page);
+	return 0;
+}
+
+static const struct vm_special_mapping text_mapping = {
+	.name = "[vdso]",
+	.fault = vdso_fault,
+};
+
+static int vvar_fault(const struct vm_special_mapping *sm,
+		      struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+	const struct vdso_image *image = vma->vm_mm->context.vdso_image;
+	long sym_offset;
+	int ret = -EFAULT;
+
+	if (!image)
+		return VM_FAULT_SIGBUS;
+
+	sym_offset = (long)(vmf->pgoff << PAGE_SHIFT) +
+		image->sym_vvar_start;
+
+	/*
+	 * Sanity check: a symbol offset of zero means that the page
+	 * does not exist for this vdso image, not that the page is at
+	 * offset zero relative to the text mapping.  This should be
+	 * impossible here, because sym_offset should only be zero for
+	 * the page past the end of the vvar mapping.
+	 */
+	if (sym_offset == 0)
+		return VM_FAULT_SIGBUS;
+
+	if (sym_offset == image->sym_vvar_page) {
+		ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address,
+				    __pa_symbol(&__vvar_page) >> PAGE_SHIFT);
+	} else if (sym_offset == image->sym_hpet_page) {
+#ifdef CONFIG_HPET_TIMER
+		if (hpet_address && vclock_was_used(VCLOCK_HPET)) {
+			ret = vm_insert_pfn_prot(
+				vma,
+				(unsigned long)vmf->virtual_address,
+				hpet_address >> PAGE_SHIFT,
+				pgprot_noncached(PAGE_READONLY));
+		}
+#endif
+	} else if (sym_offset == image->sym_pvclock_page) {
+		struct pvclock_vsyscall_time_info *pvti =
+			pvclock_pvti_cpu0_va();
+		if (pvti && vclock_was_used(VCLOCK_PVCLOCK)) {
+			ret = vm_insert_pfn(
+				vma,
+				(unsigned long)vmf->virtual_address,
+				__pa(pvti) >> PAGE_SHIFT);
+		}
+	}
+
+	if (ret == 0 || ret == -EBUSY)
+		return VM_FAULT_NOPAGE;
+
+	return VM_FAULT_SIGBUS;
+}
+
 static int map_vdso(const struct vdso_image *image, bool calculate_addr)
 {
 	struct mm_struct *mm = current->mm;
 	struct vm_area_struct *vma;
 	unsigned long addr, text_start;
 	int ret = 0;
-	static struct page *no_pages[] = {NULL};
-	static struct vm_special_mapping vvar_mapping = {
+	static const struct vm_special_mapping vvar_mapping = {
 		.name = "[vvar]",
-		.pages = no_pages,
+		.fault = vvar_fault,
 	};
-	struct pvclock_vsyscall_time_info *pvti;
 
 	if (calculate_addr) {
 		addr = vdso_addr(current->mm->start_stack,
@@ -121,6 +185,7 @@
 
 	text_start = addr - image->sym_vvar_start;
 	current->mm->context.vdso = (void __user *)text_start;
+	current->mm->context.vdso_image = image;
 
 	/*
 	 * MAYWRITE to allow gdb to COW and set breakpoints
@@ -130,7 +195,7 @@
 				       image->size,
 				       VM_READ|VM_EXEC|
 				       VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
-				       &image->text_mapping);
+				       &text_mapping);
 
 	if (IS_ERR(vma)) {
 		ret = PTR_ERR(vma);
@@ -140,7 +205,8 @@
 	vma = _install_special_mapping(mm,
 				       addr,
 				       -image->sym_vvar_start,
-				       VM_READ|VM_MAYREAD,
+				       VM_READ|VM_MAYREAD|VM_IO|VM_DONTDUMP|
+				       VM_PFNMAP,
 				       &vvar_mapping);
 
 	if (IS_ERR(vma)) {
@@ -148,41 +214,6 @@
 		goto up_fail;
 	}
 
-	if (image->sym_vvar_page)
-		ret = remap_pfn_range(vma,
-				      text_start + image->sym_vvar_page,
-				      __pa_symbol(&__vvar_page) >> PAGE_SHIFT,
-				      PAGE_SIZE,
-				      PAGE_READONLY);
-
-	if (ret)
-		goto up_fail;
-
-#ifdef CONFIG_HPET_TIMER
-	if (hpet_address && image->sym_hpet_page) {
-		ret = io_remap_pfn_range(vma,
-			text_start + image->sym_hpet_page,
-			hpet_address >> PAGE_SHIFT,
-			PAGE_SIZE,
-			pgprot_noncached(PAGE_READONLY));
-
-		if (ret)
-			goto up_fail;
-	}
-#endif
-
-	pvti = pvclock_pvti_cpu0_va();
-	if (pvti && image->sym_pvclock_page) {
-		ret = remap_pfn_range(vma,
-				      text_start + image->sym_pvclock_page,
-				      __pa(pvti) >> PAGE_SHIFT,
-				      PAGE_SIZE,
-				      PAGE_READONLY);
-
-		if (ret)
-			goto up_fail;
-	}
-
 up_fail:
 	if (ret)
 		current->mm->context.vdso = NULL;
@@ -254,7 +285,7 @@
 #ifdef CONFIG_NUMA
 	node = cpu_to_node(cpu);
 #endif
-	if (cpu_has(&cpu_data(cpu), X86_FEATURE_RDTSCP))
+	if (static_cpu_has(X86_FEATURE_RDTSCP))
 		write_rdtscp_aux((node << 12) | cpu);
 
 	/*
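
The offset arithmetic in vvar_fault() above deserves spelling out: the [vvar]
VMA begins at text_start + sym_vvar_start, and sym_vvar_start is negative, so
adding it to the fault's byte offset recovers the image-relative symbol value
assigned by the vdso linker script; because both handlers look the image up
through vma->vm_mm->context.vdso_image, one static vm_special_mapping now
serves every vdso variant.  A worked example with hypothetical symbol values
(the real ones depend on the particular vdso image):

	sym_vvar_start   = -3 * PAGE_SIZE	/* [vvar] is the 3 pages before the text */
	sym_vvar_page    = -3 * PAGE_SIZE
	sym_hpet_page    = -2 * PAGE_SIZE
	sym_pvclock_page = -1 * PAGE_SIZE

	fault at pgoff 0:  sym_offset = (0 << PAGE_SHIFT) + sym_vvar_start
	                              = -3 * PAGE_SIZE == sym_vvar_page
	                   -> vm_insert_pfn() of __vvar_page
	fault at pgoff 1:  sym_offset = (1 << PAGE_SHIFT) + sym_vvar_start
	                              = -2 * PAGE_SIZE == sym_hpet_page
	                   -> map the HPET page, if that vclock is in use
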
diff --git a/arch/x86/entry/vsyscall/vsyscall_gtod.c b/arch/x86/entry/vsyscall/vsyscall_gtod.c
index 51e3304..0fb3a10 100644
--- a/arch/x86/entry/vsyscall/vsyscall_gtod.c
+++ b/arch/x86/entry/vsyscall/vsyscall_gtod.c
@@ -16,6 +16,8 @@
 #include <asm/vgtod.h>
 #include <asm/vvar.h>
 
+int vclocks_used __read_mostly;
+
 DEFINE_VVAR(struct vsyscall_gtod_data, vsyscall_gtod_data);
 
 void update_vsyscall_tz(void)
@@ -26,12 +28,17 @@
 
 void update_vsyscall(struct timekeeper *tk)
 {
+	int vclock_mode = tk->tkr_mono.clock->archdata.vclock_mode;
 	struct vsyscall_gtod_data *vdata = &vsyscall_gtod_data;
 
+	/* Mark the new vclock used. */
+	BUILD_BUG_ON(VCLOCK_MAX >= 32);
+	WRITE_ONCE(vclocks_used, READ_ONCE(vclocks_used) | (1 << vclock_mode));
+
 	gtod_write_begin(vdata);
 
 	/* copy vsyscall data */
-	vdata->vclock_mode	= tk->tkr_mono.clock->archdata.vclock_mode;
+	vdata->vclock_mode	= vclock_mode;
 	vdata->cycle_last	= tk->tkr_mono.cycle_last;
 	vdata->mask		= tk->tkr_mono.mask;
 	vdata->mult		= tk->tkr_mono.mult;
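
vclocks_used is a bitmask with one bit per VCLOCK_* mode (hence the
BUILD_BUG_ON against 32 and the new VCLOCK_MAX below), set the first time the
timekeeper switches to that mode; vvar_fault() consults it so that, for
instance, the HPET page is only mapped into a process if the HPET vclock has
actually been used.  The vclock_was_used() helper is not part of this hunk,
but presumably amounts to something like:

	extern int vclocks_used;

	static inline bool vclock_was_used(int vclock)
	{
		return READ_ONCE(vclocks_used) & (1 << vclock);
	}
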
diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h
index 7bfc85b..99afb66 100644
--- a/arch/x86/include/asm/alternative.h
+++ b/arch/x86/include/asm/alternative.h
@@ -152,12 +152,6 @@
 	".popsection"
 
 /*
- * This must be included *after* the definition of ALTERNATIVE due to
- * <asm/arch_hweight.h>
- */
-#include <asm/cpufeature.h>
-
-/*
  * Alternative instructions for different CPU types or capabilities.
  *
  * This allows to use optimized instructions even on generic binary
diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
index c80f6b6..0899cfc 100644
--- a/arch/x86/include/asm/apic.h
+++ b/arch/x86/include/asm/apic.h
@@ -6,7 +6,6 @@
 
 #include <asm/alternative.h>
 #include <asm/cpufeature.h>
-#include <asm/processor.h>
 #include <asm/apicdef.h>
 #include <linux/atomic.h>
 #include <asm/fixmap.h>
diff --git a/arch/x86/include/asm/arch_hweight.h b/arch/x86/include/asm/arch_hweight.h
index 259a7c1..02e799f 100644
--- a/arch/x86/include/asm/arch_hweight.h
+++ b/arch/x86/include/asm/arch_hweight.h
@@ -1,6 +1,8 @@
 #ifndef _ASM_X86_HWEIGHT_H
 #define _ASM_X86_HWEIGHT_H
 
+#include <asm/cpufeatures.h>
+
 #ifdef CONFIG_64BIT
 /* popcnt %edi, %eax -- redundant REX prefix for alignment */
 #define POPCNT32 ".byte 0xf3,0x40,0x0f,0xb8,0xc7"
diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h
index cfe3b95..7766d1c 100644
--- a/arch/x86/include/asm/bitops.h
+++ b/arch/x86/include/asm/bitops.h
@@ -91,7 +91,7 @@
  * If it's called on the same region of memory simultaneously, the effect
  * may be that only one operation succeeds.
  */
-static inline void __set_bit(long nr, volatile unsigned long *addr)
+static __always_inline void __set_bit(long nr, volatile unsigned long *addr)
 {
 	asm volatile("bts %1,%0" : ADDR : "Ir" (nr) : "memory");
 }
@@ -128,13 +128,13 @@
  * clear_bit() is atomic and implies release semantics before the memory
  * operation. It can be used for an unlock.
  */
-static inline void clear_bit_unlock(long nr, volatile unsigned long *addr)
+static __always_inline void clear_bit_unlock(long nr, volatile unsigned long *addr)
 {
 	barrier();
 	clear_bit(nr, addr);
 }
 
-static inline void __clear_bit(long nr, volatile unsigned long *addr)
+static __always_inline void __clear_bit(long nr, volatile unsigned long *addr)
 {
 	asm volatile("btr %1,%0" : ADDR : "Ir" (nr));
 }
@@ -151,7 +151,7 @@
  * No memory barrier is required here, because x86 cannot reorder stores past
  * older loads. Same principle as spin_unlock.
  */
-static inline void __clear_bit_unlock(long nr, volatile unsigned long *addr)
+static __always_inline void __clear_bit_unlock(long nr, volatile unsigned long *addr)
 {
 	barrier();
 	__clear_bit(nr, addr);
@@ -166,7 +166,7 @@
  * If it's called on the same region of memory simultaneously, the effect
  * may be that only one operation succeeds.
  */
-static inline void __change_bit(long nr, volatile unsigned long *addr)
+static __always_inline void __change_bit(long nr, volatile unsigned long *addr)
 {
 	asm volatile("btc %1,%0" : ADDR : "Ir" (nr));
 }
@@ -180,7 +180,7 @@
  * Note that @nr may be almost arbitrarily large; this function is not
  * restricted to acting on a single-word quantity.
  */
-static inline void change_bit(long nr, volatile unsigned long *addr)
+static __always_inline void change_bit(long nr, volatile unsigned long *addr)
 {
 	if (IS_IMMEDIATE(nr)) {
 		asm volatile(LOCK_PREFIX "xorb %1,%0"
@@ -201,7 +201,7 @@
  * This operation is atomic and cannot be reordered.
  * It also implies a memory barrier.
  */
-static inline int test_and_set_bit(long nr, volatile unsigned long *addr)
+static __always_inline int test_and_set_bit(long nr, volatile unsigned long *addr)
 {
 	GEN_BINARY_RMWcc(LOCK_PREFIX "bts", *addr, "Ir", nr, "%0", "c");
 }
@@ -228,7 +228,7 @@
  * If two examples of this operation race, one can appear to succeed
  * but actually fail.  You must protect multiple accesses with a lock.
  */
-static inline int __test_and_set_bit(long nr, volatile unsigned long *addr)
+static __always_inline int __test_and_set_bit(long nr, volatile unsigned long *addr)
 {
 	int oldbit;
 
@@ -247,7 +247,7 @@
  * This operation is atomic and cannot be reordered.
  * It also implies a memory barrier.
  */
-static inline int test_and_clear_bit(long nr, volatile unsigned long *addr)
+static __always_inline int test_and_clear_bit(long nr, volatile unsigned long *addr)
 {
 	GEN_BINARY_RMWcc(LOCK_PREFIX "btr", *addr, "Ir", nr, "%0", "c");
 }
@@ -268,7 +268,7 @@
  * accessed from a hypervisor on the same CPU if running in a VM: don't change
  * this without also updating arch/x86/kernel/kvm.c
  */
-static inline int __test_and_clear_bit(long nr, volatile unsigned long *addr)
+static __always_inline int __test_and_clear_bit(long nr, volatile unsigned long *addr)
 {
 	int oldbit;
 
@@ -280,7 +280,7 @@
 }
 
 /* WARNING: non atomic and it can be reordered! */
-static inline int __test_and_change_bit(long nr, volatile unsigned long *addr)
+static __always_inline int __test_and_change_bit(long nr, volatile unsigned long *addr)
 {
 	int oldbit;
 
@@ -300,7 +300,7 @@
  * This operation is atomic and cannot be reordered.
  * It also implies a memory barrier.
  */
-static inline int test_and_change_bit(long nr, volatile unsigned long *addr)
+static __always_inline int test_and_change_bit(long nr, volatile unsigned long *addr)
 {
 	GEN_BINARY_RMWcc(LOCK_PREFIX "btc", *addr, "Ir", nr, "%0", "c");
 }
@@ -311,7 +311,7 @@
 		(addr[nr >> _BITOPS_LONG_SHIFT])) != 0;
 }
 
-static inline int variable_test_bit(long nr, volatile const unsigned long *addr)
+static __always_inline int variable_test_bit(long nr, volatile const unsigned long *addr)
 {
 	int oldbit;
 
@@ -343,7 +343,7 @@
  *
  * Undefined if no bit exists, so code should check against 0 first.
  */
-static inline unsigned long __ffs(unsigned long word)
+static __always_inline unsigned long __ffs(unsigned long word)
 {
 	asm("rep; bsf %1,%0"
 		: "=r" (word)
@@ -357,7 +357,7 @@
  *
  * Undefined if no zero exists, so code should check against ~0UL first.
  */
-static inline unsigned long ffz(unsigned long word)
+static __always_inline unsigned long ffz(unsigned long word)
 {
 	asm("rep; bsf %1,%0"
 		: "=r" (word)
@@ -371,7 +371,7 @@
  *
  * Undefined if no set bit exists, so code should check against 0 first.
  */
-static inline unsigned long __fls(unsigned long word)
+static __always_inline unsigned long __fls(unsigned long word)
 {
 	asm("bsr %1,%0"
 	    : "=r" (word)
@@ -393,7 +393,7 @@
  * set bit if value is nonzero. The first (least significant) bit
  * is at position 1.
  */
-static inline int ffs(int x)
+static __always_inline int ffs(int x)
 {
 	int r;
 
@@ -434,7 +434,7 @@
  * set bit if value is nonzero. The last (most significant) bit is
  * at position 32.
  */
-static inline int fls(int x)
+static __always_inline int fls(int x)
 {
 	int r;
 
diff --git a/arch/x86/include/asm/clocksource.h b/arch/x86/include/asm/clocksource.h
index eda81dc..d194266 100644
--- a/arch/x86/include/asm/clocksource.h
+++ b/arch/x86/include/asm/clocksource.h
@@ -3,10 +3,11 @@
 #ifndef _ASM_X86_CLOCKSOURCE_H
 #define _ASM_X86_CLOCKSOURCE_H
 
-#define VCLOCK_NONE 0  /* No vDSO clock available.	*/
-#define VCLOCK_TSC  1  /* vDSO should use vread_tsc.	*/
-#define VCLOCK_HPET 2  /* vDSO should use vread_hpet.	*/
-#define VCLOCK_PVCLOCK 3 /* vDSO should use vread_pvclock. */
+#define VCLOCK_NONE	0  /* No vDSO clock available.	*/
+#define VCLOCK_TSC	1  /* vDSO should use vread_tsc.	*/
+#define VCLOCK_HPET	2  /* vDSO should use vread_hpet.	*/
+#define VCLOCK_PVCLOCK	3 /* vDSO should use vread_pvclock. */
+#define VCLOCK_MAX	3
 
 struct arch_clocksource_data {
 	int vclock_mode;
diff --git a/arch/x86/include/asm/cmpxchg.h b/arch/x86/include/asm/cmpxchg.h
index ad19841..9733361 100644
--- a/arch/x86/include/asm/cmpxchg.h
+++ b/arch/x86/include/asm/cmpxchg.h
@@ -2,6 +2,7 @@
 #define ASM_X86_CMPXCHG_H
 
 #include <linux/compiler.h>
+#include <asm/cpufeatures.h>
 #include <asm/alternative.h> /* Provides LOCK_PREFIX */
 
 /*
diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
index 7ad8c94..68e4e82 100644
--- a/arch/x86/include/asm/cpufeature.h
+++ b/arch/x86/include/asm/cpufeature.h
@@ -1,288 +1,7 @@
-/*
- * Defines x86 CPU feature bits
- */
 #ifndef _ASM_X86_CPUFEATURE_H
 #define _ASM_X86_CPUFEATURE_H
 
-#ifndef _ASM_X86_REQUIRED_FEATURES_H
-#include <asm/required-features.h>
-#endif
-
-#ifndef _ASM_X86_DISABLED_FEATURES_H
-#include <asm/disabled-features.h>
-#endif
-
-#define NCAPINTS	16	/* N 32-bit words worth of info */
-#define NBUGINTS	1	/* N 32-bit bug flags */
-
-/*
- * Note: If the comment begins with a quoted string, that string is used
- * in /proc/cpuinfo instead of the macro name.  If the string is "",
- * this feature bit is not displayed in /proc/cpuinfo at all.
- */
-
-/* Intel-defined CPU features, CPUID level 0x00000001 (edx), word 0 */
-#define X86_FEATURE_FPU		( 0*32+ 0) /* Onboard FPU */
-#define X86_FEATURE_VME		( 0*32+ 1) /* Virtual Mode Extensions */
-#define X86_FEATURE_DE		( 0*32+ 2) /* Debugging Extensions */
-#define X86_FEATURE_PSE		( 0*32+ 3) /* Page Size Extensions */
-#define X86_FEATURE_TSC		( 0*32+ 4) /* Time Stamp Counter */
-#define X86_FEATURE_MSR		( 0*32+ 5) /* Model-Specific Registers */
-#define X86_FEATURE_PAE		( 0*32+ 6) /* Physical Address Extensions */
-#define X86_FEATURE_MCE		( 0*32+ 7) /* Machine Check Exception */
-#define X86_FEATURE_CX8		( 0*32+ 8) /* CMPXCHG8 instruction */
-#define X86_FEATURE_APIC	( 0*32+ 9) /* Onboard APIC */
-#define X86_FEATURE_SEP		( 0*32+11) /* SYSENTER/SYSEXIT */
-#define X86_FEATURE_MTRR	( 0*32+12) /* Memory Type Range Registers */
-#define X86_FEATURE_PGE		( 0*32+13) /* Page Global Enable */
-#define X86_FEATURE_MCA		( 0*32+14) /* Machine Check Architecture */
-#define X86_FEATURE_CMOV	( 0*32+15) /* CMOV instructions */
-					  /* (plus FCMOVcc, FCOMI with FPU) */
-#define X86_FEATURE_PAT		( 0*32+16) /* Page Attribute Table */
-#define X86_FEATURE_PSE36	( 0*32+17) /* 36-bit PSEs */
-#define X86_FEATURE_PN		( 0*32+18) /* Processor serial number */
-#define X86_FEATURE_CLFLUSH	( 0*32+19) /* CLFLUSH instruction */
-#define X86_FEATURE_DS		( 0*32+21) /* "dts" Debug Store */
-#define X86_FEATURE_ACPI	( 0*32+22) /* ACPI via MSR */
-#define X86_FEATURE_MMX		( 0*32+23) /* Multimedia Extensions */
-#define X86_FEATURE_FXSR	( 0*32+24) /* FXSAVE/FXRSTOR, CR4.OSFXSR */
-#define X86_FEATURE_XMM		( 0*32+25) /* "sse" */
-#define X86_FEATURE_XMM2	( 0*32+26) /* "sse2" */
-#define X86_FEATURE_SELFSNOOP	( 0*32+27) /* "ss" CPU self snoop */
-#define X86_FEATURE_HT		( 0*32+28) /* Hyper-Threading */
-#define X86_FEATURE_ACC		( 0*32+29) /* "tm" Automatic clock control */
-#define X86_FEATURE_IA64	( 0*32+30) /* IA-64 processor */
-#define X86_FEATURE_PBE		( 0*32+31) /* Pending Break Enable */
-
-/* AMD-defined CPU features, CPUID level 0x80000001, word 1 */
-/* Don't duplicate feature flags which are redundant with Intel! */
-#define X86_FEATURE_SYSCALL	( 1*32+11) /* SYSCALL/SYSRET */
-#define X86_FEATURE_MP		( 1*32+19) /* MP Capable. */
-#define X86_FEATURE_NX		( 1*32+20) /* Execute Disable */
-#define X86_FEATURE_MMXEXT	( 1*32+22) /* AMD MMX extensions */
-#define X86_FEATURE_FXSR_OPT	( 1*32+25) /* FXSAVE/FXRSTOR optimizations */
-#define X86_FEATURE_GBPAGES	( 1*32+26) /* "pdpe1gb" GB pages */
-#define X86_FEATURE_RDTSCP	( 1*32+27) /* RDTSCP */
-#define X86_FEATURE_LM		( 1*32+29) /* Long Mode (x86-64) */
-#define X86_FEATURE_3DNOWEXT	( 1*32+30) /* AMD 3DNow! extensions */
-#define X86_FEATURE_3DNOW	( 1*32+31) /* 3DNow! */
-
-/* Transmeta-defined CPU features, CPUID level 0x80860001, word 2 */
-#define X86_FEATURE_RECOVERY	( 2*32+ 0) /* CPU in recovery mode */
-#define X86_FEATURE_LONGRUN	( 2*32+ 1) /* Longrun power control */
-#define X86_FEATURE_LRTI	( 2*32+ 3) /* LongRun table interface */
-
-/* Other features, Linux-defined mapping, word 3 */
-/* This range is used for feature bits which conflict or are synthesized */
-#define X86_FEATURE_CXMMX	( 3*32+ 0) /* Cyrix MMX extensions */
-#define X86_FEATURE_K6_MTRR	( 3*32+ 1) /* AMD K6 nonstandard MTRRs */
-#define X86_FEATURE_CYRIX_ARR	( 3*32+ 2) /* Cyrix ARRs (= MTRRs) */
-#define X86_FEATURE_CENTAUR_MCR	( 3*32+ 3) /* Centaur MCRs (= MTRRs) */
-/* cpu types for specific tunings: */
-#define X86_FEATURE_K8		( 3*32+ 4) /* "" Opteron, Athlon64 */
-#define X86_FEATURE_K7		( 3*32+ 5) /* "" Athlon */
-#define X86_FEATURE_P3		( 3*32+ 6) /* "" P3 */
-#define X86_FEATURE_P4		( 3*32+ 7) /* "" P4 */
-#define X86_FEATURE_CONSTANT_TSC ( 3*32+ 8) /* TSC ticks at a constant rate */
-#define X86_FEATURE_UP		( 3*32+ 9) /* smp kernel running on up */
-/* free, was #define X86_FEATURE_FXSAVE_LEAK ( 3*32+10) * "" FXSAVE leaks FOP/FIP/FOP */
-#define X86_FEATURE_ARCH_PERFMON ( 3*32+11) /* Intel Architectural PerfMon */
-#define X86_FEATURE_PEBS	( 3*32+12) /* Precise-Event Based Sampling */
-#define X86_FEATURE_BTS		( 3*32+13) /* Branch Trace Store */
-#define X86_FEATURE_SYSCALL32	( 3*32+14) /* "" syscall in ia32 userspace */
-#define X86_FEATURE_SYSENTER32	( 3*32+15) /* "" sysenter in ia32 userspace */
-#define X86_FEATURE_REP_GOOD	( 3*32+16) /* rep microcode works well */
-#define X86_FEATURE_MFENCE_RDTSC ( 3*32+17) /* "" Mfence synchronizes RDTSC */
-#define X86_FEATURE_LFENCE_RDTSC ( 3*32+18) /* "" Lfence synchronizes RDTSC */
-/* free, was #define X86_FEATURE_11AP	( 3*32+19) * "" Bad local APIC aka 11AP */
-#define X86_FEATURE_NOPL	( 3*32+20) /* The NOPL (0F 1F) instructions */
-#define X86_FEATURE_ALWAYS	( 3*32+21) /* "" Always-present feature */
-#define X86_FEATURE_XTOPOLOGY	( 3*32+22) /* cpu topology enum extensions */
-#define X86_FEATURE_TSC_RELIABLE ( 3*32+23) /* TSC is known to be reliable */
-#define X86_FEATURE_NONSTOP_TSC	( 3*32+24) /* TSC does not stop in C states */
-/* free, was #define X86_FEATURE_CLFLUSH_MONITOR ( 3*32+25) * "" clflush reqd with monitor */
-#define X86_FEATURE_EXTD_APICID	( 3*32+26) /* has extended APICID (8 bits) */
-#define X86_FEATURE_AMD_DCM     ( 3*32+27) /* multi-node processor */
-#define X86_FEATURE_APERFMPERF	( 3*32+28) /* APERFMPERF */
-#define X86_FEATURE_EAGER_FPU	( 3*32+29) /* "eagerfpu" Non lazy FPU restore */
-#define X86_FEATURE_NONSTOP_TSC_S3 ( 3*32+30) /* TSC doesn't stop in S3 state */
-
-/* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */
-#define X86_FEATURE_XMM3	( 4*32+ 0) /* "pni" SSE-3 */
-#define X86_FEATURE_PCLMULQDQ	( 4*32+ 1) /* PCLMULQDQ instruction */
-#define X86_FEATURE_DTES64	( 4*32+ 2) /* 64-bit Debug Store */
-#define X86_FEATURE_MWAIT	( 4*32+ 3) /* "monitor" Monitor/Mwait support */
-#define X86_FEATURE_DSCPL	( 4*32+ 4) /* "ds_cpl" CPL Qual. Debug Store */
-#define X86_FEATURE_VMX		( 4*32+ 5) /* Hardware virtualization */
-#define X86_FEATURE_SMX		( 4*32+ 6) /* Safer mode */
-#define X86_FEATURE_EST		( 4*32+ 7) /* Enhanced SpeedStep */
-#define X86_FEATURE_TM2		( 4*32+ 8) /* Thermal Monitor 2 */
-#define X86_FEATURE_SSSE3	( 4*32+ 9) /* Supplemental SSE-3 */
-#define X86_FEATURE_CID		( 4*32+10) /* Context ID */
-#define X86_FEATURE_SDBG	( 4*32+11) /* Silicon Debug */
-#define X86_FEATURE_FMA		( 4*32+12) /* Fused multiply-add */
-#define X86_FEATURE_CX16	( 4*32+13) /* CMPXCHG16B */
-#define X86_FEATURE_XTPR	( 4*32+14) /* Send Task Priority Messages */
-#define X86_FEATURE_PDCM	( 4*32+15) /* Performance Capabilities */
-#define X86_FEATURE_PCID	( 4*32+17) /* Process Context Identifiers */
-#define X86_FEATURE_DCA		( 4*32+18) /* Direct Cache Access */
-#define X86_FEATURE_XMM4_1	( 4*32+19) /* "sse4_1" SSE-4.1 */
-#define X86_FEATURE_XMM4_2	( 4*32+20) /* "sse4_2" SSE-4.2 */
-#define X86_FEATURE_X2APIC	( 4*32+21) /* x2APIC */
-#define X86_FEATURE_MOVBE	( 4*32+22) /* MOVBE instruction */
-#define X86_FEATURE_POPCNT      ( 4*32+23) /* POPCNT instruction */
-#define X86_FEATURE_TSC_DEADLINE_TIMER	( 4*32+24) /* Tsc deadline timer */
-#define X86_FEATURE_AES		( 4*32+25) /* AES instructions */
-#define X86_FEATURE_XSAVE	( 4*32+26) /* XSAVE/XRSTOR/XSETBV/XGETBV */
-#define X86_FEATURE_OSXSAVE	( 4*32+27) /* "" XSAVE enabled in the OS */
-#define X86_FEATURE_AVX		( 4*32+28) /* Advanced Vector Extensions */
-#define X86_FEATURE_F16C	( 4*32+29) /* 16-bit fp conversions */
-#define X86_FEATURE_RDRAND	( 4*32+30) /* The RDRAND instruction */
-#define X86_FEATURE_HYPERVISOR	( 4*32+31) /* Running on a hypervisor */
-
-/* VIA/Cyrix/Centaur-defined CPU features, CPUID level 0xC0000001, word 5 */
-#define X86_FEATURE_XSTORE	( 5*32+ 2) /* "rng" RNG present (xstore) */
-#define X86_FEATURE_XSTORE_EN	( 5*32+ 3) /* "rng_en" RNG enabled */
-#define X86_FEATURE_XCRYPT	( 5*32+ 6) /* "ace" on-CPU crypto (xcrypt) */
-#define X86_FEATURE_XCRYPT_EN	( 5*32+ 7) /* "ace_en" on-CPU crypto enabled */
-#define X86_FEATURE_ACE2	( 5*32+ 8) /* Advanced Cryptography Engine v2 */
-#define X86_FEATURE_ACE2_EN	( 5*32+ 9) /* ACE v2 enabled */
-#define X86_FEATURE_PHE		( 5*32+10) /* PadLock Hash Engine */
-#define X86_FEATURE_PHE_EN	( 5*32+11) /* PHE enabled */
-#define X86_FEATURE_PMM		( 5*32+12) /* PadLock Montgomery Multiplier */
-#define X86_FEATURE_PMM_EN	( 5*32+13) /* PMM enabled */
-
-/* More extended AMD flags: CPUID level 0x80000001, ecx, word 6 */
-#define X86_FEATURE_LAHF_LM	( 6*32+ 0) /* LAHF/SAHF in long mode */
-#define X86_FEATURE_CMP_LEGACY	( 6*32+ 1) /* If yes HyperThreading not valid */
-#define X86_FEATURE_SVM		( 6*32+ 2) /* Secure virtual machine */
-#define X86_FEATURE_EXTAPIC	( 6*32+ 3) /* Extended APIC space */
-#define X86_FEATURE_CR8_LEGACY	( 6*32+ 4) /* CR8 in 32-bit mode */
-#define X86_FEATURE_ABM		( 6*32+ 5) /* Advanced bit manipulation */
-#define X86_FEATURE_SSE4A	( 6*32+ 6) /* SSE-4A */
-#define X86_FEATURE_MISALIGNSSE ( 6*32+ 7) /* Misaligned SSE mode */
-#define X86_FEATURE_3DNOWPREFETCH ( 6*32+ 8) /* 3DNow prefetch instructions */
-#define X86_FEATURE_OSVW	( 6*32+ 9) /* OS Visible Workaround */
-#define X86_FEATURE_IBS		( 6*32+10) /* Instruction Based Sampling */
-#define X86_FEATURE_XOP		( 6*32+11) /* extended AVX instructions */
-#define X86_FEATURE_SKINIT	( 6*32+12) /* SKINIT/STGI instructions */
-#define X86_FEATURE_WDT		( 6*32+13) /* Watchdog timer */
-#define X86_FEATURE_LWP		( 6*32+15) /* Light Weight Profiling */
-#define X86_FEATURE_FMA4	( 6*32+16) /* 4 operands MAC instructions */
-#define X86_FEATURE_TCE		( 6*32+17) /* translation cache extension */
-#define X86_FEATURE_NODEID_MSR	( 6*32+19) /* NodeId MSR */
-#define X86_FEATURE_TBM		( 6*32+21) /* trailing bit manipulations */
-#define X86_FEATURE_TOPOEXT	( 6*32+22) /* topology extensions CPUID leafs */
-#define X86_FEATURE_PERFCTR_CORE ( 6*32+23) /* core performance counter extensions */
-#define X86_FEATURE_PERFCTR_NB  ( 6*32+24) /* NB performance counter extensions */
-#define X86_FEATURE_BPEXT	(6*32+26) /* data breakpoint extension */
-#define X86_FEATURE_PERFCTR_L2	( 6*32+28) /* L2 performance counter extensions */
-#define X86_FEATURE_MWAITX	( 6*32+29) /* MWAIT extension (MONITORX/MWAITX) */
-
-/*
- * Auxiliary flags: Linux defined - For features scattered in various
- * CPUID levels like 0x6, 0xA etc, word 7.
- *
- * Reuse free bits when adding new feature flags!
- */
-
-#define X86_FEATURE_CPB		( 7*32+ 2) /* AMD Core Performance Boost */
-#define X86_FEATURE_EPB		( 7*32+ 3) /* IA32_ENERGY_PERF_BIAS support */
-
-#define X86_FEATURE_HW_PSTATE	( 7*32+ 8) /* AMD HW-PState */
-#define X86_FEATURE_PROC_FEEDBACK ( 7*32+ 9) /* AMD ProcFeedbackInterface */
-
-#define X86_FEATURE_INTEL_PT	( 7*32+15) /* Intel Processor Trace */
-
-/* Virtualization flags: Linux defined, word 8 */
-#define X86_FEATURE_TPR_SHADOW  ( 8*32+ 0) /* Intel TPR Shadow */
-#define X86_FEATURE_VNMI        ( 8*32+ 1) /* Intel Virtual NMI */
-#define X86_FEATURE_FLEXPRIORITY ( 8*32+ 2) /* Intel FlexPriority */
-#define X86_FEATURE_EPT         ( 8*32+ 3) /* Intel Extended Page Table */
-#define X86_FEATURE_VPID        ( 8*32+ 4) /* Intel Virtual Processor ID */
-
-#define X86_FEATURE_VMMCALL     ( 8*32+15) /* Prefer vmmcall to vmcall */
-#define X86_FEATURE_XENPV       ( 8*32+16) /* "" Xen paravirtual guest */
-
-
-/* Intel-defined CPU features, CPUID level 0x00000007:0 (ebx), word 9 */
-#define X86_FEATURE_FSGSBASE	( 9*32+ 0) /* {RD/WR}{FS/GS}BASE instructions*/
-#define X86_FEATURE_TSC_ADJUST	( 9*32+ 1) /* TSC adjustment MSR 0x3b */
-#define X86_FEATURE_BMI1	( 9*32+ 3) /* 1st group bit manipulation extensions */
-#define X86_FEATURE_HLE		( 9*32+ 4) /* Hardware Lock Elision */
-#define X86_FEATURE_AVX2	( 9*32+ 5) /* AVX2 instructions */
-#define X86_FEATURE_SMEP	( 9*32+ 7) /* Supervisor Mode Execution Protection */
-#define X86_FEATURE_BMI2	( 9*32+ 8) /* 2nd group bit manipulation extensions */
-#define X86_FEATURE_ERMS	( 9*32+ 9) /* Enhanced REP MOVSB/STOSB */
-#define X86_FEATURE_INVPCID	( 9*32+10) /* Invalidate Processor Context ID */
-#define X86_FEATURE_RTM		( 9*32+11) /* Restricted Transactional Memory */
-#define X86_FEATURE_CQM		( 9*32+12) /* Cache QoS Monitoring */
-#define X86_FEATURE_MPX		( 9*32+14) /* Memory Protection Extension */
-#define X86_FEATURE_AVX512F	( 9*32+16) /* AVX-512 Foundation */
-#define X86_FEATURE_RDSEED	( 9*32+18) /* The RDSEED instruction */
-#define X86_FEATURE_ADX		( 9*32+19) /* The ADCX and ADOX instructions */
-#define X86_FEATURE_SMAP	( 9*32+20) /* Supervisor Mode Access Prevention */
-#define X86_FEATURE_PCOMMIT	( 9*32+22) /* PCOMMIT instruction */
-#define X86_FEATURE_CLFLUSHOPT	( 9*32+23) /* CLFLUSHOPT instruction */
-#define X86_FEATURE_CLWB	( 9*32+24) /* CLWB instruction */
-#define X86_FEATURE_AVX512PF	( 9*32+26) /* AVX-512 Prefetch */
-#define X86_FEATURE_AVX512ER	( 9*32+27) /* AVX-512 Exponential and Reciprocal */
-#define X86_FEATURE_AVX512CD	( 9*32+28) /* AVX-512 Conflict Detection */
-#define X86_FEATURE_SHA_NI	( 9*32+29) /* SHA1/SHA256 Instruction Extensions */
-
-/* Extended state features, CPUID level 0x0000000d:1 (eax), word 10 */
-#define X86_FEATURE_XSAVEOPT	(10*32+ 0) /* XSAVEOPT */
-#define X86_FEATURE_XSAVEC	(10*32+ 1) /* XSAVEC */
-#define X86_FEATURE_XGETBV1	(10*32+ 2) /* XGETBV with ECX = 1 */
-#define X86_FEATURE_XSAVES	(10*32+ 3) /* XSAVES/XRSTORS */
-
-/* Intel-defined CPU QoS Sub-leaf, CPUID level 0x0000000F:0 (edx), word 11 */
-#define X86_FEATURE_CQM_LLC	(11*32+ 1) /* LLC QoS if 1 */
-
-/* Intel-defined CPU QoS Sub-leaf, CPUID level 0x0000000F:1 (edx), word 12 */
-#define X86_FEATURE_CQM_OCCUP_LLC (12*32+ 0) /* LLC occupancy monitoring if 1 */
-
-/* AMD-defined CPU features, CPUID level 0x80000008 (ebx), word 13 */
-#define X86_FEATURE_CLZERO	(13*32+0) /* CLZERO instruction */
-
-/* Thermal and Power Management Leaf, CPUID level 0x00000006 (eax), word 14 */
-#define X86_FEATURE_DTHERM	(14*32+ 0) /* Digital Thermal Sensor */
-#define X86_FEATURE_IDA		(14*32+ 1) /* Intel Dynamic Acceleration */
-#define X86_FEATURE_ARAT	(14*32+ 2) /* Always Running APIC Timer */
-#define X86_FEATURE_PLN		(14*32+ 4) /* Intel Power Limit Notification */
-#define X86_FEATURE_PTS		(14*32+ 6) /* Intel Package Thermal Status */
-#define X86_FEATURE_HWP		(14*32+ 7) /* Intel Hardware P-states */
-#define X86_FEATURE_HWP_NOTIFY	(14*32+ 8) /* HWP Notification */
-#define X86_FEATURE_HWP_ACT_WINDOW (14*32+ 9) /* HWP Activity Window */
-#define X86_FEATURE_HWP_EPP	(14*32+10) /* HWP Energy Perf. Preference */
-#define X86_FEATURE_HWP_PKG_REQ (14*32+11) /* HWP Package Level Request */
-
-/* AMD SVM Feature Identification, CPUID level 0x8000000a (edx), word 15 */
-#define X86_FEATURE_NPT		(15*32+ 0) /* Nested Page Table support */
-#define X86_FEATURE_LBRV	(15*32+ 1) /* LBR Virtualization support */
-#define X86_FEATURE_SVML	(15*32+ 2) /* "svm_lock" SVM locking MSR */
-#define X86_FEATURE_NRIPS	(15*32+ 3) /* "nrip_save" SVM next_rip save */
-#define X86_FEATURE_TSCRATEMSR  (15*32+ 4) /* "tsc_scale" TSC scaling support */
-#define X86_FEATURE_VMCBCLEAN   (15*32+ 5) /* "vmcb_clean" VMCB clean bits support */
-#define X86_FEATURE_FLUSHBYASID (15*32+ 6) /* flush-by-ASID support */
-#define X86_FEATURE_DECODEASSISTS (15*32+ 7) /* Decode Assists support */
-#define X86_FEATURE_PAUSEFILTER (15*32+10) /* filtered pause intercept */
-#define X86_FEATURE_PFTHRESHOLD (15*32+12) /* pause filter threshold */
-
-/*
- * BUG word(s)
- */
-#define X86_BUG(x)		(NCAPINTS*32 + (x))
-
-#define X86_BUG_F00F		X86_BUG(0) /* Intel F00F */
-#define X86_BUG_FDIV		X86_BUG(1) /* FPU FDIV */
-#define X86_BUG_COMA		X86_BUG(2) /* Cyrix 6x86 coma */
-#define X86_BUG_AMD_TLB_MMATCH	X86_BUG(3) /* "tlb_mmatch" AMD Erratum 383 */
-#define X86_BUG_AMD_APIC_C1E	X86_BUG(4) /* "apic_c1e" AMD Erratum 400 */
-#define X86_BUG_11AP		X86_BUG(5) /* Bad local APIC aka 11AP */
-#define X86_BUG_FXSAVE_LEAK	X86_BUG(6) /* FXSAVE leaks FOP/FIP/FOP */
-#define X86_BUG_CLFLUSH_MONITOR	X86_BUG(7) /* AAI65, CLFLUSH required before MONITOR */
-#define X86_BUG_SYSRET_SS_ATTRS	X86_BUG(8) /* SYSRET doesn't fix up SS attrs */
+#include <asm/processor.h>
 
 #if defined(__KERNEL__) && !defined(__ASSEMBLY__)
 
@@ -369,8 +88,7 @@
  * is not relevant.
  */
 #define cpu_feature_enabled(bit)	\
-	(__builtin_constant_p(bit) && DISABLED_MASK_BIT_SET(bit) ? 0 :	\
-	 cpu_has(&boot_cpu_data, bit))
+	(__builtin_constant_p(bit) && DISABLED_MASK_BIT_SET(bit) ? 0 : static_cpu_has(bit))
 
 #define boot_cpu_has(bit)	cpu_has(&boot_cpu_data, bit)
 
@@ -406,106 +124,19 @@
 #define cpu_has_osxsave		boot_cpu_has(X86_FEATURE_OSXSAVE)
 #define cpu_has_hypervisor	boot_cpu_has(X86_FEATURE_HYPERVISOR)
 /*
- * Do not add any more of those clumsy macros - use static_cpu_has_safe() for
+ * Do not add any more of those clumsy macros - use static_cpu_has() for
  * fast paths and boot_cpu_has() otherwise!
  */
 
-#if __GNUC__ >= 4 && defined(CONFIG_X86_FAST_FEATURE_TESTS)
-extern void warn_pre_alternatives(void);
-extern bool __static_cpu_has_safe(u16 bit);
-
+#if defined(CC_HAVE_ASM_GOTO) && defined(CONFIG_X86_FAST_FEATURE_TESTS)
 /*
  * Static testing of CPU features.  Used the same as boot_cpu_has().
- * These are only valid after alternatives have run, but will statically
- * patch the target code for additional performance.
+ * These will statically patch the target code for additional
+ * performance.
  */
-static __always_inline __pure bool __static_cpu_has(u16 bit)
+static __always_inline __pure bool _static_cpu_has(u16 bit)
 {
-#ifdef CC_HAVE_ASM_GOTO
-
-#ifdef CONFIG_X86_DEBUG_STATIC_CPU_HAS
-
-		/*
-		 * Catch too early usage of this before alternatives
-		 * have run.
-		 */
-		asm_volatile_goto("1: jmp %l[t_warn]\n"
-			 "2:\n"
-			 ".section .altinstructions,\"a\"\n"
-			 " .long 1b - .\n"
-			 " .long 0\n"		/* no replacement */
-			 " .word %P0\n"		/* 1: do replace */
-			 " .byte 2b - 1b\n"	/* source len */
-			 " .byte 0\n"		/* replacement len */
-			 " .byte 0\n"		/* pad len */
-			 ".previous\n"
-			 /* skipping size check since replacement size = 0 */
-			 : : "i" (X86_FEATURE_ALWAYS) : : t_warn);
-
-#endif
-
-		asm_volatile_goto("1: jmp %l[t_no]\n"
-			 "2:\n"
-			 ".section .altinstructions,\"a\"\n"
-			 " .long 1b - .\n"
-			 " .long 0\n"		/* no replacement */
-			 " .word %P0\n"		/* feature bit */
-			 " .byte 2b - 1b\n"	/* source len */
-			 " .byte 0\n"		/* replacement len */
-			 " .byte 0\n"		/* pad len */
-			 ".previous\n"
-			 /* skipping size check since replacement size = 0 */
-			 : : "i" (bit) : : t_no);
-		return true;
-	t_no:
-		return false;
-
-#ifdef CONFIG_X86_DEBUG_STATIC_CPU_HAS
-	t_warn:
-		warn_pre_alternatives();
-		return false;
-#endif
-
-#else /* CC_HAVE_ASM_GOTO */
-
-		u8 flag;
-		/* Open-coded due to __stringify() in ALTERNATIVE() */
-		asm volatile("1: movb $0,%0\n"
-			     "2:\n"
-			     ".section .altinstructions,\"a\"\n"
-			     " .long 1b - .\n"
-			     " .long 3f - .\n"
-			     " .word %P1\n"		/* feature bit */
-			     " .byte 2b - 1b\n"		/* source len */
-			     " .byte 4f - 3f\n"		/* replacement len */
-			     " .byte 0\n"		/* pad len */
-			     ".previous\n"
-			     ".section .discard,\"aw\",@progbits\n"
-			     " .byte 0xff + (4f-3f) - (2b-1b)\n" /* size check */
-			     ".previous\n"
-			     ".section .altinstr_replacement,\"ax\"\n"
-			     "3: movb $1,%0\n"
-			     "4:\n"
-			     ".previous\n"
-			     : "=qm" (flag) : "i" (bit));
-		return flag;
-
-#endif /* CC_HAVE_ASM_GOTO */
-}
-
-#define static_cpu_has(bit)					\
-(								\
-	__builtin_constant_p(boot_cpu_has(bit)) ?		\
-		boot_cpu_has(bit) :				\
-	__builtin_constant_p(bit) ?				\
-		__static_cpu_has(bit) :				\
-		boot_cpu_has(bit)				\
-)
-
-static __always_inline __pure bool _static_cpu_has_safe(u16 bit)
-{
-#ifdef CC_HAVE_ASM_GOTO
-		asm_volatile_goto("1: jmp %l[t_dynamic]\n"
+		asm_volatile_goto("1: jmp 6f\n"
 			 "2:\n"
 			 ".skip -(((5f-4f) - (2b-1b)) > 0) * "
 			         "((5f-4f) - (2b-1b)),0x90\n"
@@ -530,66 +161,34 @@
 			 " .byte 0\n"			/* repl len */
 			 " .byte 0\n"			/* pad len */
 			 ".previous\n"
-			 : : "i" (bit), "i" (X86_FEATURE_ALWAYS)
-			 : : t_dynamic, t_no);
+			 ".section .altinstr_aux,\"ax\"\n"
+			 "6:\n"
+			 " testb %[bitnum],%[cap_byte]\n"
+			 " jnz %l[t_yes]\n"
+			 " jmp %l[t_no]\n"
+			 ".previous\n"
+			 : : "i" (bit), "i" (X86_FEATURE_ALWAYS),
+			     [bitnum] "i" (1 << (bit & 7)),
+			     [cap_byte] "m" (((const char *)boot_cpu_data.x86_capability)[bit >> 3])
+			 : : t_yes, t_no);
+	t_yes:
 		return true;
 	t_no:
 		return false;
-	t_dynamic:
-		return __static_cpu_has_safe(bit);
-#else
-		u8 flag;
-		/* Open-coded due to __stringify() in ALTERNATIVE() */
-		asm volatile("1: movb $2,%0\n"
-			     "2:\n"
-			     ".section .altinstructions,\"a\"\n"
-			     " .long 1b - .\n"		/* src offset */
-			     " .long 3f - .\n"		/* repl offset */
-			     " .word %P2\n"		/* always replace */
-			     " .byte 2b - 1b\n"		/* source len */
-			     " .byte 4f - 3f\n"		/* replacement len */
-			     " .byte 0\n"		/* pad len */
-			     ".previous\n"
-			     ".section .discard,\"aw\",@progbits\n"
-			     " .byte 0xff + (4f-3f) - (2b-1b)\n" /* size check */
-			     ".previous\n"
-			     ".section .altinstr_replacement,\"ax\"\n"
-			     "3: movb $0,%0\n"
-			     "4:\n"
-			     ".previous\n"
-			     ".section .altinstructions,\"a\"\n"
-			     " .long 1b - .\n"		/* src offset */
-			     " .long 5f - .\n"		/* repl offset */
-			     " .word %P1\n"		/* feature bit */
-			     " .byte 4b - 3b\n"		/* src len */
-			     " .byte 6f - 5f\n"		/* repl len */
-			     " .byte 0\n"		/* pad len */
-			     ".previous\n"
-			     ".section .discard,\"aw\",@progbits\n"
-			     " .byte 0xff + (6f-5f) - (4b-3b)\n" /* size check */
-			     ".previous\n"
-			     ".section .altinstr_replacement,\"ax\"\n"
-			     "5: movb $1,%0\n"
-			     "6:\n"
-			     ".previous\n"
-			     : "=qm" (flag)
-			     : "i" (bit), "i" (X86_FEATURE_ALWAYS));
-		return (flag == 2 ? __static_cpu_has_safe(bit) : flag);
-#endif /* CC_HAVE_ASM_GOTO */
 }
 
-#define static_cpu_has_safe(bit)				\
+#define static_cpu_has(bit)					\
 (								\
 	__builtin_constant_p(boot_cpu_has(bit)) ?		\
 		boot_cpu_has(bit) :				\
-		_static_cpu_has_safe(bit)			\
+		_static_cpu_has(bit)				\
 )
 #else
 /*
- * gcc 3.x is too stupid to do the static test; fall back to dynamic.
+ * Fall back to dynamic for gcc versions which don't support asm goto. Should be
+ * a minority now anyway.
  */
 #define static_cpu_has(bit)		boot_cpu_has(bit)
-#define static_cpu_has_safe(bit)	boot_cpu_has(bit)
 #endif
 
 #define cpu_has_bug(c, bit)		cpu_has(c, (bit))
@@ -597,7 +196,6 @@
 #define clear_cpu_bug(c, bit)		clear_cpu_cap(c, (bit))
 
 #define static_cpu_has_bug(bit)		static_cpu_has((bit))
-#define static_cpu_has_bug_safe(bit)	static_cpu_has_safe((bit))
 #define boot_cpu_has_bug(bit)		cpu_has_bug(&boot_cpu_data, (bit))
 
 #define MAX_CPU_FEATURES		(NCAPINTS * 32)
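
The hunk above merges static_cpu_has_safe() into static_cpu_has(): the asm-goto variant now falls back to a direct test of the capability byte in the out-of-line .altinstr_aux stub, so it is usable even before alternatives have run and a separate "safe" flavor is no longer needed. A minimal usage sketch, assuming a kernel build context (the function, feature bit and messages below are illustrative, not part of the patch):

	#include <linux/printk.h>
	#include <asm/cpufeature.h>

	static void example_report_erms(void)
	{
		/* Init/slow path: plain runtime test against boot_cpu_data. */
		if (boot_cpu_has(X86_FEATURE_ERMS))
			pr_info("ERMS present (runtime test)\n");

		/* Hot path: patched by alternatives; safe even before they run. */
		if (static_cpu_has(X86_FEATURE_ERMS))
			pr_info("ERMS present (statically patched test)\n");
	}
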
diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
new file mode 100644
index 0000000..074b760
--- /dev/null
+++ b/arch/x86/include/asm/cpufeatures.h
@@ -0,0 +1,300 @@
+#ifndef _ASM_X86_CPUFEATURES_H
+#define _ASM_X86_CPUFEATURES_H
+
+#ifndef _ASM_X86_REQUIRED_FEATURES_H
+#include <asm/required-features.h>
+#endif
+
+#ifndef _ASM_X86_DISABLED_FEATURES_H
+#include <asm/disabled-features.h>
+#endif
+
+/*
+ * Defines x86 CPU feature bits
+ */
+#define NCAPINTS	16	/* N 32-bit words worth of info */
+#define NBUGINTS	1	/* N 32-bit bug flags */
+
+/*
+ * Note: If the comment begins with a quoted string, that string is used
+ * in /proc/cpuinfo instead of the macro name.  If the string is "",
+ * this feature bit is not displayed in /proc/cpuinfo at all.
+ */
+
+/* Intel-defined CPU features, CPUID level 0x00000001 (edx), word 0 */
+#define X86_FEATURE_FPU		( 0*32+ 0) /* Onboard FPU */
+#define X86_FEATURE_VME		( 0*32+ 1) /* Virtual Mode Extensions */
+#define X86_FEATURE_DE		( 0*32+ 2) /* Debugging Extensions */
+#define X86_FEATURE_PSE		( 0*32+ 3) /* Page Size Extensions */
+#define X86_FEATURE_TSC		( 0*32+ 4) /* Time Stamp Counter */
+#define X86_FEATURE_MSR		( 0*32+ 5) /* Model-Specific Registers */
+#define X86_FEATURE_PAE		( 0*32+ 6) /* Physical Address Extensions */
+#define X86_FEATURE_MCE		( 0*32+ 7) /* Machine Check Exception */
+#define X86_FEATURE_CX8		( 0*32+ 8) /* CMPXCHG8 instruction */
+#define X86_FEATURE_APIC	( 0*32+ 9) /* Onboard APIC */
+#define X86_FEATURE_SEP		( 0*32+11) /* SYSENTER/SYSEXIT */
+#define X86_FEATURE_MTRR	( 0*32+12) /* Memory Type Range Registers */
+#define X86_FEATURE_PGE		( 0*32+13) /* Page Global Enable */
+#define X86_FEATURE_MCA		( 0*32+14) /* Machine Check Architecture */
+#define X86_FEATURE_CMOV	( 0*32+15) /* CMOV instructions */
+					  /* (plus FCMOVcc, FCOMI with FPU) */
+#define X86_FEATURE_PAT		( 0*32+16) /* Page Attribute Table */
+#define X86_FEATURE_PSE36	( 0*32+17) /* 36-bit PSEs */
+#define X86_FEATURE_PN		( 0*32+18) /* Processor serial number */
+#define X86_FEATURE_CLFLUSH	( 0*32+19) /* CLFLUSH instruction */
+#define X86_FEATURE_DS		( 0*32+21) /* "dts" Debug Store */
+#define X86_FEATURE_ACPI	( 0*32+22) /* ACPI via MSR */
+#define X86_FEATURE_MMX		( 0*32+23) /* Multimedia Extensions */
+#define X86_FEATURE_FXSR	( 0*32+24) /* FXSAVE/FXRSTOR, CR4.OSFXSR */
+#define X86_FEATURE_XMM		( 0*32+25) /* "sse" */
+#define X86_FEATURE_XMM2	( 0*32+26) /* "sse2" */
+#define X86_FEATURE_SELFSNOOP	( 0*32+27) /* "ss" CPU self snoop */
+#define X86_FEATURE_HT		( 0*32+28) /* Hyper-Threading */
+#define X86_FEATURE_ACC		( 0*32+29) /* "tm" Automatic clock control */
+#define X86_FEATURE_IA64	( 0*32+30) /* IA-64 processor */
+#define X86_FEATURE_PBE		( 0*32+31) /* Pending Break Enable */
+
+/* AMD-defined CPU features, CPUID level 0x80000001, word 1 */
+/* Don't duplicate feature flags which are redundant with Intel! */
+#define X86_FEATURE_SYSCALL	( 1*32+11) /* SYSCALL/SYSRET */
+#define X86_FEATURE_MP		( 1*32+19) /* MP Capable. */
+#define X86_FEATURE_NX		( 1*32+20) /* Execute Disable */
+#define X86_FEATURE_MMXEXT	( 1*32+22) /* AMD MMX extensions */
+#define X86_FEATURE_FXSR_OPT	( 1*32+25) /* FXSAVE/FXRSTOR optimizations */
+#define X86_FEATURE_GBPAGES	( 1*32+26) /* "pdpe1gb" GB pages */
+#define X86_FEATURE_RDTSCP	( 1*32+27) /* RDTSCP */
+#define X86_FEATURE_LM		( 1*32+29) /* Long Mode (x86-64) */
+#define X86_FEATURE_3DNOWEXT	( 1*32+30) /* AMD 3DNow! extensions */
+#define X86_FEATURE_3DNOW	( 1*32+31) /* 3DNow! */
+
+/* Transmeta-defined CPU features, CPUID level 0x80860001, word 2 */
+#define X86_FEATURE_RECOVERY	( 2*32+ 0) /* CPU in recovery mode */
+#define X86_FEATURE_LONGRUN	( 2*32+ 1) /* Longrun power control */
+#define X86_FEATURE_LRTI	( 2*32+ 3) /* LongRun table interface */
+
+/* Other features, Linux-defined mapping, word 3 */
+/* This range is used for feature bits which conflict or are synthesized */
+#define X86_FEATURE_CXMMX	( 3*32+ 0) /* Cyrix MMX extensions */
+#define X86_FEATURE_K6_MTRR	( 3*32+ 1) /* AMD K6 nonstandard MTRRs */
+#define X86_FEATURE_CYRIX_ARR	( 3*32+ 2) /* Cyrix ARRs (= MTRRs) */
+#define X86_FEATURE_CENTAUR_MCR	( 3*32+ 3) /* Centaur MCRs (= MTRRs) */
+/* cpu types for specific tunings: */
+#define X86_FEATURE_K8		( 3*32+ 4) /* "" Opteron, Athlon64 */
+#define X86_FEATURE_K7		( 3*32+ 5) /* "" Athlon */
+#define X86_FEATURE_P3		( 3*32+ 6) /* "" P3 */
+#define X86_FEATURE_P4		( 3*32+ 7) /* "" P4 */
+#define X86_FEATURE_CONSTANT_TSC ( 3*32+ 8) /* TSC ticks at a constant rate */
+#define X86_FEATURE_UP		( 3*32+ 9) /* smp kernel running on up */
+#define X86_FEATURE_ART		( 3*32+10) /* Platform has always running timer (ART) */
+#define X86_FEATURE_ARCH_PERFMON ( 3*32+11) /* Intel Architectural PerfMon */
+#define X86_FEATURE_PEBS	( 3*32+12) /* Precise-Event Based Sampling */
+#define X86_FEATURE_BTS		( 3*32+13) /* Branch Trace Store */
+#define X86_FEATURE_SYSCALL32	( 3*32+14) /* "" syscall in ia32 userspace */
+#define X86_FEATURE_SYSENTER32	( 3*32+15) /* "" sysenter in ia32 userspace */
+#define X86_FEATURE_REP_GOOD	( 3*32+16) /* rep microcode works well */
+#define X86_FEATURE_MFENCE_RDTSC ( 3*32+17) /* "" Mfence synchronizes RDTSC */
+#define X86_FEATURE_LFENCE_RDTSC ( 3*32+18) /* "" Lfence synchronizes RDTSC */
+/* free, was #define X86_FEATURE_11AP	( 3*32+19) * "" Bad local APIC aka 11AP */
+#define X86_FEATURE_NOPL	( 3*32+20) /* The NOPL (0F 1F) instructions */
+#define X86_FEATURE_ALWAYS	( 3*32+21) /* "" Always-present feature */
+#define X86_FEATURE_XTOPOLOGY	( 3*32+22) /* cpu topology enum extensions */
+#define X86_FEATURE_TSC_RELIABLE ( 3*32+23) /* TSC is known to be reliable */
+#define X86_FEATURE_NONSTOP_TSC	( 3*32+24) /* TSC does not stop in C states */
+/* free, was #define X86_FEATURE_CLFLUSH_MONITOR ( 3*32+25) * "" clflush reqd with monitor */
+#define X86_FEATURE_EXTD_APICID	( 3*32+26) /* has extended APICID (8 bits) */
+#define X86_FEATURE_AMD_DCM     ( 3*32+27) /* multi-node processor */
+#define X86_FEATURE_APERFMPERF	( 3*32+28) /* APERFMPERF */
+#define X86_FEATURE_EAGER_FPU	( 3*32+29) /* "eagerfpu" Non lazy FPU restore */
+#define X86_FEATURE_NONSTOP_TSC_S3 ( 3*32+30) /* TSC doesn't stop in S3 state */
+#define X86_FEATURE_MCE_RECOVERY ( 3*32+31) /* cpu has recoverable machine checks */
+
+/* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */
+#define X86_FEATURE_XMM3	( 4*32+ 0) /* "pni" SSE-3 */
+#define X86_FEATURE_PCLMULQDQ	( 4*32+ 1) /* PCLMULQDQ instruction */
+#define X86_FEATURE_DTES64	( 4*32+ 2) /* 64-bit Debug Store */
+#define X86_FEATURE_MWAIT	( 4*32+ 3) /* "monitor" Monitor/Mwait support */
+#define X86_FEATURE_DSCPL	( 4*32+ 4) /* "ds_cpl" CPL Qual. Debug Store */
+#define X86_FEATURE_VMX		( 4*32+ 5) /* Hardware virtualization */
+#define X86_FEATURE_SMX		( 4*32+ 6) /* Safer mode */
+#define X86_FEATURE_EST		( 4*32+ 7) /* Enhanced SpeedStep */
+#define X86_FEATURE_TM2		( 4*32+ 8) /* Thermal Monitor 2 */
+#define X86_FEATURE_SSSE3	( 4*32+ 9) /* Supplemental SSE-3 */
+#define X86_FEATURE_CID		( 4*32+10) /* Context ID */
+#define X86_FEATURE_SDBG	( 4*32+11) /* Silicon Debug */
+#define X86_FEATURE_FMA		( 4*32+12) /* Fused multiply-add */
+#define X86_FEATURE_CX16	( 4*32+13) /* CMPXCHG16B */
+#define X86_FEATURE_XTPR	( 4*32+14) /* Send Task Priority Messages */
+#define X86_FEATURE_PDCM	( 4*32+15) /* Performance Capabilities */
+#define X86_FEATURE_PCID	( 4*32+17) /* Process Context Identifiers */
+#define X86_FEATURE_DCA		( 4*32+18) /* Direct Cache Access */
+#define X86_FEATURE_XMM4_1	( 4*32+19) /* "sse4_1" SSE-4.1 */
+#define X86_FEATURE_XMM4_2	( 4*32+20) /* "sse4_2" SSE-4.2 */
+#define X86_FEATURE_X2APIC	( 4*32+21) /* x2APIC */
+#define X86_FEATURE_MOVBE	( 4*32+22) /* MOVBE instruction */
+#define X86_FEATURE_POPCNT      ( 4*32+23) /* POPCNT instruction */
+#define X86_FEATURE_TSC_DEADLINE_TIMER	( 4*32+24) /* Tsc deadline timer */
+#define X86_FEATURE_AES		( 4*32+25) /* AES instructions */
+#define X86_FEATURE_XSAVE	( 4*32+26) /* XSAVE/XRSTOR/XSETBV/XGETBV */
+#define X86_FEATURE_OSXSAVE	( 4*32+27) /* "" XSAVE enabled in the OS */
+#define X86_FEATURE_AVX		( 4*32+28) /* Advanced Vector Extensions */
+#define X86_FEATURE_F16C	( 4*32+29) /* 16-bit fp conversions */
+#define X86_FEATURE_RDRAND	( 4*32+30) /* The RDRAND instruction */
+#define X86_FEATURE_HYPERVISOR	( 4*32+31) /* Running on a hypervisor */
+
+/* VIA/Cyrix/Centaur-defined CPU features, CPUID level 0xC0000001, word 5 */
+#define X86_FEATURE_XSTORE	( 5*32+ 2) /* "rng" RNG present (xstore) */
+#define X86_FEATURE_XSTORE_EN	( 5*32+ 3) /* "rng_en" RNG enabled */
+#define X86_FEATURE_XCRYPT	( 5*32+ 6) /* "ace" on-CPU crypto (xcrypt) */
+#define X86_FEATURE_XCRYPT_EN	( 5*32+ 7) /* "ace_en" on-CPU crypto enabled */
+#define X86_FEATURE_ACE2	( 5*32+ 8) /* Advanced Cryptography Engine v2 */
+#define X86_FEATURE_ACE2_EN	( 5*32+ 9) /* ACE v2 enabled */
+#define X86_FEATURE_PHE		( 5*32+10) /* PadLock Hash Engine */
+#define X86_FEATURE_PHE_EN	( 5*32+11) /* PHE enabled */
+#define X86_FEATURE_PMM		( 5*32+12) /* PadLock Montgomery Multiplier */
+#define X86_FEATURE_PMM_EN	( 5*32+13) /* PMM enabled */
+
+/* More extended AMD flags: CPUID level 0x80000001, ecx, word 6 */
+#define X86_FEATURE_LAHF_LM	( 6*32+ 0) /* LAHF/SAHF in long mode */
+#define X86_FEATURE_CMP_LEGACY	( 6*32+ 1) /* If yes HyperThreading not valid */
+#define X86_FEATURE_SVM		( 6*32+ 2) /* Secure virtual machine */
+#define X86_FEATURE_EXTAPIC	( 6*32+ 3) /* Extended APIC space */
+#define X86_FEATURE_CR8_LEGACY	( 6*32+ 4) /* CR8 in 32-bit mode */
+#define X86_FEATURE_ABM		( 6*32+ 5) /* Advanced bit manipulation */
+#define X86_FEATURE_SSE4A	( 6*32+ 6) /* SSE-4A */
+#define X86_FEATURE_MISALIGNSSE ( 6*32+ 7) /* Misaligned SSE mode */
+#define X86_FEATURE_3DNOWPREFETCH ( 6*32+ 8) /* 3DNow prefetch instructions */
+#define X86_FEATURE_OSVW	( 6*32+ 9) /* OS Visible Workaround */
+#define X86_FEATURE_IBS		( 6*32+10) /* Instruction Based Sampling */
+#define X86_FEATURE_XOP		( 6*32+11) /* extended AVX instructions */
+#define X86_FEATURE_SKINIT	( 6*32+12) /* SKINIT/STGI instructions */
+#define X86_FEATURE_WDT		( 6*32+13) /* Watchdog timer */
+#define X86_FEATURE_LWP		( 6*32+15) /* Light Weight Profiling */
+#define X86_FEATURE_FMA4	( 6*32+16) /* 4 operands MAC instructions */
+#define X86_FEATURE_TCE		( 6*32+17) /* translation cache extension */
+#define X86_FEATURE_NODEID_MSR	( 6*32+19) /* NodeId MSR */
+#define X86_FEATURE_TBM		( 6*32+21) /* trailing bit manipulations */
+#define X86_FEATURE_TOPOEXT	( 6*32+22) /* topology extensions CPUID leafs */
+#define X86_FEATURE_PERFCTR_CORE ( 6*32+23) /* core performance counter extensions */
+#define X86_FEATURE_PERFCTR_NB  ( 6*32+24) /* NB performance counter extensions */
+#define X86_FEATURE_BPEXT	(6*32+26) /* data breakpoint extension */
+#define X86_FEATURE_PERFCTR_L2	( 6*32+28) /* L2 performance counter extensions */
+#define X86_FEATURE_MWAITX	( 6*32+29) /* MWAIT extension (MONITORX/MWAITX) */
+
+/*
+ * Auxiliary flags: Linux defined - For features scattered in various
+ * CPUID levels like 0x6, 0xA etc, word 7.
+ *
+ * Reuse free bits when adding new feature flags!
+ */
+
+#define X86_FEATURE_CPB		( 7*32+ 2) /* AMD Core Performance Boost */
+#define X86_FEATURE_EPB		( 7*32+ 3) /* IA32_ENERGY_PERF_BIAS support */
+
+#define X86_FEATURE_HW_PSTATE	( 7*32+ 8) /* AMD HW-PState */
+#define X86_FEATURE_PROC_FEEDBACK ( 7*32+ 9) /* AMD ProcFeedbackInterface */
+
+#define X86_FEATURE_INTEL_PT	( 7*32+15) /* Intel Processor Trace */
+
+/* Virtualization flags: Linux defined, word 8 */
+#define X86_FEATURE_TPR_SHADOW  ( 8*32+ 0) /* Intel TPR Shadow */
+#define X86_FEATURE_VNMI        ( 8*32+ 1) /* Intel Virtual NMI */
+#define X86_FEATURE_FLEXPRIORITY ( 8*32+ 2) /* Intel FlexPriority */
+#define X86_FEATURE_EPT         ( 8*32+ 3) /* Intel Extended Page Table */
+#define X86_FEATURE_VPID        ( 8*32+ 4) /* Intel Virtual Processor ID */
+
+#define X86_FEATURE_VMMCALL     ( 8*32+15) /* Prefer vmmcall to vmcall */
+#define X86_FEATURE_XENPV       ( 8*32+16) /* "" Xen paravirtual guest */
+
+
+/* Intel-defined CPU features, CPUID level 0x00000007:0 (ebx), word 9 */
+#define X86_FEATURE_FSGSBASE	( 9*32+ 0) /* {RD/WR}{FS/GS}BASE instructions*/
+#define X86_FEATURE_TSC_ADJUST	( 9*32+ 1) /* TSC adjustment MSR 0x3b */
+#define X86_FEATURE_BMI1	( 9*32+ 3) /* 1st group bit manipulation extensions */
+#define X86_FEATURE_HLE		( 9*32+ 4) /* Hardware Lock Elision */
+#define X86_FEATURE_AVX2	( 9*32+ 5) /* AVX2 instructions */
+#define X86_FEATURE_SMEP	( 9*32+ 7) /* Supervisor Mode Execution Protection */
+#define X86_FEATURE_BMI2	( 9*32+ 8) /* 2nd group bit manipulation extensions */
+#define X86_FEATURE_ERMS	( 9*32+ 9) /* Enhanced REP MOVSB/STOSB */
+#define X86_FEATURE_INVPCID	( 9*32+10) /* Invalidate Processor Context ID */
+#define X86_FEATURE_RTM		( 9*32+11) /* Restricted Transactional Memory */
+#define X86_FEATURE_CQM		( 9*32+12) /* Cache QoS Monitoring */
+#define X86_FEATURE_MPX		( 9*32+14) /* Memory Protection Extension */
+#define X86_FEATURE_AVX512F	( 9*32+16) /* AVX-512 Foundation */
+#define X86_FEATURE_AVX512DQ	( 9*32+17) /* AVX-512 DQ (Double/Quad granular) Instructions */
+#define X86_FEATURE_RDSEED	( 9*32+18) /* The RDSEED instruction */
+#define X86_FEATURE_ADX		( 9*32+19) /* The ADCX and ADOX instructions */
+#define X86_FEATURE_SMAP	( 9*32+20) /* Supervisor Mode Access Prevention */
+#define X86_FEATURE_PCOMMIT	( 9*32+22) /* PCOMMIT instruction */
+#define X86_FEATURE_CLFLUSHOPT	( 9*32+23) /* CLFLUSHOPT instruction */
+#define X86_FEATURE_CLWB	( 9*32+24) /* CLWB instruction */
+#define X86_FEATURE_AVX512PF	( 9*32+26) /* AVX-512 Prefetch */
+#define X86_FEATURE_AVX512ER	( 9*32+27) /* AVX-512 Exponential and Reciprocal */
+#define X86_FEATURE_AVX512CD	( 9*32+28) /* AVX-512 Conflict Detection */
+#define X86_FEATURE_SHA_NI	( 9*32+29) /* SHA1/SHA256 Instruction Extensions */
+#define X86_FEATURE_AVX512BW	( 9*32+30) /* AVX-512 BW (Byte/Word granular) Instructions */
+#define X86_FEATURE_AVX512VL	( 9*32+31) /* AVX-512 VL (128/256 Vector Length) Extensions */
+
+/* Extended state features, CPUID level 0x0000000d:1 (eax), word 10 */
+#define X86_FEATURE_XSAVEOPT	(10*32+ 0) /* XSAVEOPT */
+#define X86_FEATURE_XSAVEC	(10*32+ 1) /* XSAVEC */
+#define X86_FEATURE_XGETBV1	(10*32+ 2) /* XGETBV with ECX = 1 */
+#define X86_FEATURE_XSAVES	(10*32+ 3) /* XSAVES/XRSTORS */
+
+/* Intel-defined CPU QoS Sub-leaf, CPUID level 0x0000000F:0 (edx), word 11 */
+#define X86_FEATURE_CQM_LLC	(11*32+ 1) /* LLC QoS if 1 */
+
+/* Intel-defined CPU QoS Sub-leaf, CPUID level 0x0000000F:1 (edx), word 12 */
+#define X86_FEATURE_CQM_OCCUP_LLC (12*32+ 0) /* LLC occupancy monitoring if 1 */
+
+/* AMD-defined CPU features, CPUID level 0x80000008 (ebx), word 13 */
+#define X86_FEATURE_CLZERO	(13*32+0) /* CLZERO instruction */
+
+/* Thermal and Power Management Leaf, CPUID level 0x00000006 (eax), word 14 */
+#define X86_FEATURE_DTHERM	(14*32+ 0) /* Digital Thermal Sensor */
+#define X86_FEATURE_IDA		(14*32+ 1) /* Intel Dynamic Acceleration */
+#define X86_FEATURE_ARAT	(14*32+ 2) /* Always Running APIC Timer */
+#define X86_FEATURE_PLN		(14*32+ 4) /* Intel Power Limit Notification */
+#define X86_FEATURE_PTS		(14*32+ 6) /* Intel Package Thermal Status */
+#define X86_FEATURE_HWP		(14*32+ 7) /* Intel Hardware P-states */
+#define X86_FEATURE_HWP_NOTIFY	(14*32+ 8) /* HWP Notification */
+#define X86_FEATURE_HWP_ACT_WINDOW (14*32+ 9) /* HWP Activity Window */
+#define X86_FEATURE_HWP_EPP	(14*32+10) /* HWP Energy Perf. Preference */
+#define X86_FEATURE_HWP_PKG_REQ (14*32+11) /* HWP Package Level Request */
+
+/* AMD SVM Feature Identification, CPUID level 0x8000000a (edx), word 15 */
+#define X86_FEATURE_NPT		(15*32+ 0) /* Nested Page Table support */
+#define X86_FEATURE_LBRV	(15*32+ 1) /* LBR Virtualization support */
+#define X86_FEATURE_SVML	(15*32+ 2) /* "svm_lock" SVM locking MSR */
+#define X86_FEATURE_NRIPS	(15*32+ 3) /* "nrip_save" SVM next_rip save */
+#define X86_FEATURE_TSCRATEMSR  (15*32+ 4) /* "tsc_scale" TSC scaling support */
+#define X86_FEATURE_VMCBCLEAN   (15*32+ 5) /* "vmcb_clean" VMCB clean bits support */
+#define X86_FEATURE_FLUSHBYASID (15*32+ 6) /* flush-by-ASID support */
+#define X86_FEATURE_DECODEASSISTS (15*32+ 7) /* Decode Assists support */
+#define X86_FEATURE_PAUSEFILTER (15*32+10) /* filtered pause intercept */
+#define X86_FEATURE_PFTHRESHOLD (15*32+12) /* pause filter threshold */
+#define X86_FEATURE_AVIC	(15*32+13) /* Virtual Interrupt Controller */
+
+/*
+ * BUG word(s)
+ */
+#define X86_BUG(x)		(NCAPINTS*32 + (x))
+
+#define X86_BUG_F00F		X86_BUG(0) /* Intel F00F */
+#define X86_BUG_FDIV		X86_BUG(1) /* FPU FDIV */
+#define X86_BUG_COMA		X86_BUG(2) /* Cyrix 6x86 coma */
+#define X86_BUG_AMD_TLB_MMATCH	X86_BUG(3) /* "tlb_mmatch" AMD Erratum 383 */
+#define X86_BUG_AMD_APIC_C1E	X86_BUG(4) /* "apic_c1e" AMD Erratum 400 */
+#define X86_BUG_11AP		X86_BUG(5) /* Bad local APIC aka 11AP */
+#define X86_BUG_FXSAVE_LEAK	X86_BUG(6) /* FXSAVE leaks FOP/FIP/FOP */
+#define X86_BUG_CLFLUSH_MONITOR	X86_BUG(7) /* AAI65, CLFLUSH required before MONITOR */
+#define X86_BUG_SYSRET_SS_ATTRS	X86_BUG(8) /* SYSRET doesn't fix up SS attrs */
+
+#ifdef CONFIG_X86_32
+/*
+ * 64-bit kernels don't use X86_BUG_ESPFIX.  Make the define conditional
+ * to avoid confusion.
+ */
+#define X86_BUG_ESPFIX		X86_BUG(9) /* "" IRET to 16-bit SS corrupts ESP/RSP high bits */
+#endif
+
+#endif /* _ASM_X86_CPUFEATURES_H */
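
Every X86_FEATURE_* value in the new header encodes a word index into cpuinfo_x86.x86_capability[] plus a bit within that 32-bit word, i.e. (word * 32 + bit). A short sketch of that decomposition, with illustrative helper names that are not part of the patch:

	#include <asm/cpufeatures.h>

	/* Word index into x86_capability[] for a feature define. */
	static inline unsigned int example_feature_word(unsigned int feature)
	{
		return feature / 32;
	}

	/* Bit position inside that 32-bit capability word. */
	static inline unsigned int example_feature_bit(unsigned int feature)
	{
		return feature % 32;
	}

	/* e.g. X86_FEATURE_MCE_RECOVERY == (3*32 + 31): word 3, bit 31. */
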
diff --git a/arch/x86/include/asm/desc_defs.h b/arch/x86/include/asm/desc_defs.h
index 278441f..eb5deb4 100644
--- a/arch/x86/include/asm/desc_defs.h
+++ b/arch/x86/include/asm/desc_defs.h
@@ -98,4 +98,27 @@
 
 #endif /* !__ASSEMBLY__ */
 
+/* Access rights as returned by LAR */
+#define AR_TYPE_RODATA		(0 * (1 << 9))
+#define AR_TYPE_RWDATA		(1 * (1 << 9))
+#define AR_TYPE_RODATA_EXPDOWN	(2 * (1 << 9))
+#define AR_TYPE_RWDATA_EXPDOWN	(3 * (1 << 9))
+#define AR_TYPE_XOCODE		(4 * (1 << 9))
+#define AR_TYPE_XRCODE		(5 * (1 << 9))
+#define AR_TYPE_XOCODE_CONF	(6 * (1 << 9))
+#define AR_TYPE_XRCODE_CONF	(7 * (1 << 9))
+#define AR_TYPE_MASK		(7 * (1 << 9))
+
+#define AR_DPL0			(0 * (1 << 13))
+#define AR_DPL3			(3 * (1 << 13))
+#define AR_DPL_MASK		(3 * (1 << 13))
+
+#define AR_A			(1 << 8)   /* "Accessed" */
+#define AR_S			(1 << 12)  /* If clear, "System" segment */
+#define AR_P			(1 << 15)  /* "Present" */
+#define AR_AVL			(1 << 20)  /* "AVaiLable" (no HW effect) */
+#define AR_L			(1 << 21)  /* "Long mode" for code segments */
+#define AR_DB			(1 << 22)  /* D/B, effect depends on type */
+#define AR_G			(1 << 23)  /* "Granularity" (limit in pages) */
+
 #endif /* _ASM_X86_DESC_DEFS_H */
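
The AR_* defines added above name the fields of the access-rights word returned by LAR: the type in bits 9-11 (with the accessed bit at 8), S at 12, DPL at 13-14 and P at 15. A decoding sketch under those definitions; the helper name is illustrative:

	#include <linux/types.h>
	#include <asm/desc_defs.h>

	/* True for a present, non-system, DPL-3 code segment. */
	static bool example_ar_is_user_code(unsigned int ar)
	{
		return (ar & AR_P) && (ar & AR_S) &&
		       ((ar & AR_DPL_MASK) == AR_DPL3) &&
		       ((ar & AR_TYPE_MASK) & AR_TYPE_XOCODE);
	}
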
diff --git a/arch/x86/include/asm/dmi.h b/arch/x86/include/asm/dmi.h
index 535192f..3c69fed 100644
--- a/arch/x86/include/asm/dmi.h
+++ b/arch/x86/include/asm/dmi.h
@@ -15,7 +15,7 @@
 /* Use early IO mappings for DMI because it's initialized early */
 #define dmi_early_remap		early_ioremap
 #define dmi_early_unmap		early_iounmap
-#define dmi_remap		ioremap
+#define dmi_remap		ioremap_cache
 #define dmi_unmap		iounmap
 
 #endif /* _ASM_X86_DMI_H */
diff --git a/arch/x86/include/asm/fixmap.h b/arch/x86/include/asm/fixmap.h
index 6d7d0e5..8554f96 100644
--- a/arch/x86/include/asm/fixmap.h
+++ b/arch/x86/include/asm/fixmap.h
@@ -138,7 +138,7 @@
 extern int fixmaps_set;
 
 extern pte_t *kmap_pte;
-extern pgprot_t kmap_prot;
+#define kmap_prot PAGE_KERNEL
 extern pte_t *pkmap_page_table;
 
 void __native_set_fixmap(enum fixed_addresses idx, pte_t pte);
diff --git a/arch/x86/include/asm/fpu/internal.h b/arch/x86/include/asm/fpu/internal.h
index 0fd440d..a212434 100644
--- a/arch/x86/include/asm/fpu/internal.h
+++ b/arch/x86/include/asm/fpu/internal.h
@@ -17,6 +17,7 @@
 #include <asm/user.h>
 #include <asm/fpu/api.h>
 #include <asm/fpu/xstate.h>
+#include <asm/cpufeature.h>
 
 /*
  * High level FPU state handling functions:
@@ -58,22 +59,22 @@
  */
 static __always_inline __pure bool use_eager_fpu(void)
 {
-	return static_cpu_has_safe(X86_FEATURE_EAGER_FPU);
+	return static_cpu_has(X86_FEATURE_EAGER_FPU);
 }
 
 static __always_inline __pure bool use_xsaveopt(void)
 {
-	return static_cpu_has_safe(X86_FEATURE_XSAVEOPT);
+	return static_cpu_has(X86_FEATURE_XSAVEOPT);
 }
 
 static __always_inline __pure bool use_xsave(void)
 {
-	return static_cpu_has_safe(X86_FEATURE_XSAVE);
+	return static_cpu_has(X86_FEATURE_XSAVE);
 }
 
 static __always_inline __pure bool use_fxsr(void)
 {
-	return static_cpu_has_safe(X86_FEATURE_FXSR);
+	return static_cpu_has(X86_FEATURE_FXSR);
 }
 
 /*
@@ -300,7 +301,7 @@
 
 	WARN_ON(system_state != SYSTEM_BOOTING);
 
-	if (static_cpu_has_safe(X86_FEATURE_XSAVES))
+	if (static_cpu_has(X86_FEATURE_XSAVES))
 		XSTATE_OP(XSAVES, xstate, lmask, hmask, err);
 	else
 		XSTATE_OP(XSAVE, xstate, lmask, hmask, err);
@@ -322,7 +323,7 @@
 
 	WARN_ON(system_state != SYSTEM_BOOTING);
 
-	if (static_cpu_has_safe(X86_FEATURE_XSAVES))
+	if (static_cpu_has(X86_FEATURE_XSAVES))
 		XSTATE_OP(XRSTORS, xstate, lmask, hmask, err);
 	else
 		XSTATE_OP(XRSTOR, xstate, lmask, hmask, err);
@@ -460,7 +461,7 @@
 	 * pending. Clear the x87 state here by setting it to fixed values.
 	 * "m" is a random variable that should be in L1.
 	 */
-	if (unlikely(static_cpu_has_bug_safe(X86_BUG_FXSAVE_LEAK))) {
+	if (unlikely(static_cpu_has_bug(X86_BUG_FXSAVE_LEAK))) {
 		asm volatile(
 			"fnclex\n\t"
 			"emms\n\t"
@@ -589,7 +590,8 @@
 	 * If the task has used the math, pre-load the FPU on xsave processors
 	 * or if the past 5 consecutive context-switches used math.
 	 */
-	fpu.preload = new_fpu->fpstate_active &&
+	fpu.preload = static_cpu_has(X86_FEATURE_FPU) &&
+		      new_fpu->fpstate_active &&
 		      (use_eager_fpu() || new_fpu->counter > 5);
 
 	if (old_fpu->fpregs_active) {
diff --git a/arch/x86/include/asm/frame.h b/arch/x86/include/asm/frame.h
index 793179c..6e4d170 100644
--- a/arch/x86/include/asm/frame.h
+++ b/arch/x86/include/asm/frame.h
@@ -1,23 +1,44 @@
-#ifdef __ASSEMBLY__
+#ifndef _ASM_X86_FRAME_H
+#define _ASM_X86_FRAME_H
 
 #include <asm/asm.h>
 
-/* The annotation hides the frame from the unwinder and makes it look
-   like a ordinary ebp save/restore. This avoids some special cases for
-   frame pointer later */
-#ifdef CONFIG_FRAME_POINTER
-	.macro FRAME
-	__ASM_SIZE(push,)	%__ASM_REG(bp)
-	__ASM_SIZE(mov)		%__ASM_REG(sp), %__ASM_REG(bp)
-	.endm
-	.macro ENDFRAME
-	__ASM_SIZE(pop,)	%__ASM_REG(bp)
-	.endm
-#else
-	.macro FRAME
-	.endm
-	.macro ENDFRAME
-	.endm
-#endif
+/*
+ * These are stack frame creation macros.  They should be used by every
+ * callable non-leaf asm function to make kernel stack traces more reliable.
+ */
 
-#endif  /*  __ASSEMBLY__  */
+#ifdef CONFIG_FRAME_POINTER
+
+#ifdef __ASSEMBLY__
+
+.macro FRAME_BEGIN
+	push %_ASM_BP
+	_ASM_MOV %_ASM_SP, %_ASM_BP
+.endm
+
+.macro FRAME_END
+	pop %_ASM_BP
+.endm
+
+#else /* !__ASSEMBLY__ */
+
+#define FRAME_BEGIN				\
+	"push %" _ASM_BP "\n"			\
+	_ASM_MOV "%" _ASM_SP ", %" _ASM_BP "\n"
+
+#define FRAME_END "pop %" _ASM_BP "\n"
+
+#endif /* __ASSEMBLY__ */
+
+#define FRAME_OFFSET __ASM_SEL(4, 8)
+
+#else /* !CONFIG_FRAME_POINTER */
+
+#define FRAME_BEGIN
+#define FRAME_END
+#define FRAME_OFFSET 0
+
+#endif /* CONFIG_FRAME_POINTER */
+
+#endif /* _ASM_X86_FRAME_H */
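
The reworked frame.h provides FRAME_BEGIN/FRAME_END both as assembler macros for .S files and as C strings for inline asm; both collapse to nothing when CONFIG_FRAME_POINTER is off. A 64-bit-only sketch of the C-string form, with an illustrative helper symbol that is not part of the patch:

	#include <asm/frame.h>

	extern void example_asm_helper(void);	/* hypothetical asm routine */

	static void example_call_with_frame(void)
	{
		/* Wrap a hand-written call in a conventional stack frame. */
		asm volatile(FRAME_BEGIN
			     "call example_asm_helper\n\t"
			     FRAME_END
			     : : : "rax", "rcx", "rdx", "rsi", "rdi",
			       "r8", "r9", "r10", "r11", "memory");
	}
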
diff --git a/arch/x86/include/asm/imr.h b/arch/x86/include/asm/imr.h
index cd2ce40..ebea2c9 100644
--- a/arch/x86/include/asm/imr.h
+++ b/arch/x86/include/asm/imr.h
@@ -53,7 +53,7 @@
 #define IMR_MASK		(IMR_ALIGN - 1)
 
 int imr_add_range(phys_addr_t base, size_t size,
-		  unsigned int rmask, unsigned int wmask, bool lock);
+		  unsigned int rmask, unsigned int wmask);
 
 int imr_remove_range(phys_addr_t base, size_t size);
 
diff --git a/arch/x86/include/asm/ipi.h b/arch/x86/include/asm/ipi.h
index cfc9a0d..a4fe16e 100644
--- a/arch/x86/include/asm/ipi.h
+++ b/arch/x86/include/asm/ipi.h
@@ -57,67 +57,13 @@
 		cpu_relax();
 }
 
-static inline void
-__default_send_IPI_shortcut(unsigned int shortcut, int vector, unsigned int dest)
-{
-	/*
-	 * Subtle. In the case of the 'never do double writes' workaround
-	 * we have to lock out interrupts to be safe.  As we don't care
-	 * of the value read we use an atomic rmw access to avoid costly
-	 * cli/sti.  Otherwise we use an even cheaper single atomic write
-	 * to the APIC.
-	 */
-	unsigned int cfg;
-
-	/*
-	 * Wait for idle.
-	 */
-	__xapic_wait_icr_idle();
-
-	/*
-	 * No need to touch the target chip field
-	 */
-	cfg = __prepare_ICR(shortcut, vector, dest);
-
-	/*
-	 * Send the IPI. The write to APIC_ICR fires this off.
-	 */
-	native_apic_mem_write(APIC_ICR, cfg);
-}
+void __default_send_IPI_shortcut(unsigned int shortcut, int vector, unsigned int dest);
 
 /*
  * This is used to send an IPI with no shorthand notation (the destination is
  * specified in bits 56 to 63 of the ICR).
  */
-static inline void
- __default_send_IPI_dest_field(unsigned int mask, int vector, unsigned int dest)
-{
-	unsigned long cfg;
-
-	/*
-	 * Wait for idle.
-	 */
-	if (unlikely(vector == NMI_VECTOR))
-		safe_apic_wait_icr_idle();
-	else
-		__xapic_wait_icr_idle();
-
-	/*
-	 * prepare target chip field
-	 */
-	cfg = __prepare_ICR2(mask);
-	native_apic_mem_write(APIC_ICR2, cfg);
-
-	/*
-	 * program the ICR
-	 */
-	cfg = __prepare_ICR(0, vector, dest);
-
-	/*
-	 * Send the IPI. The write to APIC_ICR fires this off.
-	 */
-	native_apic_mem_write(APIC_ICR, cfg);
-}
+void __default_send_IPI_dest_field(unsigned int mask, int vector, unsigned int dest);
 
 extern void default_send_IPI_single(int cpu, int vector);
 extern void default_send_IPI_single_phys(int cpu, int vector);
diff --git a/arch/x86/include/asm/irq_work.h b/arch/x86/include/asm/irq_work.h
index 78162f8..d0afb05 100644
--- a/arch/x86/include/asm/irq_work.h
+++ b/arch/x86/include/asm/irq_work.h
@@ -1,7 +1,7 @@
 #ifndef _ASM_IRQ_WORK_H
 #define _ASM_IRQ_WORK_H
 
-#include <asm/processor.h>
+#include <asm/cpufeature.h>
 
 static inline bool arch_irq_work_has_interrupt(void)
 {
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 44adbb8..01c8b50 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -32,6 +32,7 @@
 #include <asm/mtrr.h>
 #include <asm/msr-index.h>
 #include <asm/asm.h>
+#include <asm/kvm_page_track.h>
 
 #define KVM_MAX_VCPUS 255
 #define KVM_SOFT_MAX_VCPUS 160
@@ -214,6 +215,14 @@
 	void *objects[KVM_NR_MEM_OBJS];
 };
 
+/*
+ * Pages used as guest page tables by the soft MMU are tracked via
+ * kvm_memory_slot.arch.gfn_track, which is 16 bits wide, so the role bits
+ * used by an indirect shadow page cannot exceed 15 bits.
+ *
+ * Currently we use 14 bits: @level, @cr4_pae, @quadrant, @access,
+ * @nxe, @cr0_wp, @smep_andnot_wp and @smap_andnot_wp.
+ */
 union kvm_mmu_page_role {
 	unsigned word;
 	struct {
@@ -276,7 +285,7 @@
 #endif
 
 	/* Number of writes since the last time traversal visited this page.  */
-	int write_flooding_count;
+	atomic_t write_flooding_count;
 };
 
 struct kvm_pio_request {
@@ -338,12 +347,8 @@
 
 	struct rsvd_bits_validate guest_rsvd_check;
 
-	/*
-	 * Bitmap: bit set = last pte in walk
-	 * index[0:1]: level (zero-based)
-	 * index[2]: pte.ps
-	 */
-	u8 last_pte_bitmap;
+	/* Can have large pages at levels 2..last_nonleaf_level-1. */
+	u8 last_nonleaf_level;
 
 	bool nx;
 
@@ -498,7 +503,6 @@
 	struct kvm_mmu_memory_cache mmu_page_header_cache;
 
 	struct fpu guest_fpu;
-	bool eager_fpu;
 	u64 xcr0;
 	u64 guest_supported_xcr0;
 	u32 guest_xstate_size;
@@ -644,12 +648,13 @@
 };
 
 struct kvm_lpage_info {
-	int write_count;
+	int disallow_lpage;
 };
 
 struct kvm_arch_memory_slot {
 	struct kvm_rmap_head *rmap[KVM_NR_PAGE_SIZES];
 	struct kvm_lpage_info *lpage_info[KVM_NR_PAGE_SIZES - 1];
+	unsigned short *gfn_track[KVM_PAGE_TRACK_MAX];
 };
 
 /*
@@ -694,6 +699,8 @@
 	 */
 	struct list_head active_mmu_pages;
 	struct list_head zapped_obsolete_pages;
+	struct kvm_page_track_notifier_node mmu_sp_tracker;
+	struct kvm_page_track_notifier_head track_notifier_head;
 
 	struct list_head assigned_dev_head;
 	struct iommu_domain *iommu_domain;
@@ -754,6 +761,8 @@
 
 	bool irqchip_split;
 	u8 nr_reserved_ioapic_pins;
+
+	bool disabled_lapic_found;
 };
 
 struct kvm_vm_stat {
@@ -988,6 +997,8 @@
 void kvm_mmu_destroy(struct kvm_vcpu *vcpu);
 int kvm_mmu_create(struct kvm_vcpu *vcpu);
 void kvm_mmu_setup(struct kvm_vcpu *vcpu);
+void kvm_mmu_init_vm(struct kvm *kvm);
+void kvm_mmu_uninit_vm(struct kvm *kvm);
 void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask,
 		u64 dirty_mask, u64 nx_mask, u64 x_mask);
 
@@ -1127,8 +1138,6 @@
 
 void kvm_inject_nmi(struct kvm_vcpu *vcpu);
 
-void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
-		       const u8 *new, int bytes);
 int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn);
 int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva);
 void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu);
diff --git a/arch/x86/include/asm/kvm_page_track.h b/arch/x86/include/asm/kvm_page_track.h
new file mode 100644
index 0000000..c2b8d24
--- /dev/null
+++ b/arch/x86/include/asm/kvm_page_track.h
@@ -0,0 +1,61 @@
+#ifndef _ASM_X86_KVM_PAGE_TRACK_H
+#define _ASM_X86_KVM_PAGE_TRACK_H
+
+enum kvm_page_track_mode {
+	KVM_PAGE_TRACK_WRITE,
+	KVM_PAGE_TRACK_MAX,
+};
+
+/*
+ * The notifier represented by @kvm_page_track_notifier_node is linked into
+ * the head, which is notified when the guest triggers a tracked event.
+ *
+ * Write access on the head is protected by kvm->mmu_lock, read access
+ * is protected by track_srcu.
+ */
+struct kvm_page_track_notifier_head {
+	struct srcu_struct track_srcu;
+	struct hlist_head track_notifier_list;
+};
+
+struct kvm_page_track_notifier_node {
+	struct hlist_node node;
+
+	/*
+	 * Called once write emulation has finished for a write to a
+	 * write-tracked page.
+	 *
+	 * @vcpu: the vcpu where the write access happened.
+	 * @gpa: the physical address written by the guest.
+	 * @new: the data written to that address.
+	 * @bytes: the length of the write.
+	 */
+	void (*track_write)(struct kvm_vcpu *vcpu, gpa_t gpa, const u8 *new,
+			    int bytes);
+};
+
+void kvm_page_track_init(struct kvm *kvm);
+
+void kvm_page_track_free_memslot(struct kvm_memory_slot *free,
+				 struct kvm_memory_slot *dont);
+int kvm_page_track_create_memslot(struct kvm_memory_slot *slot,
+				  unsigned long npages);
+
+void kvm_slot_page_track_add_page(struct kvm *kvm,
+				  struct kvm_memory_slot *slot, gfn_t gfn,
+				  enum kvm_page_track_mode mode);
+void kvm_slot_page_track_remove_page(struct kvm *kvm,
+				     struct kvm_memory_slot *slot, gfn_t gfn,
+				     enum kvm_page_track_mode mode);
+bool kvm_page_track_is_active(struct kvm_vcpu *vcpu, gfn_t gfn,
+			      enum kvm_page_track_mode mode);
+
+void
+kvm_page_track_register_notifier(struct kvm *kvm,
+				 struct kvm_page_track_notifier_node *n);
+void
+kvm_page_track_unregister_notifier(struct kvm *kvm,
+				   struct kvm_page_track_notifier_node *n);
+void kvm_page_track_write(struct kvm_vcpu *vcpu, gpa_t gpa, const u8 *new,
+			  int bytes);
+#endif
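
The new header is a small notifier API: a consumer fills in a kvm_page_track_notifier_node with a track_write callback and registers it per VM. A registration sketch, assuming a KVM build context; the example_* names are illustrative:

	#include <linux/kvm_host.h>
	#include <asm/kvm_page_track.h>

	static void example_track_write(struct kvm_vcpu *vcpu, gpa_t gpa,
					const u8 *new, int bytes)
	{
		/* Runs once write emulation has finished on a tracked gfn. */
	}

	static struct kvm_page_track_notifier_node example_node = {
		.track_write = example_track_write,
	};

	static void example_register(struct kvm *kvm)
	{
		kvm_page_track_register_notifier(kvm, &example_node);
	}
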
diff --git a/arch/x86/include/asm/mce.h b/arch/x86/include/asm/mce.h
index cfff341..92b6f65 100644
--- a/arch/x86/include/asm/mce.h
+++ b/arch/x86/include/asm/mce.h
@@ -135,6 +135,7 @@
 	bool ignore_ce;
 	bool disabled;
 	bool ser;
+	bool recovery;
 	bool bios_cmci_threshold;
 	u8 banks;
 	s8 bootlog;
diff --git a/arch/x86/include/asm/microcode.h b/arch/x86/include/asm/microcode.h
index 1e1b07a..9d3a96c 100644
--- a/arch/x86/include/asm/microcode.h
+++ b/arch/x86/include/asm/microcode.h
@@ -3,6 +3,7 @@
 
 #include <asm/cpu.h>
 #include <linux/earlycpio.h>
+#include <linux/initrd.h>
 
 #define native_rdmsr(msr, val1, val2)			\
 do {							\
@@ -143,4 +144,29 @@
 static inline bool
 get_builtin_firmware(struct cpio_data *cd, const char *name)	{ return false; }
 #endif
+
+static inline unsigned long get_initrd_start(void)
+{
+#ifdef CONFIG_BLK_DEV_INITRD
+	return initrd_start;
+#else
+	return 0;
+#endif
+}
+
+static inline unsigned long get_initrd_start_addr(void)
+{
+#ifdef CONFIG_BLK_DEV_INITRD
+#ifdef CONFIG_X86_32
+	unsigned long *initrd_start_p = (unsigned long *)__pa_nodebug(&initrd_start);
+
+	return (unsigned long)__pa_nodebug(*initrd_start_p);
+#else
+	return get_initrd_start();
+#endif
+#else /* CONFIG_BLK_DEV_INITRD */
+	return 0;
+#endif
+}
+
 #endif /* _ASM_X86_MICROCODE_H */
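
get_initrd_start_addr() appears to exist for the early microcode loader: on 32-bit it runs before paging is enabled, so initrd_start has to be read through its physical address (__pa_nodebug) and the stored value converted to a physical address as well, while 64-bit can use the virtual value directly. A caller sketch with an illustrative function name:

	#include <asm/microcode.h>

	static void example_early_scan(void)
	{
		unsigned long addr = get_initrd_start_addr();

		if (!addr)
			return;		/* no initrd configured */

		/* ... scan the image at 'addr' for microcode blobs ... */
	}
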
diff --git a/arch/x86/include/asm/microcode_intel.h b/arch/x86/include/asm/microcode_intel.h
index 8559b01..603417f 100644
--- a/arch/x86/include/asm/microcode_intel.h
+++ b/arch/x86/include/asm/microcode_intel.h
@@ -40,7 +40,6 @@
 #define DEFAULT_UCODE_TOTALSIZE (DEFAULT_UCODE_DATASIZE + MC_HEADER_SIZE)
 #define EXT_HEADER_SIZE		(sizeof(struct extended_sigtable))
 #define EXT_SIGNATURE_SIZE	(sizeof(struct extended_signature))
-#define DWSIZE			(sizeof(u32))
 
 #define get_totalsize(mc) \
 	(((struct microcode_intel *)mc)->hdr.datasize ? \
diff --git a/arch/x86/include/asm/mmu.h b/arch/x86/include/asm/mmu.h
index 55234d5..1ea0bae 100644
--- a/arch/x86/include/asm/mmu.h
+++ b/arch/x86/include/asm/mmu.h
@@ -19,7 +19,8 @@
 #endif
 
 	struct mutex lock;
-	void __user *vdso;
+	void __user *vdso;			/* vdso base address */
+	const struct vdso_image *vdso_image;	/* vdso image in use */
 
 	atomic_t perf_rdpmc_allowed;	/* nonzero if rdpmc is allowed */
 } mm_context_t;
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
index b05402e..984ab75 100644
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -1,7 +1,12 @@
 #ifndef _ASM_X86_MSR_INDEX_H
 #define _ASM_X86_MSR_INDEX_H
 
-/* CPU model specific register (MSR) numbers */
+/*
+ * CPU model specific register (MSR) numbers.
+ *
+ * Do not add new entries to this file unless the definitions are shared
+ * between multiple compilation units.
+ */
 
 /* x86-64 specific MSRs */
 #define MSR_EFER		0xc0000080 /* extended feature register */
diff --git a/arch/x86/include/asm/mwait.h b/arch/x86/include/asm/mwait.h
index c70689b..0deeb2d 100644
--- a/arch/x86/include/asm/mwait.h
+++ b/arch/x86/include/asm/mwait.h
@@ -3,6 +3,8 @@
 
 #include <linux/sched.h>
 
+#include <asm/cpufeature.h>
+
 #define MWAIT_SUBSTATE_MASK		0xf
 #define MWAIT_CSTATE_MASK		0xf
 #define MWAIT_SUBSTATE_SIZE		4
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index 813384e..983738a 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -13,7 +13,7 @@
 #include <asm/types.h>
 #include <uapi/asm/sigcontext.h>
 #include <asm/current.h>
-#include <asm/cpufeature.h>
+#include <asm/cpufeatures.h>
 #include <asm/page.h>
 #include <asm/pgtable_types.h>
 #include <asm/percpu.h>
@@ -24,7 +24,6 @@
 #include <asm/fpu/types.h>
 
 #include <linux/personality.h>
-#include <linux/cpumask.h>
 #include <linux/cache.h>
 #include <linux/threads.h>
 #include <linux/math64.h>
@@ -300,10 +299,13 @@
 	 */
 	unsigned long		io_bitmap[IO_BITMAP_LONGS + 1];
 
+#ifdef CONFIG_X86_32
 	/*
-	 * Space for the temporary SYSENTER stack:
+	 * Space for the temporary SYSENTER stack.
 	 */
+	unsigned long		SYSENTER_stack_canary;
 	unsigned long		SYSENTER_stack[64];
+#endif
 
 } ____cacheline_aligned;
 
diff --git a/arch/x86/include/asm/proto.h b/arch/x86/include/asm/proto.h
index a4a7728..9b9b30b1 100644
--- a/arch/x86/include/asm/proto.h
+++ b/arch/x86/include/asm/proto.h
@@ -7,12 +7,23 @@
 
 void syscall_init(void);
 
+#ifdef CONFIG_X86_64
 void entry_SYSCALL_64(void);
-void entry_SYSCALL_compat(void);
+#endif
+
+#ifdef CONFIG_X86_32
 void entry_INT80_32(void);
-void entry_INT80_compat(void);
 void entry_SYSENTER_32(void);
+void __begin_SYSENTER_singlestep_region(void);
+void __end_SYSENTER_singlestep_region(void);
+#endif
+
+#ifdef CONFIG_IA32_EMULATION
 void entry_SYSENTER_compat(void);
+void __end_entry_SYSENTER_compat(void);
+void entry_SYSCALL_compat(void);
+void entry_INT80_compat(void);
+#endif
 
 void x86_configure_nx(void);
 void x86_report_nx(void);
diff --git a/arch/x86/include/asm/sighandling.h b/arch/x86/include/asm/sighandling.h
index 89db467..452c88b 100644
--- a/arch/x86/include/asm/sighandling.h
+++ b/arch/x86/include/asm/sighandling.h
@@ -13,7 +13,6 @@
 			 X86_EFLAGS_CF | X86_EFLAGS_RF)
 
 void signal_fault(struct pt_regs *regs, void __user *frame, char *where);
-int restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc);
 int setup_sigcontext(struct sigcontext __user *sc, void __user *fpstate,
 		     struct pt_regs *regs, unsigned long mask);
 
diff --git a/arch/x86/include/asm/smap.h b/arch/x86/include/asm/smap.h
index ba665eb..db33330 100644
--- a/arch/x86/include/asm/smap.h
+++ b/arch/x86/include/asm/smap.h
@@ -15,7 +15,7 @@
 
 #include <linux/stringify.h>
 #include <asm/nops.h>
-#include <asm/cpufeature.h>
+#include <asm/cpufeatures.h>
 
 /* "Raw" instruction opcodes */
 #define __ASM_CLAC	.byte 0x0f,0x01,0xca
diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
index dfcf072..20a3de5 100644
--- a/arch/x86/include/asm/smp.h
+++ b/arch/x86/include/asm/smp.h
@@ -16,7 +16,6 @@
 #endif
 #include <asm/thread_info.h>
 #include <asm/cpumask.h>
-#include <asm/cpufeature.h>
 
 extern int smp_num_siblings;
 extern unsigned int num_processors;
diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
index c7b5510..8286669 100644
--- a/arch/x86/include/asm/thread_info.h
+++ b/arch/x86/include/asm/thread_info.h
@@ -49,7 +49,7 @@
  */
 #ifndef __ASSEMBLY__
 struct task_struct;
-#include <asm/processor.h>
+#include <asm/cpufeature.h>
 #include <linux/atomic.h>
 
 struct thread_info {
@@ -134,10 +134,13 @@
 #define _TIF_ADDR32		(1 << TIF_ADDR32)
 #define _TIF_X32		(1 << TIF_X32)
 
-/* work to do in syscall_trace_enter() */
+/*
+ * work to do in syscall_trace_enter().  Also includes TIF_NOHZ for
+ * enter_from_user_mode()
+ */
 #define _TIF_WORK_SYSCALL_ENTRY	\
 	(_TIF_SYSCALL_TRACE | _TIF_SYSCALL_EMU | _TIF_SYSCALL_AUDIT |	\
-	 _TIF_SECCOMP | _TIF_SINGLESTEP | _TIF_SYSCALL_TRACEPOINT |	\
+	 _TIF_SECCOMP | _TIF_SYSCALL_TRACEPOINT |	\
 	 _TIF_NOHZ)
 
 /* work to do on any return to user space */
diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
index 6df2029..c24b422 100644
--- a/arch/x86/include/asm/tlbflush.h
+++ b/arch/x86/include/asm/tlbflush.h
@@ -5,8 +5,57 @@
 #include <linux/sched.h>
 
 #include <asm/processor.h>
+#include <asm/cpufeature.h>
 #include <asm/special_insns.h>
 
+static inline void __invpcid(unsigned long pcid, unsigned long addr,
+			     unsigned long type)
+{
+	struct { u64 d[2]; } desc = { { pcid, addr } };
+
+	/*
+	 * The memory clobber is because the whole point is to invalidate
+	 * stale TLB entries and, especially if we're flushing global
+	 * mappings, we don't want the compiler to reorder any subsequent
+	 * memory accesses before the TLB flush.
+	 *
+	 * The hex opcode is invpcid (%ecx), %eax in 32-bit mode and
+	 * invpcid (%rcx), %rax in long mode.
+	 */
+	asm volatile (".byte 0x66, 0x0f, 0x38, 0x82, 0x01"
+		      : : "m" (desc), "a" (type), "c" (&desc) : "memory");
+}
+
+#define INVPCID_TYPE_INDIV_ADDR		0
+#define INVPCID_TYPE_SINGLE_CTXT	1
+#define INVPCID_TYPE_ALL_INCL_GLOBAL	2
+#define INVPCID_TYPE_ALL_NON_GLOBAL	3
+
+/* Flush all mappings for a given pcid and addr, not including globals. */
+static inline void invpcid_flush_one(unsigned long pcid,
+				     unsigned long addr)
+{
+	__invpcid(pcid, addr, INVPCID_TYPE_INDIV_ADDR);
+}
+
+/* Flush all mappings for a given PCID, not including globals. */
+static inline void invpcid_flush_single_context(unsigned long pcid)
+{
+	__invpcid(pcid, 0, INVPCID_TYPE_SINGLE_CTXT);
+}
+
+/* Flush all mappings, including globals, for all PCIDs. */
+static inline void invpcid_flush_all(void)
+{
+	__invpcid(0, 0, INVPCID_TYPE_ALL_INCL_GLOBAL);
+}
+
+/* Flush all mappings for all PCIDs except globals. */
+static inline void invpcid_flush_all_nonglobals(void)
+{
+	__invpcid(0, 0, INVPCID_TYPE_ALL_NON_GLOBAL);
+}
+
 #ifdef CONFIG_PARAVIRT
 #include <asm/paravirt.h>
 #else
@@ -104,6 +153,15 @@
 {
 	unsigned long flags;
 
+	if (static_cpu_has(X86_FEATURE_INVPCID)) {
+		/*
+		 * Using INVPCID is considerably faster than a pair of writes
+		 * to CR4 sandwiched inside an IRQ flag save/restore.
+		 */
+		invpcid_flush_all();
+		return;
+	}
+
 	/*
 	 * Read-modify-write to CR4 - protect it from preemption and
 	 * from interrupts. (Use the raw variant because this code can
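
The INVPCID helpers above hand-encode the instruction as raw bytes (so no particular assembler support is needed) and give the four architectural invalidation types symbolic names; the flush hunk then prefers invpcid_flush_all() over the CR4 toggle when the feature bit is set. A caller sketch with an illustrative function name:

	#include <asm/tlbflush.h>

	static void example_flush_pcid(unsigned long pcid)
	{
		if (!static_cpu_has(X86_FEATURE_INVPCID))
			return;	/* caller must fall back to CR3/CR4 flushing */

		/* Drop all non-global mappings tagged with this PCID. */
		invpcid_flush_single_context(pcid);
	}
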
diff --git a/arch/x86/include/asm/tsc.h b/arch/x86/include/asm/tsc.h
index 6d7c547..174c421 100644
--- a/arch/x86/include/asm/tsc.h
+++ b/arch/x86/include/asm/tsc.h
@@ -29,6 +29,8 @@
 	return rdtsc();
 }
 
+extern struct system_counterval_t convert_art_to_tsc(cycle_t art);
+
 extern void tsc_init(void);
 extern void mark_tsc_unstable(char *reason);
 extern int unsynchronized_tsc(void);
diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
index b89c34c..3076986 100644
--- a/arch/x86/include/asm/uaccess_64.h
+++ b/arch/x86/include/asm/uaccess_64.h
@@ -8,7 +8,7 @@
 #include <linux/errno.h>
 #include <linux/lockdep.h>
 #include <asm/alternative.h>
-#include <asm/cpufeature.h>
+#include <asm/cpufeatures.h>
 #include <asm/page.h>
 
 /*
diff --git a/arch/x86/include/asm/vdso.h b/arch/x86/include/asm/vdso.h
index deabaf9..43dc55b 100644
--- a/arch/x86/include/asm/vdso.h
+++ b/arch/x86/include/asm/vdso.h
@@ -13,9 +13,6 @@
 	void *data;
 	unsigned long size;   /* Always a multiple of PAGE_SIZE */
 
-	/* text_mapping.pages is big enough for data/size page pointers */
-	struct vm_special_mapping text_mapping;
-
 	unsigned long alt, alt_len;
 
 	long sym_vvar_start;  /* Negative offset to the vvar area */
diff --git a/arch/x86/include/asm/vgtod.h b/arch/x86/include/asm/vgtod.h
index f556c48..e728699 100644
--- a/arch/x86/include/asm/vgtod.h
+++ b/arch/x86/include/asm/vgtod.h
@@ -37,6 +37,12 @@
 };
 extern struct vsyscall_gtod_data vsyscall_gtod_data;
 
+extern int vclocks_used;
+static inline bool vclock_was_used(int vclock)
+{
+	return READ_ONCE(vclocks_used) & (1 << vclock);
+}
+
 static inline unsigned gtod_read_begin(const struct vsyscall_gtod_data *s)
 {
 	unsigned ret;
diff --git a/arch/x86/include/uapi/asm/hyperv.h b/arch/x86/include/uapi/asm/hyperv.h
index 7956412..9b1a918 100644
--- a/arch/x86/include/uapi/asm/hyperv.h
+++ b/arch/x86/include/uapi/asm/hyperv.h
@@ -226,7 +226,9 @@
 		(~((1ull << HV_X64_MSR_HYPERCALL_PAGE_ADDRESS_SHIFT) - 1))
 
 /* Declare the various hypercall operations. */
-#define HV_X64_HV_NOTIFY_LONG_SPIN_WAIT		0x0008
+#define HVCALL_NOTIFY_LONG_SPIN_WAIT		0x0008
+#define HVCALL_POST_MESSAGE			0x005c
+#define HVCALL_SIGNAL_EVENT			0x005d
 
 #define HV_X64_MSR_APIC_ASSIST_PAGE_ENABLE		0x00000001
 #define HV_X64_MSR_APIC_ASSIST_PAGE_ADDRESS_SHIFT	12
diff --git a/arch/x86/include/uapi/asm/sigcontext.h b/arch/x86/include/uapi/asm/sigcontext.h
index d485232..62d4111 100644
--- a/arch/x86/include/uapi/asm/sigcontext.h
+++ b/arch/x86/include/uapi/asm/sigcontext.h
@@ -256,7 +256,7 @@
 	__u16				cs;
 	__u16				gs;
 	__u16				fs;
-	__u16				__pad0;
+	__u16				ss;
 	__u64				err;
 	__u64				trapno;
 	__u64				oldmask;
@@ -341,9 +341,37 @@
 	__u64				rip;
 	__u64				eflags;		/* RFLAGS */
 	__u16				cs;
+
+	/*
+	 * Prior to 2.5.64 ("[PATCH] x86-64 updates for 2.5.64-bk3"),
+	 * Linux saved and restored fs and gs in these slots.  This
+	 * was counterproductive, as fsbase and gsbase were never
+	 * saved, so arch_prctl was presumably unreliable.
+	 *
+	 * These slots should never be reused without extreme caution:
+	 *
+	 *  - Some DOSEMU versions stash fs and gs in these slots manually,
+	 *    thus overwriting anything the kernel expects to be preserved
+	 *    in these slots.
+	 *
+	 *  - If these slots are ever needed for any other purpose,
+	 *    there is some risk that very old 64-bit binaries could get
+	 *    confused.  I doubt that many such binaries still work,
+	 *    though, since the same patch in 2.5.64 also removed the
+	 *    64-bit set_thread_area syscall, so it appears that there
+	 *    is no TLS API beyond modify_ldt that works in both pre-
+	 *    and post-2.5.64 kernels.
+	 *
+	 * If the kernel ever adds explicit fs, gs, fsbase, and gsbase
+	 * save/restore, it will most likely need to be opt-in and use
+	 * different context slots.
+	 */
 	__u16				gs;
 	__u16				fs;
-	__u16				__pad0;
+	union {
+		__u16			ss;	/* If UC_SIGCONTEXT_SS */
+		__u16			__pad0;	/* Alias name for old (!UC_SIGCONTEXT_SS) user-space */
+	};
 	__u64				err;
 	__u64				trapno;
 	__u64				oldmask;
diff --git a/arch/x86/include/uapi/asm/ucontext.h b/arch/x86/include/uapi/asm/ucontext.h
index b7c29c8..e3d1ec9 100644
--- a/arch/x86/include/uapi/asm/ucontext.h
+++ b/arch/x86/include/uapi/asm/ucontext.h
@@ -1,11 +1,54 @@
 #ifndef _ASM_X86_UCONTEXT_H
 #define _ASM_X86_UCONTEXT_H
 
-#define UC_FP_XSTATE	0x1	/* indicates the presence of extended state
-				 * information in the memory layout pointed
-				 * by the fpstate pointer in the ucontext's
-				 * sigcontext struct (uc_mcontext).
-				 */
+/*
+ * Indicates the presence of extended state information in the memory
+ * layout pointed by the fpstate pointer in the ucontext's sigcontext
+ * struct (uc_mcontext).
+ */
+#define UC_FP_XSTATE	0x1
+
+#ifdef __x86_64__
+/*
+ * UC_SIGCONTEXT_SS will be set when delivering 64-bit or x32 signals on
+ * kernels that save SS in the sigcontext.  All kernels that set
+ * UC_SIGCONTEXT_SS will correctly restore at least the low 32 bits of esp
+ * regardless of SS (i.e. they implement espfix).
+ *
+ * Kernels that set UC_SIGCONTEXT_SS will also set UC_STRICT_RESTORE_SS
+ * when delivering a signal that came from 64-bit code.
+ *
+ * Sigreturn restores SS as follows:
+ *
+ * if (saved SS is valid || UC_STRICT_RESTORE_SS is set ||
+ *     saved CS is not 64-bit)
+ *         new SS = saved SS  (will fail IRET and signal if invalid)
+ * else
+ *         new SS = a flat 32-bit data segment
+ *
+ * This behavior serves three purposes:
+ *
+ * - Legacy programs that construct a 64-bit sigcontext from scratch
+ *   with zero or garbage in the SS slot (e.g. old CRIU) and call
+ *   sigreturn will still work.
+ *
+ * - Old DOSEMU versions sometimes catch a signal from a segmented
+ *   context, delete the old SS segment (with modify_ldt), and change
+ *   the saved CS to a 64-bit segment.  These DOSEMU versions expect
+ *   sigreturn to send them back to 64-bit mode without killing them,
+ *   despite the fact that the SS selector when the signal was raised is
+ *   no longer valid.  UC_STRICT_RESTORE_SS will be clear, so the kernel
+ *   will fix up SS for these DOSEMU versions.
+ *
+ * - Old and new programs that catch a signal and return without
+ *   modifying the saved context will end up in exactly the state they
+ *   started in, even if they were running in a segmented context when
+ *   the signal was raised.  Old kernels would lose track of the
+ *   previous SS value.
+ */
+#define UC_SIGCONTEXT_SS	0x2
+#define UC_STRICT_RESTORE_SS	0x4
+#endif
 
 #include <asm-generic/ucontext.h>
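
For illustration only (this is not kernel code), the sigreturn SS selection rule documented above can be modeled in plain C; the helper name and the flat-segment placeholder are hypothetical and exist only to make the rule concrete:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define UC_SIGCONTEXT_SS	0x2
#define UC_STRICT_RESTORE_SS	0x4

/* Hypothetical stand-in for the kernel's flat 32-bit data segment. */
#define FLAT_USER_DS		0x2b

/*
 * Keep the saved SS when it is valid, when UC_STRICT_RESTORE_SS is set,
 * or when the saved CS is not a 64-bit segment; otherwise substitute a
 * flat data segment, mirroring the pseudocode in the comment above.
 */
static uint16_t model_restore_ss(uint16_t saved_ss, bool saved_ss_valid,
				 bool cs_is_64bit, unsigned long uc_flags)
{
	if (saved_ss_valid || (uc_flags & UC_STRICT_RESTORE_SS) || !cs_is_64bit)
		return saved_ss;
	return FLAT_USER_DS;
}

int main(void)
{
	/* Old DOSEMU-style case: stale SS, 64-bit CS, no strict flag. */
	printf("0x%x\n", model_restore_ss(0, false, true, UC_SIGCONTEXT_SS));
	/* Strict case: the saved SS is used even if stale (IRET would fault). */
	printf("0x%x\n", model_restore_ss(0, false, true,
					  UC_SIGCONTEXT_SS | UC_STRICT_RESTORE_SS));
	return 0;
}
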
 
diff --git a/arch/x86/kernel/apic/apic_flat_64.c b/arch/x86/kernel/apic/apic_flat_64.c
index 9968f30..76f89e2 100644
--- a/arch/x86/kernel/apic/apic_flat_64.c
+++ b/arch/x86/kernel/apic/apic_flat_64.c
@@ -53,7 +53,7 @@
 	apic_write(APIC_LDR, val);
 }
 
-static inline void _flat_send_IPI_mask(unsigned long mask, int vector)
+static void _flat_send_IPI_mask(unsigned long mask, int vector)
 {
 	unsigned long flags;
 
diff --git a/arch/x86/kernel/apic/apic_numachip.c b/arch/x86/kernel/apic/apic_numachip.c
index c80c02c..ab5c2c6 100644
--- a/arch/x86/kernel/apic/apic_numachip.c
+++ b/arch/x86/kernel/apic/apic_numachip.c
@@ -30,7 +30,7 @@
 	unsigned long value;
 	unsigned int id = (x >> 24) & 0xff;
 
-	if (static_cpu_has_safe(X86_FEATURE_NODEID_MSR)) {
+	if (static_cpu_has(X86_FEATURE_NODEID_MSR)) {
 		rdmsrl(MSR_FAM10H_NODE_ID, value);
 		id |= (value << 2) & 0xff00;
 	}
@@ -178,7 +178,7 @@
 	this_cpu_write(cpu_llc_id, node);
 
 	/* Account for nodes per socket in multi-core-module processors */
-	if (static_cpu_has_safe(X86_FEATURE_NODEID_MSR)) {
+	if (static_cpu_has(X86_FEATURE_NODEID_MSR)) {
 		rdmsrl(MSR_FAM10H_NODE_ID, val);
 		nodes = ((val >> 3) & 7) + 1;
 	}
diff --git a/arch/x86/kernel/apic/ipi.c b/arch/x86/kernel/apic/ipi.c
index eb45fc9..28bde88 100644
--- a/arch/x86/kernel/apic/ipi.c
+++ b/arch/x86/kernel/apic/ipi.c
@@ -18,6 +18,66 @@
 #include <asm/proto.h>
 #include <asm/ipi.h>
 
+void __default_send_IPI_shortcut(unsigned int shortcut, int vector, unsigned int dest)
+{
+	/*
+	 * Subtle. In the case of the 'never do double writes' workaround
+	 * we have to lock out interrupts to be safe.  As we don't care
+	 * about the value read, we use an atomic rmw access to avoid costly
+	 * cli/sti.  Otherwise we use an even cheaper single atomic write
+	 * to the APIC.
+	 */
+	unsigned int cfg;
+
+	/*
+	 * Wait for idle.
+	 */
+	__xapic_wait_icr_idle();
+
+	/*
+	 * No need to touch the target chip field
+	 */
+	cfg = __prepare_ICR(shortcut, vector, dest);
+
+	/*
+	 * Send the IPI. The write to APIC_ICR fires this off.
+	 */
+	native_apic_mem_write(APIC_ICR, cfg);
+}
+
+/*
+ * This is used to send an IPI with no shorthand notation (the destination is
+ * specified in bits 56 to 63 of the ICR).
+ */
+void __default_send_IPI_dest_field(unsigned int mask, int vector, unsigned int dest)
+{
+	unsigned long cfg;
+
+	/*
+	 * Wait for idle.
+	 */
+	if (unlikely(vector == NMI_VECTOR))
+		safe_apic_wait_icr_idle();
+	else
+		__xapic_wait_icr_idle();
+
+	/*
+	 * prepare target chip field
+	 */
+	cfg = __prepare_ICR2(mask);
+	native_apic_mem_write(APIC_ICR2, cfg);
+
+	/*
+	 * program the ICR
+	 */
+	cfg = __prepare_ICR(0, vector, dest);
+
+	/*
+	 * Send the IPI. The write to APIC_ICR fires this off.
+	 */
+	native_apic_mem_write(APIC_ICR, cfg);
+}
+
 void default_send_IPI_single_phys(int cpu, int vector)
 {
 	unsigned long flags;
diff --git a/arch/x86/kernel/asm-offsets.c b/arch/x86/kernel/asm-offsets.c
index 84a7524..5c04246 100644
--- a/arch/x86/kernel/asm-offsets.c
+++ b/arch/x86/kernel/asm-offsets.c
@@ -59,7 +59,6 @@
 
 #ifdef CONFIG_PARAVIRT
 	BLANK();
-	OFFSET(PARAVIRT_enabled, pv_info, paravirt_enabled);
 	OFFSET(PARAVIRT_PATCH_pv_cpu_ops, paravirt_patch_template, pv_cpu_ops);
 	OFFSET(PARAVIRT_PATCH_pv_irq_ops, paravirt_patch_template, pv_irq_ops);
 	OFFSET(PV_IRQ_irq_disable, pv_irq_ops, irq_disable);
diff --git a/arch/x86/kernel/asm-offsets_32.c b/arch/x86/kernel/asm-offsets_32.c
index 6ce3902..ecdc1d2 100644
--- a/arch/x86/kernel/asm-offsets_32.c
+++ b/arch/x86/kernel/asm-offsets_32.c
@@ -7,7 +7,7 @@
 #include <linux/lguest.h>
 #include "../../../drivers/lguest/lg.h"
 
-#define __SYSCALL_I386(nr, sym, compat) [nr] = 1,
+#define __SYSCALL_I386(nr, sym, qual) [nr] = 1,
 static char syscalls[] = {
 #include <asm/syscalls_32.h>
 };
@@ -52,6 +52,11 @@
 	DEFINE(TSS_sysenter_sp0, offsetof(struct tss_struct, x86_tss.sp0) -
 	       offsetofend(struct tss_struct, SYSENTER_stack));
 
+	/* Offset from cpu_tss to SYSENTER_stack */
+	OFFSET(CPU_TSS_SYSENTER_stack, tss_struct, SYSENTER_stack);
+	/* Size of SYSENTER_stack */
+	DEFINE(SIZEOF_SYSENTER_stack, sizeof(((struct tss_struct *)0)->SYSENTER_stack));
+
 #if defined(CONFIG_LGUEST) || defined(CONFIG_LGUEST_GUEST) || defined(CONFIG_LGUEST_MODULE)
 	BLANK();
 	OFFSET(LGUEST_DATA_irq_enabled, lguest_data, irq_enabled);
diff --git a/arch/x86/kernel/asm-offsets_64.c b/arch/x86/kernel/asm-offsets_64.c
index f2edafb..d875f97 100644
--- a/arch/x86/kernel/asm-offsets_64.c
+++ b/arch/x86/kernel/asm-offsets_64.c
@@ -4,17 +4,11 @@
 
 #include <asm/ia32.h>
 
-#define __SYSCALL_64(nr, sym, compat) [nr] = 1,
-#define __SYSCALL_COMMON(nr, sym, compat) [nr] = 1,
-#ifdef CONFIG_X86_X32_ABI
-# define __SYSCALL_X32(nr, sym, compat) [nr] = 1,
-#else
-# define __SYSCALL_X32(nr, sym, compat) /* nothing */
-#endif
+#define __SYSCALL_64(nr, sym, qual) [nr] = 1,
 static char syscalls_64[] = {
 #include <asm/syscalls_64.h>
 };
-#define __SYSCALL_I386(nr, sym, compat) [nr] = 1,
+#define __SYSCALL_I386(nr, sym, qual) [nr] = 1,
 static char syscalls_ia32[] = {
 #include <asm/syscalls_32.h>
 };
diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
index 7a60424..0d373d7 100644
--- a/arch/x86/kernel/cpu/Makefile
+++ b/arch/x86/kernel/cpu/Makefile
@@ -42,7 +42,7 @@
 quiet_cmd_mkcapflags = MKCAP   $@
       cmd_mkcapflags = $(CONFIG_SHELL) $(srctree)/$(src)/mkcapflags.sh $< $@
 
-cpufeature = $(src)/../../include/asm/cpufeature.h
+cpufeature = $(src)/../../include/asm/cpufeatures.h
 
 targets += capflags.c
 $(obj)/capflags.c: $(cpufeature) $(src)/mkcapflags.sh FORCE
diff --git a/arch/x86/kernel/cpu/centaur.c b/arch/x86/kernel/cpu/centaur.c
index ce197bb..1661d8e 100644
--- a/arch/x86/kernel/cpu/centaur.c
+++ b/arch/x86/kernel/cpu/centaur.c
@@ -1,7 +1,7 @@
 #include <linux/bitops.h>
 #include <linux/kernel.h>
 
-#include <asm/processor.h>
+#include <asm/cpufeature.h>
 #include <asm/e820.h>
 #include <asm/mtrr.h>
 #include <asm/msr.h>
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 81cf716..249461f 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -162,6 +162,22 @@
 }
 __setup("nompx", x86_mpx_setup);
 
+static int __init x86_noinvpcid_setup(char *s)
+{
+	/* noinvpcid doesn't accept parameters */
+	if (s)
+		return -EINVAL;
+
+	/* do not emit a message if the feature is not present */
+	if (!boot_cpu_has(X86_FEATURE_INVPCID))
+		return 0;
+
+	setup_clear_cpu_cap(X86_FEATURE_INVPCID);
+	pr_info("noinvpcid: INVPCID feature disabled\n");
+	return 0;
+}
+early_param("noinvpcid", x86_noinvpcid_setup);
+
 #ifdef CONFIG_X86_32
 static int cachesize_override = -1;
 static int disable_x86_serial_nr = 1;
@@ -801,6 +817,31 @@
 #else
 	set_cpu_cap(c, X86_FEATURE_NOPL);
 #endif
+
+	/*
+	 * ESPFIX is a strange bug.  All real CPUs have it.  Paravirt
+	 * systems that run Linux at CPL > 0 may or may not have the
+	 * issue, but, even if they have the issue, there's absolutely
+	 * nothing we can do about it because we can't use the real IRET
+	 * instruction.
+	 *
+	 * NB: For the time being, only 32-bit kernels support
+	 * X86_BUG_ESPFIX as such.  64-bit kernels directly choose
+	 * whether to apply espfix using paravirt hooks.  If any
+	 * non-paravirt system ever shows up that does *not* have the
+	 * ESPFIX issue, we can change this.
+	 */
+#ifdef CONFIG_X86_32
+#ifdef CONFIG_PARAVIRT
+	do {
+		extern void native_iret(void);
+		if (pv_cpu_ops.iret == native_iret)
+			set_cpu_bug(c, X86_BUG_ESPFIX);
+	} while (0);
+#else
+	set_cpu_bug(c, X86_BUG_ESPFIX);
+#endif
+#endif
 }
 
 static void generic_identify(struct cpuinfo_x86 *c)
@@ -1475,20 +1516,6 @@
 }
 #endif
 
-#ifdef CONFIG_X86_DEBUG_STATIC_CPU_HAS
-void warn_pre_alternatives(void)
-{
-	WARN(1, "You're using static_cpu_has before alternatives have run!\n");
-}
-EXPORT_SYMBOL_GPL(warn_pre_alternatives);
-#endif
-
-inline bool __static_cpu_has_safe(u16 bit)
-{
-	return boot_cpu_has(bit);
-}
-EXPORT_SYMBOL_GPL(__static_cpu_has_safe);
-
 static void bsp_resume(void)
 {
 	if (this_cpu->c_bsp_resume)
diff --git a/arch/x86/kernel/cpu/cyrix.c b/arch/x86/kernel/cpu/cyrix.c
index 187bb58..6adef9c 100644
--- a/arch/x86/kernel/cpu/cyrix.c
+++ b/arch/x86/kernel/cpu/cyrix.c
@@ -8,6 +8,7 @@
 #include <linux/timer.h>
 #include <asm/pci-direct.h>
 #include <asm/tsc.h>
+#include <asm/cpufeature.h>
 
 #include "cpu.h"
 
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index 38766c2..1f7fdb9 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -8,7 +8,7 @@
 #include <linux/module.h>
 #include <linux/uaccess.h>
 
-#include <asm/processor.h>
+#include <asm/cpufeature.h>
 #include <asm/pgtable.h>
 #include <asm/msr.h>
 #include <asm/bugs.h>
diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
index 6ed779e..de6626c 100644
--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
@@ -14,7 +14,7 @@
 #include <linux/sysfs.h>
 #include <linux/pci.h>
 
-#include <asm/processor.h>
+#include <asm/cpufeature.h>
 #include <asm/amd_nb.h>
 #include <asm/smp.h>
 
diff --git a/arch/x86/kernel/cpu/match.c b/arch/x86/kernel/cpu/match.c
index afa9f0d..fbb5e90 100644
--- a/arch/x86/kernel/cpu/match.c
+++ b/arch/x86/kernel/cpu/match.c
@@ -1,5 +1,5 @@
 #include <asm/cpu_device_id.h>
-#include <asm/processor.h>
+#include <asm/cpufeature.h>
 #include <linux/cpu.h>
 #include <linux/module.h>
 #include <linux/slab.h>
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
index 524f2a8..f0c921b 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -1578,6 +1578,17 @@
 
 		if (c->x86 == 6 && c->x86_model == 45)
 			quirk_no_way_out = quirk_sandybridge_ifu;
+		/*
+		 * MCG_CAP.MCG_SER_P is necessary but not sufficient to know
+		 * whether this processor will actually generate recoverable
+		 * machine checks. Check to see if this is an E7 model Xeon.
+		 * We can't do a model number check because E5 and E7 use the
+		 * same model number. E5 doesn't support recovery, E7 does.
+		 */
+		if (mca_cfg.recovery || (mca_cfg.ser &&
+			!strncmp(c->x86_model_id,
+				 "Intel(R) Xeon(R) CPU E7-", 24)))
+			set_cpu_cap(c, X86_FEATURE_MCE_RECOVERY);
 	}
 	if (cfg->monarch_timeout < 0)
 		cfg->monarch_timeout = 0;
@@ -2030,6 +2041,8 @@
 		cfg->bootlog = (str[0] == 'b');
 	else if (!strcmp(str, "bios_cmci_threshold"))
 		cfg->bios_cmci_threshold = true;
+	else if (!strcmp(str, "recovery"))
+		cfg->recovery = true;
 	else if (isdigit(str[0])) {
 		if (get_option(&str, &cfg->tolerant) == 2)
 			get_option(&str, &(cfg->monarch_timeout));
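
As a brief aside (not kernel code), the brand-string prefix test above can be reproduced in user space by reading the CPUID brand string from leaves 0x80000002-0x80000004 and skipping the leading padding, roughly as the kernel does before filling x86_model_id:

#include <cpuid.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	unsigned int regs[12];
	char brand[sizeof(regs) + 1];
	const char *name;
	int i;

	/* Leaves 0x80000002..0x80000004 return the 48-byte brand string. */
	for (i = 0; i < 3; i++)
		__cpuid(0x80000002 + i, regs[i * 4], regs[i * 4 + 1],
			regs[i * 4 + 2], regs[i * 4 + 3]);

	memcpy(brand, regs, sizeof(regs));
	brand[sizeof(regs)] = '\0';

	/* Some CPUs pad the string with leading spaces; skip them. */
	name = brand;
	while (*name == ' ')
		name++;

	printf("brand: %s\n", name);
	printf("E7 prefix: %s\n",
	       !strncmp(name, "Intel(R) Xeon(R) CPU E7-", 24) ? "yes" : "no");
	return 0;
}
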
diff --git a/arch/x86/kernel/cpu/microcode/amd.c b/arch/x86/kernel/cpu/microcode/amd.c
index 75d3aab..8581963 100644
--- a/arch/x86/kernel/cpu/microcode/amd.c
+++ b/arch/x86/kernel/cpu/microcode/amd.c
@@ -431,10 +431,6 @@
 	else
 		container = cont_va;
 
-	if (ucode_new_rev)
-		pr_info("microcode: updated early to new patch_level=0x%08x\n",
-			ucode_new_rev);
-
 	eax   = cpuid_eax(0x00000001);
 	eax   = ((eax >> 8) & 0xf) + ((eax >> 20) & 0xff);
 
@@ -469,8 +465,7 @@
 	if (mc && rev < mc->hdr.patch_id) {
 		if (!__apply_microcode_amd(mc)) {
 			ucode_new_rev = mc->hdr.patch_id;
-			pr_info("microcode: reload patch_level=0x%08x\n",
-				ucode_new_rev);
+			pr_info("reload patch_level=0x%08x\n", ucode_new_rev);
 		}
 	}
 }
@@ -793,15 +788,13 @@
 		return -EINVAL;
 	}
 
-	patch->data = kzalloc(patch_size, GFP_KERNEL);
+	patch->data = kmemdup(fw + SECTION_HDR_SIZE, patch_size, GFP_KERNEL);
 	if (!patch->data) {
 		pr_err("Patch data allocation failure.\n");
 		kfree(patch);
 		return -EINVAL;
 	}
 
-	/* All looks ok, copy patch... */
-	memcpy(patch->data, fw + SECTION_HDR_SIZE, patch_size);
 	INIT_LIST_HEAD(&patch->plist);
 	patch->patch_id  = mc_hdr->patch_id;
 	patch->equiv_cpu = proc_id;
@@ -957,6 +950,10 @@
 		return NULL;
 	}
 
+	if (ucode_new_rev)
+		pr_info_once("microcode updated early to new patch_level=0x%08x\n",
+			     ucode_new_rev);
+
 	return &microcode_amd_ops;
 }
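
An aside for illustration only: the kzalloc()+memcpy() pair replaced above collapses into kmemdup(), which allocates and copies in one call. A minimal user-space analogue, with a hypothetical memdup() name:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* User-space analogue of kmemdup(): return an allocated copy of src, or NULL. */
static void *memdup(const void *src, size_t len)
{
	void *p = malloc(len);

	if (p)
		memcpy(p, src, len);
	return p;
}

int main(void)
{
	const char msg[] = "patch data";
	char *copy = memdup(msg, sizeof(msg));

	if (copy)
		printf("%s\n", copy);
	free(copy);
	return 0;
}
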
 
diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c
index faec712..ac360bf 100644
--- a/arch/x86/kernel/cpu/microcode/core.c
+++ b/arch/x86/kernel/cpu/microcode/core.c
@@ -43,16 +43,8 @@
 #define MICROCODE_VERSION	"2.01"
 
 static struct microcode_ops	*microcode_ops;
-
 static bool dis_ucode_ldr;
 
-static int __init disable_loader(char *str)
-{
-	dis_ucode_ldr = true;
-	return 1;
-}
-__setup("dis_ucode_ldr", disable_loader);
-
 /*
  * Synchronization.
  *
@@ -81,15 +73,16 @@
 
 static bool __init check_loader_disabled_bsp(void)
 {
+	static const char *__dis_opt_str = "dis_ucode_ldr";
+
 #ifdef CONFIG_X86_32
 	const char *cmdline = (const char *)__pa_nodebug(boot_command_line);
-	const char *opt	    = "dis_ucode_ldr";
-	const char *option  = (const char *)__pa_nodebug(opt);
+	const char *option  = (const char *)__pa_nodebug(__dis_opt_str);
 	bool *res = (bool *)__pa_nodebug(&dis_ucode_ldr);
 
 #else /* CONFIG_X86_64 */
 	const char *cmdline = boot_command_line;
-	const char *option  = "dis_ucode_ldr";
+	const char *option  = __dis_opt_str;
 	bool *res = &dis_ucode_ldr;
 #endif
 
@@ -479,7 +472,7 @@
 	enum ucode_state ustate;
 	struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
 
-	if (uci && uci->valid)
+	if (uci->valid)
 		return UCODE_OK;
 
 	if (collect_cpu_info(cpu))
@@ -630,7 +623,7 @@
 	struct cpuinfo_x86 *c = &boot_cpu_data;
 	int error;
 
-	if (paravirt_enabled() || dis_ucode_ldr)
+	if (dis_ucode_ldr)
 		return -EINVAL;
 
 	if (c->x86_vendor == X86_VENDOR_INTEL)
diff --git a/arch/x86/kernel/cpu/microcode/intel.c b/arch/x86/kernel/cpu/microcode/intel.c
index ee81c54..cbb3cf0 100644
--- a/arch/x86/kernel/cpu/microcode/intel.c
+++ b/arch/x86/kernel/cpu/microcode/intel.c
@@ -39,9 +39,15 @@
 #include <asm/setup.h>
 #include <asm/msr.h>
 
-static unsigned long mc_saved_in_initrd[MAX_UCODE_COUNT];
+/*
+ * Temporary storage for microcode blob pointers. We note here the pointers to
+ * microcode blobs we've got from whatever storage (detached initrd, builtin).
+ * Later on, we put those into final storage mc_saved_data.mc_saved.
+ */
+static unsigned long mc_tmp_ptrs[MAX_UCODE_COUNT];
+
 static struct mc_saved_data {
-	unsigned int mc_saved_count;
+	unsigned int num_saved;
 	struct microcode_intel **mc_saved;
 } mc_saved_data;
 
@@ -78,53 +84,50 @@
 }
 
 static inline void
-copy_initrd_ptrs(struct microcode_intel **mc_saved, unsigned long *initrd,
-		  unsigned long off, int num_saved)
+copy_ptrs(struct microcode_intel **mc_saved, unsigned long *mc_ptrs,
+	  unsigned long off, int num_saved)
 {
 	int i;
 
 	for (i = 0; i < num_saved; i++)
-		mc_saved[i] = (struct microcode_intel *)(initrd[i] + off);
+		mc_saved[i] = (struct microcode_intel *)(mc_ptrs[i] + off);
 }
 
 #ifdef CONFIG_X86_32
 static void
-microcode_phys(struct microcode_intel **mc_saved_tmp,
-	       struct mc_saved_data *mc_saved_data)
+microcode_phys(struct microcode_intel **mc_saved_tmp, struct mc_saved_data *mcs)
 {
 	int i;
 	struct microcode_intel ***mc_saved;
 
-	mc_saved = (struct microcode_intel ***)
-		   __pa_nodebug(&mc_saved_data->mc_saved);
-	for (i = 0; i < mc_saved_data->mc_saved_count; i++) {
+	mc_saved = (struct microcode_intel ***)__pa_nodebug(&mcs->mc_saved);
+
+	for (i = 0; i < mcs->num_saved; i++) {
 		struct microcode_intel *p;
 
-		p = *(struct microcode_intel **)
-			__pa_nodebug(mc_saved_data->mc_saved + i);
+		p = *(struct microcode_intel **)__pa_nodebug(mcs->mc_saved + i);
 		mc_saved_tmp[i] = (struct microcode_intel *)__pa_nodebug(p);
 	}
 }
 #endif
 
 static enum ucode_state
-load_microcode(struct mc_saved_data *mc_saved_data, unsigned long *initrd,
-	       unsigned long initrd_start, struct ucode_cpu_info *uci)
+load_microcode(struct mc_saved_data *mcs, unsigned long *mc_ptrs,
+	       unsigned long offset, struct ucode_cpu_info *uci)
 {
 	struct microcode_intel *mc_saved_tmp[MAX_UCODE_COUNT];
-	unsigned int count = mc_saved_data->mc_saved_count;
+	unsigned int count = mcs->num_saved;
 
-	if (!mc_saved_data->mc_saved) {
-		copy_initrd_ptrs(mc_saved_tmp, initrd, initrd_start, count);
+	if (!mcs->mc_saved) {
+		copy_ptrs(mc_saved_tmp, mc_ptrs, offset, count);
 
 		return load_microcode_early(mc_saved_tmp, count, uci);
 	} else {
 #ifdef CONFIG_X86_32
-		microcode_phys(mc_saved_tmp, mc_saved_data);
+		microcode_phys(mc_saved_tmp, mcs);
 		return load_microcode_early(mc_saved_tmp, count, uci);
 #else
-		return load_microcode_early(mc_saved_data->mc_saved,
-						    count, uci);
+		return load_microcode_early(mcs->mc_saved, count, uci);
 #endif
 	}
 }
@@ -175,25 +178,25 @@
 }
 
 static int
-save_microcode(struct mc_saved_data *mc_saved_data,
+save_microcode(struct mc_saved_data *mcs,
 	       struct microcode_intel **mc_saved_src,
-	       unsigned int mc_saved_count)
+	       unsigned int num_saved)
 {
 	int i, j;
 	struct microcode_intel **saved_ptr;
 	int ret;
 
-	if (!mc_saved_count)
+	if (!num_saved)
 		return -EINVAL;
 
 	/*
 	 * Copy new microcode data.
 	 */
-	saved_ptr = kcalloc(mc_saved_count, sizeof(struct microcode_intel *), GFP_KERNEL);
+	saved_ptr = kcalloc(num_saved, sizeof(struct microcode_intel *), GFP_KERNEL);
 	if (!saved_ptr)
 		return -ENOMEM;
 
-	for (i = 0; i < mc_saved_count; i++) {
+	for (i = 0; i < num_saved; i++) {
 		struct microcode_header_intel *mc_hdr;
 		struct microcode_intel *mc;
 		unsigned long size;
@@ -207,20 +210,18 @@
 		mc_hdr = &mc->hdr;
 		size   = get_totalsize(mc_hdr);
 
-		saved_ptr[i] = kmalloc(size, GFP_KERNEL);
+		saved_ptr[i] = kmemdup(mc, size, GFP_KERNEL);
 		if (!saved_ptr[i]) {
 			ret = -ENOMEM;
 			goto err;
 		}
-
-		memcpy(saved_ptr[i], mc, size);
 	}
 
 	/*
 	 * Point to newly saved microcode.
 	 */
-	mc_saved_data->mc_saved = saved_ptr;
-	mc_saved_data->mc_saved_count = mc_saved_count;
+	mcs->mc_saved  = saved_ptr;
+	mcs->num_saved = num_saved;
 
 	return 0;
 
@@ -284,22 +285,20 @@
  * BSP can stay in the platform.
  */
 static enum ucode_state __init
-get_matching_model_microcode(int cpu, unsigned long start,
-			     void *data, size_t size,
-			     struct mc_saved_data *mc_saved_data,
-			     unsigned long *mc_saved_in_initrd,
+get_matching_model_microcode(unsigned long start, void *data, size_t size,
+			     struct mc_saved_data *mcs, unsigned long *mc_ptrs,
 			     struct ucode_cpu_info *uci)
 {
-	u8 *ucode_ptr = data;
-	unsigned int leftover = size;
-	enum ucode_state state = UCODE_OK;
-	unsigned int mc_size;
-	struct microcode_header_intel *mc_header;
 	struct microcode_intel *mc_saved_tmp[MAX_UCODE_COUNT];
-	unsigned int mc_saved_count = mc_saved_data->mc_saved_count;
+	struct microcode_header_intel *mc_header;
+	unsigned int num_saved = mcs->num_saved;
+	enum ucode_state state = UCODE_OK;
+	unsigned int leftover = size;
+	u8 *ucode_ptr = data;
+	unsigned int mc_size;
 	int i;
 
-	while (leftover && mc_saved_count < ARRAY_SIZE(mc_saved_tmp)) {
+	while (leftover && num_saved < ARRAY_SIZE(mc_saved_tmp)) {
 
 		if (leftover < sizeof(mc_header))
 			break;
@@ -318,32 +317,31 @@
 		 * the platform, we need to find and save microcode patches
 		 * with the same family and model as the BSP.
 		 */
-		if (matching_model_microcode(mc_header, uci->cpu_sig.sig) !=
-			 UCODE_OK) {
+		if (matching_model_microcode(mc_header, uci->cpu_sig.sig) != UCODE_OK) {
 			ucode_ptr += mc_size;
 			continue;
 		}
 
-		mc_saved_count = _save_mc(mc_saved_tmp, ucode_ptr, mc_saved_count);
+		num_saved = _save_mc(mc_saved_tmp, ucode_ptr, num_saved);
 
 		ucode_ptr += mc_size;
 	}
 
 	if (leftover) {
 		state = UCODE_ERROR;
-		goto out;
+		return state;
 	}
 
-	if (mc_saved_count == 0) {
+	if (!num_saved) {
 		state = UCODE_NFOUND;
-		goto out;
+		return state;
 	}
 
-	for (i = 0; i < mc_saved_count; i++)
-		mc_saved_in_initrd[i] = (unsigned long)mc_saved_tmp[i] - start;
+	for (i = 0; i < num_saved; i++)
+		mc_ptrs[i] = (unsigned long)mc_saved_tmp[i] - start;
 
-	mc_saved_data->mc_saved_count = mc_saved_count;
-out:
+	mcs->num_saved = num_saved;
+
 	return state;
 }
 
@@ -373,7 +371,7 @@
 		native_rdmsr(MSR_IA32_PLATFORM_ID, val[0], val[1]);
 		csig.pf = 1 << ((val[1] >> 18) & 7);
 	}
-	native_wrmsr(MSR_IA32_UCODE_REV, 0, 0);
+	native_wrmsrl(MSR_IA32_UCODE_REV, 0);
 
 	/* As documented in the SDM: Do a CPUID 1 here */
 	sync_core();
@@ -396,11 +394,11 @@
 	unsigned int sig, pf, rev, total_size, data_size, date;
 	struct ucode_cpu_info uci;
 
-	if (mc_saved_data.mc_saved_count == 0) {
+	if (!mc_saved_data.num_saved) {
 		pr_debug("no microcode data saved.\n");
 		return;
 	}
-	pr_debug("Total microcode saved: %d\n", mc_saved_data.mc_saved_count);
+	pr_debug("Total microcode saved: %d\n", mc_saved_data.num_saved);
 
 	collect_cpu_info_early(&uci);
 
@@ -409,7 +407,7 @@
 	rev = uci.cpu_sig.rev;
 	pr_debug("CPU: sig=0x%x, pf=0x%x, rev=0x%x\n", sig, pf, rev);
 
-	for (i = 0; i < mc_saved_data.mc_saved_count; i++) {
+	for (i = 0; i < mc_saved_data.num_saved; i++) {
 		struct microcode_header_intel *mc_saved_header;
 		struct extended_sigtable *ext_header;
 		int ext_sigcount;
@@ -465,7 +463,7 @@
 {
 	struct microcode_intel *mc_saved_tmp[MAX_UCODE_COUNT];
 	unsigned int mc_saved_count_init;
-	unsigned int mc_saved_count;
+	unsigned int num_saved;
 	struct microcode_intel **mc_saved;
 	int ret = 0;
 	int i;
@@ -476,23 +474,23 @@
 	 */
 	mutex_lock(&x86_cpu_microcode_mutex);
 
-	mc_saved_count_init = mc_saved_data.mc_saved_count;
-	mc_saved_count = mc_saved_data.mc_saved_count;
+	mc_saved_count_init = mc_saved_data.num_saved;
+	num_saved = mc_saved_data.num_saved;
 	mc_saved = mc_saved_data.mc_saved;
 
-	if (mc_saved && mc_saved_count)
+	if (mc_saved && num_saved)
 		memcpy(mc_saved_tmp, mc_saved,
-		       mc_saved_count * sizeof(struct microcode_intel *));
+		       num_saved * sizeof(struct microcode_intel *));
 	/*
 	 * Save the microcode patch mc in mc_save_tmp structure if it's a newer
 	 * version.
 	 */
-	mc_saved_count = _save_mc(mc_saved_tmp, mc, mc_saved_count);
+	num_saved = _save_mc(mc_saved_tmp, mc, num_saved);
 
 	/*
 	 * Save the mc_save_tmp in global mc_saved_data.
 	 */
-	ret = save_microcode(&mc_saved_data, mc_saved_tmp, mc_saved_count);
+	ret = save_microcode(&mc_saved_data, mc_saved_tmp, num_saved);
 	if (ret) {
 		pr_err("Cannot save microcode patch.\n");
 		goto out;
@@ -536,7 +534,7 @@
 
 static __initdata char ucode_name[] = "kernel/x86/microcode/GenuineIntel.bin";
 static __init enum ucode_state
-scan_microcode(struct mc_saved_data *mc_saved_data, unsigned long *initrd,
+scan_microcode(struct mc_saved_data *mcs, unsigned long *mc_ptrs,
 	       unsigned long start, unsigned long size,
 	       struct ucode_cpu_info *uci)
 {
@@ -551,14 +549,18 @@
 	cd.data = NULL;
 	cd.size = 0;
 
-	cd = find_cpio_data(p, (void *)start, size, &offset);
-	if (!cd.data) {
+	/* try built-in microcode if no initrd */
+	if (!size) {
 		if (!load_builtin_intel_microcode(&cd))
 			return UCODE_ERROR;
+	} else {
+		cd = find_cpio_data(p, (void *)start, size, &offset);
+		if (!cd.data)
+			return UCODE_ERROR;
 	}
 
-	return get_matching_model_microcode(0, start, cd.data, cd.size,
-					    mc_saved_data, initrd, uci);
+	return get_matching_model_microcode(start, cd.data, cd.size,
+					    mcs, mc_ptrs, uci);
 }
 
 /*
@@ -567,14 +569,11 @@
 static void
 print_ucode_info(struct ucode_cpu_info *uci, unsigned int date)
 {
-	int cpu = smp_processor_id();
-
-	pr_info("CPU%d microcode updated early to revision 0x%x, date = %04x-%02x-%02x\n",
-		cpu,
-		uci->cpu_sig.rev,
-		date & 0xffff,
-		date >> 24,
-		(date >> 16) & 0xff);
+	pr_info_once("microcode updated early to revision 0x%x, date = %04x-%02x-%02x\n",
+		     uci->cpu_sig.rev,
+		     date & 0xffff,
+		     date >> 24,
+		     (date >> 16) & 0xff);
 }
 
 #ifdef CONFIG_X86_32
@@ -603,19 +602,19 @@
  */
 static void print_ucode(struct ucode_cpu_info *uci)
 {
-	struct microcode_intel *mc_intel;
+	struct microcode_intel *mc;
 	int *delay_ucode_info_p;
 	int *current_mc_date_p;
 
-	mc_intel = uci->mc;
-	if (mc_intel == NULL)
+	mc = uci->mc;
+	if (!mc)
 		return;
 
 	delay_ucode_info_p = (int *)__pa_nodebug(&delay_ucode_info);
 	current_mc_date_p = (int *)__pa_nodebug(&current_mc_date);
 
 	*delay_ucode_info_p = 1;
-	*current_mc_date_p = mc_intel->hdr.date;
+	*current_mc_date_p = mc->hdr.date;
 }
 #else
 
@@ -630,37 +629,35 @@
 
 static inline void print_ucode(struct ucode_cpu_info *uci)
 {
-	struct microcode_intel *mc_intel;
+	struct microcode_intel *mc;
 
-	mc_intel = uci->mc;
-	if (mc_intel == NULL)
+	mc = uci->mc;
+	if (!mc)
 		return;
 
-	print_ucode_info(uci, mc_intel->hdr.date);
+	print_ucode_info(uci, mc->hdr.date);
 }
 #endif
 
 static int apply_microcode_early(struct ucode_cpu_info *uci, bool early)
 {
-	struct microcode_intel *mc_intel;
+	struct microcode_intel *mc;
 	unsigned int val[2];
 
-	mc_intel = uci->mc;
-	if (mc_intel == NULL)
+	mc = uci->mc;
+	if (!mc)
 		return 0;
 
 	/* write microcode via MSR 0x79 */
-	native_wrmsr(MSR_IA32_UCODE_WRITE,
-	      (unsigned long) mc_intel->bits,
-	      (unsigned long) mc_intel->bits >> 16 >> 16);
-	native_wrmsr(MSR_IA32_UCODE_REV, 0, 0);
+	native_wrmsrl(MSR_IA32_UCODE_WRITE, (unsigned long)mc->bits);
+	native_wrmsrl(MSR_IA32_UCODE_REV, 0);
 
 	/* As documented in the SDM: Do a CPUID 1 here */
 	sync_core();
 
 	/* get the current revision from MSR 0x8B */
 	native_rdmsr(MSR_IA32_UCODE_REV, val[0], val[1]);
-	if (val[1] != mc_intel->hdr.rev)
+	if (val[1] != mc->hdr.rev)
 		return -1;
 
 #ifdef CONFIG_X86_64
@@ -672,25 +669,26 @@
 	if (early)
 		print_ucode(uci);
 	else
-		print_ucode_info(uci, mc_intel->hdr.date);
+		print_ucode_info(uci, mc->hdr.date);
 
 	return 0;
 }
 
 /*
  * This function converts microcode patch offsets previously stored in
- * mc_saved_in_initrd to pointers and stores the pointers in mc_saved_data.
+ * mc_tmp_ptrs to pointers and stores the pointers in mc_saved_data.
  */
 int __init save_microcode_in_initrd_intel(void)
 {
-	unsigned int count = mc_saved_data.mc_saved_count;
+	unsigned int count = mc_saved_data.num_saved;
 	struct microcode_intel *mc_saved[MAX_UCODE_COUNT];
 	int ret = 0;
 
-	if (count == 0)
+	if (!count)
 		return ret;
 
-	copy_initrd_ptrs(mc_saved, mc_saved_in_initrd, initrd_start, count);
+	copy_ptrs(mc_saved, mc_tmp_ptrs, get_initrd_start(), count);
+
 	ret = save_microcode(&mc_saved_data, mc_saved, count);
 	if (ret)
 		pr_err("Cannot save microcode patches from initrd.\n");
@@ -701,8 +699,7 @@
 }
 
 static void __init
-_load_ucode_intel_bsp(struct mc_saved_data *mc_saved_data,
-		      unsigned long *initrd,
+_load_ucode_intel_bsp(struct mc_saved_data *mcs, unsigned long *mc_ptrs,
 		      unsigned long start, unsigned long size)
 {
 	struct ucode_cpu_info uci;
@@ -710,11 +707,11 @@
 
 	collect_cpu_info_early(&uci);
 
-	ret = scan_microcode(mc_saved_data, initrd, start, size, &uci);
+	ret = scan_microcode(mcs, mc_ptrs, start, size, &uci);
 	if (ret != UCODE_OK)
 		return;
 
-	ret = load_microcode(mc_saved_data, initrd, start, &uci);
+	ret = load_microcode(mcs, mc_ptrs, start, &uci);
 	if (ret != UCODE_OK)
 		return;
 
@@ -728,53 +725,49 @@
 	struct boot_params *p;
 
 	p	= (struct boot_params *)__pa_nodebug(&boot_params);
-	start	= p->hdr.ramdisk_image;
 	size	= p->hdr.ramdisk_size;
 
-	_load_ucode_intel_bsp(
-			(struct mc_saved_data *)__pa_nodebug(&mc_saved_data),
-			(unsigned long *)__pa_nodebug(&mc_saved_in_initrd),
-			start, size);
-#else
-	start	= boot_params.hdr.ramdisk_image + PAGE_OFFSET;
-	size	= boot_params.hdr.ramdisk_size;
+	/*
+	 * Set start only if we have an initrd image. We cannot use initrd_start
+	 * because it is not set that early yet.
+	 */
+	start	= (size ? p->hdr.ramdisk_image : 0);
 
-	_load_ucode_intel_bsp(&mc_saved_data, mc_saved_in_initrd, start, size);
+	_load_ucode_intel_bsp((struct mc_saved_data *)__pa_nodebug(&mc_saved_data),
+			      (unsigned long *)__pa_nodebug(&mc_tmp_ptrs),
+			      start, size);
+#else
+	size	= boot_params.hdr.ramdisk_size;
+	start	= (size ? boot_params.hdr.ramdisk_image + PAGE_OFFSET : 0);
+
+	_load_ucode_intel_bsp(&mc_saved_data, mc_tmp_ptrs, start, size);
 #endif
 }
 
 void load_ucode_intel_ap(void)
 {
-	struct mc_saved_data *mc_saved_data_p;
+	unsigned long *mcs_tmp_p;
+	struct mc_saved_data *mcs_p;
 	struct ucode_cpu_info uci;
-	unsigned long *mc_saved_in_initrd_p;
-	unsigned long initrd_start_addr;
 	enum ucode_state ret;
 #ifdef CONFIG_X86_32
-	unsigned long *initrd_start_p;
 
-	mc_saved_in_initrd_p =
-		(unsigned long *)__pa_nodebug(mc_saved_in_initrd);
-	mc_saved_data_p = (struct mc_saved_data *)__pa_nodebug(&mc_saved_data);
-	initrd_start_p = (unsigned long *)__pa_nodebug(&initrd_start);
-	initrd_start_addr = (unsigned long)__pa_nodebug(*initrd_start_p);
+	mcs_tmp_p = (unsigned long *)__pa_nodebug(mc_tmp_ptrs);
+	mcs_p = (struct mc_saved_data *)__pa_nodebug(&mc_saved_data);
 #else
-	mc_saved_data_p = &mc_saved_data;
-	mc_saved_in_initrd_p = mc_saved_in_initrd;
-	initrd_start_addr = initrd_start;
+	mcs_tmp_p = mc_tmp_ptrs;
+	mcs_p = &mc_saved_data;
 #endif
 
 	/*
 	 * If there is no valid ucode previously saved in memory, no need to
 	 * update ucode on this AP.
 	 */
-	if (mc_saved_data_p->mc_saved_count == 0)
+	if (!mcs_p->num_saved)
 		return;
 
 	collect_cpu_info_early(&uci);
-	ret = load_microcode(mc_saved_data_p, mc_saved_in_initrd_p,
-			     initrd_start_addr, &uci);
-
+	ret = load_microcode(mcs_p, mcs_tmp_p, get_initrd_start_addr(), &uci);
 	if (ret != UCODE_OK)
 		return;
 
@@ -786,13 +779,13 @@
 	struct ucode_cpu_info uci;
 	enum ucode_state ret;
 
-	if (!mc_saved_data.mc_saved_count)
+	if (!mc_saved_data.num_saved)
 		return;
 
 	collect_cpu_info_early(&uci);
 
 	ret = load_microcode_early(mc_saved_data.mc_saved,
-				   mc_saved_data.mc_saved_count, &uci);
+				   mc_saved_data.num_saved, &uci);
 	if (ret != UCODE_OK)
 		return;
 
@@ -825,7 +818,7 @@
  * return 0 - no update found
  * return 1 - found update
  */
-static int get_matching_mc(struct microcode_intel *mc_intel, int cpu)
+static int get_matching_mc(struct microcode_intel *mc, int cpu)
 {
 	struct cpu_signature cpu_sig;
 	unsigned int csig, cpf, crev;
@@ -836,39 +829,36 @@
 	cpf = cpu_sig.pf;
 	crev = cpu_sig.rev;
 
-	return has_newer_microcode(mc_intel, csig, cpf, crev);
+	return has_newer_microcode(mc, csig, cpf, crev);
 }
 
 static int apply_microcode_intel(int cpu)
 {
-	struct microcode_intel *mc_intel;
+	struct microcode_intel *mc;
 	struct ucode_cpu_info *uci;
+	struct cpuinfo_x86 *c;
 	unsigned int val[2];
-	int cpu_num = raw_smp_processor_id();
-	struct cpuinfo_x86 *c = &cpu_data(cpu_num);
-
-	uci = ucode_cpu_info + cpu;
-	mc_intel = uci->mc;
 
 	/* We should bind the task to the CPU */
-	BUG_ON(cpu_num != cpu);
+	if (WARN_ON(raw_smp_processor_id() != cpu))
+		return -1;
 
-	if (mc_intel == NULL)
+	uci = ucode_cpu_info + cpu;
+	mc = uci->mc;
+	if (!mc)
 		return 0;
 
 	/*
 	 * Microcode on this CPU could be updated earlier. Only apply the
-	 * microcode patch in mc_intel when it is newer than the one on this
+	 * microcode patch in mc when it is newer than the one on this
 	 * CPU.
 	 */
-	if (get_matching_mc(mc_intel, cpu) == 0)
+	if (!get_matching_mc(mc, cpu))
 		return 0;
 
 	/* write microcode via MSR 0x79 */
-	wrmsr(MSR_IA32_UCODE_WRITE,
-	      (unsigned long) mc_intel->bits,
-	      (unsigned long) mc_intel->bits >> 16 >> 16);
-	wrmsr(MSR_IA32_UCODE_REV, 0, 0);
+	wrmsrl(MSR_IA32_UCODE_WRITE, (unsigned long)mc->bits);
+	wrmsrl(MSR_IA32_UCODE_REV, 0);
 
 	/* As documented in the SDM: Do a CPUID 1 here */
 	sync_core();
@@ -876,16 +866,19 @@
 	/* get the current revision from MSR 0x8B */
 	rdmsr(MSR_IA32_UCODE_REV, val[0], val[1]);
 
-	if (val[1] != mc_intel->hdr.rev) {
+	if (val[1] != mc->hdr.rev) {
 		pr_err("CPU%d update to revision 0x%x failed\n",
-		       cpu_num, mc_intel->hdr.rev);
+		       cpu, mc->hdr.rev);
 		return -1;
 	}
+
 	pr_info("CPU%d updated to revision 0x%x, date = %04x-%02x-%02x\n",
-		cpu_num, val[1],
-		mc_intel->hdr.date & 0xffff,
-		mc_intel->hdr.date >> 24,
-		(mc_intel->hdr.date >> 16) & 0xff);
+		cpu, val[1],
+		mc->hdr.date & 0xffff,
+		mc->hdr.date >> 24,
+		(mc->hdr.date >> 16) & 0xff);
+
+	c = &cpu_data(cpu);
 
 	uci->cpu_sig.rev = val[1];
 	c->microcode = val[1];
diff --git a/arch/x86/kernel/cpu/microcode/intel_lib.c b/arch/x86/kernel/cpu/microcode/intel_lib.c
index b96896b..2ce1a7d 100644
--- a/arch/x86/kernel/cpu/microcode/intel_lib.c
+++ b/arch/x86/kernel/cpu/microcode/intel_lib.c
@@ -49,7 +49,7 @@
 	unsigned long total_size, data_size, ext_table_size;
 	struct microcode_header_intel *mc_header = mc;
 	struct extended_sigtable *ext_header = NULL;
-	int sum, orig_sum, ext_sigcount = 0, i;
+	u32 sum, orig_sum, ext_sigcount = 0, i;
 	struct extended_signature *ext_sig;
 
 	total_size = get_totalsize(mc_header);
@@ -57,69 +57,85 @@
 
 	if (data_size + MC_HEADER_SIZE > total_size) {
 		if (print_err)
-			pr_err("error! Bad data size in microcode data file\n");
+			pr_err("Error: bad microcode data file size.\n");
 		return -EINVAL;
 	}
 
 	if (mc_header->ldrver != 1 || mc_header->hdrver != 1) {
 		if (print_err)
-			pr_err("error! Unknown microcode update format\n");
+			pr_err("Error: invalid/unknown microcode update format.\n");
 		return -EINVAL;
 	}
+
 	ext_table_size = total_size - (MC_HEADER_SIZE + data_size);
 	if (ext_table_size) {
+		u32 ext_table_sum = 0;
+		u32 *ext_tablep;
+
 		if ((ext_table_size < EXT_HEADER_SIZE)
 		 || ((ext_table_size - EXT_HEADER_SIZE) % EXT_SIGNATURE_SIZE)) {
 			if (print_err)
-				pr_err("error! Small exttable size in microcode data file\n");
+				pr_err("Error: truncated extended signature table.\n");
 			return -EINVAL;
 		}
+
 		ext_header = mc + MC_HEADER_SIZE + data_size;
 		if (ext_table_size != exttable_size(ext_header)) {
 			if (print_err)
-				pr_err("error! Bad exttable size in microcode data file\n");
+				pr_err("Error: extended signature table size mismatch.\n");
 			return -EFAULT;
 		}
+
 		ext_sigcount = ext_header->count;
-	}
 
-	/* check extended table checksum */
-	if (ext_table_size) {
-		int ext_table_sum = 0;
-		int *ext_tablep = (int *)ext_header;
+		/*
+		 * Check extended table checksum: the sum of all dwords that
+		 * comprise a valid table must be 0.
+		 */
+		ext_tablep = (u32 *)ext_header;
 
-		i = ext_table_size / DWSIZE;
+		i = ext_table_size / sizeof(u32);
 		while (i--)
 			ext_table_sum += ext_tablep[i];
+
 		if (ext_table_sum) {
 			if (print_err)
-				pr_warn("aborting, bad extended signature table checksum\n");
+				pr_warn("Bad extended signature table checksum, aborting.\n");
 			return -EINVAL;
 		}
 	}
 
-	/* calculate the checksum */
+	/*
+	 * Calculate the checksum of update data and header. The checksum of
+	 * valid update data and header including the extended signature table
+	 * must be 0.
+	 */
 	orig_sum = 0;
-	i = (MC_HEADER_SIZE + data_size) / DWSIZE;
+	i = (MC_HEADER_SIZE + data_size) / sizeof(u32);
 	while (i--)
-		orig_sum += ((int *)mc)[i];
+		orig_sum += ((u32 *)mc)[i];
+
 	if (orig_sum) {
 		if (print_err)
-			pr_err("aborting, bad checksum\n");
+			pr_err("Bad microcode data checksum, aborting.\n");
 		return -EINVAL;
 	}
+
 	if (!ext_table_size)
 		return 0;
-	/* check extended signature checksum */
+
+	/*
+	 * Check extended signature checksum: 0 => valid.
+	 */
 	for (i = 0; i < ext_sigcount; i++) {
 		ext_sig = (void *)ext_header + EXT_HEADER_SIZE +
 			  EXT_SIGNATURE_SIZE * i;
-		sum = orig_sum
-			- (mc_header->sig + mc_header->pf + mc_header->cksum)
-			+ (ext_sig->sig + ext_sig->pf + ext_sig->cksum);
+
+		sum = (mc_header->sig + mc_header->pf + mc_header->cksum) -
+		      (ext_sig->sig + ext_sig->pf + ext_sig->cksum);
 		if (sum) {
 			if (print_err)
-				pr_err("aborting, bad checksum\n");
+				pr_err("Bad extended signature checksum, aborting.\n");
 			return -EINVAL;
 		}
 	}
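
For illustration only (not part of the patch), the checksum convention enforced above -- a blob is valid when the 32-bit sum of all of its dwords is zero -- can be modeled in a few lines of user-space C:

#include <stdint.h>
#include <stdio.h>
#include <stddef.h>

/* Sum a buffer as 32-bit words; a valid blob sums to 0 (mod 2^32). */
static uint32_t dword_sum(const void *buf, size_t size)
{
	const uint32_t *p = buf;
	uint32_t sum = 0;
	size_t i;

	for (i = 0; i < size / sizeof(uint32_t); i++)
		sum += p[i];
	return sum;
}

int main(void)
{
	uint32_t blob[4] = { 0x1234, 0xabcd, 7, 0 };

	/* Fix up the last dword so the whole blob sums to zero. */
	blob[3] = -(blob[0] + blob[1] + blob[2]);

	printf("checksum ok: %s\n",
	       dword_sum(blob, sizeof(blob)) == 0 ? "yes" : "no");
	return 0;
}
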
diff --git a/arch/x86/kernel/cpu/mkcapflags.sh b/arch/x86/kernel/cpu/mkcapflags.sh
index 3f20710..6988c74 100644
--- a/arch/x86/kernel/cpu/mkcapflags.sh
+++ b/arch/x86/kernel/cpu/mkcapflags.sh
@@ -1,6 +1,6 @@
 #!/bin/sh
 #
-# Generate the x86_cap/bug_flags[] arrays from include/asm/cpufeature.h
+# Generate the x86_cap/bug_flags[] arrays from include/asm/cpufeatures.h
 #
 
 IN=$1
@@ -49,8 +49,8 @@
 trap 'rm "$OUT"' EXIT
 
 (
-	echo "#ifndef _ASM_X86_CPUFEATURE_H"
-	echo "#include <asm/cpufeature.h>"
+	echo "#ifndef _ASM_X86_CPUFEATURES_H"
+	echo "#include <asm/cpufeatures.h>"
 	echo "#endif"
 	echo ""
 
diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c
index ba80d68..10f8d47 100644
--- a/arch/x86/kernel/cpu/mtrr/main.c
+++ b/arch/x86/kernel/cpu/mtrr/main.c
@@ -47,7 +47,7 @@
 #include <linux/smp.h>
 #include <linux/syscore_ops.h>
 
-#include <asm/processor.h>
+#include <asm/cpufeature.h>
 #include <asm/e820.h>
 #include <asm/mtrr.h>
 #include <asm/msr.h>
diff --git a/arch/x86/kernel/cpu/transmeta.c b/arch/x86/kernel/cpu/transmeta.c
index e3b4d18..3417856 100644
--- a/arch/x86/kernel/cpu/transmeta.c
+++ b/arch/x86/kernel/cpu/transmeta.c
@@ -1,6 +1,6 @@
 #include <linux/kernel.h>
 #include <linux/mm.h>
-#include <asm/processor.h>
+#include <asm/cpufeature.h>
 #include <asm/msr.h>
 #include "cpu.h"
 
diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c
index 837365f..621b501 100644
--- a/arch/x86/kernel/e820.c
+++ b/arch/x86/kernel/e820.c
@@ -24,6 +24,7 @@
 #include <asm/e820.h>
 #include <asm/proto.h>
 #include <asm/setup.h>
+#include <asm/cpufeature.h>
 
 /*
  * The e820 map is the map that gets modified e.g. with command line parameters
diff --git a/arch/x86/kernel/fpu/core.c b/arch/x86/kernel/fpu/core.c
index d5804ad..0b1b9ab 100644
--- a/arch/x86/kernel/fpu/core.c
+++ b/arch/x86/kernel/fpu/core.c
@@ -114,6 +114,10 @@
 	kernel_fpu_disable();
 
 	if (fpu->fpregs_active) {
+		/*
+		 * Ignore return value -- we don't care if reg state
+		 * is clobbered.
+		 */
 		copy_fpregs_to_fpstate(fpu);
 	} else {
 		this_cpu_write(fpu_fpregs_owner_ctx, NULL);
@@ -189,8 +193,12 @@
 
 	preempt_disable();
 	if (fpu->fpregs_active) {
-		if (!copy_fpregs_to_fpstate(fpu))
-			fpregs_deactivate(fpu);
+		if (!copy_fpregs_to_fpstate(fpu)) {
+			if (use_eager_fpu())
+				copy_kernel_to_fpregs(&fpu->state);
+			else
+				fpregs_deactivate(fpu);
+		}
 	}
 	preempt_enable();
 }
@@ -223,14 +231,15 @@
 }
 EXPORT_SYMBOL_GPL(fpstate_init);
 
-/*
- * Copy the current task's FPU state to a new task's FPU context.
- *
- * In both the 'eager' and the 'lazy' case we save hardware registers
- * directly to the destination buffer.
- */
-static void fpu_copy(struct fpu *dst_fpu, struct fpu *src_fpu)
+int fpu__copy(struct fpu *dst_fpu, struct fpu *src_fpu)
 {
+	dst_fpu->counter = 0;
+	dst_fpu->fpregs_active = 0;
+	dst_fpu->last_cpu = -1;
+
+	if (!src_fpu->fpstate_active || !cpu_has_fpu)
+		return 0;
+
 	WARN_ON_FPU(src_fpu != &current->thread.fpu);
 
 	/*
@@ -243,10 +252,9 @@
 	/*
 	 * Save current FPU registers directly into the child
 	 * FPU context, without any memory-to-memory copying.
-	 *
-	 * If the FPU context got destroyed in the process (FNSAVE
-	 * done on old CPUs) then copy it back into the source
-	 * context and mark the current task for lazy restore.
+	 * In lazy mode, if the FPU context isn't loaded into
+	 * fpregs, CR0.TS will be set and do_device_not_available
+	 * will load the FPU context.
 	 *
 	 * We have to do all this with preemption disabled,
 	 * mostly because of the FNSAVE case, because in that
@@ -259,19 +267,13 @@
 	preempt_disable();
 	if (!copy_fpregs_to_fpstate(dst_fpu)) {
 		memcpy(&src_fpu->state, &dst_fpu->state, xstate_size);
-		fpregs_deactivate(src_fpu);
+
+		if (use_eager_fpu())
+			copy_kernel_to_fpregs(&src_fpu->state);
+		else
+			fpregs_deactivate(src_fpu);
 	}
 	preempt_enable();
-}
-
-int fpu__copy(struct fpu *dst_fpu, struct fpu *src_fpu)
-{
-	dst_fpu->counter = 0;
-	dst_fpu->fpregs_active = 0;
-	dst_fpu->last_cpu = -1;
-
-	if (src_fpu->fpstate_active && cpu_has_fpu)
-		fpu_copy(dst_fpu, src_fpu);
 
 	return 0;
 }
@@ -425,7 +427,7 @@
 {
 	WARN_ON_FPU(fpu != &current->thread.fpu); /* Almost certainly an anomaly */
 
-	if (!use_eager_fpu()) {
+	if (!use_eager_fpu() || !static_cpu_has(X86_FEATURE_FPU)) {
 		/* FPU state will be reallocated lazily at the first use. */
 		fpu__drop(fpu);
 	} else {
diff --git a/arch/x86/kernel/fpu/init.c b/arch/x86/kernel/fpu/init.c
index bd08fb7..54c86ff 100644
--- a/arch/x86/kernel/fpu/init.c
+++ b/arch/x86/kernel/fpu/init.c
@@ -262,7 +262,10 @@
  * not only saved the restores along the way, but we also have the
  * FPU ready to be used for the original task.
  *
- * 'eager' switching is used on modern CPUs, there we switch the FPU
+ * 'lazy' is deprecated because it's almost never a performance win
+ * and it's much more complicated than 'eager'.
+ *
+ * 'eager' switching is used by default on all CPUs; there we switch the FPU
  * state during every context switch, regardless of whether the task
  * has used FPU instructions in that time slice or not. This is done
  * because modern FPU context saving instructions are able to optimize
@@ -273,7 +276,7 @@
  *   to use 'eager' restores, if we detect that a task is using the FPU
  *   frequently. See the fpu->counter logic in fpu/internal.h for that. ]
  */
-static enum { AUTO, ENABLE, DISABLE } eagerfpu = AUTO;
+static enum { ENABLE, DISABLE } eagerfpu = ENABLE;
 
 /*
  * Find supported xfeatures based on cpu features and command-line input.
@@ -344,15 +347,9 @@
  */
 static void __init fpu__init_parse_early_param(void)
 {
-	/*
-	 * No need to check "eagerfpu=auto" again, since it is the
-	 * initial default.
-	 */
 	if (cmdline_find_option_bool(boot_command_line, "eagerfpu=off")) {
 		eagerfpu = DISABLE;
 		fpu__clear_eager_fpu_features();
-	} else if (cmdline_find_option_bool(boot_command_line, "eagerfpu=on")) {
-		eagerfpu = ENABLE;
 	}
 
 	if (cmdline_find_option_bool(boot_command_line, "no387"))
diff --git a/arch/x86/kernel/fpu/xstate.c b/arch/x86/kernel/fpu/xstate.c
index d425cda5..6e8354f 100644
--- a/arch/x86/kernel/fpu/xstate.c
+++ b/arch/x86/kernel/fpu/xstate.c
@@ -51,6 +51,9 @@
 	setup_clear_cpu_cap(X86_FEATURE_AVX512PF);
 	setup_clear_cpu_cap(X86_FEATURE_AVX512ER);
 	setup_clear_cpu_cap(X86_FEATURE_AVX512CD);
+	setup_clear_cpu_cap(X86_FEATURE_AVX512DQ);
+	setup_clear_cpu_cap(X86_FEATURE_AVX512BW);
+	setup_clear_cpu_cap(X86_FEATURE_AVX512VL);
 	setup_clear_cpu_cap(X86_FEATURE_MPX);
 	setup_clear_cpu_cap(X86_FEATURE_XGETBV1);
 }
diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
index 05c9e3f..702547c 100644
--- a/arch/x86/kernel/ftrace.c
+++ b/arch/x86/kernel/ftrace.c
@@ -697,9 +697,8 @@
 #endif
 
 /* Defined as markers to the end of the ftrace default trampolines */
-extern void ftrace_caller_end(void);
 extern void ftrace_regs_caller_end(void);
-extern void ftrace_return(void);
+extern void ftrace_epilogue(void);
 extern void ftrace_caller_op_ptr(void);
 extern void ftrace_regs_caller_op_ptr(void);
 
@@ -746,7 +745,7 @@
 		op_offset = (unsigned long)ftrace_regs_caller_op_ptr;
 	} else {
 		start_offset = (unsigned long)ftrace_caller;
-		end_offset = (unsigned long)ftrace_caller_end;
+		end_offset = (unsigned long)ftrace_epilogue;
 		op_offset = (unsigned long)ftrace_caller_op_ptr;
 	}
 
@@ -754,7 +753,7 @@
 
 	/*
 	 * Allocate enough size to store the ftrace_caller code,
-	 * the jmp to ftrace_return, as well as the address of
+	 * the jmp to ftrace_epilogue, as well as the address of
 	 * the ftrace_ops this trampoline is used for.
 	 */
 	trampoline = alloc_tramp(size + MCOUNT_INSN_SIZE + sizeof(void *));
@@ -772,8 +771,8 @@
 
 	ip = (unsigned long)trampoline + size;
 
-	/* The trampoline ends with a jmp to ftrace_return */
-	jmp = ftrace_jmp_replace(ip, (unsigned long)ftrace_return);
+	/* The trampoline ends with a jmp to ftrace_epilogue */
+	jmp = ftrace_jmp_replace(ip, (unsigned long)ftrace_epilogue);
 	memcpy(trampoline + size, jmp, MCOUNT_INSN_SIZE);
 
 	/*
diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c
index 2c0f340..1f4422d 100644
--- a/arch/x86/kernel/head64.c
+++ b/arch/x86/kernel/head64.c
@@ -40,13 +40,8 @@
 /* Wipe all early page tables except for the kernel symbol map */
 static void __init reset_early_page_tables(void)
 {
-	unsigned long i;
-
-	for (i = 0; i < PTRS_PER_PGD-1; i++)
-		early_level4_pgt[i].pgd = 0;
-
+	memset(early_level4_pgt, 0, sizeof(pgd_t)*(PTRS_PER_PGD-1));
 	next_early_pgt = 0;
-
 	write_cr3(__pa_nodebug(early_level4_pgt));
 }
 
@@ -54,7 +49,6 @@
 int __init early_make_pgtable(unsigned long address)
 {
 	unsigned long physaddr = address - __PAGE_OFFSET;
-	unsigned long i;
 	pgdval_t pgd, *pgd_p;
 	pudval_t pud, *pud_p;
 	pmdval_t pmd, *pmd_p;
@@ -81,8 +75,7 @@
 		}
 
 		pud_p = (pudval_t *)early_dynamic_pgts[next_early_pgt++];
-		for (i = 0; i < PTRS_PER_PUD; i++)
-			pud_p[i] = 0;
+		memset(pud_p, 0, sizeof(*pud_p) * PTRS_PER_PUD);
 		*pgd_p = (pgdval_t)pud_p - __START_KERNEL_map + phys_base + _KERNPG_TABLE;
 	}
 	pud_p += pud_index(address);
@@ -97,8 +90,7 @@
 		}
 
 		pmd_p = (pmdval_t *)early_dynamic_pgts[next_early_pgt++];
-		for (i = 0; i < PTRS_PER_PMD; i++)
-			pmd_p[i] = 0;
+		memset(pmd_p, 0, sizeof(*pmd_p) * PTRS_PER_PMD);
 		*pud_p = (pudval_t)pmd_p - __START_KERNEL_map + phys_base + _KERNPG_TABLE;
 	}
 	pmd = (physaddr & PMD_MASK) + early_pmd_flags;
diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
index 6bc9ae2..54cdbd2 100644
--- a/arch/x86/kernel/head_32.S
+++ b/arch/x86/kernel/head_32.S
@@ -19,7 +19,7 @@
 #include <asm/setup.h>
 #include <asm/processor-flags.h>
 #include <asm/msr-index.h>
-#include <asm/cpufeature.h>
+#include <asm/cpufeatures.h>
 #include <asm/percpu.h>
 #include <asm/nops.h>
 #include <asm/bootparam.h>
@@ -389,6 +389,12 @@
 	/* Make changes effective */
 	wrmsr
 
+	/*
+	 * And make sure that all the mappings we set up have NX set from
+	 * the beginning.
+	 */
+	orl $(1 << (_PAGE_BIT_NX - 32)), pa(__supported_pte_mask + 4)
+
 enable_paging:
 
 /*
diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
index ffdc0e8..22fbf9d 100644
--- a/arch/x86/kernel/head_64.S
+++ b/arch/x86/kernel/head_64.S
@@ -38,7 +38,6 @@
 #define pud_index(x)	(((x) >> PUD_SHIFT) & (PTRS_PER_PUD-1))
 
 L4_PAGE_OFFSET = pgd_index(__PAGE_OFFSET)
-L3_PAGE_OFFSET = pud_index(__PAGE_OFFSET)
 L4_START_KERNEL = pgd_index(__START_KERNEL_map)
 L3_START_KERNEL = pud_index(__START_KERNEL_map)
 
@@ -76,9 +75,7 @@
 	subq	$_text - __START_KERNEL_map, %rbp
 
 	/* Is the address not 2M aligned? */
-	movq	%rbp, %rax
-	andl	$~PMD_PAGE_MASK, %eax
-	testl	%eax, %eax
+	testl	$~PMD_PAGE_MASK, %ebp
 	jnz	bad_address
 
 	/*
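
A small aside, not kernel code: the single testl above checks 2 MiB alignment by testing the low bits of the load offset. The equivalent C predicate, assuming PMD_PAGE_SIZE is 2 MiB, is:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PMD_PAGE_SIZE	(1UL << 21)	/* 2 MiB */

/* An address is 2 MiB aligned iff none of the low 21 bits are set. */
static bool is_2m_aligned(uint64_t addr)
{
	return (addr & (PMD_PAGE_SIZE - 1)) == 0;
}

int main(void)
{
	printf("%d %d\n", is_2m_aligned(0x200000), is_2m_aligned(0x200800));
	return 0;
}
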
diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c
index b8e6ff5..be0ebbb 100644
--- a/arch/x86/kernel/hpet.c
+++ b/arch/x86/kernel/hpet.c
@@ -12,6 +12,7 @@
 #include <linux/pm.h>
 #include <linux/io.h>
 
+#include <asm/cpufeature.h>
 #include <asm/irqdomain.h>
 #include <asm/fixmap.h>
 #include <asm/hpet.h>
diff --git a/arch/x86/kernel/mcount_64.S b/arch/x86/kernel/mcount_64.S
index 87e1762..ed48a9f 100644
--- a/arch/x86/kernel/mcount_64.S
+++ b/arch/x86/kernel/mcount_64.S
@@ -168,12 +168,14 @@
 	restore_mcount_regs
 
 	/*
-	 * The copied trampoline must call ftrace_return as it
+	 * The copied trampoline must call ftrace_epilogue as it
 	 * still may need to call the function graph tracer.
+	 *
+	 * The code up to this label is copied into trampolines so
+	 * think twice before adding any new code or changing the
+	 * layout here.
 	 */
-GLOBAL(ftrace_caller_end)
-
-GLOBAL(ftrace_return)
+GLOBAL(ftrace_epilogue)
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 GLOBAL(ftrace_graph_call)
@@ -244,14 +246,14 @@
 	popfq
 
 	/*
-	 * As this jmp to ftrace_return can be a short jump
+	 * As this jmp to ftrace_epilogue can be a short jump
 	 * it must not be copied into the trampoline.
 	 * The trampoline will add the code to jump
 	 * to the return.
 	 */
 GLOBAL(ftrace_regs_caller_end)
 
-	jmp ftrace_return
+	jmp ftrace_epilogue
 
 END(ftrace_regs_caller)
 
diff --git a/arch/x86/kernel/msr.c b/arch/x86/kernel/msr.c
index 64f9616..7f3550a 100644
--- a/arch/x86/kernel/msr.c
+++ b/arch/x86/kernel/msr.c
@@ -40,7 +40,7 @@
 #include <linux/uaccess.h>
 #include <linux/gfp.h>
 
-#include <asm/processor.h>
+#include <asm/cpufeature.h>
 #include <asm/msr.h>
 
 static struct class *msr_class;
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 9decee2..2915d54 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -57,6 +57,9 @@
 	  */
 	.io_bitmap		= { [0 ... IO_BITMAP_LONGS] = ~0 },
 #endif
+#ifdef CONFIG_X86_32
+	.SYSENTER_stack_canary	= STACK_END_MAGIC,
+#endif
 };
 EXPORT_PER_CPU_SYMBOL(cpu_tss);
 
diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
index cb6282c..548ddf7 100644
--- a/arch/x86/kernel/signal.c
+++ b/arch/x86/kernel/signal.c
@@ -61,7 +61,38 @@
 	regs->seg = GET_SEG(seg) | 3;			\
 } while (0)
 
-int restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc)
+#ifdef CONFIG_X86_64
+/*
+ * If regs->ss will cause an IRET fault, change it.  Otherwise leave it
+ * alone.  Using this generally makes no sense unless
+ * user_64bit_mode(regs) would return true.
+ */
+static void force_valid_ss(struct pt_regs *regs)
+{
+	u32 ar;
+	asm volatile ("lar %[old_ss], %[ar]\n\t"
+		      "jz 1f\n\t"		/* If invalid: */
+		      "xorl %[ar], %[ar]\n\t"	/* set ar = 0 */
+		      "1:"
+		      : [ar] "=r" (ar)
+		      : [old_ss] "rm" ((u16)regs->ss));
+
+	/*
+	 * For a valid 64-bit user context, we need DPL 3, type
+	 * read-write data or read-write exp-down data, and S and P
+	 * set.  We can't use VERW because VERW doesn't check the
+	 * P bit.
+	 */
+	ar &= AR_DPL_MASK | AR_S | AR_P | AR_TYPE_MASK;
+	if (ar != (AR_DPL3 | AR_S | AR_P | AR_TYPE_RWDATA) &&
+	    ar != (AR_DPL3 | AR_S | AR_P | AR_TYPE_RWDATA_EXPDOWN))
+		regs->ss = __USER_DS;
+}
+#endif
+
+static int restore_sigcontext(struct pt_regs *regs,
+			      struct sigcontext __user *sc,
+			      unsigned long uc_flags)
 {
 	unsigned long buf_val;
 	void __user *buf;
@@ -94,15 +125,18 @@
 		COPY(r15);
 #endif /* CONFIG_X86_64 */
 
-#ifdef CONFIG_X86_32
 		COPY_SEG_CPL3(cs);
 		COPY_SEG_CPL3(ss);
-#else /* !CONFIG_X86_32 */
-		/* Kernel saves and restores only the CS segment register on signals,
-		 * which is the bare minimum needed to allow mixed 32/64-bit code.
-		 * App's signal handler can save/restore other segments if needed. */
-		COPY_SEG_CPL3(cs);
-#endif /* CONFIG_X86_32 */
+
+#ifdef CONFIG_X86_64
+		/*
+		 * Fix up SS if needed for the benefit of old DOSEMU and
+		 * CRIU.
+		 */
+		if (unlikely(!(uc_flags & UC_STRICT_RESTORE_SS) &&
+			     user_64bit_mode(regs)))
+			force_valid_ss(regs);
+#endif
 
 		get_user_ex(tmpflags, &sc->flags);
 		regs->flags = (regs->flags & ~FIX_EFLAGS) | (tmpflags & FIX_EFLAGS);
@@ -165,6 +199,7 @@
 		put_user_ex(regs->cs, &sc->cs);
 		put_user_ex(0, &sc->gs);
 		put_user_ex(0, &sc->fs);
+		put_user_ex(regs->ss, &sc->ss);
 #endif /* CONFIG_X86_32 */
 
 		put_user_ex(fpstate, &sc->fpstate);
@@ -403,6 +438,21 @@
 	return 0;
 }
 #else /* !CONFIG_X86_32 */
+static unsigned long frame_uc_flags(struct pt_regs *regs)
+{
+	unsigned long flags;
+
+	if (cpu_has_xsave)
+		flags = UC_FP_XSTATE | UC_SIGCONTEXT_SS;
+	else
+		flags = UC_SIGCONTEXT_SS;
+
+	if (likely(user_64bit_mode(regs)))
+		flags |= UC_STRICT_RESTORE_SS;
+
+	return flags;
+}
+
 static int __setup_rt_frame(int sig, struct ksignal *ksig,
 			    sigset_t *set, struct pt_regs *regs)
 {
@@ -422,10 +472,7 @@
 
 	put_user_try {
 		/* Create the ucontext.  */
-		if (cpu_has_xsave)
-			put_user_ex(UC_FP_XSTATE, &frame->uc.uc_flags);
-		else
-			put_user_ex(0, &frame->uc.uc_flags);
+		put_user_ex(frame_uc_flags(regs), &frame->uc.uc_flags);
 		put_user_ex(0, &frame->uc.uc_link);
 		save_altstack_ex(&frame->uc.uc_stack, regs->sp);
 
@@ -459,10 +506,28 @@
 
 	regs->sp = (unsigned long)frame;
 
-	/* Set up the CS register to run signal handlers in 64-bit mode,
-	   even if the handler happens to be interrupting 32-bit code. */
+	/*
+	 * Set up the CS and SS registers to run signal handlers in
+	 * 64-bit mode, even if the handler happens to be interrupting
+	 * 32-bit or 16-bit code.
+	 *
+	 * SS is subtle.  In 64-bit mode, we don't need any particular
+	 * SS descriptor, but we do need SS to be valid.  It's possible
+	 * that the old SS is entirely bogus -- this can happen if the
+	 * signal we're trying to deliver is #GP or #SS caused by a bad
+	 * SS value.  We also have a compatibility issue here: DOSEMU
+	 * relies on the contents of the SS register indicating the
+	 * SS value at the time of the signal, even though that code in
+	 * DOSEMU predates sigreturn's ability to restore SS.  (DOSEMU
+	 * avoids relying on sigreturn to restore SS; instead it uses
+	 * a trampoline.)  So we do our best: if the old SS was valid,
+	 * we keep it.  Otherwise we replace it.
+	 */
 	regs->cs = __USER_CS;
 
+	if (unlikely(regs->ss != __USER_DS))
+		force_valid_ss(regs);
+
 	return 0;
 }
 #endif /* CONFIG_X86_32 */
@@ -489,10 +554,7 @@
 
 	put_user_try {
 		/* Create the ucontext.  */
-		if (cpu_has_xsave)
-			put_user_ex(UC_FP_XSTATE, &frame->uc.uc_flags);
-		else
-			put_user_ex(0, &frame->uc.uc_flags);
+		put_user_ex(frame_uc_flags(regs), &frame->uc.uc_flags);
 		put_user_ex(0, &frame->uc.uc_link);
 		compat_save_altstack_ex(&frame->uc.uc_stack, regs->sp);
 		put_user_ex(0, &frame->uc.uc__pad0);
@@ -554,7 +616,11 @@
 
 	set_current_blocked(&set);
 
-	if (restore_sigcontext(regs, &frame->sc))
+	/*
+	 * x86_32 has no uc_flags bits relevant to restore_sigcontext.
+	 * Save a few cycles by skipping the __get_user.
+	 */
+	if (restore_sigcontext(regs, &frame->sc, 0))
 		goto badframe;
 	return regs->ax;
 
@@ -570,16 +636,19 @@
 	struct pt_regs *regs = current_pt_regs();
 	struct rt_sigframe __user *frame;
 	sigset_t set;
+	unsigned long uc_flags;
 
 	frame = (struct rt_sigframe __user *)(regs->sp - sizeof(long));
 	if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
 		goto badframe;
 	if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
 		goto badframe;
+	if (__get_user(uc_flags, &frame->uc.uc_flags))
+		goto badframe;
 
 	set_current_blocked(&set);
 
-	if (restore_sigcontext(regs, &frame->uc.uc_mcontext))
+	if (restore_sigcontext(regs, &frame->uc.uc_mcontext, uc_flags))
 		goto badframe;
 
 	if (restore_altstack(&frame->uc.uc_stack))
@@ -692,12 +761,15 @@
 
 static inline unsigned long get_nr_restart_syscall(const struct pt_regs *regs)
 {
-#if defined(CONFIG_X86_32) || !defined(CONFIG_X86_64)
+#ifdef CONFIG_X86_64
+	if (is_ia32_task())
+		return __NR_ia32_restart_syscall;
+#endif
+#ifdef CONFIG_X86_X32_ABI
+	return __NR_restart_syscall | (regs->orig_ax & __X32_SYSCALL_BIT);
+#else
 	return __NR_restart_syscall;
-#else /* !CONFIG_X86_32 && CONFIG_X86_64 */
-	return test_thread_flag(TIF_IA32) ? __NR_ia32_restart_syscall :
-		__NR_restart_syscall | (regs->orig_ax & __X32_SYSCALL_BIT);
-#endif /* CONFIG_X86_32 || !CONFIG_X86_64 */
+#endif
 }
 
 /*
@@ -763,6 +835,7 @@
 	struct pt_regs *regs = current_pt_regs();
 	struct rt_sigframe_x32 __user *frame;
 	sigset_t set;
+	unsigned long uc_flags;
 
 	frame = (struct rt_sigframe_x32 __user *)(regs->sp - 8);
 
@@ -770,10 +843,12 @@
 		goto badframe;
 	if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
 		goto badframe;
+	if (__get_user(uc_flags, &frame->uc.uc_flags))
+		goto badframe;
 
 	set_current_blocked(&set);
 
-	if (restore_sigcontext(regs, &frame->uc.uc_mcontext))
+	if (restore_sigcontext(regs, &frame->uc.uc_mcontext, uc_flags))
 		goto badframe;
 
 	if (compat_restore_altstack(&frame->uc.uc_stack))
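
Annotation: from user space the new behaviour is visible through uc_flags: UC_SIGCONTEXT_SS says the kernel saved SS in the sigcontext, UC_STRICT_RESTORE_SS says sigreturn will restore it verbatim. A minimal sketch of a handler that inspects them; the macro values are assumed from the uapi ucontext.h added by this series, and the program structure is illustrative only:

#include <signal.h>
#include <ucontext.h>
#include <unistd.h>

#ifndef UC_SIGCONTEXT_SS
#define UC_SIGCONTEXT_SS	0x2	/* assumed value, arch/x86/include/uapi/asm/ucontext.h */
#endif
#ifndef UC_STRICT_RESTORE_SS
#define UC_STRICT_RESTORE_SS	0x4	/* likewise */
#endif

static void handler(int sig, siginfo_t *si, void *ctx)
{
	const ucontext_t *uc = ctx;

	/* write() is async-signal-safe; printf() is not. */
	if (uc->uc_flags & UC_SIGCONTEXT_SS)
		(void)write(STDOUT_FILENO, "SS saved\n", 9);
	if (uc->uc_flags & UC_STRICT_RESTORE_SS)
		(void)write(STDOUT_FILENO, "SS restored strictly\n", 21);
}

int main(void)
{
	struct sigaction sa = { .sa_sigaction = handler, .sa_flags = SA_SIGINFO };

	sigaction(SIGUSR1, &sa, NULL);
	raise(SIGUSR1);
	return 0;
}
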
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 3bf1e0b..643dbdc 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -256,7 +256,7 @@
 	x86_cpuinit.setup_percpu_clockev();
 
 	wmb();
-	cpu_startup_entry(CPUHP_ONLINE);
+	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
 }
 
 int topology_update_package_map(unsigned int apicid, unsigned int cpu)
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index 211c11c..06cbe25 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -83,32 +83,18 @@
 DECLARE_BITMAP(used_vectors, NR_VECTORS);
 EXPORT_SYMBOL_GPL(used_vectors);
 
-static inline void conditional_sti(struct pt_regs *regs)
+static inline void cond_local_irq_enable(struct pt_regs *regs)
 {
 	if (regs->flags & X86_EFLAGS_IF)
 		local_irq_enable();
 }
 
-static inline void preempt_conditional_sti(struct pt_regs *regs)
-{
-	preempt_count_inc();
-	if (regs->flags & X86_EFLAGS_IF)
-		local_irq_enable();
-}
-
-static inline void conditional_cli(struct pt_regs *regs)
+static inline void cond_local_irq_disable(struct pt_regs *regs)
 {
 	if (regs->flags & X86_EFLAGS_IF)
 		local_irq_disable();
 }
 
-static inline void preempt_conditional_cli(struct pt_regs *regs)
-{
-	if (regs->flags & X86_EFLAGS_IF)
-		local_irq_disable();
-	preempt_count_dec();
-}
-
 void ist_enter(struct pt_regs *regs)
 {
 	if (user_mode(regs)) {
@@ -262,7 +248,6 @@
 	tsk->thread.error_code = error_code;
 	tsk->thread.trap_nr = trapnr;
 
-#ifdef CONFIG_X86_64
 	if (show_unhandled_signals && unhandled_signal(tsk, signr) &&
 	    printk_ratelimit()) {
 		pr_info("%s[%d] trap %s ip:%lx sp:%lx error:%lx",
@@ -271,7 +256,6 @@
 		print_vma_addr(" in ", regs->ip);
 		pr_cont("\n");
 	}
-#endif
 
 	force_sig_info(signr, info ?: SEND_SIG_PRIV, tsk);
 }
@@ -286,7 +270,7 @@
 
 	if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) !=
 			NOTIFY_STOP) {
-		conditional_sti(regs);
+		cond_local_irq_enable(regs);
 		do_trap(trapnr, signr, str, regs, error_code,
 			fill_trap_info(regs, signr, trapnr, &info));
 	}
@@ -368,7 +352,7 @@
 	if (notify_die(DIE_TRAP, "bounds", regs, error_code,
 			X86_TRAP_BR, SIGSEGV) == NOTIFY_STOP)
 		return;
-	conditional_sti(regs);
+	cond_local_irq_enable(regs);
 
 	if (!user_mode(regs))
 		die("bounds", regs, error_code);
@@ -443,7 +427,7 @@
 	struct task_struct *tsk;
 
 	RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU");
-	conditional_sti(regs);
+	cond_local_irq_enable(regs);
 
 	if (v8086_mode(regs)) {
 		local_irq_enable();
@@ -517,9 +501,11 @@
 	 * as we may switch to the interrupt stack.
 	 */
 	debug_stack_usage_inc();
-	preempt_conditional_sti(regs);
+	preempt_disable();
+	cond_local_irq_enable(regs);
 	do_trap(X86_TRAP_BP, SIGTRAP, "int3", regs, error_code, NULL);
-	preempt_conditional_cli(regs);
+	cond_local_irq_disable(regs);
+	preempt_enable_no_resched();
 	debug_stack_usage_dec();
 exit:
 	ist_exit(regs);
@@ -571,6 +557,29 @@
 NOKPROBE_SYMBOL(fixup_bad_iret);
 #endif
 
+static bool is_sysenter_singlestep(struct pt_regs *regs)
+{
+	/*
+	 * We don't try for precision here.  If we're anywhere in the region of
+	 * code that can be single-stepped in the SYSENTER entry path, then
+	 * assume that this is a useless single-step trap due to SYSENTER
+	 * being invoked with TF set.  (We don't know in advance exactly
+	 * which instructions will be hit because BTF could plausibly
+	 * be set.)
+	 */
+#ifdef CONFIG_X86_32
+	return (regs->ip - (unsigned long)__begin_SYSENTER_singlestep_region) <
+		(unsigned long)__end_SYSENTER_singlestep_region -
+		(unsigned long)__begin_SYSENTER_singlestep_region;
+#elif defined(CONFIG_IA32_EMULATION)
+	return (regs->ip - (unsigned long)entry_SYSENTER_compat) <
+		(unsigned long)__end_entry_SYSENTER_compat -
+		(unsigned long)entry_SYSENTER_compat;
+#else
+	return false;
+#endif
+}
+
 /*
  * Our handling of the processor debug registers is non-trivial.
  * We do not clear them on entry and exit from the kernel. Therefore
@@ -605,11 +614,42 @@
 	ist_enter(regs);
 
 	get_debugreg(dr6, 6);
+	/*
+	 * The Intel SDM says:
+	 *
+	 *   Certain debug exceptions may clear bits 0-3. The remaining
+	 *   contents of the DR6 register are never cleared by the
+	 *   processor. To avoid confusion in identifying debug
+	 *   exceptions, debug handlers should clear the register before
+	 *   returning to the interrupted task.
+	 *
+	 * Keep it simple: clear DR6 immediately.
+	 */
+	set_debugreg(0, 6);
 
 	/* Filter out all the reserved bits which are preset to 1 */
 	dr6 &= ~DR6_RESERVED;
 
 	/*
+	 * The SDM says "The processor clears the BTF flag when it
+	 * generates a debug exception."  Clear TIF_BLOCKSTEP to keep
+	 * TIF_BLOCKSTEP in sync with the hardware BTF flag.
+	 */
+	clear_tsk_thread_flag(tsk, TIF_BLOCKSTEP);
+
+	if (unlikely(!user_mode(regs) && (dr6 & DR_STEP) &&
+		     is_sysenter_singlestep(regs))) {
+		dr6 &= ~DR_STEP;
+		if (!dr6)
+			goto exit;
+		/*
+		 * else we might have gotten a single-step trap and hit a
+		 * watchpoint at the same time, in which case we should fall
+		 * through and handle the watchpoint.
+		 */
+	}
+
+	/*
 	 * If dr6 has no reason to give us about the origin of this trap,
 	 * then it's very likely the result of an icebp/int01 trap.
 	 * User wants a sigtrap for that.
@@ -617,18 +657,10 @@
 	if (!dr6 && user_mode(regs))
 		user_icebp = 1;
 
-	/* Catch kmemcheck conditions first of all! */
+	/* Catch kmemcheck conditions! */
 	if ((dr6 & DR_STEP) && kmemcheck_trap(regs))
 		goto exit;
 
-	/* DR6 may or may not be cleared by the CPU */
-	set_debugreg(0, 6);
-
-	/*
-	 * The processor cleared BTF, so don't mark that we need it set.
-	 */
-	clear_tsk_thread_flag(tsk, TIF_BLOCKSTEP);
-
 	/* Store the virtualized DR6 value */
 	tsk->thread.debugreg6 = dr6;
 
@@ -648,24 +680,25 @@
 	debug_stack_usage_inc();
 
 	/* It's safe to allow irq's after DR6 has been saved */
-	preempt_conditional_sti(regs);
+	preempt_disable();
+	cond_local_irq_enable(regs);
 
 	if (v8086_mode(regs)) {
 		handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code,
 					X86_TRAP_DB);
-		preempt_conditional_cli(regs);
+		cond_local_irq_disable(regs);
+		preempt_enable_no_resched();
 		debug_stack_usage_dec();
 		goto exit;
 	}
 
-	/*
-	 * Single-stepping through system calls: ignore any exceptions in
-	 * kernel space, but re-enable TF when returning to user mode.
-	 *
-	 * We already checked v86 mode above, so we can check for kernel mode
-	 * by just checking the CPL of CS.
-	 */
-	if ((dr6 & DR_STEP) && !user_mode(regs)) {
+	if (WARN_ON_ONCE((dr6 & DR_STEP) && !user_mode(regs))) {
+		/*
+		 * Historical junk that used to handle SYSENTER single-stepping.
+		 * This should be unreachable now.  If we survive for a while
+		 * without anyone hitting this warning, we'll turn this into
+		 * an oops.
+		 */
 		tsk->thread.debugreg6 &= ~DR_STEP;
 		set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
 		regs->flags &= ~X86_EFLAGS_TF;
@@ -673,10 +706,19 @@
 	si_code = get_si_code(tsk->thread.debugreg6);
 	if (tsk->thread.debugreg6 & (DR_STEP | DR_TRAP_BITS) || user_icebp)
 		send_sigtrap(tsk, regs, error_code, si_code);
-	preempt_conditional_cli(regs);
+	cond_local_irq_disable(regs);
+	preempt_enable_no_resched();
 	debug_stack_usage_dec();
 
 exit:
+#if defined(CONFIG_X86_32)
+	/*
+	 * This is the most likely code path that involves non-trivial use
+	 * of the SYSENTER stack.  Check that we haven't overrun it.
+	 */
+	WARN(this_cpu_read(cpu_tss.SYSENTER_stack_canary) != STACK_END_MAGIC,
+	     "Overran or corrupted SYSENTER stack\n");
+#endif
 	ist_exit(regs);
 }
 NOKPROBE_SYMBOL(do_debug);
@@ -696,7 +738,7 @@
 
 	if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, SIGFPE) == NOTIFY_STOP)
 		return;
-	conditional_sti(regs);
+	cond_local_irq_enable(regs);
 
 	if (!user_mode(regs)) {
 		if (!fixup_exception(regs, trapnr)) {
@@ -743,20 +785,19 @@
 dotraplinkage void
 do_spurious_interrupt_bug(struct pt_regs *regs, long error_code)
 {
-	conditional_sti(regs);
+	cond_local_irq_enable(regs);
 }
 
 dotraplinkage void
 do_device_not_available(struct pt_regs *regs, long error_code)
 {
 	RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU");
-	BUG_ON(use_eager_fpu());
 
 #ifdef CONFIG_MATH_EMULATION
-	if (read_cr0() & X86_CR0_EM) {
+	if (!boot_cpu_has(X86_FEATURE_FPU) && (read_cr0() & X86_CR0_EM)) {
 		struct math_emu_info info = { };
 
-		conditional_sti(regs);
+		cond_local_irq_enable(regs);
 
 		info.regs = regs;
 		math_emulate(&info);
@@ -765,7 +806,7 @@
 #endif
 	fpu__restore(&current->thread.fpu); /* interrupts still off */
 #ifdef CONFIG_X86_32
-	conditional_sti(regs);
+	cond_local_irq_enable(regs);
 #endif
 }
 NOKPROBE_SYMBOL(do_device_not_available);
@@ -868,7 +909,7 @@
 #endif
 
 #ifdef CONFIG_X86_32
-	set_system_trap_gate(IA32_SYSCALL_VECTOR, entry_INT80_32);
+	set_system_intr_gate(IA32_SYSCALL_VECTOR, entry_INT80_32);
 	set_bit(IA32_SYSCALL_VECTOR, used_vectors);
 #endif
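
Annotation: is_sysenter_singlestep() above uses the single-comparison range check: with unsigned arithmetic, ip - begin wraps around when ip < begin, so one compare covers both bounds. Stand-alone illustration; the function name is invented for the example:

#include <stdbool.h>

/* true iff begin <= x < end, using one unsigned comparison */
static bool addr_in_range(unsigned long x, unsigned long begin,
			  unsigned long end)
{
	return (x - begin) < (end - begin);
}
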
 
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index 3d743da..5638044 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -43,6 +43,11 @@
 
 int tsc_clocksource_reliable;
 
+static u32 art_to_tsc_numerator;
+static u32 art_to_tsc_denominator;
+static u64 art_to_tsc_offset;
+struct clocksource *art_related_clocksource;
+
 /*
  * Use a ring-buffer like data structure, where a writer advances the head by
  * writing a new data entry and a reader advances the tail when it observes a
@@ -964,6 +969,37 @@
 
 #endif /* CONFIG_CPU_FREQ */
 
+#define ART_CPUID_LEAF (0x15)
+#define ART_MIN_DENOMINATOR (1)
+
+
+/*
+ * If ART is present, detect the numerator:denominator to convert to TSC
+ */
+static void detect_art(void)
+{
+	unsigned int unused[2];
+
+	if (boot_cpu_data.cpuid_level < ART_CPUID_LEAF)
+		return;
+
+	cpuid(ART_CPUID_LEAF, &art_to_tsc_denominator,
+	      &art_to_tsc_numerator, unused, unused+1);
+
+	/* Don't enable ART in a VM, non-stop TSC required */
+	if (boot_cpu_has(X86_FEATURE_HYPERVISOR) ||
+	    !boot_cpu_has(X86_FEATURE_NONSTOP_TSC) ||
+	    art_to_tsc_denominator < ART_MIN_DENOMINATOR)
+		return;
+
+	if (rdmsrl_safe(MSR_IA32_TSC_ADJUST, &art_to_tsc_offset))
+		return;
+
+	/* Make this sticky over multiple CPU init calls */
+	setup_force_cpu_cap(X86_FEATURE_ART);
+}
+
+
 /* clocksource code */
 
 static struct clocksource clocksource_tsc;
@@ -1071,6 +1107,25 @@
 	return 0;
 }
 
+/*
+ * Convert ART to TSC given numerator/denominator found in detect_art()
+ */
+struct system_counterval_t convert_art_to_tsc(cycle_t art)
+{
+	u64 tmp, res, rem;
+
+	rem = do_div(art, art_to_tsc_denominator);
+
+	res = art * art_to_tsc_numerator;
+	tmp = rem * art_to_tsc_numerator;
+
+	do_div(tmp, art_to_tsc_denominator);
+	res += tmp + art_to_tsc_offset;
+
+	return (struct system_counterval_t) {.cs = art_related_clocksource,
+			.cycles = res};
+}
+EXPORT_SYMBOL(convert_art_to_tsc);
 
 static void tsc_refine_calibration_work(struct work_struct *work);
 static DECLARE_DELAYED_WORK(tsc_irqwork, tsc_refine_calibration_work);
@@ -1142,6 +1197,8 @@
 		(unsigned long)tsc_khz % 1000);
 
 out:
+	if (boot_cpu_has(X86_FEATURE_ART))
+		art_related_clocksource = &clocksource_tsc;
 	clocksource_register_khz(&clocksource_tsc, tsc_khz);
 }
 
@@ -1235,6 +1292,8 @@
 		mark_tsc_unstable("TSCs unsynchronized");
 
 	check_system_tsc_reliable();
+
+	detect_art();
 }
 
 #ifdef CONFIG_SMP
@@ -1246,14 +1305,14 @@
  */
 unsigned long calibrate_delay_is_known(void)
 {
-	int i, cpu = smp_processor_id();
+	int sibling, cpu = smp_processor_id();
 
 	if (!tsc_disabled && !cpu_has(&cpu_data(cpu), X86_FEATURE_CONSTANT_TSC))
 		return 0;
 
-	for_each_online_cpu(i)
-		if (cpu_data(i).phys_proc_id == cpu_data(cpu).phys_proc_id)
-			return cpu_data(i).loops_per_jiffy;
+	sibling = cpumask_any_but(topology_core_cpumask(cpu), cpu);
+	if (sibling < nr_cpu_ids)
+		return cpu_data(sibling).loops_per_jiffy;
 	return 0;
 }
 #endif
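
Annotation: convert_art_to_tsc() evaluates TSC = ART * numerator / denominator + offset, splitting ART into quotient and remainder so the intermediate multiply stays within 64 bits. A hedged stand-alone rewrite of the same arithmetic (names are illustrative):

#include <stdint.h>

static uint64_t art_to_tsc_demo(uint64_t art, uint32_t num, uint32_t den,
				uint64_t offset)
{
	/* detect_art() guarantees den >= ART_MIN_DENOMINATOR (1). */
	uint64_t quot = art / den;
	uint64_t rem  = art % den;

	return quot * num + (rem * num) / den + offset;
}
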
diff --git a/arch/x86/kernel/verify_cpu.S b/arch/x86/kernel/verify_cpu.S
index 07efb35..014ea59 100644
--- a/arch/x86/kernel/verify_cpu.S
+++ b/arch/x86/kernel/verify_cpu.S
@@ -30,7 +30,7 @@
  * 	appropriately. Either display a message or halt.
  */
 
-#include <asm/cpufeature.h>
+#include <asm/cpufeatures.h>
 #include <asm/msr-index.h>
 
 verify_cpu:
diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c
index e574b85..3dce1ca 100644
--- a/arch/x86/kernel/vm86_32.c
+++ b/arch/x86/kernel/vm86_32.c
@@ -362,7 +362,7 @@
 	/* make room for real-mode segments */
 	tsk->thread.sp0 += 16;
 
-	if (static_cpu_has_safe(X86_FEATURE_SEP))
+	if (static_cpu_has(X86_FEATURE_SEP))
 		tsk->thread.sysenter_cs = 0;
 
 	load_sp0(tss, &tsk->thread);
diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
index fe133b7..5af9958 100644
--- a/arch/x86/kernel/vmlinux.lds.S
+++ b/arch/x86/kernel/vmlinux.lds.S
@@ -192,6 +192,17 @@
 	:init
 #endif
 
+	/*
+	 * Section for code used exclusively before alternatives are run. All
+	 * references to such code must be patched out by alternatives, normally
+	 * by using X86_FEATURE_ALWAYS CPU feature bit.
+	 *
+	 * See static_cpu_has() for an example.
+	 */
+	.altinstr_aux : AT(ADDR(.altinstr_aux) - LOAD_OFFSET) {
+		*(.altinstr_aux)
+	}
+
 	INIT_DATA_SECTION(16)
 
 	.x86_cpu_dev.init : AT(ADDR(.x86_cpu_dev.init) - LOAD_OFFSET) {
diff --git a/arch/x86/kvm/Makefile b/arch/x86/kvm/Makefile
index a1ff508..464fa47 100644
--- a/arch/x86/kvm/Makefile
+++ b/arch/x86/kvm/Makefile
@@ -13,9 +13,10 @@
 
 kvm-y			+= x86.o mmu.o emulate.o i8259.o irq.o lapic.o \
 			   i8254.o ioapic.o irq_comm.o cpuid.o pmu.o mtrr.o \
-			   hyperv.o
+			   hyperv.o page_track.o
 
 kvm-$(CONFIG_KVM_DEVICE_ASSIGNMENT)	+= assigned-dev.o iommu.o
+
 kvm-intel-y		+= vmx.o pmu_intel.o
 kvm-amd-y		+= svm.o pmu_amd.o
 
diff --git a/arch/x86/kvm/assigned-dev.c b/arch/x86/kvm/assigned-dev.c
index 9dc091a..308b859 100644
--- a/arch/x86/kvm/assigned-dev.c
+++ b/arch/x86/kvm/assigned-dev.c
@@ -51,11 +51,9 @@
 static struct kvm_assigned_dev_kernel *kvm_find_assigned_dev(struct list_head *head,
 						      int assigned_dev_id)
 {
-	struct list_head *ptr;
 	struct kvm_assigned_dev_kernel *match;
 
-	list_for_each(ptr, head) {
-		match = list_entry(ptr, struct kvm_assigned_dev_kernel, list);
+	list_for_each_entry(match, head, list) {
 		if (match->assigned_dev_id == assigned_dev_id)
 			return match;
 	}
@@ -373,14 +371,10 @@
 
 void kvm_free_all_assigned_devices(struct kvm *kvm)
 {
-	struct list_head *ptr, *ptr2;
-	struct kvm_assigned_dev_kernel *assigned_dev;
+	struct kvm_assigned_dev_kernel *assigned_dev, *tmp;
 
-	list_for_each_safe(ptr, ptr2, &kvm->arch.assigned_dev_head) {
-		assigned_dev = list_entry(ptr,
-					  struct kvm_assigned_dev_kernel,
-					  list);
-
+	list_for_each_entry_safe(assigned_dev, tmp,
+				 &kvm->arch.assigned_dev_head, list) {
 		kvm_free_assigned_device(kvm, assigned_dev);
 	}
 }
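
Annotation: the two conversions above are the standard move from list_for_each() plus list_entry() to the typed iterator. The same shape with an invented structure, for illustration (kernel context assumed):

#include <linux/list.h>

struct demo_dev {
	int id;
	struct list_head list;
};

/* Typed iteration: no separate struct list_head cursor, no list_entry(). */
static struct demo_dev *demo_find(struct list_head *head, int id)
{
	struct demo_dev *d;

	list_for_each_entry(d, head, list)
		if (d->id == id)
			return d;
	return NULL;
}
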
diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
index 6525e92..0029644 100644
--- a/arch/x86/kvm/cpuid.c
+++ b/arch/x86/kvm/cpuid.c
@@ -46,11 +46,18 @@
 	return ret;
 }
 
+bool kvm_mpx_supported(void)
+{
+	return ((host_xcr0 & (XFEATURE_MASK_BNDREGS | XFEATURE_MASK_BNDCSR))
+		 && kvm_x86_ops->mpx_supported());
+}
+EXPORT_SYMBOL_GPL(kvm_mpx_supported);
+
 u64 kvm_supported_xcr0(void)
 {
 	u64 xcr0 = KVM_SUPPORTED_XCR0 & host_xcr0;
 
-	if (!kvm_x86_ops->mpx_supported())
+	if (!kvm_mpx_supported())
 		xcr0 &= ~(XFEATURE_MASK_BNDREGS | XFEATURE_MASK_BNDCSR);
 
 	return xcr0;
@@ -97,8 +104,7 @@
 	if (best && (best->eax & (F(XSAVES) | F(XSAVEC))))
 		best->ebx = xstate_required_size(vcpu->arch.xcr0, true);
 
-	vcpu->arch.eager_fpu = use_eager_fpu() || guest_cpuid_has_mpx(vcpu);
-	if (vcpu->arch.eager_fpu)
+	if (use_eager_fpu())
 		kvm_x86_ops->fpu_activate(vcpu);
 
 	/*
@@ -295,7 +301,7 @@
 #endif
 	unsigned f_rdtscp = kvm_x86_ops->rdtscp_supported() ? F(RDTSCP) : 0;
 	unsigned f_invpcid = kvm_x86_ops->invpcid_supported() ? F(INVPCID) : 0;
-	unsigned f_mpx = kvm_x86_ops->mpx_supported() ? F(MPX) : 0;
+	unsigned f_mpx = kvm_mpx_supported() ? F(MPX) : 0;
 	unsigned f_xsaves = kvm_x86_ops->xsaves_supported() ? F(XSAVES) : 0;
 
 	/* cpuid 1.edx */
diff --git a/arch/x86/kvm/cpuid.h b/arch/x86/kvm/cpuid.h
index c8eda14..66a6581 100644
--- a/arch/x86/kvm/cpuid.h
+++ b/arch/x86/kvm/cpuid.h
@@ -5,6 +5,7 @@
 #include <asm/cpu.h>
 
 int kvm_update_cpuid(struct kvm_vcpu *vcpu);
+bool kvm_mpx_supported(void);
 struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu,
 					      u32 function, u32 index);
 int kvm_dev_ioctl_get_cpuid(struct kvm_cpuid2 *cpuid,
@@ -135,14 +136,6 @@
 	return best && (best->ebx & bit(X86_FEATURE_RTM));
 }
 
-static inline bool guest_cpuid_has_mpx(struct kvm_vcpu *vcpu)
-{
-	struct kvm_cpuid_entry2 *best;
-
-	best = kvm_find_cpuid_entry(vcpu, 7, 0);
-	return best && (best->ebx & bit(X86_FEATURE_MPX));
-}
-
 static inline bool guest_cpuid_has_pcommit(struct kvm_vcpu *vcpu)
 {
 	struct kvm_cpuid_entry2 *best;
diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c
index c58ba67..5ff3485 100644
--- a/arch/x86/kvm/hyperv.c
+++ b/arch/x86/kvm/hyperv.c
@@ -1043,6 +1043,27 @@
 	return kvm->arch.hyperv.hv_hypercall & HV_X64_MSR_HYPERCALL_ENABLE;
 }
 
+static void kvm_hv_hypercall_set_result(struct kvm_vcpu *vcpu, u64 result)
+{
+	bool longmode;
+
+	longmode = is_64_bit_mode(vcpu);
+	if (longmode)
+		kvm_register_write(vcpu, VCPU_REGS_RAX, result);
+	else {
+		kvm_register_write(vcpu, VCPU_REGS_RDX, result >> 32);
+		kvm_register_write(vcpu, VCPU_REGS_RAX, result & 0xffffffff);
+	}
+}
+
+static int kvm_hv_hypercall_complete_userspace(struct kvm_vcpu *vcpu)
+{
+	struct kvm_run *run = vcpu->run;
+
+	kvm_hv_hypercall_set_result(vcpu, run->hyperv.u.hcall.result);
+	return 1;
+}
+
 int kvm_hv_hypercall(struct kvm_vcpu *vcpu)
 {
 	u64 param, ingpa, outgpa, ret;
@@ -1055,7 +1076,7 @@
 	 */
 	if (kvm_x86_ops->get_cpl(vcpu) != 0 || !is_protmode(vcpu)) {
 		kvm_queue_exception(vcpu, UD_VECTOR);
-		return 0;
+		return 1;
 	}
 
 	longmode = is_64_bit_mode(vcpu);
@@ -1083,22 +1104,33 @@
 
 	trace_kvm_hv_hypercall(code, fast, rep_cnt, rep_idx, ingpa, outgpa);
 
+	/* Hypercall continuation is not supported yet */
+	if (rep_cnt || rep_idx) {
+		res = HV_STATUS_INVALID_HYPERCALL_CODE;
+		goto set_result;
+	}
+
 	switch (code) {
-	case HV_X64_HV_NOTIFY_LONG_SPIN_WAIT:
+	case HVCALL_NOTIFY_LONG_SPIN_WAIT:
 		kvm_vcpu_on_spin(vcpu);
 		break;
+	case HVCALL_POST_MESSAGE:
+	case HVCALL_SIGNAL_EVENT:
+		vcpu->run->exit_reason = KVM_EXIT_HYPERV;
+		vcpu->run->hyperv.type = KVM_EXIT_HYPERV_HCALL;
+		vcpu->run->hyperv.u.hcall.input = param;
+		vcpu->run->hyperv.u.hcall.params[0] = ingpa;
+		vcpu->run->hyperv.u.hcall.params[1] = outgpa;
+		vcpu->arch.complete_userspace_io =
+				kvm_hv_hypercall_complete_userspace;
+		return 0;
 	default:
 		res = HV_STATUS_INVALID_HYPERCALL_CODE;
 		break;
 	}
 
+set_result:
 	ret = res | (((u64)rep_done & 0xfff) << 32);
-	if (longmode) {
-		kvm_register_write(vcpu, VCPU_REGS_RAX, ret);
-	} else {
-		kvm_register_write(vcpu, VCPU_REGS_RDX, ret >> 32);
-		kvm_register_write(vcpu, VCPU_REGS_RAX, ret & 0xffffffff);
-	}
-
+	kvm_hv_hypercall_set_result(vcpu, ret);
 	return 1;
 }
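
Annotation: kvm_hv_hypercall_set_result() returns the 64-bit status in RAX for 64-bit callers and split across EDX:EAX otherwise. The split itself, as a stand-alone sketch with an invented function name:

#include <stdint.h>

static void split_hv_result(uint64_t result, uint32_t *edx, uint32_t *eax)
{
	*edx = result >> 32;		/* high half -> EDX */
	*eax = result & 0xffffffff;	/* low half  -> EAX */
}
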
diff --git a/arch/x86/kvm/i8254.c b/arch/x86/kvm/i8254.c
index b0ea42b..a4bf5b4 100644
--- a/arch/x86/kvm/i8254.c
+++ b/arch/x86/kvm/i8254.c
@@ -51,32 +51,9 @@
 #define RW_STATE_WORD0 3
 #define RW_STATE_WORD1 4
 
-/* Compute with 96 bit intermediate result: (a*b)/c */
-static u64 muldiv64(u64 a, u32 b, u32 c)
+static void pit_set_gate(struct kvm_pit *pit, int channel, u32 val)
 {
-	union {
-		u64 ll;
-		struct {
-			u32 low, high;
-		} l;
-	} u, res;
-	u64 rl, rh;
-
-	u.ll = a;
-	rl = (u64)u.l.low * (u64)b;
-	rh = (u64)u.l.high * (u64)b;
-	rh += (rl >> 32);
-	res.l.high = div64_u64(rh, c);
-	res.l.low = div64_u64(((mod_64(rh, c) << 32) + (rl & 0xffffffff)), c);
-	return res.ll;
-}
-
-static void pit_set_gate(struct kvm *kvm, int channel, u32 val)
-{
-	struct kvm_kpit_channel_state *c =
-		&kvm->arch.vpit->pit_state.channels[channel];
-
-	WARN_ON(!mutex_is_locked(&kvm->arch.vpit->pit_state.lock));
+	struct kvm_kpit_channel_state *c = &pit->pit_state.channels[channel];
 
 	switch (c->mode) {
 	default:
@@ -97,18 +74,16 @@
 	c->gate = val;
 }
 
-static int pit_get_gate(struct kvm *kvm, int channel)
+static int pit_get_gate(struct kvm_pit *pit, int channel)
 {
-	WARN_ON(!mutex_is_locked(&kvm->arch.vpit->pit_state.lock));
-
-	return kvm->arch.vpit->pit_state.channels[channel].gate;
+	return pit->pit_state.channels[channel].gate;
 }
 
-static s64 __kpit_elapsed(struct kvm *kvm)
+static s64 __kpit_elapsed(struct kvm_pit *pit)
 {
 	s64 elapsed;
 	ktime_t remaining;
-	struct kvm_kpit_state *ps = &kvm->arch.vpit->pit_state;
+	struct kvm_kpit_state *ps = &pit->pit_state;
 
 	if (!ps->period)
 		return 0;
@@ -128,26 +103,23 @@
 	return elapsed;
 }
 
-static s64 kpit_elapsed(struct kvm *kvm, struct kvm_kpit_channel_state *c,
+static s64 kpit_elapsed(struct kvm_pit *pit, struct kvm_kpit_channel_state *c,
 			int channel)
 {
 	if (channel == 0)
-		return __kpit_elapsed(kvm);
+		return __kpit_elapsed(pit);
 
 	return ktime_to_ns(ktime_sub(ktime_get(), c->count_load_time));
 }
 
-static int pit_get_count(struct kvm *kvm, int channel)
+static int pit_get_count(struct kvm_pit *pit, int channel)
 {
-	struct kvm_kpit_channel_state *c =
-		&kvm->arch.vpit->pit_state.channels[channel];
+	struct kvm_kpit_channel_state *c = &pit->pit_state.channels[channel];
 	s64 d, t;
 	int counter;
 
-	WARN_ON(!mutex_is_locked(&kvm->arch.vpit->pit_state.lock));
-
-	t = kpit_elapsed(kvm, c, channel);
-	d = muldiv64(t, KVM_PIT_FREQ, NSEC_PER_SEC);
+	t = kpit_elapsed(pit, c, channel);
+	d = mul_u64_u32_div(t, KVM_PIT_FREQ, NSEC_PER_SEC);
 
 	switch (c->mode) {
 	case 0:
@@ -167,17 +139,14 @@
 	return counter;
 }
 
-static int pit_get_out(struct kvm *kvm, int channel)
+static int pit_get_out(struct kvm_pit *pit, int channel)
 {
-	struct kvm_kpit_channel_state *c =
-		&kvm->arch.vpit->pit_state.channels[channel];
+	struct kvm_kpit_channel_state *c = &pit->pit_state.channels[channel];
 	s64 d, t;
 	int out;
 
-	WARN_ON(!mutex_is_locked(&kvm->arch.vpit->pit_state.lock));
-
-	t = kpit_elapsed(kvm, c, channel);
-	d = muldiv64(t, KVM_PIT_FREQ, NSEC_PER_SEC);
+	t = kpit_elapsed(pit, c, channel);
+	d = mul_u64_u32_div(t, KVM_PIT_FREQ, NSEC_PER_SEC);
 
 	switch (c->mode) {
 	default:
@@ -202,29 +171,23 @@
 	return out;
 }
 
-static void pit_latch_count(struct kvm *kvm, int channel)
+static void pit_latch_count(struct kvm_pit *pit, int channel)
 {
-	struct kvm_kpit_channel_state *c =
-		&kvm->arch.vpit->pit_state.channels[channel];
-
-	WARN_ON(!mutex_is_locked(&kvm->arch.vpit->pit_state.lock));
+	struct kvm_kpit_channel_state *c = &pit->pit_state.channels[channel];
 
 	if (!c->count_latched) {
-		c->latched_count = pit_get_count(kvm, channel);
+		c->latched_count = pit_get_count(pit, channel);
 		c->count_latched = c->rw_mode;
 	}
 }
 
-static void pit_latch_status(struct kvm *kvm, int channel)
+static void pit_latch_status(struct kvm_pit *pit, int channel)
 {
-	struct kvm_kpit_channel_state *c =
-		&kvm->arch.vpit->pit_state.channels[channel];
-
-	WARN_ON(!mutex_is_locked(&kvm->arch.vpit->pit_state.lock));
+	struct kvm_kpit_channel_state *c = &pit->pit_state.channels[channel];
 
 	if (!c->status_latched) {
 		/* TODO: Return NULL COUNT (bit 6). */
-		c->status = ((pit_get_out(kvm, channel) << 7) |
+		c->status = ((pit_get_out(pit, channel) << 7) |
 				(c->rw_mode << 4) |
 				(c->mode << 1) |
 				c->bcd);
@@ -232,26 +195,24 @@
 	}
 }
 
+static inline struct kvm_pit *pit_state_to_pit(struct kvm_kpit_state *ps)
+{
+	return container_of(ps, struct kvm_pit, pit_state);
+}
+
 static void kvm_pit_ack_irq(struct kvm_irq_ack_notifier *kian)
 {
 	struct kvm_kpit_state *ps = container_of(kian, struct kvm_kpit_state,
 						 irq_ack_notifier);
-	int value;
+	struct kvm_pit *pit = pit_state_to_pit(ps);
 
-	spin_lock(&ps->inject_lock);
-	value = atomic_dec_return(&ps->pending);
-	if (value < 0)
-		/* spurious acks can be generated if, for example, the
-		 * PIC is being reset.  Handle it gracefully here
-		 */
-		atomic_inc(&ps->pending);
-	else if (value > 0)
-		/* in this case, we had multiple outstanding pit interrupts
-		 * that we needed to inject.  Reinject
-		 */
-		queue_kthread_work(&ps->pit->worker, &ps->pit->expired);
-	ps->irq_ack = 1;
-	spin_unlock(&ps->inject_lock);
+	atomic_set(&ps->irq_ack, 1);
+	/* irq_ack should be set before pending is read.  Order accesses with
+	 * inc(pending) in pit_timer_fn and xchg(irq_ack, 0) in pit_do_work.
+	 */
+	smp_mb();
+	if (atomic_dec_if_positive(&ps->pending) > 0)
+		queue_kthread_work(&pit->worker, &pit->expired);
 }
 
 void __kvm_migrate_pit_timer(struct kvm_vcpu *vcpu)
@@ -282,45 +243,36 @@
 	struct kvm_vcpu *vcpu;
 	int i;
 	struct kvm_kpit_state *ps = &pit->pit_state;
-	int inject = 0;
 
-	/* Try to inject pending interrupts when
-	 * last one has been acked.
+	if (atomic_read(&ps->reinject) && !atomic_xchg(&ps->irq_ack, 0))
+		return;
+
+	kvm_set_irq(kvm, pit->irq_source_id, 0, 1, false);
+	kvm_set_irq(kvm, pit->irq_source_id, 0, 0, false);
+
+	/*
+	 * Provides NMI watchdog support via Virtual Wire mode.
+	 * The route is: PIT -> LVT0 in NMI mode.
+	 *
+	 * Note: Our Virtual Wire implementation does not follow
+	 * the MP specification.  We propagate a PIT interrupt to all
+	 * VCPUs and only when LVT0 is in NMI mode.  The interrupt can
+	 * also be simultaneously delivered through PIC and IOAPIC.
 	 */
-	spin_lock(&ps->inject_lock);
-	if (ps->irq_ack) {
-		ps->irq_ack = 0;
-		inject = 1;
-	}
-	spin_unlock(&ps->inject_lock);
-	if (inject) {
-		kvm_set_irq(kvm, kvm->arch.vpit->irq_source_id, 0, 1, false);
-		kvm_set_irq(kvm, kvm->arch.vpit->irq_source_id, 0, 0, false);
-
-		/*
-		 * Provides NMI watchdog support via Virtual Wire mode.
-		 * The route is: PIT -> PIC -> LVT0 in NMI mode.
-		 *
-		 * Note: Our Virtual Wire implementation is simplified, only
-		 * propagating PIT interrupts to all VCPUs when they have set
-		 * LVT0 to NMI delivery. Other PIC interrupts are just sent to
-		 * VCPU0, and only if its LVT0 is in EXTINT mode.
-		 */
-		if (atomic_read(&kvm->arch.vapics_in_nmi_mode) > 0)
-			kvm_for_each_vcpu(i, vcpu, kvm)
-				kvm_apic_nmi_wd_deliver(vcpu);
-	}
+	if (atomic_read(&kvm->arch.vapics_in_nmi_mode) > 0)
+		kvm_for_each_vcpu(i, vcpu, kvm)
+			kvm_apic_nmi_wd_deliver(vcpu);
 }
 
 static enum hrtimer_restart pit_timer_fn(struct hrtimer *data)
 {
 	struct kvm_kpit_state *ps = container_of(data, struct kvm_kpit_state, timer);
-	struct kvm_pit *pt = ps->kvm->arch.vpit;
+	struct kvm_pit *pt = pit_state_to_pit(ps);
 
-	if (ps->reinject || !atomic_read(&ps->pending)) {
+	if (atomic_read(&ps->reinject))
 		atomic_inc(&ps->pending);
-		queue_kthread_work(&pt->worker, &pt->expired);
-	}
+
+	queue_kthread_work(&pt->worker, &pt->expired);
 
 	if (ps->is_periodic) {
 		hrtimer_add_expires_ns(&ps->timer, ps->period);
@@ -329,30 +281,54 @@
 		return HRTIMER_NORESTART;
 }
 
-static void create_pit_timer(struct kvm *kvm, u32 val, int is_period)
+static inline void kvm_pit_reset_reinject(struct kvm_pit *pit)
 {
-	struct kvm_kpit_state *ps = &kvm->arch.vpit->pit_state;
+	atomic_set(&pit->pit_state.pending, 0);
+	atomic_set(&pit->pit_state.irq_ack, 1);
+}
+
+void kvm_pit_set_reinject(struct kvm_pit *pit, bool reinject)
+{
+	struct kvm_kpit_state *ps = &pit->pit_state;
+	struct kvm *kvm = pit->kvm;
+
+	if (atomic_read(&ps->reinject) == reinject)
+		return;
+
+	if (reinject) {
+		/* The initial state is preserved while ps->reinject == 0. */
+		kvm_pit_reset_reinject(pit);
+		kvm_register_irq_ack_notifier(kvm, &ps->irq_ack_notifier);
+		kvm_register_irq_mask_notifier(kvm, 0, &pit->mask_notifier);
+	} else {
+		kvm_unregister_irq_ack_notifier(kvm, &ps->irq_ack_notifier);
+		kvm_unregister_irq_mask_notifier(kvm, 0, &pit->mask_notifier);
+	}
+
+	atomic_set(&ps->reinject, reinject);
+}
+
+static void create_pit_timer(struct kvm_pit *pit, u32 val, int is_period)
+{
+	struct kvm_kpit_state *ps = &pit->pit_state;
+	struct kvm *kvm = pit->kvm;
 	s64 interval;
 
 	if (!ioapic_in_kernel(kvm) ||
 	    ps->flags & KVM_PIT_FLAGS_HPET_LEGACY)
 		return;
 
-	interval = muldiv64(val, NSEC_PER_SEC, KVM_PIT_FREQ);
+	interval = mul_u64_u32_div(val, NSEC_PER_SEC, KVM_PIT_FREQ);
 
 	pr_debug("create pit timer, interval is %llu nsec\n", interval);
 
 	/* TODO The new value only affected after the retriggered */
 	hrtimer_cancel(&ps->timer);
-	flush_kthread_work(&ps->pit->expired);
+	flush_kthread_work(&pit->expired);
 	ps->period = interval;
 	ps->is_periodic = is_period;
 
-	ps->timer.function = pit_timer_fn;
-	ps->kvm = ps->pit->kvm;
-
-	atomic_set(&ps->pending, 0);
-	ps->irq_ack = 1;
+	kvm_pit_reset_reinject(pit);
 
 	/*
 	 * Do not allow the guest to program periodic timers with small
@@ -375,11 +351,9 @@
 		      HRTIMER_MODE_ABS);
 }
 
-static void pit_load_count(struct kvm *kvm, int channel, u32 val)
+static void pit_load_count(struct kvm_pit *pit, int channel, u32 val)
 {
-	struct kvm_kpit_state *ps = &kvm->arch.vpit->pit_state;
-
-	WARN_ON(!mutex_is_locked(&ps->lock));
+	struct kvm_kpit_state *ps = &pit->pit_state;
 
 	pr_debug("load_count val is %d, channel is %d\n", val, channel);
 
@@ -404,29 +378,33 @@
 	case 1:
         /* FIXME: enhance mode 4 precision */
 	case 4:
-		create_pit_timer(kvm, val, 0);
+		create_pit_timer(pit, val, 0);
 		break;
 	case 2:
 	case 3:
-		create_pit_timer(kvm, val, 1);
+		create_pit_timer(pit, val, 1);
 		break;
 	default:
-		destroy_pit_timer(kvm->arch.vpit);
+		destroy_pit_timer(pit);
 	}
 }
 
-void kvm_pit_load_count(struct kvm *kvm, int channel, u32 val, int hpet_legacy_start)
+void kvm_pit_load_count(struct kvm_pit *pit, int channel, u32 val,
+		int hpet_legacy_start)
 {
 	u8 saved_mode;
+
+	WARN_ON_ONCE(!mutex_is_locked(&pit->pit_state.lock));
+
 	if (hpet_legacy_start) {
 		/* save existing mode for later reenablement */
 		WARN_ON(channel != 0);
-		saved_mode = kvm->arch.vpit->pit_state.channels[0].mode;
-		kvm->arch.vpit->pit_state.channels[0].mode = 0xff; /* disable timer */
-		pit_load_count(kvm, channel, val);
-		kvm->arch.vpit->pit_state.channels[0].mode = saved_mode;
+		saved_mode = pit->pit_state.channels[0].mode;
+		pit->pit_state.channels[0].mode = 0xff; /* disable timer */
+		pit_load_count(pit, channel, val);
+		pit->pit_state.channels[0].mode = saved_mode;
 	} else {
-		pit_load_count(kvm, channel, val);
+		pit_load_count(pit, channel, val);
 	}
 }
 
@@ -452,7 +430,6 @@
 {
 	struct kvm_pit *pit = dev_to_pit(this);
 	struct kvm_kpit_state *pit_state = &pit->pit_state;
-	struct kvm *kvm = pit->kvm;
 	int channel, access;
 	struct kvm_kpit_channel_state *s;
 	u32 val = *(u32 *) data;
@@ -476,9 +453,9 @@
 				s = &pit_state->channels[channel];
 				if (val & (2 << channel)) {
 					if (!(val & 0x20))
-						pit_latch_count(kvm, channel);
+						pit_latch_count(pit, channel);
 					if (!(val & 0x10))
-						pit_latch_status(kvm, channel);
+						pit_latch_status(pit, channel);
 				}
 			}
 		} else {
@@ -486,7 +463,7 @@
 			s = &pit_state->channels[channel];
 			access = (val >> 4) & KVM_PIT_CHANNEL_MASK;
 			if (access == 0) {
-				pit_latch_count(kvm, channel);
+				pit_latch_count(pit, channel);
 			} else {
 				s->rw_mode = access;
 				s->read_state = access;
@@ -503,17 +480,17 @@
 		switch (s->write_state) {
 		default:
 		case RW_STATE_LSB:
-			pit_load_count(kvm, addr, val);
+			pit_load_count(pit, addr, val);
 			break;
 		case RW_STATE_MSB:
-			pit_load_count(kvm, addr, val << 8);
+			pit_load_count(pit, addr, val << 8);
 			break;
 		case RW_STATE_WORD0:
 			s->write_latch = val;
 			s->write_state = RW_STATE_WORD1;
 			break;
 		case RW_STATE_WORD1:
-			pit_load_count(kvm, addr, s->write_latch | (val << 8));
+			pit_load_count(pit, addr, s->write_latch | (val << 8));
 			s->write_state = RW_STATE_WORD0;
 			break;
 		}
@@ -529,7 +506,6 @@
 {
 	struct kvm_pit *pit = dev_to_pit(this);
 	struct kvm_kpit_state *pit_state = &pit->pit_state;
-	struct kvm *kvm = pit->kvm;
 	int ret, count;
 	struct kvm_kpit_channel_state *s;
 	if (!pit_in_range(addr))
@@ -566,20 +542,20 @@
 		switch (s->read_state) {
 		default:
 		case RW_STATE_LSB:
-			count = pit_get_count(kvm, addr);
+			count = pit_get_count(pit, addr);
 			ret = count & 0xff;
 			break;
 		case RW_STATE_MSB:
-			count = pit_get_count(kvm, addr);
+			count = pit_get_count(pit, addr);
 			ret = (count >> 8) & 0xff;
 			break;
 		case RW_STATE_WORD0:
-			count = pit_get_count(kvm, addr);
+			count = pit_get_count(pit, addr);
 			ret = count & 0xff;
 			s->read_state = RW_STATE_WORD1;
 			break;
 		case RW_STATE_WORD1:
-			count = pit_get_count(kvm, addr);
+			count = pit_get_count(pit, addr);
 			ret = (count >> 8) & 0xff;
 			s->read_state = RW_STATE_WORD0;
 			break;
@@ -600,14 +576,13 @@
 {
 	struct kvm_pit *pit = speaker_to_pit(this);
 	struct kvm_kpit_state *pit_state = &pit->pit_state;
-	struct kvm *kvm = pit->kvm;
 	u32 val = *(u32 *) data;
 	if (addr != KVM_SPEAKER_BASE_ADDRESS)
 		return -EOPNOTSUPP;
 
 	mutex_lock(&pit_state->lock);
 	pit_state->speaker_data_on = (val >> 1) & 1;
-	pit_set_gate(kvm, 2, val & 1);
+	pit_set_gate(pit, 2, val & 1);
 	mutex_unlock(&pit_state->lock);
 	return 0;
 }
@@ -618,7 +593,6 @@
 {
 	struct kvm_pit *pit = speaker_to_pit(this);
 	struct kvm_kpit_state *pit_state = &pit->pit_state;
-	struct kvm *kvm = pit->kvm;
 	unsigned int refresh_clock;
 	int ret;
 	if (addr != KVM_SPEAKER_BASE_ADDRESS)
@@ -628,8 +602,8 @@
 	refresh_clock = ((unsigned int)ktime_to_ns(ktime_get()) >> 14) & 1;
 
 	mutex_lock(&pit_state->lock);
-	ret = ((pit_state->speaker_data_on << 1) | pit_get_gate(kvm, 2) |
-		(pit_get_out(kvm, 2) << 5) | (refresh_clock << 4));
+	ret = ((pit_state->speaker_data_on << 1) | pit_get_gate(pit, 2) |
+		(pit_get_out(pit, 2) << 5) | (refresh_clock << 4));
 	if (len > sizeof(ret))
 		len = sizeof(ret);
 	memcpy(data, (char *)&ret, len);
@@ -637,33 +611,28 @@
 	return 0;
 }
 
-void kvm_pit_reset(struct kvm_pit *pit)
+static void kvm_pit_reset(struct kvm_pit *pit)
 {
 	int i;
 	struct kvm_kpit_channel_state *c;
 
-	mutex_lock(&pit->pit_state.lock);
 	pit->pit_state.flags = 0;
 	for (i = 0; i < 3; i++) {
 		c = &pit->pit_state.channels[i];
 		c->mode = 0xff;
 		c->gate = (i != 2);
-		pit_load_count(pit->kvm, i, 0);
+		pit_load_count(pit, i, 0);
 	}
-	mutex_unlock(&pit->pit_state.lock);
 
-	atomic_set(&pit->pit_state.pending, 0);
-	pit->pit_state.irq_ack = 1;
+	kvm_pit_reset_reinject(pit);
 }
 
 static void pit_mask_notifer(struct kvm_irq_mask_notifier *kimn, bool mask)
 {
 	struct kvm_pit *pit = container_of(kimn, struct kvm_pit, mask_notifier);
 
-	if (!mask) {
-		atomic_set(&pit->pit_state.pending, 0);
-		pit->pit_state.irq_ack = 1;
-	}
+	if (!mask)
+		kvm_pit_reset_reinject(pit);
 }
 
 static const struct kvm_io_device_ops pit_dev_ops = {
@@ -690,14 +659,10 @@
 		return NULL;
 
 	pit->irq_source_id = kvm_request_irq_source_id(kvm);
-	if (pit->irq_source_id < 0) {
-		kfree(pit);
-		return NULL;
-	}
+	if (pit->irq_source_id < 0)
+		goto fail_request;
 
 	mutex_init(&pit->pit_state.lock);
-	mutex_lock(&pit->pit_state.lock);
-	spin_lock_init(&pit->pit_state.inject_lock);
 
 	pid = get_pid(task_tgid(current));
 	pid_nr = pid_vnr(pid);
@@ -706,36 +671,30 @@
 	init_kthread_worker(&pit->worker);
 	pit->worker_task = kthread_run(kthread_worker_fn, &pit->worker,
 				       "kvm-pit/%d", pid_nr);
-	if (IS_ERR(pit->worker_task)) {
-		mutex_unlock(&pit->pit_state.lock);
-		kvm_free_irq_source_id(kvm, pit->irq_source_id);
-		kfree(pit);
-		return NULL;
-	}
+	if (IS_ERR(pit->worker_task))
+		goto fail_kthread;
+
 	init_kthread_work(&pit->expired, pit_do_work);
 
-	kvm->arch.vpit = pit;
 	pit->kvm = kvm;
 
 	pit_state = &pit->pit_state;
-	pit_state->pit = pit;
 	hrtimer_init(&pit_state->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
+	pit_state->timer.function = pit_timer_fn;
+
 	pit_state->irq_ack_notifier.gsi = 0;
 	pit_state->irq_ack_notifier.irq_acked = kvm_pit_ack_irq;
-	kvm_register_irq_ack_notifier(kvm, &pit_state->irq_ack_notifier);
-	pit_state->reinject = true;
-	mutex_unlock(&pit->pit_state.lock);
+	pit->mask_notifier.func = pit_mask_notifer;
 
 	kvm_pit_reset(pit);
 
-	pit->mask_notifier.func = pit_mask_notifer;
-	kvm_register_irq_mask_notifier(kvm, 0, &pit->mask_notifier);
+	kvm_pit_set_reinject(pit, true);
 
 	kvm_iodevice_init(&pit->dev, &pit_dev_ops);
 	ret = kvm_io_bus_register_dev(kvm, KVM_PIO_BUS, KVM_PIT_BASE_ADDRESS,
 				      KVM_PIT_MEM_LENGTH, &pit->dev);
 	if (ret < 0)
-		goto fail;
+		goto fail_register_pit;
 
 	if (flags & KVM_PIT_SPEAKER_DUMMY) {
 		kvm_iodevice_init(&pit->speaker_dev, &speaker_dev_ops);
@@ -743,42 +702,35 @@
 					      KVM_SPEAKER_BASE_ADDRESS, 4,
 					      &pit->speaker_dev);
 		if (ret < 0)
-			goto fail_unregister;
+			goto fail_register_speaker;
 	}
 
 	return pit;
 
-fail_unregister:
+fail_register_speaker:
 	kvm_io_bus_unregister_dev(kvm, KVM_PIO_BUS, &pit->dev);
-
-fail:
-	kvm_unregister_irq_mask_notifier(kvm, 0, &pit->mask_notifier);
-	kvm_unregister_irq_ack_notifier(kvm, &pit_state->irq_ack_notifier);
-	kvm_free_irq_source_id(kvm, pit->irq_source_id);
+fail_register_pit:
+	kvm_pit_set_reinject(pit, false);
 	kthread_stop(pit->worker_task);
+fail_kthread:
+	kvm_free_irq_source_id(kvm, pit->irq_source_id);
+fail_request:
 	kfree(pit);
 	return NULL;
 }
 
 void kvm_free_pit(struct kvm *kvm)
 {
-	struct hrtimer *timer;
+	struct kvm_pit *pit = kvm->arch.vpit;
 
-	if (kvm->arch.vpit) {
-		kvm_io_bus_unregister_dev(kvm, KVM_PIO_BUS, &kvm->arch.vpit->dev);
-		kvm_io_bus_unregister_dev(kvm, KVM_PIO_BUS,
-					      &kvm->arch.vpit->speaker_dev);
-		kvm_unregister_irq_mask_notifier(kvm, 0,
-					       &kvm->arch.vpit->mask_notifier);
-		kvm_unregister_irq_ack_notifier(kvm,
-				&kvm->arch.vpit->pit_state.irq_ack_notifier);
-		mutex_lock(&kvm->arch.vpit->pit_state.lock);
-		timer = &kvm->arch.vpit->pit_state.timer;
-		hrtimer_cancel(timer);
-		flush_kthread_work(&kvm->arch.vpit->expired);
-		kthread_stop(kvm->arch.vpit->worker_task);
-		kvm_free_irq_source_id(kvm, kvm->arch.vpit->irq_source_id);
-		mutex_unlock(&kvm->arch.vpit->pit_state.lock);
-		kfree(kvm->arch.vpit);
+	if (pit) {
+		kvm_io_bus_unregister_dev(kvm, KVM_PIO_BUS, &pit->dev);
+		kvm_io_bus_unregister_dev(kvm, KVM_PIO_BUS, &pit->speaker_dev);
+		kvm_pit_set_reinject(pit, false);
+		hrtimer_cancel(&pit->pit_state.timer);
+		flush_kthread_work(&pit->expired);
+		kthread_stop(pit->worker_task);
+		kvm_free_irq_source_id(kvm, pit->irq_source_id);
+		kfree(pit);
 	}
 }
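
Annotation: the removed muldiv64() computed (a * b) / c through a 96-bit intermediate; mul_u64_u32_div() now provides that in generic code. On a compiler with unsigned __int128 the same result is a one-liner (illustrative only; the generic helper exists so 32-bit builds work too):

#include <stdint.h>

static uint64_t muldiv64_demo(uint64_t a, uint32_t b, uint32_t c)
{
	/* 128-bit intermediate, then truncate back to 64 bits. */
	return (uint64_t)(((unsigned __int128)a * b) / c);
}
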
diff --git a/arch/x86/kvm/i8254.h b/arch/x86/kvm/i8254.h
index c84990b..2f5af07 100644
--- a/arch/x86/kvm/i8254.h
+++ b/arch/x86/kvm/i8254.h
@@ -22,19 +22,18 @@
 };
 
 struct kvm_kpit_state {
+	/* All members before "struct mutex lock" are protected by the lock. */
 	struct kvm_kpit_channel_state channels[3];
 	u32 flags;
 	bool is_periodic;
 	s64 period; 				/* unit: ns */
 	struct hrtimer timer;
-	atomic_t pending;			/* accumulated triggered timers */
-	bool reinject;
-	struct kvm *kvm;
 	u32    speaker_data_on;
+
 	struct mutex lock;
-	struct kvm_pit *pit;
-	spinlock_t inject_lock;
-	unsigned long irq_ack;
+	atomic_t reinject;
+	atomic_t pending; /* accumulated triggered timers */
+	atomic_t irq_ack;
 	struct kvm_irq_ack_notifier irq_ack_notifier;
 };
 
@@ -57,9 +56,11 @@
 #define KVM_MAX_PIT_INTR_INTERVAL   HZ / 100
 #define KVM_PIT_CHANNEL_MASK	    0x3
 
-void kvm_pit_load_count(struct kvm *kvm, int channel, u32 val, int hpet_legacy_start);
 struct kvm_pit *kvm_create_pit(struct kvm *kvm, u32 flags);
 void kvm_free_pit(struct kvm *kvm);
-void kvm_pit_reset(struct kvm_pit *pit);
+
+void kvm_pit_load_count(struct kvm_pit *pit, int channel, u32 val,
+		int hpet_legacy_start);
+void kvm_pit_set_reinject(struct kvm_pit *pit, bool reinject);
 
 #endif
diff --git a/arch/x86/kvm/ioapic.c b/arch/x86/kvm/ioapic.c
index 1facfd6..9db47090 100644
--- a/arch/x86/kvm/ioapic.c
+++ b/arch/x86/kvm/ioapic.c
@@ -94,7 +94,7 @@
 static void rtc_irq_eoi_tracking_reset(struct kvm_ioapic *ioapic)
 {
 	ioapic->rtc_status.pending_eoi = 0;
-	bitmap_zero(ioapic->rtc_status.dest_map, KVM_MAX_VCPUS);
+	bitmap_zero(ioapic->rtc_status.dest_map.map, KVM_MAX_VCPUS);
 }
 
 static void kvm_rtc_eoi_tracking_restore_all(struct kvm_ioapic *ioapic);
@@ -117,16 +117,16 @@
 		return;
 
 	new_val = kvm_apic_pending_eoi(vcpu, e->fields.vector);
-	old_val = test_bit(vcpu->vcpu_id, ioapic->rtc_status.dest_map);
+	old_val = test_bit(vcpu->vcpu_id, ioapic->rtc_status.dest_map.map);
 
 	if (new_val == old_val)
 		return;
 
 	if (new_val) {
-		__set_bit(vcpu->vcpu_id, ioapic->rtc_status.dest_map);
+		__set_bit(vcpu->vcpu_id, ioapic->rtc_status.dest_map.map);
 		ioapic->rtc_status.pending_eoi++;
 	} else {
-		__clear_bit(vcpu->vcpu_id, ioapic->rtc_status.dest_map);
+		__clear_bit(vcpu->vcpu_id, ioapic->rtc_status.dest_map.map);
 		ioapic->rtc_status.pending_eoi--;
 		rtc_status_pending_eoi_check_valid(ioapic);
 	}
@@ -156,7 +156,8 @@
 
 static void rtc_irq_eoi(struct kvm_ioapic *ioapic, struct kvm_vcpu *vcpu)
 {
-	if (test_and_clear_bit(vcpu->vcpu_id, ioapic->rtc_status.dest_map)) {
+	if (test_and_clear_bit(vcpu->vcpu_id,
+			       ioapic->rtc_status.dest_map.map)) {
 		--ioapic->rtc_status.pending_eoi;
 		rtc_status_pending_eoi_check_valid(ioapic);
 	}
@@ -236,10 +237,17 @@
 void kvm_ioapic_scan_entry(struct kvm_vcpu *vcpu, ulong *ioapic_handled_vectors)
 {
 	struct kvm_ioapic *ioapic = vcpu->kvm->arch.vioapic;
+	struct dest_map *dest_map = &ioapic->rtc_status.dest_map;
 	union kvm_ioapic_redirect_entry *e;
 	int index;
 
 	spin_lock(&ioapic->lock);
+
+	/* Make sure we see any missing RTC EOI */
+	if (test_bit(vcpu->vcpu_id, dest_map->map))
+		__set_bit(dest_map->vectors[vcpu->vcpu_id],
+			  ioapic_handled_vectors);
+
 	for (index = 0; index < IOAPIC_NUM_PINS; index++) {
 		e = &ioapic->redirtbl[index];
 		if (e->fields.trig_mode == IOAPIC_LEVEL_TRIG ||
@@ -346,7 +354,7 @@
 		 */
 		BUG_ON(ioapic->rtc_status.pending_eoi != 0);
 		ret = kvm_irq_delivery_to_apic(ioapic->kvm, NULL, &irqe,
-				ioapic->rtc_status.dest_map);
+					       &ioapic->rtc_status.dest_map);
 		ioapic->rtc_status.pending_eoi = (ret < 0 ? 0 : ret);
 	} else
 		ret = kvm_irq_delivery_to_apic(ioapic->kvm, NULL, &irqe, NULL);
@@ -407,8 +415,14 @@
 static void __kvm_ioapic_update_eoi(struct kvm_vcpu *vcpu,
 			struct kvm_ioapic *ioapic, int vector, int trigger_mode)
 {
-	int i;
+	struct dest_map *dest_map = &ioapic->rtc_status.dest_map;
 	struct kvm_lapic *apic = vcpu->arch.apic;
+	int i;
+
+	/* RTC special handling */
+	if (test_bit(vcpu->vcpu_id, dest_map->map) &&
+	    vector == dest_map->vectors[vcpu->vcpu_id])
+		rtc_irq_eoi(ioapic, vcpu);
 
 	for (i = 0; i < IOAPIC_NUM_PINS; i++) {
 		union kvm_ioapic_redirect_entry *ent = &ioapic->redirtbl[i];
@@ -416,8 +430,6 @@
 		if (ent->fields.vector != vector)
 			continue;
 
-		if (i == RTC_GSI)
-			rtc_irq_eoi(ioapic, vcpu);
 		/*
 		 * We are dropping lock while calling ack notifiers because ack
 		 * notifier callbacks for assigned devices call into IOAPIC
diff --git a/arch/x86/kvm/ioapic.h b/arch/x86/kvm/ioapic.h
index 2d16dc2..7d2692a 100644
--- a/arch/x86/kvm/ioapic.h
+++ b/arch/x86/kvm/ioapic.h
@@ -40,9 +40,21 @@
 #define RTC_GSI -1U
 #endif
 
+struct dest_map {
+	/* vcpu bitmap where IRQ has been sent */
+	DECLARE_BITMAP(map, KVM_MAX_VCPUS);
+
+	/*
+	 * Vector sent to a given vcpu, only valid when
+	 * the vcpu's bit in map is set
+	 */
+	u8 vectors[KVM_MAX_VCPUS];
+};
+
+
 struct rtc_status {
 	int pending_eoi;
-	DECLARE_BITMAP(dest_map, KVM_MAX_VCPUS);
+	struct dest_map dest_map;
 };
 
 union kvm_ioapic_redirect_entry {
@@ -118,7 +130,8 @@
 		       int level, bool line_status);
 void kvm_ioapic_clear_all(struct kvm_ioapic *ioapic, int irq_source_id);
 int kvm_irq_delivery_to_apic(struct kvm *kvm, struct kvm_lapic *src,
-		struct kvm_lapic_irq *irq, unsigned long *dest_map);
+			     struct kvm_lapic_irq *irq,
+			     struct dest_map *dest_map);
 int kvm_get_ioapic(struct kvm *kvm, struct kvm_ioapic_state *state);
 int kvm_set_ioapic(struct kvm *kvm, struct kvm_ioapic_state *state);
 void kvm_ioapic_scan_entry(struct kvm_vcpu *vcpu,
diff --git a/arch/x86/kvm/irq.c b/arch/x86/kvm/irq.c
index 3982b47..95fcc7b 100644
--- a/arch/x86/kvm/irq.c
+++ b/arch/x86/kvm/irq.c
@@ -33,7 +33,10 @@
  */
 int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
 {
-	return apic_has_pending_timer(vcpu);
+	if (lapic_in_kernel(vcpu))
+		return apic_has_pending_timer(vcpu);
+
+	return 0;
 }
 EXPORT_SYMBOL(kvm_cpu_has_pending_timer);
 
@@ -137,8 +140,8 @@
 
 void kvm_inject_pending_timer_irqs(struct kvm_vcpu *vcpu)
 {
-	kvm_inject_apic_timer_irqs(vcpu);
-	/* TODO: PIT, RTC etc. */
+	if (lapic_in_kernel(vcpu))
+		kvm_inject_apic_timer_irqs(vcpu);
 }
 EXPORT_SYMBOL_GPL(kvm_inject_pending_timer_irqs);
 
diff --git a/arch/x86/kvm/irq.h b/arch/x86/kvm/irq.h
index ae5c78f..61ebdc1 100644
--- a/arch/x86/kvm/irq.h
+++ b/arch/x86/kvm/irq.h
@@ -109,14 +109,6 @@
 	return ret;
 }
 
-static inline int lapic_in_kernel(struct kvm_vcpu *vcpu)
-{
-	/* Same as irqchip_in_kernel(vcpu->kvm), but with less
-	 * pointer chasing and no unnecessary memory barriers.
-	 */
-	return vcpu->arch.apic != NULL;
-}
-
 void kvm_pic_reset(struct kvm_kpic_state *s);
 
 void kvm_inject_pending_timer_irqs(struct kvm_vcpu *vcpu);
diff --git a/arch/x86/kvm/irq_comm.c b/arch/x86/kvm/irq_comm.c
index 8fc89ef..54ead79 100644
--- a/arch/x86/kvm/irq_comm.c
+++ b/arch/x86/kvm/irq_comm.c
@@ -34,6 +34,7 @@
 #include "lapic.h"
 
 #include "hyperv.h"
+#include "x86.h"
 
 static int kvm_set_pic_irq(struct kvm_kernel_irq_routing_entry *e,
 			   struct kvm *kvm, int irq_source_id, int level,
@@ -53,10 +54,12 @@
 }
 
 int kvm_irq_delivery_to_apic(struct kvm *kvm, struct kvm_lapic *src,
-		struct kvm_lapic_irq *irq, unsigned long *dest_map)
+		struct kvm_lapic_irq *irq, struct dest_map *dest_map)
 {
 	int i, r = -1;
 	struct kvm_vcpu *vcpu, *lowest = NULL;
+	unsigned long dest_vcpu_bitmap[BITS_TO_LONGS(KVM_MAX_VCPUS)];
+	unsigned int dest_vcpus = 0;
 
 	if (irq->dest_mode == 0 && irq->dest_id == 0xff &&
 			kvm_lowest_prio_delivery(irq)) {
@@ -67,6 +70,8 @@
 	if (kvm_irq_delivery_to_apic_fast(kvm, src, irq, &r, dest_map))
 		return r;
 
+	memset(dest_vcpu_bitmap, 0, sizeof(dest_vcpu_bitmap));
+
 	kvm_for_each_vcpu(i, vcpu, kvm) {
 		if (!kvm_apic_present(vcpu))
 			continue;
@@ -80,13 +85,25 @@
 				r = 0;
 			r += kvm_apic_set_irq(vcpu, irq, dest_map);
 		} else if (kvm_lapic_enabled(vcpu)) {
-			if (!lowest)
-				lowest = vcpu;
-			else if (kvm_apic_compare_prio(vcpu, lowest) < 0)
-				lowest = vcpu;
+			if (!kvm_vector_hashing_enabled()) {
+				if (!lowest)
+					lowest = vcpu;
+				else if (kvm_apic_compare_prio(vcpu, lowest) < 0)
+					lowest = vcpu;
+			} else {
+				__set_bit(i, dest_vcpu_bitmap);
+				dest_vcpus++;
+			}
 		}
 	}
 
+	if (dest_vcpus != 0) {
+		int idx = kvm_vector_to_index(irq->vector, dest_vcpus,
+					dest_vcpu_bitmap, KVM_MAX_VCPUS);
+
+		lowest = kvm_get_vcpu(kvm, idx);
+	}
+
 	if (lowest)
 		r = kvm_apic_set_irq(lowest, irq, dest_map);
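
Annotation: kvm_vector_to_index(), used above and defined in lapic.c below, implements the hashing rule "vector % number of destinations" by walking to the corresponding set bit of the destination bitmap. A hedged stand-alone version for a single unsigned long of destinations (name invented):

#include <stdint.h>

static int vector_to_index_demo(uint32_t vector, uint32_t dest_vcpus,
				unsigned long bitmap)
{
	uint32_t mod = vector % dest_vcpus;
	unsigned int bit;

	for (bit = 0; bit < 8 * sizeof(bitmap); bit++) {
		if (!(bitmap & (1UL << bit)))
			continue;
		if (mod-- == 0)		/* (mod + 1)-th set bit found */
			return bit;
	}
	return -1;	/* fewer set bits than the caller promised */
}
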
 
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index 3a045f3..443d2a5 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -281,7 +281,7 @@
 	struct kvm_cpuid_entry2 *feat;
 	u32 v = APIC_VERSION;
 
-	if (!kvm_vcpu_has_lapic(vcpu))
+	if (!lapic_in_kernel(vcpu))
 		return;
 
 	feat = kvm_find_cpuid_entry(apic->vcpu, 0x1, 0);
@@ -475,26 +475,20 @@
 
 int kvm_lapic_find_highest_irr(struct kvm_vcpu *vcpu)
 {
-	int highest_irr;
-
 	/* This may race with setting of irr in __apic_accept_irq() and
 	 * value returned may be wrong, but kvm_vcpu_kick() in __apic_accept_irq
 	 * will cause vmexit immediately and the value will be recalculated
 	 * on the next vmentry.
 	 */
-	if (!kvm_vcpu_has_lapic(vcpu))
-		return 0;
-	highest_irr = apic_find_highest_irr(vcpu->arch.apic);
-
-	return highest_irr;
+	return apic_find_highest_irr(vcpu->arch.apic);
 }
 
 static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
 			     int vector, int level, int trig_mode,
-			     unsigned long *dest_map);
+			     struct dest_map *dest_map);
 
 int kvm_apic_set_irq(struct kvm_vcpu *vcpu, struct kvm_lapic_irq *irq,
-		unsigned long *dest_map)
+		     struct dest_map *dest_map)
 {
 	struct kvm_lapic *apic = vcpu->arch.apic;
 
@@ -675,8 +669,33 @@
 	}
 }
 
+int kvm_vector_to_index(u32 vector, u32 dest_vcpus,
+		       const unsigned long *bitmap, u32 bitmap_size)
+{
+	u32 mod;
+	int i, idx = -1;
+
+	mod = vector % dest_vcpus;
+
+	for (i = 0; i <= mod; i++) {
+		idx = find_next_bit(bitmap, bitmap_size, idx + 1);
+		BUG_ON(idx == bitmap_size);
+	}
+
+	return idx;
+}
+
+static void kvm_apic_disabled_lapic_found(struct kvm *kvm)
+{
+	if (!kvm->arch.disabled_lapic_found) {
+		kvm->arch.disabled_lapic_found = true;
+		printk(KERN_INFO
+		       "Disabled LAPIC found during irq injection\n");
+	}
+}
+
 bool kvm_irq_delivery_to_apic_fast(struct kvm *kvm, struct kvm_lapic *src,
-		struct kvm_lapic_irq *irq, int *r, unsigned long *dest_map)
+		struct kvm_lapic_irq *irq, int *r, struct dest_map *dest_map)
 {
 	struct kvm_apic_map *map;
 	unsigned long bitmap = 1;
@@ -727,21 +746,42 @@
 
 		dst = map->logical_map[cid];
 
-		if (kvm_lowest_prio_delivery(irq)) {
+		if (!kvm_lowest_prio_delivery(irq))
+			goto set_irq;
+
+		if (!kvm_vector_hashing_enabled()) {
 			int l = -1;
 			for_each_set_bit(i, &bitmap, 16) {
 				if (!dst[i])
 					continue;
 				if (l < 0)
 					l = i;
-				else if (kvm_apic_compare_prio(dst[i]->vcpu, dst[l]->vcpu) < 0)
+				else if (kvm_apic_compare_prio(dst[i]->vcpu,
+							dst[l]->vcpu) < 0)
 					l = i;
 			}
-
 			bitmap = (l >= 0) ? 1 << l : 0;
+		} else {
+			int idx;
+			unsigned int dest_vcpus;
+
+			dest_vcpus = hweight16(bitmap);
+			if (dest_vcpus == 0)
+				goto out;
+
+			idx = kvm_vector_to_index(irq->vector,
+				dest_vcpus, &bitmap, 16);
+
+			if (!dst[idx]) {
+				kvm_apic_disabled_lapic_found(kvm);
+				goto out;
+			}
+
+			bitmap = (idx >= 0) ? 1 << idx : 0;
 		}
 	}
 
+set_irq:
 	for_each_set_bit(i, &bitmap, 16) {
 		if (!dst[i])
 			continue;
@@ -754,6 +794,20 @@
 	return ret;
 }
 
+/*
+ * This routine tries to handle interrupts in posted mode; here is how
+ * it deals with different cases:
+ * - For single-destination interrupts, handle it in posted mode
+ * - Else if vector hashing is enabled and it is a lowest-priority
+ *   interrupt, handle it in posted mode and use the following mechanism
+ *   to find the destination vCPU.
+ *	1. For lowest-priority interrupts, store all the possible
+ *	   destination vCPUs in an array.
+ *	2. Use "guest vector % max number of destination vCPUs" to find
+ *	   the right destination vCPU in the array for the lowest-priority
+ *	   interrupt.
+ * - Otherwise, use remapped mode to inject the interrupt.
+ */
 bool kvm_intr_is_single_vcpu_fast(struct kvm *kvm, struct kvm_lapic_irq *irq,
 			struct kvm_vcpu **dest_vcpu)
 {
@@ -795,16 +849,37 @@
 		if (cid >= ARRAY_SIZE(map->logical_map))
 			goto out;
 
-		for_each_set_bit(i, &bitmap, 16) {
-			dst = map->logical_map[cid][i];
-			if (++r == 2)
+		if (kvm_vector_hashing_enabled() &&
+				kvm_lowest_prio_delivery(irq)) {
+			int idx;
+			unsigned int dest_vcpus;
+
+			dest_vcpus = hweight16(bitmap);
+			if (dest_vcpus == 0)
+				goto out;
+
+			idx = kvm_vector_to_index(irq->vector, dest_vcpus,
+						  &bitmap, 16);
+
+			dst = map->logical_map[cid][idx];
+			if (!dst) {
+				kvm_apic_disabled_lapic_found(kvm);
+				goto out;
+			}
+
+			*dest_vcpu = dst->vcpu;
+		} else {
+			for_each_set_bit(i, &bitmap, 16) {
+				dst = map->logical_map[cid][i];
+				if (++r == 2)
+					goto out;
+			}
+
+			if (dst && kvm_apic_present(dst->vcpu))
+				*dest_vcpu = dst->vcpu;
+			else
 				goto out;
 		}
-
-		if (dst && kvm_apic_present(dst->vcpu))
-			*dest_vcpu = dst->vcpu;
-		else
-			goto out;
 	}
 
 	ret = true;
@@ -819,7 +894,7 @@
  */
 static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
 			     int vector, int level, int trig_mode,
-			     unsigned long *dest_map)
+			     struct dest_map *dest_map)
 {
 	int result = 0;
 	struct kvm_vcpu *vcpu = apic->vcpu;
@@ -839,8 +914,10 @@
 
 		result = 1;
 
-		if (dest_map)
-			__set_bit(vcpu->vcpu_id, dest_map);
+		if (dest_map) {
+			__set_bit(vcpu->vcpu_id, dest_map->map);
+			dest_map->vectors[vcpu->vcpu_id] = vector;
+		}
 
 		if (apic_test_vector(vector, apic->regs + APIC_TMR) != !!trig_mode) {
 			if (trig_mode)
@@ -1239,7 +1316,7 @@
 	struct kvm_lapic *apic = vcpu->arch.apic;
 	u64 guest_tsc, tsc_deadline;
 
-	if (!kvm_vcpu_has_lapic(vcpu))
+	if (!lapic_in_kernel(vcpu))
 		return;
 
 	if (apic->lapic_timer.expired_tscdeadline == 0)
@@ -1515,8 +1592,7 @@
 
 void kvm_lapic_set_eoi(struct kvm_vcpu *vcpu)
 {
-	if (kvm_vcpu_has_lapic(vcpu))
-		apic_reg_write(vcpu->arch.apic, APIC_EOI, 0);
+	apic_reg_write(vcpu->arch.apic, APIC_EOI, 0);
 }
 EXPORT_SYMBOL_GPL(kvm_lapic_set_eoi);
 
@@ -1566,7 +1642,7 @@
 {
 	struct kvm_lapic *apic = vcpu->arch.apic;
 
-	if (!kvm_vcpu_has_lapic(vcpu) || apic_lvtt_oneshot(apic) ||
+	if (!lapic_in_kernel(vcpu) || apic_lvtt_oneshot(apic) ||
 			apic_lvtt_period(apic))
 		return 0;
 
@@ -1577,7 +1653,7 @@
 {
 	struct kvm_lapic *apic = vcpu->arch.apic;
 
-	if (!kvm_vcpu_has_lapic(vcpu) || apic_lvtt_oneshot(apic) ||
+	if (!lapic_in_kernel(vcpu) || apic_lvtt_oneshot(apic) ||
 			apic_lvtt_period(apic))
 		return;
 
@@ -1590,9 +1666,6 @@
 {
 	struct kvm_lapic *apic = vcpu->arch.apic;
 
-	if (!kvm_vcpu_has_lapic(vcpu))
-		return;
-
 	apic_set_tpr(apic, ((cr8 & 0x0f) << 4)
 		     | (kvm_apic_get_reg(apic, APIC_TASKPRI) & 4));
 }
@@ -1601,9 +1674,6 @@
 {
 	u64 tpr;
 
-	if (!kvm_vcpu_has_lapic(vcpu))
-		return 0;
-
 	tpr = (u64) kvm_apic_get_reg(vcpu->arch.apic, APIC_TASKPRI);
 
 	return (tpr & 0xf0) >> 4;
@@ -1728,8 +1798,7 @@
 {
 	struct kvm_lapic *apic = vcpu->arch.apic;
 
-	if (kvm_vcpu_has_lapic(vcpu) && apic_enabled(apic) &&
-			apic_lvt_enabled(apic, APIC_LVTT))
+	if (apic_enabled(apic) && apic_lvt_enabled(apic, APIC_LVTT))
 		return atomic_read(&apic->lapic_timer.pending);
 
 	return 0;
@@ -1826,7 +1895,7 @@
 	struct kvm_lapic *apic = vcpu->arch.apic;
 	int highest_irr;
 
-	if (!kvm_vcpu_has_lapic(vcpu) || !apic_enabled(apic))
+	if (!apic_enabled(apic))
 		return -1;
 
 	apic_update_ppr(apic);
@@ -1854,9 +1923,6 @@
 {
 	struct kvm_lapic *apic = vcpu->arch.apic;
 
-	if (!kvm_vcpu_has_lapic(vcpu))
-		return;
-
 	if (atomic_read(&apic->lapic_timer.pending) > 0) {
 		kvm_apic_local_deliver(apic, APIC_LVTT);
 		if (apic_lvtt_tscdeadline(apic))
@@ -1932,7 +1998,7 @@
 {
 	struct hrtimer *timer;
 
-	if (!kvm_vcpu_has_lapic(vcpu))
+	if (!lapic_in_kernel(vcpu))
 		return;
 
 	timer = &vcpu->arch.apic->lapic_timer.timer;
@@ -2105,7 +2171,7 @@
 {
 	struct kvm_lapic *apic = vcpu->arch.apic;
 
-	if (!kvm_vcpu_has_lapic(vcpu))
+	if (!lapic_in_kernel(vcpu))
 		return 1;
 
 	/* if this is ICR write vector before command */
@@ -2119,7 +2185,7 @@
 	struct kvm_lapic *apic = vcpu->arch.apic;
 	u32 low, high = 0;
 
-	if (!kvm_vcpu_has_lapic(vcpu))
+	if (!lapic_in_kernel(vcpu))
 		return 1;
 
 	if (apic_reg_read(apic, reg, 4, &low))
@@ -2151,7 +2217,7 @@
 	u8 sipi_vector;
 	unsigned long pe;
 
-	if (!kvm_vcpu_has_lapic(vcpu) || !apic->pending_events)
+	if (!lapic_in_kernel(vcpu) || !apic->pending_events)
 		return;
 
 	/*
diff --git a/arch/x86/kvm/lapic.h b/arch/x86/kvm/lapic.h
index 41bdb35..f71183e 100644
--- a/arch/x86/kvm/lapic.h
+++ b/arch/x86/kvm/lapic.h
@@ -42,6 +42,9 @@
 	unsigned long pending_events;
 	unsigned int sipi_vector;
 };
+
+struct dest_map;
+
 int kvm_create_lapic(struct kvm_vcpu *vcpu);
 void kvm_free_lapic(struct kvm_vcpu *vcpu);
 
@@ -60,11 +63,11 @@
 void __kvm_apic_update_irr(u32 *pir, void *regs);
 void kvm_apic_update_irr(struct kvm_vcpu *vcpu, u32 *pir);
 int kvm_apic_set_irq(struct kvm_vcpu *vcpu, struct kvm_lapic_irq *irq,
-		unsigned long *dest_map);
+		     struct dest_map *dest_map);
 int kvm_apic_local_deliver(struct kvm_lapic *apic, int lvt_type);
 
 bool kvm_irq_delivery_to_apic_fast(struct kvm *kvm, struct kvm_lapic *src,
-		struct kvm_lapic_irq *irq, int *r, unsigned long *dest_map);
+		struct kvm_lapic_irq *irq, int *r, struct dest_map *dest_map);
 
 u64 kvm_get_apic_base(struct kvm_vcpu *vcpu);
 int kvm_set_apic_base(struct kvm_vcpu *vcpu, struct msr_data *msr_info);
@@ -103,7 +106,7 @@
 
 extern struct static_key kvm_no_apic_vcpu;
 
-static inline bool kvm_vcpu_has_lapic(struct kvm_vcpu *vcpu)
+static inline bool lapic_in_kernel(struct kvm_vcpu *vcpu)
 {
 	if (static_key_false(&kvm_no_apic_vcpu))
 		return vcpu->arch.apic;
@@ -130,7 +133,7 @@
 
 static inline bool kvm_apic_present(struct kvm_vcpu *vcpu)
 {
-	return kvm_vcpu_has_lapic(vcpu) && kvm_apic_hw_enabled(vcpu->arch.apic);
+	return lapic_in_kernel(vcpu) && kvm_apic_hw_enabled(vcpu->arch.apic);
 }
 
 static inline int kvm_lapic_enabled(struct kvm_vcpu *vcpu)
@@ -150,7 +153,7 @@
 
 static inline bool kvm_apic_has_events(struct kvm_vcpu *vcpu)
 {
-	return kvm_vcpu_has_lapic(vcpu) && vcpu->arch.apic->pending_events;
+	return lapic_in_kernel(vcpu) && vcpu->arch.apic->pending_events;
 }
 
 static inline bool kvm_lowest_prio_delivery(struct kvm_lapic_irq *irq)
@@ -161,7 +164,7 @@
 
 static inline int kvm_lapic_latched_init(struct kvm_vcpu *vcpu)
 {
-	return kvm_vcpu_has_lapic(vcpu) && test_bit(KVM_APIC_INIT, &vcpu->arch.apic->pending_events);
+	return lapic_in_kernel(vcpu) && test_bit(KVM_APIC_INIT, &vcpu->arch.apic->pending_events);
 }
 
 static inline int kvm_apic_id(struct kvm_lapic *apic)
@@ -175,4 +178,6 @@
 
 bool kvm_intr_is_single_vcpu_fast(struct kvm *kvm, struct kvm_lapic_irq *irq,
 			struct kvm_vcpu **dest_vcpu);
+int kvm_vector_to_index(u32 vector, u32 dest_vcpus,
+			const unsigned long *bitmap, u32 bitmap_size);
 #endif
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 1e7a49b..c512f09 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -41,6 +41,7 @@
 #include <asm/cmpxchg.h>
 #include <asm/io.h>
 #include <asm/vmx.h>
+#include <asm/kvm_page_track.h>
 
 /*
  * When setting this variable to true it enables Two-Dimensional-Paging
@@ -776,62 +777,85 @@
 	return &slot->arch.lpage_info[level - 2][idx];
 }
 
+static void update_gfn_disallow_lpage_count(struct kvm_memory_slot *slot,
+					    gfn_t gfn, int count)
+{
+	struct kvm_lpage_info *linfo;
+	int i;
+
+	for (i = PT_DIRECTORY_LEVEL; i <= PT_MAX_HUGEPAGE_LEVEL; ++i) {
+		linfo = lpage_info_slot(gfn, slot, i);
+		linfo->disallow_lpage += count;
+		WARN_ON(linfo->disallow_lpage < 0);
+	}
+}
+
+void kvm_mmu_gfn_disallow_lpage(struct kvm_memory_slot *slot, gfn_t gfn)
+{
+	update_gfn_disallow_lpage_count(slot, gfn, 1);
+}
+
+void kvm_mmu_gfn_allow_lpage(struct kvm_memory_slot *slot, gfn_t gfn)
+{
+	update_gfn_disallow_lpage_count(slot, gfn, -1);
+}
+
 static void account_shadowed(struct kvm *kvm, struct kvm_mmu_page *sp)
 {
 	struct kvm_memslots *slots;
 	struct kvm_memory_slot *slot;
-	struct kvm_lpage_info *linfo;
 	gfn_t gfn;
-	int i;
 
+	kvm->arch.indirect_shadow_pages++;
 	gfn = sp->gfn;
 	slots = kvm_memslots_for_spte_role(kvm, sp->role);
 	slot = __gfn_to_memslot(slots, gfn);
-	for (i = PT_DIRECTORY_LEVEL; i <= PT_MAX_HUGEPAGE_LEVEL; ++i) {
-		linfo = lpage_info_slot(gfn, slot, i);
-		linfo->write_count += 1;
-	}
-	kvm->arch.indirect_shadow_pages++;
+
+	/* The non-leaf shadow pages are kept write-protected. */
+	if (sp->role.level > PT_PAGE_TABLE_LEVEL)
+		return kvm_slot_page_track_add_page(kvm, slot, gfn,
+						    KVM_PAGE_TRACK_WRITE);
+
+	kvm_mmu_gfn_disallow_lpage(slot, gfn);
 }
 
 static void unaccount_shadowed(struct kvm *kvm, struct kvm_mmu_page *sp)
 {
 	struct kvm_memslots *slots;
 	struct kvm_memory_slot *slot;
-	struct kvm_lpage_info *linfo;
 	gfn_t gfn;
-	int i;
 
+	kvm->arch.indirect_shadow_pages--;
 	gfn = sp->gfn;
 	slots = kvm_memslots_for_spte_role(kvm, sp->role);
 	slot = __gfn_to_memslot(slots, gfn);
-	for (i = PT_DIRECTORY_LEVEL; i <= PT_MAX_HUGEPAGE_LEVEL; ++i) {
-		linfo = lpage_info_slot(gfn, slot, i);
-		linfo->write_count -= 1;
-		WARN_ON(linfo->write_count < 0);
-	}
-	kvm->arch.indirect_shadow_pages--;
+	if (sp->role.level > PT_PAGE_TABLE_LEVEL)
+		return kvm_slot_page_track_remove_page(kvm, slot, gfn,
+						       KVM_PAGE_TRACK_WRITE);
+
+	kvm_mmu_gfn_allow_lpage(slot, gfn);
 }
 
-static int __has_wrprotected_page(gfn_t gfn, int level,
-				  struct kvm_memory_slot *slot)
+static bool __mmu_gfn_lpage_is_disallowed(gfn_t gfn, int level,
+					  struct kvm_memory_slot *slot)
 {
 	struct kvm_lpage_info *linfo;
 
 	if (slot) {
 		linfo = lpage_info_slot(gfn, slot, level);
-		return linfo->write_count;
+		return !!linfo->disallow_lpage;
 	}
 
-	return 1;
+	return true;
 }
 
-static int has_wrprotected_page(struct kvm_vcpu *vcpu, gfn_t gfn, int level)
+static bool mmu_gfn_lpage_is_disallowed(struct kvm_vcpu *vcpu, gfn_t gfn,
+					int level)
 {
 	struct kvm_memory_slot *slot;
 
 	slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
-	return __has_wrprotected_page(gfn, level, slot);
+	return __mmu_gfn_lpage_is_disallowed(gfn, level, slot);
 }
 
 static int host_mapping_level(struct kvm *kvm, gfn_t gfn)
@@ -897,7 +921,7 @@
 	max_level = min(kvm_x86_ops->get_lpage_level(), host_level);
 
 	for (level = PT_DIRECTORY_LEVEL; level <= max_level; ++level)
-		if (__has_wrprotected_page(large_gfn, level, slot))
+		if (__mmu_gfn_lpage_is_disallowed(large_gfn, level, slot))
 			break;
 
 	return level - 1;
@@ -1323,23 +1347,29 @@
 		kvm_mmu_write_protect_pt_masked(kvm, slot, gfn_offset, mask);
 }
 
-static bool rmap_write_protect(struct kvm_vcpu *vcpu, u64 gfn)
+bool kvm_mmu_slot_gfn_write_protect(struct kvm *kvm,
+				    struct kvm_memory_slot *slot, u64 gfn)
 {
-	struct kvm_memory_slot *slot;
 	struct kvm_rmap_head *rmap_head;
 	int i;
 	bool write_protected = false;
 
-	slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
-
 	for (i = PT_PAGE_TABLE_LEVEL; i <= PT_MAX_HUGEPAGE_LEVEL; ++i) {
 		rmap_head = __gfn_to_rmap(gfn, i, slot);
-		write_protected |= __rmap_write_protect(vcpu->kvm, rmap_head, true);
+		write_protected |= __rmap_write_protect(kvm, rmap_head, true);
 	}
 
 	return write_protected;
 }
 
+static bool rmap_write_protect(struct kvm_vcpu *vcpu, u64 gfn)
+{
+	struct kvm_memory_slot *slot;
+
+	slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
+	return kvm_mmu_slot_gfn_write_protect(vcpu->kvm, slot, gfn);
+}
+
 static bool kvm_zap_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head)
 {
 	u64 *sptep;
@@ -1754,7 +1784,7 @@
 static int nonpaging_sync_page(struct kvm_vcpu *vcpu,
 			       struct kvm_mmu_page *sp)
 {
-	return 1;
+	return 0;
 }
 
 static void nonpaging_invlpg(struct kvm_vcpu *vcpu, gva_t gva)
@@ -1840,13 +1870,16 @@
 	return nr_unsync_leaf;
 }
 
+#define INVALID_INDEX (-1)
+
 static int mmu_unsync_walk(struct kvm_mmu_page *sp,
 			   struct kvm_mmu_pages *pvec)
 {
+	pvec->nr = 0;
 	if (!sp->unsync_children)
 		return 0;
 
-	mmu_pages_add(pvec, sp, 0);
+	mmu_pages_add(pvec, sp, INVALID_INDEX);
 	return __mmu_unsync_walk(sp, pvec);
 }
 
@@ -1883,37 +1916,35 @@
 		if ((_sp)->role.direct || (_sp)->role.invalid) {} else
 
 /* @sp->gfn should be write-protected at the call site */
-static int __kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
-			   struct list_head *invalid_list, bool clear_unsync)
+static bool __kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
+			    struct list_head *invalid_list)
 {
 	if (sp->role.cr4_pae != !!is_pae(vcpu)) {
 		kvm_mmu_prepare_zap_page(vcpu->kvm, sp, invalid_list);
-		return 1;
+		return false;
 	}
 
-	if (clear_unsync)
-		kvm_unlink_unsync_page(vcpu->kvm, sp);
-
-	if (vcpu->arch.mmu.sync_page(vcpu, sp)) {
+	if (vcpu->arch.mmu.sync_page(vcpu, sp) == 0) {
 		kvm_mmu_prepare_zap_page(vcpu->kvm, sp, invalid_list);
-		return 1;
+		return false;
 	}
 
-	kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
-	return 0;
+	return true;
 }
 
-static int kvm_sync_page_transient(struct kvm_vcpu *vcpu,
-				   struct kvm_mmu_page *sp)
+static void kvm_mmu_flush_or_zap(struct kvm_vcpu *vcpu,
+				 struct list_head *invalid_list,
+				 bool remote_flush, bool local_flush)
 {
-	LIST_HEAD(invalid_list);
-	int ret;
+	if (!list_empty(invalid_list)) {
+		kvm_mmu_commit_zap_page(vcpu->kvm, invalid_list);
+		return;
+	}
 
-	ret = __kvm_sync_page(vcpu, sp, &invalid_list, false);
-	if (ret)
-		kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);
-
-	return ret;
+	if (remote_flush)
+		kvm_flush_remote_tlbs(vcpu->kvm);
+	else if (local_flush)
+		kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
 }
 
 #ifdef CONFIG_KVM_MMU_AUDIT
@@ -1923,46 +1954,38 @@
 static void mmu_audit_disable(void) { }
 #endif
 
-static int kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
+static bool kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
 			 struct list_head *invalid_list)
 {
-	return __kvm_sync_page(vcpu, sp, invalid_list, true);
+	kvm_unlink_unsync_page(vcpu->kvm, sp);
+	return __kvm_sync_page(vcpu, sp, invalid_list);
 }
 
 /* @gfn should be write-protected at the call site */
-static void kvm_sync_pages(struct kvm_vcpu *vcpu,  gfn_t gfn)
+static bool kvm_sync_pages(struct kvm_vcpu *vcpu, gfn_t gfn,
+			   struct list_head *invalid_list)
 {
 	struct kvm_mmu_page *s;
-	LIST_HEAD(invalid_list);
-	bool flush = false;
+	bool ret = false;
 
 	for_each_gfn_indirect_valid_sp(vcpu->kvm, s, gfn) {
 		if (!s->unsync)
 			continue;
 
 		WARN_ON(s->role.level != PT_PAGE_TABLE_LEVEL);
-		kvm_unlink_unsync_page(vcpu->kvm, s);
-		if ((s->role.cr4_pae != !!is_pae(vcpu)) ||
-			(vcpu->arch.mmu.sync_page(vcpu, s))) {
-			kvm_mmu_prepare_zap_page(vcpu->kvm, s, &invalid_list);
-			continue;
-		}
-		flush = true;
+		ret |= kvm_sync_page(vcpu, s, invalid_list);
 	}
 
-	kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);
-	if (flush)
-		kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
+	return ret;
 }
 
 struct mmu_page_path {
-	struct kvm_mmu_page *parent[PT64_ROOT_LEVEL-1];
-	unsigned int idx[PT64_ROOT_LEVEL-1];
+	struct kvm_mmu_page *parent[PT64_ROOT_LEVEL];
+	unsigned int idx[PT64_ROOT_LEVEL];
 };
 
 #define for_each_sp(pvec, sp, parents, i)			\
-		for (i = mmu_pages_next(&pvec, &parents, -1),	\
-			sp = pvec.page[i].sp;			\
+		for (i = mmu_pages_first(&pvec, &parents);	\
 			i < pvec.nr && ({ sp = pvec.page[i].sp; 1;});	\
 			i = mmu_pages_next(&pvec, &parents, i))
 
@@ -1974,19 +1997,43 @@
 
 	for (n = i+1; n < pvec->nr; n++) {
 		struct kvm_mmu_page *sp = pvec->page[n].sp;
+		unsigned idx = pvec->page[n].idx;
+		int level = sp->role.level;
 
-		if (sp->role.level == PT_PAGE_TABLE_LEVEL) {
-			parents->idx[0] = pvec->page[n].idx;
-			return n;
-		}
+		parents->idx[level-1] = idx;
+		if (level == PT_PAGE_TABLE_LEVEL)
+			break;
 
-		parents->parent[sp->role.level-2] = sp;
-		parents->idx[sp->role.level-1] = pvec->page[n].idx;
+		parents->parent[level-2] = sp;
 	}
 
 	return n;
 }
 
+static int mmu_pages_first(struct kvm_mmu_pages *pvec,
+			   struct mmu_page_path *parents)
+{
+	struct kvm_mmu_page *sp;
+	int level;
+
+	if (pvec->nr == 0)
+		return 0;
+
+	WARN_ON(pvec->page[0].idx != INVALID_INDEX);
+
+	sp = pvec->page[0].sp;
+	level = sp->role.level;
+	WARN_ON(level == PT_PAGE_TABLE_LEVEL);
+
+	parents->parent[level-2] = sp;
+
+	/* Also set up a sentinel.  Further entries in pvec are all
+	 * children of sp, so this element is never overwritten.
+	 */
+	parents->parent[level-1] = NULL;
+	return mmu_pages_next(pvec, parents, 0);
+}
+
 static void mmu_pages_clear_parents(struct mmu_page_path *parents)
 {
 	struct kvm_mmu_page *sp;
@@ -1994,22 +2041,14 @@
 
 	do {
 		unsigned int idx = parents->idx[level];
-
 		sp = parents->parent[level];
 		if (!sp)
 			return;
 
+		WARN_ON(idx == INVALID_INDEX);
 		clear_unsync_child_bit(sp, idx);
 		level++;
-	} while (level < PT64_ROOT_LEVEL-1 && !sp->unsync_children);
-}
-
-static void kvm_mmu_pages_init(struct kvm_mmu_page *parent,
-			       struct mmu_page_path *parents,
-			       struct kvm_mmu_pages *pvec)
-{
-	parents->parent[parent->role.level-1] = NULL;
-	pvec->nr = 0;
+	} while (!sp->unsync_children);
 }
 
 static void mmu_sync_children(struct kvm_vcpu *vcpu,
@@ -2020,30 +2059,36 @@
 	struct mmu_page_path parents;
 	struct kvm_mmu_pages pages;
 	LIST_HEAD(invalid_list);
+	bool flush = false;
 
-	kvm_mmu_pages_init(parent, &parents, &pages);
 	while (mmu_unsync_walk(parent, &pages)) {
 		bool protected = false;
 
 		for_each_sp(pages, sp, parents, i)
 			protected |= rmap_write_protect(vcpu, sp->gfn);
 
-		if (protected)
+		if (protected) {
 			kvm_flush_remote_tlbs(vcpu->kvm);
+			flush = false;
+		}
 
 		for_each_sp(pages, sp, parents, i) {
-			kvm_sync_page(vcpu, sp, &invalid_list);
+			flush |= kvm_sync_page(vcpu, sp, &invalid_list);
 			mmu_pages_clear_parents(&parents);
 		}
-		kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);
-		cond_resched_lock(&vcpu->kvm->mmu_lock);
-		kvm_mmu_pages_init(parent, &parents, &pages);
+		if (need_resched() || spin_needbreak(&vcpu->kvm->mmu_lock)) {
+			kvm_mmu_flush_or_zap(vcpu, &invalid_list, false, flush);
+			cond_resched_lock(&vcpu->kvm->mmu_lock);
+			flush = false;
+		}
 	}
+
+	kvm_mmu_flush_or_zap(vcpu, &invalid_list, false, flush);
 }
 
 static void __clear_sp_write_flooding_count(struct kvm_mmu_page *sp)
 {
-	sp->write_flooding_count = 0;
+	atomic_set(&sp->write_flooding_count, 0);
 }
 
 static void clear_sp_write_flooding_count(u64 *spte)
@@ -2069,6 +2114,8 @@
 	unsigned quadrant;
 	struct kvm_mmu_page *sp;
 	bool need_sync = false;
+	bool flush = false;
+	LIST_HEAD(invalid_list);
 
 	role = vcpu->arch.mmu.base_role;
 	role.level = level;
@@ -2092,8 +2139,16 @@
 		if (sp->role.word != role.word)
 			continue;
 
-		if (sp->unsync && kvm_sync_page_transient(vcpu, sp))
-			break;
+		if (sp->unsync) {
+			/* The page is good, but __kvm_sync_page might still end
+			 * up zapping it.  If so, break in order to rebuild it.
+			 */
+			if (!__kvm_sync_page(vcpu, sp, &invalid_list))
+				break;
+
+			WARN_ON(!list_empty(&invalid_list));
+			kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
+		}
 
 		if (sp->unsync_children)
 			kvm_make_request(KVM_REQ_MMU_SYNC, vcpu);
@@ -2112,16 +2167,24 @@
 	hlist_add_head(&sp->hash_link,
 		&vcpu->kvm->arch.mmu_page_hash[kvm_page_table_hashfn(gfn)]);
 	if (!direct) {
-		if (rmap_write_protect(vcpu, gfn))
-			kvm_flush_remote_tlbs(vcpu->kvm);
-		if (level > PT_PAGE_TABLE_LEVEL && need_sync)
-			kvm_sync_pages(vcpu, gfn);
-
+		/*
+		 * We should do write protection before syncing pages;
+		 * otherwise the content of the synced shadow page may
+		 * be inconsistent with the guest page table.
+		 */
 		account_shadowed(vcpu->kvm, sp);
+		if (level == PT_PAGE_TABLE_LEVEL &&
+		      rmap_write_protect(vcpu, gfn))
+			kvm_flush_remote_tlbs(vcpu->kvm);
+
+		if (level > PT_PAGE_TABLE_LEVEL && need_sync)
+			flush |= kvm_sync_pages(vcpu, gfn, &invalid_list);
 	}
 	sp->mmu_valid_gen = vcpu->kvm->arch.mmu_valid_gen;
 	clear_page(sp->spt);
 	trace_kvm_mmu_get_page(sp, true);
+
+	kvm_mmu_flush_or_zap(vcpu, &invalid_list, false, flush);
 	return sp;
 }
 
@@ -2269,7 +2332,6 @@
 	if (parent->role.level == PT_PAGE_TABLE_LEVEL)
 		return 0;
 
-	kvm_mmu_pages_init(parent, &parents, &pages);
 	while (mmu_unsync_walk(parent, &pages)) {
 		struct kvm_mmu_page *sp;
 
@@ -2278,7 +2340,6 @@
 			mmu_pages_clear_parents(&parents);
 			zapped++;
 		}
-		kvm_mmu_pages_init(parent, &parents, &pages);
 	}
 
 	return zapped;
@@ -2354,8 +2415,8 @@
 	if (list_empty(&kvm->arch.active_mmu_pages))
 		return false;
 
-	sp = list_entry(kvm->arch.active_mmu_pages.prev,
-			struct kvm_mmu_page, link);
+	sp = list_last_entry(&kvm->arch.active_mmu_pages,
+			     struct kvm_mmu_page, link);
 	kvm_mmu_prepare_zap_page(kvm, sp, invalid_list);
 
 	return true;
@@ -2408,7 +2469,7 @@
 }
 EXPORT_SYMBOL_GPL(kvm_mmu_unprotect_page);
 
-static void __kvm_unsync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
+static void kvm_unsync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
 {
 	trace_kvm_mmu_unsync_page(sp);
 	++vcpu->kvm->stat.mmu_unsync;
@@ -2417,37 +2478,26 @@
 	kvm_mmu_mark_parents_unsync(sp);
 }
 
-static void kvm_unsync_pages(struct kvm_vcpu *vcpu,  gfn_t gfn)
+static bool mmu_need_write_protect(struct kvm_vcpu *vcpu, gfn_t gfn,
+				   bool can_unsync)
 {
-	struct kvm_mmu_page *s;
+	struct kvm_mmu_page *sp;
 
-	for_each_gfn_indirect_valid_sp(vcpu->kvm, s, gfn) {
-		if (s->unsync)
-			continue;
-		WARN_ON(s->role.level != PT_PAGE_TABLE_LEVEL);
-		__kvm_unsync_page(vcpu, s);
-	}
-}
+	if (kvm_page_track_is_active(vcpu, gfn, KVM_PAGE_TRACK_WRITE))
+		return true;
 
-static int mmu_need_write_protect(struct kvm_vcpu *vcpu, gfn_t gfn,
-				  bool can_unsync)
-{
-	struct kvm_mmu_page *s;
-	bool need_unsync = false;
-
-	for_each_gfn_indirect_valid_sp(vcpu->kvm, s, gfn) {
+	for_each_gfn_indirect_valid_sp(vcpu->kvm, sp, gfn) {
 		if (!can_unsync)
-			return 1;
+			return true;
 
-		if (s->role.level != PT_PAGE_TABLE_LEVEL)
-			return 1;
+		if (sp->unsync)
+			continue;
 
-		if (!s->unsync)
-			need_unsync = true;
+		WARN_ON(sp->role.level != PT_PAGE_TABLE_LEVEL);
+		kvm_unsync_page(vcpu, sp);
 	}
-	if (need_unsync)
-		kvm_unsync_pages(vcpu, gfn);
-	return 0;
+
+	return false;
 }
 
 static bool kvm_is_mmio_pfn(kvm_pfn_t pfn)
@@ -2503,7 +2553,7 @@
 		 * be fixed if guest refault.
 		 */
 		if (level > PT_PAGE_TABLE_LEVEL &&
-		    has_wrprotected_page(vcpu, gfn, level))
+		    mmu_gfn_lpage_is_disallowed(vcpu, gfn, level))
 			goto done;
 
 		spte |= PT_WRITABLE_MASK | SPTE_MMU_WRITEABLE;
@@ -2768,7 +2818,7 @@
 	if (!is_error_noslot_pfn(pfn) && !kvm_is_reserved_pfn(pfn) &&
 	    level == PT_PAGE_TABLE_LEVEL &&
 	    PageTransCompound(pfn_to_page(pfn)) &&
-	    !has_wrprotected_page(vcpu, gfn, PT_DIRECTORY_LEVEL)) {
+	    !mmu_gfn_lpage_is_disallowed(vcpu, gfn, PT_DIRECTORY_LEVEL)) {
 		unsigned long mask;
 		/*
 		 * mmu_notifier_retry was successful and we hold the
@@ -2796,20 +2846,16 @@
 static bool handle_abnormal_pfn(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn,
 				kvm_pfn_t pfn, unsigned access, int *ret_val)
 {
-	bool ret = true;
-
 	/* The pfn is invalid, report the error! */
 	if (unlikely(is_error_pfn(pfn))) {
 		*ret_val = kvm_handle_bad_page(vcpu, gfn, pfn);
-		goto exit;
+		return true;
 	}
 
 	if (unlikely(is_noslot_pfn(pfn)))
 		vcpu_cache_mmio_info(vcpu, gva, gfn, access);
 
-	ret = false;
-exit:
-	return ret;
+	return false;
 }
 
 static bool page_fault_can_be_fast(u32 error_code)
@@ -3273,7 +3319,7 @@
 	return __is_rsvd_bits_set(&mmu->shadow_zero_check, spte, level);
 }
 
-static bool quickly_check_mmio_pf(struct kvm_vcpu *vcpu, u64 addr, bool direct)
+static bool mmio_info_in_cache(struct kvm_vcpu *vcpu, u64 addr, bool direct)
 {
 	if (direct)
 		return vcpu_match_mmio_gpa(vcpu, addr);
@@ -3332,7 +3378,7 @@
 	u64 spte;
 	bool reserved;
 
-	if (quickly_check_mmio_pf(vcpu, addr, direct))
+	if (mmio_info_in_cache(vcpu, addr, direct))
 		return RET_MMIO_PF_EMULATE;
 
 	reserved = walk_shadow_page_get_mmio_spte(vcpu, addr, &spte);
@@ -3362,20 +3408,53 @@
 }
 EXPORT_SYMBOL_GPL(handle_mmio_page_fault);
 
+static bool page_fault_handle_page_track(struct kvm_vcpu *vcpu,
+					 u32 error_code, gfn_t gfn)
+{
+	if (unlikely(error_code & PFERR_RSVD_MASK))
+		return false;
+
+	if (!(error_code & PFERR_PRESENT_MASK) ||
+	      !(error_code & PFERR_WRITE_MASK))
+		return false;
+
+	/*
+	 * The guest is writing a page which is write-tracked, which cannot
+	 * be fixed up by the page fault handler.
+	 */
+	if (kvm_page_track_is_active(vcpu, gfn, KVM_PAGE_TRACK_WRITE))
+		return true;
+
+	return false;
+}
+
+static void shadow_page_table_clear_flood(struct kvm_vcpu *vcpu, gva_t addr)
+{
+	struct kvm_shadow_walk_iterator iterator;
+	u64 spte;
+
+	if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
+		return;
+
+	walk_shadow_page_lockless_begin(vcpu);
+	for_each_shadow_entry_lockless(vcpu, addr, iterator, spte) {
+		clear_sp_write_flooding_count(iterator.sptep);
+		if (!is_shadow_present_pte(spte))
+			break;
+	}
+	walk_shadow_page_lockless_end(vcpu);
+}
+
 static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gva_t gva,
 				u32 error_code, bool prefault)
 {
-	gfn_t gfn;
+	gfn_t gfn = gva >> PAGE_SHIFT;
 	int r;
 
 	pgprintk("%s: gva %lx error %x\n", __func__, gva, error_code);
 
-	if (unlikely(error_code & PFERR_RSVD_MASK)) {
-		r = handle_mmio_page_fault(vcpu, gva, true);
-
-		if (likely(r != RET_MMIO_PF_INVALID))
-			return r;
-	}
+	if (page_fault_handle_page_track(vcpu, error_code, gfn))
+		return 1;
 
 	r = mmu_topup_memory_caches(vcpu);
 	if (r)
@@ -3383,7 +3462,6 @@
 
 	MMU_WARN_ON(!VALID_PAGE(vcpu->arch.mmu.root_hpa));
 
-	gfn = gva >> PAGE_SHIFT;
 
 	return nonpaging_map(vcpu, gva & PAGE_MASK,
 			     error_code, gfn, prefault);
@@ -3460,12 +3538,8 @@
 
 	MMU_WARN_ON(!VALID_PAGE(vcpu->arch.mmu.root_hpa));
 
-	if (unlikely(error_code & PFERR_RSVD_MASK)) {
-		r = handle_mmio_page_fault(vcpu, gpa, true);
-
-		if (likely(r != RET_MMIO_PF_INVALID))
-			return r;
-	}
+	if (page_fault_handle_page_track(vcpu, error_code, gfn))
+		return 1;
 
 	r = mmu_topup_memory_caches(vcpu);
 	if (r)
@@ -3558,13 +3632,24 @@
 	return false;
 }
 
-static inline bool is_last_gpte(struct kvm_mmu *mmu, unsigned level, unsigned gpte)
+static inline bool is_last_gpte(struct kvm_mmu *mmu,
+				unsigned level, unsigned gpte)
 {
-	unsigned index;
+	/*
+	 * PT_PAGE_TABLE_LEVEL always terminates.  The RHS has bit 7 set
+	 * iff level <= PT_PAGE_TABLE_LEVEL, which for our purpose means
+	 * level == PT_PAGE_TABLE_LEVEL; set PT_PAGE_SIZE_MASK in gpte then.
+	 */
+	gpte |= level - PT_PAGE_TABLE_LEVEL - 1;
 
-	index = level - 1;
-	index |= (gpte & PT_PAGE_SIZE_MASK) >> (PT_PAGE_SIZE_SHIFT - 2);
-	return mmu->last_pte_bitmap & (1 << index);
+	/*
+	 * The RHS has bit 7 set iff level < mmu->last_nonleaf_level.
+	 * If it is clear, there are no large pages at this level, so clear
+	 * PT_PAGE_SIZE_MASK in gpte if that is the case.
+	 */
+	gpte &= level - mmu->last_nonleaf_level;
+
+	return gpte & PT_PAGE_SIZE_MASK;
 }
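
A standalone worked check of the bit trick in is_last_gpte() above, under the assumptions its comments state (PT_PAGE_TABLE_LEVEL == 1, PT_PAGE_SIZE_MASK == bit 7); the unsigned wrap-around of the two subtractions is what sets or clears bit 7 of the right-hand sides:

#include <stdio.h>

#define PT_PAGE_TABLE_LEVEL	1
#define PT_PAGE_SIZE_MASK	(1u << 7)

static int is_last_gpte(unsigned int last_nonleaf_level, unsigned int level,
			unsigned int gpte)
{
	/* level == 1: RHS wraps to ~0u, bit 7 set, so PS is forced on. */
	gpte |= level - PT_PAGE_TABLE_LEVEL - 1;
	/* level >= last_nonleaf_level: RHS is small, bit 7 clear, PS off. */
	gpte &= level - last_nonleaf_level;
	return !!(gpte & PT_PAGE_SIZE_MASK);
}

int main(void)
{
	/* PAE-like setup: last_nonleaf_level == 3, large pages at level 2. */
	printf("%d\n", is_last_gpte(3, 1, 0));			/* 1: 4K PTE */
	printf("%d\n", is_last_gpte(3, 2, PT_PAGE_SIZE_MASK));	/* 1: 2M page */
	printf("%d\n", is_last_gpte(3, 3, PT_PAGE_SIZE_MASK));	/* 0: no PS here */
	return 0;
}
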
 
 #define PTTYPE_EPT 18 /* arbitrary */
@@ -3838,22 +3923,13 @@
 	}
 }
 
-static void update_last_pte_bitmap(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu)
+static void update_last_nonleaf_level(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu)
 {
-	u8 map;
-	unsigned level, root_level = mmu->root_level;
-	const unsigned ps_set_index = 1 << 2;  /* bit 2 of index: ps */
+	unsigned root_level = mmu->root_level;
 
-	if (root_level == PT32E_ROOT_LEVEL)
-		--root_level;
-	/* PT_PAGE_TABLE_LEVEL always terminates */
-	map = 1 | (1 << ps_set_index);
-	for (level = PT_DIRECTORY_LEVEL; level <= root_level; ++level) {
-		if (level <= PT_PDPE_LEVEL
-		    && (mmu->root_level >= PT32E_ROOT_LEVEL || is_pse(vcpu)))
-			map |= 1 << (ps_set_index | (level - 1));
-	}
-	mmu->last_pte_bitmap = map;
+	mmu->last_nonleaf_level = root_level;
+	if (root_level == PT32_ROOT_LEVEL && is_pse(vcpu))
+		mmu->last_nonleaf_level++;
 }
 
 static void paging64_init_context_common(struct kvm_vcpu *vcpu,
@@ -3865,7 +3941,7 @@
 
 	reset_rsvds_bits_mask(vcpu, context);
 	update_permission_bitmask(vcpu, context, false);
-	update_last_pte_bitmap(vcpu, context);
+	update_last_nonleaf_level(vcpu, context);
 
 	MMU_WARN_ON(!is_pae(vcpu));
 	context->page_fault = paging64_page_fault;
@@ -3892,7 +3968,7 @@
 
 	reset_rsvds_bits_mask(vcpu, context);
 	update_permission_bitmask(vcpu, context, false);
-	update_last_pte_bitmap(vcpu, context);
+	update_last_nonleaf_level(vcpu, context);
 
 	context->page_fault = paging32_page_fault;
 	context->gva_to_gpa = paging32_gva_to_gpa;
@@ -3950,7 +4026,7 @@
 	}
 
 	update_permission_bitmask(vcpu, context, false);
-	update_last_pte_bitmap(vcpu, context);
+	update_last_nonleaf_level(vcpu, context);
 	reset_tdp_shadow_zero_bits_mask(vcpu, context);
 }
 
@@ -4056,7 +4132,7 @@
 	}
 
 	update_permission_bitmask(vcpu, g_context, false);
-	update_last_pte_bitmap(vcpu, g_context);
+	update_last_nonleaf_level(vcpu, g_context);
 }
 
 static void init_kvm_mmu(struct kvm_vcpu *vcpu)
@@ -4127,18 +4203,6 @@
 	return (old & ~new & PT64_PERM_MASK) != 0;
 }
 
-static void mmu_pte_write_flush_tlb(struct kvm_vcpu *vcpu, bool zap_page,
-				    bool remote_flush, bool local_flush)
-{
-	if (zap_page)
-		return;
-
-	if (remote_flush)
-		kvm_flush_remote_tlbs(vcpu->kvm);
-	else if (local_flush)
-		kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
-}
-
 static u64 mmu_pte_write_fetch_gpte(struct kvm_vcpu *vcpu, gpa_t *gpa,
 				    const u8 *new, int *bytes)
 {
@@ -4188,7 +4252,8 @@
 	if (sp->role.level == PT_PAGE_TABLE_LEVEL)
 		return false;
 
-	return ++sp->write_flooding_count >= 3;
+	atomic_inc(&sp->write_flooding_count);
+	return atomic_read(&sp->write_flooding_count) >= 3;
 }
 
 /*
@@ -4250,15 +4315,15 @@
 	return spte;
 }
 
-void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
-		       const u8 *new, int bytes)
+static void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
+			      const u8 *new, int bytes)
 {
 	gfn_t gfn = gpa >> PAGE_SHIFT;
 	struct kvm_mmu_page *sp;
 	LIST_HEAD(invalid_list);
 	u64 entry, gentry, *spte;
 	int npte;
-	bool remote_flush, local_flush, zap_page;
+	bool remote_flush, local_flush;
 	union kvm_mmu_page_role mask = { };
 
 	mask.cr0_wp = 1;
@@ -4275,7 +4340,7 @@
 	if (!ACCESS_ONCE(vcpu->kvm->arch.indirect_shadow_pages))
 		return;
 
-	zap_page = remote_flush = local_flush = false;
+	remote_flush = local_flush = false;
 
 	pgprintk("%s: gpa %llx bytes %d\n", __func__, gpa, bytes);
 
@@ -4295,8 +4360,7 @@
 	for_each_gfn_indirect_valid_sp(vcpu->kvm, sp, gfn) {
 		if (detect_write_misaligned(sp, gpa, bytes) ||
 		      detect_write_flooding(sp)) {
-			zap_page |= !!kvm_mmu_prepare_zap_page(vcpu->kvm, sp,
-						     &invalid_list);
+			kvm_mmu_prepare_zap_page(vcpu->kvm, sp, &invalid_list);
 			++vcpu->kvm->stat.mmu_flooded;
 			continue;
 		}
@@ -4318,8 +4382,7 @@
 			++spte;
 		}
 	}
-	mmu_pte_write_flush_tlb(vcpu, zap_page, remote_flush, local_flush);
-	kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);
+	kvm_mmu_flush_or_zap(vcpu, &invalid_list, remote_flush, local_flush);
 	kvm_mmu_audit(vcpu, AUDIT_POST_PTE_WRITE);
 	spin_unlock(&vcpu->kvm->mmu_lock);
 }
@@ -4356,32 +4419,34 @@
 	kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);
 }
 
-static bool is_mmio_page_fault(struct kvm_vcpu *vcpu, gva_t addr)
-{
-	if (vcpu->arch.mmu.direct_map || mmu_is_nested(vcpu))
-		return vcpu_match_mmio_gpa(vcpu, addr);
-
-	return vcpu_match_mmio_gva(vcpu, addr);
-}
-
 int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u32 error_code,
 		       void *insn, int insn_len)
 {
 	int r, emulation_type = EMULTYPE_RETRY;
 	enum emulation_result er;
+	bool direct = vcpu->arch.mmu.direct_map || mmu_is_nested(vcpu);
+
+	if (unlikely(error_code & PFERR_RSVD_MASK)) {
+		r = handle_mmio_page_fault(vcpu, cr2, direct);
+		if (r == RET_MMIO_PF_EMULATE) {
+			emulation_type = 0;
+			goto emulate;
+		}
+		if (r == RET_MMIO_PF_RETRY)
+			return 1;
+		if (r < 0)
+			return r;
+	}
 
 	r = vcpu->arch.mmu.page_fault(vcpu, cr2, error_code, false);
 	if (r < 0)
-		goto out;
+		return r;
+	if (!r)
+		return 1;
 
-	if (!r) {
-		r = 1;
-		goto out;
-	}
-
-	if (is_mmio_page_fault(vcpu, cr2))
+	if (mmio_info_in_cache(vcpu, cr2, direct))
 		emulation_type = 0;
-
+emulate:
 	er = x86_emulate_instruction(vcpu, cr2, emulation_type, insn, insn_len);
 
 	switch (er) {
@@ -4395,8 +4460,6 @@
 	default:
 		BUG();
 	}
-out:
-	return r;
 }
 EXPORT_SYMBOL_GPL(kvm_mmu_page_fault);
 
@@ -4465,6 +4528,21 @@
 	init_kvm_mmu(vcpu);
 }
 
+void kvm_mmu_init_vm(struct kvm *kvm)
+{
+	struct kvm_page_track_notifier_node *node = &kvm->arch.mmu_sp_tracker;
+
+	node->track_write = kvm_mmu_pte_write;
+	kvm_page_track_register_notifier(kvm, node);
+}
+
+void kvm_mmu_uninit_vm(struct kvm *kvm)
+{
+	struct kvm_page_track_notifier_node *node = &kvm->arch.mmu_sp_tracker;
+
+	kvm_page_track_unregister_notifier(kvm, node);
+}
+
 /* The return value indicates if tlb flush on all vcpus is needed. */
 typedef bool (*slot_level_handler) (struct kvm *kvm, struct kvm_rmap_head *rmap_head);
 
diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h
index 55ffb7b..58fe98a 100644
--- a/arch/x86/kvm/mmu.h
+++ b/arch/x86/kvm/mmu.h
@@ -174,4 +174,9 @@
 
 void kvm_mmu_invalidate_zap_all_pages(struct kvm *kvm);
 void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end);
+
+void kvm_mmu_gfn_disallow_lpage(struct kvm_memory_slot *slot, gfn_t gfn);
+void kvm_mmu_gfn_allow_lpage(struct kvm_memory_slot *slot, gfn_t gfn);
+bool kvm_mmu_slot_gfn_write_protect(struct kvm *kvm,
+				    struct kvm_memory_slot *slot, u64 gfn);
 #endif
diff --git a/arch/x86/kvm/page_track.c b/arch/x86/kvm/page_track.c
new file mode 100644
index 0000000..11f7643
--- /dev/null
+++ b/arch/x86/kvm/page_track.c
@@ -0,0 +1,222 @@
+/*
+ * Support KVM guest page tracking
+ *
+ * This feature allows us to track page access in the guest. Currently,
+ * only write access is tracked.
+ *
+ * Copyright(C) 2015 Intel Corporation.
+ *
+ * Author:
+ *   Xiao Guangrong <guangrong.xiao@linux.intel.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2.  See
+ * the COPYING file in the top-level directory.
+ */
+
+#include <linux/kvm_host.h>
+#include <asm/kvm_host.h>
+#include <asm/kvm_page_track.h>
+
+#include "mmu.h"
+
+void kvm_page_track_free_memslot(struct kvm_memory_slot *free,
+				 struct kvm_memory_slot *dont)
+{
+	int i;
+
+	for (i = 0; i < KVM_PAGE_TRACK_MAX; i++)
+		if (!dont || free->arch.gfn_track[i] !=
+		      dont->arch.gfn_track[i]) {
+			kvfree(free->arch.gfn_track[i]);
+			free->arch.gfn_track[i] = NULL;
+		}
+}
+
+int kvm_page_track_create_memslot(struct kvm_memory_slot *slot,
+				  unsigned long npages)
+{
+	int i;
+
+	for (i = 0; i < KVM_PAGE_TRACK_MAX; i++) {
+		slot->arch.gfn_track[i] = kvm_kvzalloc(npages *
+					    sizeof(*slot->arch.gfn_track[i]));
+		if (!slot->arch.gfn_track[i])
+			goto track_free;
+	}
+
+	return 0;
+
+track_free:
+	kvm_page_track_free_memslot(slot, NULL);
+	return -ENOMEM;
+}
+
+static inline bool page_track_mode_is_valid(enum kvm_page_track_mode mode)
+{
+	if (mode < 0 || mode >= KVM_PAGE_TRACK_MAX)
+		return false;
+
+	return true;
+}
+
+static void update_gfn_track(struct kvm_memory_slot *slot, gfn_t gfn,
+			     enum kvm_page_track_mode mode, short count)
+{
+	int index, val;
+
+	index = gfn_to_index(gfn, slot->base_gfn, PT_PAGE_TABLE_LEVEL);
+
+	val = slot->arch.gfn_track[mode][index];
+
+	if (WARN_ON(val + count < 0 || val + count > USHRT_MAX))
+		return;
+
+	slot->arch.gfn_track[mode][index] += count;
+}
+
+/*
+ * Add a guest page to the tracking pool so that the corresponding access on
+ * that page will be intercepted.
+ *
+ * It should be called under the protection both of mmu-lock and kvm->srcu
+ * or kvm->slots_lock.
+ *
+ * @kvm: the guest instance we are interested in.
+ * @slot: the memory slot that @gfn belongs to.
+ * @gfn: the guest page.
+ * @mode: tracking mode; currently only write tracking is supported.
+ */
+void kvm_slot_page_track_add_page(struct kvm *kvm,
+				  struct kvm_memory_slot *slot, gfn_t gfn,
+				  enum kvm_page_track_mode mode)
+{
+	if (WARN_ON(!page_track_mode_is_valid(mode)))
+		return;
+
+	update_gfn_track(slot, gfn, mode, 1);
+
+	/*
+	 * A new tracker stops large page mappings for the
+	 * tracked page.
+	 */
+	kvm_mmu_gfn_disallow_lpage(slot, gfn);
+
+	if (mode == KVM_PAGE_TRACK_WRITE)
+		if (kvm_mmu_slot_gfn_write_protect(kvm, slot, gfn))
+			kvm_flush_remote_tlbs(kvm);
+}
+
+/*
+ * Remove the guest page from the tracking pool, which stops the interception
+ * of the corresponding access on that page. It is the opposite operation of
+ * kvm_slot_page_track_add_page().
+ *
+ * It should be called under the protection both of mmu-lock and kvm->srcu
+ * or kvm->slots_lock.
+ *
+ * @kvm: the guest instance we are interested in.
+ * @slot: the memory slot that @gfn belongs to.
+ * @gfn: the guest page.
+ * @mode: tracking mode; currently only write tracking is supported.
+ */
+void kvm_slot_page_track_remove_page(struct kvm *kvm,
+				     struct kvm_memory_slot *slot, gfn_t gfn,
+				     enum kvm_page_track_mode mode)
+{
+	if (WARN_ON(!page_track_mode_is_valid(mode)))
+		return;
+
+	update_gfn_track(slot, gfn, mode, -1);
+
+	/*
+	 * allow large page mapping for the tracked page
+	 * after the tracker is gone.
+	 */
+	kvm_mmu_gfn_allow_lpage(slot, gfn);
+}
+
+/*
+ * check if the corresponding access on the specified guest page is tracked.
+ */
+bool kvm_page_track_is_active(struct kvm_vcpu *vcpu, gfn_t gfn,
+			      enum kvm_page_track_mode mode)
+{
+	struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
+	int index = gfn_to_index(gfn, slot->base_gfn, PT_PAGE_TABLE_LEVEL);
+
+	if (WARN_ON(!page_track_mode_is_valid(mode)))
+		return false;
+
+	return !!ACCESS_ONCE(slot->arch.gfn_track[mode][index]);
+}
+
+void kvm_page_track_init(struct kvm *kvm)
+{
+	struct kvm_page_track_notifier_head *head;
+
+	head = &kvm->arch.track_notifier_head;
+	init_srcu_struct(&head->track_srcu);
+	INIT_HLIST_HEAD(&head->track_notifier_list);
+}
+
+/*
+ * register the notifier so that event interception for the tracked guest
+ * pages can be received.
+ */
+void
+kvm_page_track_register_notifier(struct kvm *kvm,
+				 struct kvm_page_track_notifier_node *n)
+{
+	struct kvm_page_track_notifier_head *head;
+
+	head = &kvm->arch.track_notifier_head;
+
+	spin_lock(&kvm->mmu_lock);
+	hlist_add_head_rcu(&n->node, &head->track_notifier_list);
+	spin_unlock(&kvm->mmu_lock);
+}
+
+/*
+ * Stop receiving the event interception. It is the opposite operation of
+ * kvm_page_track_register_notifier().
+ */
+void
+kvm_page_track_unregister_notifier(struct kvm *kvm,
+				   struct kvm_page_track_notifier_node *n)
+{
+	struct kvm_page_track_notifier_head *head;
+
+	head = &kvm->arch.track_notifier_head;
+
+	spin_lock(&kvm->mmu_lock);
+	hlist_del_rcu(&n->node);
+	spin_unlock(&kvm->mmu_lock);
+	synchronize_srcu(&head->track_srcu);
+}
+
+/*
+ * Notify the node that write access is intercepted and write emulation is
+ * finished at this time.
+ *
+ * The node should figure out by itself whether the written page is one
+ * it is interested in.
+ */
+void kvm_page_track_write(struct kvm_vcpu *vcpu, gpa_t gpa, const u8 *new,
+			  int bytes)
+{
+	struct kvm_page_track_notifier_head *head;
+	struct kvm_page_track_notifier_node *n;
+	int idx;
+
+	head = &vcpu->kvm->arch.track_notifier_head;
+
+	if (hlist_empty(&head->track_notifier_list))
+		return;
+
+	idx = srcu_read_lock(&head->track_srcu);
+	hlist_for_each_entry_rcu(n, &head->track_notifier_list, node)
+		if (n->track_write)
+			n->track_write(vcpu, gpa, new, bytes);
+	srcu_read_unlock(&head->track_srcu, idx);
+}
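
A hypothetical sketch of how an in-kernel consumer could use this API; the demo_* names are illustrative only, and the locking follows the comments above (the add/remove helpers expect mmu_lock plus kvm->srcu or kvm->slots_lock, while the register/unregister helpers take mmu_lock themselves):

#include <linux/kvm_host.h>
#include <asm/kvm_page_track.h>

static void demo_track_write(struct kvm_vcpu *vcpu, gpa_t gpa,
			     const u8 *new, int bytes)
{
	/* Runs after write emulation finishes; filter on gpa as needed. */
	pr_info("tracked write: gpa 0x%llx, %d bytes\n",
		(unsigned long long)gpa, bytes);
}

static struct kvm_page_track_notifier_node demo_node = {
	.track_write	= demo_track_write,
};

/* Caller is assumed to hold kvm->srcu or kvm->slots_lock. */
static void demo_start_tracking(struct kvm *kvm, struct kvm_memory_slot *slot,
				gfn_t gfn)
{
	kvm_page_track_register_notifier(kvm, &demo_node);

	spin_lock(&kvm->mmu_lock);
	kvm_slot_page_track_add_page(kvm, slot, gfn, KVM_PAGE_TRACK_WRITE);
	spin_unlock(&kvm->mmu_lock);
}
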
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index 2ce4f05..e159a81 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -189,8 +189,11 @@
 		((gpte & VMX_EPT_EXECUTABLE_MASK) ? ACC_EXEC_MASK : 0) |
 		ACC_USER_MASK;
 #else
-	access = (gpte & (PT_WRITABLE_MASK | PT_USER_MASK)) | ACC_EXEC_MASK;
-	access &= ~(gpte >> PT64_NX_SHIFT);
+	BUILD_BUG_ON(ACC_EXEC_MASK != PT_PRESENT_MASK);
+	BUILD_BUG_ON(ACC_EXEC_MASK != 1);
+	access = gpte & (PT_WRITABLE_MASK | PT_USER_MASK | PT_PRESENT_MASK);
+	/* Combine NX with P (which is set here) to get ACC_EXEC_MASK.  */
+	access ^= (gpte >> PT64_NX_SHIFT);
 #endif
 
 	return access;
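
A standalone check of the P/NX combination above, assuming the usual x86 PTE layout (P = bit 0, W = bit 1, U = bit 2, NX = bit 63) and ACC_EXEC_MASK == PT_PRESENT_MASK == 1, which the BUILD_BUG_ONs assert:

#include <stdio.h>
#include <stdint.h>

#define PT_PRESENT_MASK		(1ULL << 0)
#define PT_WRITABLE_MASK	(1ULL << 1)
#define PT_USER_MASK		(1ULL << 2)
#define PT64_NX_SHIFT		63
#define ACC_EXEC_MASK		1u

static unsigned int gpte_access(uint64_t gpte)
{
	unsigned int access = (unsigned int)(gpte & (PT_WRITABLE_MASK |
				PT_USER_MASK | PT_PRESENT_MASK));
	/* P is set in any present gpte, so P ^ NX == 1 iff NX is clear. */
	access ^= (unsigned int)(gpte >> PT64_NX_SHIFT);
	return access;
}

int main(void)
{
	uint64_t rw_nx = PT_PRESENT_MASK | PT_WRITABLE_MASK |
			 (1ULL << PT64_NX_SHIFT);
	uint64_t rx    = PT_PRESENT_MASK | PT_USER_MASK;

	printf("exec(rw_nx)=%u exec(rx)=%u\n",
	       gpte_access(rw_nx) & ACC_EXEC_MASK,	/* 0: NX set */
	       gpte_access(rx) & ACC_EXEC_MASK);	/* 1: NX clear */
	return 0;
}
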
@@ -702,24 +705,17 @@
 
 	pgprintk("%s: addr %lx err %x\n", __func__, addr, error_code);
 
-	if (unlikely(error_code & PFERR_RSVD_MASK)) {
-		r = handle_mmio_page_fault(vcpu, addr, mmu_is_nested(vcpu));
-		if (likely(r != RET_MMIO_PF_INVALID))
-			return r;
-
-		/*
-		 * page fault with PFEC.RSVD  = 1 is caused by shadow
-		 * page fault, should not be used to walk guest page
-		 * table.
-		 */
-		error_code &= ~PFERR_RSVD_MASK;
-	};
-
 	r = mmu_topup_memory_caches(vcpu);
 	if (r)
 		return r;
 
 	/*
+	 * If PFEC.RSVD is set, this is a shadow page fault.
+	 * The bit needs to be cleared before walking guest page tables.
+	 */
+	error_code &= ~PFERR_RSVD_MASK;
+
+	/*
 	 * Look up the guest pte for the faulting address.
 	 */
 	r = FNAME(walk_addr)(&walker, vcpu, addr, error_code);
@@ -735,6 +731,11 @@
 		return 0;
 	}
 
+	if (page_fault_handle_page_track(vcpu, error_code, walker.gfn)) {
+		shadow_page_table_clear_flood(vcpu, addr);
+		return 1;
+	}
+
 	vcpu->arch.write_fault_to_shadow_pgtable = false;
 
 	is_self_change_mapping = FNAME(is_self_change_mapping)(vcpu,
@@ -945,7 +946,7 @@
 
 		if (kvm_vcpu_read_guest_atomic(vcpu, pte_gpa, &gpte,
 					       sizeof(pt_element_t)))
-			return -EINVAL;
+			return 0;
 
 		if (FNAME(prefetch_invalid_gpte)(vcpu, sp, &sp->spt[i], gpte)) {
 			vcpu->kvm->tlbs_dirty++;
@@ -977,7 +978,7 @@
 			 host_writable);
 	}
 
-	return !nr_present;
+	return nr_present;
 }
 
 #undef pt_element_t
diff --git a/arch/x86/kvm/pmu.c b/arch/x86/kvm/pmu.c
index 31aa2c8..06ce377 100644
--- a/arch/x86/kvm/pmu.c
+++ b/arch/x86/kvm/pmu.c
@@ -257,7 +257,7 @@
 
 void kvm_pmu_deliver_pmi(struct kvm_vcpu *vcpu)
 {
-	if (vcpu->arch.apic)
+	if (lapic_in_kernel(vcpu))
 		kvm_apic_local_deliver(vcpu->arch.apic, APIC_LVTPC);
 }
 
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index c13a64b..9507038 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -1858,8 +1858,7 @@
 static int vmmcall_interception(struct vcpu_svm *svm)
 {
 	svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
-	kvm_emulate_hypercall(&svm->vcpu);
-	return 1;
+	return kvm_emulate_hypercall(&svm->vcpu);
 }
 
 static unsigned long nested_svm_get_tdp_cr3(struct kvm_vcpu *vcpu)
diff --git a/arch/x86/kvm/trace.h b/arch/x86/kvm/trace.h
index ad9f6a2..2f1ea2f 100644
--- a/arch/x86/kvm/trace.h
+++ b/arch/x86/kvm/trace.h
@@ -996,11 +996,13 @@
  * Tracepoint for VT-d posted-interrupts.
  */
 TRACE_EVENT(kvm_pi_irte_update,
-	TP_PROTO(unsigned int vcpu_id, unsigned int gsi,
-		 unsigned int gvec, u64 pi_desc_addr, bool set),
-	TP_ARGS(vcpu_id, gsi, gvec, pi_desc_addr, set),
+	TP_PROTO(unsigned int host_irq, unsigned int vcpu_id,
+		 unsigned int gsi, unsigned int gvec,
+		 u64 pi_desc_addr, bool set),
+	TP_ARGS(host_irq, vcpu_id, gsi, gvec, pi_desc_addr, set),
 
 	TP_STRUCT__entry(
+		__field(	unsigned int,	host_irq	)
 		__field(	unsigned int,	vcpu_id		)
 		__field(	unsigned int,	gsi		)
 		__field(	unsigned int,	gvec		)
@@ -1009,6 +1011,7 @@
 	),
 
 	TP_fast_assign(
+		__entry->host_irq	= host_irq;
 		__entry->vcpu_id	= vcpu_id;
 		__entry->gsi		= gsi;
 		__entry->gvec		= gvec;
@@ -1016,9 +1019,10 @@
 		__entry->set		= set;
 	),
 
-	TP_printk("VT-d PI is %s for this irq, vcpu %u, gsi: 0x%x, "
+	TP_printk("VT-d PI is %s for irq %u, vcpu %u, gsi: 0x%x, "
 		  "gvec: 0x%x, pi_desc_addr: 0x%llx",
 		  __entry->set ? "enabled and being updated" : "disabled",
+		  __entry->host_irq,
 		  __entry->vcpu_id,
 		  __entry->gsi,
 		  __entry->gvec,
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 9bd8f44..5e45c27 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -863,7 +863,6 @@
 static u64 construct_eptp(unsigned long root_hpa);
 static void kvm_cpu_vmxon(u64 addr);
 static void kvm_cpu_vmxoff(void);
-static bool vmx_mpx_supported(void);
 static bool vmx_xsaves_supported(void);
 static int vmx_set_tss_addr(struct kvm *kvm, unsigned int addr);
 static void vmx_set_segment(struct kvm_vcpu *vcpu,
@@ -963,25 +962,36 @@
 	MSR_EFER, MSR_TSC_AUX, MSR_STAR,
 };
 
-static inline bool is_page_fault(u32 intr_info)
+static inline bool is_exception_n(u32 intr_info, u8 vector)
 {
 	return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK |
 			     INTR_INFO_VALID_MASK)) ==
-		(INTR_TYPE_HARD_EXCEPTION | PF_VECTOR | INTR_INFO_VALID_MASK);
+		(INTR_TYPE_HARD_EXCEPTION | vector | INTR_INFO_VALID_MASK);
+}
+
+static inline bool is_debug(u32 intr_info)
+{
+	return is_exception_n(intr_info, DB_VECTOR);
+}
+
+static inline bool is_breakpoint(u32 intr_info)
+{
+	return is_exception_n(intr_info, BP_VECTOR);
+}
+
+static inline bool is_page_fault(u32 intr_info)
+{
+	return is_exception_n(intr_info, PF_VECTOR);
 }
 
 static inline bool is_no_device(u32 intr_info)
 {
-	return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK |
-			     INTR_INFO_VALID_MASK)) ==
-		(INTR_TYPE_HARD_EXCEPTION | NM_VECTOR | INTR_INFO_VALID_MASK);
+	return is_exception_n(intr_info, NM_VECTOR);
 }
 
 static inline bool is_invalid_opcode(u32 intr_info)
 {
-	return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK |
-			     INTR_INFO_VALID_MASK)) ==
-		(INTR_TYPE_HARD_EXCEPTION | UD_VECTOR | INTR_INFO_VALID_MASK);
+	return is_exception_n(intr_info, UD_VECTOR);
 }
 
 static inline bool is_external_interrupt(u32 intr_info)
@@ -2605,7 +2615,7 @@
 		VM_EXIT_LOAD_IA32_EFER | VM_EXIT_SAVE_IA32_EFER |
 		VM_EXIT_SAVE_VMX_PREEMPTION_TIMER | VM_EXIT_ACK_INTR_ON_EXIT;
 
-	if (vmx_mpx_supported())
+	if (kvm_mpx_supported())
 		vmx->nested.nested_vmx_exit_ctls_high |= VM_EXIT_CLEAR_BNDCFGS;
 
 	/* We support free control of debug control saving. */
@@ -2626,7 +2636,7 @@
 		VM_ENTRY_LOAD_IA32_PAT;
 	vmx->nested.nested_vmx_entry_ctls_high |=
 		(VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR | VM_ENTRY_LOAD_IA32_EFER);
-	if (vmx_mpx_supported())
+	if (kvm_mpx_supported())
 		vmx->nested.nested_vmx_entry_ctls_high |= VM_ENTRY_LOAD_BNDCFGS;
 
 	/* We support free control of debug control loading. */
@@ -2870,7 +2880,7 @@
 		msr_info->data = vmcs_readl(GUEST_SYSENTER_ESP);
 		break;
 	case MSR_IA32_BNDCFGS:
-		if (!vmx_mpx_supported())
+		if (!kvm_mpx_supported())
 			return 1;
 		msr_info->data = vmcs_read64(GUEST_BNDCFGS);
 		break;
@@ -2947,7 +2957,7 @@
 		vmcs_writel(GUEST_SYSENTER_ESP, data);
 		break;
 	case MSR_IA32_BNDCFGS:
-		if (!vmx_mpx_supported())
+		if (!kvm_mpx_supported())
 			return 1;
 		vmcs_write64(GUEST_BNDCFGS, data);
 		break;
@@ -3420,7 +3430,7 @@
 	for (i = j = 0; i < max_shadow_read_write_fields; i++) {
 		switch (shadow_read_write_fields[i]) {
 		case GUEST_BNDCFGS:
-			if (!vmx_mpx_supported())
+			if (!kvm_mpx_supported())
 				continue;
 			break;
 		default:
@@ -5629,11 +5639,8 @@
 	}
 
 	if (vcpu->guest_debug == 0) {
-		u32 cpu_based_vm_exec_control;
-
-		cpu_based_vm_exec_control = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL);
-		cpu_based_vm_exec_control &= ~CPU_BASED_MOV_DR_EXITING;
-		vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control);
+		vmcs_clear_bits(CPU_BASED_VM_EXEC_CONTROL,
+				CPU_BASED_MOV_DR_EXITING);
 
 		/*
 		 * No more DR vmexits; force a reload of the debug registers
@@ -5670,8 +5677,6 @@
 
 static void vmx_sync_dirty_debug_regs(struct kvm_vcpu *vcpu)
 {
-	u32 cpu_based_vm_exec_control;
-
 	get_debugreg(vcpu->arch.db[0], 0);
 	get_debugreg(vcpu->arch.db[1], 1);
 	get_debugreg(vcpu->arch.db[2], 2);
@@ -5680,10 +5685,7 @@
 	vcpu->arch.dr7 = vmcs_readl(GUEST_DR7);
 
 	vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_WONT_EXIT;
-
-	cpu_based_vm_exec_control = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL);
-	cpu_based_vm_exec_control |= CPU_BASED_MOV_DR_EXITING;
-	vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control);
+	vmcs_set_bits(CPU_BASED_VM_EXEC_CONTROL, CPU_BASED_MOV_DR_EXITING);
 }
 
 static void vmx_set_dr7(struct kvm_vcpu *vcpu, unsigned long val)
@@ -5768,8 +5770,7 @@
 
 static int handle_vmcall(struct kvm_vcpu *vcpu)
 {
-	kvm_emulate_hypercall(vcpu);
-	return 1;
+	return kvm_emulate_hypercall(vcpu);
 }
 
 static int handle_invd(struct kvm_vcpu *vcpu)
@@ -6456,8 +6457,8 @@
 
 	if (vmx->nested.vmcs02_num >= max(VMCS02_POOL_SIZE, 1)) {
 		/* Recycle the least recently used VMCS. */
-		item = list_entry(vmx->nested.vmcs02_pool.prev,
-			struct vmcs02_list, list);
+		item = list_last_entry(&vmx->nested.vmcs02_pool,
+				       struct vmcs02_list, list);
 		item->vmptr = vmx->nested.current_vmptr;
 		list_move(&item->list, &vmx->nested.vmcs02_pool);
 		return &item->vmcs02;
@@ -7773,6 +7774,13 @@
 		else if (is_no_device(intr_info) &&
 			 !(vmcs12->guest_cr0 & X86_CR0_TS))
 			return false;
+		else if (is_debug(intr_info) &&
+			 vcpu->guest_debug &
+			 (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP))
+			return false;
+		else if (is_breakpoint(intr_info) &&
+			 vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP)
+			return false;
 		return vmcs12->exception_bitmap &
 				(1u << (intr_info & INTR_INFO_VECTOR_MASK));
 	case EXIT_REASON_EXTERNAL_INTERRUPT:
@@ -10277,7 +10285,7 @@
 	vmcs12->guest_sysenter_cs = vmcs_read32(GUEST_SYSENTER_CS);
 	vmcs12->guest_sysenter_esp = vmcs_readl(GUEST_SYSENTER_ESP);
 	vmcs12->guest_sysenter_eip = vmcs_readl(GUEST_SYSENTER_EIP);
-	if (vmx_mpx_supported())
+	if (kvm_mpx_supported())
 		vmcs12->guest_bndcfgs = vmcs_read64(GUEST_BNDCFGS);
 	if (nested_cpu_has_xsaves(vmcs12))
 		vmcs12->xss_exit_bitmap = vmcs_read64(XSS_EXIT_BITMAP);
@@ -10785,13 +10793,26 @@
 		 */
 
 		kvm_set_msi_irq(e, &irq);
-		if (!kvm_intr_is_single_vcpu(kvm, &irq, &vcpu))
+		if (!kvm_intr_is_single_vcpu(kvm, &irq, &vcpu)) {
+			/*
+			 * Make sure the IRTE is in remapped mode if
+			 * we don't handle it in posted mode.
+			 */
+			ret = irq_set_vcpu_affinity(host_irq, NULL);
+			if (ret < 0) {
+				printk(KERN_INFO
+				   "failed to fall back to remapped mode, irq: %u\n",
+				   host_irq);
+				goto out;
+			}
+
 			continue;
+		}
 
 		vcpu_info.pi_desc_addr = __pa(vcpu_to_pi_desc(vcpu));
 		vcpu_info.vector = irq.vector;
 
-		trace_kvm_pi_irte_update(vcpu->vcpu_id, e->gsi,
+		trace_kvm_pi_irte_update(host_irq, vcpu->vcpu_id, e->gsi,

 				vcpu_info.vector, vcpu_info.pi_desc_addr, set);
 
 		if (set)
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index eaf6ee8..7236bd3 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -123,6 +123,9 @@
 unsigned int __read_mostly lapic_timer_advance_ns = 0;
 module_param(lapic_timer_advance_ns, uint, S_IRUGO | S_IWUSR);
 
+static bool __read_mostly vector_hashing = true;
+module_param(vector_hashing, bool, S_IRUGO);
+
 static bool __read_mostly backwards_tsc_observed = false;
 
 #define KVM_NR_SHARED_MSRS 16
@@ -1196,17 +1199,11 @@
 
 static uint32_t div_frac(uint32_t dividend, uint32_t divisor)
 {
-	uint32_t quotient, remainder;
-
-	/* Don't try to replace with do_div(), this one calculates
-	 * "(dividend << 32) / divisor" */
-	__asm__ ( "divl %4"
-		  : "=a" (quotient), "=d" (remainder)
-		  : "0" (0), "1" (dividend), "r" (divisor) );
-	return quotient;
+	do_shl32_div32(dividend, divisor);
+	return dividend;
 }
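
do_shl32_div32() is assumed here to compute "(dividend << 32) / divisor" in place, matching the inline asm it replaces, so div_frac() returns the 0.32 fixed-point fraction dividend/divisor. A portable standalone sketch:

#include <stdio.h>
#include <stdint.h>

static uint32_t div_frac(uint32_t dividend, uint32_t divisor)
{
	/* 0.32 fixed-point: (dividend << 32) / divisor. */
	return (uint32_t)(((uint64_t)dividend << 32) / divisor);
}

int main(void)
{
	uint32_t third = div_frac(1, 3);	/* ~0x55555555 */

	/* Scaling 3,000,000 by 1/3 in fixed point: ~1,000,000 (999999). */
	printf("mult=0x%x scaled=%llu\n", (unsigned int)third,
	       (unsigned long long)(((uint64_t)3000000 * third) >> 32));
	return 0;
}
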
 
-static void kvm_get_time_scale(uint32_t scaled_khz, uint32_t base_khz,
+static void kvm_get_time_scale(uint64_t scaled_hz, uint64_t base_hz,
 			       s8 *pshift, u32 *pmultiplier)
 {
 	uint64_t scaled64;
@@ -1214,8 +1211,8 @@
 	uint64_t tps64;
 	uint32_t tps32;
 
-	tps64 = base_khz * 1000LL;
-	scaled64 = scaled_khz * 1000LL;
+	tps64 = base_hz;
+	scaled64 = scaled_hz;
 	while (tps64 > scaled64*2 || tps64 & 0xffffffff00000000ULL) {
 		tps64 >>= 1;
 		shift--;
@@ -1233,8 +1230,8 @@
 	*pshift = shift;
 	*pmultiplier = div_frac(scaled64, tps32);
 
-	pr_debug("%s: base_khz %u => %u, shift %d, mul %u\n",
-		 __func__, base_khz, scaled_khz, shift, *pmultiplier);
+	pr_debug("%s: base_hz %llu => %llu, shift %d, mul %u\n",
+		 __func__, base_hz, scaled_hz, shift, *pmultiplier);
 }
 
 #ifdef CONFIG_X86_64
@@ -1293,23 +1290,23 @@
 	return 0;
 }
 
-static int kvm_set_tsc_khz(struct kvm_vcpu *vcpu, u32 this_tsc_khz)
+static int kvm_set_tsc_khz(struct kvm_vcpu *vcpu, u32 user_tsc_khz)
 {
 	u32 thresh_lo, thresh_hi;
 	int use_scaling = 0;
 
 	/* tsc_khz can be zero if TSC calibration fails */
-	if (this_tsc_khz == 0) {
+	if (user_tsc_khz == 0) {
 		/* set tsc_scaling_ratio to a safe value */
 		vcpu->arch.tsc_scaling_ratio = kvm_default_tsc_scaling_ratio;
 		return -1;
 	}
 
 	/* Compute a scale to convert nanoseconds in TSC cycles */
-	kvm_get_time_scale(this_tsc_khz, NSEC_PER_SEC / 1000,
+	kvm_get_time_scale(user_tsc_khz * 1000LL, NSEC_PER_SEC,
 			   &vcpu->arch.virtual_tsc_shift,
 			   &vcpu->arch.virtual_tsc_mult);
-	vcpu->arch.virtual_tsc_khz = this_tsc_khz;
+	vcpu->arch.virtual_tsc_khz = user_tsc_khz;
 
 	/*
 	 * Compute the variation in TSC rate which is acceptable
@@ -1319,11 +1316,11 @@
 	 */
 	thresh_lo = adjust_tsc_khz(tsc_khz, -tsc_tolerance_ppm);
 	thresh_hi = adjust_tsc_khz(tsc_khz, tsc_tolerance_ppm);
-	if (this_tsc_khz < thresh_lo || this_tsc_khz > thresh_hi) {
-		pr_debug("kvm: requested TSC rate %u falls outside tolerance [%u,%u]\n", this_tsc_khz, thresh_lo, thresh_hi);
+	if (user_tsc_khz < thresh_lo || user_tsc_khz > thresh_hi) {
+		pr_debug("kvm: requested TSC rate %u falls outside tolerance [%u,%u]\n", user_tsc_khz, thresh_lo, thresh_hi);
 		use_scaling = 1;
 	}
-	return set_tsc_khz(vcpu, this_tsc_khz, use_scaling);
+	return set_tsc_khz(vcpu, user_tsc_khz, use_scaling);
 }
 
 static u64 compute_guest_tsc(struct kvm_vcpu *vcpu, s64 kernel_ns)
@@ -1716,7 +1713,7 @@
 
 static int kvm_guest_time_update(struct kvm_vcpu *v)
 {
-	unsigned long flags, this_tsc_khz, tgt_tsc_khz;
+	unsigned long flags, tgt_tsc_khz;
 	struct kvm_vcpu_arch *vcpu = &v->arch;
 	struct kvm_arch *ka = &v->kvm->arch;
 	s64 kernel_ns;
@@ -1742,8 +1739,8 @@
 
 	/* Keep irq disabled to prevent changes to the clock */
 	local_irq_save(flags);
-	this_tsc_khz = __this_cpu_read(cpu_tsc_khz);
-	if (unlikely(this_tsc_khz == 0)) {
+	tgt_tsc_khz = __this_cpu_read(cpu_tsc_khz);
+	if (unlikely(tgt_tsc_khz == 0)) {
 		local_irq_restore(flags);
 		kvm_make_request(KVM_REQ_CLOCK_UPDATE, v);
 		return 1;
@@ -1778,13 +1775,14 @@
 	if (!vcpu->pv_time_enabled)
 		return 0;
 
-	if (unlikely(vcpu->hw_tsc_khz != this_tsc_khz)) {
-		tgt_tsc_khz = kvm_has_tsc_control ?
-			vcpu->virtual_tsc_khz : this_tsc_khz;
-		kvm_get_time_scale(NSEC_PER_SEC / 1000, tgt_tsc_khz,
+	if (kvm_has_tsc_control)
+		tgt_tsc_khz = kvm_scale_tsc(v, tgt_tsc_khz);
+
+	if (unlikely(vcpu->hw_tsc_khz != tgt_tsc_khz)) {
+		kvm_get_time_scale(NSEC_PER_SEC, tgt_tsc_khz * 1000LL,
 				   &vcpu->hv_clock.tsc_shift,
 				   &vcpu->hv_clock.tsc_to_system_mul);
-		vcpu->hw_tsc_khz = this_tsc_khz;
+		vcpu->hw_tsc_khz = tgt_tsc_khz;
 	}
 
 	/* With all the info we got, fill in the values */
@@ -2987,7 +2985,7 @@
 	kvm_x86_ops->set_nmi_mask(vcpu, events->nmi.masked);
 
 	if (events->flags & KVM_VCPUEVENT_VALID_SIPI_VECTOR &&
-	    kvm_vcpu_has_lapic(vcpu))
+	    lapic_in_kernel(vcpu))
 		vcpu->arch.apic->sipi_vector = events->sipi_vector;
 
 	if (events->flags & KVM_VCPUEVENT_VALID_SMM) {
@@ -3000,7 +2998,7 @@
 			vcpu->arch.hflags |= HF_SMM_INSIDE_NMI_MASK;
 		else
 			vcpu->arch.hflags &= ~HF_SMM_INSIDE_NMI_MASK;
-		if (kvm_vcpu_has_lapic(vcpu)) {
+		if (lapic_in_kernel(vcpu)) {
 			if (events->smi.latched_init)
 				set_bit(KVM_APIC_INIT, &vcpu->arch.apic->pending_events);
 			else
@@ -3240,7 +3238,7 @@
 	switch (ioctl) {
 	case KVM_GET_LAPIC: {
 		r = -EINVAL;
-		if (!vcpu->arch.apic)
+		if (!lapic_in_kernel(vcpu))
 			goto out;
 		u.lapic = kzalloc(sizeof(struct kvm_lapic_state), GFP_KERNEL);
 
@@ -3258,7 +3256,7 @@
 	}
 	case KVM_SET_LAPIC: {
 		r = -EINVAL;
-		if (!vcpu->arch.apic)
+		if (!lapic_in_kernel(vcpu))
 			goto out;
 		u.lapic = memdup_user(argp, sizeof(*u.lapic));
 		if (IS_ERR(u.lapic))
@@ -3605,20 +3603,26 @@
 
 static int kvm_vm_ioctl_get_pit(struct kvm *kvm, struct kvm_pit_state *ps)
 {
-	mutex_lock(&kvm->arch.vpit->pit_state.lock);
-	memcpy(ps, &kvm->arch.vpit->pit_state, sizeof(struct kvm_pit_state));
-	mutex_unlock(&kvm->arch.vpit->pit_state.lock);
+	struct kvm_kpit_state *kps = &kvm->arch.vpit->pit_state;
+
+	BUILD_BUG_ON(sizeof(*ps) != sizeof(kps->channels));
+
+	mutex_lock(&kps->lock);
+	memcpy(ps, &kps->channels, sizeof(*ps));
+	mutex_unlock(&kps->lock);
 	return 0;
 }
 
 static int kvm_vm_ioctl_set_pit(struct kvm *kvm, struct kvm_pit_state *ps)
 {
 	int i;
-	mutex_lock(&kvm->arch.vpit->pit_state.lock);
-	memcpy(&kvm->arch.vpit->pit_state, ps, sizeof(struct kvm_pit_state));
+	struct kvm_pit *pit = kvm->arch.vpit;
+
+	mutex_lock(&pit->pit_state.lock);
+	memcpy(&pit->pit_state.channels, ps, sizeof(*ps));
 	for (i = 0; i < 3; i++)
-		kvm_pit_load_count(kvm, i, ps->channels[i].count, 0);
-	mutex_unlock(&kvm->arch.vpit->pit_state.lock);
+		kvm_pit_load_count(pit, i, ps->channels[i].count, 0);
+	mutex_unlock(&pit->pit_state.lock);
 	return 0;
 }
 
@@ -3638,29 +3642,39 @@
 	int start = 0;
 	int i;
 	u32 prev_legacy, cur_legacy;
-	mutex_lock(&kvm->arch.vpit->pit_state.lock);
-	prev_legacy = kvm->arch.vpit->pit_state.flags & KVM_PIT_FLAGS_HPET_LEGACY;
+	struct kvm_pit *pit = kvm->arch.vpit;
+
+	mutex_lock(&pit->pit_state.lock);
+	prev_legacy = pit->pit_state.flags & KVM_PIT_FLAGS_HPET_LEGACY;
 	cur_legacy = ps->flags & KVM_PIT_FLAGS_HPET_LEGACY;
 	if (!prev_legacy && cur_legacy)
 		start = 1;
-	memcpy(&kvm->arch.vpit->pit_state.channels, &ps->channels,
-	       sizeof(kvm->arch.vpit->pit_state.channels));
-	kvm->arch.vpit->pit_state.flags = ps->flags;
+	memcpy(&pit->pit_state.channels, &ps->channels,
+	       sizeof(pit->pit_state.channels));
+	pit->pit_state.flags = ps->flags;
 	for (i = 0; i < 3; i++)
-		kvm_pit_load_count(kvm, i, kvm->arch.vpit->pit_state.channels[i].count,
+		kvm_pit_load_count(pit, i, pit->pit_state.channels[i].count,
 				   start && i == 0);
-	mutex_unlock(&kvm->arch.vpit->pit_state.lock);
+	mutex_unlock(&pit->pit_state.lock);
 	return 0;
 }
 
 static int kvm_vm_ioctl_reinject(struct kvm *kvm,
 				 struct kvm_reinject_control *control)
 {
-	if (!kvm->arch.vpit)
+	struct kvm_pit *pit = kvm->arch.vpit;
+
+	if (!pit)
 		return -ENXIO;
-	mutex_lock(&kvm->arch.vpit->pit_state.lock);
-	kvm->arch.vpit->pit_state.reinject = control->pit_reinject;
-	mutex_unlock(&kvm->arch.vpit->pit_state.lock);
+
+	/* pit->pit_state.lock was overloaded to prevent userspace from getting
+	 * an inconsistent state after running multiple KVM_REINJECT_CONTROL
+	 * ioctls in parallel.  Use a separate lock if that ioctl isn't rare.
+	 */
+	mutex_lock(&pit->pit_state.lock);
+	kvm_pit_set_reinject(pit, control->pit_reinject);
+	mutex_unlock(&pit->pit_state.lock);
+
 	return 0;
 }
 
@@ -4093,7 +4107,7 @@
 
 	do {
 		n = min(len, 8);
-		if (!(vcpu->arch.apic &&
+		if (!(lapic_in_kernel(vcpu) &&
 		      !kvm_iodevice_write(vcpu, &vcpu->arch.apic->dev, addr, n, v))
 		    && kvm_io_bus_write(vcpu, KVM_MMIO_BUS, addr, n, v))
 			break;
@@ -4113,7 +4127,7 @@
 
 	do {
 		n = min(len, 8);
-		if (!(vcpu->arch.apic &&
+		if (!(lapic_in_kernel(vcpu) &&
 		      !kvm_iodevice_read(vcpu, &vcpu->arch.apic->dev,
 					 addr, n, v))
 		    && kvm_io_bus_read(vcpu, KVM_MMIO_BUS, addr, n, v))
@@ -4346,7 +4360,7 @@
 	ret = kvm_vcpu_write_guest(vcpu, gpa, val, bytes);
 	if (ret < 0)
 		return 0;
-	kvm_mmu_pte_write(vcpu, gpa, val, bytes);
+	kvm_page_track_write(vcpu, gpa, val, bytes);
 	return 1;
 }
 
@@ -4604,7 +4618,7 @@
 		return X86EMUL_CMPXCHG_FAILED;
 
 	kvm_vcpu_mark_page_dirty(vcpu, gpa >> PAGE_SHIFT);
-	kvm_mmu_pte_write(vcpu, gpa, new, bytes);
+	kvm_page_track_write(vcpu, gpa, new, bytes);
 
 	return X86EMUL_CONTINUE;
 
@@ -6010,7 +6024,7 @@
 	if (!kvm_x86_ops->update_cr8_intercept)
 		return;
 
-	if (!vcpu->arch.apic)
+	if (!lapic_in_kernel(vcpu))
 		return;
 
 	if (vcpu->arch.apicv_active)
@@ -7038,7 +7052,7 @@
 int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
 				    struct kvm_mp_state *mp_state)
 {
-	if (!kvm_vcpu_has_lapic(vcpu) &&
+	if (!lapic_in_kernel(vcpu) &&
 	    mp_state->mp_state != KVM_MP_STATE_RUNNABLE)
 		return -EINVAL;
 
@@ -7314,7 +7328,7 @@
 	 * Every 255 times fpu_counter rolls over to 0; a guest that uses
 	 * the FPU in bursts will revert to loading it on demand.
 	 */
-	if (!vcpu->arch.eager_fpu) {
+	if (!use_eager_fpu()) {
 		if (++vcpu->fpu_counter < 5)
 			kvm_make_request(KVM_REQ_DEACTIVATE_FPU, vcpu);
 	}
@@ -7593,6 +7607,7 @@
 }
 
 struct static_key kvm_no_apic_vcpu __read_mostly;
+EXPORT_SYMBOL_GPL(kvm_no_apic_vcpu);
 
 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
 {
@@ -7724,6 +7739,9 @@
 	INIT_DELAYED_WORK(&kvm->arch.kvmclock_update_work, kvmclock_update_fn);
 	INIT_DELAYED_WORK(&kvm->arch.kvmclock_sync_work, kvmclock_sync_fn);
 
+	kvm_page_track_init(kvm);
+	kvm_mmu_init_vm(kvm);
+
 	return 0;
 }
 
@@ -7850,6 +7868,7 @@
 	kfree(kvm->arch.vioapic);
 	kvm_free_vcpus(kvm);
 	kfree(rcu_dereference_check(kvm->arch.apic_map, 1));
+	kvm_mmu_uninit_vm(kvm);
 }
 
 void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
@@ -7871,6 +7890,8 @@
 			free->arch.lpage_info[i - 1] = NULL;
 		}
 	}
+
+	kvm_page_track_free_memslot(free, dont);
 }
 
 int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
@@ -7879,6 +7900,7 @@
 	int i;
 
 	for (i = 0; i < KVM_NR_PAGE_SIZES; ++i) {
+		struct kvm_lpage_info *linfo;
 		unsigned long ugfn;
 		int lpages;
 		int level = i + 1;
@@ -7893,15 +7915,16 @@
 		if (i == 0)
 			continue;
 
-		slot->arch.lpage_info[i - 1] = kvm_kvzalloc(lpages *
-					sizeof(*slot->arch.lpage_info[i - 1]));
-		if (!slot->arch.lpage_info[i - 1])
+		linfo = kvm_kvzalloc(lpages * sizeof(*linfo));
+		if (!linfo)
 			goto out_free;
 
+		slot->arch.lpage_info[i - 1] = linfo;
+
 		if (slot->base_gfn & (KVM_PAGES_PER_HPAGE(level) - 1))
-			slot->arch.lpage_info[i - 1][0].write_count = 1;
+			linfo[0].disallow_lpage = 1;
 		if ((slot->base_gfn + npages) & (KVM_PAGES_PER_HPAGE(level) - 1))
-			slot->arch.lpage_info[i - 1][lpages - 1].write_count = 1;
+			linfo[lpages - 1].disallow_lpage = 1;
 		ugfn = slot->userspace_addr >> PAGE_SHIFT;
 		/*
 		 * If the gfn and userspace address are not aligned wrt each
@@ -7913,10 +7936,13 @@
 			unsigned long j;
 
 			for (j = 0; j < lpages; ++j)
-				slot->arch.lpage_info[i - 1][j].write_count = 1;
+				linfo[j].disallow_lpage = 1;
 		}
 	}
 
+	if (kvm_page_track_create_memslot(slot, npages))
+		goto out_free;
+
 	return 0;
 
 out_free:
@@ -8370,6 +8396,12 @@
 	return kvm_x86_ops->update_pi_irte(kvm, host_irq, guest_irq, set);
 }
 
+bool kvm_vector_hashing_enabled(void)
+{
+	return vector_hashing;
+}
+EXPORT_SYMBOL_GPL(kvm_vector_hashing_enabled);
+
 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_exit);
 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_fast_mmio);
 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_inj_virq);
diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h
index f2afa5f..007940f 100644
--- a/arch/x86/kvm/x86.h
+++ b/arch/x86/kvm/x86.h
@@ -179,6 +179,7 @@
 int kvm_mtrr_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata);
 bool kvm_mtrr_check_gfn_range_consistency(struct kvm_vcpu *vcpu, gfn_t gfn,
 					  int page_num);
+bool kvm_vector_hashing_enabled(void);
 
 #define KVM_SUPPORTED_XCR0     (XFEATURE_MASK_FP | XFEATURE_MASK_SSE \
 				| XFEATURE_MASK_YMM | XFEATURE_MASK_BNDREGS \
@@ -192,4 +193,19 @@
 extern unsigned int lapic_timer_advance_ns;
 
 extern struct static_key kvm_no_apic_vcpu;
+
+/* Same "calling convention" as do_div:
+ * - divide (n << 32) by base
+ * - put result in n
+ * - return remainder
+ */
+#define do_shl32_div32(n, base)					\
+	({							\
+	    u32 __quot, __rem;					\
+	    asm("divl %2" : "=a" (__quot), "=d" (__rem)		\
+			: "rm" (base), "0" (0), "1" ((u32) n));	\
+	    n = __quot;						\
+	    __rem;						\
+	 })
+
 #endif
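
For reference, a small userspace sketch of what the new do_shl32_div32() macro computes (plain portable C instead of the x86 divl; the example values are arbitrary). As with divl, n must be smaller than base or the 32-bit quotient would overflow:

#include <stdint.h>
#include <stdio.h>

/*
 * Illustrative model only: divide (n << 32) by base, store the 32-bit
 * quotient back in n and return the remainder.  The kernel macro does the
 * same thing with a single divl instruction.
 */
static uint32_t shl32_div32(uint32_t *n, uint32_t base)
{
	uint64_t dividend = (uint64_t)*n << 32;

	*n = (uint32_t)(dividend / base);
	return (uint32_t)(dividend % base);
}

int main(void)
{
	uint32_t n = 3, rem;

	rem = shl32_div32(&n, 7);			/* (3 << 32) / 7 */
	printf("quotient=%u remainder=%u\n", n, rem);	/* 1840700269, 5 */
	return 0;
}
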
diff --git a/arch/x86/lib/clear_page_64.S b/arch/x86/lib/clear_page_64.S
index a2fe51b..65be7cf 100644
--- a/arch/x86/lib/clear_page_64.S
+++ b/arch/x86/lib/clear_page_64.S
@@ -1,5 +1,5 @@
 #include <linux/linkage.h>
-#include <asm/cpufeature.h>
+#include <asm/cpufeatures.h>
 #include <asm/alternative-asm.h>
 
 /*
diff --git a/arch/x86/lib/cmdline.c b/arch/x86/lib/cmdline.c
index 422db00..5cc78bf 100644
--- a/arch/x86/lib/cmdline.c
+++ b/arch/x86/lib/cmdline.c
@@ -21,12 +21,16 @@
  * @option: option string to look for
  *
  * Returns the position of that @option (starts counting with 1)
- * or 0 on not found.
+ * or 0 if not found.  @option only matches when it appears as an
+ * entire word in @cmdline.  For instance, if @option="car"
+ * then a cmdline which contains "cart" will not match.
  */
-int cmdline_find_option_bool(const char *cmdline, const char *option)
+static int
+__cmdline_find_option_bool(const char *cmdline, int max_cmdline_size,
+			   const char *option)
 {
 	char c;
-	int len, pos = 0, wstart = 0;
+	int pos = 0, wstart = 0;
 	const char *opptr = NULL;
 	enum {
 		st_wordstart = 0,	/* Start of word/after whitespace */
@@ -37,11 +41,11 @@
 	if (!cmdline)
 		return -1;      /* No command line */
 
-	len = min_t(int, strlen(cmdline), COMMAND_LINE_SIZE);
-	if (!len)
-		return 0;
-
-	while (len--) {
+	/*
+	 * This 'pos' check ensures we do not overrun
+	 * a non-NULL-terminated 'cmdline'
+	 */
+	while (pos < max_cmdline_size) {
 		c = *(char *)cmdline++;
 		pos++;
 
@@ -58,18 +62,35 @@
 			/* fall through */
 
 		case st_wordcmp:
-			if (!*opptr)
+			if (!*opptr) {
+				/*
+				 * We matched all the way to the end of the
+				 * option we were looking for.  If the
+				 * command-line has a space _or_ ends, then
+				 * we matched!
+				 */
 				if (!c || myisspace(c))
 					return wstart;
-				else
-					state = st_wordskip;
-			else if (!c)
+				/*
+				 * We hit the end of the option, but _not_
+				 * the end of a word on the cmdline.  Not
+				 * a match.
+				 */
+			} else if (!c) {
+				/*
+				 * Hit the NULL terminator on the end of
+				 * cmdline.
+				 */
 				return 0;
-			else if (c != *opptr++)
-				state = st_wordskip;
-			else if (!len)		/* last word and is matching */
-				return wstart;
-			break;
+			} else if (c == *opptr++) {
+				/*
+				 * We are currently matching, so continue
+				 * to the next character on the cmdline.
+				 */
+				break;
+			}
+			state = st_wordskip;
+			/* fall through */
 
 		case st_wordskip:
 			if (!c)
@@ -82,3 +103,8 @@
 
 	return 0;	/* Buffer overrun */
 }
+
+int cmdline_find_option_bool(const char *cmdline, const char *option)
+{
+	return __cmdline_find_option_bool(cmdline, COMMAND_LINE_SIZE, option);
+}
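
The whole-word rule described in the comment above can be illustrated with a simplified userspace model (this is not the kernel's state machine, only treats ' ' as whitespace, and the sample command lines are arbitrary):

#include <stdio.h>
#include <string.h>

/*
 * An option only matches when it is delimited by whitespace or by the end
 * of the string; the return value is the 1-based character position of the
 * matching word, 0 if there is no match.
 */
static int find_option_bool(const char *cmdline, const char *option)
{
	size_t optlen = strlen(option);
	int pos = 1;

	while (*cmdline) {
		while (*cmdline == ' ') {		/* skip leading whitespace */
			cmdline++;
			pos++;
		}
		if (!*cmdline)
			break;
		if (!strncmp(cmdline, option, optlen) &&
		    (cmdline[optlen] == '\0' || cmdline[optlen] == ' '))
			return pos;			/* start of the matching word */
		while (*cmdline && *cmdline != ' ') {	/* skip the rest of this word */
			cmdline++;
			pos++;
		}
	}
	return 0;
}

int main(void)
{
	printf("%d\n", find_option_bool("quiet cart car", "car"));	/* 12: whole word */
	printf("%d\n", find_option_bool("quiet cart", "car"));		/* 0: "cart" is not "car" */
	return 0;
}
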
diff --git a/arch/x86/lib/copy_page_64.S b/arch/x86/lib/copy_page_64.S
index 009f982..24ef1c2 100644
--- a/arch/x86/lib/copy_page_64.S
+++ b/arch/x86/lib/copy_page_64.S
@@ -1,7 +1,7 @@
 /* Written 2003 by Andi Kleen, based on a kernel by Evandro Menezes */
 
 #include <linux/linkage.h>
-#include <asm/cpufeature.h>
+#include <asm/cpufeatures.h>
 #include <asm/alternative-asm.h>
 
 /*
diff --git a/arch/x86/lib/copy_user_64.S b/arch/x86/lib/copy_user_64.S
index 27f89c7..2b0ef26 100644
--- a/arch/x86/lib/copy_user_64.S
+++ b/arch/x86/lib/copy_user_64.S
@@ -10,7 +10,7 @@
 #include <asm/current.h>
 #include <asm/asm-offsets.h>
 #include <asm/thread_info.h>
-#include <asm/cpufeature.h>
+#include <asm/cpufeatures.h>
 #include <asm/alternative-asm.h>
 #include <asm/asm.h>
 #include <asm/smap.h>
diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S
index 7d37641..cbb8ee5 100644
--- a/arch/x86/lib/memcpy_64.S
+++ b/arch/x86/lib/memcpy_64.S
@@ -1,7 +1,7 @@
 /* Copyright 2002 Andi Kleen */
 
 #include <linux/linkage.h>
-#include <asm/cpufeature.h>
+#include <asm/cpufeatures.h>
 #include <asm/alternative-asm.h>
 
 /*
diff --git a/arch/x86/lib/memmove_64.S b/arch/x86/lib/memmove_64.S
index ca2afdd..90ce01b 100644
--- a/arch/x86/lib/memmove_64.S
+++ b/arch/x86/lib/memmove_64.S
@@ -6,7 +6,7 @@
  *	- Copyright 2011 Fenghua Yu <fenghua.yu@intel.com>
  */
 #include <linux/linkage.h>
-#include <asm/cpufeature.h>
+#include <asm/cpufeatures.h>
 #include <asm/alternative-asm.h>
 
 #undef memmove
diff --git a/arch/x86/lib/memset_64.S b/arch/x86/lib/memset_64.S
index 2661fad..c9c8122 100644
--- a/arch/x86/lib/memset_64.S
+++ b/arch/x86/lib/memset_64.S
@@ -1,7 +1,7 @@
 /* Copyright 2002 Andi Kleen, SuSE Labs */
 
 #include <linux/linkage.h>
-#include <asm/cpufeature.h>
+#include <asm/cpufeatures.h>
 #include <asm/alternative-asm.h>
 
 .weak memset
diff --git a/arch/x86/mm/dump_pagetables.c b/arch/x86/mm/dump_pagetables.c
index 4a6f1d9..99bfb19 100644
--- a/arch/x86/mm/dump_pagetables.c
+++ b/arch/x86/mm/dump_pagetables.c
@@ -358,20 +358,19 @@
 #define pgd_none(a)  pud_none(__pud(pgd_val(a)))
 #endif
 
-#ifdef CONFIG_X86_64
 static inline bool is_hypervisor_range(int idx)
 {
+#ifdef CONFIG_X86_64
 	/*
 	 * ffff800000000000 - ffff87ffffffffff is reserved for
 	 * the hypervisor.
 	 */
-	return paravirt_enabled() &&
-		(idx >= pgd_index(__PAGE_OFFSET) - 16) &&
-		(idx < pgd_index(__PAGE_OFFSET));
-}
+	return	(idx >= pgd_index(__PAGE_OFFSET) - 16) &&
+		(idx <  pgd_index(__PAGE_OFFSET));
 #else
-static inline bool is_hypervisor_range(int idx) { return false; }
+	return false;
 #endif
+}
 
 static void ptdump_walk_pgd_level_core(struct seq_file *m, pgd_t *pgd,
 				       bool checkwx)
diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
index 2ebfbaf..bd7a9b9 100644
--- a/arch/x86/mm/init_32.c
+++ b/arch/x86/mm/init_32.c
@@ -388,7 +388,6 @@
 }
 
 pte_t *kmap_pte;
-pgprot_t kmap_prot;
 
 static inline pte_t *kmap_get_fixmap_pte(unsigned long vaddr)
 {
@@ -405,8 +404,6 @@
 	 */
 	kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN);
 	kmap_pte = kmap_get_fixmap_pte(kmap_vstart);
-
-	kmap_prot = PAGE_KERNEL;
 }
 
 #ifdef CONFIG_HIGHMEM
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index a40b755..214afda 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -53,6 +53,7 @@
 #include <asm/numa.h>
 #include <asm/cacheflush.h>
 #include <asm/init.h>
+#include <asm/uv/uv.h>
 #include <asm/setup.h>
 
 #include "mm_internal.h"
@@ -1203,26 +1204,13 @@
 
 static unsigned long probe_memory_block_size(void)
 {
-	/* start from 2g */
-	unsigned long bz = 1UL<<31;
+	unsigned long bz = MIN_MEMORY_BLOCK_SIZE;
 
-	if (totalram_pages >= (64ULL << (30 - PAGE_SHIFT))) {
-		pr_info("Using 2GB memory block size for large-memory system\n");
-		return 2UL * 1024 * 1024 * 1024;
-	}
+	/* if system is UV or has 64GB of RAM or more, use large blocks */
+	if (is_uv_system() || ((max_pfn << PAGE_SHIFT) >= (64UL << 30)))
+		bz = 2UL << 30; /* 2GB */
 
-	/* less than 64g installed */
-	if ((max_pfn << PAGE_SHIFT) < (16UL << 32))
-		return MIN_MEMORY_BLOCK_SIZE;
-
-	/* get the tail size */
-	while (bz > MIN_MEMORY_BLOCK_SIZE) {
-		if (!((max_pfn << PAGE_SHIFT) & (bz - 1)))
-			break;
-		bz >>= 1;
-	}
-
-	printk(KERN_DEBUG "memory block size : %ldMB\n", bz >> 20);
+	pr_info("x86/mm: Memory block size: %ldMB\n", bz >> 20);
 
 	return bz;
 }
diff --git a/arch/x86/mm/kasan_init_64.c b/arch/x86/mm/kasan_init_64.c
index d470cf2..1b1110f 100644
--- a/arch/x86/mm/kasan_init_64.c
+++ b/arch/x86/mm/kasan_init_64.c
@@ -120,11 +120,22 @@
 	kasan_populate_zero_shadow(kasan_mem_to_shadow((void *)MODULES_END),
 			(void *)KASAN_SHADOW_END);
 
-	memset(kasan_zero_page, 0, PAGE_SIZE);
-
 	load_cr3(init_level4_pgt);
 	__flush_tlb_all();
-	init_task.kasan_depth = 0;
 
+	/*
+	 * kasan_zero_page has been used as early shadow memory, thus it may
+	 * contain some garbage. Now we can clear and write protect it, since
+	 * after the TLB flush no one should write to it.
+	 */
+	memset(kasan_zero_page, 0, PAGE_SIZE);
+	for (i = 0; i < PTRS_PER_PTE; i++) {
+		pte_t pte = __pte(__pa(kasan_zero_page) | __PAGE_KERNEL_RO);
+		set_pte(&kasan_zero_pte[i], pte);
+	}
+	/* Flush TLBs again to be sure that write protection applied. */
+	__flush_tlb_all();
+
+	init_task.kasan_depth = 0;
 	pr_info("KernelAddressSanitizer initialized\n");
 }
diff --git a/arch/x86/mm/kmmio.c b/arch/x86/mm/kmmio.c
index 637ab34..ddb2244 100644
--- a/arch/x86/mm/kmmio.c
+++ b/arch/x86/mm/kmmio.c
@@ -33,7 +33,7 @@
 struct kmmio_fault_page {
 	struct list_head list;
 	struct kmmio_fault_page *release_next;
-	unsigned long page; /* location of the fault page */
+	unsigned long addr; /* the requested address */
 	pteval_t old_presence; /* page presence prior to arming */
 	bool armed;
 
@@ -70,9 +70,16 @@
 static struct list_head kmmio_page_table[KMMIO_PAGE_TABLE_SIZE];
 static LIST_HEAD(kmmio_probes);
 
-static struct list_head *kmmio_page_list(unsigned long page)
+static struct list_head *kmmio_page_list(unsigned long addr)
 {
-	return &kmmio_page_table[hash_long(page, KMMIO_PAGE_HASH_BITS)];
+	unsigned int l;
+	pte_t *pte = lookup_address(addr, &l);
+
+	if (!pte)
+		return NULL;
+	addr &= page_level_mask(l);
+
+	return &kmmio_page_table[hash_long(addr, KMMIO_PAGE_HASH_BITS)];
 }
 
 /* Accessed per-cpu */
@@ -98,15 +105,19 @@
 }
 
 /* You must be holding RCU read lock. */
-static struct kmmio_fault_page *get_kmmio_fault_page(unsigned long page)
+static struct kmmio_fault_page *get_kmmio_fault_page(unsigned long addr)
 {
 	struct list_head *head;
 	struct kmmio_fault_page *f;
+	unsigned int l;
+	pte_t *pte = lookup_address(addr, &l);
 
-	page &= PAGE_MASK;
-	head = kmmio_page_list(page);
+	if (!pte)
+		return NULL;
+	addr &= page_level_mask(l);
+	head = kmmio_page_list(addr);
 	list_for_each_entry_rcu(f, head, list) {
-		if (f->page == page)
+		if (f->addr == addr)
 			return f;
 	}
 	return NULL;
@@ -137,10 +148,10 @@
 static int clear_page_presence(struct kmmio_fault_page *f, bool clear)
 {
 	unsigned int level;
-	pte_t *pte = lookup_address(f->page, &level);
+	pte_t *pte = lookup_address(f->addr, &level);
 
 	if (!pte) {
-		pr_err("no pte for page 0x%08lx\n", f->page);
+		pr_err("no pte for addr 0x%08lx\n", f->addr);
 		return -1;
 	}
 
@@ -156,7 +167,7 @@
 		return -1;
 	}
 
-	__flush_tlb_one(f->page);
+	__flush_tlb_one(f->addr);
 	return 0;
 }
 
@@ -176,12 +187,12 @@
 	int ret;
 	WARN_ONCE(f->armed, KERN_ERR pr_fmt("kmmio page already armed.\n"));
 	if (f->armed) {
-		pr_warning("double-arm: page 0x%08lx, ref %d, old %d\n",
-			   f->page, f->count, !!f->old_presence);
+		pr_warning("double-arm: addr 0x%08lx, ref %d, old %d\n",
+			   f->addr, f->count, !!f->old_presence);
 	}
 	ret = clear_page_presence(f, true);
-	WARN_ONCE(ret < 0, KERN_ERR pr_fmt("arming 0x%08lx failed.\n"),
-		  f->page);
+	WARN_ONCE(ret < 0, KERN_ERR pr_fmt("arming at 0x%08lx failed.\n"),
+		  f->addr);
 	f->armed = true;
 	return ret;
 }
@@ -191,7 +202,7 @@
 {
 	int ret = clear_page_presence(f, false);
 	WARN_ONCE(ret < 0,
-			KERN_ERR "kmmio disarming 0x%08lx failed.\n", f->page);
+			KERN_ERR "kmmio disarming at 0x%08lx failed.\n", f->addr);
 	f->armed = false;
 }
 
@@ -215,6 +226,12 @@
 	struct kmmio_context *ctx;
 	struct kmmio_fault_page *faultpage;
 	int ret = 0; /* default to fault not handled */
+	unsigned long page_base = addr;
+	unsigned int l;
+	pte_t *pte = lookup_address(addr, &l);
+	if (!pte)
+		return -EINVAL;
+	page_base &= page_level_mask(l);
 
 	/*
 	 * Preemption is now disabled to prevent process switch during
@@ -227,7 +244,7 @@
 	preempt_disable();
 	rcu_read_lock();
 
-	faultpage = get_kmmio_fault_page(addr);
+	faultpage = get_kmmio_fault_page(page_base);
 	if (!faultpage) {
 		/*
 		 * Either this page fault is not caused by kmmio, or
@@ -239,7 +256,7 @@
 
 	ctx = &get_cpu_var(kmmio_ctx);
 	if (ctx->active) {
-		if (addr == ctx->addr) {
+		if (page_base == ctx->addr) {
 			/*
 			 * A second fault on the same page means some other
 			 * condition needs handling by do_page_fault(), the
@@ -267,9 +284,9 @@
 	ctx->active++;
 
 	ctx->fpage = faultpage;
-	ctx->probe = get_kmmio_probe(addr);
+	ctx->probe = get_kmmio_probe(page_base);
 	ctx->saved_flags = (regs->flags & (X86_EFLAGS_TF | X86_EFLAGS_IF));
-	ctx->addr = addr;
+	ctx->addr = page_base;
 
 	if (ctx->probe && ctx->probe->pre_handler)
 		ctx->probe->pre_handler(ctx->probe, regs, addr);
@@ -354,12 +371,11 @@
 }
 
 /* You must be holding kmmio_lock. */
-static int add_kmmio_fault_page(unsigned long page)
+static int add_kmmio_fault_page(unsigned long addr)
 {
 	struct kmmio_fault_page *f;
 
-	page &= PAGE_MASK;
-	f = get_kmmio_fault_page(page);
+	f = get_kmmio_fault_page(addr);
 	if (f) {
 		if (!f->count)
 			arm_kmmio_fault_page(f);
@@ -372,26 +388,25 @@
 		return -1;
 
 	f->count = 1;
-	f->page = page;
+	f->addr = addr;
 
 	if (arm_kmmio_fault_page(f)) {
 		kfree(f);
 		return -1;
 	}
 
-	list_add_rcu(&f->list, kmmio_page_list(f->page));
+	list_add_rcu(&f->list, kmmio_page_list(f->addr));
 
 	return 0;
 }
 
 /* You must be holding kmmio_lock. */
-static void release_kmmio_fault_page(unsigned long page,
+static void release_kmmio_fault_page(unsigned long addr,
 				struct kmmio_fault_page **release_list)
 {
 	struct kmmio_fault_page *f;
 
-	page &= PAGE_MASK;
-	f = get_kmmio_fault_page(page);
+	f = get_kmmio_fault_page(addr);
 	if (!f)
 		return;
 
@@ -420,18 +435,27 @@
 	int ret = 0;
 	unsigned long size = 0;
 	const unsigned long size_lim = p->len + (p->addr & ~PAGE_MASK);
+	unsigned int l;
+	pte_t *pte;
 
 	spin_lock_irqsave(&kmmio_lock, flags);
 	if (get_kmmio_probe(p->addr)) {
 		ret = -EEXIST;
 		goto out;
 	}
+
+	pte = lookup_address(p->addr, &l);
+	if (!pte) {
+		ret = -EINVAL;
+		goto out;
+	}
+
 	kmmio_count++;
 	list_add_rcu(&p->list, &kmmio_probes);
 	while (size < size_lim) {
 		if (add_kmmio_fault_page(p->addr + size))
 			pr_err("Unable to set page fault.\n");
-		size += PAGE_SIZE;
+		size += page_level_size(l);
 	}
 out:
 	spin_unlock_irqrestore(&kmmio_lock, flags);
@@ -506,11 +530,17 @@
 	const unsigned long size_lim = p->len + (p->addr & ~PAGE_MASK);
 	struct kmmio_fault_page *release_list = NULL;
 	struct kmmio_delayed_release *drelease;
+	unsigned int l;
+	pte_t *pte;
+
+	pte = lookup_address(p->addr, &l);
+	if (!pte)
+		return;
 
 	spin_lock_irqsave(&kmmio_lock, flags);
 	while (size < size_lim) {
 		release_kmmio_fault_page(p->addr + size, &release_list);
-		size += PAGE_SIZE;
+		size += page_level_size(l);
 	}
 	list_del_rcu(&p->list);
 	kmmio_count--;
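
The kmmio changes above key fault pages by the base of the mapping (4K, 2M or 1G) rather than assuming PAGE_SIZE. A short standalone sketch of that rounding, using the usual x86 page sizes and an arbitrary sample address (page_level_mask() in the kernel produces the same ~(size - 1) mask):

#include <stdio.h>

int main(void)
{
	unsigned long long addr = 0xffffc90000123456ULL;
	unsigned long long sizes[] = { 1ULL << 12, 1ULL << 21, 1ULL << 30 };	/* 4K, 2M, 1G */
	int i;

	for (i = 0; i < 3; i++) {
		unsigned long long mask = ~(sizes[i] - 1);	/* page_level_mask() equivalent */

		printf("size %#llx -> base %#llx\n", sizes[i], addr & mask);
	}
	return 0;
}
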
diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c
index 72bb52f..d2dc043 100644
--- a/arch/x86/mm/mmap.c
+++ b/arch/x86/mm/mmap.c
@@ -94,18 +94,6 @@
 }
 
 /*
- * Bottom-up (legacy) layout on X86_32 did not support randomization, X86_64
- * does, but not when emulating X86_32
- */
-static unsigned long mmap_legacy_base(unsigned long rnd)
-{
-	if (mmap_is_ia32())
-		return TASK_UNMAPPED_BASE;
-	else
-		return TASK_UNMAPPED_BASE + rnd;
-}
-
-/*
  * This function, called very early during the creation of a new
  * process VM image, sets up which VM layout function to use:
  */
@@ -116,7 +104,7 @@
 	if (current->flags & PF_RANDOMIZE)
 		random_factor = arch_mmap_rnd();
 
-	mm->mmap_legacy_base = mmap_legacy_base(random_factor);
+	mm->mmap_legacy_base = TASK_UNMAPPED_BASE + random_factor;
 
 	if (mmap_is_legacy()) {
 		mm->mmap_base = mm->mmap_legacy_base;
diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c
index d04f809..f70c1ff 100644
--- a/arch/x86/mm/numa.c
+++ b/arch/x86/mm/numa.c
@@ -465,46 +465,67 @@
 	return true;
 }
 
+/*
+ * Mark all currently memblock-reserved physical memory (which covers the
+ * kernel's own memory ranges) as hot-unpluggable.
+ */
 static void __init numa_clear_kernel_node_hotplug(void)
 {
-	int i, nid;
-	nodemask_t numa_kernel_nodes = NODE_MASK_NONE;
-	phys_addr_t start, end;
-	struct memblock_region *r;
+	nodemask_t reserved_nodemask = NODE_MASK_NONE;
+	struct memblock_region *mb_region;
+	int i;
 
 	/*
+	 * We have to do some preprocessing of memblock regions, to
+	 * make them suitable for reservation.
+	 *
 	 * At this time, all memory regions reserved by memblock are
-	 * used by the kernel. Set the nid in memblock.reserved will
-	 * mark out all the nodes the kernel resides in.
+	 * used by the kernel, but those regions are not split up
+	 * along node boundaries yet, and don't necessarily have their
+	 * node ID set yet either.
+	 *
+	 * So iterate over all memory known to the x86 architecture,
+	 * and use those ranges to set the nid in memblock.reserved.
+	 * This will split up the memblock regions along node
+	 * boundaries and will set the node IDs as well.
 	 */
 	for (i = 0; i < numa_meminfo.nr_blks; i++) {
-		struct numa_memblk *mb = &numa_meminfo.blk[i];
+		struct numa_memblk *mb = numa_meminfo.blk + i;
+		int ret;
 
-		memblock_set_node(mb->start, mb->end - mb->start,
-				  &memblock.reserved, mb->nid);
+		ret = memblock_set_node(mb->start, mb->end - mb->start, &memblock.reserved, mb->nid);
+		WARN_ON_ONCE(ret);
 	}
 
 	/*
-	 * Mark all kernel nodes.
+	 * Now go over all reserved memblock regions, to construct a
+	 * node mask of all kernel reserved memory areas.
 	 *
-	 * When booting with mem=nn[kMG] or in a kdump kernel, numa_meminfo
-	 * may not include all the memblock.reserved memory ranges because
-	 * trim_snb_memory() reserves specific pages for Sandy Bridge graphics.
+	 * [ Note, when booting with mem=nn[kMG] or in a kdump kernel,
+	 *   numa_meminfo might not include all memblock.reserved
+	 *   memory ranges, because quirks such as trim_snb_memory()
+	 *   reserve specific pages for Sandy Bridge graphics. ]
 	 */
-	for_each_memblock(reserved, r)
-		if (r->nid != MAX_NUMNODES)
-			node_set(r->nid, numa_kernel_nodes);
+	for_each_memblock(reserved, mb_region) {
+		if (mb_region->nid != MAX_NUMNODES)
+			node_set(mb_region->nid, reserved_nodemask);
+	}
 
-	/* Clear MEMBLOCK_HOTPLUG flag for memory in kernel nodes. */
+	/*
+	 * Finally, clear the MEMBLOCK_HOTPLUG flag for all memory
+	 * belonging to the reserved node mask.
+	 *
+	 * Note that this will include memory regions that reside
+	 * on nodes that contain kernel memory - entire nodes
+	 * become hot-unpluggable:
+	 */
 	for (i = 0; i < numa_meminfo.nr_blks; i++) {
-		nid = numa_meminfo.blk[i].nid;
-		if (!node_isset(nid, numa_kernel_nodes))
+		struct numa_memblk *mb = numa_meminfo.blk + i;
+
+		if (!node_isset(mb->nid, reserved_nodemask))
 			continue;
 
-		start = numa_meminfo.blk[i].start;
-		end = numa_meminfo.blk[i].end;
-
-		memblock_clear_hotplug(start, end - start);
+		memblock_clear_hotplug(mb->start, mb->end - mb->start);
 	}
 }
 
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index e64a470..4d0b262 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -1122,8 +1122,10 @@
 	/*
 	 * Ignore all non primary paths.
 	 */
-	if (!primary)
+	if (!primary) {
+		cpa->numpages = 1;
 		return 0;
+	}
 
 	/*
 	 * Ignore the NULL PTE for kernel identity mapping, as it is expected
diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
index f4ae536..04e2e71 100644
--- a/arch/x86/mm/pat.c
+++ b/arch/x86/mm/pat.c
@@ -943,7 +943,7 @@
 			return -EINVAL;
 	}
 
-	*prot = __pgprot((pgprot_val(vma->vm_page_prot) & (~_PAGE_CACHE_MASK)) |
+	*prot = __pgprot((pgprot_val(*prot) & (~_PAGE_CACHE_MASK)) |
 			 cachemode2protval(pcm));
 
 	return 0;
@@ -959,7 +959,7 @@
 
 	/* Set prot based on lookup */
 	pcm = lookup_memtype(pfn_t_to_phys(pfn));
-	*prot = __pgprot((pgprot_val(vma->vm_page_prot) & (~_PAGE_CACHE_MASK)) |
+	*prot = __pgprot((pgprot_val(*prot) & (~_PAGE_CACHE_MASK)) |
 			 cachemode2protval(pcm));
 
 	return 0;
diff --git a/arch/x86/mm/setup_nx.c b/arch/x86/mm/setup_nx.c
index 92e2eac..8bea847 100644
--- a/arch/x86/mm/setup_nx.c
+++ b/arch/x86/mm/setup_nx.c
@@ -4,6 +4,7 @@
 
 #include <asm/pgtable.h>
 #include <asm/proto.h>
+#include <asm/cpufeature.h>
 
 static int disable_nx;
 
@@ -31,9 +32,8 @@
 
 void x86_configure_nx(void)
 {
-	if (boot_cpu_has(X86_FEATURE_NX) && !disable_nx)
-		__supported_pte_mask |= _PAGE_NX;
-	else
+	/* If disable_nx is set, clear NX on all new mappings going forward. */
+	if (disable_nx)
 		__supported_pte_mask &= ~_PAGE_NX;
 }
 
diff --git a/arch/x86/oprofile/op_model_amd.c b/arch/x86/oprofile/op_model_amd.c
index 50d86c0..660a83c 100644
--- a/arch/x86/oprofile/op_model_amd.c
+++ b/arch/x86/oprofile/op_model_amd.c
@@ -24,7 +24,6 @@
 #include <asm/nmi.h>
 #include <asm/apic.h>
 #include <asm/processor.h>
-#include <asm/cpufeature.h>
 
 #include "op_x86_model.h"
 #include "op_counter.h"
diff --git a/arch/x86/platform/geode/alix.c b/arch/x86/platform/geode/alix.c
index 76b6632..1865c19 100644
--- a/arch/x86/platform/geode/alix.c
+++ b/arch/x86/platform/geode/alix.c
@@ -21,7 +21,7 @@
 #include <linux/init.h>
 #include <linux/io.h>
 #include <linux/string.h>
-#include <linux/module.h>
+#include <linux/moduleparam.h>
 #include <linux/leds.h>
 #include <linux/platform_device.h>
 #include <linux/gpio.h>
@@ -35,6 +35,11 @@
 #define BIOS_SIGNATURE_COREBOOT		0x500
 #define BIOS_REGION_SIZE		0x10000
 
+/*
+ * This driver is not modular, but to keep backward compatibility
+ * with existing use cases, continuing with module_param is
+ * the easiest way forward.
+ */
 static bool force = 0;
 module_param(force, bool, 0444);
 /* FIXME: Award bios is not automatically detected as Alix platform */
@@ -192,9 +197,4 @@
 
 	return 0;
 }
-
-module_init(alix_init);
-
-MODULE_AUTHOR("Ed Wildgoose <kernel@wildgooses.com>");
-MODULE_DESCRIPTION("PCEngines ALIX System Setup");
-MODULE_LICENSE("GPL");
+device_initcall(alix_init);
diff --git a/arch/x86/platform/geode/geos.c b/arch/x86/platform/geode/geos.c
index aa733fb..4fcdb91 100644
--- a/arch/x86/platform/geode/geos.c
+++ b/arch/x86/platform/geode/geos.c
@@ -19,7 +19,6 @@
 #include <linux/init.h>
 #include <linux/io.h>
 #include <linux/string.h>
-#include <linux/module.h>
 #include <linux/leds.h>
 #include <linux/platform_device.h>
 #include <linux/gpio.h>
@@ -120,9 +119,4 @@
 
 	return 0;
 }
-
-module_init(geos_init);
-
-MODULE_AUTHOR("Philip Prindeville <philipp@redfish-solutions.com>");
-MODULE_DESCRIPTION("Traverse Technologies Geos System Setup");
-MODULE_LICENSE("GPL");
+device_initcall(geos_init);
diff --git a/arch/x86/platform/geode/net5501.c b/arch/x86/platform/geode/net5501.c
index 927e38c..a2f6b98 100644
--- a/arch/x86/platform/geode/net5501.c
+++ b/arch/x86/platform/geode/net5501.c
@@ -20,7 +20,6 @@
 #include <linux/init.h>
 #include <linux/io.h>
 #include <linux/string.h>
-#include <linux/module.h>
 #include <linux/leds.h>
 #include <linux/platform_device.h>
 #include <linux/gpio.h>
@@ -146,9 +145,4 @@
 
 	return 0;
 }
-
-module_init(net5501_init);
-
-MODULE_AUTHOR("Philip Prindeville <philipp@redfish-solutions.com>");
-MODULE_DESCRIPTION("Soekris net5501 System Setup");
-MODULE_LICENSE("GPL");
+device_initcall(net5501_init);
diff --git a/arch/x86/platform/intel-mid/mfld.c b/arch/x86/platform/intel-mid/mfld.c
index 23381d2..1eb47b6 100644
--- a/arch/x86/platform/intel-mid/mfld.c
+++ b/arch/x86/platform/intel-mid/mfld.c
@@ -52,10 +52,7 @@
 	/* mark tsc clocksource as reliable */
 	set_cpu_cap(&boot_cpu_data, X86_FEATURE_TSC_RELIABLE);
 
-	if (fast_calibrate)
-		return fast_calibrate;
-
-	return 0;
+	return fast_calibrate;
 }
 
 static void __init penwell_arch_setup(void)
diff --git a/arch/x86/platform/intel-mid/mrfl.c b/arch/x86/platform/intel-mid/mrfl.c
index aaca917..bd1adc6 100644
--- a/arch/x86/platform/intel-mid/mrfl.c
+++ b/arch/x86/platform/intel-mid/mrfl.c
@@ -81,10 +81,7 @@
 	/* mark tsc clocksource as reliable */
 	set_cpu_cap(&boot_cpu_data, X86_FEATURE_TSC_RELIABLE);
 
-	if (fast_calibrate)
-		return fast_calibrate;
-
-	return 0;
+	return fast_calibrate;
 }
 
 static void __init tangier_arch_setup(void)
diff --git a/arch/x86/platform/intel-quark/imr.c b/arch/x86/platform/intel-quark/imr.c
index bfadcd0..17d6d22 100644
--- a/arch/x86/platform/intel-quark/imr.c
+++ b/arch/x86/platform/intel-quark/imr.c
@@ -1,5 +1,5 @@
 /**
- * imr.c
+ * imr.c -- Intel Isolated Memory Region driver
  *
  * Copyright(c) 2013 Intel Corporation.
  * Copyright(c) 2015 Bryan O'Donoghue <pure.logic@nexus-software.ie>
@@ -31,7 +31,6 @@
 #include <linux/debugfs.h>
 #include <linux/init.h>
 #include <linux/mm.h>
-#include <linux/module.h>
 #include <linux/types.h>
 
 struct imr_device {
@@ -135,11 +134,9 @@
  * @idev:	pointer to imr_device structure.
  * @imr_id:	IMR entry to write.
  * @imr:	IMR structure representing address and access masks.
- * @lock:	indicates if the IMR lock bit should be applied.
  * @return:	0 on success or error code passed from mbi_iosf on failure.
  */
-static int imr_write(struct imr_device *idev, u32 imr_id,
-		     struct imr_regs *imr, bool lock)
+static int imr_write(struct imr_device *idev, u32 imr_id, struct imr_regs *imr)
 {
 	unsigned long flags;
 	u32 reg = imr_id * IMR_NUM_REGS + idev->reg_base;
@@ -163,15 +160,6 @@
 	if (ret)
 		goto failed;
 
-	/* Lock bit must be set separately to addr_lo address bits. */
-	if (lock) {
-		imr->addr_lo |= IMR_LOCK;
-		ret = iosf_mbi_write(QRK_MBI_UNIT_MM, MBI_REG_WRITE,
-				     reg - IMR_NUM_REGS, imr->addr_lo);
-		if (ret)
-			goto failed;
-	}
-
 	local_irq_restore(flags);
 	return 0;
 failed:
@@ -270,17 +258,6 @@
 }
 
 /**
- * imr_debugfs_unregister - unregister debugfs hooks.
- *
- * @idev:	pointer to imr_device structure.
- * @return:
- */
-static void imr_debugfs_unregister(struct imr_device *idev)
-{
-	debugfs_remove(idev->file);
-}
-
-/**
  * imr_check_params - check passed address range IMR alignment and non-zero size
  *
  * @base:	base address of intended IMR.
@@ -334,11 +311,10 @@
  * @size:	physical size of region in bytes must be aligned to 1KiB.
  * @read_mask:	read access mask.
  * @write_mask:	write access mask.
- * @lock:	indicates whether or not to permanently lock this region.
  * @return:	zero on success or negative value indicating error.
  */
 int imr_add_range(phys_addr_t base, size_t size,
-		  unsigned int rmask, unsigned int wmask, bool lock)
+		  unsigned int rmask, unsigned int wmask)
 {
 	phys_addr_t end;
 	unsigned int i;
@@ -411,7 +387,7 @@
 	imr.rmask = rmask;
 	imr.wmask = wmask;
 
-	ret = imr_write(idev, reg, &imr, lock);
+	ret = imr_write(idev, reg, &imr);
 	if (ret < 0) {
 		/*
 		 * In the highly unlikely event iosf_mbi_write failed
@@ -422,7 +398,7 @@
 		imr.addr_hi = 0;
 		imr.rmask = IMR_READ_ACCESS_ALL;
 		imr.wmask = IMR_WRITE_ACCESS_ALL;
-		imr_write(idev, reg, &imr, false);
+		imr_write(idev, reg, &imr);
 	}
 failed:
 	mutex_unlock(&idev->lock);
@@ -518,7 +494,7 @@
 	imr.rmask = IMR_READ_ACCESS_ALL;
 	imr.wmask = IMR_WRITE_ACCESS_ALL;
 
-	ret = imr_write(idev, reg, &imr, false);
+	ret = imr_write(idev, reg, &imr);
 
 failed:
 	mutex_unlock(&idev->lock);
@@ -599,7 +575,7 @@
 	 * We don't round up @size since it is already PAGE_SIZE aligned.
 	 * See vmlinux.lds.S for details.
 	 */
-	ret = imr_add_range(base, size, IMR_CPU, IMR_CPU, false);
+	ret = imr_add_range(base, size, IMR_CPU, IMR_CPU);
 	if (ret < 0) {
 		pr_err("unable to setup IMR for kernel: %zu KiB (%lx - %lx)\n",
 			size / 1024, start, end);
@@ -614,7 +590,6 @@
 	{ X86_VENDOR_INTEL, 5, 9 },	/* Intel Quark SoC X1000. */
 	{}
 };
-MODULE_DEVICE_TABLE(x86cpu, imr_ids);
 
 /**
  * imr_init - entry point for IMR driver.
@@ -640,22 +615,4 @@
 	imr_fixup_memmap(idev);
 	return 0;
 }
-
-/**
- * imr_exit - exit point for IMR code.
- *
- * Deregisters debugfs, leave IMR state as-is.
- *
- * return:
- */
-static void __exit imr_exit(void)
-{
-	imr_debugfs_unregister(&imr_dev);
-}
-
-module_init(imr_init);
-module_exit(imr_exit);
-
-MODULE_AUTHOR("Bryan O'Donoghue <pure.logic@nexus-software.ie>");
-MODULE_DESCRIPTION("Intel Isolated Memory Region driver");
-MODULE_LICENSE("Dual BSD/GPL");
+device_initcall(imr_init);
diff --git a/arch/x86/platform/intel-quark/imr_selftest.c b/arch/x86/platform/intel-quark/imr_selftest.c
index 278e4da..f5bad40 100644
--- a/arch/x86/platform/intel-quark/imr_selftest.c
+++ b/arch/x86/platform/intel-quark/imr_selftest.c
@@ -1,5 +1,5 @@
 /**
- * imr_selftest.c
+ * imr_selftest.c -- Intel Isolated Memory Region self-test driver
  *
  * Copyright(c) 2013 Intel Corporation.
  * Copyright(c) 2015 Bryan O'Donoghue <pure.logic@nexus-software.ie>
@@ -15,7 +15,6 @@
 #include <asm/imr.h>
 #include <linux/init.h>
 #include <linux/mm.h>
-#include <linux/module.h>
 #include <linux/types.h>
 
 #define SELFTEST KBUILD_MODNAME ": "
@@ -61,30 +60,30 @@
 	int ret;
 
 	/* Test zero zero. */
-	ret = imr_add_range(0, 0, 0, 0, false);
+	ret = imr_add_range(0, 0, 0, 0);
 	imr_self_test_result(ret < 0, "zero sized IMR\n");
 
 	/* Test exact overlap. */
-	ret = imr_add_range(base, size, IMR_CPU, IMR_CPU, false);
+	ret = imr_add_range(base, size, IMR_CPU, IMR_CPU);
 	imr_self_test_result(ret < 0, fmt_over, __va(base), __va(base + size));
 
 	/* Test overlap with base inside of existing. */
 	base += size - IMR_ALIGN;
-	ret = imr_add_range(base, size, IMR_CPU, IMR_CPU, false);
+	ret = imr_add_range(base, size, IMR_CPU, IMR_CPU);
 	imr_self_test_result(ret < 0, fmt_over, __va(base), __va(base + size));
 
 	/* Test overlap with end inside of existing. */
 	base -= size + IMR_ALIGN * 2;
-	ret = imr_add_range(base, size, IMR_CPU, IMR_CPU, false);
+	ret = imr_add_range(base, size, IMR_CPU, IMR_CPU);
 	imr_self_test_result(ret < 0, fmt_over, __va(base), __va(base + size));
 
 	/* Test that a 1 KiB IMR @ zero with read/write all will bomb out. */
 	ret = imr_add_range(0, IMR_ALIGN, IMR_READ_ACCESS_ALL,
-			    IMR_WRITE_ACCESS_ALL, false);
+			    IMR_WRITE_ACCESS_ALL);
 	imr_self_test_result(ret < 0, "1KiB IMR @ 0x00000000 - access-all\n");
 
 	/* Test that a 1 KiB IMR @ zero with CPU only will work. */
-	ret = imr_add_range(0, IMR_ALIGN, IMR_CPU, IMR_CPU, false);
+	ret = imr_add_range(0, IMR_ALIGN, IMR_CPU, IMR_CPU);
 	imr_self_test_result(ret >= 0, "1KiB IMR @ 0x00000000 - cpu-access\n");
 	if (ret >= 0) {
 		ret = imr_remove_range(0, IMR_ALIGN);
@@ -93,8 +92,7 @@
 
 	/* Test 2 KiB works. */
 	size = IMR_ALIGN * 2;
-	ret = imr_add_range(0, size, IMR_READ_ACCESS_ALL,
-			    IMR_WRITE_ACCESS_ALL, false);
+	ret = imr_add_range(0, size, IMR_READ_ACCESS_ALL, IMR_WRITE_ACCESS_ALL);
 	imr_self_test_result(ret >= 0, "2KiB IMR @ 0x00000000\n");
 	if (ret >= 0) {
 		ret = imr_remove_range(0, size);
@@ -106,7 +104,6 @@
 	{ X86_VENDOR_INTEL, 5, 9 },	/* Intel Quark SoC X1000. */
 	{}
 };
-MODULE_DEVICE_TABLE(x86cpu, imr_ids);
 
 /**
  * imr_self_test_init - entry point for IMR driver.
@@ -125,13 +122,4 @@
  *
  * return:
  */
-static void __exit imr_self_test_exit(void)
-{
-}
-
-module_init(imr_self_test_init);
-module_exit(imr_self_test_exit);
-
-MODULE_AUTHOR("Bryan O'Donoghue <pure.logic@nexus-software.ie>");
-MODULE_DESCRIPTION("Intel Isolated Memory Region self-test driver");
-MODULE_LICENSE("Dual BSD/GPL");
+device_initcall(imr_self_test_init);
diff --git a/arch/x86/um/asm/barrier.h b/arch/x86/um/asm/barrier.h
index 174781a..00c3190 100644
--- a/arch/x86/um/asm/barrier.h
+++ b/arch/x86/um/asm/barrier.h
@@ -3,7 +3,7 @@
 
 #include <asm/asm.h>
 #include <asm/segment.h>
-#include <asm/cpufeature.h>
+#include <asm/cpufeatures.h>
 #include <asm/cmpxchg.h>
 #include <asm/nops.h>
 
diff --git a/arch/x86/um/sys_call_table_32.c b/arch/x86/um/sys_call_table_32.c
index 439c099..bfce503 100644
--- a/arch/x86/um/sys_call_table_32.c
+++ b/arch/x86/um/sys_call_table_32.c
@@ -25,11 +25,11 @@
 
 #define old_mmap sys_old_mmap
 
-#define __SYSCALL_I386(nr, sym, compat) extern asmlinkage long sym(unsigned long, unsigned long, unsigned long, unsigned long, unsigned long, unsigned long) ;
+#define __SYSCALL_I386(nr, sym, qual) extern asmlinkage long sym(unsigned long, unsigned long, unsigned long, unsigned long, unsigned long, unsigned long) ;
 #include <asm/syscalls_32.h>
 
 #undef __SYSCALL_I386
-#define __SYSCALL_I386(nr, sym, compat) [ nr ] = sym,
+#define __SYSCALL_I386(nr, sym, qual) [ nr ] = sym,
 
 extern asmlinkage long sys_ni_syscall(unsigned long, unsigned long, unsigned long, unsigned long, unsigned long, unsigned long);
 
diff --git a/arch/x86/um/sys_call_table_64.c b/arch/x86/um/sys_call_table_64.c
index b74ea6c..f306413 100644
--- a/arch/x86/um/sys_call_table_64.c
+++ b/arch/x86/um/sys_call_table_64.c
@@ -35,14 +35,11 @@
 #define stub_execveat sys_execveat
 #define stub_rt_sigreturn sys_rt_sigreturn
 
-#define __SYSCALL_COMMON(nr, sym, compat) __SYSCALL_64(nr, sym, compat)
-#define __SYSCALL_X32(nr, sym, compat) /* Not supported */
-
-#define __SYSCALL_64(nr, sym, compat) extern asmlinkage long sym(unsigned long, unsigned long, unsigned long, unsigned long, unsigned long, unsigned long) ;
+#define __SYSCALL_64(nr, sym, qual) extern asmlinkage long sym(unsigned long, unsigned long, unsigned long, unsigned long, unsigned long, unsigned long) ;
 #include <asm/syscalls_64.h>
 
 #undef __SYSCALL_64
-#define __SYSCALL_64(nr, sym, compat) [ nr ] = sym,
+#define __SYSCALL_64(nr, sym, qual) [ nr ] = sym,
 
 extern asmlinkage long sys_ni_syscall(unsigned long, unsigned long, unsigned long, unsigned long, unsigned long, unsigned long);
 
diff --git a/arch/x86/um/user-offsets.c b/arch/x86/um/user-offsets.c
index ce7e360..470564b 100644
--- a/arch/x86/um/user-offsets.c
+++ b/arch/x86/um/user-offsets.c
@@ -9,14 +9,12 @@
 #include <asm/types.h>
 
 #ifdef __i386__
-#define __SYSCALL_I386(nr, sym, compat) [nr] = 1,
+#define __SYSCALL_I386(nr, sym, qual) [nr] = 1,
 static char syscalls[] = {
 #include <asm/syscalls_32.h>
 };
 #else
-#define __SYSCALL_64(nr, sym, compat) [nr] = 1,
-#define __SYSCALL_COMMON(nr, sym, compat) [nr] = 1,
-#define __SYSCALL_X32(nr, sym, compat) /* Not supported */
+#define __SYSCALL_64(nr, sym, qual) [nr] = 1,
 static char syscalls[] = {
 #include <asm/syscalls_64.h>
 };
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
index 3f4ebf0..3c6d17f 100644
--- a/arch/x86/xen/smp.c
+++ b/arch/x86/xen/smp.c
@@ -112,7 +112,7 @@
 		xen_pvh_secondary_vcpu_init(cpu);
 #endif
 	cpu_bringup();
-	cpu_startup_entry(CPUHP_ONLINE);
+	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
 }
 
 static void xen_smp_intr_free(unsigned int cpu)
diff --git a/arch/xtensa/kernel/smp.c b/arch/xtensa/kernel/smp.c
index 4d02e38..fc4ad21 100644
--- a/arch/xtensa/kernel/smp.c
+++ b/arch/xtensa/kernel/smp.c
@@ -157,7 +157,7 @@
 
 	complete(&cpu_running);
 
-	cpu_startup_entry(CPUHP_ONLINE);
+	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
 }
 
 static void mx_cpu_start(void *p)
diff --git a/drivers/base/regmap/internal.h b/drivers/base/regmap/internal.h
index 3df9770..5c79526 100644
--- a/drivers/base/regmap/internal.h
+++ b/drivers/base/regmap/internal.h
@@ -110,6 +110,7 @@
 	/* number of bits to (left) shift the reg value when formatting*/
 	int reg_shift;
 	int reg_stride;
+	int reg_stride_order;
 
 	/* regcache specific members */
 	const struct regcache_ops *cache_ops;
@@ -263,4 +264,19 @@
 	return map->name;
 }
 
+static inline unsigned int regmap_get_offset(const struct regmap *map,
+					     unsigned int index)
+{
+	if (map->reg_stride_order >= 0)
+		return index << map->reg_stride_order;
+	else
+		return index * map->reg_stride;
+}
+
+static inline unsigned int regcache_get_index_by_order(const struct regmap *map,
+						       unsigned int reg)
+{
+	return reg >> map->reg_stride_order;
+}
+
 #endif
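
A quick sketch of why regmap_get_offset() prefers the shift path: when reg_stride is a power of two, index * stride and index << order give the same offset, and the order is computed once at init time with ilog2() (modelled below with the GCC/Clang builtin __builtin_ctz(); the values are arbitrary):

#include <stdio.h>

int main(void)
{
	unsigned int stride = 4;			/* e.g. 32-bit registers */
	int order = __builtin_ctz(stride);		/* ilog2() for powers of two */
	unsigned int index = 7;

	printf("%u == %u (order %d)\n", index * stride, index << order, order);
	return 0;
}
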
diff --git a/drivers/base/regmap/regcache-flat.c b/drivers/base/regmap/regcache-flat.c
index 686c9e0..3ee7255 100644
--- a/drivers/base/regmap/regcache-flat.c
+++ b/drivers/base/regmap/regcache-flat.c
@@ -16,20 +16,30 @@
 
 #include "internal.h"
 
+static inline unsigned int regcache_flat_get_index(const struct regmap *map,
+						   unsigned int reg)
+{
+	return regcache_get_index_by_order(map, reg);
+}
+
 static int regcache_flat_init(struct regmap *map)
 {
 	int i;
 	unsigned int *cache;
 
-	map->cache = kcalloc(map->max_register + 1, sizeof(unsigned int),
-			     GFP_KERNEL);
+	if (!map || map->reg_stride_order < 0)
+		return -EINVAL;
+
+	map->cache = kcalloc(regcache_flat_get_index(map, map->max_register)
+			     + 1, sizeof(unsigned int), GFP_KERNEL);
 	if (!map->cache)
 		return -ENOMEM;
 
 	cache = map->cache;
 
 	for (i = 0; i < map->num_reg_defaults; i++)
-		cache[map->reg_defaults[i].reg] = map->reg_defaults[i].def;
+		cache[regcache_flat_get_index(map, map->reg_defaults[i].reg)] =
+				map->reg_defaults[i].def;
 
 	return 0;
 }
@@ -47,7 +57,7 @@
 {
 	unsigned int *cache = map->cache;
 
-	*value = cache[reg];
+	*value = cache[regcache_flat_get_index(map, reg)];
 
 	return 0;
 }
@@ -57,7 +67,7 @@
 {
 	unsigned int *cache = map->cache;
 
-	cache[reg] = value;
+	cache[regcache_flat_get_index(map, reg)] = value;
 
 	return 0;
 }
diff --git a/drivers/base/regmap/regcache.c b/drivers/base/regmap/regcache.c
index 348be3a..4170b7d 100644
--- a/drivers/base/regmap/regcache.c
+++ b/drivers/base/regmap/regcache.c
@@ -30,7 +30,7 @@
 	int i, j;
 	int ret;
 	int count;
-	unsigned int val;
+	unsigned int reg, val;
 	void *tmp_buf;
 
 	if (!map->num_reg_defaults_raw)
@@ -57,7 +57,7 @@
 		bool cache_bypass = map->cache_bypass;
 		dev_warn(map->dev, "No cache defaults, reading back from HW\n");
 
-		/* Bypass the cache access till data read from HW*/
+		/* Bypass the cache access till data read from HW */
 		map->cache_bypass = true;
 		tmp_buf = kmalloc(map->cache_size_raw, GFP_KERNEL);
 		if (!tmp_buf) {
@@ -65,29 +65,48 @@
 			goto err_free;
 		}
 		ret = regmap_raw_read(map, 0, tmp_buf,
-				      map->num_reg_defaults_raw);
+				      map->cache_size_raw);
 		map->cache_bypass = cache_bypass;
-		if (ret < 0)
-			goto err_cache_free;
-
-		map->reg_defaults_raw = tmp_buf;
-		map->cache_free = 1;
+		if (ret == 0) {
+			map->reg_defaults_raw = tmp_buf;
+			map->cache_free = 1;
+		} else {
+			kfree(tmp_buf);
+		}
 	}
 
 	/* fill the reg_defaults */
 	for (i = 0, j = 0; i < map->num_reg_defaults_raw; i++) {
-		if (regmap_volatile(map, i * map->reg_stride))
+		reg = i * map->reg_stride;
+
+		if (!regmap_readable(map, reg))
 			continue;
-		val = regcache_get_val(map, map->reg_defaults_raw, i);
-		map->reg_defaults[j].reg = i * map->reg_stride;
+
+		if (regmap_volatile(map, reg))
+			continue;
+
+		if (map->reg_defaults_raw) {
+			val = regcache_get_val(map, map->reg_defaults_raw, i);
+		} else {
+			bool cache_bypass = map->cache_bypass;
+
+			map->cache_bypass = true;
+			ret = regmap_read(map, reg, &val);
+			map->cache_bypass = cache_bypass;
+			if (ret != 0) {
+				dev_err(map->dev, "Failed to read %d: %d\n",
+					reg, ret);
+				goto err_free;
+			}
+		}
+
+		map->reg_defaults[j].reg = reg;
 		map->reg_defaults[j].def = val;
 		j++;
 	}
 
 	return 0;
 
-err_cache_free:
-	kfree(tmp_buf);
 err_free:
 	kfree(map->reg_defaults);
 
diff --git a/drivers/base/regmap/regmap-irq.c b/drivers/base/regmap/regmap-irq.c
index 9b0d202..26f799e 100644
--- a/drivers/base/regmap/regmap-irq.c
+++ b/drivers/base/regmap/regmap-irq.c
@@ -379,6 +379,7 @@
 	irq_set_chip_data(virq, data);
 	irq_set_chip(virq, &data->irq_chip);
 	irq_set_nested_thread(virq, 1);
+	irq_set_parent(virq, data->irq);
 	irq_set_noprobe(virq);
 
 	return 0;
@@ -655,13 +656,34 @@
  *
  * @irq: Primary IRQ for the device
  * @d:   regmap_irq_chip_data allocated by regmap_add_irq_chip()
+ *
+ * This function also disposes of all IRQ mappings on the chip.
  */
 void regmap_del_irq_chip(int irq, struct regmap_irq_chip_data *d)
 {
+	unsigned int virq;
+	int hwirq;
+
 	if (!d)
 		return;
 
 	free_irq(irq, d);
+
+	/* Dispose of all virtual IRQs in the IRQ domain before removing it */
+	for (hwirq = 0; hwirq < d->chip->num_irqs; hwirq++) {
+		/* Skip hwirqs that are holes in the IRQ list */
+		if (!d->chip->irqs[hwirq].mask)
+			continue;
+
+		/*
+		 * Find the virtual IRQ mapped to this hwirq on the chip
+		 * and, if there is one, dispose of it.
+		 */
+		virq = irq_find_mapping(d->domain, hwirq);
+		if (virq)
+			irq_dispose_mapping(virq);
+	}
+
 	irq_domain_remove(d->domain);
 	kfree(d->type_buf);
 	kfree(d->type_buf_def);
@@ -674,6 +696,88 @@
 }
 EXPORT_SYMBOL_GPL(regmap_del_irq_chip);
 
+static void devm_regmap_irq_chip_release(struct device *dev, void *res)
+{
+	struct regmap_irq_chip_data *d = *(struct regmap_irq_chip_data **)res;
+
+	regmap_del_irq_chip(d->irq, d);
+}
+
+static int devm_regmap_irq_chip_match(struct device *dev, void *res, void *data)
+
+{
+	struct regmap_irq_chip_data **r = res;
+
+	if (!r || !*r) {
+		WARN_ON(!r || !*r);
+		return 0;
+	}
+	return *r == data;
+}
+
+/**
+ * devm_regmap_add_irq_chip(): Resource managed regmap_add_irq_chip()
+ *
+ * @dev:       The device to which the irq_chip belongs.
+ * @map:       The regmap for the device.
+ * @irq:       The IRQ the device uses to signal interrupts.
+ * @irq_flags: The IRQF_ flags to use for the primary interrupt.
+ * @irq_base:  Base IRQ number, passed through to regmap_add_irq_chip().
+ * @chip:      Configuration for the interrupt controller.
+ * @data:      Runtime data structure for the controller, allocated on success.
+ *
+ * Returns 0 on success or an errno on failure.
+ *
+ * The regmap_irq_chip data will be released automatically when the device
+ * is unbound.
+ */
+int devm_regmap_add_irq_chip(struct device *dev, struct regmap *map, int irq,
+			     int irq_flags, int irq_base,
+			     const struct regmap_irq_chip *chip,
+			     struct regmap_irq_chip_data **data)
+{
+	struct regmap_irq_chip_data **ptr, *d;
+	int ret;
+
+	ptr = devres_alloc(devm_regmap_irq_chip_release, sizeof(*ptr),
+			   GFP_KERNEL);
+	if (!ptr)
+		return -ENOMEM;
+
+	ret = regmap_add_irq_chip(map, irq, irq_flags, irq_base,
+				  chip, &d);
+	if (ret < 0) {
+		devres_free(ptr);
+		return ret;
+	}
+
+	*ptr = d;
+	devres_add(dev, ptr);
+	*data = d;
+	return 0;
+}
+EXPORT_SYMBOL_GPL(devm_regmap_add_irq_chip);
+
+/**
+ * devm_regmap_del_irq_chip(): Resource managed regmap_del_irq_chip()
+ *
+ * @dev:  Device for which the resource was allocated.
+ * @irq:  Primary IRQ for the device.
+ * @data: regmap_irq_chip_data allocated by devm_regmap_add_irq_chip().
+ */
+void devm_regmap_del_irq_chip(struct device *dev, int irq,
+			      struct regmap_irq_chip_data *data)
+{
+	int rc;
+
+	WARN_ON(irq != data->irq);
+	rc = devres_release(dev, devm_regmap_irq_chip_release,
+			    devm_regmap_irq_chip_match, data);
+
+	if (rc != 0)
+		WARN_ON(rc);
+}
+EXPORT_SYMBOL_GPL(devm_regmap_del_irq_chip);
+
 /**
  * regmap_irq_chip_get_base(): Retrieve interrupt base for a regmap IRQ chip
  *
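
For context, a hypothetical probe-time caller of the devm_ helper added above might look like the sketch below. The wrapper function and 'my_irq_chip' (a static const struct regmap_irq_chip defined by the driver) are assumptions for illustration, not part of this patch; <linux/regmap.h>, <linux/interrupt.h> and <linux/device.h> are assumed to be included:

static int my_driver_add_irqs(struct device *dev, struct regmap *map, int irq)
{
	struct regmap_irq_chip_data *irq_data;
	int ret;

	ret = devm_regmap_add_irq_chip(dev, map, irq, IRQF_ONESHOT,
				       0 /* irq_base */, &my_irq_chip,
				       &irq_data);
	if (ret)
		dev_err(dev, "failed to add IRQ chip: %d\n", ret);

	/* No explicit cleanup path: the chip is removed when the device unbinds. */
	return ret;
}
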
diff --git a/drivers/base/regmap/regmap-mmio.c b/drivers/base/regmap/regmap-mmio.c
index eea5156..7526906 100644
--- a/drivers/base/regmap/regmap-mmio.c
+++ b/drivers/base/regmap/regmap-mmio.c
@@ -25,26 +25,14 @@
 
 struct regmap_mmio_context {
 	void __iomem *regs;
-	unsigned reg_bytes;
 	unsigned val_bytes;
-	unsigned pad_bytes;
 	struct clk *clk;
-};
 
-static inline void regmap_mmio_regsize_check(size_t reg_size)
-{
-	switch (reg_size) {
-	case 1:
-	case 2:
-	case 4:
-#ifdef CONFIG_64BIT
-	case 8:
-#endif
-		break;
-	default:
-		BUG();
-	}
-}
+	void (*reg_write)(struct regmap_mmio_context *ctx,
+			  unsigned int reg, unsigned int val);
+	unsigned int (*reg_read)(struct regmap_mmio_context *ctx,
+			         unsigned int reg);
+};
 
 static int regmap_mmio_regbits_check(size_t reg_bits)
 {
@@ -88,72 +76,62 @@
 	return min_stride;
 }
 
-static inline void regmap_mmio_count_check(size_t count, u32 offset)
+static void regmap_mmio_write8(struct regmap_mmio_context *ctx,
+				unsigned int reg,
+				unsigned int val)
 {
-	BUG_ON(count <= offset);
+	writeb(val, ctx->regs + reg);
 }
 
-static inline unsigned int
-regmap_mmio_get_offset(const void *reg, size_t reg_size)
+static void regmap_mmio_write16le(struct regmap_mmio_context *ctx,
+				  unsigned int reg,
+				  unsigned int val)
 {
-	switch (reg_size) {
-	case 1:
-		return *(u8 *)reg;
-	case 2:
-		return *(u16 *)reg;
-	case 4:
-		return *(u32 *)reg;
+	writew(val, ctx->regs + reg);
+}
+
+static void regmap_mmio_write16be(struct regmap_mmio_context *ctx,
+				  unsigned int reg,
+				  unsigned int val)
+{
+	iowrite16be(val, ctx->regs + reg);
+}
+
+static void regmap_mmio_write32le(struct regmap_mmio_context *ctx,
+				  unsigned int reg,
+				  unsigned int val)
+{
+	writel(val, ctx->regs + reg);
+}
+
+static void regmap_mmio_write32be(struct regmap_mmio_context *ctx,
+				  unsigned int reg,
+				  unsigned int val)
+{
+	iowrite32be(val, ctx->regs + reg);
+}
+
 #ifdef CONFIG_64BIT
-	case 8:
-		return *(u64 *)reg;
-#endif
-	default:
-		BUG();
-	}
+static void regmap_mmio_write64le(struct regmap_mmio_context *ctx,
+				  unsigned int reg,
+				  unsigned int val)
+{
+	writeq(val, ctx->regs + reg);
 }
+#endif
 
-static int regmap_mmio_gather_write(void *context,
-				    const void *reg, size_t reg_size,
-				    const void *val, size_t val_size)
+static int regmap_mmio_write(void *context, unsigned int reg, unsigned int val)
 {
 	struct regmap_mmio_context *ctx = context;
-	unsigned int offset;
 	int ret;
 
-	regmap_mmio_regsize_check(reg_size);
-
 	if (!IS_ERR(ctx->clk)) {
 		ret = clk_enable(ctx->clk);
 		if (ret < 0)
 			return ret;
 	}
 
-	offset = regmap_mmio_get_offset(reg, reg_size);
-
-	while (val_size) {
-		switch (ctx->val_bytes) {
-		case 1:
-			writeb(*(u8 *)val, ctx->regs + offset);
-			break;
-		case 2:
-			writew(*(u16 *)val, ctx->regs + offset);
-			break;
-		case 4:
-			writel(*(u32 *)val, ctx->regs + offset);
-			break;
-#ifdef CONFIG_64BIT
-		case 8:
-			writeq(*(u64 *)val, ctx->regs + offset);
-			break;
-#endif
-		default:
-			/* Should be caught by regmap_mmio_check_config */
-			BUG();
-		}
-		val_size -= ctx->val_bytes;
-		val += ctx->val_bytes;
-		offset += ctx->val_bytes;
-	}
+	ctx->reg_write(ctx, reg, val);
 
 	if (!IS_ERR(ctx->clk))
 		clk_disable(ctx->clk);
@@ -161,59 +139,56 @@
 	return 0;
 }
 
-static int regmap_mmio_write(void *context, const void *data, size_t count)
+static unsigned int regmap_mmio_read8(struct regmap_mmio_context *ctx,
+				      unsigned int reg)
 {
-	struct regmap_mmio_context *ctx = context;
-	unsigned int offset = ctx->reg_bytes + ctx->pad_bytes;
-
-	regmap_mmio_count_check(count, offset);
-
-	return regmap_mmio_gather_write(context, data, ctx->reg_bytes,
-					data + offset, count - offset);
+	return readb(ctx->regs + reg);
 }
 
-static int regmap_mmio_read(void *context,
-			    const void *reg, size_t reg_size,
-			    void *val, size_t val_size)
+static unsigned int regmap_mmio_read16le(struct regmap_mmio_context *ctx,
+				         unsigned int reg)
+{
+	return readw(ctx->regs + reg);
+}
+
+static unsigned int regmap_mmio_read16be(struct regmap_mmio_context *ctx,
+				         unsigned int reg)
+{
+	return ioread16be(ctx->regs + reg);
+}
+
+static unsigned int regmap_mmio_read32le(struct regmap_mmio_context *ctx,
+				         unsigned int reg)
+{
+	return readl(ctx->regs + reg);
+}
+
+static unsigned int regmap_mmio_read32be(struct regmap_mmio_context *ctx,
+				         unsigned int reg)
+{
+	return ioread32be(ctx->regs + reg);
+}
+
+#ifdef CONFIG_64BIT
+static unsigned int regmap_mmio_read64le(struct regmap_mmio_context *ctx,
+				         unsigned int reg)
+{
+	return readq(ctx->regs + reg);
+}
+#endif
+
+static int regmap_mmio_read(void *context, unsigned int reg, unsigned int *val)
 {
 	struct regmap_mmio_context *ctx = context;
-	unsigned int offset;
 	int ret;
 
-	regmap_mmio_regsize_check(reg_size);
-
 	if (!IS_ERR(ctx->clk)) {
 		ret = clk_enable(ctx->clk);
 		if (ret < 0)
 			return ret;
 	}
 
-	offset = regmap_mmio_get_offset(reg, reg_size);
-
-	while (val_size) {
-		switch (ctx->val_bytes) {
-		case 1:
-			*(u8 *)val = readb(ctx->regs + offset);
-			break;
-		case 2:
-			*(u16 *)val = readw(ctx->regs + offset);
-			break;
-		case 4:
-			*(u32 *)val = readl(ctx->regs + offset);
-			break;
-#ifdef CONFIG_64BIT
-		case 8:
-			*(u64 *)val = readq(ctx->regs + offset);
-			break;
-#endif
-		default:
-			/* Should be caught by regmap_mmio_check_config */
-			BUG();
-		}
-		val_size -= ctx->val_bytes;
-		val += ctx->val_bytes;
-		offset += ctx->val_bytes;
-	}
+	*val = ctx->reg_read(ctx, reg);
 
 	if (!IS_ERR(ctx->clk))
 		clk_disable(ctx->clk);
@@ -232,14 +207,11 @@
 	kfree(context);
 }
 
-static struct regmap_bus regmap_mmio = {
+static const struct regmap_bus regmap_mmio = {
 	.fast_io = true,
-	.write = regmap_mmio_write,
-	.gather_write = regmap_mmio_gather_write,
-	.read = regmap_mmio_read,
+	.reg_write = regmap_mmio_write,
+	.reg_read = regmap_mmio_read,
 	.free_context = regmap_mmio_free_context,
-	.reg_format_endian_default = REGMAP_ENDIAN_NATIVE,
-	.val_format_endian_default = REGMAP_ENDIAN_NATIVE,
 };
 
 static struct regmap_mmio_context *regmap_mmio_gen_context(struct device *dev,
@@ -265,24 +237,71 @@
 	if (config->reg_stride < min_stride)
 		return ERR_PTR(-EINVAL);
 
-	switch (config->reg_format_endian) {
-	case REGMAP_ENDIAN_DEFAULT:
-	case REGMAP_ENDIAN_NATIVE:
-		break;
-	default:
-		return ERR_PTR(-EINVAL);
-	}
-
 	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
 	if (!ctx)
 		return ERR_PTR(-ENOMEM);
 
 	ctx->regs = regs;
 	ctx->val_bytes = config->val_bits / 8;
-	ctx->reg_bytes = config->reg_bits / 8;
-	ctx->pad_bytes = config->pad_bits / 8;
 	ctx->clk = ERR_PTR(-ENODEV);
 
+	switch (config->reg_format_endian) {
+	case REGMAP_ENDIAN_DEFAULT:
+	case REGMAP_ENDIAN_LITTLE:
+#ifdef __LITTLE_ENDIAN
+	case REGMAP_ENDIAN_NATIVE:
+#endif
+		switch (config->val_bits) {
+		case 8:
+			ctx->reg_read = regmap_mmio_read8;
+			ctx->reg_write = regmap_mmio_write8;
+			break;
+		case 16:
+			ctx->reg_read = regmap_mmio_read16le;
+			ctx->reg_write = regmap_mmio_write16le;
+			break;
+		case 32:
+			ctx->reg_read = regmap_mmio_read32le;
+			ctx->reg_write = regmap_mmio_write32le;
+			break;
+#ifdef CONFIG_64BIT
+		case 64:
+			ctx->reg_read = regmap_mmio_read64le;
+			ctx->reg_write = regmap_mmio_write64le;
+			break;
+#endif
+		default:
+			ret = -EINVAL;
+			goto err_free;
+		}
+		break;
+	case REGMAP_ENDIAN_BIG:
+#ifdef __BIG_ENDIAN
+	case REGMAP_ENDIAN_NATIVE:
+#endif
+		switch (config->val_bits) {
+		case 8:
+			ctx->reg_read = regmap_mmio_read8;
+			ctx->reg_write = regmap_mmio_write8;
+			break;
+		case 16:
+			ctx->reg_read = regmap_mmio_read16be;
+			ctx->reg_write = regmap_mmio_write16be;
+			break;
+		case 32:
+			ctx->reg_read = regmap_mmio_read32be;
+			ctx->reg_write = regmap_mmio_write32be;
+			break;
+		default:
+			ret = -EINVAL;
+			goto err_free;
+		}
+		break;
+	default:
+		ret = -EINVAL;
+		goto err_free;
+	}
+
 	if (clk_id == NULL)
 		return ctx;
 
diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
index ee54e84..df2d2ef 100644
--- a/drivers/base/regmap/regmap.c
+++ b/drivers/base/regmap/regmap.c
@@ -19,6 +19,7 @@
 #include <linux/rbtree.h>
 #include <linux/sched.h>
 #include <linux/delay.h>
+#include <linux/log2.h>
 
 #define CREATE_TRACE_POINTS
 #include "trace.h"
@@ -557,6 +558,8 @@
 			endian = REGMAP_ENDIAN_BIG;
 		else if (of_property_read_bool(np, "little-endian"))
 			endian = REGMAP_ENDIAN_LITTLE;
+		else if (of_property_read_bool(np, "native-endian"))
+			endian = REGMAP_ENDIAN_NATIVE;
 
 		/* If the endianness was specified in DT, use that */
 		if (endian != REGMAP_ENDIAN_DEFAULT)
@@ -638,6 +641,10 @@
 		map->reg_stride = config->reg_stride;
 	else
 		map->reg_stride = 1;
+	if (is_power_of_2(map->reg_stride))
+		map->reg_stride_order = ilog2(map->reg_stride);
+	else
+		map->reg_stride_order = -1;
 	map->use_single_read = config->use_single_rw || !bus || !bus->read;
 	map->use_single_write = config->use_single_rw || !bus || !bus->write;
 	map->can_multi_write = config->can_multi_write && bus && bus->write;
@@ -1308,7 +1315,7 @@
 	if (map->writeable_reg)
 		for (i = 0; i < val_len / map->format.val_bytes; i++)
 			if (!map->writeable_reg(map->dev,
-						reg + (i * map->reg_stride)))
+					       reg + regmap_get_offset(map, i)))
 				return -EINVAL;
 
 	if (!map->cache_bypass && map->format.parse_val) {
@@ -1316,7 +1323,8 @@
 		int val_bytes = map->format.val_bytes;
 		for (i = 0; i < val_len / val_bytes; i++) {
 			ival = map->format.parse_val(val + (i * val_bytes));
-			ret = regcache_write(map, reg + (i * map->reg_stride),
+			ret = regcache_write(map,
+					     reg + regmap_get_offset(map, i),
 					     ival);
 			if (ret) {
 				dev_err(map->dev,
@@ -1690,100 +1698,63 @@
 EXPORT_SYMBOL_GPL(regmap_raw_write);
 
 /**
- * regmap_field_write(): Write a value to a single register field
- *
- * @field: Register field to write to
- * @val: Value to be written
- *
- * A value of zero will be returned on success, a negative errno will
- * be returned in error cases.
- */
-int regmap_field_write(struct regmap_field *field, unsigned int val)
-{
-	return regmap_update_bits(field->regmap, field->reg,
-				field->mask, val << field->shift);
-}
-EXPORT_SYMBOL_GPL(regmap_field_write);
-
-/**
- * regmap_field_update_bits():	Perform a read/modify/write cycle
- *                              on the register field
+ * regmap_field_update_bits_base():
+ *	Perform a read/modify/write cycle on the register field
+ *	with change, async and force options
  *
  * @field: Register field to write to
  * @mask: Bitmask to change
  * @val: Value to be written
+ * @change: Boolean indicating if a write was done
+ * @async: Boolean indicating whether to perform the write asynchronously
+ * @force: Boolean indicating whether to force the write even if the value is unchanged
  *
  * A value of zero will be returned on success, a negative errno will
  * be returned in error cases.
  */
-int regmap_field_update_bits(struct regmap_field *field, unsigned int mask, unsigned int val)
+int regmap_field_update_bits_base(struct regmap_field *field,
+				  unsigned int mask, unsigned int val,
+				  bool *change, bool async, bool force)
 {
 	mask = (mask << field->shift) & field->mask;
 
-	return regmap_update_bits(field->regmap, field->reg,
-				  mask, val << field->shift);
+	return regmap_update_bits_base(field->regmap, field->reg,
+				       mask, val << field->shift,
+				       change, async, force);
 }
-EXPORT_SYMBOL_GPL(regmap_field_update_bits);
+EXPORT_SYMBOL_GPL(regmap_field_update_bits_base);
 
 /**
- * regmap_fields_write(): Write a value to a single register field with port ID
- *
- * @field: Register field to write to
- * @id: port ID
- * @val: Value to be written
- *
- * A value of zero will be returned on success, a negative errno will
- * be returned in error cases.
- */
-int regmap_fields_write(struct regmap_field *field, unsigned int id,
-			unsigned int val)
-{
-	if (id >= field->id_size)
-		return -EINVAL;
-
-	return regmap_update_bits(field->regmap,
-				  field->reg + (field->id_offset * id),
-				  field->mask, val << field->shift);
-}
-EXPORT_SYMBOL_GPL(regmap_fields_write);
-
-int regmap_fields_force_write(struct regmap_field *field, unsigned int id,
-			unsigned int val)
-{
-	if (id >= field->id_size)
-		return -EINVAL;
-
-	return regmap_write_bits(field->regmap,
-				  field->reg + (field->id_offset * id),
-				  field->mask, val << field->shift);
-}
-EXPORT_SYMBOL_GPL(regmap_fields_force_write);
-
-/**
- * regmap_fields_update_bits():	Perform a read/modify/write cycle
- *                              on the register field
+ * regmap_fields_update_bits_base():
+ *	Perform a read/modify/write cycle on the register field
+ *	with change, async and force options
  *
  * @field: Register field to write to
  * @id: port ID
  * @mask: Bitmask to change
  * @val: Value to be written
+ * @change: Boolean indicating if a write was done
+ * @async: Boolean indicating whether to perform the write asynchronously
+ * @force: Boolean indicating whether to force the write even if the value is unchanged
  *
  * A value of zero will be returned on success, a negative errno will
  * be returned in error cases.
  */
-int regmap_fields_update_bits(struct regmap_field *field,  unsigned int id,
-			      unsigned int mask, unsigned int val)
+int regmap_fields_update_bits_base(struct regmap_field *field,  unsigned int id,
+				   unsigned int mask, unsigned int val,
+				   bool *change, bool async, bool force)
 {
 	if (id >= field->id_size)
 		return -EINVAL;
 
 	mask = (mask << field->shift) & field->mask;
 
-	return regmap_update_bits(field->regmap,
-				  field->reg + (field->id_offset * id),
-				  mask, val << field->shift);
+	return regmap_update_bits_base(field->regmap,
+				       field->reg + (field->id_offset * id),
+				       mask, val << field->shift,
+				       change, async, force);
 }
-EXPORT_SYMBOL_GPL(regmap_fields_update_bits);
+EXPORT_SYMBOL_GPL(regmap_fields_update_bits_base);
 
 /*
  * regmap_bulk_write(): Write multiple registers to the device
@@ -1846,8 +1817,9 @@
 				goto out;
 			}
 
-			ret = _regmap_write(map, reg + (i * map->reg_stride),
-					ival);
+			ret = _regmap_write(map,
+					    reg + regmap_get_offset(map, i),
+					    ival);
 			if (ret != 0)
 				goto out;
 		}
@@ -2253,6 +2225,9 @@
 
 	WARN_ON(!map->bus);
 
+	if (!map->bus || !map->bus->read)
+		return -EINVAL;
+
 	range = _regmap_range_lookup(map, reg);
 	if (range) {
 		ret = _regmap_select_page(map, &reg, range,
@@ -2416,7 +2391,7 @@
 		 * cost as we expect to hit the cache.
 		 */
 		for (i = 0; i < val_count; i++) {
-			ret = _regmap_read(map, reg + (i * map->reg_stride),
+			ret = _regmap_read(map, reg + regmap_get_offset(map, i),
 					   &v);
 			if (ret != 0)
 				goto out;
@@ -2568,7 +2543,7 @@
 	} else {
 		for (i = 0; i < val_count; i++) {
 			unsigned int ival;
-			ret = regmap_read(map, reg + (i * map->reg_stride),
+			ret = regmap_read(map, reg + regmap_get_offset(map, i),
 					  &ival);
 			if (ret != 0)
 				return ret;
@@ -2648,76 +2623,36 @@
 }
 
 /**
- * regmap_update_bits: Perform a read/modify/write cycle on the register map
+ * regmap_update_bits_base:
+ *	Perform a read/modify/write cycle on the
+ *	register map with change, async and force options
  *
  * @map: Register map to update
  * @reg: Register to update
  * @mask: Bitmask to change
  * @val: New value for bitmask
+ * @change: Boolean indicating if a write was done
+ * @async: Boolean indicating whether to perform the write asynchronously
+ * @force: Boolean indicating whether to force the write even if the value is unchanged
  *
- * Returns zero for success, a negative number on error.
- */
-int regmap_update_bits(struct regmap *map, unsigned int reg,
-		       unsigned int mask, unsigned int val)
-{
-	int ret;
-
-	map->lock(map->lock_arg);
-	ret = _regmap_update_bits(map, reg, mask, val, NULL, false);
-	map->unlock(map->lock_arg);
-
-	return ret;
-}
-EXPORT_SYMBOL_GPL(regmap_update_bits);
-
-/**
- * regmap_write_bits: Perform a read/modify/write cycle on the register map
- *
- * @map: Register map to update
- * @reg: Register to update
- * @mask: Bitmask to change
- * @val: New value for bitmask
- *
- * Returns zero for success, a negative number on error.
- */
-int regmap_write_bits(struct regmap *map, unsigned int reg,
-		      unsigned int mask, unsigned int val)
-{
-	int ret;
-
-	map->lock(map->lock_arg);
-	ret = _regmap_update_bits(map, reg, mask, val, NULL, true);
-	map->unlock(map->lock_arg);
-
-	return ret;
-}
-EXPORT_SYMBOL_GPL(regmap_write_bits);
-
-/**
- * regmap_update_bits_async: Perform a read/modify/write cycle on the register
- *                           map asynchronously
- *
- * @map: Register map to update
- * @reg: Register to update
- * @mask: Bitmask to change
- * @val: New value for bitmask
- *
+ * If async is true:
  * With most buses the read must be done synchronously so this is most
  * useful for devices with a cache which do not need to interact with
  * the hardware to determine the current register value.
  *
  * Returns zero for success, a negative number on error.
  */
-int regmap_update_bits_async(struct regmap *map, unsigned int reg,
-			     unsigned int mask, unsigned int val)
+int regmap_update_bits_base(struct regmap *map, unsigned int reg,
+			    unsigned int mask, unsigned int val,
+			    bool *change, bool async, bool force)
 {
 	int ret;
 
 	map->lock(map->lock_arg);
 
-	map->async = true;
+	map->async = async;
 
-	ret = _regmap_update_bits(map, reg, mask, val, NULL, false);
+	ret = _regmap_update_bits(map, reg, mask, val, change, force);
 
 	map->async = false;
 
@@ -2725,69 +2660,7 @@
 
 	return ret;
 }
-EXPORT_SYMBOL_GPL(regmap_update_bits_async);
-
-/**
- * regmap_update_bits_check: Perform a read/modify/write cycle on the
- *                           register map and report if updated
- *
- * @map: Register map to update
- * @reg: Register to update
- * @mask: Bitmask to change
- * @val: New value for bitmask
- * @change: Boolean indicating if a write was done
- *
- * Returns zero for success, a negative number on error.
- */
-int regmap_update_bits_check(struct regmap *map, unsigned int reg,
-			     unsigned int mask, unsigned int val,
-			     bool *change)
-{
-	int ret;
-
-	map->lock(map->lock_arg);
-	ret = _regmap_update_bits(map, reg, mask, val, change, false);
-	map->unlock(map->lock_arg);
-	return ret;
-}
-EXPORT_SYMBOL_GPL(regmap_update_bits_check);
-
-/**
- * regmap_update_bits_check_async: Perform a read/modify/write cycle on the
- *                                 register map asynchronously and report if
- *                                 updated
- *
- * @map: Register map to update
- * @reg: Register to update
- * @mask: Bitmask to change
- * @val: New value for bitmask
- * @change: Boolean indicating if a write was done
- *
- * With most buses the read must be done synchronously so this is most
- * useful for devices with a cache which do not need to interact with
- * the hardware to determine the current register value.
- *
- * Returns zero for success, a negative number on error.
- */
-int regmap_update_bits_check_async(struct regmap *map, unsigned int reg,
-				   unsigned int mask, unsigned int val,
-				   bool *change)
-{
-	int ret;
-
-	map->lock(map->lock_arg);
-
-	map->async = true;
-
-	ret = _regmap_update_bits(map, reg, mask, val, change, false);
-
-	map->async = false;
-
-	map->unlock(map->lock_arg);
-
-	return ret;
-}
-EXPORT_SYMBOL_GPL(regmap_update_bits_check_async);
+EXPORT_SYMBOL_GPL(regmap_update_bits_base);
 
 void regmap_async_complete_cb(struct regmap_async *async, int ret)
 {
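With the hunks above, the regmap_update_bits(), regmap_write_bits(), regmap_update_bits_async() and the *_check()/*_check_async() entry points all collapse into the single exported regmap_update_bits_base(); the new reg_stride_order field lets regmap_get_offset() (not shown in this diff) presumably shift rather than multiply when the stride is a power of two. As a sketch of the equivalences, using only the argument values the removed bodies passed to _regmap_update_bits() (the ex_* wrapper names are hypothetical):

	#include <linux/regmap.h>

	/* Hypothetical wrappers restating how the removed helpers map onto
	 * regmap_update_bits_base(map, reg, mask, val, change, async, force).
	 */
	static inline int ex_update_bits(struct regmap *map, unsigned int reg,
					 unsigned int mask, unsigned int val)
	{
		return regmap_update_bits_base(map, reg, mask, val, NULL, false, false);
	}

	static inline int ex_write_bits(struct regmap *map, unsigned int reg,
					unsigned int mask, unsigned int val)
	{
		/* force = true: write even if the current value already matches */
		return regmap_update_bits_base(map, reg, mask, val, NULL, false, true);
	}

	static inline int ex_update_bits_check_async(struct regmap *map,
						     unsigned int reg,
						     unsigned int mask,
						     unsigned int val, bool *change)
	{
		/* async = true, and report whether a write actually happened */
		return regmap_update_bits_base(map, reg, mask, val, change, true, false);
	}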
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig
index 33db740..c346be6 100644
--- a/drivers/clocksource/Kconfig
+++ b/drivers/clocksource/Kconfig
@@ -160,6 +160,7 @@
 config CLKSRC_LPC32XX
 	bool "Clocksource for LPC32XX" if COMPILE_TEST
 	depends on GENERIC_CLOCKEVENTS && HAS_IOMEM
+	depends on ARM
 	select CLKSRC_MMIO
 	select CLKSRC_OF
 	help
diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c
index c64d543..5152b38 100644
--- a/drivers/clocksource/arm_arch_timer.c
+++ b/drivers/clocksource/arm_arch_timer.c
@@ -32,6 +32,14 @@
 #define CNTTIDR		0x08
 #define CNTTIDR_VIRT(n)	(BIT(1) << ((n) * 4))
 
+#define CNTACR(n)	(0x40 + ((n) * 4))
+#define CNTACR_RPCT	BIT(0)
+#define CNTACR_RVCT	BIT(1)
+#define CNTACR_RFRQ	BIT(2)
+#define CNTACR_RVOFF	BIT(3)
+#define CNTACR_RWVT	BIT(4)
+#define CNTACR_RWPT	BIT(5)
+
 #define CNTVCT_LO	0x08
 #define CNTVCT_HI	0x0c
 #define CNTFRQ		0x10
@@ -67,7 +75,7 @@
 
 static struct clock_event_device __percpu *arch_timer_evt;
 
-static bool arch_timer_use_virtual = true;
+static enum ppi_nr arch_timer_uses_ppi = VIRT_PPI;
 static bool arch_timer_c3stop;
 static bool arch_timer_mem_use_virtual;
 
@@ -263,14 +271,22 @@
 		clk->name = "arch_sys_timer";
 		clk->rating = 450;
 		clk->cpumask = cpumask_of(smp_processor_id());
-		if (arch_timer_use_virtual) {
-			clk->irq = arch_timer_ppi[VIRT_PPI];
+		clk->irq = arch_timer_ppi[arch_timer_uses_ppi];
+		switch (arch_timer_uses_ppi) {
+		case VIRT_PPI:
 			clk->set_state_shutdown = arch_timer_shutdown_virt;
+			clk->set_state_oneshot_stopped = arch_timer_shutdown_virt;
 			clk->set_next_event = arch_timer_set_next_event_virt;
-		} else {
-			clk->irq = arch_timer_ppi[PHYS_SECURE_PPI];
+			break;
+		case PHYS_SECURE_PPI:
+		case PHYS_NONSECURE_PPI:
+		case HYP_PPI:
 			clk->set_state_shutdown = arch_timer_shutdown_phys;
+			clk->set_state_oneshot_stopped = arch_timer_shutdown_phys;
 			clk->set_next_event = arch_timer_set_next_event_phys;
+			break;
+		default:
+			BUG();
 		}
 	} else {
 		clk->features |= CLOCK_EVT_FEAT_DYNIRQ;
@@ -279,10 +295,12 @@
 		clk->cpumask = cpu_all_mask;
 		if (arch_timer_mem_use_virtual) {
 			clk->set_state_shutdown = arch_timer_shutdown_virt_mem;
+			clk->set_state_oneshot_stopped = arch_timer_shutdown_virt_mem;
 			clk->set_next_event =
 				arch_timer_set_next_event_virt_mem;
 		} else {
 			clk->set_state_shutdown = arch_timer_shutdown_phys_mem;
+			clk->set_state_oneshot_stopped = arch_timer_shutdown_phys_mem;
 			clk->set_next_event =
 				arch_timer_set_next_event_phys_mem;
 		}
@@ -338,17 +356,20 @@
 	arch_timer_set_cntkctl(cntkctl);
 }
 
+static bool arch_timer_has_nonsecure_ppi(void)
+{
+	return (arch_timer_uses_ppi == PHYS_SECURE_PPI &&
+		arch_timer_ppi[PHYS_NONSECURE_PPI]);
+}
+
 static int arch_timer_setup(struct clock_event_device *clk)
 {
 	__arch_timer_setup(ARCH_CP15_TIMER, clk);
 
-	if (arch_timer_use_virtual)
-		enable_percpu_irq(arch_timer_ppi[VIRT_PPI], 0);
-	else {
-		enable_percpu_irq(arch_timer_ppi[PHYS_SECURE_PPI], 0);
-		if (arch_timer_ppi[PHYS_NONSECURE_PPI])
-			enable_percpu_irq(arch_timer_ppi[PHYS_NONSECURE_PPI], 0);
-	}
+	enable_percpu_irq(arch_timer_ppi[arch_timer_uses_ppi], 0);
+
+	if (arch_timer_has_nonsecure_ppi())
+		enable_percpu_irq(arch_timer_ppi[PHYS_NONSECURE_PPI], 0);
 
 	arch_counter_set_user_access();
 	if (IS_ENABLED(CONFIG_ARM_ARCH_TIMER_EVTSTREAM))
@@ -390,7 +411,7 @@
 		     (unsigned long)arch_timer_rate / 1000000,
 		     (unsigned long)(arch_timer_rate / 10000) % 100,
 		     type & ARCH_CP15_TIMER ?
-			arch_timer_use_virtual ? "virt" : "phys" :
+		     (arch_timer_uses_ppi == VIRT_PPI) ? "virt" : "phys" :
 			"",
 		     type == (ARCH_CP15_TIMER | ARCH_MEM_TIMER) ?  "/" : "",
 		     type & ARCH_MEM_TIMER ?
@@ -460,7 +481,7 @@
 
 	/* Register the CP15 based counter if we have one */
 	if (type & ARCH_CP15_TIMER) {
-		if (IS_ENABLED(CONFIG_ARM64) || arch_timer_use_virtual)
+		if (IS_ENABLED(CONFIG_ARM64) || arch_timer_uses_ppi == VIRT_PPI)
 			arch_timer_read_counter = arch_counter_get_cntvct;
 		else
 			arch_timer_read_counter = arch_counter_get_cntpct;
@@ -490,13 +511,9 @@
 	pr_debug("arch_timer_teardown disable IRQ%d cpu #%d\n",
 		 clk->irq, smp_processor_id());
 
-	if (arch_timer_use_virtual)
-		disable_percpu_irq(arch_timer_ppi[VIRT_PPI]);
-	else {
-		disable_percpu_irq(arch_timer_ppi[PHYS_SECURE_PPI]);
-		if (arch_timer_ppi[PHYS_NONSECURE_PPI])
-			disable_percpu_irq(arch_timer_ppi[PHYS_NONSECURE_PPI]);
-	}
+	disable_percpu_irq(arch_timer_ppi[arch_timer_uses_ppi]);
+	if (arch_timer_has_nonsecure_ppi())
+		disable_percpu_irq(arch_timer_ppi[PHYS_NONSECURE_PPI]);
 
 	clk->set_state_shutdown(clk);
 }
@@ -562,12 +579,14 @@
 		goto out;
 	}
 
-	if (arch_timer_use_virtual) {
-		ppi = arch_timer_ppi[VIRT_PPI];
+	ppi = arch_timer_ppi[arch_timer_uses_ppi];
+	switch (arch_timer_uses_ppi) {
+	case VIRT_PPI:
 		err = request_percpu_irq(ppi, arch_timer_handler_virt,
 					 "arch_timer", arch_timer_evt);
-	} else {
-		ppi = arch_timer_ppi[PHYS_SECURE_PPI];
+		break;
+	case PHYS_SECURE_PPI:
+	case PHYS_NONSECURE_PPI:
 		err = request_percpu_irq(ppi, arch_timer_handler_phys,
 					 "arch_timer", arch_timer_evt);
 		if (!err && arch_timer_ppi[PHYS_NONSECURE_PPI]) {
@@ -578,6 +597,13 @@
 				free_percpu_irq(arch_timer_ppi[PHYS_SECURE_PPI],
 						arch_timer_evt);
 		}
+		break;
+	case HYP_PPI:
+		err = request_percpu_irq(ppi, arch_timer_handler_phys,
+					 "arch_timer", arch_timer_evt);
+		break;
+	default:
+		BUG();
 	}
 
 	if (err) {
@@ -602,15 +628,10 @@
 out_unreg_notify:
 	unregister_cpu_notifier(&arch_timer_cpu_nb);
 out_free_irq:
-	if (arch_timer_use_virtual)
-		free_percpu_irq(arch_timer_ppi[VIRT_PPI], arch_timer_evt);
-	else {
-		free_percpu_irq(arch_timer_ppi[PHYS_SECURE_PPI],
+	free_percpu_irq(arch_timer_ppi[arch_timer_uses_ppi], arch_timer_evt);
+	if (arch_timer_has_nonsecure_ppi())
+		free_percpu_irq(arch_timer_ppi[PHYS_NONSECURE_PPI],
 				arch_timer_evt);
-		if (arch_timer_ppi[PHYS_NONSECURE_PPI])
-			free_percpu_irq(arch_timer_ppi[PHYS_NONSECURE_PPI],
-					arch_timer_evt);
-	}
 
 out_free:
 	free_percpu(arch_timer_evt);
@@ -697,12 +718,25 @@
 	 *
 	 * If no interrupt provided for virtual timer, we'll have to
 	 * stick to the physical timer. It'd better be accessible...
+	 *
+	 * On ARMv8.1 with VH extensions, the kernel runs in HYP. With
+	 * VHE, accesses to CNTP_*_EL1 registers are silently redirected
+	 * to their CNTHP_*_EL2 counterparts, which use a different PPI
+	 * number.
 	 */
 	if (is_hyp_mode_available() || !arch_timer_ppi[VIRT_PPI]) {
-		arch_timer_use_virtual = false;
+		bool has_ppi;
 
-		if (!arch_timer_ppi[PHYS_SECURE_PPI] ||
-		    !arch_timer_ppi[PHYS_NONSECURE_PPI]) {
+		if (is_kernel_in_hyp_mode()) {
+			arch_timer_uses_ppi = HYP_PPI;
+			has_ppi = !!arch_timer_ppi[HYP_PPI];
+		} else {
+			arch_timer_uses_ppi = PHYS_SECURE_PPI;
+			has_ppi = (!!arch_timer_ppi[PHYS_SECURE_PPI] ||
+				   !!arch_timer_ppi[PHYS_NONSECURE_PPI]);
+		}
+
+		if (!has_ppi) {
 			pr_warn("arch_timer: No interrupt available, giving up\n");
 			return;
 		}
@@ -735,7 +769,7 @@
 	 */
 	if (IS_ENABLED(CONFIG_ARM) &&
 	    of_property_read_bool(np, "arm,cpu-registers-not-fw-configured"))
-			arch_timer_use_virtual = false;
+		arch_timer_uses_ppi = PHYS_SECURE_PPI;
 
 	arch_timer_init();
 }
@@ -757,7 +791,6 @@
 	}
 
 	cnttidr = readl_relaxed(cntctlbase + CNTTIDR);
-	iounmap(cntctlbase);
 
 	/*
 	 * Try to find a virtual capable frame. Otherwise fall back to a
@@ -765,20 +798,31 @@
 	 */
 	for_each_available_child_of_node(np, frame) {
 		int n;
+		u32 cntacr;
 
 		if (of_property_read_u32(frame, "frame-number", &n)) {
 			pr_err("arch_timer: Missing frame-number\n");
-			of_node_put(best_frame);
 			of_node_put(frame);
-			return;
+			goto out;
 		}
 
-		if (cnttidr & CNTTIDR_VIRT(n)) {
+		/* Try enabling everything, and see what sticks */
+		cntacr = CNTACR_RFRQ | CNTACR_RWPT | CNTACR_RPCT |
+			 CNTACR_RWVT | CNTACR_RVOFF | CNTACR_RVCT;
+		writel_relaxed(cntacr, cntctlbase + CNTACR(n));
+		cntacr = readl_relaxed(cntctlbase + CNTACR(n));
+
+		if ((cnttidr & CNTTIDR_VIRT(n)) &&
+		    !(~cntacr & (CNTACR_RWVT | CNTACR_RVCT))) {
 			of_node_put(best_frame);
 			best_frame = frame;
 			arch_timer_mem_use_virtual = true;
 			break;
 		}
+
+		if (~cntacr & (CNTACR_RWPT | CNTACR_RPCT))
+			continue;
+
 		of_node_put(best_frame);
 		best_frame = of_node_get(frame);
 	}
@@ -786,24 +830,26 @@
 	base = arch_counter_base = of_iomap(best_frame, 0);
 	if (!base) {
 		pr_err("arch_timer: Can't map frame's registers\n");
-		of_node_put(best_frame);
-		return;
+		goto out;
 	}
 
 	if (arch_timer_mem_use_virtual)
 		irq = irq_of_parse_and_map(best_frame, 1);
 	else
 		irq = irq_of_parse_and_map(best_frame, 0);
-	of_node_put(best_frame);
+
 	if (!irq) {
 		pr_err("arch_timer: Frame missing %s irq",
 		       arch_timer_mem_use_virtual ? "virt" : "phys");
-		return;
+		goto out;
 	}
 
 	arch_timer_detect_rate(base, np);
 	arch_timer_mem_register(base, irq);
 	arch_timer_common_init();
+out:
+	iounmap(cntctlbase);
+	of_node_put(best_frame);
 }
 CLOCKSOURCE_OF_DECLARE(armv7_arch_timer_mem, "arm,armv7-timer-mem",
 		       arch_timer_mem_init);
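The frame-selection loop above now probes CNTACR by writing every access bit and reading the register back to see which ones stick, instead of trusting CNTTIDR alone. The two read-back predicates it applies can be restated as standalone helpers (the helper names are hypothetical; the CNTACR_* masks are the ones defined in this patch):

	/* A virtual-capable frame must advertise a virtual timer in CNTTIDR
	 * and keep both virtual-counter and virtual-timer access enabled.
	 */
	static bool frame_usable_virt(u32 cnttidr, int n, u32 cntacr)
	{
		return (cnttidr & CNTTIDR_VIRT(n)) &&
		       !(~cntacr & (CNTACR_RWVT | CNTACR_RVCT));
	}

	/* Otherwise the frame is only a fallback if physical counter and
	 * physical timer access both stuck after the write-and-read-back.
	 */
	static bool frame_usable_phys(u32 cntacr)
	{
		return !(~cntacr & (CNTACR_RWPT | CNTACR_RPCT));
	}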
diff --git a/drivers/clocksource/arm_global_timer.c b/drivers/clocksource/arm_global_timer.c
index d189d8c..9df0d16 100644
--- a/drivers/clocksource/arm_global_timer.c
+++ b/drivers/clocksource/arm_global_timer.c
@@ -16,6 +16,7 @@
 #include <linux/clockchips.h>
 #include <linux/cpu.h>
 #include <linux/clk.h>
+#include <linux/delay.h>
 #include <linux/err.h>
 #include <linux/io.h>
 #include <linux/of.h>
@@ -174,6 +175,7 @@
 	clk->set_state_shutdown = gt_clockevent_shutdown;
 	clk->set_state_periodic = gt_clockevent_set_periodic;
 	clk->set_state_oneshot = gt_clockevent_shutdown;
+	clk->set_state_oneshot_stopped = gt_clockevent_shutdown;
 	clk->set_next_event = gt_clockevent_set_next_event;
 	clk->cpumask = cpumask_of(cpu);
 	clk->rating = 300;
@@ -221,6 +223,21 @@
 }
 #endif
 
+static unsigned long gt_read_long(void)
+{
+	return readl_relaxed(gt_base + GT_COUNTER0);
+}
+
+static struct delay_timer gt_delay_timer = {
+	.read_current_timer = gt_read_long,
+};
+
+static void __init gt_delay_timer_init(void)
+{
+	gt_delay_timer.freq = gt_clk_rate;
+	register_current_timer_delay(&gt_delay_timer);
+}
+
 static void __init gt_clocksource_init(void)
 {
 	writel(0, gt_base + GT_CONTROL);
@@ -317,6 +334,7 @@
 	/* Immediately configure the timer on the boot CPU */
 	gt_clocksource_init();
 	gt_clockevents_init(this_cpu_ptr(gt_evt));
+	gt_delay_timer_init();
 
 	return;
 
diff --git a/drivers/clocksource/exynos_mct.c b/drivers/clocksource/exynos_mct.c
index ff44082..be09bc0 100644
--- a/drivers/clocksource/exynos_mct.c
+++ b/drivers/clocksource/exynos_mct.c
@@ -313,6 +313,7 @@
 	.set_state_periodic	= mct_set_state_periodic,
 	.set_state_shutdown	= mct_set_state_shutdown,
 	.set_state_oneshot	= mct_set_state_shutdown,
+	.set_state_oneshot_stopped = mct_set_state_shutdown,
 	.tick_resume		= mct_set_state_shutdown,
 };
 
@@ -452,6 +453,7 @@
 	evt->set_state_periodic = set_state_periodic;
 	evt->set_state_shutdown = set_state_shutdown;
 	evt->set_state_oneshot = set_state_shutdown;
+	evt->set_state_oneshot_stopped = set_state_shutdown;
 	evt->tick_resume = set_state_shutdown;
 	evt->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT;
 	evt->rating = 450;
diff --git a/drivers/clocksource/rockchip_timer.c b/drivers/clocksource/rockchip_timer.c
index 8c77a52..b991b28 100644
--- a/drivers/clocksource/rockchip_timer.c
+++ b/drivers/clocksource/rockchip_timer.c
@@ -122,23 +122,23 @@
 	pclk = of_clk_get_by_name(np, "pclk");
 	if (IS_ERR(pclk)) {
 		pr_err("Failed to get pclk for '%s'\n", TIMER_NAME);
-		return;
+		goto out_unmap;
 	}
 
 	if (clk_prepare_enable(pclk)) {
 		pr_err("Failed to enable pclk for '%s'\n", TIMER_NAME);
-		return;
+		goto out_unmap;
 	}
 
 	timer_clk = of_clk_get_by_name(np, "timer");
 	if (IS_ERR(timer_clk)) {
 		pr_err("Failed to get timer clock for '%s'\n", TIMER_NAME);
-		return;
+		goto out_timer_clk;
 	}
 
 	if (clk_prepare_enable(timer_clk)) {
 		pr_err("Failed to enable timer clock\n");
-		return;
+		goto out_timer_clk;
 	}
 
 	bc_timer.freq = clk_get_rate(timer_clk);
@@ -146,7 +146,7 @@
 	irq = irq_of_parse_and_map(np, 0);
 	if (!irq) {
 		pr_err("Failed to map interrupts for '%s'\n", TIMER_NAME);
-		return;
+		goto out_irq;
 	}
 
 	ce->name = TIMER_NAME;
@@ -164,10 +164,19 @@
 	ret = request_irq(irq, rk_timer_interrupt, IRQF_TIMER, TIMER_NAME, ce);
 	if (ret) {
 		pr_err("Failed to initialize '%s': %d\n", TIMER_NAME, ret);
-		return;
+		goto out_irq;
 	}
 
 	clockevents_config_and_register(ce, bc_timer.freq, 1, UINT_MAX);
+
+	return;
+
+out_irq:
+	clk_disable_unprepare(timer_clk);
+out_timer_clk:
+	clk_disable_unprepare(pclk);
+out_unmap:
+	iounmap(bc_timer.base);
 }
 
 CLOCKSOURCE_OF_DECLARE(rk_timer, "rockchip,rk3288-timer", rk_timer_init);
diff --git a/drivers/clocksource/time-lpc32xx.c b/drivers/clocksource/time-lpc32xx.c
index 1316876..daae61e 100644
--- a/drivers/clocksource/time-lpc32xx.c
+++ b/drivers/clocksource/time-lpc32xx.c
@@ -18,6 +18,7 @@
 #include <linux/clk.h>
 #include <linux/clockchips.h>
 #include <linux/clocksource.h>
+#include <linux/delay.h>
 #include <linux/interrupt.h>
 #include <linux/irq.h>
 #include <linux/kernel.h>
@@ -43,6 +44,7 @@
 struct lpc32xx_clock_event_ddata {
 	struct clock_event_device evtdev;
 	void __iomem *base;
+	u32 ticks_per_jiffy;
 };
 
 /* Needed for the sched clock */
@@ -53,6 +55,15 @@
 	return readl(clocksource_timer_counter);
 }
 
+static unsigned long lpc32xx_delay_timer_read(void)
+{
+	return readl(clocksource_timer_counter);
+}
+
+static struct delay_timer lpc32xx_delay_timer = {
+	.read_current_timer = lpc32xx_delay_timer_read,
+};
+
 static int lpc32xx_clkevt_next_event(unsigned long delta,
 				     struct clock_event_device *evtdev)
 {
@@ -60,14 +71,13 @@
 		container_of(evtdev, struct lpc32xx_clock_event_ddata, evtdev);
 
 	/*
-	 * Place timer in reset and program the delta in the prescale
-	 * register (PR). When the prescale counter matches the value
-	 * in PR the counter register is incremented and the compare
-	 * match will trigger. After setup the timer is released from
-	 * reset and enabled.
+	 * Place timer in reset and program the delta in the match
+	 * channel 0 (MR0). When the timer counter matches the value
+	 * in MR0 register the match will trigger an interrupt.
+	 * After setup the timer is released from reset and enabled.
 	 */
 	writel_relaxed(LPC32XX_TIMER_TCR_CRST, ddata->base + LPC32XX_TIMER_TCR);
-	writel_relaxed(delta, ddata->base + LPC32XX_TIMER_PR);
+	writel_relaxed(delta, ddata->base + LPC32XX_TIMER_MR0);
 	writel_relaxed(LPC32XX_TIMER_TCR_CEN, ddata->base + LPC32XX_TIMER_TCR);
 
 	return 0;
@@ -86,11 +96,39 @@
 
 static int lpc32xx_clkevt_oneshot(struct clock_event_device *evtdev)
 {
+	struct lpc32xx_clock_event_ddata *ddata =
+		container_of(evtdev, struct lpc32xx_clock_event_ddata, evtdev);
+
 	/*
 	 * When using oneshot, we must also disable the timer
 	 * to wait for the first call to set_next_event().
 	 */
-	return lpc32xx_clkevt_shutdown(evtdev);
+	writel_relaxed(0, ddata->base + LPC32XX_TIMER_TCR);
+
+	/* Enable interrupt, reset on match and stop on match (MCR). */
+	writel_relaxed(LPC32XX_TIMER_MCR_MR0I | LPC32XX_TIMER_MCR_MR0R |
+		       LPC32XX_TIMER_MCR_MR0S, ddata->base + LPC32XX_TIMER_MCR);
+	return 0;
+}
+
+static int lpc32xx_clkevt_periodic(struct clock_event_device *evtdev)
+{
+	struct lpc32xx_clock_event_ddata *ddata =
+		container_of(evtdev, struct lpc32xx_clock_event_ddata, evtdev);
+
+	/* Enable interrupt and reset on match. */
+	writel_relaxed(LPC32XX_TIMER_MCR_MR0I | LPC32XX_TIMER_MCR_MR0R,
+		       ddata->base + LPC32XX_TIMER_MCR);
+
+	/*
+	 * Place timer in reset and program the delta in the match
+	 * channel 0 (MR0).
+	 */
+	writel_relaxed(LPC32XX_TIMER_TCR_CRST, ddata->base + LPC32XX_TIMER_TCR);
+	writel_relaxed(ddata->ticks_per_jiffy, ddata->base + LPC32XX_TIMER_MR0);
+	writel_relaxed(LPC32XX_TIMER_TCR_CEN, ddata->base + LPC32XX_TIMER_TCR);
+
+	return 0;
 }
 
 static irqreturn_t lpc32xx_clock_event_handler(int irq, void *dev_id)
@@ -108,11 +146,13 @@
 static struct lpc32xx_clock_event_ddata lpc32xx_clk_event_ddata = {
 	.evtdev = {
 		.name			= "lpc3220 clockevent",
-		.features		= CLOCK_EVT_FEAT_ONESHOT,
+		.features		= CLOCK_EVT_FEAT_ONESHOT |
+					  CLOCK_EVT_FEAT_PERIODIC,
 		.rating			= 300,
 		.set_next_event		= lpc32xx_clkevt_next_event,
 		.set_state_shutdown	= lpc32xx_clkevt_shutdown,
 		.set_state_oneshot	= lpc32xx_clkevt_oneshot,
+		.set_state_periodic	= lpc32xx_clkevt_periodic,
 	},
 };
 
@@ -162,6 +202,8 @@
 	}
 
 	clocksource_timer_counter = base + LPC32XX_TIMER_TC;
+	lpc32xx_delay_timer.freq = rate;
+	register_current_timer_delay(&lpc32xx_delay_timer);
 	sched_clock_register(lpc32xx_read_sched_clock, 32, rate);
 
 	return 0;
@@ -210,18 +252,16 @@
 
 	/*
 	 * Disable timer and clear any pending interrupt (IR) on match
-	 * channel 0 (MR0). Configure a compare match value of 1 on MR0
-	 * and enable interrupt, reset on match and stop on match (MCR).
+	 * channel 0 (MR0). Clear the prescaler as it's not used.
 	 */
 	writel_relaxed(0, base + LPC32XX_TIMER_TCR);
+	writel_relaxed(0, base + LPC32XX_TIMER_PR);
 	writel_relaxed(0, base + LPC32XX_TIMER_CTCR);
 	writel_relaxed(LPC32XX_TIMER_IR_MR0INT, base + LPC32XX_TIMER_IR);
-	writel_relaxed(1, base + LPC32XX_TIMER_MR0);
-	writel_relaxed(LPC32XX_TIMER_MCR_MR0I | LPC32XX_TIMER_MCR_MR0R |
-		       LPC32XX_TIMER_MCR_MR0S, base + LPC32XX_TIMER_MCR);
 
 	rate = clk_get_rate(clk);
 	lpc32xx_clk_event_ddata.base = base;
+	lpc32xx_clk_event_ddata.ticks_per_jiffy = DIV_ROUND_CLOSEST(rate, HZ);
 	clockevents_config_and_register(&lpc32xx_clk_event_ddata.evtdev,
 					rate, 1, -1);
 
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index cd83d47..3a4b39a 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -1431,7 +1431,7 @@
 	if (!all_cpu_data)
 		return -ENOMEM;
 
-	if (static_cpu_has_safe(X86_FEATURE_HWP) && !no_hwp) {
+	if (static_cpu_has(X86_FEATURE_HWP) && !no_hwp) {
 		pr_info("intel_pstate: HWP enabled\n");
 		hwp_active++;
 	}
diff --git a/drivers/edac/Kconfig b/drivers/edac/Kconfig
index ef25000..37755e6 100644
--- a/drivers/edac/Kconfig
+++ b/drivers/edac/Kconfig
@@ -367,14 +367,30 @@
 	  Support for error detection and correction on the
 	  Cavium Octeon family of SOCs.
 
-config EDAC_ALTERA_MC
-	bool "Altera SDRAM Memory Controller EDAC"
+config EDAC_ALTERA
+	bool "Altera SOCFPGA ECC"
 	depends on EDAC_MM_EDAC=y && ARCH_SOCFPGA
 	help
 	  Support for error detection and correction on the
-	  Altera SDRAM memory controller. Note that the
-	  preloader must initialize the SDRAM before loading
-	  the kernel.
+	  Altera SOCs. This must be selected for SDRAM ECC.
+	  Note that the preloader must initialize the SDRAM
+	  before loading the kernel.
+
+config EDAC_ALTERA_L2C
+	bool "Altera L2 Cache ECC"
+	depends on EDAC_ALTERA=y
+	select CACHE_L2X0
+	help
+	  Support for error detection and correction on the
+	  Altera L2 cache Memory for Altera SoCs. This option
+	  requires L2 cache so it will force that selection.
+
+config EDAC_ALTERA_OCRAM
+	bool "Altera On-Chip RAM ECC"
+	depends on EDAC_ALTERA=y && SRAM && GENERIC_ALLOCATOR
+	help
+	  Support for error detection and correction on the
+	  Altera On-Chip RAM Memory for Altera SoCs.
 
 config EDAC_SYNOPSYS
 	tristate "Synopsys DDR Memory Controller"
diff --git a/drivers/edac/Makefile b/drivers/edac/Makefile
index be163e2..f9e4a3e 100644
--- a/drivers/edac/Makefile
+++ b/drivers/edac/Makefile
@@ -67,6 +67,6 @@
 obj-$(CONFIG_EDAC_OCTEON_LMC)		+= octeon_edac-lmc.o
 obj-$(CONFIG_EDAC_OCTEON_PCI)		+= octeon_edac-pci.o
 
-obj-$(CONFIG_EDAC_ALTERA_MC)		+= altera_edac.o
+obj-$(CONFIG_EDAC_ALTERA)		+= altera_edac.o
 obj-$(CONFIG_EDAC_SYNOPSYS)		+= synopsys_edac.o
 obj-$(CONFIG_EDAC_XGENE)		+= xgene_edac.o
diff --git a/drivers/edac/altera_edac.c b/drivers/edac/altera_edac.c
index 9296409..63e4209 100644
--- a/drivers/edac/altera_edac.c
+++ b/drivers/edac/altera_edac.c
@@ -1,5 +1,5 @@
 /*
- *  Copyright Altera Corporation (C) 2014-2015. All rights reserved.
+ *  Copyright Altera Corporation (C) 2014-2016. All rights reserved.
  *  Copyright 2011-2012 Calxeda, Inc.
  *
  * This program is free software; you can redistribute it and/or modify it
@@ -17,8 +17,10 @@
  * Adapted from the highbank_mc_edac driver.
  */
 
+#include <asm/cacheflush.h>
 #include <linux/ctype.h>
 #include <linux/edac.h>
+#include <linux/genalloc.h>
 #include <linux/interrupt.h>
 #include <linux/kernel.h>
 #include <linux/mfd/syscon.h>
@@ -34,6 +36,7 @@
 
 #define EDAC_MOD_STR		"altera_edac"
 #define EDAC_VERSION		"1"
+#define EDAC_DEVICE		"Altera"
 
 static const struct altr_sdram_prv_data c5_data = {
 	.ecc_ctrl_offset    = CV_CTLCFG_OFST,
@@ -75,6 +78,31 @@
 	.ue_set_mask        = A10_DIAGINT_TDERRA_MASK,
 };
 
+/************************** EDAC Device Defines **************************/
+
+/* OCRAM ECC Management Group Defines */
+#define ALTR_MAN_GRP_OCRAM_ECC_OFFSET   0x04
+#define ALTR_OCR_ECC_EN                 BIT(0)
+#define ALTR_OCR_ECC_INJS               BIT(1)
+#define ALTR_OCR_ECC_INJD               BIT(2)
+#define ALTR_OCR_ECC_SERR               BIT(3)
+#define ALTR_OCR_ECC_DERR               BIT(4)
+
+/* L2 ECC Management Group Defines */
+#define ALTR_MAN_GRP_L2_ECC_OFFSET      0x00
+#define ALTR_L2_ECC_EN                  BIT(0)
+#define ALTR_L2_ECC_INJS                BIT(1)
+#define ALTR_L2_ECC_INJD                BIT(2)
+
+#define ALTR_UE_TRIGGER_CHAR            'U'   /* Trigger for UE */
+#define ALTR_TRIGGER_READ_WRD_CNT       32    /* Line size x 4 */
+#define ALTR_TRIG_OCRAM_BYTE_SIZE       128   /* Line size x 4 */
+#define ALTR_TRIG_L2C_BYTE_SIZE         4096  /* Full Page */
+
+/*********************** EDAC Memory Controller Functions ****************/
+
+/* The SDRAM controller uses the EDAC Memory Controller framework.       */
+
 static irqreturn_t altr_sdram_mc_err_handler(int irq, void *dev_id)
 {
 	struct mem_ctl_info *mci = dev_id;
@@ -504,6 +532,466 @@
 
 module_platform_driver(altr_sdram_edac_driver);
 
+/************************* EDAC Parent Probe *************************/
+
+static const struct of_device_id altr_edac_device_of_match[];
+
+static const struct of_device_id altr_edac_of_match[] = {
+	{ .compatible = "altr,socfpga-ecc-manager" },
+	{},
+};
+MODULE_DEVICE_TABLE(of, altr_edac_of_match);
+
+static int altr_edac_probe(struct platform_device *pdev)
+{
+	of_platform_populate(pdev->dev.of_node, altr_edac_device_of_match,
+			     NULL, &pdev->dev);
+	return 0;
+}
+
+static struct platform_driver altr_edac_driver = {
+	.probe =  altr_edac_probe,
+	.driver = {
+		.name = "socfpga_ecc_manager",
+		.of_match_table = altr_edac_of_match,
+	},
+};
+module_platform_driver(altr_edac_driver);
+
+/************************* EDAC Device Functions *************************/
+
+/*
+ * EDAC Device Functions (shared between various IPs).
+ * The discrete memories use the EDAC Device framework. The probe
+ * and error handling functions are very similar between memories
+ * so they are shared. The memory allocation and freeing for EDAC
+ * trigger testing are different for each memory.
+ */
+
+const struct edac_device_prv_data ocramecc_data;
+const struct edac_device_prv_data l2ecc_data;
+
+struct edac_device_prv_data {
+	int (*setup)(struct platform_device *pdev, void __iomem *base);
+	int ce_clear_mask;
+	int ue_clear_mask;
+	char dbgfs_name[20];
+	void * (*alloc_mem)(size_t size, void **other);
+	void (*free_mem)(void *p, size_t size, void *other);
+	int ecc_enable_mask;
+	int ce_set_mask;
+	int ue_set_mask;
+	int trig_alloc_sz;
+};
+
+struct altr_edac_device_dev {
+	void __iomem *base;
+	int sb_irq;
+	int db_irq;
+	const struct edac_device_prv_data *data;
+	struct dentry *debugfs_dir;
+	char *edac_dev_name;
+};
+
+static irqreturn_t altr_edac_device_handler(int irq, void *dev_id)
+{
+	irqreturn_t ret_value = IRQ_NONE;
+	struct edac_device_ctl_info *dci = dev_id;
+	struct altr_edac_device_dev *drvdata = dci->pvt_info;
+	const struct edac_device_prv_data *priv = drvdata->data;
+
+	if (irq == drvdata->sb_irq) {
+		if (priv->ce_clear_mask)
+			writel(priv->ce_clear_mask, drvdata->base);
+		edac_device_handle_ce(dci, 0, 0, drvdata->edac_dev_name);
+		ret_value = IRQ_HANDLED;
+	} else if (irq == drvdata->db_irq) {
+		if (priv->ue_clear_mask)
+			writel(priv->ue_clear_mask, drvdata->base);
+		edac_device_handle_ue(dci, 0, 0, drvdata->edac_dev_name);
+		panic("\nEDAC:ECC_DEVICE[Uncorrectable errors]\n");
+		ret_value = IRQ_HANDLED;
+	} else {
+		WARN_ON(1);
+	}
+
+	return ret_value;
+}
+
+static ssize_t altr_edac_device_trig(struct file *file,
+				     const char __user *user_buf,
+				     size_t count, loff_t *ppos)
+
+{
+	u32 *ptemp, i, error_mask;
+	int result = 0;
+	u8 trig_type;
+	unsigned long flags;
+	struct edac_device_ctl_info *edac_dci = file->private_data;
+	struct altr_edac_device_dev *drvdata = edac_dci->pvt_info;
+	const struct edac_device_prv_data *priv = drvdata->data;
+	void *generic_ptr = edac_dci->dev;
+
+	if (!user_buf || get_user(trig_type, user_buf))
+		return -EFAULT;
+
+	if (!priv->alloc_mem)
+		return -ENOMEM;
+
+	/*
+	 * Note that generic_ptr is initialized to the device * but in
+	 * some alloc_functions, this is overridden and returns data.
+	 */
+	ptemp = priv->alloc_mem(priv->trig_alloc_sz, &generic_ptr);
+	if (!ptemp) {
+		edac_printk(KERN_ERR, EDAC_DEVICE,
+			    "Inject: Buffer Allocation error\n");
+		return -ENOMEM;
+	}
+
+	if (trig_type == ALTR_UE_TRIGGER_CHAR)
+		error_mask = priv->ue_set_mask;
+	else
+		error_mask = priv->ce_set_mask;
+
+	edac_printk(KERN_ALERT, EDAC_DEVICE,
+		    "Trigger Error Mask (0x%X)\n", error_mask);
+
+	local_irq_save(flags);
+	/* write ECC corrupted data out. */
+	for (i = 0; i < (priv->trig_alloc_sz / sizeof(*ptemp)); i++) {
+		/* Read data so we're in the correct state */
+		rmb();
+		if (ACCESS_ONCE(ptemp[i]))
+			result = -1;
+		/* Toggle Error bit (it is latched), leave ECC enabled */
+		writel(error_mask, drvdata->base);
+		writel(priv->ecc_enable_mask, drvdata->base);
+		ptemp[i] = i;
+	}
+	/* Ensure it has been written out */
+	wmb();
+	local_irq_restore(flags);
+
+	if (result)
+		edac_printk(KERN_ERR, EDAC_DEVICE, "Mem Not Cleared\n");
+
+	/* Read out written data. ECC error caused here */
+	for (i = 0; i < ALTR_TRIGGER_READ_WRD_CNT; i++)
+		if (ACCESS_ONCE(ptemp[i]) != i)
+			edac_printk(KERN_ERR, EDAC_DEVICE,
+				    "Read doesn't match written data\n");
+
+	if (priv->free_mem)
+		priv->free_mem(ptemp, priv->trig_alloc_sz, generic_ptr);
+
+	return count;
+}
+
+static const struct file_operations altr_edac_device_inject_fops = {
+	.open = simple_open,
+	.write = altr_edac_device_trig,
+	.llseek = generic_file_llseek,
+};
+
+static void altr_create_edacdev_dbgfs(struct edac_device_ctl_info *edac_dci,
+				      const struct edac_device_prv_data *priv)
+{
+	struct altr_edac_device_dev *drvdata = edac_dci->pvt_info;
+
+	if (!IS_ENABLED(CONFIG_EDAC_DEBUG))
+		return;
+
+	drvdata->debugfs_dir = edac_debugfs_create_dir(drvdata->edac_dev_name);
+	if (!drvdata->debugfs_dir)
+		return;
+
+	if (!edac_debugfs_create_file(priv->dbgfs_name, S_IWUSR,
+				      drvdata->debugfs_dir, edac_dci,
+				      &altr_edac_device_inject_fops))
+		debugfs_remove_recursive(drvdata->debugfs_dir);
+}
+
+static const struct of_device_id altr_edac_device_of_match[] = {
+#ifdef CONFIG_EDAC_ALTERA_L2C
+	{ .compatible = "altr,socfpga-l2-ecc", .data = (void *)&l2ecc_data },
+#endif
+#ifdef CONFIG_EDAC_ALTERA_OCRAM
+	{ .compatible = "altr,socfpga-ocram-ecc",
+	  .data = (void *)&ocramecc_data },
+#endif
+	{},
+};
+MODULE_DEVICE_TABLE(of, altr_edac_device_of_match);
+
+/*
+ * altr_edac_device_probe()
+ *	This is a generic EDAC device driver that will support
+ *	various Altera memory devices such as the L2 cache ECC and
+ *	OCRAM ECC as well as the memories for other peripherals.
+ *	Module specific initialization is done by passing the
+ *	function index in the device tree.
+ */
+static int altr_edac_device_probe(struct platform_device *pdev)
+{
+	struct edac_device_ctl_info *dci;
+	struct altr_edac_device_dev *drvdata;
+	struct resource *r;
+	int res = 0;
+	struct device_node *np = pdev->dev.of_node;
+	char *ecc_name = (char *)np->name;
+	static int dev_instance;
+
+	if (!devres_open_group(&pdev->dev, NULL, GFP_KERNEL)) {
+		edac_printk(KERN_ERR, EDAC_DEVICE,
+			    "Unable to open devm\n");
+		return -ENOMEM;
+	}
+
+	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!r) {
+		edac_printk(KERN_ERR, EDAC_DEVICE,
+			    "Unable to get mem resource\n");
+		res = -ENODEV;
+		goto fail;
+	}
+
+	if (!devm_request_mem_region(&pdev->dev, r->start, resource_size(r),
+				     dev_name(&pdev->dev))) {
+		edac_printk(KERN_ERR, EDAC_DEVICE,
+			    "%s:Error requesting mem region\n", ecc_name);
+		res = -EBUSY;
+		goto fail;
+	}
+
+	dci = edac_device_alloc_ctl_info(sizeof(*drvdata), ecc_name,
+					 1, ecc_name, 1, 0, NULL, 0,
+					 dev_instance++);
+
+	if (!dci) {
+		edac_printk(KERN_ERR, EDAC_DEVICE,
+			    "%s: Unable to allocate EDAC device\n", ecc_name);
+		res = -ENOMEM;
+		goto fail;
+	}
+
+	drvdata = dci->pvt_info;
+	dci->dev = &pdev->dev;
+	platform_set_drvdata(pdev, dci);
+	drvdata->edac_dev_name = ecc_name;
+
+	drvdata->base = devm_ioremap(&pdev->dev, r->start, resource_size(r));
+	if (!drvdata->base)
+		goto fail1;
+
+	/* Get driver specific data for this EDAC device */
+	drvdata->data = of_match_node(altr_edac_device_of_match, np)->data;
+
+	/* Check specific dependencies for the module */
+	if (drvdata->data->setup) {
+		res = drvdata->data->setup(pdev, drvdata->base);
+		if (res)
+			goto fail1;
+	}
+
+	drvdata->sb_irq = platform_get_irq(pdev, 0);
+	res = devm_request_irq(&pdev->dev, drvdata->sb_irq,
+			       altr_edac_device_handler,
+			       0, dev_name(&pdev->dev), dci);
+	if (res)
+		goto fail1;
+
+	drvdata->db_irq = platform_get_irq(pdev, 1);
+	res = devm_request_irq(&pdev->dev, drvdata->db_irq,
+			       altr_edac_device_handler,
+			       0, dev_name(&pdev->dev), dci);
+	if (res)
+		goto fail1;
+
+	dci->mod_name = "Altera ECC Manager";
+	dci->dev_name = drvdata->edac_dev_name;
+
+	res = edac_device_add_device(dci);
+	if (res)
+		goto fail1;
+
+	altr_create_edacdev_dbgfs(dci, drvdata->data);
+
+	devres_close_group(&pdev->dev, NULL);
+
+	return 0;
+
+fail1:
+	edac_device_free_ctl_info(dci);
+fail:
+	devres_release_group(&pdev->dev, NULL);
+	edac_printk(KERN_ERR, EDAC_DEVICE,
+		    "%s:Error setting up EDAC device: %d\n", ecc_name, res);
+
+	return res;
+}
+
+static int altr_edac_device_remove(struct platform_device *pdev)
+{
+	struct edac_device_ctl_info *dci = platform_get_drvdata(pdev);
+	struct altr_edac_device_dev *drvdata = dci->pvt_info;
+
+	debugfs_remove_recursive(drvdata->debugfs_dir);
+	edac_device_del_device(&pdev->dev);
+	edac_device_free_ctl_info(dci);
+
+	return 0;
+}
+
+static struct platform_driver altr_edac_device_driver = {
+	.probe =  altr_edac_device_probe,
+	.remove = altr_edac_device_remove,
+	.driver = {
+		.name = "altr_edac_device",
+		.of_match_table = altr_edac_device_of_match,
+	},
+};
+module_platform_driver(altr_edac_device_driver);
+
+/*********************** OCRAM EDAC Device Functions *********************/
+
+#ifdef CONFIG_EDAC_ALTERA_OCRAM
+
+static void *ocram_alloc_mem(size_t size, void **other)
+{
+	struct device_node *np;
+	struct gen_pool *gp;
+	void *sram_addr;
+
+	np = of_find_compatible_node(NULL, NULL, "altr,socfpga-ocram-ecc");
+	if (!np)
+		return NULL;
+
+	gp = of_gen_pool_get(np, "iram", 0);
+	of_node_put(np);
+	if (!gp)
+		return NULL;
+
+	sram_addr = (void *)gen_pool_alloc(gp, size);
+	if (!sram_addr)
+		return NULL;
+
+	memset(sram_addr, 0, size);
+	/* Ensure data is written out */
+	wmb();
+
+	/* Remember this handle for freeing  later */
+	*other = gp;
+
+	return sram_addr;
+}
+
+static void ocram_free_mem(void *p, size_t size, void *other)
+{
+	gen_pool_free((struct gen_pool *)other, (u32)p, size);
+}
+
+/*
+ * altr_ocram_check_deps()
+ *	Test for OCRAM cache ECC dependencies upon entry because
+ *	platform specific startup should have initialized the
+ *	On-Chip RAM memory and enabled the ECC.
+ *	Can't turn on ECC here because accessing un-initialized
+ *	memory will cause CE/UE errors possibly causing an ABORT.
+ */
+static int altr_ocram_check_deps(struct platform_device *pdev,
+				 void __iomem *base)
+{
+	if (readl(base) & ALTR_OCR_ECC_EN)
+		return 0;
+
+	edac_printk(KERN_ERR, EDAC_DEVICE,
+		    "OCRAM: No ECC present or ECC disabled.\n");
+	return -ENODEV;
+}
+
+const struct edac_device_prv_data ocramecc_data = {
+	.setup = altr_ocram_check_deps,
+	.ce_clear_mask = (ALTR_OCR_ECC_EN | ALTR_OCR_ECC_SERR),
+	.ue_clear_mask = (ALTR_OCR_ECC_EN | ALTR_OCR_ECC_DERR),
+	.dbgfs_name = "altr_ocram_trigger",
+	.alloc_mem = ocram_alloc_mem,
+	.free_mem = ocram_free_mem,
+	.ecc_enable_mask = ALTR_OCR_ECC_EN,
+	.ce_set_mask = (ALTR_OCR_ECC_EN | ALTR_OCR_ECC_INJS),
+	.ue_set_mask = (ALTR_OCR_ECC_EN | ALTR_OCR_ECC_INJD),
+	.trig_alloc_sz = ALTR_TRIG_OCRAM_BYTE_SIZE,
+};
+
+#endif	/* CONFIG_EDAC_ALTERA_OCRAM */
+
+/********************* L2 Cache EDAC Device Functions ********************/
+
+#ifdef CONFIG_EDAC_ALTERA_L2C
+
+static void *l2_alloc_mem(size_t size, void **other)
+{
+	struct device *dev = *other;
+	void *ptemp = devm_kzalloc(dev, size, GFP_KERNEL);
+
+	if (!ptemp)
+		return NULL;
+
+	/* Make sure everything is written out */
+	wmb();
+
+	/*
+	 * Clean all cache levels up to LoC (includes L2)
+	 * This ensures the corrupted data is written into
+	 * L2 cache for readback test (which causes ECC error).
+	 */
+	flush_cache_all();
+
+	return ptemp;
+}
+
+static void l2_free_mem(void *p, size_t size, void *other)
+{
+	struct device *dev = other;
+
+	if (dev && p)
+		devm_kfree(dev, p);
+}
+
+/*
+ * altr_l2_check_deps()
+ *	Test for L2 cache ECC dependencies upon entry because
+ *	platform specific startup should have initialized the L2
+ *	memory and enabled the ECC.
+ *	Bail if ECC is not enabled.
+ *	Note that L2 Cache Enable is forced at build time.
+ */
+static int altr_l2_check_deps(struct platform_device *pdev,
+			      void __iomem *base)
+{
+	if (readl(base) & ALTR_L2_ECC_EN)
+		return 0;
+
+	edac_printk(KERN_ERR, EDAC_DEVICE,
+		    "L2: No ECC present, or ECC disabled\n");
+	return -ENODEV;
+}
+
+const struct edac_device_prv_data l2ecc_data = {
+	.setup = altr_l2_check_deps,
+	.ce_clear_mask = 0,
+	.ue_clear_mask = 0,
+	.dbgfs_name = "altr_l2_trigger",
+	.alloc_mem = l2_alloc_mem,
+	.free_mem = l2_free_mem,
+	.ecc_enable_mask = ALTR_L2_ECC_EN,
+	.ce_set_mask = (ALTR_L2_ECC_EN | ALTR_L2_ECC_INJS),
+	.ue_set_mask = (ALTR_L2_ECC_EN | ALTR_L2_ECC_INJD),
+	.trig_alloc_sz = ALTR_TRIG_L2C_BYTE_SIZE,
+};
+
+#endif	/* CONFIG_EDAC_ALTERA_L2C */
+
 MODULE_LICENSE("GPL v2");
 MODULE_AUTHOR("Thor Thayer");
-MODULE_DESCRIPTION("EDAC Driver for Altera SDRAM Controller");
+MODULE_DESCRIPTION("EDAC Driver for Altera Memories");
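Because the probe, interrupt handler and debugfs trigger above are shared, adding ECC support for a further on-chip memory mostly reduces to supplying another edac_device_prv_data plus an of_device_id entry. A purely hypothetical sketch (the ALTR_FOO_* bits and the compatible string are invented for illustration):

	/* Hypothetical: register bits for an imaginary "foo" RAM ECC block. */
	#define ALTR_FOO_ECC_EN		BIT(0)
	#define ALTR_FOO_ECC_INJS	BIT(1)
	#define ALTR_FOO_ECC_INJD	BIT(2)

	const struct edac_device_prv_data fooecc_data = {
		.setup		= NULL,		/* no extra dependency check */
		.ce_clear_mask	= 0,
		.ue_clear_mask	= 0,
		.dbgfs_name	= "altr_foo_trigger",
		.alloc_mem	= l2_alloc_mem,	/* plain devm allocation suffices */
		.free_mem	= l2_free_mem,
		.ecc_enable_mask = ALTR_FOO_ECC_EN,
		.ce_set_mask	= (ALTR_FOO_ECC_EN | ALTR_FOO_ECC_INJS),
		.ue_set_mask	= (ALTR_FOO_ECC_EN | ALTR_FOO_ECC_INJD),
		.trig_alloc_sz	= ALTR_TRIG_L2C_BYTE_SIZE,
	};

	/* ...plus a matching entry in altr_edac_device_of_match[]:
	 *	{ .compatible = "altr,socfpga-foo-ecc", .data = (void *)&fooecc_data },
	 */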
diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
index 9eee13e..d87a475 100644
--- a/drivers/edac/amd64_edac.c
+++ b/drivers/edac/amd64_edac.c
@@ -1452,7 +1452,7 @@
 	u64 chan_off;
 	u64 dram_base		= get_dram_base(pvt, range);
 	u64 hole_off		= f10_dhar_offset(pvt);
-	u64 dct_sel_base_off	= (pvt->dct_sel_hi & 0xFFFFFC00) << 16;
+	u64 dct_sel_base_off	= (u64)(pvt->dct_sel_hi & 0xFFFFFC00) << 16;
 
 	if (hi_rng) {
 		/*
diff --git a/drivers/edac/debugfs.c b/drivers/edac/debugfs.c
index 54d2f66..92dbb7e 100644
--- a/drivers/edac/debugfs.c
+++ b/drivers/edac/debugfs.c
@@ -53,7 +53,7 @@
 
 void edac_debugfs_exit(void)
 {
-	debugfs_remove(edac_debugfs);
+	debugfs_remove_recursive(edac_debugfs);
 }
 
 int edac_create_debugfs_nodes(struct mem_ctl_info *mci)
diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c
index 8adfc16..1472f48 100644
--- a/drivers/edac/edac_mc.c
+++ b/drivers/edac/edac_mc.c
@@ -535,60 +535,21 @@
 
 	mutex_lock(&mem_ctls_mutex);
 
-	/* if this control struct has movd to offline state, we are done */
-	if (mci->op_state == OP_OFFLINE) {
+	if (mci->op_state != OP_RUNNING_POLL) {
 		mutex_unlock(&mem_ctls_mutex);
 		return;
 	}
 
-	/* Only poll controllers that are running polled and have a check */
-	if (edac_mc_assert_error_check_and_clear() && (mci->edac_check != NULL))
+	if (edac_mc_assert_error_check_and_clear())
 		mci->edac_check(mci);
 
 	mutex_unlock(&mem_ctls_mutex);
 
-	/* Reschedule */
+	/* Queue ourselves again. */
 	edac_queue_work(&mci->work, msecs_to_jiffies(edac_mc_get_poll_msec()));
 }
 
 /*
- * edac_mc_workq_setup
- *	initialize a workq item for this mci
- *	passing in the new delay period in msec
- *
- *	locking model:
- *
- *		called with the mem_ctls_mutex held
- */
-static void edac_mc_workq_setup(struct mem_ctl_info *mci, unsigned msec)
-{
-	edac_dbg(0, "\n");
-
-	/* if this instance is not in the POLL state, then simply return */
-	if (mci->op_state != OP_RUNNING_POLL)
-		return;
-
-	INIT_DELAYED_WORK(&mci->work, edac_mc_workq_function);
-
-	edac_queue_work(&mci->work, msecs_to_jiffies(msec));
-}
-
-/*
- * edac_mc_workq_teardown
- *	stop the workq processing on this mci
- *
- *	locking model:
- *
- *		called WITHOUT lock held
- */
-static void edac_mc_workq_teardown(struct mem_ctl_info *mci)
-{
-	mci->op_state = OP_OFFLINE;
-
-	edac_stop_work(&mci->work);
-}
-
-/*
  * edac_mc_reset_delay_period(unsigned long value)
  *
  *	user space has updated our poll period value, need to
@@ -771,12 +732,12 @@
 		goto fail1;
 	}
 
-	/* If there IS a check routine, then we are running POLLED */
-	if (mci->edac_check != NULL) {
-		/* This instance is NOW RUNNING */
+	if (mci->edac_check) {
 		mci->op_state = OP_RUNNING_POLL;
 
-		edac_mc_workq_setup(mci, edac_mc_get_poll_msec());
+		INIT_DELAYED_WORK(&mci->work, edac_mc_workq_function);
+		edac_queue_work(&mci->work, msecs_to_jiffies(edac_mc_get_poll_msec()));
+
 	} else {
 		mci->op_state = OP_RUNNING_INTERRUPT;
 	}
@@ -823,15 +784,16 @@
 		return NULL;
 	}
 
+	/* mark MCI offline: */
+	mci->op_state = OP_OFFLINE;
+
 	if (!del_mc_from_global_list(mci))
 		edac_mc_owner = NULL;
+
 	mutex_unlock(&mem_ctls_mutex);
 
-	/* flush workq processes */
-	edac_mc_workq_teardown(mci);
-
-	/* marking MCI offline */
-	mci->op_state = OP_OFFLINE;
+	if (mci->edac_check)
+		edac_stop_work(&mci->work);
 
 	/* remove from sysfs */
 	edac_remove_sysfs_mci_device(mci);
diff --git a/drivers/edac/edac_pci.c b/drivers/edac/edac_pci.c
index 9968538..8f2f289 100644
--- a/drivers/edac/edac_pci.c
+++ b/drivers/edac/edac_pci.c
@@ -195,58 +195,27 @@
 
 	mutex_lock(&edac_pci_ctls_mutex);
 
-	if (pci->op_state == OP_RUNNING_POLL) {
-		/* we might be in POLL mode, but there may NOT be a poll func
-		 */
-		if ((pci->edac_check != NULL) && edac_pci_get_check_errors())
-			pci->edac_check(pci);
-
-		/* if we are on a one second period, then use round */
-		msec = edac_pci_get_poll_msec();
-		if (msec == 1000)
-			delay = round_jiffies_relative(msecs_to_jiffies(msec));
-		else
-			delay = msecs_to_jiffies(msec);
-
-		/* Reschedule only if we are in POLL mode */
-		edac_queue_work(&pci->work, delay);
+	if (pci->op_state != OP_RUNNING_POLL) {
+		mutex_unlock(&edac_pci_ctls_mutex);
+		return;
 	}
 
+	if (edac_pci_get_check_errors())
+		pci->edac_check(pci);
+
+	/* if we are on a one second period, then use round */
+	msec = edac_pci_get_poll_msec();
+	if (msec == 1000)
+		delay = round_jiffies_relative(msecs_to_jiffies(msec));
+	else
+		delay = msecs_to_jiffies(msec);
+
+	edac_queue_work(&pci->work, delay);
+
 	mutex_unlock(&edac_pci_ctls_mutex);
 }
 
 /*
- * edac_pci_workq_setup()
- * 	initialize a workq item for this edac_pci instance
- * 	passing in the new delay period in msec
- *
- *	locking model:
- *		called when 'edac_pci_ctls_mutex' is locked
- */
-static void edac_pci_workq_setup(struct edac_pci_ctl_info *pci,
-				 unsigned int msec)
-{
-	edac_dbg(0, "\n");
-
-	INIT_DELAYED_WORK(&pci->work, edac_pci_workq_function);
-
-	edac_queue_work(&pci->work, msecs_to_jiffies(edac_pci_get_poll_msec()));
-}
-
-/*
- * edac_pci_workq_teardown()
- * 	stop the workq processing on this edac_pci instance
- */
-static void edac_pci_workq_teardown(struct edac_pci_ctl_info *pci)
-{
-	edac_dbg(0, "\n");
-
-	pci->op_state = OP_OFFLINE;
-
-	edac_stop_work(&pci->work);
-}
-
-/*
  * edac_pci_alloc_index: Allocate a unique PCI index number
  *
  * Return:
@@ -289,10 +258,12 @@
 		goto fail1;
 	}
 
-	if (pci->edac_check != NULL) {
+	if (pci->edac_check) {
 		pci->op_state = OP_RUNNING_POLL;
 
-		edac_pci_workq_setup(pci, 1000);
+		INIT_DELAYED_WORK(&pci->work, edac_pci_workq_function);
+		edac_queue_work(&pci->work, msecs_to_jiffies(edac_pci_get_poll_msec()));
+
 	} else {
 		pci->op_state = OP_RUNNING_INTERRUPT;
 	}
@@ -350,8 +321,8 @@
 
 	mutex_unlock(&edac_pci_ctls_mutex);
 
-	/* stop the workq timer */
-	edac_pci_workq_teardown(pci);
+	if (pci->edac_check)
+		edac_stop_work(&pci->work);
 
 	edac_printk(KERN_INFO, EDAC_PCI,
 		"Removed device %d for %s %s: DEV %s\n",
diff --git a/drivers/edac/mpc85xx_edac.c b/drivers/edac/mpc85xx_edac.c
index b7139c1..ca63d0d 100644
--- a/drivers/edac/mpc85xx_edac.c
+++ b/drivers/edac/mpc85xx_edac.c
@@ -1244,7 +1244,7 @@
 static int __init mpc85xx_mc_init(void)
 {
 	int res = 0;
-	u32 pvr = 0;
+	u32 __maybe_unused pvr = 0;
 
 	printk(KERN_INFO "Freescale(R) MPC85xx EDAC driver, "
 	       "(C) 2006 Montavista Software\n");
diff --git a/drivers/edac/xgene_edac.c b/drivers/edac/xgene_edac.c
index 41f8764..bf19b6e 100644
--- a/drivers/edac/xgene_edac.c
+++ b/drivers/edac/xgene_edac.c
@@ -61,6 +61,7 @@
 	struct regmap		*mcba_map;
 	struct regmap		*mcbb_map;
 	struct regmap		*efuse_map;
+	struct regmap		*rb_map;
 	void __iomem		*pcp_csr;
 	spinlock_t		lock;
 	struct dentry           *dfs;
@@ -1057,7 +1058,7 @@
 		case 0x041:
 			return true;
 		}
-	} else if (L3C_ELR_ERRSYN(l3celr) == 9)
+	} else if (L3C_ELR_ERRWAY(l3celr) == 9)
 		return true;
 
 	return false;
@@ -1353,6 +1354,17 @@
 #define GLBL_MDED_ERRH			0x0848
 #define GLBL_MDED_ERRHMASK		0x084c
 
+/* IO Bus Registers */
+#define RBCSR				0x0000
+#define STICKYERR_MASK			BIT(0)
+#define RBEIR				0x0008
+#define AGENT_OFFLINE_ERR_MASK		BIT(30)
+#define UNIMPL_RBPAGE_ERR_MASK		BIT(29)
+#define WORD_ALIGNED_ERR_MASK		BIT(28)
+#define PAGE_ACCESS_ERR_MASK		BIT(27)
+#define WRITE_ACCESS_MASK		BIT(26)
+#define RBERRADDR_RD(src)		((src) & 0x03FFFFFF)
+
 static const char * const soc_mem_err_v1[] = {
 	"10GbE0",
 	"10GbE1",
@@ -1470,6 +1482,51 @@
 	u32 err_addr_hi;
 	u32 reg;
 
+	/* If the register bus resource isn't available, just skip it */
+	if (!ctx->edac->rb_map)
+		goto rb_skip;
+
+	/*
+	 * Check RB access errors
+	 * 1. Out of range
+	 * 2. Un-implemented page
+	 * 3. Un-aligned access
+	 * 4. Offline slave IP
+	 */
+	if (regmap_read(ctx->edac->rb_map, RBCSR, &reg))
+		return;
+	if (reg & STICKYERR_MASK) {
+		bool write;
+		u32 address;
+
+		dev_err(edac_dev->dev, "IOB bus access error(s)\n");
+		if (regmap_read(ctx->edac->rb_map, RBEIR, &reg))
+			return;
+		write = reg & WRITE_ACCESS_MASK ? 1 : 0;
+		address = RBERRADDR_RD(reg);
+		if (reg & AGENT_OFFLINE_ERR_MASK)
+			dev_err(edac_dev->dev,
+				"IOB bus %s access to offline agent error\n",
+				write ? "write" : "read");
+		if (reg & UNIMPL_RBPAGE_ERR_MASK)
+			dev_err(edac_dev->dev,
+				"IOB bus %s access to unimplemented page error\n",
+				write ? "write" : "read");
+		if (reg & WORD_ALIGNED_ERR_MASK)
+			dev_err(edac_dev->dev,
+				"IOB bus %s word aligned access error\n",
+				write ? "write" : "read");
+		if (reg & PAGE_ACCESS_ERR_MASK)
+			dev_err(edac_dev->dev,
+				"IOB bus %s to page out of range access error\n",
+				write ? "write" : "read");
+		if (regmap_write(ctx->edac->rb_map, RBEIR, 0))
+			return;
+		if (regmap_write(ctx->edac->rb_map, RBCSR, 0))
+			return;
+	}
+rb_skip:
+
 	/* IOB Bridge agent transaction error interrupt */
 	reg = readl(ctx->dev_csr + IOBBATRANSERRINTSTS);
 	if (!reg)
@@ -1852,6 +1909,17 @@
 		goto out_err;
 	}
 
+	/*
+	 * NOTE: The register bus resource is optional for compatibility
+	 * reasons.
+	 */
+	edac->rb_map = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
+						       "regmap-rb");
+	if (IS_ERR(edac->rb_map)) {
+		dev_warn(edac->dev, "missing syscon regmap rb\n");
+		edac->rb_map = NULL;
+	}
+
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	edac->pcp_csr = devm_ioremap_resource(&pdev->dev, res);
 	if (IS_ERR(edac->pcp_csr)) {
diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h
index 4ebc796..2f8c0f4 100644
--- a/drivers/hv/hyperv_vmbus.h
+++ b/drivers/hv/hyperv_vmbus.h
@@ -256,12 +256,6 @@
 	u8 rsvdz4[1984];
 };
 
-/* Declare the various hypercall operations. */
-enum hv_call_code {
-	HVCALL_POST_MESSAGE	= 0x005c,
-	HVCALL_SIGNAL_EVENT	= 0x005d,
-};
-
 /* Definition of the hv_post_message hypercall input structure. */
 struct hv_input_post_message {
 	union hv_connection_id connectionid;
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index 60fb80b..5c2d13a 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -685,6 +685,20 @@
 	  This driver can also be built as a module. If so, the module will
 	  be called ltc2945.
 
+config SENSORS_LTC2990
+	tristate "Linear Technology LTC2990 (current monitoring mode only)"
+	depends on I2C
+	help
+	  If you say yes here you get support for Linear Technology LTC2990
+	  I2C System Monitor. The LTC2990 supports a combination of voltage,
+	  current and temperature monitoring, but in addition to the Vcc supply
+	  voltage and chip temperature, this driver currently only supports
+	  reading two currents by measuring two differential voltages across
+	  series resistors.
+
+	  This driver can also be built as a module. If so, the module will
+	  be called ltc2990.
+
 config SENSORS_LTC4151
 	tristate "Linear Technology LTC4151"
 	depends on I2C
@@ -1127,7 +1141,7 @@
 
 	  Currently, this driver supports
 	  NCP15WB473, NCP18WB473, NCP21WB473, NCP03WB473, NCP15WL333,
-	  and NCP03WF104 from Murata and B57330V2103 from EPCOS.
+	  NCP03WF104 and NCP15XH103 from Murata and B57330V2103 from EPCOS.
 
 	  This driver can also be built as a module.  If so, the module
 	  will be called ntc-thermistor.
@@ -1176,6 +1190,21 @@
 	  This driver can also be built as a module.  If so, the module
 	  will be called nct7904.
 
+config SENSORS_NSA320
+	tristate "ZyXEL NSA320 and compatible fan speed and temperature sensors"
+	depends on GPIOLIB && OF
+	depends on MACH_KIRKWOOD || COMPILE_TEST
+	help
+	  If you say yes here you get support for hardware monitoring
+	  for the ZyXEL NSA320 Media Server and other compatible devices
+	  (probably the NSA325 and some NSA310 variants).
+
+	  The sensor data is taken from a Holtek HT46R065 microcontroller
+	  connected to GPIO lines.
+
+	  This driver can also be built as a module. If so, the module
+	  will be called nsa320-hwmon.
+
 config SENSORS_PCF8591
 	tristate "Philips PCF8591 ADC/DAC"
 	depends on I2C
diff --git a/drivers/hwmon/Makefile b/drivers/hwmon/Makefile
index 30c94df..58cc3ac 100644
--- a/drivers/hwmon/Makefile
+++ b/drivers/hwmon/Makefile
@@ -100,6 +100,7 @@
 obj-$(CONFIG_SENSORS_LM95241)	+= lm95241.o
 obj-$(CONFIG_SENSORS_LM95245)	+= lm95245.o
 obj-$(CONFIG_SENSORS_LTC2945)	+= ltc2945.o
+obj-$(CONFIG_SENSORS_LTC2990)	+= ltc2990.o
 obj-$(CONFIG_SENSORS_LTC4151)	+= ltc4151.o
 obj-$(CONFIG_SENSORS_LTC4215)	+= ltc4215.o
 obj-$(CONFIG_SENSORS_LTC4222)	+= ltc4222.o
@@ -123,6 +124,7 @@
 obj-$(CONFIG_SENSORS_NCT6775)	+= nct6775.o
 obj-$(CONFIG_SENSORS_NCT7802)	+= nct7802.o
 obj-$(CONFIG_SENSORS_NCT7904)	+= nct7904.o
+obj-$(CONFIG_SENSORS_NSA320)	+= nsa320-hwmon.o
 obj-$(CONFIG_SENSORS_NTC_THERMISTOR)	+= ntc_thermistor.o
 obj-$(CONFIG_SENSORS_PC87360)	+= pc87360.o
 obj-$(CONFIG_SENSORS_PC87427)	+= pc87427.o
@@ -149,7 +151,7 @@
 obj-$(CONFIG_SENSORS_TMP401)	+= tmp401.o
 obj-$(CONFIG_SENSORS_TMP421)	+= tmp421.o
 obj-$(CONFIG_SENSORS_TWL4030_MADC)+= twl4030-madc-hwmon.o
-obj-$(CONFIG_SENSORS_VEXPRESS)	+= vexpress.o
+obj-$(CONFIG_SENSORS_VEXPRESS)	+= vexpress-hwmon.o
 obj-$(CONFIG_SENSORS_VIA_CPUTEMP)+= via-cputemp.o
 obj-$(CONFIG_SENSORS_VIA686A)	+= via686a.o
 obj-$(CONFIG_SENSORS_VT1211)	+= vt1211.o
diff --git a/drivers/hwmon/iio_hwmon.c b/drivers/hwmon/iio_hwmon.c
index 17ae2eb..b550ba5 100644
--- a/drivers/hwmon/iio_hwmon.c
+++ b/drivers/hwmon/iio_hwmon.c
@@ -67,6 +67,7 @@
 	enum iio_chan_type type;
 	struct iio_channel *channels;
 	const char *name = "iio_hwmon";
+	char *sname;
 
 	if (dev->of_node && dev->of_node->name)
 		name = dev->of_node->name;
@@ -144,7 +145,15 @@
 
 	st->attr_group.attrs = st->attrs;
 	st->groups[0] = &st->attr_group;
-	st->hwmon_dev = hwmon_device_register_with_groups(dev, name, st,
+
+	sname = devm_kstrdup(dev, name, GFP_KERNEL);
+	if (!sname) {
+		ret = -ENOMEM;
+		goto error_release_channels;
+	}
+
+	strreplace(sname, '-', '_');
+	st->hwmon_dev = hwmon_device_register_with_groups(dev, sname, st,
 							  st->groups);
 	if (IS_ERR(st->hwmon_dev)) {
 		ret = PTR_ERR(st->hwmon_dev);
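The devm_kstrdup()/strreplace() step above exists because tools such as libsensors treat '-' as a separator in hwmon chip names, while DT node names commonly contain it; presumably that is why the node name is sanitized before registration. A standalone illustration of the rename with a made-up node name (plain C, not the kernel helper):

#include <stdio.h>

int main(void)
{
	char sname[] = "pmic-adc";		/* hypothetical dev->of_node->name */

	for (char *p = sname; *p; p++)		/* what strreplace(sname, '-', '_') does */
		if (*p == '-')
			*p = '_';

	printf("%s\n", sname);			/* prints "pmic_adc" */
	return 0;
}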
diff --git a/drivers/hwmon/ltc2990.c b/drivers/hwmon/ltc2990.c
new file mode 100644
index 0000000..8f8fe05
--- /dev/null
+++ b/drivers/hwmon/ltc2990.c
@@ -0,0 +1,161 @@
+/*
+ * Driver for Linear Technology LTC2990 power monitor
+ *
+ * Copyright (C) 2014 Topic Embedded Products
+ * Author: Mike Looijmans <mike.looijmans@topic.nl>
+ *
+ * License: GPLv2
+ *
+ * This driver assumes the chip is wired as a dual current monitor, and
+ * reports the voltage drop across two series resistors. It also reports
+ * the chip's internal temperature and Vcc power supply voltage.
+ */
+
+#include <linux/err.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+#include <linux/i2c.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+
+#define LTC2990_STATUS	0x00
+#define LTC2990_CONTROL	0x01
+#define LTC2990_TRIGGER	0x02
+#define LTC2990_TINT_MSB	0x04
+#define LTC2990_V1_MSB	0x06
+#define LTC2990_V2_MSB	0x08
+#define LTC2990_V3_MSB	0x0A
+#define LTC2990_V4_MSB	0x0C
+#define LTC2990_VCC_MSB	0x0E
+
+#define LTC2990_CONTROL_KELVIN		BIT(7)
+#define LTC2990_CONTROL_SINGLE		BIT(6)
+#define LTC2990_CONTROL_MEASURE_ALL	(0x3 << 3)
+#define LTC2990_CONTROL_MODE_CURRENT	0x06
+#define LTC2990_CONTROL_MODE_VOLTAGE	0x07
+
+/* Sign-extend the 15-bit raw register value and scale it by 4 */
+static int ltc2990_voltage_to_int(int raw)
+{
+	if (raw & BIT(14))
+		return -(0x4000 - (raw & 0x3FFF)) << 2;
+	else
+		return (raw & 0x3FFF) << 2;
+}
+
+/* Return the converted value from the given register in uV or mC */
+static int ltc2990_get_value(struct i2c_client *i2c, u8 reg, int *result)
+{
+	int val;
+
+	val = i2c_smbus_read_word_swapped(i2c, reg);
+	if (unlikely(val < 0))
+		return val;
+
+	switch (reg) {
+	case LTC2990_TINT_MSB:
+		/* internal temp, 0.0625 degrees/LSB, 13-bit  */
+		val = (val & 0x1FFF) << 3;
+		*result = (val * 1000) >> 7;
+		break;
+	case LTC2990_V1_MSB:
+	case LTC2990_V3_MSB:
+		 /* Vx-Vy, 19.42uV/LSB. Depends on mode. */
+		*result = ltc2990_voltage_to_int(val) * 1942 / (4 * 100);
+		break;
+	case LTC2990_VCC_MSB:
+		/* Vcc, 305.18μV/LSB, 2.5V offset */
+		*result = (ltc2990_voltage_to_int(val) * 30518 /
+			   (4 * 100 * 1000)) + 2500;
+		break;
+	default:
+		return -EINVAL; /* won't happen, keep compiler happy */
+	}
+
+	return 0;
+}
+
+static ssize_t ltc2990_show_value(struct device *dev,
+				  struct device_attribute *da, char *buf)
+{
+	struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
+	int value;
+	int ret;
+
+	ret = ltc2990_get_value(dev_get_drvdata(dev), attr->index, &value);
+	if (unlikely(ret < 0))
+		return ret;
+
+	return snprintf(buf, PAGE_SIZE, "%d\n", value);
+}
+
+static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, ltc2990_show_value, NULL,
+			  LTC2990_TINT_MSB);
+static SENSOR_DEVICE_ATTR(curr1_input, S_IRUGO, ltc2990_show_value, NULL,
+			  LTC2990_V1_MSB);
+static SENSOR_DEVICE_ATTR(curr2_input, S_IRUGO, ltc2990_show_value, NULL,
+			  LTC2990_V3_MSB);
+static SENSOR_DEVICE_ATTR(in0_input, S_IRUGO, ltc2990_show_value, NULL,
+			  LTC2990_VCC_MSB);
+
+static struct attribute *ltc2990_attrs[] = {
+	&sensor_dev_attr_temp1_input.dev_attr.attr,
+	&sensor_dev_attr_curr1_input.dev_attr.attr,
+	&sensor_dev_attr_curr2_input.dev_attr.attr,
+	&sensor_dev_attr_in0_input.dev_attr.attr,
+	NULL,
+};
+ATTRIBUTE_GROUPS(ltc2990);
+
+static int ltc2990_i2c_probe(struct i2c_client *i2c,
+			     const struct i2c_device_id *id)
+{
+	int ret;
+	struct device *hwmon_dev;
+
+	if (!i2c_check_functionality(i2c->adapter, I2C_FUNC_SMBUS_BYTE_DATA |
+				     I2C_FUNC_SMBUS_WORD_DATA))
+		return -ENODEV;
+
+	/* Setup continuous mode, current monitor */
+	ret = i2c_smbus_write_byte_data(i2c, LTC2990_CONTROL,
+					LTC2990_CONTROL_MEASURE_ALL |
+					LTC2990_CONTROL_MODE_CURRENT);
+	if (ret < 0) {
+		dev_err(&i2c->dev, "Error: Failed to set control mode.\n");
+		return ret;
+	}
+	/* Trigger once to start continuous conversion */
+	ret = i2c_smbus_write_byte_data(i2c, LTC2990_TRIGGER, 1);
+	if (ret < 0) {
+		dev_err(&i2c->dev, "Error: Failed to start acquisition.\n");
+		return ret;
+	}
+
+	hwmon_dev = devm_hwmon_device_register_with_groups(&i2c->dev,
+							   i2c->name,
+							   i2c,
+							   ltc2990_groups);
+
+	return PTR_ERR_OR_ZERO(hwmon_dev);
+}
+
+static const struct i2c_device_id ltc2990_i2c_id[] = {
+	{ "ltc2990", 0 },
+	{}
+};
+MODULE_DEVICE_TABLE(i2c, ltc2990_i2c_id);
+
+static struct i2c_driver ltc2990_i2c_driver = {
+	.driver = {
+		.name = "ltc2990",
+	},
+	.probe    = ltc2990_i2c_probe,
+	.id_table = ltc2990_i2c_id,
+};
+
+module_i2c_driver(ltc2990_i2c_driver);
+
+MODULE_DESCRIPTION("LTC2990 Sensor Driver");
+MODULE_AUTHOR("Topic Embedded Products");
+MODULE_LICENSE("GPL v2");
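For anyone checking the scaling in ltc2990_get_value(), here is a standalone program (not part of the driver) that pushes two made-up register codes through the same arithmetic:

#include <stdio.h>

/* Same math as ltc2990_voltage_to_int() above */
static int voltage_to_int(int raw)
{
	if (raw & (1 << 14))
		return -(0x4000 - (raw & 0x3FFF)) << 2;
	return (raw & 0x3FFF) << 2;
}

int main(void)
{
	int tint = 0x0190;	/* 13-bit temperature code: 400 LSB * 0.0625 C */
	int v1 = 0x0100;	/* differential code: 256 LSB * 19.42 uV */

	int temp_mc = (((tint & 0x1FFF) << 3) * 1000) >> 7;
	int v1_uv = voltage_to_int(v1) * 1942 / (4 * 100);

	printf("temp1 = %d mC\n", temp_mc);	/* 25000 mC, i.e. 25.0 C */
	printf("curr1 = %d uV\n", v1_uv);	/* 4971 uV across the sense resistor */
	return 0;
}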
diff --git a/drivers/hwmon/nsa320-hwmon.c b/drivers/hwmon/nsa320-hwmon.c
new file mode 100644
index 0000000..0517a2657
--- /dev/null
+++ b/drivers/hwmon/nsa320-hwmon.c
@@ -0,0 +1,215 @@
+/*
+ * drivers/hwmon/nsa320-hwmon.c
+ *
+ * ZyXEL NSA320 Media Servers
+ * hardware monitoring
+ *
+ * Copyright (C) 2016 Adam Baker <linux@baker-net.org.uk>
+ * based on a board file driver
+ * Copyright (C) 2012 Peter Schildmann <linux@schildmann.info>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License v2 as published by the
+ * Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ */
+
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/gpio/consumer.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+#include <linux/jiffies.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+
+/* Tests for error return values rely upon this value being < 0x80 */
+#define MAGIC_NUMBER 0x55
+
+/*
+ * The Zyxel hwmon MCU is a Holtek HT46R065 that is factory programmed
+ * to perform temperature and fan speed monitoring. It is read by taking
+ * the active pin low. The 32-bit output word is then clocked onto the
+ * data line. The MSB of the data word is a magic number to indicate it
+ * has been read correctly, the next byte is the fan speed (in hundreds
+ * of RPM) and the last two bytes are the temperature (in tenths of a
+ * degree).
+ */
+
+struct nsa320_hwmon {
+	struct mutex		update_lock;	/* lock GPIO operations */
+	unsigned long		last_updated;	/* jiffies */
+	unsigned long		mcu_data;
+	struct gpio_desc	*act;
+	struct gpio_desc	*clk;
+	struct gpio_desc	*data;
+};
+
+enum nsa320_inputs {
+	NSA320_TEMP = 0,
+	NSA320_FAN = 1,
+};
+
+static const char * const nsa320_input_names[] = {
+	[NSA320_TEMP] = "System Temperature",
+	[NSA320_FAN] = "Chassis Fan",
+};
+
+/*
+ * Although this protocol looks similar to SPI, the long delay
+ * after asserting the active (aka chip select) signal and the shorter
+ * delays between clock pulses are needed for reliable operation.
+ * The delays provided are taken from the manufacturer's kernel;
+ * testing suggests they probably incorporate a reasonable safety
+ * margin. (The single device tested became unreliable if the
+ * delay was reduced to 1/10th of this value.)
+ */
+static s32 nsa320_hwmon_update(struct device *dev)
+{
+	u32 mcu_data;
+	u32 mask;
+	struct nsa320_hwmon *hwmon = dev_get_drvdata(dev);
+
+	mutex_lock(&hwmon->update_lock);
+
+	mcu_data = hwmon->mcu_data;
+
+	if (time_after(jiffies, hwmon->last_updated + HZ) || mcu_data == 0) {
+		gpiod_set_value(hwmon->act, 1);
+		msleep(100);
+
+		mcu_data = 0;
+		for (mask = BIT(31); mask; mask >>= 1) {
+			gpiod_set_value(hwmon->clk, 0);
+			usleep_range(100, 200);
+			gpiod_set_value(hwmon->clk, 1);
+			usleep_range(100, 200);
+			if (gpiod_get_value(hwmon->data))
+				mcu_data |= mask;
+		}
+
+		gpiod_set_value(hwmon->act, 0);
+		dev_dbg(dev, "Read raw MCU data %08x\n", mcu_data);
+
+		if ((mcu_data >> 24) != MAGIC_NUMBER) {
+			dev_dbg(dev, "Read invalid MCU data %08x\n", mcu_data);
+			mcu_data = -EIO;
+		} else {
+			hwmon->mcu_data = mcu_data;
+			hwmon->last_updated = jiffies;
+		}
+	}
+
+	mutex_unlock(&hwmon->update_lock);
+
+	return mcu_data;
+}
+
+static ssize_t show_label(struct device *dev,
+			  struct device_attribute *attr, char *buf)
+{
+	int channel = to_sensor_dev_attr(attr)->index;
+
+	return sprintf(buf, "%s\n", nsa320_input_names[channel]);
+}
+
+static ssize_t show_temp(struct device *dev, struct device_attribute *attr,
+			 char *buf)
+{
+	s32 mcu_data = nsa320_hwmon_update(dev);
+
+	if (mcu_data < 0)
+		return mcu_data;
+
+	return sprintf(buf, "%d\n", (mcu_data & 0xffff) * 100);
+}
+
+static ssize_t show_fan(struct device *dev, struct device_attribute *attr,
+			char *buf)
+{
+	s32 mcu_data = nsa320_hwmon_update(dev);
+
+	if (mcu_data < 0)
+		return mcu_data;
+
+	return sprintf(buf, "%d\n", ((mcu_data & 0xff0000) >> 16) * 100);
+}
+
+static SENSOR_DEVICE_ATTR(temp1_label, S_IRUGO, show_label, NULL, NSA320_TEMP);
+static DEVICE_ATTR(temp1_input, S_IRUGO, show_temp, NULL);
+static SENSOR_DEVICE_ATTR(fan1_label, S_IRUGO, show_label, NULL, NSA320_FAN);
+static DEVICE_ATTR(fan1_input, S_IRUGO, show_fan, NULL);
+
+static struct attribute *nsa320_attrs[] = {
+	&sensor_dev_attr_temp1_label.dev_attr.attr,
+	&dev_attr_temp1_input.attr,
+	&sensor_dev_attr_fan1_label.dev_attr.attr,
+	&dev_attr_fan1_input.attr,
+	NULL
+};
+
+ATTRIBUTE_GROUPS(nsa320);
+
+static const struct of_device_id of_nsa320_hwmon_match[] = {
+	{ .compatible = "zyxel,nsa320-mcu", },
+	{ },
+};
+
+static int nsa320_hwmon_probe(struct platform_device *pdev)
+{
+	struct nsa320_hwmon	*hwmon;
+	struct device		*classdev;
+
+	hwmon = devm_kzalloc(&pdev->dev, sizeof(*hwmon), GFP_KERNEL);
+	if (!hwmon)
+		return -ENOMEM;
+
+	/* Look up the GPIO pins to use */
+	hwmon->act = devm_gpiod_get(&pdev->dev, "act", GPIOD_OUT_LOW);
+	if (IS_ERR(hwmon->act))
+		return PTR_ERR(hwmon->act);
+
+	hwmon->clk = devm_gpiod_get(&pdev->dev, "clk", GPIOD_OUT_HIGH);
+	if (IS_ERR(hwmon->clk))
+		return PTR_ERR(hwmon->clk);
+
+	hwmon->data = devm_gpiod_get(&pdev->dev, "data", GPIOD_IN);
+	if (IS_ERR(hwmon->data))
+		return PTR_ERR(hwmon->data);
+
+	mutex_init(&hwmon->update_lock);
+
+	classdev = devm_hwmon_device_register_with_groups(&pdev->dev,
+					"nsa320", hwmon, nsa320_groups);
+
+	return PTR_ERR_OR_ZERO(classdev);
+}
+
+/* All allocations use devres so remove() is not needed. */
+
+static struct platform_driver nsa320_hwmon_driver = {
+	.probe = nsa320_hwmon_probe,
+	.driver = {
+		.name = "nsa320-hwmon",
+		.of_match_table = of_match_ptr(of_nsa320_hwmon_match),
+	},
+};
+
+module_platform_driver(nsa320_hwmon_driver);
+
+MODULE_DEVICE_TABLE(of, of_nsa320_hwmon_match);
+MODULE_AUTHOR("Peter Schildmann <linux@schildmann.info>");
+MODULE_AUTHOR("Adam Baker <linux@baker-net.org.uk>");
+MODULE_DESCRIPTION("NSA320 Hardware Monitoring");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:nsa320-hwmon");
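The show_temp()/show_fan() handlers above only slice that 32-bit word; a standalone decode of one made-up sample word shows the scaling:

#include <stdio.h>
#include <stdint.h>

#define MAGIC_NUMBER 0x55

int main(void)
{
	uint32_t mcu_data = 0x550E00FA;	/* hypothetical word clocked out of the MCU */

	if ((mcu_data >> 24) != MAGIC_NUMBER)
		return 1;		/* the driver would report this as -EIO */

	/* fan byte is hundreds of RPM, temperature word is tenths of a degree */
	printf("fan1  = %u RPM\n", (unsigned)(((mcu_data & 0xff0000) >> 16) * 100)); /* 1400 */
	printf("temp1 = %u mC\n", (unsigned)((mcu_data & 0xffff) * 100));            /* 25000 */
	return 0;
}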
diff --git a/drivers/hwmon/ntc_thermistor.c b/drivers/hwmon/ntc_thermistor.c
index feed306..faa6e8d 100644
--- a/drivers/hwmon/ntc_thermistor.c
+++ b/drivers/hwmon/ntc_thermistor.c
@@ -54,6 +54,7 @@
 	{ "ncp15wl333", TYPE_NCPXXWL333 },
 	{ "b57330v2103", TYPE_B57330V2103},
 	{ "ncp03wf104", TYPE_NCPXXWF104 },
+	{ "ncp15xh103", TYPE_NCPXXXH103 },
 	{ },
 };
 
@@ -173,6 +174,43 @@
 	{ .temp_c	= 125, .ohm	= 2522 },
 };
 
+static const struct ntc_compensation ncpXXxh103[] = {
+	{ .temp_c	= -40, .ohm	= 247565 },
+	{ .temp_c	= -35, .ohm	= 181742 },
+	{ .temp_c	= -30, .ohm	= 135128 },
+	{ .temp_c	= -25, .ohm	= 101678 },
+	{ .temp_c	= -20, .ohm	= 77373 },
+	{ .temp_c	= -15, .ohm	= 59504 },
+	{ .temp_c	= -10, .ohm	= 46222 },
+	{ .temp_c	= -5, .ohm	= 36244 },
+	{ .temp_c	= 0, .ohm	= 28674 },
+	{ .temp_c	= 5, .ohm	= 22878 },
+	{ .temp_c	= 10, .ohm	= 18399 },
+	{ .temp_c	= 15, .ohm	= 14910 },
+	{ .temp_c	= 20, .ohm	= 12169 },
+	{ .temp_c	= 25, .ohm	= 10000 },
+	{ .temp_c	= 30, .ohm	= 8271 },
+	{ .temp_c	= 35, .ohm	= 6883 },
+	{ .temp_c	= 40, .ohm	= 5762 },
+	{ .temp_c	= 45, .ohm	= 4851 },
+	{ .temp_c	= 50, .ohm	= 4105 },
+	{ .temp_c	= 55, .ohm	= 3492 },
+	{ .temp_c	= 60, .ohm	= 2985 },
+	{ .temp_c	= 65, .ohm	= 2563 },
+	{ .temp_c	= 70, .ohm	= 2211 },
+	{ .temp_c	= 75, .ohm	= 1915 },
+	{ .temp_c	= 80, .ohm	= 1666 },
+	{ .temp_c	= 85, .ohm	= 1454 },
+	{ .temp_c	= 90, .ohm	= 1275 },
+	{ .temp_c	= 95, .ohm	= 1121 },
+	{ .temp_c	= 100, .ohm	= 990 },
+	{ .temp_c	= 105, .ohm	= 876 },
+	{ .temp_c	= 110, .ohm	= 779 },
+	{ .temp_c	= 115, .ohm	= 694 },
+	{ .temp_c	= 120, .ohm	= 620 },
+	{ .temp_c	= 125, .ohm	= 556 },
+};
+
 /*
  * The following compensation table is from the specification of EPCOS NTC
  * Thermistors Datasheet
@@ -260,6 +298,8 @@
 		.data = &ntc_thermistor_id[5]},
 	{ .compatible = "murata,ncp03wf104",
 		.data = &ntc_thermistor_id[6] },
+	{ .compatible = "murata,ncp15xh103",
+		.data = &ntc_thermistor_id[7] },
 
 	/* Usage of vendor name "ntc" is deprecated */
 	{ .compatible = "ntc,ncp15wb473",
@@ -609,6 +649,10 @@
 		data->comp = ncpXXwf104;
 		data->n_comp = ARRAY_SIZE(ncpXXwf104);
 		break;
+	case TYPE_NCPXXXH103:
+		data->comp = ncpXXxh103;
+		data->n_comp = ARRAY_SIZE(ncpXXxh103);
+		break;
 	default:
 		dev_err(&pdev->dev, "Unknown device type: %lu(%s)\n",
 				pdev_id->driver_data, pdev_id->name);
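The resistance-to-temperature lookup itself is outside this hunk; as a rough sketch of how a table such as ncpXXxh103 gets used, linear interpolation between bracketing entries looks like this (standalone code, not the driver's own helper):

#include <stdio.h>

struct comp { int temp_c; unsigned int ohm; };

/* A few points from the ncpXXxh103 table above */
static const struct comp tbl[] = {
	{ 20, 12169 }, { 25, 10000 }, { 30, 8271 }, { 35, 6883 },
};

/* Linear interpolation between bracketing points, result in milli-degrees C */
static int ohm_to_mdegc(unsigned int ohm)
{
	for (unsigned int i = 0; i + 1 < sizeof(tbl) / sizeof(tbl[0]); i++) {
		if (ohm <= tbl[i].ohm && ohm >= tbl[i + 1].ohm) {
			int dt = tbl[i + 1].temp_c - tbl[i].temp_c;
			int dohm = (int)(tbl[i].ohm - tbl[i + 1].ohm);

			return tbl[i].temp_c * 1000 +
			       (int)(tbl[i].ohm - ohm) * dt * 1000 / dohm;
		}
	}
	return -1;	/* outside this shortened table */
}

int main(void)
{
	printf("%d\n", ohm_to_mdegc(9000));	/* 27891, i.e. roughly 27.9 C */
	return 0;
}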
diff --git a/drivers/hwmon/pmbus/Kconfig b/drivers/hwmon/pmbus/Kconfig
index 7e5cc3d..054d3d8 100644
--- a/drivers/hwmon/pmbus/Kconfig
+++ b/drivers/hwmon/pmbus/Kconfig
@@ -31,8 +31,8 @@
 	default n
 	help
 	  If you say yes here you get hardware monitoring support for Analog
-	  Devices ADM1075, ADM1275, ADM1276, ADM1293, and ADM1294 Hot-Swap
-	  Controller and Digital Power Monitors.
+	  Devices ADM1075, ADM1275, ADM1276, ADM1278, ADM1293, and ADM1294
+	  Hot-Swap Controller and Digital Power Monitors.
 
 	  This driver can also be built as a module. If so, the module will
 	  be called adm1275.
diff --git a/drivers/hwmon/pmbus/adm1275.c b/drivers/hwmon/pmbus/adm1275.c
index 188af4c..3baa4f4a 100644
--- a/drivers/hwmon/pmbus/adm1275.c
+++ b/drivers/hwmon/pmbus/adm1275.c
@@ -24,7 +24,7 @@
 #include <linux/bitops.h>
 #include "pmbus.h"
 
-enum chips { adm1075, adm1275, adm1276, adm1293, adm1294 };
+enum chips { adm1075, adm1275, adm1276, adm1278, adm1293, adm1294 };
 
 #define ADM1275_MFR_STATUS_IOUT_WARN2	BIT(0)
 #define ADM1293_MFR_STATUS_VAUX_UV_WARN	BIT(5)
@@ -41,6 +41,10 @@
 #define ADM1075_IRANGE_25		BIT(3)
 #define ADM1075_IRANGE_MASK		(BIT(3) | BIT(4))
 
+#define ADM1278_TEMP1_EN		BIT(3)
+#define ADM1278_VIN_EN			BIT(2)
+#define ADM1278_VOUT_EN			BIT(1)
+
 #define ADM1293_IRANGE_25		0
 #define ADM1293_IRANGE_50		BIT(6)
 #define ADM1293_IRANGE_100		BIT(7)
@@ -54,6 +58,7 @@
 
 #define ADM1293_VAUX_EN			BIT(1)
 
+#define ADM1278_PEAK_TEMP		0xd7
 #define ADM1275_IOUT_WARN2_LIMIT	0xd7
 #define ADM1275_DEVICE_CONFIG		0xd8
 
@@ -80,6 +85,7 @@
 	bool have_iout_min;
 	bool have_pin_min;
 	bool have_pin_max;
+	bool have_temp_max;
 	struct pmbus_driver_info info;
 };
 
@@ -113,6 +119,13 @@
 	[4] = { 2115, 0, -1 },		/* power, vrange not set */
 };
 
+static const struct coefficients adm1278_coefficients[] = {
+	[0] = { 19599, 0, -2 },		/* voltage */
+	[1] = { 800, 20475, -1 },	/* current */
+	[2] = { 6123, 0, -2 },		/* power */
+	[3] = { 42, 31880, -1 },	/* temperature */
+};
+
 static const struct coefficients adm1293_coefficients[] = {
 	[0] = { 3333, -1, 0 },		/* voltage, vrange 1.2V */
 	[1] = { 5552, -5, -1 },		/* voltage, vrange 7.4V */
@@ -196,6 +209,11 @@
 			return -ENXIO;
 		ret = pmbus_read_word_data(client, 0, ADM1276_PEAK_PIN);
 		break;
+	case PMBUS_VIRT_READ_TEMP_MAX:
+		if (!data->have_temp_max)
+			return -ENXIO;
+		ret = pmbus_read_word_data(client, 0, ADM1278_PEAK_TEMP);
+		break;
 	case PMBUS_VIRT_RESET_IOUT_HISTORY:
 	case PMBUS_VIRT_RESET_VOUT_HISTORY:
 	case PMBUS_VIRT_RESET_VIN_HISTORY:
@@ -204,6 +222,10 @@
 		if (!data->have_pin_max)
 			return -ENXIO;
 		break;
+	case PMBUS_VIRT_RESET_TEMP_HISTORY:
+		if (!data->have_temp_max)
+			return -ENXIO;
+		break;
 	default:
 		ret = -ENODATA;
 		break;
@@ -245,6 +267,9 @@
 			ret = pmbus_write_word_data(client, 0,
 						    ADM1293_PIN_MIN, 0);
 		break;
+	case PMBUS_VIRT_RESET_TEMP_HISTORY:
+		ret = pmbus_write_word_data(client, 0, ADM1278_PEAK_TEMP, 0);
+		break;
 	default:
 		ret = -ENODATA;
 		break;
@@ -312,6 +337,7 @@
 	{ "adm1075", adm1075 },
 	{ "adm1275", adm1275 },
 	{ "adm1276", adm1276 },
+	{ "adm1278", adm1278 },
 	{ "adm1293", adm1293 },
 	{ "adm1294", adm1294 },
 	{ }
@@ -329,6 +355,7 @@
 	const struct i2c_device_id *mid;
 	const struct coefficients *coefficients;
 	int vindex = -1, voindex = -1, cindex = -1, pindex = -1;
+	int tindex = -1;
 
 	if (!i2c_check_functionality(client->adapter,
 				     I2C_FUNC_SMBUS_READ_BYTE_DATA
@@ -386,6 +413,7 @@
 	info->format[PSC_VOLTAGE_OUT] = direct;
 	info->format[PSC_CURRENT_OUT] = direct;
 	info->format[PSC_POWER] = direct;
+	info->format[PSC_TEMPERATURE] = direct;
 	info->func[0] = PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT;
 
 	info->read_word_data = adm1275_read_word_data;
@@ -460,6 +488,27 @@
 			info->func[0] |=
 			  PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT;
 		break;
+	case adm1278:
+		data->have_vout = true;
+		data->have_pin_max = true;
+		data->have_temp_max = true;
+
+		coefficients = adm1278_coefficients;
+		vindex = 0;
+		cindex = 1;
+		pindex = 2;
+		tindex = 3;
+
+		info->func[0] |= PMBUS_HAVE_PIN | PMBUS_HAVE_STATUS_INPUT;
+		if (config & ADM1278_TEMP1_EN)
+			info->func[0] |=
+				PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP;
+		if (config & ADM1278_VIN_EN)
+			info->func[0] |= PMBUS_HAVE_VIN;
+		if (config & ADM1278_VOUT_EN)
+			info->func[0] |=
+				PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT;
+		break;
 	case adm1293:
 	case adm1294:
 		data->have_iout_min = true;
@@ -537,6 +586,11 @@
 		info->b[PSC_POWER] = coefficients[pindex].b;
 		info->R[PSC_POWER] = coefficients[pindex].R;
 	}
+	if (tindex >= 0) {
+		info->m[PSC_TEMPERATURE] = coefficients[tindex].m;
+		info->b[PSC_TEMPERATURE] = coefficients[tindex].b;
+		info->R[PSC_TEMPERATURE] = coefficients[tindex].R;
+	}
 
 	return pmbus_do_probe(client, id, info);
 }
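The m/b/R triples added above are PMBus direct-format coefficients, which the pmbus core applies as X = (1/m) * (Y * 10^-R - b). A standalone check with the ADM1278 temperature coefficients and a made-up raw code:

#include <stdio.h>

int main(void)
{
	int m = 42, b = 31880, R = -1;	/* ADM1278 temperature row above */
	int raw = 3293;			/* hypothetical READ_TEMPERATURE_1 code */

	/* X = (1/m) * (Y * 10^-R - b); with R == -1 the scale factor is 10 */
	double temp_c = (raw * 10.0 - b) / m;

	printf("%.1f C\n", temp_c);	/* 25.0 C */
	return 0;
}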
diff --git a/drivers/hwmon/vexpress.c b/drivers/hwmon/vexpress-hwmon.c
similarity index 100%
rename from drivers/hwmon/vexpress.c
rename to drivers/hwmon/vexpress-hwmon.c
diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig
index fb50911..7e8c441 100644
--- a/drivers/irqchip/Kconfig
+++ b/drivers/irqchip/Kconfig
@@ -60,6 +60,17 @@
 	  The maximum number of VICs available in the system, for
 	  power management.
 
+config ARMADA_370_XP_IRQ
+	bool
+	select GENERIC_IRQ_CHIP
+	select PCI_MSI_IRQ_DOMAIN if PCI_MSI
+
+config ALPINE_MSI
+	bool
+	depends on PCI && PCI_MSI
+	select GENERIC_IRQ_CHIP
+	select PCI_MSI_IRQ_DOMAIN
+
 config ATMEL_AIC_IRQ
 	bool
 	select GENERIC_IRQ_CHIP
@@ -78,6 +89,11 @@
 	bool
 	select IRQ_DOMAIN
 
+config BCM6345_L1_IRQ
+	bool
+	select GENERIC_IRQ_CHIP
+	select IRQ_DOMAIN
+
 config BCM7038_L1_IRQ
 	bool
 	select GENERIC_IRQ_CHIP
@@ -151,6 +167,11 @@
 	help
 	  Enables SysCfg Controlled IRQs on STi based platforms.
 
+config TANGO_IRQ
+	bool
+	select IRQ_DOMAIN
+	select GENERIC_IRQ_CHIP
+
 config TB10X_IRQC
 	bool
 	select IRQ_DOMAIN
@@ -160,6 +181,7 @@
 	tristate "TS-4800 IRQ controller"
 	select IRQ_DOMAIN
 	depends on HAS_IOMEM
+	depends on SOC_IMX51 || COMPILE_TEST
 	help
 	  Support for the TS-4800 FPGA IRQ controller
 
@@ -193,6 +215,8 @@
 
 config MIPS_GIC
 	bool
+	select GENERIC_IRQ_IPI
+	select IRQ_DOMAIN_HIERARCHY
 	select MIPS_CM
 
 config INGENIC_IRQ
@@ -218,3 +242,7 @@
 	def_bool y if MACH_ASM9260 || ARCH_MXS
 	select IRQ_DOMAIN
 	select STMP_DEVICE
+
+config MVEBU_ODMI
+	bool
+	select GENERIC_MSI_IRQ_DOMAIN
diff --git a/drivers/irqchip/Makefile b/drivers/irqchip/Makefile
index 18caacb..b03cfcb 100644
--- a/drivers/irqchip/Makefile
+++ b/drivers/irqchip/Makefile
@@ -1,11 +1,13 @@
 obj-$(CONFIG_IRQCHIP)			+= irqchip.o
 
+obj-$(CONFIG_ALPINE_MSI)		+= irq-alpine-msi.o
+obj-$(CONFIG_ATH79)			+= irq-ath79-cpu.o
+obj-$(CONFIG_ATH79)			+= irq-ath79-misc.o
 obj-$(CONFIG_ARCH_BCM2835)		+= irq-bcm2835.o
 obj-$(CONFIG_ARCH_BCM2835)		+= irq-bcm2836.o
 obj-$(CONFIG_ARCH_EXYNOS)		+= exynos-combiner.o
 obj-$(CONFIG_ARCH_HIP04)		+= irq-hip04.o
 obj-$(CONFIG_ARCH_MMP)			+= irq-mmp.o
-obj-$(CONFIG_ARCH_MVEBU)		+= irq-armada-370-xp.o
 obj-$(CONFIG_IRQ_MXS)			+= irq-mxs.o
 obj-$(CONFIG_ARCH_TEGRA)		+= irq-tegra.o
 obj-$(CONFIG_ARCH_S3C24XX)		+= irq-s3c24xx.o
@@ -28,6 +30,7 @@
 obj-$(CONFIG_HISILICON_IRQ_MBIGEN)	+= irq-mbigen.o
 obj-$(CONFIG_ARM_NVIC)			+= irq-nvic.o
 obj-$(CONFIG_ARM_VIC)			+= irq-vic.o
+obj-$(CONFIG_ARMADA_370_XP_IRQ)		+= irq-armada-370-xp.o
 obj-$(CONFIG_ATMEL_AIC_IRQ)		+= irq-atmel-aic-common.o irq-atmel-aic.o
 obj-$(CONFIG_ATMEL_AIC5_IRQ)	+= irq-atmel-aic-common.o irq-atmel-aic5.o
 obj-$(CONFIG_I8259)			+= irq-i8259.o
@@ -40,12 +43,14 @@
 obj-$(CONFIG_ARCH_NSPIRE)		+= irq-zevio.o
 obj-$(CONFIG_ARCH_VT8500)		+= irq-vt8500.o
 obj-$(CONFIG_ST_IRQCHIP)		+= irq-st.o
+obj-$(CONFIG_TANGO_IRQ)			+= irq-tango.o
 obj-$(CONFIG_TB10X_IRQC)		+= irq-tb10x.o
 obj-$(CONFIG_TS4800_IRQ)		+= irq-ts4800.o
 obj-$(CONFIG_XTENSA)			+= irq-xtensa-pic.o
 obj-$(CONFIG_XTENSA_MX)			+= irq-xtensa-mx.o
 obj-$(CONFIG_IRQ_CROSSBAR)		+= irq-crossbar.o
 obj-$(CONFIG_SOC_VF610)			+= irq-vf610-mscm-ir.o
+obj-$(CONFIG_BCM6345_L1_IRQ)		+= irq-bcm6345-l1.o
 obj-$(CONFIG_BCM7038_L1_IRQ)		+= irq-bcm7038-l1.o
 obj-$(CONFIG_BCM7120_L2_IRQ)		+= irq-bcm7120-l2.o
 obj-$(CONFIG_BRCMSTB_L2_IRQ)		+= irq-brcmstb-l2.o
@@ -59,3 +64,4 @@
 obj-$(CONFIG_INGENIC_IRQ)		+= irq-ingenic.o
 obj-$(CONFIG_IMX_GPCV2)			+= irq-imx-gpcv2.o
 obj-$(CONFIG_PIC32_EVIC)		+= irq-pic32-evic.o
+obj-$(CONFIG_MVEBU_ODMI)		+= irq-mvebu-odmi.o
diff --git a/drivers/irqchip/irq-alpine-msi.c b/drivers/irqchip/irq-alpine-msi.c
new file mode 100644
index 0000000..2538425
--- /dev/null
+++ b/drivers/irqchip/irq-alpine-msi.c
@@ -0,0 +1,293 @@
+/*
+ * Annapurna Labs MSIX support services
+ *
+ * Copyright (C) 2016, Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * Antoine Tenart <antoine.tenart@free-electrons.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/irqchip.h>
+#include <linux/irqchip/arm-gic.h>
+#include <linux/msi.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_pci.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+
+#include <asm/irq.h>
+#include <asm-generic/msi.h>
+
+/* MSIX message address format: local GIC target */
+#define ALPINE_MSIX_SPI_TARGET_CLUSTER0		BIT(16)
+
+struct alpine_msix_data {
+	spinlock_t msi_map_lock;
+	phys_addr_t addr;
+	u32 spi_first;		/* The SPI number that MSIs start at */
+	u32 num_spis;		/* The number of SPIs for MSIs */
+	unsigned long *msi_map;
+};
+
+static void alpine_msix_mask_msi_irq(struct irq_data *d)
+{
+	pci_msi_mask_irq(d);
+	irq_chip_mask_parent(d);
+}
+
+static void alpine_msix_unmask_msi_irq(struct irq_data *d)
+{
+	pci_msi_unmask_irq(d);
+	irq_chip_unmask_parent(d);
+}
+
+static struct irq_chip alpine_msix_irq_chip = {
+	.name			= "MSIx",
+	.irq_mask		= alpine_msix_mask_msi_irq,
+	.irq_unmask		= alpine_msix_unmask_msi_irq,
+	.irq_eoi		= irq_chip_eoi_parent,
+	.irq_set_affinity	= irq_chip_set_affinity_parent,
+};
+
+static int alpine_msix_allocate_sgi(struct alpine_msix_data *priv, int num_req)
+{
+	int first;
+
+	spin_lock(&priv->msi_map_lock);
+
+	first = bitmap_find_next_zero_area(priv->msi_map, priv->num_spis, 0,
+					   num_req, 0);
+	if (first >= priv->num_spis) {
+		spin_unlock(&priv->msi_map_lock);
+		return -ENOSPC;
+	}
+
+	bitmap_set(priv->msi_map, first, num_req);
+
+	spin_unlock(&priv->msi_map_lock);
+
+	return priv->spi_first + first;
+}
+
+static void alpine_msix_free_sgi(struct alpine_msix_data *priv, unsigned sgi,
+				 int num_req)
+{
+	int first = sgi - priv->spi_first;
+
+	spin_lock(&priv->msi_map_lock);
+
+	bitmap_clear(priv->msi_map, first, num_req);
+
+	spin_unlock(&priv->msi_map_lock);
+}
+
+static void alpine_msix_compose_msi_msg(struct irq_data *data,
+					struct msi_msg *msg)
+{
+	struct alpine_msix_data *priv = irq_data_get_irq_chip_data(data);
+	phys_addr_t msg_addr = priv->addr;
+
+	msg_addr |= (data->hwirq << 3);
+
+	msg->address_hi = upper_32_bits(msg_addr);
+	msg->address_lo = lower_32_bits(msg_addr);
+	msg->data = 0;
+}
+
+static struct msi_domain_info alpine_msix_domain_info = {
+	.flags	= MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
+		  MSI_FLAG_PCI_MSIX,
+	.chip	= &alpine_msix_irq_chip,
+};
+
+static struct irq_chip middle_irq_chip = {
+	.name			= "alpine_msix_middle",
+	.irq_mask		= irq_chip_mask_parent,
+	.irq_unmask		= irq_chip_unmask_parent,
+	.irq_eoi		= irq_chip_eoi_parent,
+	.irq_set_affinity	= irq_chip_set_affinity_parent,
+	.irq_compose_msi_msg	= alpine_msix_compose_msi_msg,
+};
+
+static int alpine_msix_gic_domain_alloc(struct irq_domain *domain,
+					unsigned int virq, int sgi)
+{
+	struct irq_fwspec fwspec;
+	struct irq_data *d;
+	int ret;
+
+	if (!is_of_node(domain->parent->fwnode))
+		return -EINVAL;
+
+	fwspec.fwnode = domain->parent->fwnode;
+	fwspec.param_count = 3;
+	fwspec.param[0] = 0;
+	fwspec.param[1] = sgi;
+	fwspec.param[2] = IRQ_TYPE_EDGE_RISING;
+
+	ret = irq_domain_alloc_irqs_parent(domain, virq, 1, &fwspec);
+	if (ret)
+		return ret;
+
+	d = irq_domain_get_irq_data(domain->parent, virq);
+	d->chip->irq_set_type(d, IRQ_TYPE_EDGE_RISING);
+
+	return 0;
+}
+
+static int alpine_msix_middle_domain_alloc(struct irq_domain *domain,
+					   unsigned int virq,
+					   unsigned int nr_irqs, void *args)
+{
+	struct alpine_msix_data *priv = domain->host_data;
+	int sgi, err, i;
+
+	sgi = alpine_msix_allocate_sgi(priv, nr_irqs);
+	if (sgi < 0)
+		return sgi;
+
+	for (i = 0; i < nr_irqs; i++) {
+		err = alpine_msix_gic_domain_alloc(domain, virq + i, sgi + i);
+		if (err)
+			goto err_sgi;
+
+		irq_domain_set_hwirq_and_chip(domain, virq + i, sgi + i,
+					      &middle_irq_chip, priv);
+	}
+
+	return 0;
+
+err_sgi:
+	while (--i >= 0)
+		irq_domain_free_irqs_parent(domain, virq, i);
+	alpine_msix_free_sgi(priv, sgi, nr_irqs);
+	return err;
+}
+
+static void alpine_msix_middle_domain_free(struct irq_domain *domain,
+					   unsigned int virq,
+					   unsigned int nr_irqs)
+{
+	struct irq_data *d = irq_domain_get_irq_data(domain, virq);
+	struct alpine_msix_data *priv = irq_data_get_irq_chip_data(d);
+
+	irq_domain_free_irqs_parent(domain, virq, nr_irqs);
+	alpine_msix_free_sgi(priv, d->hwirq, nr_irqs);
+}
+
+static const struct irq_domain_ops alpine_msix_middle_domain_ops = {
+	.alloc	= alpine_msix_middle_domain_alloc,
+	.free	= alpine_msix_middle_domain_free,
+};
+
+static int alpine_msix_init_domains(struct alpine_msix_data *priv,
+				    struct device_node *node)
+{
+	struct irq_domain *middle_domain, *msi_domain, *gic_domain;
+	struct device_node *gic_node;
+
+	gic_node = of_irq_find_parent(node);
+	if (!gic_node) {
+		pr_err("Failed to find the GIC node\n");
+		return -ENODEV;
+	}
+
+	gic_domain = irq_find_host(gic_node);
+	if (!gic_domain) {
+		pr_err("Failed to find the GIC domain\n");
+		return -ENXIO;
+	}
+
+	middle_domain = irq_domain_add_tree(NULL,
+					    &alpine_msix_middle_domain_ops,
+					    priv);
+	if (!middle_domain) {
+		pr_err("Failed to create the MSIX middle domain\n");
+		return -ENOMEM;
+	}
+
+	middle_domain->parent = gic_domain;
+
+	msi_domain = pci_msi_create_irq_domain(of_node_to_fwnode(node),
+					       &alpine_msix_domain_info,
+					       middle_domain);
+	if (!msi_domain) {
+		pr_err("Failed to create MSI domain\n");
+		irq_domain_remove(middle_domain);
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+static int alpine_msix_init(struct device_node *node,
+			    struct device_node *parent)
+{
+	struct alpine_msix_data *priv;
+	struct resource res;
+	int ret;
+
+	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+
+	spin_lock_init(&priv->msi_map_lock);
+
+	ret = of_address_to_resource(node, 0, &res);
+	if (ret) {
+		pr_err("Failed to allocate resource\n");
+		goto err_priv;
+	}
+
+	/*
+	 * The 20 least significant bits of addr provide direct information
+	 * regarding the interrupt destination.
+	 *
+	 * To select the primary GIC as the target GIC, bits [18:17] must be set
+	 * to 0x0. In this case, bit 16 (SPI_TARGET_CLUSTER0) must be set.
+	 */
+	priv->addr = res.start & GENMASK_ULL(63, 20);
+	priv->addr |= ALPINE_MSIX_SPI_TARGET_CLUSTER0;
+
+	if (of_property_read_u32(node, "al,msi-base-spi", &priv->spi_first)) {
+		pr_err("Unable to parse MSI base\n");
+		ret = -EINVAL;
+		goto err_priv;
+	}
+
+	if (of_property_read_u32(node, "al,msi-num-spis", &priv->num_spis)) {
+		pr_err("Unable to parse MSI numbers\n");
+		ret = -EINVAL;
+		goto err_priv;
+	}
+
+	priv->msi_map = kzalloc(sizeof(*priv->msi_map) * BITS_TO_LONGS(priv->num_spis),
+				GFP_KERNEL);
+	if (!priv->msi_map) {
+		ret = -ENOMEM;
+		goto err_priv;
+	}
+
+	pr_debug("Registering %d msixs, starting at %d\n",
+		 priv->num_spis, priv->spi_first);
+
+	ret = alpine_msix_init_domains(priv, node);
+	if (ret)
+		goto err_map;
+
+	return 0;
+
+err_map:
+	kfree(priv->msi_map);
+err_priv:
+	kfree(priv);
+	return ret;
+}
+IRQCHIP_DECLARE(alpine_msix, "al,alpine-msix", alpine_msix_init);
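Tying alpine_msix_init() and alpine_msix_compose_msi_msg() together: only bits [63:20] of the MMIO resource are kept, bit 16 selects cluster 0, and the hwirq lands at bits [19:3] of the message address. A standalone recomputation with a made-up resource base:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t res_start = 0xfbe00000;	/* hypothetical reg base from the DT */
	uint32_t hwirq = 5;			/* hypothetical allocated SPI offset */

	uint64_t addr = (res_start & ~0xfffffULL) | (1ULL << 16); /* SPI_TARGET_CLUSTER0 */
	uint64_t msg_addr = addr | ((uint64_t)hwirq << 3);

	printf("address_hi = 0x%08x\n", (uint32_t)(msg_addr >> 32));	/* 0x00000000 */
	printf("address_lo = 0x%08x\n", (uint32_t)msg_addr);		/* 0xfbe10028 */
	return 0;
}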
diff --git a/drivers/irqchip/irq-armada-370-xp.c b/drivers/irqchip/irq-armada-370-xp.c
index 3f3a8c3..e7dc6cb 100644
--- a/drivers/irqchip/irq-armada-370-xp.c
+++ b/drivers/irqchip/irq-armada-370-xp.c
@@ -71,6 +71,7 @@
 static int parent_irq;
 #ifdef CONFIG_PCI_MSI
 static struct irq_domain *armada_370_xp_msi_domain;
+static struct irq_domain *armada_370_xp_msi_inner_domain;
 static DECLARE_BITMAP(msi_used, PCI_MSI_DOORBELL_NR);
 static DEFINE_MUTEX(msi_used_lock);
 static phys_addr_t msi_doorbell_addr;
@@ -115,127 +116,102 @@
 
 #ifdef CONFIG_PCI_MSI
 
-static int armada_370_xp_alloc_msi(void)
-{
-	int hwirq;
-
-	mutex_lock(&msi_used_lock);
-	hwirq = find_first_zero_bit(&msi_used, PCI_MSI_DOORBELL_NR);
-	if (hwirq >= PCI_MSI_DOORBELL_NR)
-		hwirq = -ENOSPC;
-	else
-		set_bit(hwirq, msi_used);
-	mutex_unlock(&msi_used_lock);
-
-	return hwirq;
-}
-
-static void armada_370_xp_free_msi(int hwirq)
-{
-	mutex_lock(&msi_used_lock);
-	if (!test_bit(hwirq, msi_used))
-		pr_err("trying to free unused MSI#%d\n", hwirq);
-	else
-		clear_bit(hwirq, msi_used);
-	mutex_unlock(&msi_used_lock);
-}
-
-static int armada_370_xp_setup_msi_irq(struct msi_controller *chip,
-				       struct pci_dev *pdev,
-				       struct msi_desc *desc)
-{
-	struct msi_msg msg;
-	int virq, hwirq;
-
-	/* We support MSI, but not MSI-X */
-	if (desc->msi_attrib.is_msix)
-		return -EINVAL;
-
-	hwirq = armada_370_xp_alloc_msi();
-	if (hwirq < 0)
-		return hwirq;
-
-	virq = irq_create_mapping(armada_370_xp_msi_domain, hwirq);
-	if (!virq) {
-		armada_370_xp_free_msi(hwirq);
-		return -EINVAL;
-	}
-
-	irq_set_msi_desc(virq, desc);
-
-	msg.address_lo = msi_doorbell_addr;
-	msg.address_hi = 0;
-	msg.data = 0xf00 | (hwirq + 16);
-
-	pci_write_msi_msg(virq, &msg);
-	return 0;
-}
-
-static void armada_370_xp_teardown_msi_irq(struct msi_controller *chip,
-					   unsigned int irq)
-{
-	struct irq_data *d = irq_get_irq_data(irq);
-	unsigned long hwirq = d->hwirq;
-
-	irq_dispose_mapping(irq);
-	armada_370_xp_free_msi(hwirq);
-}
-
 static struct irq_chip armada_370_xp_msi_irq_chip = {
-	.name = "armada_370_xp_msi_irq",
-	.irq_enable = pci_msi_unmask_irq,
-	.irq_disable = pci_msi_mask_irq,
+	.name = "MPIC MSI",
 	.irq_mask = pci_msi_mask_irq,
 	.irq_unmask = pci_msi_unmask_irq,
 };
 
-static int armada_370_xp_msi_map(struct irq_domain *domain, unsigned int virq,
-				 irq_hw_number_t hw)
-{
-	irq_set_chip_and_handler(virq, &armada_370_xp_msi_irq_chip,
-				 handle_simple_irq);
+static struct msi_domain_info armada_370_xp_msi_domain_info = {
+	.flags	= (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
+		   MSI_FLAG_MULTI_PCI_MSI),
+	.chip	= &armada_370_xp_msi_irq_chip,
+};
 
-	return 0;
+static void armada_370_xp_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
+{
+	msg->address_lo = lower_32_bits(msi_doorbell_addr);
+	msg->address_hi = upper_32_bits(msi_doorbell_addr);
+	msg->data = 0xf00 | (data->hwirq + PCI_MSI_DOORBELL_START);
 }
 
-static const struct irq_domain_ops armada_370_xp_msi_irq_ops = {
-	.map = armada_370_xp_msi_map,
+static int armada_370_xp_msi_set_affinity(struct irq_data *irq_data,
+					  const struct cpumask *mask, bool force)
+{
+	return -EINVAL;
+}
+
+static struct irq_chip armada_370_xp_msi_bottom_irq_chip = {
+	.name			= "MPIC MSI",
+	.irq_compose_msi_msg	= armada_370_xp_compose_msi_msg,
+	.irq_set_affinity	= armada_370_xp_msi_set_affinity,
+};
+
+static int armada_370_xp_msi_alloc(struct irq_domain *domain, unsigned int virq,
+				   unsigned int nr_irqs, void *args)
+{
+	int hwirq, i;
+
+	mutex_lock(&msi_used_lock);
+
+	hwirq = bitmap_find_next_zero_area(msi_used, PCI_MSI_DOORBELL_NR,
+					   0, nr_irqs, 0);
+	if (hwirq >= PCI_MSI_DOORBELL_NR) {
+		mutex_unlock(&msi_used_lock);
+		return -ENOSPC;
+	}
+
+	bitmap_set(msi_used, hwirq, nr_irqs);
+	mutex_unlock(&msi_used_lock);
+
+	for (i = 0; i < nr_irqs; i++) {
+		irq_domain_set_info(domain, virq + i, hwirq + i,
+				    &armada_370_xp_msi_bottom_irq_chip,
+				    domain->host_data, handle_simple_irq,
+				    NULL, NULL);
+	}
+
+	return hwirq;
+}
+
+static void armada_370_xp_msi_free(struct irq_domain *domain,
+				   unsigned int virq, unsigned int nr_irqs)
+{
+	struct irq_data *d = irq_domain_get_irq_data(domain, virq);
+
+	mutex_lock(&msi_used_lock);
+	bitmap_clear(msi_used, d->hwirq, nr_irqs);
+	mutex_unlock(&msi_used_lock);
+}
+
+static const struct irq_domain_ops armada_370_xp_msi_domain_ops = {
+	.alloc	= armada_370_xp_msi_alloc,
+	.free	= armada_370_xp_msi_free,
 };
 
 static int armada_370_xp_msi_init(struct device_node *node,
 				  phys_addr_t main_int_phys_base)
 {
-	struct msi_controller *msi_chip;
 	u32 reg;
-	int ret;
 
 	msi_doorbell_addr = main_int_phys_base +
 		ARMADA_370_XP_SW_TRIG_INT_OFFS;
 
-	msi_chip = kzalloc(sizeof(*msi_chip), GFP_KERNEL);
-	if (!msi_chip)
+	armada_370_xp_msi_inner_domain =
+		irq_domain_add_linear(NULL, PCI_MSI_DOORBELL_NR,
+				      &armada_370_xp_msi_domain_ops, NULL);
+	if (!armada_370_xp_msi_inner_domain)
 		return -ENOMEM;
 
-	msi_chip->setup_irq = armada_370_xp_setup_msi_irq;
-	msi_chip->teardown_irq = armada_370_xp_teardown_msi_irq;
-	msi_chip->of_node = node;
-
 	armada_370_xp_msi_domain =
-		irq_domain_add_linear(NULL, PCI_MSI_DOORBELL_NR,
-				      &armada_370_xp_msi_irq_ops,
-				      NULL);
+		pci_msi_create_irq_domain(of_node_to_fwnode(node),
+					  &armada_370_xp_msi_domain_info,
+					  armada_370_xp_msi_inner_domain);
 	if (!armada_370_xp_msi_domain) {
-		kfree(msi_chip);
+		irq_domain_remove(armada_370_xp_msi_inner_domain);
 		return -ENOMEM;
 	}
 
-	ret = of_pci_msi_chip_add(msi_chip);
-	if (ret < 0) {
-		irq_domain_remove(armada_370_xp_msi_domain);
-		kfree(msi_chip);
-		return ret;
-	}
-
 	reg = readl(per_cpu_int_base + ARMADA_370_XP_IN_DRBEL_MSK_OFFS)
 		| PCI_MSI_DOORBELL_MASK;
 
@@ -280,7 +256,7 @@
 #endif
 
 static struct irq_chip armada_370_xp_irq_chip = {
-	.name		= "armada_370_xp_irq",
+	.name		= "MPIC",
 	.irq_mask       = armada_370_xp_irq_mask,
 	.irq_mask_ack   = armada_370_xp_irq_mask,
 	.irq_unmask     = armada_370_xp_irq_unmask,
@@ -427,12 +403,12 @@
 			continue;
 
 		if (is_chained) {
-			irq = irq_find_mapping(armada_370_xp_msi_domain,
-					       msinr - 16);
+			irq = irq_find_mapping(armada_370_xp_msi_inner_domain,
+					       msinr - PCI_MSI_DOORBELL_START);
 			generic_handle_irq(irq);
 		} else {
-			irq = msinr - 16;
-			handle_domain_irq(armada_370_xp_msi_domain,
+			irq = msinr - PCI_MSI_DOORBELL_START;
+			handle_domain_irq(armada_370_xp_msi_inner_domain,
 					  irq, regs);
 		}
 	}
@@ -604,8 +580,8 @@
 	armada_370_xp_mpic_domain =
 		irq_domain_add_linear(node, nr_irqs,
 				&armada_370_xp_mpic_irq_ops, NULL);
-
 	BUG_ON(!armada_370_xp_mpic_domain);
+	armada_370_xp_mpic_domain->bus_token = DOMAIN_BUS_WIRED;
 
 	/* Setup for the boot CPU */
 	armada_xp_mpic_perf_init();
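For reference, the doorbell payload produced by armada_370_xp_compose_msi_msg() and undone in the demux loop is just a bit index offset by PCI_MSI_DOORBELL_START (16, per the literal 16 the code being replaced used). A standalone round trip with a made-up hwirq:

#include <stdio.h>

#define PCI_MSI_DOORBELL_START	16	/* matches the literal 16 in the old code */

int main(void)
{
	unsigned int hwirq = 3;		/* hypothetical doorbell bit allocated above */
	unsigned int msinr = hwirq + PCI_MSI_DOORBELL_START;

	printf("msg.data = 0x%x\n", 0xf00 | msinr);			/* 0xf13 */
	printf("hwirq    = %u\n", msinr - PCI_MSI_DOORBELL_START);	/* 3 */
	return 0;
}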
diff --git a/drivers/irqchip/irq-ath79-cpu.c b/drivers/irqchip/irq-ath79-cpu.c
new file mode 100644
index 0000000..befe93c
--- /dev/null
+++ b/drivers/irqchip/irq-ath79-cpu.c
@@ -0,0 +1,97 @@
+/*
+ *  Atheros AR71xx/AR724x/AR913x specific interrupt handling
+ *
+ *  Copyright (C) 2015 Alban Bedel <albeu@free.fr>
+ *  Copyright (C) 2010-2011 Jaiganesh Narayanan <jnarayanan@atheros.com>
+ *  Copyright (C) 2008-2011 Gabor Juhos <juhosg@openwrt.org>
+ *  Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
+ *
+ *  Parts of this file are based on Atheros' 2.6.15/2.6.31 BSP
+ *
+ *  This program is free software; you can redistribute it and/or modify it
+ *  under the terms of the GNU General Public License version 2 as published
+ *  by the Free Software Foundation.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/irqchip.h>
+#include <linux/of.h>
+
+#include <asm/irq_cpu.h>
+#include <asm/mach-ath79/ath79.h>
+
+/*
+ * The IP2/IP3 lines are tied to a PCI/WMAC/USB device. Drivers for
+ * these devices typically allocate coherent DMA memory; however, the
+ * DMA controller may still have some unsynchronized data in the FIFO.
+ * Issue a flush in the handlers to ensure that the driver sees
+ * the update.
+ *
+ * This array maps the interrupt lines to the DDR write buffer channels.
+ */
+
+static unsigned irq_wb_chan[8] = {
+	-1, -1, -1, -1, -1, -1, -1, -1,
+};
+
+asmlinkage void plat_irq_dispatch(void)
+{
+	unsigned long pending;
+	int irq;
+
+	pending = read_c0_status() & read_c0_cause() & ST0_IM;
+
+	if (!pending) {
+		spurious_interrupt();
+		return;
+	}
+
+	pending >>= CAUSEB_IP;
+	while (pending) {
+		irq = fls(pending) - 1;
+		if (irq < ARRAY_SIZE(irq_wb_chan) && irq_wb_chan[irq] != -1)
+			ath79_ddr_wb_flush(irq_wb_chan[irq]);
+		do_IRQ(MIPS_CPU_IRQ_BASE + irq);
+		pending &= ~BIT(irq);
+	}
+}
+
+static int __init ar79_cpu_intc_of_init(
+	struct device_node *node, struct device_node *parent)
+{
+	int err, i, count;
+
+	/* Fill the irq_wb_chan table */
+	count = of_count_phandle_with_args(
+		node, "qca,ddr-wb-channels", "#qca,ddr-wb-channel-cells");
+
+	for (i = 0; i < count; i++) {
+		struct of_phandle_args args;
+		u32 irq = i;
+
+		of_property_read_u32_index(
+			node, "qca,ddr-wb-channel-interrupts", i, &irq);
+		if (irq >= ARRAY_SIZE(irq_wb_chan))
+			continue;
+
+		err = of_parse_phandle_with_args(
+			node, "qca,ddr-wb-channels",
+			"#qca,ddr-wb-channel-cells",
+			i, &args);
+		if (err)
+			return err;
+
+		irq_wb_chan[irq] = args.args[0];
+	}
+
+	return mips_cpu_irq_of_init(node, parent);
+}
+IRQCHIP_DECLARE(ar79_cpu_intc, "qca,ar7100-cpu-intc",
+		ar79_cpu_intc_of_init);
+
+void __init ath79_cpu_irq_init(unsigned irq_wb_chan2, unsigned irq_wb_chan3)
+{
+	irq_wb_chan[2] = irq_wb_chan2;
+	irq_wb_chan[3] = irq_wb_chan3;
+	mips_cpu_irq_init();
+}
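plat_irq_dispatch() above services the highest pending IP line first via fls(). A standalone walk over a made-up pending mask (CAUSEB_IP is 8 on MIPS; the local fls_() mimics the kernel helper):

#include <stdio.h>

#define CAUSEB_IP 8			/* IP bits start at bit 8 of the Cause register */

static int fls_(unsigned long x)	/* like the kernel's fls(): 1-based, fls(0) == 0 */
{
	int r = 0;

	while (x) {
		x >>= 1;
		r++;
	}
	return r;
}

int main(void)
{
	/* hypothetical (status & cause & ST0_IM): IP2 and IP7 pending */
	unsigned long pending = (1UL << (CAUSEB_IP + 2)) | (1UL << (CAUSEB_IP + 7));

	pending >>= CAUSEB_IP;
	while (pending) {
		int irq = fls_(pending) - 1;

		printf("do_IRQ(MIPS_CPU_IRQ_BASE + %d)\n", irq);	/* 7, then 2 */
		pending &= ~(1UL << irq);
	}
	return 0;
}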
diff --git a/drivers/irqchip/irq-ath79-misc.c b/drivers/irqchip/irq-ath79-misc.c
new file mode 100644
index 0000000..aa72907
--- /dev/null
+++ b/drivers/irqchip/irq-ath79-misc.c
@@ -0,0 +1,189 @@
+/*
+ *  Atheros AR71xx/AR724x/AR913x MISC interrupt controller
+ *
+ *  Copyright (C) 2015 Alban Bedel <albeu@free.fr>
+ *  Copyright (C) 2010-2011 Jaiganesh Narayanan <jnarayanan@atheros.com>
+ *  Copyright (C) 2008-2011 Gabor Juhos <juhosg@openwrt.org>
+ *  Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
+ *
+ *  Parts of this file are based on Atheros' 2.6.15/2.6.31 BSP
+ *
+ *  This program is free software; you can redistribute it and/or modify it
+ *  under the terms of the GNU General Public License version 2 as published
+ *  by the Free Software Foundation.
+ */
+
+#include <linux/irqchip.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+
+#define AR71XX_RESET_REG_MISC_INT_STATUS	0
+#define AR71XX_RESET_REG_MISC_INT_ENABLE	4
+
+#define ATH79_MISC_IRQ_COUNT			32
+
+static void ath79_misc_irq_handler(struct irq_desc *desc)
+{
+	struct irq_domain *domain = irq_desc_get_handler_data(desc);
+	struct irq_chip *chip = irq_desc_get_chip(desc);
+	void __iomem *base = domain->host_data;
+	u32 pending;
+
+	chained_irq_enter(chip, desc);
+
+	pending = __raw_readl(base + AR71XX_RESET_REG_MISC_INT_STATUS) &
+		  __raw_readl(base + AR71XX_RESET_REG_MISC_INT_ENABLE);
+
+	if (!pending) {
+		spurious_interrupt();
+		chained_irq_exit(chip, desc);
+		return;
+	}
+
+	while (pending) {
+		int bit = __ffs(pending);
+
+		generic_handle_irq(irq_linear_revmap(domain, bit));
+		pending &= ~BIT(bit);
+	}
+
+	chained_irq_exit(chip, desc);
+}
+
+static void ar71xx_misc_irq_unmask(struct irq_data *d)
+{
+	void __iomem *base = irq_data_get_irq_chip_data(d);
+	unsigned int irq = d->hwirq;
+	u32 t;
+
+	t = __raw_readl(base + AR71XX_RESET_REG_MISC_INT_ENABLE);
+	__raw_writel(t | BIT(irq), base + AR71XX_RESET_REG_MISC_INT_ENABLE);
+
+	/* flush write */
+	__raw_readl(base + AR71XX_RESET_REG_MISC_INT_ENABLE);
+}
+
+static void ar71xx_misc_irq_mask(struct irq_data *d)
+{
+	void __iomem *base = irq_data_get_irq_chip_data(d);
+	unsigned int irq = d->hwirq;
+	u32 t;
+
+	t = __raw_readl(base + AR71XX_RESET_REG_MISC_INT_ENABLE);
+	__raw_writel(t & ~BIT(irq), base + AR71XX_RESET_REG_MISC_INT_ENABLE);
+
+	/* flush write */
+	__raw_readl(base + AR71XX_RESET_REG_MISC_INT_ENABLE);
+}
+
+static void ar724x_misc_irq_ack(struct irq_data *d)
+{
+	void __iomem *base = irq_data_get_irq_chip_data(d);
+	unsigned int irq = d->hwirq;
+	u32 t;
+
+	t = __raw_readl(base + AR71XX_RESET_REG_MISC_INT_STATUS);
+	__raw_writel(t & ~BIT(irq), base + AR71XX_RESET_REG_MISC_INT_STATUS);
+
+	/* flush write */
+	__raw_readl(base + AR71XX_RESET_REG_MISC_INT_STATUS);
+}
+
+static struct irq_chip ath79_misc_irq_chip = {
+	.name		= "MISC",
+	.irq_unmask	= ar71xx_misc_irq_unmask,
+	.irq_mask	= ar71xx_misc_irq_mask,
+};
+
+static int misc_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hw)
+{
+	irq_set_chip_and_handler(irq, &ath79_misc_irq_chip, handle_level_irq);
+	irq_set_chip_data(irq, d->host_data);
+	return 0;
+}
+
+static const struct irq_domain_ops misc_irq_domain_ops = {
+	.xlate = irq_domain_xlate_onecell,
+	.map = misc_map,
+};
+
+static void __init ath79_misc_intc_domain_init(
+	struct irq_domain *domain, int irq)
+{
+	void __iomem *base = domain->host_data;
+
+	/* Disable and clear all interrupts */
+	__raw_writel(0, base + AR71XX_RESET_REG_MISC_INT_ENABLE);
+	__raw_writel(0, base + AR71XX_RESET_REG_MISC_INT_STATUS);
+
+	irq_set_chained_handler_and_data(irq, ath79_misc_irq_handler, domain);
+}
+
+static int __init ath79_misc_intc_of_init(
+	struct device_node *node, struct device_node *parent)
+{
+	struct irq_domain *domain;
+	void __iomem *base;
+	int irq;
+
+	irq = irq_of_parse_and_map(node, 0);
+	if (!irq) {
+		pr_err("Failed to get MISC IRQ\n");
+		return -EINVAL;
+	}
+
+	base = of_iomap(node, 0);
+	if (!base) {
+		pr_err("Failed to get MISC IRQ registers\n");
+		return -ENOMEM;
+	}
+
+	domain = irq_domain_add_linear(node, ATH79_MISC_IRQ_COUNT,
+				&misc_irq_domain_ops, base);
+	if (!domain) {
+		pr_err("Failed to add MISC irqdomain\n");
+		return -EINVAL;
+	}
+
+	ath79_misc_intc_domain_init(domain, irq);
+	return 0;
+}
+
+static int __init ar7100_misc_intc_of_init(
+	struct device_node *node, struct device_node *parent)
+{
+	ath79_misc_irq_chip.irq_mask_ack = ar71xx_misc_irq_mask;
+	return ath79_misc_intc_of_init(node, parent);
+}
+
+IRQCHIP_DECLARE(ar7100_misc_intc, "qca,ar7100-misc-intc",
+		ar7100_misc_intc_of_init);
+
+static int __init ar7240_misc_intc_of_init(
+	struct device_node *node, struct device_node *parent)
+{
+	ath79_misc_irq_chip.irq_ack = ar724x_misc_irq_ack;
+	return ath79_misc_intc_of_init(node, parent);
+}
+
+IRQCHIP_DECLARE(ar7240_misc_intc, "qca,ar7240-misc-intc",
+		ar7240_misc_intc_of_init);
+
+void __init ath79_misc_irq_init(void __iomem *regs, int irq,
+				int irq_base, bool is_ar71xx)
+{
+	struct irq_domain *domain;
+
+	if (is_ar71xx)
+		ath79_misc_irq_chip.irq_mask_ack = ar71xx_misc_irq_mask;
+	else
+		ath79_misc_irq_chip.irq_ack = ar724x_misc_irq_ack;
+
+	domain = irq_domain_add_legacy(NULL, ATH79_MISC_IRQ_COUNT,
+			irq_base, 0, &misc_irq_domain_ops, regs);
+	if (!domain)
+		panic("Failed to create MISC irqdomain");
+
+	ath79_misc_intc_domain_init(domain, irq);
+}
diff --git a/drivers/irqchip/irq-atmel-aic-common.c b/drivers/irqchip/irq-atmel-aic-common.c
index 37199b9..28b26c8 100644
--- a/drivers/irqchip/irq-atmel-aic-common.c
+++ b/drivers/irqchip/irq-atmel-aic-common.c
@@ -80,16 +80,10 @@
 	return 0;
 }
 
-int aic_common_set_priority(int priority, unsigned *val)
+void aic_common_set_priority(int priority, unsigned *val)
 {
-	if (priority < AT91_AIC_IRQ_MIN_PRIORITY ||
-	    priority > AT91_AIC_IRQ_MAX_PRIORITY)
-		return -EINVAL;
-
 	*val &= ~AT91_AIC_PRIOR;
 	*val |= priority;
-
-	return 0;
 }
 
 int aic_common_irq_domain_xlate(struct irq_domain *d,
@@ -193,7 +187,7 @@
 	}
 }
 
-void __init aic_common_irq_fixup(const struct of_device_id *matches)
+static void __init aic_common_irq_fixup(const struct of_device_id *matches)
 {
 	struct device_node *root = of_find_node_by_path("/");
 	const struct of_device_id *match;
@@ -214,7 +208,8 @@
 
 struct irq_domain *__init aic_common_of_init(struct device_node *node,
 					     const struct irq_domain_ops *ops,
-					     const char *name, int nirqs)
+					     const char *name, int nirqs,
+					     const struct of_device_id *matches)
 {
 	struct irq_chip_generic *gc;
 	struct irq_domain *domain;
@@ -264,6 +259,7 @@
 	}
 
 	aic_common_ext_irq_of_init(domain);
+	aic_common_irq_fixup(matches);
 
 	return domain;
 
diff --git a/drivers/irqchip/irq-atmel-aic-common.h b/drivers/irqchip/irq-atmel-aic-common.h
index 603f0a9..af60376 100644
--- a/drivers/irqchip/irq-atmel-aic-common.h
+++ b/drivers/irqchip/irq-atmel-aic-common.h
@@ -19,7 +19,7 @@
 
 int aic_common_set_type(struct irq_data *d, unsigned type, unsigned *val);
 
-int aic_common_set_priority(int priority, unsigned *val);
+void aic_common_set_priority(int priority, unsigned *val);
 
 int aic_common_irq_domain_xlate(struct irq_domain *d,
 				struct device_node *ctrlr,
@@ -30,12 +30,11 @@
 
 struct irq_domain *__init aic_common_of_init(struct device_node *node,
 					     const struct irq_domain_ops *ops,
-					     const char *name, int nirqs);
+					     const char *name, int nirqs,
+					     const struct of_device_id *matches);
 
 void __init aic_common_rtc_irq_fixup(struct device_node *root);
 
 void __init aic_common_rtt_irq_fixup(struct device_node *root);
 
-void __init aic_common_irq_fixup(const struct of_device_id *matches);
-
 #endif /* __IRQ_ATMEL_AIC_COMMON_H */
diff --git a/drivers/irqchip/irq-atmel-aic.c b/drivers/irqchip/irq-atmel-aic.c
index 8a0c7f2..112e17c 100644
--- a/drivers/irqchip/irq-atmel-aic.c
+++ b/drivers/irqchip/irq-atmel-aic.c
@@ -196,9 +196,8 @@
 
 	irq_gc_lock(gc);
 	smr = irq_reg_readl(gc, AT91_AIC_SMR(*out_hwirq));
-	ret = aic_common_set_priority(intspec[2], &smr);
-	if (!ret)
-		irq_reg_writel(gc, smr, AT91_AIC_SMR(*out_hwirq));
+	aic_common_set_priority(intspec[2], &smr);
+	irq_reg_writel(gc, smr, AT91_AIC_SMR(*out_hwirq));
 	irq_gc_unlock(gc);
 
 	return ret;
@@ -248,12 +247,10 @@
 		return -EEXIST;
 
 	domain = aic_common_of_init(node, &aic_irq_ops, "atmel-aic",
-				    NR_AIC_IRQS);
+				    NR_AIC_IRQS, aic_irq_fixups);
 	if (IS_ERR(domain))
 		return PTR_ERR(domain);
 
-	aic_common_irq_fixup(aic_irq_fixups);
-
 	aic_domain = domain;
 	gc = irq_get_domain_generic_chip(domain, 0);
 
diff --git a/drivers/irqchip/irq-atmel-aic5.c b/drivers/irqchip/irq-atmel-aic5.c
index 62bb840..4f0d068 100644
--- a/drivers/irqchip/irq-atmel-aic5.c
+++ b/drivers/irqchip/irq-atmel-aic5.c
@@ -272,9 +272,8 @@
 	irq_gc_lock(bgc);
 	irq_reg_writel(bgc, *out_hwirq, AT91_AIC5_SSR);
 	smr = irq_reg_readl(bgc, AT91_AIC5_SMR);
-	ret = aic_common_set_priority(intspec[2], &smr);
-	if (!ret)
-		irq_reg_writel(bgc, intspec[2] | smr, AT91_AIC5_SMR);
+	aic_common_set_priority(intspec[2], &smr);
+	irq_reg_writel(bgc, smr, AT91_AIC5_SMR);
 	irq_gc_unlock(bgc);
 
 	return ret;
@@ -312,12 +311,10 @@
 		return -EEXIST;
 
 	domain = aic_common_of_init(node, &aic5_irq_ops, "atmel-aic5",
-				    nirqs);
+				    nirqs, aic5_irq_fixups);
 	if (IS_ERR(domain))
 		return PTR_ERR(domain);
 
-	aic_common_irq_fixup(aic5_irq_fixups);
-
 	aic5_domain = domain;
 	nchips = aic5_domain->revmap_size / 32;
 	for (i = 0; i < nchips; i++) {
diff --git a/drivers/irqchip/irq-bcm2836.c b/drivers/irqchip/irq-bcm2836.c
index 963065a..b6e950d 100644
--- a/drivers/irqchip/irq-bcm2836.c
+++ b/drivers/irqchip/irq-bcm2836.c
@@ -229,7 +229,6 @@
 	unsigned long secondary_startup_phys =
 		(unsigned long)virt_to_phys((void *)secondary_startup);
 
-	dsb();
 	writel(secondary_startup_phys,
 	       intc.base + LOCAL_MAILBOX3_SET0 + 16 * cpu);
 
diff --git a/drivers/irqchip/irq-bcm6345-l1.c b/drivers/irqchip/irq-bcm6345-l1.c
new file mode 100644
index 0000000..b844c89
--- /dev/null
+++ b/drivers/irqchip/irq-bcm6345-l1.c
@@ -0,0 +1,364 @@
+/*
+ * Broadcom BCM6345 style Level 1 interrupt controller driver
+ *
+ * Copyright (C) 2014 Broadcom Corporation
+ * Copyright 2015 Simon Arlott
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This is based on the BCM7038 (which supports SMP) but with a single
+ * enable register instead of separate mask/set/clear registers.
+ *
+ * The BCM3380 has a similar mask/status register layout, but each pair
+ * of words is at separate locations (and SMP is not supported).
+ *
+ * ENABLE/STATUS words are packed next to each other for each CPU:
+ *
+ * BCM6368:
+ *   0x1000_0020: CPU0_W0_ENABLE
+ *   0x1000_0024: CPU0_W1_ENABLE
+ *   0x1000_0028: CPU0_W0_STATUS		IRQs 32-63
+ *   0x1000_002c: CPU0_W1_STATUS		IRQs 0-31
+ *   0x1000_0030: CPU1_W0_ENABLE
+ *   0x1000_0034: CPU1_W1_ENABLE
+ *   0x1000_0038: CPU1_W0_STATUS		IRQs 32-63
+ *   0x1000_003c: CPU1_W1_STATUS		IRQs 0-31
+ *
+ * BCM63168:
+ *   0x1000_0020: CPU0_W0_ENABLE
+ *   0x1000_0024: CPU0_W1_ENABLE
+ *   0x1000_0028: CPU0_W2_ENABLE
+ *   0x1000_002c: CPU0_W3_ENABLE
+ *   0x1000_0030: CPU0_W0_STATUS	IRQs 96-127
+ *   0x1000_0034: CPU0_W1_STATUS	IRQs 64-95
+ *   0x1000_0038: CPU0_W2_STATUS	IRQs 32-63
+ *   0x1000_003c: CPU0_W3_STATUS	IRQs 0-31
+ *   0x1000_0040: CPU1_W0_ENABLE
+ *   0x1000_0044: CPU1_W1_ENABLE
+ *   0x1000_0048: CPU1_W2_ENABLE
+ *   0x1000_004c: CPU1_W3_ENABLE
+ *   0x1000_0050: CPU1_W0_STATUS	IRQs 96-127
+ *   0x1000_0054: CPU1_W1_STATUS	IRQs 64-95
+ *   0x1000_0058: CPU1_W2_STATUS	IRQs 32-63
+ *   0x1000_005c: CPU1_W3_STATUS	IRQs 0-31
+ *
+ * IRQs are numbered in CPU native endian order
+ * (which is big-endian in these examples)
+ */
+
+#define pr_fmt(fmt)	KBUILD_MODNAME	": " fmt
+
+#include <linux/bitops.h>
+#include <linux/cpumask.h>
+#include <linux/kconfig.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/smp.h>
+#include <linux/types.h>
+#include <linux/irqchip.h>
+#include <linux/irqchip/chained_irq.h>
+
+#define IRQS_PER_WORD		32
+#define REG_BYTES_PER_IRQ_WORD	(sizeof(u32) * 2)
+
+struct bcm6345_l1_cpu;
+
+struct bcm6345_l1_chip {
+	raw_spinlock_t		lock;
+	unsigned int		n_words;
+	struct irq_domain	*domain;
+	struct cpumask		cpumask;
+	struct bcm6345_l1_cpu	*cpus[NR_CPUS];
+};
+
+struct bcm6345_l1_cpu {
+	void __iomem		*map_base;
+	unsigned int		parent_irq;
+	u32			enable_cache[];
+};
+
+static inline unsigned int reg_enable(struct bcm6345_l1_chip *intc,
+					   unsigned int word)
+{
+#ifdef __BIG_ENDIAN
+	return (1 * intc->n_words - word - 1) * sizeof(u32);
+#else
+	return (0 * intc->n_words + word) * sizeof(u32);
+#endif
+}
+
+static inline unsigned int reg_status(struct bcm6345_l1_chip *intc,
+				      unsigned int word)
+{
+#ifdef __BIG_ENDIAN
+	return (2 * intc->n_words - word - 1) * sizeof(u32);
+#else
+	return (1 * intc->n_words + word) * sizeof(u32);
+#endif
+}
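As a quick sanity check of these helpers against the BCM6368 register map in the header comment (big-endian, n_words == 2): reg_enable(intc, 0) evaluates to 0x4 and reg_status(intc, 0) to 0xc, so hwirqs 0-31 (word 0) use W1_ENABLE/W1_STATUS, while reg_enable(intc, 1) is 0x0 and reg_status(intc, 1) is 0x8, mapping hwirqs 32-63 onto W0_ENABLE/W0_STATUS.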
+
+static inline unsigned int cpu_for_irq(struct bcm6345_l1_chip *intc,
+					struct irq_data *d)
+{
+	return cpumask_first_and(&intc->cpumask, irq_data_get_affinity_mask(d));
+}
+
+static void bcm6345_l1_irq_handle(struct irq_desc *desc)
+{
+	struct bcm6345_l1_chip *intc = irq_desc_get_handler_data(desc);
+	struct bcm6345_l1_cpu *cpu;
+	struct irq_chip *chip = irq_desc_get_chip(desc);
+	unsigned int idx;
+
+#ifdef CONFIG_SMP
+	cpu = intc->cpus[cpu_logical_map(smp_processor_id())];
+#else
+	cpu = intc->cpus[0];
+#endif
+
+	chained_irq_enter(chip, desc);
+
+	for (idx = 0; idx < intc->n_words; idx++) {
+		int base = idx * IRQS_PER_WORD;
+		unsigned long pending;
+		irq_hw_number_t hwirq;
+		unsigned int irq;
+
+		pending = __raw_readl(cpu->map_base + reg_status(intc, idx));
+		pending &= __raw_readl(cpu->map_base + reg_enable(intc, idx));
+
+		for_each_set_bit(hwirq, &pending, IRQS_PER_WORD) {
+			irq = irq_linear_revmap(intc->domain, base + hwirq);
+			if (irq)
+				do_IRQ(irq);
+			else
+				spurious_interrupt();
+		}
+	}
+
+	chained_irq_exit(chip, desc);
+}
+
+static inline void __bcm6345_l1_unmask(struct irq_data *d)
+{
+	struct bcm6345_l1_chip *intc = irq_data_get_irq_chip_data(d);
+	u32 word = d->hwirq / IRQS_PER_WORD;
+	u32 mask = BIT(d->hwirq % IRQS_PER_WORD);
+	unsigned int cpu_idx = cpu_for_irq(intc, d);
+
+	intc->cpus[cpu_idx]->enable_cache[word] |= mask;
+	__raw_writel(intc->cpus[cpu_idx]->enable_cache[word],
+		intc->cpus[cpu_idx]->map_base + reg_enable(intc, word));
+}
+
+static inline void __bcm6345_l1_mask(struct irq_data *d)
+{
+	struct bcm6345_l1_chip *intc = irq_data_get_irq_chip_data(d);
+	u32 word = d->hwirq / IRQS_PER_WORD;
+	u32 mask = BIT(d->hwirq % IRQS_PER_WORD);
+	unsigned int cpu_idx = cpu_for_irq(intc, d);
+
+	intc->cpus[cpu_idx]->enable_cache[word] &= ~mask;
+	__raw_writel(intc->cpus[cpu_idx]->enable_cache[word],
+		intc->cpus[cpu_idx]->map_base + reg_enable(intc, word));
+}
+
+static void bcm6345_l1_unmask(struct irq_data *d)
+{
+	struct bcm6345_l1_chip *intc = irq_data_get_irq_chip_data(d);
+	unsigned long flags;
+
+	raw_spin_lock_irqsave(&intc->lock, flags);
+	__bcm6345_l1_unmask(d);
+	raw_spin_unlock_irqrestore(&intc->lock, flags);
+}
+
+static void bcm6345_l1_mask(struct irq_data *d)
+{
+	struct bcm6345_l1_chip *intc = irq_data_get_irq_chip_data(d);
+	unsigned long flags;
+
+	raw_spin_lock_irqsave(&intc->lock, flags);
+	__bcm6345_l1_mask(d);
+	raw_spin_unlock_irqrestore(&intc->lock, flags);
+}
+
+static int bcm6345_l1_set_affinity(struct irq_data *d,
+				   const struct cpumask *dest,
+				   bool force)
+{
+	struct bcm6345_l1_chip *intc = irq_data_get_irq_chip_data(d);
+	u32 word = d->hwirq / IRQS_PER_WORD;
+	u32 mask = BIT(d->hwirq % IRQS_PER_WORD);
+	unsigned int old_cpu = cpu_for_irq(intc, d);
+	unsigned int new_cpu;
+	struct cpumask valid;
+	unsigned long flags;
+	bool enabled;
+
+	if (!cpumask_and(&valid, &intc->cpumask, dest))
+		return -EINVAL;
+
+	new_cpu = cpumask_any_and(&valid, cpu_online_mask);
+	if (new_cpu >= nr_cpu_ids)
+		return -EINVAL;
+
+	dest = cpumask_of(new_cpu);
+
+	raw_spin_lock_irqsave(&intc->lock, flags);
+	if (old_cpu != new_cpu) {
+		enabled = intc->cpus[old_cpu]->enable_cache[word] & mask;
+		if (enabled)
+			__bcm6345_l1_mask(d);
+		cpumask_copy(irq_data_get_affinity_mask(d), dest);
+		if (enabled)
+			__bcm6345_l1_unmask(d);
+	} else {
+		cpumask_copy(irq_data_get_affinity_mask(d), dest);
+	}
+	raw_spin_unlock_irqrestore(&intc->lock, flags);
+
+	return IRQ_SET_MASK_OK_NOCOPY;
+}
+
+static int __init bcm6345_l1_init_one(struct device_node *dn,
+				      unsigned int idx,
+				      struct bcm6345_l1_chip *intc)
+{
+	struct resource res;
+	resource_size_t sz;
+	struct bcm6345_l1_cpu *cpu;
+	unsigned int i, n_words;
+
+	if (of_address_to_resource(dn, idx, &res))
+		return -EINVAL;
+	sz = resource_size(&res);
+	n_words = sz / REG_BYTES_PER_IRQ_WORD;
+
+	if (!intc->n_words)
+		intc->n_words = n_words;
+	else if (intc->n_words != n_words)
+		return -EINVAL;
+
+	cpu = intc->cpus[idx] = kzalloc(sizeof(*cpu) + n_words * sizeof(u32),
+					GFP_KERNEL);
+	if (!cpu)
+		return -ENOMEM;
+
+	cpu->map_base = ioremap(res.start, sz);
+	if (!cpu->map_base)
+		return -ENOMEM;
+
+	for (i = 0; i < n_words; i++) {
+		cpu->enable_cache[i] = 0;
+		__raw_writel(0, cpu->map_base + reg_enable(intc, i));
+	}
+
+	cpu->parent_irq = irq_of_parse_and_map(dn, idx);
+	if (!cpu->parent_irq) {
+		pr_err("failed to map parent interrupt %d\n", cpu->parent_irq);
+		return -EINVAL;
+	}
+	irq_set_chained_handler_and_data(cpu->parent_irq,
+						bcm6345_l1_irq_handle, intc);
+
+	return 0;
+}
+
+static struct irq_chip bcm6345_l1_irq_chip = {
+	.name			= "bcm6345-l1",
+	.irq_mask		= bcm6345_l1_mask,
+	.irq_unmask		= bcm6345_l1_unmask,
+	.irq_set_affinity	= bcm6345_l1_set_affinity,
+};
+
+static int bcm6345_l1_map(struct irq_domain *d, unsigned int virq,
+			  irq_hw_number_t hw_irq)
+{
+	irq_set_chip_and_handler(virq,
+		&bcm6345_l1_irq_chip, handle_percpu_irq);
+	irq_set_chip_data(virq, d->host_data);
+	return 0;
+}
+
+static const struct irq_domain_ops bcm6345_l1_domain_ops = {
+	.xlate			= irq_domain_xlate_onecell,
+	.map			= bcm6345_l1_map,
+};
+
+static int __init bcm6345_l1_of_init(struct device_node *dn,
+			      struct device_node *parent)
+{
+	struct bcm6345_l1_chip *intc;
+	unsigned int idx;
+	int ret;
+
+	intc = kzalloc(sizeof(*intc), GFP_KERNEL);
+	if (!intc)
+		return -ENOMEM;
+
+	for_each_possible_cpu(idx) {
+		ret = bcm6345_l1_init_one(dn, idx, intc);
+		if (ret)
+			pr_err("failed to init intc L1 for cpu %d: %d\n",
+				idx, ret);
+		else
+			cpumask_set_cpu(idx, &intc->cpumask);
+	}
+
+	if (!cpumask_weight(&intc->cpumask)) {
+		ret = -ENODEV;
+		goto out_free;
+	}
+
+	raw_spin_lock_init(&intc->lock);
+
+	intc->domain = irq_domain_add_linear(dn, IRQS_PER_WORD * intc->n_words,
+					     &bcm6345_l1_domain_ops,
+					     intc);
+	if (!intc->domain) {
+		ret = -ENOMEM;
+		goto out_unmap;
+	}
+
+	pr_info("registered BCM6345 L1 intc (IRQs: %d)\n",
+			IRQS_PER_WORD * intc->n_words);
+	for_each_cpu(idx, &intc->cpumask) {
+		struct bcm6345_l1_cpu *cpu = intc->cpus[idx];
+
+		pr_info("  CPU%u at MMIO 0x%p (irq = %d)\n", idx,
+				cpu->map_base, cpu->parent_irq);
+	}
+
+	return 0;
+
+out_unmap:
+	for_each_possible_cpu(idx) {
+		struct bcm6345_l1_cpu *cpu = intc->cpus[idx];
+
+		if (cpu) {
+			if (cpu->map_base)
+				iounmap(cpu->map_base);
+			kfree(cpu);
+		}
+	}
+out_free:
+	kfree(intc);
+	return ret;
+}
+
+IRQCHIP_DECLARE(bcm6345_l1, "brcm,bcm6345-l1-intc", bcm6345_l1_of_init);
diff --git a/drivers/irqchip/irq-gic-realview.c b/drivers/irqchip/irq-gic-realview.c
index aa46eb2..54c2964 100644
--- a/drivers/irqchip/irq-gic-realview.c
+++ b/drivers/irqchip/irq-gic-realview.c
@@ -10,7 +10,8 @@
 #include <linux/irqchip/arm-gic.h>
 
 #define REALVIEW_SYS_LOCK_OFFSET	0x20
-#define REALVIEW_PB11MP_SYS_PLD_CTRL1	0x74
+#define REALVIEW_SYS_PLD_CTRL1		0x74
+#define REALVIEW_EB_REVB_SYS_PLD_CTRL1	0xD8
 #define VERSATILE_LOCK_VAL		0xA05F
 #define PLD_INTMODE_MASK		BIT(22)|BIT(23)|BIT(24)
 #define PLD_INTMODE_LEGACY		0x0
@@ -18,26 +19,57 @@
 #define PLD_INTMODE_NEW_NO_DCC		BIT(23)
 #define PLD_INTMODE_FIQ_ENABLE		BIT(24)
 
+/* For some reason RealView EB Rev B moved this register */
+static const struct of_device_id syscon_pldset_of_match[] = {
+	{
+		.compatible = "arm,realview-eb11mp-revb-syscon",
+		.data = (void *)REALVIEW_EB_REVB_SYS_PLD_CTRL1,
+	},
+	{
+		.compatible = "arm,realview-eb11mp-revc-syscon",
+		.data = (void *)REALVIEW_SYS_PLD_CTRL1,
+	},
+	{
+		.compatible = "arm,realview-eb-syscon",
+		.data = (void *)REALVIEW_SYS_PLD_CTRL1,
+	},
+	{
+		.compatible = "arm,realview-pb11mp-syscon",
+		.data = (void *)REALVIEW_SYS_PLD_CTRL1,
+	},
+	{},
+};
+
 static int __init
 realview_gic_of_init(struct device_node *node, struct device_node *parent)
 {
 	static struct regmap *map;
+	struct device_node *np;
+	const struct of_device_id *gic_id;
+	u32 pld1_ctrl;
+
+	np = of_find_matching_node_and_match(NULL, syscon_pldset_of_match,
+					     &gic_id);
+	if (!np)
+		return -ENODEV;
+	pld1_ctrl = (u32)gic_id->data;
 
 	/* The PB11MPCore GIC needs to be configured in the syscon */
-	map = syscon_regmap_lookup_by_compatible("arm,realview-pb11mp-syscon");
+	map = syscon_node_to_regmap(np);
 	if (!IS_ERR(map)) {
 		/* new irq mode with no DCC */
 		regmap_write(map, REALVIEW_SYS_LOCK_OFFSET,
 			     VERSATILE_LOCK_VAL);
-		regmap_update_bits(map, REALVIEW_PB11MP_SYS_PLD_CTRL1,
+		regmap_update_bits(map, pld1_ctrl,
 				   PLD_INTMODE_NEW_NO_DCC,
 				   PLD_INTMODE_MASK);
 		regmap_write(map, REALVIEW_SYS_LOCK_OFFSET, 0x0000);
-		pr_info("TC11MP GIC: set up interrupt controller to NEW mode, no DCC\n");
+		pr_info("RealView GIC: set up interrupt controller to NEW mode, no DCC\n");
 	} else {
-		pr_err("TC11MP GIC setup: could not find syscon\n");
-		return -ENXIO;
+		pr_err("RealView GIC setup: could not find syscon\n");
+		return -ENODEV;
 	}
 	return gic_of_init(node, parent);
 }
 IRQCHIP_DECLARE(armtc11mp_gic, "arm,tc11mp-gic", realview_gic_of_init);
+IRQCHIP_DECLARE(armeb11mp_gic, "arm,eb11mp-gic", realview_gic_of_init);
diff --git a/drivers/irqchip/irq-gic-v2m.c b/drivers/irqchip/irq-gic-v2m.c
index c779f83..28f047c 100644
--- a/drivers/irqchip/irq-gic-v2m.c
+++ b/drivers/irqchip/irq-gic-v2m.c
@@ -92,18 +92,6 @@
 	.chip	= &gicv2m_msi_irq_chip,
 };
 
-static int gicv2m_set_affinity(struct irq_data *irq_data,
-			       const struct cpumask *mask, bool force)
-{
-	int ret;
-
-	ret = irq_chip_set_affinity_parent(irq_data, mask, force);
-	if (ret == IRQ_SET_MASK_OK)
-		ret = IRQ_SET_MASK_OK_DONE;
-
-	return ret;
-}
-
 static void gicv2m_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
 {
 	struct v2m_data *v2m = irq_data_get_irq_chip_data(data);
@@ -122,7 +110,7 @@
 	.irq_mask		= irq_chip_mask_parent,
 	.irq_unmask		= irq_chip_unmask_parent,
 	.irq_eoi		= irq_chip_eoi_parent,
-	.irq_set_affinity	= gicv2m_set_affinity,
+	.irq_set_affinity	= irq_chip_set_affinity_parent,
 	.irq_compose_msi_msg	= gicv2m_compose_msi_msg,
 };
 
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index 43dfd15..3926179 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -103,7 +103,6 @@
 
 static LIST_HEAD(its_nodes);
 static DEFINE_SPINLOCK(its_lock);
-static struct device_node *gic_root_node;
 static struct rdists *gic_rdists;
 
 #define gic_data_rdist()		(raw_cpu_ptr(gic_rdists->rdist))
@@ -671,7 +670,7 @@
 	return (chunk << IRQS_PER_CHUNK_SHIFT) + 8192;
 }
 
-static int its_lpi_init(u32 id_bits)
+static int __init its_lpi_init(u32 id_bits)
 {
 	lpi_chunks = its_lpi_to_chunk(1UL << id_bits);
 
@@ -1430,7 +1429,8 @@
 	gic_enable_quirks(iidr, its_quirks, its);
 }
 
-static int its_probe(struct device_node *node, struct irq_domain *parent)
+static int __init its_probe(struct device_node *node,
+			    struct irq_domain *parent)
 {
 	struct resource res;
 	struct its_node *its;
@@ -1591,7 +1591,7 @@
 	{},
 };
 
-int its_init(struct device_node *node, struct rdists *rdists,
+int __init its_init(struct device_node *node, struct rdists *rdists,
 	     struct irq_domain *parent_domain)
 {
 	struct device_node *np;
@@ -1607,8 +1607,6 @@
 	}
 
 	gic_rdists = rdists;
-	gic_root_node = node;
-
 	its_alloc_lpi_tables();
 	its_lpi_init(rdists->id_bits);
 
diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
index d7be6dd..5b7d3c2 100644
--- a/drivers/irqchip/irq-gic-v3.c
+++ b/drivers/irqchip/irq-gic-v3.c
@@ -15,10 +15,12 @@
  * along with this program.  If not, see <http://www.gnu.org/licenses/>.
  */
 
+#include <linux/acpi.h>
 #include <linux/cpu.h>
 #include <linux/cpu_pm.h>
 #include <linux/delay.h>
 #include <linux/interrupt.h>
+#include <linux/irqdomain.h>
 #include <linux/of.h>
 #include <linux/of_address.h>
 #include <linux/of_irq.h>
@@ -38,6 +40,7 @@
 struct redist_region {
 	void __iomem		*redist_base;
 	phys_addr_t		phys_base;
+	bool			single_redist;
 };
 
 struct gic_chip_data {
@@ -434,6 +437,9 @@
 				return 0;
 			}
 
+			if (gic_data.redist_regions[i].single_redist)
+				break;
+
 			if (gic_data.redist_stride) {
 				ptr += gic_data.redist_stride;
 			} else {
@@ -634,7 +640,7 @@
 	else
 		gic_dist_wait_for_rwp();
 
-	return IRQ_SET_MASK_OK;
+	return IRQ_SET_MASK_OK_DONE;
 }
 #else
 #define gic_set_affinity	NULL
@@ -764,6 +770,15 @@
 		return 0;
 	}
 
+	if (is_fwnode_irqchip(fwspec->fwnode)) {
+		if (fwspec->param_count != 2)
+			return -EINVAL;
+
+		*hwirq = fwspec->param[0];
+		*type = fwspec->param[1];
+		return 0;
+	}
+
 	return -EINVAL;
 }
 
@@ -811,17 +826,88 @@
 #endif
 }
 
+static int __init gic_init_bases(void __iomem *dist_base,
+				 struct redist_region *rdist_regs,
+				 u32 nr_redist_regions,
+				 u64 redist_stride,
+				 struct fwnode_handle *handle)
+{
+	struct device_node *node;
+	u32 typer;
+	int gic_irqs;
+	int err;
+
+	if (!is_hyp_mode_available())
+		static_key_slow_dec(&supports_deactivate);
+
+	if (static_key_true(&supports_deactivate))
+		pr_info("GIC: Using split EOI/Deactivate mode\n");
+
+	gic_data.dist_base = dist_base;
+	gic_data.redist_regions = rdist_regs;
+	gic_data.nr_redist_regions = nr_redist_regions;
+	gic_data.redist_stride = redist_stride;
+
+	gicv3_enable_quirks();
+
+	/*
+	 * Find out how many interrupts are supported.
+	 * The GIC only supports up to 1020 interrupt sources (SGI+PPI+SPI)
+	 */
+	typer = readl_relaxed(gic_data.dist_base + GICD_TYPER);
+	gic_data.rdists.id_bits = GICD_TYPER_ID_BITS(typer);
+	gic_irqs = GICD_TYPER_IRQS(typer);
+	if (gic_irqs > 1020)
+		gic_irqs = 1020;
+	gic_data.irq_nr = gic_irqs;
+
+	gic_data.domain = irq_domain_create_tree(handle, &gic_irq_domain_ops,
+						 &gic_data);
+	gic_data.rdists.rdist = alloc_percpu(typeof(*gic_data.rdists.rdist));
+
+	if (WARN_ON(!gic_data.domain) || WARN_ON(!gic_data.rdists.rdist)) {
+		err = -ENOMEM;
+		goto out_free;
+	}
+
+	set_handle_irq(gic_handle_irq);
+
+	node = to_of_node(handle);
+	if (IS_ENABLED(CONFIG_ARM_GIC_V3_ITS) && gic_dist_supports_lpis() &&
+	    node) /* Temp hack to prevent ITS init for ACPI */
+		its_init(node, &gic_data.rdists, gic_data.domain);
+
+	gic_smp_init();
+	gic_dist_init();
+	gic_cpu_init();
+	gic_cpu_pm_init();
+
+	return 0;
+
+out_free:
+	if (gic_data.domain)
+		irq_domain_remove(gic_data.domain);
+	free_percpu(gic_data.rdists.rdist);
+	return err;
+}
+
+static int __init gic_validate_dist_version(void __iomem *dist_base)
+{
+	u32 reg = readl_relaxed(dist_base + GICD_PIDR2) & GIC_PIDR2_ARCH_MASK;
+
+	if (reg != GIC_PIDR2_ARCH_GICv3 && reg != GIC_PIDR2_ARCH_GICv4)
+		return -ENODEV;
+
+	return 0;
+}
+
 static int __init gic_of_init(struct device_node *node, struct device_node *parent)
 {
 	void __iomem *dist_base;
 	struct redist_region *rdist_regs;
 	u64 redist_stride;
 	u32 nr_redist_regions;
-	u32 typer;
-	u32 reg;
-	int gic_irqs;
-	int err;
-	int i;
+	int err, i;
 
 	dist_base = of_iomap(node, 0);
 	if (!dist_base) {
@@ -830,11 +916,10 @@
 		return -ENXIO;
 	}
 
-	reg = readl_relaxed(dist_base + GICD_PIDR2) & GIC_PIDR2_ARCH_MASK;
-	if (reg != GIC_PIDR2_ARCH_GICv3 && reg != GIC_PIDR2_ARCH_GICv4) {
+	err = gic_validate_dist_version(dist_base);
+	if (err) {
 		pr_err("%s: no distributor detected, giving up\n",
 			node->full_name);
-		err = -ENODEV;
 		goto out_unmap_dist;
 	}
 
@@ -865,55 +950,11 @@
 	if (of_property_read_u64(node, "redistributor-stride", &redist_stride))
 		redist_stride = 0;
 
-	if (!is_hyp_mode_available())
-		static_key_slow_dec(&supports_deactivate);
+	err = gic_init_bases(dist_base, rdist_regs, nr_redist_regions,
+			     redist_stride, &node->fwnode);
+	if (!err)
+		return 0;
 
-	if (static_key_true(&supports_deactivate))
-		pr_info("GIC: Using split EOI/Deactivate mode\n");
-
-	gic_data.dist_base = dist_base;
-	gic_data.redist_regions = rdist_regs;
-	gic_data.nr_redist_regions = nr_redist_regions;
-	gic_data.redist_stride = redist_stride;
-
-	gicv3_enable_quirks();
-
-	/*
-	 * Find out how many interrupts are supported.
-	 * The GIC only supports up to 1020 interrupt sources (SGI+PPI+SPI)
-	 */
-	typer = readl_relaxed(gic_data.dist_base + GICD_TYPER);
-	gic_data.rdists.id_bits = GICD_TYPER_ID_BITS(typer);
-	gic_irqs = GICD_TYPER_IRQS(typer);
-	if (gic_irqs > 1020)
-		gic_irqs = 1020;
-	gic_data.irq_nr = gic_irqs;
-
-	gic_data.domain = irq_domain_add_tree(node, &gic_irq_domain_ops,
-					      &gic_data);
-	gic_data.rdists.rdist = alloc_percpu(typeof(*gic_data.rdists.rdist));
-
-	if (WARN_ON(!gic_data.domain) || WARN_ON(!gic_data.rdists.rdist)) {
-		err = -ENOMEM;
-		goto out_free;
-	}
-
-	set_handle_irq(gic_handle_irq);
-
-	if (IS_ENABLED(CONFIG_ARM_GIC_V3_ITS) && gic_dist_supports_lpis())
-		its_init(node, &gic_data.rdists, gic_data.domain);
-
-	gic_smp_init();
-	gic_dist_init();
-	gic_cpu_init();
-	gic_cpu_pm_init();
-
-	return 0;
-
-out_free:
-	if (gic_data.domain)
-		irq_domain_remove(gic_data.domain);
-	free_percpu(gic_data.rdists.rdist);
 out_unmap_rdist:
 	for (i = 0; i < nr_redist_regions; i++)
 		if (rdist_regs[i].redist_base)
@@ -925,3 +966,213 @@
 }
 
 IRQCHIP_DECLARE(gic_v3, "arm,gic-v3", gic_of_init);
+
+#ifdef CONFIG_ACPI
+static void __iomem *dist_base;
+static struct redist_region *redist_regs __initdata;
+static u32 nr_redist_regions __initdata;
+static bool single_redist;
+
+static void __init
+gic_acpi_register_redist(phys_addr_t phys_base, void __iomem *redist_base)
+{
+	static int count = 0;
+
+	redist_regs[count].phys_base = phys_base;
+	redist_regs[count].redist_base = redist_base;
+	redist_regs[count].single_redist = single_redist;
+	count++;
+}
+
+static int __init
+gic_acpi_parse_madt_redist(struct acpi_subtable_header *header,
+			   const unsigned long end)
+{
+	struct acpi_madt_generic_redistributor *redist =
+			(struct acpi_madt_generic_redistributor *)header;
+	void __iomem *redist_base;
+
+	redist_base = ioremap(redist->base_address, redist->length);
+	if (!redist_base) {
+		pr_err("Couldn't map GICR region @%llx\n", redist->base_address);
+		return -ENOMEM;
+	}
+
+	gic_acpi_register_redist(redist->base_address, redist_base);
+	return 0;
+}
+
+static int __init
+gic_acpi_parse_madt_gicc(struct acpi_subtable_header *header,
+			 const unsigned long end)
+{
+	struct acpi_madt_generic_interrupt *gicc =
+				(struct acpi_madt_generic_interrupt *)header;
+	u32 reg = readl_relaxed(dist_base + GICD_PIDR2) & GIC_PIDR2_ARCH_MASK;
+	u32 size = reg == GIC_PIDR2_ARCH_GICv4 ? SZ_64K * 4 : SZ_64K * 2;
+	void __iomem *redist_base;
+
+	redist_base = ioremap(gicc->gicr_base_address, size);
+	if (!redist_base)
+		return -ENOMEM;
+
+	gic_acpi_register_redist(gicc->gicr_base_address, redist_base);
+	return 0;
+}
+
+static int __init gic_acpi_collect_gicr_base(void)
+{
+	acpi_tbl_entry_handler redist_parser;
+	enum acpi_madt_type type;
+
+	if (single_redist) {
+		type = ACPI_MADT_TYPE_GENERIC_INTERRUPT;
+		redist_parser = gic_acpi_parse_madt_gicc;
+	} else {
+		type = ACPI_MADT_TYPE_GENERIC_REDISTRIBUTOR;
+		redist_parser = gic_acpi_parse_madt_redist;
+	}
+
+	/* Collect redistributor base addresses in GICR entries */
+	if (acpi_table_parse_madt(type, redist_parser, 0) > 0)
+		return 0;
+
+	pr_info("No valid GICR entries exist\n");
+	return -ENODEV;
+}
+
+static int __init gic_acpi_match_gicr(struct acpi_subtable_header *header,
+				  const unsigned long end)
+{
+	/* Subtable presence means that redist exists, that's it */
+	return 0;
+}
+
+static int __init gic_acpi_match_gicc(struct acpi_subtable_header *header,
+				      const unsigned long end)
+{
+	struct acpi_madt_generic_interrupt *gicc =
+				(struct acpi_madt_generic_interrupt *)header;
+
+	/*
+	 * If GICC is enabled and has a valid GICR base address, then it means
+	 * the GICR base is presented via GICC.
+	 */
+	if ((gicc->flags & ACPI_MADT_ENABLED) && gicc->gicr_base_address)
+		return 0;
+
+	return -ENODEV;
+}
+
+static int __init gic_acpi_count_gicr_regions(void)
+{
+	int count;
+
+	/*
+	 * Count how many redistributor regions we have. It is not allowed
+	 * Count how many redistributor regions we have. It is not allowed
+	 * to mix redistributor descriptions; GICR and GICC subtables have
+	 * to be mutually exclusive.
+	count = acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_REDISTRIBUTOR,
+				      gic_acpi_match_gicr, 0);
+	if (count > 0) {
+		single_redist = false;
+		return count;
+	}
+
+	count = acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_INTERRUPT,
+				      gic_acpi_match_gicc, 0);
+	if (count > 0)
+		single_redist = true;
+
+	return count;
+}
+
+static bool __init acpi_validate_gic_table(struct acpi_subtable_header *header,
+					   struct acpi_probe_entry *ape)
+{
+	struct acpi_madt_generic_distributor *dist;
+	int count;
+
+	dist = (struct acpi_madt_generic_distributor *)header;
+	if (dist->version != ape->driver_data)
+		return false;
+
+	/* We need to do that exercise anyway, the sooner the better */
+	count = gic_acpi_count_gicr_regions();
+	if (count <= 0)
+		return false;
+
+	nr_redist_regions = count;
+	return true;
+}
+
+#define ACPI_GICV3_DIST_MEM_SIZE (SZ_64K)
+
+static int __init
+gic_acpi_init(struct acpi_subtable_header *header, const unsigned long end)
+{
+	struct acpi_madt_generic_distributor *dist;
+	struct fwnode_handle *domain_handle;
+	int i, err;
+
+	/* Get distributor base address */
+	dist = (struct acpi_madt_generic_distributor *)header;
+	dist_base = ioremap(dist->base_address, ACPI_GICV3_DIST_MEM_SIZE);
+	if (!dist_base) {
+		pr_err("Unable to map GICD registers\n");
+		return -ENOMEM;
+	}
+
+	err = gic_validate_dist_version(dist_base);
+	if (err) {
+		pr_err("No distributor detected at @%p, giving up", dist_base);
+		goto out_dist_unmap;
+	}
+
+	redist_regs = kzalloc(sizeof(*redist_regs) * nr_redist_regions,
+			      GFP_KERNEL);
+	if (!redist_regs) {
+		err = -ENOMEM;
+		goto out_dist_unmap;
+	}
+
+	err = gic_acpi_collect_gicr_base();
+	if (err)
+		goto out_redist_unmap;
+
+	domain_handle = irq_domain_alloc_fwnode(dist_base);
+	if (!domain_handle) {
+		err = -ENOMEM;
+		goto out_redist_unmap;
+	}
+
+	err = gic_init_bases(dist_base, redist_regs, nr_redist_regions, 0,
+			     domain_handle);
+	if (err)
+		goto out_fwhandle_free;
+
+	acpi_set_irq_model(ACPI_IRQ_MODEL_GIC, domain_handle);
+	return 0;
+
+out_fwhandle_free:
+	irq_domain_free_fwnode(domain_handle);
+out_redist_unmap:
+	for (i = 0; i < nr_redist_regions; i++)
+		if (redist_regs[i].redist_base)
+			iounmap(redist_regs[i].redist_base);
+	kfree(redist_regs);
+out_dist_unmap:
+	iounmap(dist_base);
+	return err;
+}
+IRQCHIP_ACPI_DECLARE(gic_v3, ACPI_MADT_TYPE_GENERIC_DISTRIBUTOR,
+		     acpi_validate_gic_table, ACPI_MADT_GIC_VERSION_V3,
+		     gic_acpi_init);
+IRQCHIP_ACPI_DECLARE(gic_v4, ACPI_MADT_TYPE_GENERIC_DISTRIBUTOR,
+		     acpi_validate_gic_table, ACPI_MADT_GIC_VERSION_V4,
+		     gic_acpi_init);
+IRQCHIP_ACPI_DECLARE(gic_v3_or_v4, ACPI_MADT_TYPE_GENERIC_DISTRIBUTOR,
+		     acpi_validate_gic_table, ACPI_MADT_GIC_VERSION_NONE,
+		     gic_acpi_init);
+#endif
diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c
index 8f9ebf7..282344b 100644
--- a/drivers/irqchip/irq-gic.c
+++ b/drivers/irqchip/irq-gic.c
@@ -319,7 +319,7 @@
 	writel_relaxed(val | bit, reg);
 	raw_spin_unlock_irqrestore(&irq_controller_lock, flags);
 
-	return IRQ_SET_MASK_OK;
+	return IRQ_SET_MASK_OK_DONE;
 }
 #endif
 
diff --git a/drivers/irqchip/irq-mips-gic.c b/drivers/irqchip/irq-mips-gic.c
index 9e17ef2..94a30da 100644
--- a/drivers/irqchip/irq-mips-gic.c
+++ b/drivers/irqchip/irq-mips-gic.c
@@ -29,16 +29,32 @@
 	DECLARE_BITMAP(pcpu_mask, GIC_MAX_INTRS);
 };
 
+struct gic_irq_spec {
+	enum {
+		GIC_DEVICE,
+		GIC_IPI
+	} type;
+
+	union {
+		struct cpumask *ipimask;
+		unsigned int hwirq;
+	};
+};
+
 static unsigned long __gic_base_addr;
+
 static void __iomem *gic_base;
 static struct gic_pcpu_mask pcpu_masks[NR_CPUS];
 static DEFINE_SPINLOCK(gic_lock);
 static struct irq_domain *gic_irq_domain;
+static struct irq_domain *gic_dev_domain;
+static struct irq_domain *gic_ipi_domain;
 static int gic_shared_intrs;
 static int gic_vpes;
 static unsigned int gic_cpu_pin;
 static unsigned int timer_cpu_pin;
 static struct irq_chip gic_level_irq_controller, gic_edge_irq_controller;
+DECLARE_BITMAP(ipi_resrv, GIC_MAX_INTRS);
 
 static void __gic_irq_dispatch(void);
 
@@ -264,9 +280,11 @@
 		  GIC_VPE_EIC_SS(irq), set);
 }
 
-void gic_send_ipi(unsigned int intr)
+static void gic_send_ipi(struct irq_data *d, unsigned int cpu)
 {
-	gic_write(GIC_REG(SHARED, GIC_SH_WEDGE), GIC_SH_WEDGE_SET(intr));
+	irq_hw_number_t hwirq = GIC_HWIRQ_TO_SHARED(irqd_to_hwirq(d));
+
+	gic_write(GIC_REG(SHARED, GIC_SH_WEDGE), GIC_SH_WEDGE_SET(hwirq));
 }
 
 int gic_get_c0_compare_int(void)
@@ -449,7 +467,7 @@
 	gic_map_to_vpe(irq, mips_cm_vp_id(cpumask_first(&tmp)));
 
 	/* Update the pcpu_masks */
-	for (i = 0; i < NR_CPUS; i++)
+	for (i = 0; i < gic_vpes; i++)
 		clear_bit(irq, pcpu_masks[i].pcpu_mask);
 	set_bit(irq, pcpu_masks[cpumask_first(&tmp)].pcpu_mask);
 
@@ -479,6 +497,7 @@
 #ifdef CONFIG_SMP
 	.irq_set_affinity	=	gic_set_affinity,
 #endif
+	.ipi_send_single	=	gic_send_ipi,
 };
 
 static void gic_handle_local_int(bool chained)
@@ -572,83 +591,6 @@
 	gic_handle_shared_int(true);
 }
 
-#ifdef CONFIG_MIPS_GIC_IPI
-static int gic_resched_int_base;
-static int gic_call_int_base;
-
-unsigned int plat_ipi_resched_int_xlate(unsigned int cpu)
-{
-	return gic_resched_int_base + cpu;
-}
-
-unsigned int plat_ipi_call_int_xlate(unsigned int cpu)
-{
-	return gic_call_int_base + cpu;
-}
-
-static irqreturn_t ipi_resched_interrupt(int irq, void *dev_id)
-{
-	scheduler_ipi();
-
-	return IRQ_HANDLED;
-}
-
-static irqreturn_t ipi_call_interrupt(int irq, void *dev_id)
-{
-	generic_smp_call_function_interrupt();
-
-	return IRQ_HANDLED;
-}
-
-static struct irqaction irq_resched = {
-	.handler	= ipi_resched_interrupt,
-	.flags		= IRQF_PERCPU,
-	.name		= "IPI resched"
-};
-
-static struct irqaction irq_call = {
-	.handler	= ipi_call_interrupt,
-	.flags		= IRQF_PERCPU,
-	.name		= "IPI call"
-};
-
-static __init void gic_ipi_init_one(unsigned int intr, int cpu,
-				    struct irqaction *action)
-{
-	int virq = irq_create_mapping(gic_irq_domain,
-				      GIC_SHARED_TO_HWIRQ(intr));
-	int i;
-
-	gic_map_to_vpe(intr, mips_cm_vp_id(cpu));
-	for (i = 0; i < NR_CPUS; i++)
-		clear_bit(intr, pcpu_masks[i].pcpu_mask);
-	set_bit(intr, pcpu_masks[cpu].pcpu_mask);
-
-	irq_set_irq_type(virq, IRQ_TYPE_EDGE_RISING);
-
-	irq_set_handler(virq, handle_percpu_irq);
-	setup_irq(virq, action);
-}
-
-static __init void gic_ipi_init(void)
-{
-	int i;
-
-	/* Use last 2 * NR_CPUS interrupts as IPIs */
-	gic_resched_int_base = gic_shared_intrs - nr_cpu_ids;
-	gic_call_int_base = gic_resched_int_base - nr_cpu_ids;
-
-	for (i = 0; i < nr_cpu_ids; i++) {
-		gic_ipi_init_one(gic_call_int_base + i, i, &irq_call);
-		gic_ipi_init_one(gic_resched_int_base + i, i, &irq_resched);
-	}
-}
-#else
-static inline void gic_ipi_init(void)
-{
-}
-#endif
-
 static void __init gic_basic_init(void)
 {
 	unsigned int i;
@@ -753,19 +695,21 @@
 }
 
 static int gic_shared_irq_domain_map(struct irq_domain *d, unsigned int virq,
-				     irq_hw_number_t hw)
+				     irq_hw_number_t hw, unsigned int vpe)
 {
 	int intr = GIC_HWIRQ_TO_SHARED(hw);
 	unsigned long flags;
+	int i;
 
 	irq_set_chip_and_handler(virq, &gic_level_irq_controller,
 				 handle_level_irq);
 
 	spin_lock_irqsave(&gic_lock, flags);
 	gic_map_to_pin(intr, gic_cpu_pin);
-	/* Map to VPE 0 by default */
-	gic_map_to_vpe(intr, 0);
-	set_bit(intr, pcpu_masks[0].pcpu_mask);
+	gic_map_to_vpe(intr, vpe);
+	for (i = 0; i < gic_vpes; i++)
+		clear_bit(intr, pcpu_masks[i].pcpu_mask);
+	set_bit(intr, pcpu_masks[vpe].pcpu_mask);
 	spin_unlock_irqrestore(&gic_lock, flags);
 
 	return 0;
@@ -776,10 +720,93 @@
 {
 	if (GIC_HWIRQ_TO_LOCAL(hw) < GIC_NUM_LOCAL_INTRS)
 		return gic_local_irq_domain_map(d, virq, hw);
-	return gic_shared_irq_domain_map(d, virq, hw);
+	return gic_shared_irq_domain_map(d, virq, hw, 0);
 }
 
-static int gic_irq_domain_xlate(struct irq_domain *d, struct device_node *ctrlr,
+static int gic_irq_domain_alloc(struct irq_domain *d, unsigned int virq,
+				unsigned int nr_irqs, void *arg)
+{
+	struct gic_irq_spec *spec = arg;
+	irq_hw_number_t hwirq, base_hwirq;
+	int cpu, ret, i;
+
+	if (spec->type == GIC_DEVICE) {
+		/* verify that it doesn't conflict with an IPI irq */
+		if (test_bit(spec->hwirq, ipi_resrv))
+			return -EBUSY;
+	} else {
+		base_hwirq = find_first_bit(ipi_resrv, gic_shared_intrs);
+		if (base_hwirq == gic_shared_intrs) {
+			return -ENOMEM;
+		}
+
+		/* check that we have enough space */
+		for (i = base_hwirq; i < nr_irqs; i++) {
+			if (!test_bit(i, ipi_resrv))
+				return -EBUSY;
+		}
+		bitmap_clear(ipi_resrv, base_hwirq, nr_irqs);
+
+		/* map the hwirq for each cpu consecutively */
+		i = 0;
+		for_each_cpu(cpu, spec->ipimask) {
+			hwirq = GIC_SHARED_TO_HWIRQ(base_hwirq + i);
+
+			ret = irq_domain_set_hwirq_and_chip(d, virq + i, hwirq,
+							    &gic_edge_irq_controller,
+							    NULL);
+			if (ret)
+				goto error;
+
+			ret = gic_shared_irq_domain_map(d, virq + i, hwirq, cpu);
+			if (ret)
+				goto error;
+
+			i++;
+		}
+
+		/*
+		 * tell the parent about the base hwirq we allocated so it can
+		 * set its own domain data
+		 */
+		spec->hwirq = base_hwirq;
+	}
+
+	return 0;
+error:
+	bitmap_set(ipi_resrv, base_hwirq, nr_irqs);
+	return ret;
+}
+
+void gic_irq_domain_free(struct irq_domain *d, unsigned int virq,
+			 unsigned int nr_irqs)
+{
+	irq_hw_number_t base_hwirq;
+	struct irq_data *data;
+
+	data = irq_get_irq_data(virq);
+	if (!data)
+		return;
+
+	base_hwirq = GIC_HWIRQ_TO_SHARED(irqd_to_hwirq(data));
+	bitmap_set(ipi_resrv, base_hwirq, nr_irqs);
+}
+
+int gic_irq_domain_match(struct irq_domain *d, struct device_node *node,
+			 enum irq_domain_bus_token bus_token)
+{
+	/* this domain shouldn't be accessed directly */
+	return 0;
+}
+
+static const struct irq_domain_ops gic_irq_domain_ops = {
+	.map = gic_irq_domain_map,
+	.alloc = gic_irq_domain_alloc,
+	.free = gic_irq_domain_free,
+	.match = gic_irq_domain_match,
+};
+
+static int gic_dev_domain_xlate(struct irq_domain *d, struct device_node *ctrlr,
 				const u32 *intspec, unsigned int intsize,
 				irq_hw_number_t *out_hwirq,
 				unsigned int *out_type)
@@ -798,9 +825,130 @@
 	return 0;
 }
 
-static const struct irq_domain_ops gic_irq_domain_ops = {
-	.map = gic_irq_domain_map,
-	.xlate = gic_irq_domain_xlate,
+static int gic_dev_domain_alloc(struct irq_domain *d, unsigned int virq,
+				unsigned int nr_irqs, void *arg)
+{
+	struct irq_fwspec *fwspec = arg;
+	struct gic_irq_spec spec = {
+		.type = GIC_DEVICE,
+		.hwirq = fwspec->param[1],
+	};
+	int i, ret;
+	bool is_shared = fwspec->param[0] == GIC_SHARED;
+
+	if (is_shared) {
+		ret = irq_domain_alloc_irqs_parent(d, virq, nr_irqs, &spec);
+		if (ret)
+			return ret;
+	}
+
+	for (i = 0; i < nr_irqs; i++) {
+		irq_hw_number_t hwirq;
+
+		if (is_shared)
+			hwirq = GIC_SHARED_TO_HWIRQ(spec.hwirq + i);
+		else
+			hwirq = GIC_LOCAL_TO_HWIRQ(spec.hwirq + i);
+
+		ret = irq_domain_set_hwirq_and_chip(d, virq + i,
+						    hwirq,
+						    &gic_level_irq_controller,
+						    NULL);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
+void gic_dev_domain_free(struct irq_domain *d, unsigned int virq,
+			 unsigned int nr_irqs)
+{
+	/* no real allocation is done for dev irqs, so no need to free anything */
+	return;
+}
+
+static struct irq_domain_ops gic_dev_domain_ops = {
+	.xlate = gic_dev_domain_xlate,
+	.alloc = gic_dev_domain_alloc,
+	.free = gic_dev_domain_free,
+};
+
+static int gic_ipi_domain_xlate(struct irq_domain *d, struct device_node *ctrlr,
+				const u32 *intspec, unsigned int intsize,
+				irq_hw_number_t *out_hwirq,
+				unsigned int *out_type)
+{
+	/*
+	 * There's nothing to translate here. hwirq is dynamically allocated and
+	 * the irq type is always edge triggered.
+	 */
+	*out_hwirq = 0;
+	*out_type = IRQ_TYPE_EDGE_RISING;
+
+	return 0;
+}
+
+static int gic_ipi_domain_alloc(struct irq_domain *d, unsigned int virq,
+				unsigned int nr_irqs, void *arg)
+{
+	struct cpumask *ipimask = arg;
+	struct gic_irq_spec spec = {
+		.type = GIC_IPI,
+		.ipimask = ipimask
+	};
+	int ret, i;
+
+	ret = irq_domain_alloc_irqs_parent(d, virq, nr_irqs, &spec);
+	if (ret)
+		return ret;
+
+	/* the parent should have set spec.hwirq to the base_hwirq it allocated */
+	for (i = 0; i < nr_irqs; i++) {
+		ret = irq_domain_set_hwirq_and_chip(d, virq + i,
+						    GIC_SHARED_TO_HWIRQ(spec.hwirq + i),
+						    &gic_edge_irq_controller,
+						    NULL);
+		if (ret)
+			goto error;
+
+		ret = irq_set_irq_type(virq + i, IRQ_TYPE_EDGE_RISING);
+		if (ret)
+			goto error;
+	}
+
+	return 0;
+error:
+	irq_domain_free_irqs_parent(d, virq, nr_irqs);
+	return ret;
+}
+
+void gic_ipi_domain_free(struct irq_domain *d, unsigned int virq,
+			 unsigned int nr_irqs)
+{
+	irq_domain_free_irqs_parent(d, virq, nr_irqs);
+}
+
+int gic_ipi_domain_match(struct irq_domain *d, struct device_node *node,
+			 enum irq_domain_bus_token bus_token)
+{
+	bool is_ipi;
+
+	switch (bus_token) {
+	case DOMAIN_BUS_IPI:
+		is_ipi = d->bus_token == bus_token;
+		return to_of_node(d->fwnode) == node && is_ipi;
+		break;
+	default:
+		return 0;
+	}
+}
+
+static struct irq_domain_ops gic_ipi_domain_ops = {
+	.xlate = gic_ipi_domain_xlate,
+	.alloc = gic_ipi_domain_alloc,
+	.free = gic_ipi_domain_free,
+	.match = gic_ipi_domain_match,
 };
 
 static void __init __gic_init(unsigned long gic_base_addr,
@@ -809,6 +957,7 @@
 			      struct device_node *node)
 {
 	unsigned int gicconfig;
+	unsigned int v[2];
 
 	__gic_base_addr = gic_base_addr;
 
@@ -864,9 +1013,32 @@
 	if (!gic_irq_domain)
 		panic("Failed to add GIC IRQ domain");
 
-	gic_basic_init();
+	gic_dev_domain = irq_domain_add_hierarchy(gic_irq_domain, 0,
+						  GIC_NUM_LOCAL_INTRS + gic_shared_intrs,
+						  node, &gic_dev_domain_ops, NULL);
+	if (!gic_dev_domain)
+		panic("Failed to add GIC DEV domain");
 
-	gic_ipi_init();
+	gic_ipi_domain = irq_domain_add_hierarchy(gic_irq_domain,
+						  IRQ_DOMAIN_FLAG_IPI_PER_CPU,
+						  GIC_NUM_LOCAL_INTRS + gic_shared_intrs,
+						  node, &gic_ipi_domain_ops, NULL);
+	if (!gic_ipi_domain)
+		panic("Failed to add GIC IPI domain");
+
+	gic_ipi_domain->bus_token = DOMAIN_BUS_IPI;
+
+	if (node &&
+	    !of_property_read_u32_array(node, "mti,reserved-ipi-vectors", v, 2)) {
+		bitmap_set(ipi_resrv, v[0], v[1]);
+	} else {
+		/* Make the last 2 * gic_vpes available for IPIs */
+		bitmap_set(ipi_resrv,
+			   gic_shared_intrs - 2 * gic_vpes,
+			   2 * gic_vpes);
+	}
+
+	gic_basic_init();
 }
 
 void __init gic_init(unsigned long gic_base_addr,
diff --git a/drivers/irqchip/irq-mvebu-odmi.c b/drivers/irqchip/irq-mvebu-odmi.c
new file mode 100644
index 0000000..b4d3678
--- /dev/null
+++ b/drivers/irqchip/irq-mvebu-odmi.c
@@ -0,0 +1,236 @@
+/*
+ * Copyright (C) 2016 Marvell
+ *
+ * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2.  This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#define pr_fmt(fmt) "GIC-ODMI: " fmt
+
+#include <linux/irq.h>
+#include <linux/irqchip.h>
+#include <linux/irqdomain.h>
+#include <linux/kernel.h>
+#include <linux/msi.h>
+#include <linux/of_address.h>
+#include <linux/slab.h>
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+
+#define GICP_ODMIN_SET			0x40
+#define   GICP_ODMI_INT_NUM_SHIFT	12
+#define GICP_ODMIN_GM_EP_R0		0x110
+#define GICP_ODMIN_GM_EP_R1		0x114
+#define GICP_ODMIN_GM_EA_R0		0x108
+#define GICP_ODMIN_GM_EA_R1		0x118
+
+/*
+ * We don't support the group events, so we simply have 8 interrupts
+ * per frame.
+ */
+#define NODMIS_SHIFT		3
+#define NODMIS_PER_FRAME	(1 << NODMIS_SHIFT)
+#define NODMIS_MASK		(NODMIS_PER_FRAME - 1)
+
+struct odmi_data {
+	struct resource res;
+	void __iomem *base;
+	unsigned int spi_base;
+};
+
+static struct odmi_data *odmis;
+static unsigned long *odmis_bm;
+static unsigned int odmis_count;
+
+/* Protects odmis_bm */
+static DEFINE_SPINLOCK(odmis_bm_lock);
+
+static void odmi_compose_msi_msg(struct irq_data *d, struct msi_msg *msg)
+{
+	struct odmi_data *odmi;
+	phys_addr_t addr;
+	unsigned int odmin;
+
+	if (WARN_ON(d->hwirq >= odmis_count * NODMIS_PER_FRAME))
+		return;
+
+	odmi = &odmis[d->hwirq >> NODMIS_SHIFT];
+	odmin = d->hwirq & NODMIS_MASK;
+
+	addr = odmi->res.start + GICP_ODMIN_SET;
+
+	msg->address_hi = upper_32_bits(addr);
+	msg->address_lo = lower_32_bits(addr);
+	msg->data = odmin << GICP_ODMI_INT_NUM_SHIFT;
+}
+
+static struct irq_chip odmi_irq_chip = {
+	.name			= "ODMI",
+	.irq_mask		= irq_chip_mask_parent,
+	.irq_unmask		= irq_chip_unmask_parent,
+	.irq_eoi		= irq_chip_eoi_parent,
+	.irq_set_affinity	= irq_chip_set_affinity_parent,
+	.irq_compose_msi_msg	= odmi_compose_msi_msg,
+};
+
+static int odmi_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
+				 unsigned int nr_irqs, void *args)
+{
+	struct odmi_data *odmi = NULL;
+	struct irq_fwspec fwspec;
+	struct irq_data *d;
+	unsigned int hwirq, odmin;
+	int ret;
+
+	spin_lock(&odmis_bm_lock);
+	hwirq = find_first_zero_bit(odmis_bm, NODMIS_PER_FRAME * odmis_count);
+	if (hwirq >= NODMIS_PER_FRAME * odmis_count) {
+		spin_unlock(&odmis_bm_lock);
+		return -ENOSPC;
+	}
+
+	__set_bit(hwirq, odmis_bm);
+	spin_unlock(&odmis_bm_lock);
+
+	odmi = &odmis[hwirq >> NODMIS_SHIFT];
+	odmin = hwirq & NODMIS_MASK;
+
+	fwspec.fwnode = domain->parent->fwnode;
+	fwspec.param_count = 3;
+	fwspec.param[0] = GIC_SPI;
+	fwspec.param[1] = odmi->spi_base - 32 + odmin;
+	fwspec.param[2] = IRQ_TYPE_EDGE_RISING;
+
+	ret = irq_domain_alloc_irqs_parent(domain, virq, 1, &fwspec);
+	if (ret) {
+		pr_err("Cannot allocate parent IRQ\n");
+		spin_lock(&odmis_bm_lock);
+		__clear_bit(odmin, odmis_bm);
+		spin_unlock(&odmis_bm_lock);
+		return ret;
+	}
+
+	/* Configure the interrupt line to be edge */
+	d = irq_domain_get_irq_data(domain->parent, virq);
+	d->chip->irq_set_type(d, IRQ_TYPE_EDGE_RISING);
+
+	irq_domain_set_hwirq_and_chip(domain, virq, hwirq,
+				      &odmi_irq_chip, NULL);
+
+	return 0;
+}
+
+static void odmi_irq_domain_free(struct irq_domain *domain,
+				 unsigned int virq, unsigned int nr_irqs)
+{
+	struct irq_data *d = irq_domain_get_irq_data(domain, virq);
+
+	if (d->hwirq >= odmis_count * NODMIS_PER_FRAME) {
+		pr_err("Failed to tear down MSI. Invalid hwirq %lu\n", d->hwirq);
+		return;
+	}
+
+	irq_domain_free_irqs_parent(domain, virq, nr_irqs);
+
+	/* Actually free the MSI */
+	spin_lock(&odmis_bm_lock);
+	__clear_bit(d->hwirq, odmis_bm);
+	spin_unlock(&odmis_bm_lock);
+}
+
+static const struct irq_domain_ops odmi_domain_ops = {
+	.alloc	= odmi_irq_domain_alloc,
+	.free	= odmi_irq_domain_free,
+};
+
+static struct irq_chip odmi_msi_irq_chip = {
+	.name	= "ODMI",
+};
+
+static struct msi_domain_ops odmi_msi_ops = {
+};
+
+static struct msi_domain_info odmi_msi_domain_info = {
+	.flags	= (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS),
+	.ops	= &odmi_msi_ops,
+	.chip	= &odmi_msi_irq_chip,
+};
+
+static int __init mvebu_odmi_init(struct device_node *node,
+				  struct device_node *parent)
+{
+	struct irq_domain *inner_domain, *plat_domain;
+	int ret, i;
+
+	if (of_property_read_u32(node, "marvell,odmi-frames", &odmis_count))
+		return -EINVAL;
+
+	odmis = kcalloc(odmis_count, sizeof(struct odmi_data), GFP_KERNEL);
+	if (!odmis)
+		return -ENOMEM;
+
+	odmis_bm = kcalloc(BITS_TO_LONGS(odmis_count * NODMIS_PER_FRAME),
+			   sizeof(long), GFP_KERNEL);
+	if (!odmis_bm) {
+		ret = -ENOMEM;
+		goto err_alloc;
+	}
+
+	for (i = 0; i < odmis_count; i++) {
+		struct odmi_data *odmi = &odmis[i];
+
+		ret = of_address_to_resource(node, i, &odmi->res);
+		if (ret)
+			goto err_unmap;
+
+		odmi->base = of_io_request_and_map(node, i, "odmi");
+		if (IS_ERR(odmi->base)) {
+			ret = PTR_ERR(odmi->base);
+			goto err_unmap;
+		}
+
+		if (of_property_read_u32_index(node, "marvell,spi-base",
+					       i, &odmi->spi_base)) {
+			ret = -EINVAL;
+			goto err_unmap;
+		}
+	}
+
+	inner_domain = irq_domain_create_linear(of_node_to_fwnode(node),
+						odmis_count * NODMIS_PER_FRAME,
+						&odmi_domain_ops, NULL);
+	if (!inner_domain) {
+		ret = -ENOMEM;
+		goto err_unmap;
+	}
+
+	inner_domain->parent = irq_find_host(parent);
+
+	plat_domain = platform_msi_create_irq_domain(of_node_to_fwnode(node),
+						     &odmi_msi_domain_info,
+						     inner_domain);
+	if (!plat_domain) {
+		ret = -ENOMEM;
+		goto err_remove_inner;
+	}
+
+	return 0;
+
+err_remove_inner:
+	irq_domain_remove(inner_domain);
+err_unmap:
+	for (i = 0; i < odmis_count; i++) {
+		struct odmi_data *odmi = &odmis[i];
+
+		if (odmi->base && !IS_ERR(odmi->base))
+			iounmap(odmis[i].base);
+	}
+	kfree(odmis_bm);
+err_alloc:
+	kfree(odmis);
+	return ret;
+}
+
+IRQCHIP_DECLARE(mvebu_odmi, "marvell,odmi-controller", mvebu_odmi_init);
diff --git a/drivers/irqchip/irq-mxs.c b/drivers/irqchip/irq-mxs.c
index efe5084..1730470 100644
--- a/drivers/irqchip/irq-mxs.c
+++ b/drivers/irqchip/irq-mxs.c
@@ -183,7 +183,7 @@
 	void __iomem *icoll_base;
 
 	icoll_base = of_io_request_and_map(np, 0, np->name);
-	if (!icoll_base)
+	if (IS_ERR(icoll_base))
 		panic("%s: unable to map resource", np->full_name);
 	return icoll_base;
 }
diff --git a/drivers/irqchip/irq-sunxi-nmi.c b/drivers/irqchip/irq-sunxi-nmi.c
index 0820f67..668730c 100644
--- a/drivers/irqchip/irq-sunxi-nmi.c
+++ b/drivers/irqchip/irq-sunxi-nmi.c
@@ -160,9 +160,9 @@
 
 	gc = irq_get_domain_generic_chip(domain, 0);
 	gc->reg_base = of_io_request_and_map(node, 0, of_node_full_name(node));
-	if (!gc->reg_base) {
+	if (IS_ERR(gc->reg_base)) {
 		pr_err("unable to map resource\n");
-		ret = -ENOMEM;
+		ret = PTR_ERR(gc->reg_base);
 		goto fail_irqd_remove;
 	}
 
diff --git a/drivers/irqchip/irq-tango.c b/drivers/irqchip/irq-tango.c
new file mode 100644
index 0000000..bdbb5c0
--- /dev/null
+++ b/drivers/irqchip/irq-tango.c
@@ -0,0 +1,232 @@
+/*
+ * Copyright (C) 2014 Mans Rullgard <mans@mansr.com>
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/init.h>
+#include <linux/irq.h>
+#include <linux/irqchip.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/ioport.h>
+#include <linux/io.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/slab.h>
+
+#define IRQ0_CTL_BASE		0x0000
+#define IRQ1_CTL_BASE		0x0100
+#define EDGE_CTL_BASE		0x0200
+#define IRQ2_CTL_BASE		0x0300
+
+#define IRQ_CTL_HI		0x18
+#define EDGE_CTL_HI		0x20
+
+#define IRQ_STATUS		0x00
+#define IRQ_RAWSTAT		0x04
+#define IRQ_EN_SET		0x08
+#define IRQ_EN_CLR		0x0c
+#define IRQ_SOFT_SET		0x10
+#define IRQ_SOFT_CLR		0x14
+
+#define EDGE_STATUS		0x00
+#define EDGE_RAWSTAT		0x04
+#define EDGE_CFG_RISE		0x08
+#define EDGE_CFG_FALL		0x0c
+#define EDGE_CFG_RISE_SET	0x10
+#define EDGE_CFG_RISE_CLR	0x14
+#define EDGE_CFG_FALL_SET	0x18
+#define EDGE_CFG_FALL_CLR	0x1c
+
+struct tangox_irq_chip {
+	void __iomem *base;
+	unsigned long ctl;
+};
+
+static inline u32 intc_readl(struct tangox_irq_chip *chip, int reg)
+{
+	return readl_relaxed(chip->base + reg);
+}
+
+static inline void intc_writel(struct tangox_irq_chip *chip, int reg, u32 val)
+{
+	writel_relaxed(val, chip->base + reg);
+}
+
+static void tangox_dispatch_irqs(struct irq_domain *dom, unsigned int status,
+				 int base)
+{
+	unsigned int hwirq;
+	unsigned int virq;
+
+	while (status) {
+		hwirq = __ffs(status);
+		virq = irq_find_mapping(dom, base + hwirq);
+		if (virq)
+			generic_handle_irq(virq);
+		status &= ~BIT(hwirq);
+	}
+}
+
+static void tangox_irq_handler(struct irq_desc *desc)
+{
+	struct irq_domain *dom = irq_desc_get_handler_data(desc);
+	struct irq_chip *host_chip = irq_desc_get_chip(desc);
+	struct tangox_irq_chip *chip = dom->host_data;
+	unsigned int status_lo, status_hi;
+
+	chained_irq_enter(host_chip, desc);
+
+	status_lo = intc_readl(chip, chip->ctl + IRQ_STATUS);
+	status_hi = intc_readl(chip, chip->ctl + IRQ_CTL_HI + IRQ_STATUS);
+
+	tangox_dispatch_irqs(dom, status_lo, 0);
+	tangox_dispatch_irqs(dom, status_hi, 32);
+
+	chained_irq_exit(host_chip, desc);
+}
+
+static int tangox_irq_set_type(struct irq_data *d, unsigned int flow_type)
+{
+	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
+	struct tangox_irq_chip *chip = gc->domain->host_data;
+	struct irq_chip_regs *regs = &gc->chip_types[0].regs;
+
+	switch (flow_type & IRQ_TYPE_SENSE_MASK) {
+	case IRQ_TYPE_EDGE_RISING:
+		intc_writel(chip, regs->type + EDGE_CFG_RISE_SET, d->mask);
+		intc_writel(chip, regs->type + EDGE_CFG_FALL_CLR, d->mask);
+		break;
+
+	case IRQ_TYPE_EDGE_FALLING:
+		intc_writel(chip, regs->type + EDGE_CFG_RISE_CLR, d->mask);
+		intc_writel(chip, regs->type + EDGE_CFG_FALL_SET, d->mask);
+		break;
+
+	case IRQ_TYPE_LEVEL_HIGH:
+		intc_writel(chip, regs->type + EDGE_CFG_RISE_CLR, d->mask);
+		intc_writel(chip, regs->type + EDGE_CFG_FALL_CLR, d->mask);
+		break;
+
+	case IRQ_TYPE_LEVEL_LOW:
+		intc_writel(chip, regs->type + EDGE_CFG_RISE_SET, d->mask);
+		intc_writel(chip, regs->type + EDGE_CFG_FALL_SET, d->mask);
+		break;
+
+	default:
+		pr_err("Invalid trigger mode %x for IRQ %d\n",
+		       flow_type, d->irq);
+		return -EINVAL;
+	}
+
+	return irq_setup_alt_chip(d, flow_type);
+}
+
+static void __init tangox_irq_init_chip(struct irq_chip_generic *gc,
+					unsigned long ctl_offs,
+					unsigned long edge_offs)
+{
+	struct tangox_irq_chip *chip = gc->domain->host_data;
+	struct irq_chip_type *ct = gc->chip_types;
+	unsigned long ctl_base = chip->ctl + ctl_offs;
+	unsigned long edge_base = EDGE_CTL_BASE + edge_offs;
+	int i;
+
+	gc->reg_base = chip->base;
+	gc->unused = 0;
+
+	for (i = 0; i < 2; i++) {
+		ct[i].chip.irq_ack = irq_gc_ack_set_bit;
+		ct[i].chip.irq_mask = irq_gc_mask_disable_reg;
+		ct[i].chip.irq_mask_ack = irq_gc_mask_disable_reg_and_ack;
+		ct[i].chip.irq_unmask = irq_gc_unmask_enable_reg;
+		ct[i].chip.irq_set_type = tangox_irq_set_type;
+		ct[i].chip.name = gc->domain->name;
+
+		ct[i].regs.enable = ctl_base + IRQ_EN_SET;
+		ct[i].regs.disable = ctl_base + IRQ_EN_CLR;
+		ct[i].regs.ack = edge_base + EDGE_RAWSTAT;
+		ct[i].regs.type = edge_base;
+	}
+
+	ct[0].type = IRQ_TYPE_LEVEL_MASK;
+	ct[0].handler = handle_level_irq;
+
+	ct[1].type = IRQ_TYPE_EDGE_BOTH;
+	ct[1].handler = handle_edge_irq;
+
+	intc_writel(chip, ct->regs.disable, 0xffffffff);
+	intc_writel(chip, ct->regs.ack, 0xffffffff);
+}
+
+static void __init tangox_irq_domain_init(struct irq_domain *dom)
+{
+	struct irq_chip_generic *gc;
+	int i;
+
+	for (i = 0; i < 2; i++) {
+		gc = irq_get_domain_generic_chip(dom, i * 32);
+		tangox_irq_init_chip(gc, i * IRQ_CTL_HI, i * EDGE_CTL_HI);
+	}
+}
+
+static int __init tangox_irq_init(void __iomem *base, struct resource *baseres,
+				  struct device_node *node)
+{
+	struct tangox_irq_chip *chip;
+	struct irq_domain *dom;
+	struct resource res;
+	int irq;
+	int err;
+
+	irq = irq_of_parse_and_map(node, 0);
+	if (!irq)
+		panic("%s: failed to get IRQ", node->name);
+
+	err = of_address_to_resource(node, 0, &res);
+	if (err)
+		panic("%s: failed to get address", node->name);
+
+	chip = kzalloc(sizeof(*chip), GFP_KERNEL);
+	chip->ctl = res.start - baseres->start;
+	chip->base = base;
+
+	dom = irq_domain_add_linear(node, 64, &irq_generic_chip_ops, chip);
+	if (!dom)
+		panic("%s: failed to create irqdomain", node->name);
+
+	err = irq_alloc_domain_generic_chips(dom, 32, 2, node->name,
+					     handle_level_irq, 0, 0, 0);
+	if (err)
+		panic("%s: failed to allocate irqchip", node->name);
+
+	tangox_irq_domain_init(dom);
+
+	irq_set_chained_handler(irq, tangox_irq_handler);
+	irq_set_handler_data(irq, dom);
+
+	return 0;
+}
+
+static int __init tangox_of_irq_init(struct device_node *node,
+				     struct device_node *parent)
+{
+	struct device_node *c;
+	struct resource res;
+	void __iomem *base;
+
+	base = of_iomap(node, 0);
+	if (!base)
+		panic("%s: of_iomap failed", node->name);
+
+	of_address_to_resource(node, 0, &res);
+
+	for_each_child_of_node(node, c)
+		tangox_irq_init(base, &res, c);
+
+	return 0;
+}
+IRQCHIP_DECLARE(tangox_intc, "sigma,smp8642-intc", tangox_of_irq_init);
diff --git a/drivers/irqchip/irq-ts4800.c b/drivers/irqchip/irq-ts4800.c
index 4192bdc..2325fb3 100644
--- a/drivers/irqchip/irq-ts4800.c
+++ b/drivers/irqchip/irq-ts4800.c
@@ -59,7 +59,7 @@
 	return 0;
 }
 
-struct irq_domain_ops ts4800_ic_ops = {
+static const struct irq_domain_ops ts4800_ic_ops = {
 	.map = ts4800_irqdomain_map,
 	.xlate = irq_domain_xlate_onecell,
 };
diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig
index 7f940c2..1f64151 100644
--- a/drivers/leds/Kconfig
+++ b/drivers/leds/Kconfig
@@ -568,6 +568,14 @@
 	  This driver can also be built as a module. If so the module
 	  will be called leds-sead3.
 
+config LEDS_IS31FL32XX
+	tristate "LED support for ISSI IS31FL32XX I2C LED controller family"
+	depends on LEDS_CLASS && I2C && OF
+	help
+	  Say Y here to include support for ISSI IS31FL32XX and Si-En SN32xx
+	  LED controllers. They are I2C devices with multiple constant-current
+	  channels, each with independent 256-level PWM control.
+
 comment "LED driver for blink(1) USB RGB LED is under Special HID drivers (HID_THINGM)"
 
 config LEDS_BLINKM
diff --git a/drivers/leds/Makefile b/drivers/leds/Makefile
index e9d53092..cb2013d 100644
--- a/drivers/leds/Makefile
+++ b/drivers/leds/Makefile
@@ -66,6 +66,7 @@
 obj-$(CONFIG_LEDS_KTD2692)		+= leds-ktd2692.o
 obj-$(CONFIG_LEDS_POWERNV)		+= leds-powernv.o
 obj-$(CONFIG_LEDS_SEAD3)		+= leds-sead3.o
+obj-$(CONFIG_LEDS_IS31FL32XX)		+= leds-is31fl32xx.o
 
 # LED SPI Drivers
 obj-$(CONFIG_LEDS_DAC124S085)		+= leds-dac124s085.o
diff --git a/drivers/leds/led-class.c b/drivers/leds/led-class.c
index 14139c3..aa84e5b 100644
--- a/drivers/leds/led-class.c
+++ b/drivers/leds/led-class.c
@@ -245,6 +245,8 @@
 	up_write(&led_cdev->trigger_lock);
 #endif
 
+	led_cdev->flags |= LED_UNREGISTERING;
+
 	/* Stop blinking */
 	led_stop_software_blink(led_cdev);
 
diff --git a/drivers/leds/led-core.c b/drivers/leds/led-core.c
index 19e1e60d..3495d5d 100644
--- a/drivers/leds/led-core.c
+++ b/drivers/leds/led-core.c
@@ -25,6 +25,26 @@
 LIST_HEAD(leds_list);
 EXPORT_SYMBOL_GPL(leds_list);
 
+static int __led_set_brightness(struct led_classdev *led_cdev,
+				enum led_brightness value)
+{
+	if (!led_cdev->brightness_set)
+		return -ENOTSUPP;
+
+	led_cdev->brightness_set(led_cdev, value);
+
+	return 0;
+}
+
+static int __led_set_brightness_blocking(struct led_classdev *led_cdev,
+					 enum led_brightness value)
+{
+	if (!led_cdev->brightness_set_blocking)
+		return -ENOTSUPP;
+
+	return led_cdev->brightness_set_blocking(led_cdev, value);
+}
+
 static void led_timer_function(unsigned long data)
 {
 	struct led_classdev *led_cdev = (void *)data;
@@ -91,14 +111,14 @@
 		led_cdev->flags &= ~LED_BLINK_DISABLE;
 	}
 
-	if (led_cdev->brightness_set)
-		led_cdev->brightness_set(led_cdev, led_cdev->delayed_set_value);
-	else if (led_cdev->brightness_set_blocking)
-		ret = led_cdev->brightness_set_blocking(led_cdev,
-						led_cdev->delayed_set_value);
-	else
-		ret = -ENOTSUPP;
-	if (ret < 0)
+	ret = __led_set_brightness(led_cdev, led_cdev->delayed_set_value);
+	if (ret == -ENOTSUPP)
+		ret = __led_set_brightness_blocking(led_cdev,
+					led_cdev->delayed_set_value);
+	if (ret < 0 &&
+	    /* LED HW might have been unplugged, therefore don't warn */
+	    !(ret == -ENODEV && (led_cdev->flags & LED_UNREGISTERING) &&
+	    (led_cdev->flags & LED_HW_PLUGGABLE)))
 		dev_err(led_cdev->dev,
 			"Setting an LED's brightness failed (%d)\n", ret);
 }
@@ -233,10 +253,8 @@
 			      enum led_brightness value)
 {
 	/* Use brightness_set op if available, it is guaranteed not to sleep */
-	if (led_cdev->brightness_set) {
-		led_cdev->brightness_set(led_cdev, value);
+	if (!__led_set_brightness(led_cdev, value))
 		return;
-	}
 
 	/* If brightness setting can sleep, delegate it to a work queue task */
 	led_cdev->delayed_set_value = value;
@@ -267,10 +285,7 @@
 	if (led_cdev->flags & LED_SUSPENDED)
 		return 0;
 
-	if (led_cdev->brightness_set_blocking)
-		return led_cdev->brightness_set_blocking(led_cdev,
-							 led_cdev->brightness);
-	return -ENOTSUPP;
+	return __led_set_brightness_blocking(led_cdev, led_cdev->brightness);
 }
 EXPORT_SYMBOL_GPL(led_set_brightness_sync);
 
diff --git a/drivers/leds/led-triggers.c b/drivers/leds/led-triggers.c
index e1e9334..2181581 100644
--- a/drivers/leds/led-triggers.c
+++ b/drivers/leds/led-triggers.c
@@ -34,9 +34,7 @@
 		const char *buf, size_t count)
 {
 	struct led_classdev *led_cdev = dev_get_drvdata(dev);
-	char trigger_name[TRIG_NAME_MAX];
 	struct led_trigger *trig;
-	size_t len;
 	int ret = count;
 
 	mutex_lock(&led_cdev->led_access);
@@ -46,21 +44,14 @@
 		goto unlock;
 	}
 
-	trigger_name[sizeof(trigger_name) - 1] = '\0';
-	strncpy(trigger_name, buf, sizeof(trigger_name) - 1);
-	len = strlen(trigger_name);
-
-	if (len && trigger_name[len - 1] == '\n')
-		trigger_name[len - 1] = '\0';
-
-	if (!strcmp(trigger_name, "none")) {
+	if (sysfs_streq(buf, "none")) {
 		led_trigger_remove(led_cdev);
 		goto unlock;
 	}
 
 	down_read(&triggers_list_lock);
 	list_for_each_entry(trig, &trigger_list, next_trig) {
-		if (!strcmp(trigger_name, trig->name)) {
+		if (sysfs_streq(buf, trig->name)) {
 			down_write(&led_cdev->trigger_lock);
 			led_trigger_set(led_cdev, trig);
 			up_write(&led_cdev->trigger_lock);
diff --git a/drivers/leds/leds-88pm860x.c b/drivers/leds/leds-88pm860x.c
index 1ad4d03..77a104d 100644
--- a/drivers/leds/leds-88pm860x.c
+++ b/drivers/leds/leds-88pm860x.c
@@ -195,7 +195,6 @@
 		sprintf(data->name, "led1-blue");
 		break;
 	}
-	platform_set_drvdata(pdev, data);
 	data->chip = chip;
 	data->i2c = (chip->id == CHIP_PM8606) ? chip->client : chip->companion;
 	data->port = pdev->id;
@@ -208,7 +207,7 @@
 	data->cdev.brightness_set_blocking = pm860x_led_set;
 	mutex_init(&data->lock);
 
-	ret = led_classdev_register(chip->dev, &data->cdev);
+	ret = devm_led_classdev_register(chip->dev, &data->cdev);
 	if (ret < 0) {
 		dev_err(&pdev->dev, "Failed to register LED: %d\n", ret);
 		return ret;
@@ -217,21 +216,12 @@
 	return 0;
 }
 
-static int pm860x_led_remove(struct platform_device *pdev)
-{
-	struct pm860x_led *data = platform_get_drvdata(pdev);
-
-	led_classdev_unregister(&data->cdev);
-
-	return 0;
-}
 
 static struct platform_driver pm860x_led_driver = {
 	.driver	= {
 		.name	= "88pm860x-led",
 	},
 	.probe	= pm860x_led_probe,
-	.remove	= pm860x_led_remove,
 };
 
 module_platform_driver(pm860x_led_driver);
diff --git a/drivers/leds/leds-da903x.c b/drivers/leds/leds-da903x.c
index 4752a2b..5ff7d72 100644
--- a/drivers/leds/leds-da903x.c
+++ b/drivers/leds/leds-da903x.c
@@ -113,21 +113,12 @@
 	led->flags = pdata->flags;
 	led->master = pdev->dev.parent;
 
-	ret = led_classdev_register(led->master, &led->cdev);
+	ret = devm_led_classdev_register(led->master, &led->cdev);
 	if (ret) {
 		dev_err(&pdev->dev, "failed to register LED %d\n", id);
 		return ret;
 	}
 
-	platform_set_drvdata(pdev, led);
-	return 0;
-}
-
-static int da903x_led_remove(struct platform_device *pdev)
-{
-	struct da903x_led *led = platform_get_drvdata(pdev);
-
-	led_classdev_unregister(&led->cdev);
 	return 0;
 }
 
@@ -136,7 +127,6 @@
 		.name	= "da903x-led",
 	},
 	.probe		= da903x_led_probe,
-	.remove		= da903x_led_remove,
 };
 
 module_platform_driver(da903x_led_driver);
diff --git a/drivers/leds/leds-gpio.c b/drivers/leds/leds-gpio.c
index 7bc5328..61143f5 100644
--- a/drivers/leds/leds-gpio.c
+++ b/drivers/leds/leds-gpio.c
@@ -86,7 +86,7 @@
 		 * still uses GPIO numbers. Ultimately we would like to get
 		 * rid of this block completely.
 		 */
-		unsigned long flags = 0;
+		unsigned long flags = GPIOF_OUT_INIT_LOW;
 
 		/* skip leds that aren't available */
 		if (!gpio_is_valid(template->gpio)) {
@@ -104,8 +104,8 @@
 			return ret;
 
 		led_dat->gpiod = gpio_to_desc(template->gpio);
-		if (IS_ERR(led_dat->gpiod))
-			return PTR_ERR(led_dat->gpiod);
+		if (!led_dat->gpiod)
+			return -EINVAL;
 	}
 
 	led_dat->cdev.name = template->name;
diff --git a/drivers/leds/leds-is31fl32xx.c b/drivers/leds/leds-is31fl32xx.c
new file mode 100644
index 0000000..c901d13
--- /dev/null
+++ b/drivers/leds/leds-is31fl32xx.c
@@ -0,0 +1,508 @@
+/*
+ * Driver for ISSI IS31FL32xx family of I2C LED controllers
+ *
+ * Copyright 2015 Allworx Corp.
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Datasheets:
+ *   http://www.issi.com/US/product-analog-fxled-driver.shtml
+ *   http://www.si-en.com/product.asp?parentid=890
+ */
+
+#include <linux/device.h>
+#include <linux/i2c.h>
+#include <linux/kernel.h>
+#include <linux/leds.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+
+/* Used to indicate a device has no such register */
+#define IS31FL32XX_REG_NONE 0xFF
+
+/* Software Shutdown bit in Shutdown Register */
+#define IS31FL32XX_SHUTDOWN_SSD_ENABLE  0
+#define IS31FL32XX_SHUTDOWN_SSD_DISABLE BIT(0)
+
+/* IS31FL3216 has a number of unique registers */
+#define IS31FL3216_CONFIG_REG 0x00
+#define IS31FL3216_LIGHTING_EFFECT_REG 0x03
+#define IS31FL3216_CHANNEL_CONFIG_REG 0x04
+
+/* Software Shutdown bit in 3216 Config Register */
+#define IS31FL3216_CONFIG_SSD_ENABLE  BIT(7)
+#define IS31FL3216_CONFIG_SSD_DISABLE 0
+
+struct is31fl32xx_priv;
+struct is31fl32xx_led_data {
+	struct led_classdev cdev;
+	u8 channel; /* 1-based, max priv->cdef->channels */
+	struct is31fl32xx_priv *priv;
+};
+
+struct is31fl32xx_priv {
+	const struct is31fl32xx_chipdef *cdef;
+	struct i2c_client *client;
+	unsigned int num_leds;
+	struct is31fl32xx_led_data leds[0];
+};
+
+/**
+ * struct is31fl32xx_chipdef - chip-specific attributes
+ * @channels            : Number of LED channels
+ * @shutdown_reg        : address of Shutdown register (optional)
+ * @pwm_update_reg      : address of PWM Update register
+ * @global_control_reg  : address of Global Control register (optional)
+ * @reset_reg           : address of Reset register (optional)
+ * @pwm_register_base   : address of first PWM register
+ * @pwm_registers_reversed : true if PWM registers count down instead of up
+ * @led_control_register_base : address of first LED control register (optional)
+ * @enable_bits_per_led_control_register : number of LED enable bits in each
+ *	LED control register
+ * @reset_func          : pointer to reset function
+ * @sw_shutdown_func    : pointer to software shutdown function
+ *
+ * For all optional register addresses, the sentinel value %IS31FL32XX_REG_NONE
+ * indicates that this chip has no such register.
+ *
+ * If non-NULL, @reset_func will be called during probing to set all
+ * necessary registers to a known initialization state. This is needed
+ * for chips that do not have a @reset_reg.
+ *
+ * @enable_bits_per_led_control_register must be >=1 if
+ * @led_control_register_base != %IS31FL32XX_REG_NONE.
+ */
+struct is31fl32xx_chipdef {
+	u8	channels;
+	u8	shutdown_reg;
+	u8	pwm_update_reg;
+	u8	global_control_reg;
+	u8	reset_reg;
+	u8	pwm_register_base;
+	bool	pwm_registers_reversed;
+	u8	led_control_register_base;
+	u8	enable_bits_per_led_control_register;
+	int (*reset_func)(struct is31fl32xx_priv *priv);
+	int (*sw_shutdown_func)(struct is31fl32xx_priv *priv, bool enable);
+};
+
+static const struct is31fl32xx_chipdef is31fl3236_cdef = {
+	.channels				= 36,
+	.shutdown_reg				= 0x00,
+	.pwm_update_reg				= 0x25,
+	.global_control_reg			= 0x4a,
+	.reset_reg				= 0x4f,
+	.pwm_register_base			= 0x01,
+	.led_control_register_base		= 0x26,
+	.enable_bits_per_led_control_register	= 1,
+};
+
+static const struct is31fl32xx_chipdef is31fl3235_cdef = {
+	.channels				= 28,
+	.shutdown_reg				= 0x00,
+	.pwm_update_reg				= 0x25,
+	.global_control_reg			= 0x4a,
+	.reset_reg				= 0x4f,
+	.pwm_register_base			= 0x05,
+	.led_control_register_base		= 0x2a,
+	.enable_bits_per_led_control_register	= 1,
+};
+
+static const struct is31fl32xx_chipdef is31fl3218_cdef = {
+	.channels				= 18,
+	.shutdown_reg				= 0x00,
+	.pwm_update_reg				= 0x16,
+	.global_control_reg			= IS31FL32XX_REG_NONE,
+	.reset_reg				= 0x17,
+	.pwm_register_base			= 0x01,
+	.led_control_register_base		= 0x13,
+	.enable_bits_per_led_control_register	= 6,
+};
+
+static int is31fl3216_reset(struct is31fl32xx_priv *priv);
+static int is31fl3216_software_shutdown(struct is31fl32xx_priv *priv,
+					bool enable);
+static const struct is31fl32xx_chipdef is31fl3216_cdef = {
+	.channels				= 16,
+	.shutdown_reg				= IS31FL32XX_REG_NONE,
+	.pwm_update_reg				= 0xB0,
+	.global_control_reg			= IS31FL32XX_REG_NONE,
+	.reset_reg				= IS31FL32XX_REG_NONE,
+	.pwm_register_base			= 0x10,
+	.pwm_registers_reversed			= true,
+	.led_control_register_base		= 0x01,
+	.enable_bits_per_led_control_register	= 8,
+	.reset_func				= is31fl3216_reset,
+	.sw_shutdown_func			= is31fl3216_software_shutdown,
+};
+
+static int is31fl32xx_write(struct is31fl32xx_priv *priv, u8 reg, u8 val)
+{
+	int ret;
+
+	dev_dbg(&priv->client->dev, "writing register 0x%02X=0x%02X", reg, val);
+
+	ret =  i2c_smbus_write_byte_data(priv->client, reg, val);
+	if (ret) {
+		dev_err(&priv->client->dev,
+			"register write to 0x%02X failed (error %d)",
+			reg, ret);
+	}
+	return ret;
+}
+
+/*
+ * Custom reset function for IS31FL3216 because it does not have a RESET
+ * register the way that the other IS31FL32xx chips do. We don't bother
+ * writing the GPIO and animation registers, because the registers we
+ * do write ensure those will have no effect.
+ */
+static int is31fl3216_reset(struct is31fl32xx_priv *priv)
+{
+	unsigned int i;
+	int ret;
+
+	ret = is31fl32xx_write(priv, IS31FL3216_CONFIG_REG,
+			       IS31FL3216_CONFIG_SSD_ENABLE);
+	if (ret)
+		return ret;
+	for (i = 0; i < priv->cdef->channels; i++) {
+		ret = is31fl32xx_write(priv, priv->cdef->pwm_register_base+i,
+				       0x00);
+		if (ret)
+			return ret;
+	}
+	ret = is31fl32xx_write(priv, priv->cdef->pwm_update_reg, 0);
+	if (ret)
+		return ret;
+	ret = is31fl32xx_write(priv, IS31FL3216_LIGHTING_EFFECT_REG, 0x00);
+	if (ret)
+		return ret;
+	ret = is31fl32xx_write(priv, IS31FL3216_CHANNEL_CONFIG_REG, 0x00);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+/*
+ * Custom Software-Shutdown function for IS31FL3216 because it does not have
+ * a SHUTDOWN register the way that the other IS31FL32xx chips do.
+ * We don't bother doing a read/modify/write on the CONFIG register because
+ * we only ever use a value of '0' for the other fields in that register.
+ */
+static int is31fl3216_software_shutdown(struct is31fl32xx_priv *priv,
+					bool enable)
+{
+	u8 value = enable ? IS31FL3216_CONFIG_SSD_ENABLE :
+			    IS31FL3216_CONFIG_SSD_DISABLE;
+
+	return is31fl32xx_write(priv, IS31FL3216_CONFIG_REG, value);
+}
+
+/*
+ * NOTE: A mutex is not needed in this function because:
+ * - All referenced data is read-only after probe()
+ * - The I2C core has a mutex to protect the bus
+ * - There are no read/modify/write operations
+ * - Intervening operations between the write of the PWM register
+ *   and the Update register are harmless.
+ *
+ * Example:
+ *	PWM_REG_1 write 16
+ *	UPDATE_REG write 0
+ *	PWM_REG_2 write 128
+ *	UPDATE_REG write 0
+ *   vs:
+ *	PWM_REG_1 write 16
+ *	PWM_REG_2 write 128
+ *	UPDATE_REG write 0
+ *	UPDATE_REG write 0
+ * are equivalent. Poking the Update register merely applies all PWM
+ * register writes up to that point.
+ */
+static int is31fl32xx_brightness_set(struct led_classdev *led_cdev,
+				     enum led_brightness brightness)
+{
+	const struct is31fl32xx_led_data *led_data =
+		container_of(led_cdev, struct is31fl32xx_led_data, cdev);
+	const struct is31fl32xx_chipdef *cdef = led_data->priv->cdef;
+	u8 pwm_register_offset;
+	int ret;
+
+	dev_dbg(led_cdev->dev, "%s: %d\n", __func__, brightness);
+
+	/* NOTE: led_data->channel is 1-based */
+	if (cdef->pwm_registers_reversed)
+		pwm_register_offset = cdef->channels - led_data->channel;
+	else
+		pwm_register_offset = led_data->channel - 1;
+
+	ret = is31fl32xx_write(led_data->priv,
+			       cdef->pwm_register_base + pwm_register_offset,
+			       brightness);
+	if (ret)
+		return ret;
+
+	return is31fl32xx_write(led_data->priv, cdef->pwm_update_reg, 0);
+}
+
+static int is31fl32xx_reset_regs(struct is31fl32xx_priv *priv)
+{
+	const struct is31fl32xx_chipdef *cdef = priv->cdef;
+	int ret;
+
+	if (cdef->reset_reg != IS31FL32XX_REG_NONE) {
+		ret = is31fl32xx_write(priv, cdef->reset_reg, 0);
+		if (ret)
+			return ret;
+	}
+
+	if (cdef->reset_func)
+		return cdef->reset_func(priv);
+
+	return 0;
+}
+
+static int is31fl32xx_software_shutdown(struct is31fl32xx_priv *priv,
+					bool enable)
+{
+	const struct is31fl32xx_chipdef *cdef = priv->cdef;
+	int ret;
+
+	if (cdef->shutdown_reg != IS31FL32XX_REG_NONE) {
+		u8 value = enable ? IS31FL32XX_SHUTDOWN_SSD_ENABLE :
+				    IS31FL32XX_SHUTDOWN_SSD_DISABLE;
+		ret = is31fl32xx_write(priv, cdef->shutdown_reg, value);
+		if (ret)
+			return ret;
+	}
+
+	if (cdef->sw_shutdown_func)
+		return cdef->sw_shutdown_func(priv, enable);
+
+	return 0;
+}
+
+static int is31fl32xx_init_regs(struct is31fl32xx_priv *priv)
+{
+	const struct is31fl32xx_chipdef *cdef = priv->cdef;
+	int ret;
+
+	ret = is31fl32xx_reset_regs(priv);
+	if (ret)
+		return ret;
+
+	/*
+	 * Set enable bit for all channels.
+	 * We will control state with PWM registers alone.
+	 */
+	if (cdef->led_control_register_base != IS31FL32XX_REG_NONE) {
+		u8 value =
+		    GENMASK(cdef->enable_bits_per_led_control_register-1, 0);
+		u8 num_regs = cdef->channels /
+				cdef->enable_bits_per_led_control_register;
+		int i;
+
+		for (i = 0; i < num_regs; i++) {
+			ret = is31fl32xx_write(priv,
+					       cdef->led_control_register_base+i,
+					       value);
+			if (ret)
+				return ret;
+		}
+	}
+
+	ret = is31fl32xx_software_shutdown(priv, false);
+	if (ret)
+		return ret;
+
+	if (cdef->global_control_reg != IS31FL32XX_REG_NONE) {
+		ret = is31fl32xx_write(priv, cdef->global_control_reg, 0x00);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
+static inline size_t sizeof_is31fl32xx_priv(int num_leds)
+{
+	return sizeof(struct is31fl32xx_priv) +
+		      (sizeof(struct is31fl32xx_led_data) * num_leds);
+}
+
+static int is31fl32xx_parse_child_dt(const struct device *dev,
+				     const struct device_node *child,
+				     struct is31fl32xx_led_data *led_data)
+{
+	struct led_classdev *cdev = &led_data->cdev;
+	int ret = 0;
+	u32 reg;
+
+	if (of_property_read_string(child, "label", &cdev->name))
+		cdev->name = child->name;
+
+	ret = of_property_read_u32(child, "reg", &reg);
+	if (ret || reg < 1 || reg > led_data->priv->cdef->channels) {
+		dev_err(dev,
+			"Child node %s does not have a valid reg property\n",
+			child->full_name);
+		return -EINVAL;
+	}
+	led_data->channel = reg;
+
+	of_property_read_string(child, "linux,default-trigger",
+				&cdev->default_trigger);
+
+	cdev->brightness_set_blocking = is31fl32xx_brightness_set;
+
+	return 0;
+}
+
+static struct is31fl32xx_led_data *is31fl32xx_find_led_data(
+					struct is31fl32xx_priv *priv,
+					u8 channel)
+{
+	size_t i;
+
+	for (i = 0; i < priv->num_leds; i++) {
+		if (priv->leds[i].channel == channel)
+			return &priv->leds[i];
+	}
+
+	return NULL;
+}
+
+static int is31fl32xx_parse_dt(struct device *dev,
+			       struct is31fl32xx_priv *priv)
+{
+	struct device_node *child;
+	int ret = 0;
+
+	for_each_child_of_node(dev->of_node, child) {
+		struct is31fl32xx_led_data *led_data =
+			&priv->leds[priv->num_leds];
+		const struct is31fl32xx_led_data *other_led_data;
+
+		led_data->priv = priv;
+
+		ret = is31fl32xx_parse_child_dt(dev, child, led_data);
+		if (ret)
+			goto err;
+
+		/* Detect if channel is already in use by another child */
+		other_led_data = is31fl32xx_find_led_data(priv,
+							  led_data->channel);
+		if (other_led_data) {
+			dev_err(dev,
+				"%s and %s both attempting to use channel %d\n",
+				led_data->cdev.name,
+				other_led_data->cdev.name,
+				led_data->channel);
+			goto err;
+		}
+
+		ret = devm_led_classdev_register(dev, &led_data->cdev);
+		if (ret) {
+			dev_err(dev, "failed to register PWM led for %s: %d\n",
+				led_data->cdev.name, ret);
+			goto err;
+		}
+
+		priv->num_leds++;
+	}
+
+	return 0;
+
+err:
+	of_node_put(child);
+	return ret;
+}
+
+static const struct of_device_id of_is31fl31xx_match[] = {
+	{ .compatible = "issi,is31fl3236", .data = &is31fl3236_cdef, },
+	{ .compatible = "issi,is31fl3235", .data = &is31fl3235_cdef, },
+	{ .compatible = "issi,is31fl3218", .data = &is31fl3218_cdef, },
+	{ .compatible = "si-en,sn3218",    .data = &is31fl3218_cdef, },
+	{ .compatible = "issi,is31fl3216", .data = &is31fl3216_cdef, },
+	{ .compatible = "si-en,sn3216",    .data = &is31fl3216_cdef, },
+	{},
+};
+
+MODULE_DEVICE_TABLE(of, of_is31fl31xx_match);
+
+static int is31fl32xx_probe(struct i2c_client *client,
+			    const struct i2c_device_id *id)
+{
+	const struct is31fl32xx_chipdef *cdef;
+	const struct of_device_id *of_dev_id;
+	struct device *dev = &client->dev;
+	struct is31fl32xx_priv *priv;
+	int count;
+	int ret = 0;
+
+	of_dev_id = of_match_device(of_is31fl31xx_match, dev);
+	if (!of_dev_id)
+		return -EINVAL;
+
+	cdef = of_dev_id->data;
+
+	count = of_get_child_count(dev->of_node);
+	if (!count)
+		return -EINVAL;
+
+	priv = devm_kzalloc(dev, sizeof_is31fl32xx_priv(count),
+			    GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+
+	priv->client = client;
+	priv->cdef = cdef;
+	i2c_set_clientdata(client, priv);
+
+	ret = is31fl32xx_init_regs(priv);
+	if (ret)
+		return ret;
+
+	ret = is31fl32xx_parse_dt(dev, priv);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+static int is31fl32xx_remove(struct i2c_client *client)
+{
+	struct is31fl32xx_priv *priv = i2c_get_clientdata(client);
+
+	return is31fl32xx_reset_regs(priv);
+}
+
+/*
+ * i2c-core requires that id_table be non-NULL, even though
+ * it is not used for DeviceTree based instantiation.
+ */
+static const struct i2c_device_id is31fl31xx_id[] = {
+	{},
+};
+
+MODULE_DEVICE_TABLE(i2c, is31fl31xx_id);
+
+static struct i2c_driver is31fl32xx_driver = {
+	.driver = {
+		.name	= "is31fl32xx",
+		.of_match_table = of_is31fl31xx_match,
+	},
+	.probe		= is31fl32xx_probe,
+	.remove		= is31fl32xx_remove,
+	.id_table	= is31fl31xx_id,
+};
+
+module_i2c_driver(is31fl32xx_driver);
+
+MODULE_AUTHOR("David Rivshin <drivshin@allworx.com>");
+MODULE_DESCRIPTION("ISSI IS31FL32xx LED driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/leds/leds-lm3533.c b/drivers/leds/leds-lm3533.c
index 196dcb5..5b529dc 100644
--- a/drivers/leds/leds-lm3533.c
+++ b/drivers/leds/leds-lm3533.c
@@ -698,7 +698,7 @@
 
 	platform_set_drvdata(pdev, led);
 
-	ret = led_classdev_register(pdev->dev.parent, &led->cdev);
+	ret = devm_led_classdev_register(pdev->dev.parent, &led->cdev);
 	if (ret) {
 		dev_err(&pdev->dev, "failed to register LED %d\n", pdev->id);
 		return ret;
@@ -708,18 +708,13 @@
 
 	ret = lm3533_led_setup(led, pdata);
 	if (ret)
-		goto err_unregister;
+		return ret;
 
 	ret = lm3533_ctrlbank_enable(&led->cb);
 	if (ret)
-		goto err_unregister;
+		return ret;
 
 	return 0;
-
-err_unregister:
-	led_classdev_unregister(&led->cdev);
-
-	return ret;
 }
 
 static int lm3533_led_remove(struct platform_device *pdev)
@@ -729,7 +724,6 @@
 	dev_dbg(&pdev->dev, "%s\n", __func__);
 
 	lm3533_ctrlbank_disable(&led->cb);
-	led_classdev_unregister(&led->cdev);
 
 	return 0;
 }
diff --git a/drivers/leds/leds-lp3944.c b/drivers/leds/leds-lp3944.c
index 6c758ae..be60c18 100644
--- a/drivers/leds/leds-lp3944.c
+++ b/drivers/leds/leds-lp3944.c
@@ -199,8 +199,11 @@
 	if (status > LP3944_LED_STATUS_DIM1)
 		return -EINVAL;
 
-	/* invert only 0 and 1, leave unchanged the other values,
-	 * remember we are abusing status to set blink patterns
+	/*
+	 * Invert status only when it is < 2 (i.e. 0 or 1), which means it
+	 * directly controls the on/off state.
+	 * When status is >= 2, leave it unchanged: inverting it would
+	 * interfere with the hardware blinking mode.
 	 */
 	if (led->type == LP3944_LED_TYPE_LED_INVERTED && status < 2)
 		status = 1 - status;
diff --git a/drivers/leds/leds-lp8788.c b/drivers/leds/leds-lp8788.c
index 0eee38f..38c253a 100644
--- a/drivers/leds/leds-lp8788.c
+++ b/drivers/leds/leds-lp8788.c
@@ -146,15 +146,13 @@
 
 	mutex_init(&led->lock);
 
-	platform_set_drvdata(pdev, led);
-
 	ret = lp8788_led_init_device(led, led_pdata);
 	if (ret) {
 		dev_err(dev, "led init device err: %d\n", ret);
 		return ret;
 	}
 
-	ret = led_classdev_register(dev, &led->led_dev);
+	ret = devm_led_classdev_register(dev, &led->led_dev);
 	if (ret) {
 		dev_err(dev, "led register err: %d\n", ret);
 		return ret;
@@ -163,18 +161,8 @@
 	return 0;
 }
 
-static int lp8788_led_remove(struct platform_device *pdev)
-{
-	struct lp8788_led *led = platform_get_drvdata(pdev);
-
-	led_classdev_unregister(&led->led_dev);
-
-	return 0;
-}
-
 static struct platform_driver lp8788_led_driver = {
 	.probe = lp8788_led_probe,
-	.remove = lp8788_led_remove,
 	.driver = {
 		.name = LP8788_DEV_KEYLED,
 	},
diff --git a/drivers/leds/leds-max8997.c b/drivers/leds/leds-max8997.c
index 01b45906..4edf74f 100644
--- a/drivers/leds/leds-max8997.c
+++ b/drivers/leds/leds-max8997.c
@@ -281,30 +281,18 @@
 
 	mutex_init(&led->mutex);
 
-	platform_set_drvdata(pdev, led);
-
-	ret = led_classdev_register(&pdev->dev, &led->cdev);
+	ret = devm_led_classdev_register(&pdev->dev, &led->cdev);
 	if (ret < 0)
 		return ret;
 
 	return 0;
 }
 
-static int max8997_led_remove(struct platform_device *pdev)
-{
-	struct max8997_led *led = platform_get_drvdata(pdev);
-
-	led_classdev_unregister(&led->cdev);
-
-	return 0;
-}
-
 static struct platform_driver max8997_led_driver = {
 	.driver = {
 		.name  = "max8997-led",
 	},
 	.probe  = max8997_led_probe,
-	.remove = max8997_led_remove,
 };
 
 module_platform_driver(max8997_led_driver);
diff --git a/drivers/leds/leds-s3c24xx.c b/drivers/leds/leds-s3c24xx.c
index 83641a7..404da45 100644
--- a/drivers/leds/leds-s3c24xx.c
+++ b/drivers/leds/leds-s3c24xx.c
@@ -29,11 +29,6 @@
 	struct s3c24xx_led_platdata	*pdata;
 };
 
-static inline struct s3c24xx_gpio_led *pdev_to_gpio(struct platform_device *dev)
-{
-	return platform_get_drvdata(dev);
-}
-
 static inline struct s3c24xx_gpio_led *to_gpio(struct led_classdev *led_cdev)
 {
 	return container_of(led_cdev, struct s3c24xx_gpio_led, cdev);
@@ -59,15 +54,6 @@
 	}
 }
 
-static int s3c24xx_led_remove(struct platform_device *dev)
-{
-	struct s3c24xx_gpio_led *led = pdev_to_gpio(dev);
-
-	led_classdev_unregister(&led->cdev);
-
-	return 0;
-}
-
 static int s3c24xx_led_probe(struct platform_device *dev)
 {
 	struct s3c24xx_led_platdata *pdata = dev_get_platdata(&dev->dev);
@@ -79,8 +65,6 @@
 	if (!led)
 		return -ENOMEM;
 
-	platform_set_drvdata(dev, led);
-
 	led->cdev.brightness_set = s3c24xx_led_set;
 	led->cdev.default_trigger = pdata->def_trigger;
 	led->cdev.name = pdata->name;
@@ -104,7 +88,7 @@
 
 	/* register our new led device */
 
-	ret = led_classdev_register(&dev->dev, &led->cdev);
+	ret = devm_led_classdev_register(&dev->dev, &led->cdev);
 	if (ret < 0)
 		dev_err(&dev->dev, "led_classdev_register failed\n");
 
@@ -113,7 +97,6 @@
 
 static struct platform_driver s3c24xx_led_driver = {
 	.probe		= s3c24xx_led_probe,
-	.remove		= s3c24xx_led_remove,
 	.driver		= {
 		.name		= "s3c24xx_led",
 	},
diff --git a/drivers/leds/leds-wm831x-status.c b/drivers/leds/leds-wm831x-status.c
index 64a2226..be93b20 100644
--- a/drivers/leds/leds-wm831x-status.c
+++ b/drivers/leds/leds-wm831x-status.c
@@ -239,7 +239,6 @@
 			       GFP_KERNEL);
 	if (!drvdata)
 		return -ENOMEM;
-	platform_set_drvdata(pdev, drvdata);
 
 	drvdata->wm831x = wm831x;
 	drvdata->reg = res->start;
@@ -284,7 +283,7 @@
 	drvdata->cdev.blink_set = wm831x_status_blink_set;
 	drvdata->cdev.groups = wm831x_status_groups;
 
-	ret = led_classdev_register(wm831x->dev, &drvdata->cdev);
+	ret = devm_led_classdev_register(wm831x->dev, &drvdata->cdev);
 	if (ret < 0) {
 		dev_err(&pdev->dev, "Failed to register LED: %d\n", ret);
 		return ret;
@@ -293,21 +292,11 @@
 	return 0;
 }
 
-static int wm831x_status_remove(struct platform_device *pdev)
-{
-	struct wm831x_status *drvdata = platform_get_drvdata(pdev);
-
-	led_classdev_unregister(&drvdata->cdev);
-
-	return 0;
-}
-
 static struct platform_driver wm831x_status_driver = {
 	.driver = {
 		   .name = "wm831x-status",
 		   },
 	.probe = wm831x_status_probe,
-	.remove = wm831x_status_remove,
 };
 
 module_platform_driver(wm831x_status_driver);
diff --git a/drivers/mfd/max77686.c b/drivers/mfd/max77686.c
index d959ebb..98ecd13 100644
--- a/drivers/mfd/max77686.c
+++ b/drivers/mfd/max77686.c
@@ -35,8 +35,6 @@
 #include <linux/err.h>
 #include <linux/of.h>
 
-#define I2C_ADDR_RTC	(0x0C >> 1)
-
 static const struct mfd_cell max77686_devs[] = {
 	{ .name = "max77686-pmic", },
 	{ .name = "max77686-rtc", },
@@ -116,11 +114,6 @@
 	.val_bits = 8,
 };
 
-static const struct regmap_config max77686_rtc_regmap_config = {
-	.reg_bits = 8,
-	.val_bits = 8,
-};
-
 static const struct regmap_config max77802_regmap_config = {
 	.reg_bits = 8,
 	.val_bits = 8,
@@ -156,25 +149,6 @@
 	.num_irqs		= ARRAY_SIZE(max77686_irqs),
 };
 
-static const struct regmap_irq max77686_rtc_irqs[] = {
-	/* RTC interrupts */
-	{ .reg_offset = 0, .mask = MAX77686_RTCINT_RTC60S_MSK, },
-	{ .reg_offset = 0, .mask = MAX77686_RTCINT_RTCA1_MSK, },
-	{ .reg_offset = 0, .mask = MAX77686_RTCINT_RTCA2_MSK, },
-	{ .reg_offset = 0, .mask = MAX77686_RTCINT_SMPL_MSK, },
-	{ .reg_offset = 0, .mask = MAX77686_RTCINT_RTC1S_MSK, },
-	{ .reg_offset = 0, .mask = MAX77686_RTCINT_WTSR_MSK, },
-};
-
-static const struct regmap_irq_chip max77686_rtc_irq_chip = {
-	.name			= "max77686-rtc",
-	.status_base		= MAX77686_RTC_INT,
-	.mask_base		= MAX77686_RTC_INTM,
-	.num_regs		= 1,
-	.irqs			= max77686_rtc_irqs,
-	.num_irqs		= ARRAY_SIZE(max77686_rtc_irqs),
-};
-
 static const struct regmap_irq_chip max77802_irq_chip = {
 	.name			= "max77802-pmic",
 	.status_base		= MAX77802_REG_INT1,
@@ -184,15 +158,6 @@
 	.num_irqs		= ARRAY_SIZE(max77686_irqs),
 };
 
-static const struct regmap_irq_chip max77802_rtc_irq_chip = {
-	.name			= "max77802-rtc",
-	.status_base		= MAX77802_RTC_INT,
-	.mask_base		= MAX77802_RTC_INTM,
-	.num_regs		= 1,
-	.irqs			= max77686_rtc_irqs, /* same masks as 77686 */
-	.num_irqs		= ARRAY_SIZE(max77686_rtc_irqs),
-};
-
 static const struct of_device_id max77686_pmic_dt_match[] = {
 	{
 		.compatible = "maxim,max77686",
@@ -214,8 +179,6 @@
 	int ret = 0;
 	const struct regmap_config *config;
 	const struct regmap_irq_chip *irq_chip;
-	const struct regmap_irq_chip *rtc_irq_chip;
-	struct regmap **rtc_regmap;
 	const struct mfd_cell *cells;
 	int n_devs;
 
@@ -242,15 +205,11 @@
 	if (max77686->type == TYPE_MAX77686) {
 		config = &max77686_regmap_config;
 		irq_chip = &max77686_irq_chip;
-		rtc_irq_chip = &max77686_rtc_irq_chip;
-		rtc_regmap = &max77686->rtc_regmap;
 		cells =  max77686_devs;
 		n_devs = ARRAY_SIZE(max77686_devs);
 	} else {
 		config = &max77802_regmap_config;
 		irq_chip = &max77802_irq_chip;
-		rtc_irq_chip = &max77802_rtc_irq_chip;
-		rtc_regmap = &max77686->regmap;
 		cells =  max77802_devs;
 		n_devs = ARRAY_SIZE(max77802_devs);
 	}
@@ -270,60 +229,25 @@
 		return -ENODEV;
 	}
 
-	if (max77686->type == TYPE_MAX77686) {
-		max77686->rtc = i2c_new_dummy(i2c->adapter, I2C_ADDR_RTC);
-		if (!max77686->rtc) {
-			dev_err(max77686->dev,
-				"Failed to allocate I2C device for RTC\n");
-			return -ENODEV;
-		}
-		i2c_set_clientdata(max77686->rtc, max77686);
-
-		max77686->rtc_regmap =
-			devm_regmap_init_i2c(max77686->rtc,
-					     &max77686_rtc_regmap_config);
-		if (IS_ERR(max77686->rtc_regmap)) {
-			ret = PTR_ERR(max77686->rtc_regmap);
-			dev_err(max77686->dev,
-				"failed to allocate RTC regmap: %d\n",
-				ret);
-			goto err_unregister_i2c;
-		}
-	}
-
 	ret = regmap_add_irq_chip(max77686->regmap, max77686->irq,
 				  IRQF_TRIGGER_FALLING | IRQF_ONESHOT |
 				  IRQF_SHARED, 0, irq_chip,
 				  &max77686->irq_data);
-	if (ret) {
+	if (ret < 0) {
 		dev_err(&i2c->dev, "failed to add PMIC irq chip: %d\n", ret);
-		goto err_unregister_i2c;
-	}
-
-	ret = regmap_add_irq_chip(*rtc_regmap, max77686->irq,
-				  IRQF_TRIGGER_FALLING | IRQF_ONESHOT |
-				  IRQF_SHARED, 0, rtc_irq_chip,
-				  &max77686->rtc_irq_data);
-	if (ret) {
-		dev_err(&i2c->dev, "failed to add RTC irq chip: %d\n", ret);
-		goto err_del_irqc;
+		return ret;
 	}
 
 	ret = mfd_add_devices(max77686->dev, -1, cells, n_devs, NULL, 0, NULL);
 	if (ret < 0) {
 		dev_err(&i2c->dev, "failed to add MFD devices: %d\n", ret);
-		goto err_del_rtc_irqc;
+		goto err_del_irqc;
 	}
 
 	return 0;
 
-err_del_rtc_irqc:
-	regmap_del_irq_chip(max77686->irq, max77686->rtc_irq_data);
 err_del_irqc:
 	regmap_del_irq_chip(max77686->irq, max77686->irq_data);
-err_unregister_i2c:
-	if (max77686->type == TYPE_MAX77686)
-		i2c_unregister_device(max77686->rtc);
 
 	return ret;
 }
@@ -334,12 +258,8 @@
 
 	mfd_remove_devices(max77686->dev);
 
-	regmap_del_irq_chip(max77686->irq, max77686->rtc_irq_data);
 	regmap_del_irq_chip(max77686->irq, max77686->irq_data);
 
-	if (max77686->type == TYPE_MAX77686)
-		i2c_unregister_device(max77686->rtc);
-
 	return 0;
 }
 
diff --git a/drivers/net/ethernet/intel/Kconfig b/drivers/net/ethernet/intel/Kconfig
index fa593dd..3772f3a 100644
--- a/drivers/net/ethernet/intel/Kconfig
+++ b/drivers/net/ethernet/intel/Kconfig
@@ -83,6 +83,15 @@
 	  To compile this driver as a module, choose M here. The module
 	  will be called e1000e.
 
+config E1000E_HWTS
+	bool "Support HW cross-timestamp on PCH devices"
+	default y
+	depends on E1000E && X86
+	---help---
+	 Say Y to enable hardware-supported cross-timestamping on PCH
+	 devices. The cross-timestamp is available through the PTP clock
+	 driver's precise cross-timestamp ioctl (PTP_SYS_OFFSET_PRECISE).
+
 config IGB
 	tristate "Intel(R) 82575/82576 PCI-Express Gigabit Ethernet support"
 	depends on PCI
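
The help text above points userspace at the PTP_SYS_OFFSET_PRECISE ioctl. Below is a minimal userspace sketch of that call, not part of this patch; the /dev/ptp0 path is an assumption (use the PHC index reported for the e1000e interface, e.g. by ethtool -T).

/* Minimal sketch, assuming the e1000e PHC is exposed as /dev/ptp0. */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/ptp_clock.h>

int main(void)
{
	struct ptp_sys_offset_precise cts = { 0 };
	int fd = open("/dev/ptp0", O_RDWR);	/* assumed PHC device node */

	if (fd < 0 || ioctl(fd, PTP_SYS_OFFSET_PRECISE, &cts) < 0) {
		perror("PTP_SYS_OFFSET_PRECISE");
		return 1;
	}

	/* Device (PHC) time and system realtime captured at the same instant */
	printf("device   %lld.%09u\n",
	       (long long)cts.device.sec, cts.device.nsec);
	printf("realtime %lld.%09u\n",
	       (long long)cts.sys_realtime.sec, cts.sys_realtime.nsec);
	close(fd);
	return 0;
}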
diff --git a/drivers/net/ethernet/intel/e1000e/defines.h b/drivers/net/ethernet/intel/e1000e/defines.h
index f7c7804..0641c00 100644
--- a/drivers/net/ethernet/intel/e1000e/defines.h
+++ b/drivers/net/ethernet/intel/e1000e/defines.h
@@ -528,6 +528,11 @@
 #define E1000_RXCW_C          0x20000000        /* Receive config */
 #define E1000_RXCW_SYNCH      0x40000000        /* Receive config synch */
 
+/* HH Time Sync */
+#define E1000_TSYNCTXCTL_MAX_ALLOWED_DLY_MASK	0x0000F000 /* max delay */
+#define E1000_TSYNCTXCTL_SYNC_COMP		0x40000000 /* sync complete */
+#define E1000_TSYNCTXCTL_START_SYNC		0x80000000 /* initiate sync */
+
 #define E1000_TSYNCTXCTL_VALID		0x00000001 /* Tx timestamp valid */
 #define E1000_TSYNCTXCTL_ENABLED	0x00000010 /* enable Tx timestamping */
 
diff --git a/drivers/net/ethernet/intel/e1000e/ptp.c b/drivers/net/ethernet/intel/e1000e/ptp.c
index 25a0ad5..e2ff3ef 100644
--- a/drivers/net/ethernet/intel/e1000e/ptp.c
+++ b/drivers/net/ethernet/intel/e1000e/ptp.c
@@ -26,6 +26,12 @@
 
 #include "e1000.h"
 
+#ifdef CONFIG_E1000E_HWTS
+#include <linux/clocksource.h>
+#include <linux/ktime.h>
+#include <asm/tsc.h>
+#endif
+
 /**
  * e1000e_phc_adjfreq - adjust the frequency of the hardware clock
  * @ptp: ptp clock structure
@@ -98,6 +104,78 @@
 	return 0;
 }
 
+#ifdef CONFIG_E1000E_HWTS
+#define MAX_HW_WAIT_COUNT (3)
+
+/**
+ * e1000e_phc_get_syncdevicetime - Callback given to the timekeeping code to read the system/device clocks
+ * @device: current device time
+ * @system: system counter value read synchronously with device time
+ * @ctx: context provided by timekeeping code
+ *
+ * Read device and system (ART) clock simultaneously and return the corrected
+ * clock values in ns.
+ **/
+static int e1000e_phc_get_syncdevicetime(ktime_t *device,
+					 struct system_counterval_t *system,
+					 void *ctx)
+{
+	struct e1000_adapter *adapter = (struct e1000_adapter *)ctx;
+	struct e1000_hw *hw = &adapter->hw;
+	unsigned long flags;
+	int i;
+	u32 tsync_ctrl;
+	cycle_t dev_cycles;
+	cycle_t sys_cycles;
+
+	tsync_ctrl = er32(TSYNCTXCTL);
+	tsync_ctrl |= E1000_TSYNCTXCTL_START_SYNC |
+		E1000_TSYNCTXCTL_MAX_ALLOWED_DLY_MASK;
+	ew32(TSYNCTXCTL, tsync_ctrl);
+	for (i = 0; i < MAX_HW_WAIT_COUNT; ++i) {
+		udelay(1);
+		tsync_ctrl = er32(TSYNCTXCTL);
+		if (tsync_ctrl & E1000_TSYNCTXCTL_SYNC_COMP)
+			break;
+	}
+
+	if (i == MAX_HW_WAIT_COUNT)
+		return -ETIMEDOUT;
+
+	dev_cycles = er32(SYSSTMPH);
+	dev_cycles <<= 32;
+	dev_cycles |= er32(SYSSTMPL);
+	spin_lock_irqsave(&adapter->systim_lock, flags);
+	*device = ns_to_ktime(timecounter_cyc2time(&adapter->tc, dev_cycles));
+	spin_unlock_irqrestore(&adapter->systim_lock, flags);
+
+	sys_cycles = er32(PLTSTMPH);
+	sys_cycles <<= 32;
+	sys_cycles |= er32(PLTSTMPL);
+	*system = convert_art_to_tsc(sys_cycles);
+
+	return 0;
+}
+
+/**
+ * e1000e_phc_getcrosststamp - Reads the current system/device cross timestamp
+ * @ptp: ptp clock structure
+ * @xtstamp: structure containing the device and system cross timestamp
+ *
+ * Read device and system (ART) clock simultaneously and return the scaled
+ * clock values in ns.
+ **/
+static int e1000e_phc_getcrosststamp(struct ptp_clock_info *ptp,
+				     struct system_device_crosststamp *xtstamp)
+{
+	struct e1000_adapter *adapter = container_of(ptp, struct e1000_adapter,
+						     ptp_clock_info);
+
+	return get_device_system_crosststamp(e1000e_phc_get_syncdevicetime,
+						adapter, NULL, xtstamp);
+}
+#endif /* CONFIG_E1000E_HWTS */
+
 /**
  * e1000e_phc_gettime - Reads the current time from the hardware clock
  * @ptp: ptp clock structure
@@ -236,6 +314,13 @@
 		break;
 	}
 
+#ifdef CONFIG_E1000E_HWTS
+	/* CPU must have ART and GBe must be from Sunrise Point or greater */
+	if (hw->mac.type >= e1000_pch_spt && boot_cpu_has(X86_FEATURE_ART))
+		adapter->ptp_clock_info.getcrosststamp =
+			e1000e_phc_getcrosststamp;
+#endif /* CONFIG_E1000E_HWTS */
+
 	INIT_DELAYED_WORK(&adapter->systim_overflow_work,
 			  e1000e_systim_overflow_work);
 
diff --git a/drivers/net/ethernet/intel/e1000e/regs.h b/drivers/net/ethernet/intel/e1000e/regs.h
index 1d5e0b7..0cb4d36 100644
--- a/drivers/net/ethernet/intel/e1000e/regs.h
+++ b/drivers/net/ethernet/intel/e1000e/regs.h
@@ -245,6 +245,10 @@
 #define E1000_SYSTIML	0x0B600	/* System time register Low - RO */
 #define E1000_SYSTIMH	0x0B604	/* System time register High - RO */
 #define E1000_TIMINCA	0x0B608	/* Increment attributes register - RW */
+#define E1000_SYSSTMPL  0x0B648 /* HH Timesync system stamp low register */
+#define E1000_SYSSTMPH  0x0B64C /* HH Timesync system stamp hi register */
+#define E1000_PLTSTMPL  0x0B640 /* HH Timesync platform stamp low register */
+#define E1000_PLTSTMPH  0x0B644 /* HH Timesync platform stamp hi register */
 #define E1000_RXMTRL	0x0B634	/* Time sync Rx EtherType and Msg Type - RW */
 #define E1000_RXUDP	0x0B638	/* Time Sync Rx UDP Port - RW */
 
diff --git a/drivers/pci/hotplug/s390_pci_hpc.c b/drivers/pci/hotplug/s390_pci_hpc.c
index eb5efae..50b8b7d 100644
--- a/drivers/pci/hotplug/s390_pci_hpc.c
+++ b/drivers/pci/hotplug/s390_pci_hpc.c
@@ -93,13 +93,17 @@
 static int disable_slot(struct hotplug_slot *hotplug_slot)
 {
 	struct slot *slot = hotplug_slot->private;
+	struct pci_dev *pdev;
 	int rc;
 
 	if (!zpci_fn_configured(slot->zdev->state))
 		return -EIO;
 
-	if (slot->zdev->pdev)
-		pci_stop_and_remove_bus_device_locked(slot->zdev->pdev);
+	pdev = pci_get_slot(slot->zdev->bus, ZPCI_DEVFN);
+	if (pdev) {
+		pci_stop_and_remove_bus_device_locked(pdev);
+		pci_dev_put(pdev);
+	}
 
 	rc = zpci_disable_device(slot->zdev);
 	if (rc)
diff --git a/drivers/pinctrl/Kconfig b/drivers/pinctrl/Kconfig
index 99a4c10..fb8200b 100644
--- a/drivers/pinctrl/Kconfig
+++ b/drivers/pinctrl/Kconfig
@@ -79,7 +79,7 @@
 	  controller available on sama5d2 SoC.
 
 config PINCTRL_AMD
-	bool "AMD GPIO pin control"
+	tristate "AMD GPIO pin control"
 	depends on GPIOLIB
 	select GPIOLIB_IRQCHIP
 	select PINCONF
@@ -168,37 +168,6 @@
 	select PINCONF
 	select GPIOLIB_IRQCHIP
 
-config PINCTRL_TEGRA
-	bool
-	select PINMUX
-	select PINCONF
-
-config PINCTRL_TEGRA20
-	bool
-	select PINCTRL_TEGRA
-
-config PINCTRL_TEGRA30
-	bool
-	select PINCTRL_TEGRA
-
-config PINCTRL_TEGRA114
-	bool
-	select PINCTRL_TEGRA
-
-config PINCTRL_TEGRA124
-	bool
-	select PINCTRL_TEGRA
-
-config PINCTRL_TEGRA210
-	bool
-	select PINCTRL_TEGRA
-
-config PINCTRL_TEGRA_XUSB
-	def_bool y if ARCH_TEGRA
-	select GENERIC_PHY
-	select PINCONF
-	select PINMUX
-
 config PINCTRL_TZ1090
 	bool "Toumaz Xenif TZ1090 pin control driver"
 	depends on SOC_TZ1090
@@ -238,6 +207,23 @@
 	  open drain configuration for the Palmas series devices like
 	  TPS65913, TPS80036 etc.
 
+config PINCTRL_PIC32
+	bool "Microchip PIC32 pin controller driver"
+	depends on OF
+	depends on MACH_PIC32
+	select PINMUX
+	select GENERIC_PINCONF
+	select GPIOLIB_IRQCHIP
+	select OF_GPIO
+	help
+	  This is the pin controller and GPIO driver for Microchip PIC32
+	  microcontrollers. This option is selected automatically when
+	  building for a supported PIC32 machine and architecture.
+
+config PINCTRL_PIC32MZDA
+	def_bool y if PIC32MZDA
+	select PINCTRL_PIC32
+
 config PINCTRL_ZYNQ
 	bool "Pinctrl driver for Xilinx Zynq"
 	depends on ARCH_ZYNQ
@@ -257,7 +243,9 @@
 source "drivers/pinctrl/samsung/Kconfig"
 source "drivers/pinctrl/sh-pfc/Kconfig"
 source "drivers/pinctrl/spear/Kconfig"
+source "drivers/pinctrl/stm32/Kconfig"
 source "drivers/pinctrl/sunxi/Kconfig"
+source "drivers/pinctrl/tegra/Kconfig"
 source "drivers/pinctrl/uniphier/Kconfig"
 source "drivers/pinctrl/vt8500/Kconfig"
 source "drivers/pinctrl/mediatek/Kconfig"
diff --git a/drivers/pinctrl/Makefile b/drivers/pinctrl/Makefile
index bf1b5ca..e4bc115 100644
--- a/drivers/pinctrl/Makefile
+++ b/drivers/pinctrl/Makefile
@@ -18,17 +18,12 @@
 obj-$(CONFIG_PINCTRL_FALCON)	+= pinctrl-falcon.o
 obj-$(CONFIG_PINCTRL_MESON)	+= meson/
 obj-$(CONFIG_PINCTRL_PALMAS)	+= pinctrl-palmas.o
+obj-$(CONFIG_PINCTRL_PIC32)	+= pinctrl-pic32.o
 obj-$(CONFIG_PINCTRL_PISTACHIO)	+= pinctrl-pistachio.o
 obj-$(CONFIG_PINCTRL_ROCKCHIP)	+= pinctrl-rockchip.o
 obj-$(CONFIG_PINCTRL_SINGLE)	+= pinctrl-single.o
 obj-$(CONFIG_PINCTRL_SIRF)	+= sirf/
-obj-$(CONFIG_PINCTRL_TEGRA)	+= pinctrl-tegra.o
-obj-$(CONFIG_PINCTRL_TEGRA20)	+= pinctrl-tegra20.o
-obj-$(CONFIG_PINCTRL_TEGRA30)	+= pinctrl-tegra30.o
-obj-$(CONFIG_PINCTRL_TEGRA114)	+= pinctrl-tegra114.o
-obj-$(CONFIG_PINCTRL_TEGRA124)	+= pinctrl-tegra124.o
-obj-$(CONFIG_PINCTRL_TEGRA210)	+= pinctrl-tegra210.o
-obj-$(CONFIG_PINCTRL_TEGRA_XUSB)	+= pinctrl-tegra-xusb.o
+obj-$(CONFIG_PINCTRL_TEGRA)	+= tegra/
 obj-$(CONFIG_PINCTRL_TZ1090)	+= pinctrl-tz1090.o
 obj-$(CONFIG_PINCTRL_TZ1090_PDC)	+= pinctrl-tz1090-pdc.o
 obj-$(CONFIG_PINCTRL_U300)	+= pinctrl-u300.o
@@ -46,12 +41,13 @@
 obj-$(CONFIG_X86)		+= intel/
 obj-$(CONFIG_PINCTRL_MVEBU)	+= mvebu/
 obj-y				+= nomadik/
-obj-$(CONFIG_ARCH_PXA)		+= pxa/
+obj-$(CONFIG_PINCTRL_PXA)	+= pxa/
 obj-$(CONFIG_ARCH_QCOM)		+= qcom/
 obj-$(CONFIG_PINCTRL_SAMSUNG)	+= samsung/
 obj-$(CONFIG_PINCTRL_SH_PFC)	+= sh-pfc/
 obj-$(CONFIG_PINCTRL_SPEAR)	+= spear/
-obj-$(CONFIG_ARCH_SUNXI)	+= sunxi/
+obj-$(CONFIG_PINCTRL_STM32)	+= stm32/
+obj-$(CONFIG_PINCTRL_SUNXI)	+= sunxi/
 obj-$(CONFIG_PINCTRL_UNIPHIER)	+= uniphier/
 obj-$(CONFIG_ARCH_VT8500)	+= vt8500/
-obj-$(CONFIG_ARCH_MEDIATEK)	+= mediatek/
+obj-$(CONFIG_PINCTRL_MTK)	+= mediatek/
diff --git a/drivers/pinctrl/bcm/pinctrl-bcm2835.c b/drivers/pinctrl/bcm/pinctrl-bcm2835.c
index 0f5997c..08b1d93 100644
--- a/drivers/pinctrl/bcm/pinctrl-bcm2835.c
+++ b/drivers/pinctrl/bcm/pinctrl-bcm2835.c
@@ -779,7 +779,7 @@
 		}
 		if (num_pulls) {
 			err = of_property_read_u32_index(np, "brcm,pull",
-					(num_funcs > 1) ? i : 0, &pull);
+					(num_pulls > 1) ? i : 0, &pull);
 			if (err)
 				goto out;
 			err = bcm2835_pctl_dt_node_to_map_pull(pc, np, pin,
diff --git a/drivers/pinctrl/core.c b/drivers/pinctrl/core.c
index 2686a44..f67a8b7 100644
--- a/drivers/pinctrl/core.c
+++ b/drivers/pinctrl/core.c
@@ -481,6 +481,30 @@
 }
 EXPORT_SYMBOL_GPL(pinctrl_get_group_pins);
 
+struct pinctrl_gpio_range *
+pinctrl_find_gpio_range_from_pin_nolock(struct pinctrl_dev *pctldev,
+					unsigned int pin)
+{
+	struct pinctrl_gpio_range *range;
+
+	/* Loop over the ranges */
+	list_for_each_entry(range, &pctldev->gpio_ranges, node) {
+		/* Check if we're in the valid range */
+		if (range->pins) {
+			int a;
+			for (a = 0; a < range->npins; a++) {
+				if (range->pins[a] == pin)
+					return range;
+			}
+		} else if (pin >= range->pin_base &&
+			   pin < range->pin_base + range->npins)
+			return range;
+	}
+
+	return NULL;
+}
+EXPORT_SYMBOL_GPL(pinctrl_find_gpio_range_from_pin_nolock);
+
 /**
  * pinctrl_find_gpio_range_from_pin() - locate the GPIO range for a pin
  * @pctldev: the pin controller device to look in
@@ -493,22 +517,9 @@
 	struct pinctrl_gpio_range *range;
 
 	mutex_lock(&pctldev->mutex);
-	/* Loop over the ranges */
-	list_for_each_entry(range, &pctldev->gpio_ranges, node) {
-		/* Check if we're in the valid range */
-		if (range->pins) {
-			int a;
-			for (a = 0; a < range->npins; a++) {
-				if (range->pins[a] == pin)
-					goto out;
-			}
-		} else if (pin >= range->pin_base &&
-			   pin < range->pin_base + range->npins)
-			goto out;
-	}
-	range = NULL;
-out:
+	range = pinctrl_find_gpio_range_from_pin_nolock(pctldev, pin);
 	mutex_unlock(&pctldev->mutex);
+
 	return range;
 }
 EXPORT_SYMBOL_GPL(pinctrl_find_gpio_range_from_pin);
diff --git a/drivers/pinctrl/core.h b/drivers/pinctrl/core.h
index b24ea84..ca08723 100644
--- a/drivers/pinctrl/core.h
+++ b/drivers/pinctrl/core.h
@@ -182,6 +182,10 @@
 	return radix_tree_lookup(&pctldev->pin_desc_tree, pin);
 }
 
+extern struct pinctrl_gpio_range *
+pinctrl_find_gpio_range_from_pin_nolock(struct pinctrl_dev *pctldev,
+					unsigned int pin);
+
 int pinctrl_register_map(struct pinctrl_map const *maps, unsigned num_maps,
 			 bool dup);
 void pinctrl_unregister_map(struct pinctrl_map const *map);
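
The new _nolock helper declared above is meant for callers that already hold pctldev->mutex, where the locked pinctrl_find_gpio_range_from_pin() wrapper cannot be used. A minimal sketch of such a caller follows; the helper name pin_to_gpio_chip() is hypothetical and only illustrates the calling convention.

/*
 * Minimal sketch, assuming the caller already holds pctldev->mutex
 * (for example while walking pins under the lock). The helper name
 * pin_to_gpio_chip() is hypothetical, for illustration only.
 */
static struct gpio_chip *pin_to_gpio_chip(struct pinctrl_dev *pctldev,
					  unsigned int pin)
{
	struct pinctrl_gpio_range *range;

	/* pctldev->mutex is held by the caller, so use the nolock variant */
	range = pinctrl_find_gpio_range_from_pin_nolock(pctldev, pin);

	return range ? range->gc : NULL;
}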
diff --git a/drivers/pinctrl/freescale/pinctrl-imx.c b/drivers/pinctrl/freescale/pinctrl-imx.c
index a5bb939..4621051 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx.c
@@ -15,6 +15,7 @@
 #include <linux/err.h>
 #include <linux/init.h>
 #include <linux/io.h>
+#include <linux/mfd/syscon.h>
 #include <linux/module.h>
 #include <linux/of.h>
 #include <linux/of_device.h>
@@ -24,6 +25,7 @@
 #include <linux/pinctrl/pinctrl.h>
 #include <linux/pinctrl/pinmux.h>
 #include <linux/slab.h>
+#include <linux/regmap.h>
 
 #include "../core.h"
 #include "pinctrl-imx.h"
@@ -341,6 +343,31 @@
 	return 0;
 }
 
+static void imx_pmx_gpio_disable_free(struct pinctrl_dev *pctldev,
+			struct pinctrl_gpio_range *range, unsigned offset)
+{
+	struct imx_pinctrl *ipctl = pinctrl_dev_get_drvdata(pctldev);
+	const struct imx_pinctrl_soc_info *info = ipctl->info;
+	const struct imx_pin_reg *pin_reg;
+	u32 reg;
+
+	/*
+	 * Only Vybrid has the input/output buffer enable flags (IBE/OBE)
+	 * They are part of the shared mux/conf register.
+	 */
+	if (!(info->flags & SHARE_MUX_CONF_REG))
+		return;
+
+	pin_reg = &info->pin_regs[offset];
+	if (pin_reg->mux_reg == -1)
+		return;
+
+	/* Clear IBE/OBE/PUE to disable the pin (Hi-Z) */
+	reg = readl(ipctl->base + pin_reg->mux_reg);
+	reg &= ~0x7;
+	writel(reg, ipctl->base + pin_reg->mux_reg);
+}
+
 static int imx_pmx_gpio_set_direction(struct pinctrl_dev *pctldev,
 	   struct pinctrl_gpio_range *range, unsigned offset, bool input)
 {
@@ -377,6 +404,7 @@
 	.get_function_groups = imx_pmx_get_groups,
 	.set_mux = imx_pmx_set,
 	.gpio_request_enable = imx_pmx_gpio_request_enable,
+	.gpio_disable_free = imx_pmx_gpio_disable_free,
 	.gpio_set_direction = imx_pmx_gpio_set_direction,
 };
 
@@ -692,10 +720,12 @@
 int imx_pinctrl_probe(struct platform_device *pdev,
 		      struct imx_pinctrl_soc_info *info)
 {
+	struct regmap_config config = { .name = "gpr" };
 	struct device_node *dev_np = pdev->dev.of_node;
 	struct device_node *np;
 	struct imx_pinctrl *ipctl;
 	struct resource *res;
+	struct regmap *gpr;
 	int ret, i;
 
 	if (!info || !info->pins || !info->npins) {
@@ -704,6 +734,12 @@
 	}
 	info->dev = &pdev->dev;
 
+	if (info->gpr_compatible) {
+		gpr = syscon_regmap_lookup_by_compatible(info->gpr_compatible);
+		if (!IS_ERR(gpr))
+			regmap_attach_dev(&pdev->dev, gpr, &config);
+	}
+
 	/* Create state holders etc for this driver */
 	ipctl = devm_kzalloc(&pdev->dev, sizeof(*ipctl), GFP_KERNEL);
 	if (!ipctl)
diff --git a/drivers/pinctrl/freescale/pinctrl-imx.h b/drivers/pinctrl/freescale/pinctrl-imx.h
index 2a592f6..3b8bd81 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx.h
+++ b/drivers/pinctrl/freescale/pinctrl-imx.h
@@ -82,6 +82,7 @@
 	struct imx_pmx_func *functions;
 	unsigned int nfunctions;
 	unsigned int flags;
+	const char *gpr_compatible;
 };
 
 #define SHARE_MUX_CONF_REG	0x1
diff --git a/drivers/pinctrl/freescale/pinctrl-imx50.c b/drivers/pinctrl/freescale/pinctrl-imx50.c
index 51b31df..8acc4d9 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx50.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx50.c
@@ -389,6 +389,7 @@
 static struct imx_pinctrl_soc_info imx50_pinctrl_info = {
 	.pins = imx50_pinctrl_pads,
 	.npins = ARRAY_SIZE(imx50_pinctrl_pads),
+	.gpr_compatible = "fsl,imx50-iomuxc-gpr",
 };
 
 static const struct of_device_id imx50_pinctrl_of_match[] = {
diff --git a/drivers/pinctrl/freescale/pinctrl-imx53.c b/drivers/pinctrl/freescale/pinctrl-imx53.c
index 7344d34..d39dfd6 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx53.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx53.c
@@ -452,6 +452,7 @@
 static struct imx_pinctrl_soc_info imx53_pinctrl_info = {
 	.pins = imx53_pinctrl_pads,
 	.npins = ARRAY_SIZE(imx53_pinctrl_pads),
+	.gpr_compatible = "fsl,imx53-iomuxc-gpr",
 };
 
 static const struct of_device_id imx53_pinctrl_of_match[] = {
diff --git a/drivers/pinctrl/freescale/pinctrl-imx6dl.c b/drivers/pinctrl/freescale/pinctrl-imx6dl.c
index 6805c67..5a2cdb0 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx6dl.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx6dl.c
@@ -458,6 +458,7 @@
 static struct imx_pinctrl_soc_info imx6dl_pinctrl_info = {
 	.pins = imx6dl_pinctrl_pads,
 	.npins = ARRAY_SIZE(imx6dl_pinctrl_pads),
+	.gpr_compatible = "fsl,imx6q-iomuxc-gpr",
 };
 
 static const struct of_device_id imx6dl_pinctrl_of_match[] = {
diff --git a/drivers/pinctrl/freescale/pinctrl-imx6q.c b/drivers/pinctrl/freescale/pinctrl-imx6q.c
index 4d1fcb8..7d50a36 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx6q.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx6q.c
@@ -464,6 +464,7 @@
 static struct imx_pinctrl_soc_info imx6q_pinctrl_info = {
 	.pins = imx6q_pinctrl_pads,
 	.npins = ARRAY_SIZE(imx6q_pinctrl_pads),
+	.gpr_compatible = "fsl,imx6q-iomuxc-gpr",
 };
 
 static const struct of_device_id imx6q_pinctrl_of_match[] = {
diff --git a/drivers/pinctrl/freescale/pinctrl-imx6sl.c b/drivers/pinctrl/freescale/pinctrl-imx6sl.c
index 83fa5f1..e27d17f 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx6sl.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx6sl.c
@@ -364,6 +364,7 @@
 static struct imx_pinctrl_soc_info imx6sl_pinctrl_info = {
 	.pins = imx6sl_pinctrl_pads,
 	.npins = ARRAY_SIZE(imx6sl_pinctrl_pads),
+	.gpr_compatible = "fsl,imx6sl-iomuxc-gpr",
 };
 
 static const struct of_device_id imx6sl_pinctrl_of_match[] = {
diff --git a/drivers/pinctrl/freescale/pinctrl-imx6sx.c b/drivers/pinctrl/freescale/pinctrl-imx6sx.c
index 0d78fe6..117180c 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx6sx.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx6sx.c
@@ -368,6 +368,7 @@
 static struct imx_pinctrl_soc_info imx6sx_pinctrl_info = {
 	.pins = imx6sx_pinctrl_pads,
 	.npins = ARRAY_SIZE(imx6sx_pinctrl_pads),
+	.gpr_compatible = "fsl,imx6sx-iomuxc-gpr",
 };
 
 static const struct of_device_id imx6sx_pinctrl_of_match[] = {
diff --git a/drivers/pinctrl/freescale/pinctrl-imx6ul.c b/drivers/pinctrl/freescale/pinctrl-imx6ul.c
index 08e7576..78627c7 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx6ul.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx6ul.c
@@ -284,6 +284,7 @@
 static struct imx_pinctrl_soc_info imx6ul_pinctrl_info = {
 	.pins = imx6ul_pinctrl_pads,
 	.npins = ARRAY_SIZE(imx6ul_pinctrl_pads),
+	.gpr_compatible = "fsl,imx6ul-iomuxc-gpr",
 };
 
 static struct of_device_id imx6ul_pinctrl_of_match[] = {
diff --git a/drivers/pinctrl/freescale/pinctrl-imx7d.c b/drivers/pinctrl/freescale/pinctrl-imx7d.c
index 16dc925..1c89613 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx7d.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx7d.c
@@ -359,6 +359,7 @@
 static struct imx_pinctrl_soc_info imx7d_pinctrl_info = {
 	.pins = imx7d_pinctrl_pads,
 	.npins = ARRAY_SIZE(imx7d_pinctrl_pads),
+	.gpr_compatible = "fsl,imx7d-iomuxc-gpr",
 };
 
 static struct imx_pinctrl_soc_info imx7d_lpsr_pinctrl_info = {
diff --git a/drivers/pinctrl/intel/pinctrl-intel.c b/drivers/pinctrl/intel/pinctrl-intel.c
index c0f5586..85536b4 100644
--- a/drivers/pinctrl/intel/pinctrl-intel.c
+++ b/drivers/pinctrl/intel/pinctrl-intel.c
@@ -11,13 +11,9 @@
  */
 
 #include <linux/module.h>
-#include <linux/init.h>
 #include <linux/interrupt.h>
-#include <linux/acpi.h>
-#include <linux/gpio.h>
 #include <linux/gpio/driver.h>
 #include <linux/platform_device.h>
-#include <linux/pm.h>
 #include <linux/pinctrl/pinctrl.h>
 #include <linux/pinctrl/pinmux.h>
 #include <linux/pinctrl/pinconf.h>
diff --git a/drivers/pinctrl/mediatek/Kconfig b/drivers/pinctrl/mediatek/Kconfig
index 02f6f92..4f0bc8a 100644
--- a/drivers/pinctrl/mediatek/Kconfig
+++ b/drivers/pinctrl/mediatek/Kconfig
@@ -1,6 +1,6 @@
 if ARCH_MEDIATEK || COMPILE_TEST
 
-config PINCTRL_MTK_COMMON
+config PINCTRL_MTK
 	bool
 	depends on OF
 	select PINMUX
@@ -9,17 +9,29 @@
 	select OF_GPIO
 
 # For ARMv7 SoCs
+config PINCTRL_MT2701
+	bool "Mediatek MT2701 pin control" if COMPILE_TEST && !MACH_MT2701
+	depends on OF
+	default MACH_MT2701
+	select PINCTRL_MTK
+
+config PINCTRL_MT7623
+	bool "Mediatek MT7623 pin control" if COMPILE_TEST && !MACH_MT7623
+	depends on OF
+	default MACH_MT7623
+	select PINCTRL_MTK
+
 config PINCTRL_MT8135
 	bool "Mediatek MT8135 pin control" if COMPILE_TEST && !MACH_MT8135
 	depends on OF
 	default MACH_MT8135
-	select PINCTRL_MTK_COMMON
+	select PINCTRL_MTK
 
 config PINCTRL_MT8127
 	bool "Mediatek MT8127 pin control" if COMPILE_TEST && !MACH_MT8127
 	depends on OF
 	default MACH_MT8127
-	select PINCTRL_MTK_COMMON
+	select PINCTRL_MTK
 
 # For ARMv8 SoCs
 config PINCTRL_MT8173
@@ -27,13 +39,13 @@
 	depends on OF
 	depends on ARM64 || COMPILE_TEST
 	default ARM64 && ARCH_MEDIATEK
-	select PINCTRL_MTK_COMMON
+	select PINCTRL_MTK
 
 # For PMIC
 config PINCTRL_MT6397
 	bool "Mediatek MT6397 pin control" if COMPILE_TEST && !MFD_MT6397
 	depends on OF
 	default MFD_MT6397
-	select PINCTRL_MTK_COMMON
+	select PINCTRL_MTK
 
 endif
diff --git a/drivers/pinctrl/mediatek/Makefile b/drivers/pinctrl/mediatek/Makefile
index eb923d6..3e3390a 100644
--- a/drivers/pinctrl/mediatek/Makefile
+++ b/drivers/pinctrl/mediatek/Makefile
@@ -1,8 +1,10 @@
 # Core
-obj-$(CONFIG_PINCTRL_MTK_COMMON)	+= pinctrl-mtk-common.o
+obj-y				+= pinctrl-mtk-common.o
 
 # SoC Drivers
-obj-$(CONFIG_PINCTRL_MT8135)		+= pinctrl-mt8135.o
-obj-$(CONFIG_PINCTRL_MT8127)		+= pinctrl-mt8127.o
-obj-$(CONFIG_PINCTRL_MT8173)		+= pinctrl-mt8173.o
-obj-$(CONFIG_PINCTRL_MT6397)		+= pinctrl-mt6397.o
+obj-$(CONFIG_PINCTRL_MT2701)	+= pinctrl-mt2701.o
+obj-$(CONFIG_PINCTRL_MT7623)	+= pinctrl-mt7623.o
+obj-$(CONFIG_PINCTRL_MT8135)	+= pinctrl-mt8135.o
+obj-$(CONFIG_PINCTRL_MT8127)	+= pinctrl-mt8127.o
+obj-$(CONFIG_PINCTRL_MT8173)	+= pinctrl-mt8173.o
+obj-$(CONFIG_PINCTRL_MT6397)	+= pinctrl-mt6397.o
diff --git a/drivers/pinctrl/mediatek/pinctrl-mt2701.c b/drivers/pinctrl/mediatek/pinctrl-mt2701.c
new file mode 100644
index 0000000..8d802fa
--- /dev/null
+++ b/drivers/pinctrl/mediatek/pinctrl-mt2701.c
@@ -0,0 +1,585 @@
+/*
+ * Copyright (c) 2015 MediaTek Inc.
+ * Author: Biao Huang <biao.huang@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <dt-bindings/pinctrl/mt65xx.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/regmap.h>
+
+#include "pinctrl-mtk-common.h"
+#include "pinctrl-mtk-mt2701.h"
+
+/**
+ * struct mtk_spec_pinmux_set - mode setting for special pins
+ * @pin: The pin number.
+ * @offset: The offset of extra setting register.
+ * @bit: The bit of extra setting register.
+ */
+struct mtk_spec_pinmux_set {
+	unsigned short pin;
+	unsigned short offset;
+	unsigned char bit;
+};
+
+#define MTK_PINMUX_SPEC(_pin, _offset, _bit)	\
+	{					\
+		.pin = _pin,			\
+		.offset = _offset,		\
+		.bit = _bit,			\
+	}
+
+static const struct mtk_drv_group_desc mt2701_drv_grp[] =  {
+	/* 0E4E8SR 4/8/12/16 */
+	MTK_DRV_GRP(4, 16, 1, 2, 4),
+	/* 0E2E4SR  2/4/6/8 */
+	MTK_DRV_GRP(2, 8, 1, 2, 2),
+	/* E8E4E2  2/4/6/8/10/12/14/16 */
+	MTK_DRV_GRP(2, 16, 0, 2, 2)
+};
+
+static const struct mtk_pin_drv_grp mt2701_pin_drv[] = {
+	MTK_PIN_DRV_GRP(0, 0xf50, 0, 1),
+	MTK_PIN_DRV_GRP(1, 0xf50, 0, 1),
+	MTK_PIN_DRV_GRP(2, 0xf50, 0, 1),
+	MTK_PIN_DRV_GRP(3, 0xf50, 0, 1),
+	MTK_PIN_DRV_GRP(4, 0xf50, 0, 1),
+	MTK_PIN_DRV_GRP(5, 0xf50, 0, 1),
+	MTK_PIN_DRV_GRP(6, 0xf50, 0, 1),
+	MTK_PIN_DRV_GRP(7, 0xf50, 4, 1),
+	MTK_PIN_DRV_GRP(8, 0xf50, 4, 1),
+	MTK_PIN_DRV_GRP(9, 0xf50, 4, 1),
+	MTK_PIN_DRV_GRP(10, 0xf50, 8, 1),
+	MTK_PIN_DRV_GRP(11, 0xf50, 8, 1),
+	MTK_PIN_DRV_GRP(12, 0xf50, 8, 1),
+	MTK_PIN_DRV_GRP(13, 0xf50, 8, 1),
+	MTK_PIN_DRV_GRP(14, 0xf50, 12, 0),
+	MTK_PIN_DRV_GRP(15, 0xf50, 12, 0),
+	MTK_PIN_DRV_GRP(16, 0xf60, 0, 0),
+	MTK_PIN_DRV_GRP(17, 0xf60, 0, 0),
+	MTK_PIN_DRV_GRP(18, 0xf60, 4, 0),
+	MTK_PIN_DRV_GRP(19, 0xf60, 4, 0),
+	MTK_PIN_DRV_GRP(20, 0xf60, 4, 0),
+	MTK_PIN_DRV_GRP(21, 0xf60, 4, 0),
+	MTK_PIN_DRV_GRP(22, 0xf60, 8, 0),
+	MTK_PIN_DRV_GRP(23, 0xf60, 8, 0),
+	MTK_PIN_DRV_GRP(24, 0xf60, 8, 0),
+	MTK_PIN_DRV_GRP(25, 0xf60, 8, 0),
+	MTK_PIN_DRV_GRP(26, 0xf60, 8, 0),
+	MTK_PIN_DRV_GRP(27, 0xf60, 12, 0),
+	MTK_PIN_DRV_GRP(28, 0xf60, 12, 0),
+	MTK_PIN_DRV_GRP(29, 0xf60, 12, 0),
+	MTK_PIN_DRV_GRP(30, 0xf60, 0, 0),
+	MTK_PIN_DRV_GRP(31, 0xf60, 0, 0),
+	MTK_PIN_DRV_GRP(32, 0xf60, 0, 0),
+	MTK_PIN_DRV_GRP(33, 0xf70, 0, 0),
+	MTK_PIN_DRV_GRP(34, 0xf70, 0, 0),
+	MTK_PIN_DRV_GRP(35, 0xf70, 0, 0),
+	MTK_PIN_DRV_GRP(36, 0xf70, 0, 0),
+	MTK_PIN_DRV_GRP(37, 0xf70, 0, 0),
+	MTK_PIN_DRV_GRP(38, 0xf70, 4, 0),
+	MTK_PIN_DRV_GRP(39, 0xf70, 8, 1),
+	MTK_PIN_DRV_GRP(40, 0xf70, 8, 1),
+	MTK_PIN_DRV_GRP(41, 0xf70, 8, 1),
+	MTK_PIN_DRV_GRP(42, 0xf70, 8, 1),
+	MTK_PIN_DRV_GRP(43, 0xf70, 12, 0),
+	MTK_PIN_DRV_GRP(44, 0xf70, 12, 0),
+	MTK_PIN_DRV_GRP(45, 0xf70, 12, 0),
+	MTK_PIN_DRV_GRP(47, 0xf80, 0, 0),
+	MTK_PIN_DRV_GRP(48, 0xf80, 0, 0),
+	MTK_PIN_DRV_GRP(49, 0xf80, 4, 0),
+	MTK_PIN_DRV_GRP(50, 0xf70, 4, 0),
+	MTK_PIN_DRV_GRP(51, 0xf70, 4, 0),
+	MTK_PIN_DRV_GRP(52, 0xf70, 4, 0),
+	MTK_PIN_DRV_GRP(53, 0xf80, 12, 0),
+	MTK_PIN_DRV_GRP(54, 0xf80, 12, 0),
+	MTK_PIN_DRV_GRP(55, 0xf80, 12, 0),
+	MTK_PIN_DRV_GRP(56, 0xf80, 12, 0),
+	MTK_PIN_DRV_GRP(60, 0xf90, 8, 1),
+	MTK_PIN_DRV_GRP(61, 0xf90, 8, 1),
+	MTK_PIN_DRV_GRP(62, 0xf90, 8, 1),
+	MTK_PIN_DRV_GRP(63, 0xf90, 12, 1),
+	MTK_PIN_DRV_GRP(64, 0xf90, 12, 1),
+	MTK_PIN_DRV_GRP(65, 0xf90, 12, 1),
+	MTK_PIN_DRV_GRP(66, 0xfa0, 0, 1),
+	MTK_PIN_DRV_GRP(67, 0xfa0, 0, 1),
+	MTK_PIN_DRV_GRP(68, 0xfa0, 0, 1),
+	MTK_PIN_DRV_GRP(69, 0xfa0, 0, 1),
+	MTK_PIN_DRV_GRP(70, 0xfa0, 0, 1),
+	MTK_PIN_DRV_GRP(71, 0xfa0, 0, 1),
+	MTK_PIN_DRV_GRP(72, 0xf80, 4, 0),
+	MTK_PIN_DRV_GRP(73, 0xf80, 4, 0),
+	MTK_PIN_DRV_GRP(74, 0xf80, 4, 0),
+	MTK_PIN_DRV_GRP(85, 0xda0, 0, 2),
+	MTK_PIN_DRV_GRP(86, 0xd90, 0, 2),
+	MTK_PIN_DRV_GRP(87, 0xdb0, 0, 2),
+	MTK_PIN_DRV_GRP(88, 0xdb0, 0, 2),
+	MTK_PIN_DRV_GRP(89, 0xdb0, 0, 2),
+	MTK_PIN_DRV_GRP(90, 0xdb0, 0, 2),
+	MTK_PIN_DRV_GRP(105, 0xd40, 0, 2),
+	MTK_PIN_DRV_GRP(106, 0xd30, 0, 2),
+	MTK_PIN_DRV_GRP(107, 0xd50, 0, 2),
+	MTK_PIN_DRV_GRP(108, 0xd50, 0, 2),
+	MTK_PIN_DRV_GRP(109, 0xd50, 0, 2),
+	MTK_PIN_DRV_GRP(110, 0xd50, 0, 2),
+	MTK_PIN_DRV_GRP(111, 0xce0, 0, 2),
+	MTK_PIN_DRV_GRP(112, 0xce0, 0, 2),
+	MTK_PIN_DRV_GRP(113, 0xce0, 0, 2),
+	MTK_PIN_DRV_GRP(114, 0xce0, 0, 2),
+	MTK_PIN_DRV_GRP(115, 0xce0, 0, 2),
+	MTK_PIN_DRV_GRP(116, 0xcd0, 0, 2),
+	MTK_PIN_DRV_GRP(117, 0xcc0, 0, 2),
+	MTK_PIN_DRV_GRP(118, 0xce0, 0, 2),
+	MTK_PIN_DRV_GRP(119, 0xce0, 0, 2),
+	MTK_PIN_DRV_GRP(120, 0xce0, 0, 2),
+	MTK_PIN_DRV_GRP(121, 0xce0, 0, 2),
+	MTK_PIN_DRV_GRP(126, 0xf80, 4, 0),
+	MTK_PIN_DRV_GRP(188, 0xf70, 4, 0),
+	MTK_PIN_DRV_GRP(189, 0xfe0, 8, 0),
+	MTK_PIN_DRV_GRP(190, 0xfe0, 8, 0),
+	MTK_PIN_DRV_GRP(191, 0xfe0, 8, 0),
+	MTK_PIN_DRV_GRP(192, 0xfe0, 8, 0),
+	MTK_PIN_DRV_GRP(193, 0xfe0, 8, 0),
+	MTK_PIN_DRV_GRP(194, 0xfe0, 12, 0),
+	MTK_PIN_DRV_GRP(195, 0xfe0, 12, 0),
+	MTK_PIN_DRV_GRP(196, 0xfe0, 12, 0),
+	MTK_PIN_DRV_GRP(197, 0xfe0, 12, 0),
+	MTK_PIN_DRV_GRP(198, 0xfe0, 12, 0),
+	MTK_PIN_DRV_GRP(199, 0xf50, 4, 1),
+	MTK_PIN_DRV_GRP(200, 0xfd0, 0, 0),
+	MTK_PIN_DRV_GRP(201, 0xfd0, 0, 0),
+	MTK_PIN_DRV_GRP(202, 0xfd0, 0, 0),
+	MTK_PIN_DRV_GRP(203, 0xfd0, 4, 0),
+	MTK_PIN_DRV_GRP(204, 0xfd0, 4, 0),
+	MTK_PIN_DRV_GRP(205, 0xfd0, 4, 0),
+	MTK_PIN_DRV_GRP(206, 0xfd0, 4, 0),
+	MTK_PIN_DRV_GRP(207, 0xfd0, 4, 0),
+	MTK_PIN_DRV_GRP(208, 0xfd0, 8, 0),
+	MTK_PIN_DRV_GRP(209, 0xfd0, 8, 0),
+	MTK_PIN_DRV_GRP(210, 0xfd0, 12, 1),
+	MTK_PIN_DRV_GRP(211, 0xff0, 0, 1),
+	MTK_PIN_DRV_GRP(212, 0xff0, 0, 1),
+	MTK_PIN_DRV_GRP(213, 0xff0, 0, 1),
+	MTK_PIN_DRV_GRP(214, 0xff0, 0, 1),
+	MTK_PIN_DRV_GRP(215, 0xff0, 0, 1),
+	MTK_PIN_DRV_GRP(216, 0xff0, 0, 1),
+	MTK_PIN_DRV_GRP(217, 0xff0, 0, 1),
+	MTK_PIN_DRV_GRP(218, 0xff0, 0, 1),
+	MTK_PIN_DRV_GRP(219, 0xff0, 0, 1),
+	MTK_PIN_DRV_GRP(220, 0xff0, 0, 1),
+	MTK_PIN_DRV_GRP(221, 0xff0, 0, 1),
+	MTK_PIN_DRV_GRP(222, 0xff0, 0, 1),
+	MTK_PIN_DRV_GRP(223, 0xff0, 0, 1),
+	MTK_PIN_DRV_GRP(224, 0xff0, 0, 1),
+	MTK_PIN_DRV_GRP(225, 0xff0, 0, 1),
+	MTK_PIN_DRV_GRP(226, 0xff0, 0, 1),
+	MTK_PIN_DRV_GRP(227, 0xff0, 0, 1),
+	MTK_PIN_DRV_GRP(228, 0xff0, 0, 1),
+	MTK_PIN_DRV_GRP(229, 0xff0, 0, 1),
+	MTK_PIN_DRV_GRP(230, 0xff0, 0, 1),
+	MTK_PIN_DRV_GRP(231, 0xff0, 0, 1),
+	MTK_PIN_DRV_GRP(232, 0xff0, 0, 1),
+	MTK_PIN_DRV_GRP(233, 0xff0, 0, 1),
+	MTK_PIN_DRV_GRP(234, 0xff0, 0, 1),
+	MTK_PIN_DRV_GRP(235, 0xff0, 0, 1),
+	MTK_PIN_DRV_GRP(236, 0xff0, 4, 0),
+	MTK_PIN_DRV_GRP(237, 0xff0, 4, 0),
+	MTK_PIN_DRV_GRP(238, 0xff0, 4, 0),
+	MTK_PIN_DRV_GRP(239, 0xff0, 4, 0),
+	MTK_PIN_DRV_GRP(240, 0xff0, 4, 0),
+	MTK_PIN_DRV_GRP(241, 0xff0, 4, 0),
+	MTK_PIN_DRV_GRP(242, 0xff0, 8, 0),
+	MTK_PIN_DRV_GRP(243, 0xff0, 8, 0),
+	MTK_PIN_DRV_GRP(248, 0xf00, 0, 0),
+	MTK_PIN_DRV_GRP(249, 0xfc0, 0, 2),
+	MTK_PIN_DRV_GRP(250, 0xfc0, 0, 2),
+	MTK_PIN_DRV_GRP(251, 0xfc0, 0, 2),
+	MTK_PIN_DRV_GRP(252, 0xfc0, 0, 2),
+	MTK_PIN_DRV_GRP(253, 0xfc0, 0, 2),
+	MTK_PIN_DRV_GRP(254, 0xfc0, 0, 2),
+	MTK_PIN_DRV_GRP(255, 0xfc0, 0, 2),
+	MTK_PIN_DRV_GRP(256, 0xfc0, 0, 2),
+	MTK_PIN_DRV_GRP(257, 0xce0, 0, 2),
+	MTK_PIN_DRV_GRP(258, 0xcb0, 0, 2),
+	MTK_PIN_DRV_GRP(259, 0xc90, 0, 2),
+	MTK_PIN_DRV_GRP(260, 0x3a0, 0, 2),
+	MTK_PIN_DRV_GRP(261, 0xd50, 0, 2),
+	MTK_PIN_DRV_GRP(262, 0xf00, 8, 0),
+	MTK_PIN_DRV_GRP(263, 0xf00, 8, 0),
+	MTK_PIN_DRV_GRP(264, 0xf00, 8, 0),
+	MTK_PIN_DRV_GRP(265, 0xf00, 8, 0),
+	MTK_PIN_DRV_GRP(266, 0xf00, 8, 0),
+	MTK_PIN_DRV_GRP(267, 0xf00, 8, 0),
+	MTK_PIN_DRV_GRP(268, 0xf00, 8, 0),
+	MTK_PIN_DRV_GRP(269, 0xf00, 8, 0),
+	MTK_PIN_DRV_GRP(270, 0xf00, 8, 0),
+	MTK_PIN_DRV_GRP(271, 0xf00, 8, 0),
+	MTK_PIN_DRV_GRP(272, 0xf00, 8, 0),
+	MTK_PIN_DRV_GRP(273, 0xf00, 8, 0),
+	MTK_PIN_DRV_GRP(274, 0xf00, 8, 0),
+	MTK_PIN_DRV_GRP(275, 0xf00, 8, 0),
+	MTK_PIN_DRV_GRP(276, 0xf00, 8, 0),
+	MTK_PIN_DRV_GRP(277, 0xf00, 8, 0),
+	MTK_PIN_DRV_GRP(278, 0xf70, 8, 1),
+};
+
+static const struct mtk_pin_spec_pupd_set_samereg mt2701_spec_pupd[] = {
+	MTK_PIN_PUPD_SPEC_SR(111, 0xd00, 12, 13, 14),	/* ms0 data7 */
+	MTK_PIN_PUPD_SPEC_SR(112, 0xd00, 8, 9, 10),	/* ms0 data6 */
+	MTK_PIN_PUPD_SPEC_SR(113, 0xd00, 4, 5, 6),	/* ms0 data5 */
+	MTK_PIN_PUPD_SPEC_SR(114, 0xd00, 0, 1, 2),	/* ms0 data4 */
+	MTK_PIN_PUPD_SPEC_SR(115, 0xd10, 0, 1, 2),	/* ms0 rstb */
+	MTK_PIN_PUPD_SPEC_SR(116, 0xcd0, 8, 9, 10),	/* ms0 cmd */
+	MTK_PIN_PUPD_SPEC_SR(117, 0xcc0, 8, 9, 10),	/* ms0 clk */
+	MTK_PIN_PUPD_SPEC_SR(118, 0xcf0, 12, 13, 14),	/* ms0 data3 */
+	MTK_PIN_PUPD_SPEC_SR(119, 0xcf0, 8, 9, 10),	/* ms0 data2 */
+	MTK_PIN_PUPD_SPEC_SR(120, 0xcf0, 4, 5, 6),	/* ms0 data1 */
+	MTK_PIN_PUPD_SPEC_SR(121, 0xcf0, 0, 1, 2),	/* ms0 data0 */
+
+	MTK_PIN_PUPD_SPEC_SR(105, 0xd40, 8, 9, 10),	/* ms1 cmd */
+	MTK_PIN_PUPD_SPEC_SR(106, 0xd30, 8, 9, 10),	/* ms1 clk */
+	MTK_PIN_PUPD_SPEC_SR(107, 0xd60, 0, 1, 2),	/* ms1 dat0 */
+	MTK_PIN_PUPD_SPEC_SR(108, 0xd60, 10, 9, 8),	/* ms1 dat1 */
+	MTK_PIN_PUPD_SPEC_SR(109, 0xd60, 4, 5, 6),	/* ms1 dat2 */
+	MTK_PIN_PUPD_SPEC_SR(110, 0xc60, 12, 13, 14),	/* ms1 dat3 */
+
+	MTK_PIN_PUPD_SPEC_SR(85, 0xda0, 8, 9, 10),	/* ms2 cmd */
+	MTK_PIN_PUPD_SPEC_SR(86, 0xd90, 8, 9, 10),	/* ms2 clk */
+	MTK_PIN_PUPD_SPEC_SR(87, 0xdc0, 0, 1, 2),	/* ms2 dat0 */
+	MTK_PIN_PUPD_SPEC_SR(88, 0xdc0, 10, 9, 8),	/* ms2 dat1 */
+	MTK_PIN_PUPD_SPEC_SR(89, 0xdc0, 4, 5, 6),	/* ms2 dat2 */
+	MTK_PIN_PUPD_SPEC_SR(90, 0xdc0, 12, 13, 14),	/* ms2 dat3 */
+
+	MTK_PIN_PUPD_SPEC_SR(249, 0x140, 0, 1, 2),	/* ms0e rstb */
+	MTK_PIN_PUPD_SPEC_SR(250, 0x130, 12, 13, 14),	/* ms0e dat7 */
+	MTK_PIN_PUPD_SPEC_SR(251, 0x130, 8, 9, 10),	/* ms0e dat6 */
+	MTK_PIN_PUPD_SPEC_SR(252, 0x130, 4, 5, 6),	/* ms0e dat5 */
+	MTK_PIN_PUPD_SPEC_SR(253, 0x130, 0, 1, 2),	/* ms0e dat4 */
+	MTK_PIN_PUPD_SPEC_SR(254, 0xf40, 12, 13, 14),	/* ms0e dat3 */
+	MTK_PIN_PUPD_SPEC_SR(255, 0xf40, 8, 9, 10),	/* ms0e dat2 */
+	MTK_PIN_PUPD_SPEC_SR(256, 0xf40, 4, 5, 6),	/* ms0e dat1 */
+	MTK_PIN_PUPD_SPEC_SR(257, 0xf40, 0, 1, 2),	/* ms0e dat0 */
+	MTK_PIN_PUPD_SPEC_SR(258, 0xcb0, 8, 9, 10),	/* ms0e cmd */
+	MTK_PIN_PUPD_SPEC_SR(259, 0xc90, 8, 9, 10),	/* ms0e clk */
+	MTK_PIN_PUPD_SPEC_SR(261, 0x140, 8, 9, 10),	/* ms1 ins */
+};
+
+static int mt2701_spec_pull_set(struct regmap *regmap, unsigned int pin,
+		unsigned char align, bool isup, unsigned int r1r0)
+{
+	return mtk_pctrl_spec_pull_set_samereg(regmap, mt2701_spec_pupd,
+		ARRAY_SIZE(mt2701_spec_pupd), pin, align, isup, r1r0);
+}
+
+static const struct mtk_pin_ies_smt_set mt2701_ies_set[] = {
+	MTK_PIN_IES_SMT_SPEC(0, 6, 0xb20, 0),
+	MTK_PIN_IES_SMT_SPEC(7, 9, 0xb20, 1),
+	MTK_PIN_IES_SMT_SPEC(10, 13, 0xb30, 3),
+	MTK_PIN_IES_SMT_SPEC(14, 15, 0xb30, 13),
+	MTK_PIN_IES_SMT_SPEC(16, 17, 0xb40, 7),
+	MTK_PIN_IES_SMT_SPEC(18, 21, 0xb40, 13),
+	MTK_PIN_IES_SMT_SPEC(22, 26, 0xb40, 13),
+	MTK_PIN_IES_SMT_SPEC(27, 29, 0xb40, 13),
+	MTK_PIN_IES_SMT_SPEC(30, 32, 0xb40, 7),
+	MTK_PIN_IES_SMT_SPEC(33, 37, 0xb40, 13),
+	MTK_PIN_IES_SMT_SPEC(38, 38, 0xb20, 13),
+	MTK_PIN_IES_SMT_SPEC(39, 42, 0xb40, 13),
+	MTK_PIN_IES_SMT_SPEC(43, 45, 0xb20, 10),
+	MTK_PIN_IES_SMT_SPEC(47, 48, 0xb20, 11),
+	MTK_PIN_IES_SMT_SPEC(49, 49, 0xb20, 12),
+	MTK_PIN_IES_SMT_SPEC(50, 52, 0xb20, 13),
+	MTK_PIN_IES_SMT_SPEC(53, 56, 0xb20, 14),
+	MTK_PIN_IES_SMT_SPEC(57, 58, 0xb20, 15),
+	MTK_PIN_IES_SMT_SPEC(59, 59, 0xb30, 10),
+	MTK_PIN_IES_SMT_SPEC(60, 62, 0xb30, 0),
+	MTK_PIN_IES_SMT_SPEC(63, 65, 0xb30, 1),
+	MTK_PIN_IES_SMT_SPEC(66, 71, 0xb30, 2),
+	MTK_PIN_IES_SMT_SPEC(72, 74, 0xb20, 12),
+	MTK_PIN_IES_SMT_SPEC(75, 76, 0xb30, 3),
+	MTK_PIN_IES_SMT_SPEC(77, 78, 0xb30, 4),
+	MTK_PIN_IES_SMT_SPEC(79, 82, 0xb30, 5),
+	MTK_PIN_IES_SMT_SPEC(83, 84, 0xb30, 2),
+	MTK_PIN_IES_SMT_SPEC(85, 85, 0xda0, 4),
+	MTK_PIN_IES_SMT_SPEC(86, 86, 0xd90, 4),
+	MTK_PIN_IES_SMT_SPEC(87, 90, 0xdb0, 4),
+	MTK_PIN_IES_SMT_SPEC(101, 104, 0xb30, 6),
+	MTK_PIN_IES_SMT_SPEC(105, 105, 0xd40, 4),
+	MTK_PIN_IES_SMT_SPEC(106, 106, 0xd30, 4),
+	MTK_PIN_IES_SMT_SPEC(107, 110, 0xd50, 4),
+	MTK_PIN_IES_SMT_SPEC(111, 115, 0xce0, 4),
+	MTK_PIN_IES_SMT_SPEC(116, 116, 0xcd0, 4),
+	MTK_PIN_IES_SMT_SPEC(117, 117, 0xcc0, 4),
+	MTK_PIN_IES_SMT_SPEC(118, 121, 0xce0, 4),
+	MTK_PIN_IES_SMT_SPEC(122, 125, 0xb30, 7),
+	MTK_PIN_IES_SMT_SPEC(126, 126, 0xb20, 12),
+	MTK_PIN_IES_SMT_SPEC(127, 142, 0xb30, 9),
+	MTK_PIN_IES_SMT_SPEC(143, 160, 0xb30, 10),
+	MTK_PIN_IES_SMT_SPEC(161, 168, 0xb30, 12),
+	MTK_PIN_IES_SMT_SPEC(169, 183, 0xb30, 10),
+	MTK_PIN_IES_SMT_SPEC(184, 186, 0xb30, 9),
+	MTK_PIN_IES_SMT_SPEC(187, 187, 0xb30, 14),
+	MTK_PIN_IES_SMT_SPEC(188, 188, 0xb20, 13),
+	MTK_PIN_IES_SMT_SPEC(189, 193, 0xb30, 15),
+	MTK_PIN_IES_SMT_SPEC(194, 198, 0xb40, 0),
+	MTK_PIN_IES_SMT_SPEC(199, 199, 0xb20, 1),
+	MTK_PIN_IES_SMT_SPEC(200, 202, 0xb40, 1),
+	MTK_PIN_IES_SMT_SPEC(203, 207, 0xb40, 2),
+	MTK_PIN_IES_SMT_SPEC(208, 209, 0xb40, 3),
+	MTK_PIN_IES_SMT_SPEC(210, 210, 0xb40, 4),
+	MTK_PIN_IES_SMT_SPEC(211, 235, 0xb40, 5),
+	MTK_PIN_IES_SMT_SPEC(236, 241, 0xb40, 6),
+	MTK_PIN_IES_SMT_SPEC(242, 243, 0xb40, 7),
+	MTK_PIN_IES_SMT_SPEC(244, 247, 0xb40, 8),
+	MTK_PIN_IES_SMT_SPEC(248, 248, 0xb40, 9),
+	MTK_PIN_IES_SMT_SPEC(249, 257, 0xfc0, 4),
+	MTK_PIN_IES_SMT_SPEC(258, 258, 0xcb0, 4),
+	MTK_PIN_IES_SMT_SPEC(259, 259, 0xc90, 4),
+	MTK_PIN_IES_SMT_SPEC(260, 260, 0x3a0, 4),
+	MTK_PIN_IES_SMT_SPEC(261, 261, 0xd50, 4),
+	MTK_PIN_IES_SMT_SPEC(262, 277, 0xb40, 12),
+	MTK_PIN_IES_SMT_SPEC(278, 278, 0xb40, 13),
+};
+
+static const struct mtk_pin_ies_smt_set mt2701_smt_set[] = {
+	MTK_PIN_IES_SMT_SPEC(0, 6, 0xb50, 0),
+	MTK_PIN_IES_SMT_SPEC(7, 9, 0xb50, 1),
+	MTK_PIN_IES_SMT_SPEC(10, 13, 0xb60, 3),
+	MTK_PIN_IES_SMT_SPEC(14, 15, 0xb60, 13),
+	MTK_PIN_IES_SMT_SPEC(16, 17, 0xb70, 7),
+	MTK_PIN_IES_SMT_SPEC(18, 21, 0xb70, 13),
+	MTK_PIN_IES_SMT_SPEC(22, 26, 0xb70, 13),
+	MTK_PIN_IES_SMT_SPEC(27, 29, 0xb70, 13),
+	MTK_PIN_IES_SMT_SPEC(30, 32, 0xb70, 7),
+	MTK_PIN_IES_SMT_SPEC(33, 37, 0xb70, 13),
+	MTK_PIN_IES_SMT_SPEC(38, 38, 0xb50, 13),
+	MTK_PIN_IES_SMT_SPEC(39, 42, 0xb70, 13),
+	MTK_PIN_IES_SMT_SPEC(43, 45, 0xb50, 10),
+	MTK_PIN_IES_SMT_SPEC(47, 48, 0xb50, 11),
+	MTK_PIN_IES_SMT_SPEC(49, 49, 0xb50, 12),
+	MTK_PIN_IES_SMT_SPEC(50, 52, 0xb50, 13),
+	MTK_PIN_IES_SMT_SPEC(53, 56, 0xb50, 14),
+	MTK_PIN_IES_SMT_SPEC(57, 58, 0xb50, 15),
+	MTK_PIN_IES_SMT_SPEC(59, 59, 0xb60, 10),
+	MTK_PIN_IES_SMT_SPEC(60, 62, 0xb60, 0),
+	MTK_PIN_IES_SMT_SPEC(63, 65, 0xb60, 1),
+	MTK_PIN_IES_SMT_SPEC(66, 71, 0xb60, 2),
+	MTK_PIN_IES_SMT_SPEC(72, 74, 0xb50, 12),
+	MTK_PIN_IES_SMT_SPEC(75, 76, 0xb60, 3),
+	MTK_PIN_IES_SMT_SPEC(77, 78, 0xb60, 4),
+	MTK_PIN_IES_SMT_SPEC(79, 82, 0xb60, 5),
+	MTK_PIN_IES_SMT_SPEC(83, 84, 0xb60, 2),
+	MTK_PIN_IES_SMT_SPEC(85, 85, 0xda0, 11),
+	MTK_PIN_IES_SMT_SPEC(86, 86, 0xd90, 11),
+	MTK_PIN_IES_SMT_SPEC(87, 87, 0xdc0, 3),
+	MTK_PIN_IES_SMT_SPEC(88, 88, 0xdc0, 7),
+	MTK_PIN_IES_SMT_SPEC(89, 89, 0xdc0, 11),
+	MTK_PIN_IES_SMT_SPEC(90, 90, 0xdc0, 15),
+	MTK_PIN_IES_SMT_SPEC(101, 104, 0xb60, 6),
+	MTK_PIN_IES_SMT_SPEC(105, 105, 0xd40, 11),
+	MTK_PIN_IES_SMT_SPEC(106, 106, 0xd30, 11),
+	MTK_PIN_IES_SMT_SPEC(107, 107, 0xd60, 3),
+	MTK_PIN_IES_SMT_SPEC(108, 108, 0xd60, 7),
+	MTK_PIN_IES_SMT_SPEC(109, 109, 0xd60, 11),
+	MTK_PIN_IES_SMT_SPEC(110, 110, 0xd60, 15),
+	MTK_PIN_IES_SMT_SPEC(111, 111, 0xd00, 15),
+	MTK_PIN_IES_SMT_SPEC(112, 112, 0xd00, 11),
+	MTK_PIN_IES_SMT_SPEC(113, 113, 0xd00, 7),
+	MTK_PIN_IES_SMT_SPEC(114, 114, 0xd00, 3),
+	MTK_PIN_IES_SMT_SPEC(115, 115, 0xd10, 3),
+	MTK_PIN_IES_SMT_SPEC(116, 116, 0xcd0, 11),
+	MTK_PIN_IES_SMT_SPEC(117, 117, 0xcc0, 11),
+	MTK_PIN_IES_SMT_SPEC(118, 118, 0xcf0, 15),
+	MTK_PIN_IES_SMT_SPEC(119, 119, 0xcf0, 11),
+	MTK_PIN_IES_SMT_SPEC(120, 120, 0xcf0, 7),
+	MTK_PIN_IES_SMT_SPEC(121, 121, 0xcf0, 3),
+	MTK_PIN_IES_SMT_SPEC(122, 125, 0xb60, 7),
+	MTK_PIN_IES_SMT_SPEC(126, 126, 0xb50, 12),
+	MTK_PIN_IES_SMT_SPEC(127, 142, 0xb60, 9),
+	MTK_PIN_IES_SMT_SPEC(143, 160, 0xb60, 10),
+	MTK_PIN_IES_SMT_SPEC(161, 168, 0xb60, 12),
+	MTK_PIN_IES_SMT_SPEC(169, 183, 0xb60, 10),
+	MTK_PIN_IES_SMT_SPEC(184, 186, 0xb60, 9),
+	MTK_PIN_IES_SMT_SPEC(187, 187, 0xb60, 14),
+	MTK_PIN_IES_SMT_SPEC(188, 188, 0xb50, 13),
+	MTK_PIN_IES_SMT_SPEC(189, 193, 0xb60, 15),
+	MTK_PIN_IES_SMT_SPEC(194, 198, 0xb70, 0),
+	MTK_PIN_IES_SMT_SPEC(199, 199, 0xb50, 1),
+	MTK_PIN_IES_SMT_SPEC(200, 202, 0xb70, 1),
+	MTK_PIN_IES_SMT_SPEC(203, 207, 0xb70, 2),
+	MTK_PIN_IES_SMT_SPEC(208, 209, 0xb70, 3),
+	MTK_PIN_IES_SMT_SPEC(210, 210, 0xb70, 4),
+	MTK_PIN_IES_SMT_SPEC(211, 235, 0xb70, 5),
+	MTK_PIN_IES_SMT_SPEC(236, 241, 0xb70, 6),
+	MTK_PIN_IES_SMT_SPEC(242, 243, 0xb70, 7),
+	MTK_PIN_IES_SMT_SPEC(244, 247, 0xb70, 8),
+	MTK_PIN_IES_SMT_SPEC(248, 248, 0xb70, 9),
+	MTK_PIN_IES_SMT_SPEC(249, 249, 0x140, 3),
+	MTK_PIN_IES_SMT_SPEC(250, 250, 0x130, 15),
+	MTK_PIN_IES_SMT_SPEC(251, 251, 0x130, 11),
+	MTK_PIN_IES_SMT_SPEC(252, 252, 0x130, 7),
+	MTK_PIN_IES_SMT_SPEC(253, 253, 0x130, 3),
+	MTK_PIN_IES_SMT_SPEC(254, 254, 0xf40, 15),
+	MTK_PIN_IES_SMT_SPEC(255, 255, 0xf40, 11),
+	MTK_PIN_IES_SMT_SPEC(256, 256, 0xf40, 7),
+	MTK_PIN_IES_SMT_SPEC(257, 257, 0xf40, 3),
+	MTK_PIN_IES_SMT_SPEC(258, 258, 0xcb0, 11),
+	MTK_PIN_IES_SMT_SPEC(259, 259, 0xc90, 11),
+	MTK_PIN_IES_SMT_SPEC(260, 260, 0x3a0, 11),
+	MTK_PIN_IES_SMT_SPEC(261, 261, 0x0b0, 3),
+	MTK_PIN_IES_SMT_SPEC(262, 277, 0xb70, 12),
+	MTK_PIN_IES_SMT_SPEC(278, 278, 0xb70, 13),
+};
+
+static int mt2701_ies_smt_set(struct regmap *regmap, unsigned int pin,
+		unsigned char align, int value, enum pin_config_param arg)
+{
+	if (arg == PIN_CONFIG_INPUT_ENABLE)
+		return mtk_pconf_spec_set_ies_smt_range(regmap, mt2701_ies_set,
+			ARRAY_SIZE(mt2701_ies_set), pin, align, value);
+	else if (arg == PIN_CONFIG_INPUT_SCHMITT_ENABLE)
+		return mtk_pconf_spec_set_ies_smt_range(regmap, mt2701_smt_set,
+			ARRAY_SIZE(mt2701_smt_set), pin, align, value);
+	return -EINVAL;
+}
+
+static const struct mtk_spec_pinmux_set mt2701_spec_pinmux[] = {
+	MTK_PINMUX_SPEC(22, 0xb10, 3),
+	MTK_PINMUX_SPEC(23, 0xb10, 4),
+	MTK_PINMUX_SPEC(24, 0xb10, 5),
+	MTK_PINMUX_SPEC(29, 0xb10, 9),
+	MTK_PINMUX_SPEC(208, 0xb10, 7),
+	MTK_PINMUX_SPEC(209, 0xb10, 8),
+	MTK_PINMUX_SPEC(203, 0xf20, 0),
+	MTK_PINMUX_SPEC(204, 0xf20, 1),
+	MTK_PINMUX_SPEC(249, 0xef0, 0),
+	MTK_PINMUX_SPEC(250, 0xef0, 0),
+	MTK_PINMUX_SPEC(251, 0xef0, 0),
+	MTK_PINMUX_SPEC(252, 0xef0, 0),
+	MTK_PINMUX_SPEC(253, 0xef0, 0),
+	MTK_PINMUX_SPEC(254, 0xef0, 0),
+	MTK_PINMUX_SPEC(255, 0xef0, 0),
+	MTK_PINMUX_SPEC(256, 0xef0, 0),
+	MTK_PINMUX_SPEC(257, 0xef0, 0),
+	MTK_PINMUX_SPEC(258, 0xef0, 0),
+	MTK_PINMUX_SPEC(259, 0xef0, 0),
+	MTK_PINMUX_SPEC(260, 0xef0, 0),
+};
+
+static void mt2701_spec_pinmux_set(struct regmap *reg, unsigned int pin,
+			unsigned int mode)
+{
+	unsigned int i, value, mask;
+	unsigned int info_num = ARRAY_SIZE(mt2701_spec_pinmux);
+	unsigned int spec_flag;
+
+	for (i = 0; i < info_num; i++) {
+		if (pin == mt2701_spec_pinmux[i].pin)
+			break;
+	}
+
+	if (i == info_num)
+		return;
+
+	/*
+	 * Two functions share the same 3-bit pinmux value on these pins;
+	 * bit 3 of the requested mode decides which of the two the extra
+	 * selector bit routes to the pin.
+	 */
+	spec_flag = (mode >> 3);
+	mask = BIT(mt2701_spec_pinmux[i].bit);
+	if (!spec_flag)
+		value = mask;
+	else
+		value = 0;
+	regmap_update_bits(reg, mt2701_spec_pinmux[i].offset, mask, value);
+}
+
+static void mt2701_spec_dir_set(unsigned int *reg_addr, unsigned int pin)
+{
+	if (pin > 175)
+		*reg_addr += 0x10;
+}
+
+static const struct mtk_pinctrl_devdata mt2701_pinctrl_data = {
+	.pins = mtk_pins_mt2701,
+	.npins = ARRAY_SIZE(mtk_pins_mt2701),
+	.grp_desc = mt2701_drv_grp,
+	.n_grp_cls = ARRAY_SIZE(mt2701_drv_grp),
+	.pin_drv_grp = mt2701_pin_drv,
+	.n_pin_drv_grps = ARRAY_SIZE(mt2701_pin_drv),
+	.spec_pull_set = mt2701_spec_pull_set,
+	.spec_ies_smt_set = mt2701_ies_smt_set,
+	.spec_pinmux_set = mt2701_spec_pinmux_set,
+	.spec_dir_set = mt2701_spec_dir_set,
+	.dir_offset = 0x0000,
+	.pullen_offset = 0x0150,
+	.pullsel_offset = 0x0280,
+	.dout_offset = 0x0500,
+	.din_offset = 0x0630,
+	.pinmux_offset = 0x0760,
+	.type1_start = 280,
+	.type1_end = 280,
+	.port_shf = 4,
+	.port_mask = 0x1f,
+	.port_align = 4,
+	.eint_offsets = {
+		.name = "mt2701_eint",
+		.stat      = 0x000,
+		.ack       = 0x040,
+		.mask      = 0x080,
+		.mask_set  = 0x0c0,
+		.mask_clr  = 0x100,
+		.sens      = 0x140,
+		.sens_set  = 0x180,
+		.sens_clr  = 0x1c0,
+		.soft      = 0x200,
+		.soft_set  = 0x240,
+		.soft_clr  = 0x280,
+		.pol       = 0x300,
+		.pol_set   = 0x340,
+		.pol_clr   = 0x380,
+		.dom_en    = 0x400,
+		.dbnc_ctrl = 0x500,
+		.dbnc_set  = 0x600,
+		.dbnc_clr  = 0x700,
+		.port_mask = 6,
+		.ports     = 6,
+	},
+	.ap_num = 169,
+	.db_cnt = 16,
+};
+
+static int mt2701_pinctrl_probe(struct platform_device *pdev)
+{
+	return mtk_pctrl_init(pdev, &mt2701_pinctrl_data, NULL);
+}
+
+static const struct of_device_id mt2701_pctrl_match[] = {
+	{ .compatible = "mediatek,mt2701-pinctrl", },
+	{}
+};
+MODULE_DEVICE_TABLE(of, mt2701_pctrl_match);
+
+static struct platform_driver mtk_pinctrl_driver = {
+	.probe = mt2701_pinctrl_probe,
+	.driver = {
+		.name = "mediatek-mt2701-pinctrl",
+		.of_match_table = mt2701_pctrl_match,
+		.pm = &mtk_eint_pm_ops,
+	},
+};
+
+static int __init mtk_pinctrl_init(void)
+{
+	return platform_driver_register(&mtk_pinctrl_driver);
+}
+arch_initcall(mtk_pinctrl_init);
diff --git a/drivers/pinctrl/mediatek/pinctrl-mt6397.c b/drivers/pinctrl/mediatek/pinctrl-mt6397.c
index f9751ae..6eccb85 100644
--- a/drivers/pinctrl/mediatek/pinctrl-mt6397.c
+++ b/drivers/pinctrl/mediatek/pinctrl-mt6397.c
@@ -12,7 +12,7 @@
  * GNU General Public License for more details.
  */
 
-#include <linux/module.h>
+#include <linux/init.h>
 #include <linux/platform_device.h>
 #include <linux/of.h>
 #include <linux/of_device.h>
@@ -55,7 +55,6 @@
 	{ .compatible = "mediatek,mt6397-pinctrl", },
 	{ }
 };
-MODULE_DEVICE_TABLE(of, mt6397_pctrl_match);
 
 static struct platform_driver mtk_pinctrl_driver = {
 	.probe = mt6397_pinctrl_probe,
@@ -69,9 +68,4 @@
 {
 	return platform_driver_register(&mtk_pinctrl_driver);
 }
-
-module_init(mtk_pinctrl_init);
-
-MODULE_LICENSE("GPL v2");
-MODULE_DESCRIPTION("MediaTek MT6397 Pinctrl Driver");
-MODULE_AUTHOR("Hongzhou Yang <hongzhou.yang@mediatek.com>");
+device_initcall(mtk_pinctrl_init);
diff --git a/drivers/pinctrl/mediatek/pinctrl-mt7623.c b/drivers/pinctrl/mediatek/pinctrl-mt7623.c
new file mode 100644
index 0000000..67895f8
--- /dev/null
+++ b/drivers/pinctrl/mediatek/pinctrl-mt7623.c
@@ -0,0 +1,379 @@
+/*
+ * Copyright (c) 2016 John Crispin <blogic@openwrt.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <dt-bindings/pinctrl/mt65xx.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/regmap.h>
+
+#include "pinctrl-mtk-common.h"
+#include "pinctrl-mtk-mt7623.h"
+
+static const struct mtk_drv_group_desc mt7623_drv_grp[] =  {
+	/* 0E4E8SR 4/8/12/16 */
+	MTK_DRV_GRP(4, 16, 1, 2, 4),
+	/* 0E2E4SR  2/4/6/8 */
+	MTK_DRV_GRP(2, 8, 1, 2, 2),
+	/* E8E4E2  2/4/6/8/10/12/14/16 */
+	MTK_DRV_GRP(2, 16, 0, 2, 2)
+};
+
+#define DRV_SEL0	0xf50
+#define DRV_SEL1	0xf60
+#define DRV_SEL2	0xf70
+#define DRV_SEL3	0xf80
+#define DRV_SEL4	0xf90
+#define DRV_SEL5	0xfa0
+#define DRV_SEL6	0xfb0
+#define DRV_SEL7	0xfe0
+#define DRV_SEL8	0xfd0
+#define DRV_SEL9	0xff0
+#define DRV_SEL10	0xf00
+
+#define MSDC0_CTRL0	0xcc0
+#define MSDC0_CTRL1	0xcd0
+#define MSDC0_CTRL2	0xce0
+#define MSDC0_CTRL3	0xcf0
+#define MSDC0_CTRL4	0xd00
+#define MSDC0_CTRL5	0xd10
+#define MSDC0_CTRL6	0xd20
+#define MSDC1_CTRL0	0xd30
+#define MSDC1_CTRL1	0xd40
+#define MSDC1_CTRL2	0xd50
+#define MSDC1_CTRL3	0xd60
+#define MSDC1_CTRL4	0xd70
+#define MSDC1_CTRL5	0xd80
+#define MSDC1_CTRL6	0xd90
+
+#define IES_EN0		0xb20
+#define IES_EN1		0xb30
+#define IES_EN2		0xb40
+
+#define SMT_EN0		0xb50
+#define SMT_EN1		0xb60
+#define SMT_EN2		0xb70
+
+static const struct mtk_pin_drv_grp mt7623_pin_drv[] = {
+	MTK_PIN_DRV_GRP(0, DRV_SEL0, 0, 1),
+	MTK_PIN_DRV_GRP(1, DRV_SEL0, 0, 1),
+	MTK_PIN_DRV_GRP(2, DRV_SEL0, 0, 1),
+	MTK_PIN_DRV_GRP(3, DRV_SEL0, 0, 1),
+	MTK_PIN_DRV_GRP(4, DRV_SEL0, 0, 1),
+	MTK_PIN_DRV_GRP(5, DRV_SEL0, 0, 1),
+	MTK_PIN_DRV_GRP(6, DRV_SEL0, 0, 1),
+	MTK_PIN_DRV_GRP(7, DRV_SEL0, 4, 1),
+	MTK_PIN_DRV_GRP(8, DRV_SEL0, 4, 1),
+	MTK_PIN_DRV_GRP(9, DRV_SEL0, 4, 1),
+	MTK_PIN_DRV_GRP(10, DRV_SEL0, 8, 1),
+	MTK_PIN_DRV_GRP(11, DRV_SEL0, 8, 1),
+	MTK_PIN_DRV_GRP(12, DRV_SEL0, 8, 1),
+	MTK_PIN_DRV_GRP(13, DRV_SEL0, 8, 1),
+	MTK_PIN_DRV_GRP(14, DRV_SEL0, 12, 0),
+	MTK_PIN_DRV_GRP(15, DRV_SEL0, 12, 0),
+	MTK_PIN_DRV_GRP(18, DRV_SEL1, 4, 0),
+	MTK_PIN_DRV_GRP(19, DRV_SEL1, 4, 0),
+	MTK_PIN_DRV_GRP(20, DRV_SEL1, 4, 0),
+	MTK_PIN_DRV_GRP(21, DRV_SEL1, 4, 0),
+	MTK_PIN_DRV_GRP(22, DRV_SEL1, 8, 0),
+	MTK_PIN_DRV_GRP(23, DRV_SEL1, 8, 0),
+	MTK_PIN_DRV_GRP(24, DRV_SEL1, 8, 0),
+	MTK_PIN_DRV_GRP(25, DRV_SEL1, 8, 0),
+	MTK_PIN_DRV_GRP(26, DRV_SEL1, 8, 0),
+	MTK_PIN_DRV_GRP(27, DRV_SEL1, 12, 0),
+	MTK_PIN_DRV_GRP(28, DRV_SEL1, 12, 0),
+	MTK_PIN_DRV_GRP(29, DRV_SEL1, 12, 0),
+	MTK_PIN_DRV_GRP(33, DRV_SEL2, 0, 0),
+	MTK_PIN_DRV_GRP(34, DRV_SEL2, 0, 0),
+	MTK_PIN_DRV_GRP(35, DRV_SEL2, 0, 0),
+	MTK_PIN_DRV_GRP(36, DRV_SEL2, 0, 0),
+	MTK_PIN_DRV_GRP(37, DRV_SEL2, 0, 0),
+	MTK_PIN_DRV_GRP(39, DRV_SEL2, 8, 1),
+	MTK_PIN_DRV_GRP(40, DRV_SEL2, 8, 1),
+	MTK_PIN_DRV_GRP(41, DRV_SEL2, 8, 1),
+	MTK_PIN_DRV_GRP(42, DRV_SEL2, 8, 1),
+	MTK_PIN_DRV_GRP(43, DRV_SEL2, 12, 0),
+	MTK_PIN_DRV_GRP(44, DRV_SEL2, 12, 0),
+	MTK_PIN_DRV_GRP(45, DRV_SEL2, 12, 0),
+	MTK_PIN_DRV_GRP(47, DRV_SEL3, 0, 0),
+	MTK_PIN_DRV_GRP(48, DRV_SEL3, 0, 0),
+	MTK_PIN_DRV_GRP(49, DRV_SEL3, 4, 0),
+	MTK_PIN_DRV_GRP(53, DRV_SEL3, 12, 0),
+	MTK_PIN_DRV_GRP(54, DRV_SEL3, 12, 0),
+	MTK_PIN_DRV_GRP(55, DRV_SEL3, 12, 0),
+	MTK_PIN_DRV_GRP(56, DRV_SEL3, 12, 0),
+	MTK_PIN_DRV_GRP(60, DRV_SEL4, 8, 1),
+	MTK_PIN_DRV_GRP(61, DRV_SEL4, 8, 1),
+	MTK_PIN_DRV_GRP(62, DRV_SEL4, 8, 1),
+	MTK_PIN_DRV_GRP(63, DRV_SEL4, 12, 1),
+	MTK_PIN_DRV_GRP(64, DRV_SEL4, 12, 1),
+	MTK_PIN_DRV_GRP(65, DRV_SEL4, 12, 1),
+	MTK_PIN_DRV_GRP(66, DRV_SEL5, 0, 1),
+	MTK_PIN_DRV_GRP(67, DRV_SEL5, 0, 1),
+	MTK_PIN_DRV_GRP(68, DRV_SEL5, 0, 1),
+	MTK_PIN_DRV_GRP(69, DRV_SEL5, 0, 1),
+	MTK_PIN_DRV_GRP(70, DRV_SEL5, 0, 1),
+	MTK_PIN_DRV_GRP(71, DRV_SEL5, 0, 1),
+	MTK_PIN_DRV_GRP(72, DRV_SEL3, 4, 0),
+	MTK_PIN_DRV_GRP(73, DRV_SEL3, 4, 0),
+	MTK_PIN_DRV_GRP(74, DRV_SEL3, 4, 0),
+	MTK_PIN_DRV_GRP(83, DRV_SEL5, 0, 1),
+	MTK_PIN_DRV_GRP(84, DRV_SEL5, 0, 1),
+	MTK_PIN_DRV_GRP(105, MSDC1_CTRL1, 0, 1),
+	MTK_PIN_DRV_GRP(106, MSDC1_CTRL0, 0, 1),
+	MTK_PIN_DRV_GRP(107, MSDC1_CTRL2, 0, 1),
+	MTK_PIN_DRV_GRP(108, MSDC1_CTRL2, 0, 1),
+	MTK_PIN_DRV_GRP(109, MSDC1_CTRL2, 0, 1),
+	MTK_PIN_DRV_GRP(110, MSDC1_CTRL2, 0, 1),
+	MTK_PIN_DRV_GRP(111, MSDC0_CTRL2, 0, 1),
+	MTK_PIN_DRV_GRP(112, MSDC0_CTRL2, 0, 1),
+	MTK_PIN_DRV_GRP(113, MSDC0_CTRL2, 0, 1),
+	MTK_PIN_DRV_GRP(114, MSDC0_CTRL2, 0, 1),
+	MTK_PIN_DRV_GRP(115, MSDC0_CTRL2, 0, 1),
+	MTK_PIN_DRV_GRP(116, MSDC0_CTRL1, 0, 1),
+	MTK_PIN_DRV_GRP(117, MSDC0_CTRL0, 0, 1),
+	MTK_PIN_DRV_GRP(118, MSDC0_CTRL2, 0, 1),
+	MTK_PIN_DRV_GRP(119, MSDC0_CTRL2, 0, 1),
+	MTK_PIN_DRV_GRP(120, MSDC0_CTRL2, 0, 1),
+	MTK_PIN_DRV_GRP(121, MSDC0_CTRL2, 0, 1),
+	MTK_PIN_DRV_GRP(126, DRV_SEL3, 4, 0),
+	MTK_PIN_DRV_GRP(199, DRV_SEL0, 4, 1),
+	MTK_PIN_DRV_GRP(200, DRV_SEL8, 0, 0),
+	MTK_PIN_DRV_GRP(201, DRV_SEL8, 0, 0),
+	MTK_PIN_DRV_GRP(203, DRV_SEL8, 4, 0),
+	MTK_PIN_DRV_GRP(204, DRV_SEL8, 4, 0),
+	MTK_PIN_DRV_GRP(205, DRV_SEL8, 4, 0),
+	MTK_PIN_DRV_GRP(206, DRV_SEL8, 4, 0),
+	MTK_PIN_DRV_GRP(207, DRV_SEL8, 4, 0),
+	MTK_PIN_DRV_GRP(208, DRV_SEL8, 8, 0),
+	MTK_PIN_DRV_GRP(209, DRV_SEL8, 8, 0),
+	MTK_PIN_DRV_GRP(236, DRV_SEL9, 4, 0),
+	MTK_PIN_DRV_GRP(237, DRV_SEL9, 4, 0),
+	MTK_PIN_DRV_GRP(238, DRV_SEL9, 4, 0),
+	MTK_PIN_DRV_GRP(239, DRV_SEL9, 4, 0),
+	MTK_PIN_DRV_GRP(240, DRV_SEL9, 4, 0),
+	MTK_PIN_DRV_GRP(241, DRV_SEL9, 4, 0),
+	MTK_PIN_DRV_GRP(242, DRV_SEL9, 8, 0),
+	MTK_PIN_DRV_GRP(243, DRV_SEL9, 8, 0),
+	MTK_PIN_DRV_GRP(257, MSDC0_CTRL2, 0, 1),
+	MTK_PIN_DRV_GRP(261, MSDC1_CTRL2, 0, 1),
+	MTK_PIN_DRV_GRP(262, DRV_SEL10, 8, 0),
+	MTK_PIN_DRV_GRP(263, DRV_SEL10, 8, 0),
+	MTK_PIN_DRV_GRP(264, DRV_SEL10, 8, 0),
+	MTK_PIN_DRV_GRP(265, DRV_SEL10, 8, 0),
+	MTK_PIN_DRV_GRP(266, DRV_SEL10, 8, 0),
+	MTK_PIN_DRV_GRP(267, DRV_SEL10, 8, 0),
+	MTK_PIN_DRV_GRP(268, DRV_SEL10, 8, 0),
+	MTK_PIN_DRV_GRP(269, DRV_SEL10, 8, 0),
+	MTK_PIN_DRV_GRP(270, DRV_SEL10, 8, 0),
+	MTK_PIN_DRV_GRP(271, DRV_SEL10, 8, 0),
+	MTK_PIN_DRV_GRP(272, DRV_SEL10, 8, 0),
+	MTK_PIN_DRV_GRP(274, DRV_SEL10, 8, 0),
+	MTK_PIN_DRV_GRP(275, DRV_SEL10, 8, 0),
+	MTK_PIN_DRV_GRP(276, DRV_SEL10, 8, 0),
+	MTK_PIN_DRV_GRP(278, DRV_SEL2, 8, 1),
+};
+
+static const struct mtk_pin_spec_pupd_set_samereg mt7623_spec_pupd[] = {
+	MTK_PIN_PUPD_SPEC_SR(105, MSDC1_CTRL1, 8, 9, 10),
+	MTK_PIN_PUPD_SPEC_SR(106, MSDC1_CTRL0, 8, 9, 10),
+	MTK_PIN_PUPD_SPEC_SR(107, MSDC1_CTRL3, 0, 1, 2),
+	MTK_PIN_PUPD_SPEC_SR(108, MSDC1_CTRL3, 4, 5, 6),
+	MTK_PIN_PUPD_SPEC_SR(109, MSDC1_CTRL3, 8, 9, 10),
+	MTK_PIN_PUPD_SPEC_SR(110, MSDC1_CTRL3, 12, 13, 14),
+	MTK_PIN_PUPD_SPEC_SR(111, MSDC0_CTRL4, 12, 13, 14),
+	MTK_PIN_PUPD_SPEC_SR(112, MSDC0_CTRL4, 8, 9, 10),
+	MTK_PIN_PUPD_SPEC_SR(113, MSDC0_CTRL4, 4, 5, 6),
+	MTK_PIN_PUPD_SPEC_SR(114, MSDC0_CTRL4, 0, 1, 2),
+	MTK_PIN_PUPD_SPEC_SR(115, MSDC0_CTRL5, 0, 1, 2),
+	MTK_PIN_PUPD_SPEC_SR(116, MSDC0_CTRL1, 8, 9, 10),
+	MTK_PIN_PUPD_SPEC_SR(117, MSDC0_CTRL0, 8, 9, 10),
+	MTK_PIN_PUPD_SPEC_SR(118, MSDC0_CTRL3, 12, 13, 14),
+	MTK_PIN_PUPD_SPEC_SR(119, MSDC0_CTRL3, 8, 9, 10),
+	MTK_PIN_PUPD_SPEC_SR(120, MSDC0_CTRL3, 4, 5, 6),
+	MTK_PIN_PUPD_SPEC_SR(121, MSDC0_CTRL3, 0, 1, 2),
+};
+
+static int mt7623_spec_pull_set(struct regmap *regmap, unsigned int pin,
+		unsigned char align, bool isup, unsigned int r1r0)
+{
+	return mtk_pctrl_spec_pull_set_samereg(regmap, mt7623_spec_pupd,
+		ARRAY_SIZE(mt7623_spec_pupd), pin, align, isup, r1r0);
+}
+
+static const struct mtk_pin_ies_smt_set mt7623_ies_set[] = {
+	MTK_PIN_IES_SMT_SPEC(0, 6, IES_EN0, 0),
+	MTK_PIN_IES_SMT_SPEC(7, 9, IES_EN0, 1),
+	MTK_PIN_IES_SMT_SPEC(10, 13, IES_EN0, 2),
+	MTK_PIN_IES_SMT_SPEC(14, 15, IES_EN0, 3),
+	MTK_PIN_IES_SMT_SPEC(18, 21, IES_EN0, 5),
+	MTK_PIN_IES_SMT_SPEC(22, 26, IES_EN0, 6),
+	MTK_PIN_IES_SMT_SPEC(27, 29, IES_EN0, 7),
+	MTK_PIN_IES_SMT_SPEC(33, 37, IES_EN0, 8),
+	MTK_PIN_IES_SMT_SPEC(39, 42, IES_EN0, 9),
+	MTK_PIN_IES_SMT_SPEC(43, 45, IES_EN0, 10),
+	MTK_PIN_IES_SMT_SPEC(47, 48, IES_EN0, 11),
+	MTK_PIN_IES_SMT_SPEC(49, 49, IES_EN0, 12),
+	MTK_PIN_IES_SMT_SPEC(53, 56, IES_EN0, 14),
+	MTK_PIN_IES_SMT_SPEC(60, 62, IES_EN1, 0),
+	MTK_PIN_IES_SMT_SPEC(63, 65, IES_EN1, 1),
+	MTK_PIN_IES_SMT_SPEC(66, 71, IES_EN1, 2),
+	MTK_PIN_IES_SMT_SPEC(72, 74, IES_EN0, 12),
+	MTK_PIN_IES_SMT_SPEC(75, 76, IES_EN1, 3),
+	MTK_PIN_IES_SMT_SPEC(83, 84, IES_EN1, 2),
+	MTK_PIN_IES_SMT_SPEC(105, 121, MSDC1_CTRL1, 4),
+	MTK_PIN_IES_SMT_SPEC(122, 125, IES_EN1, 7),
+	MTK_PIN_IES_SMT_SPEC(126, 126, IES_EN0, 12),
+	MTK_PIN_IES_SMT_SPEC(199, 201, IES_EN0, 1),
+	MTK_PIN_IES_SMT_SPEC(203, 207, IES_EN2, 2),
+	MTK_PIN_IES_SMT_SPEC(208, 209, IES_EN2, 3),
+	MTK_PIN_IES_SMT_SPEC(236, 241, IES_EN2, 6),
+	MTK_PIN_IES_SMT_SPEC(242, 243, IES_EN2, 7),
+	MTK_PIN_IES_SMT_SPEC(261, 261, MSDC1_CTRL2, 4),
+	MTK_PIN_IES_SMT_SPEC(262, 272, IES_EN2, 12),
+	MTK_PIN_IES_SMT_SPEC(274, 276, IES_EN2, 12),
+	MTK_PIN_IES_SMT_SPEC(278, 278, IES_EN2, 13),
+};
+
+static const struct mtk_pin_ies_smt_set mt7623_smt_set[] = {
+	MTK_PIN_IES_SMT_SPEC(0, 6, SMT_EN0, 0),
+	MTK_PIN_IES_SMT_SPEC(7, 9, SMT_EN0, 1),
+	MTK_PIN_IES_SMT_SPEC(10, 13, SMT_EN0, 2),
+	MTK_PIN_IES_SMT_SPEC(14, 15, SMT_EN0, 3),
+	MTK_PIN_IES_SMT_SPEC(18, 21, SMT_EN0, 5),
+	MTK_PIN_IES_SMT_SPEC(22, 26, SMT_EN0, 6),
+	MTK_PIN_IES_SMT_SPEC(27, 29, SMT_EN0, 7),
+	MTK_PIN_IES_SMT_SPEC(33, 37, SMT_EN0, 8),
+	MTK_PIN_IES_SMT_SPEC(39, 42, SMT_EN0, 9),
+	MTK_PIN_IES_SMT_SPEC(43, 45, SMT_EN0, 10),
+	MTK_PIN_IES_SMT_SPEC(47, 48, SMT_EN0, 11),
+	MTK_PIN_IES_SMT_SPEC(49, 49, SMT_EN0, 12),
+	MTK_PIN_IES_SMT_SPEC(53, 56, SMT_EN0, 14),
+	MTK_PIN_IES_SMT_SPEC(60, 62, SMT_EN1, 0),
+	MTK_PIN_IES_SMT_SPEC(63, 65, SMT_EN1, 1),
+	MTK_PIN_IES_SMT_SPEC(66, 71, SMT_EN1, 2),
+	MTK_PIN_IES_SMT_SPEC(72, 74, SMT_EN0, 12),
+	MTK_PIN_IES_SMT_SPEC(75, 76, SMT_EN1, 3),
+	MTK_PIN_IES_SMT_SPEC(83, 84, SMT_EN1, 2),
+	MTK_PIN_IES_SMT_SPEC(105, 106, MSDC1_CTRL1, 11),
+	MTK_PIN_IES_SMT_SPEC(107, 107, MSDC1_CTRL3, 3),
+	MTK_PIN_IES_SMT_SPEC(108, 108, MSDC1_CTRL3, 7),
+	MTK_PIN_IES_SMT_SPEC(109, 109, MSDC1_CTRL3, 11),
+	MTK_PIN_IES_SMT_SPEC(110, 111, MSDC1_CTRL3, 15),
+	MTK_PIN_IES_SMT_SPEC(112, 112, MSDC0_CTRL4, 11),
+	MTK_PIN_IES_SMT_SPEC(113, 113, MSDC0_CTRL4, 7),
+	MTK_PIN_IES_SMT_SPEC(114, 115, MSDC0_CTRL4, 3),
+	MTK_PIN_IES_SMT_SPEC(116, 117, MSDC0_CTRL1, 11),
+	MTK_PIN_IES_SMT_SPEC(118, 118, MSDC0_CTRL3, 15),
+	MTK_PIN_IES_SMT_SPEC(119, 119, MSDC0_CTRL3, 11),
+	MTK_PIN_IES_SMT_SPEC(120, 120, MSDC0_CTRL3, 7),
+	MTK_PIN_IES_SMT_SPEC(121, 121, MSDC0_CTRL3, 3),
+	MTK_PIN_IES_SMT_SPEC(122, 125, SMT_EN1, 7),
+	MTK_PIN_IES_SMT_SPEC(126, 126, SMT_EN0, 12),
+	MTK_PIN_IES_SMT_SPEC(199, 201, SMT_EN0, 1),
+	MTK_PIN_IES_SMT_SPEC(203, 207, SMT_EN2, 2),
+	MTK_PIN_IES_SMT_SPEC(208, 209, SMT_EN2, 3),
+	MTK_PIN_IES_SMT_SPEC(236, 241, SMT_EN2, 6),
+	MTK_PIN_IES_SMT_SPEC(242, 243, SMT_EN2, 7),
+	MTK_PIN_IES_SMT_SPEC(261, 261, MSDC1_CTRL6, 3),
+	MTK_PIN_IES_SMT_SPEC(262, 272, SMT_EN2, 12),
+	MTK_PIN_IES_SMT_SPEC(274, 276, SMT_EN2, 12),
+	MTK_PIN_IES_SMT_SPEC(278, 278, SMT_EN2, 13),
+};
+
+static int mt7623_ies_smt_set(struct regmap *regmap, unsigned int pin,
+		unsigned char align, int value, enum pin_config_param arg)
+{
+	if (arg == PIN_CONFIG_INPUT_ENABLE)
+		return mtk_pconf_spec_set_ies_smt_range(regmap, mt7623_ies_set,
+			ARRAY_SIZE(mt7623_ies_set), pin, align, value);
+	else if (arg == PIN_CONFIG_INPUT_SCHMITT_ENABLE)
+		return mtk_pconf_spec_set_ies_smt_range(regmap, mt7623_smt_set,
+			ARRAY_SIZE(mt7623_smt_set), pin, align, value);
+	return -EINVAL;
+}
+
+static const struct mtk_pinctrl_devdata mt7623_pinctrl_data = {
+	.pins = mtk_pins_mt7623,
+	.npins = ARRAY_SIZE(mtk_pins_mt7623),
+	.grp_desc = mt7623_drv_grp,
+	.n_grp_cls = ARRAY_SIZE(mt7623_drv_grp),
+	.pin_drv_grp = mt7623_pin_drv,
+	.n_pin_drv_grps = ARRAY_SIZE(mt7623_pin_drv),
+	.spec_pull_set = mt7623_spec_pull_set,
+	.spec_ies_smt_set = mt7623_ies_smt_set,
+	.dir_offset = 0x0000,
+	.pullen_offset = 0x0150,
+	.pullsel_offset = 0x0280,
+	.dout_offset = 0x0500,
+	.din_offset = 0x0630,
+	.pinmux_offset = 0x0760,
+	.type1_start = 280,
+	.type1_end = 280,
+	.port_shf = 4,
+	.port_mask = 0x1f,
+	.port_align = 4,
+	.eint_offsets = {
+		.name = "mt7623_eint",
+		.stat      = 0x000,
+		.ack       = 0x040,
+		.mask      = 0x080,
+		.mask_set  = 0x0c0,
+		.mask_clr  = 0x100,
+		.sens      = 0x140,
+		.sens_set  = 0x180,
+		.sens_clr  = 0x1c0,
+		.soft      = 0x200,
+		.soft_set  = 0x240,
+		.soft_clr  = 0x280,
+		.pol       = 0x300,
+		.pol_set   = 0x340,
+		.pol_clr   = 0x380,
+		.dom_en    = 0x400,
+		.dbnc_ctrl = 0x500,
+		.dbnc_set  = 0x600,
+		.dbnc_clr  = 0x700,
+		.port_mask = 6,
+		.ports     = 6,
+	},
+	.ap_num = 169,
+	.db_cnt = 16,
+};
+
+static int mt7623_pinctrl_probe(struct platform_device *pdev)
+{
+	return mtk_pctrl_init(pdev, &mt7623_pinctrl_data, NULL);
+}
+
+static const struct of_device_id mt7623_pctrl_match[] = {
+	{ .compatible = "mediatek,mt7623-pinctrl", },
+	{}
+};
+MODULE_DEVICE_TABLE(of, mt7623_pctrl_match);
+
+static struct platform_driver mtk_pinctrl_driver = {
+	.probe = mt7623_pinctrl_probe,
+	.driver = {
+		.name = "mediatek-mt7623-pinctrl",
+		.of_match_table = mt7623_pctrl_match,
+	},
+};
+
+static int __init mtk_pinctrl_init(void)
+{
+	return platform_driver_register(&mtk_pinctrl_driver);
+}
+
+arch_initcall(mtk_pinctrl_init);
diff --git a/drivers/pinctrl/mediatek/pinctrl-mt8127.c b/drivers/pinctrl/mediatek/pinctrl-mt8127.c
index 98e0beb..d764915 100644
--- a/drivers/pinctrl/mediatek/pinctrl-mt8127.c
+++ b/drivers/pinctrl/mediatek/pinctrl-mt8127.c
@@ -13,7 +13,7 @@
  * GNU General Public License for more details.
  */
 
-#include <linux/module.h>
+#include <linux/init.h>
 #include <linux/platform_device.h>
 #include <linux/of.h>
 #include <linux/of_device.h>
@@ -336,7 +336,6 @@
 	{ .compatible = "mediatek,mt8127-pinctrl", },
 	{ }
 };
-MODULE_DEVICE_TABLE(of, mt8127_pctrl_match);
 
 static struct platform_driver mtk_pinctrl_driver = {
 	.probe = mt8127_pinctrl_probe,
@@ -350,9 +349,4 @@
 {
 	return platform_driver_register(&mtk_pinctrl_driver);
 }
-
 arch_initcall(mtk_pinctrl_init);
-
-MODULE_LICENSE("GPL v2");
-MODULE_DESCRIPTION("MediaTek MT8127 Pinctrl Driver");
-MODULE_AUTHOR("Yingjoe Chen <yingjoe.chen@mediatek.com>");
diff --git a/drivers/pinctrl/mediatek/pinctrl-mt8135.c b/drivers/pinctrl/mediatek/pinctrl-mt8135.c
index 1c153b8..d8c645f 100644
--- a/drivers/pinctrl/mediatek/pinctrl-mt8135.c
+++ b/drivers/pinctrl/mediatek/pinctrl-mt8135.c
@@ -12,7 +12,7 @@
  * GNU General Public License for more details.
  */
 
-#include <linux/module.h>
+#include <linux/init.h>
 #include <linux/platform_device.h>
 #include <linux/of.h>
 #include <linux/of_device.h>
@@ -351,7 +351,6 @@
 	},
 	{ }
 };
-MODULE_DEVICE_TABLE(of, mt8135_pctrl_match);
 
 static struct platform_driver mtk_pinctrl_driver = {
 	.probe = mt8135_pinctrl_probe,
@@ -365,9 +364,4 @@
 {
 	return platform_driver_register(&mtk_pinctrl_driver);
 }
-
 arch_initcall(mtk_pinctrl_init);
-
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("MediaTek Pinctrl Driver");
-MODULE_AUTHOR("Hongzhou Yang <hongzhou.yang@mediatek.com>");
diff --git a/drivers/pinctrl/mediatek/pinctrl-mt8173.c b/drivers/pinctrl/mediatek/pinctrl-mt8173.c
index a62514e..8bfd427 100644
--- a/drivers/pinctrl/mediatek/pinctrl-mt8173.c
+++ b/drivers/pinctrl/mediatek/pinctrl-mt8173.c
@@ -12,7 +12,7 @@
  * GNU General Public License for more details.
  */
 
-#include <linux/module.h>
+#include <linux/init.h>
 #include <linux/platform_device.h>
 #include <linux/of.h>
 #include <linux/of_device.h>
@@ -378,7 +378,6 @@
 	},
 	{ }
 };
-MODULE_DEVICE_TABLE(of, mt8173_pctrl_match);
 
 static struct platform_driver mtk_pinctrl_driver = {
 	.probe = mt8173_pinctrl_probe,
@@ -393,9 +392,4 @@
 {
 	return platform_driver_register(&mtk_pinctrl_driver);
 }
-
 arch_initcall(mtk_pinctrl_init);
-
-MODULE_LICENSE("GPL v2");
-MODULE_DESCRIPTION("MediaTek Pinctrl Driver");
-MODULE_AUTHOR("Hongzhou Yang <hongzhou.yang@mediatek.com>");
diff --git a/drivers/pinctrl/mediatek/pinctrl-mtk-common.c b/drivers/pinctrl/mediatek/pinctrl-mtk-common.c
index e96e86d..2bbe6f7 100644
--- a/drivers/pinctrl/mediatek/pinctrl-mtk-common.c
+++ b/drivers/pinctrl/mediatek/pinctrl-mtk-common.c
@@ -43,10 +43,13 @@
 
 #define MAX_GPIO_MODE_PER_REG 5
 #define GPIO_MODE_BITS        3
+#define GPIO_MODE_PREFIX "GPIO"
 
 static const char * const mtk_gpio_functions[] = {
 	"func0", "func1", "func2", "func3",
 	"func4", "func5", "func6", "func7",
+	"func8", "func9", "func10", "func11",
+	"func12", "func13", "func14", "func15",
 };
 
 /*
@@ -81,6 +84,9 @@
 	reg_addr = mtk_get_port(pctl, offset) + pctl->devdata->dir_offset;
 	bit = BIT(offset & 0xf);
 
+	if (pctl->devdata->spec_dir_set)
+		pctl->devdata->spec_dir_set(&reg_addr, offset);
+
 	if (input)
 		/* Different SoC has different alignment offset. */
 		reg_addr = CLR_ADDR(reg_addr, pctl);
@@ -677,9 +683,14 @@
 	unsigned int mask = (1L << GPIO_MODE_BITS) - 1;
 	struct mtk_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev);
 
+	if (pctl->devdata->spec_pinmux_set)
+		pctl->devdata->spec_pinmux_set(mtk_get_regmap(pctl, pin),
+					pin, mode);
+
 	reg_addr = ((pin / MAX_GPIO_MODE_PER_REG) << pctl->devdata->port_shf)
 			+ pctl->devdata->pinmux_offset;
 
+	mode &= mask;
 	bit = pin % MAX_GPIO_MODE_PER_REG;
 	mask <<= (GPIO_MODE_BITS * bit);
 	val = (mode << (GPIO_MODE_BITS * bit));
@@ -725,12 +736,48 @@
 	return 0;
 }
 
+static int mtk_pmx_find_gpio_mode(struct mtk_pinctrl *pctl,
+				unsigned offset)
+{
+	const struct mtk_desc_pin *pin = pctl->devdata->pins + offset;
+	const struct mtk_desc_function *func = pin->functions;
+
+	while (func && func->name) {
+		if (!strncmp(func->name, GPIO_MODE_PREFIX,
+			sizeof(GPIO_MODE_PREFIX)-1))
+			return func->muxval;
+		func++;
+	}
+	return -EINVAL;
+}
+
+static int mtk_pmx_gpio_request_enable(struct pinctrl_dev *pctldev,
+				    struct pinctrl_gpio_range *range,
+				    unsigned offset)
+{
+	int muxval;
+	struct mtk_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev);
+
+	muxval = mtk_pmx_find_gpio_mode(pctl, offset);
+
+	if (muxval < 0) {
+		dev_err(pctl->dev, "invalid gpio pin %d.\n", offset);
+		return -EINVAL;
+	}
+
+	mtk_pmx_set_mode(pctldev, offset, muxval);
+	mtk_pconf_set_ies_smt(pctl, offset, 1, PIN_CONFIG_INPUT_ENABLE);
+
+	return 0;
+}
+
 static const struct pinmux_ops mtk_pmx_ops = {
 	.get_functions_count	= mtk_pmx_get_funcs_cnt,
 	.get_function_name	= mtk_pmx_get_func_name,
 	.get_function_groups	= mtk_pmx_get_func_groups,
 	.set_mux		= mtk_pmx_set_mux,
 	.gpio_set_direction	= mtk_pmx_gpio_set_direction,
+	.gpio_request_enable	= mtk_pmx_gpio_request_enable,
 };
 
 static int mtk_gpio_direction_input(struct gpio_chip *chip,
@@ -756,6 +803,10 @@
 
 	reg_addr =  mtk_get_port(pctl, offset) + pctl->devdata->dir_offset;
 	bit = BIT(offset & 0xf);
+
+	if (pctl->devdata->spec_dir_set)
+		pctl->devdata->spec_dir_set(&reg_addr, offset);
+
 	regmap_read(pctl->regmap1, reg_addr, &read_val);
 	return !(read_val & bit);
 }
@@ -814,6 +865,10 @@
 
 	/* set mux to INT mode */
 	mtk_pmx_set_mode(pctl->pctl_dev, pin->pin.number, pin->eint.eintmux);
+	/* set gpio direction to input */
+	mtk_pmx_gpio_set_direction(pctl->pctl_dev, NULL, pin->pin.number, true);
+	/* set input-enable */
+	mtk_pconf_set_ies_smt(pctl, pin->pin.number, 1, PIN_CONFIG_INPUT_ENABLE);
 
 	return 0;
 }
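
The gpio_request_enable hook added above works by scanning a pin's function
table for the entry whose name starts with the "GPIO" prefix and programming
that mux value before enabling the input path. Below is a minimal stand-alone
sketch of that prefix lookup; the demo_* names and the function table are
hypothetical and only illustrate the idea, they are not taken from the driver.

/*
 * Minimal user-space sketch of the "find the GPIO mode by name prefix"
 * lookup.  The function table below is hypothetical; the real driver
 * walks struct mtk_desc_function entries from the pin descriptor tables.
 */
#include <stdio.h>
#include <string.h>

#define GPIO_MODE_PREFIX "GPIO"

struct demo_function {
	const char *name;	/* e.g. "GPIO33", "I2S1_DATA", ... */
	int muxval;		/* value written into the pinmux field */
};

/* Hypothetical function list for one pin. */
static const struct demo_function demo_pin_functions[] = {
	{ "I2S1_DATA", 1 },
	{ "PCM_TX", 3 },
	{ "GPIO33", 0 },
	{ NULL, 0 },		/* table terminator */
};

/* Return the mux value of the GPIO function, or -1 if the pin has none. */
static int demo_find_gpio_mode(const struct demo_function *func)
{
	for (; func && func->name; func++) {
		if (!strncmp(func->name, GPIO_MODE_PREFIX,
			     sizeof(GPIO_MODE_PREFIX) - 1))
			return func->muxval;
	}
	return -1;
}

int main(void)
{
	int muxval = demo_find_gpio_mode(demo_pin_functions);

	if (muxval < 0)
		printf("pin has no GPIO mode\n");
	else
		printf("GPIO mode muxval = %d\n", muxval);
	return 0;
}

Built with an ordinary C compiler, the sketch prints mux value 0 for the
hypothetical "GPIO33" entry.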
diff --git a/drivers/pinctrl/mediatek/pinctrl-mtk-common.h b/drivers/pinctrl/mediatek/pinctrl-mtk-common.h
index 55a5343..8543bc4 100644
--- a/drivers/pinctrl/mediatek/pinctrl-mtk-common.h
+++ b/drivers/pinctrl/mediatek/pinctrl-mtk-common.h
@@ -209,7 +209,14 @@
  * means when user set smt, input enable is set at the same time. So they
  * also need special control. If special control is success, this should
  * return 0, otherwise return non-zero value.
- *
+ * @spec_pinmux_set: In some cases, two pinmux functions share the same
+ * value in the same segment of the pinmux control register. To use one
+ * of the two functions, an extra bit has to be set to select the right
+ * one.
+ * @spec_dir_set: In a few SoCs the direction control registers are not
+ * arranged contiguously; they may be split into parts, so they need a
+ * special direction setting.
+ *
  * @dir_offset: The direction register offset.
  * @pullen_offset: The pull-up/pull-down enable register offset.
  * @pinmux_offset: The pinmux register offset.
@@ -234,6 +241,9 @@
 			unsigned char align, bool isup, unsigned int arg);
 	int (*spec_ies_smt_set)(struct regmap *reg, unsigned int pin,
 			unsigned char align, int value, enum pin_config_param arg);
+	void (*spec_pinmux_set)(struct regmap *reg, unsigned int pin,
+			unsigned int mode);
+	void (*spec_dir_set)(unsigned int *reg_addr, unsigned int pin);
 	unsigned int dir_offset;
 	unsigned int ies_offset;
 	unsigned int smt_offset;
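
The two callbacks documented in the new kernel-doc text are optional per-SoC
hooks: spec_pinmux_set flips an extra selector bit when two functions share a
mux value, and spec_dir_set adjusts the direction-register address for pins
whose bits sit past a gap in the register map. The stand-alone sketch below
models only the spec_dir_set idea; the register layout and the demo_* names
are illustrative assumptions, with just the 175/0x10 split mirroring the
mt2701 callback earlier in this patch.

/*
 * Stand-alone sketch of a spec_dir_set-style hook: pins above a split
 * point have their direction bits in registers that start 0x10 later
 * than the regular formula would give.  The split point (175) mirrors
 * the mt2701 callback; the address formula itself is illustrative.
 */
#include <stdio.h>

#define PORT_ALIGN	0x10	/* one direction register per 16 pins */
#define DIR_OFFSET	0x0000	/* base of the direction registers */
#define SPLIT_PIN	175	/* pins above this sit past a register gap */

static unsigned int demo_dir_reg(unsigned int pin)
{
	unsigned int reg_addr = DIR_OFFSET + (pin / 16) * PORT_ALIGN;

	/* What a spec_dir_set callback would do for the "late" pins. */
	if (pin > SPLIT_PIN)
		reg_addr += 0x10;

	return reg_addr;
}

int main(void)
{
	printf("pin 10  -> dir reg 0x%04x\n", demo_dir_reg(10));
	printf("pin 175 -> dir reg 0x%04x\n", demo_dir_reg(175));
	printf("pin 176 -> dir reg 0x%04x\n", demo_dir_reg(176));
	return 0;
}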
diff --git a/drivers/pinctrl/mediatek/pinctrl-mtk-mt2701.h b/drivers/pinctrl/mediatek/pinctrl-mtk-mt2701.h
new file mode 100644
index 0000000..f906420
--- /dev/null
+++ b/drivers/pinctrl/mediatek/pinctrl-mtk-mt2701.h
@@ -0,0 +1,2323 @@
+/*
+ * Copyright (c) 2015 MediaTek Inc.
+ * Author: Biao Huang <biao.huang@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __PINCTRL_MTK_MT2701_H
+#define __PINCTRL_MTK_MT2701_H
+
+#include <linux/pinctrl/pinctrl.h>
+#include "pinctrl-mtk-common.h"
+
+static const struct mtk_desc_pin mtk_pins_mt2701[] = {
+	MTK_PIN(
+		PINCTRL_PIN(0, "PWRAP_SPI0_MI"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(0, 148),
+		MTK_FUNCTION(0, "GPIO0"),
+		MTK_FUNCTION(1, "PWRAP_SPIDO"),
+		MTK_FUNCTION(2, "PWRAP_SPIDI")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(1, "PWRAP_SPI0_MO"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(0, 149),
+		MTK_FUNCTION(0, "GPIO1"),
+		MTK_FUNCTION(1, "PWRAP_SPIDI"),
+		MTK_FUNCTION(2, "PWRAP_SPIDO")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(2, "PWRAP_INT"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(0, 150),
+		MTK_FUNCTION(0, "GPIO2"),
+		MTK_FUNCTION(1, "PWRAP_INT")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(3, "PWRAP_SPI0_CK"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(0, 151),
+		MTK_FUNCTION(0, "GPIO3"),
+		MTK_FUNCTION(1, "PWRAP_SPICK_I")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(4, "PWRAP_SPI0_CSN"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(0, 152),
+		MTK_FUNCTION(0, "GPIO4"),
+		MTK_FUNCTION(1, "PWRAP_SPICS_B_I")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(5, "PWRAP_SPI0_CK2"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(0, 153),
+		MTK_FUNCTION(0, "GPIO5"),
+		MTK_FUNCTION(1, "PWRAP_SPICK2_I"),
+		MTK_FUNCTION(5, "ANT_SEL1")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(6, "PWRAP_SPI0_CSN2"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(0, 154),
+		MTK_FUNCTION(0, "GPIO6"),
+		MTK_FUNCTION(1, "PWRAP_SPICS2_B_I"),
+		MTK_FUNCTION(5, "ANT_SEL0"),
+		MTK_FUNCTION(7, "DBG_MON_A[0]")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(7, "SPI1_CSN"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(0, 155),
+		MTK_FUNCTION(0, "GPIO7"),
+		MTK_FUNCTION(1, "SPI1_CS"),
+		MTK_FUNCTION(4, "KCOL0"),
+		MTK_FUNCTION(7, "DBG_MON_B[12]")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(8, "SPI1_MI"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(0, 156),
+		MTK_FUNCTION(0, "GPIO8"),
+		MTK_FUNCTION(1, "SPI1_MI"),
+		MTK_FUNCTION(2, "SPI1_MO"),
+		MTK_FUNCTION(4, "KCOL1"),
+		MTK_FUNCTION(7, "DBG_MON_B[13]")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(9, "SPI1_MO"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(0, 157),
+		MTK_FUNCTION(0, "GPIO9"),
+		MTK_FUNCTION(1, "SPI1_MO"),
+		MTK_FUNCTION(2, "SPI1_MI"),
+		MTK_FUNCTION(3, "EXT_FRAME_SYNC"),
+		MTK_FUNCTION(4, "KCOL2"),
+		MTK_FUNCTION(7, "DBG_MON_B[14]")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(10, "RTC32K_CK"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(0, 158),
+		MTK_FUNCTION(0, "GPIO10"),
+		MTK_FUNCTION(1, "RTC32K_CK")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(11, "WATCHDOG"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(0, 159),
+		MTK_FUNCTION(0, "GPIO11"),
+		MTK_FUNCTION(1, "WATCHDOG")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(12, "SRCLKENA"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(0, 160),
+		MTK_FUNCTION(0, "GPIO12"),
+		MTK_FUNCTION(1, "SRCLKENA")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(13, "SRCLKENAI"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(0, 161),
+		MTK_FUNCTION(0, "GPIO13"),
+		MTK_FUNCTION(1, "SRCLKENAI")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(14, "URXD2"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(0, 162),
+		MTK_FUNCTION(0, "GPIO14"),
+		MTK_FUNCTION(1, "URXD2"),
+		MTK_FUNCTION(2, "UTXD2"),
+		MTK_FUNCTION(5, "SRCCLKENAI2"),
+		MTK_FUNCTION(7, "DBG_MON_B[30]")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(15, "UTXD2"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(0, 163),
+		MTK_FUNCTION(0, "GPIO15"),
+		MTK_FUNCTION(1, "UTXD2"),
+		MTK_FUNCTION(2, "URXD2"),
+		MTK_FUNCTION(7, "DBG_MON_B[31]")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(16, "I2S5_DATA_IN"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(0, 164),
+		MTK_FUNCTION(0, "GPIO16"),
+		MTK_FUNCTION(1, "I2S5_DATA_IN"),
+		MTK_FUNCTION(3, "PCM_RX"),
+		MTK_FUNCTION(4, "ANT_SEL4")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(17, "I2S5_BCK"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(0, 165),
+		MTK_FUNCTION(0, "GPIO17"),
+		MTK_FUNCTION(1, "I2S5_BCK"),
+		MTK_FUNCTION(3, "PCM_CLK0"),
+		MTK_FUNCTION(4, "ANT_SEL2")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(18, "PCM_CLK"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(0, 166),
+		MTK_FUNCTION(0, "GPIO18"),
+		MTK_FUNCTION(1, "PCM_CLK0"),
+		MTK_FUNCTION(2, "MRG_CLK"),
+		MTK_FUNCTION(4, "MM_TEST_CK"),
+		MTK_FUNCTION(5, "CONN_DSP_JCK"),
+		MTK_FUNCTION(6, "WCN_PCM_CLKO"),
+		MTK_FUNCTION(7, "DBG_MON_A[3]")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(19, "PCM_SYNC"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(0, 167),
+		MTK_FUNCTION(0, "GPIO19"),
+		MTK_FUNCTION(1, "PCM_SYNC"),
+		MTK_FUNCTION(2, "MRG_SYNC"),
+		MTK_FUNCTION(5, "CONN_DSP_JINTP"),
+		MTK_FUNCTION(6, "WCN_PCM_SYNC"),
+		MTK_FUNCTION(7, "DBG_MON_A[5]")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(20, "PCM_RX"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO20"),
+		MTK_FUNCTION(1, "PCM_RX"),
+		MTK_FUNCTION(2, "MRG_RX"),
+		MTK_FUNCTION(3, "MRG_TX"),
+		MTK_FUNCTION(4, "PCM_TX"),
+		MTK_FUNCTION(5, "CONN_DSP_JDI"),
+		MTK_FUNCTION(6, "WCN_PCM_RX"),
+		MTK_FUNCTION(7, "DBG_MON_A[4]")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(21, "PCM_TX"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO21"),
+		MTK_FUNCTION(1, "PCM_TX"),
+		MTK_FUNCTION(2, "MRG_TX"),
+		MTK_FUNCTION(3, "MRG_RX"),
+		MTK_FUNCTION(4, "PCM_RX"),
+		MTK_FUNCTION(5, "CONN_DSP_JMS"),
+		MTK_FUNCTION(6, "WCN_PCM_TX"),
+		MTK_FUNCTION(7, "DBG_MON_A[2]")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(22, "EINT0"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(0, 0),
+		MTK_FUNCTION(0, "GPIO22"),
+		MTK_FUNCTION(1, "UCTS0"),
+		MTK_FUNCTION(3, "KCOL3"),
+		MTK_FUNCTION(4, "CONN_DSP_JDO"),
+		MTK_FUNCTION(5, "EXT_FRAME_SYNC"),
+		MTK_FUNCTION(7, "DBG_MON_A[30]"),
+		MTK_FUNCTION(10, "PCIE0_PERST_N")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(23, "EINT1"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(0, 1),
+		MTK_FUNCTION(0, "GPIO23"),
+		MTK_FUNCTION(1, "URTS0"),
+		MTK_FUNCTION(3, "KCOL2"),
+		MTK_FUNCTION(4, "CONN_MCU_TDO"),
+		MTK_FUNCTION(5, "EXT_FRAME_SYNC"),
+		MTK_FUNCTION(7, "DBG_MON_A[29]"),
+		MTK_FUNCTION(10, "PCIE1_PERST_N")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(24, "EINT2"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(0, 2),
+		MTK_FUNCTION(0, "GPIO24"),
+		MTK_FUNCTION(1, "UCTS1"),
+		MTK_FUNCTION(3, "KCOL1"),
+		MTK_FUNCTION(4, "CONN_MCU_DBGACK_N"),
+		MTK_FUNCTION(7, "DBG_MON_A[28]"),
+		MTK_FUNCTION(10, "PCIE2_PERST_N")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(25, "EINT3"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(0, 3),
+		MTK_FUNCTION(0, "GPIO25"),
+		MTK_FUNCTION(1, "URTS1"),
+		MTK_FUNCTION(3, "KCOL0"),
+		MTK_FUNCTION(4, "CONN_MCU_DBGI_N"),
+		MTK_FUNCTION(7, "DBG_MON_A[27]")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(26, "EINT4"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(0, 4),
+		MTK_FUNCTION(0, "GPIO26"),
+		MTK_FUNCTION(1, "UCTS3"),
+		MTK_FUNCTION(2, "DRV_VBUS_P1"),
+		MTK_FUNCTION(3, "KROW3"),
+		MTK_FUNCTION(4, "CONN_MCU_TCK0"),
+		MTK_FUNCTION(5, "CONN_MCU_AICE_JCKC"),
+		MTK_FUNCTION(6, "PCIE2_WAKE_N"),
+		MTK_FUNCTION(7, "DBG_MON_A[26]")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(27, "EINT5"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(0, 5),
+		MTK_FUNCTION(0, "GPIO27"),
+		MTK_FUNCTION(1, "URTS3"),
+		MTK_FUNCTION(2, "IDDIG_P1"),
+		MTK_FUNCTION(3, "KROW2"),
+		MTK_FUNCTION(4, "CONN_MCU_TDI"),
+		MTK_FUNCTION(6, "PCIE1_WAKE_N"),
+		MTK_FUNCTION(7, "DBG_MON_A[25]")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(28, "EINT6"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(0, 6),
+		MTK_FUNCTION(0, "GPIO28"),
+		MTK_FUNCTION(1, "DRV_VBUS"),
+		MTK_FUNCTION(3, "KROW1"),
+		MTK_FUNCTION(4, "CONN_MCU_TRST_B"),
+		MTK_FUNCTION(6, "PCIE0_WAKE_N"),
+		MTK_FUNCTION(7, "DBG_MON_A[24]")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(29, "EINT7"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(0, 7),
+		MTK_FUNCTION(0, "GPIO29"),
+		MTK_FUNCTION(1, "IDDIG"),
+		MTK_FUNCTION(2, "MSDC1_WP"),
+		MTK_FUNCTION(3, "KROW0"),
+		MTK_FUNCTION(4, "CONN_MCU_TMS"),
+		MTK_FUNCTION(5, "CONN_MCU_AICE_JMSC"),
+		MTK_FUNCTION(7, "DBG_MON_A[23]"),
+		MTK_FUNCTION(14, "PCIE2_PERST_N")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(30, "I2S5_LRCK"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(0, 12),
+		MTK_FUNCTION(0, "GPIO30"),
+		MTK_FUNCTION(1, "I2S5_LRCK"),
+		MTK_FUNCTION(3, "PCM_SYNC"),
+		MTK_FUNCTION(4, "ANT_SEL1")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(31, "I2S5_MCLK"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(0, 13),
+		MTK_FUNCTION(0, "GPIO31"),
+		MTK_FUNCTION(1, "I2S5_MCLK"),
+		MTK_FUNCTION(4, "ANT_SEL0")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(32, "I2S5_DATA"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(0, 14),
+		MTK_FUNCTION(0, "GPIO32"),
+		MTK_FUNCTION(1, "I2S5_DATA"),
+		MTK_FUNCTION(2, "I2S5_DATA_BYPS"),
+		MTK_FUNCTION(3, "PCM_TX"),
+		MTK_FUNCTION(4, "ANT_SEL3")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(33, "I2S1_DATA"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(0, 15),
+		MTK_FUNCTION(0, "GPIO33"),
+		MTK_FUNCTION(1, "I2S1_DATA"),
+		MTK_FUNCTION(2, "I2S1_DATA_BYPS"),
+		MTK_FUNCTION(3, "PCM_TX"),
+		MTK_FUNCTION(4, "IMG_TEST_CK"),
+		MTK_FUNCTION(5, "G1_RXD0"),
+		MTK_FUNCTION(6, "WCN_PCM_TX"),
+		MTK_FUNCTION(7, "DBG_MON_B[8]")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(34, "I2S1_DATA_IN"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(0, 16),
+		MTK_FUNCTION(0, "GPIO34"),
+		MTK_FUNCTION(1, "I2S1_DATA_IN"),
+		MTK_FUNCTION(3, "PCM_RX"),
+		MTK_FUNCTION(4, "VDEC_TEST_CK"),
+		MTK_FUNCTION(5, "G1_RXD1"),
+		MTK_FUNCTION(6, "WCN_PCM_RX"),
+		MTK_FUNCTION(7, "DBG_MON_B[7]")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(35, "I2S1_BCK"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(0, 17),
+		MTK_FUNCTION(0, "GPIO35"),
+		MTK_FUNCTION(1, "I2S1_BCK"),
+		MTK_FUNCTION(3, "PCM_CLK0"),
+		MTK_FUNCTION(5, "G1_RXD2"),
+		MTK_FUNCTION(6, "WCN_PCM_CLKO"),
+		MTK_FUNCTION(7, "DBG_MON_B[9]")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(36, "I2S1_LRCK"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(0, 18),
+		MTK_FUNCTION(0, "GPIO36"),
+		MTK_FUNCTION(1, "I2S1_LRCK"),
+		MTK_FUNCTION(3, "PCM_SYNC"),
+		MTK_FUNCTION(5, "G1_RXD3"),
+		MTK_FUNCTION(6, "WCN_PCM_SYNC"),
+		MTK_FUNCTION(7, "DBG_MON_B[10]")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(37, "I2S1_MCLK"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(0, 19),
+		MTK_FUNCTION(0, "GPIO37"),
+		MTK_FUNCTION(1, "I2S1_MCLK"),
+		MTK_FUNCTION(5, "G1_RXDV"),
+		MTK_FUNCTION(7, "DBG_MON_B[11]")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(38, "I2S2_DATA"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(0, 20),
+		MTK_FUNCTION(0, "GPIO38"),
+		MTK_FUNCTION(2, "I2S2_DATA_BYPS"),
+		MTK_FUNCTION(3, "PCM_TX"),
+		MTK_FUNCTION(4, "DMIC_DAT0")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(39, "JTMS"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(0, 21),
+		MTK_FUNCTION(0, "GPIO39"),
+		MTK_FUNCTION(1, "JTMS"),
+		MTK_FUNCTION(2, "CONN_MCU_TMS"),
+		MTK_FUNCTION(3, "CONN_MCU_AICE_JMSC"),
+		MTK_FUNCTION(4, "DFD_TMS_XI")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(40, "JTCK"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(0, 22),
+		MTK_FUNCTION(0, "GPIO40"),
+		MTK_FUNCTION(1, "JTCK"),
+		MTK_FUNCTION(2, "CONN_MCU_TCK1"),
+		MTK_FUNCTION(3, "CONN_MCU_AICE_JCKC"),
+		MTK_FUNCTION(4, "DFD_TCK_XI")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(41, "JTDI"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(0, 23),
+		MTK_FUNCTION(0, "GPIO41"),
+		MTK_FUNCTION(1, "JTDI"),
+		MTK_FUNCTION(2, "CONN_MCU_TDI"),
+		MTK_FUNCTION(4, "DFD_TDI_XI")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(42, "JTDO"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(0, 24),
+		MTK_FUNCTION(0, "GPIO42"),
+		MTK_FUNCTION(1, "JTDO"),
+		MTK_FUNCTION(2, "CONN_MCU_TDO"),
+		MTK_FUNCTION(4, "DFD_TDO")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(43, "NCLE"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(0, 25),
+		MTK_FUNCTION(0, "GPIO43"),
+		MTK_FUNCTION(1, "NCLE"),
+		MTK_FUNCTION(2, "EXT_XCS2")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(44, "NCEB1"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(0, 26),
+		MTK_FUNCTION(0, "GPIO44"),
+		MTK_FUNCTION(1, "NCEB1"),
+		MTK_FUNCTION(2, "IDDIG")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(45, "NCEB0"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(0, 27),
+		MTK_FUNCTION(0, "GPIO45"),
+		MTK_FUNCTION(1, "NCEB0"),
+		MTK_FUNCTION(2, "DRV_VBUS")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(46, "IR"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(0, 28),
+		MTK_FUNCTION(0, "GPIO46"),
+		MTK_FUNCTION(1, "IR")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(47, "NREB"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(0, 29),
+		MTK_FUNCTION(0, "GPIO47"),
+		MTK_FUNCTION(1, "NREB"),
+		MTK_FUNCTION(2, "IDDIG_P1")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(48, "NRNB"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(0, 30),
+		MTK_FUNCTION(0, "GPIO48"),
+		MTK_FUNCTION(1, "NRNB"),
+		MTK_FUNCTION(2, "DRV_VBUS_P1")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(49, "I2S0_DATA"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(0, 31),
+		MTK_FUNCTION(0, "GPIO49"),
+		MTK_FUNCTION(1, "I2S0_DATA"),
+		MTK_FUNCTION(2, "I2S0_DATA_BYPS"),
+		MTK_FUNCTION(3, "PCM_TX"),
+		MTK_FUNCTION(6, "WCN_I2S_DO"),
+		MTK_FUNCTION(7, "DBG_MON_B[3]")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(50, "I2S2_BCK"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(0, 32),
+		MTK_FUNCTION(0, "GPIO50"),
+		MTK_FUNCTION(1, "I2S2_BCK"),
+		MTK_FUNCTION(3, "PCM_CLK0"),
+		MTK_FUNCTION(4, "DMIC_SCK1")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(51, "I2S2_DATA_IN"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(0, 33),
+		MTK_FUNCTION(0, "GPIO51"),
+		MTK_FUNCTION(1, "I2S2_DATA_IN"),
+		MTK_FUNCTION(3, "PCM_RX"),
+		MTK_FUNCTION(4, "DMIC_SCK0")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(52, "I2S2_LRCK"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(0, 34),
+		MTK_FUNCTION(0, "GPIO52"),
+		MTK_FUNCTION(1, "I2S2_LRCK"),
+		MTK_FUNCTION(3, "PCM_SYNC"),
+		MTK_FUNCTION(4, "DMIC_DAT1")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(53, "SPI0_CSN"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(0, 35),
+		MTK_FUNCTION(0, "GPIO53"),
+		MTK_FUNCTION(1, "SPI0_CS"),
+		MTK_FUNCTION(3, "SPDIF"),
+		MTK_FUNCTION(4, "ADC_CK"),
+		MTK_FUNCTION(5, "PWM1"),
+		MTK_FUNCTION(7, "DBG_MON_A[7]")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(54, "SPI0_CK"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(0, 36),
+		MTK_FUNCTION(0, "GPIO54"),
+		MTK_FUNCTION(1, "SPI0_CK"),
+		MTK_FUNCTION(3, "SPDIF_IN1"),
+		MTK_FUNCTION(4, "ADC_DAT_IN"),
+		MTK_FUNCTION(7, "DBG_MON_A[10]")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(55, "SPI0_MI"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(0, 37),
+		MTK_FUNCTION(0, "GPIO55"),
+		MTK_FUNCTION(1, "SPI0_MI"),
+		MTK_FUNCTION(2, "SPI0_MO"),
+		MTK_FUNCTION(3, "MSDC1_WP"),
+		MTK_FUNCTION(4, "ADC_WS"),
+		MTK_FUNCTION(5, "PWM2"),
+		MTK_FUNCTION(7, "DBG_MON_A[8]")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(56, "SPI0_MO"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(0, 38),
+		MTK_FUNCTION(0, "GPIO56"),
+		MTK_FUNCTION(1, "SPI0_MO"),
+		MTK_FUNCTION(2, "SPI0_MI"),
+		MTK_FUNCTION(3, "SPDIF_IN0"),
+		MTK_FUNCTION(7, "DBG_MON_A[9]")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(57, "SDA1"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(0, 39),
+		MTK_FUNCTION(0, "GPIO57"),
+		MTK_FUNCTION(1, "SDA1")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(58, "SCL1"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(0, 40),
+		MTK_FUNCTION(0, "GPIO58"),
+		MTK_FUNCTION(1, "SCL1")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(59, "RAMBUF_I_CLK"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO59"),
+		MTK_FUNCTION(1, "RAMBUF_I_CLK")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(60, "WB_RSTB"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(0, 41),
+		MTK_FUNCTION(0, "GPIO60"),
+		MTK_FUNCTION(1, "WB_RSTB"),
+		MTK_FUNCTION(7, "DBG_MON_A[11]")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(61, "F2W_DATA"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(0, 42),
+		MTK_FUNCTION(0, "GPIO61"),
+		MTK_FUNCTION(1, "F2W_DATA"),
+		MTK_FUNCTION(7, "DBG_MON_A[16]")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(62, "F2W_CLK"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(0, 43),
+		MTK_FUNCTION(0, "GPIO62"),
+		MTK_FUNCTION(1, "F2W_CK"),
+		MTK_FUNCTION(7, "DBG_MON_A[15]")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(63, "WB_SCLK"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(0, 44),
+		MTK_FUNCTION(0, "GPIO63"),
+		MTK_FUNCTION(1, "WB_SCLK"),
+		MTK_FUNCTION(7, "DBG_MON_A[13]")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(64, "WB_SDATA"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(0, 45),
+		MTK_FUNCTION(0, "GPIO64"),
+		MTK_FUNCTION(1, "WB_SDATA"),
+		MTK_FUNCTION(7, "DBG_MON_A[12]")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(65, "WB_SEN"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(0, 46),
+		MTK_FUNCTION(0, "GPIO65"),
+		MTK_FUNCTION(1, "WB_SEN"),
+		MTK_FUNCTION(7, "DBG_MON_A[14]")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(66, "WB_CRTL0"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(0, 47),
+		MTK_FUNCTION(0, "GPIO66"),
+		MTK_FUNCTION(1, "WB_CRTL0"),
+		MTK_FUNCTION(5, "DFD_NTRST_XI"),
+		MTK_FUNCTION(7, "DBG_MON_A[17]")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(67, "WB_CRTL1"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(0, 48),
+		MTK_FUNCTION(0, "GPIO67"),
+		MTK_FUNCTION(1, "WB_CRTL1"),
+		MTK_FUNCTION(5, "DFD_TMS_XI"),
+		MTK_FUNCTION(7, "DBG_MON_A[18]")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(68, "WB_CRTL2"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(0, 49),
+		MTK_FUNCTION(0, "GPIO68"),
+		MTK_FUNCTION(1, "WB_CRTL2"),
+		MTK_FUNCTION(5, "DFD_TCK_XI"),
+		MTK_FUNCTION(7, "DBG_MON_A[19]")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(69, "WB_CRTL3"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(0, 50),
+		MTK_FUNCTION(0, "GPIO69"),
+		MTK_FUNCTION(1, "WB_CRTL3"),
+		MTK_FUNCTION(5, "DFD_TDI_XI"),
+		MTK_FUNCTION(7, "DBG_MON_A[20]")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(70, "WB_CRTL4"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(0, 51),
+		MTK_FUNCTION(0, "GPIO70"),
+		MTK_FUNCTION(1, "WB_CRTL4"),
+		MTK_FUNCTION(5, "DFD_TDO"),
+		MTK_FUNCTION(7, "DBG_MON_A[21]")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(71, "WB_CRTL5"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(0, 52),
+		MTK_FUNCTION(0, "GPIO71"),
+		MTK_FUNCTION(1, "WB_CRTL5"),
+		MTK_FUNCTION(7, "DBG_MON_A[22]")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(72, "I2S0_DATA_IN"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(0, 53),
+		MTK_FUNCTION(0, "GPIO72"),
+		MTK_FUNCTION(1, "I2S0_DATA_IN"),
+		MTK_FUNCTION(3, "PCM_RX"),
+		MTK_FUNCTION(4, "PWM0"),
+		MTK_FUNCTION(5, "DISP_PWM"),
+		MTK_FUNCTION(6, "WCN_I2S_DI"),
+		MTK_FUNCTION(7, "DBG_MON_B[2]")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(73, "I2S0_LRCK"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(0, 54),
+		MTK_FUNCTION(0, "GPIO73"),
+		MTK_FUNCTION(1, "I2S0_LRCK"),
+		MTK_FUNCTION(3, "PCM_SYNC"),
+		MTK_FUNCTION(6, "WCN_I2S_LRCK"),
+		MTK_FUNCTION(7, "DBG_MON_B[5]")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(74, "I2S0_BCK"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(0, 55),
+		MTK_FUNCTION(0, "GPIO74"),
+		MTK_FUNCTION(1, "I2S0_BCK"),
+		MTK_FUNCTION(3, "PCM_CLK0"),
+		MTK_FUNCTION(6, "WCN_I2S_BCK"),
+		MTK_FUNCTION(7, "DBG_MON_B[4]")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(75, "SDA0"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(0, 56),
+		MTK_FUNCTION(0, "GPIO75"),
+		MTK_FUNCTION(1, "SDA0")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(76, "SCL0"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(0, 57),
+		MTK_FUNCTION(0, "GPIO76"),
+		MTK_FUNCTION(1, "SCL0")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(77, "SDA2"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(0, 58),
+		MTK_FUNCTION(0, "GPIO77"),
+		MTK_FUNCTION(1, "SDA2")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(78, "SCL2"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(0, 59),
+		MTK_FUNCTION(0, "GPIO78"),
+		MTK_FUNCTION(1, "SCL2")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(79, "URXD0"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(0, 60),
+		MTK_FUNCTION(0, "GPIO79"),
+		MTK_FUNCTION(1, "URXD0"),
+		MTK_FUNCTION(2, "UTXD0")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(80, "UTXD0"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(0, 61),
+		MTK_FUNCTION(0, "GPIO80"),
+		MTK_FUNCTION(1, "UTXD0"),
+		MTK_FUNCTION(2, "URXD0")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(81, "URXD1"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(0, 62),
+		MTK_FUNCTION(0, "GPIO81"),
+		MTK_FUNCTION(1, "URXD1"),
+		MTK_FUNCTION(2, "UTXD1")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(82, "UTXD1"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(0, 63),
+		MTK_FUNCTION(0, "GPIO82"),
+		MTK_FUNCTION(1, "UTXD1"),
+		MTK_FUNCTION(2, "URXD1")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(83, "LCM_RST"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(0, 64),
+		MTK_FUNCTION(0, "GPIO83"),
+		MTK_FUNCTION(1, "LCM_RST"),
+		MTK_FUNCTION(2, "VDAC_CK_XI"),
+		MTK_FUNCTION(7, "DBG_MON_B[1]")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(84, "DSI_TE"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(0, 65),
+		MTK_FUNCTION(0, "GPIO84"),
+		MTK_FUNCTION(1, "DSI_TE"),
+		MTK_FUNCTION(7, "DBG_MON_B[0]")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(85, "MSDC2_CMD"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(0, 66),
+		MTK_FUNCTION(0, "GPIO85"),
+		MTK_FUNCTION(1, "MSDC2_CMD"),
+		MTK_FUNCTION(2, "ANT_SEL0"),
+		MTK_FUNCTION(3, "SDA1"),
+		MTK_FUNCTION(6, "I2SOUT_BCK")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(86, "MSDC2_CLK"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(0, 67),
+		MTK_FUNCTION(0, "GPIO86"),
+		MTK_FUNCTION(1, "MSDC2_CLK"),
+		MTK_FUNCTION(2, "ANT_SEL1"),
+		MTK_FUNCTION(3, "SCL1"),
+		MTK_FUNCTION(6, "I2SOUT_LRCK")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(87, "MSDC2_DAT0"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(0, 68),
+		MTK_FUNCTION(0, "GPIO87"),
+		MTK_FUNCTION(1, "MSDC2_DAT0"),
+		MTK_FUNCTION(2, "ANT_SEL2"),
+		MTK_FUNCTION(5, "UTXD0"),
+		MTK_FUNCTION(6, "I2SOUT_DATA_OUT")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(88, "MSDC2_DAT1"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(0, 71),
+		MTK_FUNCTION(0, "GPIO88"),
+		MTK_FUNCTION(1, "MSDC2_DAT1"),
+		MTK_FUNCTION(2, "ANT_SEL3"),
+		MTK_FUNCTION(3, "PWM0"),
+		MTK_FUNCTION(5, "URXD0"),
+		MTK_FUNCTION(6, "PWM1")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(89, "MSDC2_DAT2"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(0, 72),
+		MTK_FUNCTION(0, "GPIO89"),
+		MTK_FUNCTION(1, "MSDC2_DAT2"),
+		MTK_FUNCTION(2, "ANT_SEL4"),
+		MTK_FUNCTION(3, "SDA2"),
+		MTK_FUNCTION(5, "UTXD1"),
+		MTK_FUNCTION(6, "PWM2")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(90, "MSDC2_DAT3"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(0, 73),
+		MTK_FUNCTION(0, "GPIO90"),
+		MTK_FUNCTION(1, "MSDC2_DAT3"),
+		MTK_FUNCTION(2, "ANT_SEL5"),
+		MTK_FUNCTION(3, "SCL2"),
+		MTK_FUNCTION(4, "EXT_FRAME_SYNC"),
+		MTK_FUNCTION(5, "URXD1"),
+		MTK_FUNCTION(6, "PWM3")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(91, "TDN3"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPI91"),
+		MTK_FUNCTION(1, "TDN3")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(92, "TDP3"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPI92"),
+		MTK_FUNCTION(1, "TDP3")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(93, "TDN2"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPI93"),
+		MTK_FUNCTION(1, "TDN2")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(94, "TDP2"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPI94"),
+		MTK_FUNCTION(1, "TDP2")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(95, "TCN"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPI95"),
+		MTK_FUNCTION(1, "TCN")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(96, "TCP"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPI96"),
+		MTK_FUNCTION(1, "TCP")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(97, "TDN1"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPI97"),
+		MTK_FUNCTION(1, "TDN1")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(98, "TDP1"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPI98"),
+		MTK_FUNCTION(1, "TDP1")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(99, "TDN0"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPI99"),
+		MTK_FUNCTION(1, "TDN0")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(100, "TDP0"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPI100"),
+		MTK_FUNCTION(1, "TDP0")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(101, "SPI2_CSN"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(0, 74),
+		MTK_FUNCTION(0, "GPIO101"),
+		MTK_FUNCTION(1, "SPI2_CS"),
+		MTK_FUNCTION(3, "SCL3"),
+		MTK_FUNCTION(4, "KROW0")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(102, "SPI2_MI"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(0, 75),
+		MTK_FUNCTION(0, "GPIO102"),
+		MTK_FUNCTION(1, "SPI2_MI"),
+		MTK_FUNCTION(2, "SPI2_MO"),
+		MTK_FUNCTION(3, "SDA3"),
+		MTK_FUNCTION(4, "KROW1")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(103, "SPI2_MO"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(0, 76),
+		MTK_FUNCTION(0, "GPIO103"),
+		MTK_FUNCTION(1, "SPI2_MO"),
+		MTK_FUNCTION(2, "SPI2_MI"),
+		MTK_FUNCTION(3, "SCL3"),
+		MTK_FUNCTION(4, "KROW2")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(104, "SPI2_CLK"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(0, 77),
+		MTK_FUNCTION(0, "GPIO104"),
+		MTK_FUNCTION(1, "SPI2_CK"),
+		MTK_FUNCTION(3, "SDA3"),
+		MTK_FUNCTION(4, "KROW3")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(105, "MSDC1_CMD"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(0, 78),
+		MTK_FUNCTION(0, "GPIO105"),
+		MTK_FUNCTION(1, "MSDC1_CMD"),
+		MTK_FUNCTION(2, "ANT_SEL0"),
+		MTK_FUNCTION(3, "SDA1"),
+		MTK_FUNCTION(6, "I2SOUT_BCK"),
+		MTK_FUNCTION(7, "DBG_MON_B[27]")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(106, "MSDC1_CLK"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(0, 79),
+		MTK_FUNCTION(0, "GPIO106"),
+		MTK_FUNCTION(1, "MSDC1_CLK"),
+		MTK_FUNCTION(2, "ANT_SEL1"),
+		MTK_FUNCTION(3, "SCL1"),
+		MTK_FUNCTION(6, "I2SOUT_LRCK"),
+		MTK_FUNCTION(7, "DBG_MON_B[28]")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(107, "MSDC1_DAT0"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(0, 80),
+		MTK_FUNCTION(0, "GPIO107"),
+		MTK_FUNCTION(1, "MSDC1_DAT0"),
+		MTK_FUNCTION(2, "ANT_SEL2"),
+		MTK_FUNCTION(5, "UTXD0"),
+		MTK_FUNCTION(6, "I2SOUT_DATA_OUT"),
+		MTK_FUNCTION(7, "DBG_MON_B[26]")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(108, "MSDC1_DAT1"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(0, 81),
+		MTK_FUNCTION(0, "GPIO108"),
+		MTK_FUNCTION(1, "MSDC1_DAT1"),
+		MTK_FUNCTION(2, "ANT_SEL3"),
+		MTK_FUNCTION(3, "PWM0"),
+		MTK_FUNCTION(5, "URXD0"),
+		MTK_FUNCTION(6, "PWM1"),
+		MTK_FUNCTION(7, "DBG_MON_B[25]")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(109, "MSDC1_DAT2"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(0, 82),
+		MTK_FUNCTION(0, "GPIO109"),
+		MTK_FUNCTION(1, "MSDC1_DAT2"),
+		MTK_FUNCTION(2, "ANT_SEL4"),
+		MTK_FUNCTION(3, "SDA2"),
+		MTK_FUNCTION(5, "UTXD1"),
+		MTK_FUNCTION(6, "PWM2"),
+		MTK_FUNCTION(7, "DBG_MON_B[24]")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(110, "MSDC1_DAT3"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(0, 83),
+		MTK_FUNCTION(0, "GPIO110"),
+		MTK_FUNCTION(1, "MSDC1_DAT3"),
+		MTK_FUNCTION(2, "ANT_SEL5"),
+		MTK_FUNCTION(3, "SCL2"),
+		MTK_FUNCTION(4, "EXT_FRAME_SYNC"),
+		MTK_FUNCTION(5, "URXD1"),
+		MTK_FUNCTION(6, "PWM3"),
+		MTK_FUNCTION(7, "DBG_MON_B[23]")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(111, "MSDC0_DAT7"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(0, 84),
+		MTK_FUNCTION(0, "GPIO111"),
+		MTK_FUNCTION(1, "MSDC0_DAT7"),
+		MTK_FUNCTION(4, "NLD7")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(112, "MSDC0_DAT6"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(0, 85),
+		MTK_FUNCTION(0, "GPIO112"),
+		MTK_FUNCTION(1, "MSDC0_DAT6"),
+		MTK_FUNCTION(4, "NLD6")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(113, "MSDC0_DAT5"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(0, 86),
+		MTK_FUNCTION(0, "GPIO113"),
+		MTK_FUNCTION(1, "MSDC0_DAT5"),
+		MTK_FUNCTION(4, "NLD5")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(114, "MSDC0_DAT4"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(0, 87),
+		MTK_FUNCTION(0, "GPIO114"),
+		MTK_FUNCTION(1, "MSDC0_DAT4"),
+		MTK_FUNCTION(4, "NLD4")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(115, "MSDC0_RSTB"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(0, 88),
+		MTK_FUNCTION(0, "GPIO115"),
+		MTK_FUNCTION(1, "MSDC0_RSTB"),
+		MTK_FUNCTION(4, "NLD8")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(116, "MSDC0_CMD"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(0, 89),
+		MTK_FUNCTION(0, "GPIO116"),
+		MTK_FUNCTION(1, "MSDC0_CMD"),
+		MTK_FUNCTION(4, "NALE")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(117, "MSDC0_CLK"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(0, 90),
+		MTK_FUNCTION(0, "GPIO117"),
+		MTK_FUNCTION(1, "MSDC0_CLK"),
+		MTK_FUNCTION(4, "NWEB")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(118, "MSDC0_DAT3"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(0, 91),
+		MTK_FUNCTION(0, "GPIO118"),
+		MTK_FUNCTION(1, "MSDC0_DAT3"),
+		MTK_FUNCTION(4, "NLD3")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(119, "MSDC0_DAT2"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(0, 92),
+		MTK_FUNCTION(0, "GPIO119"),
+		MTK_FUNCTION(1, "MSDC0_DAT2"),
+		MTK_FUNCTION(4, "NLD2")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(120, "MSDC0_DAT1"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(0, 93),
+		MTK_FUNCTION(0, "GPIO120"),
+		MTK_FUNCTION(1, "MSDC0_DAT1"),
+		MTK_FUNCTION(4, "NLD1")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(121, "MSDC0_DAT0"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(0, 94),
+		MTK_FUNCTION(0, "GPIO121"),
+		MTK_FUNCTION(1, "MSDC0_DAT0"),
+		MTK_FUNCTION(4, "NLD0"),
+		MTK_FUNCTION(5, "WATCHDOG")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(122, "CEC"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(0, 95),
+		MTK_FUNCTION(0, "GPIO122"),
+		MTK_FUNCTION(1, "CEC"),
+		MTK_FUNCTION(4, "SDA2"),
+		MTK_FUNCTION(5, "URXD0")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(123, "HTPLG"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(0, 96),
+		MTK_FUNCTION(0, "GPIO123"),
+		MTK_FUNCTION(1, "HTPLG"),
+		MTK_FUNCTION(4, "SCL2"),
+		MTK_FUNCTION(5, "UTXD0")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(124, "HDMISCK"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(0, 97),
+		MTK_FUNCTION(0, "GPIO124"),
+		MTK_FUNCTION(1, "HDMISCK"),
+		MTK_FUNCTION(4, "SDA1"),
+		MTK_FUNCTION(5, "PWM3")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(125, "HDMISD"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(0, 98),
+		MTK_FUNCTION(0, "GPIO125"),
+		MTK_FUNCTION(1, "HDMISD"),
+		MTK_FUNCTION(4, "SCL1"),
+		MTK_FUNCTION(5, "PWM4")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(126, "I2S0_MCLK"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(0, 99),
+		MTK_FUNCTION(0, "GPIO126"),
+		MTK_FUNCTION(1, "I2S0_MCLK"),
+		MTK_FUNCTION(6, "WCN_I2S_MCLK"),
+		MTK_FUNCTION(7, "DBG_MON_B[6]")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(127, "RAMBUF_IDATA0"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO127"),
+		MTK_FUNCTION(1, "RAMBUF_IDATA0")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(128, "RAMBUF_IDATA1"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO128"),
+		MTK_FUNCTION(1, "RAMBUF_IDATA1")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(129, "RAMBUF_IDATA2"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO129"),
+		MTK_FUNCTION(1, "RAMBUF_IDATA2")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(130, "RAMBUF_IDATA3"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO130"),
+		MTK_FUNCTION(1, "RAMBUF_IDATA3")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(131, "RAMBUF_IDATA4"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO131"),
+		MTK_FUNCTION(1, "RAMBUF_IDATA4")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(132, "RAMBUF_IDATA5"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO132"),
+		MTK_FUNCTION(1, "RAMBUF_IDATA5")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(133, "RAMBUF_IDATA6"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO133"),
+		MTK_FUNCTION(1, "RAMBUF_IDATA6")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(134, "RAMBUF_IDATA7"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO134"),
+		MTK_FUNCTION(1, "RAMBUF_IDATA7")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(135, "RAMBUF_IDATA8"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO135"),
+		MTK_FUNCTION(1, "RAMBUF_IDATA8")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(136, "RAMBUF_IDATA9"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO136"),
+		MTK_FUNCTION(1, "RAMBUF_IDATA9")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(137, "RAMBUF_IDATA10"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO137"),
+		MTK_FUNCTION(1, "RAMBUF_IDATA10")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(138, "RAMBUF_IDATA11"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO138"),
+		MTK_FUNCTION(1, "RAMBUF_IDATA11")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(139, "RAMBUF_IDATA12"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO139"),
+		MTK_FUNCTION(1, "RAMBUF_IDATA12")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(140, "RAMBUF_IDATA13"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO140"),
+		MTK_FUNCTION(1, "RAMBUF_IDATA13")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(141, "RAMBUF_IDATA14"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO141"),
+		MTK_FUNCTION(1, "RAMBUF_IDATA14")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(142, "RAMBUF_IDATA15"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO142"),
+		MTK_FUNCTION(1, "RAMBUF_IDATA15")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(143, "RAMBUF_ODATA0"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO143"),
+		MTK_FUNCTION(1, "RAMBUF_ODATA0")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(144, "RAMBUF_ODATA1"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO144"),
+		MTK_FUNCTION(1, "RAMBUF_ODATA1")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(145, "RAMBUF_ODATA2"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO145"),
+		MTK_FUNCTION(1, "RAMBUF_ODATA2")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(146, "RAMBUF_ODATA3"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO146"),
+		MTK_FUNCTION(1, "RAMBUF_ODATA3")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(147, "RAMBUF_ODATA4"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO147"),
+		MTK_FUNCTION(1, "RAMBUF_ODATA4")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(148, "RAMBUF_ODATA5"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO148"),
+		MTK_FUNCTION(1, "RAMBUF_ODATA5")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(149, "RAMBUF_ODATA6"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO149"),
+		MTK_FUNCTION(1, "RAMBUF_ODATA6")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(150, "RAMBUF_ODATA7"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO150"),
+		MTK_FUNCTION(1, "RAMBUF_ODATA7")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(151, "RAMBUF_ODATA8"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO151"),
+		MTK_FUNCTION(1, "RAMBUF_ODATA8")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(152, "RAMBUF_ODATA9"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO152"),
+		MTK_FUNCTION(1, "RAMBUF_ODATA9")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(153, "RAMBUF_ODATA10"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO153"),
+		MTK_FUNCTION(1, "RAMBUF_ODATA10")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(154, "RAMBUF_ODATA11"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO154"),
+		MTK_FUNCTION(1, "RAMBUF_ODATA11")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(155, "RAMBUF_ODATA12"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO155"),
+		MTK_FUNCTION(1, "RAMBUF_ODATA12")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(156, "RAMBUF_ODATA13"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO156"),
+		MTK_FUNCTION(1, "RAMBUF_ODATA13")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(157, "RAMBUF_ODATA14"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO157"),
+		MTK_FUNCTION(1, "RAMBUF_ODATA14")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(158, "RAMBUF_ODATA15"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO158"),
+		MTK_FUNCTION(1, "RAMBUF_ODATA15")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(159, "RAMBUF_BE0"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO159"),
+		MTK_FUNCTION(1, "RAMBUF_BE0")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(160, "RAMBUF_BE1"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO160"),
+		MTK_FUNCTION(1, "RAMBUF_BE1")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(161, "AP2PT_INT"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO161"),
+		MTK_FUNCTION(1, "AP2PT_INT")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(162, "AP2PT_INT_CLR"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO162"),
+		MTK_FUNCTION(1, "AP2PT_INT_CLR")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(163, "PT2AP_INT"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO163"),
+		MTK_FUNCTION(1, "PT2AP_INT")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(164, "PT2AP_INT_CLR"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO164"),
+		MTK_FUNCTION(1, "PT2AP_INT_CLR")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(165, "AP2UP_INT"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO165"),
+		MTK_FUNCTION(1, "AP2UP_INT")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(166, "AP2UP_INT_CLR"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO166"),
+		MTK_FUNCTION(1, "AP2UP_INT_CLR")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(167, "UP2AP_INT"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO167"),
+		MTK_FUNCTION(1, "UP2AP_INT")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(168, "UP2AP_INT_CLR"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO168"),
+		MTK_FUNCTION(1, "UP2AP_INT_CLR")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(169, "RAMBUF_ADDR0"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO169"),
+		MTK_FUNCTION(1, "RAMBUF_ADDR0")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(170, "RAMBUF_ADDR1"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO170"),
+		MTK_FUNCTION(1, "RAMBUF_ADDR1")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(171, "RAMBUF_ADDR2"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO171"),
+		MTK_FUNCTION(1, "RAMBUF_ADDR2")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(172, "RAMBUF_ADDR3"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO172"),
+		MTK_FUNCTION(1, "RAMBUF_ADDR3")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(173, "RAMBUF_ADDR4"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO173"),
+		MTK_FUNCTION(1, "RAMBUF_ADDR4")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(174, "RAMBUF_ADDR5"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO174"),
+		MTK_FUNCTION(1, "RAMBUF_ADDR5")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(175, "RAMBUF_ADDR6"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO175"),
+		MTK_FUNCTION(1, "RAMBUF_ADDR6")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(176, "RAMBUF_ADDR7"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO176"),
+		MTK_FUNCTION(1, "RAMBUF_ADDR7")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(177, "RAMBUF_ADDR8"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO177"),
+		MTK_FUNCTION(1, "RAMBUF_ADDR8")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(178, "RAMBUF_ADDR9"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO178"),
+		MTK_FUNCTION(1, "RAMBUF_ADDR9")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(179, "RAMBUF_ADDR10"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO179"),
+		MTK_FUNCTION(1, "RAMBUF_ADDR10")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(180, "RAMBUF_RW"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO180"),
+		MTK_FUNCTION(1, "RAMBUF_RW")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(181, "RAMBUF_LAST"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO181"),
+		MTK_FUNCTION(1, "RAMBUF_LAST")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(182, "RAMBUF_HP"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO182"),
+		MTK_FUNCTION(1, "RAMBUF_HP")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(183, "RAMBUF_REQ"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO183"),
+		MTK_FUNCTION(1, "RAMBUF_REQ")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(184, "RAMBUF_ALE"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO184"),
+		MTK_FUNCTION(1, "RAMBUF_ALE")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(185, "RAMBUF_DLE"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO185"),
+		MTK_FUNCTION(1, "RAMBUF_DLE")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(186, "RAMBUF_WDLE"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO186"),
+		MTK_FUNCTION(1, "RAMBUF_WDLE")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(187, "RAMBUF_O_CLK"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO187"),
+		MTK_FUNCTION(1, "RAMBUF_O_CLK")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(188, "I2S2_MCLK"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(0, 100),
+		MTK_FUNCTION(0, "GPIO188"),
+		MTK_FUNCTION(1, "I2S2_MCLK")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(189, "I2S3_DATA"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(0, 101),
+		MTK_FUNCTION(0, "GPIO189"),
+		MTK_FUNCTION(2, "I2S3_DATA_BYPS"),
+		MTK_FUNCTION(3, "PCM_TX")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(190, "I2S3_DATA_IN"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(0, 102),
+		MTK_FUNCTION(0, "GPIO190"),
+		MTK_FUNCTION(1, "I2S3_DATA_IN"),
+		MTK_FUNCTION(3, "PCM_RX")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(191, "I2S3_BCK"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(0, 103),
+		MTK_FUNCTION(0, "GPIO191"),
+		MTK_FUNCTION(1, "I2S3_BCK"),
+		MTK_FUNCTION(3, "PCM_CLK0")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(192, "I2S3_LRCK"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(0, 104),
+		MTK_FUNCTION(0, "GPIO192"),
+		MTK_FUNCTION(1, "I2S3_LRCK"),
+		MTK_FUNCTION(3, "PCM_SYNC")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(193, "I2S3_MCLK"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(0, 105),
+		MTK_FUNCTION(0, "GPIO193"),
+		MTK_FUNCTION(1, "I2S3_MCLK")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(194, "I2S4_DATA"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(0, 106),
+		MTK_FUNCTION(0, "GPIO194"),
+		MTK_FUNCTION(1, "I2S4_DATA"),
+		MTK_FUNCTION(2, "I2S4_DATA_BYPS"),
+		MTK_FUNCTION(3, "PCM_TX")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(195, "I2S4_DATA_IN"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(0, 107),
+		MTK_FUNCTION(0, "GPIO195"),
+		MTK_FUNCTION(1, "I2S4_DATA_IN"),
+		MTK_FUNCTION(3, "PCM_RX")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(196, "I2S4_BCK"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(0, 108),
+		MTK_FUNCTION(0, "GPIO196"),
+		MTK_FUNCTION(1, "I2S4_BCK"),
+		MTK_FUNCTION(3, "PCM_CLK0")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(197, "I2S4_LRCK"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(0, 109),
+		MTK_FUNCTION(0, "GPIO197"),
+		MTK_FUNCTION(1, "I2S4_LRCK"),
+		MTK_FUNCTION(3, "PCM_SYNC")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(198, "I2S4_MCLK"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(0, 110),
+		MTK_FUNCTION(0, "GPIO198"),
+		MTK_FUNCTION(1, "I2S4_MCLK")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(199, "SPI1_CLK"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(0, 111),
+		MTK_FUNCTION(0, "GPIO199"),
+		MTK_FUNCTION(1, "SPI1_CK"),
+		MTK_FUNCTION(3, "EXT_FRAME_SYNC"),
+		MTK_FUNCTION(4, "KCOL3"),
+		MTK_FUNCTION(7, "DBG_MON_B[15]")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(200, "SPDIF_OUT"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(0, 112),
+		MTK_FUNCTION(0, "GPIO200"),
+		MTK_FUNCTION(1, "SPDIF_OUT"),
+		MTK_FUNCTION(5, "G1_TXD3"),
+		MTK_FUNCTION(6, "URXD2"),
+		MTK_FUNCTION(7, "DBG_MON_B[16]")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(201, "SPDIF_IN0"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(0, 113),
+		MTK_FUNCTION(0, "GPIO201"),
+		MTK_FUNCTION(1, "SPDIF_IN0"),
+		MTK_FUNCTION(5, "G1_TXEN"),
+		MTK_FUNCTION(6, "UTXD2"),
+		MTK_FUNCTION(7, "DBG_MON_B[17]")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(202, "SPDIF_IN1"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(0, 114),
+		MTK_FUNCTION(0, "GPIO202"),
+		MTK_FUNCTION(1, "SPDIF_IN1")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(203, "PWM0"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(0, 115),
+		MTK_FUNCTION(0, "GPIO203"),
+		MTK_FUNCTION(1, "PWM0"),
+		MTK_FUNCTION(2, "DISP_PWM"),
+		MTK_FUNCTION(5, "G1_TXD2"),
+		MTK_FUNCTION(7, "DBG_MON_B[18]"),
+		MTK_FUNCTION(9, "I2S2_DATA")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(204, "PWM1"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(0, 116),
+		MTK_FUNCTION(0, "GPIO204"),
+		MTK_FUNCTION(1, "PWM1"),
+		MTK_FUNCTION(2, "CLKM3"),
+		MTK_FUNCTION(5, "G1_TXD1"),
+		MTK_FUNCTION(7, "DBG_MON_B[19]"),
+		MTK_FUNCTION(9, "I2S3_DATA")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(205, "PWM2"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(0, 117),
+		MTK_FUNCTION(0, "GPIO205"),
+		MTK_FUNCTION(1, "PWM2"),
+		MTK_FUNCTION(2, "CLKM2"),
+		MTK_FUNCTION(5, "G1_TXD0"),
+		MTK_FUNCTION(7, "DBG_MON_B[20]")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(206, "PWM3"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(0, 118),
+		MTK_FUNCTION(0, "GPIO206"),
+		MTK_FUNCTION(1, "PWM3"),
+		MTK_FUNCTION(2, "CLKM1"),
+		MTK_FUNCTION(3, "EXT_FRAME_SYNC"),
+		MTK_FUNCTION(5, "G1_TXC"),
+		MTK_FUNCTION(7, "DBG_MON_B[21]")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(207, "PWM4"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(0, 119),
+		MTK_FUNCTION(0, "GPIO207"),
+		MTK_FUNCTION(1, "PWM4"),
+		MTK_FUNCTION(2, "CLKM0"),
+		MTK_FUNCTION(3, "EXT_FRAME_SYNC"),
+		MTK_FUNCTION(5, "G1_RXC"),
+		MTK_FUNCTION(7, "DBG_MON_B[22]")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(208, "AUD_EXT_CK1"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(0, 120),
+		MTK_FUNCTION(0, "GPIO208"),
+		MTK_FUNCTION(1, "AUD_EXT_CK1"),
+		MTK_FUNCTION(2, "PWM0"),
+		MTK_FUNCTION(4, "ANT_SEL5"),
+		MTK_FUNCTION(5, "DISP_PWM"),
+		MTK_FUNCTION(7, "DBG_MON_A[31]"),
+		MTK_FUNCTION(11, "PCIE0_PERST_N")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(209, "AUD_EXT_CK2"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(0, 121),
+		MTK_FUNCTION(0, "GPIO209"),
+		MTK_FUNCTION(1, "AUD_EXT_CK2"),
+		MTK_FUNCTION(2, "MSDC1_WP"),
+		MTK_FUNCTION(5, "PWM1"),
+		MTK_FUNCTION(7, "DBG_MON_A[32]"),
+		MTK_FUNCTION(11, "PCIE1_PERST_N")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(210, "AUD_CLOCK"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO210"),
+		MTK_FUNCTION(1, "AUD_CLOCK")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(211, "DVP_RESET"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO211"),
+		MTK_FUNCTION(1, "DVP_RESET")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(212, "DVP_CLOCK"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO212"),
+		MTK_FUNCTION(1, "DVP_CLOCK")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(213, "DVP_CS"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO213"),
+		MTK_FUNCTION(1, "DVP_CS")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(214, "DVP_CK"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO214"),
+		MTK_FUNCTION(1, "DVP_CK")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(215, "DVP_DI"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO215"),
+		MTK_FUNCTION(1, "DVP_DI")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(216, "DVP_DO"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO216"),
+		MTK_FUNCTION(1, "DVP_DO")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(217, "AP_CS"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO217"),
+		MTK_FUNCTION(1, "AP_CS")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(218, "AP_CK"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO218"),
+		MTK_FUNCTION(1, "AP_CK")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(219, "AP_DI"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO219"),
+		MTK_FUNCTION(1, "AP_DI")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(220, "AP_DO"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO220"),
+		MTK_FUNCTION(1, "AP_DO")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(221, "DVD_BCLK"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO221"),
+		MTK_FUNCTION(1, "DVD_BCLK")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(222, "T8032_CLK"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO222"),
+		MTK_FUNCTION(1, "T8032_CLK")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(223, "AP_BCLK"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO223"),
+		MTK_FUNCTION(1, "AP_BCLK")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(224, "HOST_CS"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO224"),
+		MTK_FUNCTION(1, "HOST_CS")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(225, "HOST_CK"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO225"),
+		MTK_FUNCTION(1, "HOST_CK")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(226, "HOST_DO0"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO226"),
+		MTK_FUNCTION(1, "HOST_DO0")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(227, "HOST_DO1"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO227"),
+		MTK_FUNCTION(1, "HOST_DO1")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(228, "SLV_CS"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO228"),
+		MTK_FUNCTION(1, "SLV_CS")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(229, "SLV_CK"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO229"),
+		MTK_FUNCTION(1, "SLV_CK")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(230, "SLV_DI0"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO230"),
+		MTK_FUNCTION(1, "SLV_DI0")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(231, "SLV_DI1"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO231"),
+		MTK_FUNCTION(1, "SLV_DI1")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(232, "AP2DSP_INT"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO232"),
+		MTK_FUNCTION(1, "AP2DSP_INT")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(233, "AP2DSP_INT_CLR"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO233"),
+		MTK_FUNCTION(1, "AP2DSP_INT_CLR")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(234, "DSP2AP_INT"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO234"),
+		MTK_FUNCTION(1, "DSP2AP_INT")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(235, "DSP2AP_INT_CLR"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO235"),
+		MTK_FUNCTION(1, "DSP2AP_INT_CLR")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(236, "EXT_SDIO3"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(0, 122),
+		MTK_FUNCTION(0, "GPIO236"),
+		MTK_FUNCTION(1, "EXT_SDIO3"),
+		MTK_FUNCTION(2, "IDDIG"),
+		MTK_FUNCTION(7, "DBG_MON_A[1]")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(237, "EXT_SDIO2"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(0, 123),
+		MTK_FUNCTION(0, "GPIO237"),
+		MTK_FUNCTION(1, "EXT_SDIO2"),
+		MTK_FUNCTION(2, "DRV_VBUS")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(238, "EXT_SDIO1"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(0, 124),
+		MTK_FUNCTION(0, "GPIO238"),
+		MTK_FUNCTION(1, "EXT_SDIO1"),
+		MTK_FUNCTION(2, "IDDIG_P1")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(239, "EXT_SDIO0"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(0, 125),
+		MTK_FUNCTION(0, "GPIO239"),
+		MTK_FUNCTION(1, "EXT_SDIO0"),
+		MTK_FUNCTION(2, "DRV_VBUS_P1")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(240, "EXT_XCS"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(0, 126),
+		MTK_FUNCTION(0, "GPIO240"),
+		MTK_FUNCTION(1, "EXT_XCS")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(241, "EXT_SCK"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(0, 127),
+		MTK_FUNCTION(0, "GPIO241"),
+		MTK_FUNCTION(1, "EXT_SCK")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(242, "URTS2"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(0, 128),
+		MTK_FUNCTION(0, "GPIO242"),
+		MTK_FUNCTION(1, "URTS2"),
+		MTK_FUNCTION(2, "UTXD3"),
+		MTK_FUNCTION(3, "URXD3"),
+		MTK_FUNCTION(4, "SCL1"),
+		MTK_FUNCTION(7, "DBG_MON_B[32]")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(243, "UCTS2"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(0, 129),
+		MTK_FUNCTION(0, "GPIO243"),
+		MTK_FUNCTION(1, "UCTS2"),
+		MTK_FUNCTION(2, "URXD3"),
+		MTK_FUNCTION(3, "UTXD3"),
+		MTK_FUNCTION(4, "SDA1"),
+		MTK_FUNCTION(7, "DBG_MON_A[6]")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(244, "HDMI_SDA_RX"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(0, 130),
+		MTK_FUNCTION(0, "GPIO244"),
+		MTK_FUNCTION(1, "HDMI_SDA_RX")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(245, "HDMI_SCL_RX"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(0, 131),
+		MTK_FUNCTION(0, "GPIO245"),
+		MTK_FUNCTION(1, "HDMI_SCL_RX")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(246, "MHL_SENCE"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(0, 132),
+		MTK_FUNCTION(0, "GPIO246")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(247, "HDMI_HPD_CBUS_RX"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(0, 69),
+		MTK_FUNCTION(0, "GPIO247"),
+		MTK_FUNCTION(1, "HDMI_HPD_RX")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(248, "HDMI_TESTOUTP_RX"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(0, 133),
+		MTK_FUNCTION(0, "GPIO248"),
+		MTK_FUNCTION(1, "HDMI_TESTOUTP_RX")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(249, "MSDC0E_RSTB"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(0, 134),
+		MTK_FUNCTION(0, "GPIO249"),
+		MTK_FUNCTION(1, "MSDC0E_RSTB")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(250, "MSDC0E_DAT7"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(0, 135),
+		MTK_FUNCTION(0, "GPIO250"),
+		MTK_FUNCTION(1, "MSDC3_DAT7"),
+		MTK_FUNCTION(6, "PCIE0_CLKREQ_N")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(251, "MSDC0E_DAT6"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(0, 136),
+		MTK_FUNCTION(0, "GPIO251"),
+		MTK_FUNCTION(1, "MSDC3_DAT6"),
+		MTK_FUNCTION(6, "PCIE0_WAKE_N")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(252, "MSDC0E_DAT5"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(0, 137),
+		MTK_FUNCTION(0, "GPIO252"),
+		MTK_FUNCTION(1, "MSDC3_DAT5"),
+		MTK_FUNCTION(6, "PCIE1_CLKREQ_N")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(253, "MSDC0E_DAT4"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(0, 138),
+		MTK_FUNCTION(0, "GPIO253"),
+		MTK_FUNCTION(1, "MSDC3_DAT4"),
+		MTK_FUNCTION(6, "PCIE1_WAKE_N")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(254, "MSDC0E_DAT3"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(0, 139),
+		MTK_FUNCTION(0, "GPIO254"),
+		MTK_FUNCTION(1, "MSDC3_DAT3"),
+		MTK_FUNCTION(6, "PCIE2_CLKREQ_N")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(255, "MSDC0E_DAT2"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(0, 140),
+		MTK_FUNCTION(0, "GPIO255"),
+		MTK_FUNCTION(1, "MSDC3_DAT2"),
+		MTK_FUNCTION(6, "PCIE2_WAKE_N")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(256, "MSDC0E_DAT1"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(0, 141),
+		MTK_FUNCTION(0, "GPIO256"),
+		MTK_FUNCTION(1, "MSDC3_DAT1")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(257, "MSDC0E_DAT0"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(0, 142),
+		MTK_FUNCTION(0, "GPIO257"),
+		MTK_FUNCTION(1, "MSDC3_DAT0")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(258, "MSDC0E_CMD"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(0, 143),
+		MTK_FUNCTION(0, "GPIO258"),
+		MTK_FUNCTION(1, "MSDC3_CMD")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(259, "MSDC0E_CLK"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(0, 144),
+		MTK_FUNCTION(0, "GPIO259"),
+		MTK_FUNCTION(1, "MSDC3_CLK")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(260, "MSDC0E_DSL"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(0, 145),
+		MTK_FUNCTION(0, "GPIO260"),
+		MTK_FUNCTION(1, "MSDC3_DSL")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(261, "MSDC1_INS"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(0, 146),
+		MTK_FUNCTION(0, "GPIO261"),
+		MTK_FUNCTION(1, "MSDC1_INS"),
+		MTK_FUNCTION(7, "DBG_MON_B[29]")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(262, "G2_TXEN"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(0, 8),
+		MTK_FUNCTION(0, "GPIO262"),
+		MTK_FUNCTION(1, "G2_TXEN")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(263, "G2_TXD3"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(0, 9),
+		MTK_FUNCTION(0, "GPIO263"),
+		MTK_FUNCTION(1, "G2_TXD3"),
+		MTK_FUNCTION(6, "ANT_SEL5")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(264, "G2_TXD2"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(0, 10),
+		MTK_FUNCTION(0, "GPIO264"),
+		MTK_FUNCTION(1, "G2_TXD2"),
+		MTK_FUNCTION(6, "ANT_SEL4")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(265, "G2_TXD1"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(0, 11),
+		MTK_FUNCTION(0, "GPIO265"),
+		MTK_FUNCTION(1, "G2_TXD1"),
+		MTK_FUNCTION(6, "ANT_SEL3")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(266, "G2_TXD0"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO266"),
+		MTK_FUNCTION(1, "G2_TXD0"),
+		MTK_FUNCTION(6, "ANT_SEL2")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(267, "G2_TXC"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO267"),
+		MTK_FUNCTION(1, "G2_TXC")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(268, "G2_RXC"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO268"),
+		MTK_FUNCTION(1, "G2_RXC")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(269, "G2_RXD0"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO269"),
+		MTK_FUNCTION(1, "G2_RXD0")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(270, "G2_RXD1"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO270"),
+		MTK_FUNCTION(1, "G2_RXD1")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(271, "G2_RXD2"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO271"),
+		MTK_FUNCTION(1, "G2_RXD2")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(272, "G2_RXD3"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO272"),
+		MTK_FUNCTION(1, "G2_RXD3")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(273, "ESW_INT"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(0, 168),
+		MTK_FUNCTION(0, "GPIO273"),
+		MTK_FUNCTION(1, "ESW_INT")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(274, "G2_RXDV"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO274"),
+		MTK_FUNCTION(1, "G2_RXDV")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(275, "MDC"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO275"),
+		MTK_FUNCTION(1, "MDC"),
+		MTK_FUNCTION(6, "ANT_SEL0")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(276, "MDIO"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO276"),
+		MTK_FUNCTION(1, "MDIO"),
+		MTK_FUNCTION(6, "ANT_SEL1")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(277, "ESW_RST"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO277"),
+		MTK_FUNCTION(1, "ESW_RST")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(278, "JTAG_RESET"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(0, 147),
+		MTK_FUNCTION(0, "GPIO278"),
+		MTK_FUNCTION(1, "JTAG_RESET")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(279, "USB3_RES_BOND"),
+		NULL, "mt2701",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO279"),
+		MTK_FUNCTION(1, "USB3_RES_BOND")
+	),
+};
+
+#endif /* __PINCTRL_MTK_MT2701_H */
diff --git a/drivers/pinctrl/mediatek/pinctrl-mtk-mt7623.h b/drivers/pinctrl/mediatek/pinctrl-mtk-mt7623.h
new file mode 100644
index 0000000..3472a76
--- /dev/null
+++ b/drivers/pinctrl/mediatek/pinctrl-mtk-mt7623.h
@@ -0,0 +1,1936 @@
+/*
+ * Copyright (c) 2016 John Crispin <blogic@openwrt.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __PINCTRL_MTK_MT7623_H
+#define __PINCTRL_MTK_MT7623_H
+
+#include <linux/pinctrl/pinctrl.h>
+#include "pinctrl-mtk-common.h"
+
+static const struct mtk_desc_pin mtk_pins_mt7623[] = {
+	MTK_PIN(
+		PINCTRL_PIN(0, "PWRAP_SPI0_MI"),
+		"J20", "mt7623",
+		MTK_EINT_FUNCTION(0, 148),
+		MTK_FUNCTION(0, "GPIO0"),
+		MTK_FUNCTION(1, "PWRAP_SPIDO"),
+		MTK_FUNCTION(2, "PWRAP_SPIDI")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(1, "PWRAP_SPI0_MO"),
+		"D10", "mt7623",
+		MTK_EINT_FUNCTION(0, 149),
+		MTK_FUNCTION(0, "GPIO1"),
+		MTK_FUNCTION(1, "PWRAP_SPIDI"),
+		MTK_FUNCTION(2, "PWRAP_SPIDO")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(2, "PWRAP_INT"),
+		"E11", "mt7623",
+		MTK_EINT_FUNCTION(0, 150),
+		MTK_FUNCTION(0, "GPIO2"),
+		MTK_FUNCTION(1, "PWRAP_INT")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(3, "PWRAP_SPI0_CK"),
+		"H12", "mt7623",
+		MTK_EINT_FUNCTION(0, 151),
+		MTK_FUNCTION(0, "GPIO3"),
+		MTK_FUNCTION(1, "PWRAP_SPICK_I")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(4, "PWRAP_SPI0_CSN"),
+		"E12", "mt7623",
+		MTK_EINT_FUNCTION(0, 152),
+		MTK_FUNCTION(0, "GPIO4"),
+		MTK_FUNCTION(1, "PWRAP_SPICS_B_I")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(5, "PWRAP_SPI0_CK2"),
+		"H11", "mt7623",
+		MTK_EINT_FUNCTION(0, 155),
+		MTK_FUNCTION(0, "GPIO5"),
+		MTK_FUNCTION(1, "PWRAP_SPICK2_I")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(6, "PWRAP_SPI0_CSN2"),
+		"G11", "mt7623",
+		MTK_EINT_FUNCTION(0, 156),
+		MTK_FUNCTION(0, "GPIO6"),
+		MTK_FUNCTION(1, "PWRAP_SPICS2_B_I")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(7, "SPI1_CSN"),
+		"G19", "mt7623",
+		MTK_EINT_FUNCTION(0, 153),
+		MTK_FUNCTION(0, "GPIO7"),
+		MTK_FUNCTION(1, "SPI1_CS")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(8, "SPI1_MI"),
+		"F19", "mt7623",
+		MTK_EINT_FUNCTION(0, 154),
+		MTK_FUNCTION(0, "GPIO8"),
+		MTK_FUNCTION(1, "SPI1_MI"),
+		MTK_FUNCTION(2, "SPI1_MO")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(9, "SPI1_MO"),
+		"G20", "mt7623",
+		MTK_EINT_FUNCTION(0, 157),
+		MTK_FUNCTION(0, "GPIO9"),
+		MTK_FUNCTION(1, "SPI1_MO"),
+		MTK_FUNCTION(2, "SPI1_MI")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(10, "RTC32K_CK"),
+		"A13", "mt7623",
+		MTK_EINT_FUNCTION(0, 158),
+		MTK_FUNCTION(0, "GPIO10"),
+		MTK_FUNCTION(1, "RTC32K_CK")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(11, "WATCHDOG"),
+		"D14", "mt7623",
+		MTK_EINT_FUNCTION(0, 159),
+		MTK_FUNCTION(0, "GPIO11"),
+		MTK_FUNCTION(1, "WATCHDOG")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(12, "SRCLKENA"),
+		"C13", "mt7623",
+		MTK_EINT_FUNCTION(0, 169),
+		MTK_FUNCTION(0, "GPIO12"),
+		MTK_FUNCTION(1, "SRCLKENA")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(13, "SRCLKENAI"),
+		"B13", "mt7623",
+		MTK_EINT_FUNCTION(0, 161),
+		MTK_FUNCTION(0, "GPIO13"),
+		MTK_FUNCTION(1, "SRCLKENAI")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(14, "GPIO14"),
+		"E18", "mt7623",
+		MTK_EINT_FUNCTION(0, 162),
+		MTK_FUNCTION(0, "GPIO14"),
+		MTK_FUNCTION(1, "URXD2"),
+		MTK_FUNCTION(2, "UTXD2")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(15, "GPIO15"),
+		"E17", "mt7623",
+		MTK_EINT_FUNCTION(0, 163),
+		MTK_FUNCTION(0, "GPIO15"),
+		MTK_FUNCTION(1, "UTXD2"),
+		MTK_FUNCTION(2, "URXD2")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(16, "GPIO16"),
+		NULL, "mt7623",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO16")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(17, "GPIO17"),
+		NULL, "mt7623",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO17")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(18, "PCM_CLK"),
+		"C19", "mt7623",
+		MTK_EINT_FUNCTION(0, 166),
+		MTK_FUNCTION(0, "GPIO18"),
+		MTK_FUNCTION(1, "PCM_CLK0"),
+		MTK_FUNCTION(6, "AP_PCM_CLKO")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(19, "PCM_SYNC"),
+		"D19", "mt7623",
+		MTK_EINT_FUNCTION(0, 167),
+		MTK_FUNCTION(0, "GPIO19"),
+		MTK_FUNCTION(1, "PCM_SYNC"),
+		MTK_FUNCTION(6, "AP_PCM_SYNC")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(20, "PCM_RX"),
+		"D18", "mt7623",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO20"),
+		MTK_FUNCTION(1, "PCM_RX"),
+		MTK_FUNCTION(4, "PCM_TX"),
+		MTK_FUNCTION(6, "AP_PCM_RX")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(21, "PCM_TX"),
+		"C18", "mt7623",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO21"),
+		MTK_FUNCTION(1, "PCM_TX"),
+		MTK_FUNCTION(4, "PCM_RX"),
+		MTK_FUNCTION(6, "AP_PCM_TX")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(22, "EINT0"),
+		"H15", "mt7623",
+		MTK_EINT_FUNCTION(0, 0),
+		MTK_FUNCTION(0, "GPIO22"),
+		MTK_FUNCTION(1, "UCTS0"),
+		MTK_FUNCTION(2, "PCIE0_PERST_N")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(23, "EINT1"),
+		"J16", "mt7623",
+		MTK_EINT_FUNCTION(0, 1),
+		MTK_FUNCTION(0, "GPIO23"),
+		MTK_FUNCTION(1, "URTS0"),
+		MTK_FUNCTION(2, "PCIE1_PERST_N")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(24, "EINT2"),
+		"H16", "mt7623",
+		MTK_EINT_FUNCTION(0, 2),
+		MTK_FUNCTION(0, "GPIO24"),
+		MTK_FUNCTION(1, "UCTS1"),
+		MTK_FUNCTION(2, "PCIE2_PERST_N")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(25, "EINT3"),
+		"K15", "mt7623",
+		MTK_EINT_FUNCTION(0, 3),
+		MTK_FUNCTION(0, "GPIO25"),
+		MTK_FUNCTION(1, "URTS1")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(26, "EINT4"),
+		"G15", "mt7623",
+		MTK_EINT_FUNCTION(0, 4),
+		MTK_FUNCTION(0, "GPIO26"),
+		MTK_FUNCTION(1, "UCTS3"),
+		MTK_FUNCTION(6, "PCIE2_WAKE_N")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(27, "EINT5"),
+		"F15", "mt7623",
+		MTK_EINT_FUNCTION(0, 5),
+		MTK_FUNCTION(0, "GPIO27"),
+		MTK_FUNCTION(1, "URTS3"),
+		MTK_FUNCTION(6, "PCIE1_WAKE_N")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(28, "EINT6"),
+		"J15", "mt7623",
+		MTK_EINT_FUNCTION(0, 6),
+		MTK_FUNCTION(0, "GPIO28"),
+		MTK_FUNCTION(1, "DRV_VBUS"),
+		MTK_FUNCTION(6, "PCIE0_WAKE_N")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(29, "EINT7"),
+		"E15", "mt7623",
+		MTK_EINT_FUNCTION(0, 7),
+		MTK_FUNCTION(0, "GPIO29"),
+		MTK_FUNCTION(1, "IDDIG"),
+		MTK_FUNCTION(2, "MSDC1_WP"),
+		MTK_FUNCTION(6, "PCIE2_PERST_N")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(30, "GPIO30"),
+		NULL, "mt7623",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO30")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(31, "GPIO31"),
+		NULL, "mt7623",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO31")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(32, "GPIO32"),
+		NULL, "mt7623",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO32")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(33, "I2S1_DATA"),
+		"Y18", "mt7623",
+		MTK_EINT_FUNCTION(0, 15),
+		MTK_FUNCTION(0, "GPIO33"),
+		MTK_FUNCTION(1, "I2S1_DATA"),
+		MTK_FUNCTION(3, "PCM_TX"),
+		MTK_FUNCTION(6, "AP_PCM_TX")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(34, "I2S1_DATA_IN"),
+		"Y17", "mt7623",
+		MTK_EINT_FUNCTION(0, 16),
+		MTK_FUNCTION(0, "GPIO34"),
+		MTK_FUNCTION(1, "I2S1_DATA_IN"),
+		MTK_FUNCTION(3, "PCM_RX"),
+		MTK_FUNCTION(6, "AP_PCM_RX")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(35, "I2S1_BCK"),
+		"V17", "mt7623",
+		MTK_EINT_FUNCTION(0, 17),
+		MTK_FUNCTION(0, "GPIO35"),
+		MTK_FUNCTION(1, "I2S1_BCK"),
+		MTK_FUNCTION(3, "PCM_CLK0"),
+		MTK_FUNCTION(6, "AP_PCM_CLKO")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(36, "I2S1_LRCK"),
+		"W17", "mt7623",
+		MTK_EINT_FUNCTION(0, 18),
+		MTK_FUNCTION(0, "GPIO36"),
+		MTK_FUNCTION(1, "I2S1_LRCK"),
+		MTK_FUNCTION(3, "PCM_SYNC"),
+		MTK_FUNCTION(6, "AP_PCM_SYNC")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(37, "I2S1_MCLK"),
+		"AA18", "mt7623",
+		MTK_EINT_FUNCTION(0, 19),
+		MTK_FUNCTION(0, "GPIO37"),
+		MTK_FUNCTION(1, "I2S1_MCLK")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(38, "GPIO38"),
+		NULL, "mt7623",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO38")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(39, "JTMS"),
+		"G21", "mt7623",
+		MTK_EINT_FUNCTION(0, 21),
+		MTK_FUNCTION(0, "GPIO39"),
+		MTK_FUNCTION(1, "JTMS")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(40, "GPIO40"),
+		NULL, "mt7623",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO40")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(41, "JTDI"),
+		"H22", "mt7623",
+		MTK_EINT_FUNCTION(0, 23),
+		MTK_FUNCTION(0, "GPIO41"),
+		MTK_FUNCTION(1, "JTDI")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(42, "JTDO"),
+		"H21", "mt7623",
+		MTK_EINT_FUNCTION(0, 24),
+		MTK_FUNCTION(0, "GPIO42"),
+		MTK_FUNCTION(1, "JTDO")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(43, "NCLE"),
+		"C7", "mt7623",
+		MTK_EINT_FUNCTION(0, 25),
+		MTK_FUNCTION(0, "GPIO43"),
+		MTK_FUNCTION(1, "NCLE"),
+		MTK_FUNCTION(2, "EXT_XCS2")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(44, "NCEB1"),
+		"C6", "mt7623",
+		MTK_EINT_FUNCTION(0, 26),
+		MTK_FUNCTION(0, "GPIO44"),
+		MTK_FUNCTION(1, "NCEB1"),
+		MTK_FUNCTION(2, "IDDIG")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(45, "NCEB0"),
+		"D7", "mt7623",
+		MTK_EINT_FUNCTION(0, 27),
+		MTK_FUNCTION(0, "GPIO45"),
+		MTK_FUNCTION(1, "NCEB0"),
+		MTK_FUNCTION(2, "DRV_VBUS")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(46, "IR"),
+		"D15", "mt7623",
+		MTK_EINT_FUNCTION(0, 28),
+		MTK_FUNCTION(0, "GPIO46"),
+		MTK_FUNCTION(1, "IR")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(47, "NREB"),
+		"A6", "mt7623",
+		MTK_EINT_FUNCTION(0, 29),
+		MTK_FUNCTION(0, "GPIO47"),
+		MTK_FUNCTION(1, "NREB")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(48, "NRNB"),
+		"B6", "mt7623",
+		MTK_EINT_FUNCTION(0, 30),
+		MTK_FUNCTION(0, "GPIO48"),
+		MTK_FUNCTION(1, "NRNB")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(49, "I2S0_DATA"),
+		"AB18", "mt7623",
+		MTK_EINT_FUNCTION(0, 31),
+		MTK_FUNCTION(0, "GPIO49"),
+		MTK_FUNCTION(1, "I2S0_DATA"),
+		MTK_FUNCTION(3, "PCM_TX"),
+		MTK_FUNCTION(6, "AP_I2S_DO")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(50, "GPIO50"),
+		NULL, "mt7623",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO50")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(51, "GPIO51"),
+		NULL, "mt7623",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO51")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(52, "GPIO52"),
+		NULL, "mt7623",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO52")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(53, "SPI0_CSN"),
+		"E7", "mt7623",
+		MTK_EINT_FUNCTION(0, 35),
+		MTK_FUNCTION(0, "GPIO53"),
+		MTK_FUNCTION(1, "SPI0_CS"),
+		MTK_FUNCTION(5, "PWM1")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(54, "SPI0_CK"),
+		"F7", "mt7623",
+		MTK_EINT_FUNCTION(0, 36),
+		MTK_FUNCTION(0, "GPIO54"),
+		MTK_FUNCTION(1, "SPI0_CK")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(55, "SPI0_MI"),
+		"E6", "mt7623",
+		MTK_EINT_FUNCTION(0, 37),
+		MTK_FUNCTION(0, "GPIO55"),
+		MTK_FUNCTION(1, "SPI0_MI"),
+		MTK_FUNCTION(2, "SPI0_MO"),
+		MTK_FUNCTION(3, "MSDC1_WP"),
+		MTK_FUNCTION(5, "PWM2")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(56, "SPI0_MO"),
+		"G7", "mt7623",
+		MTK_EINT_FUNCTION(0, 38),
+		MTK_FUNCTION(0, "GPIO56"),
+		MTK_FUNCTION(1, "SPI0_MO"),
+		MTK_FUNCTION(2, "SPI0_MI")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(57, "GPIO57"),
+		NULL, "mt7623",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO57")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(58, "GPIO58"),
+		NULL, "mt7623",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO58")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(59, "GPIO59"),
+		NULL, "mt7623",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO59")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(60, "WB_RSTB"),
+		"Y21", "mt7623",
+		MTK_EINT_FUNCTION(0, 41),
+		MTK_FUNCTION(0, "GPIO60"),
+		MTK_FUNCTION(1, "WB_RSTB")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(61, "GPIO61"),
+		"AA21", "mt7623",
+		MTK_EINT_FUNCTION(0, 42),
+		MTK_FUNCTION(0, "GPIO61"),
+		MTK_FUNCTION(1, "TEST_FD")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(62, "GPIO62"),
+		"AB22", "mt7623",
+		MTK_EINT_FUNCTION(0, 43),
+		MTK_FUNCTION(0, "GPIO62"),
+		MTK_FUNCTION(1, "TEST_FC")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(63, "WB_SCLK"),
+		"AC23", "mt7623",
+		MTK_EINT_FUNCTION(0, 44),
+		MTK_FUNCTION(0, "GPIO63"),
+		MTK_FUNCTION(1, "WB_SCLK")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(64, "WB_SDATA"),
+		"AB21", "mt7623",
+		MTK_EINT_FUNCTION(0, 45),
+		MTK_FUNCTION(0, "GPIO64"),
+		MTK_FUNCTION(1, "WB_SDATA")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(65, "WB_SEN"),
+		"AB24", "mt7623",
+		MTK_EINT_FUNCTION(0, 46),
+		MTK_FUNCTION(0, "GPIO65"),
+		MTK_FUNCTION(1, "WB_SEN")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(66, "WB_CRTL0"),
+		"AB20", "mt7623",
+		MTK_EINT_FUNCTION(0, 47),
+		MTK_FUNCTION(0, "GPIO66"),
+		MTK_FUNCTION(1, "WB_CRTL0")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(67, "WB_CRTL1"),
+		"AC20", "mt7623",
+		MTK_EINT_FUNCTION(0, 48),
+		MTK_FUNCTION(0, "GPIO67"),
+		MTK_FUNCTION(1, "WB_CRTL1")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(68, "WB_CRTL2"),
+		"AB19", "mt7623",
+		MTK_EINT_FUNCTION(0, 49),
+		MTK_FUNCTION(0, "GPIO68"),
+		MTK_FUNCTION(1, "WB_CRTL2")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(69, "WB_CRTL3"),
+		"AC19", "mt7623",
+		MTK_EINT_FUNCTION(0, 50),
+		MTK_FUNCTION(0, "GPIO69"),
+		MTK_FUNCTION(1, "WB_CRTL3")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(70, "WB_CRTL4"),
+		"AD19", "mt7623",
+		MTK_EINT_FUNCTION(0, 51),
+		MTK_FUNCTION(0, "GPIO70"),
+		MTK_FUNCTION(1, "WB_CRTL4")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(71, "WB_CRTL5"),
+		"AE19", "mt7623",
+		MTK_EINT_FUNCTION(0, 52),
+		MTK_FUNCTION(0, "GPIO71"),
+		MTK_FUNCTION(1, "WB_CRTL5")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(72, "I2S0_DATA_IN"),
+		"AA20", "mt7623",
+		MTK_EINT_FUNCTION(0, 53),
+		MTK_FUNCTION(0, "GPIO72"),
+		MTK_FUNCTION(1, "I2S0_DATA_IN"),
+		MTK_FUNCTION(3, "PCM_RX"),
+		MTK_FUNCTION(4, "PWM0"),
+		MTK_FUNCTION(5, "DISP_PWM"),
+		MTK_FUNCTION(6, "AP_I2S_DI")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(73, "I2S0_LRCK"),
+		"Y20", "mt7623",
+		MTK_EINT_FUNCTION(0, 54),
+		MTK_FUNCTION(0, "GPIO73"),
+		MTK_FUNCTION(1, "I2S0_LRCK"),
+		MTK_FUNCTION(3, "PCM_SYNC"),
+		MTK_FUNCTION(6, "AP_I2S_LRCK")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(74, "I2S0_BCK"),
+		"Y19", "mt7623",
+		MTK_EINT_FUNCTION(0, 55),
+		MTK_FUNCTION(0, "GPIO74"),
+		MTK_FUNCTION(1, "I2S0_BCK"),
+		MTK_FUNCTION(3, "PCM_CLK0"),
+		MTK_FUNCTION(6, "AP_I2S_BCK")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(75, "SDA0"),
+		"K19", "mt7623",
+		MTK_EINT_FUNCTION(0, 56),
+		MTK_FUNCTION(0, "GPIO75"),
+		MTK_FUNCTION(1, "SDA0")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(76, "SCL0"),
+		"K20", "mt7623",
+		MTK_EINT_FUNCTION(0, 57),
+		MTK_FUNCTION(0, "GPIO76"),
+		MTK_FUNCTION(1, "SCL0")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(77, "GPIO77"),
+		NULL, "mt7623",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO77")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(78, "GPIO78"),
+		NULL, "mt7623",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO78")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(79, "GPIO79"),
+		NULL, "mt7623",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO79")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(80, "GPIO80"),
+		NULL, "mt7623",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO80")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(81, "GPIO81"),
+		NULL, "mt7623",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO81")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(82, "GPIO82"),
+		NULL, "mt7623",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO82")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(83, "LCM_RST"),
+		"V16", "mt7623",
+		MTK_EINT_FUNCTION(0, 64),
+		MTK_FUNCTION(0, "GPIO83"),
+		MTK_FUNCTION(1, "LCM_RST")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(84, "DSI_TE"),
+		"V14", "mt7623",
+		MTK_EINT_FUNCTION(0, 65),
+		MTK_FUNCTION(0, "GPIO84"),
+		MTK_FUNCTION(1, "DSI_TE")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(85, "GPIO85"),
+		NULL, "mt7623",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO85")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(86, "GPIO86"),
+		NULL, "mt7623",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO86")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(87, "GPIO87"),
+		NULL, "mt7623",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO87")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(88, "GPIO88"),
+		NULL, "mt7623",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO88")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(89, "GPIO89"),
+		NULL, "mt7623",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO89")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(90, "GPIO90"),
+		NULL, "mt7623",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO90")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(91, "GPIO91"),
+		NULL, "mt7623",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO91")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(92, "GPIO92"),
+		NULL, "mt7623",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO92")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(93, "GPIO93"),
+		NULL, "mt7623",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO93")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(94, "GPIO94"),
+		NULL, "mt7623",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO94")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(95, "MIPI_TCN"),
+		"AB14", "mt7623",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO95"),
+		MTK_FUNCTION(1, "TCN")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(96, "MIPI_TCP"),
+		"AC14", "mt7623",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO96"),
+		MTK_FUNCTION(1, "TCP")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(97, "MIPI_TDN1"),
+		"AE15", "mt7623",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO97"),
+		MTK_FUNCTION(1, "TDN1")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(98, "MIPI_TDP1"),
+		"AD15", "mt7623",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO98"),
+		MTK_FUNCTION(1, "TDP1")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(99, "MIPI_TDN0"),
+		"AB15", "mt7623",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO99"),
+		MTK_FUNCTION(1, "TDN0")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(100, "MIPI_TDP0"),
+		"AC15", "mt7623",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO100"),
+		MTK_FUNCTION(1, "TDP0")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(101, "GPIO101"),
+		NULL, "mt7623",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO101")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(102, "GPIO102"),
+		NULL, "mt7623",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO102")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(103, "GPIO103"),
+		NULL, "mt7623",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO103")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(104, "GPIO104"),
+		NULL, "mt7623",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO104")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(105, "MSDC1_CMD"),
+		"AD2", "mt7623",
+		MTK_EINT_FUNCTION(0, 78),
+		MTK_FUNCTION(0, "GPIO105"),
+		MTK_FUNCTION(1, "MSDC1_CMD"),
+		MTK_FUNCTION(3, "SDA1"),
+		MTK_FUNCTION(6, "I2SOUT_BCK")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(106, "MSDC1_CLK"),
+		"AD3", "mt7623",
+		MTK_EINT_FUNCTION(0, 79),
+		MTK_FUNCTION(0, "GPIO106"),
+		MTK_FUNCTION(1, "MSDC1_CLK"),
+		MTK_FUNCTION(3, "SCL1"),
+		MTK_FUNCTION(6, "I2SOUT_LRCK")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(107, "MSDC1_DAT0"),
+		"AE2", "mt7623",
+		MTK_EINT_FUNCTION(0, 80),
+		MTK_FUNCTION(0, "GPIO107"),
+		MTK_FUNCTION(1, "MSDC1_DAT0"),
+		MTK_FUNCTION(5, "UTXD0"),
+		MTK_FUNCTION(6, "I2SOUT_DATA_OUT")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(108, "MSDC1_DAT1"),
+		"AC1", "mt7623",
+		MTK_EINT_FUNCTION(0, 81),
+		MTK_FUNCTION(0, "GPIO108"),
+		MTK_FUNCTION(1, "MSDC1_DAT1"),
+		MTK_FUNCTION(3, "PWM0"),
+		MTK_FUNCTION(5, "URXD0"),
+		MTK_FUNCTION(6, "PWM1")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(109, "MSDC1_DAT2"),
+		"AC3", "mt7623",
+		MTK_EINT_FUNCTION(0, 82),
+		MTK_FUNCTION(0, "GPIO109"),
+		MTK_FUNCTION(1, "MSDC1_DAT2"),
+		MTK_FUNCTION(3, "SDA2"),
+		MTK_FUNCTION(5, "UTXD1"),
+		MTK_FUNCTION(6, "PWM2")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(110, "MSDC1_DAT3"),
+		"AC4", "mt7623",
+		MTK_EINT_FUNCTION(0, 83),
+		MTK_FUNCTION(0, "GPIO110"),
+		MTK_FUNCTION(1, "MSDC1_DAT3"),
+		MTK_FUNCTION(3, "SCL2"),
+		MTK_FUNCTION(5, "URXD1"),
+		MTK_FUNCTION(6, "PWM3")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(111, "MSDC0_DAT7"),
+		"A2", "mt7623",
+		MTK_EINT_FUNCTION(0, 84),
+		MTK_FUNCTION(0, "GPIO111"),
+		MTK_FUNCTION(1, "MSDC0_DAT7"),
+		MTK_FUNCTION(4, "NLD7")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(112, "MSDC0_DAT6"),
+		"B3", "mt7623",
+		MTK_EINT_FUNCTION(0, 85),
+		MTK_FUNCTION(0, "GPIO112"),
+		MTK_FUNCTION(1, "MSDC0_DAT6"),
+		MTK_FUNCTION(4, "NLD6")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(113, "MSDC0_DAT5"),
+		"C4", "mt7623",
+		MTK_EINT_FUNCTION(0, 86),
+		MTK_FUNCTION(0, "GPIO113"),
+		MTK_FUNCTION(1, "MSDC0_DAT5"),
+		MTK_FUNCTION(4, "NLD5")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(114, "MSDC0_DAT4"),
+		"A4", "mt7623",
+		MTK_EINT_FUNCTION(0, 87),
+		MTK_FUNCTION(0, "GPIO114"),
+		MTK_FUNCTION(1, "MSDC0_DAT4"),
+		MTK_FUNCTION(4, "NLD4")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(115, "MSDC0_RSTB"),
+		"C5", "mt7623",
+		MTK_EINT_FUNCTION(0, 88),
+		MTK_FUNCTION(0, "GPIO115"),
+		MTK_FUNCTION(1, "MSDC0_RSTB"),
+		MTK_FUNCTION(4, "NLD8")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(116, "MSDC0_CMD"),
+		"D5", "mt7623",
+		MTK_EINT_FUNCTION(0, 89),
+		MTK_FUNCTION(0, "GPIO116"),
+		MTK_FUNCTION(1, "MSDC0_CMD"),
+		MTK_FUNCTION(4, "NALE")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(117, "MSDC0_CLK"),
+		"B1", "mt7623",
+		MTK_EINT_FUNCTION(0, 90),
+		MTK_FUNCTION(0, "GPIO117"),
+		MTK_FUNCTION(1, "MSDC0_CLK"),
+		MTK_FUNCTION(4, "NWEB")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(118, "MSDC0_DAT3"),
+		"D6", "mt7623",
+		MTK_EINT_FUNCTION(0, 91),
+		MTK_FUNCTION(0, "GPIO118"),
+		MTK_FUNCTION(1, "MSDC0_DAT3"),
+		MTK_FUNCTION(4, "NLD3")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(119, "MSDC0_DAT2"),
+		"B2", "mt7623",
+		MTK_EINT_FUNCTION(0, 92),
+		MTK_FUNCTION(0, "GPIO119"),
+		MTK_FUNCTION(1, "MSDC0_DAT2"),
+		MTK_FUNCTION(4, "NLD2")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(120, "MSDC0_DAT1"),
+		"A3", "mt7623",
+		MTK_EINT_FUNCTION(0, 93),
+		MTK_FUNCTION(0, "GPIO120"),
+		MTK_FUNCTION(1, "MSDC0_DAT1"),
+		MTK_FUNCTION(4, "NLD1")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(121, "MSDC0_DAT0"),
+		"B4", "mt7623",
+		MTK_EINT_FUNCTION(0, 94),
+		MTK_FUNCTION(0, "GPIO121"),
+		MTK_FUNCTION(1, "MSDC0_DAT0"),
+		MTK_FUNCTION(4, "NLD0"),
+		MTK_FUNCTION(5, "WATCHDOG")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(122, "GPIO122"),
+		"H17", "mt7623",
+		MTK_EINT_FUNCTION(0, 95),
+		MTK_FUNCTION(0, "GPIO122"),
+		MTK_FUNCTION(1, "TEST"),
+		MTK_FUNCTION(4, "SDA2"),
+		MTK_FUNCTION(5, "URXD0")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(123, "GPIO123"),
+		"F17", "mt7623",
+		MTK_EINT_FUNCTION(0, 96),
+		MTK_FUNCTION(0, "GPIO123"),
+		MTK_FUNCTION(1, "TEST"),
+		MTK_FUNCTION(4, "SCL2"),
+		MTK_FUNCTION(5, "UTXD0")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(124, "GPIO124"),
+		"H18", "mt7623",
+		MTK_EINT_FUNCTION(0, 97),
+		MTK_FUNCTION(0, "GPIO124"),
+		MTK_FUNCTION(1, "TEST"),
+		MTK_FUNCTION(4, "SDA1"),
+		MTK_FUNCTION(5, "PWM3")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(125, "GPIO125"),
+		"G17", "mt7623",
+		MTK_EINT_FUNCTION(0, 98),
+		MTK_FUNCTION(0, "GPIO125"),
+		MTK_FUNCTION(1, "TEST"),
+		MTK_FUNCTION(4, "SCL1"),
+		MTK_FUNCTION(5, "PWM4")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(126, "I2S0_MCLK"),
+		"AA19", "mt7623",
+		MTK_EINT_FUNCTION(0, 99),
+		MTK_FUNCTION(0, "GPIO126"),
+		MTK_FUNCTION(1, "I2S0_MCLK"),
+		MTK_FUNCTION(6, "AP_I2S_MCLK")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(127, "GPIO127"),
+		NULL, "mt7623",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO127")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(128, "GPIO128"),
+		NULL, "mt7623",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO128")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(129, "GPIO129"),
+		NULL, "mt7623",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO129")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(130, "GPIO130"),
+		NULL, "mt7623",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO130")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(131, "GPIO131"),
+		NULL, "mt7623",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO131")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(132, "GPIO132"),
+		NULL, "mt7623",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO132")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(133, "GPIO133"),
+		NULL, "mt7623",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO133")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(134, "GPIO134"),
+		NULL, "mt7623",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO134")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(135, "GPIO135"),
+		NULL, "mt7623",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO135")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(136, "GPIO136"),
+		NULL, "mt7623",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO136")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(137, "GPIO137"),
+		NULL, "mt7623",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO137")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(138, "GPIO138"),
+		NULL, "mt7623",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO138")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(139, "GPIO139"),
+		NULL, "mt7623",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO139")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(140, "GPIO140"),
+		NULL, "mt7623",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO140")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(141, "GPIO141"),
+		NULL, "mt7623",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO141")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(142, "GPIO142"),
+		NULL, "mt7623",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO142")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(143, "GPIO143"),
+		NULL, "mt7623",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO143")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(144, "GPIO144"),
+		NULL, "mt7623",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO144")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(145, "GPIO145"),
+		NULL, "mt7623",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO145")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(146, "GPIO146"),
+		NULL, "mt7623",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO146")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(147, "GPIO147"),
+		NULL, "mt7623",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO147")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(148, "GPIO148"),
+		NULL, "mt7623",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO148")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(149, "GPIO149"),
+		NULL, "mt7623",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO149")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(150, "GPIO150"),
+		NULL, "mt7623",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO150")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(151, "GPIO151"),
+		NULL, "mt7623",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO151")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(152, "GPIO152"),
+		NULL, "mt7623",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO152")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(153, "GPIO153"),
+		NULL, "mt7623",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO153")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(154, "GPIO154"),
+		NULL, "mt7623",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO154")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(155, "GPIO155"),
+		NULL, "mt7623",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO155")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(156, "GPIO156"),
+		NULL, "mt7623",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO156")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(157, "GPIO157"),
+		NULL, "mt7623",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO157")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(158, "GPIO158"),
+		NULL, "mt7623",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO158")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(159, "GPIO159"),
+		NULL, "mt7623",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO159")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(160, "GPIO160"),
+		NULL, "mt7623",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO160")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(161, "GPIO161"),
+		NULL, "mt7623",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO161")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(162, "GPIO162"),
+		NULL, "mt7623",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO162")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(163, "GPIO163"),
+		NULL, "mt7623",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO163")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(164, "GPIO164"),
+		NULL, "mt7623",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO164")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(165, "GPIO165"),
+		NULL, "mt7623",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO165")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(166, "GPIO166"),
+		NULL, "mt7623",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO166")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(167, "GPIO167"),
+		NULL, "mt7623",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO167")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(168, "GPIO168"),
+		NULL, "mt7623",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO168")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(169, "GPIO169"),
+		NULL, "mt7623",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO169")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(170, "GPIO170"),
+		NULL, "mt7623",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO170")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(171, "GPIO171"),
+		NULL, "mt7623",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO171")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(172, "GPIO172"),
+		NULL, "mt7623",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO172")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(173, "GPIO173"),
+		NULL, "mt7623",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO173")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(174, "GPIO174"),
+		NULL, "mt7623",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO174")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(175, "GPIO175"),
+		NULL, "mt7623",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO175")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(176, "GPIO176"),
+		NULL, "mt7623",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO176")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(177, "GPIO177"),
+		NULL, "mt7623",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO177")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(178, "GPIO178"),
+		NULL, "mt7623",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO178")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(179, "GPIO179"),
+		NULL, "mt7623",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO179")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(180, "GPIO180"),
+		NULL, "mt7623",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO180")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(181, "GPIO181"),
+		NULL, "mt7623",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO181")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(182, "GPIO182"),
+		NULL, "mt7623",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO182")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(183, "GPIO183"),
+		NULL, "mt7623",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO183")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(184, "GPIO184"),
+		NULL, "mt7623",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO184")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(185, "GPIO185"),
+		NULL, "mt7623",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO185")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(186, "GPIO186"),
+		NULL, "mt7623",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO186")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(187, "GPIO187"),
+		NULL, "mt7623",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO187")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(188, "GPIO188"),
+		NULL, "mt7623",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO188")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(189, "GPIO189"),
+		NULL, "mt7623",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO189")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(190, "GPIO190"),
+		NULL, "mt7623",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO190")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(191, "GPIO191"),
+		NULL, "mt7623",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO191")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(192, "GPIO192"),
+		NULL, "mt7623",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO192")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(193, "GPIO193"),
+		NULL, "mt7623",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO193")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(194, "GPIO194"),
+		NULL, "mt7623",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO194")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(195, "GPIO195"),
+		NULL, "mt7623",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO195")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(196, "GPIO196"),
+		NULL, "mt7623",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO196")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(197, "GPIO197"),
+		NULL, "mt7623",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO197")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(198, "GPIO198"),
+		NULL, "mt7623",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO198")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(199, "SPI1_CK"),
+		"E19", "mt7623",
+		MTK_EINT_FUNCTION(0, 111),
+		MTK_FUNCTION(0, "GPIO199"),
+		MTK_FUNCTION(1, "SPI1_CK")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(200, "URXD2"),
+		"K18", "mt7623",
+		MTK_EINT_FUNCTION(0, 112),
+		MTK_FUNCTION(0, "GPIO200"),
+		MTK_FUNCTION(6, "URXD2")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(201, "UTXD2"),
+		"L18", "mt7623",
+		MTK_EINT_FUNCTION(0, 113),
+		MTK_FUNCTION(0, "GPIO201"),
+		MTK_FUNCTION(6, "UTXD2")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(202, "GPIO202"),
+		NULL, "mt7623",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO202")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(203, "PWM0"),
+		"AA16", "mt7623",
+		MTK_EINT_FUNCTION(0, 115),
+		MTK_FUNCTION(0, "GPIO203"),
+		MTK_FUNCTION(1, "PWM0"),
+		MTK_FUNCTION(2, "DISP_PWM")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(204, "PWM1"),
+		"Y16", "mt7623",
+		MTK_EINT_FUNCTION(0, 116),
+		MTK_FUNCTION(0, "GPIO204"),
+		MTK_FUNCTION(1, "PWM1")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(205, "PWM2"),
+		"AA15", "mt7623",
+		MTK_EINT_FUNCTION(0, 117),
+		MTK_FUNCTION(0, "GPIO205"),
+		MTK_FUNCTION(1, "PWM2")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(206, "PWM3"),
+		"AA17", "mt7623",
+		MTK_EINT_FUNCTION(0, 118),
+		MTK_FUNCTION(0, "GPIO206"),
+		MTK_FUNCTION(1, "PWM3")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(207, "PWM4"),
+		"Y15", "mt7623",
+		MTK_EINT_FUNCTION(0, 119),
+		MTK_FUNCTION(0, "GPIO207"),
+		MTK_FUNCTION(1, "PWM4")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(208, "AUD_EXT_CK1"),
+		"W14", "mt7623",
+		MTK_EINT_FUNCTION(0, 120),
+		MTK_FUNCTION(0, "GPIO208"),
+		MTK_FUNCTION(1, "AUD_EXT_CK1"),
+		MTK_FUNCTION(2, "PWM0"),
+		MTK_FUNCTION(3, "PCIE0_PERST_N"),
+		MTK_FUNCTION(5, "DISP_PWM")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(209, "AUD_EXT_CK2"),
+		"V15", "mt7623",
+		MTK_EINT_FUNCTION(0, 121),
+		MTK_FUNCTION(0, "GPIO209"),
+		MTK_FUNCTION(1, "AUD_EXT_CK2"),
+		MTK_FUNCTION(2, "MSDC1_WP"),
+		MTK_FUNCTION(3, "PCIE1_PERST_N"),
+		MTK_FUNCTION(5, "PWM1")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(210, "GPIO210"),
+		NULL, "mt7623",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO210")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(211, "GPIO211"),
+		NULL, "mt7623",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO211")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(212, "GPIO212"),
+		NULL, "mt7623",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO212")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(213, "GPIO213"),
+		NULL, "mt7623",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO213")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(214, "GPIO214"),
+		NULL, "mt7623",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO214")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(215, "GPIO215"),
+		NULL, "mt7623",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO215")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(216, "GPIO216"),
+		NULL, "mt7623",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO216")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(217, "GPIO217"),
+		NULL, "mt7623",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO217")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(218, "GPIO218"),
+		NULL, "mt7623",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO218")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(219, "GPIO219"),
+		NULL, "mt7623",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO219")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(220, "GPIO220"),
+		NULL, "mt7623",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO220")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(221, "GPIO221"),
+		NULL, "mt7623",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO221")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(222, "GPIO222"),
+		NULL, "mt7623",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO222")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(223, "GPIO223"),
+		NULL, "mt7623",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO223")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(224, "GPIO224"),
+		NULL, "mt7623",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO224")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(225, "GPIO225"),
+		NULL, "mt7623",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO225")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(226, "GPIO226"),
+		NULL, "mt7623",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO226")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(227, "GPIO227"),
+		NULL, "mt7623",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO227")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(228, "GPIO228"),
+		NULL, "mt7623",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO228")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(229, "GPIO229"),
+		NULL, "mt7623",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO229")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(230, "GPIO230"),
+		NULL, "mt7623",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO230")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(231, "GPIO231"),
+		NULL, "mt7623",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO231")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(232, "GPIO232"),
+		NULL, "mt7623",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO232")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(233, "GPIO233"),
+		NULL, "mt7623",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO233")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(234, "GPIO234"),
+		NULL, "mt7623",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO234")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(235, "GPIO235"),
+		NULL, "mt7623",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO235")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(236, "EXT_SDIO3"),
+		"A8", "mt7623",
+		MTK_EINT_FUNCTION(0, 122),
+		MTK_FUNCTION(0, "GPIO236"),
+		MTK_FUNCTION(1, "EXT_SDIO3"),
+		MTK_FUNCTION(2, "IDDIG")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(237, "EXT_SDIO2"),
+		"D8", "mt7623",
+		MTK_EINT_FUNCTION(0, 123),
+		MTK_FUNCTION(0, "GPIO237"),
+		MTK_FUNCTION(1, "EXT_SDIO2"),
+		MTK_FUNCTION(2, "DRV_VBUS")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(238, "EXT_SDIO1"),
+		"D9", "mt7623",
+		MTK_EINT_FUNCTION(0, 124),
+		MTK_FUNCTION(0, "GPIO238"),
+		MTK_FUNCTION(1, "EXT_SDIO1")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(239, "EXT_SDIO0"),
+		"B8", "mt7623",
+		MTK_EINT_FUNCTION(0, 125),
+		MTK_FUNCTION(0, "GPIO239"),
+		MTK_FUNCTION(1, "EXT_SDIO0")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(240, "EXT_XCS"),
+		"C9", "mt7623",
+		MTK_EINT_FUNCTION(0, 126),
+		MTK_FUNCTION(0, "GPIO240"),
+		MTK_FUNCTION(1, "EXT_XCS")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(241, "EXT_SCK"),
+		"C8", "mt7623",
+		MTK_EINT_FUNCTION(0, 127),
+		MTK_FUNCTION(0, "GPIO241"),
+		MTK_FUNCTION(1, "EXT_SCK")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(242, "URTS2"),
+		"G18", "mt7623",
+		MTK_EINT_FUNCTION(0, 128),
+		MTK_FUNCTION(0, "GPIO242"),
+		MTK_FUNCTION(1, "URTS2"),
+		MTK_FUNCTION(2, "UTXD3"),
+		MTK_FUNCTION(3, "URXD3"),
+		MTK_FUNCTION(4, "SCL1")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(243, "UCTS2"),
+		"H19", "mt7623",
+		MTK_EINT_FUNCTION(0, 129),
+		MTK_FUNCTION(0, "GPIO243"),
+		MTK_FUNCTION(1, "UCTS2"),
+		MTK_FUNCTION(2, "URXD3"),
+		MTK_FUNCTION(3, "UTXD3"),
+		MTK_FUNCTION(4, "SDA1")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(244, "GPIO244"),
+		NULL, "mt7623",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO244")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(245, "GPIO245"),
+		NULL, "mt7623",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO245")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(246, "GPIO246"),
+		NULL, "mt7623",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO246")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(247, "GPIO247"),
+		NULL, "mt7623",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO247")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(248, "GPIO248"),
+		NULL, "mt7623",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO248")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(249, "GPIO249"),
+		NULL, "mt7623",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO249")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(250, "GPIO250"),
+		"A15", "mt7623",
+		MTK_EINT_FUNCTION(0, 135),
+		MTK_FUNCTION(0, "GPIO250"),
+		MTK_FUNCTION(1, "TEST_MD7"),
+		MTK_FUNCTION(6, "PCIE0_CLKREQ_N")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(251, "GPIO251"),
+		"B15", "mt7623",
+		MTK_EINT_FUNCTION(0, 136),
+		MTK_FUNCTION(0, "GPIO251"),
+		MTK_FUNCTION(1, "TEST_MD6"),
+		MTK_FUNCTION(6, "PCIE0_WAKE_N")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(252, "GPIO252"),
+		"C16", "mt7623",
+		MTK_EINT_FUNCTION(0, 137),
+		MTK_FUNCTION(0, "GPIO252"),
+		MTK_FUNCTION(1, "TEST_MD5"),
+		MTK_FUNCTION(6, "PCIE1_CLKREQ_N")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(253, "GPIO253"),
+		"D17", "mt7623",
+		MTK_EINT_FUNCTION(0, 138),
+		MTK_FUNCTION(0, "GPIO253"),
+		MTK_FUNCTION(1, "TEST_MD4"),
+		MTK_FUNCTION(6, "PCIE1_WAKE_N")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(254, "GPIO254"),
+		"D16", "mt7623",
+		MTK_EINT_FUNCTION(0, 139),
+		MTK_FUNCTION(0, "GPIO254"),
+		MTK_FUNCTION(1, "TEST_MD3"),
+		MTK_FUNCTION(6, "PCIE2_CLKREQ_N")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(255, "GPIO255"),
+		"C17", "mt7623",
+		MTK_EINT_FUNCTION(0, 140),
+		MTK_FUNCTION(0, "GPIO255"),
+		MTK_FUNCTION(1, "TEST_MD2"),
+		MTK_FUNCTION(6, "PCIE2_WAKE_N")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(256, "GPIO256"),
+		"B17", "mt7623",
+		MTK_EINT_FUNCTION(0, 141),
+		MTK_FUNCTION(0, "GPIO256"),
+		MTK_FUNCTION(1, "TEST_MD1")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(257, "GPIO257"),
+		"C15", "mt7623",
+		MTK_EINT_FUNCTION(0, 142),
+		MTK_FUNCTION(0, "GPIO257"),
+		MTK_FUNCTION(1, "TEST_MD0")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(258, "GPIO258"),
+		NULL, "mt7623",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO258")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(259, "GPIO259"),
+		NULL, "mt7623",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO259")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(260, "GPIO260"),
+		NULL, "mt7623",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO260")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(261, "MSDC1_INS"),
+		"AD1", "mt7623",
+		MTK_EINT_FUNCTION(0, 146),
+		MTK_FUNCTION(0, "GPIO261"),
+		MTK_FUNCTION(1, "MSDC1_INS")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(262, "G2_TXEN"),
+		"A23", "mt7623",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO262"),
+		MTK_FUNCTION(1, "G2_TXEN")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(263, "G2_TXD3"),
+		NULL, "mt7623",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO263"),
+		MTK_FUNCTION(1, "G2_TXD3")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(264, "G2_TXD2"),
+		"C24", "mt7623",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO264"),
+		MTK_FUNCTION(1, "G2_TXD2")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(265, "G2_TXD1"),
+		"B25", "mt7623",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO265"),
+		MTK_FUNCTION(1, "G2_TXD1")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(266, "G2_TXD0"),
+		"A24", "mt7623",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO266"),
+		MTK_FUNCTION(1, "G2_TXD0")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(267, "G2_TXCLK"),
+		"C23", "mt7623",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO267"),
+		MTK_FUNCTION(1, "G2_TXC")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(268, "G2_RXCLK"),
+		"B23", "mt7623",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO268"),
+		MTK_FUNCTION(1, "G2_RXC")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(269, "G2_RXD0"),
+		"D21", "mt7623",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO269"),
+		MTK_FUNCTION(1, "G2_RXD0")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(270, "G2_RXD1"),
+		"B22", "mt7623",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO270"),
+		MTK_FUNCTION(1, "G2_RXD1")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(271, "G2_RXD2"),
+		"A22", "mt7623",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO271"),
+		MTK_FUNCTION(1, "G2_RXD2")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(272, "G2_RXD3"),
+		"C22", "mt7623",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO272"),
+		MTK_FUNCTION(1, "G2_RXD3")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(273, "GPIO273"),
+		NULL, "mt7623",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO273")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(274, "G2_RXDV"),
+		"C21", "mt7623",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO274"),
+		MTK_FUNCTION(1, "G2_RXDV")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(275, "G2_MDC"),
+		NULL, "mt7623",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO275"),
+		MTK_FUNCTION(1, "MDC")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(276, "G2_MDIO"),
+		NULL, "mt7623",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO276"),
+		MTK_FUNCTION(1, "MDIO")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(277, "GPIO277"),
+		NULL, "mt7623",
+		MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+		MTK_FUNCTION(0, "GPIO277")
+	),
+	MTK_PIN(
+		PINCTRL_PIN(278, "JTAG_RESET"),
+		"H20", "mt7623",
+		MTK_EINT_FUNCTION(0, 147),
+		MTK_FUNCTION(0, "GPIO278"),
+		MTK_FUNCTION(1, "JTAG_RESET")
+	),
+};
+
+#endif /* __PINCTRL_MTK_MT7623_H */
diff --git a/drivers/pinctrl/meson/pinctrl-meson.c b/drivers/pinctrl/meson/pinctrl-meson.c
index 50cab27..0bdb8fd 100644
--- a/drivers/pinctrl/meson/pinctrl-meson.c
+++ b/drivers/pinctrl/meson/pinctrl-meson.c
@@ -49,7 +49,6 @@
 #include <linux/gpio.h>
 #include <linux/init.h>
 #include <linux/io.h>
-#include <linux/module.h>
 #include <linux/of.h>
 #include <linux/of_address.h>
 #include <linux/pinctrl/pinconf-generic.h>
@@ -104,15 +103,13 @@
 				     struct meson_bank **bank)
 {
 	struct meson_domain *d;
-	int i;
 
-	for (i = 0; i < pc->data->num_domains; i++) {
-		d = &pc->domains[i];
-		if (pin >= d->data->pin_base &&
-		    pin < d->data->pin_base + d->data->num_pins) {
-			*domain = d;
-			return meson_get_bank(d, pin, bank);
-		}
+	d = pc->domain;
+
+	if (pin >= d->data->pin_base &&
+	    pin < d->data->pin_base + d->data->num_pins) {
+		*domain = d;
+		return meson_get_bank(d, pin, bank);
 	}
 
 	return -EINVAL;
@@ -204,7 +201,7 @@
 		for (j = 0; j < group->num_pins; j++) {
 			if (group->pins[j] == pin) {
 				/* We have found a group using the pin */
-				domain = &pc->domains[group->domain];
+				domain = pc->domain;
 				regmap_update_bits(domain->reg_mux,
 						   group->reg * 4,
 						   BIT(group->bit), 0);
@@ -219,7 +216,7 @@
 	struct meson_pinctrl *pc = pinctrl_dev_get_drvdata(pcdev);
 	struct meson_pmx_func *func = &pc->data->funcs[func_num];
 	struct meson_pmx_group *group = &pc->data->groups[group_num];
-	struct meson_domain *domain = &pc->domains[group->domain];
+	struct meson_domain *domain = pc->domain;
 	int i, ret = 0;
 
 	dev_dbg(pc->dev, "enable function %s, group %s\n", func->name,
@@ -537,76 +534,67 @@
 
 static const struct of_device_id meson_pinctrl_dt_match[] = {
 	{
-		.compatible = "amlogic,meson8-pinctrl",
-		.data = &meson8_pinctrl_data,
+		.compatible = "amlogic,meson8-cbus-pinctrl",
+		.data = &meson8_cbus_pinctrl_data,
 	},
 	{
-		.compatible = "amlogic,meson8b-pinctrl",
-		.data = &meson8b_pinctrl_data,
+		.compatible = "amlogic,meson8b-cbus-pinctrl",
+		.data = &meson8b_cbus_pinctrl_data,
+	},
+	{
+		.compatible = "amlogic,meson8-aobus-pinctrl",
+		.data = &meson8_aobus_pinctrl_data,
+	},
+	{
+		.compatible = "amlogic,meson8b-aobus-pinctrl",
+		.data = &meson8b_aobus_pinctrl_data,
 	},
 	{ },
 };
-MODULE_DEVICE_TABLE(of, meson_pinctrl_dt_match);
 
 static int meson_gpiolib_register(struct meson_pinctrl *pc)
 {
 	struct meson_domain *domain;
-	int i, ret;
+	int ret;
 
-	for (i = 0; i < pc->data->num_domains; i++) {
-		domain = &pc->domains[i];
+	domain = pc->domain;
 
-		domain->chip.label = domain->data->name;
-		domain->chip.parent = pc->dev;
-		domain->chip.request = meson_gpio_request;
-		domain->chip.free = meson_gpio_free;
-		domain->chip.direction_input = meson_gpio_direction_input;
-		domain->chip.direction_output = meson_gpio_direction_output;
-		domain->chip.get = meson_gpio_get;
-		domain->chip.set = meson_gpio_set;
-		domain->chip.base = domain->data->pin_base;
-		domain->chip.ngpio = domain->data->num_pins;
-		domain->chip.can_sleep = false;
-		domain->chip.of_node = domain->of_node;
-		domain->chip.of_gpio_n_cells = 2;
+	domain->chip.label = domain->data->name;
+	domain->chip.parent = pc->dev;
+	domain->chip.request = meson_gpio_request;
+	domain->chip.free = meson_gpio_free;
+	domain->chip.direction_input = meson_gpio_direction_input;
+	domain->chip.direction_output = meson_gpio_direction_output;
+	domain->chip.get = meson_gpio_get;
+	domain->chip.set = meson_gpio_set;
+	domain->chip.base = domain->data->pin_base;
+	domain->chip.ngpio = domain->data->num_pins;
+	domain->chip.can_sleep = false;
+	domain->chip.of_node = domain->of_node;
+	domain->chip.of_gpio_n_cells = 2;
 
-		ret = gpiochip_add_data(&domain->chip, domain);
-		if (ret) {
-			dev_err(pc->dev, "can't add gpio chip %s\n",
-				domain->data->name);
-			goto fail;
-		}
+	ret = gpiochip_add_data(&domain->chip, domain);
+	if (ret) {
+		dev_err(pc->dev, "can't add gpio chip %s\n",
+			domain->data->name);
+		goto fail;
+	}
 
-		ret = gpiochip_add_pin_range(&domain->chip, dev_name(pc->dev),
-					     0, domain->data->pin_base,
-					     domain->chip.ngpio);
-		if (ret) {
-			dev_err(pc->dev, "can't add pin range\n");
-			goto fail;
-		}
+	ret = gpiochip_add_pin_range(&domain->chip, dev_name(pc->dev),
+				     0, domain->data->pin_base,
+				     domain->chip.ngpio);
+	if (ret) {
+		dev_err(pc->dev, "can't add pin range\n");
+		goto fail;
 	}
 
 	return 0;
 fail:
-	for (i--; i >= 0; i--)
-		gpiochip_remove(&pc->domains[i].chip);
+	gpiochip_remove(&pc->domain->chip);
 
 	return ret;
 }
 
-static struct meson_domain_data *meson_get_domain_data(struct meson_pinctrl *pc,
-						       struct device_node *np)
-{
-	int i;
-
-	for (i = 0; i < pc->data->num_domains; i++) {
-		if (!strcmp(np->name, pc->data->domain_data[i].name))
-			return &pc->data->domain_data[i];
-	}
-
-	return NULL;
-}
-
 static struct regmap_config meson_regmap_config = {
 	.reg_bits = 32,
 	.val_bits = 32,
@@ -643,7 +631,7 @@
 {
 	struct device_node *np;
 	struct meson_domain *domain;
-	int i = 0, num_domains = 0;
+	int num_domains = 0;
 
 	for_each_child_of_node(node, np) {
 		if (!of_find_property(np, "gpio-controller", NULL))
@@ -651,29 +639,22 @@
 		num_domains++;
 	}
 
-	if (num_domains != pc->data->num_domains) {
+	if (num_domains != 1) {
 		dev_err(pc->dev, "wrong number of subnodes\n");
 		return -EINVAL;
 	}
 
-	pc->domains = devm_kzalloc(pc->dev, num_domains *
-				   sizeof(struct meson_domain), GFP_KERNEL);
-	if (!pc->domains)
+	pc->domain = devm_kzalloc(pc->dev, sizeof(struct meson_domain), GFP_KERNEL);
+	if (!pc->domain)
 		return -ENOMEM;
 
+	domain = pc->domain;
+	domain->data = pc->data->domain_data;
+
 	for_each_child_of_node(node, np) {
 		if (!of_find_property(np, "gpio-controller", NULL))
 			continue;
 
-		domain = &pc->domains[i];
-
-		domain->data = meson_get_domain_data(pc, np);
-		if (!domain->data) {
-			dev_err(pc->dev, "domain data not found for node %s\n",
-				np->name);
-			return -ENODEV;
-		}
-
 		domain->of_node = np;
 
 		domain->reg_mux = meson_map_resource(pc, np, "mux");
@@ -699,7 +680,7 @@
 			return PTR_ERR(domain->reg_gpio);
 		}
 
-		i++;
+		break;
 	}
 
 	return 0;
@@ -718,7 +699,7 @@
 
 	pc->dev = dev;
 	match = of_match_node(meson_pinctrl_dt_match, pdev->dev.of_node);
-	pc->data = (struct meson_pinctrl_data *)match->data;
+	pc->data = (struct meson_pinctrl_data *) match->data;
 
 	ret = meson_pinctrl_parse_dt(pc, pdev->dev.of_node);
 	if (ret)
@@ -754,8 +735,4 @@
 		.of_match_table = meson_pinctrl_dt_match,
 	},
 };
-module_platform_driver(meson_pinctrl_driver);
-
-MODULE_AUTHOR("Beniamino Galvani <b.galvani@gmail.com>");
-MODULE_DESCRIPTION("Amlogic Meson pinctrl driver");
-MODULE_LICENSE("GPL v2");
+builtin_platform_driver(meson_pinctrl_driver);
diff --git a/drivers/pinctrl/meson/pinctrl-meson.h b/drivers/pinctrl/meson/pinctrl-meson.h
index 0fe7d53..9c93e0d 100644
--- a/drivers/pinctrl/meson/pinctrl-meson.h
+++ b/drivers/pinctrl/meson/pinctrl-meson.h
@@ -34,7 +34,6 @@
 	bool is_gpio;
 	unsigned int reg;
 	unsigned int bit;
-	unsigned int domain;
 };
 
 /**
@@ -144,7 +143,6 @@
 	unsigned int num_pins;
 	unsigned int num_groups;
 	unsigned int num_funcs;
-	unsigned int num_domains;
 };
 
 struct meson_pinctrl {
@@ -152,7 +150,7 @@
 	struct pinctrl_dev *pcdev;
 	struct pinctrl_desc desc;
 	struct meson_pinctrl_data *data;
-	struct meson_domain *domains;
+	struct meson_domain *domain;
 };
 
 #define PIN(x, b)	(b + x)
@@ -164,7 +162,6 @@
 		.num_pins = ARRAY_SIZE(grp ## _pins),			\
 		.reg = r,						\
 		.bit = b,						\
-		.domain = 0,						\
 	 }
 
 #define GPIO_GROUP(gpio, b)						\
@@ -175,16 +172,6 @@
 		.is_gpio = true,					\
 	 }
 
-#define GROUP_AO(grp, r, b)						\
-	{								\
-		.name = #grp,						\
-		.pins = grp ## _pins,					\
-		.num_pins = ARRAY_SIZE(grp ## _pins),			\
-		.reg = r,						\
-		.bit = b,						\
-		.domain = 1,						\
-	 }
-
 #define FUNCTION(fn)							\
 	{								\
 		.name = #fn,						\
@@ -208,5 +195,7 @@
 
 #define MESON_PIN(x, b) PINCTRL_PIN(PIN(x, b), #x)
 
-extern struct meson_pinctrl_data meson8_pinctrl_data;
-extern struct meson_pinctrl_data meson8b_pinctrl_data;
+extern struct meson_pinctrl_data meson8_cbus_pinctrl_data;
+extern struct meson_pinctrl_data meson8_aobus_pinctrl_data;
+extern struct meson_pinctrl_data meson8b_cbus_pinctrl_data;
+extern struct meson_pinctrl_data meson8b_aobus_pinctrl_data;
diff --git a/drivers/pinctrl/meson/pinctrl-meson8.c b/drivers/pinctrl/meson/pinctrl-meson8.c
index 7b1cc91..32de191 100644
--- a/drivers/pinctrl/meson/pinctrl-meson8.c
+++ b/drivers/pinctrl/meson/pinctrl-meson8.c
@@ -16,7 +16,7 @@
 
 #define AO_OFF	120
 
-static const struct pinctrl_pin_desc meson8_pins[] = {
+static const struct pinctrl_pin_desc meson8_cbus_pins[] = {
 	MESON_PIN(GPIOX_0, 0),
 	MESON_PIN(GPIOX_1, 0),
 	MESON_PIN(GPIOX_2, 0),
@@ -137,6 +137,9 @@
 	MESON_PIN(BOOT_16, 0),
 	MESON_PIN(BOOT_17, 0),
 	MESON_PIN(BOOT_18, 0),
+};
+
+static const struct pinctrl_pin_desc meson8_aobus_pins[] = {
 	MESON_PIN(GPIOAO_0, AO_OFF),
 	MESON_PIN(GPIOAO_1, AO_OFF),
 	MESON_PIN(GPIOAO_2, AO_OFF),
@@ -379,7 +382,7 @@
 static const unsigned int i2c_mst_sck_ao_pins[] = { PIN(GPIOAO_4, AO_OFF) };
 static const unsigned int i2c_mst_sda_ao_pins[] = { PIN(GPIOAO_5, AO_OFF) };
 
-static struct meson_pmx_group meson8_groups[] = {
+static struct meson_pmx_group meson8_cbus_groups[] = {
 	GPIO_GROUP(GPIOX_0, 0),
 	GPIO_GROUP(GPIOX_1, 0),
 	GPIO_GROUP(GPIOX_2, 0),
@@ -474,22 +477,6 @@
 	GPIO_GROUP(GPIOZ_12, 0),
 	GPIO_GROUP(GPIOZ_13, 0),
 	GPIO_GROUP(GPIOZ_14, 0),
-	GPIO_GROUP(GPIOAO_0, AO_OFF),
-	GPIO_GROUP(GPIOAO_1, AO_OFF),
-	GPIO_GROUP(GPIOAO_2, AO_OFF),
-	GPIO_GROUP(GPIOAO_3, AO_OFF),
-	GPIO_GROUP(GPIOAO_4, AO_OFF),
-	GPIO_GROUP(GPIOAO_5, AO_OFF),
-	GPIO_GROUP(GPIOAO_6, AO_OFF),
-	GPIO_GROUP(GPIOAO_7, AO_OFF),
-	GPIO_GROUP(GPIOAO_8, AO_OFF),
-	GPIO_GROUP(GPIOAO_9, AO_OFF),
-	GPIO_GROUP(GPIOAO_10, AO_OFF),
-	GPIO_GROUP(GPIOAO_11, AO_OFF),
-	GPIO_GROUP(GPIOAO_12, AO_OFF),
-	GPIO_GROUP(GPIOAO_13, AO_OFF),
-	GPIO_GROUP(GPIO_BSD_EN, AO_OFF),
-	GPIO_GROUP(GPIO_TEST_N, AO_OFF),
 
 	/* bank X */
 	GROUP(sd_d0_a,		8,	5),
@@ -675,26 +662,45 @@
 	GROUP(sdxc_d0_b,	2,	7),
 	GROUP(sdxc_clk_b,	2,	5),
 	GROUP(sdxc_cmd_b,	2,	4),
+};
+
+static struct meson_pmx_group meson8_aobus_groups[] = {
+	GPIO_GROUP(GPIOAO_0, AO_OFF),
+	GPIO_GROUP(GPIOAO_1, AO_OFF),
+	GPIO_GROUP(GPIOAO_2, AO_OFF),
+	GPIO_GROUP(GPIOAO_3, AO_OFF),
+	GPIO_GROUP(GPIOAO_4, AO_OFF),
+	GPIO_GROUP(GPIOAO_5, AO_OFF),
+	GPIO_GROUP(GPIOAO_6, AO_OFF),
+	GPIO_GROUP(GPIOAO_7, AO_OFF),
+	GPIO_GROUP(GPIOAO_8, AO_OFF),
+	GPIO_GROUP(GPIOAO_9, AO_OFF),
+	GPIO_GROUP(GPIOAO_10, AO_OFF),
+	GPIO_GROUP(GPIOAO_11, AO_OFF),
+	GPIO_GROUP(GPIOAO_12, AO_OFF),
+	GPIO_GROUP(GPIOAO_13, AO_OFF),
+	GPIO_GROUP(GPIO_BSD_EN, AO_OFF),
+	GPIO_GROUP(GPIO_TEST_N, AO_OFF),
 
 	/* bank AO */
-	GROUP_AO(uart_tx_ao_a,		0,	12),
-	GROUP_AO(uart_rx_ao_a,		0,	11),
-	GROUP_AO(uart_cts_ao_a,		0,	10),
-	GROUP_AO(uart_rts_ao_a,		0,	9),
+	GROUP(uart_tx_ao_a,		0,	12),
+	GROUP(uart_rx_ao_a,		0,	11),
+	GROUP(uart_cts_ao_a,		0,	10),
+	GROUP(uart_rts_ao_a,		0,	9),
 
-	GROUP_AO(remote_input,		0,	0),
+	GROUP(remote_input,		0,	0),
 
-	GROUP_AO(i2c_slave_sck_ao,	0,	2),
-	GROUP_AO(i2c_slave_sda_ao,	0,	1),
+	GROUP(i2c_slave_sck_ao,		0,	2),
+	GROUP(i2c_slave_sda_ao,		0,	1),
 
-	GROUP_AO(uart_tx_ao_b0,		0,	26),
-	GROUP_AO(uart_rx_ao_b0,		0,	25),
+	GROUP(uart_tx_ao_b0,		0,	26),
+	GROUP(uart_rx_ao_b0,		0,	25),
 
-	GROUP_AO(uart_tx_ao_b1,		0,	24),
-	GROUP_AO(uart_rx_ao_b1,		0,	23),
+	GROUP(uart_tx_ao_b1,		0,	24),
+	GROUP(uart_rx_ao_b1,		0,	23),
 
-	GROUP_AO(i2c_mst_sck_ao,	0,	6),
-	GROUP_AO(i2c_mst_sda_ao,	0,	5),
+	GROUP(i2c_mst_sck_ao,		0,	6),
+	GROUP(i2c_mst_sda_ao,		0,	5),
 };
 
 static const char * const gpio_groups[] = {
@@ -872,7 +878,7 @@
 	"i2c_mst_sck_ao", "i2c_mst_sda_ao"
 };
 
-static struct meson_pmx_func meson8_functions[] = {
+static struct meson_pmx_func meson8_cbus_functions[] = {
 	FUNCTION(gpio),
 	FUNCTION(sd_a),
 	FUNCTION(sdxc_a),
@@ -899,6 +905,9 @@
 	FUNCTION(nor),
 	FUNCTION(sd_b),
 	FUNCTION(sdxc_b),
+};
+
+static struct meson_pmx_func meson8_aobus_functions[] = {
 	FUNCTION(uart_ao),
 	FUNCTION(remote),
 	FUNCTION(i2c_slave_ao),
@@ -906,7 +915,7 @@
 	FUNCTION(i2c_mst_ao),
 };
 
-static struct meson_bank meson8_banks[] = {
+static struct meson_bank meson8_cbus_banks[] = {
 	/*   name    first             last                 pullen  pull    dir     out     in  */
 	BANK("X",    PIN(GPIOX_0, 0),  PIN(GPIOX_21, 0),    4,  0,  4,  0,  0,  0,  1,  0,  2,  0),
 	BANK("Y",    PIN(GPIOY_0, 0),  PIN(GPIOY_16, 0),    3,  0,  3,  0,  3,  0,  4,  0,  5,  0),
@@ -917,35 +926,43 @@
 	BANK("BOOT", PIN(BOOT_0, 0),   PIN(BOOT_18, 0),     2,  0,  2,  0,  9,  0, 10,  0, 11,  0),
 };
 
-static struct meson_bank meson8_ao_banks[] = {
+static struct meson_bank meson8_aobus_banks[] = {
 	/*   name    first                  last                      pullen  pull    dir     out     in  */
 	BANK("AO",   PIN(GPIOAO_0, AO_OFF), PIN(GPIO_TEST_N, AO_OFF), 0,  0,  0, 16,  0,  0,  0, 16,  1,  0),
 };
 
-static struct meson_domain_data meson8_domain_data[] = {
-	{
-		.name		= "banks",
-		.banks		= meson8_banks,
-		.num_banks	= ARRAY_SIZE(meson8_banks),
-		.pin_base	= 0,
-		.num_pins	= 120,
-	},
-	{
-		.name		= "ao-bank",
-		.banks		= meson8_ao_banks,
-		.num_banks	= ARRAY_SIZE(meson8_ao_banks),
-		.pin_base	= 120,
-		.num_pins	= 16,
-	},
+static struct meson_domain_data meson8_cbus_domain_data = {
+	.name		= "cbus-banks",
+	.banks		= meson8_cbus_banks,
+	.num_banks	= ARRAY_SIZE(meson8_cbus_banks),
+	.pin_base	= 0,
+	.num_pins	= 120,
 };
 
-struct meson_pinctrl_data meson8_pinctrl_data = {
-	.pins		= meson8_pins,
-	.groups		= meson8_groups,
-	.funcs		= meson8_functions,
-	.domain_data	= meson8_domain_data,
-	.num_pins	= ARRAY_SIZE(meson8_pins),
-	.num_groups	= ARRAY_SIZE(meson8_groups),
-	.num_funcs	= ARRAY_SIZE(meson8_functions),
-	.num_domains	= ARRAY_SIZE(meson8_domain_data),
+static struct meson_domain_data meson8_aobus_domain_data = {
+	.name		= "ao-bank",
+	.banks		= meson8_aobus_banks,
+	.num_banks	= ARRAY_SIZE(meson8_aobus_banks),
+	.pin_base	= 120,
+	.num_pins	= 16,
+};
+
+struct meson_pinctrl_data meson8_cbus_pinctrl_data = {
+	.pins		= meson8_cbus_pins,
+	.groups		= meson8_cbus_groups,
+	.funcs		= meson8_cbus_functions,
+	.domain_data	= &meson8_cbus_domain_data,
+	.num_pins	= ARRAY_SIZE(meson8_cbus_pins),
+	.num_groups	= ARRAY_SIZE(meson8_cbus_groups),
+	.num_funcs	= ARRAY_SIZE(meson8_cbus_functions),
+};
+
+struct meson_pinctrl_data meson8_aobus_pinctrl_data = {
+	.pins		= meson8_aobus_pins,
+	.groups		= meson8_aobus_groups,
+	.funcs		= meson8_aobus_functions,
+	.domain_data	= &meson8_aobus_domain_data,
+	.num_pins	= ARRAY_SIZE(meson8_aobus_pins),
+	.num_groups	= ARRAY_SIZE(meson8_aobus_groups),
+	.num_funcs	= ARRAY_SIZE(meson8_aobus_functions),
 };
diff --git a/drivers/pinctrl/meson/pinctrl-meson8b.c b/drivers/pinctrl/meson/pinctrl-meson8b.c
index 9677807..a100bcf 100644
--- a/drivers/pinctrl/meson/pinctrl-meson8b.c
+++ b/drivers/pinctrl/meson/pinctrl-meson8b.c
@@ -17,7 +17,7 @@
 
 #define AO_OFF	130
 
-static const struct pinctrl_pin_desc meson8b_pins[] = {
+static const struct pinctrl_pin_desc meson8b_cbus_pins[] = {
 	MESON_PIN(GPIOX_0, 0),
 	MESON_PIN(GPIOX_1, 0),
 	MESON_PIN(GPIOX_2, 0),
@@ -107,7 +107,9 @@
 	MESON_PIN(DIF_3_N, 0),
 	MESON_PIN(DIF_4_P, 0),
 	MESON_PIN(DIF_4_N, 0),
+};
 
+static const struct pinctrl_pin_desc meson8b_aobus_pins[] = {
 	MESON_PIN(GPIOAO_0, AO_OFF),
 	MESON_PIN(GPIOAO_1, AO_OFF),
 	MESON_PIN(GPIOAO_2, AO_OFF),
@@ -346,7 +348,7 @@
 static const unsigned int eth_mdc_pins[]	= { PIN(DIF_4_P, 0) };
 static const unsigned int eth_mdio_en_pins[]	= { PIN(DIF_4_N, 0) };
 
-static struct meson_pmx_group meson8b_groups[] = {
+static struct meson_pmx_group meson8b_cbus_groups[] = {
 	GPIO_GROUP(GPIOX_0, 0),
 	GPIO_GROUP(GPIOX_1, 0),
 	GPIO_GROUP(GPIOX_2, 0),
@@ -409,23 +411,6 @@
 	GPIO_GROUP(DIF_4_P, 0),
 	GPIO_GROUP(DIF_4_N, 0),
 
-	GPIO_GROUP(GPIOAO_0, AO_OFF),
-	GPIO_GROUP(GPIOAO_1, AO_OFF),
-	GPIO_GROUP(GPIOAO_2, AO_OFF),
-	GPIO_GROUP(GPIOAO_3, AO_OFF),
-	GPIO_GROUP(GPIOAO_4, AO_OFF),
-	GPIO_GROUP(GPIOAO_5, AO_OFF),
-	GPIO_GROUP(GPIOAO_6, AO_OFF),
-	GPIO_GROUP(GPIOAO_7, AO_OFF),
-	GPIO_GROUP(GPIOAO_8, AO_OFF),
-	GPIO_GROUP(GPIOAO_9, AO_OFF),
-	GPIO_GROUP(GPIOAO_10, AO_OFF),
-	GPIO_GROUP(GPIOAO_11, AO_OFF),
-	GPIO_GROUP(GPIOAO_12, AO_OFF),
-	GPIO_GROUP(GPIOAO_13, AO_OFF),
-	GPIO_GROUP(GPIO_BSD_EN, AO_OFF),
-	GPIO_GROUP(GPIO_TEST_N, AO_OFF),
-
 	/* bank X */
 	GROUP(sd_d0_a,		8,	5),
 	GROUP(sd_d1_a,		8,	4),
@@ -572,6 +557,37 @@
 	GROUP(sdxc_clk_b,	2,	5),
 	GROUP(sdxc_cmd_b,	2,	4),
 
+	/* bank DIF */
+	GROUP(eth_rxd1,		6,	0),
+	GROUP(eth_rxd0,		6,	1),
+	GROUP(eth_rx_dv,	6,	2),
+	GROUP(eth_rx_clk,	6,	3),
+	GROUP(eth_txd0_1,	6,	4),
+	GROUP(eth_txd1_1,	6,	5),
+	GROUP(eth_tx_en,	6,	0),
+	GROUP(eth_ref_clk,	6,	8),
+	GROUP(eth_mdc,		6,	9),
+	GROUP(eth_mdio_en,	6,	10),
+};
+
+static struct meson_pmx_group meson8b_aobus_groups[] = {
+	GPIO_GROUP(GPIOAO_0, AO_OFF),
+	GPIO_GROUP(GPIOAO_1, AO_OFF),
+	GPIO_GROUP(GPIOAO_2, AO_OFF),
+	GPIO_GROUP(GPIOAO_3, AO_OFF),
+	GPIO_GROUP(GPIOAO_4, AO_OFF),
+	GPIO_GROUP(GPIOAO_5, AO_OFF),
+	GPIO_GROUP(GPIOAO_6, AO_OFF),
+	GPIO_GROUP(GPIOAO_7, AO_OFF),
+	GPIO_GROUP(GPIOAO_8, AO_OFF),
+	GPIO_GROUP(GPIOAO_9, AO_OFF),
+	GPIO_GROUP(GPIOAO_10, AO_OFF),
+	GPIO_GROUP(GPIOAO_11, AO_OFF),
+	GPIO_GROUP(GPIOAO_12, AO_OFF),
+	GPIO_GROUP(GPIOAO_13, AO_OFF),
+	GPIO_GROUP(GPIO_BSD_EN, AO_OFF),
+	GPIO_GROUP(GPIO_TEST_N, AO_OFF),
+
 	/* bank AO */
 	GROUP(uart_tx_ao_a,	0,	12),
 	GROUP(uart_rx_ao_a,	0,	11),
@@ -601,18 +617,6 @@
 	GROUP(i2s_in_ch01,	0,	13),
 	GROUP(i2s_ao_clk_in,	0,	15),
 	GROUP(i2s_lr_clk_in,	0,	14),
-
-	/* bank DIF */
-	GROUP(eth_rxd1,		6,	0),
-	GROUP(eth_rxd0,		6,	1),
-	GROUP(eth_rx_dv,	6,	2),
-	GROUP(eth_rx_clk,	6,	3),
-	GROUP(eth_txd0_1,	6,	4),
-	GROUP(eth_txd1_1,	6,	5),
-	GROUP(eth_tx_en,	6,	0),
-	GROUP(eth_ref_clk,	6,	8),
-	GROUP(eth_mdc,		6,	9),
-	GROUP(eth_mdio_en,	6,	10),
 };
 
 static const char * const gpio_groups[] = {
@@ -694,7 +698,10 @@
 };
 
 static const char * const hdmi_groups[] = {
-	"hdmi_hpd", "hdmi_sda", "hdmi_scl", "hdmi_cec_0",
+	"hdmi_hpd", "hdmi_sda", "hdmi_scl", "hdmi_cec_0"
+};
+
+static const char * const hdmi_cec_groups[] = {
 	"hdmi_cec_1"
 };
 
@@ -770,12 +777,20 @@
 	"i2c_mst_sck_ao", "i2c_mst_sda_ao"
 };
 
-static const char * const clk_groups[] = {
-	"clk_24m_out", "clk_32k_in_out"
+static const char * const clk_24m_groups[] = {
+	"clk_24m_out"
 };
 
-static const char * const spdif_groups[] = {
-	"spdif_out_1", "spdif_out_0"
+static const char * const clk_32k_groups[] = {
+	"clk_32k_in_out"
+};
+
+static const char * const spdif_0_groups[] = {
+	"spdif_out_0"
+};
+
+static const char * const spdif_1_groups[] = {
+	"spdif_out_1"
 };
 
 static const char * const i2s_groups[] = {
@@ -789,7 +804,11 @@
 };
 
 static const char * const pwm_c_groups[] = {
-	"pwm_c0", "pwm_c1", "pwm_c2"
+	"pwm_c0", "pwm_c1"
+};
+
+static const char * const pwm_c_ao_groups[] = {
+	"pwm_c2"
 };
 
 static const char * const pwm_d_groups[] = {
@@ -814,7 +833,7 @@
 	"tsin_d0_b", "tsin_clk_b", "tsin_sop_b", "tsin_d_valid_b"
 };
 
-static struct meson_pmx_func meson8b_functions[] = {
+static struct meson_pmx_func meson8b_cbus_functions[] = {
 	FUNCTION(gpio),
 	FUNCTION(sd_a),
 	FUNCTION(sdxc_a),
@@ -837,14 +856,7 @@
 	FUNCTION(nor),
 	FUNCTION(sd_b),
 	FUNCTION(sdxc_b),
-	FUNCTION(uart_ao),
-	FUNCTION(remote),
-	FUNCTION(i2c_slave_ao),
-	FUNCTION(uart_ao_b),
-	FUNCTION(i2c_mst_ao),
-	FUNCTION(clk),
-	FUNCTION(spdif),
-	FUNCTION(i2s),
+	FUNCTION(spdif_0),
 	FUNCTION(pwm_b),
 	FUNCTION(pwm_c),
 	FUNCTION(pwm_d),
@@ -852,9 +864,23 @@
 	FUNCTION(pwm_vs),
 	FUNCTION(tsin_a),
 	FUNCTION(tsin_b),
+	FUNCTION(clk_24m),
 };
 
-static struct meson_bank meson8b_banks[] = {
+static struct meson_pmx_func meson8b_aobus_functions[] = {
+	FUNCTION(uart_ao),
+	FUNCTION(uart_ao_b),
+	FUNCTION(i2c_slave_ao),
+	FUNCTION(i2c_mst_ao),
+	FUNCTION(i2s),
+	FUNCTION(remote),
+	FUNCTION(clk_32k),
+	FUNCTION(pwm_c_ao),
+	FUNCTION(spdif_1),
+	FUNCTION(hdmi_cec),
+};
+
+static struct meson_bank meson8b_cbus_banks[] = {
 	/*   name    first                      last                   pullen  pull    dir     out     in  */
 	BANK("X",    PIN(GPIOX_0, 0),		PIN(GPIOX_21, 0),      4,  0,  4,  0,  0,  0,  1,  0,  2,  0),
 	BANK("Y",    PIN(GPIOY_0, 0),		PIN(GPIOY_14, 0),      3,  0,  3,  0,  3,  0,  4,  0,  5,  0),
@@ -865,35 +891,43 @@
 	BANK("DIF",  PIN(DIF_0_P, 0),		PIN(DIF_4_N, 0),       5,  8,  5,  8, 12, 12, 13, 12, 14, 12),
 };
 
-static struct meson_bank meson8b_ao_banks[] = {
+static struct meson_bank meson8b_aobus_banks[] = {
 	/*   name    first                  last                      pullen  pull    dir     out     in  */
 	BANK("AO",   PIN(GPIOAO_0, AO_OFF), PIN(GPIO_TEST_N, AO_OFF), 0,  0,  0, 16,  0,  0,  0, 16,  1,  0),
 };
 
-static struct meson_domain_data meson8b_domain_data[] = {
-	{
-		.name		= "banks",
-		.banks		= meson8b_banks,
-		.num_banks	= ARRAY_SIZE(meson8b_banks),
-		.pin_base	= 0,
-		.num_pins	= 130,
-	},
-	{
-		.name		= "ao-bank",
-		.banks		= meson8b_ao_banks,
-		.num_banks	= ARRAY_SIZE(meson8b_ao_banks),
-		.pin_base	= 130,
-		.num_pins	= 16,
-	},
+static struct meson_domain_data meson8b_cbus_domain_data = {
+	.name		= "cbus-banks",
+	.banks		= meson8b_cbus_banks,
+	.num_banks	= ARRAY_SIZE(meson8b_cbus_banks),
+	.pin_base	= 0,
+	.num_pins	= 130,
 };
 
-struct meson_pinctrl_data meson8b_pinctrl_data = {
-	.pins		= meson8b_pins,
-	.groups		= meson8b_groups,
-	.funcs		= meson8b_functions,
-	.domain_data	= meson8b_domain_data,
-	.num_pins	= ARRAY_SIZE(meson8b_pins),
-	.num_groups	= ARRAY_SIZE(meson8b_groups),
-	.num_funcs	= ARRAY_SIZE(meson8b_functions),
-	.num_domains	= ARRAY_SIZE(meson8b_domain_data),
+static struct meson_domain_data meson8b_aobus_domain_data = {
+	.name		= "aobus-banks",
+	.banks		= meson8b_aobus_banks,
+	.num_banks	= ARRAY_SIZE(meson8b_aobus_banks),
+	.pin_base	= 130,
+	.num_pins	= 16,
+};
+
+struct meson_pinctrl_data meson8b_cbus_pinctrl_data = {
+	.pins		= meson8b_cbus_pins,
+	.groups		= meson8b_cbus_groups,
+	.funcs		= meson8b_cbus_functions,
+	.domain_data	= &meson8b_cbus_domain_data,
+	.num_pins	= ARRAY_SIZE(meson8b_cbus_pins),
+	.num_groups	= ARRAY_SIZE(meson8b_cbus_groups),
+	.num_funcs	= ARRAY_SIZE(meson8b_cbus_functions),
+};
+
+struct meson_pinctrl_data meson8b_aobus_pinctrl_data = {
+	.pins		= meson8b_aobus_pins,
+	.groups		= meson8b_aobus_groups,
+	.funcs		= meson8b_aobus_functions,
+	.domain_data	= &meson8b_aobus_domain_data,
+	.num_pins	= ARRAY_SIZE(meson8b_aobus_pins),
+	.num_groups	= ARRAY_SIZE(meson8b_aobus_groups),
+	.num_funcs	= ARRAY_SIZE(meson8b_aobus_functions),
 };
diff --git a/drivers/pinctrl/nomadik/pinctrl-nomadik-stn8815.c b/drivers/pinctrl/nomadik/pinctrl-nomadik-stn8815.c
index 587b222..e852048 100644
--- a/drivers/pinctrl/nomadik/pinctrl-nomadik-stn8815.c
+++ b/drivers/pinctrl/nomadik/pinctrl-nomadik-stn8815.c
@@ -287,6 +287,10 @@
 /* Altfunction B */
 static const unsigned u1_b_1_pins[] = { STN8815_PIN_B16, STN8815_PIN_A16 };
 static const unsigned i2cusb_b_1_pins[] = { STN8815_PIN_C21, STN8815_PIN_C20 };
+static const unsigned clcd_16_23_b_1_pins[] = { STN8815_PIN_AB6,
+	STN8815_PIN_AA6, STN8815_PIN_Y6, STN8815_PIN_Y5, STN8815_PIN_AA5,
+	STN8815_PIN_AB5, STN8815_PIN_AB4, STN8815_PIN_Y4 };
+
 
 #define STN8815_PIN_GROUP(a, b) { .name = #a, .pins = a##_pins,		\
 			.npins = ARRAY_SIZE(a##_pins), .altsetting = b }
@@ -302,6 +306,7 @@
 	STN8815_PIN_GROUP(i2c0_a_1, NMK_GPIO_ALT_A),
 	STN8815_PIN_GROUP(u1_b_1, NMK_GPIO_ALT_B),
 	STN8815_PIN_GROUP(i2cusb_b_1, NMK_GPIO_ALT_B),
+	STN8815_PIN_GROUP(clcd_16_23_b_1, NMK_GPIO_ALT_B),
 };
 
 /* We use this macro to define the groups applicable to a function */
@@ -314,6 +319,7 @@
 STN8815_FUNC_GROUPS(i2c1, "i2c1_a_1");
 STN8815_FUNC_GROUPS(i2c0, "i2c0_a_1");
 STN8815_FUNC_GROUPS(i2cusb, "i2cusb_b_1");
+STN8815_FUNC_GROUPS(clcd, "clcd_16_23_b_1");
 
 #define FUNCTION(fname)					\
 	{						\
@@ -329,6 +335,7 @@
 	FUNCTION(i2c1),
 	FUNCTION(i2c0),
 	FUNCTION(i2cusb),
+	FUNCTION(clcd),
 };
 
 static const struct nmk_pinctrl_soc_data nmk_stn8815_soc = {
diff --git a/drivers/pinctrl/pinctrl-amd.c b/drivers/pinctrl/pinctrl-amd.c
index 6574494..5c025f5 100644
--- a/drivers/pinctrl/pinctrl-amd.c
+++ b/drivers/pinctrl/pinctrl-amd.c
@@ -753,8 +753,8 @@
 
 	gpio_dev->base = devm_ioremap_nocache(&pdev->dev, res->start,
 						resource_size(res));
-	if (IS_ERR(gpio_dev->base))
-		return PTR_ERR(gpio_dev->base);
+	if (!gpio_dev->base)
+		return -ENOMEM;
 
 	irq_base = platform_get_irq(pdev, 0);
 	if (irq_base < 0) {
diff --git a/drivers/pinctrl/pinctrl-at91-pio4.c b/drivers/pinctrl/pinctrl-at91-pio4.c
index ee69db6..4429312 100644
--- a/drivers/pinctrl/pinctrl-at91-pio4.c
+++ b/drivers/pinctrl/pinctrl-at91-pio4.c
@@ -824,7 +824,7 @@
 	.pmxops		= &atmel_pmxops,
 };
 
-static int atmel_pctrl_suspend(struct device *dev)
+static int __maybe_unused atmel_pctrl_suspend(struct device *dev)
 {
 	struct platform_device *pdev = to_platform_device(dev);
 	struct atmel_pioctrl *atmel_pioctrl = platform_get_drvdata(pdev);
@@ -844,7 +844,7 @@
 	return 0;
 }
 
-static int atmel_pctrl_resume(struct device *dev)
+static int __maybe_unused atmel_pctrl_resume(struct device *dev)
 {
 	struct platform_device *pdev = to_platform_device(dev);
 	struct atmel_pioctrl *atmel_pioctrl = platform_get_drvdata(pdev);
diff --git a/drivers/pinctrl/pinctrl-coh901.c b/drivers/pinctrl/pinctrl-coh901.c
index cf7788d..741b39e 100644
--- a/drivers/pinctrl/pinctrl-coh901.c
+++ b/drivers/pinctrl/pinctrl-coh901.c
@@ -127,7 +127,7 @@
 }
 
 /* Initial configuration */
-static const struct __initconst u300_gpio_confdata
+static const struct u300_gpio_confdata __initconst
 bs335_gpio_config[U300_GPIO_NUM_PORTS][U300_GPIO_PINS_PER_PORT] = {
 	/* Port 0, pins 0-7 */
 	{
diff --git a/drivers/pinctrl/pinctrl-lpc18xx.c b/drivers/pinctrl/pinctrl-lpc18xx.c
index f0bebbe..b1767f7 100644
--- a/drivers/pinctrl/pinctrl-lpc18xx.c
+++ b/drivers/pinctrl/pinctrl-lpc18xx.c
@@ -49,6 +49,18 @@
 
 #define LPC18XX_SCU_FUNC_PER_PIN	8
 
+/* LPC18XX SCU pin interrupt select registers */
+#define LPC18XX_SCU_PINTSEL0		0xe00
+#define LPC18XX_SCU_PINTSEL1		0xe04
+#define LPC18XX_SCU_PINTSEL_VAL_MASK	0xff
+#define LPC18XX_SCU_PINTSEL_PORT_SHIFT	5
+#define LPC18XX_SCU_IRQ_PER_PINTSEL	4
+#define LPC18XX_GPIO_PINS_PER_PORT	32
+#define LPC18XX_GPIO_PIN_INT_MAX	8
+
+#define LPC18XX_SCU_PINTSEL_VAL(val, n) \
+	((val) << (((n) % LPC18XX_SCU_IRQ_PER_PINTSEL) * 8))
+
 /* LPC18xx pin types */
 enum {
 	TYPE_ND,	/* Normal-drive */
@@ -618,6 +630,25 @@
 	LPC18XX_PIN(i2c0_sda, PIN_I2C0_SDA),
 };
 
+/**
+ * enum lpc18xx_pin_config_param - possible pin configuration parameters
+ * @PIN_CONFIG_GPIO_PIN_INT: route gpio to the gpio pin interrupt
+ * 	controller.
+ */
+enum lpc18xx_pin_config_param {
+	PIN_CONFIG_GPIO_PIN_INT = PIN_CONFIG_END + 1,
+};
+
+static const struct pinconf_generic_params lpc18xx_params[] = {
+	{"nxp,gpio-pin-interrupt", PIN_CONFIG_GPIO_PIN_INT, 0},
+};
+
+#ifdef CONFIG_DEBUG_FS
+static const struct pin_config_item lpc18xx_conf_items[ARRAY_SIZE(lpc18xx_params)] = {
+	PCONFDUMP(PIN_CONFIG_GPIO_PIN_INT, "gpio pin int", NULL, true),
+};
+#endif
+
 static int lpc18xx_pconf_get_usb1(enum pin_config_param param, int *arg, u32 reg)
 {
 	switch (param) {
@@ -693,7 +724,71 @@
 	return 0;
 }
 
-static int lpc18xx_pconf_get_pin(enum pin_config_param param, int *arg, u32 reg,
+static int lpc18xx_pin_to_gpio(struct pinctrl_dev *pctldev, unsigned pin)
+{
+	struct pinctrl_gpio_range *range;
+
+	range = pinctrl_find_gpio_range_from_pin_nolock(pctldev, pin);
+	if (!range)
+		return -EINVAL;
+
+	return pin - range->pin_base + range->base;
+}
+
+static int lpc18xx_get_pintsel(void __iomem *addr, u32 val, int *arg)
+{
+	u32 reg_val;
+	int i;
+
+	reg_val = readl(addr);
+	for (i = 0; i < LPC18XX_SCU_IRQ_PER_PINTSEL; i++) {
+		if ((reg_val & LPC18XX_SCU_PINTSEL_VAL_MASK) == val)
+			return 0;
+
+		reg_val >>= BITS_PER_BYTE;
+		*arg += 1;
+	}
+
+	return -EINVAL;
+}
+
+static u32 lpc18xx_gpio_to_pintsel_val(int gpio)
+{
+	unsigned int gpio_port, gpio_pin;
+
+	gpio_port = gpio / LPC18XX_GPIO_PINS_PER_PORT;
+	gpio_pin  = gpio % LPC18XX_GPIO_PINS_PER_PORT;
+
+	return gpio_pin | (gpio_port << LPC18XX_SCU_PINTSEL_PORT_SHIFT);
+}
+
+static int lpc18xx_pconf_get_gpio_pin_int(struct pinctrl_dev *pctldev,
+					  int *arg, unsigned pin)
+{
+	struct lpc18xx_scu_data *scu = pinctrl_dev_get_drvdata(pctldev);
+	int gpio, ret;
+	u32 val;
+
+	gpio = lpc18xx_pin_to_gpio(pctldev, pin);
+	if (gpio < 0)
+		return -ENOTSUPP;
+
+	val = lpc18xx_gpio_to_pintsel_val(gpio);
+
+	/*
+	 * Check if this pin has been enabled as an interrupt in either of the two
+	 * PINTSEL registers. *arg indicates which interrupt number (0-7).
+	 */
+	*arg = 0;
+	ret = lpc18xx_get_pintsel(scu->base + LPC18XX_SCU_PINTSEL0, val, arg);
+	if (ret == 0)
+		return ret;
+
+	return lpc18xx_get_pintsel(scu->base + LPC18XX_SCU_PINTSEL1, val, arg);
+}
+
+static int lpc18xx_pconf_get_pin(struct pinctrl_dev *pctldev, unsigned param,
+				 int *arg, u32 reg, unsigned pin,
 				 struct lpc18xx_pin_caps *pin_cap)
 {
 	switch (param) {
@@ -755,6 +850,9 @@
 		}
 		break;
 
+	case PIN_CONFIG_GPIO_PIN_INT:
+		return lpc18xx_pconf_get_gpio_pin_int(pctldev, arg, pin);
+
 	default:
 		return -ENOTSUPP;
 	}
@@ -794,7 +892,7 @@
 	else if (pin_cap->type == TYPE_USB1)
 		ret = lpc18xx_pconf_get_usb1(param, &arg, reg);
 	else
-		ret = lpc18xx_pconf_get_pin(param, &arg, reg, pin_cap);
+		ret = lpc18xx_pconf_get_pin(pctldev, param, &arg, reg, pin, pin_cap);
 
 	if (ret < 0)
 		return ret;
@@ -883,9 +981,34 @@
 	return 0;
 }
 
-static int lpc18xx_pconf_set_pin(struct pinctrl_dev *pctldev,
-				 enum pin_config_param param,
-				 u16 param_val, u32 *reg,
+static int lpc18xx_pconf_set_gpio_pin_int(struct pinctrl_dev *pctldev,
+					  u16 param_val, unsigned pin)
+{
+	struct lpc18xx_scu_data *scu = pinctrl_dev_get_drvdata(pctldev);
+	u32 val, reg_val, reg_offset = LPC18XX_SCU_PINTSEL0;
+	int gpio;
+
+	if (param_val >= LPC18XX_GPIO_PIN_INT_MAX)
+		return -EINVAL;
+
+	gpio = lpc18xx_pin_to_gpio(pctldev, pin);
+	if (gpio < 0)
+		return -ENOTSUPP;
+
+	val = lpc18xx_gpio_to_pintsel_val(gpio);
+
+	reg_offset += (param_val / LPC18XX_SCU_IRQ_PER_PINTSEL) * sizeof(u32);
+
+	reg_val = readl(scu->base + reg_offset);
+	reg_val &= ~LPC18XX_SCU_PINTSEL_VAL(LPC18XX_SCU_PINTSEL_VAL_MASK, param_val);
+	reg_val |= LPC18XX_SCU_PINTSEL_VAL(val, param_val);
+	writel(reg_val, scu->base + reg_offset);
+
+	return 0;
+}
+
+static int lpc18xx_pconf_set_pin(struct pinctrl_dev *pctldev, unsigned param,
+				 u16 param_val, u32 *reg, unsigned pin,
 				 struct lpc18xx_pin_caps *pin_cap)
 {
 	switch (param) {
@@ -948,6 +1071,9 @@
 		*reg |= param_val << LPC18XX_SCU_PIN_EHD_POS;
 		break;
 
+	case PIN_CONFIG_GPIO_PIN_INT:
+		return lpc18xx_pconf_set_gpio_pin_int(pctldev, param_val, pin);
+
 	default:
 		dev_err(pctldev->dev, "Property not supported\n");
 		return -ENOTSUPP;
@@ -982,7 +1108,7 @@
 		else if (pin_cap->type == TYPE_USB1)
 			ret = lpc18xx_pconf_set_usb1(pctldev, param, param_val, &reg);
 		else
-			ret = lpc18xx_pconf_set_pin(pctldev, param, param_val, &reg, pin_cap);
+			ret = lpc18xx_pconf_set_pin(pctldev, param, param_val, &reg, pin, pin_cap);
 
 		if (ret)
 			return ret;
@@ -1136,6 +1262,11 @@
 	.pctlops = &lpc18xx_pctl_ops,
 	.pmxops = &lpc18xx_pmx_ops,
 	.confops = &lpc18xx_pconf_ops,
+	.num_custom_params = ARRAY_SIZE(lpc18xx_params),
+	.custom_params = lpc18xx_params,
+#ifdef CONFIG_DEBUG_FS
+	.custom_conf_items = lpc18xx_conf_items,
+#endif
 	.owner = THIS_MODULE,
 };
 
@@ -1170,9 +1301,8 @@
 	u16 pins[ARRAY_SIZE(lpc18xx_pins)];
 	int func, ngroups, i;
 
-	for (func = 0; func < FUNC_MAX; ngroups = 0, func++) {
-
-		for (i = 0; i < ARRAY_SIZE(lpc18xx_pins); i++) {
+	for (func = 0; func < FUNC_MAX; func++) {
+		for (ngroups = 0, i = 0; i < ARRAY_SIZE(lpc18xx_pins); i++) {
 			if (lpc18xx_valid_pin_function(i, func))
 				pins[ngroups++] = i;
 		}
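
For reference only, not part of the patch: the GPIO pin-interrupt support above
reduces to two pieces of arithmetic, encoding a GPIO number as the PINTSEL
port/pin value and picking one of eight 8-bit slots spread across the two
PINTSEL registers. The user-space sketch below illustrates that arithmetic; the
constant names and values (32 pins per port, 4 slots per register, 5-bit port
shift) are assumptions standing in for the driver's LPC18XX_* defines.

#include <stdio.h>

#define PINS_PER_PORT	32	/* stands in for LPC18XX_GPIO_PINS_PER_PORT */
#define IRQ_PER_PINTSEL	4	/* stands in for LPC18XX_SCU_IRQ_PER_PINTSEL */
#define PORT_SHIFT	5	/* stands in for LPC18XX_SCU_PINTSEL_PORT_SHIFT */

int main(void)
{
	unsigned int gpio = 37;		/* example: GPIO1[5] */
	unsigned int irq_slot = 5;	/* pin interrupt number 0-7 */

	/* Encode port and pin the way lpc18xx_gpio_to_pintsel_val() does. */
	unsigned int val = (gpio % PINS_PER_PORT) |
			   ((gpio / PINS_PER_PORT) << PORT_SHIFT);

	/* PINTSEL0 carries slots 0-3, PINTSEL1 slots 4-7, 8 bits per slot. */
	unsigned int reg   = irq_slot / IRQ_PER_PINTSEL;
	unsigned int shift = (irq_slot % IRQ_PER_PINTSEL) * 8;

	printf("GPIO %u -> PINTSEL%u bits [%u:%u] = 0x%02x\n",
	       gpio, reg, shift + 7, shift, val);
	return 0;
}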
diff --git a/drivers/pinctrl/pinctrl-pic32.c b/drivers/pinctrl/pinctrl-pic32.c
new file mode 100644
index 0000000..0b07d4b
--- /dev/null
+++ b/drivers/pinctrl/pinctrl-pic32.c
@@ -0,0 +1,2312 @@
+/*
+ * PIC32 pinctrl driver
+ *
+ * Joshua Henderson, <joshua.henderson@microchip.com>
+ * Copyright (C) 2015 Microchip Technology Inc.  All rights reserved.
+ *
+ * This program is free software; you can distribute it and/or modify it
+ * under the terms of the GNU General Public License (Version 2) as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * for more details.
+ */
+#include <linux/clk.h>
+#include <linux/gpio/driver.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/pinctrl/pinconf.h>
+#include <linux/pinctrl/pinconf-generic.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/pinctrl/pinmux.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+
+#include <asm/mach-pic32/pic32.h>
+
+#include "pinctrl-utils.h"
+#include "pinctrl-pic32.h"
+
+#define PINS_PER_BANK		16
+
+#define PIC32_CNCON_EDGE	11
+#define PIC32_CNCON_ON		15
+
+#define PIN_CONFIG_MICROCHIP_DIGITAL	(PIN_CONFIG_END + 1)
+#define PIN_CONFIG_MICROCHIP_ANALOG	(PIN_CONFIG_END + 2)
+
+static const struct pinconf_generic_params pic32_mpp_bindings[] = {
+	{"microchip,digital",	PIN_CONFIG_MICROCHIP_DIGITAL,	0},
+	{"microchip,analog",	PIN_CONFIG_MICROCHIP_ANALOG,	0},
+};
+
+#define GPIO_BANK_START(bank)		((bank) * PINS_PER_BANK)
+
+struct pic32_function {
+	const char *name;
+	const char * const *groups;
+	unsigned int ngroups;
+};
+
+struct pic32_pin_group {
+	const char *name;
+	unsigned int pin;
+	struct pic32_desc_function *functions;
+};
+
+struct pic32_desc_function {
+	const char *name;
+	u32 muxreg;
+	u32 muxval;
+};
+
+struct pic32_gpio_bank {
+	void __iomem *reg_base;
+	struct gpio_chip gpio_chip;
+	struct irq_chip irq_chip;
+	struct clk *clk;
+};
+
+struct pic32_pinctrl {
+	void __iomem *reg_base;
+	struct device *dev;
+	struct pinctrl_dev *pctldev;
+	const struct pinctrl_pin_desc *pins;
+	unsigned int npins;
+	const struct pic32_function *functions;
+	unsigned int nfunctions;
+	const struct pic32_pin_group *groups;
+	unsigned int ngroups;
+	struct pic32_gpio_bank *gpio_banks;
+	unsigned int nbanks;
+	struct clk *clk;
+};
+
+static const struct pinctrl_pin_desc pic32_pins[] = {
+	PINCTRL_PIN(0, "A0"),
+	PINCTRL_PIN(1, "A1"),
+	PINCTRL_PIN(2, "A2"),
+	PINCTRL_PIN(3, "A3"),
+	PINCTRL_PIN(4, "A4"),
+	PINCTRL_PIN(5, "A5"),
+	PINCTRL_PIN(6, "A6"),
+	PINCTRL_PIN(7, "A7"),
+	PINCTRL_PIN(8, "A8"),
+	PINCTRL_PIN(9, "A9"),
+	PINCTRL_PIN(10, "A10"),
+	PINCTRL_PIN(11, "A11"),
+	PINCTRL_PIN(12, "A12"),
+	PINCTRL_PIN(13, "A13"),
+	PINCTRL_PIN(14, "A14"),
+	PINCTRL_PIN(15, "A15"),
+	PINCTRL_PIN(16, "B0"),
+	PINCTRL_PIN(17, "B1"),
+	PINCTRL_PIN(18, "B2"),
+	PINCTRL_PIN(19, "B3"),
+	PINCTRL_PIN(20, "B4"),
+	PINCTRL_PIN(21, "B5"),
+	PINCTRL_PIN(22, "B6"),
+	PINCTRL_PIN(23, "B7"),
+	PINCTRL_PIN(24, "B8"),
+	PINCTRL_PIN(25, "B9"),
+	PINCTRL_PIN(26, "B10"),
+	PINCTRL_PIN(27, "B11"),
+	PINCTRL_PIN(28, "B12"),
+	PINCTRL_PIN(29, "B13"),
+	PINCTRL_PIN(30, "B14"),
+	PINCTRL_PIN(31, "B15"),
+	PINCTRL_PIN(33, "C1"),
+	PINCTRL_PIN(34, "C2"),
+	PINCTRL_PIN(35, "C3"),
+	PINCTRL_PIN(36, "C4"),
+	PINCTRL_PIN(44, "C12"),
+	PINCTRL_PIN(45, "C13"),
+	PINCTRL_PIN(46, "C14"),
+	PINCTRL_PIN(47, "C15"),
+	PINCTRL_PIN(48, "D0"),
+	PINCTRL_PIN(49, "D1"),
+	PINCTRL_PIN(50, "D2"),
+	PINCTRL_PIN(51, "D3"),
+	PINCTRL_PIN(52, "D4"),
+	PINCTRL_PIN(53, "D5"),
+	PINCTRL_PIN(54, "D6"),
+	PINCTRL_PIN(55, "D7"),
+	PINCTRL_PIN(57, "D9"),
+	PINCTRL_PIN(58, "D10"),
+	PINCTRL_PIN(59, "D11"),
+	PINCTRL_PIN(60, "D12"),
+	PINCTRL_PIN(61, "D13"),
+	PINCTRL_PIN(62, "D14"),
+	PINCTRL_PIN(63, "D15"),
+	PINCTRL_PIN(64, "E0"),
+	PINCTRL_PIN(65, "E1"),
+	PINCTRL_PIN(66, "E2"),
+	PINCTRL_PIN(67, "E3"),
+	PINCTRL_PIN(68, "E4"),
+	PINCTRL_PIN(69, "E5"),
+	PINCTRL_PIN(70, "E6"),
+	PINCTRL_PIN(71, "E7"),
+	PINCTRL_PIN(72, "E8"),
+	PINCTRL_PIN(73, "E9"),
+	PINCTRL_PIN(80, "F0"),
+	PINCTRL_PIN(81, "F1"),
+	PINCTRL_PIN(82, "F2"),
+	PINCTRL_PIN(83, "F3"),
+	PINCTRL_PIN(84, "F4"),
+	PINCTRL_PIN(85, "F5"),
+	PINCTRL_PIN(88, "F8"),
+	PINCTRL_PIN(92, "F12"),
+	PINCTRL_PIN(93, "F13"),
+	PINCTRL_PIN(96, "G0"),
+	PINCTRL_PIN(97, "G1"),
+	PINCTRL_PIN(102, "G6"),
+	PINCTRL_PIN(103, "G7"),
+	PINCTRL_PIN(104, "G8"),
+	PINCTRL_PIN(105, "G9"),
+	PINCTRL_PIN(108, "G12"),
+	PINCTRL_PIN(109, "G13"),
+	PINCTRL_PIN(110, "G14"),
+	PINCTRL_PIN(111, "G15"),
+	PINCTRL_PIN(112, "H0"),
+	PINCTRL_PIN(113, "H1"),
+	PINCTRL_PIN(114, "H2"),
+	PINCTRL_PIN(115, "H3"),
+	PINCTRL_PIN(116, "H4"),
+	PINCTRL_PIN(117, "H5"),
+	PINCTRL_PIN(118, "H6"),
+	PINCTRL_PIN(119, "H7"),
+	PINCTRL_PIN(120, "H8"),
+	PINCTRL_PIN(121, "H9"),
+	PINCTRL_PIN(122, "H10"),
+	PINCTRL_PIN(123, "H11"),
+	PINCTRL_PIN(124, "H12"),
+	PINCTRL_PIN(125, "H13"),
+	PINCTRL_PIN(126, "H14"),
+	PINCTRL_PIN(127, "H15"),
+	PINCTRL_PIN(128, "J0"),
+	PINCTRL_PIN(129, "J1"),
+	PINCTRL_PIN(130, "J2"),
+	PINCTRL_PIN(131, "J3"),
+	PINCTRL_PIN(132, "J4"),
+	PINCTRL_PIN(133, "J5"),
+	PINCTRL_PIN(134, "J6"),
+	PINCTRL_PIN(135, "J7"),
+	PINCTRL_PIN(136, "J8"),
+	PINCTRL_PIN(137, "J9"),
+	PINCTRL_PIN(138, "J10"),
+	PINCTRL_PIN(139, "J11"),
+	PINCTRL_PIN(140, "J12"),
+	PINCTRL_PIN(141, "J13"),
+	PINCTRL_PIN(142, "J14"),
+	PINCTRL_PIN(143, "J15"),
+	PINCTRL_PIN(144, "K0"),
+	PINCTRL_PIN(145, "K1"),
+	PINCTRL_PIN(146, "K2"),
+	PINCTRL_PIN(147, "K3"),
+	PINCTRL_PIN(148, "K4"),
+	PINCTRL_PIN(149, "K5"),
+	PINCTRL_PIN(150, "K6"),
+	PINCTRL_PIN(151, "K7"),
+};
+
+static const char * const pic32_input0_group[] = {
+	"D2", "G8", "F4", "F1", "B9", "B10", "C14", "B5",
+	"C1", "D14", "G1", "A14", "D6",
+};
+
+static const char * const pic32_input1_group[] = {
+	"D3", "G7", "F5", "D11", "F0", "B1", "E5", "C13",
+	"B3", "C4", "G0", "A15", "D7",
+};
+
+static const char * const pic32_input2_group[] = {
+	"D9", "G6", "B8", "B15", "D4", "B0", "E3", "B7",
+	"F12", "D12", "F8", "C3", "E9",
+};
+
+static const char * const pic32_input3_group[] = {
+	"G9", "B14", "D0", "B6", "D5", "B2", "F3", "F13",
+	"F2", "C2", "E8",
+};
+
+static const char * const pic32_output0_group[] = {
+	"D2", "G8", "F4", "D10", "F1", "B9", "B10", "C14",
+	"B5", "C1", "D14", "G1", "A14", "D6",
+};
+
+static const char * const pic32_output0_1_group[] = {
+	"D2", "G8", "F4", "D10", "F1", "B9", "B10", "C14",
+	"B5", "C1", "D14", "G1", "A14", "D6",
+	"D3", "G7", "F5", "D11", "F0", "B1", "E5", "C13",
+	"B3", "C4", "D15", "G0", "A15", "D7",
+};
+
+static const char * const pic32_output1_group[] = {
+	"D3", "G7", "F5", "D11", "F0", "B1", "E5", "C13",
+	"B3", "C4", "D15", "G0", "A15", "D7",
+};
+
+static const char * const pic32_output1_3_group[] = {
+	"D3", "G7", "F5", "D11", "F0", "B1", "E5", "C13",
+	"B3", "C4", "D15", "G0", "A15", "D7",
+	"G9", "B14", "D0", "B6", "D5", "B2", "F3", "F13",
+	"C2", "E8", "F2",
+};
+
+static const char * const pic32_output2_group[] = {
+	"D9", "G6", "B8", "B15", "D4", "B0", "E3", "B7",
+	"F12", "D12", "F8", "C3", "E9",
+};
+
+static const char * const pic32_output2_3_group[] = {
+	"D9", "G6", "B8", "B15", "D4", "B0", "E3", "B7",
+	"F12", "D12", "F8", "C3", "E9",
+	"G9", "B14", "D0", "B6", "D5", "B2", "F3", "F13",
+	"C2", "E8", "F2",
+};
+
+static const char * const pic32_output3_group[] = {
+	"G9", "B14", "D0", "B6", "D5", "B2", "F3", "F13",
+	"C2", "E8", "F2",
+};
+
+#define FUNCTION(_name, _gr)					\
+	{							\
+		.name = #_name,					\
+		.groups = pic32_##_gr##_group,			\
+		.ngroups = ARRAY_SIZE(pic32_##_gr##_group),	\
+	}
+
+static const struct pic32_function pic32_functions[] = {
+	FUNCTION(INT3, input0),
+	FUNCTION(T2CK, input0),
+	FUNCTION(T6CK, input0),
+	FUNCTION(IC3, input0),
+	FUNCTION(IC7, input0),
+	FUNCTION(U1RX, input0),
+	FUNCTION(U2CTS, input0),
+	FUNCTION(U5RX, input0),
+	FUNCTION(U6CTS, input0),
+	FUNCTION(SDI1, input0),
+	FUNCTION(SDI3, input0),
+	FUNCTION(SDI5, input0),
+	FUNCTION(SS6IN, input0),
+	FUNCTION(REFCLKI1, input0),
+	FUNCTION(INT4, input1),
+	FUNCTION(T5CK, input1),
+	FUNCTION(T7CK, input1),
+	FUNCTION(IC4, input1),
+	FUNCTION(IC8, input1),
+	FUNCTION(U3RX, input1),
+	FUNCTION(U4CTS, input1),
+	FUNCTION(SDI2, input1),
+	FUNCTION(SDI4, input1),
+	FUNCTION(C1RX, input1),
+	FUNCTION(REFCLKI4, input1),
+	FUNCTION(INT2, input2),
+	FUNCTION(T3CK, input2),
+	FUNCTION(T8CK, input2),
+	FUNCTION(IC2, input2),
+	FUNCTION(IC5, input2),
+	FUNCTION(IC9, input2),
+	FUNCTION(U1CTS, input2),
+	FUNCTION(U2RX, input2),
+	FUNCTION(U5CTS, input2),
+	FUNCTION(SS1IN, input2),
+	FUNCTION(SS3IN, input2),
+	FUNCTION(SS4IN, input2),
+	FUNCTION(SS5IN, input2),
+	FUNCTION(C2RX, input2),
+	FUNCTION(INT1, input3),
+	FUNCTION(T4CK, input3),
+	FUNCTION(T9CK, input3),
+	FUNCTION(IC1, input3),
+	FUNCTION(IC6, input3),
+	FUNCTION(U3CTS, input3),
+	FUNCTION(U4RX, input3),
+	FUNCTION(U6RX, input3),
+	FUNCTION(SS2IN, input3),
+	FUNCTION(SDI6, input3),
+	FUNCTION(OCFA, input3),
+	FUNCTION(REFCLKI3, input3),
+	FUNCTION(U3TX, output0),
+	FUNCTION(U4RTS, output0),
+	FUNCTION(SDO1, output0_1),
+	FUNCTION(SDO2, output0_1),
+	FUNCTION(SDO3, output0_1),
+	FUNCTION(SDO5, output0_1),
+	FUNCTION(SS6OUT, output0),
+	FUNCTION(OC3, output0),
+	FUNCTION(OC6, output0),
+	FUNCTION(REFCLKO4, output0),
+	FUNCTION(C2OUT, output0),
+	FUNCTION(C1TX, output0),
+	FUNCTION(U1TX, output1),
+	FUNCTION(U2RTS, output1),
+	FUNCTION(U5TX, output1),
+	FUNCTION(U6RTS, output1),
+	FUNCTION(SDO4, output1_3),
+	FUNCTION(OC4, output1),
+	FUNCTION(OC7, output1),
+	FUNCTION(REFCLKO1, output1),
+	FUNCTION(U3RTS, output2),
+	FUNCTION(U4TX, output2),
+	FUNCTION(U6TX, output2_3),
+	FUNCTION(SS1OUT, output2),
+	FUNCTION(SS3OUT, output2),
+	FUNCTION(SS4OUT, output2),
+	FUNCTION(SS5OUT, output2),
+	FUNCTION(SDO6, output2_3),
+	FUNCTION(OC5, output2),
+	FUNCTION(OC8, output2),
+	FUNCTION(C1OUT, output2),
+	FUNCTION(REFCLKO3, output2),
+	FUNCTION(U1RTS, output3),
+	FUNCTION(U2TX, output3),
+	FUNCTION(U5RTS, output3),
+	FUNCTION(SS2OUT, output3),
+	FUNCTION(OC2, output3),
+	FUNCTION(OC1, output3),
+	FUNCTION(OC9, output3),
+	FUNCTION(C2TX, output3),
+};
+
+#define PIC32_PINCTRL_GROUP(_pin, _name, ...)				\
+	{								\
+		.name = #_name,						\
+		.pin = _pin,						\
+		.functions = (struct pic32_desc_function[]){		\
+			__VA_ARGS__, { } },				\
+	}
+
+#define PIC32_PINCTRL_FUNCTION(_name, _muxreg, _muxval)	\
+	{						\
+		.name = #_name,				\
+		.muxreg = _muxreg,			\
+		.muxval = _muxval,			\
+	}
+
+static const struct pic32_pin_group pic32_groups[] = {
+	PIC32_PINCTRL_GROUP(14, A14,
+			PIC32_PINCTRL_FUNCTION(INT3, INT3R, 13),
+			PIC32_PINCTRL_FUNCTION(T2CK, T2CKR, 13),
+			PIC32_PINCTRL_FUNCTION(T6CK, T6CKR, 13),
+			PIC32_PINCTRL_FUNCTION(IC3, IC3R, 13),
+			PIC32_PINCTRL_FUNCTION(IC7, IC7R, 13),
+			PIC32_PINCTRL_FUNCTION(U1RX, U1RXR, 13),
+			PIC32_PINCTRL_FUNCTION(U2CTS, U2CTSR, 13),
+			PIC32_PINCTRL_FUNCTION(U5RX, U5RXR, 13),
+			PIC32_PINCTRL_FUNCTION(U6CTS, U6CTSR, 13),
+			PIC32_PINCTRL_FUNCTION(SDI1, SDI1R, 13),
+			PIC32_PINCTRL_FUNCTION(SDI3, SDI3R, 13),
+			PIC32_PINCTRL_FUNCTION(SDI5, SDI5R, 13),
+			PIC32_PINCTRL_FUNCTION(SS6IN, SS6INR, 13),
+			PIC32_PINCTRL_FUNCTION(REFCLKI1, REFCLKI1R, 13),
+			PIC32_PINCTRL_FUNCTION(U3TX, RPA14R, 1),
+			PIC32_PINCTRL_FUNCTION(U4RTS, RPA14R, 2),
+			PIC32_PINCTRL_FUNCTION(SDO1, RPA14R, 5),
+			PIC32_PINCTRL_FUNCTION(SDO2, RPA14R, 6),
+			PIC32_PINCTRL_FUNCTION(SDO3, RPA14R, 7),
+			PIC32_PINCTRL_FUNCTION(SDO5, RPA14R, 9),
+			PIC32_PINCTRL_FUNCTION(SS6OUT, RPA14R, 10),
+			PIC32_PINCTRL_FUNCTION(OC3, RPA14R, 11),
+			PIC32_PINCTRL_FUNCTION(OC6, RPA14R, 12),
+			PIC32_PINCTRL_FUNCTION(REFCLKO4, RPA14R, 13),
+			PIC32_PINCTRL_FUNCTION(C2OUT, RPA14R, 14),
+			PIC32_PINCTRL_FUNCTION(C1TX, RPA14R, 15)),
+	PIC32_PINCTRL_GROUP(15, A15,
+			PIC32_PINCTRL_FUNCTION(INT4, INT4R, 13),
+			PIC32_PINCTRL_FUNCTION(T5CK, T5CKR, 13),
+			PIC32_PINCTRL_FUNCTION(T7CK, T7CKR, 13),
+			PIC32_PINCTRL_FUNCTION(IC4, IC4R, 13),
+			PIC32_PINCTRL_FUNCTION(IC8, IC8R, 13),
+			PIC32_PINCTRL_FUNCTION(U3RX, U3RXR, 13),
+			PIC32_PINCTRL_FUNCTION(U4CTS, U4CTSR, 13),
+			PIC32_PINCTRL_FUNCTION(SDI2, SDI2R, 13),
+			PIC32_PINCTRL_FUNCTION(SDI4, SDI4R, 13),
+			PIC32_PINCTRL_FUNCTION(C1RX, C1RXR, 13),
+			PIC32_PINCTRL_FUNCTION(REFCLKI4, REFCLKI4R, 13),
+			PIC32_PINCTRL_FUNCTION(U1TX, RPA15R, 1),
+			PIC32_PINCTRL_FUNCTION(U2RTS, RPA15R, 2),
+			PIC32_PINCTRL_FUNCTION(U5TX, RPA15R, 3),
+			PIC32_PINCTRL_FUNCTION(U6RTS, RPA15R, 4),
+			PIC32_PINCTRL_FUNCTION(SDO1, RPA15R, 5),
+			PIC32_PINCTRL_FUNCTION(SDO2, RPA15R, 6),
+			PIC32_PINCTRL_FUNCTION(SDO3, RPA15R, 7),
+			PIC32_PINCTRL_FUNCTION(SDO4, RPA15R, 8),
+			PIC32_PINCTRL_FUNCTION(SDO5, RPA15R, 9),
+			PIC32_PINCTRL_FUNCTION(OC4, RPA15R, 11),
+			PIC32_PINCTRL_FUNCTION(OC7, RPA15R, 12),
+			PIC32_PINCTRL_FUNCTION(REFCLKO1, RPA15R, 15)),
+	PIC32_PINCTRL_GROUP(16, B0,
+			PIC32_PINCTRL_FUNCTION(INT2, INT2R, 5),
+			PIC32_PINCTRL_FUNCTION(T3CK, T3CKR, 5),
+			PIC32_PINCTRL_FUNCTION(T8CK, T8CKR, 5),
+			PIC32_PINCTRL_FUNCTION(IC2, IC2R, 5),
+			PIC32_PINCTRL_FUNCTION(IC5, IC5R, 5),
+			PIC32_PINCTRL_FUNCTION(IC9, IC9R, 5),
+			PIC32_PINCTRL_FUNCTION(U1CTS, U1CTSR, 5),
+			PIC32_PINCTRL_FUNCTION(U2RX, U2RXR, 5),
+			PIC32_PINCTRL_FUNCTION(U5CTS, U5CTSR, 5),
+			PIC32_PINCTRL_FUNCTION(SS1IN, SS1INR, 5),
+			PIC32_PINCTRL_FUNCTION(SS3IN, SS3INR, 5),
+			PIC32_PINCTRL_FUNCTION(SS4IN, SS4INR, 5),
+			PIC32_PINCTRL_FUNCTION(SS5IN, SS5INR, 5),
+			PIC32_PINCTRL_FUNCTION(C2RX, C2RXR, 5),
+			PIC32_PINCTRL_FUNCTION(U3RTS, RPB0R, 1),
+			PIC32_PINCTRL_FUNCTION(U4TX, RPB0R, 2),
+			PIC32_PINCTRL_FUNCTION(U6TX, RPB0R, 4),
+			PIC32_PINCTRL_FUNCTION(SS1OUT, RPB0R, 5),
+			PIC32_PINCTRL_FUNCTION(SS3OUT, RPB0R, 7),
+			PIC32_PINCTRL_FUNCTION(SS4OUT, RPB0R, 8),
+			PIC32_PINCTRL_FUNCTION(SS5OUT, RPB0R, 9),
+			PIC32_PINCTRL_FUNCTION(SDO6, RPB0R, 10),
+			PIC32_PINCTRL_FUNCTION(OC5, RPB0R, 11),
+			PIC32_PINCTRL_FUNCTION(OC8, RPB0R, 12),
+			PIC32_PINCTRL_FUNCTION(C1OUT, RPB0R, 14),
+			PIC32_PINCTRL_FUNCTION(REFCLKO3, RPB0R, 15)),
+	PIC32_PINCTRL_GROUP(17, B1,
+			PIC32_PINCTRL_FUNCTION(INT4, INT4R, 5),
+			PIC32_PINCTRL_FUNCTION(T5CK, T5CKR, 5),
+			PIC32_PINCTRL_FUNCTION(T7CK, T7CKR, 5),
+			PIC32_PINCTRL_FUNCTION(IC4, IC4R, 5),
+			PIC32_PINCTRL_FUNCTION(IC8, IC8R, 5),
+			PIC32_PINCTRL_FUNCTION(U3RX, U3RXR, 5),
+			PIC32_PINCTRL_FUNCTION(U4CTS, U4CTSR, 5),
+			PIC32_PINCTRL_FUNCTION(SDI2, SDI2R, 5),
+			PIC32_PINCTRL_FUNCTION(SDI4, SDI4R, 5),
+			PIC32_PINCTRL_FUNCTION(C1RX, C1RXR, 5),
+			PIC32_PINCTRL_FUNCTION(REFCLKI4, REFCLKI4R, 5),
+			PIC32_PINCTRL_FUNCTION(U1TX, RPB1R, 1),
+			PIC32_PINCTRL_FUNCTION(U2RTS, RPB1R, 2),
+			PIC32_PINCTRL_FUNCTION(U5TX, RPB1R, 3),
+			PIC32_PINCTRL_FUNCTION(U6RTS, RPB1R, 4),
+			PIC32_PINCTRL_FUNCTION(SDO1, RPB1R, 5),
+			PIC32_PINCTRL_FUNCTION(SDO2, RPB1R, 6),
+			PIC32_PINCTRL_FUNCTION(SDO3, RPB1R, 7),
+			PIC32_PINCTRL_FUNCTION(SDO4, RPB1R, 8),
+			PIC32_PINCTRL_FUNCTION(SDO5, RPB1R, 9),
+			PIC32_PINCTRL_FUNCTION(OC4, RPB1R, 11),
+			PIC32_PINCTRL_FUNCTION(OC7, RPB1R, 12),
+			PIC32_PINCTRL_FUNCTION(REFCLKO1, RPB1R, 15)),
+	PIC32_PINCTRL_GROUP(18, B2,
+			PIC32_PINCTRL_FUNCTION(INT1, INT1R, 7),
+			PIC32_PINCTRL_FUNCTION(T4CK, T4CKR, 7),
+			PIC32_PINCTRL_FUNCTION(T9CK, T9CKR, 7),
+			PIC32_PINCTRL_FUNCTION(IC1, IC1R, 7),
+			PIC32_PINCTRL_FUNCTION(IC6, IC6R, 7),
+			PIC32_PINCTRL_FUNCTION(U3CTS, U3CTSR, 7),
+			PIC32_PINCTRL_FUNCTION(U4RX, U4RXR, 7),
+			PIC32_PINCTRL_FUNCTION(U6RX, U6RXR, 7),
+			PIC32_PINCTRL_FUNCTION(SS2IN, SS2INR, 7),
+			PIC32_PINCTRL_FUNCTION(SDI6, SDI6R, 7),
+			PIC32_PINCTRL_FUNCTION(OCFA, OCFAR, 7),
+			PIC32_PINCTRL_FUNCTION(REFCLKI3, REFCLKI3R, 7),
+			PIC32_PINCTRL_FUNCTION(U1RTS, RPB2R, 1),
+			PIC32_PINCTRL_FUNCTION(U2TX, RPB2R, 2),
+			PIC32_PINCTRL_FUNCTION(U5RTS, RPB2R, 3),
+			PIC32_PINCTRL_FUNCTION(U6TX, RPB2R, 4),
+			PIC32_PINCTRL_FUNCTION(SS2OUT, RPB2R, 6),
+			PIC32_PINCTRL_FUNCTION(SDO4, RPB2R, 8),
+			PIC32_PINCTRL_FUNCTION(SDO6, RPB2R, 10),
+			PIC32_PINCTRL_FUNCTION(OC2, RPB2R, 11),
+			PIC32_PINCTRL_FUNCTION(OC1, RPB2R, 12),
+			PIC32_PINCTRL_FUNCTION(OC9, RPB2R, 13),
+			PIC32_PINCTRL_FUNCTION(C2TX, RPB2R, 15)),
+	PIC32_PINCTRL_GROUP(19, B3,
+			PIC32_PINCTRL_FUNCTION(INT4, INT4R, 8),
+			PIC32_PINCTRL_FUNCTION(T5CK, T5CKR, 8),
+			PIC32_PINCTRL_FUNCTION(T7CK, T7CKR, 8),
+			PIC32_PINCTRL_FUNCTION(IC4, IC4R, 8),
+			PIC32_PINCTRL_FUNCTION(IC8, IC8R, 8),
+			PIC32_PINCTRL_FUNCTION(U3RX, U3RXR, 8),
+			PIC32_PINCTRL_FUNCTION(U4CTS, U4CTSR, 8),
+			PIC32_PINCTRL_FUNCTION(SDI2, SDI2R, 8),
+			PIC32_PINCTRL_FUNCTION(SDI4, SDI4R, 8),
+			PIC32_PINCTRL_FUNCTION(C1RX, C1RXR, 8),
+			PIC32_PINCTRL_FUNCTION(REFCLKI4, REFCLKI4R, 8),
+			PIC32_PINCTRL_FUNCTION(U1TX, RPB3R, 1),
+			PIC32_PINCTRL_FUNCTION(U2RTS, RPB3R, 2),
+			PIC32_PINCTRL_FUNCTION(U5TX, RPB3R, 3),
+			PIC32_PINCTRL_FUNCTION(U6RTS, RPB3R, 4),
+			PIC32_PINCTRL_FUNCTION(SDO1, RPB3R, 5),
+			PIC32_PINCTRL_FUNCTION(SDO2, RPB3R, 6),
+			PIC32_PINCTRL_FUNCTION(SDO3, RPB3R, 7),
+			PIC32_PINCTRL_FUNCTION(SDO4, RPB3R, 8),
+			PIC32_PINCTRL_FUNCTION(SDO5, RPB3R, 9),
+			PIC32_PINCTRL_FUNCTION(OC4, RPB3R, 11),
+			PIC32_PINCTRL_FUNCTION(OC7, RPB3R, 12),
+			PIC32_PINCTRL_FUNCTION(REFCLKO1, RPB3R, 15)),
+	PIC32_PINCTRL_GROUP(21, B5,
+			PIC32_PINCTRL_FUNCTION(INT3, INT3R, 8),
+			PIC32_PINCTRL_FUNCTION(T2CK, T2CKR, 8),
+			PIC32_PINCTRL_FUNCTION(T6CK, T6CKR, 8),
+			PIC32_PINCTRL_FUNCTION(IC3, IC3R, 8),
+			PIC32_PINCTRL_FUNCTION(IC7, IC7R, 8),
+			PIC32_PINCTRL_FUNCTION(U1RX, U1RXR, 8),
+			PIC32_PINCTRL_FUNCTION(U2CTS, U2CTSR, 8),
+			PIC32_PINCTRL_FUNCTION(U5RX, U5RXR, 8),
+			PIC32_PINCTRL_FUNCTION(U6CTS, U6CTSR, 8),
+			PIC32_PINCTRL_FUNCTION(SDI1, SDI1R, 8),
+			PIC32_PINCTRL_FUNCTION(SDI3, SDI3R, 8),
+			PIC32_PINCTRL_FUNCTION(SDI5, SDI5R, 8),
+			PIC32_PINCTRL_FUNCTION(SS6IN, SS6INR, 8),
+			PIC32_PINCTRL_FUNCTION(REFCLKI1, REFCLKI1R, 8),
+			PIC32_PINCTRL_FUNCTION(U3TX, RPB5R, 1),
+			PIC32_PINCTRL_FUNCTION(U4RTS, RPB5R, 2),
+			PIC32_PINCTRL_FUNCTION(SDO1, RPB5R, 5),
+			PIC32_PINCTRL_FUNCTION(SDO2, RPB5R, 6),
+			PIC32_PINCTRL_FUNCTION(SDO3, RPB5R, 7),
+			PIC32_PINCTRL_FUNCTION(SDO5, RPB5R, 9),
+			PIC32_PINCTRL_FUNCTION(SS6OUT, RPB5R, 10),
+			PIC32_PINCTRL_FUNCTION(OC3, RPB5R, 11),
+			PIC32_PINCTRL_FUNCTION(OC6, RPB5R, 12),
+			PIC32_PINCTRL_FUNCTION(REFCLKO4, RPB5R, 13),
+			PIC32_PINCTRL_FUNCTION(C2OUT, RPB5R, 14),
+			PIC32_PINCTRL_FUNCTION(C1TX, RPB5R, 15)),
+	PIC32_PINCTRL_GROUP(22, B6,
+			PIC32_PINCTRL_FUNCTION(INT1, INT1R, 4),
+			PIC32_PINCTRL_FUNCTION(T4CK, T4CKR, 4),
+			PIC32_PINCTRL_FUNCTION(T9CK, T9CKR, 4),
+			PIC32_PINCTRL_FUNCTION(IC1, IC1R, 4),
+			PIC32_PINCTRL_FUNCTION(IC6, IC6R, 4),
+			PIC32_PINCTRL_FUNCTION(U3CTS, U3CTSR, 4),
+			PIC32_PINCTRL_FUNCTION(U4RX, U4RXR, 4),
+			PIC32_PINCTRL_FUNCTION(U6RX, U6RXR, 4),
+			PIC32_PINCTRL_FUNCTION(SS2IN, SS2INR, 4),
+			PIC32_PINCTRL_FUNCTION(SDI6, SDI6R, 4),
+			PIC32_PINCTRL_FUNCTION(OCFA, OCFAR, 4),
+			PIC32_PINCTRL_FUNCTION(REFCLKI3, REFCLKI3R, 4),
+			PIC32_PINCTRL_FUNCTION(U1RTS, RPB6R, 1),
+			PIC32_PINCTRL_FUNCTION(U2TX, RPB6R, 2),
+			PIC32_PINCTRL_FUNCTION(U5RTS, RPB6R, 3),
+			PIC32_PINCTRL_FUNCTION(U6TX, RPB6R, 4),
+			PIC32_PINCTRL_FUNCTION(SS2OUT, RPB6R, 6),
+			PIC32_PINCTRL_FUNCTION(SDO4, RPB6R, 8),
+			PIC32_PINCTRL_FUNCTION(SDO6, RPB6R, 10),
+			PIC32_PINCTRL_FUNCTION(OC2, RPB6R, 11),
+			PIC32_PINCTRL_FUNCTION(OC1, RPB6R, 12),
+			PIC32_PINCTRL_FUNCTION(OC9, RPB6R, 13),
+			PIC32_PINCTRL_FUNCTION(C2TX, RPB6R, 15)),
+	PIC32_PINCTRL_GROUP(23, B7,
+			PIC32_PINCTRL_FUNCTION(INT2, INT2R, 7),
+			PIC32_PINCTRL_FUNCTION(T3CK, T3CKR, 7),
+			PIC32_PINCTRL_FUNCTION(T8CK, T8CKR, 7),
+			PIC32_PINCTRL_FUNCTION(IC2, IC2R, 7),
+			PIC32_PINCTRL_FUNCTION(IC5, IC5R, 7),
+			PIC32_PINCTRL_FUNCTION(IC9, IC9R, 7),
+			PIC32_PINCTRL_FUNCTION(U1CTS, U1CTSR, 7),
+			PIC32_PINCTRL_FUNCTION(U2RX, U2RXR, 7),
+			PIC32_PINCTRL_FUNCTION(U5CTS, U5CTSR, 7),
+			PIC32_PINCTRL_FUNCTION(SS1IN, SS1INR, 7),
+			PIC32_PINCTRL_FUNCTION(SS3IN, SS3INR, 7),
+			PIC32_PINCTRL_FUNCTION(SS4IN, SS4INR, 7),
+			PIC32_PINCTRL_FUNCTION(SS5IN, SS5INR, 7),
+			PIC32_PINCTRL_FUNCTION(C2RX, C2RXR, 7),
+			PIC32_PINCTRL_FUNCTION(U3RTS, RPB7R, 1),
+			PIC32_PINCTRL_FUNCTION(U4TX, RPB7R, 2),
+			PIC32_PINCTRL_FUNCTION(U6TX, RPB7R, 4),
+			PIC32_PINCTRL_FUNCTION(SS1OUT, RPB7R, 5),
+			PIC32_PINCTRL_FUNCTION(SS3OUT, RPB7R, 7),
+			PIC32_PINCTRL_FUNCTION(SS4OUT, RPB7R, 8),
+			PIC32_PINCTRL_FUNCTION(SS5OUT, RPB7R, 9),
+			PIC32_PINCTRL_FUNCTION(SDO6, RPB7R, 10),
+			PIC32_PINCTRL_FUNCTION(OC5, RPB7R, 11),
+			PIC32_PINCTRL_FUNCTION(OC8, RPB7R, 12),
+			PIC32_PINCTRL_FUNCTION(C1OUT, RPB7R, 14),
+			PIC32_PINCTRL_FUNCTION(REFCLKO3, RPB7R, 15)),
+	PIC32_PINCTRL_GROUP(24, B8,
+			PIC32_PINCTRL_FUNCTION(INT2, INT2R, 2),
+			PIC32_PINCTRL_FUNCTION(T3CK, T3CKR, 2),
+			PIC32_PINCTRL_FUNCTION(T8CK, T8CKR, 2),
+			PIC32_PINCTRL_FUNCTION(IC2, IC2R, 2),
+			PIC32_PINCTRL_FUNCTION(IC5, IC5R, 2),
+			PIC32_PINCTRL_FUNCTION(IC9, IC9R, 2),
+			PIC32_PINCTRL_FUNCTION(U1CTS, U1CTSR, 2),
+			PIC32_PINCTRL_FUNCTION(U2RX, U2RXR, 2),
+			PIC32_PINCTRL_FUNCTION(U5CTS, U5CTSR, 2),
+			PIC32_PINCTRL_FUNCTION(SS1IN, SS1INR, 2),
+			PIC32_PINCTRL_FUNCTION(SS3IN, SS3INR, 2),
+			PIC32_PINCTRL_FUNCTION(SS4IN, SS4INR, 2),
+			PIC32_PINCTRL_FUNCTION(SS5IN, SS5INR, 2),
+			PIC32_PINCTRL_FUNCTION(C2RX, C2RXR, 2),
+			PIC32_PINCTRL_FUNCTION(U3RTS, RPB8R, 1),
+			PIC32_PINCTRL_FUNCTION(U4TX, RPB8R, 2),
+			PIC32_PINCTRL_FUNCTION(U6TX, RPB8R, 4),
+			PIC32_PINCTRL_FUNCTION(SS1OUT, RPB8R, 5),
+			PIC32_PINCTRL_FUNCTION(SS3OUT, RPB8R, 7),
+			PIC32_PINCTRL_FUNCTION(SS4OUT, RPB8R, 8),
+			PIC32_PINCTRL_FUNCTION(SS5OUT, RPB8R, 9),
+			PIC32_PINCTRL_FUNCTION(SDO6, RPB8R, 10),
+			PIC32_PINCTRL_FUNCTION(OC5, RPB8R, 11),
+			PIC32_PINCTRL_FUNCTION(OC8, RPB8R, 12),
+			PIC32_PINCTRL_FUNCTION(C1OUT, RPB8R, 14),
+			PIC32_PINCTRL_FUNCTION(REFCLKO3, RPB8R, 15)),
+	PIC32_PINCTRL_GROUP(25, B9,
+			PIC32_PINCTRL_FUNCTION(INT3, INT3R, 5),
+			PIC32_PINCTRL_FUNCTION(T2CK, T2CKR, 5),
+			PIC32_PINCTRL_FUNCTION(T6CK, T6CKR, 5),
+			PIC32_PINCTRL_FUNCTION(IC3, IC3R, 5),
+			PIC32_PINCTRL_FUNCTION(IC7, IC7R, 5),
+			PIC32_PINCTRL_FUNCTION(U1RX, U1RXR, 5),
+			PIC32_PINCTRL_FUNCTION(U2CTS, U2CTSR, 5),
+			PIC32_PINCTRL_FUNCTION(U5RX, U5RXR, 5),
+			PIC32_PINCTRL_FUNCTION(U6CTS, U6CTSR, 5),
+			PIC32_PINCTRL_FUNCTION(SDI1, SDI1R, 5),
+			PIC32_PINCTRL_FUNCTION(SDI3, SDI3R, 5),
+			PIC32_PINCTRL_FUNCTION(SDI5, SDI5R, 5),
+			PIC32_PINCTRL_FUNCTION(SS6IN, SS6INR, 5),
+			PIC32_PINCTRL_FUNCTION(REFCLKI1, REFCLKI1R, 5),
+			PIC32_PINCTRL_FUNCTION(U3TX, RPB9R, 1),
+			PIC32_PINCTRL_FUNCTION(U4RTS, RPB9R, 2),
+			PIC32_PINCTRL_FUNCTION(SDO1, RPB9R, 5),
+			PIC32_PINCTRL_FUNCTION(SDO2, RPB9R, 6),
+			PIC32_PINCTRL_FUNCTION(SDO3, RPB9R, 7),
+			PIC32_PINCTRL_FUNCTION(SDO5, RPB9R, 9),
+			PIC32_PINCTRL_FUNCTION(SS6OUT, RPB9R, 10),
+			PIC32_PINCTRL_FUNCTION(OC3, RPB9R, 11),
+			PIC32_PINCTRL_FUNCTION(OC6, RPB9R, 12),
+			PIC32_PINCTRL_FUNCTION(REFCLKO4, RPB9R, 13),
+			PIC32_PINCTRL_FUNCTION(C2OUT, RPB9R, 14),
+			PIC32_PINCTRL_FUNCTION(C1TX, RPB9R, 15)),
+	PIC32_PINCTRL_GROUP(26, B10,
+			PIC32_PINCTRL_FUNCTION(INT3, INT3R, 6),
+			PIC32_PINCTRL_FUNCTION(T2CK, T2CKR, 6),
+			PIC32_PINCTRL_FUNCTION(T6CK, T6CKR, 6),
+			PIC32_PINCTRL_FUNCTION(IC3, IC3R, 6),
+			PIC32_PINCTRL_FUNCTION(IC7, IC7R, 6),
+			PIC32_PINCTRL_FUNCTION(U1RX, U1RXR, 6),
+			PIC32_PINCTRL_FUNCTION(U2CTS, U2CTSR, 6),
+			PIC32_PINCTRL_FUNCTION(U5RX, U5RXR, 6),
+			PIC32_PINCTRL_FUNCTION(U6CTS, U6CTSR, 6),
+			PIC32_PINCTRL_FUNCTION(SDI1, SDI1R, 6),
+			PIC32_PINCTRL_FUNCTION(SDI3, SDI3R, 6),
+			PIC32_PINCTRL_FUNCTION(SDI5, SDI5R, 6),
+			PIC32_PINCTRL_FUNCTION(SS6IN, SS6INR, 6),
+			PIC32_PINCTRL_FUNCTION(REFCLKI1, REFCLKI1R, 6),
+			PIC32_PINCTRL_FUNCTION(U3TX, RPB10R, 1),
+			PIC32_PINCTRL_FUNCTION(U4RTS, RPB10R, 2),
+			PIC32_PINCTRL_FUNCTION(SDO1, RPB10R, 5),
+			PIC32_PINCTRL_FUNCTION(SDO2, RPB10R, 6),
+			PIC32_PINCTRL_FUNCTION(SDO3, RPB10R, 7),
+			PIC32_PINCTRL_FUNCTION(SDO5, RPB10R, 9),
+			PIC32_PINCTRL_FUNCTION(SS6OUT, RPB10R, 10),
+			PIC32_PINCTRL_FUNCTION(OC3, RPB10R, 11),
+			PIC32_PINCTRL_FUNCTION(OC6, RPB10R, 12),
+			PIC32_PINCTRL_FUNCTION(REFCLKO4, RPB10R, 13),
+			PIC32_PINCTRL_FUNCTION(C2OUT, RPB10R, 14),
+			PIC32_PINCTRL_FUNCTION(C1TX, RPB10R, 15)),
+	PIC32_PINCTRL_GROUP(30, B14,
+			PIC32_PINCTRL_FUNCTION(INT1, INT1R, 2),
+			PIC32_PINCTRL_FUNCTION(T4CK, T4CKR, 2),
+			PIC32_PINCTRL_FUNCTION(T9CK, T9CKR, 2),
+			PIC32_PINCTRL_FUNCTION(IC1, IC1R, 2),
+			PIC32_PINCTRL_FUNCTION(IC6, IC6R, 2),
+			PIC32_PINCTRL_FUNCTION(U3CTS, U3CTSR, 2),
+			PIC32_PINCTRL_FUNCTION(U4RX, U4RXR, 2),
+			PIC32_PINCTRL_FUNCTION(U6RX, U6RXR, 2),
+			PIC32_PINCTRL_FUNCTION(SS2IN, SS2INR, 2),
+			PIC32_PINCTRL_FUNCTION(SDI6, SDI6R, 2),
+			PIC32_PINCTRL_FUNCTION(OCFA, OCFAR, 2),
+			PIC32_PINCTRL_FUNCTION(REFCLKI3, REFCLKI3R, 2),
+			PIC32_PINCTRL_FUNCTION(U1RTS, RPB14R, 1),
+			PIC32_PINCTRL_FUNCTION(U2TX, RPB14R, 2),
+			PIC32_PINCTRL_FUNCTION(U5RTS, RPB14R, 3),
+			PIC32_PINCTRL_FUNCTION(U6TX, RPB14R, 4),
+			PIC32_PINCTRL_FUNCTION(SS2OUT, RPB14R, 6),
+			PIC32_PINCTRL_FUNCTION(SDO4, RPB14R, 8),
+			PIC32_PINCTRL_FUNCTION(SDO6, RPB14R, 10),
+			PIC32_PINCTRL_FUNCTION(OC2, RPB14R, 11),
+			PIC32_PINCTRL_FUNCTION(OC1, RPB14R, 12),
+			PIC32_PINCTRL_FUNCTION(OC9, RPB14R, 13),
+			PIC32_PINCTRL_FUNCTION(C2TX, RPB14R, 15)),
+	PIC32_PINCTRL_GROUP(31, B15,
+			PIC32_PINCTRL_FUNCTION(INT2, INT2R, 3),
+			PIC32_PINCTRL_FUNCTION(T3CK, T3CKR, 3),
+			PIC32_PINCTRL_FUNCTION(T8CK, T8CKR, 3),
+			PIC32_PINCTRL_FUNCTION(IC2, IC2R, 3),
+			PIC32_PINCTRL_FUNCTION(IC5, IC5R, 3),
+			PIC32_PINCTRL_FUNCTION(IC9, IC9R, 3),
+			PIC32_PINCTRL_FUNCTION(U1CTS, U1CTSR, 3),
+			PIC32_PINCTRL_FUNCTION(U2RX, U2RXR, 3),
+			PIC32_PINCTRL_FUNCTION(U5CTS, U5CTSR, 3),
+			PIC32_PINCTRL_FUNCTION(SS1IN, SS1INR, 3),
+			PIC32_PINCTRL_FUNCTION(SS3IN, SS3INR, 3),
+			PIC32_PINCTRL_FUNCTION(SS4IN, SS4INR, 3),
+			PIC32_PINCTRL_FUNCTION(SS5IN, SS5INR, 3),
+			PIC32_PINCTRL_FUNCTION(C2RX, C2RXR, 3),
+			PIC32_PINCTRL_FUNCTION(U3RTS, RPB15R, 1),
+			PIC32_PINCTRL_FUNCTION(U4TX, RPB15R, 2),
+			PIC32_PINCTRL_FUNCTION(U6TX, RPB15R, 4),
+			PIC32_PINCTRL_FUNCTION(SS1OUT, RPB15R, 5),
+			PIC32_PINCTRL_FUNCTION(SS3OUT, RPB15R, 7),
+			PIC32_PINCTRL_FUNCTION(SS4OUT, RPB15R, 8),
+			PIC32_PINCTRL_FUNCTION(SS5OUT, RPB15R, 9),
+			PIC32_PINCTRL_FUNCTION(SDO6, RPB15R, 10),
+			PIC32_PINCTRL_FUNCTION(OC5, RPB15R, 11),
+			PIC32_PINCTRL_FUNCTION(OC8, RPB15R, 12),
+			PIC32_PINCTRL_FUNCTION(C1OUT, RPB15R, 14),
+			PIC32_PINCTRL_FUNCTION(REFCLKO3, RPB15R, 15)),
+	PIC32_PINCTRL_GROUP(33, C1,
+			PIC32_PINCTRL_FUNCTION(INT3, INT3R, 10),
+			PIC32_PINCTRL_FUNCTION(T2CK, T2CKR, 10),
+			PIC32_PINCTRL_FUNCTION(T6CK, T6CKR, 10),
+			PIC32_PINCTRL_FUNCTION(IC3, IC3R, 10),
+			PIC32_PINCTRL_FUNCTION(IC7, IC7R, 10),
+			PIC32_PINCTRL_FUNCTION(U1RX, U1RXR, 10),
+			PIC32_PINCTRL_FUNCTION(U2CTS, U2CTSR, 10),
+			PIC32_PINCTRL_FUNCTION(U5RX, U5RXR, 10),
+			PIC32_PINCTRL_FUNCTION(U6CTS, U6CTSR, 10),
+			PIC32_PINCTRL_FUNCTION(SDI1, SDI1R, 10),
+			PIC32_PINCTRL_FUNCTION(SDI3, SDI3R, 10),
+			PIC32_PINCTRL_FUNCTION(SDI5, SDI5R, 10),
+			PIC32_PINCTRL_FUNCTION(SS6IN, SS6INR, 10),
+			PIC32_PINCTRL_FUNCTION(REFCLKI1, REFCLKI1R, 10),
+			PIC32_PINCTRL_FUNCTION(U3TX, RPC1R, 1),
+			PIC32_PINCTRL_FUNCTION(U4RTS, RPC1R, 2),
+			PIC32_PINCTRL_FUNCTION(SDO1, RPC1R, 5),
+			PIC32_PINCTRL_FUNCTION(SDO2, RPC1R, 6),
+			PIC32_PINCTRL_FUNCTION(SDO3, RPC1R, 7),
+			PIC32_PINCTRL_FUNCTION(SDO5, RPC1R, 9),
+			PIC32_PINCTRL_FUNCTION(SS6OUT, RPC1R, 10),
+			PIC32_PINCTRL_FUNCTION(OC3, RPC1R, 11),
+			PIC32_PINCTRL_FUNCTION(OC6, RPC1R, 12),
+			PIC32_PINCTRL_FUNCTION(REFCLKO4, RPC1R, 13),
+			PIC32_PINCTRL_FUNCTION(C2OUT, RPC1R, 14),
+			PIC32_PINCTRL_FUNCTION(C1TX, RPC1R, 15)),
+	PIC32_PINCTRL_GROUP(34, C2,
+			PIC32_PINCTRL_FUNCTION(INT1, INT1R, 12),
+			PIC32_PINCTRL_FUNCTION(T4CK, T4CKR, 12),
+			PIC32_PINCTRL_FUNCTION(T9CK, T9CKR, 12),
+			PIC32_PINCTRL_FUNCTION(IC1, IC1R, 12),
+			PIC32_PINCTRL_FUNCTION(IC6, IC6R, 12),
+			PIC32_PINCTRL_FUNCTION(U3CTS, U3CTSR, 12),
+			PIC32_PINCTRL_FUNCTION(U4RX, U4RXR, 12),
+			PIC32_PINCTRL_FUNCTION(U6RX, U6RXR, 12),
+			PIC32_PINCTRL_FUNCTION(SS2IN, SS2INR, 12),
+			PIC32_PINCTRL_FUNCTION(SDI6, SDI6R, 12),
+			PIC32_PINCTRL_FUNCTION(OCFA, OCFAR, 12),
+			PIC32_PINCTRL_FUNCTION(REFCLKI3, REFCLKI3R, 12),
+			PIC32_PINCTRL_FUNCTION(U1RTS, RPC2R, 1),
+			PIC32_PINCTRL_FUNCTION(U2TX, RPC2R, 2),
+			PIC32_PINCTRL_FUNCTION(U5RTS, RPC2R, 3),
+			PIC32_PINCTRL_FUNCTION(U6TX, RPC2R, 4),
+			PIC32_PINCTRL_FUNCTION(SS2OUT, RPC2R, 6),
+			PIC32_PINCTRL_FUNCTION(SDO4, RPC2R, 8),
+			PIC32_PINCTRL_FUNCTION(SDO6, RPC2R, 10),
+			PIC32_PINCTRL_FUNCTION(OC2, RPC2R, 11),
+			PIC32_PINCTRL_FUNCTION(OC1, RPC2R, 12),
+			PIC32_PINCTRL_FUNCTION(OC9, RPC2R, 13),
+			PIC32_PINCTRL_FUNCTION(C2TX, RPC2R, 15)),
+	PIC32_PINCTRL_GROUP(35, C3,
+			PIC32_PINCTRL_FUNCTION(INT2, INT2R, 12),
+			PIC32_PINCTRL_FUNCTION(T3CK, T3CKR, 12),
+			PIC32_PINCTRL_FUNCTION(T8CK, T8CKR, 12),
+			PIC32_PINCTRL_FUNCTION(IC2, IC2R, 12),
+			PIC32_PINCTRL_FUNCTION(IC5, IC5R, 12),
+			PIC32_PINCTRL_FUNCTION(IC9, IC9R, 12),
+			PIC32_PINCTRL_FUNCTION(U1CTS, U1CTSR, 12),
+			PIC32_PINCTRL_FUNCTION(U2RX, U2RXR, 12),
+			PIC32_PINCTRL_FUNCTION(U5CTS, U5CTSR, 12),
+			PIC32_PINCTRL_FUNCTION(SS1IN, SS1INR, 12),
+			PIC32_PINCTRL_FUNCTION(SS3IN, SS3INR, 12),
+			PIC32_PINCTRL_FUNCTION(SS4IN, SS4INR, 12),
+			PIC32_PINCTRL_FUNCTION(SS5IN, SS5INR, 12),
+			PIC32_PINCTRL_FUNCTION(C2RX, C2RXR, 12),
+			PIC32_PINCTRL_FUNCTION(U3RTS, RPC3R, 1),
+			PIC32_PINCTRL_FUNCTION(U4TX, RPC3R, 2),
+			PIC32_PINCTRL_FUNCTION(U6TX, RPC3R, 4),
+			PIC32_PINCTRL_FUNCTION(SS1OUT, RPC3R, 5),
+			PIC32_PINCTRL_FUNCTION(SS3OUT, RPC3R, 7),
+			PIC32_PINCTRL_FUNCTION(SS4OUT, RPC3R, 8),
+			PIC32_PINCTRL_FUNCTION(SS5OUT, RPC3R, 9),
+			PIC32_PINCTRL_FUNCTION(SDO6, RPC3R, 10),
+			PIC32_PINCTRL_FUNCTION(OC5, RPC3R, 11),
+			PIC32_PINCTRL_FUNCTION(OC8, RPC3R, 12),
+			PIC32_PINCTRL_FUNCTION(C1OUT, RPC3R, 14),
+			PIC32_PINCTRL_FUNCTION(REFCLKO3, RPC3R, 15)),
+	PIC32_PINCTRL_GROUP(36, C4,
+			PIC32_PINCTRL_FUNCTION(INT4, INT4R, 10),
+			PIC32_PINCTRL_FUNCTION(T5CK, T5CKR, 10),
+			PIC32_PINCTRL_FUNCTION(T7CK, T7CKR, 10),
+			PIC32_PINCTRL_FUNCTION(IC4, IC4R, 10),
+			PIC32_PINCTRL_FUNCTION(IC8, IC8R, 10),
+			PIC32_PINCTRL_FUNCTION(U3RX, U3RXR, 10),
+			PIC32_PINCTRL_FUNCTION(U4CTS, U4CTSR, 10),
+			PIC32_PINCTRL_FUNCTION(SDI2, SDI2R, 10),
+			PIC32_PINCTRL_FUNCTION(SDI4, SDI4R, 10),
+			PIC32_PINCTRL_FUNCTION(C1RX, C1RXR, 10),
+			PIC32_PINCTRL_FUNCTION(REFCLKI4, REFCLKI4R, 10),
+			PIC32_PINCTRL_FUNCTION(U1TX, RPC4R, 1),
+			PIC32_PINCTRL_FUNCTION(U2RTS, RPC4R, 2),
+			PIC32_PINCTRL_FUNCTION(U5TX, RPC4R, 3),
+			PIC32_PINCTRL_FUNCTION(U6RTS, RPC4R, 4),
+			PIC32_PINCTRL_FUNCTION(SDO1, RPC4R, 5),
+			PIC32_PINCTRL_FUNCTION(SDO2, RPC4R, 6),
+			PIC32_PINCTRL_FUNCTION(SDO3, RPC4R, 7),
+			PIC32_PINCTRL_FUNCTION(SDO4, RPC4R, 8),
+			PIC32_PINCTRL_FUNCTION(SDO5, RPC4R, 9),
+			PIC32_PINCTRL_FUNCTION(OC4, RPC4R, 11),
+			PIC32_PINCTRL_FUNCTION(OC7, RPC4R, 12),
+			PIC32_PINCTRL_FUNCTION(REFCLKO1, RPC4R, 15)),
+	PIC32_PINCTRL_GROUP(45, C13,
+			PIC32_PINCTRL_FUNCTION(INT4, INT4R, 7),
+			PIC32_PINCTRL_FUNCTION(T5CK, T5CKR, 7),
+			PIC32_PINCTRL_FUNCTION(T7CK, T7CKR, 7),
+			PIC32_PINCTRL_FUNCTION(IC4, IC4R, 7),
+			PIC32_PINCTRL_FUNCTION(IC8, IC8R, 7),
+			PIC32_PINCTRL_FUNCTION(U3RX, U3RXR, 7),
+			PIC32_PINCTRL_FUNCTION(U4CTS, U4CTSR, 7),
+			PIC32_PINCTRL_FUNCTION(SDI2, SDI2R, 7),
+			PIC32_PINCTRL_FUNCTION(SDI4, SDI4R, 7),
+			PIC32_PINCTRL_FUNCTION(C1RX, C1RXR, 7),
+			PIC32_PINCTRL_FUNCTION(REFCLKI4, REFCLKI4R, 7),
+			PIC32_PINCTRL_FUNCTION(U1TX, RPC13R, 1),
+			PIC32_PINCTRL_FUNCTION(U2RTS, RPC13R, 2),
+			PIC32_PINCTRL_FUNCTION(U5TX, RPC13R, 3),
+			PIC32_PINCTRL_FUNCTION(U6RTS, RPC13R, 4),
+			PIC32_PINCTRL_FUNCTION(SDO1, RPC13R, 5),
+			PIC32_PINCTRL_FUNCTION(SDO2, RPC13R, 6),
+			PIC32_PINCTRL_FUNCTION(SDO3, RPC13R, 7),
+			PIC32_PINCTRL_FUNCTION(SDO4, RPC13R, 8),
+			PIC32_PINCTRL_FUNCTION(SDO5, RPC13R, 9),
+			PIC32_PINCTRL_FUNCTION(OC4, RPC13R, 11),
+			PIC32_PINCTRL_FUNCTION(OC7, RPC13R, 12),
+			PIC32_PINCTRL_FUNCTION(REFCLKO1, RPC13R, 15)),
+	PIC32_PINCTRL_GROUP(46, C14,
+			PIC32_PINCTRL_FUNCTION(INT3, INT3R, 7),
+			PIC32_PINCTRL_FUNCTION(T2CK, T2CKR, 7),
+			PIC32_PINCTRL_FUNCTION(T6CK, T6CKR, 7),
+			PIC32_PINCTRL_FUNCTION(IC3, IC3R, 7),
+			PIC32_PINCTRL_FUNCTION(IC7, IC7R, 7),
+			PIC32_PINCTRL_FUNCTION(U1RX, U1RXR, 7),
+			PIC32_PINCTRL_FUNCTION(U2CTS, U2CTSR, 7),
+			PIC32_PINCTRL_FUNCTION(U5RX, U5RXR, 7),
+			PIC32_PINCTRL_FUNCTION(U6CTS, U6CTSR, 7),
+			PIC32_PINCTRL_FUNCTION(SDI1, SDI1R, 7),
+			PIC32_PINCTRL_FUNCTION(SDI3, SDI3R, 7),
+			PIC32_PINCTRL_FUNCTION(SDI5, SDI5R, 7),
+			PIC32_PINCTRL_FUNCTION(SS6IN, SS6INR, 7),
+			PIC32_PINCTRL_FUNCTION(REFCLKI1, REFCLKI1R, 7),
+			PIC32_PINCTRL_FUNCTION(U3TX, RPC14R, 1),
+			PIC32_PINCTRL_FUNCTION(U4RTS, RPC14R, 2),
+			PIC32_PINCTRL_FUNCTION(SDO1, RPC14R, 5),
+			PIC32_PINCTRL_FUNCTION(SDO2, RPC14R, 6),
+			PIC32_PINCTRL_FUNCTION(SDO3, RPC14R, 7),
+			PIC32_PINCTRL_FUNCTION(SDO5, RPC14R, 9),
+			PIC32_PINCTRL_FUNCTION(SS6OUT, RPC14R, 10),
+			PIC32_PINCTRL_FUNCTION(OC3, RPC14R, 11),
+			PIC32_PINCTRL_FUNCTION(OC6, RPC14R, 12),
+			PIC32_PINCTRL_FUNCTION(REFCLKO4, RPC14R, 13),
+			PIC32_PINCTRL_FUNCTION(C2OUT, RPC14R, 14),
+			PIC32_PINCTRL_FUNCTION(C1TX, RPC14R, 15)),
+	PIC32_PINCTRL_GROUP(48, D0,
+			PIC32_PINCTRL_FUNCTION(INT1, INT1R, 3),
+			PIC32_PINCTRL_FUNCTION(T4CK, T4CKR, 3),
+			PIC32_PINCTRL_FUNCTION(T9CK, T9CKR, 3),
+			PIC32_PINCTRL_FUNCTION(IC1, IC1R, 3),
+			PIC32_PINCTRL_FUNCTION(IC6, IC6R, 3),
+			PIC32_PINCTRL_FUNCTION(U3CTS, U3CTSR, 3),
+			PIC32_PINCTRL_FUNCTION(U4RX, U4RXR, 3),
+			PIC32_PINCTRL_FUNCTION(U6RX, U6RXR, 3),
+			PIC32_PINCTRL_FUNCTION(SS2IN, SS2INR, 3),
+			PIC32_PINCTRL_FUNCTION(SDI6, SDI6R, 3),
+			PIC32_PINCTRL_FUNCTION(OCFA, OCFAR, 3),
+			PIC32_PINCTRL_FUNCTION(REFCLKI3, REFCLKI3R, 3),
+			PIC32_PINCTRL_FUNCTION(U1RTS, RPD0R, 1),
+			PIC32_PINCTRL_FUNCTION(U2TX, RPD0R, 2),
+			PIC32_PINCTRL_FUNCTION(U5RTS, RPD0R, 3),
+			PIC32_PINCTRL_FUNCTION(U6TX, RPD0R, 4),
+			PIC32_PINCTRL_FUNCTION(SS2OUT, RPD0R, 6),
+			PIC32_PINCTRL_FUNCTION(SDO4, RPD0R, 8),
+			PIC32_PINCTRL_FUNCTION(SDO6, RPD0R, 10),
+			PIC32_PINCTRL_FUNCTION(OC2, RPD0R, 11),
+			PIC32_PINCTRL_FUNCTION(OC1, RPD0R, 12),
+			PIC32_PINCTRL_FUNCTION(OC9, RPD0R, 13),
+			PIC32_PINCTRL_FUNCTION(C2TX, RPD0R, 15)),
+	PIC32_PINCTRL_GROUP(50, D2,
+			PIC32_PINCTRL_FUNCTION(INT3, INT3R, 0),
+			PIC32_PINCTRL_FUNCTION(T2CK, T2CKR, 0),
+			PIC32_PINCTRL_FUNCTION(T6CK, T6CKR, 0),
+			PIC32_PINCTRL_FUNCTION(IC3, IC3R, 0),
+			PIC32_PINCTRL_FUNCTION(IC7, IC7R, 0),
+			PIC32_PINCTRL_FUNCTION(U1RX, U1RXR, 0),
+			PIC32_PINCTRL_FUNCTION(U2CTS, U2CTSR, 0),
+			PIC32_PINCTRL_FUNCTION(U5RX, U5RXR, 0),
+			PIC32_PINCTRL_FUNCTION(U6CTS, U6CTSR, 0),
+			PIC32_PINCTRL_FUNCTION(SDI1, SDI1R, 0),
+			PIC32_PINCTRL_FUNCTION(SDI3, SDI3R, 0),
+			PIC32_PINCTRL_FUNCTION(SDI5, SDI5R, 0),
+			PIC32_PINCTRL_FUNCTION(SS6IN, SS6INR, 0),
+			PIC32_PINCTRL_FUNCTION(REFCLKI1, REFCLKI1R, 0),
+			PIC32_PINCTRL_FUNCTION(U3TX, RPD2R, 1),
+			PIC32_PINCTRL_FUNCTION(U4RTS, RPD2R, 2),
+			PIC32_PINCTRL_FUNCTION(SDO1, RPD2R, 5),
+			PIC32_PINCTRL_FUNCTION(SDO2, RPD2R, 6),
+			PIC32_PINCTRL_FUNCTION(SDO3, RPD2R, 7),
+			PIC32_PINCTRL_FUNCTION(SDO5, RPD2R, 9),
+			PIC32_PINCTRL_FUNCTION(SS6OUT, RPD2R, 10),
+			PIC32_PINCTRL_FUNCTION(OC3, RPD2R, 11),
+			PIC32_PINCTRL_FUNCTION(OC6, RPD2R, 12),
+			PIC32_PINCTRL_FUNCTION(REFCLKO4, RPD2R, 13),
+			PIC32_PINCTRL_FUNCTION(C2OUT, RPD2R, 14),
+			PIC32_PINCTRL_FUNCTION(C1TX, RPD2R, 15)),
+	PIC32_PINCTRL_GROUP(51, D3,
+			PIC32_PINCTRL_FUNCTION(INT4, INT4R, 0),
+			PIC32_PINCTRL_FUNCTION(T5CK, T5CKR, 0),
+			PIC32_PINCTRL_FUNCTION(T7CK, T7CKR, 0),
+			PIC32_PINCTRL_FUNCTION(IC4, IC4R, 0),
+			PIC32_PINCTRL_FUNCTION(IC8, IC8R, 0),
+			PIC32_PINCTRL_FUNCTION(U3RX, U3RXR, 0),
+			PIC32_PINCTRL_FUNCTION(U4CTS, U4CTSR, 0),
+			PIC32_PINCTRL_FUNCTION(SDI2, SDI2R, 0),
+			PIC32_PINCTRL_FUNCTION(SDI4, SDI4R, 0),
+			PIC32_PINCTRL_FUNCTION(C1RX, C1RXR, 0),
+			PIC32_PINCTRL_FUNCTION(REFCLKI4, REFCLKI4R, 0),
+			PIC32_PINCTRL_FUNCTION(U1TX, RPD3R, 1),
+			PIC32_PINCTRL_FUNCTION(U2RTS, RPD3R, 2),
+			PIC32_PINCTRL_FUNCTION(U5TX, RPD3R, 3),
+			PIC32_PINCTRL_FUNCTION(U6RTS, RPD3R, 4),
+			PIC32_PINCTRL_FUNCTION(SDO1, RPD3R, 5),
+			PIC32_PINCTRL_FUNCTION(SDO2, RPD3R, 6),
+			PIC32_PINCTRL_FUNCTION(SDO3, RPD3R, 7),
+			PIC32_PINCTRL_FUNCTION(SDO4, RPD3R, 8),
+			PIC32_PINCTRL_FUNCTION(SDO5, RPD3R, 9),
+			PIC32_PINCTRL_FUNCTION(OC4, RPD3R, 11),
+			PIC32_PINCTRL_FUNCTION(OC7, RPD3R, 12),
+			PIC32_PINCTRL_FUNCTION(REFCLKO1, RPD3R, 15)),
+	PIC32_PINCTRL_GROUP(52, D4,
+			PIC32_PINCTRL_FUNCTION(INT2, INT2R, 4),
+			PIC32_PINCTRL_FUNCTION(T3CK, T3CKR, 4),
+			PIC32_PINCTRL_FUNCTION(T8CK, T8CKR, 4),
+			PIC32_PINCTRL_FUNCTION(IC2, IC2R, 4),
+			PIC32_PINCTRL_FUNCTION(IC5, IC5R, 4),
+			PIC32_PINCTRL_FUNCTION(IC9, IC9R, 4),
+			PIC32_PINCTRL_FUNCTION(U1CTS, U1CTSR, 4),
+			PIC32_PINCTRL_FUNCTION(U2RX, U2RXR, 4),
+			PIC32_PINCTRL_FUNCTION(U5CTS, U5CTSR, 4),
+			PIC32_PINCTRL_FUNCTION(SS1IN, SS1INR, 4),
+			PIC32_PINCTRL_FUNCTION(SS3IN, SS3INR, 4),
+			PIC32_PINCTRL_FUNCTION(SS4IN, SS4INR, 4),
+			PIC32_PINCTRL_FUNCTION(SS5IN, SS5INR, 4),
+			PIC32_PINCTRL_FUNCTION(C2RX, C2RXR, 4),
+			PIC32_PINCTRL_FUNCTION(U3RTS, RPD4R, 1),
+			PIC32_PINCTRL_FUNCTION(U4TX, RPD4R, 2),
+			PIC32_PINCTRL_FUNCTION(U6TX, RPD4R, 4),
+			PIC32_PINCTRL_FUNCTION(SS1OUT, RPD4R, 5),
+			PIC32_PINCTRL_FUNCTION(SS3OUT, RPD4R, 7),
+			PIC32_PINCTRL_FUNCTION(SS4OUT, RPD4R, 8),
+			PIC32_PINCTRL_FUNCTION(SS5OUT, RPD4R, 9),
+			PIC32_PINCTRL_FUNCTION(SDO6, RPD4R, 10),
+			PIC32_PINCTRL_FUNCTION(OC5, RPD4R, 11),
+			PIC32_PINCTRL_FUNCTION(OC8, RPD4R, 12),
+			PIC32_PINCTRL_FUNCTION(C1OUT, RPD4R, 14),
+			PIC32_PINCTRL_FUNCTION(REFCLKO3, RPD4R, 15)),
+	PIC32_PINCTRL_GROUP(53, D5,
+			PIC32_PINCTRL_FUNCTION(INT1, INT1R, 6),
+			PIC32_PINCTRL_FUNCTION(T4CK, T4CKR, 6),
+			PIC32_PINCTRL_FUNCTION(T9CK, T9CKR, 6),
+			PIC32_PINCTRL_FUNCTION(IC1, IC1R, 6),
+			PIC32_PINCTRL_FUNCTION(IC6, IC6R, 6),
+			PIC32_PINCTRL_FUNCTION(U3CTS, U3CTSR, 6),
+			PIC32_PINCTRL_FUNCTION(U4RX, U4RXR, 6),
+			PIC32_PINCTRL_FUNCTION(U6RX, U6RXR, 6),
+			PIC32_PINCTRL_FUNCTION(SS2IN, SS2INR, 6),
+			PIC32_PINCTRL_FUNCTION(SDI6, SDI6R, 6),
+			PIC32_PINCTRL_FUNCTION(OCFA, OCFAR, 6),
+			PIC32_PINCTRL_FUNCTION(REFCLKI3, REFCLKI3R, 6),
+			PIC32_PINCTRL_FUNCTION(U1RTS, RPD5R, 1),
+			PIC32_PINCTRL_FUNCTION(U2TX, RPD5R, 2),
+			PIC32_PINCTRL_FUNCTION(U5RTS, RPD5R, 3),
+			PIC32_PINCTRL_FUNCTION(U6TX, RPD5R, 4),
+			PIC32_PINCTRL_FUNCTION(SS2OUT, RPD5R, 6),
+			PIC32_PINCTRL_FUNCTION(SDO4, RPD5R, 8),
+			PIC32_PINCTRL_FUNCTION(SDO6, RPD5R, 10),
+			PIC32_PINCTRL_FUNCTION(OC2, RPD5R, 11),
+			PIC32_PINCTRL_FUNCTION(OC1, RPD5R, 12),
+			PIC32_PINCTRL_FUNCTION(OC9, RPD5R, 13),
+			PIC32_PINCTRL_FUNCTION(C2TX, RPD5R, 15)),
+	PIC32_PINCTRL_GROUP(54, D6,
+			PIC32_PINCTRL_FUNCTION(INT3, INT3R, 14),
+			PIC32_PINCTRL_FUNCTION(T2CK, T2CKR, 14),
+			PIC32_PINCTRL_FUNCTION(T6CK, T6CKR, 14),
+			PIC32_PINCTRL_FUNCTION(IC3, IC3R, 14),
+			PIC32_PINCTRL_FUNCTION(IC7, IC7R, 14),
+			PIC32_PINCTRL_FUNCTION(U1RX, U1RXR, 14),
+			PIC32_PINCTRL_FUNCTION(U2CTS, U2CTSR, 14),
+			PIC32_PINCTRL_FUNCTION(U5RX, U5RXR, 14),
+			PIC32_PINCTRL_FUNCTION(U6CTS, U6CTSR, 14),
+			PIC32_PINCTRL_FUNCTION(SDI1, SDI1R, 14),
+			PIC32_PINCTRL_FUNCTION(SDI3, SDI3R, 14),
+			PIC32_PINCTRL_FUNCTION(SDI5, SDI5R, 14),
+			PIC32_PINCTRL_FUNCTION(SS6IN, SS6INR, 14),
+			PIC32_PINCTRL_FUNCTION(REFCLKI1, REFCLKI1R, 14),
+			PIC32_PINCTRL_FUNCTION(U3TX, RPD6R, 1),
+			PIC32_PINCTRL_FUNCTION(U4RTS, RPD6R, 2),
+			PIC32_PINCTRL_FUNCTION(SDO1, RPD6R, 5),
+			PIC32_PINCTRL_FUNCTION(SDO2, RPD6R, 6),
+			PIC32_PINCTRL_FUNCTION(SDO3, RPD6R, 7),
+			PIC32_PINCTRL_FUNCTION(SDO5, RPD6R, 9),
+			PIC32_PINCTRL_FUNCTION(SS6OUT, RPD6R, 10),
+			PIC32_PINCTRL_FUNCTION(OC3, RPD6R, 11),
+			PIC32_PINCTRL_FUNCTION(OC6, RPD6R, 12),
+			PIC32_PINCTRL_FUNCTION(REFCLKO4, RPD6R, 13),
+			PIC32_PINCTRL_FUNCTION(C2OUT, RPD6R, 14),
+			PIC32_PINCTRL_FUNCTION(C1TX, RPD6R, 15)),
+	PIC32_PINCTRL_GROUP(55, D7,
+			PIC32_PINCTRL_FUNCTION(INT4, INT4R, 14),
+			PIC32_PINCTRL_FUNCTION(T5CK, T5CKR, 14),
+			PIC32_PINCTRL_FUNCTION(T7CK, T7CKR, 14),
+			PIC32_PINCTRL_FUNCTION(IC4, IC4R, 14),
+			PIC32_PINCTRL_FUNCTION(IC8, IC8R, 14),
+			PIC32_PINCTRL_FUNCTION(U3RX, U3RXR, 14),
+			PIC32_PINCTRL_FUNCTION(U4CTS, U4CTSR, 14),
+			PIC32_PINCTRL_FUNCTION(SDI2, SDI2R, 14),
+			PIC32_PINCTRL_FUNCTION(SDI4, SDI4R, 14),
+			PIC32_PINCTRL_FUNCTION(C1RX, C1RXR, 14),
+			PIC32_PINCTRL_FUNCTION(REFCLKI4, REFCLKI4R, 14),
+			PIC32_PINCTRL_FUNCTION(U1TX, RPD7R, 1),
+			PIC32_PINCTRL_FUNCTION(U2RTS, RPD7R, 2),
+			PIC32_PINCTRL_FUNCTION(U5TX, RPD7R, 3),
+			PIC32_PINCTRL_FUNCTION(U6RTS, RPD7R, 4),
+			PIC32_PINCTRL_FUNCTION(SDO1, RPD7R, 5),
+			PIC32_PINCTRL_FUNCTION(SDO2, RPD7R, 6),
+			PIC32_PINCTRL_FUNCTION(SDO3, RPD7R, 7),
+			PIC32_PINCTRL_FUNCTION(SDO4, RPD7R, 8),
+			PIC32_PINCTRL_FUNCTION(SDO5, RPD7R, 9),
+			PIC32_PINCTRL_FUNCTION(OC4, RPD7R, 11),
+			PIC32_PINCTRL_FUNCTION(OC7, RPD7R, 12),
+			PIC32_PINCTRL_FUNCTION(REFCLKO1, RPD7R, 15)),
+	PIC32_PINCTRL_GROUP(57, D9,
+			PIC32_PINCTRL_FUNCTION(INT2, INT2R, 0),
+			PIC32_PINCTRL_FUNCTION(T3CK, T3CKR, 0),
+			PIC32_PINCTRL_FUNCTION(T8CK, T8CKR, 0),
+			PIC32_PINCTRL_FUNCTION(IC2, IC2R, 0),
+			PIC32_PINCTRL_FUNCTION(IC5, IC5R, 0),
+			PIC32_PINCTRL_FUNCTION(IC9, IC9R, 0),
+			PIC32_PINCTRL_FUNCTION(U1CTS, U1CTSR, 0),
+			PIC32_PINCTRL_FUNCTION(U2RX, U2RXR, 0),
+			PIC32_PINCTRL_FUNCTION(U5CTS, U5CTSR, 0),
+			PIC32_PINCTRL_FUNCTION(SS1IN, SS1INR, 0),
+			PIC32_PINCTRL_FUNCTION(SS3IN, SS3INR, 0),
+			PIC32_PINCTRL_FUNCTION(SS4IN, SS4INR, 0),
+			PIC32_PINCTRL_FUNCTION(SS5IN, SS5INR, 0),
+			PIC32_PINCTRL_FUNCTION(C2RX, C2RXR, 0),
+			PIC32_PINCTRL_FUNCTION(U3RTS, RPD9R, 1),
+			PIC32_PINCTRL_FUNCTION(U4TX, RPD9R, 2),
+			PIC32_PINCTRL_FUNCTION(U6TX, RPD9R, 4),
+			PIC32_PINCTRL_FUNCTION(SS1OUT, RPD9R, 5),
+			PIC32_PINCTRL_FUNCTION(SS3OUT, RPD9R, 7),
+			PIC32_PINCTRL_FUNCTION(SS4OUT, RPD9R, 8),
+			PIC32_PINCTRL_FUNCTION(SS5OUT, RPD9R, 9),
+			PIC32_PINCTRL_FUNCTION(SDO6, RPD9R, 10),
+			PIC32_PINCTRL_FUNCTION(OC5, RPD9R, 11),
+			PIC32_PINCTRL_FUNCTION(OC8, RPD9R, 12),
+			PIC32_PINCTRL_FUNCTION(C1OUT, RPD9R, 14),
+			PIC32_PINCTRL_FUNCTION(REFCLKO3, RPD9R, 15)),
+	PIC32_PINCTRL_GROUP(58, D10,
+			PIC32_PINCTRL_FUNCTION(U3TX, RPD10R, 1),
+			PIC32_PINCTRL_FUNCTION(U4RTS, RPD10R, 2),
+			PIC32_PINCTRL_FUNCTION(SDO1, RPD10R, 5),
+			PIC32_PINCTRL_FUNCTION(SDO2, RPD10R, 6),
+			PIC32_PINCTRL_FUNCTION(SDO3, RPD10R, 7),
+			PIC32_PINCTRL_FUNCTION(SDO5, RPD10R, 9),
+			PIC32_PINCTRL_FUNCTION(SS6OUT, RPD10R, 10),
+			PIC32_PINCTRL_FUNCTION(OC3, RPD10R, 11),
+			PIC32_PINCTRL_FUNCTION(OC6, RPD10R, 12),
+			PIC32_PINCTRL_FUNCTION(REFCLKO4, RPD10R, 13),
+			PIC32_PINCTRL_FUNCTION(C2OUT, RPD10R, 14),
+			PIC32_PINCTRL_FUNCTION(C1TX, RPD10R, 15)),
+	PIC32_PINCTRL_GROUP(59, D11,
+			PIC32_PINCTRL_FUNCTION(INT4, INT4R, 3),
+			PIC32_PINCTRL_FUNCTION(T5CK, T5CKR, 3),
+			PIC32_PINCTRL_FUNCTION(T7CK, T7CKR, 3),
+			PIC32_PINCTRL_FUNCTION(IC4, IC4R, 3),
+			PIC32_PINCTRL_FUNCTION(IC8, IC8R, 3),
+			PIC32_PINCTRL_FUNCTION(U3RX, U3RXR, 3),
+			PIC32_PINCTRL_FUNCTION(U4CTS, U4CTSR, 3),
+			PIC32_PINCTRL_FUNCTION(SDI2, SDI2R, 3),
+			PIC32_PINCTRL_FUNCTION(SDI4, SDI4R, 3),
+			PIC32_PINCTRL_FUNCTION(C1RX, C1RXR, 3),
+			PIC32_PINCTRL_FUNCTION(REFCLKI4, REFCLKI4R, 3),
+			PIC32_PINCTRL_FUNCTION(U1TX, RPD11R, 1),
+			PIC32_PINCTRL_FUNCTION(U2RTS, RPD11R, 2),
+			PIC32_PINCTRL_FUNCTION(U5TX, RPD11R, 3),
+			PIC32_PINCTRL_FUNCTION(U6RTS, RPD11R, 4),
+			PIC32_PINCTRL_FUNCTION(SDO1, RPD11R, 5),
+			PIC32_PINCTRL_FUNCTION(SDO2, RPD11R, 6),
+			PIC32_PINCTRL_FUNCTION(SDO3, RPD11R, 7),
+			PIC32_PINCTRL_FUNCTION(SDO4, RPD11R, 8),
+			PIC32_PINCTRL_FUNCTION(SDO5, RPD11R, 9),
+			PIC32_PINCTRL_FUNCTION(OC4, RPD11R, 11),
+			PIC32_PINCTRL_FUNCTION(OC7, RPD11R, 12),
+			PIC32_PINCTRL_FUNCTION(REFCLKO1, RPD11R, 15)),
+	PIC32_PINCTRL_GROUP(60, D12,
+			PIC32_PINCTRL_FUNCTION(INT2, INT2R, 10),
+			PIC32_PINCTRL_FUNCTION(T3CK, T3CKR, 10),
+			PIC32_PINCTRL_FUNCTION(T8CK, T8CKR, 10),
+			PIC32_PINCTRL_FUNCTION(IC2, IC2R, 10),
+			PIC32_PINCTRL_FUNCTION(IC5, IC5R, 10),
+			PIC32_PINCTRL_FUNCTION(IC9, IC9R, 10),
+			PIC32_PINCTRL_FUNCTION(U1CTS, U1CTSR, 10),
+			PIC32_PINCTRL_FUNCTION(U2RX, U2RXR, 10),
+			PIC32_PINCTRL_FUNCTION(U5CTS, U5CTSR, 10),
+			PIC32_PINCTRL_FUNCTION(SS1IN, SS1INR, 10),
+			PIC32_PINCTRL_FUNCTION(SS3IN, SS3INR, 10),
+			PIC32_PINCTRL_FUNCTION(SS4IN, SS4INR, 10),
+			PIC32_PINCTRL_FUNCTION(SS5IN, SS5INR, 10),
+			PIC32_PINCTRL_FUNCTION(C2RX, C2RXR, 10),
+			PIC32_PINCTRL_FUNCTION(U3RTS, RPD12R, 1),
+			PIC32_PINCTRL_FUNCTION(U4TX, RPD12R, 2),
+			PIC32_PINCTRL_FUNCTION(U6TX, RPD12R, 4),
+			PIC32_PINCTRL_FUNCTION(SS1OUT, RPD12R, 5),
+			PIC32_PINCTRL_FUNCTION(SS3OUT, RPD12R, 7),
+			PIC32_PINCTRL_FUNCTION(SS4OUT, RPD12R, 8),
+			PIC32_PINCTRL_FUNCTION(SS5OUT, RPD12R, 9),
+			PIC32_PINCTRL_FUNCTION(SDO6, RPD12R, 10),
+			PIC32_PINCTRL_FUNCTION(OC5, RPD12R, 11),
+			PIC32_PINCTRL_FUNCTION(OC8, RPD12R, 12),
+			PIC32_PINCTRL_FUNCTION(C1OUT, RPD12R, 14),
+			PIC32_PINCTRL_FUNCTION(REFCLKO3, RPD12R, 15)),
+	PIC32_PINCTRL_GROUP(62, D14,
+			PIC32_PINCTRL_FUNCTION(INT3, INT3R, 11),
+			PIC32_PINCTRL_FUNCTION(T2CK, T2CKR, 11),
+			PIC32_PINCTRL_FUNCTION(T6CK, T6CKR, 11),
+			PIC32_PINCTRL_FUNCTION(IC3, IC3R, 11),
+			PIC32_PINCTRL_FUNCTION(IC7, IC7R, 11),
+			PIC32_PINCTRL_FUNCTION(U1RX, U1RXR, 11),
+			PIC32_PINCTRL_FUNCTION(U2CTS, U2CTSR, 11),
+			PIC32_PINCTRL_FUNCTION(U5RX, U5RXR, 11),
+			PIC32_PINCTRL_FUNCTION(U6CTS, U6CTSR, 11),
+			PIC32_PINCTRL_FUNCTION(SDI1, SDI1R, 11),
+			PIC32_PINCTRL_FUNCTION(SDI3, SDI3R, 11),
+			PIC32_PINCTRL_FUNCTION(SDI5, SDI5R, 11),
+			PIC32_PINCTRL_FUNCTION(SS6IN, SS6INR, 11),
+			PIC32_PINCTRL_FUNCTION(REFCLKI1, REFCLKI1R, 11),
+			PIC32_PINCTRL_FUNCTION(U3TX, RPD14R, 1),
+			PIC32_PINCTRL_FUNCTION(U4RTS, RPD14R, 2),
+			PIC32_PINCTRL_FUNCTION(SDO1, RPD14R, 5),
+			PIC32_PINCTRL_FUNCTION(SDO2, RPD14R, 6),
+			PIC32_PINCTRL_FUNCTION(SDO3, RPD14R, 7),
+			PIC32_PINCTRL_FUNCTION(SDO5, RPD14R, 9),
+			PIC32_PINCTRL_FUNCTION(SS6OUT, RPD14R, 10),
+			PIC32_PINCTRL_FUNCTION(OC3, RPD14R, 11),
+			PIC32_PINCTRL_FUNCTION(OC6, RPD14R, 12),
+			PIC32_PINCTRL_FUNCTION(REFCLKO4, RPD14R, 13),
+			PIC32_PINCTRL_FUNCTION(C2OUT, RPD14R, 14),
+			PIC32_PINCTRL_FUNCTION(C1TX, RPD14R, 15)),
+	PIC32_PINCTRL_GROUP(63, D15,
+			PIC32_PINCTRL_FUNCTION(U1TX, RPD15R, 1),
+			PIC32_PINCTRL_FUNCTION(U2RTS, RPD15R, 2),
+			PIC32_PINCTRL_FUNCTION(U5TX, RPD15R, 3),
+			PIC32_PINCTRL_FUNCTION(U6RTS, RPD15R, 4),
+			PIC32_PINCTRL_FUNCTION(SDO1, RPD15R, 5),
+			PIC32_PINCTRL_FUNCTION(SDO2, RPD15R, 6),
+			PIC32_PINCTRL_FUNCTION(SDO3, RPD15R, 7),
+			PIC32_PINCTRL_FUNCTION(SDO4, RPD15R, 8),
+			PIC32_PINCTRL_FUNCTION(SDO5, RPD15R, 9),
+			PIC32_PINCTRL_FUNCTION(OC4, RPD15R, 11),
+			PIC32_PINCTRL_FUNCTION(OC7, RPD15R, 12),
+			PIC32_PINCTRL_FUNCTION(REFCLKO1, RPD15R, 15)),
+	PIC32_PINCTRL_GROUP(67, E3,
+			PIC32_PINCTRL_FUNCTION(INT2, INT2R, 6),
+			PIC32_PINCTRL_FUNCTION(T3CK, T3CKR, 6),
+			PIC32_PINCTRL_FUNCTION(T8CK, T8CKR, 6),
+			PIC32_PINCTRL_FUNCTION(IC2, IC2R, 6),
+			PIC32_PINCTRL_FUNCTION(IC5, IC5R, 6),
+			PIC32_PINCTRL_FUNCTION(IC9, IC9R, 6),
+			PIC32_PINCTRL_FUNCTION(U1CTS, U1CTSR, 6),
+			PIC32_PINCTRL_FUNCTION(U2RX, U2RXR, 6),
+			PIC32_PINCTRL_FUNCTION(U5CTS, U5CTSR, 6),
+			PIC32_PINCTRL_FUNCTION(SS1IN, SS1INR, 6),
+			PIC32_PINCTRL_FUNCTION(SS3IN, SS3INR, 6),
+			PIC32_PINCTRL_FUNCTION(SS4IN, SS4INR, 6),
+			PIC32_PINCTRL_FUNCTION(SS5IN, SS5INR, 6),
+			PIC32_PINCTRL_FUNCTION(C2RX, C2RXR, 6),
+			PIC32_PINCTRL_FUNCTION(U3RTS, RPE3R, 1),
+			PIC32_PINCTRL_FUNCTION(U4TX, RPE3R, 2),
+			PIC32_PINCTRL_FUNCTION(U6TX, RPE3R, 4),
+			PIC32_PINCTRL_FUNCTION(SS1OUT, RPE3R, 5),
+			PIC32_PINCTRL_FUNCTION(SS3OUT, RPE3R, 7),
+			PIC32_PINCTRL_FUNCTION(SS4OUT, RPE3R, 8),
+			PIC32_PINCTRL_FUNCTION(SS5OUT, RPE3R, 9),
+			PIC32_PINCTRL_FUNCTION(SDO6, RPE3R, 10),
+			PIC32_PINCTRL_FUNCTION(OC5, RPE3R, 11),
+			PIC32_PINCTRL_FUNCTION(OC8, RPE3R, 12),
+			PIC32_PINCTRL_FUNCTION(C1OUT, RPE3R, 14),
+			PIC32_PINCTRL_FUNCTION(REFCLKO3, RPE3R, 15)),
+	PIC32_PINCTRL_GROUP(69, E5,
+			PIC32_PINCTRL_FUNCTION(INT4, INT4R, 6),
+			PIC32_PINCTRL_FUNCTION(T5CK, T5CKR, 6),
+			PIC32_PINCTRL_FUNCTION(T7CK, T7CKR, 6),
+			PIC32_PINCTRL_FUNCTION(IC4, IC4R, 6),
+			PIC32_PINCTRL_FUNCTION(IC8, IC8R, 6),
+			PIC32_PINCTRL_FUNCTION(U3RX, U3RXR, 6),
+			PIC32_PINCTRL_FUNCTION(U4CTS, U4CTSR, 6),
+			PIC32_PINCTRL_FUNCTION(SDI2, SDI2R, 6),
+			PIC32_PINCTRL_FUNCTION(SDI4, SDI4R, 6),
+			PIC32_PINCTRL_FUNCTION(C1RX, C1RXR, 6),
+			PIC32_PINCTRL_FUNCTION(REFCLKI4, REFCLKI4R, 6),
+			PIC32_PINCTRL_FUNCTION(U1TX, RPE5R, 1),
+			PIC32_PINCTRL_FUNCTION(U2RTS, RPE5R, 2),
+			PIC32_PINCTRL_FUNCTION(U5TX, RPE5R, 3),
+			PIC32_PINCTRL_FUNCTION(U6RTS, RPE5R, 4),
+			PIC32_PINCTRL_FUNCTION(SDO1, RPE5R, 5),
+			PIC32_PINCTRL_FUNCTION(SDO2, RPE5R, 6),
+			PIC32_PINCTRL_FUNCTION(SDO3, RPE5R, 7),
+			PIC32_PINCTRL_FUNCTION(SDO4, RPE5R, 8),
+			PIC32_PINCTRL_FUNCTION(SDO5, RPE5R, 9),
+			PIC32_PINCTRL_FUNCTION(OC4, RPE5R, 11),
+			PIC32_PINCTRL_FUNCTION(OC7, RPE5R, 12),
+			PIC32_PINCTRL_FUNCTION(REFCLKO1, RPE5R, 15)),
+	PIC32_PINCTRL_GROUP(72, E8,
+			PIC32_PINCTRL_FUNCTION(INT1, INT1R, 13),
+			PIC32_PINCTRL_FUNCTION(T4CK, T4CKR, 13),
+			PIC32_PINCTRL_FUNCTION(T9CK, T9CKR, 13),
+			PIC32_PINCTRL_FUNCTION(IC1, IC1R, 13),
+			PIC32_PINCTRL_FUNCTION(IC6, IC6R, 13),
+			PIC32_PINCTRL_FUNCTION(U3CTS, U3CTSR, 13),
+			PIC32_PINCTRL_FUNCTION(U4RX, U4RXR, 13),
+			PIC32_PINCTRL_FUNCTION(U6RX, U6RXR, 13),
+			PIC32_PINCTRL_FUNCTION(SS2IN, SS2INR, 13),
+			PIC32_PINCTRL_FUNCTION(SDI6, SDI6R, 13),
+			PIC32_PINCTRL_FUNCTION(OCFA, OCFAR, 13),
+			PIC32_PINCTRL_FUNCTION(REFCLKI3, REFCLKI3R, 13),
+			PIC32_PINCTRL_FUNCTION(U1RTS, RPE8R, 1),
+			PIC32_PINCTRL_FUNCTION(U2TX, RPE8R, 2),
+			PIC32_PINCTRL_FUNCTION(U5RTS, RPE8R, 3),
+			PIC32_PINCTRL_FUNCTION(U6TX, RPE8R, 4),
+			PIC32_PINCTRL_FUNCTION(SS2OUT, RPE8R, 6),
+			PIC32_PINCTRL_FUNCTION(SDO4, RPE8R, 8),
+			PIC32_PINCTRL_FUNCTION(SDO6, RPE8R, 10),
+			PIC32_PINCTRL_FUNCTION(OC2, RPE8R, 11),
+			PIC32_PINCTRL_FUNCTION(OC1, RPE8R, 12),
+			PIC32_PINCTRL_FUNCTION(OC9, RPE8R, 13),
+			PIC32_PINCTRL_FUNCTION(C2TX, RPE8R, 15)),
+	PIC32_PINCTRL_GROUP(73, E9,
+			PIC32_PINCTRL_FUNCTION(INT2, INT2R, 13),
+			PIC32_PINCTRL_FUNCTION(T3CK, T3CKR, 13),
+			PIC32_PINCTRL_FUNCTION(T8CK, T8CKR, 13),
+			PIC32_PINCTRL_FUNCTION(IC2, IC2R, 13),
+			PIC32_PINCTRL_FUNCTION(IC5, IC5R, 13),
+			PIC32_PINCTRL_FUNCTION(IC9, IC9R, 13),
+			PIC32_PINCTRL_FUNCTION(U1CTS, U1CTSR, 13),
+			PIC32_PINCTRL_FUNCTION(U2RX, U2RXR, 13),
+			PIC32_PINCTRL_FUNCTION(U5CTS, U5CTSR, 13),
+			PIC32_PINCTRL_FUNCTION(SS1IN, SS1INR, 13),
+			PIC32_PINCTRL_FUNCTION(SS3IN, SS3INR, 13),
+			PIC32_PINCTRL_FUNCTION(SS4IN, SS4INR, 13),
+			PIC32_PINCTRL_FUNCTION(SS5IN, SS5INR, 13),
+			PIC32_PINCTRL_FUNCTION(C2RX, C2RXR, 13),
+			PIC32_PINCTRL_FUNCTION(U3RTS, RPE9R, 1),
+			PIC32_PINCTRL_FUNCTION(U4TX, RPE9R, 2),
+			PIC32_PINCTRL_FUNCTION(U6TX, RPE9R, 4),
+			PIC32_PINCTRL_FUNCTION(SS1OUT, RPE9R, 5),
+			PIC32_PINCTRL_FUNCTION(SS3OUT, RPE9R, 7),
+			PIC32_PINCTRL_FUNCTION(SS4OUT, RPE9R, 8),
+			PIC32_PINCTRL_FUNCTION(SS5OUT, RPE9R, 9),
+			PIC32_PINCTRL_FUNCTION(SDO6, RPE9R, 10),
+			PIC32_PINCTRL_FUNCTION(OC5, RPE9R, 11),
+			PIC32_PINCTRL_FUNCTION(OC8, RPE9R, 12),
+			PIC32_PINCTRL_FUNCTION(C1OUT, RPE9R, 14),
+			PIC32_PINCTRL_FUNCTION(REFCLKO3, RPE9R, 15)),
+	PIC32_PINCTRL_GROUP(80, F0,
+			PIC32_PINCTRL_FUNCTION(INT4, INT4R, 4),
+			PIC32_PINCTRL_FUNCTION(T5CK, T5CKR, 4),
+			PIC32_PINCTRL_FUNCTION(T7CK, T7CKR, 4),
+			PIC32_PINCTRL_FUNCTION(IC4, IC4R, 4),
+			PIC32_PINCTRL_FUNCTION(IC8, IC8R, 4),
+			PIC32_PINCTRL_FUNCTION(U3RX, U3RXR, 4),
+			PIC32_PINCTRL_FUNCTION(U4CTS, U4CTSR, 4),
+			PIC32_PINCTRL_FUNCTION(SDI2, SDI2R, 4),
+			PIC32_PINCTRL_FUNCTION(SDI4, SDI4R, 4),
+			PIC32_PINCTRL_FUNCTION(C1RX, C1RXR, 4),
+			PIC32_PINCTRL_FUNCTION(REFCLKI4, REFCLKI4R, 4),
+			PIC32_PINCTRL_FUNCTION(U1TX, RPF0R, 1),
+			PIC32_PINCTRL_FUNCTION(U2RTS, RPF0R, 2),
+			PIC32_PINCTRL_FUNCTION(U5TX, RPF0R, 3),
+			PIC32_PINCTRL_FUNCTION(U6RTS, RPF0R, 4),
+			PIC32_PINCTRL_FUNCTION(SDO1, RPF0R, 5),
+			PIC32_PINCTRL_FUNCTION(SDO2, RPF0R, 6),
+			PIC32_PINCTRL_FUNCTION(SDO3, RPF0R, 7),
+			PIC32_PINCTRL_FUNCTION(SDO4, RPF0R, 8),
+			PIC32_PINCTRL_FUNCTION(SDO5, RPF0R, 9),
+			PIC32_PINCTRL_FUNCTION(OC4, RPF0R, 11),
+			PIC32_PINCTRL_FUNCTION(OC7, RPF0R, 12),
+			PIC32_PINCTRL_FUNCTION(REFCLKO1, RPF0R, 15)),
+	PIC32_PINCTRL_GROUP(81, F1,
+			PIC32_PINCTRL_FUNCTION(INT3, INT3R, 4),
+			PIC32_PINCTRL_FUNCTION(T2CK, T2CKR, 4),
+			PIC32_PINCTRL_FUNCTION(T6CK, T6CKR, 4),
+			PIC32_PINCTRL_FUNCTION(IC3, IC3R, 4),
+			PIC32_PINCTRL_FUNCTION(IC7, IC7R, 4),
+			PIC32_PINCTRL_FUNCTION(U1RX, U1RXR, 4),
+			PIC32_PINCTRL_FUNCTION(U2CTS, U2CTSR, 4),
+			PIC32_PINCTRL_FUNCTION(U5RX, U5RXR, 4),
+			PIC32_PINCTRL_FUNCTION(U6CTS, U6CTSR, 4),
+			PIC32_PINCTRL_FUNCTION(SDI1, SDI1R, 4),
+			PIC32_PINCTRL_FUNCTION(SDI3, SDI3R, 4),
+			PIC32_PINCTRL_FUNCTION(SDI5, SDI5R, 4),
+			PIC32_PINCTRL_FUNCTION(SS6IN, SS6INR, 4),
+			PIC32_PINCTRL_FUNCTION(REFCLKI1, REFCLKI1R, 4),
+			PIC32_PINCTRL_FUNCTION(U3TX, RPF1R, 1),
+			PIC32_PINCTRL_FUNCTION(U4RTS, RPF1R, 2),
+			PIC32_PINCTRL_FUNCTION(SDO1, RPF1R, 5),
+			PIC32_PINCTRL_FUNCTION(SDO2, RPF1R, 6),
+			PIC32_PINCTRL_FUNCTION(SDO3, RPF1R, 7),
+			PIC32_PINCTRL_FUNCTION(SDO5, RPF1R, 9),
+			PIC32_PINCTRL_FUNCTION(SS6OUT, RPF1R, 10),
+			PIC32_PINCTRL_FUNCTION(OC3, RPF1R, 11),
+			PIC32_PINCTRL_FUNCTION(OC6, RPF1R, 12),
+			PIC32_PINCTRL_FUNCTION(REFCLKO4, RPF1R, 13),
+			PIC32_PINCTRL_FUNCTION(C2OUT, RPF1R, 14),
+			PIC32_PINCTRL_FUNCTION(C1TX, RPF1R, 15)),
+	PIC32_PINCTRL_GROUP(82, F2,
+			PIC32_PINCTRL_FUNCTION(INT1, INT1R, 11),
+			PIC32_PINCTRL_FUNCTION(T4CK, T4CKR, 11),
+			PIC32_PINCTRL_FUNCTION(T9CK, T9CKR, 11),
+			PIC32_PINCTRL_FUNCTION(IC1, IC1R, 11),
+			PIC32_PINCTRL_FUNCTION(IC6, IC6R, 11),
+			PIC32_PINCTRL_FUNCTION(U3CTS, U3CTSR, 11),
+			PIC32_PINCTRL_FUNCTION(U4RX, U4RXR, 11),
+			PIC32_PINCTRL_FUNCTION(U6RX, U6RXR, 11),
+			PIC32_PINCTRL_FUNCTION(SS2IN, SS2INR, 11),
+			PIC32_PINCTRL_FUNCTION(SDI6, SDI6R, 11),
+			PIC32_PINCTRL_FUNCTION(OCFA, OCFAR, 11),
+			PIC32_PINCTRL_FUNCTION(REFCLKI3, REFCLKI3R, 11),
+			PIC32_PINCTRL_FUNCTION(U1RTS, RPF2R, 1),
+			PIC32_PINCTRL_FUNCTION(U2TX, RPF2R, 2),
+			PIC32_PINCTRL_FUNCTION(U5RTS, RPF2R, 3),
+			PIC32_PINCTRL_FUNCTION(U6TX, RPF2R, 4),
+			PIC32_PINCTRL_FUNCTION(SS2OUT, RPF2R, 6),
+			PIC32_PINCTRL_FUNCTION(SDO4, RPF2R, 8),
+			PIC32_PINCTRL_FUNCTION(SDO6, RPF2R, 10),
+			PIC32_PINCTRL_FUNCTION(OC2, RPF2R, 11),
+			PIC32_PINCTRL_FUNCTION(OC1, RPF2R, 12),
+			PIC32_PINCTRL_FUNCTION(OC9, RPF2R, 13),
+			PIC32_PINCTRL_FUNCTION(C2TX, RPF2R, 15)),
+	PIC32_PINCTRL_GROUP(83, F3,
+			PIC32_PINCTRL_FUNCTION(INT1, INT1R, 8),
+			PIC32_PINCTRL_FUNCTION(T4CK, T4CKR, 8),
+			PIC32_PINCTRL_FUNCTION(T9CK, T9CKR, 8),
+			PIC32_PINCTRL_FUNCTION(IC1, IC1R, 8),
+			PIC32_PINCTRL_FUNCTION(IC6, IC6R, 8),
+			PIC32_PINCTRL_FUNCTION(U3CTS, U3CTSR, 8),
+			PIC32_PINCTRL_FUNCTION(U4RX, U4RXR, 8),
+			PIC32_PINCTRL_FUNCTION(U6RX, U6RXR, 8),
+			PIC32_PINCTRL_FUNCTION(SS2IN, SS2INR, 8),
+			PIC32_PINCTRL_FUNCTION(SDI6, SDI6R, 8),
+			PIC32_PINCTRL_FUNCTION(OCFA, OCFAR, 8),
+			PIC32_PINCTRL_FUNCTION(REFCLKI3, REFCLKI3R, 8),
+			PIC32_PINCTRL_FUNCTION(U1RTS, RPF3R, 1),
+			PIC32_PINCTRL_FUNCTION(U2TX, RPF3R, 2),
+			PIC32_PINCTRL_FUNCTION(U5RTS, RPF3R, 3),
+			PIC32_PINCTRL_FUNCTION(U6TX, RPF3R, 4),
+			PIC32_PINCTRL_FUNCTION(SS2OUT, RPF3R, 6),
+			PIC32_PINCTRL_FUNCTION(SDO4, RPF3R, 8),
+			PIC32_PINCTRL_FUNCTION(SDO6, RPF3R, 10),
+			PIC32_PINCTRL_FUNCTION(OC2, RPF3R, 11),
+			PIC32_PINCTRL_FUNCTION(OC1, RPF3R, 12),
+			PIC32_PINCTRL_FUNCTION(OC9, RPF3R, 13),
+			PIC32_PINCTRL_FUNCTION(C2TX, RPF3R, 15)),
+	PIC32_PINCTRL_GROUP(84, F4,
+			PIC32_PINCTRL_FUNCTION(INT3, INT3R, 2),
+			PIC32_PINCTRL_FUNCTION(T2CK, T2CKR, 2),
+			PIC32_PINCTRL_FUNCTION(T6CK, T6CKR, 2),
+			PIC32_PINCTRL_FUNCTION(IC3, IC3R, 2),
+			PIC32_PINCTRL_FUNCTION(IC7, IC7R, 2),
+			PIC32_PINCTRL_FUNCTION(U1RX, U1RXR, 2),
+			PIC32_PINCTRL_FUNCTION(U2CTS, U2CTSR, 2),
+			PIC32_PINCTRL_FUNCTION(U5RX, U5RXR, 2),
+			PIC32_PINCTRL_FUNCTION(U6CTS, U6CTSR, 2),
+			PIC32_PINCTRL_FUNCTION(SDI1, SDI1R, 2),
+			PIC32_PINCTRL_FUNCTION(SDI3, SDI3R, 2),
+			PIC32_PINCTRL_FUNCTION(SDI5, SDI5R, 2),
+			PIC32_PINCTRL_FUNCTION(SS6IN, SS6INR, 2),
+			PIC32_PINCTRL_FUNCTION(REFCLKI1, REFCLKI1R, 2),
+			PIC32_PINCTRL_FUNCTION(U3TX, RPF4R, 1),
+			PIC32_PINCTRL_FUNCTION(U4RTS, RPF4R, 2),
+			PIC32_PINCTRL_FUNCTION(SDO1, RPF4R, 5),
+			PIC32_PINCTRL_FUNCTION(SDO2, RPF4R, 6),
+			PIC32_PINCTRL_FUNCTION(SDO3, RPF4R, 7),
+			PIC32_PINCTRL_FUNCTION(SDO5, RPF4R, 9),
+			PIC32_PINCTRL_FUNCTION(SS6OUT, RPF4R, 10),
+			PIC32_PINCTRL_FUNCTION(OC3, RPF4R, 11),
+			PIC32_PINCTRL_FUNCTION(OC6, RPF4R, 12),
+			PIC32_PINCTRL_FUNCTION(REFCLKO4, RPF4R, 13),
+			PIC32_PINCTRL_FUNCTION(C2OUT, RPF4R, 14),
+			PIC32_PINCTRL_FUNCTION(C1TX, RPF4R, 15)),
+	PIC32_PINCTRL_GROUP(85, F5,
+			PIC32_PINCTRL_FUNCTION(INT4, INT4R, 2),
+			PIC32_PINCTRL_FUNCTION(T5CK, T5CKR, 2),
+			PIC32_PINCTRL_FUNCTION(T7CK, T7CKR, 2),
+			PIC32_PINCTRL_FUNCTION(IC4, IC4R, 2),
+			PIC32_PINCTRL_FUNCTION(IC8, IC8R, 2),
+			PIC32_PINCTRL_FUNCTION(U3RX, U3RXR, 2),
+			PIC32_PINCTRL_FUNCTION(U4CTS, U4CTSR, 2),
+			PIC32_PINCTRL_FUNCTION(SDI2, SDI2R, 2),
+			PIC32_PINCTRL_FUNCTION(SDI4, SDI4R, 2),
+			PIC32_PINCTRL_FUNCTION(C1RX, C1RXR, 2),
+			PIC32_PINCTRL_FUNCTION(REFCLKI4, REFCLKI4R, 2),
+			PIC32_PINCTRL_FUNCTION(U1TX, RPF5R, 1),
+			PIC32_PINCTRL_FUNCTION(U2RTS, RPF5R, 2),
+			PIC32_PINCTRL_FUNCTION(U5TX, RPF5R, 3),
+			PIC32_PINCTRL_FUNCTION(U6RTS, RPF5R, 4),
+			PIC32_PINCTRL_FUNCTION(SDO1, RPF5R, 5),
+			PIC32_PINCTRL_FUNCTION(SDO2, RPF5R, 6),
+			PIC32_PINCTRL_FUNCTION(SDO3, RPF5R, 7),
+			PIC32_PINCTRL_FUNCTION(SDO4, RPF5R, 8),
+			PIC32_PINCTRL_FUNCTION(SDO5, RPF5R, 9),
+			PIC32_PINCTRL_FUNCTION(OC4, RPF5R, 11),
+			PIC32_PINCTRL_FUNCTION(OC7, RPF5R, 12),
+			PIC32_PINCTRL_FUNCTION(REFCLKO1, RPF5R, 15)),
+	PIC32_PINCTRL_GROUP(88, F8,
+			PIC32_PINCTRL_FUNCTION(INT2, INT2R, 11),
+			PIC32_PINCTRL_FUNCTION(T3CK, T3CKR, 11),
+			PIC32_PINCTRL_FUNCTION(T8CK, T8CKR, 11),
+			PIC32_PINCTRL_FUNCTION(IC2, IC2R, 11),
+			PIC32_PINCTRL_FUNCTION(IC5, IC5R, 11),
+			PIC32_PINCTRL_FUNCTION(IC9, IC9R, 11),
+			PIC32_PINCTRL_FUNCTION(U1CTS, U1CTSR, 11),
+			PIC32_PINCTRL_FUNCTION(U2RX, U2RXR, 11),
+			PIC32_PINCTRL_FUNCTION(U5CTS, U5CTSR, 11),
+			PIC32_PINCTRL_FUNCTION(SS1IN, SS1INR, 11),
+			PIC32_PINCTRL_FUNCTION(SS3IN, SS3INR, 11),
+			PIC32_PINCTRL_FUNCTION(SS4IN, SS4INR, 11),
+			PIC32_PINCTRL_FUNCTION(SS5IN, SS5INR, 11),
+			PIC32_PINCTRL_FUNCTION(C2RX, C2RXR, 11),
+			PIC32_PINCTRL_FUNCTION(U3RTS, RPF8R, 1),
+			PIC32_PINCTRL_FUNCTION(U4TX, RPF8R, 2),
+			PIC32_PINCTRL_FUNCTION(U6TX, RPF8R, 4),
+			PIC32_PINCTRL_FUNCTION(SS1OUT, RPF8R, 5),
+			PIC32_PINCTRL_FUNCTION(SS3OUT, RPF8R, 7),
+			PIC32_PINCTRL_FUNCTION(SS4OUT, RPF8R, 8),
+			PIC32_PINCTRL_FUNCTION(SS5OUT, RPF8R, 9),
+			PIC32_PINCTRL_FUNCTION(SDO6, RPF8R, 10),
+			PIC32_PINCTRL_FUNCTION(OC5, RPF8R, 11),
+			PIC32_PINCTRL_FUNCTION(OC8, RPF8R, 12),
+			PIC32_PINCTRL_FUNCTION(C1OUT, RPF8R, 14),
+			PIC32_PINCTRL_FUNCTION(REFCLKO3, RPF8R, 15)),
+	PIC32_PINCTRL_GROUP(92, F12,
+			PIC32_PINCTRL_FUNCTION(INT2, INT2R, 9),
+			PIC32_PINCTRL_FUNCTION(T3CK, T3CKR, 9),
+			PIC32_PINCTRL_FUNCTION(T8CK, T8CKR, 9),
+			PIC32_PINCTRL_FUNCTION(IC2, IC2R, 9),
+			PIC32_PINCTRL_FUNCTION(IC5, IC5R, 9),
+			PIC32_PINCTRL_FUNCTION(IC9, IC9R, 9),
+			PIC32_PINCTRL_FUNCTION(U1CTS, U1CTSR, 9),
+			PIC32_PINCTRL_FUNCTION(U2RX, U2RXR, 9),
+			PIC32_PINCTRL_FUNCTION(U5CTS, U5CTSR, 9),
+			PIC32_PINCTRL_FUNCTION(SS1IN, SS1INR, 9),
+			PIC32_PINCTRL_FUNCTION(SS3IN, SS3INR, 9),
+			PIC32_PINCTRL_FUNCTION(SS4IN, SS4INR, 9),
+			PIC32_PINCTRL_FUNCTION(SS5IN, SS5INR, 9),
+			PIC32_PINCTRL_FUNCTION(C2RX, C2RXR, 9),
+			PIC32_PINCTRL_FUNCTION(U3RTS, RPF12R, 1),
+			PIC32_PINCTRL_FUNCTION(U4TX, RPF12R, 2),
+			PIC32_PINCTRL_FUNCTION(U6TX, RPF12R, 4),
+			PIC32_PINCTRL_FUNCTION(SS1OUT, RPF12R, 5),
+			PIC32_PINCTRL_FUNCTION(SS3OUT, RPF12R, 7),
+			PIC32_PINCTRL_FUNCTION(SS4OUT, RPF12R, 8),
+			PIC32_PINCTRL_FUNCTION(SS5OUT, RPF12R, 9),
+			PIC32_PINCTRL_FUNCTION(SDO6, RPF12R, 10),
+			PIC32_PINCTRL_FUNCTION(OC5, RPF12R, 11),
+			PIC32_PINCTRL_FUNCTION(OC8, RPF12R, 12),
+			PIC32_PINCTRL_FUNCTION(C1OUT, RPF12R, 14),
+			PIC32_PINCTRL_FUNCTION(REFCLKO3, RPF12R, 15)),
+	PIC32_PINCTRL_GROUP(93, F13,
+			PIC32_PINCTRL_FUNCTION(INT1, INT1R, 9),
+			PIC32_PINCTRL_FUNCTION(T4CK, T4CKR, 9),
+			PIC32_PINCTRL_FUNCTION(T9CK, T9CKR, 9),
+			PIC32_PINCTRL_FUNCTION(IC1, IC1R, 9),
+			PIC32_PINCTRL_FUNCTION(IC6, IC6R, 9),
+			PIC32_PINCTRL_FUNCTION(U3CTS, U3CTSR, 9),
+			PIC32_PINCTRL_FUNCTION(U4RX, U4RXR, 9),
+			PIC32_PINCTRL_FUNCTION(U6RX, U6RXR, 9),
+			PIC32_PINCTRL_FUNCTION(SS2IN, SS2INR, 9),
+			PIC32_PINCTRL_FUNCTION(SDI6, SDI6R, 9),
+			PIC32_PINCTRL_FUNCTION(OCFA, OCFAR, 9),
+			PIC32_PINCTRL_FUNCTION(REFCLKI3, REFCLKI3R, 9),
+			PIC32_PINCTRL_FUNCTION(U1RTS, RPF13R, 1),
+			PIC32_PINCTRL_FUNCTION(U2TX, RPF13R, 2),
+			PIC32_PINCTRL_FUNCTION(U5RTS, RPF13R, 3),
+			PIC32_PINCTRL_FUNCTION(U6TX, RPF13R, 4),
+			PIC32_PINCTRL_FUNCTION(SS2OUT, RPF13R, 6),
+			PIC32_PINCTRL_FUNCTION(SDO4, RPF13R, 8),
+			PIC32_PINCTRL_FUNCTION(SDO6, RPF13R, 10),
+			PIC32_PINCTRL_FUNCTION(OC2, RPF13R, 11),
+			PIC32_PINCTRL_FUNCTION(OC1, RPF13R, 12),
+			PIC32_PINCTRL_FUNCTION(OC9, RPF13R, 13),
+			PIC32_PINCTRL_FUNCTION(C2TX, RPF13R, 15)),
+	PIC32_PINCTRL_GROUP(96, G0,
+			PIC32_PINCTRL_FUNCTION(INT4, INT4R, 12),
+			PIC32_PINCTRL_FUNCTION(T5CK, T5CKR, 12),
+			PIC32_PINCTRL_FUNCTION(T7CK, T7CKR, 12),
+			PIC32_PINCTRL_FUNCTION(IC4, IC4R, 12),
+			PIC32_PINCTRL_FUNCTION(IC8, IC8R, 12),
+			PIC32_PINCTRL_FUNCTION(U3RX, U3RXR, 12),
+			PIC32_PINCTRL_FUNCTION(U4CTS, U4CTSR, 12),
+			PIC32_PINCTRL_FUNCTION(SDI2, SDI2R, 12),
+			PIC32_PINCTRL_FUNCTION(SDI4, SDI4R, 12),
+			PIC32_PINCTRL_FUNCTION(C1RX, C1RXR, 12),
+			PIC32_PINCTRL_FUNCTION(REFCLKI4, REFCLKI4R, 12),
+			PIC32_PINCTRL_FUNCTION(U1TX, RPG0R, 1),
+			PIC32_PINCTRL_FUNCTION(U2RTS, RPG0R, 2),
+			PIC32_PINCTRL_FUNCTION(U5TX, RPG0R, 3),
+			PIC32_PINCTRL_FUNCTION(U6RTS, RPG0R, 4),
+			PIC32_PINCTRL_FUNCTION(SDO1, RPG0R, 5),
+			PIC32_PINCTRL_FUNCTION(SDO2, RPG0R, 6),
+			PIC32_PINCTRL_FUNCTION(SDO3, RPG0R, 7),
+			PIC32_PINCTRL_FUNCTION(SDO4, RPG0R, 8),
+			PIC32_PINCTRL_FUNCTION(SDO5, RPG0R, 9),
+			PIC32_PINCTRL_FUNCTION(OC4, RPG0R, 11),
+			PIC32_PINCTRL_FUNCTION(OC7, RPG0R, 12),
+			PIC32_PINCTRL_FUNCTION(REFCLKO1, RPG0R, 15)),
+	PIC32_PINCTRL_GROUP(97, G1,
+			PIC32_PINCTRL_FUNCTION(INT3, INT3R, 12),
+			PIC32_PINCTRL_FUNCTION(T2CK, T2CKR, 12),
+			PIC32_PINCTRL_FUNCTION(T6CK, T6CKR, 12),
+			PIC32_PINCTRL_FUNCTION(IC3, IC3R, 12),
+			PIC32_PINCTRL_FUNCTION(IC7, IC7R, 12),
+			PIC32_PINCTRL_FUNCTION(U1RX, U1RXR, 12),
+			PIC32_PINCTRL_FUNCTION(U2CTS, U2CTSR, 12),
+			PIC32_PINCTRL_FUNCTION(U5RX, U5RXR, 12),
+			PIC32_PINCTRL_FUNCTION(U6CTS, U6CTSR, 12),
+			PIC32_PINCTRL_FUNCTION(SDI1, SDI1R, 12),
+			PIC32_PINCTRL_FUNCTION(SDI3, SDI3R, 12),
+			PIC32_PINCTRL_FUNCTION(SDI5, SDI5R, 12),
+			PIC32_PINCTRL_FUNCTION(SS6IN, SS6INR, 12),
+			PIC32_PINCTRL_FUNCTION(REFCLKI1, REFCLKI1R, 12),
+			PIC32_PINCTRL_FUNCTION(U3TX, RPG1R, 1),
+			PIC32_PINCTRL_FUNCTION(U4RTS, RPG1R, 2),
+			PIC32_PINCTRL_FUNCTION(SDO1, RPG1R, 5),
+			PIC32_PINCTRL_FUNCTION(SDO2, RPG1R, 6),
+			PIC32_PINCTRL_FUNCTION(SDO3, RPG1R, 7),
+			PIC32_PINCTRL_FUNCTION(SDO5, RPG1R, 9),
+			PIC32_PINCTRL_FUNCTION(SS6OUT, RPG1R, 10),
+			PIC32_PINCTRL_FUNCTION(OC3, RPG1R, 11),
+			PIC32_PINCTRL_FUNCTION(OC6, RPG1R, 12),
+			PIC32_PINCTRL_FUNCTION(REFCLKO4, RPG1R, 13),
+			PIC32_PINCTRL_FUNCTION(C2OUT, RPG1R, 14),
+			PIC32_PINCTRL_FUNCTION(C1TX, RPG1R, 15)),
+	PIC32_PINCTRL_GROUP(102, G6,
+			PIC32_PINCTRL_FUNCTION(INT2, INT2R, 1),
+			PIC32_PINCTRL_FUNCTION(T3CK, T3CKR, 1),
+			PIC32_PINCTRL_FUNCTION(T8CK, T8CKR, 1),
+			PIC32_PINCTRL_FUNCTION(IC2, IC2R, 1),
+			PIC32_PINCTRL_FUNCTION(IC5, IC5R, 1),
+			PIC32_PINCTRL_FUNCTION(IC9, IC9R, 1),
+			PIC32_PINCTRL_FUNCTION(U1CTS, U1CTSR, 1),
+			PIC32_PINCTRL_FUNCTION(U2RX, U2RXR, 1),
+			PIC32_PINCTRL_FUNCTION(U5CTS, U5CTSR, 1),
+			PIC32_PINCTRL_FUNCTION(SS1IN, SS1INR, 1),
+			PIC32_PINCTRL_FUNCTION(SS3IN, SS3INR, 1),
+			PIC32_PINCTRL_FUNCTION(SS4IN, SS4INR, 1),
+			PIC32_PINCTRL_FUNCTION(SS5IN, SS5INR, 1),
+			PIC32_PINCTRL_FUNCTION(C2RX, C2RXR, 1),
+			PIC32_PINCTRL_FUNCTION(U3RTS, RPG6R, 1),
+			PIC32_PINCTRL_FUNCTION(U4TX, RPG6R, 2),
+			PIC32_PINCTRL_FUNCTION(U6TX, RPG6R, 4),
+			PIC32_PINCTRL_FUNCTION(SS1OUT, RPG6R, 5),
+			PIC32_PINCTRL_FUNCTION(SS3OUT, RPG6R, 7),
+			PIC32_PINCTRL_FUNCTION(SS4OUT, RPG6R, 8),
+			PIC32_PINCTRL_FUNCTION(SS5OUT, RPG6R, 9),
+			PIC32_PINCTRL_FUNCTION(SDO6, RPG6R, 10),
+			PIC32_PINCTRL_FUNCTION(OC5, RPG6R, 11),
+			PIC32_PINCTRL_FUNCTION(OC8, RPG6R, 12),
+			PIC32_PINCTRL_FUNCTION(C1OUT, RPG6R, 14),
+			PIC32_PINCTRL_FUNCTION(REFCLKO3, RPG6R, 15)),
+	PIC32_PINCTRL_GROUP(103, G7,
+			PIC32_PINCTRL_FUNCTION(INT4, INT4R, 1),
+			PIC32_PINCTRL_FUNCTION(T5CK, T5CKR, 1),
+			PIC32_PINCTRL_FUNCTION(T7CK, T7CKR, 1),
+			PIC32_PINCTRL_FUNCTION(IC4, IC4R, 1),
+			PIC32_PINCTRL_FUNCTION(IC8, IC8R, 1),
+			PIC32_PINCTRL_FUNCTION(U3RX, U3RXR, 1),
+			PIC32_PINCTRL_FUNCTION(U4CTS, U4CTSR, 1),
+			PIC32_PINCTRL_FUNCTION(SDI2, SDI2R, 1),
+			PIC32_PINCTRL_FUNCTION(SDI4, SDI4R, 1),
+			PIC32_PINCTRL_FUNCTION(C1RX, C1RXR, 1),
+			PIC32_PINCTRL_FUNCTION(REFCLKI4, REFCLKI4R, 1),
+			PIC32_PINCTRL_FUNCTION(U1TX, RPG7R, 1),
+			PIC32_PINCTRL_FUNCTION(U2RTS, RPG7R, 2),
+			PIC32_PINCTRL_FUNCTION(U5TX, RPG7R, 3),
+			PIC32_PINCTRL_FUNCTION(U6RTS, RPG7R, 4),
+			PIC32_PINCTRL_FUNCTION(SDO1, RPG7R, 5),
+			PIC32_PINCTRL_FUNCTION(SDO2, RPG7R, 6),
+			PIC32_PINCTRL_FUNCTION(SDO3, RPG7R, 7),
+			PIC32_PINCTRL_FUNCTION(SDO4, RPG7R, 8),
+			PIC32_PINCTRL_FUNCTION(SDO5, RPG7R, 9),
+			PIC32_PINCTRL_FUNCTION(OC4, RPG7R, 11),
+			PIC32_PINCTRL_FUNCTION(OC7, RPG7R, 12),
+			PIC32_PINCTRL_FUNCTION(REFCLKO1, RPG7R, 15)),
+	PIC32_PINCTRL_GROUP(104, G8,
+			PIC32_PINCTRL_FUNCTION(INT3, INT3R, 1),
+			PIC32_PINCTRL_FUNCTION(T2CK, T2CKR, 1),
+			PIC32_PINCTRL_FUNCTION(T6CK, T6CKR, 1),
+			PIC32_PINCTRL_FUNCTION(IC3, IC3R, 1),
+			PIC32_PINCTRL_FUNCTION(IC7, IC7R, 1),
+			PIC32_PINCTRL_FUNCTION(U1RX, U1RXR, 1),
+			PIC32_PINCTRL_FUNCTION(U2CTS, U2CTSR, 1),
+			PIC32_PINCTRL_FUNCTION(U5RX, U5RXR, 1),
+			PIC32_PINCTRL_FUNCTION(U6CTS, U6CTSR, 1),
+			PIC32_PINCTRL_FUNCTION(SDI1, SDI1R, 1),
+			PIC32_PINCTRL_FUNCTION(SDI3, SDI3R, 1),
+			PIC32_PINCTRL_FUNCTION(SDI5, SDI5R, 1),
+			PIC32_PINCTRL_FUNCTION(SS6IN, SS6INR, 1),
+			PIC32_PINCTRL_FUNCTION(REFCLKI1, REFCLKI1R, 1),
+			PIC32_PINCTRL_FUNCTION(U3TX, RPG8R, 1),
+			PIC32_PINCTRL_FUNCTION(U4RTS, RPG8R, 2),
+			PIC32_PINCTRL_FUNCTION(SDO1, RPG8R, 5),
+			PIC32_PINCTRL_FUNCTION(SDO2, RPG8R, 6),
+			PIC32_PINCTRL_FUNCTION(SDO3, RPG8R, 7),
+			PIC32_PINCTRL_FUNCTION(SDO5, RPG8R, 9),
+			PIC32_PINCTRL_FUNCTION(SS6OUT, RPG8R, 10),
+			PIC32_PINCTRL_FUNCTION(OC3, RPG8R, 11),
+			PIC32_PINCTRL_FUNCTION(OC6, RPG8R, 12),
+			PIC32_PINCTRL_FUNCTION(REFCLKO4, RPG8R, 13),
+			PIC32_PINCTRL_FUNCTION(C2OUT, RPG8R, 14),
+			PIC32_PINCTRL_FUNCTION(C1TX, RPG8R, 15)),
+	PIC32_PINCTRL_GROUP(105, G9,
+			PIC32_PINCTRL_FUNCTION(INT1, INT1R, 1),
+			PIC32_PINCTRL_FUNCTION(T4CK, T4CKR, 1),
+			PIC32_PINCTRL_FUNCTION(T9CK, T9CKR, 1),
+			PIC32_PINCTRL_FUNCTION(IC1, IC1R, 1),
+			PIC32_PINCTRL_FUNCTION(IC6, IC6R, 1),
+			PIC32_PINCTRL_FUNCTION(U3CTS, U3CTSR, 1),
+			PIC32_PINCTRL_FUNCTION(U4RX, U4RXR, 1),
+			PIC32_PINCTRL_FUNCTION(U6RX, U6RXR, 1),
+			PIC32_PINCTRL_FUNCTION(SS2IN, SS2INR, 1),
+			PIC32_PINCTRL_FUNCTION(SDI6, SDI6R, 1),
+			PIC32_PINCTRL_FUNCTION(OCFA, OCFAR, 1),
+			PIC32_PINCTRL_FUNCTION(REFCLKI3, REFCLKI3R, 1),
+			PIC32_PINCTRL_FUNCTION(U1RTS, RPG9R, 1),
+			PIC32_PINCTRL_FUNCTION(U2TX, RPG9R, 2),
+			PIC32_PINCTRL_FUNCTION(U5RTS, RPG9R, 3),
+			PIC32_PINCTRL_FUNCTION(U6TX, RPG9R, 4),
+			PIC32_PINCTRL_FUNCTION(SS2OUT, RPG9R, 6),
+			PIC32_PINCTRL_FUNCTION(SDO4, RPG9R, 8),
+			PIC32_PINCTRL_FUNCTION(SDO6, RPG9R, 10),
+			PIC32_PINCTRL_FUNCTION(OC2, RPG9R, 11),
+			PIC32_PINCTRL_FUNCTION(OC1, RPG9R, 12),
+			PIC32_PINCTRL_FUNCTION(OC9, RPG9R, 13),
+			PIC32_PINCTRL_FUNCTION(C2TX, RPG9R, 15)),
+};
+
+static inline struct pic32_gpio_bank *irqd_to_bank(struct irq_data *d)
+{
+	return gpiochip_get_data(irq_data_get_irq_chip_data(d));
+}
+
+static inline struct pic32_gpio_bank *pctl_to_bank(struct pic32_pinctrl *pctl,
+						unsigned pin)
+{
+	return &pctl->gpio_banks[pin / PINS_PER_BANK];
+}
+
+static int pic32_pinctrl_get_groups_count(struct pinctrl_dev *pctldev)
+{
+	struct pic32_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev);
+
+	return pctl->ngroups;
+}
+
+static const char *pic32_pinctrl_get_group_name(struct pinctrl_dev *pctldev,
+						    unsigned group)
+{
+	struct pic32_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev);
+
+	return pctl->groups[group].name;
+}
+
+static int pic32_pinctrl_get_group_pins(struct pinctrl_dev *pctldev,
+					    unsigned group,
+					    const unsigned **pins,
+					    unsigned *num_pins)
+{
+	struct pic32_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev);
+
+	*pins = &pctl->groups[group].pin;
+	*num_pins = 1;
+
+	return 0;
+}
+
+static const struct pinctrl_ops pic32_pinctrl_ops = {
+	.get_groups_count = pic32_pinctrl_get_groups_count,
+	.get_group_name = pic32_pinctrl_get_group_name,
+	.get_group_pins = pic32_pinctrl_get_group_pins,
+	.dt_node_to_map = pinconf_generic_dt_node_to_map_pin,
+	.dt_free_map = pinctrl_utils_dt_free_map,
+};
+
+static int pic32_pinmux_get_functions_count(struct pinctrl_dev *pctldev)
+{
+	struct pic32_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev);
+
+	return pctl->nfunctions;
+}
+
+static const char *
+pic32_pinmux_get_function_name(struct pinctrl_dev *pctldev, unsigned func)
+{
+	struct pic32_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev);
+
+	return pctl->functions[func].name;
+}
+
+static int pic32_pinmux_get_function_groups(struct pinctrl_dev *pctldev,
+						unsigned func,
+						const char * const **groups,
+						unsigned * const num_groups)
+{
+	struct pic32_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev);
+
+	*groups = pctl->functions[func].groups;
+	*num_groups = pctl->functions[func].ngroups;
+
+	return 0;
+}
+
+static int pic32_pinmux_enable(struct pinctrl_dev *pctldev,
+				   unsigned func, unsigned group)
+{
+	struct pic32_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev);
+	const struct pic32_pin_group *pg = &pctl->groups[group];
+	const struct pic32_function *pf = &pctl->functions[func];
+	const char *fname = pf->name;
+	struct pic32_desc_function *functions = pg->functions;
+
+	while (functions->name) {
+		if (!strcmp(functions->name, fname)) {
+			dev_dbg(pctl->dev,
+				"setting function %s reg 0x%x = %d\n",
+				fname, functions->muxreg, functions->muxval);
+
+			writel(functions->muxval, pctl->reg_base + functions->muxreg);
+
+			return 0;
+		}
+
+		functions++;
+	}
+
+	dev_err(pctl->dev, "cannot mux pin %u to function %u\n", group, func);
+
+	return -EINVAL;
+}
+
+static int pic32_gpio_request_enable(struct pinctrl_dev *pctldev,
+				     struct pinctrl_gpio_range *range,
+				     unsigned offset)
+{
+	struct pic32_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev);
+	struct pic32_gpio_bank *bank = gpiochip_get_data(range->gc);
+	u32 mask = BIT(offset - bank->gpio_chip.base);
+
+	dev_dbg(pctl->dev, "requesting gpio %d in bank %d with mask 0x%x\n",
+		offset, bank->gpio_chip.base, mask);
+
+	writel(mask, bank->reg_base + PIC32_CLR(ANSEL_REG));
+
+	return 0;
+}
+
+static int pic32_gpio_direction_input(struct gpio_chip *chip,
+					  unsigned offset)
+{
+	struct pic32_gpio_bank *bank = gpiochip_get_data(chip);
+	u32 mask = BIT(offset);
+
+	writel(mask, bank->reg_base + PIC32_SET(TRIS_REG));
+
+	return 0;
+}
+
+static int pic32_gpio_get(struct gpio_chip *chip, unsigned offset)
+{
+	struct pic32_gpio_bank *bank = gpiochip_get_data(chip);
+
+	return !!(readl(bank->reg_base + PORT_REG) & BIT(offset));
+}
+
+static void pic32_gpio_set(struct gpio_chip *chip, unsigned offset,
+			       int value)
+{
+	struct pic32_gpio_bank *bank = gpiochip_get_data(chip);
+	u32 mask = BIT(offset);
+
+	if (value)
+		writel(mask, bank->reg_base + PIC32_SET(PORT_REG));
+	else
+		writel(mask, bank->reg_base + PIC32_CLR(PORT_REG));
+}
+
+static int pic32_gpio_direction_output(struct gpio_chip *chip,
+					   unsigned offset, int value)
+{
+	struct pic32_gpio_bank *bank = gpiochip_get_data(chip);
+	u32 mask = BIT(offset);
+
+	pic32_gpio_set(chip, offset, value);
+	writel(mask, bank->reg_base + PIC32_CLR(TRIS_REG));
+
+	return 0;
+}
+
+static int pic32_gpio_set_direction(struct pinctrl_dev *pctldev,
+					      struct pinctrl_gpio_range *range,
+					      unsigned offset, bool input)
+{
+	struct gpio_chip *chip = range->gc;
+
+	if (input)
+		pic32_gpio_direction_input(chip, offset);
+	else
+		pic32_gpio_direction_output(chip, offset, 0);
+
+	return 0;
+}
+
+static const struct pinmux_ops pic32_pinmux_ops = {
+	.get_functions_count = pic32_pinmux_get_functions_count,
+	.get_function_name = pic32_pinmux_get_function_name,
+	.get_function_groups = pic32_pinmux_get_function_groups,
+	.set_mux = pic32_pinmux_enable,
+	.gpio_request_enable = pic32_gpio_request_enable,
+	.gpio_set_direction = pic32_gpio_set_direction,
+};
+
+static int pic32_pinconf_get(struct pinctrl_dev *pctldev, unsigned pin,
+				 unsigned long *config)
+{
+	struct pic32_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev);
+	struct pic32_gpio_bank *bank = pctl_to_bank(pctl, pin);
+	unsigned param = pinconf_to_config_param(*config);
+	u32 mask = BIT(pin - bank->gpio_chip.base);
+	u32 arg;
+
+	switch (param) {
+	case PIN_CONFIG_BIAS_PULL_UP:
+		arg = !!(readl(bank->reg_base + CNPU_REG) & mask);
+		break;
+	case PIN_CONFIG_BIAS_PULL_DOWN:
+		arg = !!(readl(bank->reg_base + CNPD_REG) & mask);
+		break;
+	case PIN_CONFIG_MICROCHIP_DIGITAL:
+		arg = !(readl(bank->reg_base + ANSEL_REG) & mask);
+		break;
+	case PIN_CONFIG_MICROCHIP_ANALOG:
+		arg = !!(readl(bank->reg_base + ANSEL_REG) & mask);
+		break;
+	case PIN_CONFIG_DRIVE_OPEN_DRAIN:
+		arg = !!(readl(bank->reg_base + ODCU_REG) & mask);
+		break;
+	case PIN_CONFIG_INPUT_ENABLE:
+		arg = !!(readl(bank->reg_base + TRIS_REG) & mask);
+		break;
+	case PIN_CONFIG_OUTPUT:
+		arg = !(readl(bank->reg_base + TRIS_REG) & mask);
+		break;
+	default:
+		dev_err(pctl->dev, "Property %u not supported\n", param);
+		return -ENOTSUPP;
+	}
+
+	*config = pinconf_to_config_packed(param, arg);
+
+	return 0;
+}
+
+static int pic32_pinconf_set(struct pinctrl_dev *pctldev, unsigned pin,
+				 unsigned long *configs, unsigned num_configs)
+{
+	struct pic32_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev);
+	struct pic32_gpio_bank *bank = pctl_to_bank(pctl, pin);
+	unsigned param;
+	u32 arg;
+	unsigned int i;
+	u32 offset = pin - bank->gpio_chip.base;
+	u32 mask = BIT(offset);
+
+	dev_dbg(pctl->dev, "setting pin %d bank %d mask 0x%x\n",
+		pin, bank->gpio_chip.base, mask);
+
+	for (i = 0; i < num_configs; i++) {
+		param = pinconf_to_config_param(configs[i]);
+		arg = pinconf_to_config_argument(configs[i]);
+
+		switch (param) {
+		case PIN_CONFIG_BIAS_PULL_UP:
+			dev_dbg(pctl->dev, "   pullup\n");
+			writel(mask, bank->reg_base + PIC32_SET(CNPU_REG));
+			break;
+		case PIN_CONFIG_BIAS_PULL_DOWN:
+			dev_dbg(pctl->dev, "   pulldown\n");
+			writel(mask, bank->reg_base + PIC32_SET(CNPD_REG));
+			break;
+		case PIN_CONFIG_MICROCHIP_DIGITAL:
+			dev_dbg(pctl->dev, "   digital\n");
+			writel(mask, bank->reg_base + PIC32_CLR(ANSEL_REG));
+			break;
+		case PIN_CONFIG_MICROCHIP_ANALOG:
+			dev_dbg(pctl->dev, "   analog\n");
+			writel(mask, bank->reg_base + PIC32_SET(ANSEL_REG));
+			break;
+		case PIN_CONFIG_DRIVE_OPEN_DRAIN:
+			dev_dbg(pctl->dev, "   opendrain\n");
+			writel(mask, bank->reg_base + PIC32_SET(ODCU_REG));
+			break;
+		case PIN_CONFIG_INPUT_ENABLE:
+			pic32_gpio_direction_input(&bank->gpio_chip, offset);
+			break;
+		case PIN_CONFIG_OUTPUT:
+			pic32_gpio_direction_output(&bank->gpio_chip,
+						    offset, arg);
+			break;
+		default:
+			dev_err(pctl->dev, "Property %u not supported\n",
+				param);
+			return -ENOTSUPP;
+		}
+	}
+
+	return 0;
+}
+
+static const struct pinconf_ops pic32_pinconf_ops = {
+	.pin_config_get = pic32_pinconf_get,
+	.pin_config_set = pic32_pinconf_set,
+	.is_generic = true,
+};
+
+static struct pinctrl_desc pic32_pinctrl_desc = {
+	.name = "pic32-pinctrl",
+	.pctlops = &pic32_pinctrl_ops,
+	.pmxops = &pic32_pinmux_ops,
+	.confops = &pic32_pinconf_ops,
+	.owner = THIS_MODULE,
+};
+
+static int pic32_gpio_get_direction(struct gpio_chip *chip, unsigned offset)
+{
+	struct pic32_gpio_bank *bank = gpiochip_get_data(chip);
+
+	return !!(readl(bank->reg_base + TRIS_REG) & BIT(offset));
+}
+
+static void pic32_gpio_irq_ack(struct irq_data *data)
+{
+	struct pic32_gpio_bank *bank = irqd_to_bank(data);
+
+	writel(0, bank->reg_base + CNF_REG);
+}
+
+static void pic32_gpio_irq_mask(struct irq_data *data)
+{
+	struct pic32_gpio_bank *bank = irqd_to_bank(data);
+
+	writel(BIT(PIC32_CNCON_ON), bank->reg_base + PIC32_CLR(CNCON_REG));
+}
+
+static void pic32_gpio_irq_unmask(struct irq_data *data)
+{
+	struct pic32_gpio_bank *bank = irqd_to_bank(data);
+
+	writel(BIT(PIC32_CNCON_ON), bank->reg_base + PIC32_SET(CNCON_REG));
+}
+
+static unsigned int pic32_gpio_irq_startup(struct irq_data *data)
+{
+	struct gpio_chip *chip = irq_data_get_irq_chip_data(data);
+
+	pic32_gpio_direction_input(chip, data->hwirq);
+	pic32_gpio_irq_unmask(data);
+
+	return 0;
+}
+
+static int pic32_gpio_irq_set_type(struct irq_data *data, unsigned int type)
+{
+	struct pic32_gpio_bank *bank = irqd_to_bank(data);
+	u32 mask = BIT(data->hwirq);
+
+	switch (type & IRQ_TYPE_SENSE_MASK) {
+	case IRQ_TYPE_EDGE_RISING:
+		/* enable RISE */
+		writel(mask, bank->reg_base + PIC32_SET(CNEN_REG));
+		/* disable FALL */
+		writel(mask, bank->reg_base + PIC32_CLR(CNNE_REG));
+		/* enable EDGE */
+		writel(BIT(PIC32_CNCON_EDGE), bank->reg_base + PIC32_SET(CNCON_REG));
+		break;
+	case IRQ_TYPE_EDGE_FALLING:
+		/* disable RISE */
+		writel(mask, bank->reg_base + PIC32_CLR(CNEN_REG));
+		/* enable FALL */
+		writel(mask, bank->reg_base + PIC32_SET(CNNE_REG));
+		/* enable EDGE */
+		writel(BIT(PIC32_CNCON_EDGE), bank->reg_base + PIC32_SET(CNCON_REG));
+		break;
+	case IRQ_TYPE_EDGE_BOTH:
+		/* enable RISE */
+		writel(mask, bank->reg_base + PIC32_SET(CNEN_REG));
+		/* enable FALL */
+		writel(mask, bank->reg_base + PIC32_SET(CNNE_REG));
+		/* enable EDGE */
+		writel(BIT(PIC32_CNCON_EDGE), bank->reg_base + PIC32_SET(CNCON_REG));
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	irq_set_handler_locked(data, handle_edge_irq);
+
+	return 0;
+}
+
+static u32 pic32_gpio_get_pending(struct gpio_chip *gc, unsigned long status)
+{
+	struct pic32_gpio_bank *bank = gpiochip_get_data(gc);
+	u32 pending = 0;
+	u32 cnen_rise, cnne_fall;
+	u32 pin;
+
+	cnen_rise = readl(bank->reg_base + CNEN_REG);
+	cnne_fall = readl(bank->reg_base + CNNE_REG);
+
+	for_each_set_bit(pin, &status, BITS_PER_LONG) {
+		u32 mask = BIT(pin);
+
+		if ((mask & cnen_rise) || (mask & cnne_fall))
+			pending |= mask;
+	}
+
+	return pending;
+}
+
+static void pic32_gpio_irq_handler(struct irq_desc *desc)
+{
+	struct gpio_chip *gc = irq_desc_get_handler_data(desc);
+	struct pic32_gpio_bank *bank = gpiochip_get_data(gc);
+	struct irq_chip *chip = irq_desc_get_chip(desc);
+	unsigned long pending;
+	unsigned int pin;
+	u32 stat;
+
+	chained_irq_enter(chip, desc);
+
+	stat = readl(bank->reg_base + CNF_REG);
+	pending = pic32_gpio_get_pending(gc, stat);
+
+	for_each_set_bit(pin, &pending, BITS_PER_LONG)
+		generic_handle_irq(irq_linear_revmap(gc->irqdomain, pin));
+
+	chained_irq_exit(chip, desc);
+}
+
+#define GPIO_BANK(_bank, _npins)					\
+	{								\
+		.gpio_chip = {						\
+			.label = "GPIO" #_bank,				\
+			.request = gpiochip_generic_request,		\
+			.free = gpiochip_generic_free,			\
+			.get_direction = pic32_gpio_get_direction,	\
+			.direction_input = pic32_gpio_direction_input,	\
+			.direction_output = pic32_gpio_direction_output, \
+			.get = pic32_gpio_get,				\
+			.set = pic32_gpio_set,				\
+			.ngpio = _npins,				\
+			.base = GPIO_BANK_START(_bank),			\
+			.owner = THIS_MODULE,				\
+			.can_sleep = 0,					\
+		},							\
+		.irq_chip = {						\
+			.name = "GPIO" #_bank,				\
+			.irq_startup = pic32_gpio_irq_startup,	\
+			.irq_ack = pic32_gpio_irq_ack,		\
+			.irq_mask = pic32_gpio_irq_mask,		\
+			.irq_unmask = pic32_gpio_irq_unmask,		\
+			.irq_set_type = pic32_gpio_irq_set_type,	\
+		},							\
+	}
+
+static struct pic32_gpio_bank pic32_gpio_banks[] = {
+	GPIO_BANK(0, PINS_PER_BANK),
+	GPIO_BANK(1, PINS_PER_BANK),
+	GPIO_BANK(2, PINS_PER_BANK),
+	GPIO_BANK(3, PINS_PER_BANK),
+	GPIO_BANK(4, PINS_PER_BANK),
+	GPIO_BANK(5, PINS_PER_BANK),
+	GPIO_BANK(6, PINS_PER_BANK),
+	GPIO_BANK(7, PINS_PER_BANK),
+	GPIO_BANK(8, PINS_PER_BANK),
+	GPIO_BANK(9, PINS_PER_BANK),
+};
+
+static int pic32_pinctrl_probe(struct platform_device *pdev)
+{
+	struct pic32_pinctrl *pctl;
+	struct resource *res;
+	int ret;
+
+	pctl = devm_kzalloc(&pdev->dev, sizeof(*pctl), GFP_KERNEL);
+	if (!pctl)
+		return -ENOMEM;
+	pctl->dev = &pdev->dev;
+	dev_set_drvdata(&pdev->dev, pctl);
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	pctl->reg_base = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(pctl->reg_base))
+		return PTR_ERR(pctl->reg_base);
+
+	pctl->clk = devm_clk_get(&pdev->dev, NULL);
+	if (IS_ERR(pctl->clk)) {
+		ret = PTR_ERR(pctl->clk);
+		dev_err(&pdev->dev, "clk get failed\n");
+		return ret;
+	}
+
+	ret = clk_prepare_enable(pctl->clk);
+	if (ret) {
+		dev_err(&pdev->dev, "clk enable failed\n");
+		return ret;
+	}
+
+	pctl->pins = pic32_pins;
+	pctl->npins = ARRAY_SIZE(pic32_pins);
+	pctl->functions = pic32_functions;
+	pctl->nfunctions = ARRAY_SIZE(pic32_functions);
+	pctl->groups = pic32_groups;
+	pctl->ngroups = ARRAY_SIZE(pic32_groups);
+	pctl->gpio_banks = pic32_gpio_banks;
+	pctl->nbanks = ARRAY_SIZE(pic32_gpio_banks);
+
+	pic32_pinctrl_desc.pins = pctl->pins;
+	pic32_pinctrl_desc.npins = pctl->npins;
+	pic32_pinctrl_desc.custom_params = pic32_mpp_bindings;
+	pic32_pinctrl_desc.num_custom_params = ARRAY_SIZE(pic32_mpp_bindings);
+
+	pctl->pctldev = pinctrl_register(&pic32_pinctrl_desc, &pdev->dev, pctl);
+	if (IS_ERR(pctl->pctldev)) {
+		dev_err(&pdev->dev, "Failed to register pinctrl device\n");
+		return PTR_ERR(pctl->pctldev);
+	}
+
+	return 0;
+}
+
+static int pic32_gpio_probe(struct platform_device *pdev)
+{
+	struct device_node *np = pdev->dev.of_node;
+	struct pic32_gpio_bank *bank;
+	u32 id;
+	int irq, ret;
+	struct resource *res;
+
+	if (of_property_read_u32(np, "microchip,gpio-bank", &id)) {
+		dev_err(&pdev->dev, "microchip,gpio-bank property not found\n");
+		return -EINVAL;
+	}
+
+	if (id >= ARRAY_SIZE(pic32_gpio_banks)) {
+		dev_err(&pdev->dev, "invalid microchip,gpio-bank property\n");
+		return -EINVAL;
+	}
+
+	bank = &pic32_gpio_banks[id];
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	bank->reg_base = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(bank->reg_base))
+		return PTR_ERR(bank->reg_base);
+
+	irq = platform_get_irq(pdev, 0);
+	if (irq < 0) {
+		dev_err(&pdev->dev, "irq get failed\n");
+		return irq;
+	}
+
+	bank->clk = devm_clk_get(&pdev->dev, NULL);
+	if (IS_ERR(bank->clk)) {
+		ret = PTR_ERR(bank->clk);
+		dev_err(&pdev->dev, "clk get failed\n");
+		return ret;
+	}
+
+	ret = clk_prepare_enable(bank->clk);
+	if (ret) {
+		dev_err(&pdev->dev, "clk enable failed\n");
+		return ret;
+	}
+
+	bank->gpio_chip.parent = &pdev->dev;
+	bank->gpio_chip.of_node = np;
+	ret = gpiochip_add_data(&bank->gpio_chip, bank);
+	if (ret < 0) {
+		dev_err(&pdev->dev, "Failed to add GPIO chip %u: %d\n",
+			id, ret);
+		return ret;
+	}
+
+	ret = gpiochip_irqchip_add(&bank->gpio_chip, &bank->irq_chip,
+				0, handle_level_irq, IRQ_TYPE_NONE);
+	if (ret < 0) {
+		dev_err(&pdev->dev, "Failed to add IRQ chip %u: %d\n",
+			id, ret);
+		gpiochip_remove(&bank->gpio_chip);
+		return ret;
+	}
+
+	gpiochip_set_chained_irqchip(&bank->gpio_chip, &bank->irq_chip,
+				     irq, pic32_gpio_irq_handler);
+
+	return 0;
+}
+
+static const struct of_device_id pic32_pinctrl_of_match[] = {
+	{ .compatible = "microchip,pic32mzda-pinctrl", },
+	{ },
+};
+
+static struct platform_driver pic32_pinctrl_driver = {
+	.driver = {
+		.name = "pic32-pinctrl",
+		.of_match_table = pic32_pinctrl_of_match,
+		.suppress_bind_attrs = true,
+	},
+	.probe = pic32_pinctrl_probe,
+};
+
+static const struct of_device_id pic32_gpio_of_match[] = {
+	{ .compatible = "microchip,pic32mzda-gpio", },
+	{ },
+};
+
+static struct platform_driver pic32_gpio_driver = {
+	.driver = {
+		.name = "pic32-gpio",
+		.of_match_table = pic32_gpio_of_match,
+		.suppress_bind_attrs = true,
+	},
+	.probe = pic32_gpio_probe,
+};
+
+static int __init pic32_gpio_register(void)
+{
+	return platform_driver_register(&pic32_gpio_driver);
+}
+arch_initcall(pic32_gpio_register);
+
+static int __init pic32_pinctrl_register(void)
+{
+	return platform_driver_register(&pic32_pinctrl_driver);
+}
+arch_initcall(pic32_pinctrl_register);
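The driver above never does a read-modify-write on the port registers: it writes a bit mask to the SET or CLR companion of each register through the PIC32_SET()/PIC32_CLR() helpers. Those macros are defined outside this hunk; a minimal sketch, assuming the usual PIC32 hardware offsets of +0x04 (CLR), +0x08 (SET) and +0x0C (INV) from each base register:

/* Illustrative sketch only -- the real definitions live in a header
 * that is not part of this hunk. Writing 1s to these companion
 * offsets updates only the selected bits of the base register. */
#define PIC32_CLR(reg)	((reg) + 0x04)	/* write 1 to clear a bit  */
#define PIC32_SET(reg)	((reg) + 0x08)	/* write 1 to set a bit    */
#define PIC32_INV(reg)	((reg) + 0x0c)	/* write 1 to toggle a bit */

This is why pic32_gpio_set() and the pinconf helpers can update a single pin with one writel() and no locking around a read/modify/write sequence.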
diff --git a/drivers/pinctrl/pinctrl-pic32.h b/drivers/pinctrl/pinctrl-pic32.h
new file mode 100644
index 0000000..1282626
--- /dev/null
+++ b/drivers/pinctrl/pinctrl-pic32.h
@@ -0,0 +1,141 @@
+/*
+ * PIC32 pinctrl driver
+ *
+ * Joshua Henderson, <joshua.henderson@microchip.com>
+ * Copyright (C) 2015 Microchip Technology Inc.  All rights reserved.
+ *
+ * This program is free software; you can distribute it and/or modify it
+ * under the terms of the GNU General Public License (Version 2) as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * for more details.
+ */
+#ifndef PINCTRL_PINCTRL_PIC32_H
+#define PINCTRL_PINCTRL_PIC32_H
+
+/* PORT Registers */
+#define ANSEL_REG	0x00
+#define TRIS_REG	0x10
+#define PORT_REG	0x20
+#define LAT_REG		0x30
+#define ODCU_REG	0x40
+#define CNPU_REG	0x50
+#define CNPD_REG	0x60
+#define CNCON_REG	0x70
+#define CNEN_REG	0x80
+#define CNSTAT_REG	0x90
+#define CNNE_REG	0xA0
+#define CNF_REG		0xB0
+
+/* Input PPS Registers */
+#define INT1R 0x04
+#define INT2R 0x08
+#define INT3R 0x0C
+#define INT4R 0x10
+#define T2CKR 0x18
+#define T3CKR 0x1C
+#define T4CKR 0x20
+#define T5CKR 0x24
+#define T6CKR 0x28
+#define T7CKR 0x2C
+#define T8CKR 0x30
+#define T9CKR 0x34
+#define IC1R 0x38
+#define IC2R 0x3C
+#define IC3R 0x40
+#define IC4R 0x44
+#define IC5R 0x48
+#define IC6R 0x4C
+#define IC7R 0x50
+#define IC8R 0x54
+#define IC9R 0x58
+#define OCFAR 0x60
+#define U1RXR 0x68
+#define U1CTSR 0x6C
+#define U2RXR 0x70
+#define U2CTSR 0x74
+#define U3RXR 0x78
+#define U3CTSR 0x7C
+#define U4RXR 0x80
+#define U4CTSR 0x84
+#define U5RXR 0x88
+#define U5CTSR 0x8C
+#define U6RXR 0x90
+#define U6CTSR 0x94
+#define SDI1R 0x9C
+#define SS1INR 0xA0
+#define SDI2R 0xA8
+#define SS2INR 0xAC
+#define SDI3R 0xB4
+#define SS3INR 0xB8
+#define SDI4R 0xC0
+#define SS4INR 0xC4
+#define SDI5R 0xCC
+#define SS5INR 0xD0
+#define SDI6R 0xD8
+#define SS6INR 0xDC
+#define C1RXR 0xE0
+#define C2RXR 0xE4
+#define REFCLKI1R 0xE8
+#define REFCLKI3R 0xF0
+#define REFCLKI4R 0xF4
+
+/* Output PPS Registers */
+#define RPA14R 0x138
+#define RPA15R 0x13C
+#define RPB0R 0x140
+#define RPB1R 0x144
+#define RPB2R 0x148
+#define RPB3R 0x14C
+#define RPB5R 0x154
+#define RPB6R 0x158
+#define RPB7R 0x15C
+#define RPB8R 0x160
+#define RPB9R 0x164
+#define RPB10R 0x168
+#define RPB14R 0x178
+#define RPB15R 0x17C
+#define RPC1R 0x184
+#define RPC2R 0x188
+#define RPC3R 0x18C
+#define RPC4R 0x190
+#define RPC13R 0x1B4
+#define RPC14R 0x1B8
+#define RPD0R 0x1C0
+#define RPD1R 0x1C4
+#define RPD2R 0x1C8
+#define RPD3R 0x1CC
+#define RPD4R 0x1D0
+#define RPD5R 0x1D4
+#define RPD6R 0x1D8
+#define RPD7R 0x1DC
+#define RPD9R 0x1E4
+#define RPD10R 0x1E8
+#define RPD11R 0x1EC
+#define RPD12R 0x1F0
+#define RPD14R 0x1F8
+#define RPD15R 0x1FC
+#define RPE3R 0x20C
+#define RPE5R 0x214
+#define RPE8R 0x220
+#define RPE9R 0x224
+#define RPF0R 0x240
+#define RPF1R 0x244
+#define RPF2R 0x248
+#define RPF3R 0x24C
+#define RPF4R 0x250
+#define RPF5R 0x254
+#define RPF8R 0x260
+#define RPF12R 0x270
+#define RPF13R 0x274
+#define RPG0R 0x280
+#define RPG1R 0x284
+#define RPG6R 0x298
+#define RPG7R 0x29C
+#define RPG8R 0x2A0
+#define RPG9R 0x2A4
+
+#endif  /* PINCTRL_PINCTRL_PIC32_H */
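Reading the group table and this register map together: muxing an output function writes a small function code into the pin's RPxyR register, while muxing an input function writes a pin code into the peripheral's input-select register; pic32_pinmux_enable() above simply writes muxval to reg_base + muxreg in both cases. A hedged sketch of the net effect for pin G7, using the codes from the table (helper name hypothetical):

/* Illustrative sketch only: route UART1 TX out of pin G7 and feed
 * UART3 RX from the same pin; both muxval codes happen to be 1 in
 * the PIC32_PINCTRL_GROUP(103, G7, ...) entry above. */
static void example_route_uart_on_g7(void __iomem *reg_base)
{
	writel(1, reg_base + RPG7R);	/* G7 output driven by U1TX */
	writel(1, reg_base + U3RXR);	/* U3RX input taken from G7 */
}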
diff --git a/drivers/pinctrl/pinctrl-rockchip.c b/drivers/pinctrl/pinctrl-rockchip.c
index 183545a..bf032b9 100644
--- a/drivers/pinctrl/pinctrl-rockchip.c
+++ b/drivers/pinctrl/pinctrl-rockchip.c
@@ -64,6 +64,7 @@
 	RK3188,
 	RK3288,
 	RK3368,
+	RK3399,
 };
 
 /**
@@ -86,6 +87,31 @@
 };
 
 /**
+ * Drive-strength variants; values index the rockchip_perpin_drv_list array.
+ */
+enum rockchip_pin_drv_type {
+	DRV_TYPE_IO_DEFAULT = 0,
+	DRV_TYPE_IO_1V8_OR_3V0,
+	DRV_TYPE_IO_1V8_ONLY,
+	DRV_TYPE_IO_1V8_3V0_AUTO,
+	DRV_TYPE_IO_3V3_ONLY,
+	DRV_TYPE_MAX
+};
+
+/**
+ * @drv_type: drive-strength variant using rockchip_pin_drv_type
+ * @offset: if initialized to -1 it will be autocalculated; by specifying
+ *	    an initial offset value the relevant source offset is reset to
+ *	    a new value for autocalculating the following drive-strength
+ *	    registers. If the chip uses its own drv_calc_reg callback to
+ *	    compute the register offsets, this field can be ignored.
+ */
+struct rockchip_drv {
+	enum rockchip_pin_drv_type	drv_type;
+	int				offset;
+};
+
+/**
  * @reg_base: register base of the gpio bank
  * @reg_pull: optional separate register for additional pull settings
  * @clk: clock of the gpio bank
@@ -96,6 +122,7 @@
  * @name: name of the bank
  * @bank_num: number of the bank, to account for holes
  * @iomux: array describing the 4 iomux sources of the bank
+ * @drv: array describing the 4 drive strength sources of the bank
  * @valid: are all necessary informations present
  * @of_node: dt node of this bank
  * @drvdata: common pinctrl basedata
@@ -115,6 +142,7 @@
 	char				*name;
 	u8				bank_num;
 	struct rockchip_iomux		iomux[4];
+	struct rockchip_drv		drv[4];
 	bool				valid;
 	struct device_node		*of_node;
 	struct rockchip_pinctrl		*drvdata;
@@ -151,6 +179,47 @@
 		},							\
 	}
 
+#define PIN_BANK_DRV_FLAGS(id, pins, label, type0, type1, type2, type3) \
+	{								\
+		.bank_num	= id,					\
+		.nr_pins	= pins,					\
+		.name		= label,				\
+		.iomux		= {					\
+			{ .offset = -1 },				\
+			{ .offset = -1 },				\
+			{ .offset = -1 },				\
+			{ .offset = -1 },				\
+		},							\
+		.drv		= {					\
+			{ .drv_type = type0, .offset = -1 },		\
+			{ .drv_type = type1, .offset = -1 },		\
+			{ .drv_type = type2, .offset = -1 },		\
+			{ .drv_type = type3, .offset = -1 },		\
+		},							\
+	}
+
+#define PIN_BANK_IOMUX_DRV_FLAGS_OFFSET(id, pins, label, iom0, iom1,	\
+					iom2, iom3, drv0, drv1, drv2,	\
+					drv3, offset0, offset1,		\
+					offset2, offset3)		\
+	{								\
+		.bank_num	= id,					\
+		.nr_pins	= pins,					\
+		.name		= label,				\
+		.iomux		= {					\
+			{ .type = iom0, .offset = -1 },			\
+			{ .type = iom1, .offset = -1 },			\
+			{ .type = iom2, .offset = -1 },			\
+			{ .type = iom3, .offset = -1 },			\
+		},							\
+		.drv		= {					\
+			{ .drv_type = drv0, .offset = offset0 },	\
+			{ .drv_type = drv1, .offset = offset1 },	\
+			{ .drv_type = drv2, .offset = offset2 },	\
+			{ .drv_type = drv3, .offset = offset3 },	\
+		},							\
+	}
+
 /**
  */
 struct rockchip_pin_ctrl {
@@ -161,6 +230,9 @@
 	enum rockchip_pinctrl_type	type;
 	int				grf_mux_offset;
 	int				pmu_mux_offset;
+	int				grf_drv_offset;
+	int				pmu_drv_offset;
+
 	void	(*pull_calc_reg)(struct rockchip_pin_bank *bank,
 				    int pin_num, struct regmap **regmap,
 				    int *reg, u8 *bit);
@@ -705,7 +777,68 @@
 	}
 }
 
-static int rockchip_perpin_drv_list[] = { 2, 4, 8, 12 };
+#define RK3399_PULL_GRF_OFFSET		0xe040
+#define RK3399_PULL_PMU_OFFSET		0x40
+#define RK3399_DRV_3BITS_PER_PIN	3
+
+static void rk3399_calc_pull_reg_and_bit(struct rockchip_pin_bank *bank,
+					 int pin_num, struct regmap **regmap,
+					 int *reg, u8 *bit)
+{
+	struct rockchip_pinctrl *info = bank->drvdata;
+
+	/* The bank0:16 and bank1:32 pins are located in PMU */
+	if ((bank->bank_num == 0) || (bank->bank_num == 1)) {
+		*regmap = info->regmap_pmu;
+		*reg = RK3399_PULL_PMU_OFFSET;
+
+		*reg += bank->bank_num * RK3188_PULL_BANK_STRIDE;
+
+		*reg += ((pin_num / RK3188_PULL_PINS_PER_REG) * 4);
+		*bit = pin_num % RK3188_PULL_PINS_PER_REG;
+		*bit *= RK3188_PULL_BITS_PER_PIN;
+	} else {
+		*regmap = info->regmap_base;
+		*reg = RK3399_PULL_GRF_OFFSET;
+
+		/* correct the offset, as we're starting with the 3rd bank */
+		*reg -= 0x20;
+		*reg += bank->bank_num * RK3188_PULL_BANK_STRIDE;
+		*reg += ((pin_num / RK3188_PULL_PINS_PER_REG) * 4);
+
+		*bit = (pin_num % RK3188_PULL_PINS_PER_REG);
+		*bit *= RK3188_PULL_BITS_PER_PIN;
+	}
+}
+
+static void rk3399_calc_drv_reg_and_bit(struct rockchip_pin_bank *bank,
+					int pin_num, struct regmap **regmap,
+					int *reg, u8 *bit)
+{
+	struct rockchip_pinctrl *info = bank->drvdata;
+	int drv_num = (pin_num / 8);
+
+	/* The bank0:16 and bank1:32 pins are located in PMU */
+	if ((bank->bank_num == 0) || (bank->bank_num == 1))
+		*regmap = info->regmap_pmu;
+	else
+		*regmap = info->regmap_base;
+
+	*reg = bank->drv[drv_num].offset;
+	if ((bank->drv[drv_num].drv_type == DRV_TYPE_IO_1V8_3V0_AUTO) ||
+	    (bank->drv[drv_num].drv_type == DRV_TYPE_IO_3V3_ONLY))
+		*bit = (pin_num % 8) * 3;
+	else
+		*bit = (pin_num % 8) * 2;
+}
+
+static int rockchip_perpin_drv_list[DRV_TYPE_MAX][8] = {
+	{ 2, 4, 8, 12, -1, -1, -1, -1 },
+	{ 3, 6, 9, 12, -1, -1, -1, -1 },
+	{ 5, 10, 15, 20, -1, -1, -1, -1 },
+	{ 4, 6, 8, 10, 12, 14, 16, 18 },
+	{ 4, 7, 10, 13, 16, 19, 22, 26 }
+};
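For the two 3-bit drive types (the last two rows of the table above), a per-pin field that starts at bit 15 straddles a register boundary: bit 0 of the value sits in data[15] of the first register and bits 2:1 sit in temp[1:0] of the next one. A standalone sketch of that reassembly, mirroring the bit-15 case handled in rockchip_get_drive_perpin() below (helper name hypothetical):

/* Illustrative sketch only: rebuild a 3-bit drive value that is
 * split across two consecutive registers. */
static inline u32 rk3399_join_split_drv(u32 data, u32 temp)
{
	return ((data >> 15) & 0x1) | ((temp & 0x3) << 1);
}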
 
 static int rockchip_get_drive_perpin(struct rockchip_pin_bank *bank,
 				     int pin_num)
@@ -714,19 +847,74 @@
 	struct rockchip_pin_ctrl *ctrl = info->ctrl;
 	struct regmap *regmap;
 	int reg, ret;
-	u32 data;
+	u32 data, temp, rmask_bits;
 	u8 bit;
+	int drv_type = bank->drv[pin_num / 8].drv_type;
 
 	ctrl->drv_calc_reg(bank, pin_num, &regmap, &reg, &bit);
 
+	switch (drv_type) {
+	case DRV_TYPE_IO_1V8_3V0_AUTO:
+	case DRV_TYPE_IO_3V3_ONLY:
+		rmask_bits = RK3399_DRV_3BITS_PER_PIN;
+		switch (bit) {
+		case 0 ... 12:
+			/* regular case, nothing to do */
+			break;
+		case 15:
+			/*
+			 * drive-strength offset is special, as it is
+			 * spread over 2 registers
+			 */
+			ret = regmap_read(regmap, reg, &data);
+			if (ret)
+				return ret;
+
+			ret = regmap_read(regmap, reg + 0x4, &temp);
+			if (ret)
+				return ret;
+
+			/*
+			 * the bit data[15] contains bit 0 of the value
+			 * while temp[1:0] contains bits 2 and 1
+			 */
+			data >>= 15;
+			temp &= 0x3;
+			temp <<= 1;
+			data |= temp;
+
+			return rockchip_perpin_drv_list[drv_type][data];
+		case 18 ... 21:
+			/* setting fully enclosed in the second register */
+			reg += 4;
+			bit -= 16;
+			break;
+		default:
+			dev_err(info->dev, "unsupported bit: %d for pinctrl drive type: %d\n",
+				bit, drv_type);
+			return -EINVAL;
+		}
+
+		break;
+	case DRV_TYPE_IO_DEFAULT:
+	case DRV_TYPE_IO_1V8_OR_3V0:
+	case DRV_TYPE_IO_1V8_ONLY:
+		rmask_bits = RK3288_DRV_BITS_PER_PIN;
+		break;
+	default:
+		dev_err(info->dev, "unsupported pinctrl drive type: %d\n",
+			drv_type);
+		return -EINVAL;
+	}
+
 	ret = regmap_read(regmap, reg, &data);
 	if (ret)
 		return ret;
 
 	data >>= bit;
-	data &= (1 << RK3288_DRV_BITS_PER_PIN) - 1;
+	data &= (1 << rmask_bits) - 1;
 
-	return rockchip_perpin_drv_list[data];
+	return rockchip_perpin_drv_list[drv_type][data];
 }
 
 static int rockchip_set_drive_perpin(struct rockchip_pin_bank *bank,
@@ -737,16 +925,23 @@
 	struct regmap *regmap;
 	unsigned long flags;
 	int reg, ret, i;
-	u32 data, rmask;
+	u32 data, rmask, rmask_bits, temp;
 	u8 bit;
+	int drv_type = bank->drv[pin_num / 8].drv_type;
+
+	dev_dbg(info->dev, "setting drive of GPIO%d-%d to %d\n",
+		bank->bank_num, pin_num, strength);
 
 	ctrl->drv_calc_reg(bank, pin_num, &regmap, &reg, &bit);
 
 	ret = -EINVAL;
-	for (i = 0; i < ARRAY_SIZE(rockchip_perpin_drv_list); i++) {
-		if (rockchip_perpin_drv_list[i] == strength) {
+	for (i = 0; i < ARRAY_SIZE(rockchip_perpin_drv_list[drv_type]); i++) {
+		if (rockchip_perpin_drv_list[drv_type][i] == strength) {
 			ret = i;
 			break;
+		} else if (rockchip_perpin_drv_list[drv_type][i] < 0) {
+			ret = rockchip_perpin_drv_list[drv_type][i];
+			break;
 		}
 	}
 
@@ -758,8 +953,64 @@
 
 	spin_lock_irqsave(&bank->slock, flags);
 
+	switch (drv_type) {
+	case DRV_TYPE_IO_1V8_3V0_AUTO:
+	case DRV_TYPE_IO_3V3_ONLY:
+		rmask_bits = RK3399_DRV_3BITS_PER_PIN;
+		switch (bit) {
+		case 0 ... 12:
+			/* regular case, nothing to do */
+			break;
+		case 15:
+			/*
+			 * drive-strength offset is special, as it is spread
+			 * over 2 registers, the bit data[15] contains bit 0
+			 * of the value while temp[1:0] contains bits 2 and 1
+			 */
+			data = (ret & 0x1) << 15;
+			temp = (ret >> 0x1) & 0x3;
+
+			rmask = BIT(15) | BIT(31);
+			data |= BIT(31);
+			ret = regmap_update_bits(regmap, reg, rmask, data);
+			if (ret) {
+				spin_unlock_irqrestore(&bank->slock, flags);
+				return ret;
+			}
+
+			rmask = 0x3 | (0x3 << 16);
+			temp |= (0x3 << 16);
+			reg += 0x4;
+			ret = regmap_update_bits(regmap, reg, rmask, temp);
+
+			spin_unlock_irqrestore(&bank->slock, flags);
+			return ret;
+		case 18 ... 21:
+			/* setting fully enclosed in the second register */
+			reg += 4;
+			bit -= 16;
+			break;
+		default:
+			spin_unlock_irqrestore(&bank->slock, flags);
+			dev_err(info->dev, "unsupported bit: %d for pinctrl drive type: %d\n",
+				bit, drv_type);
+			return -EINVAL;
+		}
+		break;
+	case DRV_TYPE_IO_DEFAULT:
+	case DRV_TYPE_IO_1V8_OR_3V0:
+	case DRV_TYPE_IO_1V8_ONLY:
+		rmask_bits = RK3288_DRV_BITS_PER_PIN;
+		break;
+	default:
+		spin_unlock_irqrestore(&bank->slock, flags);
+		dev_err(info->dev, "unsupported pinctrl drive type: %d\n",
+			drv_type);
+		return -EINVAL;
+	}
+
 	/* enable the write to the equivalent lower bits */
-	data = ((1 << RK3288_DRV_BITS_PER_PIN) - 1) << (bit + 16);
+	data = ((1 << rmask_bits) - 1) << (bit + 16);
 	rmask = data | (data >> 16);
 	data |= (ret << bit);
 
@@ -796,6 +1047,7 @@
 	case RK3188:
 	case RK3288:
 	case RK3368:
+	case RK3399:
 		data >>= bit;
 		data &= (1 << RK3188_PULL_BITS_PER_PIN) - 1;
 
@@ -852,6 +1104,7 @@
 	case RK3188:
 	case RK3288:
 	case RK3368:
+	case RK3399:
 		spin_lock_irqsave(&bank->slock, flags);
 
 		/* enable the write to the equivalent lower bits */
@@ -1032,6 +1285,7 @@
 	case RK3188:
 	case RK3288:
 	case RK3368:
+	case RK3399:
 		return (pull != PIN_CONFIG_BIAS_PULL_PIN_DEFAULT);
 	}
 
@@ -1892,7 +2146,7 @@
 	struct device_node *np;
 	struct rockchip_pin_ctrl *ctrl;
 	struct rockchip_pin_bank *bank;
-	int grf_offs, pmu_offs, i, j;
+	int grf_offs, pmu_offs, drv_grf_offs, drv_pmu_offs, i, j;
 
 	match = of_match_node(rockchip_pinctrl_dt_match, node);
 	ctrl = (struct rockchip_pin_ctrl *)match->data;
@@ -1916,6 +2170,8 @@
 
 	grf_offs = ctrl->grf_mux_offset;
 	pmu_offs = ctrl->pmu_mux_offset;
+	drv_pmu_offs = ctrl->pmu_drv_offset;
+	drv_grf_offs = ctrl->grf_drv_offset;
 	bank = ctrl->pin_banks;
 	for (i = 0; i < ctrl->nr_banks; ++i, ++bank) {
 		int bank_pins = 0;
@@ -1925,27 +2181,39 @@
 		bank->pin_base = ctrl->nr_pins;
 		ctrl->nr_pins += bank->nr_pins;
 
-		/* calculate iomux offsets */
+		/* calculate iomux and drv offsets */
 		for (j = 0; j < 4; j++) {
 			struct rockchip_iomux *iom = &bank->iomux[j];
+			struct rockchip_drv *drv = &bank->drv[j];
 			int inc;
 
 			if (bank_pins >= bank->nr_pins)
 				break;
 
-			/* preset offset value, set new start value */
+			/* preset iomux offset value, set new start value */
 			if (iom->offset >= 0) {
 				if (iom->type & IOMUX_SOURCE_PMU)
 					pmu_offs = iom->offset;
 				else
 					grf_offs = iom->offset;
-			} else { /* set current offset */
+			} else { /* set current iomux offset */
 				iom->offset = (iom->type & IOMUX_SOURCE_PMU) ?
 							pmu_offs : grf_offs;
 			}
 
-			dev_dbg(d->dev, "bank %d, iomux %d has offset 0x%x\n",
-				 i, j, iom->offset);
+			/* preset drv offset value, set new start value */
+			if (drv->offset >= 0) {
+				if (iom->type & IOMUX_SOURCE_PMU)
+					drv_pmu_offs = drv->offset;
+				else
+					drv_grf_offs = drv->offset;
+			} else { /* set current drv offset */
+				drv->offset = (iom->type & IOMUX_SOURCE_PMU) ?
+						drv_pmu_offs : drv_grf_offs;
+			}
+
+			dev_dbg(d->dev, "bank %d, iomux %d has iom_offset 0x%x drv_offset 0x%x\n",
+				i, j, iom->offset, drv->offset);
 
 			/*
 			 * Increase offset according to iomux width.
@@ -1957,6 +2225,21 @@
 			else
 				grf_offs += inc;
 
+			/*
+			 * Increase offset according to drv width.
+			 * 3-bit drive strengths are spread over two registers.
+			 */
+			if ((drv->drv_type == DRV_TYPE_IO_1V8_3V0_AUTO) ||
+			    (drv->drv_type == DRV_TYPE_IO_3V3_ONLY))
+				inc = 8;
+			else
+				inc = 4;
+
+			if (iom->type & IOMUX_SOURCE_PMU)
+				drv_pmu_offs += inc;
+			else
+				drv_grf_offs += inc;
+
 			bank_pins += 8;
 		}
 	}
@@ -2257,6 +2540,62 @@
 		.drv_calc_reg		= rk3368_calc_drv_reg_and_bit,
 };
 
+static struct rockchip_pin_bank rk3399_pin_banks[] = {
+	PIN_BANK_IOMUX_DRV_FLAGS_OFFSET(0, 32, "gpio0", IOMUX_SOURCE_PMU,
+					IOMUX_SOURCE_PMU,
+					IOMUX_SOURCE_PMU,
+					IOMUX_SOURCE_PMU,
+					DRV_TYPE_IO_1V8_ONLY,
+					DRV_TYPE_IO_1V8_ONLY,
+					DRV_TYPE_IO_DEFAULT,
+					DRV_TYPE_IO_DEFAULT,
+					0x0,
+					0x8,
+					-1,
+					-1
+					),
+	PIN_BANK_IOMUX_DRV_FLAGS_OFFSET(1, 32, "gpio1", IOMUX_SOURCE_PMU,
+					IOMUX_SOURCE_PMU,
+					IOMUX_SOURCE_PMU,
+					IOMUX_SOURCE_PMU,
+					DRV_TYPE_IO_1V8_OR_3V0,
+					DRV_TYPE_IO_1V8_OR_3V0,
+					DRV_TYPE_IO_1V8_OR_3V0,
+					DRV_TYPE_IO_1V8_OR_3V0,
+					0x20,
+					0x28,
+					0x30,
+					0x38
+					),
+	PIN_BANK_DRV_FLAGS(2, 32, "gpio2", DRV_TYPE_IO_1V8_OR_3V0,
+			   DRV_TYPE_IO_1V8_OR_3V0,
+			   DRV_TYPE_IO_1V8_ONLY,
+			   DRV_TYPE_IO_1V8_ONLY
+			   ),
+	PIN_BANK_DRV_FLAGS(3, 32, "gpio3", DRV_TYPE_IO_3V3_ONLY,
+			   DRV_TYPE_IO_3V3_ONLY,
+			   DRV_TYPE_IO_3V3_ONLY,
+			   DRV_TYPE_IO_1V8_OR_3V0
+			   ),
+	PIN_BANK_DRV_FLAGS(4, 32, "gpio4", DRV_TYPE_IO_1V8_OR_3V0,
+			   DRV_TYPE_IO_1V8_3V0_AUTO,
+			   DRV_TYPE_IO_1V8_OR_3V0,
+			   DRV_TYPE_IO_1V8_OR_3V0
+			   ),
+};
+
+static struct rockchip_pin_ctrl rk3399_pin_ctrl = {
+		.pin_banks		= rk3399_pin_banks,
+		.nr_banks		= ARRAY_SIZE(rk3399_pin_banks),
+		.label			= "RK3399-GPIO",
+		.type			= RK3399,
+		.grf_mux_offset		= 0xe000,
+		.pmu_mux_offset		= 0x0,
+		.grf_drv_offset		= 0xe100,
+		.pmu_drv_offset		= 0x80,
+		.pull_calc_reg		= rk3399_calc_pull_reg_and_bit,
+		.drv_calc_reg		= rk3399_calc_drv_reg_and_bit,
+};
 
 static const struct of_device_id rockchip_pinctrl_dt_match[] = {
 	{ .compatible = "rockchip,rk2928-pinctrl",
@@ -2275,6 +2614,8 @@
 		.data = (void *)&rk3288_pin_ctrl },
 	{ .compatible = "rockchip,rk3368-pinctrl",
 		.data = (void *)&rk3368_pin_ctrl },
+	{ .compatible = "rockchip,rk3399-pinctrl",
+		.data = (void *)&rk3399_pin_ctrl },
 	{},
 };
 MODULE_DEVICE_TABLE(of, rockchip_pinctrl_dt_match);
diff --git a/drivers/pinctrl/pinctrl-single.c b/drivers/pinctrl/pinctrl-single.c
index d24e5f1..fb126d5 100644
--- a/drivers/pinctrl/pinctrl-single.c
+++ b/drivers/pinctrl/pinctrl-single.c
@@ -255,6 +255,13 @@
 };
 
 /*
+ * This lock class tells lockdep that the irqchip of this single pinctrl
+ * can be in a different category than its parents, so it won't report
+ * false recursion.
+ */
+static struct lock_class_key pcs_lock_class;
+
+/*
  * REVISIT: Reads and writes could eventually use regmap or something
  * generic. But at least on omaps, some mux registers are performance
  * critical as they may need to be remuxed every time before and after
@@ -1713,6 +1720,7 @@
 	irq_set_chip_data(irq, pcs_soc);
 	irq_set_chip_and_handler(irq, &pcs->chip,
 				 handle_level_irq);
+	irq_set_lockdep_class(irq, &pcs_lock_class);
 	irq_set_noprobe(irq);
 
 	return 0;
diff --git a/drivers/pinctrl/pinctrl-st.c b/drivers/pinctrl/pinctrl-st.c
index fac844a..cab66c6 100644
--- a/drivers/pinctrl/pinctrl-st.c
+++ b/drivers/pinctrl/pinctrl-st.c
@@ -985,6 +985,7 @@
 	.get_function_groups	= st_pmx_get_groups,
 	.set_mux		= st_pmx_set_mux,
 	.gpio_set_direction	= st_pmx_set_gpio_direction,
+	.strict			= true,
 };
 
 /* Pinconf  */
diff --git a/drivers/pinctrl/pinctrl-zynq.c b/drivers/pinctrl/pinctrl-zynq.c
index d57b5ec..76f1abd 100644
--- a/drivers/pinctrl/pinctrl-zynq.c
+++ b/drivers/pinctrl/pinctrl-zynq.c
@@ -590,7 +590,7 @@
 static const char * const mdio0_groups[] = {"mdio0_0_grp"};
 static const char * const mdio1_groups[] = {"mdio1_0_grp"};
 static const char * const qspi0_groups[] = {"qspi0_0_grp"};
-static const char * const qspi1_groups[] = {"qspi0_1_grp"};
+static const char * const qspi1_groups[] = {"qspi1_0_grp"};
 static const char * const qspi_fbclk_groups[] = {"qspi_fbclk_grp"};
 static const char * const qspi_cs1_groups[] = {"qspi_cs1_grp"};
 static const char * const spi0_groups[] = {"spi0_0_grp", "spi0_1_grp",
diff --git a/drivers/pinctrl/pxa/pinctrl-pxa2xx.c b/drivers/pinctrl/pxa/pinctrl-pxa2xx.c
index 216f227..f553313 100644
--- a/drivers/pinctrl/pxa/pinctrl-pxa2xx.c
+++ b/drivers/pinctrl/pxa/pinctrl-pxa2xx.c
@@ -426,7 +426,7 @@
 
 	return 0;
 }
-EXPORT_SYMBOL(pxa2xx_pinctrl_init);
+EXPORT_SYMBOL_GPL(pxa2xx_pinctrl_init);
 
 int pxa2xx_pinctrl_exit(struct platform_device *pdev)
 {
@@ -435,3 +435,4 @@
 	pinctrl_unregister(pctl->pctl_dev);
 	return 0;
 }
+EXPORT_SYMBOL_GPL(pxa2xx_pinctrl_exit);
diff --git a/drivers/pinctrl/qcom/Kconfig b/drivers/pinctrl/qcom/Kconfig
index eeac8cb..67bc70d 100644
--- a/drivers/pinctrl/qcom/Kconfig
+++ b/drivers/pinctrl/qcom/Kconfig
@@ -23,6 +23,14 @@
 	  This is the pinctrl, pinmux, pinconf and gpiolib driver for the
 	  Qualcomm TLMM block found in the Qualcomm APQ8084 platform.
 
+config PINCTRL_IPQ4019
+	tristate "Qualcomm IPQ4019 pin controller driver"
+	depends on GPIOLIB && OF
+	select PINCTRL_MSM
+	help
+	  This is the pinctrl, pinmux, pinconf and gpiolib driver for the
+	  Qualcomm TLMM block found in the Qualcomm IPQ4019 platform.
+
 config PINCTRL_IPQ8064
 	tristate "Qualcomm IPQ8064 pin controller driver"
 	depends on GPIOLIB && OF
diff --git a/drivers/pinctrl/qcom/Makefile b/drivers/pinctrl/qcom/Makefile
index dfb50a9..c964a2c 100644
--- a/drivers/pinctrl/qcom/Makefile
+++ b/drivers/pinctrl/qcom/Makefile
@@ -2,6 +2,7 @@
 obj-$(CONFIG_PINCTRL_MSM)	+= pinctrl-msm.o
 obj-$(CONFIG_PINCTRL_APQ8064)	+= pinctrl-apq8064.o
 obj-$(CONFIG_PINCTRL_APQ8084)	+= pinctrl-apq8084.o
+obj-$(CONFIG_PINCTRL_IPQ4019)	+= pinctrl-ipq4019.o
 obj-$(CONFIG_PINCTRL_IPQ8064)	+= pinctrl-ipq8064.o
 obj-$(CONFIG_PINCTRL_MSM8660)	+= pinctrl-msm8660.o
 obj-$(CONFIG_PINCTRL_MSM8960)	+= pinctrl-msm8960.o
diff --git a/drivers/pinctrl/qcom/pinctrl-ipq4019.c b/drivers/pinctrl/qcom/pinctrl-ipq4019.c
new file mode 100644
index 0000000..b5d81ce
--- /dev/null
+++ b/drivers/pinctrl/qcom/pinctrl-ipq4019.c
@@ -0,0 +1,453 @@
+/*
+ * Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pinctrl/pinctrl.h>
+
+#include "pinctrl-msm.h"
+
+static const struct pinctrl_pin_desc ipq4019_pins[] = {
+	PINCTRL_PIN(0, "GPIO_0"),
+	PINCTRL_PIN(1, "GPIO_1"),
+	PINCTRL_PIN(2, "GPIO_2"),
+	PINCTRL_PIN(3, "GPIO_3"),
+	PINCTRL_PIN(4, "GPIO_4"),
+	PINCTRL_PIN(5, "GPIO_5"),
+	PINCTRL_PIN(6, "GPIO_6"),
+	PINCTRL_PIN(7, "GPIO_7"),
+	PINCTRL_PIN(8, "GPIO_8"),
+	PINCTRL_PIN(9, "GPIO_9"),
+	PINCTRL_PIN(10, "GPIO_10"),
+	PINCTRL_PIN(11, "GPIO_11"),
+	PINCTRL_PIN(12, "GPIO_12"),
+	PINCTRL_PIN(13, "GPIO_13"),
+	PINCTRL_PIN(14, "GPIO_14"),
+	PINCTRL_PIN(15, "GPIO_15"),
+	PINCTRL_PIN(16, "GPIO_16"),
+	PINCTRL_PIN(17, "GPIO_17"),
+	PINCTRL_PIN(18, "GPIO_18"),
+	PINCTRL_PIN(19, "GPIO_19"),
+	PINCTRL_PIN(20, "GPIO_20"),
+	PINCTRL_PIN(21, "GPIO_21"),
+	PINCTRL_PIN(22, "GPIO_22"),
+	PINCTRL_PIN(23, "GPIO_23"),
+	PINCTRL_PIN(24, "GPIO_24"),
+	PINCTRL_PIN(25, "GPIO_25"),
+	PINCTRL_PIN(26, "GPIO_26"),
+	PINCTRL_PIN(27, "GPIO_27"),
+	PINCTRL_PIN(28, "GPIO_28"),
+	PINCTRL_PIN(29, "GPIO_29"),
+	PINCTRL_PIN(30, "GPIO_30"),
+	PINCTRL_PIN(31, "GPIO_31"),
+	PINCTRL_PIN(32, "GPIO_32"),
+	PINCTRL_PIN(33, "GPIO_33"),
+	PINCTRL_PIN(34, "GPIO_34"),
+	PINCTRL_PIN(35, "GPIO_35"),
+	PINCTRL_PIN(36, "GPIO_36"),
+	PINCTRL_PIN(37, "GPIO_37"),
+	PINCTRL_PIN(38, "GPIO_38"),
+	PINCTRL_PIN(39, "GPIO_39"),
+	PINCTRL_PIN(40, "GPIO_40"),
+	PINCTRL_PIN(41, "GPIO_41"),
+	PINCTRL_PIN(42, "GPIO_42"),
+	PINCTRL_PIN(43, "GPIO_43"),
+	PINCTRL_PIN(44, "GPIO_44"),
+	PINCTRL_PIN(45, "GPIO_45"),
+	PINCTRL_PIN(46, "GPIO_46"),
+	PINCTRL_PIN(47, "GPIO_47"),
+	PINCTRL_PIN(48, "GPIO_48"),
+	PINCTRL_PIN(49, "GPIO_49"),
+	PINCTRL_PIN(50, "GPIO_50"),
+	PINCTRL_PIN(51, "GPIO_51"),
+	PINCTRL_PIN(52, "GPIO_52"),
+	PINCTRL_PIN(53, "GPIO_53"),
+	PINCTRL_PIN(54, "GPIO_54"),
+	PINCTRL_PIN(55, "GPIO_55"),
+	PINCTRL_PIN(56, "GPIO_56"),
+	PINCTRL_PIN(57, "GPIO_57"),
+	PINCTRL_PIN(58, "GPIO_58"),
+	PINCTRL_PIN(59, "GPIO_59"),
+	PINCTRL_PIN(60, "GPIO_60"),
+	PINCTRL_PIN(61, "GPIO_61"),
+	PINCTRL_PIN(62, "GPIO_62"),
+	PINCTRL_PIN(63, "GPIO_63"),
+	PINCTRL_PIN(64, "GPIO_64"),
+	PINCTRL_PIN(65, "GPIO_65"),
+	PINCTRL_PIN(66, "GPIO_66"),
+	PINCTRL_PIN(67, "GPIO_67"),
+	PINCTRL_PIN(68, "GPIO_68"),
+	PINCTRL_PIN(69, "GPIO_69"),
+	PINCTRL_PIN(70, "GPIO_70"),
+	PINCTRL_PIN(71, "GPIO_71"),
+	PINCTRL_PIN(72, "GPIO_72"),
+	PINCTRL_PIN(73, "GPIO_73"),
+	PINCTRL_PIN(74, "GPIO_74"),
+	PINCTRL_PIN(75, "GPIO_75"),
+	PINCTRL_PIN(76, "GPIO_76"),
+	PINCTRL_PIN(77, "GPIO_77"),
+	PINCTRL_PIN(78, "GPIO_78"),
+	PINCTRL_PIN(79, "GPIO_79"),
+	PINCTRL_PIN(80, "GPIO_80"),
+	PINCTRL_PIN(81, "GPIO_81"),
+	PINCTRL_PIN(82, "GPIO_82"),
+	PINCTRL_PIN(83, "GPIO_83"),
+	PINCTRL_PIN(84, "GPIO_84"),
+	PINCTRL_PIN(85, "GPIO_85"),
+	PINCTRL_PIN(86, "GPIO_86"),
+	PINCTRL_PIN(87, "GPIO_87"),
+	PINCTRL_PIN(88, "GPIO_88"),
+	PINCTRL_PIN(89, "GPIO_89"),
+	PINCTRL_PIN(90, "GPIO_90"),
+	PINCTRL_PIN(91, "GPIO_91"),
+	PINCTRL_PIN(92, "GPIO_92"),
+	PINCTRL_PIN(93, "GPIO_93"),
+	PINCTRL_PIN(94, "GPIO_94"),
+	PINCTRL_PIN(95, "GPIO_95"),
+	PINCTRL_PIN(96, "GPIO_96"),
+	PINCTRL_PIN(97, "GPIO_97"),
+	PINCTRL_PIN(98, "GPIO_98"),
+	PINCTRL_PIN(99, "GPIO_99"),
+};
+
+#define DECLARE_QCA_GPIO_PINS(pin) \
+	static const unsigned int gpio##pin##_pins[] = { pin }
+DECLARE_QCA_GPIO_PINS(0);
+DECLARE_QCA_GPIO_PINS(1);
+DECLARE_QCA_GPIO_PINS(2);
+DECLARE_QCA_GPIO_PINS(3);
+DECLARE_QCA_GPIO_PINS(4);
+DECLARE_QCA_GPIO_PINS(5);
+DECLARE_QCA_GPIO_PINS(6);
+DECLARE_QCA_GPIO_PINS(7);
+DECLARE_QCA_GPIO_PINS(8);
+DECLARE_QCA_GPIO_PINS(9);
+DECLARE_QCA_GPIO_PINS(10);
+DECLARE_QCA_GPIO_PINS(11);
+DECLARE_QCA_GPIO_PINS(12);
+DECLARE_QCA_GPIO_PINS(13);
+DECLARE_QCA_GPIO_PINS(14);
+DECLARE_QCA_GPIO_PINS(15);
+DECLARE_QCA_GPIO_PINS(16);
+DECLARE_QCA_GPIO_PINS(17);
+DECLARE_QCA_GPIO_PINS(18);
+DECLARE_QCA_GPIO_PINS(19);
+DECLARE_QCA_GPIO_PINS(20);
+DECLARE_QCA_GPIO_PINS(21);
+DECLARE_QCA_GPIO_PINS(22);
+DECLARE_QCA_GPIO_PINS(23);
+DECLARE_QCA_GPIO_PINS(24);
+DECLARE_QCA_GPIO_PINS(25);
+DECLARE_QCA_GPIO_PINS(26);
+DECLARE_QCA_GPIO_PINS(27);
+DECLARE_QCA_GPIO_PINS(28);
+DECLARE_QCA_GPIO_PINS(29);
+DECLARE_QCA_GPIO_PINS(30);
+DECLARE_QCA_GPIO_PINS(31);
+DECLARE_QCA_GPIO_PINS(32);
+DECLARE_QCA_GPIO_PINS(33);
+DECLARE_QCA_GPIO_PINS(34);
+DECLARE_QCA_GPIO_PINS(35);
+DECLARE_QCA_GPIO_PINS(36);
+DECLARE_QCA_GPIO_PINS(37);
+DECLARE_QCA_GPIO_PINS(38);
+DECLARE_QCA_GPIO_PINS(39);
+DECLARE_QCA_GPIO_PINS(40);
+DECLARE_QCA_GPIO_PINS(41);
+DECLARE_QCA_GPIO_PINS(42);
+DECLARE_QCA_GPIO_PINS(43);
+DECLARE_QCA_GPIO_PINS(44);
+DECLARE_QCA_GPIO_PINS(45);
+DECLARE_QCA_GPIO_PINS(46);
+DECLARE_QCA_GPIO_PINS(47);
+DECLARE_QCA_GPIO_PINS(48);
+DECLARE_QCA_GPIO_PINS(49);
+DECLARE_QCA_GPIO_PINS(50);
+DECLARE_QCA_GPIO_PINS(51);
+DECLARE_QCA_GPIO_PINS(52);
+DECLARE_QCA_GPIO_PINS(53);
+DECLARE_QCA_GPIO_PINS(54);
+DECLARE_QCA_GPIO_PINS(55);
+DECLARE_QCA_GPIO_PINS(56);
+DECLARE_QCA_GPIO_PINS(57);
+DECLARE_QCA_GPIO_PINS(58);
+DECLARE_QCA_GPIO_PINS(59);
+DECLARE_QCA_GPIO_PINS(60);
+DECLARE_QCA_GPIO_PINS(61);
+DECLARE_QCA_GPIO_PINS(62);
+DECLARE_QCA_GPIO_PINS(63);
+DECLARE_QCA_GPIO_PINS(64);
+DECLARE_QCA_GPIO_PINS(65);
+DECLARE_QCA_GPIO_PINS(66);
+DECLARE_QCA_GPIO_PINS(67);
+DECLARE_QCA_GPIO_PINS(68);
+DECLARE_QCA_GPIO_PINS(69);
+DECLARE_QCA_GPIO_PINS(70);
+DECLARE_QCA_GPIO_PINS(71);
+DECLARE_QCA_GPIO_PINS(72);
+DECLARE_QCA_GPIO_PINS(73);
+DECLARE_QCA_GPIO_PINS(74);
+DECLARE_QCA_GPIO_PINS(75);
+DECLARE_QCA_GPIO_PINS(76);
+DECLARE_QCA_GPIO_PINS(77);
+DECLARE_QCA_GPIO_PINS(78);
+DECLARE_QCA_GPIO_PINS(79);
+DECLARE_QCA_GPIO_PINS(80);
+DECLARE_QCA_GPIO_PINS(81);
+DECLARE_QCA_GPIO_PINS(82);
+DECLARE_QCA_GPIO_PINS(83);
+DECLARE_QCA_GPIO_PINS(84);
+DECLARE_QCA_GPIO_PINS(85);
+DECLARE_QCA_GPIO_PINS(86);
+DECLARE_QCA_GPIO_PINS(87);
+DECLARE_QCA_GPIO_PINS(88);
+DECLARE_QCA_GPIO_PINS(89);
+DECLARE_QCA_GPIO_PINS(90);
+DECLARE_QCA_GPIO_PINS(91);
+DECLARE_QCA_GPIO_PINS(92);
+DECLARE_QCA_GPIO_PINS(93);
+DECLARE_QCA_GPIO_PINS(94);
+DECLARE_QCA_GPIO_PINS(95);
+DECLARE_QCA_GPIO_PINS(96);
+DECLARE_QCA_GPIO_PINS(97);
+DECLARE_QCA_GPIO_PINS(98);
+DECLARE_QCA_GPIO_PINS(99);
+
+#define FUNCTION(fname)			                \
+	[qca_mux_##fname] = {		                \
+		.name = #fname,				\
+		.groups = fname##_groups,               \
+		.ngroups = ARRAY_SIZE(fname##_groups),	\
+	}
+
+#define PINGROUP(id, f1, f2, f3, f4, f5, f6, f7, f8, f9, f10, f11, f12, f13, f14) \
+	{					        \
+		.name = "gpio" #id,			\
+		.pins = gpio##id##_pins,		\
+		.npins = (unsigned)ARRAY_SIZE(gpio##id##_pins),	\
+		.funcs = (int[]){			\
+			qca_mux_NA, /* gpio mode */	\
+			qca_mux_##f1,			\
+			qca_mux_##f2,			\
+			qca_mux_##f3,			\
+			qca_mux_##f4,			\
+			qca_mux_##f5,			\
+			qca_mux_##f6,			\
+			qca_mux_##f7,			\
+			qca_mux_##f8,			\
+			qca_mux_##f9,			\
+			qca_mux_##f10,			\
+			qca_mux_##f11,			\
+			qca_mux_##f12,			\
+			qca_mux_##f13,			\
+			qca_mux_##f14			\
+		},				        \
+		.nfuncs = 15,				\
+		.ctl_reg = 0x1000 + 0x10 * id,		\
+		.io_reg = 0x1004 + 0x10 * id,		\
+		.intr_cfg_reg = 0x1008 + 0x10 * id,	\
+		.intr_status_reg = 0x100c + 0x10 * id,	\
+		.intr_target_reg = 0x400 + 0x4 * id,	\
+		.mux_bit = 2,			\
+		.pull_bit = 0,			\
+		.drv_bit = 6,			\
+		.oe_bit = 9,			\
+		.in_bit = 0,			\
+		.out_bit = 1,			\
+		.intr_enable_bit = 0,		\
+		.intr_status_bit = 0,		\
+		.intr_target_bit = 5,		\
+		.intr_raw_status_bit = 4,	\
+		.intr_polarity_bit = 1,		\
+		.intr_detection_bit = 2,	\
+		.intr_detection_width = 2,	\
+	}
+
+enum ipq4019_functions {
+	qca_mux_gpio,
+	qca_mux_blsp_uart1,
+	qca_mux_blsp_i2c0,
+	qca_mux_blsp_i2c1,
+	qca_mux_blsp_uart0,
+	qca_mux_blsp_spi1,
+	qca_mux_blsp_spi0,
+	qca_mux_NA,
+};
+
+static const char * const gpio_groups[] = {
+	"gpio0", "gpio1", "gpio2", "gpio3", "gpio4", "gpio5", "gpio6", "gpio7",
+	"gpio8", "gpio9", "gpio10", "gpio11", "gpio12", "gpio13", "gpio14",
+	"gpio15", "gpio16", "gpio17", "gpio18", "gpio19", "gpio20", "gpio21",
+	"gpio22", "gpio23", "gpio24", "gpio25", "gpio26", "gpio27", "gpio28",
+	"gpio29", "gpio30", "gpio31", "gpio32", "gpio33", "gpio34", "gpio35",
+	"gpio36", "gpio37", "gpio38", "gpio39", "gpio40", "gpio41", "gpio42",
+	"gpio43", "gpio44", "gpio45", "gpio46", "gpio47", "gpio48", "gpio49",
+	"gpio50", "gpio51", "gpio52", "gpio53", "gpio54", "gpio55", "gpio56",
+	"gpio57", "gpio58", "gpio59", "gpio60", "gpio61", "gpio62", "gpio63",
+	"gpio64", "gpio65", "gpio66", "gpio67", "gpio68", "gpio69", "gpio70",
+	"gpio71", "gpio72", "gpio73", "gpio74", "gpio75", "gpio76", "gpio77",
+	"gpio78", "gpio79", "gpio80", "gpio81", "gpio82", "gpio83", "gpio84",
+	"gpio85", "gpio86", "gpio87", "gpio88", "gpio89", "gpio90", "gpio91",
+	"gpio92", "gpio93", "gpio94", "gpio95", "gpio96", "gpio97", "gpio98",
+	"gpio99",
+};
+
+static const char * const blsp_uart1_groups[] = {
+	"gpio8", "gpio9", "gpio10", "gpio11",
+};
+static const char * const blsp_i2c0_groups[] = {
+	"gpio10", "gpio11", "gpio20", "gpio21", "gpio58", "gpio59",
+};
+static const char * const blsp_spi0_groups[] = {
+	"gpio12", "gpio13", "gpio14", "gpio15", "gpio45",
+	"gpio54", "gpio55", "gpio56", "gpio57",
+};
+static const char * const blsp_i2c1_groups[] = {
+	"gpio12", "gpio13", "gpio34", "gpio35",
+};
+static const char * const blsp_uart0_groups[] = {
+	"gpio16", "gpio17", "gpio60", "gpio61",
+};
+static const char * const blsp_spi1_groups[] = {
+	"gpio44", "gpio45", "gpio46", "gpio47",
+};
+
+static const struct msm_function ipq4019_functions[] = {
+	FUNCTION(gpio),
+	FUNCTION(blsp_uart1),
+	FUNCTION(blsp_i2c0),
+	FUNCTION(blsp_i2c1),
+	FUNCTION(blsp_uart0),
+	FUNCTION(blsp_spi1),
+	FUNCTION(blsp_spi0),
+};
+
+static const struct msm_pingroup ipq4019_groups[] = {
+	PINGROUP(0, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	PINGROUP(1, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	PINGROUP(2, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	PINGROUP(3, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	PINGROUP(4, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	PINGROUP(5, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	PINGROUP(6, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	PINGROUP(7, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	PINGROUP(8, blsp_uart1, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	PINGROUP(9, blsp_uart1, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	PINGROUP(10, blsp_uart1, NA, NA, blsp_i2c0, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	PINGROUP(11, blsp_uart1, NA, NA, blsp_i2c0, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	PINGROUP(12, blsp_spi0, blsp_i2c1, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	PINGROUP(13, blsp_spi0, blsp_i2c1, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	PINGROUP(14, blsp_spi0, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	PINGROUP(15, blsp_spi0, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	PINGROUP(16, blsp_uart0, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	PINGROUP(17, blsp_uart0, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	PINGROUP(18, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	PINGROUP(19, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	PINGROUP(20, blsp_i2c0, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	PINGROUP(21, blsp_i2c0, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	PINGROUP(22, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	PINGROUP(23, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	PINGROUP(24, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	PINGROUP(25, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	PINGROUP(26, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	PINGROUP(27, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	PINGROUP(28, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	PINGROUP(29, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	PINGROUP(30, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	PINGROUP(31, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	PINGROUP(32, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	PINGROUP(33, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	PINGROUP(34, blsp_i2c1, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	PINGROUP(35, blsp_i2c1, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	PINGROUP(36, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	PINGROUP(37, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	PINGROUP(38, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	PINGROUP(39, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	PINGROUP(40, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	PINGROUP(41, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	PINGROUP(42, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	PINGROUP(43, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	PINGROUP(44, NA, blsp_spi1, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	PINGROUP(45, NA, blsp_spi1, blsp_spi0, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	PINGROUP(46, NA, blsp_spi1, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	PINGROUP(47, NA, blsp_spi1, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	PINGROUP(48, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	PINGROUP(49, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	PINGROUP(50, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	PINGROUP(51, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	PINGROUP(52, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	PINGROUP(53, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	PINGROUP(54, NA, blsp_spi0, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	PINGROUP(55, NA, blsp_spi0, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	PINGROUP(56, NA, blsp_spi0, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	PINGROUP(57, NA, blsp_spi0, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	PINGROUP(58, NA, NA, blsp_i2c0, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	PINGROUP(59, NA, blsp_i2c0, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	PINGROUP(60, NA, blsp_uart0, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	PINGROUP(61, NA, blsp_uart0, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	PINGROUP(62, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	PINGROUP(63, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	PINGROUP(64, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	PINGROUP(65, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	PINGROUP(66, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	PINGROUP(67, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	PINGROUP(68, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	PINGROUP(69, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+};
+
+static const struct msm_pinctrl_soc_data ipq4019_pinctrl = {
+	.pins = ipq4019_pins,
+	.npins = ARRAY_SIZE(ipq4019_pins),
+	.functions = ipq4019_functions,
+	.nfunctions = ARRAY_SIZE(ipq4019_functions),
+	.groups = ipq4019_groups,
+	.ngroups = ARRAY_SIZE(ipq4019_groups),
+	.ngpios = 70,
+};
+
+static int ipq4019_pinctrl_probe(struct platform_device *pdev)
+{
+	return msm_pinctrl_probe(pdev, &ipq4019_pinctrl);
+}
+
+static const struct of_device_id ipq4019_pinctrl_of_match[] = {
+	{ .compatible = "qcom,ipq4019-pinctrl", },
+	{ },
+};
+
+static struct platform_driver ipq4019_pinctrl_driver = {
+	.driver = {
+		.name = "ipq4019-pinctrl",
+		.of_match_table = ipq4019_pinctrl_of_match,
+	},
+	.probe = ipq4019_pinctrl_probe,
+	.remove = msm_pinctrl_remove,
+};
+
+static int __init ipq4019_pinctrl_init(void)
+{
+	return platform_driver_register(&ipq4019_pinctrl_driver);
+}
+arch_initcall(ipq4019_pinctrl_init);
+
+static void __exit ipq4019_pinctrl_exit(void)
+{
+	platform_driver_unregister(&ipq4019_pinctrl_driver);
+}
+module_exit(ipq4019_pinctrl_exit);
+
+MODULE_DESCRIPTION("Qualcomm ipq4019 pinctrl driver");
+MODULE_LICENSE("GPL v2");
+MODULE_DEVICE_TABLE(of, ipq4019_pinctrl_of_match);
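For reference, a board device tree would attach to this driver through the "qcom,ipq4019-pinctrl" compatible string and pick from the functions defined above (gpio, blsp_uart0/1, blsp_i2c0/1, blsp_spi0/1). The snippet below is only an illustrative sketch, not part of this patch: the node name, unit address, register window and the generic Qualcomm TLMM pin-configuration properties (pins, function, bias-disable) are assumptions here; the corresponding device tree binding is authoritative.

	/* Hypothetical board fragment, not part of this series */
	tlmm: pinctrl@1000000 {
		compatible = "qcom,ipq4019-pinctrl";
		reg = <0x1000000 0x300000>;	/* assumed TLMM base and size */
		gpio-controller;
		#gpio-cells = <2>;
		interrupt-controller;
		#interrupt-cells = <2>;
		/* summary interrupt and other SoC wiring omitted */

		serial0_pins: serial0-pinmux {
			/* gpio8-gpio11 carry blsp_uart1 per the PINGROUP table above */
			pins = "gpio8", "gpio9", "gpio10", "gpio11";
			function = "blsp_uart1";
			bias-disable;
		};
	};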
diff --git a/drivers/pinctrl/qcom/pinctrl-spmi-mpp.c b/drivers/pinctrl/qcom/pinctrl-spmi-mpp.c
index 2f18323..2a3e549 100644
--- a/drivers/pinctrl/qcom/pinctrl-spmi-mpp.c
+++ b/drivers/pinctrl/qcom/pinctrl-spmi-mpp.c
@@ -117,6 +117,7 @@
  * @output_enabled: Set to true if MPP output logic is enabled.
  * @input_enabled: Set to true if MPP input buffer logic is enabled.
  * @paired: Pin operates in paired mode
+ * @has_pullup: Pin supports a configurable pullup
  * @num_sources: Number of power-sources supported by this MPP.
  * @power_source: Current power-source used.
  * @amux_input: Set the source for analog input.
@@ -134,6 +135,7 @@
 	bool		output_enabled;
 	bool		input_enabled;
 	bool		paired;
+	bool		has_pullup;
 	unsigned int	num_sources;
 	unsigned int	power_source;
 	unsigned int	amux_input;
@@ -477,11 +479,14 @@
 	if (ret < 0)
 		return ret;
 
-	val = pad->pullup << PMIC_MPP_REG_PULL_SHIFT;
+	if (pad->has_pullup) {
+		val = pad->pullup << PMIC_MPP_REG_PULL_SHIFT;
 
-	ret = pmic_mpp_write(state, pad, PMIC_MPP_REG_DIG_PULL_CTL, val);
-	if (ret < 0)
-		return ret;
+		ret = pmic_mpp_write(state, pad, PMIC_MPP_REG_DIG_PULL_CTL,
+				     val);
+		if (ret < 0)
+			return ret;
+	}
 
 	val = pad->amux_input & PMIC_MPP_REG_AIN_ROUTE_MASK;
 
@@ -534,7 +539,8 @@
 		seq_printf(s, " %-7s", pmic_mpp_functions[pad->function]);
 		seq_printf(s, " vin-%d", pad->power_source);
 		seq_printf(s, " %d", pad->aout_level);
-		seq_printf(s, " %-8s", biases[pad->pullup]);
+		if (pad->has_pullup)
+			seq_printf(s, " %-8s", biases[pad->pullup]);
 		seq_printf(s, " %-4s", pad->out_value ? "high" : "low");
 		if (pad->dtest)
 			seq_printf(s, " dtest%d", pad->dtest);
@@ -748,12 +754,16 @@
 	pad->power_source = val >> PMIC_MPP_REG_VIN_SHIFT;
 	pad->power_source &= PMIC_MPP_REG_VIN_MASK;
 
-	val = pmic_mpp_read(state, pad, PMIC_MPP_REG_DIG_PULL_CTL);
-	if (val < 0)
-		return val;
+	if (subtype != PMIC_MPP_SUBTYPE_ULT_4CH_NO_ANA_OUT &&
+	    subtype != PMIC_MPP_SUBTYPE_ULT_4CH_NO_SINK) {
+		val = pmic_mpp_read(state, pad, PMIC_MPP_REG_DIG_PULL_CTL);
+		if (val < 0)
+			return val;
 
-	pad->pullup = val >> PMIC_MPP_REG_PULL_SHIFT;
-	pad->pullup &= PMIC_MPP_REG_PULL_MASK;
+		pad->pullup = val >> PMIC_MPP_REG_PULL_SHIFT;
+		pad->pullup &= PMIC_MPP_REG_PULL_MASK;
+		pad->has_pullup = true;
+	}
 
 	val = pmic_mpp_read(state, pad, PMIC_MPP_REG_AIN_CTL);
 	if (val < 0)
diff --git a/drivers/pinctrl/sh-pfc/Kconfig b/drivers/pinctrl/sh-pfc/Kconfig
index 35d6e95..415dd80 100644
--- a/drivers/pinctrl/sh-pfc/Kconfig
+++ b/drivers/pinctrl/sh-pfc/Kconfig
@@ -2,10 +2,9 @@
 # Renesas SH and SH Mobile PINCTRL drivers
 #
 
-if ARCH_SHMOBILE || SUPERH
+if ARCH_RENESAS || SUPERH
 
 config PINCTRL_SH_PFC
-	select GPIO_SH_PFC if ARCH_REQUIRE_GPIOLIB
 	select PINMUX
 	select PINCONF
 	select GENERIC_PINCONF
@@ -13,12 +12,12 @@
 	help
 	  This enables pin control drivers for SH and SH Mobile platforms
 
-config GPIO_SH_PFC
-	bool "SuperH PFC GPIO support"
-	depends on PINCTRL_SH_PFC && GPIOLIB
+config PINCTRL_SH_PFC_GPIO
+	select GPIOLIB
+	select PINCTRL_SH_PFC
+	bool
 	help
-	  This enables support for GPIOs within the SoC's pin function
-	  controller.
+	  This enables GPIO support on top of the pin control drivers for SH/SH Mobile platforms
 
 config PINCTRL_PFC_EMEV2
 	def_bool y
@@ -28,12 +27,12 @@
 config PINCTRL_PFC_R8A73A4
 	def_bool y
 	depends on ARCH_R8A73A4
-	select PINCTRL_SH_PFC
+	select PINCTRL_SH_PFC_GPIO
 
 config PINCTRL_PFC_R8A7740
 	def_bool y
 	depends on ARCH_R8A7740
-	select PINCTRL_SH_PFC
+	select PINCTRL_SH_PFC_GPIO
 
 config PINCTRL_PFC_R8A7778
 	def_bool y
@@ -73,79 +72,66 @@
 config PINCTRL_PFC_SH7203
 	def_bool y
 	depends on CPU_SUBTYPE_SH7203
-	depends on GPIOLIB
-	select PINCTRL_SH_PFC
+	select PINCTRL_SH_PFC_GPIO
 
 config PINCTRL_PFC_SH7264
 	def_bool y
 	depends on CPU_SUBTYPE_SH7264
-	depends on GPIOLIB
-	select PINCTRL_SH_PFC
+	select PINCTRL_SH_PFC_GPIO
 
 config PINCTRL_PFC_SH7269
 	def_bool y
 	depends on CPU_SUBTYPE_SH7269
-	depends on GPIOLIB
-	select PINCTRL_SH_PFC
+	select PINCTRL_SH_PFC_GPIO
 
 config PINCTRL_PFC_SH73A0
 	def_bool y
 	depends on ARCH_SH73A0
-	select PINCTRL_SH_PFC
+	select PINCTRL_SH_PFC_GPIO
 	select REGULATOR
 
 config PINCTRL_PFC_SH7720
 	def_bool y
 	depends on CPU_SUBTYPE_SH7720
-	depends on GPIOLIB
-	select PINCTRL_SH_PFC
+	select PINCTRL_SH_PFC_GPIO
 
 config PINCTRL_PFC_SH7722
 	def_bool y
 	depends on CPU_SUBTYPE_SH7722
-	depends on GPIOLIB
-	select PINCTRL_SH_PFC
+	select PINCTRL_SH_PFC_GPIO
 
 config PINCTRL_PFC_SH7723
 	def_bool y
 	depends on CPU_SUBTYPE_SH7723
-	depends on GPIOLIB
-	select PINCTRL_SH_PFC
+	select PINCTRL_SH_PFC_GPIO
 
 config PINCTRL_PFC_SH7724
 	def_bool y
 	depends on CPU_SUBTYPE_SH7724
-	depends on GPIOLIB
-	select PINCTRL_SH_PFC
+	select PINCTRL_SH_PFC_GPIO
 
 config PINCTRL_PFC_SH7734
 	def_bool y
 	depends on CPU_SUBTYPE_SH7734
-	depends on GPIOLIB
-	select PINCTRL_SH_PFC
+	select PINCTRL_SH_PFC_GPIO
 
 config PINCTRL_PFC_SH7757
 	def_bool y
 	depends on CPU_SUBTYPE_SH7757
-	depends on GPIOLIB
-	select PINCTRL_SH_PFC
+	select PINCTRL_SH_PFC_GPIO
 
 config PINCTRL_PFC_SH7785
 	def_bool y
 	depends on CPU_SUBTYPE_SH7785
-	depends on GPIOLIB
-	select PINCTRL_SH_PFC
+	select PINCTRL_SH_PFC_GPIO
 
 config PINCTRL_PFC_SH7786
 	def_bool y
 	depends on CPU_SUBTYPE_SH7786
-	depends on GPIOLIB
-	select PINCTRL_SH_PFC
+	select PINCTRL_SH_PFC_GPIO
 
 config PINCTRL_PFC_SHX3
 	def_bool y
 	depends on CPU_SUBTYPE_SHX3
-	depends on GPIOLIB
-	select PINCTRL_SH_PFC
-
+	select PINCTRL_SH_PFC_GPIO
 endif
diff --git a/drivers/pinctrl/sh-pfc/Makefile b/drivers/pinctrl/sh-pfc/Makefile
index 173305f..8a2c871 100644
--- a/drivers/pinctrl/sh-pfc/Makefile
+++ b/drivers/pinctrl/sh-pfc/Makefile
@@ -1,8 +1,5 @@
-sh-pfc-objs			= core.o pinctrl.o
-ifeq ($(CONFIG_GPIO_SH_PFC),y)
-sh-pfc-objs			+= gpio.o
-endif
-obj-$(CONFIG_PINCTRL_SH_PFC)	+= sh-pfc.o
+obj-$(CONFIG_PINCTRL_SH_PFC)	+= core.o pinctrl.o
+obj-$(CONFIG_PINCTRL_SH_PFC_GPIO)	+= gpio.o
 obj-$(CONFIG_PINCTRL_PFC_EMEV2)	+= pfc-emev2.o
 obj-$(CONFIG_PINCTRL_PFC_R8A73A4)	+= pfc-r8a73a4.o
 obj-$(CONFIG_PINCTRL_PFC_R8A7740)	+= pfc-r8a7740.o
diff --git a/drivers/pinctrl/sh-pfc/core.c b/drivers/pinctrl/sh-pfc/core.c
index 181ea98..dc3609f 100644
--- a/drivers/pinctrl/sh-pfc/core.c
+++ b/drivers/pinctrl/sh-pfc/core.c
@@ -1,5 +1,7 @@
 /*
- * SuperH Pin Function Controller support.
+ * Pin Control and GPIO driver for SuperH Pin Function Controller.
+ *
+ * Authors: Magnus Damm, Paul Mundt, Laurent Pinchart
  *
  * Copyright (C) 2008 Magnus Damm
  * Copyright (C) 2009 - 2012 Paul Mundt
@@ -17,7 +19,7 @@
 #include <linux/io.h>
 #include <linux/ioport.h>
 #include <linux/kernel.h>
-#include <linux/module.h>
+#include <linux/init.h>
 #include <linux/of.h>
 #include <linux/of_device.h>
 #include <linux/pinctrl/machine.h>
@@ -503,7 +505,6 @@
 #endif
 	{ },
 };
-MODULE_DEVICE_TABLE(of, sh_pfc_of_table);
 #endif
 
 static int sh_pfc_probe(struct platform_device *pdev)
@@ -518,7 +519,7 @@
 
 #ifdef CONFIG_OF
 	if (np)
-		info = of_match_device(sh_pfc_of_table, &pdev->dev)->data;
+		info = of_device_get_match_data(&pdev->dev);
 	else
 #endif
 		info = platid ? (const void *)platid->driver_data : NULL;
@@ -558,7 +559,7 @@
 	if (unlikely(ret != 0))
 		return ret;
 
-#ifdef CONFIG_GPIO_SH_PFC
+#ifdef CONFIG_PINCTRL_SH_PFC_GPIO
 	/*
 	 * Then the GPIO chip
 	 */
@@ -584,7 +585,7 @@
 {
 	struct sh_pfc *pfc = platform_get_drvdata(pdev);
 
-#ifdef CONFIG_GPIO_SH_PFC
+#ifdef CONFIG_PINCTRL_SH_PFC_GPIO
 	sh_pfc_unregister_gpiochip(pfc);
 #endif
 	sh_pfc_unregister_pinctrl(pfc);
@@ -632,7 +633,6 @@
 	{ "sh-pfc", 0 },
 	{ },
 };
-MODULE_DEVICE_TABLE(platform, sh_pfc_id_table);
 
 static struct platform_driver sh_pfc_driver = {
 	.probe		= sh_pfc_probe,
@@ -649,13 +649,3 @@
 	return platform_driver_register(&sh_pfc_driver);
 }
 postcore_initcall(sh_pfc_init);
-
-static void __exit sh_pfc_exit(void)
-{
-	platform_driver_unregister(&sh_pfc_driver);
-}
-module_exit(sh_pfc_exit);
-
-MODULE_AUTHOR("Magnus Damm, Paul Mundt, Laurent Pinchart");
-MODULE_DESCRIPTION("Pin Control and GPIO driver for SuperH pin function controller");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7778.c b/drivers/pinctrl/sh-pfc/pfc-r8a7778.c
index ad09a67..411d088 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a7778.c
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a7778.c
@@ -561,82 +561,82 @@
 	PINMUX_SINGLE(AVS2),
 
 	/* IPSR0 */
-	PINMUX_IPSR_DATA(IP0_1_0,	PRESETOUT),
-	PINMUX_IPSR_DATA(IP0_1_0,	PWM1),
+	PINMUX_IPSR_GPSR(IP0_1_0,	PRESETOUT),
+	PINMUX_IPSR_GPSR(IP0_1_0,	PWM1),
 
-	PINMUX_IPSR_DATA(IP0_4_2,	AUDATA0),
-	PINMUX_IPSR_DATA(IP0_4_2,	ARM_TRACEDATA_0),
+	PINMUX_IPSR_GPSR(IP0_4_2,	AUDATA0),
+	PINMUX_IPSR_GPSR(IP0_4_2,	ARM_TRACEDATA_0),
 	PINMUX_IPSR_MSEL(IP0_4_2,	GPSCLK_C,	SEL_GPS_C),
-	PINMUX_IPSR_DATA(IP0_4_2,	USB_OVC0),
-	PINMUX_IPSR_DATA(IP0_4_2,	TX2_E),
+	PINMUX_IPSR_GPSR(IP0_4_2,	USB_OVC0),
+	PINMUX_IPSR_GPSR(IP0_4_2,	TX2_E),
 	PINMUX_IPSR_MSEL(IP0_4_2,	SDA2_B,		SEL_I2C2_B),
 
-	PINMUX_IPSR_DATA(IP0_7_5,	AUDATA1),
-	PINMUX_IPSR_DATA(IP0_7_5,	ARM_TRACEDATA_1),
+	PINMUX_IPSR_GPSR(IP0_7_5,	AUDATA1),
+	PINMUX_IPSR_GPSR(IP0_7_5,	ARM_TRACEDATA_1),
 	PINMUX_IPSR_MSEL(IP0_7_5,	GPSIN_C,	SEL_GPS_C),
-	PINMUX_IPSR_DATA(IP0_7_5,	USB_OVC1),
+	PINMUX_IPSR_GPSR(IP0_7_5,	USB_OVC1),
 	PINMUX_IPSR_MSEL(IP0_7_5,	RX2_E,		SEL_SCIF2_E),
 	PINMUX_IPSR_MSEL(IP0_7_5,	SCL2_B,		SEL_I2C2_B),
 
 	PINMUX_IPSR_MSEL(IP0_11_8,	SD1_DAT2_A,	SEL_SD1_A),
-	PINMUX_IPSR_DATA(IP0_11_8,	MMC_D2),
-	PINMUX_IPSR_DATA(IP0_11_8,	BS),
-	PINMUX_IPSR_DATA(IP0_11_8,	ATADIR0_A),
-	PINMUX_IPSR_DATA(IP0_11_8,	SDSELF_A),
-	PINMUX_IPSR_DATA(IP0_11_8,	PWM4_B),
+	PINMUX_IPSR_GPSR(IP0_11_8,	MMC_D2),
+	PINMUX_IPSR_GPSR(IP0_11_8,	BS),
+	PINMUX_IPSR_GPSR(IP0_11_8,	ATADIR0_A),
+	PINMUX_IPSR_GPSR(IP0_11_8,	SDSELF_A),
+	PINMUX_IPSR_GPSR(IP0_11_8,	PWM4_B),
 
 	PINMUX_IPSR_MSEL(IP0_14_12,	SD1_DAT3_A,	SEL_SD1_A),
-	PINMUX_IPSR_DATA(IP0_14_12,	MMC_D3),
-	PINMUX_IPSR_DATA(IP0_14_12,	A0),
-	PINMUX_IPSR_DATA(IP0_14_12,	ATAG0_A),
+	PINMUX_IPSR_GPSR(IP0_14_12,	MMC_D3),
+	PINMUX_IPSR_GPSR(IP0_14_12,	A0),
+	PINMUX_IPSR_GPSR(IP0_14_12,	ATAG0_A),
 	PINMUX_IPSR_MSEL(IP0_14_12,	REMOCON_B,	SEL_REMOCON_B),
 
-	PINMUX_IPSR_DATA(IP0_15,	A4),
-	PINMUX_IPSR_DATA(IP0_16,	A5),
-	PINMUX_IPSR_DATA(IP0_17,	A6),
-	PINMUX_IPSR_DATA(IP0_18,	A7),
-	PINMUX_IPSR_DATA(IP0_19,	A8),
-	PINMUX_IPSR_DATA(IP0_20,	A9),
-	PINMUX_IPSR_DATA(IP0_21,	A10),
-	PINMUX_IPSR_DATA(IP0_22,	A11),
-	PINMUX_IPSR_DATA(IP0_23,	A12),
-	PINMUX_IPSR_DATA(IP0_24,	A13),
-	PINMUX_IPSR_DATA(IP0_25,	A14),
-	PINMUX_IPSR_DATA(IP0_26,	A15),
-	PINMUX_IPSR_DATA(IP0_27,	A16),
-	PINMUX_IPSR_DATA(IP0_28,	A17),
-	PINMUX_IPSR_DATA(IP0_29,	A18),
-	PINMUX_IPSR_DATA(IP0_30,	A19),
+	PINMUX_IPSR_GPSR(IP0_15,	A4),
+	PINMUX_IPSR_GPSR(IP0_16,	A5),
+	PINMUX_IPSR_GPSR(IP0_17,	A6),
+	PINMUX_IPSR_GPSR(IP0_18,	A7),
+	PINMUX_IPSR_GPSR(IP0_19,	A8),
+	PINMUX_IPSR_GPSR(IP0_20,	A9),
+	PINMUX_IPSR_GPSR(IP0_21,	A10),
+	PINMUX_IPSR_GPSR(IP0_22,	A11),
+	PINMUX_IPSR_GPSR(IP0_23,	A12),
+	PINMUX_IPSR_GPSR(IP0_24,	A13),
+	PINMUX_IPSR_GPSR(IP0_25,	A14),
+	PINMUX_IPSR_GPSR(IP0_26,	A15),
+	PINMUX_IPSR_GPSR(IP0_27,	A16),
+	PINMUX_IPSR_GPSR(IP0_28,	A17),
+	PINMUX_IPSR_GPSR(IP0_29,	A18),
+	PINMUX_IPSR_GPSR(IP0_30,	A19),
 
 	/* IPSR1 */
-	PINMUX_IPSR_DATA(IP1_0,		A20),
+	PINMUX_IPSR_GPSR(IP1_0,		A20),
 	PINMUX_IPSR_MSEL(IP1_0,		HSPI_CS1_B,	SEL_HSPI1_B),
 
-	PINMUX_IPSR_DATA(IP1_1,		A21),
+	PINMUX_IPSR_GPSR(IP1_1,		A21),
 	PINMUX_IPSR_MSEL(IP1_1,		HSPI_CLK1_B,	SEL_HSPI1_B),
 
-	PINMUX_IPSR_DATA(IP1_4_2,	A22),
+	PINMUX_IPSR_GPSR(IP1_4_2,	A22),
 	PINMUX_IPSR_MSEL(IP1_4_2,	HRTS0_B,	SEL_HSCIF0_B),
 	PINMUX_IPSR_MSEL(IP1_4_2,	RX2_B,		SEL_SCIF2_B),
 	PINMUX_IPSR_MSEL(IP1_4_2,	DREQ2_A,	SEL_DREQ2_A),
 
-	PINMUX_IPSR_DATA(IP1_7_5,	A23),
-	PINMUX_IPSR_DATA(IP1_7_5,	HTX0_B),
-	PINMUX_IPSR_DATA(IP1_7_5,	TX2_B),
-	PINMUX_IPSR_DATA(IP1_7_5,	DACK2_A),
+	PINMUX_IPSR_GPSR(IP1_7_5,	A23),
+	PINMUX_IPSR_GPSR(IP1_7_5,	HTX0_B),
+	PINMUX_IPSR_GPSR(IP1_7_5,	TX2_B),
+	PINMUX_IPSR_GPSR(IP1_7_5,	DACK2_A),
 	PINMUX_IPSR_MSEL(IP1_7_5,	TS_SDEN0_A,	SEL_TSIF0_A),
 
 	PINMUX_IPSR_MSEL(IP1_10_8,	SD1_CD_A,	SEL_SD1_A),
-	PINMUX_IPSR_DATA(IP1_10_8,	MMC_D6),
-	PINMUX_IPSR_DATA(IP1_10_8,	A24),
+	PINMUX_IPSR_GPSR(IP1_10_8,	MMC_D6),
+	PINMUX_IPSR_GPSR(IP1_10_8,	A24),
 	PINMUX_IPSR_MSEL(IP1_10_8,	DREQ1_A,	SEL_DREQ1_A),
 	PINMUX_IPSR_MSEL(IP1_10_8,	HRX0_B,		SEL_HSCIF0_B),
 	PINMUX_IPSR_MSEL(IP1_10_8,	TS_SPSYNC0_A,	SEL_TSIF0_A),
 
 	PINMUX_IPSR_MSEL(IP1_14_11,	SD1_WP_A,	SEL_SD1_A),
-	PINMUX_IPSR_DATA(IP1_14_11,	MMC_D7),
-	PINMUX_IPSR_DATA(IP1_14_11,	A25),
-	PINMUX_IPSR_DATA(IP1_14_11,	DACK1_A),
+	PINMUX_IPSR_GPSR(IP1_14_11,	MMC_D7),
+	PINMUX_IPSR_GPSR(IP1_14_11,	A25),
+	PINMUX_IPSR_GPSR(IP1_14_11,	DACK1_A),
 	PINMUX_IPSR_MSEL(IP1_14_11,	HCTS0_B,	SEL_HSCIF0_B),
 	PINMUX_IPSR_MSEL(IP1_14_11,	RX3_C,		SEL_SCIF3_C),
 	PINMUX_IPSR_MSEL(IP1_14_11,	TS_SDAT0_A,	SEL_TSIF0_A),
@@ -654,54 +654,54 @@
 	PINMUX_IPSR_NOGM(IP1_20_18,	SDA2_A,		SEL_I2C2_A),
 	PINMUX_IPSR_NOGM(IP1_20_18,	SCK2_B,		SEL_SCIF2_B),
 
-	PINMUX_IPSR_DATA(IP1_23_21,	MMC_D5),
-	PINMUX_IPSR_DATA(IP1_23_21,	ATADIR0_B),
-	PINMUX_IPSR_DATA(IP1_23_21,	RD_WR),
+	PINMUX_IPSR_GPSR(IP1_23_21,	MMC_D5),
+	PINMUX_IPSR_GPSR(IP1_23_21,	ATADIR0_B),
+	PINMUX_IPSR_GPSR(IP1_23_21,	RD_WR),
 
-	PINMUX_IPSR_DATA(IP1_24,	WE1),
-	PINMUX_IPSR_DATA(IP1_24,	ATAWR0_B),
+	PINMUX_IPSR_GPSR(IP1_24,	WE1),
+	PINMUX_IPSR_GPSR(IP1_24,	ATAWR0_B),
 
 	PINMUX_IPSR_MSEL(IP1_27_25,	SSI_WS1_B,	SEL_SSI1_B),
-	PINMUX_IPSR_DATA(IP1_27_25,	EX_CS0),
+	PINMUX_IPSR_GPSR(IP1_27_25,	EX_CS0),
 	PINMUX_IPSR_MSEL(IP1_27_25,	SCL2_A,		SEL_I2C2_A),
-	PINMUX_IPSR_DATA(IP1_27_25,	TX3_C),
+	PINMUX_IPSR_GPSR(IP1_27_25,	TX3_C),
 	PINMUX_IPSR_MSEL(IP1_27_25,	TS_SCK0_A,	SEL_TSIF0_A),
 
-	PINMUX_IPSR_DATA(IP1_29_28,	EX_CS1),
-	PINMUX_IPSR_DATA(IP1_29_28,	MMC_D4),
+	PINMUX_IPSR_GPSR(IP1_29_28,	EX_CS1),
+	PINMUX_IPSR_GPSR(IP1_29_28,	MMC_D4),
 
 	/* IPSR2 */
-	PINMUX_IPSR_DATA(IP2_2_0,	SD1_CLK_A),
-	PINMUX_IPSR_DATA(IP2_2_0,	MMC_CLK),
-	PINMUX_IPSR_DATA(IP2_2_0,	ATACS00),
-	PINMUX_IPSR_DATA(IP2_2_0,	EX_CS2),
+	PINMUX_IPSR_GPSR(IP2_2_0,	SD1_CLK_A),
+	PINMUX_IPSR_GPSR(IP2_2_0,	MMC_CLK),
+	PINMUX_IPSR_GPSR(IP2_2_0,	ATACS00),
+	PINMUX_IPSR_GPSR(IP2_2_0,	EX_CS2),
 
 	PINMUX_IPSR_MSEL(IP2_5_3,	SD1_CMD_A,	SEL_SD1_A),
-	PINMUX_IPSR_DATA(IP2_5_3,	MMC_CMD),
-	PINMUX_IPSR_DATA(IP2_5_3,	ATACS10),
-	PINMUX_IPSR_DATA(IP2_5_3,	EX_CS3),
+	PINMUX_IPSR_GPSR(IP2_5_3,	MMC_CMD),
+	PINMUX_IPSR_GPSR(IP2_5_3,	ATACS10),
+	PINMUX_IPSR_GPSR(IP2_5_3,	EX_CS3),
 
 	PINMUX_IPSR_MSEL(IP2_8_6,	SD1_DAT0_A,	SEL_SD1_A),
-	PINMUX_IPSR_DATA(IP2_8_6,	MMC_D0),
-	PINMUX_IPSR_DATA(IP2_8_6,	ATARD0),
-	PINMUX_IPSR_DATA(IP2_8_6,	EX_CS4),
+	PINMUX_IPSR_GPSR(IP2_8_6,	MMC_D0),
+	PINMUX_IPSR_GPSR(IP2_8_6,	ATARD0),
+	PINMUX_IPSR_GPSR(IP2_8_6,	EX_CS4),
 	PINMUX_IPSR_MSEL(IP2_8_6,	EX_WAIT1_A,	SEL_WAIT1_A),
 
 	PINMUX_IPSR_MSEL(IP2_11_9,	SD1_DAT1_A,	SEL_SD1_A),
-	PINMUX_IPSR_DATA(IP2_11_9,	MMC_D1),
-	PINMUX_IPSR_DATA(IP2_11_9,	ATAWR0_A),
-	PINMUX_IPSR_DATA(IP2_11_9,	EX_CS5),
+	PINMUX_IPSR_GPSR(IP2_11_9,	MMC_D1),
+	PINMUX_IPSR_GPSR(IP2_11_9,	ATAWR0_A),
+	PINMUX_IPSR_GPSR(IP2_11_9,	EX_CS5),
 	PINMUX_IPSR_MSEL(IP2_11_9,	EX_WAIT2_A,	SEL_WAIT2_A),
 
 	PINMUX_IPSR_MSEL(IP2_13_12,	DREQ0_A,	SEL_DREQ0_A),
 	PINMUX_IPSR_MSEL(IP2_13_12,	RX3_A,		SEL_SCIF3_A),
 
-	PINMUX_IPSR_DATA(IP2_16_14,	DACK0),
-	PINMUX_IPSR_DATA(IP2_16_14,	TX3_A),
-	PINMUX_IPSR_DATA(IP2_16_14,	DRACK0),
+	PINMUX_IPSR_GPSR(IP2_16_14,	DACK0),
+	PINMUX_IPSR_GPSR(IP2_16_14,	TX3_A),
+	PINMUX_IPSR_GPSR(IP2_16_14,	DRACK0),
 
-	PINMUX_IPSR_DATA(IP2_17,	EX_WAIT0),
-	PINMUX_IPSR_DATA(IP2_17,	PWM0_C),
+	PINMUX_IPSR_GPSR(IP2_17,	EX_WAIT0),
+	PINMUX_IPSR_GPSR(IP2_17,	PWM0_C),
 
 	PINMUX_IPSR_NOGP(IP2_18,	D0),
 	PINMUX_IPSR_NOGP(IP2_19,	D1),
@@ -716,33 +716,33 @@
 	PINMUX_IPSR_NOGP(IP2_28,	D10),
 	PINMUX_IPSR_NOGP(IP2_29,	D11),
 
-	PINMUX_IPSR_DATA(IP2_30,	RD_WR_B),
-	PINMUX_IPSR_DATA(IP2_30,	IRQ0),
+	PINMUX_IPSR_GPSR(IP2_30,	RD_WR_B),
+	PINMUX_IPSR_GPSR(IP2_30,	IRQ0),
 
-	PINMUX_IPSR_DATA(IP2_31,	MLB_CLK),
+	PINMUX_IPSR_GPSR(IP2_31,	MLB_CLK),
 	PINMUX_IPSR_MSEL(IP2_31,	IRQ1_A,		SEL_IRQ1_A),
 
 	/* IPSR3 */
-	PINMUX_IPSR_DATA(IP3_1_0,	MLB_SIG),
+	PINMUX_IPSR_GPSR(IP3_1_0,	MLB_SIG),
 	PINMUX_IPSR_MSEL(IP3_1_0,	RX5_B,		SEL_SCIF5_B),
 	PINMUX_IPSR_MSEL(IP3_1_0,	SDA3_A,		SEL_I2C3_A),
 	PINMUX_IPSR_MSEL(IP3_1_0,	IRQ2_A,		SEL_IRQ2_A),
 
-	PINMUX_IPSR_DATA(IP3_4_2,	MLB_DAT),
-	PINMUX_IPSR_DATA(IP3_4_2,	TX5_B),
+	PINMUX_IPSR_GPSR(IP3_4_2,	MLB_DAT),
+	PINMUX_IPSR_GPSR(IP3_4_2,	TX5_B),
 	PINMUX_IPSR_MSEL(IP3_4_2,	SCL3_A,		SEL_I2C3_A),
 	PINMUX_IPSR_MSEL(IP3_4_2,	IRQ3_A,		SEL_IRQ3_A),
-	PINMUX_IPSR_DATA(IP3_4_2,	SDSELF_B),
+	PINMUX_IPSR_GPSR(IP3_4_2,	SDSELF_B),
 
 	PINMUX_IPSR_MSEL(IP3_7_5,	SD1_CMD_B,	SEL_SD1_B),
-	PINMUX_IPSR_DATA(IP3_7_5,	SCIF_CLK),
-	PINMUX_IPSR_DATA(IP3_7_5,	AUDIO_CLKOUT_B),
+	PINMUX_IPSR_GPSR(IP3_7_5,	SCIF_CLK),
+	PINMUX_IPSR_GPSR(IP3_7_5,	AUDIO_CLKOUT_B),
 	PINMUX_IPSR_MSEL(IP3_7_5,	CAN_CLK_B,	SEL_CANCLK_B),
 	PINMUX_IPSR_MSEL(IP3_7_5,	SDA3_B,		SEL_I2C3_B),
 
-	PINMUX_IPSR_DATA(IP3_9_8,	SD1_CLK_B),
-	PINMUX_IPSR_DATA(IP3_9_8,	HTX0_A),
-	PINMUX_IPSR_DATA(IP3_9_8,	TX0_A),
+	PINMUX_IPSR_GPSR(IP3_9_8,	SD1_CLK_B),
+	PINMUX_IPSR_GPSR(IP3_9_8,	HTX0_A),
+	PINMUX_IPSR_GPSR(IP3_9_8,	TX0_A),
 
 	PINMUX_IPSR_MSEL(IP3_12_10,	SD1_DAT0_B,	SEL_SD1_B),
 	PINMUX_IPSR_MSEL(IP3_12_10,	HRX0_A,		SEL_HSCIF0_A),
@@ -750,513 +750,513 @@
 
 	PINMUX_IPSR_MSEL(IP3_15_13,	SD1_DAT1_B,	SEL_SD1_B),
 	PINMUX_IPSR_MSEL(IP3_15_13,	HSCK0,		SEL_HSCIF0_A),
-	PINMUX_IPSR_DATA(IP3_15_13,	SCK0),
+	PINMUX_IPSR_GPSR(IP3_15_13,	SCK0),
 	PINMUX_IPSR_MSEL(IP3_15_13,	SCL3_B,		SEL_I2C3_B),
 
 	PINMUX_IPSR_MSEL(IP3_18_16,	SD1_DAT2_B,	SEL_SD1_B),
 	PINMUX_IPSR_MSEL(IP3_18_16,	HCTS0_A,	SEL_HSCIF0_A),
-	PINMUX_IPSR_DATA(IP3_18_16,	CTS0),
+	PINMUX_IPSR_GPSR(IP3_18_16,	CTS0),
 
 	PINMUX_IPSR_MSEL(IP3_20_19,	SD1_DAT3_B,	SEL_SD1_B),
 	PINMUX_IPSR_MSEL(IP3_20_19,	HRTS0_A,	SEL_HSCIF0_A),
-	PINMUX_IPSR_DATA(IP3_20_19,	RTS0),
+	PINMUX_IPSR_GPSR(IP3_20_19,	RTS0),
 
-	PINMUX_IPSR_DATA(IP3_23_21,	SSI_SCK4),
-	PINMUX_IPSR_DATA(IP3_23_21,	DU0_DR0),
-	PINMUX_IPSR_DATA(IP3_23_21,	LCDOUT0),
-	PINMUX_IPSR_DATA(IP3_23_21,	AUDATA2),
-	PINMUX_IPSR_DATA(IP3_23_21,	ARM_TRACEDATA_2),
+	PINMUX_IPSR_GPSR(IP3_23_21,	SSI_SCK4),
+	PINMUX_IPSR_GPSR(IP3_23_21,	DU0_DR0),
+	PINMUX_IPSR_GPSR(IP3_23_21,	LCDOUT0),
+	PINMUX_IPSR_GPSR(IP3_23_21,	AUDATA2),
+	PINMUX_IPSR_GPSR(IP3_23_21,	ARM_TRACEDATA_2),
 	PINMUX_IPSR_MSEL(IP3_23_21,	SDA3_C,		SEL_I2C3_C),
-	PINMUX_IPSR_DATA(IP3_23_21,	ADICHS1),
+	PINMUX_IPSR_GPSR(IP3_23_21,	ADICHS1),
 	PINMUX_IPSR_MSEL(IP3_23_21,	TS_SDEN0_B,	SEL_TSIF0_B),
 
-	PINMUX_IPSR_DATA(IP3_26_24,	SSI_WS4),
-	PINMUX_IPSR_DATA(IP3_26_24,	DU0_DR1),
-	PINMUX_IPSR_DATA(IP3_26_24,	LCDOUT1),
-	PINMUX_IPSR_DATA(IP3_26_24,	AUDATA3),
-	PINMUX_IPSR_DATA(IP3_26_24,	ARM_TRACEDATA_3),
+	PINMUX_IPSR_GPSR(IP3_26_24,	SSI_WS4),
+	PINMUX_IPSR_GPSR(IP3_26_24,	DU0_DR1),
+	PINMUX_IPSR_GPSR(IP3_26_24,	LCDOUT1),
+	PINMUX_IPSR_GPSR(IP3_26_24,	AUDATA3),
+	PINMUX_IPSR_GPSR(IP3_26_24,	ARM_TRACEDATA_3),
 	PINMUX_IPSR_MSEL(IP3_26_24,	SCL3_C,		SEL_I2C3_C),
-	PINMUX_IPSR_DATA(IP3_26_24,	ADICHS2),
+	PINMUX_IPSR_GPSR(IP3_26_24,	ADICHS2),
 	PINMUX_IPSR_MSEL(IP3_26_24,	TS_SPSYNC0_B,	SEL_TSIF0_B),
 
-	PINMUX_IPSR_DATA(IP3_27,	DU0_DR2),
-	PINMUX_IPSR_DATA(IP3_27,	LCDOUT2),
+	PINMUX_IPSR_GPSR(IP3_27,	DU0_DR2),
+	PINMUX_IPSR_GPSR(IP3_27,	LCDOUT2),
 
-	PINMUX_IPSR_DATA(IP3_28,	DU0_DR3),
-	PINMUX_IPSR_DATA(IP3_28,	LCDOUT3),
+	PINMUX_IPSR_GPSR(IP3_28,	DU0_DR3),
+	PINMUX_IPSR_GPSR(IP3_28,	LCDOUT3),
 
-	PINMUX_IPSR_DATA(IP3_29,	DU0_DR4),
-	PINMUX_IPSR_DATA(IP3_29,	LCDOUT4),
+	PINMUX_IPSR_GPSR(IP3_29,	DU0_DR4),
+	PINMUX_IPSR_GPSR(IP3_29,	LCDOUT4),
 
-	PINMUX_IPSR_DATA(IP3_30,	DU0_DR5),
-	PINMUX_IPSR_DATA(IP3_30,	LCDOUT5),
+	PINMUX_IPSR_GPSR(IP3_30,	DU0_DR5),
+	PINMUX_IPSR_GPSR(IP3_30,	LCDOUT5),
 
-	PINMUX_IPSR_DATA(IP3_31,	DU0_DR6),
-	PINMUX_IPSR_DATA(IP3_31,	LCDOUT6),
+	PINMUX_IPSR_GPSR(IP3_31,	DU0_DR6),
+	PINMUX_IPSR_GPSR(IP3_31,	LCDOUT6),
 
 	/* IPSR4 */
-	PINMUX_IPSR_DATA(IP4_0,		DU0_DR7),
-	PINMUX_IPSR_DATA(IP4_0,		LCDOUT7),
+	PINMUX_IPSR_GPSR(IP4_0,		DU0_DR7),
+	PINMUX_IPSR_GPSR(IP4_0,		LCDOUT7),
 
-	PINMUX_IPSR_DATA(IP4_3_1,	DU0_DG0),
-	PINMUX_IPSR_DATA(IP4_3_1,	LCDOUT8),
-	PINMUX_IPSR_DATA(IP4_3_1,	AUDATA4),
-	PINMUX_IPSR_DATA(IP4_3_1,	ARM_TRACEDATA_4),
-	PINMUX_IPSR_DATA(IP4_3_1,	TX1_D),
-	PINMUX_IPSR_DATA(IP4_3_1,	CAN0_TX_A),
-	PINMUX_IPSR_DATA(IP4_3_1,	ADICHS0),
+	PINMUX_IPSR_GPSR(IP4_3_1,	DU0_DG0),
+	PINMUX_IPSR_GPSR(IP4_3_1,	LCDOUT8),
+	PINMUX_IPSR_GPSR(IP4_3_1,	AUDATA4),
+	PINMUX_IPSR_GPSR(IP4_3_1,	ARM_TRACEDATA_4),
+	PINMUX_IPSR_GPSR(IP4_3_1,	TX1_D),
+	PINMUX_IPSR_GPSR(IP4_3_1,	CAN0_TX_A),
+	PINMUX_IPSR_GPSR(IP4_3_1,	ADICHS0),
 
-	PINMUX_IPSR_DATA(IP4_6_4,	DU0_DG1),
-	PINMUX_IPSR_DATA(IP4_6_4,	LCDOUT9),
-	PINMUX_IPSR_DATA(IP4_6_4,	AUDATA5),
-	PINMUX_IPSR_DATA(IP4_6_4,	ARM_TRACEDATA_5),
+	PINMUX_IPSR_GPSR(IP4_6_4,	DU0_DG1),
+	PINMUX_IPSR_GPSR(IP4_6_4,	LCDOUT9),
+	PINMUX_IPSR_GPSR(IP4_6_4,	AUDATA5),
+	PINMUX_IPSR_GPSR(IP4_6_4,	ARM_TRACEDATA_5),
 	PINMUX_IPSR_MSEL(IP4_6_4,	RX1_D,		SEL_SCIF1_D),
 	PINMUX_IPSR_MSEL(IP4_6_4,	CAN0_RX_A,	SEL_CAN0_A),
-	PINMUX_IPSR_DATA(IP4_6_4,	ADIDATA),
+	PINMUX_IPSR_GPSR(IP4_6_4,	ADIDATA),
 
-	PINMUX_IPSR_DATA(IP4_7,		DU0_DG2),
-	PINMUX_IPSR_DATA(IP4_7,		LCDOUT10),
+	PINMUX_IPSR_GPSR(IP4_7,		DU0_DG2),
+	PINMUX_IPSR_GPSR(IP4_7,		LCDOUT10),
 
-	PINMUX_IPSR_DATA(IP4_8,		DU0_DG3),
-	PINMUX_IPSR_DATA(IP4_8,		LCDOUT11),
+	PINMUX_IPSR_GPSR(IP4_8,		DU0_DG3),
+	PINMUX_IPSR_GPSR(IP4_8,		LCDOUT11),
 
-	PINMUX_IPSR_DATA(IP4_10_9,	DU0_DG4),
-	PINMUX_IPSR_DATA(IP4_10_9,	LCDOUT12),
+	PINMUX_IPSR_GPSR(IP4_10_9,	DU0_DG4),
+	PINMUX_IPSR_GPSR(IP4_10_9,	LCDOUT12),
 	PINMUX_IPSR_MSEL(IP4_10_9,	RX0_B,		SEL_SCIF0_B),
 
-	PINMUX_IPSR_DATA(IP4_12_11,	DU0_DG5),
-	PINMUX_IPSR_DATA(IP4_12_11,	LCDOUT13),
-	PINMUX_IPSR_DATA(IP4_12_11,	TX0_B),
+	PINMUX_IPSR_GPSR(IP4_12_11,	DU0_DG5),
+	PINMUX_IPSR_GPSR(IP4_12_11,	LCDOUT13),
+	PINMUX_IPSR_GPSR(IP4_12_11,	TX0_B),
 
-	PINMUX_IPSR_DATA(IP4_14_13,	DU0_DG6),
-	PINMUX_IPSR_DATA(IP4_14_13,	LCDOUT14),
+	PINMUX_IPSR_GPSR(IP4_14_13,	DU0_DG6),
+	PINMUX_IPSR_GPSR(IP4_14_13,	LCDOUT14),
 	PINMUX_IPSR_MSEL(IP4_14_13,	RX4_A,		SEL_SCIF4_A),
 
-	PINMUX_IPSR_DATA(IP4_16_15,	DU0_DG7),
-	PINMUX_IPSR_DATA(IP4_16_15,	LCDOUT15),
-	PINMUX_IPSR_DATA(IP4_16_15,	TX4_A),
+	PINMUX_IPSR_GPSR(IP4_16_15,	DU0_DG7),
+	PINMUX_IPSR_GPSR(IP4_16_15,	LCDOUT15),
+	PINMUX_IPSR_GPSR(IP4_16_15,	TX4_A),
 
 	PINMUX_IPSR_MSEL(IP4_20_17,	SSI_SCK2_B,	SEL_SSI2_B),
 	PINMUX_DATA(VI0_R0_B_MARK,	FN_IP4_20_17,	FN_VI0_R0_B,	FN_SEL_VI0_B), /* see sel_vi0 */
 	PINMUX_DATA(VI0_R0_D_MARK,	FN_IP4_20_17,	FN_VI0_R0_B,	FN_SEL_VI0_D), /* see sel_vi0 */
-	PINMUX_IPSR_DATA(IP4_20_17,	DU0_DB0),
-	PINMUX_IPSR_DATA(IP4_20_17,	LCDOUT16),
-	PINMUX_IPSR_DATA(IP4_20_17,	AUDATA6),
-	PINMUX_IPSR_DATA(IP4_20_17,	ARM_TRACEDATA_6),
+	PINMUX_IPSR_GPSR(IP4_20_17,	DU0_DB0),
+	PINMUX_IPSR_GPSR(IP4_20_17,	LCDOUT16),
+	PINMUX_IPSR_GPSR(IP4_20_17,	AUDATA6),
+	PINMUX_IPSR_GPSR(IP4_20_17,	ARM_TRACEDATA_6),
 	PINMUX_IPSR_MSEL(IP4_20_17,	GPSCLK_A,	SEL_GPS_A),
-	PINMUX_IPSR_DATA(IP4_20_17,	PWM0_A),
-	PINMUX_IPSR_DATA(IP4_20_17,	ADICLK),
+	PINMUX_IPSR_GPSR(IP4_20_17,	PWM0_A),
+	PINMUX_IPSR_GPSR(IP4_20_17,	ADICLK),
 	PINMUX_IPSR_MSEL(IP4_20_17,	TS_SDAT0_B,	SEL_TSIF0_B),
 
-	PINMUX_IPSR_DATA(IP4_24_21,	AUDIO_CLKC),
+	PINMUX_IPSR_GPSR(IP4_24_21,	AUDIO_CLKC),
 	PINMUX_DATA(VI0_R1_B_MARK,	FN_IP4_24_21,	FN_VI0_R1_B,	FN_SEL_VI0_B), /* see sel_vi0 */
 	PINMUX_DATA(VI0_R1_D_MARK,	FN_IP4_24_21,	FN_VI0_R1_B,	FN_SEL_VI0_D), /* see sel_vi0 */
-	PINMUX_IPSR_DATA(IP4_24_21,	DU0_DB1),
-	PINMUX_IPSR_DATA(IP4_24_21,	LCDOUT17),
-	PINMUX_IPSR_DATA(IP4_24_21,	AUDATA7),
-	PINMUX_IPSR_DATA(IP4_24_21,	ARM_TRACEDATA_7),
+	PINMUX_IPSR_GPSR(IP4_24_21,	DU0_DB1),
+	PINMUX_IPSR_GPSR(IP4_24_21,	LCDOUT17),
+	PINMUX_IPSR_GPSR(IP4_24_21,	AUDATA7),
+	PINMUX_IPSR_GPSR(IP4_24_21,	ARM_TRACEDATA_7),
 	PINMUX_IPSR_MSEL(IP4_24_21,	GPSIN_A,	SEL_GPS_A),
-	PINMUX_IPSR_DATA(IP4_24_21,	ADICS_SAMP),
+	PINMUX_IPSR_GPSR(IP4_24_21,	ADICS_SAMP),
 	PINMUX_IPSR_MSEL(IP4_24_21,	TS_SCK0_B,	SEL_TSIF0_B),
 
 	PINMUX_DATA(VI0_R2_B_MARK,	FN_IP4_26_25,	FN_VI0_R2_B,	FN_SEL_VI0_B), /* see sel_vi0 */
 	PINMUX_DATA(VI0_R2_D_MARK,	FN_IP4_26_25,	FN_VI0_R2_B,	FN_SEL_VI0_D), /* see sel_vi0 */
-	PINMUX_IPSR_DATA(IP4_26_25,	DU0_DB2),
-	PINMUX_IPSR_DATA(IP4_26_25,	LCDOUT18),
+	PINMUX_IPSR_GPSR(IP4_26_25,	DU0_DB2),
+	PINMUX_IPSR_GPSR(IP4_26_25,	LCDOUT18),
 
 	PINMUX_IPSR_MSEL(IP4_28_27,	VI0_R3_B,	SEL_VI0_B),
-	PINMUX_IPSR_DATA(IP4_28_27,	DU0_DB3),
-	PINMUX_IPSR_DATA(IP4_28_27,	LCDOUT19),
+	PINMUX_IPSR_GPSR(IP4_28_27,	DU0_DB3),
+	PINMUX_IPSR_GPSR(IP4_28_27,	LCDOUT19),
 
 	PINMUX_DATA(VI0_R4_B_MARK,	FN_IP4_30_29,	FN_VI0_R4_B,	FN_SEL_VI0_B), /* see sel_vi0 */
 	PINMUX_DATA(VI0_R4_D_MARK,	FN_IP4_30_29,	FN_VI0_R4_B,	FN_SEL_VI0_D), /* see sel_vi0 */
-	PINMUX_IPSR_DATA(IP4_30_29,	DU0_DB4),
-	PINMUX_IPSR_DATA(IP4_30_29,	LCDOUT20),
+	PINMUX_IPSR_GPSR(IP4_30_29,	DU0_DB4),
+	PINMUX_IPSR_GPSR(IP4_30_29,	LCDOUT20),
 
 	/* IPSR5 */
 	PINMUX_DATA(VI0_R5_B_MARK,	FN_IP5_1_0,	FN_VI0_R5_B,	FN_SEL_VI0_B), /* see sel_vi0 */
 	PINMUX_DATA(VI0_R5_D_MARK,	FN_IP5_1_0,	FN_VI0_R5_B,	FN_SEL_VI0_D), /* see sel_vi0 */
-	PINMUX_IPSR_DATA(IP5_1_0,	DU0_DB5),
-	PINMUX_IPSR_DATA(IP5_1_0,	LCDOUT21),
+	PINMUX_IPSR_GPSR(IP5_1_0,	DU0_DB5),
+	PINMUX_IPSR_GPSR(IP5_1_0,	LCDOUT21),
 
 	PINMUX_IPSR_MSEL(IP5_3_2,	VI1_DATA10_B,	SEL_VI1_B),
-	PINMUX_IPSR_DATA(IP5_3_2,	DU0_DB6),
-	PINMUX_IPSR_DATA(IP5_3_2,	LCDOUT22),
+	PINMUX_IPSR_GPSR(IP5_3_2,	DU0_DB6),
+	PINMUX_IPSR_GPSR(IP5_3_2,	LCDOUT22),
 
 	PINMUX_IPSR_MSEL(IP5_5_4,	VI1_DATA11_B,	SEL_VI1_B),
-	PINMUX_IPSR_DATA(IP5_5_4,	DU0_DB7),
-	PINMUX_IPSR_DATA(IP5_5_4,	LCDOUT23),
+	PINMUX_IPSR_GPSR(IP5_5_4,	DU0_DB7),
+	PINMUX_IPSR_GPSR(IP5_5_4,	LCDOUT23),
 
-	PINMUX_IPSR_DATA(IP5_6,		DU0_DOTCLKIN),
-	PINMUX_IPSR_DATA(IP5_6,		QSTVA_QVS),
+	PINMUX_IPSR_GPSR(IP5_6,		DU0_DOTCLKIN),
+	PINMUX_IPSR_GPSR(IP5_6,		QSTVA_QVS),
 
-	PINMUX_IPSR_DATA(IP5_7,		DU0_DOTCLKO_UT0),
-	PINMUX_IPSR_DATA(IP5_7,		QCLK),
+	PINMUX_IPSR_GPSR(IP5_7,		DU0_DOTCLKO_UT0),
+	PINMUX_IPSR_GPSR(IP5_7,		QCLK),
 
-	PINMUX_IPSR_DATA(IP5_9_8,	DU0_DOTCLKO_UT1),
-	PINMUX_IPSR_DATA(IP5_9_8,	QSTVB_QVE),
-	PINMUX_IPSR_DATA(IP5_9_8,	AUDIO_CLKOUT_A),
+	PINMUX_IPSR_GPSR(IP5_9_8,	DU0_DOTCLKO_UT1),
+	PINMUX_IPSR_GPSR(IP5_9_8,	QSTVB_QVE),
+	PINMUX_IPSR_GPSR(IP5_9_8,	AUDIO_CLKOUT_A),
 	PINMUX_IPSR_MSEL(IP5_9_8,	REMOCON_C,	SEL_REMOCON_C),
 
 	PINMUX_IPSR_MSEL(IP5_11_10,	SSI_WS2_B,	SEL_SSI2_B),
-	PINMUX_IPSR_DATA(IP5_11_10,	DU0_EXHSYNC_DU0_HSYNC),
-	PINMUX_IPSR_DATA(IP5_11_10,	QSTH_QHS),
+	PINMUX_IPSR_GPSR(IP5_11_10,	DU0_EXHSYNC_DU0_HSYNC),
+	PINMUX_IPSR_GPSR(IP5_11_10,	QSTH_QHS),
 
-	PINMUX_IPSR_DATA(IP5_12,	DU0_EXVSYNC_DU0_VSYNC),
-	PINMUX_IPSR_DATA(IP5_12,	QSTB_QHE),
+	PINMUX_IPSR_GPSR(IP5_12,	DU0_EXVSYNC_DU0_VSYNC),
+	PINMUX_IPSR_GPSR(IP5_12,	QSTB_QHE),
 
-	PINMUX_IPSR_DATA(IP5_14_13,	DU0_EXODDF_DU0_ODDF_DISP_CDE),
-	PINMUX_IPSR_DATA(IP5_14_13,	QCPV_QDE),
+	PINMUX_IPSR_GPSR(IP5_14_13,	DU0_EXODDF_DU0_ODDF_DISP_CDE),
+	PINMUX_IPSR_GPSR(IP5_14_13,	QCPV_QDE),
 	PINMUX_IPSR_MSEL(IP5_14_13,	FMCLK_D,	SEL_FM_D),
 
 	PINMUX_IPSR_MSEL(IP5_17_15,	SSI_SCK1_A,	SEL_SSI1_A),
-	PINMUX_IPSR_DATA(IP5_17_15,	DU0_DISP),
-	PINMUX_IPSR_DATA(IP5_17_15,	QPOLA),
-	PINMUX_IPSR_DATA(IP5_17_15,	AUDCK),
-	PINMUX_IPSR_DATA(IP5_17_15,	ARM_TRACECLK),
-	PINMUX_IPSR_DATA(IP5_17_15,	BPFCLK_D),
+	PINMUX_IPSR_GPSR(IP5_17_15,	DU0_DISP),
+	PINMUX_IPSR_GPSR(IP5_17_15,	QPOLA),
+	PINMUX_IPSR_GPSR(IP5_17_15,	AUDCK),
+	PINMUX_IPSR_GPSR(IP5_17_15,	ARM_TRACECLK),
+	PINMUX_IPSR_GPSR(IP5_17_15,	BPFCLK_D),
 
 	PINMUX_IPSR_MSEL(IP5_20_18,	SSI_WS1_A,	SEL_SSI1_A),
-	PINMUX_IPSR_DATA(IP5_20_18,	DU0_CDE),
-	PINMUX_IPSR_DATA(IP5_20_18,	QPOLB),
-	PINMUX_IPSR_DATA(IP5_20_18,	AUDSYNC),
-	PINMUX_IPSR_DATA(IP5_20_18,	ARM_TRACECTL),
+	PINMUX_IPSR_GPSR(IP5_20_18,	DU0_CDE),
+	PINMUX_IPSR_GPSR(IP5_20_18,	QPOLB),
+	PINMUX_IPSR_GPSR(IP5_20_18,	AUDSYNC),
+	PINMUX_IPSR_GPSR(IP5_20_18,	ARM_TRACECTL),
 	PINMUX_IPSR_MSEL(IP5_20_18,	FMIN_D,		SEL_FM_D),
 
 	PINMUX_IPSR_MSEL(IP5_22_21,	SD1_CD_B,	SEL_SD1_B),
-	PINMUX_IPSR_DATA(IP5_22_21,	SSI_SCK78),
+	PINMUX_IPSR_GPSR(IP5_22_21,	SSI_SCK78),
 	PINMUX_IPSR_MSEL(IP5_22_21,	HSPI_RX0_B,	SEL_HSPI0_B),
-	PINMUX_IPSR_DATA(IP5_22_21,	TX1_B),
+	PINMUX_IPSR_GPSR(IP5_22_21,	TX1_B),
 
 	PINMUX_IPSR_MSEL(IP5_25_23,	SD1_WP_B,	SEL_SD1_B),
-	PINMUX_IPSR_DATA(IP5_25_23,	SSI_WS78),
+	PINMUX_IPSR_GPSR(IP5_25_23,	SSI_WS78),
 	PINMUX_IPSR_MSEL(IP5_25_23,	HSPI_CLK0_B,	SEL_HSPI0_B),
 	PINMUX_IPSR_MSEL(IP5_25_23,	RX1_B,		SEL_SCIF1_B),
 	PINMUX_IPSR_MSEL(IP5_25_23,	CAN_CLK_D,	SEL_CANCLK_D),
 
-	PINMUX_IPSR_DATA(IP5_28_26,	SSI_SDATA8),
+	PINMUX_IPSR_GPSR(IP5_28_26,	SSI_SDATA8),
 	PINMUX_IPSR_MSEL(IP5_28_26,	SSI_SCK2_A,	SEL_SSI2_A),
 	PINMUX_IPSR_MSEL(IP5_28_26,	HSPI_CS0_B,	SEL_HSPI0_B),
-	PINMUX_IPSR_DATA(IP5_28_26,	TX2_A),
-	PINMUX_IPSR_DATA(IP5_28_26,	CAN0_TX_B),
+	PINMUX_IPSR_GPSR(IP5_28_26,	TX2_A),
+	PINMUX_IPSR_GPSR(IP5_28_26,	CAN0_TX_B),
 
-	PINMUX_IPSR_DATA(IP5_30_29,	SSI_SDATA7),
-	PINMUX_IPSR_DATA(IP5_30_29,	HSPI_TX0_B),
+	PINMUX_IPSR_GPSR(IP5_30_29,	SSI_SDATA7),
+	PINMUX_IPSR_GPSR(IP5_30_29,	HSPI_TX0_B),
 	PINMUX_IPSR_MSEL(IP5_30_29,	RX2_A,		SEL_SCIF2_A),
 	PINMUX_IPSR_MSEL(IP5_30_29,	CAN0_RX_B,	SEL_CAN0_B),
 
 	/* IPSR6 */
-	PINMUX_IPSR_DATA(IP6_1_0,	SSI_SCK6),
+	PINMUX_IPSR_GPSR(IP6_1_0,	SSI_SCK6),
 	PINMUX_IPSR_MSEL(IP6_1_0,	HSPI_RX2_A,	SEL_HSPI2_A),
 	PINMUX_IPSR_MSEL(IP6_1_0,	FMCLK_B,	SEL_FM_B),
-	PINMUX_IPSR_DATA(IP6_1_0,	CAN1_TX_B),
+	PINMUX_IPSR_GPSR(IP6_1_0,	CAN1_TX_B),
 
-	PINMUX_IPSR_DATA(IP6_4_2,	SSI_WS6),
+	PINMUX_IPSR_GPSR(IP6_4_2,	SSI_WS6),
 	PINMUX_IPSR_MSEL(IP6_4_2,	HSPI_CLK2_A,	SEL_HSPI2_A),
-	PINMUX_IPSR_DATA(IP6_4_2,	BPFCLK_B),
+	PINMUX_IPSR_GPSR(IP6_4_2,	BPFCLK_B),
 	PINMUX_IPSR_MSEL(IP6_4_2,	CAN1_RX_B,	SEL_CAN1_B),
 
-	PINMUX_IPSR_DATA(IP6_6_5,	SSI_SDATA6),
-	PINMUX_IPSR_DATA(IP6_6_5,	HSPI_TX2_A),
+	PINMUX_IPSR_GPSR(IP6_6_5,	SSI_SDATA6),
+	PINMUX_IPSR_GPSR(IP6_6_5,	HSPI_TX2_A),
 	PINMUX_IPSR_MSEL(IP6_6_5,	FMIN_B,		SEL_FM_B),
 
-	PINMUX_IPSR_DATA(IP6_7,		SSI_SCK5),
+	PINMUX_IPSR_GPSR(IP6_7,		SSI_SCK5),
 	PINMUX_IPSR_MSEL(IP6_7,		RX4_C,		SEL_SCIF4_C),
 
-	PINMUX_IPSR_DATA(IP6_8,		SSI_WS5),
-	PINMUX_IPSR_DATA(IP6_8,		TX4_C),
+	PINMUX_IPSR_GPSR(IP6_8,		SSI_WS5),
+	PINMUX_IPSR_GPSR(IP6_8,		TX4_C),
 
-	PINMUX_IPSR_DATA(IP6_9,		SSI_SDATA5),
+	PINMUX_IPSR_GPSR(IP6_9,		SSI_SDATA5),
 	PINMUX_IPSR_MSEL(IP6_9,		RX0_D,		SEL_SCIF0_D),
 
-	PINMUX_IPSR_DATA(IP6_10,	SSI_WS34),
-	PINMUX_IPSR_DATA(IP6_10,	ARM_TRACEDATA_8),
+	PINMUX_IPSR_GPSR(IP6_10,	SSI_WS34),
+	PINMUX_IPSR_GPSR(IP6_10,	ARM_TRACEDATA_8),
 
-	PINMUX_IPSR_DATA(IP6_12_11,	SSI_SDATA4),
+	PINMUX_IPSR_GPSR(IP6_12_11,	SSI_SDATA4),
 	PINMUX_IPSR_MSEL(IP6_12_11,	SSI_WS2_A,	SEL_SSI2_A),
-	PINMUX_IPSR_DATA(IP6_12_11,	ARM_TRACEDATA_9),
+	PINMUX_IPSR_GPSR(IP6_12_11,	ARM_TRACEDATA_9),
 
-	PINMUX_IPSR_DATA(IP6_13,	SSI_SDATA3),
-	PINMUX_IPSR_DATA(IP6_13,	ARM_TRACEDATA_10),
+	PINMUX_IPSR_GPSR(IP6_13,	SSI_SDATA3),
+	PINMUX_IPSR_GPSR(IP6_13,	ARM_TRACEDATA_10),
 
-	PINMUX_IPSR_DATA(IP6_15_14,	SSI_SCK012),
-	PINMUX_IPSR_DATA(IP6_15_14,	ARM_TRACEDATA_11),
-	PINMUX_IPSR_DATA(IP6_15_14,	TX0_D),
+	PINMUX_IPSR_GPSR(IP6_15_14,	SSI_SCK012),
+	PINMUX_IPSR_GPSR(IP6_15_14,	ARM_TRACEDATA_11),
+	PINMUX_IPSR_GPSR(IP6_15_14,	TX0_D),
 
-	PINMUX_IPSR_DATA(IP6_16,	SSI_WS012),
-	PINMUX_IPSR_DATA(IP6_16,	ARM_TRACEDATA_12),
+	PINMUX_IPSR_GPSR(IP6_16,	SSI_WS012),
+	PINMUX_IPSR_GPSR(IP6_16,	ARM_TRACEDATA_12),
 
-	PINMUX_IPSR_DATA(IP6_18_17,	SSI_SDATA2),
+	PINMUX_IPSR_GPSR(IP6_18_17,	SSI_SDATA2),
 	PINMUX_IPSR_MSEL(IP6_18_17,	HSPI_CS2_A,	SEL_HSPI2_A),
-	PINMUX_IPSR_DATA(IP6_18_17,	ARM_TRACEDATA_13),
+	PINMUX_IPSR_GPSR(IP6_18_17,	ARM_TRACEDATA_13),
 	PINMUX_IPSR_MSEL(IP6_18_17,	SDA1_A,		SEL_I2C1_A),
 
-	PINMUX_IPSR_DATA(IP6_20_19,	SSI_SDATA1),
-	PINMUX_IPSR_DATA(IP6_20_19,	ARM_TRACEDATA_14),
+	PINMUX_IPSR_GPSR(IP6_20_19,	SSI_SDATA1),
+	PINMUX_IPSR_GPSR(IP6_20_19,	ARM_TRACEDATA_14),
 	PINMUX_IPSR_MSEL(IP6_20_19,	SCL1_A,		SEL_I2C1_A),
 	PINMUX_IPSR_MSEL(IP6_20_19,	SCK2_A,		SEL_SCIF2_A),
 
-	PINMUX_IPSR_DATA(IP6_21,	SSI_SDATA0),
-	PINMUX_IPSR_DATA(IP6_21,	ARM_TRACEDATA_15),
+	PINMUX_IPSR_GPSR(IP6_21,	SSI_SDATA0),
+	PINMUX_IPSR_GPSR(IP6_21,	ARM_TRACEDATA_15),
 
-	PINMUX_IPSR_DATA(IP6_23_22,	SD0_CLK),
-	PINMUX_IPSR_DATA(IP6_23_22,	SUB_TDO),
+	PINMUX_IPSR_GPSR(IP6_23_22,	SD0_CLK),
+	PINMUX_IPSR_GPSR(IP6_23_22,	SUB_TDO),
 
-	PINMUX_IPSR_DATA(IP6_25_24,	SD0_CMD),
-	PINMUX_IPSR_DATA(IP6_25_24,	SUB_TRST),
+	PINMUX_IPSR_GPSR(IP6_25_24,	SD0_CMD),
+	PINMUX_IPSR_GPSR(IP6_25_24,	SUB_TRST),
 
-	PINMUX_IPSR_DATA(IP6_27_26,	SD0_DAT0),
-	PINMUX_IPSR_DATA(IP6_27_26,	SUB_TMS),
+	PINMUX_IPSR_GPSR(IP6_27_26,	SD0_DAT0),
+	PINMUX_IPSR_GPSR(IP6_27_26,	SUB_TMS),
 
-	PINMUX_IPSR_DATA(IP6_29_28,	SD0_DAT1),
-	PINMUX_IPSR_DATA(IP6_29_28,	SUB_TCK),
+	PINMUX_IPSR_GPSR(IP6_29_28,	SD0_DAT1),
+	PINMUX_IPSR_GPSR(IP6_29_28,	SUB_TCK),
 
-	PINMUX_IPSR_DATA(IP6_31_30,	SD0_DAT2),
-	PINMUX_IPSR_DATA(IP6_31_30,	SUB_TDI),
+	PINMUX_IPSR_GPSR(IP6_31_30,	SD0_DAT2),
+	PINMUX_IPSR_GPSR(IP6_31_30,	SUB_TDI),
 
 	/* IPSR7 */
-	PINMUX_IPSR_DATA(IP7_1_0,	SD0_DAT3),
+	PINMUX_IPSR_GPSR(IP7_1_0,	SD0_DAT3),
 	PINMUX_IPSR_MSEL(IP7_1_0,	IRQ1_B,		SEL_IRQ1_B),
 
-	PINMUX_IPSR_DATA(IP7_3_2,	SD0_CD),
-	PINMUX_IPSR_DATA(IP7_3_2,	TX5_A),
+	PINMUX_IPSR_GPSR(IP7_3_2,	SD0_CD),
+	PINMUX_IPSR_GPSR(IP7_3_2,	TX5_A),
 
-	PINMUX_IPSR_DATA(IP7_5_4,	SD0_WP),
+	PINMUX_IPSR_GPSR(IP7_5_4,	SD0_WP),
 	PINMUX_IPSR_MSEL(IP7_5_4,	RX5_A,		SEL_SCIF5_A),
 
-	PINMUX_IPSR_DATA(IP7_8_6,	VI1_CLKENB),
+	PINMUX_IPSR_GPSR(IP7_8_6,	VI1_CLKENB),
 	PINMUX_IPSR_MSEL(IP7_8_6,	HSPI_CLK0_A,	SEL_HSPI0_A),
-	PINMUX_IPSR_DATA(IP7_8_6,	HTX1_A),
+	PINMUX_IPSR_GPSR(IP7_8_6,	HTX1_A),
 	PINMUX_IPSR_MSEL(IP7_8_6,	RTS1_C,		SEL_SCIF1_C),
 
-	PINMUX_IPSR_DATA(IP7_11_9,	VI1_FIELD),
+	PINMUX_IPSR_GPSR(IP7_11_9,	VI1_FIELD),
 	PINMUX_IPSR_MSEL(IP7_11_9,	HSPI_CS0_A,	SEL_HSPI0_A),
 	PINMUX_IPSR_MSEL(IP7_11_9,	HRX1_A,		SEL_HSCIF1_A),
 	PINMUX_IPSR_MSEL(IP7_11_9,	SCK1_C,		SEL_SCIF1_C),
 
-	PINMUX_IPSR_DATA(IP7_14_12,	VI1_HSYNC),
+	PINMUX_IPSR_GPSR(IP7_14_12,	VI1_HSYNC),
 	PINMUX_IPSR_MSEL(IP7_14_12,	HSPI_RX0_A,	SEL_HSPI0_A),
 	PINMUX_IPSR_MSEL(IP7_14_12,	HRTS1_A,	SEL_HSCIF1_A),
 	PINMUX_IPSR_MSEL(IP7_14_12,	FMCLK_A,	SEL_FM_A),
 	PINMUX_IPSR_MSEL(IP7_14_12,	RX1_C,		SEL_SCIF1_C),
 
-	PINMUX_IPSR_DATA(IP7_17_15,	VI1_VSYNC),
-	PINMUX_IPSR_DATA(IP7_17_15,	HSPI_TX0),
+	PINMUX_IPSR_GPSR(IP7_17_15,	VI1_VSYNC),
+	PINMUX_IPSR_GPSR(IP7_17_15,	HSPI_TX0),
 	PINMUX_IPSR_MSEL(IP7_17_15,	HCTS1_A,	SEL_HSCIF1_A),
-	PINMUX_IPSR_DATA(IP7_17_15,	BPFCLK_A),
-	PINMUX_IPSR_DATA(IP7_17_15,	TX1_C),
+	PINMUX_IPSR_GPSR(IP7_17_15,	BPFCLK_A),
+	PINMUX_IPSR_GPSR(IP7_17_15,	TX1_C),
 
-	PINMUX_IPSR_DATA(IP7_20_18,	TCLK0),
+	PINMUX_IPSR_GPSR(IP7_20_18,	TCLK0),
 	PINMUX_IPSR_MSEL(IP7_20_18,	HSCK1_A,	SEL_HSCIF1_A),
 	PINMUX_IPSR_MSEL(IP7_20_18,	FMIN_A,		SEL_FM_A),
 	PINMUX_IPSR_MSEL(IP7_20_18,	IRQ2_C,		SEL_IRQ2_C),
 	PINMUX_IPSR_MSEL(IP7_20_18,	CTS1_C,		SEL_SCIF1_C),
-	PINMUX_IPSR_DATA(IP7_20_18,	SPEEDIN),
+	PINMUX_IPSR_GPSR(IP7_20_18,	SPEEDIN),
 
-	PINMUX_IPSR_DATA(IP7_21,	VI0_CLK),
+	PINMUX_IPSR_GPSR(IP7_21,	VI0_CLK),
 	PINMUX_IPSR_MSEL(IP7_21,	CAN_CLK_A,	SEL_CANCLK_A),
 
-	PINMUX_IPSR_DATA(IP7_24_22,	VI0_CLKENB),
+	PINMUX_IPSR_GPSR(IP7_24_22,	VI0_CLKENB),
 	PINMUX_IPSR_MSEL(IP7_24_22,	SD2_DAT2_B,	SEL_SD2_B),
-	PINMUX_IPSR_DATA(IP7_24_22,	VI1_DATA0),
-	PINMUX_IPSR_DATA(IP7_24_22,	DU1_DG6),
+	PINMUX_IPSR_GPSR(IP7_24_22,	VI1_DATA0),
+	PINMUX_IPSR_GPSR(IP7_24_22,	DU1_DG6),
 	PINMUX_IPSR_MSEL(IP7_24_22,	HSPI_RX1_A,	SEL_HSPI1_A),
 	PINMUX_IPSR_MSEL(IP7_24_22,	RX4_B,		SEL_SCIF4_B),
 
-	PINMUX_IPSR_DATA(IP7_28_25,	VI0_FIELD),
+	PINMUX_IPSR_GPSR(IP7_28_25,	VI0_FIELD),
 	PINMUX_IPSR_MSEL(IP7_28_25,	SD2_DAT3_B,	SEL_SD2_B),
 	PINMUX_DATA(VI0_R3_C_MARK,	FN_IP7_28_25,	FN_VI0_R3_C,	FN_SEL_VI0_C), /* see sel_vi0 */
 	PINMUX_DATA(VI0_R3_D_MARK,	FN_IP7_28_25,	FN_VI0_R3_C,	FN_SEL_VI0_D), /* see sel_vi0 */
-	PINMUX_IPSR_DATA(IP7_28_25,	VI1_DATA1),
-	PINMUX_IPSR_DATA(IP7_28_25,	DU1_DG7),
+	PINMUX_IPSR_GPSR(IP7_28_25,	VI1_DATA1),
+	PINMUX_IPSR_GPSR(IP7_28_25,	DU1_DG7),
 	PINMUX_IPSR_MSEL(IP7_28_25,	HSPI_CLK1_A,	SEL_HSPI1_A),
-	PINMUX_IPSR_DATA(IP7_28_25,	TX4_B),
+	PINMUX_IPSR_GPSR(IP7_28_25,	TX4_B),
 
-	PINMUX_IPSR_DATA(IP7_31_29,	VI0_HSYNC),
+	PINMUX_IPSR_GPSR(IP7_31_29,	VI0_HSYNC),
 	PINMUX_IPSR_MSEL(IP7_31_29,	SD2_CD_B,	SEL_SD2_B),
-	PINMUX_IPSR_DATA(IP7_31_29,	VI1_DATA2),
-	PINMUX_IPSR_DATA(IP7_31_29,	DU1_DR2),
+	PINMUX_IPSR_GPSR(IP7_31_29,	VI1_DATA2),
+	PINMUX_IPSR_GPSR(IP7_31_29,	DU1_DR2),
 	PINMUX_IPSR_MSEL(IP7_31_29,	HSPI_CS1_A,	SEL_HSPI1_A),
 	PINMUX_IPSR_MSEL(IP7_31_29,	RX3_B,		SEL_SCIF3_B),
 
 	/* IPSR8 */
-	PINMUX_IPSR_DATA(IP8_2_0,	VI0_VSYNC),
+	PINMUX_IPSR_GPSR(IP8_2_0,	VI0_VSYNC),
 	PINMUX_IPSR_MSEL(IP8_2_0,	SD2_WP_B,	SEL_SD2_B),
-	PINMUX_IPSR_DATA(IP8_2_0,	VI1_DATA3),
-	PINMUX_IPSR_DATA(IP8_2_0,	DU1_DR3),
-	PINMUX_IPSR_DATA(IP8_2_0,	HSPI_TX1_A),
-	PINMUX_IPSR_DATA(IP8_2_0,	TX3_B),
+	PINMUX_IPSR_GPSR(IP8_2_0,	VI1_DATA3),
+	PINMUX_IPSR_GPSR(IP8_2_0,	DU1_DR3),
+	PINMUX_IPSR_GPSR(IP8_2_0,	HSPI_TX1_A),
+	PINMUX_IPSR_GPSR(IP8_2_0,	TX3_B),
 
-	PINMUX_IPSR_DATA(IP8_5_3,	VI0_DATA0_VI0_B0),
-	PINMUX_IPSR_DATA(IP8_5_3,	DU1_DG2),
+	PINMUX_IPSR_GPSR(IP8_5_3,	VI0_DATA0_VI0_B0),
+	PINMUX_IPSR_GPSR(IP8_5_3,	DU1_DG2),
 	PINMUX_IPSR_MSEL(IP8_5_3,	IRQ2_B,		SEL_IRQ2_B),
 	PINMUX_IPSR_MSEL(IP8_5_3,	RX3_D,		SEL_SCIF3_D),
 
-	PINMUX_IPSR_DATA(IP8_8_6,	VI0_DATA1_VI0_B1),
-	PINMUX_IPSR_DATA(IP8_8_6,	DU1_DG3),
+	PINMUX_IPSR_GPSR(IP8_8_6,	VI0_DATA1_VI0_B1),
+	PINMUX_IPSR_GPSR(IP8_8_6,	DU1_DG3),
 	PINMUX_IPSR_MSEL(IP8_8_6,	IRQ3_B,		SEL_IRQ3_B),
-	PINMUX_IPSR_DATA(IP8_8_6,	TX3_D),
+	PINMUX_IPSR_GPSR(IP8_8_6,	TX3_D),
 
-	PINMUX_IPSR_DATA(IP8_10_9,	VI0_DATA2_VI0_B2),
-	PINMUX_IPSR_DATA(IP8_10_9,	DU1_DG4),
+	PINMUX_IPSR_GPSR(IP8_10_9,	VI0_DATA2_VI0_B2),
+	PINMUX_IPSR_GPSR(IP8_10_9,	DU1_DG4),
 	PINMUX_IPSR_MSEL(IP8_10_9,	RX0_C,		SEL_SCIF0_C),
 
-	PINMUX_IPSR_DATA(IP8_13_11,	VI0_DATA3_VI0_B3),
-	PINMUX_IPSR_DATA(IP8_13_11,	DU1_DG5),
-	PINMUX_IPSR_DATA(IP8_13_11,	TX1_A),
-	PINMUX_IPSR_DATA(IP8_13_11,	TX0_C),
+	PINMUX_IPSR_GPSR(IP8_13_11,	VI0_DATA3_VI0_B3),
+	PINMUX_IPSR_GPSR(IP8_13_11,	DU1_DG5),
+	PINMUX_IPSR_GPSR(IP8_13_11,	TX1_A),
+	PINMUX_IPSR_GPSR(IP8_13_11,	TX0_C),
 
-	PINMUX_IPSR_DATA(IP8_15_14,	VI0_DATA4_VI0_B4),
-	PINMUX_IPSR_DATA(IP8_15_14,	DU1_DB2),
+	PINMUX_IPSR_GPSR(IP8_15_14,	VI0_DATA4_VI0_B4),
+	PINMUX_IPSR_GPSR(IP8_15_14,	DU1_DB2),
 	PINMUX_IPSR_MSEL(IP8_15_14,	RX1_A,		SEL_SCIF1_A),
 
-	PINMUX_IPSR_DATA(IP8_18_16,	VI0_DATA5_VI0_B5),
-	PINMUX_IPSR_DATA(IP8_18_16,	DU1_DB3),
+	PINMUX_IPSR_GPSR(IP8_18_16,	VI0_DATA5_VI0_B5),
+	PINMUX_IPSR_GPSR(IP8_18_16,	DU1_DB3),
 	PINMUX_IPSR_MSEL(IP8_18_16,	SCK1_A,		SEL_SCIF1_A),
-	PINMUX_IPSR_DATA(IP8_18_16,	PWM4),
+	PINMUX_IPSR_GPSR(IP8_18_16,	PWM4),
 	PINMUX_IPSR_MSEL(IP8_18_16,	HSCK1_B,	SEL_HSCIF1_B),
 
-	PINMUX_IPSR_DATA(IP8_21_19,	VI0_DATA6_VI0_G0),
-	PINMUX_IPSR_DATA(IP8_21_19,	DU1_DB4),
+	PINMUX_IPSR_GPSR(IP8_21_19,	VI0_DATA6_VI0_G0),
+	PINMUX_IPSR_GPSR(IP8_21_19,	DU1_DB4),
 	PINMUX_IPSR_MSEL(IP8_21_19,	CTS1_A,		SEL_SCIF1_A),
-	PINMUX_IPSR_DATA(IP8_21_19,	PWM5),
+	PINMUX_IPSR_GPSR(IP8_21_19,	PWM5),
 
-	PINMUX_IPSR_DATA(IP8_23_22,	VI0_DATA7_VI0_G1),
-	PINMUX_IPSR_DATA(IP8_23_22,	DU1_DB5),
+	PINMUX_IPSR_GPSR(IP8_23_22,	VI0_DATA7_VI0_G1),
+	PINMUX_IPSR_GPSR(IP8_23_22,	DU1_DB5),
 	PINMUX_IPSR_MSEL(IP8_23_22,	RTS1_A,		SEL_SCIF1_A),
 
-	PINMUX_IPSR_DATA(IP8_26_24,	VI0_G2),
-	PINMUX_IPSR_DATA(IP8_26_24,	SD2_CLK_B),
-	PINMUX_IPSR_DATA(IP8_26_24,	VI1_DATA4),
-	PINMUX_IPSR_DATA(IP8_26_24,	DU1_DR4),
-	PINMUX_IPSR_DATA(IP8_26_24,	HTX1_B),
+	PINMUX_IPSR_GPSR(IP8_26_24,	VI0_G2),
+	PINMUX_IPSR_GPSR(IP8_26_24,	SD2_CLK_B),
+	PINMUX_IPSR_GPSR(IP8_26_24,	VI1_DATA4),
+	PINMUX_IPSR_GPSR(IP8_26_24,	DU1_DR4),
+	PINMUX_IPSR_GPSR(IP8_26_24,	HTX1_B),
 
-	PINMUX_IPSR_DATA(IP8_29_27,	VI0_G3),
+	PINMUX_IPSR_GPSR(IP8_29_27,	VI0_G3),
 	PINMUX_IPSR_MSEL(IP8_29_27,	SD2_CMD_B,	SEL_SD2_B),
-	PINMUX_IPSR_DATA(IP8_29_27,	VI1_DATA5),
-	PINMUX_IPSR_DATA(IP8_29_27,	DU1_DR5),
+	PINMUX_IPSR_GPSR(IP8_29_27,	VI1_DATA5),
+	PINMUX_IPSR_GPSR(IP8_29_27,	DU1_DR5),
 	PINMUX_IPSR_MSEL(IP8_29_27,	HRX1_B,		SEL_HSCIF1_B),
 
 	/* IPSR9 */
-	PINMUX_IPSR_DATA(IP9_2_0,	VI0_G4),
+	PINMUX_IPSR_GPSR(IP9_2_0,	VI0_G4),
 	PINMUX_IPSR_MSEL(IP9_2_0,	SD2_DAT0_B,	SEL_SD2_B),
-	PINMUX_IPSR_DATA(IP9_2_0,	VI1_DATA6),
-	PINMUX_IPSR_DATA(IP9_2_0,	DU1_DR6),
+	PINMUX_IPSR_GPSR(IP9_2_0,	VI1_DATA6),
+	PINMUX_IPSR_GPSR(IP9_2_0,	DU1_DR6),
 	PINMUX_IPSR_MSEL(IP9_2_0,	HRTS1_B,	SEL_HSCIF1_B),
 
-	PINMUX_IPSR_DATA(IP9_5_3,	VI0_G5),
+	PINMUX_IPSR_GPSR(IP9_5_3,	VI0_G5),
 	PINMUX_IPSR_MSEL(IP9_5_3,	SD2_DAT1_B,	SEL_SD2_B),
-	PINMUX_IPSR_DATA(IP9_5_3,	VI1_DATA7),
-	PINMUX_IPSR_DATA(IP9_5_3,	DU1_DR7),
+	PINMUX_IPSR_GPSR(IP9_5_3,	VI1_DATA7),
+	PINMUX_IPSR_GPSR(IP9_5_3,	DU1_DR7),
 	PINMUX_IPSR_MSEL(IP9_5_3,	HCTS1_B,	SEL_HSCIF1_B),
 
 	PINMUX_DATA(VI0_R0_A_MARK,	FN_IP9_8_6,	FN_VI0_R0_A,	FN_SEL_VI0_A), /* see sel_vi0 */
 	PINMUX_DATA(VI0_R0_C_MARK,	FN_IP9_8_6,	FN_VI0_R0_A,	FN_SEL_VI0_C), /* see sel_vi0 */
-	PINMUX_IPSR_DATA(IP9_8_6,	VI1_CLK),
-	PINMUX_IPSR_DATA(IP9_8_6,	ETH_REF_CLK),
-	PINMUX_IPSR_DATA(IP9_8_6,	DU1_DOTCLKIN),
+	PINMUX_IPSR_GPSR(IP9_8_6,	VI1_CLK),
+	PINMUX_IPSR_GPSR(IP9_8_6,	ETH_REF_CLK),
+	PINMUX_IPSR_GPSR(IP9_8_6,	DU1_DOTCLKIN),
 
 	PINMUX_DATA(VI0_R1_A_MARK,	FN_IP9_11_9,	FN_VI0_R1_A,	FN_SEL_VI0_A), /* see sel_vi0 */
 	PINMUX_DATA(VI0_R1_C_MARK,	FN_IP9_11_9,	FN_VI0_R1_A,	FN_SEL_VI0_C), /* see sel_vi0 */
-	PINMUX_IPSR_DATA(IP9_11_9,	VI1_DATA8),
-	PINMUX_IPSR_DATA(IP9_11_9,	DU1_DB6),
-	PINMUX_IPSR_DATA(IP9_11_9,	ETH_TXD0),
-	PINMUX_IPSR_DATA(IP9_11_9,	PWM2),
-	PINMUX_IPSR_DATA(IP9_11_9,	TCLK1),
+	PINMUX_IPSR_GPSR(IP9_11_9,	VI1_DATA8),
+	PINMUX_IPSR_GPSR(IP9_11_9,	DU1_DB6),
+	PINMUX_IPSR_GPSR(IP9_11_9,	ETH_TXD0),
+	PINMUX_IPSR_GPSR(IP9_11_9,	PWM2),
+	PINMUX_IPSR_GPSR(IP9_11_9,	TCLK1),
 
 	PINMUX_DATA(VI0_R2_A_MARK,	FN_IP9_14_12,	FN_VI0_R2_A,	FN_SEL_VI0_A), /* see sel_vi0 */
 	PINMUX_DATA(VI0_R2_C_MARK,	FN_IP9_14_12,	FN_VI0_R2_A,	FN_SEL_VI0_C), /* see sel_vi0 */
-	PINMUX_IPSR_DATA(IP9_14_12,	VI1_DATA9),
-	PINMUX_IPSR_DATA(IP9_14_12,	DU1_DB7),
-	PINMUX_IPSR_DATA(IP9_14_12,	ETH_TXD1),
-	PINMUX_IPSR_DATA(IP9_14_12,	PWM3),
+	PINMUX_IPSR_GPSR(IP9_14_12,	VI1_DATA9),
+	PINMUX_IPSR_GPSR(IP9_14_12,	DU1_DB7),
+	PINMUX_IPSR_GPSR(IP9_14_12,	ETH_TXD1),
+	PINMUX_IPSR_GPSR(IP9_14_12,	PWM3),
 
 	PINMUX_IPSR_MSEL(IP9_17_15,	VI0_R3_A,	SEL_VI0_A),
-	PINMUX_IPSR_DATA(IP9_17_15,	ETH_CRS_DV),
-	PINMUX_IPSR_DATA(IP9_17_15,	IECLK),
+	PINMUX_IPSR_GPSR(IP9_17_15,	ETH_CRS_DV),
+	PINMUX_IPSR_GPSR(IP9_17_15,	IECLK),
 	PINMUX_IPSR_MSEL(IP9_17_15,	SCK2_C,		SEL_SCIF2_C),
 
 	PINMUX_DATA(VI0_R4_A_MARK,	FN_IP9_20_18,	FN_VI0_R4_A,	FN_SEL_VI0_A), /* see sel_vi0 */
 	PINMUX_DATA(VI0_R3_C_MARK,	FN_IP9_20_18,	FN_VI0_R4_A,	FN_SEL_VI0_C), /* see sel_vi0 */
-	PINMUX_IPSR_DATA(IP9_20_18,	ETH_TX_EN),
-	PINMUX_IPSR_DATA(IP9_20_18,	IETX),
-	PINMUX_IPSR_DATA(IP9_20_18,	TX2_C),
+	PINMUX_IPSR_GPSR(IP9_20_18,	ETH_TX_EN),
+	PINMUX_IPSR_GPSR(IP9_20_18,	IETX),
+	PINMUX_IPSR_GPSR(IP9_20_18,	TX2_C),
 
 	PINMUX_DATA(VI0_R5_A_MARK,	FN_IP9_23_21,	FN_VI0_R5_A,	FN_SEL_VI0_A), /* see sel_vi0 */
 	PINMUX_DATA(VI0_R5_C_MARK,	FN_IP9_23_21,	FN_VI0_R5_A,	FN_SEL_VI0_C), /* see sel_vi0 */
-	PINMUX_IPSR_DATA(IP9_23_21,	ETH_RX_ER),
+	PINMUX_IPSR_GPSR(IP9_23_21,	ETH_RX_ER),
 	PINMUX_IPSR_MSEL(IP9_23_21,	FMCLK_C,	SEL_FM_C),
-	PINMUX_IPSR_DATA(IP9_23_21,	IERX),
+	PINMUX_IPSR_GPSR(IP9_23_21,	IERX),
 	PINMUX_IPSR_MSEL(IP9_23_21,	RX2_C,		SEL_SCIF2_C),
 
 	PINMUX_IPSR_MSEL(IP9_26_24,	VI1_DATA10_A,	SEL_VI1_A),
-	PINMUX_IPSR_DATA(IP9_26_24,	DU1_DOTCLKOUT),
-	PINMUX_IPSR_DATA(IP9_26_24,	ETH_RXD0),
-	PINMUX_IPSR_DATA(IP9_26_24,	BPFCLK_C),
-	PINMUX_IPSR_DATA(IP9_26_24,	TX2_D),
+	PINMUX_IPSR_GPSR(IP9_26_24,	DU1_DOTCLKOUT),
+	PINMUX_IPSR_GPSR(IP9_26_24,	ETH_RXD0),
+	PINMUX_IPSR_GPSR(IP9_26_24,	BPFCLK_C),
+	PINMUX_IPSR_GPSR(IP9_26_24,	TX2_D),
 	PINMUX_IPSR_MSEL(IP9_26_24,	SDA2_C,		SEL_I2C2_C),
 
 	PINMUX_IPSR_MSEL(IP9_29_27,	VI1_DATA11_A,	SEL_VI1_A),
-	PINMUX_IPSR_DATA(IP9_29_27,	DU1_EXHSYNC_DU1_HSYNC),
-	PINMUX_IPSR_DATA(IP9_29_27,	ETH_RXD1),
+	PINMUX_IPSR_GPSR(IP9_29_27,	DU1_EXHSYNC_DU1_HSYNC),
+	PINMUX_IPSR_GPSR(IP9_29_27,	ETH_RXD1),
 	PINMUX_IPSR_MSEL(IP9_29_27,	FMIN_C,		SEL_FM_C),
 	PINMUX_IPSR_MSEL(IP9_29_27,	RX2_D,		SEL_SCIF2_D),
 	PINMUX_IPSR_MSEL(IP9_29_27,	SCL2_C,		SEL_I2C2_C),
 
 	/* IPSR10 */
-	PINMUX_IPSR_DATA(IP10_2_0,	SD2_CLK_A),
-	PINMUX_IPSR_DATA(IP10_2_0,	DU1_EXVSYNC_DU1_VSYNC),
-	PINMUX_IPSR_DATA(IP10_2_0,	ATARD1),
-	PINMUX_IPSR_DATA(IP10_2_0,	ETH_MDC),
+	PINMUX_IPSR_GPSR(IP10_2_0,	SD2_CLK_A),
+	PINMUX_IPSR_GPSR(IP10_2_0,	DU1_EXVSYNC_DU1_VSYNC),
+	PINMUX_IPSR_GPSR(IP10_2_0,	ATARD1),
+	PINMUX_IPSR_GPSR(IP10_2_0,	ETH_MDC),
 	PINMUX_IPSR_MSEL(IP10_2_0,	SDA1_B,		SEL_I2C1_B),
 
 	PINMUX_IPSR_MSEL(IP10_5_3,	SD2_CMD_A,	SEL_SD2_A),
-	PINMUX_IPSR_DATA(IP10_5_3,	DU1_EXODDF_DU1_ODDF_DISP_CDE),
-	PINMUX_IPSR_DATA(IP10_5_3,	ATAWR1),
-	PINMUX_IPSR_DATA(IP10_5_3,	ETH_MDIO),
+	PINMUX_IPSR_GPSR(IP10_5_3,	DU1_EXODDF_DU1_ODDF_DISP_CDE),
+	PINMUX_IPSR_GPSR(IP10_5_3,	ATAWR1),
+	PINMUX_IPSR_GPSR(IP10_5_3,	ETH_MDIO),
 	PINMUX_IPSR_MSEL(IP10_5_3,	SCL1_B,		SEL_I2C1_B),
 
 	PINMUX_IPSR_MSEL(IP10_8_6,	SD2_DAT0_A,	SEL_SD2_A),
-	PINMUX_IPSR_DATA(IP10_8_6,	DU1_DISP),
-	PINMUX_IPSR_DATA(IP10_8_6,	ATACS01),
+	PINMUX_IPSR_GPSR(IP10_8_6,	DU1_DISP),
+	PINMUX_IPSR_GPSR(IP10_8_6,	ATACS01),
 	PINMUX_IPSR_MSEL(IP10_8_6,	DREQ1_B,	SEL_DREQ1_B),
-	PINMUX_IPSR_DATA(IP10_8_6,	ETH_LINK),
+	PINMUX_IPSR_GPSR(IP10_8_6,	ETH_LINK),
 	PINMUX_IPSR_MSEL(IP10_8_6,	CAN1_RX_A,	SEL_CAN1_A),
 
 	PINMUX_IPSR_MSEL(IP10_12_9,	SD2_DAT1_A,	SEL_SD2_A),
-	PINMUX_IPSR_DATA(IP10_12_9,	DU1_CDE),
-	PINMUX_IPSR_DATA(IP10_12_9,	ATACS11),
-	PINMUX_IPSR_DATA(IP10_12_9,	DACK1_B),
-	PINMUX_IPSR_DATA(IP10_12_9,	ETH_MAGIC),
-	PINMUX_IPSR_DATA(IP10_12_9,	CAN1_TX_A),
-	PINMUX_IPSR_DATA(IP10_12_9,	PWM6),
+	PINMUX_IPSR_GPSR(IP10_12_9,	DU1_CDE),
+	PINMUX_IPSR_GPSR(IP10_12_9,	ATACS11),
+	PINMUX_IPSR_GPSR(IP10_12_9,	DACK1_B),
+	PINMUX_IPSR_GPSR(IP10_12_9,	ETH_MAGIC),
+	PINMUX_IPSR_GPSR(IP10_12_9,	CAN1_TX_A),
+	PINMUX_IPSR_GPSR(IP10_12_9,	PWM6),
 
 	PINMUX_IPSR_MSEL(IP10_15_13,	SD2_DAT2_A,	SEL_SD2_A),
-	PINMUX_IPSR_DATA(IP10_15_13,	VI1_DATA12),
+	PINMUX_IPSR_GPSR(IP10_15_13,	VI1_DATA12),
 	PINMUX_IPSR_MSEL(IP10_15_13,	DREQ2_B,	SEL_DREQ2_B),
-	PINMUX_IPSR_DATA(IP10_15_13,	ATADIR1),
+	PINMUX_IPSR_GPSR(IP10_15_13,	ATADIR1),
 	PINMUX_IPSR_MSEL(IP10_15_13,	HSPI_CLK2_B,	SEL_HSPI2_B),
 	PINMUX_IPSR_MSEL(IP10_15_13,	GPSCLK_B,	SEL_GPS_B),
 
 	PINMUX_IPSR_MSEL(IP10_18_16,	SD2_DAT3_A,	SEL_SD2_A),
-	PINMUX_IPSR_DATA(IP10_18_16,	VI1_DATA13),
-	PINMUX_IPSR_DATA(IP10_18_16,	DACK2_B),
-	PINMUX_IPSR_DATA(IP10_18_16,	ATAG1),
+	PINMUX_IPSR_GPSR(IP10_18_16,	VI1_DATA13),
+	PINMUX_IPSR_GPSR(IP10_18_16,	DACK2_B),
+	PINMUX_IPSR_GPSR(IP10_18_16,	ATAG1),
 	PINMUX_IPSR_MSEL(IP10_18_16,	HSPI_CS2_B,	SEL_HSPI2_B),
 	PINMUX_IPSR_MSEL(IP10_18_16,	GPSIN_B,	SEL_GPS_B),
 
 	PINMUX_IPSR_MSEL(IP10_21_19,	SD2_CD_A,	SEL_SD2_A),
-	PINMUX_IPSR_DATA(IP10_21_19,	VI1_DATA14),
+	PINMUX_IPSR_GPSR(IP10_21_19,	VI1_DATA14),
 	PINMUX_IPSR_MSEL(IP10_21_19,	EX_WAIT1_B,	SEL_WAIT1_B),
 	PINMUX_IPSR_MSEL(IP10_21_19,	DREQ0_B,	SEL_DREQ0_B),
 	PINMUX_IPSR_MSEL(IP10_21_19,	HSPI_RX2_B,	SEL_HSPI2_B),
 	PINMUX_IPSR_MSEL(IP10_21_19,	REMOCON_A,	SEL_REMOCON_A),
 
 	PINMUX_IPSR_MSEL(IP10_24_22,	SD2_WP_A,	SEL_SD2_A),
-	PINMUX_IPSR_DATA(IP10_24_22,	VI1_DATA15),
+	PINMUX_IPSR_GPSR(IP10_24_22,	VI1_DATA15),
 	PINMUX_IPSR_MSEL(IP10_24_22,	EX_WAIT2_B,	SEL_WAIT2_B),
-	PINMUX_IPSR_DATA(IP10_24_22,	DACK0_B),
-	PINMUX_IPSR_DATA(IP10_24_22,	HSPI_TX2_B),
+	PINMUX_IPSR_GPSR(IP10_24_22,	DACK0_B),
+	PINMUX_IPSR_GPSR(IP10_24_22,	HSPI_TX2_B),
 	PINMUX_IPSR_MSEL(IP10_24_22,	CAN_CLK_C,	SEL_CANCLK_C),
 };
 
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7779.c b/drivers/pinctrl/sh-pfc/pfc-r8a7779.c
index bd17ecc..5bef934 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a7779.c
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a7779.c
@@ -611,577 +611,577 @@
 	PINMUX_SINGLE(USB_PENC0),
 	PINMUX_SINGLE(USB_PENC1),
 
-	PINMUX_IPSR_DATA(IP0_2_0, USB_PENC2),
+	PINMUX_IPSR_GPSR(IP0_2_0, USB_PENC2),
 	PINMUX_IPSR_MSEL(IP0_2_0, SCK0, SEL_SCIF0_0),
-	PINMUX_IPSR_DATA(IP0_2_0, PWM1),
+	PINMUX_IPSR_GPSR(IP0_2_0, PWM1),
 	PINMUX_IPSR_MSEL(IP0_2_0, PWMFSW0, SEL_PWMFSW_0),
 	PINMUX_IPSR_MSEL(IP0_2_0, SCIF_CLK, SEL_SCIF_0),
 	PINMUX_IPSR_MSEL(IP0_2_0, TCLK0_C, SEL_TMU0_2),
-	PINMUX_IPSR_DATA(IP0_5_3, BS),
-	PINMUX_IPSR_DATA(IP0_5_3, SD1_DAT2),
-	PINMUX_IPSR_DATA(IP0_5_3, MMC0_D2),
-	PINMUX_IPSR_DATA(IP0_5_3, FD2),
-	PINMUX_IPSR_DATA(IP0_5_3, ATADIR0),
-	PINMUX_IPSR_DATA(IP0_5_3, SDSELF),
+	PINMUX_IPSR_GPSR(IP0_5_3, BS),
+	PINMUX_IPSR_GPSR(IP0_5_3, SD1_DAT2),
+	PINMUX_IPSR_GPSR(IP0_5_3, MMC0_D2),
+	PINMUX_IPSR_GPSR(IP0_5_3, FD2),
+	PINMUX_IPSR_GPSR(IP0_5_3, ATADIR0),
+	PINMUX_IPSR_GPSR(IP0_5_3, SDSELF),
 	PINMUX_IPSR_MSEL(IP0_5_3, HCTS1, SEL_HSCIF1_0),
-	PINMUX_IPSR_DATA(IP0_5_3, TX4_C),
-	PINMUX_IPSR_DATA(IP0_7_6, A0),
-	PINMUX_IPSR_DATA(IP0_7_6, SD1_DAT3),
-	PINMUX_IPSR_DATA(IP0_7_6, MMC0_D3),
-	PINMUX_IPSR_DATA(IP0_7_6, FD3),
-	PINMUX_IPSR_DATA(IP0_9_8, A20),
-	PINMUX_IPSR_DATA(IP0_9_8, TX5_D),
-	PINMUX_IPSR_DATA(IP0_9_8, HSPI_TX2_B),
-	PINMUX_IPSR_DATA(IP0_11_10, A21),
+	PINMUX_IPSR_GPSR(IP0_5_3, TX4_C),
+	PINMUX_IPSR_GPSR(IP0_7_6, A0),
+	PINMUX_IPSR_GPSR(IP0_7_6, SD1_DAT3),
+	PINMUX_IPSR_GPSR(IP0_7_6, MMC0_D3),
+	PINMUX_IPSR_GPSR(IP0_7_6, FD3),
+	PINMUX_IPSR_GPSR(IP0_9_8, A20),
+	PINMUX_IPSR_GPSR(IP0_9_8, TX5_D),
+	PINMUX_IPSR_GPSR(IP0_9_8, HSPI_TX2_B),
+	PINMUX_IPSR_GPSR(IP0_11_10, A21),
 	PINMUX_IPSR_MSEL(IP0_11_10, SCK5_D, SEL_SCIF5_3),
 	PINMUX_IPSR_MSEL(IP0_11_10, HSPI_CLK2_B, SEL_HSPI2_1),
-	PINMUX_IPSR_DATA(IP0_13_12, A22),
+	PINMUX_IPSR_GPSR(IP0_13_12, A22),
 	PINMUX_IPSR_MSEL(IP0_13_12, RX5_D, SEL_SCIF5_3),
 	PINMUX_IPSR_MSEL(IP0_13_12, HSPI_RX2_B, SEL_HSPI2_1),
-	PINMUX_IPSR_DATA(IP0_13_12, VI1_R0),
-	PINMUX_IPSR_DATA(IP0_15_14, A23),
-	PINMUX_IPSR_DATA(IP0_15_14, FCLE),
+	PINMUX_IPSR_GPSR(IP0_13_12, VI1_R0),
+	PINMUX_IPSR_GPSR(IP0_15_14, A23),
+	PINMUX_IPSR_GPSR(IP0_15_14, FCLE),
 	PINMUX_IPSR_MSEL(IP0_15_14, HSPI_CLK2, SEL_HSPI2_0),
-	PINMUX_IPSR_DATA(IP0_15_14, VI1_R1),
-	PINMUX_IPSR_DATA(IP0_18_16, A24),
-	PINMUX_IPSR_DATA(IP0_18_16, SD1_CD),
-	PINMUX_IPSR_DATA(IP0_18_16, MMC0_D4),
-	PINMUX_IPSR_DATA(IP0_18_16, FD4),
+	PINMUX_IPSR_GPSR(IP0_15_14, VI1_R1),
+	PINMUX_IPSR_GPSR(IP0_18_16, A24),
+	PINMUX_IPSR_GPSR(IP0_18_16, SD1_CD),
+	PINMUX_IPSR_GPSR(IP0_18_16, MMC0_D4),
+	PINMUX_IPSR_GPSR(IP0_18_16, FD4),
 	PINMUX_IPSR_MSEL(IP0_18_16, HSPI_CS2, SEL_HSPI2_0),
-	PINMUX_IPSR_DATA(IP0_18_16, VI1_R2),
+	PINMUX_IPSR_GPSR(IP0_18_16, VI1_R2),
 	PINMUX_IPSR_MSEL(IP0_18_16, SSI_WS78_B, SEL_SSI7_1),
-	PINMUX_IPSR_DATA(IP0_22_19, A25),
-	PINMUX_IPSR_DATA(IP0_22_19, SD1_WP),
-	PINMUX_IPSR_DATA(IP0_22_19, MMC0_D5),
-	PINMUX_IPSR_DATA(IP0_22_19, FD5),
+	PINMUX_IPSR_GPSR(IP0_22_19, A25),
+	PINMUX_IPSR_GPSR(IP0_22_19, SD1_WP),
+	PINMUX_IPSR_GPSR(IP0_22_19, MMC0_D5),
+	PINMUX_IPSR_GPSR(IP0_22_19, FD5),
 	PINMUX_IPSR_MSEL(IP0_22_19, HSPI_RX2, SEL_HSPI2_0),
-	PINMUX_IPSR_DATA(IP0_22_19, VI1_R3),
-	PINMUX_IPSR_DATA(IP0_22_19, TX5_B),
+	PINMUX_IPSR_GPSR(IP0_22_19, VI1_R3),
+	PINMUX_IPSR_GPSR(IP0_22_19, TX5_B),
 	PINMUX_IPSR_MSEL(IP0_22_19, SSI_SDATA7_B, SEL_SSI7_1),
 	PINMUX_IPSR_MSEL(IP0_22_19, CTS0_B, SEL_SCIF0_1),
-	PINMUX_IPSR_DATA(IP0_24_23, CLKOUT),
-	PINMUX_IPSR_DATA(IP0_24_23, TX3C_IRDA_TX_C),
-	PINMUX_IPSR_DATA(IP0_24_23, PWM0_B),
-	PINMUX_IPSR_DATA(IP0_25, CS0),
+	PINMUX_IPSR_GPSR(IP0_24_23, CLKOUT),
+	PINMUX_IPSR_GPSR(IP0_24_23, TX3C_IRDA_TX_C),
+	PINMUX_IPSR_GPSR(IP0_24_23, PWM0_B),
+	PINMUX_IPSR_GPSR(IP0_25, CS0),
 	PINMUX_IPSR_MSEL(IP0_25, HSPI_CS2_B, SEL_HSPI2_1),
-	PINMUX_IPSR_DATA(IP0_27_26, CS1_A26),
-	PINMUX_IPSR_DATA(IP0_27_26, HSPI_TX2),
-	PINMUX_IPSR_DATA(IP0_27_26, SDSELF_B),
-	PINMUX_IPSR_DATA(IP0_30_28, RD_WR),
-	PINMUX_IPSR_DATA(IP0_30_28, FWE),
-	PINMUX_IPSR_DATA(IP0_30_28, ATAG0),
-	PINMUX_IPSR_DATA(IP0_30_28, VI1_R7),
+	PINMUX_IPSR_GPSR(IP0_27_26, CS1_A26),
+	PINMUX_IPSR_GPSR(IP0_27_26, HSPI_TX2),
+	PINMUX_IPSR_GPSR(IP0_27_26, SDSELF_B),
+	PINMUX_IPSR_GPSR(IP0_30_28, RD_WR),
+	PINMUX_IPSR_GPSR(IP0_30_28, FWE),
+	PINMUX_IPSR_GPSR(IP0_30_28, ATAG0),
+	PINMUX_IPSR_GPSR(IP0_30_28, VI1_R7),
 	PINMUX_IPSR_MSEL(IP0_30_28, HRTS1, SEL_HSCIF1_0),
 	PINMUX_IPSR_MSEL(IP0_30_28, RX4_C, SEL_SCIF4_2),
 
-	PINMUX_IPSR_DATA(IP1_1_0, EX_CS0),
+	PINMUX_IPSR_GPSR(IP1_1_0, EX_CS0),
 	PINMUX_IPSR_MSEL(IP1_1_0, RX3_C_IRDA_RX_C, SEL_SCIF3_2),
-	PINMUX_IPSR_DATA(IP1_1_0, MMC0_D6),
-	PINMUX_IPSR_DATA(IP1_1_0, FD6),
-	PINMUX_IPSR_DATA(IP1_3_2, EX_CS1),
-	PINMUX_IPSR_DATA(IP1_3_2, MMC0_D7),
-	PINMUX_IPSR_DATA(IP1_3_2, FD7),
-	PINMUX_IPSR_DATA(IP1_6_4, EX_CS2),
-	PINMUX_IPSR_DATA(IP1_6_4, SD1_CLK),
-	PINMUX_IPSR_DATA(IP1_6_4, MMC0_CLK),
-	PINMUX_IPSR_DATA(IP1_6_4, FALE),
-	PINMUX_IPSR_DATA(IP1_6_4, ATACS00),
-	PINMUX_IPSR_DATA(IP1_10_7, EX_CS3),
-	PINMUX_IPSR_DATA(IP1_10_7, SD1_CMD),
-	PINMUX_IPSR_DATA(IP1_10_7, MMC0_CMD),
-	PINMUX_IPSR_DATA(IP1_10_7, FRE),
-	PINMUX_IPSR_DATA(IP1_10_7, ATACS10),
-	PINMUX_IPSR_DATA(IP1_10_7, VI1_R4),
+	PINMUX_IPSR_GPSR(IP1_1_0, MMC0_D6),
+	PINMUX_IPSR_GPSR(IP1_1_0, FD6),
+	PINMUX_IPSR_GPSR(IP1_3_2, EX_CS1),
+	PINMUX_IPSR_GPSR(IP1_3_2, MMC0_D7),
+	PINMUX_IPSR_GPSR(IP1_3_2, FD7),
+	PINMUX_IPSR_GPSR(IP1_6_4, EX_CS2),
+	PINMUX_IPSR_GPSR(IP1_6_4, SD1_CLK),
+	PINMUX_IPSR_GPSR(IP1_6_4, MMC0_CLK),
+	PINMUX_IPSR_GPSR(IP1_6_4, FALE),
+	PINMUX_IPSR_GPSR(IP1_6_4, ATACS00),
+	PINMUX_IPSR_GPSR(IP1_10_7, EX_CS3),
+	PINMUX_IPSR_GPSR(IP1_10_7, SD1_CMD),
+	PINMUX_IPSR_GPSR(IP1_10_7, MMC0_CMD),
+	PINMUX_IPSR_GPSR(IP1_10_7, FRE),
+	PINMUX_IPSR_GPSR(IP1_10_7, ATACS10),
+	PINMUX_IPSR_GPSR(IP1_10_7, VI1_R4),
 	PINMUX_IPSR_MSEL(IP1_10_7, RX5_B, SEL_SCIF5_1),
 	PINMUX_IPSR_MSEL(IP1_10_7, HSCK1, SEL_HSCIF1_0),
 	PINMUX_IPSR_MSEL(IP1_10_7, SSI_SDATA8_B, SEL_SSI8_1),
 	PINMUX_IPSR_MSEL(IP1_10_7, RTS0_B_TANS_B, SEL_SCIF0_1),
 	PINMUX_IPSR_MSEL(IP1_10_7, SSI_SDATA9, SEL_SSI9_0),
-	PINMUX_IPSR_DATA(IP1_14_11, EX_CS4),
-	PINMUX_IPSR_DATA(IP1_14_11, SD1_DAT0),
-	PINMUX_IPSR_DATA(IP1_14_11, MMC0_D0),
-	PINMUX_IPSR_DATA(IP1_14_11, FD0),
-	PINMUX_IPSR_DATA(IP1_14_11, ATARD0),
-	PINMUX_IPSR_DATA(IP1_14_11, VI1_R5),
+	PINMUX_IPSR_GPSR(IP1_14_11, EX_CS4),
+	PINMUX_IPSR_GPSR(IP1_14_11, SD1_DAT0),
+	PINMUX_IPSR_GPSR(IP1_14_11, MMC0_D0),
+	PINMUX_IPSR_GPSR(IP1_14_11, FD0),
+	PINMUX_IPSR_GPSR(IP1_14_11, ATARD0),
+	PINMUX_IPSR_GPSR(IP1_14_11, VI1_R5),
 	PINMUX_IPSR_MSEL(IP1_14_11, SCK5_B, SEL_SCIF5_1),
-	PINMUX_IPSR_DATA(IP1_14_11, HTX1),
-	PINMUX_IPSR_DATA(IP1_14_11, TX2_E),
-	PINMUX_IPSR_DATA(IP1_14_11, TX0_B),
+	PINMUX_IPSR_GPSR(IP1_14_11, HTX1),
+	PINMUX_IPSR_GPSR(IP1_14_11, TX2_E),
+	PINMUX_IPSR_GPSR(IP1_14_11, TX0_B),
 	PINMUX_IPSR_MSEL(IP1_14_11, SSI_SCK9, SEL_SSI9_0),
-	PINMUX_IPSR_DATA(IP1_18_15, EX_CS5),
-	PINMUX_IPSR_DATA(IP1_18_15, SD1_DAT1),
-	PINMUX_IPSR_DATA(IP1_18_15, MMC0_D1),
-	PINMUX_IPSR_DATA(IP1_18_15, FD1),
-	PINMUX_IPSR_DATA(IP1_18_15, ATAWR0),
-	PINMUX_IPSR_DATA(IP1_18_15, VI1_R6),
+	PINMUX_IPSR_GPSR(IP1_18_15, EX_CS5),
+	PINMUX_IPSR_GPSR(IP1_18_15, SD1_DAT1),
+	PINMUX_IPSR_GPSR(IP1_18_15, MMC0_D1),
+	PINMUX_IPSR_GPSR(IP1_18_15, FD1),
+	PINMUX_IPSR_GPSR(IP1_18_15, ATAWR0),
+	PINMUX_IPSR_GPSR(IP1_18_15, VI1_R6),
 	PINMUX_IPSR_MSEL(IP1_18_15, HRX1, SEL_HSCIF1_0),
 	PINMUX_IPSR_MSEL(IP1_18_15, RX2_E, SEL_SCIF2_4),
 	PINMUX_IPSR_MSEL(IP1_18_15, RX0_B, SEL_SCIF0_1),
 	PINMUX_IPSR_MSEL(IP1_18_15, SSI_WS9, SEL_SSI9_0),
-	PINMUX_IPSR_DATA(IP1_20_19, MLB_CLK),
-	PINMUX_IPSR_DATA(IP1_20_19, PWM2),
+	PINMUX_IPSR_GPSR(IP1_20_19, MLB_CLK),
+	PINMUX_IPSR_GPSR(IP1_20_19, PWM2),
 	PINMUX_IPSR_MSEL(IP1_20_19, SCK4, SEL_SCIF4_0),
-	PINMUX_IPSR_DATA(IP1_22_21, MLB_SIG),
-	PINMUX_IPSR_DATA(IP1_22_21, PWM3),
-	PINMUX_IPSR_DATA(IP1_22_21, TX4),
-	PINMUX_IPSR_DATA(IP1_24_23, MLB_DAT),
-	PINMUX_IPSR_DATA(IP1_24_23, PWM4),
+	PINMUX_IPSR_GPSR(IP1_22_21, MLB_SIG),
+	PINMUX_IPSR_GPSR(IP1_22_21, PWM3),
+	PINMUX_IPSR_GPSR(IP1_22_21, TX4),
+	PINMUX_IPSR_GPSR(IP1_24_23, MLB_DAT),
+	PINMUX_IPSR_GPSR(IP1_24_23, PWM4),
 	PINMUX_IPSR_MSEL(IP1_24_23, RX4, SEL_SCIF4_0),
-	PINMUX_IPSR_DATA(IP1_28_25, HTX0),
-	PINMUX_IPSR_DATA(IP1_28_25, TX1),
-	PINMUX_IPSR_DATA(IP1_28_25, SDATA),
+	PINMUX_IPSR_GPSR(IP1_28_25, HTX0),
+	PINMUX_IPSR_GPSR(IP1_28_25, TX1),
+	PINMUX_IPSR_GPSR(IP1_28_25, SDATA),
 	PINMUX_IPSR_MSEL(IP1_28_25, CTS0_C, SEL_SCIF0_2),
-	PINMUX_IPSR_DATA(IP1_28_25, SUB_TCK),
-	PINMUX_IPSR_DATA(IP1_28_25, CC5_STATE2),
-	PINMUX_IPSR_DATA(IP1_28_25, CC5_STATE10),
-	PINMUX_IPSR_DATA(IP1_28_25, CC5_STATE18),
-	PINMUX_IPSR_DATA(IP1_28_25, CC5_STATE26),
-	PINMUX_IPSR_DATA(IP1_28_25, CC5_STATE34),
+	PINMUX_IPSR_GPSR(IP1_28_25, SUB_TCK),
+	PINMUX_IPSR_GPSR(IP1_28_25, CC5_STATE2),
+	PINMUX_IPSR_GPSR(IP1_28_25, CC5_STATE10),
+	PINMUX_IPSR_GPSR(IP1_28_25, CC5_STATE18),
+	PINMUX_IPSR_GPSR(IP1_28_25, CC5_STATE26),
+	PINMUX_IPSR_GPSR(IP1_28_25, CC5_STATE34),
 
 	PINMUX_IPSR_MSEL(IP2_3_0, HRX0, SEL_HSCIF0_0),
 	PINMUX_IPSR_MSEL(IP2_3_0, RX1, SEL_SCIF1_0),
-	PINMUX_IPSR_DATA(IP2_3_0, SCKZ),
+	PINMUX_IPSR_GPSR(IP2_3_0, SCKZ),
 	PINMUX_IPSR_MSEL(IP2_3_0, RTS0_C_TANS_C, SEL_SCIF0_2),
-	PINMUX_IPSR_DATA(IP2_3_0, SUB_TDI),
-	PINMUX_IPSR_DATA(IP2_3_0, CC5_STATE3),
-	PINMUX_IPSR_DATA(IP2_3_0, CC5_STATE11),
-	PINMUX_IPSR_DATA(IP2_3_0, CC5_STATE19),
-	PINMUX_IPSR_DATA(IP2_3_0, CC5_STATE27),
-	PINMUX_IPSR_DATA(IP2_3_0, CC5_STATE35),
+	PINMUX_IPSR_GPSR(IP2_3_0, SUB_TDI),
+	PINMUX_IPSR_GPSR(IP2_3_0, CC5_STATE3),
+	PINMUX_IPSR_GPSR(IP2_3_0, CC5_STATE11),
+	PINMUX_IPSR_GPSR(IP2_3_0, CC5_STATE19),
+	PINMUX_IPSR_GPSR(IP2_3_0, CC5_STATE27),
+	PINMUX_IPSR_GPSR(IP2_3_0, CC5_STATE35),
 	PINMUX_IPSR_MSEL(IP2_7_4, HSCK0, SEL_HSCIF0_0),
 	PINMUX_IPSR_MSEL(IP2_7_4, SCK1, SEL_SCIF1_0),
-	PINMUX_IPSR_DATA(IP2_7_4, MTS),
-	PINMUX_IPSR_DATA(IP2_7_4, PWM5),
+	PINMUX_IPSR_GPSR(IP2_7_4, MTS),
+	PINMUX_IPSR_GPSR(IP2_7_4, PWM5),
 	PINMUX_IPSR_MSEL(IP2_7_4, SCK0_C, SEL_SCIF0_2),
 	PINMUX_IPSR_MSEL(IP2_7_4, SSI_SDATA9_B, SEL_SSI9_1),
-	PINMUX_IPSR_DATA(IP2_7_4, SUB_TDO),
-	PINMUX_IPSR_DATA(IP2_7_4, CC5_STATE0),
-	PINMUX_IPSR_DATA(IP2_7_4, CC5_STATE8),
-	PINMUX_IPSR_DATA(IP2_7_4, CC5_STATE16),
-	PINMUX_IPSR_DATA(IP2_7_4, CC5_STATE24),
-	PINMUX_IPSR_DATA(IP2_7_4, CC5_STATE32),
+	PINMUX_IPSR_GPSR(IP2_7_4, SUB_TDO),
+	PINMUX_IPSR_GPSR(IP2_7_4, CC5_STATE0),
+	PINMUX_IPSR_GPSR(IP2_7_4, CC5_STATE8),
+	PINMUX_IPSR_GPSR(IP2_7_4, CC5_STATE16),
+	PINMUX_IPSR_GPSR(IP2_7_4, CC5_STATE24),
+	PINMUX_IPSR_GPSR(IP2_7_4, CC5_STATE32),
 	PINMUX_IPSR_MSEL(IP2_11_8, HCTS0, SEL_HSCIF0_0),
 	PINMUX_IPSR_MSEL(IP2_11_8, CTS1, SEL_SCIF1_0),
-	PINMUX_IPSR_DATA(IP2_11_8, STM),
-	PINMUX_IPSR_DATA(IP2_11_8, PWM0_D),
+	PINMUX_IPSR_GPSR(IP2_11_8, STM),
+	PINMUX_IPSR_GPSR(IP2_11_8, PWM0_D),
 	PINMUX_IPSR_MSEL(IP2_11_8, RX0_C, SEL_SCIF0_2),
 	PINMUX_IPSR_MSEL(IP2_11_8, SCIF_CLK_C, SEL_SCIF_2),
-	PINMUX_IPSR_DATA(IP2_11_8, SUB_TRST),
+	PINMUX_IPSR_GPSR(IP2_11_8, SUB_TRST),
 	PINMUX_IPSR_MSEL(IP2_11_8, TCLK1_B, SEL_TMU1_1),
-	PINMUX_IPSR_DATA(IP2_11_8, CC5_OSCOUT),
+	PINMUX_IPSR_GPSR(IP2_11_8, CC5_OSCOUT),
 	PINMUX_IPSR_MSEL(IP2_15_12, HRTS0, SEL_HSCIF0_0),
 	PINMUX_IPSR_MSEL(IP2_15_12, RTS1_TANS, SEL_SCIF1_0),
-	PINMUX_IPSR_DATA(IP2_15_12, MDATA),
-	PINMUX_IPSR_DATA(IP2_15_12, TX0_C),
-	PINMUX_IPSR_DATA(IP2_15_12, SUB_TMS),
-	PINMUX_IPSR_DATA(IP2_15_12, CC5_STATE1),
-	PINMUX_IPSR_DATA(IP2_15_12, CC5_STATE9),
-	PINMUX_IPSR_DATA(IP2_15_12, CC5_STATE17),
-	PINMUX_IPSR_DATA(IP2_15_12, CC5_STATE25),
-	PINMUX_IPSR_DATA(IP2_15_12, CC5_STATE33),
-	PINMUX_IPSR_DATA(IP2_18_16, DU0_DR0),
-	PINMUX_IPSR_DATA(IP2_18_16, LCDOUT0),
+	PINMUX_IPSR_GPSR(IP2_15_12, MDATA),
+	PINMUX_IPSR_GPSR(IP2_15_12, TX0_C),
+	PINMUX_IPSR_GPSR(IP2_15_12, SUB_TMS),
+	PINMUX_IPSR_GPSR(IP2_15_12, CC5_STATE1),
+	PINMUX_IPSR_GPSR(IP2_15_12, CC5_STATE9),
+	PINMUX_IPSR_GPSR(IP2_15_12, CC5_STATE17),
+	PINMUX_IPSR_GPSR(IP2_15_12, CC5_STATE25),
+	PINMUX_IPSR_GPSR(IP2_15_12, CC5_STATE33),
+	PINMUX_IPSR_GPSR(IP2_18_16, DU0_DR0),
+	PINMUX_IPSR_GPSR(IP2_18_16, LCDOUT0),
 	PINMUX_IPSR_MSEL(IP2_18_16, DREQ0, SEL_EXBUS0_0),
 	PINMUX_IPSR_MSEL(IP2_18_16, GPS_CLK_B, SEL_GPS_1),
-	PINMUX_IPSR_DATA(IP2_18_16, AUDATA0),
-	PINMUX_IPSR_DATA(IP2_18_16, TX5_C),
-	PINMUX_IPSR_DATA(IP2_21_19, DU0_DR1),
-	PINMUX_IPSR_DATA(IP2_21_19, LCDOUT1),
-	PINMUX_IPSR_DATA(IP2_21_19, DACK0),
-	PINMUX_IPSR_DATA(IP2_21_19, DRACK0),
+	PINMUX_IPSR_GPSR(IP2_18_16, AUDATA0),
+	PINMUX_IPSR_GPSR(IP2_18_16, TX5_C),
+	PINMUX_IPSR_GPSR(IP2_21_19, DU0_DR1),
+	PINMUX_IPSR_GPSR(IP2_21_19, LCDOUT1),
+	PINMUX_IPSR_GPSR(IP2_21_19, DACK0),
+	PINMUX_IPSR_GPSR(IP2_21_19, DRACK0),
 	PINMUX_IPSR_MSEL(IP2_21_19, GPS_SIGN_B, SEL_GPS_1),
-	PINMUX_IPSR_DATA(IP2_21_19, AUDATA1),
+	PINMUX_IPSR_GPSR(IP2_21_19, AUDATA1),
 	PINMUX_IPSR_MSEL(IP2_21_19, RX5_C, SEL_SCIF5_2),
-	PINMUX_IPSR_DATA(IP2_22, DU0_DR2),
-	PINMUX_IPSR_DATA(IP2_22, LCDOUT2),
-	PINMUX_IPSR_DATA(IP2_23, DU0_DR3),
-	PINMUX_IPSR_DATA(IP2_23, LCDOUT3),
-	PINMUX_IPSR_DATA(IP2_24, DU0_DR4),
-	PINMUX_IPSR_DATA(IP2_24, LCDOUT4),
-	PINMUX_IPSR_DATA(IP2_25, DU0_DR5),
-	PINMUX_IPSR_DATA(IP2_25, LCDOUT5),
-	PINMUX_IPSR_DATA(IP2_26, DU0_DR6),
-	PINMUX_IPSR_DATA(IP2_26, LCDOUT6),
-	PINMUX_IPSR_DATA(IP2_27, DU0_DR7),
-	PINMUX_IPSR_DATA(IP2_27, LCDOUT7),
-	PINMUX_IPSR_DATA(IP2_30_28, DU0_DG0),
-	PINMUX_IPSR_DATA(IP2_30_28, LCDOUT8),
+	PINMUX_IPSR_GPSR(IP2_22, DU0_DR2),
+	PINMUX_IPSR_GPSR(IP2_22, LCDOUT2),
+	PINMUX_IPSR_GPSR(IP2_23, DU0_DR3),
+	PINMUX_IPSR_GPSR(IP2_23, LCDOUT3),
+	PINMUX_IPSR_GPSR(IP2_24, DU0_DR4),
+	PINMUX_IPSR_GPSR(IP2_24, LCDOUT4),
+	PINMUX_IPSR_GPSR(IP2_25, DU0_DR5),
+	PINMUX_IPSR_GPSR(IP2_25, LCDOUT5),
+	PINMUX_IPSR_GPSR(IP2_26, DU0_DR6),
+	PINMUX_IPSR_GPSR(IP2_26, LCDOUT6),
+	PINMUX_IPSR_GPSR(IP2_27, DU0_DR7),
+	PINMUX_IPSR_GPSR(IP2_27, LCDOUT7),
+	PINMUX_IPSR_GPSR(IP2_30_28, DU0_DG0),
+	PINMUX_IPSR_GPSR(IP2_30_28, LCDOUT8),
 	PINMUX_IPSR_MSEL(IP2_30_28, DREQ1, SEL_EXBUS1_0),
 	PINMUX_IPSR_MSEL(IP2_30_28, SCL2, SEL_I2C2_0),
-	PINMUX_IPSR_DATA(IP2_30_28, AUDATA2),
+	PINMUX_IPSR_GPSR(IP2_30_28, AUDATA2),
 
-	PINMUX_IPSR_DATA(IP3_2_0, DU0_DG1),
-	PINMUX_IPSR_DATA(IP3_2_0, LCDOUT9),
-	PINMUX_IPSR_DATA(IP3_2_0, DACK1),
+	PINMUX_IPSR_GPSR(IP3_2_0, DU0_DG1),
+	PINMUX_IPSR_GPSR(IP3_2_0, LCDOUT9),
+	PINMUX_IPSR_GPSR(IP3_2_0, DACK1),
 	PINMUX_IPSR_MSEL(IP3_2_0, SDA2, SEL_I2C2_0),
-	PINMUX_IPSR_DATA(IP3_2_0, AUDATA3),
-	PINMUX_IPSR_DATA(IP3_3, DU0_DG2),
-	PINMUX_IPSR_DATA(IP3_3, LCDOUT10),
-	PINMUX_IPSR_DATA(IP3_4, DU0_DG3),
-	PINMUX_IPSR_DATA(IP3_4, LCDOUT11),
-	PINMUX_IPSR_DATA(IP3_5, DU0_DG4),
-	PINMUX_IPSR_DATA(IP3_5, LCDOUT12),
-	PINMUX_IPSR_DATA(IP3_6, DU0_DG5),
-	PINMUX_IPSR_DATA(IP3_6, LCDOUT13),
-	PINMUX_IPSR_DATA(IP3_7, DU0_DG6),
-	PINMUX_IPSR_DATA(IP3_7, LCDOUT14),
-	PINMUX_IPSR_DATA(IP3_8, DU0_DG7),
-	PINMUX_IPSR_DATA(IP3_8, LCDOUT15),
-	PINMUX_IPSR_DATA(IP3_11_9, DU0_DB0),
-	PINMUX_IPSR_DATA(IP3_11_9, LCDOUT16),
-	PINMUX_IPSR_DATA(IP3_11_9, EX_WAIT1),
+	PINMUX_IPSR_GPSR(IP3_2_0, AUDATA3),
+	PINMUX_IPSR_GPSR(IP3_3, DU0_DG2),
+	PINMUX_IPSR_GPSR(IP3_3, LCDOUT10),
+	PINMUX_IPSR_GPSR(IP3_4, DU0_DG3),
+	PINMUX_IPSR_GPSR(IP3_4, LCDOUT11),
+	PINMUX_IPSR_GPSR(IP3_5, DU0_DG4),
+	PINMUX_IPSR_GPSR(IP3_5, LCDOUT12),
+	PINMUX_IPSR_GPSR(IP3_6, DU0_DG5),
+	PINMUX_IPSR_GPSR(IP3_6, LCDOUT13),
+	PINMUX_IPSR_GPSR(IP3_7, DU0_DG6),
+	PINMUX_IPSR_GPSR(IP3_7, LCDOUT14),
+	PINMUX_IPSR_GPSR(IP3_8, DU0_DG7),
+	PINMUX_IPSR_GPSR(IP3_8, LCDOUT15),
+	PINMUX_IPSR_GPSR(IP3_11_9, DU0_DB0),
+	PINMUX_IPSR_GPSR(IP3_11_9, LCDOUT16),
+	PINMUX_IPSR_GPSR(IP3_11_9, EX_WAIT1),
 	PINMUX_IPSR_MSEL(IP3_11_9, SCL1, SEL_I2C1_0),
 	PINMUX_IPSR_MSEL(IP3_11_9, TCLK1, SEL_TMU1_0),
-	PINMUX_IPSR_DATA(IP3_11_9, AUDATA4),
-	PINMUX_IPSR_DATA(IP3_14_12, DU0_DB1),
-	PINMUX_IPSR_DATA(IP3_14_12, LCDOUT17),
-	PINMUX_IPSR_DATA(IP3_14_12, EX_WAIT2),
+	PINMUX_IPSR_GPSR(IP3_11_9, AUDATA4),
+	PINMUX_IPSR_GPSR(IP3_14_12, DU0_DB1),
+	PINMUX_IPSR_GPSR(IP3_14_12, LCDOUT17),
+	PINMUX_IPSR_GPSR(IP3_14_12, EX_WAIT2),
 	PINMUX_IPSR_MSEL(IP3_14_12, SDA1, SEL_I2C1_0),
 	PINMUX_IPSR_MSEL(IP3_14_12, GPS_MAG_B, SEL_GPS_1),
-	PINMUX_IPSR_DATA(IP3_14_12, AUDATA5),
+	PINMUX_IPSR_GPSR(IP3_14_12, AUDATA5),
 	PINMUX_IPSR_MSEL(IP3_14_12, SCK5_C, SEL_SCIF5_2),
-	PINMUX_IPSR_DATA(IP3_15, DU0_DB2),
-	PINMUX_IPSR_DATA(IP3_15, LCDOUT18),
-	PINMUX_IPSR_DATA(IP3_16, DU0_DB3),
-	PINMUX_IPSR_DATA(IP3_16, LCDOUT19),
-	PINMUX_IPSR_DATA(IP3_17, DU0_DB4),
-	PINMUX_IPSR_DATA(IP3_17, LCDOUT20),
-	PINMUX_IPSR_DATA(IP3_18, DU0_DB5),
-	PINMUX_IPSR_DATA(IP3_18, LCDOUT21),
-	PINMUX_IPSR_DATA(IP3_19, DU0_DB6),
-	PINMUX_IPSR_DATA(IP3_19, LCDOUT22),
-	PINMUX_IPSR_DATA(IP3_20, DU0_DB7),
-	PINMUX_IPSR_DATA(IP3_20, LCDOUT23),
-	PINMUX_IPSR_DATA(IP3_22_21, DU0_DOTCLKIN),
-	PINMUX_IPSR_DATA(IP3_22_21, QSTVA_QVS),
-	PINMUX_IPSR_DATA(IP3_22_21, TX3_D_IRDA_TX_D),
+	PINMUX_IPSR_GPSR(IP3_15, DU0_DB2),
+	PINMUX_IPSR_GPSR(IP3_15, LCDOUT18),
+	PINMUX_IPSR_GPSR(IP3_16, DU0_DB3),
+	PINMUX_IPSR_GPSR(IP3_16, LCDOUT19),
+	PINMUX_IPSR_GPSR(IP3_17, DU0_DB4),
+	PINMUX_IPSR_GPSR(IP3_17, LCDOUT20),
+	PINMUX_IPSR_GPSR(IP3_18, DU0_DB5),
+	PINMUX_IPSR_GPSR(IP3_18, LCDOUT21),
+	PINMUX_IPSR_GPSR(IP3_19, DU0_DB6),
+	PINMUX_IPSR_GPSR(IP3_19, LCDOUT22),
+	PINMUX_IPSR_GPSR(IP3_20, DU0_DB7),
+	PINMUX_IPSR_GPSR(IP3_20, LCDOUT23),
+	PINMUX_IPSR_GPSR(IP3_22_21, DU0_DOTCLKIN),
+	PINMUX_IPSR_GPSR(IP3_22_21, QSTVA_QVS),
+	PINMUX_IPSR_GPSR(IP3_22_21, TX3_D_IRDA_TX_D),
 	PINMUX_IPSR_MSEL(IP3_22_21, SCL3_B, SEL_I2C3_1),
-	PINMUX_IPSR_DATA(IP3_23, DU0_DOTCLKOUT0),
-	PINMUX_IPSR_DATA(IP3_23, QCLK),
-	PINMUX_IPSR_DATA(IP3_26_24, DU0_DOTCLKOUT1),
-	PINMUX_IPSR_DATA(IP3_26_24, QSTVB_QVE),
+	PINMUX_IPSR_GPSR(IP3_23, DU0_DOTCLKOUT0),
+	PINMUX_IPSR_GPSR(IP3_23, QCLK),
+	PINMUX_IPSR_GPSR(IP3_26_24, DU0_DOTCLKOUT1),
+	PINMUX_IPSR_GPSR(IP3_26_24, QSTVB_QVE),
 	PINMUX_IPSR_MSEL(IP3_26_24, RX3_D_IRDA_RX_D, SEL_SCIF3_3),
 	PINMUX_IPSR_MSEL(IP3_26_24, SDA3_B, SEL_I2C3_1),
 	PINMUX_IPSR_MSEL(IP3_26_24, SDA2_C, SEL_I2C2_2),
-	PINMUX_IPSR_DATA(IP3_26_24, DACK0_B),
-	PINMUX_IPSR_DATA(IP3_26_24, DRACK0_B),
-	PINMUX_IPSR_DATA(IP3_27, DU0_EXHSYNC_DU0_HSYNC),
-	PINMUX_IPSR_DATA(IP3_27, QSTH_QHS),
-	PINMUX_IPSR_DATA(IP3_28, DU0_EXVSYNC_DU0_VSYNC),
-	PINMUX_IPSR_DATA(IP3_28, QSTB_QHE),
-	PINMUX_IPSR_DATA(IP3_31_29, DU0_EXODDF_DU0_ODDF_DISP_CDE),
-	PINMUX_IPSR_DATA(IP3_31_29, QCPV_QDE),
-	PINMUX_IPSR_DATA(IP3_31_29, CAN1_TX),
-	PINMUX_IPSR_DATA(IP3_31_29, TX2_C),
+	PINMUX_IPSR_GPSR(IP3_26_24, DACK0_B),
+	PINMUX_IPSR_GPSR(IP3_26_24, DRACK0_B),
+	PINMUX_IPSR_GPSR(IP3_27, DU0_EXHSYNC_DU0_HSYNC),
+	PINMUX_IPSR_GPSR(IP3_27, QSTH_QHS),
+	PINMUX_IPSR_GPSR(IP3_28, DU0_EXVSYNC_DU0_VSYNC),
+	PINMUX_IPSR_GPSR(IP3_28, QSTB_QHE),
+	PINMUX_IPSR_GPSR(IP3_31_29, DU0_EXODDF_DU0_ODDF_DISP_CDE),
+	PINMUX_IPSR_GPSR(IP3_31_29, QCPV_QDE),
+	PINMUX_IPSR_GPSR(IP3_31_29, CAN1_TX),
+	PINMUX_IPSR_GPSR(IP3_31_29, TX2_C),
 	PINMUX_IPSR_MSEL(IP3_31_29, SCL2_C, SEL_I2C2_2),
-	PINMUX_IPSR_DATA(IP3_31_29, REMOCON),
+	PINMUX_IPSR_GPSR(IP3_31_29, REMOCON),
 
-	PINMUX_IPSR_DATA(IP4_1_0, DU0_DISP),
-	PINMUX_IPSR_DATA(IP4_1_0, QPOLA),
+	PINMUX_IPSR_GPSR(IP4_1_0, DU0_DISP),
+	PINMUX_IPSR_GPSR(IP4_1_0, QPOLA),
 	PINMUX_IPSR_MSEL(IP4_1_0, CAN_CLK_C, SEL_CANCLK_2),
 	PINMUX_IPSR_MSEL(IP4_1_0, SCK2_C, SEL_SCIF2_2),
-	PINMUX_IPSR_DATA(IP4_4_2, DU0_CDE),
-	PINMUX_IPSR_DATA(IP4_4_2, QPOLB),
-	PINMUX_IPSR_DATA(IP4_4_2, CAN1_RX),
+	PINMUX_IPSR_GPSR(IP4_4_2, DU0_CDE),
+	PINMUX_IPSR_GPSR(IP4_4_2, QPOLB),
+	PINMUX_IPSR_GPSR(IP4_4_2, CAN1_RX),
 	PINMUX_IPSR_MSEL(IP4_4_2, RX2_C, SEL_SCIF2_2),
 	PINMUX_IPSR_MSEL(IP4_4_2, DREQ0_B, SEL_EXBUS0_1),
 	PINMUX_IPSR_MSEL(IP4_4_2, SSI_SCK78_B, SEL_SSI7_1),
 	PINMUX_IPSR_MSEL(IP4_4_2, SCK0_B, SEL_SCIF0_1),
-	PINMUX_IPSR_DATA(IP4_7_5, DU1_DR0),
-	PINMUX_IPSR_DATA(IP4_7_5, VI2_DATA0_VI2_B0),
-	PINMUX_IPSR_DATA(IP4_7_5, PWM6),
-	PINMUX_IPSR_DATA(IP4_7_5, SD3_CLK),
-	PINMUX_IPSR_DATA(IP4_7_5, TX3_E_IRDA_TX_E),
-	PINMUX_IPSR_DATA(IP4_7_5, AUDCK),
+	PINMUX_IPSR_GPSR(IP4_7_5, DU1_DR0),
+	PINMUX_IPSR_GPSR(IP4_7_5, VI2_DATA0_VI2_B0),
+	PINMUX_IPSR_GPSR(IP4_7_5, PWM6),
+	PINMUX_IPSR_GPSR(IP4_7_5, SD3_CLK),
+	PINMUX_IPSR_GPSR(IP4_7_5, TX3_E_IRDA_TX_E),
+	PINMUX_IPSR_GPSR(IP4_7_5, AUDCK),
 	PINMUX_IPSR_MSEL(IP4_7_5, PWMFSW0_B, SEL_PWMFSW_1),
-	PINMUX_IPSR_DATA(IP4_10_8, DU1_DR1),
-	PINMUX_IPSR_DATA(IP4_10_8, VI2_DATA1_VI2_B1),
-	PINMUX_IPSR_DATA(IP4_10_8, PWM0),
-	PINMUX_IPSR_DATA(IP4_10_8, SD3_CMD),
+	PINMUX_IPSR_GPSR(IP4_10_8, DU1_DR1),
+	PINMUX_IPSR_GPSR(IP4_10_8, VI2_DATA1_VI2_B1),
+	PINMUX_IPSR_GPSR(IP4_10_8, PWM0),
+	PINMUX_IPSR_GPSR(IP4_10_8, SD3_CMD),
 	PINMUX_IPSR_MSEL(IP4_10_8, RX3_E_IRDA_RX_E, SEL_SCIF3_4),
-	PINMUX_IPSR_DATA(IP4_10_8, AUDSYNC),
+	PINMUX_IPSR_GPSR(IP4_10_8, AUDSYNC),
 	PINMUX_IPSR_MSEL(IP4_10_8, CTS0_D, SEL_SCIF0_3),
-	PINMUX_IPSR_DATA(IP4_11, DU1_DR2),
-	PINMUX_IPSR_DATA(IP4_11, VI2_G0),
-	PINMUX_IPSR_DATA(IP4_12, DU1_DR3),
-	PINMUX_IPSR_DATA(IP4_12, VI2_G1),
-	PINMUX_IPSR_DATA(IP4_13, DU1_DR4),
-	PINMUX_IPSR_DATA(IP4_13, VI2_G2),
-	PINMUX_IPSR_DATA(IP4_14, DU1_DR5),
-	PINMUX_IPSR_DATA(IP4_14, VI2_G3),
-	PINMUX_IPSR_DATA(IP4_15, DU1_DR6),
-	PINMUX_IPSR_DATA(IP4_15, VI2_G4),
-	PINMUX_IPSR_DATA(IP4_16, DU1_DR7),
-	PINMUX_IPSR_DATA(IP4_16, VI2_G5),
-	PINMUX_IPSR_DATA(IP4_19_17, DU1_DG0),
-	PINMUX_IPSR_DATA(IP4_19_17, VI2_DATA2_VI2_B2),
+	PINMUX_IPSR_GPSR(IP4_11, DU1_DR2),
+	PINMUX_IPSR_GPSR(IP4_11, VI2_G0),
+	PINMUX_IPSR_GPSR(IP4_12, DU1_DR3),
+	PINMUX_IPSR_GPSR(IP4_12, VI2_G1),
+	PINMUX_IPSR_GPSR(IP4_13, DU1_DR4),
+	PINMUX_IPSR_GPSR(IP4_13, VI2_G2),
+	PINMUX_IPSR_GPSR(IP4_14, DU1_DR5),
+	PINMUX_IPSR_GPSR(IP4_14, VI2_G3),
+	PINMUX_IPSR_GPSR(IP4_15, DU1_DR6),
+	PINMUX_IPSR_GPSR(IP4_15, VI2_G4),
+	PINMUX_IPSR_GPSR(IP4_16, DU1_DR7),
+	PINMUX_IPSR_GPSR(IP4_16, VI2_G5),
+	PINMUX_IPSR_GPSR(IP4_19_17, DU1_DG0),
+	PINMUX_IPSR_GPSR(IP4_19_17, VI2_DATA2_VI2_B2),
 	PINMUX_IPSR_MSEL(IP4_19_17, SCL1_B, SEL_I2C1_1),
-	PINMUX_IPSR_DATA(IP4_19_17, SD3_DAT2),
+	PINMUX_IPSR_GPSR(IP4_19_17, SD3_DAT2),
 	PINMUX_IPSR_MSEL(IP4_19_17, SCK3_E, SEL_SCIF3_4),
-	PINMUX_IPSR_DATA(IP4_19_17, AUDATA6),
-	PINMUX_IPSR_DATA(IP4_19_17, TX0_D),
-	PINMUX_IPSR_DATA(IP4_22_20, DU1_DG1),
-	PINMUX_IPSR_DATA(IP4_22_20, VI2_DATA3_VI2_B3),
+	PINMUX_IPSR_GPSR(IP4_19_17, AUDATA6),
+	PINMUX_IPSR_GPSR(IP4_19_17, TX0_D),
+	PINMUX_IPSR_GPSR(IP4_22_20, DU1_DG1),
+	PINMUX_IPSR_GPSR(IP4_22_20, VI2_DATA3_VI2_B3),
 	PINMUX_IPSR_MSEL(IP4_22_20, SDA1_B, SEL_I2C1_1),
-	PINMUX_IPSR_DATA(IP4_22_20, SD3_DAT3),
+	PINMUX_IPSR_GPSR(IP4_22_20, SD3_DAT3),
 	PINMUX_IPSR_MSEL(IP4_22_20, SCK5, SEL_SCIF5_0),
-	PINMUX_IPSR_DATA(IP4_22_20, AUDATA7),
+	PINMUX_IPSR_GPSR(IP4_22_20, AUDATA7),
 	PINMUX_IPSR_MSEL(IP4_22_20, RX0_D, SEL_SCIF0_3),
-	PINMUX_IPSR_DATA(IP4_23, DU1_DG2),
-	PINMUX_IPSR_DATA(IP4_23, VI2_G6),
-	PINMUX_IPSR_DATA(IP4_24, DU1_DG3),
-	PINMUX_IPSR_DATA(IP4_24, VI2_G7),
-	PINMUX_IPSR_DATA(IP4_25, DU1_DG4),
-	PINMUX_IPSR_DATA(IP4_25, VI2_R0),
-	PINMUX_IPSR_DATA(IP4_26, DU1_DG5),
-	PINMUX_IPSR_DATA(IP4_26, VI2_R1),
-	PINMUX_IPSR_DATA(IP4_27, DU1_DG6),
-	PINMUX_IPSR_DATA(IP4_27, VI2_R2),
-	PINMUX_IPSR_DATA(IP4_28, DU1_DG7),
-	PINMUX_IPSR_DATA(IP4_28, VI2_R3),
-	PINMUX_IPSR_DATA(IP4_31_29, DU1_DB0),
-	PINMUX_IPSR_DATA(IP4_31_29, VI2_DATA4_VI2_B4),
+	PINMUX_IPSR_GPSR(IP4_23, DU1_DG2),
+	PINMUX_IPSR_GPSR(IP4_23, VI2_G6),
+	PINMUX_IPSR_GPSR(IP4_24, DU1_DG3),
+	PINMUX_IPSR_GPSR(IP4_24, VI2_G7),
+	PINMUX_IPSR_GPSR(IP4_25, DU1_DG4),
+	PINMUX_IPSR_GPSR(IP4_25, VI2_R0),
+	PINMUX_IPSR_GPSR(IP4_26, DU1_DG5),
+	PINMUX_IPSR_GPSR(IP4_26, VI2_R1),
+	PINMUX_IPSR_GPSR(IP4_27, DU1_DG6),
+	PINMUX_IPSR_GPSR(IP4_27, VI2_R2),
+	PINMUX_IPSR_GPSR(IP4_28, DU1_DG7),
+	PINMUX_IPSR_GPSR(IP4_28, VI2_R3),
+	PINMUX_IPSR_GPSR(IP4_31_29, DU1_DB0),
+	PINMUX_IPSR_GPSR(IP4_31_29, VI2_DATA4_VI2_B4),
 	PINMUX_IPSR_MSEL(IP4_31_29, SCL2_B, SEL_I2C2_1),
-	PINMUX_IPSR_DATA(IP4_31_29, SD3_DAT0),
-	PINMUX_IPSR_DATA(IP4_31_29, TX5),
+	PINMUX_IPSR_GPSR(IP4_31_29, SD3_DAT0),
+	PINMUX_IPSR_GPSR(IP4_31_29, TX5),
 	PINMUX_IPSR_MSEL(IP4_31_29, SCK0_D, SEL_SCIF0_3),
 
-	PINMUX_IPSR_DATA(IP5_2_0, DU1_DB1),
-	PINMUX_IPSR_DATA(IP5_2_0, VI2_DATA5_VI2_B5),
+	PINMUX_IPSR_GPSR(IP5_2_0, DU1_DB1),
+	PINMUX_IPSR_GPSR(IP5_2_0, VI2_DATA5_VI2_B5),
 	PINMUX_IPSR_MSEL(IP5_2_0, SDA2_B, SEL_I2C2_1),
-	PINMUX_IPSR_DATA(IP5_2_0, SD3_DAT1),
+	PINMUX_IPSR_GPSR(IP5_2_0, SD3_DAT1),
 	PINMUX_IPSR_MSEL(IP5_2_0, RX5, SEL_SCIF5_0),
 	PINMUX_IPSR_MSEL(IP5_2_0, RTS0_D_TANS_D, SEL_SCIF0_3),
-	PINMUX_IPSR_DATA(IP5_3, DU1_DB2),
-	PINMUX_IPSR_DATA(IP5_3, VI2_R4),
-	PINMUX_IPSR_DATA(IP5_4, DU1_DB3),
-	PINMUX_IPSR_DATA(IP5_4, VI2_R5),
-	PINMUX_IPSR_DATA(IP5_5, DU1_DB4),
-	PINMUX_IPSR_DATA(IP5_5, VI2_R6),
-	PINMUX_IPSR_DATA(IP5_6, DU1_DB5),
-	PINMUX_IPSR_DATA(IP5_6, VI2_R7),
-	PINMUX_IPSR_DATA(IP5_7, DU1_DB6),
+	PINMUX_IPSR_GPSR(IP5_3, DU1_DB2),
+	PINMUX_IPSR_GPSR(IP5_3, VI2_R4),
+	PINMUX_IPSR_GPSR(IP5_4, DU1_DB3),
+	PINMUX_IPSR_GPSR(IP5_4, VI2_R5),
+	PINMUX_IPSR_GPSR(IP5_5, DU1_DB4),
+	PINMUX_IPSR_GPSR(IP5_5, VI2_R6),
+	PINMUX_IPSR_GPSR(IP5_6, DU1_DB5),
+	PINMUX_IPSR_GPSR(IP5_6, VI2_R7),
+	PINMUX_IPSR_GPSR(IP5_7, DU1_DB6),
 	PINMUX_IPSR_MSEL(IP5_7, SCL2_D, SEL_I2C2_3),
-	PINMUX_IPSR_DATA(IP5_8, DU1_DB7),
+	PINMUX_IPSR_GPSR(IP5_8, DU1_DB7),
 	PINMUX_IPSR_MSEL(IP5_8, SDA2_D, SEL_I2C2_3),
-	PINMUX_IPSR_DATA(IP5_10_9, DU1_DOTCLKIN),
-	PINMUX_IPSR_DATA(IP5_10_9, VI2_CLKENB),
+	PINMUX_IPSR_GPSR(IP5_10_9, DU1_DOTCLKIN),
+	PINMUX_IPSR_GPSR(IP5_10_9, VI2_CLKENB),
 	PINMUX_IPSR_MSEL(IP5_10_9, HSPI_CS1, SEL_HSPI1_0),
 	PINMUX_IPSR_MSEL(IP5_10_9, SCL1_D, SEL_I2C1_3),
-	PINMUX_IPSR_DATA(IP5_12_11, DU1_DOTCLKOUT),
-	PINMUX_IPSR_DATA(IP5_12_11, VI2_FIELD),
+	PINMUX_IPSR_GPSR(IP5_12_11, DU1_DOTCLKOUT),
+	PINMUX_IPSR_GPSR(IP5_12_11, VI2_FIELD),
 	PINMUX_IPSR_MSEL(IP5_12_11, SDA1_D, SEL_I2C1_3),
-	PINMUX_IPSR_DATA(IP5_14_13, DU1_EXHSYNC_DU1_HSYNC),
-	PINMUX_IPSR_DATA(IP5_14_13, VI2_HSYNC),
-	PINMUX_IPSR_DATA(IP5_14_13, VI3_HSYNC),
-	PINMUX_IPSR_DATA(IP5_16_15, DU1_EXVSYNC_DU1_VSYNC),
-	PINMUX_IPSR_DATA(IP5_16_15, VI2_VSYNC),
-	PINMUX_IPSR_DATA(IP5_16_15, VI3_VSYNC),
-	PINMUX_IPSR_DATA(IP5_20_17, DU1_EXODDF_DU1_ODDF_DISP_CDE),
-	PINMUX_IPSR_DATA(IP5_20_17, VI2_CLK),
-	PINMUX_IPSR_DATA(IP5_20_17, TX3_B_IRDA_TX_B),
-	PINMUX_IPSR_DATA(IP5_20_17, SD3_CD),
-	PINMUX_IPSR_DATA(IP5_20_17, HSPI_TX1),
-	PINMUX_IPSR_DATA(IP5_20_17, VI1_CLKENB),
-	PINMUX_IPSR_DATA(IP5_20_17, VI3_CLKENB),
-	PINMUX_IPSR_DATA(IP5_20_17, AUDIO_CLKC),
-	PINMUX_IPSR_DATA(IP5_20_17, TX2_D),
-	PINMUX_IPSR_DATA(IP5_20_17, SPEEDIN),
+	PINMUX_IPSR_GPSR(IP5_14_13, DU1_EXHSYNC_DU1_HSYNC),
+	PINMUX_IPSR_GPSR(IP5_14_13, VI2_HSYNC),
+	PINMUX_IPSR_GPSR(IP5_14_13, VI3_HSYNC),
+	PINMUX_IPSR_GPSR(IP5_16_15, DU1_EXVSYNC_DU1_VSYNC),
+	PINMUX_IPSR_GPSR(IP5_16_15, VI2_VSYNC),
+	PINMUX_IPSR_GPSR(IP5_16_15, VI3_VSYNC),
+	PINMUX_IPSR_GPSR(IP5_20_17, DU1_EXODDF_DU1_ODDF_DISP_CDE),
+	PINMUX_IPSR_GPSR(IP5_20_17, VI2_CLK),
+	PINMUX_IPSR_GPSR(IP5_20_17, TX3_B_IRDA_TX_B),
+	PINMUX_IPSR_GPSR(IP5_20_17, SD3_CD),
+	PINMUX_IPSR_GPSR(IP5_20_17, HSPI_TX1),
+	PINMUX_IPSR_GPSR(IP5_20_17, VI1_CLKENB),
+	PINMUX_IPSR_GPSR(IP5_20_17, VI3_CLKENB),
+	PINMUX_IPSR_GPSR(IP5_20_17, AUDIO_CLKC),
+	PINMUX_IPSR_GPSR(IP5_20_17, TX2_D),
+	PINMUX_IPSR_GPSR(IP5_20_17, SPEEDIN),
 	PINMUX_IPSR_MSEL(IP5_20_17, GPS_SIGN_D, SEL_GPS_3),
-	PINMUX_IPSR_DATA(IP5_23_21, DU1_DISP),
-	PINMUX_IPSR_DATA(IP5_23_21, VI2_DATA6_VI2_B6),
+	PINMUX_IPSR_GPSR(IP5_23_21, DU1_DISP),
+	PINMUX_IPSR_GPSR(IP5_23_21, VI2_DATA6_VI2_B6),
 	PINMUX_IPSR_MSEL(IP5_23_21, TCLK0, SEL_TMU0_0),
-	PINMUX_IPSR_DATA(IP5_23_21, QSTVA_B_QVS_B),
+	PINMUX_IPSR_GPSR(IP5_23_21, QSTVA_B_QVS_B),
 	PINMUX_IPSR_MSEL(IP5_23_21, HSPI_CLK1, SEL_HSPI1_0),
 	PINMUX_IPSR_MSEL(IP5_23_21, SCK2_D, SEL_SCIF2_3),
-	PINMUX_IPSR_DATA(IP5_23_21, AUDIO_CLKOUT_B),
+	PINMUX_IPSR_GPSR(IP5_23_21, AUDIO_CLKOUT_B),
 	PINMUX_IPSR_MSEL(IP5_23_21, GPS_MAG_D, SEL_GPS_3),
-	PINMUX_IPSR_DATA(IP5_27_24, DU1_CDE),
-	PINMUX_IPSR_DATA(IP5_27_24, VI2_DATA7_VI2_B7),
+	PINMUX_IPSR_GPSR(IP5_27_24, DU1_CDE),
+	PINMUX_IPSR_GPSR(IP5_27_24, VI2_DATA7_VI2_B7),
 	PINMUX_IPSR_MSEL(IP5_27_24, RX3_B_IRDA_RX_B, SEL_SCIF3_1),
-	PINMUX_IPSR_DATA(IP5_27_24, SD3_WP),
+	PINMUX_IPSR_GPSR(IP5_27_24, SD3_WP),
 	PINMUX_IPSR_MSEL(IP5_27_24, HSPI_RX1, SEL_HSPI1_0),
-	PINMUX_IPSR_DATA(IP5_27_24, VI1_FIELD),
-	PINMUX_IPSR_DATA(IP5_27_24, VI3_FIELD),
-	PINMUX_IPSR_DATA(IP5_27_24, AUDIO_CLKOUT),
+	PINMUX_IPSR_GPSR(IP5_27_24, VI1_FIELD),
+	PINMUX_IPSR_GPSR(IP5_27_24, VI3_FIELD),
+	PINMUX_IPSR_GPSR(IP5_27_24, AUDIO_CLKOUT),
 	PINMUX_IPSR_MSEL(IP5_27_24, RX2_D, SEL_SCIF2_3),
 	PINMUX_IPSR_MSEL(IP5_27_24, GPS_CLK_C, SEL_GPS_2),
 	PINMUX_IPSR_MSEL(IP5_27_24, GPS_CLK_D, SEL_GPS_3),
-	PINMUX_IPSR_DATA(IP5_28, AUDIO_CLKA),
-	PINMUX_IPSR_DATA(IP5_28, CAN_TXCLK),
-	PINMUX_IPSR_DATA(IP5_30_29, AUDIO_CLKB),
-	PINMUX_IPSR_DATA(IP5_30_29, USB_OVC2),
-	PINMUX_IPSR_DATA(IP5_30_29, CAN_DEBUGOUT0),
-	PINMUX_IPSR_DATA(IP5_30_29, MOUT0),
+	PINMUX_IPSR_GPSR(IP5_28, AUDIO_CLKA),
+	PINMUX_IPSR_GPSR(IP5_28, CAN_TXCLK),
+	PINMUX_IPSR_GPSR(IP5_30_29, AUDIO_CLKB),
+	PINMUX_IPSR_GPSR(IP5_30_29, USB_OVC2),
+	PINMUX_IPSR_GPSR(IP5_30_29, CAN_DEBUGOUT0),
+	PINMUX_IPSR_GPSR(IP5_30_29, MOUT0),
 
-	PINMUX_IPSR_DATA(IP6_1_0, SSI_SCK0129),
-	PINMUX_IPSR_DATA(IP6_1_0, CAN_DEBUGOUT1),
-	PINMUX_IPSR_DATA(IP6_1_0, MOUT1),
-	PINMUX_IPSR_DATA(IP6_3_2, SSI_WS0129),
-	PINMUX_IPSR_DATA(IP6_3_2, CAN_DEBUGOUT2),
-	PINMUX_IPSR_DATA(IP6_3_2, MOUT2),
-	PINMUX_IPSR_DATA(IP6_5_4, SSI_SDATA0),
-	PINMUX_IPSR_DATA(IP6_5_4, CAN_DEBUGOUT3),
-	PINMUX_IPSR_DATA(IP6_5_4, MOUT5),
-	PINMUX_IPSR_DATA(IP6_7_6, SSI_SDATA1),
-	PINMUX_IPSR_DATA(IP6_7_6, CAN_DEBUGOUT4),
-	PINMUX_IPSR_DATA(IP6_7_6, MOUT6),
-	PINMUX_IPSR_DATA(IP6_8, SSI_SDATA2),
-	PINMUX_IPSR_DATA(IP6_8, CAN_DEBUGOUT5),
-	PINMUX_IPSR_DATA(IP6_11_9, SSI_SCK34),
-	PINMUX_IPSR_DATA(IP6_11_9, CAN_DEBUGOUT6),
-	PINMUX_IPSR_DATA(IP6_11_9, CAN0_TX_B),
+	PINMUX_IPSR_GPSR(IP6_1_0, SSI_SCK0129),
+	PINMUX_IPSR_GPSR(IP6_1_0, CAN_DEBUGOUT1),
+	PINMUX_IPSR_GPSR(IP6_1_0, MOUT1),
+	PINMUX_IPSR_GPSR(IP6_3_2, SSI_WS0129),
+	PINMUX_IPSR_GPSR(IP6_3_2, CAN_DEBUGOUT2),
+	PINMUX_IPSR_GPSR(IP6_3_2, MOUT2),
+	PINMUX_IPSR_GPSR(IP6_5_4, SSI_SDATA0),
+	PINMUX_IPSR_GPSR(IP6_5_4, CAN_DEBUGOUT3),
+	PINMUX_IPSR_GPSR(IP6_5_4, MOUT5),
+	PINMUX_IPSR_GPSR(IP6_7_6, SSI_SDATA1),
+	PINMUX_IPSR_GPSR(IP6_7_6, CAN_DEBUGOUT4),
+	PINMUX_IPSR_GPSR(IP6_7_6, MOUT6),
+	PINMUX_IPSR_GPSR(IP6_8, SSI_SDATA2),
+	PINMUX_IPSR_GPSR(IP6_8, CAN_DEBUGOUT5),
+	PINMUX_IPSR_GPSR(IP6_11_9, SSI_SCK34),
+	PINMUX_IPSR_GPSR(IP6_11_9, CAN_DEBUGOUT6),
+	PINMUX_IPSR_GPSR(IP6_11_9, CAN0_TX_B),
 	PINMUX_IPSR_MSEL(IP6_11_9, IERX, SEL_IE_0),
 	PINMUX_IPSR_MSEL(IP6_11_9, SSI_SCK9_C, SEL_SSI9_2),
-	PINMUX_IPSR_DATA(IP6_14_12, SSI_WS34),
-	PINMUX_IPSR_DATA(IP6_14_12, CAN_DEBUGOUT7),
+	PINMUX_IPSR_GPSR(IP6_14_12, SSI_WS34),
+	PINMUX_IPSR_GPSR(IP6_14_12, CAN_DEBUGOUT7),
 	PINMUX_IPSR_MSEL(IP6_14_12, CAN0_RX_B, SEL_CAN0_1),
-	PINMUX_IPSR_DATA(IP6_14_12, IETX),
+	PINMUX_IPSR_GPSR(IP6_14_12, IETX),
 	PINMUX_IPSR_MSEL(IP6_14_12, SSI_WS9_C, SEL_SSI9_2),
-	PINMUX_IPSR_DATA(IP6_17_15, SSI_SDATA3),
-	PINMUX_IPSR_DATA(IP6_17_15, PWM0_C),
-	PINMUX_IPSR_DATA(IP6_17_15, CAN_DEBUGOUT8),
+	PINMUX_IPSR_GPSR(IP6_17_15, SSI_SDATA3),
+	PINMUX_IPSR_GPSR(IP6_17_15, PWM0_C),
+	PINMUX_IPSR_GPSR(IP6_17_15, CAN_DEBUGOUT8),
 	PINMUX_IPSR_MSEL(IP6_17_15, CAN_CLK_B, SEL_CANCLK_1),
 	PINMUX_IPSR_MSEL(IP6_17_15, IECLK, SEL_IE_0),
 	PINMUX_IPSR_MSEL(IP6_17_15, SCIF_CLK_B, SEL_SCIF_1),
 	PINMUX_IPSR_MSEL(IP6_17_15, TCLK0_B, SEL_TMU0_1),
-	PINMUX_IPSR_DATA(IP6_19_18, SSI_SDATA4),
-	PINMUX_IPSR_DATA(IP6_19_18, CAN_DEBUGOUT9),
+	PINMUX_IPSR_GPSR(IP6_19_18, SSI_SDATA4),
+	PINMUX_IPSR_GPSR(IP6_19_18, CAN_DEBUGOUT9),
 	PINMUX_IPSR_MSEL(IP6_19_18, SSI_SDATA9_C, SEL_SSI9_2),
-	PINMUX_IPSR_DATA(IP6_22_20, SSI_SCK5),
-	PINMUX_IPSR_DATA(IP6_22_20, ADICLK),
-	PINMUX_IPSR_DATA(IP6_22_20, CAN_DEBUGOUT10),
+	PINMUX_IPSR_GPSR(IP6_22_20, SSI_SCK5),
+	PINMUX_IPSR_GPSR(IP6_22_20, ADICLK),
+	PINMUX_IPSR_GPSR(IP6_22_20, CAN_DEBUGOUT10),
 	PINMUX_IPSR_MSEL(IP6_22_20, SCK3, SEL_SCIF3_0),
 	PINMUX_IPSR_MSEL(IP6_22_20, TCLK0_D, SEL_TMU0_3),
-	PINMUX_IPSR_DATA(IP6_24_23, SSI_WS5),
+	PINMUX_IPSR_GPSR(IP6_24_23, SSI_WS5),
 	PINMUX_IPSR_MSEL(IP6_24_23, ADICS_SAMP, SEL_ADI_0),
-	PINMUX_IPSR_DATA(IP6_24_23, CAN_DEBUGOUT11),
-	PINMUX_IPSR_DATA(IP6_24_23, TX3_IRDA_TX),
-	PINMUX_IPSR_DATA(IP6_26_25, SSI_SDATA5),
+	PINMUX_IPSR_GPSR(IP6_24_23, CAN_DEBUGOUT11),
+	PINMUX_IPSR_GPSR(IP6_24_23, TX3_IRDA_TX),
+	PINMUX_IPSR_GPSR(IP6_26_25, SSI_SDATA5),
 	PINMUX_IPSR_MSEL(IP6_26_25, ADIDATA, SEL_ADI_0),
-	PINMUX_IPSR_DATA(IP6_26_25, CAN_DEBUGOUT12),
+	PINMUX_IPSR_GPSR(IP6_26_25, CAN_DEBUGOUT12),
 	PINMUX_IPSR_MSEL(IP6_26_25, RX3_IRDA_RX, SEL_SCIF3_0),
-	PINMUX_IPSR_DATA(IP6_30_29, SSI_SCK6),
-	PINMUX_IPSR_DATA(IP6_30_29, ADICHS0),
-	PINMUX_IPSR_DATA(IP6_30_29, CAN0_TX),
+	PINMUX_IPSR_GPSR(IP6_30_29, SSI_SCK6),
+	PINMUX_IPSR_GPSR(IP6_30_29, ADICHS0),
+	PINMUX_IPSR_GPSR(IP6_30_29, CAN0_TX),
 	PINMUX_IPSR_MSEL(IP6_30_29, IERX_B, SEL_IE_1),
 
-	PINMUX_IPSR_DATA(IP7_1_0, SSI_WS6),
-	PINMUX_IPSR_DATA(IP7_1_0, ADICHS1),
+	PINMUX_IPSR_GPSR(IP7_1_0, SSI_WS6),
+	PINMUX_IPSR_GPSR(IP7_1_0, ADICHS1),
 	PINMUX_IPSR_MSEL(IP7_1_0, CAN0_RX, SEL_CAN0_0),
-	PINMUX_IPSR_DATA(IP7_1_0, IETX_B),
-	PINMUX_IPSR_DATA(IP7_3_2, SSI_SDATA6),
-	PINMUX_IPSR_DATA(IP7_3_2, ADICHS2),
+	PINMUX_IPSR_GPSR(IP7_1_0, IETX_B),
+	PINMUX_IPSR_GPSR(IP7_3_2, SSI_SDATA6),
+	PINMUX_IPSR_GPSR(IP7_3_2, ADICHS2),
 	PINMUX_IPSR_MSEL(IP7_3_2, CAN_CLK, SEL_CANCLK_0),
 	PINMUX_IPSR_MSEL(IP7_3_2, IECLK_B, SEL_IE_1),
 	PINMUX_IPSR_MSEL(IP7_6_4, SSI_SCK78, SEL_SSI7_0),
-	PINMUX_IPSR_DATA(IP7_6_4, CAN_DEBUGOUT13),
+	PINMUX_IPSR_GPSR(IP7_6_4, CAN_DEBUGOUT13),
 	PINMUX_IPSR_MSEL(IP7_6_4, IRQ0_B, SEL_INT0_1),
 	PINMUX_IPSR_MSEL(IP7_6_4, SSI_SCK9_B, SEL_SSI9_1),
 	PINMUX_IPSR_MSEL(IP7_6_4, HSPI_CLK1_C, SEL_HSPI1_2),
 	PINMUX_IPSR_MSEL(IP7_9_7, SSI_WS78, SEL_SSI7_0),
-	PINMUX_IPSR_DATA(IP7_9_7, CAN_DEBUGOUT14),
+	PINMUX_IPSR_GPSR(IP7_9_7, CAN_DEBUGOUT14),
 	PINMUX_IPSR_MSEL(IP7_9_7, IRQ1_B, SEL_INT1_1),
 	PINMUX_IPSR_MSEL(IP7_9_7, SSI_WS9_B, SEL_SSI9_1),
 	PINMUX_IPSR_MSEL(IP7_9_7, HSPI_CS1_C, SEL_HSPI1_2),
 	PINMUX_IPSR_MSEL(IP7_12_10, SSI_SDATA7, SEL_SSI7_0),
-	PINMUX_IPSR_DATA(IP7_12_10, CAN_DEBUGOUT15),
+	PINMUX_IPSR_GPSR(IP7_12_10, CAN_DEBUGOUT15),
 	PINMUX_IPSR_MSEL(IP7_12_10, IRQ2_B, SEL_INT2_1),
 	PINMUX_IPSR_MSEL(IP7_12_10, TCLK1_C, SEL_TMU1_2),
-	PINMUX_IPSR_DATA(IP7_12_10, HSPI_TX1_C),
+	PINMUX_IPSR_GPSR(IP7_12_10, HSPI_TX1_C),
 	PINMUX_IPSR_MSEL(IP7_14_13, SSI_SDATA8, SEL_SSI8_0),
-	PINMUX_IPSR_DATA(IP7_14_13, VSP),
+	PINMUX_IPSR_GPSR(IP7_14_13, VSP),
 	PINMUX_IPSR_MSEL(IP7_14_13, IRQ3_B, SEL_INT3_1),
 	PINMUX_IPSR_MSEL(IP7_14_13, HSPI_RX1_C, SEL_HSPI1_2),
-	PINMUX_IPSR_DATA(IP7_16_15, SD0_CLK),
-	PINMUX_IPSR_DATA(IP7_16_15, ATACS01),
+	PINMUX_IPSR_GPSR(IP7_16_15, SD0_CLK),
+	PINMUX_IPSR_GPSR(IP7_16_15, ATACS01),
 	PINMUX_IPSR_MSEL(IP7_16_15, SCK1_B, SEL_SCIF1_1),
-	PINMUX_IPSR_DATA(IP7_18_17, SD0_CMD),
-	PINMUX_IPSR_DATA(IP7_18_17, ATACS11),
-	PINMUX_IPSR_DATA(IP7_18_17, TX1_B),
-	PINMUX_IPSR_DATA(IP7_18_17, CC5_TDO),
-	PINMUX_IPSR_DATA(IP7_20_19, SD0_DAT0),
-	PINMUX_IPSR_DATA(IP7_20_19, ATADIR1),
+	PINMUX_IPSR_GPSR(IP7_18_17, SD0_CMD),
+	PINMUX_IPSR_GPSR(IP7_18_17, ATACS11),
+	PINMUX_IPSR_GPSR(IP7_18_17, TX1_B),
+	PINMUX_IPSR_GPSR(IP7_18_17, CC5_TDO),
+	PINMUX_IPSR_GPSR(IP7_20_19, SD0_DAT0),
+	PINMUX_IPSR_GPSR(IP7_20_19, ATADIR1),
 	PINMUX_IPSR_MSEL(IP7_20_19, RX1_B, SEL_SCIF1_1),
-	PINMUX_IPSR_DATA(IP7_20_19, CC5_TRST),
-	PINMUX_IPSR_DATA(IP7_22_21, SD0_DAT1),
-	PINMUX_IPSR_DATA(IP7_22_21, ATAG1),
+	PINMUX_IPSR_GPSR(IP7_20_19, CC5_TRST),
+	PINMUX_IPSR_GPSR(IP7_22_21, SD0_DAT1),
+	PINMUX_IPSR_GPSR(IP7_22_21, ATAG1),
 	PINMUX_IPSR_MSEL(IP7_22_21, SCK2_B, SEL_SCIF2_1),
-	PINMUX_IPSR_DATA(IP7_22_21, CC5_TMS),
-	PINMUX_IPSR_DATA(IP7_24_23, SD0_DAT2),
-	PINMUX_IPSR_DATA(IP7_24_23, ATARD1),
-	PINMUX_IPSR_DATA(IP7_24_23, TX2_B),
-	PINMUX_IPSR_DATA(IP7_24_23, CC5_TCK),
-	PINMUX_IPSR_DATA(IP7_26_25, SD0_DAT3),
-	PINMUX_IPSR_DATA(IP7_26_25, ATAWR1),
+	PINMUX_IPSR_GPSR(IP7_22_21, CC5_TMS),
+	PINMUX_IPSR_GPSR(IP7_24_23, SD0_DAT2),
+	PINMUX_IPSR_GPSR(IP7_24_23, ATARD1),
+	PINMUX_IPSR_GPSR(IP7_24_23, TX2_B),
+	PINMUX_IPSR_GPSR(IP7_24_23, CC5_TCK),
+	PINMUX_IPSR_GPSR(IP7_26_25, SD0_DAT3),
+	PINMUX_IPSR_GPSR(IP7_26_25, ATAWR1),
 	PINMUX_IPSR_MSEL(IP7_26_25, RX2_B, SEL_SCIF2_1),
-	PINMUX_IPSR_DATA(IP7_26_25, CC5_TDI),
-	PINMUX_IPSR_DATA(IP7_28_27, SD0_CD),
+	PINMUX_IPSR_GPSR(IP7_26_25, CC5_TDI),
+	PINMUX_IPSR_GPSR(IP7_28_27, SD0_CD),
 	PINMUX_IPSR_MSEL(IP7_28_27, DREQ2, SEL_EXBUS2_0),
 	PINMUX_IPSR_MSEL(IP7_28_27, RTS1_B_TANS_B, SEL_SCIF1_1),
-	PINMUX_IPSR_DATA(IP7_30_29, SD0_WP),
-	PINMUX_IPSR_DATA(IP7_30_29, DACK2),
+	PINMUX_IPSR_GPSR(IP7_30_29, SD0_WP),
+	PINMUX_IPSR_GPSR(IP7_30_29, DACK2),
 	PINMUX_IPSR_MSEL(IP7_30_29, CTS1_B, SEL_SCIF1_1),
 
-	PINMUX_IPSR_DATA(IP8_3_0, HSPI_CLK0),
+	PINMUX_IPSR_GPSR(IP8_3_0, HSPI_CLK0),
 	PINMUX_IPSR_MSEL(IP8_3_0, CTS0, SEL_SCIF0_0),
-	PINMUX_IPSR_DATA(IP8_3_0, USB_OVC0),
-	PINMUX_IPSR_DATA(IP8_3_0, AD_CLK),
-	PINMUX_IPSR_DATA(IP8_3_0, CC5_STATE4),
-	PINMUX_IPSR_DATA(IP8_3_0, CC5_STATE12),
-	PINMUX_IPSR_DATA(IP8_3_0, CC5_STATE20),
-	PINMUX_IPSR_DATA(IP8_3_0, CC5_STATE28),
-	PINMUX_IPSR_DATA(IP8_3_0, CC5_STATE36),
-	PINMUX_IPSR_DATA(IP8_7_4, HSPI_CS0),
+	PINMUX_IPSR_GPSR(IP8_3_0, USB_OVC0),
+	PINMUX_IPSR_GPSR(IP8_3_0, AD_CLK),
+	PINMUX_IPSR_GPSR(IP8_3_0, CC5_STATE4),
+	PINMUX_IPSR_GPSR(IP8_3_0, CC5_STATE12),
+	PINMUX_IPSR_GPSR(IP8_3_0, CC5_STATE20),
+	PINMUX_IPSR_GPSR(IP8_3_0, CC5_STATE28),
+	PINMUX_IPSR_GPSR(IP8_3_0, CC5_STATE36),
+	PINMUX_IPSR_GPSR(IP8_7_4, HSPI_CS0),
 	PINMUX_IPSR_MSEL(IP8_7_4, RTS0_TANS, SEL_SCIF0_0),
-	PINMUX_IPSR_DATA(IP8_7_4, USB_OVC1),
-	PINMUX_IPSR_DATA(IP8_7_4, AD_DI),
-	PINMUX_IPSR_DATA(IP8_7_4, CC5_STATE5),
-	PINMUX_IPSR_DATA(IP8_7_4, CC5_STATE13),
-	PINMUX_IPSR_DATA(IP8_7_4, CC5_STATE21),
-	PINMUX_IPSR_DATA(IP8_7_4, CC5_STATE29),
-	PINMUX_IPSR_DATA(IP8_7_4, CC5_STATE37),
-	PINMUX_IPSR_DATA(IP8_11_8, HSPI_TX0),
-	PINMUX_IPSR_DATA(IP8_11_8, TX0),
-	PINMUX_IPSR_DATA(IP8_11_8, CAN_DEBUG_HW_TRIGGER),
-	PINMUX_IPSR_DATA(IP8_11_8, AD_DO),
-	PINMUX_IPSR_DATA(IP8_11_8, CC5_STATE6),
-	PINMUX_IPSR_DATA(IP8_11_8, CC5_STATE14),
-	PINMUX_IPSR_DATA(IP8_11_8, CC5_STATE22),
-	PINMUX_IPSR_DATA(IP8_11_8, CC5_STATE30),
-	PINMUX_IPSR_DATA(IP8_11_8, CC5_STATE38),
-	PINMUX_IPSR_DATA(IP8_15_12, HSPI_RX0),
+	PINMUX_IPSR_GPSR(IP8_7_4, USB_OVC1),
+	PINMUX_IPSR_GPSR(IP8_7_4, AD_DI),
+	PINMUX_IPSR_GPSR(IP8_7_4, CC5_STATE5),
+	PINMUX_IPSR_GPSR(IP8_7_4, CC5_STATE13),
+	PINMUX_IPSR_GPSR(IP8_7_4, CC5_STATE21),
+	PINMUX_IPSR_GPSR(IP8_7_4, CC5_STATE29),
+	PINMUX_IPSR_GPSR(IP8_7_4, CC5_STATE37),
+	PINMUX_IPSR_GPSR(IP8_11_8, HSPI_TX0),
+	PINMUX_IPSR_GPSR(IP8_11_8, TX0),
+	PINMUX_IPSR_GPSR(IP8_11_8, CAN_DEBUG_HW_TRIGGER),
+	PINMUX_IPSR_GPSR(IP8_11_8, AD_DO),
+	PINMUX_IPSR_GPSR(IP8_11_8, CC5_STATE6),
+	PINMUX_IPSR_GPSR(IP8_11_8, CC5_STATE14),
+	PINMUX_IPSR_GPSR(IP8_11_8, CC5_STATE22),
+	PINMUX_IPSR_GPSR(IP8_11_8, CC5_STATE30),
+	PINMUX_IPSR_GPSR(IP8_11_8, CC5_STATE38),
+	PINMUX_IPSR_GPSR(IP8_15_12, HSPI_RX0),
 	PINMUX_IPSR_MSEL(IP8_15_12, RX0, SEL_SCIF0_0),
-	PINMUX_IPSR_DATA(IP8_15_12, CAN_STEP0),
-	PINMUX_IPSR_DATA(IP8_15_12, AD_NCS),
-	PINMUX_IPSR_DATA(IP8_15_12, CC5_STATE7),
-	PINMUX_IPSR_DATA(IP8_15_12, CC5_STATE15),
-	PINMUX_IPSR_DATA(IP8_15_12, CC5_STATE23),
-	PINMUX_IPSR_DATA(IP8_15_12, CC5_STATE31),
-	PINMUX_IPSR_DATA(IP8_15_12, CC5_STATE39),
-	PINMUX_IPSR_DATA(IP8_17_16, FMCLK),
-	PINMUX_IPSR_DATA(IP8_17_16, RDS_CLK),
-	PINMUX_IPSR_DATA(IP8_17_16, PCMOE),
-	PINMUX_IPSR_DATA(IP8_18, BPFCLK),
-	PINMUX_IPSR_DATA(IP8_18, PCMWE),
-	PINMUX_IPSR_DATA(IP8_19, FMIN),
-	PINMUX_IPSR_DATA(IP8_19, RDS_DATA),
-	PINMUX_IPSR_DATA(IP8_20, VI0_CLK),
-	PINMUX_IPSR_DATA(IP8_20, MMC1_CLK),
-	PINMUX_IPSR_DATA(IP8_22_21, VI0_CLKENB),
-	PINMUX_IPSR_DATA(IP8_22_21, TX1_C),
-	PINMUX_IPSR_DATA(IP8_22_21, HTX1_B),
-	PINMUX_IPSR_DATA(IP8_22_21, MT1_SYNC),
-	PINMUX_IPSR_DATA(IP8_24_23, VI0_FIELD),
+	PINMUX_IPSR_GPSR(IP8_15_12, CAN_STEP0),
+	PINMUX_IPSR_GPSR(IP8_15_12, AD_NCS),
+	PINMUX_IPSR_GPSR(IP8_15_12, CC5_STATE7),
+	PINMUX_IPSR_GPSR(IP8_15_12, CC5_STATE15),
+	PINMUX_IPSR_GPSR(IP8_15_12, CC5_STATE23),
+	PINMUX_IPSR_GPSR(IP8_15_12, CC5_STATE31),
+	PINMUX_IPSR_GPSR(IP8_15_12, CC5_STATE39),
+	PINMUX_IPSR_GPSR(IP8_17_16, FMCLK),
+	PINMUX_IPSR_GPSR(IP8_17_16, RDS_CLK),
+	PINMUX_IPSR_GPSR(IP8_17_16, PCMOE),
+	PINMUX_IPSR_GPSR(IP8_18, BPFCLK),
+	PINMUX_IPSR_GPSR(IP8_18, PCMWE),
+	PINMUX_IPSR_GPSR(IP8_19, FMIN),
+	PINMUX_IPSR_GPSR(IP8_19, RDS_DATA),
+	PINMUX_IPSR_GPSR(IP8_20, VI0_CLK),
+	PINMUX_IPSR_GPSR(IP8_20, MMC1_CLK),
+	PINMUX_IPSR_GPSR(IP8_22_21, VI0_CLKENB),
+	PINMUX_IPSR_GPSR(IP8_22_21, TX1_C),
+	PINMUX_IPSR_GPSR(IP8_22_21, HTX1_B),
+	PINMUX_IPSR_GPSR(IP8_22_21, MT1_SYNC),
+	PINMUX_IPSR_GPSR(IP8_24_23, VI0_FIELD),
 	PINMUX_IPSR_MSEL(IP8_24_23, RX1_C, SEL_SCIF1_2),
 	PINMUX_IPSR_MSEL(IP8_24_23, HRX1_B, SEL_HSCIF1_1),
-	PINMUX_IPSR_DATA(IP8_27_25, VI0_HSYNC),
+	PINMUX_IPSR_GPSR(IP8_27_25, VI0_HSYNC),
 	PINMUX_IPSR_MSEL(IP8_27_25, VI0_DATA0_B_VI0_B0_B, SEL_VI0_1),
 	PINMUX_IPSR_MSEL(IP8_27_25, CTS1_C, SEL_SCIF1_2),
-	PINMUX_IPSR_DATA(IP8_27_25, TX4_D),
-	PINMUX_IPSR_DATA(IP8_27_25, MMC1_CMD),
+	PINMUX_IPSR_GPSR(IP8_27_25, TX4_D),
+	PINMUX_IPSR_GPSR(IP8_27_25, MMC1_CMD),
 	PINMUX_IPSR_MSEL(IP8_27_25, HSCK1_B, SEL_HSCIF1_1),
-	PINMUX_IPSR_DATA(IP8_30_28, VI0_VSYNC),
+	PINMUX_IPSR_GPSR(IP8_30_28, VI0_VSYNC),
 	PINMUX_IPSR_MSEL(IP8_30_28, VI0_DATA1_B_VI0_B1_B, SEL_VI0_1),
 	PINMUX_IPSR_MSEL(IP8_30_28, RTS1_C_TANS_C, SEL_SCIF1_2),
 	PINMUX_IPSR_MSEL(IP8_30_28, RX4_D, SEL_SCIF4_3),
@@ -1189,216 +1189,216 @@
 
 	PINMUX_IPSR_MSEL(IP9_1_0, VI0_DATA0_VI0_B0, SEL_VI0_0),
 	PINMUX_IPSR_MSEL(IP9_1_0, HRTS1_B, SEL_HSCIF1_1),
-	PINMUX_IPSR_DATA(IP9_1_0, MT1_VCXO),
+	PINMUX_IPSR_GPSR(IP9_1_0, MT1_VCXO),
 	PINMUX_IPSR_MSEL(IP9_3_2, VI0_DATA1_VI0_B1, SEL_VI0_0),
 	PINMUX_IPSR_MSEL(IP9_3_2, HCTS1_B, SEL_HSCIF1_1),
-	PINMUX_IPSR_DATA(IP9_3_2, MT1_PWM),
-	PINMUX_IPSR_DATA(IP9_4, VI0_DATA2_VI0_B2),
-	PINMUX_IPSR_DATA(IP9_4, MMC1_D0),
-	PINMUX_IPSR_DATA(IP9_5, VI0_DATA3_VI0_B3),
-	PINMUX_IPSR_DATA(IP9_5, MMC1_D1),
-	PINMUX_IPSR_DATA(IP9_6, VI0_DATA4_VI0_B4),
-	PINMUX_IPSR_DATA(IP9_6, MMC1_D2),
-	PINMUX_IPSR_DATA(IP9_7, VI0_DATA5_VI0_B5),
-	PINMUX_IPSR_DATA(IP9_7, MMC1_D3),
-	PINMUX_IPSR_DATA(IP9_9_8, VI0_DATA6_VI0_B6),
-	PINMUX_IPSR_DATA(IP9_9_8, MMC1_D4),
-	PINMUX_IPSR_DATA(IP9_9_8, ARM_TRACEDATA_0),
-	PINMUX_IPSR_DATA(IP9_11_10, VI0_DATA7_VI0_B7),
-	PINMUX_IPSR_DATA(IP9_11_10, MMC1_D5),
-	PINMUX_IPSR_DATA(IP9_11_10, ARM_TRACEDATA_1),
-	PINMUX_IPSR_DATA(IP9_13_12, VI0_G0),
+	PINMUX_IPSR_GPSR(IP9_3_2, MT1_PWM),
+	PINMUX_IPSR_GPSR(IP9_4, VI0_DATA2_VI0_B2),
+	PINMUX_IPSR_GPSR(IP9_4, MMC1_D0),
+	PINMUX_IPSR_GPSR(IP9_5, VI0_DATA3_VI0_B3),
+	PINMUX_IPSR_GPSR(IP9_5, MMC1_D1),
+	PINMUX_IPSR_GPSR(IP9_6, VI0_DATA4_VI0_B4),
+	PINMUX_IPSR_GPSR(IP9_6, MMC1_D2),
+	PINMUX_IPSR_GPSR(IP9_7, VI0_DATA5_VI0_B5),
+	PINMUX_IPSR_GPSR(IP9_7, MMC1_D3),
+	PINMUX_IPSR_GPSR(IP9_9_8, VI0_DATA6_VI0_B6),
+	PINMUX_IPSR_GPSR(IP9_9_8, MMC1_D4),
+	PINMUX_IPSR_GPSR(IP9_9_8, ARM_TRACEDATA_0),
+	PINMUX_IPSR_GPSR(IP9_11_10, VI0_DATA7_VI0_B7),
+	PINMUX_IPSR_GPSR(IP9_11_10, MMC1_D5),
+	PINMUX_IPSR_GPSR(IP9_11_10, ARM_TRACEDATA_1),
+	PINMUX_IPSR_GPSR(IP9_13_12, VI0_G0),
 	PINMUX_IPSR_MSEL(IP9_13_12, SSI_SCK78_C, SEL_SSI7_2),
 	PINMUX_IPSR_MSEL(IP9_13_12, IRQ0, SEL_INT0_0),
-	PINMUX_IPSR_DATA(IP9_13_12, ARM_TRACEDATA_2),
-	PINMUX_IPSR_DATA(IP9_15_14, VI0_G1),
+	PINMUX_IPSR_GPSR(IP9_13_12, ARM_TRACEDATA_2),
+	PINMUX_IPSR_GPSR(IP9_15_14, VI0_G1),
 	PINMUX_IPSR_MSEL(IP9_15_14, SSI_WS78_C, SEL_SSI7_2),
 	PINMUX_IPSR_MSEL(IP9_15_14, IRQ1, SEL_INT1_0),
-	PINMUX_IPSR_DATA(IP9_15_14, ARM_TRACEDATA_3),
-	PINMUX_IPSR_DATA(IP9_18_16, VI0_G2),
-	PINMUX_IPSR_DATA(IP9_18_16, ETH_TXD1),
-	PINMUX_IPSR_DATA(IP9_18_16, MMC1_D6),
-	PINMUX_IPSR_DATA(IP9_18_16, ARM_TRACEDATA_4),
-	PINMUX_IPSR_DATA(IP9_18_16, TS_SPSYNC0),
-	PINMUX_IPSR_DATA(IP9_21_19, VI0_G3),
-	PINMUX_IPSR_DATA(IP9_21_19, ETH_CRS_DV),
-	PINMUX_IPSR_DATA(IP9_21_19, MMC1_D7),
-	PINMUX_IPSR_DATA(IP9_21_19, ARM_TRACEDATA_5),
-	PINMUX_IPSR_DATA(IP9_21_19, TS_SDAT0),
-	PINMUX_IPSR_DATA(IP9_23_22, VI0_G4),
-	PINMUX_IPSR_DATA(IP9_23_22, ETH_TX_EN),
+	PINMUX_IPSR_GPSR(IP9_15_14, ARM_TRACEDATA_3),
+	PINMUX_IPSR_GPSR(IP9_18_16, VI0_G2),
+	PINMUX_IPSR_GPSR(IP9_18_16, ETH_TXD1),
+	PINMUX_IPSR_GPSR(IP9_18_16, MMC1_D6),
+	PINMUX_IPSR_GPSR(IP9_18_16, ARM_TRACEDATA_4),
+	PINMUX_IPSR_GPSR(IP9_18_16, TS_SPSYNC0),
+	PINMUX_IPSR_GPSR(IP9_21_19, VI0_G3),
+	PINMUX_IPSR_GPSR(IP9_21_19, ETH_CRS_DV),
+	PINMUX_IPSR_GPSR(IP9_21_19, MMC1_D7),
+	PINMUX_IPSR_GPSR(IP9_21_19, ARM_TRACEDATA_5),
+	PINMUX_IPSR_GPSR(IP9_21_19, TS_SDAT0),
+	PINMUX_IPSR_GPSR(IP9_23_22, VI0_G4),
+	PINMUX_IPSR_GPSR(IP9_23_22, ETH_TX_EN),
 	PINMUX_IPSR_MSEL(IP9_23_22, SD2_DAT0_B, SEL_SD2_1),
-	PINMUX_IPSR_DATA(IP9_23_22, ARM_TRACEDATA_6),
-	PINMUX_IPSR_DATA(IP9_25_24, VI0_G5),
-	PINMUX_IPSR_DATA(IP9_25_24, ETH_RX_ER),
+	PINMUX_IPSR_GPSR(IP9_23_22, ARM_TRACEDATA_6),
+	PINMUX_IPSR_GPSR(IP9_25_24, VI0_G5),
+	PINMUX_IPSR_GPSR(IP9_25_24, ETH_RX_ER),
 	PINMUX_IPSR_MSEL(IP9_25_24, SD2_DAT1_B, SEL_SD2_1),
-	PINMUX_IPSR_DATA(IP9_25_24, ARM_TRACEDATA_7),
-	PINMUX_IPSR_DATA(IP9_27_26, VI0_G6),
-	PINMUX_IPSR_DATA(IP9_27_26, ETH_RXD0),
+	PINMUX_IPSR_GPSR(IP9_25_24, ARM_TRACEDATA_7),
+	PINMUX_IPSR_GPSR(IP9_27_26, VI0_G6),
+	PINMUX_IPSR_GPSR(IP9_27_26, ETH_RXD0),
 	PINMUX_IPSR_MSEL(IP9_27_26, SD2_DAT2_B, SEL_SD2_1),
-	PINMUX_IPSR_DATA(IP9_27_26, ARM_TRACEDATA_8),
-	PINMUX_IPSR_DATA(IP9_29_28, VI0_G7),
-	PINMUX_IPSR_DATA(IP9_29_28, ETH_RXD1),
+	PINMUX_IPSR_GPSR(IP9_27_26, ARM_TRACEDATA_8),
+	PINMUX_IPSR_GPSR(IP9_29_28, VI0_G7),
+	PINMUX_IPSR_GPSR(IP9_29_28, ETH_RXD1),
 	PINMUX_IPSR_MSEL(IP9_29_28, SD2_DAT3_B, SEL_SD2_1),
-	PINMUX_IPSR_DATA(IP9_29_28, ARM_TRACEDATA_9),
+	PINMUX_IPSR_GPSR(IP9_29_28, ARM_TRACEDATA_9),
 
-	PINMUX_IPSR_DATA(IP10_2_0, VI0_R0),
+	PINMUX_IPSR_GPSR(IP10_2_0, VI0_R0),
 	PINMUX_IPSR_MSEL(IP10_2_0, SSI_SDATA7_C, SEL_SSI7_2),
 	PINMUX_IPSR_MSEL(IP10_2_0, SCK1_C, SEL_SCIF1_2),
 	PINMUX_IPSR_MSEL(IP10_2_0, DREQ1_B, SEL_EXBUS1_0),
-	PINMUX_IPSR_DATA(IP10_2_0, ARM_TRACEDATA_10),
+	PINMUX_IPSR_GPSR(IP10_2_0, ARM_TRACEDATA_10),
 	PINMUX_IPSR_MSEL(IP10_2_0, DREQ0_C, SEL_EXBUS0_2),
-	PINMUX_IPSR_DATA(IP10_5_3, VI0_R1),
+	PINMUX_IPSR_GPSR(IP10_5_3, VI0_R1),
 	PINMUX_IPSR_MSEL(IP10_5_3, SSI_SDATA8_C, SEL_SSI8_2),
-	PINMUX_IPSR_DATA(IP10_5_3, DACK1_B),
-	PINMUX_IPSR_DATA(IP10_5_3, ARM_TRACEDATA_11),
-	PINMUX_IPSR_DATA(IP10_5_3, DACK0_C),
-	PINMUX_IPSR_DATA(IP10_5_3, DRACK0_C),
-	PINMUX_IPSR_DATA(IP10_8_6, VI0_R2),
-	PINMUX_IPSR_DATA(IP10_8_6, ETH_LINK),
-	PINMUX_IPSR_DATA(IP10_8_6, SD2_CLK_B),
+	PINMUX_IPSR_GPSR(IP10_5_3, DACK1_B),
+	PINMUX_IPSR_GPSR(IP10_5_3, ARM_TRACEDATA_11),
+	PINMUX_IPSR_GPSR(IP10_5_3, DACK0_C),
+	PINMUX_IPSR_GPSR(IP10_5_3, DRACK0_C),
+	PINMUX_IPSR_GPSR(IP10_8_6, VI0_R2),
+	PINMUX_IPSR_GPSR(IP10_8_6, ETH_LINK),
+	PINMUX_IPSR_GPSR(IP10_8_6, SD2_CLK_B),
 	PINMUX_IPSR_MSEL(IP10_8_6, IRQ2, SEL_INT2_0),
-	PINMUX_IPSR_DATA(IP10_8_6, ARM_TRACEDATA_12),
-	PINMUX_IPSR_DATA(IP10_11_9, VI0_R3),
-	PINMUX_IPSR_DATA(IP10_11_9, ETH_MAGIC),
+	PINMUX_IPSR_GPSR(IP10_8_6, ARM_TRACEDATA_12),
+	PINMUX_IPSR_GPSR(IP10_11_9, VI0_R3),
+	PINMUX_IPSR_GPSR(IP10_11_9, ETH_MAGIC),
 	PINMUX_IPSR_MSEL(IP10_11_9, SD2_CMD_B, SEL_SD2_1),
 	PINMUX_IPSR_MSEL(IP10_11_9, IRQ3, SEL_INT3_0),
-	PINMUX_IPSR_DATA(IP10_11_9, ARM_TRACEDATA_13),
-	PINMUX_IPSR_DATA(IP10_14_12, VI0_R4),
-	PINMUX_IPSR_DATA(IP10_14_12, ETH_REFCLK),
+	PINMUX_IPSR_GPSR(IP10_11_9, ARM_TRACEDATA_13),
+	PINMUX_IPSR_GPSR(IP10_14_12, VI0_R4),
+	PINMUX_IPSR_GPSR(IP10_14_12, ETH_REFCLK),
 	PINMUX_IPSR_MSEL(IP10_14_12, SD2_CD_B, SEL_SD2_1),
 	PINMUX_IPSR_MSEL(IP10_14_12, HSPI_CLK1_B, SEL_HSPI1_1),
-	PINMUX_IPSR_DATA(IP10_14_12, ARM_TRACEDATA_14),
-	PINMUX_IPSR_DATA(IP10_14_12, MT1_CLK),
-	PINMUX_IPSR_DATA(IP10_14_12, TS_SCK0),
-	PINMUX_IPSR_DATA(IP10_17_15, VI0_R5),
-	PINMUX_IPSR_DATA(IP10_17_15, ETH_TXD0),
+	PINMUX_IPSR_GPSR(IP10_14_12, ARM_TRACEDATA_14),
+	PINMUX_IPSR_GPSR(IP10_14_12, MT1_CLK),
+	PINMUX_IPSR_GPSR(IP10_14_12, TS_SCK0),
+	PINMUX_IPSR_GPSR(IP10_17_15, VI0_R5),
+	PINMUX_IPSR_GPSR(IP10_17_15, ETH_TXD0),
 	PINMUX_IPSR_MSEL(IP10_17_15, SD2_WP_B, SEL_SD2_1),
 	PINMUX_IPSR_MSEL(IP10_17_15, HSPI_CS1_B, SEL_HSPI1_1),
-	PINMUX_IPSR_DATA(IP10_17_15, ARM_TRACEDATA_15),
-	PINMUX_IPSR_DATA(IP10_17_15, MT1_D),
-	PINMUX_IPSR_DATA(IP10_17_15, TS_SDEN0),
-	PINMUX_IPSR_DATA(IP10_20_18, VI0_R6),
-	PINMUX_IPSR_DATA(IP10_20_18, ETH_MDC),
+	PINMUX_IPSR_GPSR(IP10_17_15, ARM_TRACEDATA_15),
+	PINMUX_IPSR_GPSR(IP10_17_15, MT1_D),
+	PINMUX_IPSR_GPSR(IP10_17_15, TS_SDEN0),
+	PINMUX_IPSR_GPSR(IP10_20_18, VI0_R6),
+	PINMUX_IPSR_GPSR(IP10_20_18, ETH_MDC),
 	PINMUX_IPSR_MSEL(IP10_20_18, DREQ2_C, SEL_EXBUS2_2),
-	PINMUX_IPSR_DATA(IP10_20_18, HSPI_TX1_B),
-	PINMUX_IPSR_DATA(IP10_20_18, TRACECLK),
-	PINMUX_IPSR_DATA(IP10_20_18, MT1_BEN),
+	PINMUX_IPSR_GPSR(IP10_20_18, HSPI_TX1_B),
+	PINMUX_IPSR_GPSR(IP10_20_18, TRACECLK),
+	PINMUX_IPSR_GPSR(IP10_20_18, MT1_BEN),
 	PINMUX_IPSR_MSEL(IP10_20_18, PWMFSW0_D, SEL_PWMFSW_3),
-	PINMUX_IPSR_DATA(IP10_23_21, VI0_R7),
-	PINMUX_IPSR_DATA(IP10_23_21, ETH_MDIO),
-	PINMUX_IPSR_DATA(IP10_23_21, DACK2_C),
+	PINMUX_IPSR_GPSR(IP10_23_21, VI0_R7),
+	PINMUX_IPSR_GPSR(IP10_23_21, ETH_MDIO),
+	PINMUX_IPSR_GPSR(IP10_23_21, DACK2_C),
 	PINMUX_IPSR_MSEL(IP10_23_21, HSPI_RX1_B, SEL_HSPI1_1),
 	PINMUX_IPSR_MSEL(IP10_23_21, SCIF_CLK_D, SEL_SCIF_3),
-	PINMUX_IPSR_DATA(IP10_23_21, TRACECTL),
-	PINMUX_IPSR_DATA(IP10_23_21, MT1_PEN),
-	PINMUX_IPSR_DATA(IP10_25_24, VI1_CLK),
+	PINMUX_IPSR_GPSR(IP10_23_21, TRACECTL),
+	PINMUX_IPSR_GPSR(IP10_23_21, MT1_PEN),
+	PINMUX_IPSR_GPSR(IP10_25_24, VI1_CLK),
 	PINMUX_IPSR_MSEL(IP10_25_24, SIM_D, SEL_SIM_0),
 	PINMUX_IPSR_MSEL(IP10_25_24, SDA3, SEL_I2C3_0),
-	PINMUX_IPSR_DATA(IP10_28_26, VI1_HSYNC),
-	PINMUX_IPSR_DATA(IP10_28_26, VI3_CLK),
-	PINMUX_IPSR_DATA(IP10_28_26, SSI_SCK4),
+	PINMUX_IPSR_GPSR(IP10_28_26, VI1_HSYNC),
+	PINMUX_IPSR_GPSR(IP10_28_26, VI3_CLK),
+	PINMUX_IPSR_GPSR(IP10_28_26, SSI_SCK4),
 	PINMUX_IPSR_MSEL(IP10_28_26, GPS_SIGN_C, SEL_GPS_2),
 	PINMUX_IPSR_MSEL(IP10_28_26, PWMFSW0_E, SEL_PWMFSW_4),
-	PINMUX_IPSR_DATA(IP10_31_29, VI1_VSYNC),
-	PINMUX_IPSR_DATA(IP10_31_29, AUDIO_CLKOUT_C),
-	PINMUX_IPSR_DATA(IP10_31_29, SSI_WS4),
-	PINMUX_IPSR_DATA(IP10_31_29, SIM_CLK),
+	PINMUX_IPSR_GPSR(IP10_31_29, VI1_VSYNC),
+	PINMUX_IPSR_GPSR(IP10_31_29, AUDIO_CLKOUT_C),
+	PINMUX_IPSR_GPSR(IP10_31_29, SSI_WS4),
+	PINMUX_IPSR_GPSR(IP10_31_29, SIM_CLK),
 	PINMUX_IPSR_MSEL(IP10_31_29, GPS_MAG_C, SEL_GPS_2),
-	PINMUX_IPSR_DATA(IP10_31_29, SPV_TRST),
+	PINMUX_IPSR_GPSR(IP10_31_29, SPV_TRST),
 	PINMUX_IPSR_MSEL(IP10_31_29, SCL3, SEL_I2C3_0),
 
-	PINMUX_IPSR_DATA(IP11_2_0, VI1_DATA0_VI1_B0),
+	PINMUX_IPSR_GPSR(IP11_2_0, VI1_DATA0_VI1_B0),
 	PINMUX_IPSR_MSEL(IP11_2_0, SD2_DAT0, SEL_SD2_0),
-	PINMUX_IPSR_DATA(IP11_2_0, SIM_RST),
-	PINMUX_IPSR_DATA(IP11_2_0, SPV_TCK),
-	PINMUX_IPSR_DATA(IP11_2_0, ADICLK_B),
-	PINMUX_IPSR_DATA(IP11_5_3, VI1_DATA1_VI1_B1),
+	PINMUX_IPSR_GPSR(IP11_2_0, SIM_RST),
+	PINMUX_IPSR_GPSR(IP11_2_0, SPV_TCK),
+	PINMUX_IPSR_GPSR(IP11_2_0, ADICLK_B),
+	PINMUX_IPSR_GPSR(IP11_5_3, VI1_DATA1_VI1_B1),
 	PINMUX_IPSR_MSEL(IP11_5_3, SD2_DAT1, SEL_SD2_0),
-	PINMUX_IPSR_DATA(IP11_5_3, MT0_CLK),
-	PINMUX_IPSR_DATA(IP11_5_3, SPV_TMS),
+	PINMUX_IPSR_GPSR(IP11_5_3, MT0_CLK),
+	PINMUX_IPSR_GPSR(IP11_5_3, SPV_TMS),
 	PINMUX_IPSR_MSEL(IP11_5_3, ADICS_B_SAMP_B, SEL_ADI_1),
-	PINMUX_IPSR_DATA(IP11_8_6, VI1_DATA2_VI1_B2),
+	PINMUX_IPSR_GPSR(IP11_8_6, VI1_DATA2_VI1_B2),
 	PINMUX_IPSR_MSEL(IP11_8_6, SD2_DAT2, SEL_SD2_0),
-	PINMUX_IPSR_DATA(IP11_8_6, MT0_D),
-	PINMUX_IPSR_DATA(IP11_8_6, SPVTDI),
+	PINMUX_IPSR_GPSR(IP11_8_6, MT0_D),
+	PINMUX_IPSR_GPSR(IP11_8_6, SPVTDI),
 	PINMUX_IPSR_MSEL(IP11_8_6, ADIDATA_B, SEL_ADI_1),
-	PINMUX_IPSR_DATA(IP11_11_9, VI1_DATA3_VI1_B3),
+	PINMUX_IPSR_GPSR(IP11_11_9, VI1_DATA3_VI1_B3),
 	PINMUX_IPSR_MSEL(IP11_11_9, SD2_DAT3, SEL_SD2_0),
-	PINMUX_IPSR_DATA(IP11_11_9, MT0_BEN),
-	PINMUX_IPSR_DATA(IP11_11_9, SPV_TDO),
-	PINMUX_IPSR_DATA(IP11_11_9, ADICHS0_B),
-	PINMUX_IPSR_DATA(IP11_14_12, VI1_DATA4_VI1_B4),
-	PINMUX_IPSR_DATA(IP11_14_12, SD2_CLK),
-	PINMUX_IPSR_DATA(IP11_14_12, MT0_PEN),
-	PINMUX_IPSR_DATA(IP11_14_12, SPA_TRST),
+	PINMUX_IPSR_GPSR(IP11_11_9, MT0_BEN),
+	PINMUX_IPSR_GPSR(IP11_11_9, SPV_TDO),
+	PINMUX_IPSR_GPSR(IP11_11_9, ADICHS0_B),
+	PINMUX_IPSR_GPSR(IP11_14_12, VI1_DATA4_VI1_B4),
+	PINMUX_IPSR_GPSR(IP11_14_12, SD2_CLK),
+	PINMUX_IPSR_GPSR(IP11_14_12, MT0_PEN),
+	PINMUX_IPSR_GPSR(IP11_14_12, SPA_TRST),
 	PINMUX_IPSR_MSEL(IP11_14_12, HSPI_CLK1_D, SEL_HSPI1_3),
-	PINMUX_IPSR_DATA(IP11_14_12, ADICHS1_B),
-	PINMUX_IPSR_DATA(IP11_17_15, VI1_DATA5_VI1_B5),
+	PINMUX_IPSR_GPSR(IP11_14_12, ADICHS1_B),
+	PINMUX_IPSR_GPSR(IP11_17_15, VI1_DATA5_VI1_B5),
 	PINMUX_IPSR_MSEL(IP11_17_15, SD2_CMD, SEL_SD2_0),
-	PINMUX_IPSR_DATA(IP11_17_15, MT0_SYNC),
-	PINMUX_IPSR_DATA(IP11_17_15, SPA_TCK),
+	PINMUX_IPSR_GPSR(IP11_17_15, MT0_SYNC),
+	PINMUX_IPSR_GPSR(IP11_17_15, SPA_TCK),
 	PINMUX_IPSR_MSEL(IP11_17_15, HSPI_CS1_D, SEL_HSPI1_3),
-	PINMUX_IPSR_DATA(IP11_17_15, ADICHS2_B),
-	PINMUX_IPSR_DATA(IP11_20_18, VI1_DATA6_VI1_B6),
+	PINMUX_IPSR_GPSR(IP11_17_15, ADICHS2_B),
+	PINMUX_IPSR_GPSR(IP11_20_18, VI1_DATA6_VI1_B6),
 	PINMUX_IPSR_MSEL(IP11_20_18, SD2_CD, SEL_SD2_0),
-	PINMUX_IPSR_DATA(IP11_20_18, MT0_VCXO),
-	PINMUX_IPSR_DATA(IP11_20_18, SPA_TMS),
-	PINMUX_IPSR_DATA(IP11_20_18, HSPI_TX1_D),
-	PINMUX_IPSR_DATA(IP11_23_21, VI1_DATA7_VI1_B7),
+	PINMUX_IPSR_GPSR(IP11_20_18, MT0_VCXO),
+	PINMUX_IPSR_GPSR(IP11_20_18, SPA_TMS),
+	PINMUX_IPSR_GPSR(IP11_20_18, HSPI_TX1_D),
+	PINMUX_IPSR_GPSR(IP11_23_21, VI1_DATA7_VI1_B7),
 	PINMUX_IPSR_MSEL(IP11_23_21, SD2_WP, SEL_SD2_0),
-	PINMUX_IPSR_DATA(IP11_23_21, MT0_PWM),
-	PINMUX_IPSR_DATA(IP11_23_21, SPA_TDI),
+	PINMUX_IPSR_GPSR(IP11_23_21, MT0_PWM),
+	PINMUX_IPSR_GPSR(IP11_23_21, SPA_TDI),
 	PINMUX_IPSR_MSEL(IP11_23_21, HSPI_RX1_D, SEL_HSPI1_3),
-	PINMUX_IPSR_DATA(IP11_26_24, VI1_G0),
-	PINMUX_IPSR_DATA(IP11_26_24, VI3_DATA0),
-	PINMUX_IPSR_DATA(IP11_26_24, TS_SCK1),
+	PINMUX_IPSR_GPSR(IP11_26_24, VI1_G0),
+	PINMUX_IPSR_GPSR(IP11_26_24, VI3_DATA0),
+	PINMUX_IPSR_GPSR(IP11_26_24, TS_SCK1),
 	PINMUX_IPSR_MSEL(IP11_26_24, DREQ2_B, SEL_EXBUS2_1),
-	PINMUX_IPSR_DATA(IP11_26_24, TX2),
-	PINMUX_IPSR_DATA(IP11_26_24, SPA_TDO),
+	PINMUX_IPSR_GPSR(IP11_26_24, TX2),
+	PINMUX_IPSR_GPSR(IP11_26_24, SPA_TDO),
 	PINMUX_IPSR_MSEL(IP11_26_24, HCTS0_B, SEL_HSCIF0_1),
-	PINMUX_IPSR_DATA(IP11_29_27, VI1_G1),
-	PINMUX_IPSR_DATA(IP11_29_27, VI3_DATA1),
-	PINMUX_IPSR_DATA(IP11_29_27, SSI_SCK1),
-	PINMUX_IPSR_DATA(IP11_29_27, TS_SDEN1),
-	PINMUX_IPSR_DATA(IP11_29_27, DACK2_B),
+	PINMUX_IPSR_GPSR(IP11_29_27, VI1_G1),
+	PINMUX_IPSR_GPSR(IP11_29_27, VI3_DATA1),
+	PINMUX_IPSR_GPSR(IP11_29_27, SSI_SCK1),
+	PINMUX_IPSR_GPSR(IP11_29_27, TS_SDEN1),
+	PINMUX_IPSR_GPSR(IP11_29_27, DACK2_B),
 	PINMUX_IPSR_MSEL(IP11_29_27, RX2, SEL_SCIF2_0),
 	PINMUX_IPSR_MSEL(IP11_29_27, HRTS0_B, SEL_HSCIF0_1),
 
-	PINMUX_IPSR_DATA(IP12_2_0, VI1_G2),
-	PINMUX_IPSR_DATA(IP12_2_0, VI3_DATA2),
-	PINMUX_IPSR_DATA(IP12_2_0, SSI_WS1),
-	PINMUX_IPSR_DATA(IP12_2_0, TS_SPSYNC1),
+	PINMUX_IPSR_GPSR(IP12_2_0, VI1_G2),
+	PINMUX_IPSR_GPSR(IP12_2_0, VI3_DATA2),
+	PINMUX_IPSR_GPSR(IP12_2_0, SSI_WS1),
+	PINMUX_IPSR_GPSR(IP12_2_0, TS_SPSYNC1),
 	PINMUX_IPSR_MSEL(IP12_2_0, SCK2, SEL_SCIF2_0),
 	PINMUX_IPSR_MSEL(IP12_2_0, HSCK0_B, SEL_HSCIF0_1),
-	PINMUX_IPSR_DATA(IP12_5_3, VI1_G3),
-	PINMUX_IPSR_DATA(IP12_5_3, VI3_DATA3),
-	PINMUX_IPSR_DATA(IP12_5_3, SSI_SCK2),
-	PINMUX_IPSR_DATA(IP12_5_3, TS_SDAT1),
+	PINMUX_IPSR_GPSR(IP12_5_3, VI1_G3),
+	PINMUX_IPSR_GPSR(IP12_5_3, VI3_DATA3),
+	PINMUX_IPSR_GPSR(IP12_5_3, SSI_SCK2),
+	PINMUX_IPSR_GPSR(IP12_5_3, TS_SDAT1),
 	PINMUX_IPSR_MSEL(IP12_5_3, SCL1_C, SEL_I2C1_2),
-	PINMUX_IPSR_DATA(IP12_5_3, HTX0_B),
-	PINMUX_IPSR_DATA(IP12_8_6, VI1_G4),
-	PINMUX_IPSR_DATA(IP12_8_6, VI3_DATA4),
-	PINMUX_IPSR_DATA(IP12_8_6, SSI_WS2),
+	PINMUX_IPSR_GPSR(IP12_5_3, HTX0_B),
+	PINMUX_IPSR_GPSR(IP12_8_6, VI1_G4),
+	PINMUX_IPSR_GPSR(IP12_8_6, VI3_DATA4),
+	PINMUX_IPSR_GPSR(IP12_8_6, SSI_WS2),
 	PINMUX_IPSR_MSEL(IP12_8_6, SDA1_C, SEL_I2C1_2),
-	PINMUX_IPSR_DATA(IP12_8_6, SIM_RST_B),
+	PINMUX_IPSR_GPSR(IP12_8_6, SIM_RST_B),
 	PINMUX_IPSR_MSEL(IP12_8_6, HRX0_B, SEL_HSCIF0_1),
-	PINMUX_IPSR_DATA(IP12_11_9, VI1_G5),
-	PINMUX_IPSR_DATA(IP12_11_9, VI3_DATA5),
+	PINMUX_IPSR_GPSR(IP12_11_9, VI1_G5),
+	PINMUX_IPSR_GPSR(IP12_11_9, VI3_DATA5),
 	PINMUX_IPSR_MSEL(IP12_11_9, GPS_CLK, SEL_GPS_0),
-	PINMUX_IPSR_DATA(IP12_11_9, FSE),
-	PINMUX_IPSR_DATA(IP12_11_9, TX4_B),
+	PINMUX_IPSR_GPSR(IP12_11_9, FSE),
+	PINMUX_IPSR_GPSR(IP12_11_9, TX4_B),
 	PINMUX_IPSR_MSEL(IP12_11_9, SIM_D_B, SEL_SIM_1),
-	PINMUX_IPSR_DATA(IP12_14_12, VI1_G6),
-	PINMUX_IPSR_DATA(IP12_14_12, VI3_DATA6),
+	PINMUX_IPSR_GPSR(IP12_14_12, VI1_G6),
+	PINMUX_IPSR_GPSR(IP12_14_12, VI3_DATA6),
 	PINMUX_IPSR_MSEL(IP12_14_12, GPS_SIGN, SEL_GPS_0),
-	PINMUX_IPSR_DATA(IP12_14_12, FRB),
+	PINMUX_IPSR_GPSR(IP12_14_12, FRB),
 	PINMUX_IPSR_MSEL(IP12_14_12, RX4_B, SEL_SCIF4_1),
-	PINMUX_IPSR_DATA(IP12_14_12, SIM_CLK_B),
-	PINMUX_IPSR_DATA(IP12_17_15, VI1_G7),
-	PINMUX_IPSR_DATA(IP12_17_15, VI3_DATA7),
+	PINMUX_IPSR_GPSR(IP12_14_12, SIM_CLK_B),
+	PINMUX_IPSR_GPSR(IP12_17_15, VI1_G7),
+	PINMUX_IPSR_GPSR(IP12_17_15, VI3_DATA7),
 	PINMUX_IPSR_MSEL(IP12_17_15, GPS_MAG, SEL_GPS_0),
-	PINMUX_IPSR_DATA(IP12_17_15, FCE),
+	PINMUX_IPSR_GPSR(IP12_17_15, FCE),
 	PINMUX_IPSR_MSEL(IP12_17_15, SCK4_B, SEL_SCIF4_1),
 };
 
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7790.c b/drivers/pinctrl/sh-pfc/pfc-r8a7790.c
index a8b629b..0f4d48f 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a7790.c
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a7790.c
@@ -799,47 +799,47 @@
 	PINMUX_SINGLE(DU_DOTCLKIN0),
 	PINMUX_SINGLE(DU_DOTCLKIN2),
 
-	PINMUX_IPSR_DATA(IP0_2_0, D0),
+	PINMUX_IPSR_GPSR(IP0_2_0, D0),
 	PINMUX_IPSR_MSEL(IP0_2_0, MSIOF3_SCK_B, SEL_SOF3_1),
 	PINMUX_IPSR_MSEL(IP0_2_0, VI3_DATA0, SEL_VI3_0),
 	PINMUX_IPSR_MSEL(IP0_2_0, VI0_G4, SEL_VI0_0),
 	PINMUX_IPSR_MSEL(IP0_2_0, VI0_G4_B, SEL_VI0_1),
-	PINMUX_IPSR_DATA(IP0_5_3, D1),
+	PINMUX_IPSR_GPSR(IP0_5_3, D1),
 	PINMUX_IPSR_MSEL(IP0_5_3, MSIOF3_SYNC_B, SEL_SOF3_1),
 	PINMUX_IPSR_MSEL(IP0_5_3, VI3_DATA1, SEL_VI3_0),
 	PINMUX_IPSR_MSEL(IP0_5_3, VI0_G5, SEL_VI0_0),
 	PINMUX_IPSR_MSEL(IP0_5_3, VI0_G5_B, SEL_VI0_1),
-	PINMUX_IPSR_DATA(IP0_8_6, D2),
+	PINMUX_IPSR_GPSR(IP0_8_6, D2),
 	PINMUX_IPSR_MSEL(IP0_8_6, MSIOF3_RXD_B, SEL_SOF3_1),
 	PINMUX_IPSR_MSEL(IP0_8_6, VI3_DATA2, SEL_VI3_0),
 	PINMUX_IPSR_MSEL(IP0_8_6, VI0_G6, SEL_VI0_0),
 	PINMUX_IPSR_MSEL(IP0_8_6, VI0_G6_B, SEL_VI0_1),
-	PINMUX_IPSR_DATA(IP0_11_9, D3),
+	PINMUX_IPSR_GPSR(IP0_11_9, D3),
 	PINMUX_IPSR_MSEL(IP0_11_9, MSIOF3_TXD_B, SEL_SOF3_1),
 	PINMUX_IPSR_MSEL(IP0_11_9, VI3_DATA3, SEL_VI3_0),
 	PINMUX_IPSR_MSEL(IP0_11_9, VI0_G7, SEL_VI0_0),
 	PINMUX_IPSR_MSEL(IP0_11_9, VI0_G7_B, SEL_VI0_1),
-	PINMUX_IPSR_DATA(IP0_15_12, D4),
+	PINMUX_IPSR_GPSR(IP0_15_12, D4),
 	PINMUX_IPSR_MSEL(IP0_15_12, SCIFB1_RXD_F, SEL_SCIFB1_5),
 	PINMUX_IPSR_MSEL(IP0_15_12, SCIFB0_RXD_C, SEL_SCIFB_2),
 	PINMUX_IPSR_MSEL(IP0_15_12, VI3_DATA4, SEL_VI3_0),
 	PINMUX_IPSR_MSEL(IP0_15_12, VI0_R0, SEL_VI0_0),
 	PINMUX_IPSR_MSEL(IP0_15_12, VI0_R0_B, SEL_VI0_1),
 	PINMUX_IPSR_MSEL(IP0_15_12, RX0_B, SEL_SCIF0_1),
-	PINMUX_IPSR_DATA(IP0_19_16, D5),
+	PINMUX_IPSR_GPSR(IP0_19_16, D5),
 	PINMUX_IPSR_MSEL(IP0_19_16, SCIFB1_TXD_F, SEL_SCIFB1_5),
 	PINMUX_IPSR_MSEL(IP0_19_16, SCIFB0_TXD_C, SEL_SCIFB_2),
 	PINMUX_IPSR_MSEL(IP0_19_16, VI3_DATA5, SEL_VI3_0),
 	PINMUX_IPSR_MSEL(IP0_19_16, VI0_R1, SEL_VI0_0),
 	PINMUX_IPSR_MSEL(IP0_19_16, VI0_R1_B, SEL_VI0_1),
 	PINMUX_IPSR_MSEL(IP0_19_16, TX0_B, SEL_SCIF0_1),
-	PINMUX_IPSR_DATA(IP0_22_20, D6),
+	PINMUX_IPSR_GPSR(IP0_22_20, D6),
 	PINMUX_IPSR_MSEL(IP0_22_20, IIC2_SCL_C, SEL_IIC2_2),
 	PINMUX_IPSR_MSEL(IP0_22_20, VI3_DATA6, SEL_VI3_0),
 	PINMUX_IPSR_MSEL(IP0_22_20, VI0_R2, SEL_VI0_0),
 	PINMUX_IPSR_MSEL(IP0_22_20, VI0_R2_B, SEL_VI0_1),
 	PINMUX_IPSR_MSEL(IP0_22_20, I2C2_SCL_C, SEL_I2C2_2),
-	PINMUX_IPSR_DATA(IP0_26_23, D7),
+	PINMUX_IPSR_GPSR(IP0_26_23, D7),
 	PINMUX_IPSR_MSEL(IP0_26_23, AD_DI_B, SEL_ADI_1),
 	PINMUX_IPSR_MSEL(IP0_26_23, IIC2_SDA_C, SEL_IIC2_2),
 	PINMUX_IPSR_MSEL(IP0_26_23, VI3_DATA7, SEL_VI3_0),
@@ -847,81 +847,81 @@
 	PINMUX_IPSR_MSEL(IP0_26_23, VI0_R3_B, SEL_VI0_1),
 	PINMUX_IPSR_MSEL(IP0_26_23, I2C2_SDA_C, SEL_I2C2_2),
 	PINMUX_IPSR_MSEL(IP0_26_23, TCLK1, SEL_TMU1_0),
-	PINMUX_IPSR_DATA(IP0_30_27, D8),
+	PINMUX_IPSR_GPSR(IP0_30_27, D8),
 	PINMUX_IPSR_MSEL(IP0_30_27, SCIFA1_SCK_C, SEL_SCIFA1_2),
-	PINMUX_IPSR_DATA(IP0_30_27, AVB_TXD0),
+	PINMUX_IPSR_GPSR(IP0_30_27, AVB_TXD0),
 	PINMUX_IPSR_MSEL(IP0_30_27, VI0_G0, SEL_VI0_0),
 	PINMUX_IPSR_MSEL(IP0_30_27, VI0_G0_B, SEL_VI0_1),
 	PINMUX_IPSR_MSEL(IP0_30_27, VI2_DATA0_VI2_B0, SEL_VI2_0),
 
-	PINMUX_IPSR_DATA(IP1_3_0, D9),
+	PINMUX_IPSR_GPSR(IP1_3_0, D9),
 	PINMUX_IPSR_MSEL(IP1_3_0, SCIFA1_RXD_C, SEL_SCIFA1_2),
-	PINMUX_IPSR_DATA(IP1_3_0, AVB_TXD1),
+	PINMUX_IPSR_GPSR(IP1_3_0, AVB_TXD1),
 	PINMUX_IPSR_MSEL(IP1_3_0, VI0_G1, SEL_VI0_0),
 	PINMUX_IPSR_MSEL(IP1_3_0, VI0_G1_B, SEL_VI0_1),
 	PINMUX_IPSR_MSEL(IP1_3_0, VI2_DATA1_VI2_B1, SEL_VI2_0),
-	PINMUX_IPSR_DATA(IP1_7_4, D10),
+	PINMUX_IPSR_GPSR(IP1_7_4, D10),
 	PINMUX_IPSR_MSEL(IP1_7_4, SCIFA1_TXD_C, SEL_SCIFA1_2),
-	PINMUX_IPSR_DATA(IP1_7_4, AVB_TXD2),
+	PINMUX_IPSR_GPSR(IP1_7_4, AVB_TXD2),
 	PINMUX_IPSR_MSEL(IP1_7_4, VI0_G2, SEL_VI0_0),
 	PINMUX_IPSR_MSEL(IP1_7_4, VI0_G2_B, SEL_VI0_1),
 	PINMUX_IPSR_MSEL(IP1_7_4, VI2_DATA2_VI2_B2, SEL_VI2_0),
-	PINMUX_IPSR_DATA(IP1_11_8, D11),
+	PINMUX_IPSR_GPSR(IP1_11_8, D11),
 	PINMUX_IPSR_MSEL(IP1_11_8, SCIFA1_CTS_N_C, SEL_SCIFA1_2),
-	PINMUX_IPSR_DATA(IP1_11_8, AVB_TXD3),
+	PINMUX_IPSR_GPSR(IP1_11_8, AVB_TXD3),
 	PINMUX_IPSR_MSEL(IP1_11_8, VI0_G3, SEL_VI0_0),
 	PINMUX_IPSR_MSEL(IP1_11_8, VI0_G3_B, SEL_VI0_1),
 	PINMUX_IPSR_MSEL(IP1_11_8, VI2_DATA3_VI2_B3, SEL_VI2_0),
-	PINMUX_IPSR_DATA(IP1_14_12, D12),
+	PINMUX_IPSR_GPSR(IP1_14_12, D12),
 	PINMUX_IPSR_MSEL(IP1_14_12, SCIFA1_RTS_N_C, SEL_SCIFA1_2),
-	PINMUX_IPSR_DATA(IP1_14_12, AVB_TXD4),
+	PINMUX_IPSR_GPSR(IP1_14_12, AVB_TXD4),
 	PINMUX_IPSR_MSEL(IP1_14_12, VI0_HSYNC_N, SEL_VI0_0),
 	PINMUX_IPSR_MSEL(IP1_14_12, VI0_HSYNC_N_B, SEL_VI0_1),
 	PINMUX_IPSR_MSEL(IP1_14_12, VI2_DATA4_VI2_B4, SEL_VI2_0),
-	PINMUX_IPSR_DATA(IP1_17_15, D13),
-	PINMUX_IPSR_DATA(IP1_17_15, AVB_TXD5),
+	PINMUX_IPSR_GPSR(IP1_17_15, D13),
+	PINMUX_IPSR_GPSR(IP1_17_15, AVB_TXD5),
 	PINMUX_IPSR_MSEL(IP1_17_15, VI0_VSYNC_N, SEL_VI0_0),
 	PINMUX_IPSR_MSEL(IP1_17_15, VI0_VSYNC_N_B, SEL_VI0_1),
 	PINMUX_IPSR_MSEL(IP1_17_15, VI2_DATA5_VI2_B5, SEL_VI2_0),
-	PINMUX_IPSR_DATA(IP1_21_18, D14),
+	PINMUX_IPSR_GPSR(IP1_21_18, D14),
 	PINMUX_IPSR_MSEL(IP1_21_18, SCIFB1_RXD_C, SEL_SCIFB1_2),
-	PINMUX_IPSR_DATA(IP1_21_18, AVB_TXD6),
+	PINMUX_IPSR_GPSR(IP1_21_18, AVB_TXD6),
 	PINMUX_IPSR_MSEL(IP1_21_18, RX1_B, SEL_SCIF1_1),
 	PINMUX_IPSR_MSEL(IP1_21_18, VI0_CLKENB, SEL_VI0_0),
 	PINMUX_IPSR_MSEL(IP1_21_18, VI0_CLKENB_B, SEL_VI0_1),
 	PINMUX_IPSR_MSEL(IP1_21_18, VI2_DATA6_VI2_B6, SEL_VI2_0),
-	PINMUX_IPSR_DATA(IP1_25_22, D15),
+	PINMUX_IPSR_GPSR(IP1_25_22, D15),
 	PINMUX_IPSR_MSEL(IP1_25_22, SCIFB1_TXD_C, SEL_SCIFB1_2),
-	PINMUX_IPSR_DATA(IP1_25_22, AVB_TXD7),
+	PINMUX_IPSR_GPSR(IP1_25_22, AVB_TXD7),
 	PINMUX_IPSR_MSEL(IP1_25_22, TX1_B, SEL_SCIF1_1),
 	PINMUX_IPSR_MSEL(IP1_25_22, VI0_FIELD, SEL_VI0_0),
 	PINMUX_IPSR_MSEL(IP1_25_22, VI0_FIELD_B, SEL_VI0_1),
 	PINMUX_IPSR_MSEL(IP1_25_22, VI2_DATA7_VI2_B7, SEL_VI2_0),
-	PINMUX_IPSR_DATA(IP1_27_26, A0),
-	PINMUX_IPSR_DATA(IP1_27_26, PWM3),
-	PINMUX_IPSR_DATA(IP1_29_28, A1),
-	PINMUX_IPSR_DATA(IP1_29_28, PWM4),
+	PINMUX_IPSR_GPSR(IP1_27_26, A0),
+	PINMUX_IPSR_GPSR(IP1_27_26, PWM3),
+	PINMUX_IPSR_GPSR(IP1_29_28, A1),
+	PINMUX_IPSR_GPSR(IP1_29_28, PWM4),
 
-	PINMUX_IPSR_DATA(IP2_2_0, A2),
-	PINMUX_IPSR_DATA(IP2_2_0, PWM5),
+	PINMUX_IPSR_GPSR(IP2_2_0, A2),
+	PINMUX_IPSR_GPSR(IP2_2_0, PWM5),
 	PINMUX_IPSR_MSEL(IP2_2_0, MSIOF1_SS1_B, SEL_SOF1_1),
-	PINMUX_IPSR_DATA(IP2_5_3, A3),
-	PINMUX_IPSR_DATA(IP2_5_3, PWM6),
+	PINMUX_IPSR_GPSR(IP2_5_3, A3),
+	PINMUX_IPSR_GPSR(IP2_5_3, PWM6),
 	PINMUX_IPSR_MSEL(IP2_5_3, MSIOF1_SS2_B, SEL_SOF1_1),
-	PINMUX_IPSR_DATA(IP2_8_6, A4),
+	PINMUX_IPSR_GPSR(IP2_8_6, A4),
 	PINMUX_IPSR_MSEL(IP2_8_6, MSIOF1_TXD_B, SEL_SOF1_1),
-	PINMUX_IPSR_DATA(IP2_8_6, TPU0TO0),
-	PINMUX_IPSR_DATA(IP2_11_9, A5),
+	PINMUX_IPSR_GPSR(IP2_8_6, TPU0TO0),
+	PINMUX_IPSR_GPSR(IP2_11_9, A5),
 	PINMUX_IPSR_MSEL(IP2_11_9, SCIFA1_TXD_B, SEL_SCIFA1_1),
-	PINMUX_IPSR_DATA(IP2_11_9, TPU0TO1),
-	PINMUX_IPSR_DATA(IP2_14_12, A6),
+	PINMUX_IPSR_GPSR(IP2_11_9, TPU0TO1),
+	PINMUX_IPSR_GPSR(IP2_14_12, A6),
 	PINMUX_IPSR_MSEL(IP2_14_12, SCIFA1_RTS_N_B, SEL_SCIFA1_1),
-	PINMUX_IPSR_DATA(IP2_14_12, TPU0TO2),
-	PINMUX_IPSR_DATA(IP2_17_15, A7),
+	PINMUX_IPSR_GPSR(IP2_14_12, TPU0TO2),
+	PINMUX_IPSR_GPSR(IP2_17_15, A7),
 	PINMUX_IPSR_MSEL(IP2_17_15, SCIFA1_SCK_B, SEL_SCIFA1_1),
-	PINMUX_IPSR_DATA(IP2_17_15, AUDIO_CLKOUT_B),
-	PINMUX_IPSR_DATA(IP2_17_15, TPU0TO3),
-	PINMUX_IPSR_DATA(IP2_21_18, A8),
+	PINMUX_IPSR_GPSR(IP2_17_15, AUDIO_CLKOUT_B),
+	PINMUX_IPSR_GPSR(IP2_17_15, TPU0TO3),
+	PINMUX_IPSR_GPSR(IP2_21_18, A8),
 	PINMUX_IPSR_MSEL(IP2_21_18, SCIFA1_RXD_B, SEL_SCIFA1_1),
 	PINMUX_IPSR_MSEL(IP2_21_18, SSI_SCK5_B, SEL_SSI5_1),
 	PINMUX_IPSR_MSEL(IP2_21_18, VI0_R4, SEL_VI0_0),
@@ -929,7 +929,7 @@
 	PINMUX_IPSR_MSEL(IP2_21_18, SCIFB2_RXD_C, SEL_SCIFB2_2),
 	PINMUX_IPSR_MSEL(IP2_21_18, RX2_B, SEL_SCIF2_1),
 	PINMUX_IPSR_MSEL(IP2_21_18, VI2_DATA0_VI2_B0_B, SEL_VI2_1),
-	PINMUX_IPSR_DATA(IP2_25_22, A9),
+	PINMUX_IPSR_GPSR(IP2_25_22, A9),
 	PINMUX_IPSR_MSEL(IP2_25_22, SCIFA1_CTS_N_B, SEL_SCIFA1_1),
 	PINMUX_IPSR_MSEL(IP2_25_22, SSI_WS5_B, SEL_SSI5_1),
 	PINMUX_IPSR_MSEL(IP2_25_22, VI0_R5, SEL_VI0_0),
@@ -937,392 +937,392 @@
 	PINMUX_IPSR_MSEL(IP2_25_22, SCIFB2_TXD_C, SEL_SCIFB2_2),
 	PINMUX_IPSR_MSEL(IP2_25_22, TX2_B, SEL_SCIF2_1),
 	PINMUX_IPSR_MSEL(IP2_25_22, VI2_DATA1_VI2_B1_B, SEL_VI2_1),
-	PINMUX_IPSR_DATA(IP2_28_26, A10),
+	PINMUX_IPSR_GPSR(IP2_28_26, A10),
 	PINMUX_IPSR_MSEL(IP2_28_26, SSI_SDATA5_B, SEL_SSI5_1),
-	PINMUX_IPSR_DATA(IP2_28_26, MSIOF2_SYNC),
+	PINMUX_IPSR_GPSR(IP2_28_26, MSIOF2_SYNC),
 	PINMUX_IPSR_MSEL(IP2_28_26, VI0_R6, SEL_VI0_0),
 	PINMUX_IPSR_MSEL(IP2_28_26, VI0_R6_B, SEL_VI0_1),
 	PINMUX_IPSR_MSEL(IP2_28_26, VI2_DATA2_VI2_B2_B, SEL_VI2_1),
 
-	PINMUX_IPSR_DATA(IP3_3_0, A11),
+	PINMUX_IPSR_GPSR(IP3_3_0, A11),
 	PINMUX_IPSR_MSEL(IP3_3_0, SCIFB2_CTS_N_B, SEL_SCIFB2_1),
-	PINMUX_IPSR_DATA(IP3_3_0, MSIOF2_SCK),
+	PINMUX_IPSR_GPSR(IP3_3_0, MSIOF2_SCK),
 	PINMUX_IPSR_MSEL(IP3_3_0, VI1_R0, SEL_VI1_0),
 	PINMUX_IPSR_MSEL(IP3_3_0, VI1_R0_B, SEL_VI1_1),
-	PINMUX_IPSR_DATA(IP3_3_0, VI2_G0),
+	PINMUX_IPSR_GPSR(IP3_3_0, VI2_G0),
 	PINMUX_IPSR_MSEL(IP3_3_0, VI2_DATA3_VI2_B3_B, SEL_VI2_1),
-	PINMUX_IPSR_DATA(IP3_7_4, A12),
+	PINMUX_IPSR_GPSR(IP3_7_4, A12),
 	PINMUX_IPSR_MSEL(IP3_7_4, SCIFB2_RXD_B, SEL_SCIFB2_1),
-	PINMUX_IPSR_DATA(IP3_7_4, MSIOF2_TXD),
+	PINMUX_IPSR_GPSR(IP3_7_4, MSIOF2_TXD),
 	PINMUX_IPSR_MSEL(IP3_7_4, VI1_R1, SEL_VI1_0),
 	PINMUX_IPSR_MSEL(IP3_7_4, VI1_R1_B, SEL_VI1_1),
-	PINMUX_IPSR_DATA(IP3_7_4, VI2_G1),
+	PINMUX_IPSR_GPSR(IP3_7_4, VI2_G1),
 	PINMUX_IPSR_MSEL(IP3_7_4, VI2_DATA4_VI2_B4_B, SEL_VI2_1),
-	PINMUX_IPSR_DATA(IP3_11_8, A13),
+	PINMUX_IPSR_GPSR(IP3_11_8, A13),
 	PINMUX_IPSR_MSEL(IP3_11_8, SCIFB2_RTS_N_B, SEL_SCIFB2_1),
-	PINMUX_IPSR_DATA(IP3_11_8, EX_WAIT2),
-	PINMUX_IPSR_DATA(IP3_11_8, MSIOF2_RXD),
+	PINMUX_IPSR_GPSR(IP3_11_8, EX_WAIT2),
+	PINMUX_IPSR_GPSR(IP3_11_8, MSIOF2_RXD),
 	PINMUX_IPSR_MSEL(IP3_11_8, VI1_R2, SEL_VI1_0),
 	PINMUX_IPSR_MSEL(IP3_11_8, VI1_R2_B, SEL_VI1_1),
-	PINMUX_IPSR_DATA(IP3_11_8, VI2_G2),
+	PINMUX_IPSR_GPSR(IP3_11_8, VI2_G2),
 	PINMUX_IPSR_MSEL(IP3_11_8, VI2_DATA5_VI2_B5_B, SEL_VI2_1),
-	PINMUX_IPSR_DATA(IP3_14_12, A14),
+	PINMUX_IPSR_GPSR(IP3_14_12, A14),
 	PINMUX_IPSR_MSEL(IP3_14_12, SCIFB2_TXD_B, SEL_SCIFB2_1),
-	PINMUX_IPSR_DATA(IP3_14_12, ATACS11_N),
-	PINMUX_IPSR_DATA(IP3_14_12, MSIOF2_SS1),
-	PINMUX_IPSR_DATA(IP3_17_15, A15),
+	PINMUX_IPSR_GPSR(IP3_14_12, ATACS11_N),
+	PINMUX_IPSR_GPSR(IP3_14_12, MSIOF2_SS1),
+	PINMUX_IPSR_GPSR(IP3_17_15, A15),
 	PINMUX_IPSR_MSEL(IP3_17_15, SCIFB2_SCK_B, SEL_SCIFB2_1),
-	PINMUX_IPSR_DATA(IP3_17_15, ATARD1_N),
-	PINMUX_IPSR_DATA(IP3_17_15, MSIOF2_SS2),
-	PINMUX_IPSR_DATA(IP3_19_18, A16),
-	PINMUX_IPSR_DATA(IP3_19_18, ATAWR1_N),
-	PINMUX_IPSR_DATA(IP3_22_20, A17),
+	PINMUX_IPSR_GPSR(IP3_17_15, ATARD1_N),
+	PINMUX_IPSR_GPSR(IP3_17_15, MSIOF2_SS2),
+	PINMUX_IPSR_GPSR(IP3_19_18, A16),
+	PINMUX_IPSR_GPSR(IP3_19_18, ATAWR1_N),
+	PINMUX_IPSR_GPSR(IP3_22_20, A17),
 	PINMUX_IPSR_MSEL(IP3_22_20, AD_DO_B, SEL_ADI_1),
-	PINMUX_IPSR_DATA(IP3_22_20, ATADIR1_N),
-	PINMUX_IPSR_DATA(IP3_25_23, A18),
+	PINMUX_IPSR_GPSR(IP3_22_20, ATADIR1_N),
+	PINMUX_IPSR_GPSR(IP3_25_23, A18),
 	PINMUX_IPSR_MSEL(IP3_25_23, AD_CLK_B, SEL_ADI_1),
-	PINMUX_IPSR_DATA(IP3_25_23, ATAG1_N),
-	PINMUX_IPSR_DATA(IP3_28_26, A19),
+	PINMUX_IPSR_GPSR(IP3_25_23, ATAG1_N),
+	PINMUX_IPSR_GPSR(IP3_28_26, A19),
 	PINMUX_IPSR_MSEL(IP3_28_26, AD_NCS_N_B, SEL_ADI_1),
-	PINMUX_IPSR_DATA(IP3_28_26, ATACS01_N),
+	PINMUX_IPSR_GPSR(IP3_28_26, ATACS01_N),
 	PINMUX_IPSR_MSEL(IP3_28_26, EX_WAIT0_B, SEL_LBS_1),
-	PINMUX_IPSR_DATA(IP3_31_29, A20),
-	PINMUX_IPSR_DATA(IP3_31_29, SPCLK),
+	PINMUX_IPSR_GPSR(IP3_31_29, A20),
+	PINMUX_IPSR_GPSR(IP3_31_29, SPCLK),
 	PINMUX_IPSR_MSEL(IP3_31_29, VI1_R3, SEL_VI1_0),
 	PINMUX_IPSR_MSEL(IP3_31_29, VI1_R3_B, SEL_VI1_1),
-	PINMUX_IPSR_DATA(IP3_31_29, VI2_G4),
+	PINMUX_IPSR_GPSR(IP3_31_29, VI2_G4),
 
-	PINMUX_IPSR_DATA(IP4_2_0, A21),
-	PINMUX_IPSR_DATA(IP4_2_0, MOSI_IO0),
+	PINMUX_IPSR_GPSR(IP4_2_0, A21),
+	PINMUX_IPSR_GPSR(IP4_2_0, MOSI_IO0),
 	PINMUX_IPSR_MSEL(IP4_2_0, VI1_R4, SEL_VI1_0),
 	PINMUX_IPSR_MSEL(IP4_2_0, VI1_R4_B, SEL_VI1_1),
-	PINMUX_IPSR_DATA(IP4_2_0, VI2_G5),
-	PINMUX_IPSR_DATA(IP4_5_3, A22),
-	PINMUX_IPSR_DATA(IP4_5_3, MISO_IO1),
+	PINMUX_IPSR_GPSR(IP4_2_0, VI2_G5),
+	PINMUX_IPSR_GPSR(IP4_5_3, A22),
+	PINMUX_IPSR_GPSR(IP4_5_3, MISO_IO1),
 	PINMUX_IPSR_MSEL(IP4_5_3, VI1_R5, SEL_VI1_0),
 	PINMUX_IPSR_MSEL(IP4_5_3, VI1_R5_B, SEL_VI1_1),
-	PINMUX_IPSR_DATA(IP4_5_3, VI2_G6),
-	PINMUX_IPSR_DATA(IP4_8_6, A23),
-	PINMUX_IPSR_DATA(IP4_8_6, IO2),
+	PINMUX_IPSR_GPSR(IP4_5_3, VI2_G6),
+	PINMUX_IPSR_GPSR(IP4_8_6, A23),
+	PINMUX_IPSR_GPSR(IP4_8_6, IO2),
 	PINMUX_IPSR_MSEL(IP4_8_6, VI1_G7, SEL_VI1_0),
 	PINMUX_IPSR_MSEL(IP4_8_6, VI1_G7_B, SEL_VI1_1),
-	PINMUX_IPSR_DATA(IP4_8_6, VI2_G7),
-	PINMUX_IPSR_DATA(IP4_11_9, A24),
-	PINMUX_IPSR_DATA(IP4_11_9, IO3),
+	PINMUX_IPSR_GPSR(IP4_8_6, VI2_G7),
+	PINMUX_IPSR_GPSR(IP4_11_9, A24),
+	PINMUX_IPSR_GPSR(IP4_11_9, IO3),
 	PINMUX_IPSR_MSEL(IP4_11_9, VI1_R7, SEL_VI1_0),
 	PINMUX_IPSR_MSEL(IP4_11_9, VI1_R7_B, SEL_VI1_1),
 	PINMUX_IPSR_MSEL(IP4_11_9, VI2_CLKENB, SEL_VI2_0),
 	PINMUX_IPSR_MSEL(IP4_11_9, VI2_CLKENB_B, SEL_VI2_1),
-	PINMUX_IPSR_DATA(IP4_14_12, A25),
-	PINMUX_IPSR_DATA(IP4_14_12, SSL),
+	PINMUX_IPSR_GPSR(IP4_14_12, A25),
+	PINMUX_IPSR_GPSR(IP4_14_12, SSL),
 	PINMUX_IPSR_MSEL(IP4_14_12, VI1_G6, SEL_VI1_0),
 	PINMUX_IPSR_MSEL(IP4_14_12, VI1_G6_B, SEL_VI1_1),
 	PINMUX_IPSR_MSEL(IP4_14_12, VI2_FIELD, SEL_VI2_0),
 	PINMUX_IPSR_MSEL(IP4_14_12, VI2_FIELD_B, SEL_VI2_1),
-	PINMUX_IPSR_DATA(IP4_17_15, CS0_N),
+	PINMUX_IPSR_GPSR(IP4_17_15, CS0_N),
 	PINMUX_IPSR_MSEL(IP4_17_15, VI1_R6, SEL_VI1_0),
 	PINMUX_IPSR_MSEL(IP4_17_15, VI1_R6_B, SEL_VI1_1),
-	PINMUX_IPSR_DATA(IP4_17_15, VI2_G3),
+	PINMUX_IPSR_GPSR(IP4_17_15, VI2_G3),
 	PINMUX_IPSR_MSEL(IP4_17_15, MSIOF0_SS2_B, SEL_SOF0_1),
-	PINMUX_IPSR_DATA(IP4_20_18, CS1_N_A26),
-	PINMUX_IPSR_DATA(IP4_20_18, SPEEDIN),
+	PINMUX_IPSR_GPSR(IP4_20_18, CS1_N_A26),
+	PINMUX_IPSR_GPSR(IP4_20_18, SPEEDIN),
 	PINMUX_IPSR_MSEL(IP4_20_18, VI0_R7, SEL_VI0_0),
 	PINMUX_IPSR_MSEL(IP4_20_18, VI0_R7_B, SEL_VI0_1),
 	PINMUX_IPSR_MSEL(IP4_20_18, VI2_CLK, SEL_VI2_0),
 	PINMUX_IPSR_MSEL(IP4_20_18, VI2_CLK_B, SEL_VI2_1),
-	PINMUX_IPSR_DATA(IP4_23_21, EX_CS0_N),
+	PINMUX_IPSR_GPSR(IP4_23_21, EX_CS0_N),
 	PINMUX_IPSR_MSEL(IP4_23_21, HRX1_B, SEL_HSCIF1_1),
 	PINMUX_IPSR_MSEL(IP4_23_21, VI1_G5, SEL_VI1_0),
 	PINMUX_IPSR_MSEL(IP4_23_21, VI1_G5_B, SEL_VI1_1),
-	PINMUX_IPSR_DATA(IP4_23_21, VI2_R0),
+	PINMUX_IPSR_GPSR(IP4_23_21, VI2_R0),
 	PINMUX_IPSR_MSEL(IP4_23_21, HTX0_B, SEL_HSCIF0_1),
 	PINMUX_IPSR_MSEL(IP4_23_21, MSIOF0_SS1_B, SEL_SOF0_1),
-	PINMUX_IPSR_DATA(IP4_26_24, EX_CS1_N),
-	PINMUX_IPSR_DATA(IP4_26_24, GPS_CLK),
+	PINMUX_IPSR_GPSR(IP4_26_24, EX_CS1_N),
+	PINMUX_IPSR_GPSR(IP4_26_24, GPS_CLK),
 	PINMUX_IPSR_MSEL(IP4_26_24, HCTS1_N_B, SEL_HSCIF1_1),
 	PINMUX_IPSR_MSEL(IP4_26_24, VI1_FIELD, SEL_VI1_0),
 	PINMUX_IPSR_MSEL(IP4_26_24, VI1_FIELD_B, SEL_VI1_1),
-	PINMUX_IPSR_DATA(IP4_26_24, VI2_R1),
-	PINMUX_IPSR_DATA(IP4_29_27, EX_CS2_N),
-	PINMUX_IPSR_DATA(IP4_29_27, GPS_SIGN),
+	PINMUX_IPSR_GPSR(IP4_26_24, VI2_R1),
+	PINMUX_IPSR_GPSR(IP4_29_27, EX_CS2_N),
+	PINMUX_IPSR_GPSR(IP4_29_27, GPS_SIGN),
 	PINMUX_IPSR_MSEL(IP4_29_27, HRTS1_N_B, SEL_HSCIF1_1),
-	PINMUX_IPSR_DATA(IP4_29_27, VI3_CLKENB),
+	PINMUX_IPSR_GPSR(IP4_29_27, VI3_CLKENB),
 	PINMUX_IPSR_MSEL(IP4_29_27, VI1_G0, SEL_VI1_0),
 	PINMUX_IPSR_MSEL(IP4_29_27, VI1_G0_B, SEL_VI1_1),
-	PINMUX_IPSR_DATA(IP4_29_27, VI2_R2),
+	PINMUX_IPSR_GPSR(IP4_29_27, VI2_R2),
 
-	PINMUX_IPSR_DATA(IP5_2_0, EX_CS3_N),
-	PINMUX_IPSR_DATA(IP5_2_0, GPS_MAG),
-	PINMUX_IPSR_DATA(IP5_2_0, VI3_FIELD),
+	PINMUX_IPSR_GPSR(IP5_2_0, EX_CS3_N),
+	PINMUX_IPSR_GPSR(IP5_2_0, GPS_MAG),
+	PINMUX_IPSR_GPSR(IP5_2_0, VI3_FIELD),
 	PINMUX_IPSR_MSEL(IP5_2_0, VI1_G1, SEL_VI1_0),
 	PINMUX_IPSR_MSEL(IP5_2_0, VI1_G1_B, SEL_VI1_1),
-	PINMUX_IPSR_DATA(IP5_2_0, VI2_R3),
-	PINMUX_IPSR_DATA(IP5_5_3, EX_CS4_N),
+	PINMUX_IPSR_GPSR(IP5_2_0, VI2_R3),
+	PINMUX_IPSR_GPSR(IP5_5_3, EX_CS4_N),
 	PINMUX_IPSR_MSEL(IP5_5_3, MSIOF1_SCK_B, SEL_SOF1_1),
-	PINMUX_IPSR_DATA(IP5_5_3, VI3_HSYNC_N),
+	PINMUX_IPSR_GPSR(IP5_5_3, VI3_HSYNC_N),
 	PINMUX_IPSR_MSEL(IP5_5_3, VI2_HSYNC_N, SEL_VI2_0),
 	PINMUX_IPSR_MSEL(IP5_5_3, IIC1_SCL, SEL_IIC1_0),
 	PINMUX_IPSR_MSEL(IP5_5_3, VI2_HSYNC_N_B, SEL_VI2_1),
-	PINMUX_IPSR_DATA(IP5_5_3, INTC_EN0_N),
+	PINMUX_IPSR_GPSR(IP5_5_3, INTC_EN0_N),
 	PINMUX_IPSR_MSEL(IP5_5_3, I2C1_SCL, SEL_I2C1_0),
-	PINMUX_IPSR_DATA(IP5_9_6, EX_CS5_N),
+	PINMUX_IPSR_GPSR(IP5_9_6, EX_CS5_N),
 	PINMUX_IPSR_MSEL(IP5_9_6, CAN0_RX, SEL_CAN0_0),
 	PINMUX_IPSR_MSEL(IP5_9_6, MSIOF1_RXD_B, SEL_SOF1_1),
-	PINMUX_IPSR_DATA(IP5_9_6, VI3_VSYNC_N),
+	PINMUX_IPSR_GPSR(IP5_9_6, VI3_VSYNC_N),
 	PINMUX_IPSR_MSEL(IP5_9_6, VI1_G2, SEL_VI1_0),
 	PINMUX_IPSR_MSEL(IP5_9_6, VI1_G2_B, SEL_VI1_1),
-	PINMUX_IPSR_DATA(IP5_9_6, VI2_R4),
+	PINMUX_IPSR_GPSR(IP5_9_6, VI2_R4),
 	PINMUX_IPSR_MSEL(IP5_9_6, IIC1_SDA, SEL_IIC1_0),
-	PINMUX_IPSR_DATA(IP5_9_6, INTC_EN1_N),
+	PINMUX_IPSR_GPSR(IP5_9_6, INTC_EN1_N),
 	PINMUX_IPSR_MSEL(IP5_9_6, I2C1_SDA, SEL_I2C1_0),
-	PINMUX_IPSR_DATA(IP5_12_10, BS_N),
+	PINMUX_IPSR_GPSR(IP5_12_10, BS_N),
 	PINMUX_IPSR_MSEL(IP5_12_10, IETX, SEL_IEB_0),
 	PINMUX_IPSR_MSEL(IP5_12_10, HTX1_B, SEL_HSCIF1_1),
 	PINMUX_IPSR_MSEL(IP5_12_10, CAN1_TX, SEL_CAN1_0),
-	PINMUX_IPSR_DATA(IP5_12_10, DRACK0),
+	PINMUX_IPSR_GPSR(IP5_12_10, DRACK0),
 	PINMUX_IPSR_MSEL(IP5_12_10, IETX_C, SEL_IEB_2),
-	PINMUX_IPSR_DATA(IP5_14_13, RD_N),
+	PINMUX_IPSR_GPSR(IP5_14_13, RD_N),
 	PINMUX_IPSR_MSEL(IP5_14_13, CAN0_TX, SEL_CAN0_0),
 	PINMUX_IPSR_MSEL(IP5_14_13, SCIFA0_SCK_B, SEL_SCFA_1),
-	PINMUX_IPSR_DATA(IP5_17_15, RD_WR_N),
+	PINMUX_IPSR_GPSR(IP5_17_15, RD_WR_N),
 	PINMUX_IPSR_MSEL(IP5_17_15, VI1_G3, SEL_VI1_0),
 	PINMUX_IPSR_MSEL(IP5_17_15, VI1_G3_B, SEL_VI1_1),
-	PINMUX_IPSR_DATA(IP5_17_15, VI2_R5),
+	PINMUX_IPSR_GPSR(IP5_17_15, VI2_R5),
 	PINMUX_IPSR_MSEL(IP5_17_15, SCIFA0_RXD_B, SEL_SCFA_1),
-	PINMUX_IPSR_DATA(IP5_17_15, INTC_IRQ4_N),
-	PINMUX_IPSR_DATA(IP5_20_18, WE0_N),
+	PINMUX_IPSR_GPSR(IP5_17_15, INTC_IRQ4_N),
+	PINMUX_IPSR_GPSR(IP5_20_18, WE0_N),
 	PINMUX_IPSR_MSEL(IP5_20_18, IECLK, SEL_IEB_0),
 	PINMUX_IPSR_MSEL(IP5_20_18, CAN_CLK, SEL_CANCLK_0),
 	PINMUX_IPSR_MSEL(IP5_20_18, VI2_VSYNC_N, SEL_VI2_0),
 	PINMUX_IPSR_MSEL(IP5_20_18, SCIFA0_TXD_B, SEL_SCFA_1),
 	PINMUX_IPSR_MSEL(IP5_20_18, VI2_VSYNC_N_B, SEL_VI2_1),
-	PINMUX_IPSR_DATA(IP5_23_21, WE1_N),
+	PINMUX_IPSR_GPSR(IP5_23_21, WE1_N),
 	PINMUX_IPSR_MSEL(IP5_23_21, IERX, SEL_IEB_0),
 	PINMUX_IPSR_MSEL(IP5_23_21, CAN1_RX, SEL_CAN1_0),
 	PINMUX_IPSR_MSEL(IP5_23_21, VI1_G4, SEL_VI1_0),
 	PINMUX_IPSR_MSEL(IP5_23_21, VI1_G4_B, SEL_VI1_1),
-	PINMUX_IPSR_DATA(IP5_23_21, VI2_R6),
+	PINMUX_IPSR_GPSR(IP5_23_21, VI2_R6),
 	PINMUX_IPSR_MSEL(IP5_23_21, SCIFA0_CTS_N_B, SEL_SCFA_1),
 	PINMUX_IPSR_MSEL(IP5_23_21, IERX_C, SEL_IEB_2),
 	PINMUX_IPSR_MSEL(IP5_26_24, EX_WAIT0, SEL_LBS_0),
-	PINMUX_IPSR_DATA(IP5_26_24, IRQ3),
-	PINMUX_IPSR_DATA(IP5_26_24, INTC_IRQ3_N),
+	PINMUX_IPSR_GPSR(IP5_26_24, IRQ3),
+	PINMUX_IPSR_GPSR(IP5_26_24, INTC_IRQ3_N),
 	PINMUX_IPSR_MSEL(IP5_26_24, VI3_CLK, SEL_VI3_0),
 	PINMUX_IPSR_MSEL(IP5_26_24, SCIFA0_RTS_N_B, SEL_SCFA_1),
 	PINMUX_IPSR_MSEL(IP5_26_24, HRX0_B, SEL_HSCIF0_1),
 	PINMUX_IPSR_MSEL(IP5_26_24, MSIOF0_SCK_B, SEL_SOF0_1),
-	PINMUX_IPSR_DATA(IP5_29_27, DREQ0_N),
+	PINMUX_IPSR_GPSR(IP5_29_27, DREQ0_N),
 	PINMUX_IPSR_MSEL(IP5_29_27, VI1_HSYNC_N, SEL_VI1_0),
 	PINMUX_IPSR_MSEL(IP5_29_27, VI1_HSYNC_N_B, SEL_VI1_1),
-	PINMUX_IPSR_DATA(IP5_29_27, VI2_R7),
+	PINMUX_IPSR_GPSR(IP5_29_27, VI2_R7),
 	PINMUX_IPSR_MSEL(IP5_29_27, SSI_SCK78_C, SEL_SSI7_2),
 	PINMUX_IPSR_MSEL(IP5_29_27, SSI_WS78_B, SEL_SSI7_1),
 
-	PINMUX_IPSR_DATA(IP6_2_0, DACK0),
-	PINMUX_IPSR_DATA(IP6_2_0, IRQ0),
-	PINMUX_IPSR_DATA(IP6_2_0, INTC_IRQ0_N),
+	PINMUX_IPSR_GPSR(IP6_2_0, DACK0),
+	PINMUX_IPSR_GPSR(IP6_2_0, IRQ0),
+	PINMUX_IPSR_GPSR(IP6_2_0, INTC_IRQ0_N),
 	PINMUX_IPSR_MSEL(IP6_2_0, SSI_SCK6_B, SEL_SSI6_1),
 	PINMUX_IPSR_MSEL(IP6_2_0, VI1_VSYNC_N, SEL_VI1_0),
 	PINMUX_IPSR_MSEL(IP6_2_0, VI1_VSYNC_N_B, SEL_VI1_1),
 	PINMUX_IPSR_MSEL(IP6_2_0, SSI_WS78_C, SEL_SSI7_2),
-	PINMUX_IPSR_DATA(IP6_5_3, DREQ1_N),
+	PINMUX_IPSR_GPSR(IP6_5_3, DREQ1_N),
 	PINMUX_IPSR_MSEL(IP6_5_3, VI1_CLKENB, SEL_VI1_0),
 	PINMUX_IPSR_MSEL(IP6_5_3, VI1_CLKENB_B, SEL_VI1_1),
 	PINMUX_IPSR_MSEL(IP6_5_3, SSI_SDATA7_C, SEL_SSI7_2),
 	PINMUX_IPSR_MSEL(IP6_5_3, SSI_SCK78_B, SEL_SSI7_1),
-	PINMUX_IPSR_DATA(IP6_8_6, DACK1),
-	PINMUX_IPSR_DATA(IP6_8_6, IRQ1),
-	PINMUX_IPSR_DATA(IP6_8_6, INTC_IRQ1_N),
+	PINMUX_IPSR_GPSR(IP6_8_6, DACK1),
+	PINMUX_IPSR_GPSR(IP6_8_6, IRQ1),
+	PINMUX_IPSR_GPSR(IP6_8_6, INTC_IRQ1_N),
 	PINMUX_IPSR_MSEL(IP6_8_6, SSI_WS6_B, SEL_SSI6_1),
 	PINMUX_IPSR_MSEL(IP6_8_6, SSI_SDATA8_C, SEL_SSI8_2),
-	PINMUX_IPSR_DATA(IP6_10_9, DREQ2_N),
+	PINMUX_IPSR_GPSR(IP6_10_9, DREQ2_N),
 	PINMUX_IPSR_MSEL(IP6_10_9, HSCK1_B, SEL_HSCIF1_1),
 	PINMUX_IPSR_MSEL(IP6_10_9, HCTS0_N_B, SEL_HSCIF0_1),
 	PINMUX_IPSR_MSEL(IP6_10_9, MSIOF0_TXD_B, SEL_SOF0_1),
-	PINMUX_IPSR_DATA(IP6_13_11, DACK2),
-	PINMUX_IPSR_DATA(IP6_13_11, IRQ2),
-	PINMUX_IPSR_DATA(IP6_13_11, INTC_IRQ2_N),
+	PINMUX_IPSR_GPSR(IP6_13_11, DACK2),
+	PINMUX_IPSR_GPSR(IP6_13_11, IRQ2),
+	PINMUX_IPSR_GPSR(IP6_13_11, INTC_IRQ2_N),
 	PINMUX_IPSR_MSEL(IP6_13_11, SSI_SDATA6_B, SEL_SSI6_1),
 	PINMUX_IPSR_MSEL(IP6_13_11, HRTS0_N_B, SEL_HSCIF0_1),
 	PINMUX_IPSR_MSEL(IP6_13_11, MSIOF0_RXD_B, SEL_SOF0_1),
-	PINMUX_IPSR_DATA(IP6_16_14, ETH_CRS_DV),
+	PINMUX_IPSR_GPSR(IP6_16_14, ETH_CRS_DV),
 	PINMUX_IPSR_MSEL(IP6_16_14, STP_ISCLK_0_B, SEL_SSP_1),
 	PINMUX_IPSR_MSEL(IP6_16_14, TS_SDEN0_D, SEL_TSIF0_3),
 	PINMUX_IPSR_MSEL(IP6_16_14, GLO_Q0_C, SEL_GPS_2),
 	PINMUX_IPSR_MSEL(IP6_16_14, IIC2_SCL_E, SEL_IIC2_4),
 	PINMUX_IPSR_MSEL(IP6_16_14, I2C2_SCL_E, SEL_I2C2_4),
-	PINMUX_IPSR_DATA(IP6_19_17, ETH_RX_ER),
+	PINMUX_IPSR_GPSR(IP6_19_17, ETH_RX_ER),
 	PINMUX_IPSR_MSEL(IP6_19_17, STP_ISD_0_B, SEL_SSP_1),
 	PINMUX_IPSR_MSEL(IP6_19_17, TS_SPSYNC0_D, SEL_TSIF0_3),
 	PINMUX_IPSR_MSEL(IP6_19_17, GLO_Q1_C, SEL_GPS_2),
 	PINMUX_IPSR_MSEL(IP6_19_17, IIC2_SDA_E, SEL_IIC2_4),
 	PINMUX_IPSR_MSEL(IP6_19_17, I2C2_SDA_E, SEL_I2C2_4),
-	PINMUX_IPSR_DATA(IP6_22_20, ETH_RXD0),
+	PINMUX_IPSR_GPSR(IP6_22_20, ETH_RXD0),
 	PINMUX_IPSR_MSEL(IP6_22_20, STP_ISEN_0_B, SEL_SSP_1),
 	PINMUX_IPSR_MSEL(IP6_22_20, TS_SDAT0_D, SEL_TSIF0_3),
 	PINMUX_IPSR_MSEL(IP6_22_20, GLO_I0_C, SEL_GPS_2),
 	PINMUX_IPSR_MSEL(IP6_22_20, SCIFB1_SCK_G, SEL_SCIFB1_6),
 	PINMUX_IPSR_MSEL(IP6_22_20, SCK1_E, SEL_SCIF1_4),
-	PINMUX_IPSR_DATA(IP6_25_23, ETH_RXD1),
+	PINMUX_IPSR_GPSR(IP6_25_23, ETH_RXD1),
 	PINMUX_IPSR_MSEL(IP6_25_23, HRX0_E, SEL_HSCIF0_4),
 	PINMUX_IPSR_MSEL(IP6_25_23, STP_ISSYNC_0_B, SEL_SSP_1),
 	PINMUX_IPSR_MSEL(IP6_25_23, TS_SCK0_D, SEL_TSIF0_3),
 	PINMUX_IPSR_MSEL(IP6_25_23, GLO_I1_C, SEL_GPS_2),
 	PINMUX_IPSR_MSEL(IP6_25_23, SCIFB1_RXD_G, SEL_SCIFB1_6),
 	PINMUX_IPSR_MSEL(IP6_25_23, RX1_E, SEL_SCIF1_4),
-	PINMUX_IPSR_DATA(IP6_28_26, ETH_LINK),
+	PINMUX_IPSR_GPSR(IP6_28_26, ETH_LINK),
 	PINMUX_IPSR_MSEL(IP6_28_26, HTX0_E, SEL_HSCIF0_4),
 	PINMUX_IPSR_MSEL(IP6_28_26, STP_IVCXO27_0_B, SEL_SSP_1),
 	PINMUX_IPSR_MSEL(IP6_28_26, SCIFB1_TXD_G, SEL_SCIFB1_6),
 	PINMUX_IPSR_MSEL(IP6_28_26, TX1_E, SEL_SCIF1_4),
-	PINMUX_IPSR_DATA(IP6_31_29, ETH_REF_CLK),
+	PINMUX_IPSR_GPSR(IP6_31_29, ETH_REF_CLK),
 	PINMUX_IPSR_MSEL(IP6_31_29, HCTS0_N_E, SEL_HSCIF0_4),
 	PINMUX_IPSR_MSEL(IP6_31_29, STP_IVCXO27_1_B, SEL_SSP_1),
 	PINMUX_IPSR_MSEL(IP6_31_29, HRX0_F, SEL_HSCIF0_5),
 
-	PINMUX_IPSR_DATA(IP7_2_0, ETH_MDIO),
+	PINMUX_IPSR_GPSR(IP7_2_0, ETH_MDIO),
 	PINMUX_IPSR_MSEL(IP7_2_0, HRTS0_N_E, SEL_HSCIF0_4),
 	PINMUX_IPSR_MSEL(IP7_2_0, SIM0_D_C, SEL_SIM_2),
 	PINMUX_IPSR_MSEL(IP7_2_0, HCTS0_N_F, SEL_HSCIF0_5),
-	PINMUX_IPSR_DATA(IP7_5_3, ETH_TXD1),
+	PINMUX_IPSR_GPSR(IP7_5_3, ETH_TXD1),
 	PINMUX_IPSR_MSEL(IP7_5_3, HTX0_F, SEL_HSCIF0_5),
 	PINMUX_IPSR_MSEL(IP7_5_3, BPFCLK_G, SEL_FM_6),
-	PINMUX_IPSR_DATA(IP7_7_6, ETH_TX_EN),
+	PINMUX_IPSR_GPSR(IP7_7_6, ETH_TX_EN),
 	PINMUX_IPSR_MSEL(IP7_7_6, SIM0_CLK_C, SEL_SIM_2),
 	PINMUX_IPSR_MSEL(IP7_7_6, HRTS0_N_F, SEL_HSCIF0_5),
-	PINMUX_IPSR_DATA(IP7_9_8, ETH_MAGIC),
+	PINMUX_IPSR_GPSR(IP7_9_8, ETH_MAGIC),
 	PINMUX_IPSR_MSEL(IP7_9_8, SIM0_RST_C, SEL_SIM_2),
-	PINMUX_IPSR_DATA(IP7_12_10, ETH_TXD0),
+	PINMUX_IPSR_GPSR(IP7_12_10, ETH_TXD0),
 	PINMUX_IPSR_MSEL(IP7_12_10, STP_ISCLK_1_B, SEL_SSP_1),
 	PINMUX_IPSR_MSEL(IP7_12_10, TS_SDEN1_C, SEL_TSIF1_2),
 	PINMUX_IPSR_MSEL(IP7_12_10, GLO_SCLK_C, SEL_GPS_2),
-	PINMUX_IPSR_DATA(IP7_15_13, ETH_MDC),
+	PINMUX_IPSR_GPSR(IP7_15_13, ETH_MDC),
 	PINMUX_IPSR_MSEL(IP7_15_13, STP_ISD_1_B, SEL_SSP_1),
 	PINMUX_IPSR_MSEL(IP7_15_13, TS_SPSYNC1_C, SEL_TSIF1_2),
 	PINMUX_IPSR_MSEL(IP7_15_13, GLO_SDATA_C, SEL_GPS_2),
-	PINMUX_IPSR_DATA(IP7_18_16, PWM0),
+	PINMUX_IPSR_GPSR(IP7_18_16, PWM0),
 	PINMUX_IPSR_MSEL(IP7_18_16, SCIFA2_SCK_C, SEL_SCIFA2_2),
 	PINMUX_IPSR_MSEL(IP7_18_16, STP_ISEN_1_B, SEL_SSP_1),
 	PINMUX_IPSR_MSEL(IP7_18_16, TS_SDAT1_C, SEL_TSIF1_2),
 	PINMUX_IPSR_MSEL(IP7_18_16, GLO_SS_C, SEL_GPS_2),
-	PINMUX_IPSR_DATA(IP7_21_19, PWM1),
+	PINMUX_IPSR_GPSR(IP7_21_19, PWM1),
 	PINMUX_IPSR_MSEL(IP7_21_19, SCIFA2_TXD_C, SEL_SCIFA2_2),
 	PINMUX_IPSR_MSEL(IP7_21_19, STP_ISSYNC_1_B, SEL_SSP_1),
 	PINMUX_IPSR_MSEL(IP7_21_19, TS_SCK1_C, SEL_TSIF1_2),
 	PINMUX_IPSR_MSEL(IP7_21_19, GLO_RFON_C, SEL_GPS_2),
-	PINMUX_IPSR_DATA(IP7_21_19, PCMOE_N),
-	PINMUX_IPSR_DATA(IP7_24_22, PWM2),
-	PINMUX_IPSR_DATA(IP7_24_22, PWMFSW0),
+	PINMUX_IPSR_GPSR(IP7_21_19, PCMOE_N),
+	PINMUX_IPSR_GPSR(IP7_24_22, PWM2),
+	PINMUX_IPSR_GPSR(IP7_24_22, PWMFSW0),
 	PINMUX_IPSR_MSEL(IP7_24_22, SCIFA2_RXD_C, SEL_SCIFA2_2),
-	PINMUX_IPSR_DATA(IP7_24_22, PCMWE_N),
+	PINMUX_IPSR_GPSR(IP7_24_22, PCMWE_N),
 	PINMUX_IPSR_MSEL(IP7_24_22, IECLK_C, SEL_IEB_2),
-	PINMUX_IPSR_DATA(IP7_26_25, DU_DOTCLKIN1),
-	PINMUX_IPSR_DATA(IP7_26_25, AUDIO_CLKC),
-	PINMUX_IPSR_DATA(IP7_26_25, AUDIO_CLKOUT_C),
+	PINMUX_IPSR_GPSR(IP7_26_25, DU_DOTCLKIN1),
+	PINMUX_IPSR_GPSR(IP7_26_25, AUDIO_CLKC),
+	PINMUX_IPSR_GPSR(IP7_26_25, AUDIO_CLKOUT_C),
 	PINMUX_IPSR_MSEL(IP7_28_27, VI0_CLK, SEL_VI0_0),
-	PINMUX_IPSR_DATA(IP7_28_27, ATACS00_N),
-	PINMUX_IPSR_DATA(IP7_28_27, AVB_RXD1),
+	PINMUX_IPSR_GPSR(IP7_28_27, ATACS00_N),
+	PINMUX_IPSR_GPSR(IP7_28_27, AVB_RXD1),
 	PINMUX_IPSR_MSEL(IP7_30_29, VI0_DATA0_VI0_B0, SEL_VI0_0),
-	PINMUX_IPSR_DATA(IP7_30_29, ATACS10_N),
-	PINMUX_IPSR_DATA(IP7_30_29, AVB_RXD2),
+	PINMUX_IPSR_GPSR(IP7_30_29, ATACS10_N),
+	PINMUX_IPSR_GPSR(IP7_30_29, AVB_RXD2),
 
 	PINMUX_IPSR_MSEL(IP8_1_0, VI0_DATA1_VI0_B1, SEL_VI0_0),
-	PINMUX_IPSR_DATA(IP8_1_0, ATARD0_N),
-	PINMUX_IPSR_DATA(IP8_1_0, AVB_RXD3),
+	PINMUX_IPSR_GPSR(IP8_1_0, ATARD0_N),
+	PINMUX_IPSR_GPSR(IP8_1_0, AVB_RXD3),
 	PINMUX_IPSR_MSEL(IP8_3_2, VI0_DATA2_VI0_B2, SEL_VI0_0),
-	PINMUX_IPSR_DATA(IP8_3_2, ATAWR0_N),
-	PINMUX_IPSR_DATA(IP8_3_2, AVB_RXD4),
+	PINMUX_IPSR_GPSR(IP8_3_2, ATAWR0_N),
+	PINMUX_IPSR_GPSR(IP8_3_2, AVB_RXD4),
 	PINMUX_IPSR_MSEL(IP8_5_4, VI0_DATA3_VI0_B3, SEL_VI0_0),
-	PINMUX_IPSR_DATA(IP8_5_4, ATADIR0_N),
-	PINMUX_IPSR_DATA(IP8_5_4, AVB_RXD5),
+	PINMUX_IPSR_GPSR(IP8_5_4, ATADIR0_N),
+	PINMUX_IPSR_GPSR(IP8_5_4, AVB_RXD5),
 	PINMUX_IPSR_MSEL(IP8_7_6, VI0_DATA4_VI0_B4, SEL_VI0_0),
-	PINMUX_IPSR_DATA(IP8_7_6, ATAG0_N),
-	PINMUX_IPSR_DATA(IP8_7_6, AVB_RXD6),
+	PINMUX_IPSR_GPSR(IP8_7_6, ATAG0_N),
+	PINMUX_IPSR_GPSR(IP8_7_6, AVB_RXD6),
 	PINMUX_IPSR_MSEL(IP8_9_8, VI0_DATA5_VI0_B5, SEL_VI0_0),
-	PINMUX_IPSR_DATA(IP8_9_8, EX_WAIT1),
-	PINMUX_IPSR_DATA(IP8_9_8, AVB_RXD7),
+	PINMUX_IPSR_GPSR(IP8_9_8, EX_WAIT1),
+	PINMUX_IPSR_GPSR(IP8_9_8, AVB_RXD7),
 	PINMUX_IPSR_MSEL(IP8_11_10, VI0_DATA6_VI0_B6, SEL_VI0_0),
-	PINMUX_IPSR_DATA(IP8_11_10, AVB_RX_ER),
+	PINMUX_IPSR_GPSR(IP8_11_10, AVB_RX_ER),
 	PINMUX_IPSR_MSEL(IP8_13_12, VI0_DATA7_VI0_B7, SEL_VI0_0),
-	PINMUX_IPSR_DATA(IP8_13_12, AVB_RX_CLK),
+	PINMUX_IPSR_GPSR(IP8_13_12, AVB_RX_CLK),
 	PINMUX_IPSR_MSEL(IP8_15_14, VI1_CLK, SEL_VI1_0),
-	PINMUX_IPSR_DATA(IP8_15_14, AVB_RX_DV),
+	PINMUX_IPSR_GPSR(IP8_15_14, AVB_RX_DV),
 	PINMUX_IPSR_MSEL(IP8_17_16, VI1_DATA0_VI1_B0, SEL_VI1_0),
 	PINMUX_IPSR_MSEL(IP8_17_16, SCIFA1_SCK_D, SEL_SCIFA1_3),
-	PINMUX_IPSR_DATA(IP8_17_16, AVB_CRS),
+	PINMUX_IPSR_GPSR(IP8_17_16, AVB_CRS),
 	PINMUX_IPSR_MSEL(IP8_19_18, VI1_DATA1_VI1_B1, SEL_VI1_0),
 	PINMUX_IPSR_MSEL(IP8_19_18, SCIFA1_RXD_D, SEL_SCIFA1_3),
-	PINMUX_IPSR_DATA(IP8_19_18, AVB_MDC),
+	PINMUX_IPSR_GPSR(IP8_19_18, AVB_MDC),
 	PINMUX_IPSR_MSEL(IP8_21_20, VI1_DATA2_VI1_B2, SEL_VI1_0),
 	PINMUX_IPSR_MSEL(IP8_21_20, SCIFA1_TXD_D, SEL_SCIFA1_3),
-	PINMUX_IPSR_DATA(IP8_21_20, AVB_MDIO),
+	PINMUX_IPSR_GPSR(IP8_21_20, AVB_MDIO),
 	PINMUX_IPSR_MSEL(IP8_23_22, VI1_DATA3_VI1_B3, SEL_VI1_0),
 	PINMUX_IPSR_MSEL(IP8_23_22, SCIFA1_CTS_N_D, SEL_SCIFA1_3),
-	PINMUX_IPSR_DATA(IP8_23_22, AVB_GTX_CLK),
+	PINMUX_IPSR_GPSR(IP8_23_22, AVB_GTX_CLK),
 	PINMUX_IPSR_MSEL(IP8_25_24, VI1_DATA4_VI1_B4, SEL_VI1_0),
 	PINMUX_IPSR_MSEL(IP8_25_24, SCIFA1_RTS_N_D, SEL_SCIFA1_3),
-	PINMUX_IPSR_DATA(IP8_25_24, AVB_MAGIC),
+	PINMUX_IPSR_GPSR(IP8_25_24, AVB_MAGIC),
 	PINMUX_IPSR_MSEL(IP8_26, VI1_DATA5_VI1_B5, SEL_VI1_0),
-	PINMUX_IPSR_DATA(IP8_26, AVB_PHY_INT),
+	PINMUX_IPSR_GPSR(IP8_26, AVB_PHY_INT),
 	PINMUX_IPSR_MSEL(IP8_27, VI1_DATA6_VI1_B6, SEL_VI1_0),
-	PINMUX_IPSR_DATA(IP8_27, AVB_GTXREFCLK),
-	PINMUX_IPSR_DATA(IP8_28, SD0_CLK),
+	PINMUX_IPSR_GPSR(IP8_27, AVB_GTXREFCLK),
+	PINMUX_IPSR_GPSR(IP8_28, SD0_CLK),
 	PINMUX_IPSR_MSEL(IP8_28, VI1_DATA0_VI1_B0_B, SEL_VI1_1),
-	PINMUX_IPSR_DATA(IP8_30_29, SD0_CMD),
+	PINMUX_IPSR_GPSR(IP8_30_29, SD0_CMD),
 	PINMUX_IPSR_MSEL(IP8_30_29, SCIFB1_SCK_B, SEL_SCIFB1_1),
 	PINMUX_IPSR_MSEL(IP8_30_29, VI1_DATA1_VI1_B1_B, SEL_VI1_1),
 
-	PINMUX_IPSR_DATA(IP9_1_0, SD0_DAT0),
+	PINMUX_IPSR_GPSR(IP9_1_0, SD0_DAT0),
 	PINMUX_IPSR_MSEL(IP9_1_0, SCIFB1_RXD_B, SEL_SCIFB1_1),
 	PINMUX_IPSR_MSEL(IP9_1_0, VI1_DATA2_VI1_B2_B, SEL_VI1_1),
-	PINMUX_IPSR_DATA(IP9_3_2, SD0_DAT1),
+	PINMUX_IPSR_GPSR(IP9_3_2, SD0_DAT1),
 	PINMUX_IPSR_MSEL(IP9_3_2, SCIFB1_TXD_B, SEL_SCIFB1_1),
 	PINMUX_IPSR_MSEL(IP9_3_2, VI1_DATA3_VI1_B3_B, SEL_VI1_1),
-	PINMUX_IPSR_DATA(IP9_5_4, SD0_DAT2),
+	PINMUX_IPSR_GPSR(IP9_5_4, SD0_DAT2),
 	PINMUX_IPSR_MSEL(IP9_5_4, SCIFB1_CTS_N_B, SEL_SCIFB1_1),
 	PINMUX_IPSR_MSEL(IP9_5_4, VI1_DATA4_VI1_B4_B, SEL_VI1_1),
-	PINMUX_IPSR_DATA(IP9_7_6, SD0_DAT3),
+	PINMUX_IPSR_GPSR(IP9_7_6, SD0_DAT3),
 	PINMUX_IPSR_MSEL(IP9_7_6, SCIFB1_RTS_N_B, SEL_SCIFB1_1),
 	PINMUX_IPSR_MSEL(IP9_7_6, VI1_DATA5_VI1_B5_B, SEL_VI1_1),
-	PINMUX_IPSR_DATA(IP9_11_8, SD0_CD),
-	PINMUX_IPSR_DATA(IP9_11_8, MMC0_D6),
+	PINMUX_IPSR_GPSR(IP9_11_8, SD0_CD),
+	PINMUX_IPSR_GPSR(IP9_11_8, MMC0_D6),
 	PINMUX_IPSR_MSEL(IP9_11_8, TS_SDEN0_B, SEL_TSIF0_1),
-	PINMUX_IPSR_DATA(IP9_11_8, USB0_EXTP),
+	PINMUX_IPSR_GPSR(IP9_11_8, USB0_EXTP),
 	PINMUX_IPSR_MSEL(IP9_11_8, GLO_SCLK, SEL_GPS_0),
 	PINMUX_IPSR_MSEL(IP9_11_8, VI1_DATA6_VI1_B6_B, SEL_VI1_1),
 	PINMUX_IPSR_MSEL(IP9_11_8, IIC1_SCL_B, SEL_IIC1_1),
 	PINMUX_IPSR_MSEL(IP9_11_8, I2C1_SCL_B, SEL_I2C1_1),
 	PINMUX_IPSR_MSEL(IP9_11_8, VI2_DATA6_VI2_B6_B, SEL_VI2_1),
-	PINMUX_IPSR_DATA(IP9_15_12, SD0_WP),
-	PINMUX_IPSR_DATA(IP9_15_12, MMC0_D7),
+	PINMUX_IPSR_GPSR(IP9_15_12, SD0_WP),
+	PINMUX_IPSR_GPSR(IP9_15_12, MMC0_D7),
 	PINMUX_IPSR_MSEL(IP9_15_12, TS_SPSYNC0_B, SEL_TSIF0_1),
-	PINMUX_IPSR_DATA(IP9_15_12, USB0_IDIN),
+	PINMUX_IPSR_GPSR(IP9_15_12, USB0_IDIN),
 	PINMUX_IPSR_MSEL(IP9_15_12, GLO_SDATA, SEL_GPS_0),
 	PINMUX_IPSR_MSEL(IP9_15_12, VI1_DATA7_VI1_B7_B, SEL_VI1_1),
 	PINMUX_IPSR_MSEL(IP9_15_12, IIC1_SDA_B, SEL_IIC1_1),
 	PINMUX_IPSR_MSEL(IP9_15_12, I2C1_SDA_B, SEL_I2C1_1),
 	PINMUX_IPSR_MSEL(IP9_15_12, VI2_DATA7_VI2_B7_B, SEL_VI2_1),
-	PINMUX_IPSR_DATA(IP9_17_16, SD1_CLK),
-	PINMUX_IPSR_DATA(IP9_17_16, AVB_TX_EN),
-	PINMUX_IPSR_DATA(IP9_19_18, SD1_CMD),
-	PINMUX_IPSR_DATA(IP9_19_18, AVB_TX_ER),
+	PINMUX_IPSR_GPSR(IP9_17_16, SD1_CLK),
+	PINMUX_IPSR_GPSR(IP9_17_16, AVB_TX_EN),
+	PINMUX_IPSR_GPSR(IP9_19_18, SD1_CMD),
+	PINMUX_IPSR_GPSR(IP9_19_18, AVB_TX_ER),
 	PINMUX_IPSR_MSEL(IP9_19_18, SCIFB0_SCK_B, SEL_SCIFB_1),
-	PINMUX_IPSR_DATA(IP9_21_20, SD1_DAT0),
-	PINMUX_IPSR_DATA(IP9_21_20, AVB_TX_CLK),
+	PINMUX_IPSR_GPSR(IP9_21_20, SD1_DAT0),
+	PINMUX_IPSR_GPSR(IP9_21_20, AVB_TX_CLK),
 	PINMUX_IPSR_MSEL(IP9_21_20, SCIFB0_RXD_B, SEL_SCIFB_1),
-	PINMUX_IPSR_DATA(IP9_23_22, SD1_DAT1),
-	PINMUX_IPSR_DATA(IP9_23_22, AVB_LINK),
+	PINMUX_IPSR_GPSR(IP9_23_22, SD1_DAT1),
+	PINMUX_IPSR_GPSR(IP9_23_22, AVB_LINK),
 	PINMUX_IPSR_MSEL(IP9_23_22, SCIFB0_TXD_B, SEL_SCIFB_1),
-	PINMUX_IPSR_DATA(IP9_25_24, SD1_DAT2),
-	PINMUX_IPSR_DATA(IP9_25_24, AVB_COL),
+	PINMUX_IPSR_GPSR(IP9_25_24, SD1_DAT2),
+	PINMUX_IPSR_GPSR(IP9_25_24, AVB_COL),
 	PINMUX_IPSR_MSEL(IP9_25_24, SCIFB0_CTS_N_B, SEL_SCIFB_1),
-	PINMUX_IPSR_DATA(IP9_27_26, SD1_DAT3),
-	PINMUX_IPSR_DATA(IP9_27_26, AVB_RXD0),
+	PINMUX_IPSR_GPSR(IP9_27_26, SD1_DAT3),
+	PINMUX_IPSR_GPSR(IP9_27_26, AVB_RXD0),
 	PINMUX_IPSR_MSEL(IP9_27_26, SCIFB0_RTS_N_B, SEL_SCIFB_1),
-	PINMUX_IPSR_DATA(IP9_31_28, SD1_CD),
-	PINMUX_IPSR_DATA(IP9_31_28, MMC1_D6),
+	PINMUX_IPSR_GPSR(IP9_31_28, SD1_CD),
+	PINMUX_IPSR_GPSR(IP9_31_28, MMC1_D6),
 	PINMUX_IPSR_MSEL(IP9_31_28, TS_SDEN1, SEL_TSIF1_0),
-	PINMUX_IPSR_DATA(IP9_31_28, USB1_EXTP),
+	PINMUX_IPSR_GPSR(IP9_31_28, USB1_EXTP),
 	PINMUX_IPSR_MSEL(IP9_31_28, GLO_SS, SEL_GPS_0),
 	PINMUX_IPSR_MSEL(IP9_31_28, VI0_CLK_B, SEL_VI0_1),
 	PINMUX_IPSR_MSEL(IP9_31_28, IIC2_SCL_D, SEL_IIC2_3),
@@ -1330,24 +1330,24 @@
 	PINMUX_IPSR_MSEL(IP9_31_28, SIM0_CLK_B, SEL_SIM_1),
 	PINMUX_IPSR_MSEL(IP9_31_28, VI3_CLK_B, SEL_VI3_1),
 
-	PINMUX_IPSR_DATA(IP10_3_0, SD1_WP),
-	PINMUX_IPSR_DATA(IP10_3_0, MMC1_D7),
+	PINMUX_IPSR_GPSR(IP10_3_0, SD1_WP),
+	PINMUX_IPSR_GPSR(IP10_3_0, MMC1_D7),
 	PINMUX_IPSR_MSEL(IP10_3_0, TS_SPSYNC1, SEL_TSIF1_0),
-	PINMUX_IPSR_DATA(IP10_3_0, USB1_IDIN),
+	PINMUX_IPSR_GPSR(IP10_3_0, USB1_IDIN),
 	PINMUX_IPSR_MSEL(IP10_3_0, GLO_RFON, SEL_GPS_0),
 	PINMUX_IPSR_MSEL(IP10_3_0, VI1_CLK_B, SEL_VI1_1),
 	PINMUX_IPSR_MSEL(IP10_3_0, IIC2_SDA_D, SEL_IIC2_3),
 	PINMUX_IPSR_MSEL(IP10_3_0, I2C2_SDA_D, SEL_I2C2_3),
 	PINMUX_IPSR_MSEL(IP10_3_0, SIM0_D_B, SEL_SIM_1),
-	PINMUX_IPSR_DATA(IP10_6_4, SD2_CLK),
-	PINMUX_IPSR_DATA(IP10_6_4, MMC0_CLK),
+	PINMUX_IPSR_GPSR(IP10_6_4, SD2_CLK),
+	PINMUX_IPSR_GPSR(IP10_6_4, MMC0_CLK),
 	PINMUX_IPSR_MSEL(IP10_6_4, SIM0_CLK, SEL_SIM_0),
 	PINMUX_IPSR_MSEL(IP10_6_4, VI0_DATA0_VI0_B0_B, SEL_VI0_1),
 	PINMUX_IPSR_MSEL(IP10_6_4, TS_SDEN0_C, SEL_TSIF0_2),
 	PINMUX_IPSR_MSEL(IP10_6_4, GLO_SCLK_B, SEL_GPS_1),
 	PINMUX_IPSR_MSEL(IP10_6_4, VI3_DATA0_B, SEL_VI3_1),
-	PINMUX_IPSR_DATA(IP10_10_7, SD2_CMD),
-	PINMUX_IPSR_DATA(IP10_10_7, MMC0_CMD),
+	PINMUX_IPSR_GPSR(IP10_10_7, SD2_CMD),
+	PINMUX_IPSR_GPSR(IP10_10_7, MMC0_CMD),
 	PINMUX_IPSR_MSEL(IP10_10_7, SIM0_D, SEL_SIM_0),
 	PINMUX_IPSR_MSEL(IP10_10_7, VI0_DATA1_VI0_B1_B, SEL_VI0_1),
 	PINMUX_IPSR_MSEL(IP10_10_7, SCIFB1_SCK_E, SEL_SCIFB1_4),
@@ -1355,8 +1355,8 @@
 	PINMUX_IPSR_MSEL(IP10_10_7, TS_SPSYNC0_C, SEL_TSIF0_2),
 	PINMUX_IPSR_MSEL(IP10_10_7, GLO_SDATA_B, SEL_GPS_1),
 	PINMUX_IPSR_MSEL(IP10_10_7, VI3_DATA1_B, SEL_VI3_1),
-	PINMUX_IPSR_DATA(IP10_14_11, SD2_DAT0),
-	PINMUX_IPSR_DATA(IP10_14_11, MMC0_D0),
+	PINMUX_IPSR_GPSR(IP10_14_11, SD2_DAT0),
+	PINMUX_IPSR_GPSR(IP10_14_11, MMC0_D0),
 	PINMUX_IPSR_MSEL(IP10_14_11, FMCLK_B, SEL_FM_1),
 	PINMUX_IPSR_MSEL(IP10_14_11, VI0_DATA2_VI0_B2_B, SEL_VI0_1),
 	PINMUX_IPSR_MSEL(IP10_14_11, SCIFB1_RXD_E, SEL_SCIFB1_4),
@@ -1364,8 +1364,8 @@
 	PINMUX_IPSR_MSEL(IP10_14_11, TS_SDAT0_C, SEL_TSIF0_2),
 	PINMUX_IPSR_MSEL(IP10_14_11, GLO_SS_B, SEL_GPS_1),
 	PINMUX_IPSR_MSEL(IP10_14_11, VI3_DATA2_B, SEL_VI3_1),
-	PINMUX_IPSR_DATA(IP10_18_15, SD2_DAT1),
-	PINMUX_IPSR_DATA(IP10_18_15, MMC0_D1),
+	PINMUX_IPSR_GPSR(IP10_18_15, SD2_DAT1),
+	PINMUX_IPSR_GPSR(IP10_18_15, MMC0_D1),
 	PINMUX_IPSR_MSEL(IP10_18_15, FMIN_B, SEL_FM_1),
 	PINMUX_IPSR_MSEL(IP10_18_15, VI0_DATA3_VI0_B3_B, SEL_VI0_1),
 	PINMUX_IPSR_MSEL(IP10_18_15, SCIFB1_TXD_E, SEL_SCIFB1_4),
@@ -1373,26 +1373,26 @@
 	PINMUX_IPSR_MSEL(IP10_18_15, TS_SCK0_C, SEL_TSIF0_2),
 	PINMUX_IPSR_MSEL(IP10_18_15, GLO_RFON_B, SEL_GPS_1),
 	PINMUX_IPSR_MSEL(IP10_18_15, VI3_DATA3_B, SEL_VI3_1),
-	PINMUX_IPSR_DATA(IP10_22_19, SD2_DAT2),
-	PINMUX_IPSR_DATA(IP10_22_19, MMC0_D2),
+	PINMUX_IPSR_GPSR(IP10_22_19, SD2_DAT2),
+	PINMUX_IPSR_GPSR(IP10_22_19, MMC0_D2),
 	PINMUX_IPSR_MSEL(IP10_22_19, BPFCLK_B, SEL_FM_1),
 	PINMUX_IPSR_MSEL(IP10_22_19, VI0_DATA4_VI0_B4_B, SEL_VI0_1),
 	PINMUX_IPSR_MSEL(IP10_22_19, HRX0_D, SEL_HSCIF0_3),
 	PINMUX_IPSR_MSEL(IP10_22_19, TS_SDEN1_B, SEL_TSIF1_1),
 	PINMUX_IPSR_MSEL(IP10_22_19, GLO_Q0_B, SEL_GPS_1),
 	PINMUX_IPSR_MSEL(IP10_22_19, VI3_DATA4_B, SEL_VI3_1),
-	PINMUX_IPSR_DATA(IP10_25_23, SD2_DAT3),
-	PINMUX_IPSR_DATA(IP10_25_23, MMC0_D3),
+	PINMUX_IPSR_GPSR(IP10_25_23, SD2_DAT3),
+	PINMUX_IPSR_GPSR(IP10_25_23, MMC0_D3),
 	PINMUX_IPSR_MSEL(IP10_25_23, SIM0_RST, SEL_SIM_0),
 	PINMUX_IPSR_MSEL(IP10_25_23, VI0_DATA5_VI0_B5_B, SEL_VI0_1),
 	PINMUX_IPSR_MSEL(IP10_25_23, HTX0_D, SEL_HSCIF0_3),
 	PINMUX_IPSR_MSEL(IP10_25_23, TS_SPSYNC1_B, SEL_TSIF1_1),
 	PINMUX_IPSR_MSEL(IP10_25_23, GLO_Q1_B, SEL_GPS_1),
 	PINMUX_IPSR_MSEL(IP10_25_23, VI3_DATA5_B, SEL_VI3_1),
-	PINMUX_IPSR_DATA(IP10_29_26, SD2_CD),
-	PINMUX_IPSR_DATA(IP10_29_26, MMC0_D4),
+	PINMUX_IPSR_GPSR(IP10_29_26, SD2_CD),
+	PINMUX_IPSR_GPSR(IP10_29_26, MMC0_D4),
 	PINMUX_IPSR_MSEL(IP10_29_26, TS_SDAT0_B, SEL_TSIF0_1),
-	PINMUX_IPSR_DATA(IP10_29_26, USB2_EXTP),
+	PINMUX_IPSR_GPSR(IP10_29_26, USB2_EXTP),
 	PINMUX_IPSR_MSEL(IP10_29_26, GLO_I0, SEL_GPS_0),
 	PINMUX_IPSR_MSEL(IP10_29_26, VI0_DATA6_VI0_B6_B, SEL_VI0_1),
 	PINMUX_IPSR_MSEL(IP10_29_26, HCTS0_N_D, SEL_HSCIF0_3),
@@ -1400,164 +1400,164 @@
 	PINMUX_IPSR_MSEL(IP10_29_26, GLO_I0_B, SEL_GPS_1),
 	PINMUX_IPSR_MSEL(IP10_29_26, VI3_DATA6_B, SEL_VI3_1),
 
-	PINMUX_IPSR_DATA(IP11_3_0, SD2_WP),
-	PINMUX_IPSR_DATA(IP11_3_0, MMC0_D5),
+	PINMUX_IPSR_GPSR(IP11_3_0, SD2_WP),
+	PINMUX_IPSR_GPSR(IP11_3_0, MMC0_D5),
 	PINMUX_IPSR_MSEL(IP11_3_0, TS_SCK0_B, SEL_TSIF0_1),
-	PINMUX_IPSR_DATA(IP11_3_0, USB2_IDIN),
+	PINMUX_IPSR_GPSR(IP11_3_0, USB2_IDIN),
 	PINMUX_IPSR_MSEL(IP11_3_0, GLO_I1, SEL_GPS_0),
 	PINMUX_IPSR_MSEL(IP11_3_0, VI0_DATA7_VI0_B7_B, SEL_VI0_1),
 	PINMUX_IPSR_MSEL(IP11_3_0, HRTS0_N_D, SEL_HSCIF0_3),
 	PINMUX_IPSR_MSEL(IP11_3_0, TS_SCK1_B, SEL_TSIF1_1),
 	PINMUX_IPSR_MSEL(IP11_3_0, GLO_I1_B, SEL_GPS_1),
 	PINMUX_IPSR_MSEL(IP11_3_0, VI3_DATA7_B, SEL_VI3_1),
-	PINMUX_IPSR_DATA(IP11_4, SD3_CLK),
-	PINMUX_IPSR_DATA(IP11_4, MMC1_CLK),
-	PINMUX_IPSR_DATA(IP11_6_5, SD3_CMD),
-	PINMUX_IPSR_DATA(IP11_6_5, MMC1_CMD),
-	PINMUX_IPSR_DATA(IP11_6_5, MTS_N),
-	PINMUX_IPSR_DATA(IP11_8_7, SD3_DAT0),
-	PINMUX_IPSR_DATA(IP11_8_7, MMC1_D0),
-	PINMUX_IPSR_DATA(IP11_8_7, STM_N),
-	PINMUX_IPSR_DATA(IP11_10_9, SD3_DAT1),
-	PINMUX_IPSR_DATA(IP11_10_9, MMC1_D1),
-	PINMUX_IPSR_DATA(IP11_10_9, MDATA),
-	PINMUX_IPSR_DATA(IP11_12_11, SD3_DAT2),
-	PINMUX_IPSR_DATA(IP11_12_11, MMC1_D2),
-	PINMUX_IPSR_DATA(IP11_12_11, SDATA),
-	PINMUX_IPSR_DATA(IP11_14_13, SD3_DAT3),
-	PINMUX_IPSR_DATA(IP11_14_13, MMC1_D3),
-	PINMUX_IPSR_DATA(IP11_14_13, SCKZ),
-	PINMUX_IPSR_DATA(IP11_17_15, SD3_CD),
-	PINMUX_IPSR_DATA(IP11_17_15, MMC1_D4),
+	PINMUX_IPSR_GPSR(IP11_4, SD3_CLK),
+	PINMUX_IPSR_GPSR(IP11_4, MMC1_CLK),
+	PINMUX_IPSR_GPSR(IP11_6_5, SD3_CMD),
+	PINMUX_IPSR_GPSR(IP11_6_5, MMC1_CMD),
+	PINMUX_IPSR_GPSR(IP11_6_5, MTS_N),
+	PINMUX_IPSR_GPSR(IP11_8_7, SD3_DAT0),
+	PINMUX_IPSR_GPSR(IP11_8_7, MMC1_D0),
+	PINMUX_IPSR_GPSR(IP11_8_7, STM_N),
+	PINMUX_IPSR_GPSR(IP11_10_9, SD3_DAT1),
+	PINMUX_IPSR_GPSR(IP11_10_9, MMC1_D1),
+	PINMUX_IPSR_GPSR(IP11_10_9, MDATA),
+	PINMUX_IPSR_GPSR(IP11_12_11, SD3_DAT2),
+	PINMUX_IPSR_GPSR(IP11_12_11, MMC1_D2),
+	PINMUX_IPSR_GPSR(IP11_12_11, SDATA),
+	PINMUX_IPSR_GPSR(IP11_14_13, SD3_DAT3),
+	PINMUX_IPSR_GPSR(IP11_14_13, MMC1_D3),
+	PINMUX_IPSR_GPSR(IP11_14_13, SCKZ),
+	PINMUX_IPSR_GPSR(IP11_17_15, SD3_CD),
+	PINMUX_IPSR_GPSR(IP11_17_15, MMC1_D4),
 	PINMUX_IPSR_MSEL(IP11_17_15, TS_SDAT1, SEL_TSIF1_0),
-	PINMUX_IPSR_DATA(IP11_17_15, VSP),
+	PINMUX_IPSR_GPSR(IP11_17_15, VSP),
 	PINMUX_IPSR_MSEL(IP11_17_15, GLO_Q0, SEL_GPS_0),
 	PINMUX_IPSR_MSEL(IP11_17_15, SIM0_RST_B, SEL_SIM_1),
-	PINMUX_IPSR_DATA(IP11_21_18, SD3_WP),
-	PINMUX_IPSR_DATA(IP11_21_18, MMC1_D5),
+	PINMUX_IPSR_GPSR(IP11_21_18, SD3_WP),
+	PINMUX_IPSR_GPSR(IP11_21_18, MMC1_D5),
 	PINMUX_IPSR_MSEL(IP11_21_18, TS_SCK1, SEL_TSIF1_0),
 	PINMUX_IPSR_MSEL(IP11_21_18, GLO_Q1, SEL_GPS_0),
 	PINMUX_IPSR_MSEL(IP11_21_18, FMIN_C, SEL_FM_2),
 	PINMUX_IPSR_MSEL(IP11_21_18, FMIN_E, SEL_FM_4),
 	PINMUX_IPSR_MSEL(IP11_21_18, FMIN_F, SEL_FM_5),
-	PINMUX_IPSR_DATA(IP11_23_22, MLB_CLK),
+	PINMUX_IPSR_GPSR(IP11_23_22, MLB_CLK),
 	PINMUX_IPSR_MSEL(IP11_23_22, IIC2_SCL_B, SEL_IIC2_1),
 	PINMUX_IPSR_MSEL(IP11_23_22, I2C2_SCL_B, SEL_I2C2_1),
-	PINMUX_IPSR_DATA(IP11_26_24, MLB_SIG),
+	PINMUX_IPSR_GPSR(IP11_26_24, MLB_SIG),
 	PINMUX_IPSR_MSEL(IP11_26_24, SCIFB1_RXD_D, SEL_SCIFB1_3),
 	PINMUX_IPSR_MSEL(IP11_26_24, RX1_C, SEL_SCIF1_2),
 	PINMUX_IPSR_MSEL(IP11_26_24, IIC2_SDA_B, SEL_IIC2_1),
 	PINMUX_IPSR_MSEL(IP11_26_24, I2C2_SDA_B, SEL_I2C2_1),
-	PINMUX_IPSR_DATA(IP11_29_27, MLB_DAT),
+	PINMUX_IPSR_GPSR(IP11_29_27, MLB_DAT),
 	PINMUX_IPSR_MSEL(IP11_29_27, SCIFB1_TXD_D, SEL_SCIFB1_3),
 	PINMUX_IPSR_MSEL(IP11_29_27, TX1_C, SEL_SCIF1_2),
 	PINMUX_IPSR_MSEL(IP11_29_27, BPFCLK_C, SEL_FM_2),
-	PINMUX_IPSR_DATA(IP11_31_30, SSI_SCK0129),
+	PINMUX_IPSR_GPSR(IP11_31_30, SSI_SCK0129),
 	PINMUX_IPSR_MSEL(IP11_31_30, CAN_CLK_B, SEL_CANCLK_1),
-	PINMUX_IPSR_DATA(IP11_31_30, MOUT0),
+	PINMUX_IPSR_GPSR(IP11_31_30, MOUT0),
 
-	PINMUX_IPSR_DATA(IP12_1_0, SSI_WS0129),
+	PINMUX_IPSR_GPSR(IP12_1_0, SSI_WS0129),
 	PINMUX_IPSR_MSEL(IP12_1_0, CAN0_TX_B, SEL_CAN0_1),
-	PINMUX_IPSR_DATA(IP12_1_0, MOUT1),
-	PINMUX_IPSR_DATA(IP12_3_2, SSI_SDATA0),
+	PINMUX_IPSR_GPSR(IP12_1_0, MOUT1),
+	PINMUX_IPSR_GPSR(IP12_3_2, SSI_SDATA0),
 	PINMUX_IPSR_MSEL(IP12_3_2, CAN0_RX_B, SEL_CAN0_1),
-	PINMUX_IPSR_DATA(IP12_3_2, MOUT2),
-	PINMUX_IPSR_DATA(IP12_5_4, SSI_SDATA1),
+	PINMUX_IPSR_GPSR(IP12_3_2, MOUT2),
+	PINMUX_IPSR_GPSR(IP12_5_4, SSI_SDATA1),
 	PINMUX_IPSR_MSEL(IP12_5_4, CAN1_TX_B, SEL_CAN1_1),
-	PINMUX_IPSR_DATA(IP12_5_4, MOUT5),
-	PINMUX_IPSR_DATA(IP12_7_6, SSI_SDATA2),
+	PINMUX_IPSR_GPSR(IP12_5_4, MOUT5),
+	PINMUX_IPSR_GPSR(IP12_7_6, SSI_SDATA2),
 	PINMUX_IPSR_MSEL(IP12_7_6, CAN1_RX_B, SEL_CAN1_1),
-	PINMUX_IPSR_DATA(IP12_7_6, SSI_SCK1),
-	PINMUX_IPSR_DATA(IP12_7_6, MOUT6),
-	PINMUX_IPSR_DATA(IP12_10_8, SSI_SCK34),
-	PINMUX_IPSR_DATA(IP12_10_8, STP_OPWM_0),
+	PINMUX_IPSR_GPSR(IP12_7_6, SSI_SCK1),
+	PINMUX_IPSR_GPSR(IP12_7_6, MOUT6),
+	PINMUX_IPSR_GPSR(IP12_10_8, SSI_SCK34),
+	PINMUX_IPSR_GPSR(IP12_10_8, STP_OPWM_0),
 	PINMUX_IPSR_MSEL(IP12_10_8, SCIFB0_SCK, SEL_SCIFB_0),
 	PINMUX_IPSR_MSEL(IP12_10_8, MSIOF1_SCK, SEL_SOF1_0),
-	PINMUX_IPSR_DATA(IP12_10_8, CAN_DEBUG_HW_TRIGGER),
-	PINMUX_IPSR_DATA(IP12_13_11, SSI_WS34),
+	PINMUX_IPSR_GPSR(IP12_10_8, CAN_DEBUG_HW_TRIGGER),
+	PINMUX_IPSR_GPSR(IP12_13_11, SSI_WS34),
 	PINMUX_IPSR_MSEL(IP12_13_11, STP_IVCXO27_0, SEL_SSP_0),
 	PINMUX_IPSR_MSEL(IP12_13_11, SCIFB0_RXD, SEL_SCIFB_0),
-	PINMUX_IPSR_DATA(IP12_13_11, MSIOF1_SYNC),
-	PINMUX_IPSR_DATA(IP12_13_11, CAN_STEP0),
-	PINMUX_IPSR_DATA(IP12_16_14, SSI_SDATA3),
+	PINMUX_IPSR_GPSR(IP12_13_11, MSIOF1_SYNC),
+	PINMUX_IPSR_GPSR(IP12_13_11, CAN_STEP0),
+	PINMUX_IPSR_GPSR(IP12_16_14, SSI_SDATA3),
 	PINMUX_IPSR_MSEL(IP12_16_14, STP_ISCLK_0, SEL_SSP_0),
 	PINMUX_IPSR_MSEL(IP12_16_14, SCIFB0_TXD, SEL_SCIFB_0),
 	PINMUX_IPSR_MSEL(IP12_16_14, MSIOF1_SS1, SEL_SOF1_0),
-	PINMUX_IPSR_DATA(IP12_16_14, CAN_TXCLK),
-	PINMUX_IPSR_DATA(IP12_19_17, SSI_SCK4),
+	PINMUX_IPSR_GPSR(IP12_16_14, CAN_TXCLK),
+	PINMUX_IPSR_GPSR(IP12_19_17, SSI_SCK4),
 	PINMUX_IPSR_MSEL(IP12_19_17, STP_ISD_0, SEL_SSP_0),
 	PINMUX_IPSR_MSEL(IP12_19_17, SCIFB0_CTS_N, SEL_SCIFB_0),
 	PINMUX_IPSR_MSEL(IP12_19_17, MSIOF1_SS2, SEL_SOF1_0),
 	PINMUX_IPSR_MSEL(IP12_19_17, SSI_SCK5_C, SEL_SSI5_2),
-	PINMUX_IPSR_DATA(IP12_19_17, CAN_DEBUGOUT0),
-	PINMUX_IPSR_DATA(IP12_22_20, SSI_WS4),
+	PINMUX_IPSR_GPSR(IP12_19_17, CAN_DEBUGOUT0),
+	PINMUX_IPSR_GPSR(IP12_22_20, SSI_WS4),
 	PINMUX_IPSR_MSEL(IP12_22_20, STP_ISEN_0, SEL_SSP_0),
 	PINMUX_IPSR_MSEL(IP12_22_20, SCIFB0_RTS_N, SEL_SCIFB_0),
 	PINMUX_IPSR_MSEL(IP12_22_20, MSIOF1_TXD, SEL_SOF1_0),
 	PINMUX_IPSR_MSEL(IP12_22_20, SSI_WS5_C, SEL_SSI5_2),
-	PINMUX_IPSR_DATA(IP12_22_20, CAN_DEBUGOUT1),
-	PINMUX_IPSR_DATA(IP12_24_23, SSI_SDATA4),
+	PINMUX_IPSR_GPSR(IP12_22_20, CAN_DEBUGOUT1),
+	PINMUX_IPSR_GPSR(IP12_24_23, SSI_SDATA4),
 	PINMUX_IPSR_MSEL(IP12_24_23, STP_ISSYNC_0, SEL_SSP_0),
 	PINMUX_IPSR_MSEL(IP12_24_23, MSIOF1_RXD, SEL_SOF1_0),
-	PINMUX_IPSR_DATA(IP12_24_23, CAN_DEBUGOUT2),
+	PINMUX_IPSR_GPSR(IP12_24_23, CAN_DEBUGOUT2),
 	PINMUX_IPSR_MSEL(IP12_27_25, SSI_SCK5, SEL_SSI5_0),
 	PINMUX_IPSR_MSEL(IP12_27_25, SCIFB1_SCK, SEL_SCIFB1_0),
 	PINMUX_IPSR_MSEL(IP12_27_25, IERX_B, SEL_IEB_1),
-	PINMUX_IPSR_DATA(IP12_27_25, DU2_EXHSYNC_DU2_HSYNC),
-	PINMUX_IPSR_DATA(IP12_27_25, QSTH_QHS),
-	PINMUX_IPSR_DATA(IP12_27_25, CAN_DEBUGOUT3),
+	PINMUX_IPSR_GPSR(IP12_27_25, DU2_EXHSYNC_DU2_HSYNC),
+	PINMUX_IPSR_GPSR(IP12_27_25, QSTH_QHS),
+	PINMUX_IPSR_GPSR(IP12_27_25, CAN_DEBUGOUT3),
 	PINMUX_IPSR_MSEL(IP12_30_28, SSI_WS5, SEL_SSI5_0),
 	PINMUX_IPSR_MSEL(IP12_30_28, SCIFB1_RXD, SEL_SCIFB1_0),
 	PINMUX_IPSR_MSEL(IP12_30_28, IECLK_B, SEL_IEB_1),
-	PINMUX_IPSR_DATA(IP12_30_28, DU2_EXVSYNC_DU2_VSYNC),
-	PINMUX_IPSR_DATA(IP12_30_28, QSTB_QHE),
-	PINMUX_IPSR_DATA(IP12_30_28, CAN_DEBUGOUT4),
+	PINMUX_IPSR_GPSR(IP12_30_28, DU2_EXVSYNC_DU2_VSYNC),
+	PINMUX_IPSR_GPSR(IP12_30_28, QSTB_QHE),
+	PINMUX_IPSR_GPSR(IP12_30_28, CAN_DEBUGOUT4),
 
 	PINMUX_IPSR_MSEL(IP13_2_0, SSI_SDATA5, SEL_SSI5_0),
 	PINMUX_IPSR_MSEL(IP13_2_0, SCIFB1_TXD, SEL_SCIFB1_0),
 	PINMUX_IPSR_MSEL(IP13_2_0, IETX_B, SEL_IEB_1),
-	PINMUX_IPSR_DATA(IP13_2_0, DU2_DR2),
-	PINMUX_IPSR_DATA(IP13_2_0, LCDOUT2),
-	PINMUX_IPSR_DATA(IP13_2_0, CAN_DEBUGOUT5),
+	PINMUX_IPSR_GPSR(IP13_2_0, DU2_DR2),
+	PINMUX_IPSR_GPSR(IP13_2_0, LCDOUT2),
+	PINMUX_IPSR_GPSR(IP13_2_0, CAN_DEBUGOUT5),
 	PINMUX_IPSR_MSEL(IP13_6_3, SSI_SCK6, SEL_SSI6_0),
 	PINMUX_IPSR_MSEL(IP13_6_3, SCIFB1_CTS_N, SEL_SCIFB1_0),
 	PINMUX_IPSR_MSEL(IP13_6_3, BPFCLK_D, SEL_FM_3),
-	PINMUX_IPSR_DATA(IP13_6_3, DU2_DR3),
-	PINMUX_IPSR_DATA(IP13_6_3, LCDOUT3),
-	PINMUX_IPSR_DATA(IP13_6_3, CAN_DEBUGOUT6),
+	PINMUX_IPSR_GPSR(IP13_6_3, DU2_DR3),
+	PINMUX_IPSR_GPSR(IP13_6_3, LCDOUT3),
+	PINMUX_IPSR_GPSR(IP13_6_3, CAN_DEBUGOUT6),
 	PINMUX_IPSR_MSEL(IP13_6_3, BPFCLK_F, SEL_FM_5),
 	PINMUX_IPSR_MSEL(IP13_9_7, SSI_WS6, SEL_SSI6_0),
 	PINMUX_IPSR_MSEL(IP13_9_7, SCIFB1_RTS_N, SEL_SCIFB1_0),
 	PINMUX_IPSR_MSEL(IP13_9_7, CAN0_TX_D, SEL_CAN0_3),
-	PINMUX_IPSR_DATA(IP13_9_7, DU2_DR4),
-	PINMUX_IPSR_DATA(IP13_9_7, LCDOUT4),
-	PINMUX_IPSR_DATA(IP13_9_7, CAN_DEBUGOUT7),
+	PINMUX_IPSR_GPSR(IP13_9_7, DU2_DR4),
+	PINMUX_IPSR_GPSR(IP13_9_7, LCDOUT4),
+	PINMUX_IPSR_GPSR(IP13_9_7, CAN_DEBUGOUT7),
 	PINMUX_IPSR_MSEL(IP13_12_10, SSI_SDATA6, SEL_SSI6_0),
 	PINMUX_IPSR_MSEL(IP13_12_10, FMIN_D, SEL_FM_3),
-	PINMUX_IPSR_DATA(IP13_12_10, DU2_DR5),
-	PINMUX_IPSR_DATA(IP13_12_10, LCDOUT5),
-	PINMUX_IPSR_DATA(IP13_12_10, CAN_DEBUGOUT8),
+	PINMUX_IPSR_GPSR(IP13_12_10, DU2_DR5),
+	PINMUX_IPSR_GPSR(IP13_12_10, LCDOUT5),
+	PINMUX_IPSR_GPSR(IP13_12_10, CAN_DEBUGOUT8),
 	PINMUX_IPSR_MSEL(IP13_15_13, SSI_SCK78, SEL_SSI7_0),
 	PINMUX_IPSR_MSEL(IP13_15_13, STP_IVCXO27_1, SEL_SSP_0),
 	PINMUX_IPSR_MSEL(IP13_15_13, SCK1, SEL_SCIF1_0),
 	PINMUX_IPSR_MSEL(IP13_15_13, SCIFA1_SCK, SEL_SCIFA1_0),
-	PINMUX_IPSR_DATA(IP13_15_13, DU2_DR6),
-	PINMUX_IPSR_DATA(IP13_15_13, LCDOUT6),
-	PINMUX_IPSR_DATA(IP13_15_13, CAN_DEBUGOUT9),
+	PINMUX_IPSR_GPSR(IP13_15_13, DU2_DR6),
+	PINMUX_IPSR_GPSR(IP13_15_13, LCDOUT6),
+	PINMUX_IPSR_GPSR(IP13_15_13, CAN_DEBUGOUT9),
 	PINMUX_IPSR_MSEL(IP13_18_16, SSI_WS78, SEL_SSI7_0),
 	PINMUX_IPSR_MSEL(IP13_18_16, STP_ISCLK_1, SEL_SSP_0),
 	PINMUX_IPSR_MSEL(IP13_18_16, SCIFB2_SCK, SEL_SCIFB2_0),
-	PINMUX_IPSR_DATA(IP13_18_16, SCIFA2_CTS_N),
-	PINMUX_IPSR_DATA(IP13_18_16, DU2_DR7),
-	PINMUX_IPSR_DATA(IP13_18_16, LCDOUT7),
-	PINMUX_IPSR_DATA(IP13_18_16, CAN_DEBUGOUT10),
+	PINMUX_IPSR_GPSR(IP13_18_16, SCIFA2_CTS_N),
+	PINMUX_IPSR_GPSR(IP13_18_16, DU2_DR7),
+	PINMUX_IPSR_GPSR(IP13_18_16, LCDOUT7),
+	PINMUX_IPSR_GPSR(IP13_18_16, CAN_DEBUGOUT10),
 	PINMUX_IPSR_MSEL(IP13_22_19, SSI_SDATA7, SEL_SSI7_0),
 	PINMUX_IPSR_MSEL(IP13_22_19, STP_ISD_1, SEL_SSP_0),
 	PINMUX_IPSR_MSEL(IP13_22_19, SCIFB2_RXD, SEL_SCIFB2_0),
-	PINMUX_IPSR_DATA(IP13_22_19, SCIFA2_RTS_N),
-	PINMUX_IPSR_DATA(IP13_22_19, TCLK2),
-	PINMUX_IPSR_DATA(IP13_22_19, QSTVA_QVS),
-	PINMUX_IPSR_DATA(IP13_22_19, CAN_DEBUGOUT11),
+	PINMUX_IPSR_GPSR(IP13_22_19, SCIFA2_RTS_N),
+	PINMUX_IPSR_GPSR(IP13_22_19, TCLK2),
+	PINMUX_IPSR_GPSR(IP13_22_19, QSTVA_QVS),
+	PINMUX_IPSR_GPSR(IP13_22_19, CAN_DEBUGOUT11),
 	PINMUX_IPSR_MSEL(IP13_22_19, BPFCLK_E, SEL_FM_4),
 	PINMUX_IPSR_MSEL(IP13_22_19, SSI_SDATA7_B, SEL_SSI7_1),
 	PINMUX_IPSR_MSEL(IP13_22_19, FMIN_G, SEL_FM_6),
@@ -1565,161 +1565,161 @@
 	PINMUX_IPSR_MSEL(IP13_25_23, STP_ISEN_1, SEL_SSP_0),
 	PINMUX_IPSR_MSEL(IP13_25_23, SCIFB2_TXD, SEL_SCIFB2_0),
 	PINMUX_IPSR_MSEL(IP13_25_23, CAN0_TX_C, SEL_CAN0_2),
-	PINMUX_IPSR_DATA(IP13_25_23, CAN_DEBUGOUT12),
+	PINMUX_IPSR_GPSR(IP13_25_23, CAN_DEBUGOUT12),
 	PINMUX_IPSR_MSEL(IP13_25_23, SSI_SDATA8_B, SEL_SSI8_1),
-	PINMUX_IPSR_DATA(IP13_28_26, SSI_SDATA9),
+	PINMUX_IPSR_GPSR(IP13_28_26, SSI_SDATA9),
 	PINMUX_IPSR_MSEL(IP13_28_26, STP_ISSYNC_1, SEL_SSP_0),
 	PINMUX_IPSR_MSEL(IP13_28_26, SCIFB2_CTS_N, SEL_SCIFB2_0),
-	PINMUX_IPSR_DATA(IP13_28_26, SSI_WS1),
+	PINMUX_IPSR_GPSR(IP13_28_26, SSI_WS1),
 	PINMUX_IPSR_MSEL(IP13_28_26, SSI_SDATA5_C, SEL_SSI5_2),
-	PINMUX_IPSR_DATA(IP13_28_26, CAN_DEBUGOUT13),
-	PINMUX_IPSR_DATA(IP13_30_29, AUDIO_CLKA),
+	PINMUX_IPSR_GPSR(IP13_28_26, CAN_DEBUGOUT13),
+	PINMUX_IPSR_GPSR(IP13_30_29, AUDIO_CLKA),
 	PINMUX_IPSR_MSEL(IP13_30_29, SCIFB2_RTS_N, SEL_SCIFB2_0),
-	PINMUX_IPSR_DATA(IP13_30_29, CAN_DEBUGOUT14),
+	PINMUX_IPSR_GPSR(IP13_30_29, CAN_DEBUGOUT14),
 
-	PINMUX_IPSR_DATA(IP14_2_0, AUDIO_CLKB),
+	PINMUX_IPSR_GPSR(IP14_2_0, AUDIO_CLKB),
 	PINMUX_IPSR_MSEL(IP14_2_0, SCIF_CLK, SEL_SCIFCLK_0),
 	PINMUX_IPSR_MSEL(IP14_2_0, CAN0_RX_D, SEL_CAN0_3),
-	PINMUX_IPSR_DATA(IP14_2_0, DVC_MUTE),
+	PINMUX_IPSR_GPSR(IP14_2_0, DVC_MUTE),
 	PINMUX_IPSR_MSEL(IP14_2_0, CAN0_RX_C, SEL_CAN0_2),
-	PINMUX_IPSR_DATA(IP14_2_0, CAN_DEBUGOUT15),
-	PINMUX_IPSR_DATA(IP14_2_0, REMOCON),
+	PINMUX_IPSR_GPSR(IP14_2_0, CAN_DEBUGOUT15),
+	PINMUX_IPSR_GPSR(IP14_2_0, REMOCON),
 	PINMUX_IPSR_MSEL(IP14_5_3, SCIFA0_SCK, SEL_SCFA_0),
 	PINMUX_IPSR_MSEL(IP14_5_3, HSCK1, SEL_HSCIF1_0),
-	PINMUX_IPSR_DATA(IP14_5_3, SCK0),
-	PINMUX_IPSR_DATA(IP14_5_3, MSIOF3_SS2),
-	PINMUX_IPSR_DATA(IP14_5_3, DU2_DG2),
-	PINMUX_IPSR_DATA(IP14_5_3, LCDOUT10),
+	PINMUX_IPSR_GPSR(IP14_5_3, SCK0),
+	PINMUX_IPSR_GPSR(IP14_5_3, MSIOF3_SS2),
+	PINMUX_IPSR_GPSR(IP14_5_3, DU2_DG2),
+	PINMUX_IPSR_GPSR(IP14_5_3, LCDOUT10),
 	PINMUX_IPSR_MSEL(IP14_5_3, IIC1_SDA_C, SEL_IIC1_2),
 	PINMUX_IPSR_MSEL(IP14_5_3, I2C1_SDA_C, SEL_I2C1_2),
 	PINMUX_IPSR_MSEL(IP14_8_6, SCIFA0_RXD, SEL_SCFA_0),
 	PINMUX_IPSR_MSEL(IP14_8_6, HRX1, SEL_HSCIF1_0),
 	PINMUX_IPSR_MSEL(IP14_8_6, RX0, SEL_SCIF0_0),
-	PINMUX_IPSR_DATA(IP14_8_6, DU2_DR0),
-	PINMUX_IPSR_DATA(IP14_8_6, LCDOUT0),
+	PINMUX_IPSR_GPSR(IP14_8_6, DU2_DR0),
+	PINMUX_IPSR_GPSR(IP14_8_6, LCDOUT0),
 	PINMUX_IPSR_MSEL(IP14_11_9, SCIFA0_TXD, SEL_SCFA_0),
 	PINMUX_IPSR_MSEL(IP14_11_9, HTX1, SEL_HSCIF1_0),
 	PINMUX_IPSR_MSEL(IP14_11_9, TX0, SEL_SCIF0_0),
-	PINMUX_IPSR_DATA(IP14_11_9, DU2_DR1),
-	PINMUX_IPSR_DATA(IP14_11_9, LCDOUT1),
+	PINMUX_IPSR_GPSR(IP14_11_9, DU2_DR1),
+	PINMUX_IPSR_GPSR(IP14_11_9, LCDOUT1),
 	PINMUX_IPSR_MSEL(IP14_15_12, SCIFA0_CTS_N, SEL_SCFA_0),
 	PINMUX_IPSR_MSEL(IP14_15_12, HCTS1_N, SEL_HSCIF1_0),
-	PINMUX_IPSR_DATA(IP14_15_12, CTS0_N),
+	PINMUX_IPSR_GPSR(IP14_15_12, CTS0_N),
 	PINMUX_IPSR_MSEL(IP14_15_12, MSIOF3_SYNC, SEL_SOF3_0),
-	PINMUX_IPSR_DATA(IP14_15_12, DU2_DG3),
-	PINMUX_IPSR_DATA(IP14_15_12, LCDOUT11),
-	PINMUX_IPSR_DATA(IP14_15_12, PWM0_B),
+	PINMUX_IPSR_GPSR(IP14_15_12, DU2_DG3),
+	PINMUX_IPSR_GPSR(IP14_15_12, LCDOUT11),
+	PINMUX_IPSR_GPSR(IP14_15_12, PWM0_B),
 	PINMUX_IPSR_MSEL(IP14_15_12, IIC1_SCL_C, SEL_IIC1_2),
 	PINMUX_IPSR_MSEL(IP14_15_12, I2C1_SCL_C, SEL_I2C1_2),
 	PINMUX_IPSR_MSEL(IP14_18_16, SCIFA0_RTS_N, SEL_SCFA_0),
 	PINMUX_IPSR_MSEL(IP14_18_16, HRTS1_N, SEL_HSCIF1_0),
-	PINMUX_IPSR_DATA(IP14_18_16, RTS0_N),
-	PINMUX_IPSR_DATA(IP14_18_16, MSIOF3_SS1),
-	PINMUX_IPSR_DATA(IP14_18_16, DU2_DG0),
-	PINMUX_IPSR_DATA(IP14_18_16, LCDOUT8),
-	PINMUX_IPSR_DATA(IP14_18_16, PWM1_B),
+	PINMUX_IPSR_GPSR(IP14_18_16, RTS0_N),
+	PINMUX_IPSR_GPSR(IP14_18_16, MSIOF3_SS1),
+	PINMUX_IPSR_GPSR(IP14_18_16, DU2_DG0),
+	PINMUX_IPSR_GPSR(IP14_18_16, LCDOUT8),
+	PINMUX_IPSR_GPSR(IP14_18_16, PWM1_B),
 	PINMUX_IPSR_MSEL(IP14_21_19, SCIFA1_RXD, SEL_SCIFA1_0),
 	PINMUX_IPSR_MSEL(IP14_21_19, AD_DI, SEL_ADI_0),
 	PINMUX_IPSR_MSEL(IP14_21_19, RX1, SEL_SCIF1_0),
-	PINMUX_IPSR_DATA(IP14_21_19, DU2_EXODDF_DU2_ODDF_DISP_CDE),
-	PINMUX_IPSR_DATA(IP14_21_19, QCPV_QDE),
+	PINMUX_IPSR_GPSR(IP14_21_19, DU2_EXODDF_DU2_ODDF_DISP_CDE),
+	PINMUX_IPSR_GPSR(IP14_21_19, QCPV_QDE),
 	PINMUX_IPSR_MSEL(IP14_24_22, SCIFA1_TXD, SEL_SCIFA1_0),
 	PINMUX_IPSR_MSEL(IP14_24_22, AD_DO, SEL_ADI_0),
 	PINMUX_IPSR_MSEL(IP14_24_22, TX1, SEL_SCIF1_0),
-	PINMUX_IPSR_DATA(IP14_24_22, DU2_DG1),
-	PINMUX_IPSR_DATA(IP14_24_22, LCDOUT9),
+	PINMUX_IPSR_GPSR(IP14_24_22, DU2_DG1),
+	PINMUX_IPSR_GPSR(IP14_24_22, LCDOUT9),
 	PINMUX_IPSR_MSEL(IP14_27_25, SCIFA1_CTS_N, SEL_SCIFA1_0),
 	PINMUX_IPSR_MSEL(IP14_27_25, AD_CLK, SEL_ADI_0),
-	PINMUX_IPSR_DATA(IP14_27_25, CTS1_N),
+	PINMUX_IPSR_GPSR(IP14_27_25, CTS1_N),
 	PINMUX_IPSR_MSEL(IP14_27_25, MSIOF3_RXD, SEL_SOF3_0),
-	PINMUX_IPSR_DATA(IP14_27_25, DU0_DOTCLKOUT),
-	PINMUX_IPSR_DATA(IP14_27_25, QCLK),
+	PINMUX_IPSR_GPSR(IP14_27_25, DU0_DOTCLKOUT),
+	PINMUX_IPSR_GPSR(IP14_27_25, QCLK),
 	PINMUX_IPSR_MSEL(IP14_30_28, SCIFA1_RTS_N, SEL_SCIFA1_0),
 	PINMUX_IPSR_MSEL(IP14_30_28, AD_NCS_N, SEL_ADI_0),
-	PINMUX_IPSR_DATA(IP14_30_28, RTS1_N),
+	PINMUX_IPSR_GPSR(IP14_30_28, RTS1_N),
 	PINMUX_IPSR_MSEL(IP14_30_28, MSIOF3_TXD, SEL_SOF3_0),
-	PINMUX_IPSR_DATA(IP14_30_28, DU1_DOTCLKOUT),
-	PINMUX_IPSR_DATA(IP14_30_28, QSTVB_QVE),
+	PINMUX_IPSR_GPSR(IP14_30_28, DU1_DOTCLKOUT),
+	PINMUX_IPSR_GPSR(IP14_30_28, QSTVB_QVE),
 	PINMUX_IPSR_MSEL(IP14_30_28, HRTS0_N_C, SEL_HSCIF0_2),
 
 	PINMUX_IPSR_MSEL(IP15_2_0, SCIFA2_SCK, SEL_SCIFA2_0),
 	PINMUX_IPSR_MSEL(IP15_2_0, FMCLK, SEL_FM_0),
-	PINMUX_IPSR_DATA(IP15_2_0, SCK2),
+	PINMUX_IPSR_GPSR(IP15_2_0, SCK2),
 	PINMUX_IPSR_MSEL(IP15_2_0, MSIOF3_SCK, SEL_SOF3_0),
-	PINMUX_IPSR_DATA(IP15_2_0, DU2_DG7),
-	PINMUX_IPSR_DATA(IP15_2_0, LCDOUT15),
+	PINMUX_IPSR_GPSR(IP15_2_0, DU2_DG7),
+	PINMUX_IPSR_GPSR(IP15_2_0, LCDOUT15),
 	PINMUX_IPSR_MSEL(IP15_2_0, SCIF_CLK_B, SEL_SCIFCLK_1),
 	PINMUX_IPSR_MSEL(IP15_5_3, SCIFA2_RXD, SEL_SCIFA2_0),
 	PINMUX_IPSR_MSEL(IP15_5_3, FMIN, SEL_FM_0),
 	PINMUX_IPSR_MSEL(IP15_5_3, TX2, SEL_SCIF2_0),
-	PINMUX_IPSR_DATA(IP15_5_3, DU2_DB0),
-	PINMUX_IPSR_DATA(IP15_5_3, LCDOUT16),
+	PINMUX_IPSR_GPSR(IP15_5_3, DU2_DB0),
+	PINMUX_IPSR_GPSR(IP15_5_3, LCDOUT16),
 	PINMUX_IPSR_MSEL(IP15_5_3, IIC2_SCL, SEL_IIC2_0),
 	PINMUX_IPSR_MSEL(IP15_5_3, I2C2_SCL, SEL_I2C2_0),
 	PINMUX_IPSR_MSEL(IP15_8_6, SCIFA2_TXD, SEL_SCIFA2_0),
 	PINMUX_IPSR_MSEL(IP15_8_6, BPFCLK, SEL_FM_0),
 	PINMUX_IPSR_MSEL(IP15_8_6, RX2, SEL_SCIF2_0),
-	PINMUX_IPSR_DATA(IP15_8_6, DU2_DB1),
-	PINMUX_IPSR_DATA(IP15_8_6, LCDOUT17),
+	PINMUX_IPSR_GPSR(IP15_8_6, DU2_DB1),
+	PINMUX_IPSR_GPSR(IP15_8_6, LCDOUT17),
 	PINMUX_IPSR_MSEL(IP15_8_6, IIC2_SDA, SEL_IIC2_0),
 	PINMUX_IPSR_MSEL(IP15_8_6, I2C2_SDA, SEL_I2C2_0),
-	PINMUX_IPSR_DATA(IP15_11_9, HSCK0),
+	PINMUX_IPSR_GPSR(IP15_11_9, HSCK0),
 	PINMUX_IPSR_MSEL(IP15_11_9, TS_SDEN0, SEL_TSIF0_0),
-	PINMUX_IPSR_DATA(IP15_11_9, DU2_DG4),
-	PINMUX_IPSR_DATA(IP15_11_9, LCDOUT12),
+	PINMUX_IPSR_GPSR(IP15_11_9, DU2_DG4),
+	PINMUX_IPSR_GPSR(IP15_11_9, LCDOUT12),
 	PINMUX_IPSR_MSEL(IP15_11_9, HCTS0_N_C, SEL_HSCIF0_2),
 	PINMUX_IPSR_MSEL(IP15_13_12, HRX0, SEL_HSCIF0_0),
-	PINMUX_IPSR_DATA(IP15_13_12, DU2_DB2),
-	PINMUX_IPSR_DATA(IP15_13_12, LCDOUT18),
+	PINMUX_IPSR_GPSR(IP15_13_12, DU2_DB2),
+	PINMUX_IPSR_GPSR(IP15_13_12, LCDOUT18),
 	PINMUX_IPSR_MSEL(IP15_15_14, HTX0, SEL_HSCIF0_0),
-	PINMUX_IPSR_DATA(IP15_15_14, DU2_DB3),
-	PINMUX_IPSR_DATA(IP15_15_14, LCDOUT19),
+	PINMUX_IPSR_GPSR(IP15_15_14, DU2_DB3),
+	PINMUX_IPSR_GPSR(IP15_15_14, LCDOUT19),
 	PINMUX_IPSR_MSEL(IP15_17_16, HCTS0_N, SEL_HSCIF0_0),
-	PINMUX_IPSR_DATA(IP15_17_16, SSI_SCK9),
-	PINMUX_IPSR_DATA(IP15_17_16, DU2_DB4),
-	PINMUX_IPSR_DATA(IP15_17_16, LCDOUT20),
+	PINMUX_IPSR_GPSR(IP15_17_16, SSI_SCK9),
+	PINMUX_IPSR_GPSR(IP15_17_16, DU2_DB4),
+	PINMUX_IPSR_GPSR(IP15_17_16, LCDOUT20),
 	PINMUX_IPSR_MSEL(IP15_19_18, HRTS0_N, SEL_HSCIF0_0),
-	PINMUX_IPSR_DATA(IP15_19_18, SSI_WS9),
-	PINMUX_IPSR_DATA(IP15_19_18, DU2_DB5),
-	PINMUX_IPSR_DATA(IP15_19_18, LCDOUT21),
+	PINMUX_IPSR_GPSR(IP15_19_18, SSI_WS9),
+	PINMUX_IPSR_GPSR(IP15_19_18, DU2_DB5),
+	PINMUX_IPSR_GPSR(IP15_19_18, LCDOUT21),
 	PINMUX_IPSR_MSEL(IP15_22_20, MSIOF0_SCK, SEL_SOF0_0),
 	PINMUX_IPSR_MSEL(IP15_22_20, TS_SDAT0, SEL_TSIF0_0),
-	PINMUX_IPSR_DATA(IP15_22_20, ADICLK),
-	PINMUX_IPSR_DATA(IP15_22_20, DU2_DB6),
-	PINMUX_IPSR_DATA(IP15_22_20, LCDOUT22),
-	PINMUX_IPSR_DATA(IP15_25_23, MSIOF0_SYNC),
+	PINMUX_IPSR_GPSR(IP15_22_20, ADICLK),
+	PINMUX_IPSR_GPSR(IP15_22_20, DU2_DB6),
+	PINMUX_IPSR_GPSR(IP15_22_20, LCDOUT22),
+	PINMUX_IPSR_GPSR(IP15_25_23, MSIOF0_SYNC),
 	PINMUX_IPSR_MSEL(IP15_25_23, TS_SCK0, SEL_TSIF0_0),
-	PINMUX_IPSR_DATA(IP15_25_23, SSI_SCK2),
-	PINMUX_IPSR_DATA(IP15_25_23, ADIDATA),
-	PINMUX_IPSR_DATA(IP15_25_23, DU2_DB7),
-	PINMUX_IPSR_DATA(IP15_25_23, LCDOUT23),
+	PINMUX_IPSR_GPSR(IP15_25_23, SSI_SCK2),
+	PINMUX_IPSR_GPSR(IP15_25_23, ADIDATA),
+	PINMUX_IPSR_GPSR(IP15_25_23, DU2_DB7),
+	PINMUX_IPSR_GPSR(IP15_25_23, LCDOUT23),
 	PINMUX_IPSR_MSEL(IP15_25_23, HRX0_C, SEL_SCIFA2_1),
 	PINMUX_IPSR_MSEL(IP15_27_26, MSIOF0_SS1, SEL_SOF0_0),
-	PINMUX_IPSR_DATA(IP15_27_26, ADICHS0),
-	PINMUX_IPSR_DATA(IP15_27_26, DU2_DG5),
-	PINMUX_IPSR_DATA(IP15_27_26, LCDOUT13),
+	PINMUX_IPSR_GPSR(IP15_27_26, ADICHS0),
+	PINMUX_IPSR_GPSR(IP15_27_26, DU2_DG5),
+	PINMUX_IPSR_GPSR(IP15_27_26, LCDOUT13),
 	PINMUX_IPSR_MSEL(IP15_29_28, MSIOF0_TXD, SEL_SOF0_0),
-	PINMUX_IPSR_DATA(IP15_29_28, ADICHS1),
-	PINMUX_IPSR_DATA(IP15_29_28, DU2_DG6),
-	PINMUX_IPSR_DATA(IP15_29_28, LCDOUT14),
+	PINMUX_IPSR_GPSR(IP15_29_28, ADICHS1),
+	PINMUX_IPSR_GPSR(IP15_29_28, DU2_DG6),
+	PINMUX_IPSR_GPSR(IP15_29_28, LCDOUT14),
 
 	PINMUX_IPSR_MSEL(IP16_2_0, MSIOF0_SS2, SEL_SOF0_0),
-	PINMUX_IPSR_DATA(IP16_2_0, AUDIO_CLKOUT),
-	PINMUX_IPSR_DATA(IP16_2_0, ADICHS2),
-	PINMUX_IPSR_DATA(IP16_2_0, DU2_DISP),
-	PINMUX_IPSR_DATA(IP16_2_0, QPOLA),
+	PINMUX_IPSR_GPSR(IP16_2_0, AUDIO_CLKOUT),
+	PINMUX_IPSR_GPSR(IP16_2_0, ADICHS2),
+	PINMUX_IPSR_GPSR(IP16_2_0, DU2_DISP),
+	PINMUX_IPSR_GPSR(IP16_2_0, QPOLA),
 	PINMUX_IPSR_MSEL(IP16_2_0, HTX0_C, SEL_HSCIF0_2),
 	PINMUX_IPSR_MSEL(IP16_2_0, SCIFA2_TXD_B, SEL_SCIFA2_1),
 	PINMUX_IPSR_MSEL(IP16_5_3, MSIOF0_RXD, SEL_SOF0_0),
 	PINMUX_IPSR_MSEL(IP16_5_3, TS_SPSYNC0, SEL_TSIF0_0),
-	PINMUX_IPSR_DATA(IP16_5_3, SSI_WS2),
-	PINMUX_IPSR_DATA(IP16_5_3, ADICS_SAMP),
-	PINMUX_IPSR_DATA(IP16_5_3, DU2_CDE),
-	PINMUX_IPSR_DATA(IP16_5_3, QPOLB),
+	PINMUX_IPSR_GPSR(IP16_5_3, SSI_WS2),
+	PINMUX_IPSR_GPSR(IP16_5_3, ADICS_SAMP),
+	PINMUX_IPSR_GPSR(IP16_5_3, DU2_CDE),
+	PINMUX_IPSR_GPSR(IP16_5_3, QPOLB),
 	PINMUX_IPSR_MSEL(IP16_5_3, SCIFA2_RXD_B, SEL_HSCIF0_2),
-	PINMUX_IPSR_DATA(IP16_6, USB1_PWEN),
-	PINMUX_IPSR_DATA(IP16_6, AUDIO_CLKOUT_D),
-	PINMUX_IPSR_DATA(IP16_7, USB1_OVC),
+	PINMUX_IPSR_GPSR(IP16_6, USB1_PWEN),
+	PINMUX_IPSR_GPSR(IP16_6, AUDIO_CLKOUT_D),
+	PINMUX_IPSR_GPSR(IP16_7, USB1_OVC),
 	PINMUX_IPSR_MSEL(IP16_7, TCLK1_B, SEL_TMU1_1),
 
 	PINMUX_DATA(IIC0_SCL_MARK, FN_SEL_IIC0_0),
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7791.c b/drivers/pinctrl/sh-pfc/pfc-r8a7791.c
index 4cfbb94..01abbd5 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a7791.c
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a7791.c
@@ -792,171 +792,171 @@
 	PINMUX_SINGLE(SD1_CLK),
 
 	/* IPSR0 */
-	PINMUX_IPSR_DATA(IP0_0, D0),
-	PINMUX_IPSR_DATA(IP0_1, D1),
-	PINMUX_IPSR_DATA(IP0_2, D2),
-	PINMUX_IPSR_DATA(IP0_3, D3),
-	PINMUX_IPSR_DATA(IP0_4, D4),
-	PINMUX_IPSR_DATA(IP0_5, D5),
-	PINMUX_IPSR_DATA(IP0_6, D6),
-	PINMUX_IPSR_DATA(IP0_7, D7),
-	PINMUX_IPSR_DATA(IP0_8, D8),
-	PINMUX_IPSR_DATA(IP0_9, D9),
-	PINMUX_IPSR_DATA(IP0_10, D10),
-	PINMUX_IPSR_DATA(IP0_11, D11),
-	PINMUX_IPSR_DATA(IP0_12, D12),
-	PINMUX_IPSR_DATA(IP0_13, D13),
-	PINMUX_IPSR_DATA(IP0_14, D14),
-	PINMUX_IPSR_DATA(IP0_15, D15),
-	PINMUX_IPSR_DATA(IP0_18_16, A0),
+	PINMUX_IPSR_GPSR(IP0_0, D0),
+	PINMUX_IPSR_GPSR(IP0_1, D1),
+	PINMUX_IPSR_GPSR(IP0_2, D2),
+	PINMUX_IPSR_GPSR(IP0_3, D3),
+	PINMUX_IPSR_GPSR(IP0_4, D4),
+	PINMUX_IPSR_GPSR(IP0_5, D5),
+	PINMUX_IPSR_GPSR(IP0_6, D6),
+	PINMUX_IPSR_GPSR(IP0_7, D7),
+	PINMUX_IPSR_GPSR(IP0_8, D8),
+	PINMUX_IPSR_GPSR(IP0_9, D9),
+	PINMUX_IPSR_GPSR(IP0_10, D10),
+	PINMUX_IPSR_GPSR(IP0_11, D11),
+	PINMUX_IPSR_GPSR(IP0_12, D12),
+	PINMUX_IPSR_GPSR(IP0_13, D13),
+	PINMUX_IPSR_GPSR(IP0_14, D14),
+	PINMUX_IPSR_GPSR(IP0_15, D15),
+	PINMUX_IPSR_GPSR(IP0_18_16, A0),
 	PINMUX_IPSR_MSEL(IP0_18_16, ATAWR0_N_C, SEL_LBS_2),
 	PINMUX_IPSR_MSEL(IP0_18_16, MSIOF0_SCK_B, SEL_SOF0_1),
 	PINMUX_IPSR_MSEL(IP0_18_16, SCL0_C, SEL_IIC0_2),
-	PINMUX_IPSR_DATA(IP0_18_16, PWM2_B),
-	PINMUX_IPSR_DATA(IP0_20_19, A1),
+	PINMUX_IPSR_GPSR(IP0_18_16, PWM2_B),
+	PINMUX_IPSR_GPSR(IP0_20_19, A1),
 	PINMUX_IPSR_MSEL(IP0_20_19, MSIOF0_SYNC_B, SEL_SOF0_1),
-	PINMUX_IPSR_DATA(IP0_22_21, A2),
+	PINMUX_IPSR_GPSR(IP0_22_21, A2),
 	PINMUX_IPSR_MSEL(IP0_22_21, MSIOF0_SS1_B, SEL_SOF0_1),
-	PINMUX_IPSR_DATA(IP0_24_23, A3),
+	PINMUX_IPSR_GPSR(IP0_24_23, A3),
 	PINMUX_IPSR_MSEL(IP0_24_23, MSIOF0_SS2_B, SEL_SOF0_1),
-	PINMUX_IPSR_DATA(IP0_26_25, A4),
+	PINMUX_IPSR_GPSR(IP0_26_25, A4),
 	PINMUX_IPSR_MSEL(IP0_26_25, MSIOF0_TXD_B, SEL_SOF0_1),
-	PINMUX_IPSR_DATA(IP0_28_27, A5),
+	PINMUX_IPSR_GPSR(IP0_28_27, A5),
 	PINMUX_IPSR_MSEL(IP0_28_27, MSIOF0_RXD_B, SEL_SOF0_1),
-	PINMUX_IPSR_DATA(IP0_30_29, A6),
+	PINMUX_IPSR_GPSR(IP0_30_29, A6),
 	PINMUX_IPSR_MSEL(IP0_30_29, MSIOF1_SCK, SEL_SOF1_0),
 
 	/* IPSR1 */
-	PINMUX_IPSR_DATA(IP1_1_0, A7),
+	PINMUX_IPSR_GPSR(IP1_1_0, A7),
 	PINMUX_IPSR_MSEL(IP1_1_0, MSIOF1_SYNC, SEL_SOF1_0),
-	PINMUX_IPSR_DATA(IP1_3_2, A8),
+	PINMUX_IPSR_GPSR(IP1_3_2, A8),
 	PINMUX_IPSR_MSEL(IP1_3_2, MSIOF1_SS1, SEL_SOF1_0),
 	PINMUX_IPSR_MSEL(IP1_3_2, SCL0, SEL_IIC0_0),
-	PINMUX_IPSR_DATA(IP1_5_4, A9),
+	PINMUX_IPSR_GPSR(IP1_5_4, A9),
 	PINMUX_IPSR_MSEL(IP1_5_4, MSIOF1_SS2, SEL_SOF1_0),
 	PINMUX_IPSR_MSEL(IP1_5_4, SDA0, SEL_IIC0_0),
-	PINMUX_IPSR_DATA(IP1_7_6, A10),
+	PINMUX_IPSR_GPSR(IP1_7_6, A10),
 	PINMUX_IPSR_MSEL(IP1_7_6, MSIOF1_TXD, SEL_SOF1_0),
 	PINMUX_IPSR_MSEL(IP1_7_6, MSIOF1_TXD_D, SEL_SOF1_3),
-	PINMUX_IPSR_DATA(IP1_10_8, A11),
+	PINMUX_IPSR_GPSR(IP1_10_8, A11),
 	PINMUX_IPSR_MSEL(IP1_10_8, MSIOF1_RXD, SEL_SOF1_0),
 	PINMUX_IPSR_MSEL(IP1_10_8, SCL3_D, SEL_IIC3_3),
 	PINMUX_IPSR_MSEL(IP1_10_8, MSIOF1_RXD_D, SEL_SOF1_3),
-	PINMUX_IPSR_DATA(IP1_13_11, A12),
+	PINMUX_IPSR_GPSR(IP1_13_11, A12),
 	PINMUX_IPSR_MSEL(IP1_13_11, FMCLK, SEL_FM_0),
 	PINMUX_IPSR_MSEL(IP1_13_11, SDA3_D, SEL_IIC3_3),
 	PINMUX_IPSR_MSEL(IP1_13_11, MSIOF1_SCK_D, SEL_SOF1_3),
-	PINMUX_IPSR_DATA(IP1_16_14, A13),
+	PINMUX_IPSR_GPSR(IP1_16_14, A13),
 	PINMUX_IPSR_MSEL(IP1_16_14, ATAG0_N_C, SEL_LBS_2),
 	PINMUX_IPSR_MSEL(IP1_16_14, BPFCLK, SEL_FM_0),
 	PINMUX_IPSR_MSEL(IP1_16_14, MSIOF1_SS1_D, SEL_SOF1_3),
-	PINMUX_IPSR_DATA(IP1_19_17, A14),
+	PINMUX_IPSR_GPSR(IP1_19_17, A14),
 	PINMUX_IPSR_MSEL(IP1_19_17, ATADIR0_N_C, SEL_LBS_2),
 	PINMUX_IPSR_MSEL(IP1_19_17, FMIN, SEL_FM_0),
 	PINMUX_IPSR_MSEL(IP1_19_17, FMIN_C, SEL_FM_2),
 	PINMUX_IPSR_MSEL(IP1_19_17, MSIOF1_SYNC_D, SEL_SOF1_3),
-	PINMUX_IPSR_DATA(IP1_22_20, A15),
+	PINMUX_IPSR_GPSR(IP1_22_20, A15),
 	PINMUX_IPSR_MSEL(IP1_22_20, BPFCLK_C, SEL_FM_2),
-	PINMUX_IPSR_DATA(IP1_25_23, A16),
+	PINMUX_IPSR_GPSR(IP1_25_23, A16),
 	PINMUX_IPSR_MSEL(IP1_25_23, DREQ2_B, SEL_LBS_1),
 	PINMUX_IPSR_MSEL(IP1_25_23, FMCLK_C, SEL_FM_2),
 	PINMUX_IPSR_MSEL(IP1_25_23, SCIFA1_SCK_B, SEL_SCIFA1_1),
-	PINMUX_IPSR_DATA(IP1_28_26, A17),
+	PINMUX_IPSR_GPSR(IP1_28_26, A17),
 	PINMUX_IPSR_MSEL(IP1_28_26, DACK2_B, SEL_LBS_1),
 	PINMUX_IPSR_MSEL(IP1_28_26, SDA0_C, SEL_IIC0_2),
-	PINMUX_IPSR_DATA(IP1_31_29, A18),
+	PINMUX_IPSR_GPSR(IP1_31_29, A18),
 	PINMUX_IPSR_MSEL(IP1_31_29, DREQ1, SEL_LBS_0),
 	PINMUX_IPSR_MSEL(IP1_31_29, SCIFA1_RXD_C, SEL_SCIFA1_2),
 	PINMUX_IPSR_MSEL(IP1_31_29, SCIFB1_RXD_C, SEL_SCIFB1_2),
 
 	/* IPSR2 */
-	PINMUX_IPSR_DATA(IP2_2_0, A19),
-	PINMUX_IPSR_DATA(IP2_2_0, DACK1),
+	PINMUX_IPSR_GPSR(IP2_2_0, A19),
+	PINMUX_IPSR_GPSR(IP2_2_0, DACK1),
 	PINMUX_IPSR_MSEL(IP2_2_0, SCIFA1_TXD_C, SEL_SCIFA1_2),
 	PINMUX_IPSR_MSEL(IP2_2_0, SCIFB1_TXD_C, SEL_SCIFB1_2),
 	PINMUX_IPSR_MSEL(IP2_2_0, SCIFB1_SCK_B, SEL_SCIFB1_1),
-	PINMUX_IPSR_DATA(IP2_2_0, A20),
+	PINMUX_IPSR_GPSR(IP2_2_0, A20),
 	PINMUX_IPSR_MSEL(IP2_4_3, SPCLK, SEL_QSP_0),
-	PINMUX_IPSR_DATA(IP2_6_5, A21),
+	PINMUX_IPSR_GPSR(IP2_6_5, A21),
 	PINMUX_IPSR_MSEL(IP2_6_5, ATAWR0_N_B, SEL_LBS_1),
 	PINMUX_IPSR_MSEL(IP2_6_5, MOSI_IO0, SEL_QSP_0),
-	PINMUX_IPSR_DATA(IP2_9_7, A22),
+	PINMUX_IPSR_GPSR(IP2_9_7, A22),
 	PINMUX_IPSR_MSEL(IP2_9_7, MISO_IO1, SEL_QSP_0),
 	PINMUX_IPSR_MSEL(IP2_9_7, FMCLK_B, SEL_FM_1),
 	PINMUX_IPSR_MSEL(IP2_9_7, TX0, SEL_SCIF0_0),
 	PINMUX_IPSR_MSEL(IP2_9_7, SCIFA0_TXD, SEL_SCFA_0),
-	PINMUX_IPSR_DATA(IP2_12_10, A23),
+	PINMUX_IPSR_GPSR(IP2_12_10, A23),
 	PINMUX_IPSR_MSEL(IP2_12_10, IO2, SEL_QSP_0),
 	PINMUX_IPSR_MSEL(IP2_12_10, BPFCLK_B, SEL_FM_1),
 	PINMUX_IPSR_MSEL(IP2_12_10, RX0, SEL_SCIF0_0),
 	PINMUX_IPSR_MSEL(IP2_12_10, SCIFA0_RXD, SEL_SCFA_0),
-	PINMUX_IPSR_DATA(IP2_15_13, A24),
+	PINMUX_IPSR_GPSR(IP2_15_13, A24),
 	PINMUX_IPSR_MSEL(IP2_15_13, DREQ2, SEL_LBS_0),
 	PINMUX_IPSR_MSEL(IP2_15_13, IO3, SEL_QSP_0),
 	PINMUX_IPSR_MSEL(IP2_15_13, TX1, SEL_SCIF1_0),
 	PINMUX_IPSR_MSEL(IP2_15_13, SCIFA1_TXD, SEL_SCIFA1_0),
-	PINMUX_IPSR_DATA(IP2_18_16, A25),
+	PINMUX_IPSR_GPSR(IP2_18_16, A25),
 	PINMUX_IPSR_MSEL(IP2_18_16, DACK2, SEL_LBS_0),
 	PINMUX_IPSR_MSEL(IP2_18_16, SSL, SEL_QSP_0),
 	PINMUX_IPSR_MSEL(IP2_18_16, DREQ1_C, SEL_LBS_2),
 	PINMUX_IPSR_MSEL(IP2_18_16, RX1, SEL_SCIF1_0),
 	PINMUX_IPSR_MSEL(IP2_18_16, SCIFA1_RXD, SEL_SCIFA1_0),
-	PINMUX_IPSR_DATA(IP2_20_19, CS0_N),
+	PINMUX_IPSR_GPSR(IP2_20_19, CS0_N),
 	PINMUX_IPSR_MSEL(IP2_20_19, ATAG0_N_B, SEL_LBS_1),
 	PINMUX_IPSR_MSEL(IP2_20_19, SCL1, SEL_IIC1_0),
-	PINMUX_IPSR_DATA(IP2_22_21, CS1_N_A26),
+	PINMUX_IPSR_GPSR(IP2_22_21, CS1_N_A26),
 	PINMUX_IPSR_MSEL(IP2_22_21, ATADIR0_N_B, SEL_LBS_1),
 	PINMUX_IPSR_MSEL(IP2_22_21, SDA1, SEL_IIC1_0),
-	PINMUX_IPSR_DATA(IP2_24_23, EX_CS1_N),
+	PINMUX_IPSR_GPSR(IP2_24_23, EX_CS1_N),
 	PINMUX_IPSR_MSEL(IP2_24_23, MSIOF2_SCK, SEL_SOF2_0),
-	PINMUX_IPSR_DATA(IP2_26_25, EX_CS2_N),
+	PINMUX_IPSR_GPSR(IP2_26_25, EX_CS2_N),
 	PINMUX_IPSR_MSEL(IP2_26_25, ATAWR0_N, SEL_LBS_0),
 	PINMUX_IPSR_MSEL(IP2_26_25, MSIOF2_SYNC, SEL_SOF2_0),
-	PINMUX_IPSR_DATA(IP2_29_27, EX_CS3_N),
+	PINMUX_IPSR_GPSR(IP2_29_27, EX_CS3_N),
 	PINMUX_IPSR_MSEL(IP2_29_27, ATADIR0_N, SEL_LBS_0),
 	PINMUX_IPSR_MSEL(IP2_29_27, MSIOF2_TXD, SEL_SOF2_0),
 	PINMUX_IPSR_MSEL(IP2_29_27, ATAG0_N, SEL_LBS_0),
-	PINMUX_IPSR_DATA(IP2_29_27, EX_WAIT1),
+	PINMUX_IPSR_GPSR(IP2_29_27, EX_WAIT1),
 
 	/* IPSR3 */
-	PINMUX_IPSR_DATA(IP3_2_0, EX_CS4_N),
+	PINMUX_IPSR_GPSR(IP3_2_0, EX_CS4_N),
 	PINMUX_IPSR_MSEL(IP3_2_0, ATARD0_N, SEL_LBS_0),
 	PINMUX_IPSR_MSEL(IP3_2_0, MSIOF2_RXD, SEL_SOF2_0),
-	PINMUX_IPSR_DATA(IP3_2_0, EX_WAIT2),
-	PINMUX_IPSR_DATA(IP3_5_3, EX_CS5_N),
-	PINMUX_IPSR_DATA(IP3_5_3, ATACS00_N),
+	PINMUX_IPSR_GPSR(IP3_2_0, EX_WAIT2),
+	PINMUX_IPSR_GPSR(IP3_5_3, EX_CS5_N),
+	PINMUX_IPSR_GPSR(IP3_5_3, ATACS00_N),
 	PINMUX_IPSR_MSEL(IP3_5_3, MSIOF2_SS1, SEL_SOF2_0),
 	PINMUX_IPSR_MSEL(IP3_5_3, HRX1_B, SEL_HSCIF1_1),
 	PINMUX_IPSR_MSEL(IP3_5_3, SCIFB1_RXD_B, SEL_SCIFB1_1),
-	PINMUX_IPSR_DATA(IP3_5_3, PWM1),
-	PINMUX_IPSR_DATA(IP3_5_3, TPU_TO1),
-	PINMUX_IPSR_DATA(IP3_8_6, BS_N),
-	PINMUX_IPSR_DATA(IP3_8_6, ATACS10_N),
+	PINMUX_IPSR_GPSR(IP3_5_3, PWM1),
+	PINMUX_IPSR_GPSR(IP3_5_3, TPU_TO1),
+	PINMUX_IPSR_GPSR(IP3_8_6, BS_N),
+	PINMUX_IPSR_GPSR(IP3_8_6, ATACS10_N),
 	PINMUX_IPSR_MSEL(IP3_8_6, MSIOF2_SS2, SEL_SOF2_0),
 	PINMUX_IPSR_MSEL(IP3_8_6, HTX1_B, SEL_HSCIF1_1),
 	PINMUX_IPSR_MSEL(IP3_8_6, SCIFB1_TXD_B, SEL_SCIFB1_1),
-	PINMUX_IPSR_DATA(IP3_8_6, PWM2),
-	PINMUX_IPSR_DATA(IP3_8_6, TPU_TO2),
-	PINMUX_IPSR_DATA(IP3_11_9, RD_WR_N),
+	PINMUX_IPSR_GPSR(IP3_8_6, PWM2),
+	PINMUX_IPSR_GPSR(IP3_8_6, TPU_TO2),
+	PINMUX_IPSR_GPSR(IP3_11_9, RD_WR_N),
 	PINMUX_IPSR_MSEL(IP3_11_9, HRX2_B, SEL_HSCIF2_1),
 	PINMUX_IPSR_MSEL(IP3_11_9, FMIN_B, SEL_FM_1),
 	PINMUX_IPSR_MSEL(IP3_11_9, SCIFB0_RXD_B, SEL_SCIFB_1),
 	PINMUX_IPSR_MSEL(IP3_11_9, DREQ1_D, SEL_LBS_1),
-	PINMUX_IPSR_DATA(IP3_13_12, WE0_N),
+	PINMUX_IPSR_GPSR(IP3_13_12, WE0_N),
 	PINMUX_IPSR_MSEL(IP3_13_12, HCTS2_N_B, SEL_HSCIF2_1),
 	PINMUX_IPSR_MSEL(IP3_13_12, SCIFB0_TXD_B, SEL_SCIFB_1),
-	PINMUX_IPSR_DATA(IP3_15_14, WE1_N),
+	PINMUX_IPSR_GPSR(IP3_15_14, WE1_N),
 	PINMUX_IPSR_MSEL(IP3_15_14, ATARD0_N_B, SEL_LBS_1),
 	PINMUX_IPSR_MSEL(IP3_15_14, HTX2_B, SEL_HSCIF2_1),
 	PINMUX_IPSR_MSEL(IP3_15_14, SCIFB0_RTS_N_B, SEL_SCIFB_1),
-	PINMUX_IPSR_DATA(IP3_17_16, EX_WAIT0),
+	PINMUX_IPSR_GPSR(IP3_17_16, EX_WAIT0),
 	PINMUX_IPSR_MSEL(IP3_17_16, HRTS2_N_B, SEL_HSCIF2_1),
 	PINMUX_IPSR_MSEL(IP3_17_16, SCIFB0_CTS_N_B, SEL_SCIFB_1),
-	PINMUX_IPSR_DATA(IP3_19_18, DREQ0),
-	PINMUX_IPSR_DATA(IP3_19_18, PWM3),
-	PINMUX_IPSR_DATA(IP3_19_18, TPU_TO3),
-	PINMUX_IPSR_DATA(IP3_21_20, DACK0),
-	PINMUX_IPSR_DATA(IP3_21_20, DRACK0),
+	PINMUX_IPSR_GPSR(IP3_19_18, DREQ0),
+	PINMUX_IPSR_GPSR(IP3_19_18, PWM3),
+	PINMUX_IPSR_GPSR(IP3_19_18, TPU_TO3),
+	PINMUX_IPSR_GPSR(IP3_21_20, DACK0),
+	PINMUX_IPSR_GPSR(IP3_21_20, DRACK0),
 	PINMUX_IPSR_MSEL(IP3_21_20, REMOCON, SEL_RCN_0),
 	PINMUX_IPSR_MSEL(IP3_24_22, SPEEDIN, SEL_RSP_0),
 	PINMUX_IPSR_MSEL(IP3_24_22, HSCK0_C, SEL_HSCIF0_2),
@@ -995,61 +995,61 @@
 	PINMUX_IPSR_MSEL(IP4_9_8, SDA1_B, SEL_IIC1_1),
 	PINMUX_IPSR_MSEL(IP4_9_8, SDA8_B, SEL_IIC8_1),
 	PINMUX_IPSR_MSEL(IP4_9_8, MSIOF2_RXD_C, SEL_SOF2_2),
-	PINMUX_IPSR_DATA(IP4_12_10, SSI_SCK2),
+	PINMUX_IPSR_GPSR(IP4_12_10, SSI_SCK2),
 	PINMUX_IPSR_MSEL(IP4_12_10, SCL2, SEL_IIC2_0),
 	PINMUX_IPSR_MSEL(IP4_12_10, GPS_CLK_B, SEL_GPS_1),
 	PINMUX_IPSR_MSEL(IP4_12_10, GLO_Q0_D, SEL_GPS_3),
-	PINMUX_IPSR_DATA(IP4_15_13, SSI_WS2),
+	PINMUX_IPSR_GPSR(IP4_15_13, SSI_WS2),
 	PINMUX_IPSR_MSEL(IP4_15_13, SDA2, SEL_IIC2_0),
 	PINMUX_IPSR_MSEL(IP4_15_13, GPS_SIGN_B, SEL_GPS_1),
 	PINMUX_IPSR_MSEL(IP4_15_13, RX2_E, SEL_SCIF2_4),
 	PINMUX_IPSR_MSEL(IP4_15_13, GLO_Q1_D, SEL_GPS_3),
-	PINMUX_IPSR_DATA(IP4_18_16, SSI_SDATA2),
+	PINMUX_IPSR_GPSR(IP4_18_16, SSI_SDATA2),
 	PINMUX_IPSR_MSEL(IP4_18_16, GPS_MAG_B, SEL_GPS_1),
 	PINMUX_IPSR_MSEL(IP4_18_16, TX2_E, SEL_SCIF2_4),
-	PINMUX_IPSR_DATA(IP4_19, SSI_SCK34),
-	PINMUX_IPSR_DATA(IP4_20, SSI_WS34),
-	PINMUX_IPSR_DATA(IP4_21, SSI_SDATA3),
-	PINMUX_IPSR_DATA(IP4_23_22, SSI_SCK4),
+	PINMUX_IPSR_GPSR(IP4_19, SSI_SCK34),
+	PINMUX_IPSR_GPSR(IP4_20, SSI_WS34),
+	PINMUX_IPSR_GPSR(IP4_21, SSI_SDATA3),
+	PINMUX_IPSR_GPSR(IP4_23_22, SSI_SCK4),
 	PINMUX_IPSR_MSEL(IP4_23_22, GLO_SS_D, SEL_GPS_3),
-	PINMUX_IPSR_DATA(IP4_25_24, SSI_WS4),
+	PINMUX_IPSR_GPSR(IP4_25_24, SSI_WS4),
 	PINMUX_IPSR_MSEL(IP4_25_24, GLO_RFON_D, SEL_GPS_3),
-	PINMUX_IPSR_DATA(IP4_27_26, SSI_SDATA4),
+	PINMUX_IPSR_GPSR(IP4_27_26, SSI_SDATA4),
 	PINMUX_IPSR_MSEL(IP4_27_26, MSIOF2_SCK_D, SEL_SOF2_3),
-	PINMUX_IPSR_DATA(IP4_30_28, SSI_SCK5),
+	PINMUX_IPSR_GPSR(IP4_30_28, SSI_SCK5),
 	PINMUX_IPSR_MSEL(IP4_30_28, MSIOF1_SCK_C, SEL_SOF1_2),
 	PINMUX_IPSR_MSEL(IP4_30_28, TS_SDATA0, SEL_TSIF0_0),
 	PINMUX_IPSR_MSEL(IP4_30_28, GLO_I0, SEL_GPS_0),
 	PINMUX_IPSR_MSEL(IP4_30_28, MSIOF2_SYNC_D, SEL_SOF2_3),
-	PINMUX_IPSR_DATA(IP4_30_28, VI1_R2_B),
+	PINMUX_IPSR_GPSR(IP4_30_28, VI1_R2_B),
 
 	/* IPSR5 */
-	PINMUX_IPSR_DATA(IP5_2_0, SSI_WS5),
+	PINMUX_IPSR_GPSR(IP5_2_0, SSI_WS5),
 	PINMUX_IPSR_MSEL(IP5_2_0, MSIOF1_SYNC_C, SEL_SOF1_2),
 	PINMUX_IPSR_MSEL(IP5_2_0, TS_SCK0, SEL_TSIF0_0),
 	PINMUX_IPSR_MSEL(IP5_2_0, GLO_I1, SEL_GPS_0),
 	PINMUX_IPSR_MSEL(IP5_2_0, MSIOF2_TXD_D, SEL_SOF2_3),
-	PINMUX_IPSR_DATA(IP5_2_0, VI1_R3_B),
-	PINMUX_IPSR_DATA(IP5_5_3, SSI_SDATA5),
+	PINMUX_IPSR_GPSR(IP5_2_0, VI1_R3_B),
+	PINMUX_IPSR_GPSR(IP5_5_3, SSI_SDATA5),
 	PINMUX_IPSR_MSEL(IP5_5_3, MSIOF1_TXD_C, SEL_SOF1_2),
 	PINMUX_IPSR_MSEL(IP5_5_3, TS_SDEN0, SEL_TSIF0_0),
 	PINMUX_IPSR_MSEL(IP5_5_3, GLO_Q0, SEL_GPS_0),
 	PINMUX_IPSR_MSEL(IP5_5_3, MSIOF2_SS1_D, SEL_SOF2_3),
-	PINMUX_IPSR_DATA(IP5_5_3, VI1_R4_B),
-	PINMUX_IPSR_DATA(IP5_8_6, SSI_SCK6),
+	PINMUX_IPSR_GPSR(IP5_5_3, VI1_R4_B),
+	PINMUX_IPSR_GPSR(IP5_8_6, SSI_SCK6),
 	PINMUX_IPSR_MSEL(IP5_8_6, MSIOF1_RXD_C, SEL_SOF1_2),
 	PINMUX_IPSR_MSEL(IP5_8_6, TS_SPSYNC0, SEL_TSIF0_0),
 	PINMUX_IPSR_MSEL(IP5_8_6, GLO_Q1, SEL_GPS_0),
 	PINMUX_IPSR_MSEL(IP5_8_6, MSIOF2_RXD_D, SEL_SOF2_3),
-	PINMUX_IPSR_DATA(IP5_8_6, VI1_R5_B),
-	PINMUX_IPSR_DATA(IP5_11_9, SSI_WS6),
+	PINMUX_IPSR_GPSR(IP5_8_6, VI1_R5_B),
+	PINMUX_IPSR_GPSR(IP5_11_9, SSI_WS6),
 	PINMUX_IPSR_MSEL(IP5_11_9, GLO_SCLK, SEL_GPS_0),
 	PINMUX_IPSR_MSEL(IP5_11_9, MSIOF2_SS2_D, SEL_SOF2_3),
-	PINMUX_IPSR_DATA(IP5_11_9, VI1_R6_B),
-	PINMUX_IPSR_DATA(IP5_14_12, SSI_SDATA6),
+	PINMUX_IPSR_GPSR(IP5_11_9, VI1_R6_B),
+	PINMUX_IPSR_GPSR(IP5_14_12, SSI_SDATA6),
 	PINMUX_IPSR_MSEL(IP5_14_12, STP_IVCXO27_0_B, SEL_SSP_1),
 	PINMUX_IPSR_MSEL(IP5_14_12, GLO_SDATA, SEL_GPS_0),
-	PINMUX_IPSR_DATA(IP5_14_12, VI1_R7_B),
+	PINMUX_IPSR_GPSR(IP5_14_12, VI1_R7_B),
 	PINMUX_IPSR_MSEL(IP5_16_15, SSI_SCK78, SEL_SSI7_0),
 	PINMUX_IPSR_MSEL(IP5_16_15, STP_ISCLK_0_B, SEL_SSP_1),
 	PINMUX_IPSR_MSEL(IP5_16_15, GLO_SS, SEL_GPS_0),
@@ -1080,307 +1080,307 @@
 	PINMUX_IPSR_MSEL(IP6_2_0, MSIOF1_SCK_B, SEL_SOF1_1),
 	PINMUX_IPSR_MSEL(IP6_2_0, SCIF_CLK, SEL_SCIF_0),
 	PINMUX_IPSR_MSEL(IP6_2_0, BPFCLK_E, SEL_FM_4),
-	PINMUX_IPSR_DATA(IP6_5_3, AUDIO_CLKC),
+	PINMUX_IPSR_GPSR(IP6_5_3, AUDIO_CLKC),
 	PINMUX_IPSR_MSEL(IP6_5_3, SCIFB0_SCK_C, SEL_SCIFB_2),
 	PINMUX_IPSR_MSEL(IP6_5_3, MSIOF1_SYNC_B, SEL_SOF1_1),
 	PINMUX_IPSR_MSEL(IP6_5_3, RX2, SEL_SCIF2_0),
 	PINMUX_IPSR_MSEL(IP6_5_3, SCIFA2_RXD, SEL_SCIFA2_0),
 	PINMUX_IPSR_MSEL(IP6_5_3, FMIN_E, SEL_FM_4),
-	PINMUX_IPSR_DATA(IP6_7_6, AUDIO_CLKOUT),
+	PINMUX_IPSR_GPSR(IP6_7_6, AUDIO_CLKOUT),
 	PINMUX_IPSR_MSEL(IP6_7_6, MSIOF1_SS1_B, SEL_SOF1_1),
 	PINMUX_IPSR_MSEL(IP6_5_3, TX2, SEL_SCIF2_0),
 	PINMUX_IPSR_MSEL(IP6_7_6, SCIFA2_TXD, SEL_SCIFA2_0),
-	PINMUX_IPSR_DATA(IP6_9_8, IRQ0),
+	PINMUX_IPSR_GPSR(IP6_9_8, IRQ0),
 	PINMUX_IPSR_MSEL(IP6_9_8, SCIFB1_RXD_D, SEL_SCIFB1_3),
-	PINMUX_IPSR_DATA(IP6_9_8, INTC_IRQ0_N),
-	PINMUX_IPSR_DATA(IP6_11_10, IRQ1),
+	PINMUX_IPSR_GPSR(IP6_9_8, INTC_IRQ0_N),
+	PINMUX_IPSR_GPSR(IP6_11_10, IRQ1),
 	PINMUX_IPSR_MSEL(IP6_11_10, SCIFB1_SCK_C, SEL_SCIFB1_2),
-	PINMUX_IPSR_DATA(IP6_11_10, INTC_IRQ1_N),
-	PINMUX_IPSR_DATA(IP6_13_12, IRQ2),
+	PINMUX_IPSR_GPSR(IP6_11_10, INTC_IRQ1_N),
+	PINMUX_IPSR_GPSR(IP6_13_12, IRQ2),
 	PINMUX_IPSR_MSEL(IP6_13_12, SCIFB1_TXD_D, SEL_SCIFB1_3),
-	PINMUX_IPSR_DATA(IP6_13_12, INTC_IRQ2_N),
-	PINMUX_IPSR_DATA(IP6_15_14, IRQ3),
+	PINMUX_IPSR_GPSR(IP6_13_12, INTC_IRQ2_N),
+	PINMUX_IPSR_GPSR(IP6_15_14, IRQ3),
 	PINMUX_IPSR_MSEL(IP6_15_14, SCL4_C, SEL_IIC4_2),
 	PINMUX_IPSR_MSEL(IP6_15_14, MSIOF2_TXD_E, SEL_SOF2_4),
-	PINMUX_IPSR_DATA(IP6_15_14, INTC_IRQ4_N),
-	PINMUX_IPSR_DATA(IP6_18_16, IRQ4),
+	PINMUX_IPSR_GPSR(IP6_15_14, INTC_IRQ4_N),
+	PINMUX_IPSR_GPSR(IP6_18_16, IRQ4),
 	PINMUX_IPSR_MSEL(IP6_18_16, HRX1_C, SEL_HSCIF1_2),
 	PINMUX_IPSR_MSEL(IP6_18_16, SDA4_C, SEL_IIC4_2),
 	PINMUX_IPSR_MSEL(IP6_18_16, MSIOF2_RXD_E, SEL_SOF2_4),
-	PINMUX_IPSR_DATA(IP6_18_16, INTC_IRQ4_N),
-	PINMUX_IPSR_DATA(IP6_20_19, IRQ5),
+	PINMUX_IPSR_GPSR(IP6_18_16, INTC_IRQ4_N),
+	PINMUX_IPSR_GPSR(IP6_20_19, IRQ5),
 	PINMUX_IPSR_MSEL(IP6_20_19, HTX1_C, SEL_HSCIF1_2),
 	PINMUX_IPSR_MSEL(IP6_20_19, SCL1_E, SEL_IIC1_4),
 	PINMUX_IPSR_MSEL(IP6_20_19, MSIOF2_SCK_E, SEL_SOF2_4),
-	PINMUX_IPSR_DATA(IP6_23_21, IRQ6),
+	PINMUX_IPSR_GPSR(IP6_23_21, IRQ6),
 	PINMUX_IPSR_MSEL(IP6_23_21, HSCK1_C, SEL_HSCIF1_2),
 	PINMUX_IPSR_MSEL(IP6_23_21, MSIOF1_SS2_B, SEL_SOF1_1),
 	PINMUX_IPSR_MSEL(IP6_23_21, SDA1_E, SEL_IIC1_4),
 	PINMUX_IPSR_MSEL(IP6_23_21, MSIOF2_SYNC_E, SEL_SOF2_4),
-	PINMUX_IPSR_DATA(IP6_26_24, IRQ7),
+	PINMUX_IPSR_GPSR(IP6_26_24, IRQ7),
 	PINMUX_IPSR_MSEL(IP6_26_24, HCTS1_N_C, SEL_HSCIF1_2),
 	PINMUX_IPSR_MSEL(IP6_26_24, MSIOF1_TXD_B, SEL_SOF1_1),
 	PINMUX_IPSR_MSEL(IP6_26_24, GPS_CLK_C, SEL_GPS_2),
 	PINMUX_IPSR_MSEL(IP6_26_24, GPS_CLK_D, SEL_GPS_3),
-	PINMUX_IPSR_DATA(IP6_29_27, IRQ8),
+	PINMUX_IPSR_GPSR(IP6_29_27, IRQ8),
 	PINMUX_IPSR_MSEL(IP6_29_27, HRTS1_N_C, SEL_HSCIF1_2),
 	PINMUX_IPSR_MSEL(IP6_29_27, MSIOF1_RXD_B, SEL_SOF1_1),
 	PINMUX_IPSR_MSEL(IP6_29_27, GPS_SIGN_C, SEL_GPS_2),
 	PINMUX_IPSR_MSEL(IP6_29_27, GPS_SIGN_D, SEL_GPS_3),
 
 	/* IPSR7 */
-	PINMUX_IPSR_DATA(IP7_2_0, IRQ9),
+	PINMUX_IPSR_GPSR(IP7_2_0, IRQ9),
 	PINMUX_IPSR_MSEL(IP7_2_0, DU1_DOTCLKIN_B, SEL_DIS_1),
 	PINMUX_IPSR_MSEL(IP7_2_0, CAN_CLK_D, SEL_CANCLK_3),
 	PINMUX_IPSR_MSEL(IP7_2_0, GPS_MAG_C, SEL_GPS_2),
 	PINMUX_IPSR_MSEL(IP7_2_0, SCIF_CLK_B, SEL_SCIF_1),
 	PINMUX_IPSR_MSEL(IP7_2_0, GPS_MAG_D, SEL_GPS_3),
-	PINMUX_IPSR_DATA(IP7_5_3, DU1_DR0),
-	PINMUX_IPSR_DATA(IP7_5_3, LCDOUT0),
+	PINMUX_IPSR_GPSR(IP7_5_3, DU1_DR0),
+	PINMUX_IPSR_GPSR(IP7_5_3, LCDOUT0),
 	PINMUX_IPSR_MSEL(IP7_5_3, VI1_DATA0_B, SEL_VI1_1),
 	PINMUX_IPSR_MSEL(IP7_5_3, TX0_B, SEL_SCIF0_1),
 	PINMUX_IPSR_MSEL(IP7_5_3, SCIFA0_TXD_B, SEL_SCFA_1),
 	PINMUX_IPSR_MSEL(IP7_5_3, MSIOF2_SCK_B, SEL_SOF2_1),
-	PINMUX_IPSR_DATA(IP7_8_6, DU1_DR1),
-	PINMUX_IPSR_DATA(IP7_8_6, LCDOUT1),
+	PINMUX_IPSR_GPSR(IP7_8_6, DU1_DR1),
+	PINMUX_IPSR_GPSR(IP7_8_6, LCDOUT1),
 	PINMUX_IPSR_MSEL(IP7_8_6, VI1_DATA1_B, SEL_VI1_1),
 	PINMUX_IPSR_MSEL(IP7_8_6, RX0_B, SEL_SCIF0_1),
 	PINMUX_IPSR_MSEL(IP7_8_6, SCIFA0_RXD_B, SEL_SCFA_1),
 	PINMUX_IPSR_MSEL(IP7_8_6, MSIOF2_SYNC_B, SEL_SOF2_1),
-	PINMUX_IPSR_DATA(IP7_10_9, DU1_DR2),
-	PINMUX_IPSR_DATA(IP7_10_9, LCDOUT2),
+	PINMUX_IPSR_GPSR(IP7_10_9, DU1_DR2),
+	PINMUX_IPSR_GPSR(IP7_10_9, LCDOUT2),
 	PINMUX_IPSR_MSEL(IP7_10_9, SSI_SCK0129_B, SEL_SSI0_1),
-	PINMUX_IPSR_DATA(IP7_12_11, DU1_DR3),
-	PINMUX_IPSR_DATA(IP7_12_11, LCDOUT3),
+	PINMUX_IPSR_GPSR(IP7_12_11, DU1_DR3),
+	PINMUX_IPSR_GPSR(IP7_12_11, LCDOUT3),
 	PINMUX_IPSR_MSEL(IP7_12_11, SSI_WS0129_B, SEL_SSI0_1),
-	PINMUX_IPSR_DATA(IP7_14_13, DU1_DR4),
-	PINMUX_IPSR_DATA(IP7_14_13, LCDOUT4),
+	PINMUX_IPSR_GPSR(IP7_14_13, DU1_DR4),
+	PINMUX_IPSR_GPSR(IP7_14_13, LCDOUT4),
 	PINMUX_IPSR_MSEL(IP7_14_13, SSI_SDATA0_B, SEL_SSI0_1),
-	PINMUX_IPSR_DATA(IP7_16_15, DU1_DR5),
-	PINMUX_IPSR_DATA(IP7_16_15, LCDOUT5),
+	PINMUX_IPSR_GPSR(IP7_16_15, DU1_DR5),
+	PINMUX_IPSR_GPSR(IP7_16_15, LCDOUT5),
 	PINMUX_IPSR_MSEL(IP7_16_15, SSI_SCK1_B, SEL_SSI1_1),
-	PINMUX_IPSR_DATA(IP7_18_17, DU1_DR6),
-	PINMUX_IPSR_DATA(IP7_18_17, LCDOUT6),
+	PINMUX_IPSR_GPSR(IP7_18_17, DU1_DR6),
+	PINMUX_IPSR_GPSR(IP7_18_17, LCDOUT6),
 	PINMUX_IPSR_MSEL(IP7_18_17, SSI_WS1_B, SEL_SSI1_1),
-	PINMUX_IPSR_DATA(IP7_20_19, DU1_DR7),
-	PINMUX_IPSR_DATA(IP7_20_19, LCDOUT7),
+	PINMUX_IPSR_GPSR(IP7_20_19, DU1_DR7),
+	PINMUX_IPSR_GPSR(IP7_20_19, LCDOUT7),
 	PINMUX_IPSR_MSEL(IP7_20_19, SSI_SDATA1_B, SEL_SSI1_1),
-	PINMUX_IPSR_DATA(IP7_23_21, DU1_DG0),
-	PINMUX_IPSR_DATA(IP7_23_21, LCDOUT8),
+	PINMUX_IPSR_GPSR(IP7_23_21, DU1_DG0),
+	PINMUX_IPSR_GPSR(IP7_23_21, LCDOUT8),
 	PINMUX_IPSR_MSEL(IP7_23_21, VI1_DATA2_B, SEL_VI1_1),
 	PINMUX_IPSR_MSEL(IP7_23_21, TX1_B, SEL_SCIF1_1),
 	PINMUX_IPSR_MSEL(IP7_23_21, SCIFA1_TXD_B, SEL_SCIFA1_1),
 	PINMUX_IPSR_MSEL(IP7_23_21, MSIOF2_SS1_B, SEL_SOF2_1),
-	PINMUX_IPSR_DATA(IP7_26_24, DU1_DG1),
-	PINMUX_IPSR_DATA(IP7_26_24, LCDOUT9),
+	PINMUX_IPSR_GPSR(IP7_26_24, DU1_DG1),
+	PINMUX_IPSR_GPSR(IP7_26_24, LCDOUT9),
 	PINMUX_IPSR_MSEL(IP7_26_24, VI1_DATA3_B, SEL_VI1_1),
 	PINMUX_IPSR_MSEL(IP7_26_24, RX1_B, SEL_SCIF1_1),
 	PINMUX_IPSR_MSEL(IP7_26_24, SCIFA1_RXD_B, SEL_SCIFA1_1),
 	PINMUX_IPSR_MSEL(IP7_26_24, MSIOF2_SS2_B, SEL_SOF2_1),
-	PINMUX_IPSR_DATA(IP7_29_27, DU1_DG2),
-	PINMUX_IPSR_DATA(IP7_29_27, LCDOUT10),
+	PINMUX_IPSR_GPSR(IP7_29_27, DU1_DG2),
+	PINMUX_IPSR_GPSR(IP7_29_27, LCDOUT10),
 	PINMUX_IPSR_MSEL(IP7_29_27, VI1_DATA4_B, SEL_VI1_1),
-	PINMUX_IPSR_DATA(IP7_29_27, SCIF1_SCK_B),
+	PINMUX_IPSR_GPSR(IP7_29_27, SCIF1_SCK_B),
 	PINMUX_IPSR_MSEL(IP7_29_27, SCIFA1_SCK, SEL_SCIFA1_0),
 	PINMUX_IPSR_MSEL(IP7_29_27, SSI_SCK78_B, SEL_SSI7_1),
 
 	/* IPSR8 */
-	PINMUX_IPSR_DATA(IP8_2_0, DU1_DG3),
-	PINMUX_IPSR_DATA(IP8_2_0, LCDOUT11),
+	PINMUX_IPSR_GPSR(IP8_2_0, DU1_DG3),
+	PINMUX_IPSR_GPSR(IP8_2_0, LCDOUT11),
 	PINMUX_IPSR_MSEL(IP8_2_0, VI1_DATA5_B, SEL_VI1_1),
 	PINMUX_IPSR_MSEL(IP8_2_0, SSI_WS78_B, SEL_SSI7_1),
-	PINMUX_IPSR_DATA(IP8_5_3, DU1_DG4),
-	PINMUX_IPSR_DATA(IP8_5_3, LCDOUT12),
+	PINMUX_IPSR_GPSR(IP8_5_3, DU1_DG4),
+	PINMUX_IPSR_GPSR(IP8_5_3, LCDOUT12),
 	PINMUX_IPSR_MSEL(IP8_5_3, VI1_DATA6_B, SEL_VI1_1),
 	PINMUX_IPSR_MSEL(IP8_5_3, HRX0_B, SEL_HSCIF0_1),
 	PINMUX_IPSR_MSEL(IP8_5_3, SCIFB2_RXD_B, SEL_SCIFB2_1),
 	PINMUX_IPSR_MSEL(IP8_5_3, SSI_SDATA7_B, SEL_SSI7_1),
-	PINMUX_IPSR_DATA(IP8_8_6, DU1_DG5),
-	PINMUX_IPSR_DATA(IP8_8_6, LCDOUT13),
+	PINMUX_IPSR_GPSR(IP8_8_6, DU1_DG5),
+	PINMUX_IPSR_GPSR(IP8_8_6, LCDOUT13),
 	PINMUX_IPSR_MSEL(IP8_8_6, VI1_DATA7_B, SEL_VI1_1),
 	PINMUX_IPSR_MSEL(IP8_8_6, HCTS0_N_B, SEL_HSCIF0_1),
 	PINMUX_IPSR_MSEL(IP8_8_6, SCIFB2_TXD_B, SEL_SCIFB2_1),
 	PINMUX_IPSR_MSEL(IP8_8_6, SSI_SDATA8_B, SEL_SSI8_1),
-	PINMUX_IPSR_DATA(IP8_11_9, DU1_DG6),
-	PINMUX_IPSR_DATA(IP8_11_9, LCDOUT14),
+	PINMUX_IPSR_GPSR(IP8_11_9, DU1_DG6),
+	PINMUX_IPSR_GPSR(IP8_11_9, LCDOUT14),
 	PINMUX_IPSR_MSEL(IP8_11_9, HRTS0_N_B, SEL_HSCIF0_1),
 	PINMUX_IPSR_MSEL(IP8_11_9, SCIFB2_CTS_N_B, SEL_SCIFB2_1),
 	PINMUX_IPSR_MSEL(IP8_11_9, SSI_SCK9_B, SEL_SSI9_1),
-	PINMUX_IPSR_DATA(IP8_14_12, DU1_DG7),
-	PINMUX_IPSR_DATA(IP8_14_12, LCDOUT15),
+	PINMUX_IPSR_GPSR(IP8_14_12, DU1_DG7),
+	PINMUX_IPSR_GPSR(IP8_14_12, LCDOUT15),
 	PINMUX_IPSR_MSEL(IP8_14_12, HTX0_B, SEL_HSCIF0_1),
 	PINMUX_IPSR_MSEL(IP8_14_12, SCIFB2_RTS_N_B, SEL_SCIFB2_1),
 	PINMUX_IPSR_MSEL(IP8_14_12, SSI_WS9_B, SEL_SSI9_1),
-	PINMUX_IPSR_DATA(IP8_17_15, DU1_DB0),
-	PINMUX_IPSR_DATA(IP8_17_15, LCDOUT16),
+	PINMUX_IPSR_GPSR(IP8_17_15, DU1_DB0),
+	PINMUX_IPSR_GPSR(IP8_17_15, LCDOUT16),
 	PINMUX_IPSR_MSEL(IP8_17_15, VI1_CLK_B, SEL_VI1_1),
 	PINMUX_IPSR_MSEL(IP8_17_15, TX2_B, SEL_SCIF2_1),
 	PINMUX_IPSR_MSEL(IP8_17_15, SCIFA2_TXD_B, SEL_SCIFA2_1),
 	PINMUX_IPSR_MSEL(IP8_17_15, MSIOF2_TXD_B, SEL_SOF2_1),
-	PINMUX_IPSR_DATA(IP8_20_18, DU1_DB1),
-	PINMUX_IPSR_DATA(IP8_20_18, LCDOUT17),
+	PINMUX_IPSR_GPSR(IP8_20_18, DU1_DB1),
+	PINMUX_IPSR_GPSR(IP8_20_18, LCDOUT17),
 	PINMUX_IPSR_MSEL(IP8_20_18, VI1_HSYNC_N_B, SEL_VI1_1),
 	PINMUX_IPSR_MSEL(IP8_20_18, RX2_B, SEL_SCIF2_1),
 	PINMUX_IPSR_MSEL(IP8_20_18, SCIFA2_RXD_B, SEL_SCIFA2_1),
 	PINMUX_IPSR_MSEL(IP8_20_18, MSIOF2_RXD_B, SEL_SOF2_1),
-	PINMUX_IPSR_DATA(IP8_23_21, DU1_DB2),
-	PINMUX_IPSR_DATA(IP8_23_21, LCDOUT18),
+	PINMUX_IPSR_GPSR(IP8_23_21, DU1_DB2),
+	PINMUX_IPSR_GPSR(IP8_23_21, LCDOUT18),
 	PINMUX_IPSR_MSEL(IP8_23_21, VI1_VSYNC_N_B, SEL_VI1_1),
-	PINMUX_IPSR_DATA(IP8_23_21, SCIF2_SCK_B),
+	PINMUX_IPSR_GPSR(IP8_23_21, SCIF2_SCK_B),
 	PINMUX_IPSR_MSEL(IP8_23_21, SCIFA2_SCK, SEL_SCIFA2_1),
 	PINMUX_IPSR_MSEL(IP8_23_21, SSI_SDATA9_B, SEL_SSI9_1),
-	PINMUX_IPSR_DATA(IP8_25_24, DU1_DB3),
-	PINMUX_IPSR_DATA(IP8_25_24, LCDOUT19),
+	PINMUX_IPSR_GPSR(IP8_25_24, DU1_DB3),
+	PINMUX_IPSR_GPSR(IP8_25_24, LCDOUT19),
 	PINMUX_IPSR_MSEL(IP8_25_24, VI1_CLKENB_B, SEL_VI1_1),
-	PINMUX_IPSR_DATA(IP8_27_26, DU1_DB4),
-	PINMUX_IPSR_DATA(IP8_27_26, LCDOUT20),
+	PINMUX_IPSR_GPSR(IP8_27_26, DU1_DB4),
+	PINMUX_IPSR_GPSR(IP8_27_26, LCDOUT20),
 	PINMUX_IPSR_MSEL(IP8_27_26, VI1_FIELD_B, SEL_VI1_1),
 	PINMUX_IPSR_MSEL(IP8_27_26, CAN1_RX, SEL_CAN1_0),
-	PINMUX_IPSR_DATA(IP8_30_28, DU1_DB5),
-	PINMUX_IPSR_DATA(IP8_30_28, LCDOUT21),
+	PINMUX_IPSR_GPSR(IP8_30_28, DU1_DB5),
+	PINMUX_IPSR_GPSR(IP8_30_28, LCDOUT21),
 	PINMUX_IPSR_MSEL(IP8_30_28, TX3, SEL_SCIF3_0),
 	PINMUX_IPSR_MSEL(IP8_30_28, SCIFA3_TXD, SEL_SCIFA3_0),
 	PINMUX_IPSR_MSEL(IP8_30_28, CAN1_TX, SEL_CAN1_0),
 
 	/* IPSR9 */
-	PINMUX_IPSR_DATA(IP9_2_0, DU1_DB6),
-	PINMUX_IPSR_DATA(IP9_2_0, LCDOUT22),
+	PINMUX_IPSR_GPSR(IP9_2_0, DU1_DB6),
+	PINMUX_IPSR_GPSR(IP9_2_0, LCDOUT22),
 	PINMUX_IPSR_MSEL(IP9_2_0, SCL3_C, SEL_IIC3_2),
 	PINMUX_IPSR_MSEL(IP9_2_0, RX3, SEL_SCIF3_0),
 	PINMUX_IPSR_MSEL(IP9_2_0, SCIFA3_RXD, SEL_SCIFA3_0),
-	PINMUX_IPSR_DATA(IP9_5_3, DU1_DB7),
-	PINMUX_IPSR_DATA(IP9_5_3, LCDOUT23),
+	PINMUX_IPSR_GPSR(IP9_5_3, DU1_DB7),
+	PINMUX_IPSR_GPSR(IP9_5_3, LCDOUT23),
 	PINMUX_IPSR_MSEL(IP9_5_3, SDA3_C, SEL_IIC3_2),
 	PINMUX_IPSR_MSEL(IP9_5_3, SCIF3_SCK, SEL_SCIF3_0),
 	PINMUX_IPSR_MSEL(IP9_5_3, SCIFA3_SCK, SEL_SCIFA3_0),
 	PINMUX_IPSR_MSEL(IP9_6, DU1_DOTCLKIN, SEL_DIS_0),
-	PINMUX_IPSR_DATA(IP9_6, QSTVA_QVS),
-	PINMUX_IPSR_DATA(IP9_7, DU1_DOTCLKOUT0),
-	PINMUX_IPSR_DATA(IP9_7, QCLK),
-	PINMUX_IPSR_DATA(IP9_10_8, DU1_DOTCLKOUT1),
-	PINMUX_IPSR_DATA(IP9_10_8, QSTVB_QVE),
+	PINMUX_IPSR_GPSR(IP9_6, QSTVA_QVS),
+	PINMUX_IPSR_GPSR(IP9_7, DU1_DOTCLKOUT0),
+	PINMUX_IPSR_GPSR(IP9_7, QCLK),
+	PINMUX_IPSR_GPSR(IP9_10_8, DU1_DOTCLKOUT1),
+	PINMUX_IPSR_GPSR(IP9_10_8, QSTVB_QVE),
 	PINMUX_IPSR_MSEL(IP9_10_8, CAN0_TX, SEL_CAN0_0),
 	PINMUX_IPSR_MSEL(IP9_10_8, TX3_B, SEL_SCIF3_1),
 	PINMUX_IPSR_MSEL(IP9_10_8, SCL2_B, SEL_IIC2_1),
-	PINMUX_IPSR_DATA(IP9_10_8, PWM4),
-	PINMUX_IPSR_DATA(IP9_11, DU1_EXHSYNC_DU1_HSYNC),
-	PINMUX_IPSR_DATA(IP9_11, QSTH_QHS),
-	PINMUX_IPSR_DATA(IP9_12, DU1_EXVSYNC_DU1_VSYNC),
-	PINMUX_IPSR_DATA(IP9_12, QSTB_QHE),
-	PINMUX_IPSR_DATA(IP9_15_13, DU1_EXODDF_DU1_ODDF_DISP_CDE),
-	PINMUX_IPSR_DATA(IP9_15_13, QCPV_QDE),
+	PINMUX_IPSR_GPSR(IP9_10_8, PWM4),
+	PINMUX_IPSR_GPSR(IP9_11, DU1_EXHSYNC_DU1_HSYNC),
+	PINMUX_IPSR_GPSR(IP9_11, QSTH_QHS),
+	PINMUX_IPSR_GPSR(IP9_12, DU1_EXVSYNC_DU1_VSYNC),
+	PINMUX_IPSR_GPSR(IP9_12, QSTB_QHE),
+	PINMUX_IPSR_GPSR(IP9_15_13, DU1_EXODDF_DU1_ODDF_DISP_CDE),
+	PINMUX_IPSR_GPSR(IP9_15_13, QCPV_QDE),
 	PINMUX_IPSR_MSEL(IP9_15_13, CAN0_RX, SEL_CAN0_0),
 	PINMUX_IPSR_MSEL(IP9_15_13, RX3_B, SEL_SCIF3_1),
 	PINMUX_IPSR_MSEL(IP9_15_13, SDA2_B, SEL_IIC2_1),
-	PINMUX_IPSR_DATA(IP9_16, DU1_DISP),
-	PINMUX_IPSR_DATA(IP9_16, QPOLA),
-	PINMUX_IPSR_DATA(IP9_18_17, DU1_CDE),
-	PINMUX_IPSR_DATA(IP9_18_17, QPOLB),
-	PINMUX_IPSR_DATA(IP9_18_17, PWM4_B),
-	PINMUX_IPSR_DATA(IP9_20_19, VI0_CLKENB),
+	PINMUX_IPSR_GPSR(IP9_16, DU1_DISP),
+	PINMUX_IPSR_GPSR(IP9_16, QPOLA),
+	PINMUX_IPSR_GPSR(IP9_18_17, DU1_CDE),
+	PINMUX_IPSR_GPSR(IP9_18_17, QPOLB),
+	PINMUX_IPSR_GPSR(IP9_18_17, PWM4_B),
+	PINMUX_IPSR_GPSR(IP9_20_19, VI0_CLKENB),
 	PINMUX_IPSR_MSEL(IP9_20_19, TX4, SEL_SCIF4_0),
 	PINMUX_IPSR_MSEL(IP9_20_19, SCIFA4_TXD, SEL_SCIFA4_0),
 	PINMUX_IPSR_MSEL(IP9_20_19, TS_SDATA0_D, SEL_TSIF0_3),
-	PINMUX_IPSR_DATA(IP9_22_21, VI0_FIELD),
+	PINMUX_IPSR_GPSR(IP9_22_21, VI0_FIELD),
 	PINMUX_IPSR_MSEL(IP9_22_21, RX4, SEL_SCIF4_0),
 	PINMUX_IPSR_MSEL(IP9_22_21, SCIFA4_RXD, SEL_SCIFA4_0),
 	PINMUX_IPSR_MSEL(IP9_22_21, TS_SCK0_D, SEL_TSIF0_3),
-	PINMUX_IPSR_DATA(IP9_24_23, VI0_HSYNC_N),
+	PINMUX_IPSR_GPSR(IP9_24_23, VI0_HSYNC_N),
 	PINMUX_IPSR_MSEL(IP9_24_23, TX5, SEL_SCIF5_0),
 	PINMUX_IPSR_MSEL(IP9_24_23, SCIFA5_TXD, SEL_SCIFA5_0),
 	PINMUX_IPSR_MSEL(IP9_24_23, TS_SDEN0_D, SEL_TSIF0_3),
-	PINMUX_IPSR_DATA(IP9_26_25, VI0_VSYNC_N),
+	PINMUX_IPSR_GPSR(IP9_26_25, VI0_VSYNC_N),
 	PINMUX_IPSR_MSEL(IP9_26_25, RX5, SEL_SCIF5_0),
 	PINMUX_IPSR_MSEL(IP9_26_25, SCIFA5_RXD, SEL_SCIFA5_0),
 	PINMUX_IPSR_MSEL(IP9_26_25, TS_SPSYNC0_D, SEL_TSIF0_3),
-	PINMUX_IPSR_DATA(IP9_28_27, VI0_DATA3_VI0_B3),
+	PINMUX_IPSR_GPSR(IP9_28_27, VI0_DATA3_VI0_B3),
 	PINMUX_IPSR_MSEL(IP9_28_27, SCIF3_SCK_B, SEL_SCIF3_1),
 	PINMUX_IPSR_MSEL(IP9_28_27, SCIFA3_SCK_B, SEL_SCIFA3_1),
-	PINMUX_IPSR_DATA(IP9_31_29, VI0_G0),
+	PINMUX_IPSR_GPSR(IP9_31_29, VI0_G0),
 	PINMUX_IPSR_MSEL(IP9_31_29, SCL8, SEL_IIC8_0),
 	PINMUX_IPSR_MSEL(IP9_31_29, STP_IVCXO27_0_C, SEL_SSP_2),
 	PINMUX_IPSR_MSEL(IP9_31_29, SCL4, SEL_IIC4_0),
 	PINMUX_IPSR_MSEL(IP9_31_29, HCTS2_N, SEL_HSCIF2_0),
 	PINMUX_IPSR_MSEL(IP9_31_29, SCIFB2_CTS_N, SEL_SCIFB2_0),
-	PINMUX_IPSR_DATA(IP9_31_29, ATAWR1_N),
+	PINMUX_IPSR_GPSR(IP9_31_29, ATAWR1_N),
 
 	/* IPSR10 */
-	PINMUX_IPSR_DATA(IP10_2_0, VI0_G1),
+	PINMUX_IPSR_GPSR(IP10_2_0, VI0_G1),
 	PINMUX_IPSR_MSEL(IP10_2_0, SDA8, SEL_IIC8_0),
 	PINMUX_IPSR_MSEL(IP10_2_0, STP_ISCLK_0_C, SEL_SSP_2),
 	PINMUX_IPSR_MSEL(IP10_2_0, SDA4, SEL_IIC4_0),
 	PINMUX_IPSR_MSEL(IP10_2_0, HRTS2_N, SEL_HSCIF2_0),
 	PINMUX_IPSR_MSEL(IP10_2_0, SCIFB2_RTS_N, SEL_SCIFB2_0),
-	PINMUX_IPSR_DATA(IP10_2_0, ATADIR1_N),
-	PINMUX_IPSR_DATA(IP10_5_3, VI0_G2),
-	PINMUX_IPSR_DATA(IP10_5_3, VI2_HSYNC_N),
+	PINMUX_IPSR_GPSR(IP10_2_0, ATADIR1_N),
+	PINMUX_IPSR_GPSR(IP10_5_3, VI0_G2),
+	PINMUX_IPSR_GPSR(IP10_5_3, VI2_HSYNC_N),
 	PINMUX_IPSR_MSEL(IP10_5_3, STP_ISD_0_C, SEL_SSP_2),
 	PINMUX_IPSR_MSEL(IP10_5_3, SCL3_B, SEL_IIC3_1),
 	PINMUX_IPSR_MSEL(IP10_5_3, HSCK2, SEL_HSCIF2_0),
 	PINMUX_IPSR_MSEL(IP10_5_3, SCIFB2_SCK, SEL_SCIFB2_0),
-	PINMUX_IPSR_DATA(IP10_5_3, ATARD1_N),
-	PINMUX_IPSR_DATA(IP10_8_6, VI0_G3),
-	PINMUX_IPSR_DATA(IP10_8_6, VI2_VSYNC_N),
+	PINMUX_IPSR_GPSR(IP10_5_3, ATARD1_N),
+	PINMUX_IPSR_GPSR(IP10_8_6, VI0_G3),
+	PINMUX_IPSR_GPSR(IP10_8_6, VI2_VSYNC_N),
 	PINMUX_IPSR_MSEL(IP10_8_6, STP_ISEN_0_C, SEL_SSP_2),
 	PINMUX_IPSR_MSEL(IP10_8_6, SDA3_B, SEL_IIC3_1),
 	PINMUX_IPSR_MSEL(IP10_8_6, HRX2, SEL_HSCIF2_0),
 	PINMUX_IPSR_MSEL(IP10_8_6, SCIFB2_RXD, SEL_SCIFB2_0),
-	PINMUX_IPSR_DATA(IP10_8_6, ATACS01_N),
-	PINMUX_IPSR_DATA(IP10_11_9, VI0_G4),
-	PINMUX_IPSR_DATA(IP10_11_9, VI2_CLKENB),
+	PINMUX_IPSR_GPSR(IP10_8_6, ATACS01_N),
+	PINMUX_IPSR_GPSR(IP10_11_9, VI0_G4),
+	PINMUX_IPSR_GPSR(IP10_11_9, VI2_CLKENB),
 	PINMUX_IPSR_MSEL(IP10_11_9, STP_ISSYNC_0_C, SEL_SSP_2),
 	PINMUX_IPSR_MSEL(IP10_11_9, HTX2, SEL_HSCIF2_0),
 	PINMUX_IPSR_MSEL(IP10_11_9, SCIFB2_TXD, SEL_SCIFB2_0),
 	PINMUX_IPSR_MSEL(IP10_11_9, SCIFB0_SCK_D, SEL_SCIFB_3),
-	PINMUX_IPSR_DATA(IP10_14_12, VI0_G5),
-	PINMUX_IPSR_DATA(IP10_14_12, VI2_FIELD),
+	PINMUX_IPSR_GPSR(IP10_14_12, VI0_G5),
+	PINMUX_IPSR_GPSR(IP10_14_12, VI2_FIELD),
 	PINMUX_IPSR_MSEL(IP10_14_12, STP_OPWM_0_C, SEL_SSP_2),
 	PINMUX_IPSR_MSEL(IP10_14_12, FMCLK_D, SEL_FM_3),
 	PINMUX_IPSR_MSEL(IP10_14_12, CAN0_TX_E, SEL_CAN0_4),
 	PINMUX_IPSR_MSEL(IP10_14_12, HTX1_D, SEL_HSCIF1_3),
 	PINMUX_IPSR_MSEL(IP10_14_12, SCIFB0_TXD_D, SEL_SCIFB_3),
-	PINMUX_IPSR_DATA(IP10_16_15, VI0_G6),
-	PINMUX_IPSR_DATA(IP10_16_15, VI2_CLK),
+	PINMUX_IPSR_GPSR(IP10_16_15, VI0_G6),
+	PINMUX_IPSR_GPSR(IP10_16_15, VI2_CLK),
 	PINMUX_IPSR_MSEL(IP10_16_15, BPFCLK_D, SEL_FM_3),
-	PINMUX_IPSR_DATA(IP10_18_17, VI0_G7),
-	PINMUX_IPSR_DATA(IP10_18_17, VI2_DATA0),
+	PINMUX_IPSR_GPSR(IP10_18_17, VI0_G7),
+	PINMUX_IPSR_GPSR(IP10_18_17, VI2_DATA0),
 	PINMUX_IPSR_MSEL(IP10_18_17, FMIN_D, SEL_FM_3),
-	PINMUX_IPSR_DATA(IP10_21_19, VI0_R0),
-	PINMUX_IPSR_DATA(IP10_21_19, VI2_DATA1),
+	PINMUX_IPSR_GPSR(IP10_21_19, VI0_R0),
+	PINMUX_IPSR_GPSR(IP10_21_19, VI2_DATA1),
 	PINMUX_IPSR_MSEL(IP10_21_19, GLO_I0_B, SEL_GPS_1),
 	PINMUX_IPSR_MSEL(IP10_21_19, TS_SDATA0_C, SEL_TSIF0_2),
-	PINMUX_IPSR_DATA(IP10_21_19, ATACS11_N),
-	PINMUX_IPSR_DATA(IP10_24_22, VI0_R1),
-	PINMUX_IPSR_DATA(IP10_24_22, VI2_DATA2),
+	PINMUX_IPSR_GPSR(IP10_21_19, ATACS11_N),
+	PINMUX_IPSR_GPSR(IP10_24_22, VI0_R1),
+	PINMUX_IPSR_GPSR(IP10_24_22, VI2_DATA2),
 	PINMUX_IPSR_MSEL(IP10_24_22, GLO_I1_B, SEL_GPS_1),
 	PINMUX_IPSR_MSEL(IP10_24_22, TS_SCK0_C, SEL_TSIF0_2),
-	PINMUX_IPSR_DATA(IP10_24_22, ATAG1_N),
-	PINMUX_IPSR_DATA(IP10_26_25, VI0_R2),
-	PINMUX_IPSR_DATA(IP10_26_25, VI2_DATA3),
+	PINMUX_IPSR_GPSR(IP10_24_22, ATAG1_N),
+	PINMUX_IPSR_GPSR(IP10_26_25, VI0_R2),
+	PINMUX_IPSR_GPSR(IP10_26_25, VI2_DATA3),
 	PINMUX_IPSR_MSEL(IP10_26_25, GLO_Q0_B, SEL_GPS_1),
 	PINMUX_IPSR_MSEL(IP10_26_25, TS_SDEN0_C, SEL_TSIF0_2),
-	PINMUX_IPSR_DATA(IP10_28_27, VI0_R3),
-	PINMUX_IPSR_DATA(IP10_28_27, VI2_DATA4),
+	PINMUX_IPSR_GPSR(IP10_28_27, VI0_R3),
+	PINMUX_IPSR_GPSR(IP10_28_27, VI2_DATA4),
 	PINMUX_IPSR_MSEL(IP10_28_27, GLO_Q1_B, SEL_GPS_1),
 	PINMUX_IPSR_MSEL(IP10_28_27, TS_SPSYNC0_C, SEL_TSIF0_2),
-	PINMUX_IPSR_DATA(IP10_31_29, VI0_R4),
-	PINMUX_IPSR_DATA(IP10_31_29, VI2_DATA5),
+	PINMUX_IPSR_GPSR(IP10_31_29, VI0_R4),
+	PINMUX_IPSR_GPSR(IP10_31_29, VI2_DATA5),
 	PINMUX_IPSR_MSEL(IP10_31_29, GLO_SCLK_B, SEL_GPS_1),
 	PINMUX_IPSR_MSEL(IP10_31_29, TX0_C, SEL_SCIF0_2),
 	PINMUX_IPSR_MSEL(IP10_31_29, SCL1_D, SEL_IIC1_3),
 
 	/* IPSR11 */
-	PINMUX_IPSR_DATA(IP11_2_0, VI0_R5),
-	PINMUX_IPSR_DATA(IP11_2_0, VI2_DATA6),
+	PINMUX_IPSR_GPSR(IP11_2_0, VI0_R5),
+	PINMUX_IPSR_GPSR(IP11_2_0, VI2_DATA6),
 	PINMUX_IPSR_MSEL(IP11_2_0, GLO_SDATA_B, SEL_GPS_1),
 	PINMUX_IPSR_MSEL(IP11_2_0, RX0_C, SEL_SCIF0_2),
 	PINMUX_IPSR_MSEL(IP11_2_0, SDA1_D, SEL_IIC1_3),
-	PINMUX_IPSR_DATA(IP11_5_3, VI0_R6),
-	PINMUX_IPSR_DATA(IP11_5_3, VI2_DATA7),
+	PINMUX_IPSR_GPSR(IP11_5_3, VI0_R6),
+	PINMUX_IPSR_GPSR(IP11_5_3, VI2_DATA7),
 	PINMUX_IPSR_MSEL(IP11_5_3, GLO_SS_B, SEL_GPS_1),
 	PINMUX_IPSR_MSEL(IP11_5_3, TX1_C, SEL_SCIF1_2),
 	PINMUX_IPSR_MSEL(IP11_5_3, SCL4_B, SEL_IIC4_1),
-	PINMUX_IPSR_DATA(IP11_8_6, VI0_R7),
+	PINMUX_IPSR_GPSR(IP11_8_6, VI0_R7),
 	PINMUX_IPSR_MSEL(IP11_8_6, GLO_RFON_B, SEL_GPS_1),
 	PINMUX_IPSR_MSEL(IP11_8_6, RX1_C, SEL_SCIF1_2),
 	PINMUX_IPSR_MSEL(IP11_8_6, CAN0_RX_E, SEL_CAN0_4),
@@ -1388,180 +1388,180 @@
 	PINMUX_IPSR_MSEL(IP11_8_6, HRX1_D, SEL_HSCIF1_3),
 	PINMUX_IPSR_MSEL(IP11_8_6, SCIFB0_RXD_D, SEL_SCIFB_3),
 	PINMUX_IPSR_MSEL(IP11_11_9, VI1_HSYNC_N, SEL_VI1_0),
-	PINMUX_IPSR_DATA(IP11_11_9, AVB_RXD0),
+	PINMUX_IPSR_GPSR(IP11_11_9, AVB_RXD0),
 	PINMUX_IPSR_MSEL(IP11_11_9, TS_SDATA0_B, SEL_TSIF0_1),
 	PINMUX_IPSR_MSEL(IP11_11_9, TX4_B, SEL_SCIF4_1),
 	PINMUX_IPSR_MSEL(IP11_11_9, SCIFA4_TXD_B, SEL_SCIFA4_1),
 	PINMUX_IPSR_MSEL(IP11_14_12, VI1_VSYNC_N, SEL_VI1_0),
-	PINMUX_IPSR_DATA(IP11_14_12, AVB_RXD1),
+	PINMUX_IPSR_GPSR(IP11_14_12, AVB_RXD1),
 	PINMUX_IPSR_MSEL(IP11_14_12, TS_SCK0_B, SEL_TSIF0_1),
 	PINMUX_IPSR_MSEL(IP11_14_12, RX4_B, SEL_SCIF4_1),
 	PINMUX_IPSR_MSEL(IP11_14_12, SCIFA4_RXD_B, SEL_SCIFA4_1),
 	PINMUX_IPSR_MSEL(IP11_16_15, VI1_CLKENB, SEL_VI1_0),
-	PINMUX_IPSR_DATA(IP11_16_15, AVB_RXD2),
+	PINMUX_IPSR_GPSR(IP11_16_15, AVB_RXD2),
 	PINMUX_IPSR_MSEL(IP11_16_15, TS_SDEN0_B, SEL_TSIF0_1),
 	PINMUX_IPSR_MSEL(IP11_18_17, VI1_FIELD, SEL_VI1_0),
-	PINMUX_IPSR_DATA(IP11_18_17, AVB_RXD3),
+	PINMUX_IPSR_GPSR(IP11_18_17, AVB_RXD3),
 	PINMUX_IPSR_MSEL(IP11_18_17, TS_SPSYNC0_B, SEL_TSIF0_1),
 	PINMUX_IPSR_MSEL(IP11_19, VI1_CLK, SEL_VI1_0),
-	PINMUX_IPSR_DATA(IP11_19, AVB_RXD4),
+	PINMUX_IPSR_GPSR(IP11_19, AVB_RXD4),
 	PINMUX_IPSR_MSEL(IP11_20, VI1_DATA0, SEL_VI1_0),
-	PINMUX_IPSR_DATA(IP11_20, AVB_RXD5),
+	PINMUX_IPSR_GPSR(IP11_20, AVB_RXD5),
 	PINMUX_IPSR_MSEL(IP11_21, VI1_DATA1, SEL_VI1_0),
-	PINMUX_IPSR_DATA(IP11_21, AVB_RXD6),
+	PINMUX_IPSR_GPSR(IP11_21, AVB_RXD6),
 	PINMUX_IPSR_MSEL(IP11_22, VI1_DATA2, SEL_VI1_0),
-	PINMUX_IPSR_DATA(IP11_22, AVB_RXD7),
+	PINMUX_IPSR_GPSR(IP11_22, AVB_RXD7),
 	PINMUX_IPSR_MSEL(IP11_23, VI1_DATA3, SEL_VI1_0),
-	PINMUX_IPSR_DATA(IP11_23, AVB_RX_ER),
+	PINMUX_IPSR_GPSR(IP11_23, AVB_RX_ER),
 	PINMUX_IPSR_MSEL(IP11_24, VI1_DATA4, SEL_VI1_0),
-	PINMUX_IPSR_DATA(IP11_24, AVB_MDIO),
+	PINMUX_IPSR_GPSR(IP11_24, AVB_MDIO),
 	PINMUX_IPSR_MSEL(IP11_25, VI1_DATA5, SEL_VI1_0),
-	PINMUX_IPSR_DATA(IP11_25, AVB_RX_DV),
+	PINMUX_IPSR_GPSR(IP11_25, AVB_RX_DV),
 	PINMUX_IPSR_MSEL(IP11_26, VI1_DATA6, SEL_VI1_0),
-	PINMUX_IPSR_DATA(IP11_26, AVB_MAGIC),
+	PINMUX_IPSR_GPSR(IP11_26, AVB_MAGIC),
 	PINMUX_IPSR_MSEL(IP11_27, VI1_DATA7, SEL_VI1_0),
-	PINMUX_IPSR_DATA(IP11_27, AVB_MDC),
-	PINMUX_IPSR_DATA(IP11_29_28, ETH_MDIO),
-	PINMUX_IPSR_DATA(IP11_29_28, AVB_RX_CLK),
+	PINMUX_IPSR_GPSR(IP11_27, AVB_MDC),
+	PINMUX_IPSR_GPSR(IP11_29_28, ETH_MDIO),
+	PINMUX_IPSR_GPSR(IP11_29_28, AVB_RX_CLK),
 	PINMUX_IPSR_MSEL(IP11_29_28, SCL2_C, SEL_IIC2_2),
-	PINMUX_IPSR_DATA(IP11_31_30, ETH_CRS_DV),
-	PINMUX_IPSR_DATA(IP11_31_30, AVB_LINK),
+	PINMUX_IPSR_GPSR(IP11_31_30, ETH_CRS_DV),
+	PINMUX_IPSR_GPSR(IP11_31_30, AVB_LINK),
 	PINMUX_IPSR_MSEL(IP11_31_30, SDA2_C, SEL_IIC2_2),
 
 	/* IPSR12 */
-	PINMUX_IPSR_DATA(IP12_1_0, ETH_RX_ER),
-	PINMUX_IPSR_DATA(IP12_1_0, AVB_CRS),
+	PINMUX_IPSR_GPSR(IP12_1_0, ETH_RX_ER),
+	PINMUX_IPSR_GPSR(IP12_1_0, AVB_CRS),
 	PINMUX_IPSR_MSEL(IP12_1_0, SCL3, SEL_IIC3_0),
 	PINMUX_IPSR_MSEL(IP12_1_0, SCL7, SEL_IIC7_0),
-	PINMUX_IPSR_DATA(IP12_3_2, ETH_RXD0),
-	PINMUX_IPSR_DATA(IP12_3_2, AVB_PHY_INT),
+	PINMUX_IPSR_GPSR(IP12_3_2, ETH_RXD0),
+	PINMUX_IPSR_GPSR(IP12_3_2, AVB_PHY_INT),
 	PINMUX_IPSR_MSEL(IP12_3_2, SDA3, SEL_IIC3_0),
 	PINMUX_IPSR_MSEL(IP12_3_2, SDA7, SEL_IIC7_0),
-	PINMUX_IPSR_DATA(IP12_6_4, ETH_RXD1),
-	PINMUX_IPSR_DATA(IP12_6_4, AVB_GTXREFCLK),
+	PINMUX_IPSR_GPSR(IP12_6_4, ETH_RXD1),
+	PINMUX_IPSR_GPSR(IP12_6_4, AVB_GTXREFCLK),
 	PINMUX_IPSR_MSEL(IP12_6_4, CAN0_TX_C, SEL_CAN0_2),
 	PINMUX_IPSR_MSEL(IP12_6_4, SCL2_D, SEL_IIC2_3),
 	PINMUX_IPSR_MSEL(IP12_6_4, MSIOF1_RXD_E, SEL_SOF1_4),
-	PINMUX_IPSR_DATA(IP12_9_7, ETH_LINK),
-	PINMUX_IPSR_DATA(IP12_9_7, AVB_TXD0),
+	PINMUX_IPSR_GPSR(IP12_9_7, ETH_LINK),
+	PINMUX_IPSR_GPSR(IP12_9_7, AVB_TXD0),
 	PINMUX_IPSR_MSEL(IP12_9_7, CAN0_RX_C, SEL_CAN0_2),
 	PINMUX_IPSR_MSEL(IP12_9_7, SDA2_D, SEL_IIC2_3),
 	PINMUX_IPSR_MSEL(IP12_9_7, MSIOF1_SCK_E, SEL_SOF1_4),
-	PINMUX_IPSR_DATA(IP12_12_10, ETH_REFCLK),
-	PINMUX_IPSR_DATA(IP12_12_10, AVB_TXD1),
+	PINMUX_IPSR_GPSR(IP12_12_10, ETH_REFCLK),
+	PINMUX_IPSR_GPSR(IP12_12_10, AVB_TXD1),
 	PINMUX_IPSR_MSEL(IP12_12_10, SCIFA3_RXD_B, SEL_SCIFA3_1),
 	PINMUX_IPSR_MSEL(IP12_12_10, CAN1_RX_C, SEL_CAN1_2),
 	PINMUX_IPSR_MSEL(IP12_12_10, MSIOF1_SYNC_E, SEL_SOF1_4),
-	PINMUX_IPSR_DATA(IP12_15_13, ETH_TXD1),
-	PINMUX_IPSR_DATA(IP12_15_13, AVB_TXD2),
+	PINMUX_IPSR_GPSR(IP12_15_13, ETH_TXD1),
+	PINMUX_IPSR_GPSR(IP12_15_13, AVB_TXD2),
 	PINMUX_IPSR_MSEL(IP12_15_13, SCIFA3_TXD_B, SEL_SCIFA3_1),
 	PINMUX_IPSR_MSEL(IP12_15_13, CAN1_TX_C, SEL_CAN1_2),
 	PINMUX_IPSR_MSEL(IP12_15_13, MSIOF1_TXD_E, SEL_SOF1_4),
-	PINMUX_IPSR_DATA(IP12_17_16, ETH_TX_EN),
-	PINMUX_IPSR_DATA(IP12_17_16, AVB_TXD3),
+	PINMUX_IPSR_GPSR(IP12_17_16, ETH_TX_EN),
+	PINMUX_IPSR_GPSR(IP12_17_16, AVB_TXD3),
 	PINMUX_IPSR_MSEL(IP12_17_16, TCLK1_B, SEL_TMU1_0),
 	PINMUX_IPSR_MSEL(IP12_17_16, CAN_CLK_B, SEL_CANCLK_1),
-	PINMUX_IPSR_DATA(IP12_19_18, ETH_MAGIC),
-	PINMUX_IPSR_DATA(IP12_19_18, AVB_TXD4),
+	PINMUX_IPSR_GPSR(IP12_19_18, ETH_MAGIC),
+	PINMUX_IPSR_GPSR(IP12_19_18, AVB_TXD4),
 	PINMUX_IPSR_MSEL(IP12_19_18, IETX_C, SEL_IEB_2),
-	PINMUX_IPSR_DATA(IP12_21_20, ETH_TXD0),
-	PINMUX_IPSR_DATA(IP12_21_20, AVB_TXD5),
+	PINMUX_IPSR_GPSR(IP12_21_20, ETH_TXD0),
+	PINMUX_IPSR_GPSR(IP12_21_20, AVB_TXD5),
 	PINMUX_IPSR_MSEL(IP12_21_20, IECLK_C, SEL_IEB_2),
-	PINMUX_IPSR_DATA(IP12_23_22, ETH_MDC),
-	PINMUX_IPSR_DATA(IP12_23_22, AVB_TXD6),
+	PINMUX_IPSR_GPSR(IP12_23_22, ETH_MDC),
+	PINMUX_IPSR_GPSR(IP12_23_22, AVB_TXD6),
 	PINMUX_IPSR_MSEL(IP12_23_22, IERX_C, SEL_IEB_2),
 	PINMUX_IPSR_MSEL(IP12_26_24, STP_IVCXO27_0, SEL_SSP_0),
-	PINMUX_IPSR_DATA(IP12_26_24, AVB_TXD7),
+	PINMUX_IPSR_GPSR(IP12_26_24, AVB_TXD7),
 	PINMUX_IPSR_MSEL(IP12_26_24, SCIFB2_TXD_D, SEL_SCIFB2_3),
 	PINMUX_IPSR_MSEL(IP12_26_24, ADIDATA_B, SEL_RAD_1),
 	PINMUX_IPSR_MSEL(IP12_26_24, MSIOF0_SYNC_C, SEL_SOF0_2),
 	PINMUX_IPSR_MSEL(IP12_29_27, STP_ISCLK_0, SEL_SSP_0),
-	PINMUX_IPSR_DATA(IP12_29_27, AVB_TX_EN),
+	PINMUX_IPSR_GPSR(IP12_29_27, AVB_TX_EN),
 	PINMUX_IPSR_MSEL(IP12_29_27, SCIFB2_RXD_D, SEL_SCIFB2_3),
 	PINMUX_IPSR_MSEL(IP12_29_27, ADICS_SAMP_B, SEL_RAD_1),
 	PINMUX_IPSR_MSEL(IP12_29_27, MSIOF0_SCK_C, SEL_SOF0_2),
 
 	/* IPSR13 */
 	PINMUX_IPSR_MSEL(IP13_2_0, STP_ISD_0, SEL_SSP_0),
-	PINMUX_IPSR_DATA(IP13_2_0, AVB_TX_ER),
+	PINMUX_IPSR_GPSR(IP13_2_0, AVB_TX_ER),
 	PINMUX_IPSR_MSEL(IP13_2_0, SCIFB2_SCK_C, SEL_SCIFB2_2),
 	PINMUX_IPSR_MSEL(IP13_2_0, ADICLK_B, SEL_RAD_1),
 	PINMUX_IPSR_MSEL(IP13_2_0, MSIOF0_SS1_C, SEL_SOF0_2),
 	PINMUX_IPSR_MSEL(IP13_4_3, STP_ISEN_0, SEL_SSP_0),
-	PINMUX_IPSR_DATA(IP13_4_3, AVB_TX_CLK),
+	PINMUX_IPSR_GPSR(IP13_4_3, AVB_TX_CLK),
 	PINMUX_IPSR_MSEL(IP13_4_3, ADICHS0_B, SEL_RAD_1),
 	PINMUX_IPSR_MSEL(IP13_4_3, MSIOF0_SS2_C, SEL_SOF0_2),
 	PINMUX_IPSR_MSEL(IP13_6_5, STP_ISSYNC_0, SEL_SSP_0),
-	PINMUX_IPSR_DATA(IP13_6_5, AVB_COL),
+	PINMUX_IPSR_GPSR(IP13_6_5, AVB_COL),
 	PINMUX_IPSR_MSEL(IP13_6_5, ADICHS1_B, SEL_RAD_1),
 	PINMUX_IPSR_MSEL(IP13_6_5, MSIOF0_RXD_C, SEL_SOF0_2),
 	PINMUX_IPSR_MSEL(IP13_9_7, STP_OPWM_0, SEL_SSP_0),
-	PINMUX_IPSR_DATA(IP13_9_7, AVB_GTX_CLK),
-	PINMUX_IPSR_DATA(IP13_9_7, PWM0_B),
+	PINMUX_IPSR_GPSR(IP13_9_7, AVB_GTX_CLK),
+	PINMUX_IPSR_GPSR(IP13_9_7, PWM0_B),
 	PINMUX_IPSR_MSEL(IP13_9_7, ADICHS2_B, SEL_RAD_1),
 	PINMUX_IPSR_MSEL(IP13_9_7, MSIOF0_TXD_C, SEL_SOF0_2),
-	PINMUX_IPSR_DATA(IP13_10, SD0_CLK),
+	PINMUX_IPSR_GPSR(IP13_10, SD0_CLK),
 	PINMUX_IPSR_MSEL(IP13_10, SPCLK_B, SEL_QSP_1),
-	PINMUX_IPSR_DATA(IP13_11, SD0_CMD),
+	PINMUX_IPSR_GPSR(IP13_11, SD0_CMD),
 	PINMUX_IPSR_MSEL(IP13_11, MOSI_IO0_B, SEL_QSP_1),
-	PINMUX_IPSR_DATA(IP13_12, SD0_DATA0),
+	PINMUX_IPSR_GPSR(IP13_12, SD0_DATA0),
 	PINMUX_IPSR_MSEL(IP13_12, MISO_IO1_B, SEL_QSP_1),
-	PINMUX_IPSR_DATA(IP13_13, SD0_DATA1),
+	PINMUX_IPSR_GPSR(IP13_13, SD0_DATA1),
 	PINMUX_IPSR_MSEL(IP13_13, IO2_B, SEL_QSP_1),
-	PINMUX_IPSR_DATA(IP13_14, SD0_DATA2),
+	PINMUX_IPSR_GPSR(IP13_14, SD0_DATA2),
 	PINMUX_IPSR_MSEL(IP13_14, IO3_B, SEL_QSP_1),
-	PINMUX_IPSR_DATA(IP13_15, SD0_DATA3),
+	PINMUX_IPSR_GPSR(IP13_15, SD0_DATA3),
 	PINMUX_IPSR_MSEL(IP13_15, SSL_B, SEL_QSP_1),
-	PINMUX_IPSR_DATA(IP13_18_16, SD0_CD),
+	PINMUX_IPSR_GPSR(IP13_18_16, SD0_CD),
 	PINMUX_IPSR_MSEL(IP13_18_16, MMC_D6_B, SEL_MMC_1),
 	PINMUX_IPSR_MSEL(IP13_18_16, SIM0_RST_B, SEL_SIM_1),
 	PINMUX_IPSR_MSEL(IP13_18_16, CAN0_RX_F, SEL_CAN0_5),
 	PINMUX_IPSR_MSEL(IP13_18_16, SCIFA5_TXD_B, SEL_SCIFA5_1),
 	PINMUX_IPSR_MSEL(IP13_18_16, TX3_C, SEL_SCIF3_2),
-	PINMUX_IPSR_DATA(IP13_21_19, SD0_WP),
+	PINMUX_IPSR_GPSR(IP13_21_19, SD0_WP),
 	PINMUX_IPSR_MSEL(IP13_21_19, MMC_D7_B, SEL_MMC_1),
 	PINMUX_IPSR_MSEL(IP13_21_19, SIM0_D_B, SEL_SIM_1),
 	PINMUX_IPSR_MSEL(IP13_21_19, CAN0_TX_F, SEL_CAN0_5),
 	PINMUX_IPSR_MSEL(IP13_21_19, SCIFA5_RXD_B, SEL_SCIFA5_1),
 	PINMUX_IPSR_MSEL(IP13_21_19, RX3_C, SEL_SCIF3_2),
-	PINMUX_IPSR_DATA(IP13_22, SD1_CMD),
+	PINMUX_IPSR_GPSR(IP13_22, SD1_CMD),
 	PINMUX_IPSR_MSEL(IP13_22, REMOCON_B, SEL_RCN_1),
-	PINMUX_IPSR_DATA(IP13_24_23, SD1_DATA0),
+	PINMUX_IPSR_GPSR(IP13_24_23, SD1_DATA0),
 	PINMUX_IPSR_MSEL(IP13_24_23, SPEEDIN_B, SEL_RSP_1),
-	PINMUX_IPSR_DATA(IP13_25, SD1_DATA1),
+	PINMUX_IPSR_GPSR(IP13_25, SD1_DATA1),
 	PINMUX_IPSR_MSEL(IP13_25, IETX_B, SEL_IEB_1),
-	PINMUX_IPSR_DATA(IP13_26, SD1_DATA2),
+	PINMUX_IPSR_GPSR(IP13_26, SD1_DATA2),
 	PINMUX_IPSR_MSEL(IP13_26, IECLK_B, SEL_IEB_1),
-	PINMUX_IPSR_DATA(IP13_27, SD1_DATA3),
+	PINMUX_IPSR_GPSR(IP13_27, SD1_DATA3),
 	PINMUX_IPSR_MSEL(IP13_27, IERX_B, SEL_IEB_1),
-	PINMUX_IPSR_DATA(IP13_30_28, SD1_CD),
-	PINMUX_IPSR_DATA(IP13_30_28, PWM0),
-	PINMUX_IPSR_DATA(IP13_30_28, TPU_TO0),
+	PINMUX_IPSR_GPSR(IP13_30_28, SD1_CD),
+	PINMUX_IPSR_GPSR(IP13_30_28, PWM0),
+	PINMUX_IPSR_GPSR(IP13_30_28, TPU_TO0),
 	PINMUX_IPSR_MSEL(IP13_30_28, SCL1_C, SEL_IIC1_2),
 
 	/* IPSR14 */
-	PINMUX_IPSR_DATA(IP14_1_0, SD1_WP),
-	PINMUX_IPSR_DATA(IP14_1_0, PWM1_B),
+	PINMUX_IPSR_GPSR(IP14_1_0, SD1_WP),
+	PINMUX_IPSR_GPSR(IP14_1_0, PWM1_B),
 	PINMUX_IPSR_MSEL(IP14_1_0, SDA1_C, SEL_IIC1_2),
-	PINMUX_IPSR_DATA(IP14_2, SD2_CLK),
-	PINMUX_IPSR_DATA(IP14_2, MMC_CLK),
-	PINMUX_IPSR_DATA(IP14_3, SD2_CMD),
-	PINMUX_IPSR_DATA(IP14_3, MMC_CMD),
-	PINMUX_IPSR_DATA(IP14_4, SD2_DATA0),
-	PINMUX_IPSR_DATA(IP14_4, MMC_D0),
-	PINMUX_IPSR_DATA(IP14_5, SD2_DATA1),
-	PINMUX_IPSR_DATA(IP14_5, MMC_D1),
-	PINMUX_IPSR_DATA(IP14_6, SD2_DATA2),
-	PINMUX_IPSR_DATA(IP14_6, MMC_D2),
-	PINMUX_IPSR_DATA(IP14_7, SD2_DATA3),
-	PINMUX_IPSR_DATA(IP14_7, MMC_D3),
-	PINMUX_IPSR_DATA(IP14_10_8, SD2_CD),
-	PINMUX_IPSR_DATA(IP14_10_8, MMC_D4),
+	PINMUX_IPSR_GPSR(IP14_2, SD2_CLK),
+	PINMUX_IPSR_GPSR(IP14_2, MMC_CLK),
+	PINMUX_IPSR_GPSR(IP14_3, SD2_CMD),
+	PINMUX_IPSR_GPSR(IP14_3, MMC_CMD),
+	PINMUX_IPSR_GPSR(IP14_4, SD2_DATA0),
+	PINMUX_IPSR_GPSR(IP14_4, MMC_D0),
+	PINMUX_IPSR_GPSR(IP14_5, SD2_DATA1),
+	PINMUX_IPSR_GPSR(IP14_5, MMC_D1),
+	PINMUX_IPSR_GPSR(IP14_6, SD2_DATA2),
+	PINMUX_IPSR_GPSR(IP14_6, MMC_D2),
+	PINMUX_IPSR_GPSR(IP14_7, SD2_DATA3),
+	PINMUX_IPSR_GPSR(IP14_7, MMC_D3),
+	PINMUX_IPSR_GPSR(IP14_10_8, SD2_CD),
+	PINMUX_IPSR_GPSR(IP14_10_8, MMC_D4),
 	PINMUX_IPSR_MSEL(IP14_10_8, SCL8_C, SEL_IIC8_2),
 	PINMUX_IPSR_MSEL(IP14_10_8, TX5_B, SEL_SCIF5_1),
 	PINMUX_IPSR_MSEL(IP14_10_8, SCIFA5_TXD_C, SEL_SCIFA5_2),
-	PINMUX_IPSR_DATA(IP14_13_11, SD2_WP),
-	PINMUX_IPSR_DATA(IP14_13_11, MMC_D5),
+	PINMUX_IPSR_GPSR(IP14_13_11, SD2_WP),
+	PINMUX_IPSR_GPSR(IP14_13_11, MMC_D5),
 	PINMUX_IPSR_MSEL(IP14_13_11, SDA8_C, SEL_IIC8_2),
 	PINMUX_IPSR_MSEL(IP14_13_11, RX5_B, SEL_SCIF5_1),
 	PINMUX_IPSR_MSEL(IP14_13_11, SCIFA5_RXD_C, SEL_SCIFA5_2),
@@ -1569,40 +1569,40 @@
 	PINMUX_IPSR_MSEL(IP14_16_14, RX2_C, SEL_SCIF2_2),
 	PINMUX_IPSR_MSEL(IP14_16_14, ADIDATA, SEL_RAD_0),
 	PINMUX_IPSR_MSEL(IP14_16_14, VI1_CLK_C, SEL_VI1_2),
-	PINMUX_IPSR_DATA(IP14_16_14, VI1_G0_B),
+	PINMUX_IPSR_GPSR(IP14_16_14, VI1_G0_B),
 	PINMUX_IPSR_MSEL(IP14_19_17, MSIOF0_SYNC, SEL_SOF0_0),
 	PINMUX_IPSR_MSEL(IP14_19_17, TX2_C, SEL_SCIF2_2),
 	PINMUX_IPSR_MSEL(IP14_19_17, ADICS_SAMP, SEL_RAD_0),
 	PINMUX_IPSR_MSEL(IP14_19_17, VI1_CLKENB_C, SEL_VI1_2),
-	PINMUX_IPSR_DATA(IP14_19_17, VI1_G1_B),
+	PINMUX_IPSR_GPSR(IP14_19_17, VI1_G1_B),
 	PINMUX_IPSR_MSEL(IP14_22_20, MSIOF0_TXD, SEL_SOF0_0),
 	PINMUX_IPSR_MSEL(IP14_22_20, ADICLK, SEL_RAD_0),
 	PINMUX_IPSR_MSEL(IP14_22_20, VI1_FIELD_C, SEL_VI1_2),
-	PINMUX_IPSR_DATA(IP14_22_20, VI1_G2_B),
+	PINMUX_IPSR_GPSR(IP14_22_20, VI1_G2_B),
 	PINMUX_IPSR_MSEL(IP14_25_23, MSIOF0_RXD, SEL_SOF0_0),
 	PINMUX_IPSR_MSEL(IP14_25_23, ADICHS0, SEL_RAD_0),
 	PINMUX_IPSR_MSEL(IP14_25_23, VI1_DATA0_C, SEL_VI1_2),
-	PINMUX_IPSR_DATA(IP14_25_23, VI1_G3_B),
+	PINMUX_IPSR_GPSR(IP14_25_23, VI1_G3_B),
 	PINMUX_IPSR_MSEL(IP14_28_26, MSIOF0_SS1, SEL_SOF0_0),
 	PINMUX_IPSR_MSEL(IP14_28_26, MMC_D6, SEL_MMC_0),
 	PINMUX_IPSR_MSEL(IP14_28_26, ADICHS1, SEL_RAD_0),
 	PINMUX_IPSR_MSEL(IP14_28_26, TX0_E, SEL_SCIF0_4),
 	PINMUX_IPSR_MSEL(IP14_28_26, VI1_HSYNC_N_C, SEL_VI1_2),
 	PINMUX_IPSR_MSEL(IP14_28_26, SCL7_C, SEL_IIC7_2),
-	PINMUX_IPSR_DATA(IP14_28_26, VI1_G4_B),
+	PINMUX_IPSR_GPSR(IP14_28_26, VI1_G4_B),
 	PINMUX_IPSR_MSEL(IP14_31_29, MSIOF0_SS2, SEL_SOF0_0),
 	PINMUX_IPSR_MSEL(IP14_31_29, MMC_D7, SEL_MMC_0),
 	PINMUX_IPSR_MSEL(IP14_31_29, ADICHS2, SEL_RAD_0),
 	PINMUX_IPSR_MSEL(IP14_31_29, RX0_E, SEL_SCIF0_4),
 	PINMUX_IPSR_MSEL(IP14_31_29, VI1_VSYNC_N_C, SEL_VI1_2),
 	PINMUX_IPSR_MSEL(IP14_31_29, SDA7_C, SEL_IIC7_2),
-	PINMUX_IPSR_DATA(IP14_31_29, VI1_G5_B),
+	PINMUX_IPSR_GPSR(IP14_31_29, VI1_G5_B),
 
 	/* IPSR15 */
 	PINMUX_IPSR_MSEL(IP15_1_0, SIM0_RST, SEL_SIM_0),
 	PINMUX_IPSR_MSEL(IP15_1_0, IETX, SEL_IEB_0),
 	PINMUX_IPSR_MSEL(IP15_1_0, CAN1_TX_D, SEL_CAN1_3),
-	PINMUX_IPSR_DATA(IP15_3_2, SIM0_CLK),
+	PINMUX_IPSR_GPSR(IP15_3_2, SIM0_CLK),
 	PINMUX_IPSR_MSEL(IP15_3_2, IECLK, SEL_IEB_0),
 	PINMUX_IPSR_MSEL(IP15_3_2, CAN_CLK_C, SEL_CANCLK_2),
 	PINMUX_IPSR_MSEL(IP15_5_4, SIM0_D, SEL_SIM_0),
@@ -1611,19 +1611,19 @@
 	PINMUX_IPSR_MSEL(IP15_8_6, GPS_CLK, SEL_GPS_0),
 	PINMUX_IPSR_MSEL(IP15_8_6, DU1_DOTCLKIN_C, SEL_DIS_2),
 	PINMUX_IPSR_MSEL(IP15_8_6, AUDIO_CLKB_B, SEL_ADG_1),
-	PINMUX_IPSR_DATA(IP15_8_6, PWM5_B),
+	PINMUX_IPSR_GPSR(IP15_8_6, PWM5_B),
 	PINMUX_IPSR_MSEL(IP15_8_6, SCIFA3_TXD_C, SEL_SCIFA3_2),
 	PINMUX_IPSR_MSEL(IP15_11_9, GPS_SIGN, SEL_GPS_0),
 	PINMUX_IPSR_MSEL(IP15_11_9, TX4_C, SEL_SCIF4_2),
 	PINMUX_IPSR_MSEL(IP15_11_9, SCIFA4_TXD_C, SEL_SCIFA4_2),
-	PINMUX_IPSR_DATA(IP15_11_9, PWM5),
-	PINMUX_IPSR_DATA(IP15_11_9, VI1_G6_B),
+	PINMUX_IPSR_GPSR(IP15_11_9, PWM5),
+	PINMUX_IPSR_GPSR(IP15_11_9, VI1_G6_B),
 	PINMUX_IPSR_MSEL(IP15_11_9, SCIFA3_RXD_C, SEL_SCIFA3_2),
 	PINMUX_IPSR_MSEL(IP15_14_12, GPS_MAG, SEL_GPS_0),
 	PINMUX_IPSR_MSEL(IP15_14_12, RX4_C, SEL_SCIF4_2),
 	PINMUX_IPSR_MSEL(IP15_14_12, SCIFA4_RXD_C, SEL_SCIFA4_2),
-	PINMUX_IPSR_DATA(IP15_14_12, PWM6),
-	PINMUX_IPSR_DATA(IP15_14_12, VI1_G7_B),
+	PINMUX_IPSR_GPSR(IP15_14_12, PWM6),
+	PINMUX_IPSR_GPSR(IP15_14_12, VI1_G7_B),
 	PINMUX_IPSR_MSEL(IP15_14_12, SCIFA3_SCK_C, SEL_SCIFA3_2),
 	PINMUX_IPSR_MSEL(IP15_17_15, HCTS0_N, SEL_HSCIF0_0),
 	PINMUX_IPSR_MSEL(IP15_17_15, SCIFB0_CTS_N, SEL_SCIFB_0),
@@ -1638,7 +1638,7 @@
 	PINMUX_IPSR_MSEL(IP15_23_21, SCIFB0_SCK, SEL_SCIFB_0),
 	PINMUX_IPSR_MSEL(IP15_23_21, GLO_Q0_C, SEL_GPS_2),
 	PINMUX_IPSR_MSEL(IP15_23_21, CAN_CLK, SEL_CANCLK_0),
-	PINMUX_IPSR_DATA(IP15_23_21, TCLK2),
+	PINMUX_IPSR_GPSR(IP15_23_21, TCLK2),
 	PINMUX_IPSR_MSEL(IP15_23_21, VI1_DATA3_C, SEL_VI1_2),
 	PINMUX_IPSR_MSEL(IP15_26_24, HRX0, SEL_HSCIF0_0),
 	PINMUX_IPSR_MSEL(IP15_26_24, SCIFB0_RXD, SEL_SCIFB_0),
@@ -1654,25 +1654,25 @@
 	/* IPSR16 */
 	PINMUX_IPSR_MSEL(IP16_2_0, HRX1, SEL_HSCIF1_0),
 	PINMUX_IPSR_MSEL(IP16_2_0, SCIFB1_RXD, SEL_SCIFB1_0),
-	PINMUX_IPSR_DATA(IP16_2_0, VI1_R0_B),
+	PINMUX_IPSR_GPSR(IP16_2_0, VI1_R0_B),
 	PINMUX_IPSR_MSEL(IP16_2_0, GLO_SDATA_C, SEL_GPS_2),
 	PINMUX_IPSR_MSEL(IP16_2_0, VI1_DATA6_C, SEL_VI1_2),
 	PINMUX_IPSR_MSEL(IP16_5_3, HTX1, SEL_HSCIF1_0),
 	PINMUX_IPSR_MSEL(IP16_5_3, SCIFB1_TXD, SEL_SCIFB1_0),
-	PINMUX_IPSR_DATA(IP16_5_3, VI1_R1_B),
+	PINMUX_IPSR_GPSR(IP16_5_3, VI1_R1_B),
 	PINMUX_IPSR_MSEL(IP16_5_3, GLO_SS_C, SEL_GPS_2),
 	PINMUX_IPSR_MSEL(IP16_5_3, VI1_DATA7_C, SEL_VI1_2),
 	PINMUX_IPSR_MSEL(IP16_7_6, HSCK1, SEL_HSCIF1_0),
 	PINMUX_IPSR_MSEL(IP16_7_6, SCIFB1_SCK, SEL_SCIFB1_0),
-	PINMUX_IPSR_DATA(IP16_7_6, MLB_CLK),
+	PINMUX_IPSR_GPSR(IP16_7_6, MLB_CLK),
 	PINMUX_IPSR_MSEL(IP16_7_6, GLO_RFON_C, SEL_GPS_2),
 	PINMUX_IPSR_MSEL(IP16_9_8, HCTS1_N, SEL_HSCIF1_0),
-	PINMUX_IPSR_DATA(IP16_9_8, SCIFB1_CTS_N),
-	PINMUX_IPSR_DATA(IP16_9_8, MLB_SIG),
+	PINMUX_IPSR_GPSR(IP16_9_8, SCIFB1_CTS_N),
+	PINMUX_IPSR_GPSR(IP16_9_8, MLB_SIG),
 	PINMUX_IPSR_MSEL(IP16_9_8, CAN1_TX_B, SEL_CAN1_1),
 	PINMUX_IPSR_MSEL(IP16_11_10, HRTS1_N, SEL_HSCIF1_0),
-	PINMUX_IPSR_DATA(IP16_11_10, SCIFB1_RTS_N),
-	PINMUX_IPSR_DATA(IP16_11_10, MLB_DAT),
+	PINMUX_IPSR_GPSR(IP16_11_10, SCIFB1_RTS_N),
+	PINMUX_IPSR_GPSR(IP16_11_10, MLB_DAT),
 	PINMUX_IPSR_MSEL(IP16_11_10, CAN1_RX_B, SEL_CAN1_1),
 };
 
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7794.c b/drivers/pinctrl/sh-pfc/pfc-r8a7794.c
index 3718c78..38912cf 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a7794.c
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a7794.c
@@ -1,9 +1,9 @@
 /*
  * r8a7794 processor support - PFC hardware block.
  *
- * Copyright (C) 2014 Renesas Electronics Corporation
+ * Copyright (C) 2014-2015 Renesas Electronics Corporation
  * Copyright (C) 2015 Renesas Solutions Corp.
- * Copyright (C) 2015 Cogent  Embedded, Inc., <source@cogentembedded.com>
+ * Copyright (C) 2015-2016 Cogent Embedded, Inc., <source@cogentembedded.com>
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2
@@ -623,848 +623,848 @@
 	PINMUX_SINGLE(SD1_DATA3),
 
 	/* IPSR0 */
-	PINMUX_IPSR_DATA(IP0_0, SD1_CD),
+	PINMUX_IPSR_GPSR(IP0_0, SD1_CD),
 	PINMUX_IPSR_MSEL(IP0_0, CAN0_RX, SEL_CAN0_0),
-	PINMUX_IPSR_DATA(IP0_9_8, SD1_WP),
-	PINMUX_IPSR_DATA(IP0_9_8, IRQ7),
+	PINMUX_IPSR_GPSR(IP0_9_8, SD1_WP),
+	PINMUX_IPSR_GPSR(IP0_9_8, IRQ7),
 	PINMUX_IPSR_MSEL(IP0_9_8, CAN0_TX, SEL_CAN0_0),
-	PINMUX_IPSR_DATA(IP0_10, MMC_CLK),
-	PINMUX_IPSR_DATA(IP0_10, SD2_CLK),
-	PINMUX_IPSR_DATA(IP0_11, MMC_CMD),
-	PINMUX_IPSR_DATA(IP0_11, SD2_CMD),
-	PINMUX_IPSR_DATA(IP0_12, MMC_D0),
-	PINMUX_IPSR_DATA(IP0_12, SD2_DATA0),
-	PINMUX_IPSR_DATA(IP0_13, MMC_D1),
-	PINMUX_IPSR_DATA(IP0_13, SD2_DATA1),
-	PINMUX_IPSR_DATA(IP0_14, MMC_D2),
-	PINMUX_IPSR_DATA(IP0_14, SD2_DATA2),
-	PINMUX_IPSR_DATA(IP0_15, MMC_D3),
-	PINMUX_IPSR_DATA(IP0_15, SD2_DATA3),
-	PINMUX_IPSR_DATA(IP0_16, MMC_D4),
-	PINMUX_IPSR_DATA(IP0_16, SD2_CD),
-	PINMUX_IPSR_DATA(IP0_17, MMC_D5),
-	PINMUX_IPSR_DATA(IP0_17, SD2_WP),
-	PINMUX_IPSR_DATA(IP0_19_18, MMC_D6),
+	PINMUX_IPSR_GPSR(IP0_10, MMC_CLK),
+	PINMUX_IPSR_GPSR(IP0_10, SD2_CLK),
+	PINMUX_IPSR_GPSR(IP0_11, MMC_CMD),
+	PINMUX_IPSR_GPSR(IP0_11, SD2_CMD),
+	PINMUX_IPSR_GPSR(IP0_12, MMC_D0),
+	PINMUX_IPSR_GPSR(IP0_12, SD2_DATA0),
+	PINMUX_IPSR_GPSR(IP0_13, MMC_D1),
+	PINMUX_IPSR_GPSR(IP0_13, SD2_DATA1),
+	PINMUX_IPSR_GPSR(IP0_14, MMC_D2),
+	PINMUX_IPSR_GPSR(IP0_14, SD2_DATA2),
+	PINMUX_IPSR_GPSR(IP0_15, MMC_D3),
+	PINMUX_IPSR_GPSR(IP0_15, SD2_DATA3),
+	PINMUX_IPSR_GPSR(IP0_16, MMC_D4),
+	PINMUX_IPSR_GPSR(IP0_16, SD2_CD),
+	PINMUX_IPSR_GPSR(IP0_17, MMC_D5),
+	PINMUX_IPSR_GPSR(IP0_17, SD2_WP),
+	PINMUX_IPSR_GPSR(IP0_19_18, MMC_D6),
 	PINMUX_IPSR_MSEL(IP0_19_18, SCIF0_RXD, SEL_SCIF0_0),
 	PINMUX_IPSR_MSEL(IP0_19_18, I2C2_SCL_B, SEL_I2C02_1),
 	PINMUX_IPSR_MSEL(IP0_19_18, CAN1_RX, SEL_CAN1_0),
-	PINMUX_IPSR_DATA(IP0_21_20, MMC_D7),
+	PINMUX_IPSR_GPSR(IP0_21_20, MMC_D7),
 	PINMUX_IPSR_MSEL(IP0_21_20, SCIF0_TXD, SEL_SCIF0_0),
 	PINMUX_IPSR_MSEL(IP0_21_20, I2C2_SDA_B, SEL_I2C02_1),
 	PINMUX_IPSR_MSEL(IP0_21_20, CAN1_TX, SEL_CAN1_0),
-	PINMUX_IPSR_DATA(IP0_23_22, D0),
+	PINMUX_IPSR_GPSR(IP0_23_22, D0),
 	PINMUX_IPSR_MSEL(IP0_23_22, SCIFA3_SCK_B, SEL_SCIFA3_1),
-	PINMUX_IPSR_DATA(IP0_23_22, IRQ4),
-	PINMUX_IPSR_DATA(IP0_24, D1),
+	PINMUX_IPSR_GPSR(IP0_23_22, IRQ4),
+	PINMUX_IPSR_GPSR(IP0_24, D1),
 	PINMUX_IPSR_MSEL(IP0_24, SCIFA3_RXD_B, SEL_SCIFA3_1),
-	PINMUX_IPSR_DATA(IP0_25, D2),
+	PINMUX_IPSR_GPSR(IP0_25, D2),
 	PINMUX_IPSR_MSEL(IP0_25, SCIFA3_TXD_B, SEL_SCIFA3_1),
-	PINMUX_IPSR_DATA(IP0_27_26, D3),
+	PINMUX_IPSR_GPSR(IP0_27_26, D3),
 	PINMUX_IPSR_MSEL(IP0_27_26, I2C3_SCL_B, SEL_I2C03_1),
 	PINMUX_IPSR_MSEL(IP0_27_26, SCIF5_RXD_B, SEL_SCIF5_1),
-	PINMUX_IPSR_DATA(IP0_29_28, D4),
+	PINMUX_IPSR_GPSR(IP0_29_28, D4),
 	PINMUX_IPSR_MSEL(IP0_29_28, I2C3_SDA_B, SEL_I2C03_1),
 	PINMUX_IPSR_MSEL(IP0_29_28, SCIF5_TXD_B, SEL_SCIF5_1),
-	PINMUX_IPSR_DATA(IP0_31_30, D5),
+	PINMUX_IPSR_GPSR(IP0_31_30, D5),
 	PINMUX_IPSR_MSEL(IP0_31_30, SCIF4_RXD_B, SEL_SCIF4_1),
 	PINMUX_IPSR_MSEL(IP0_31_30, I2C0_SCL_D, SEL_I2C00_3),
 
 	/* IPSR1 */
-	PINMUX_IPSR_DATA(IP1_1_0, D6),
+	PINMUX_IPSR_GPSR(IP1_1_0, D6),
 	PINMUX_IPSR_MSEL(IP1_1_0, SCIF4_TXD_B, SEL_SCIF4_1),
 	PINMUX_IPSR_MSEL(IP1_1_0, I2C0_SDA_D, SEL_I2C00_3),
-	PINMUX_IPSR_DATA(IP1_3_2, D7),
-	PINMUX_IPSR_DATA(IP1_3_2, IRQ3),
+	PINMUX_IPSR_GPSR(IP1_3_2, D7),
+	PINMUX_IPSR_GPSR(IP1_3_2, IRQ3),
 	PINMUX_IPSR_MSEL(IP1_3_2, TCLK1, SEL_TMU_0),
-	PINMUX_IPSR_DATA(IP1_3_2, PWM6_B),
-	PINMUX_IPSR_DATA(IP1_5_4, D8),
-	PINMUX_IPSR_DATA(IP1_5_4, HSCIF2_HRX),
+	PINMUX_IPSR_GPSR(IP1_3_2, PWM6_B),
+	PINMUX_IPSR_GPSR(IP1_5_4, D8),
+	PINMUX_IPSR_GPSR(IP1_5_4, HSCIF2_HRX),
 	PINMUX_IPSR_MSEL(IP1_5_4, I2C1_SCL_B, SEL_I2C01_1),
-	PINMUX_IPSR_DATA(IP1_7_6, D9),
-	PINMUX_IPSR_DATA(IP1_7_6, HSCIF2_HTX),
+	PINMUX_IPSR_GPSR(IP1_7_6, D9),
+	PINMUX_IPSR_GPSR(IP1_7_6, HSCIF2_HTX),
 	PINMUX_IPSR_MSEL(IP1_7_6, I2C1_SDA_B, SEL_I2C01_1),
-	PINMUX_IPSR_DATA(IP1_10_8, D10),
-	PINMUX_IPSR_DATA(IP1_10_8, HSCIF2_HSCK),
+	PINMUX_IPSR_GPSR(IP1_10_8, D10),
+	PINMUX_IPSR_GPSR(IP1_10_8, HSCIF2_HSCK),
 	PINMUX_IPSR_MSEL(IP1_10_8, SCIF1_SCK_C, SEL_SCIF1_2),
-	PINMUX_IPSR_DATA(IP1_10_8, IRQ6),
-	PINMUX_IPSR_DATA(IP1_10_8, PWM5_C),
-	PINMUX_IPSR_DATA(IP1_12_11, D11),
-	PINMUX_IPSR_DATA(IP1_12_11, HSCIF2_HCTS_N),
+	PINMUX_IPSR_GPSR(IP1_10_8, IRQ6),
+	PINMUX_IPSR_GPSR(IP1_10_8, PWM5_C),
+	PINMUX_IPSR_GPSR(IP1_12_11, D11),
+	PINMUX_IPSR_GPSR(IP1_12_11, HSCIF2_HCTS_N),
 	PINMUX_IPSR_MSEL(IP1_12_11, SCIF1_RXD_C, SEL_SCIF1_2),
 	PINMUX_IPSR_MSEL(IP1_12_11, I2C1_SCL_D, SEL_I2C01_3),
-	PINMUX_IPSR_DATA(IP1_14_13, D12),
-	PINMUX_IPSR_DATA(IP1_14_13, HSCIF2_HRTS_N),
+	PINMUX_IPSR_GPSR(IP1_14_13, D12),
+	PINMUX_IPSR_GPSR(IP1_14_13, HSCIF2_HRTS_N),
 	PINMUX_IPSR_MSEL(IP1_14_13, SCIF1_TXD_C, SEL_SCIF1_2),
 	PINMUX_IPSR_MSEL(IP1_14_13, I2C1_SDA_D, SEL_I2C01_3),
-	PINMUX_IPSR_DATA(IP1_17_15, D13),
+	PINMUX_IPSR_GPSR(IP1_17_15, D13),
 	PINMUX_IPSR_MSEL(IP1_17_15, SCIFA1_SCK, SEL_SCIFA1_0),
-	PINMUX_IPSR_DATA(IP1_17_15, TANS1),
-	PINMUX_IPSR_DATA(IP1_17_15, PWM2_C),
+	PINMUX_IPSR_GPSR(IP1_17_15, TANS1),
+	PINMUX_IPSR_GPSR(IP1_17_15, PWM2_C),
 	PINMUX_IPSR_MSEL(IP1_17_15, TCLK2_B, SEL_TMU_1),
-	PINMUX_IPSR_DATA(IP1_19_18, D14),
+	PINMUX_IPSR_GPSR(IP1_19_18, D14),
 	PINMUX_IPSR_MSEL(IP1_19_18, SCIFA1_RXD, SEL_SCIFA1_0),
 	PINMUX_IPSR_MSEL(IP1_19_18, IIC0_SCL_B, SEL_IIC00_1),
-	PINMUX_IPSR_DATA(IP1_21_20, D15),
+	PINMUX_IPSR_GPSR(IP1_21_20, D15),
 	PINMUX_IPSR_MSEL(IP1_21_20, SCIFA1_TXD, SEL_SCIFA1_0),
 	PINMUX_IPSR_MSEL(IP1_21_20, IIC0_SDA_B, SEL_IIC00_1),
-	PINMUX_IPSR_DATA(IP1_23_22, A0),
-	PINMUX_IPSR_DATA(IP1_23_22, SCIFB1_SCK),
-	PINMUX_IPSR_DATA(IP1_23_22, PWM3_B),
-	PINMUX_IPSR_DATA(IP1_24, A1),
-	PINMUX_IPSR_DATA(IP1_24, SCIFB1_TXD),
-	PINMUX_IPSR_DATA(IP1_26, A3),
-	PINMUX_IPSR_DATA(IP1_26, SCIFB0_SCK),
-	PINMUX_IPSR_DATA(IP1_27, A4),
-	PINMUX_IPSR_DATA(IP1_27, SCIFB0_TXD),
-	PINMUX_IPSR_DATA(IP1_29_28, A5),
-	PINMUX_IPSR_DATA(IP1_29_28, SCIFB0_RXD),
-	PINMUX_IPSR_DATA(IP1_29_28, PWM4_B),
-	PINMUX_IPSR_DATA(IP1_29_28, TPUTO3_C),
-	PINMUX_IPSR_DATA(IP1_31_30, A6),
-	PINMUX_IPSR_DATA(IP1_31_30, SCIFB0_CTS_N),
+	PINMUX_IPSR_GPSR(IP1_23_22, A0),
+	PINMUX_IPSR_GPSR(IP1_23_22, SCIFB1_SCK),
+	PINMUX_IPSR_GPSR(IP1_23_22, PWM3_B),
+	PINMUX_IPSR_GPSR(IP1_24, A1),
+	PINMUX_IPSR_GPSR(IP1_24, SCIFB1_TXD),
+	PINMUX_IPSR_GPSR(IP1_26, A3),
+	PINMUX_IPSR_GPSR(IP1_26, SCIFB0_SCK),
+	PINMUX_IPSR_GPSR(IP1_27, A4),
+	PINMUX_IPSR_GPSR(IP1_27, SCIFB0_TXD),
+	PINMUX_IPSR_GPSR(IP1_29_28, A5),
+	PINMUX_IPSR_GPSR(IP1_29_28, SCIFB0_RXD),
+	PINMUX_IPSR_GPSR(IP1_29_28, PWM4_B),
+	PINMUX_IPSR_GPSR(IP1_29_28, TPUTO3_C),
+	PINMUX_IPSR_GPSR(IP1_31_30, A6),
+	PINMUX_IPSR_GPSR(IP1_31_30, SCIFB0_CTS_N),
 	PINMUX_IPSR_MSEL(IP1_31_30, SCIFA4_RXD_B, SEL_SCIFA4_1),
-	PINMUX_IPSR_DATA(IP1_31_30, TPUTO2_C),
+	PINMUX_IPSR_GPSR(IP1_31_30, TPUTO2_C),
 
 	/* IPSR2 */
-	PINMUX_IPSR_DATA(IP2_1_0, A7),
-	PINMUX_IPSR_DATA(IP2_1_0, SCIFB0_RTS_N),
+	PINMUX_IPSR_GPSR(IP2_1_0, A7),
+	PINMUX_IPSR_GPSR(IP2_1_0, SCIFB0_RTS_N),
 	PINMUX_IPSR_MSEL(IP2_1_0, SCIFA4_TXD_B, SEL_SCIFA4_1),
-	PINMUX_IPSR_DATA(IP2_3_2, A8),
+	PINMUX_IPSR_GPSR(IP2_3_2, A8),
 	PINMUX_IPSR_MSEL(IP2_3_2, MSIOF1_RXD, SEL_MSI1_0),
 	PINMUX_IPSR_MSEL(IP2_3_2, SCIFA0_RXD_B, SEL_SCIFA0_1),
-	PINMUX_IPSR_DATA(IP2_5_4, A9),
+	PINMUX_IPSR_GPSR(IP2_5_4, A9),
 	PINMUX_IPSR_MSEL(IP2_5_4, MSIOF1_TXD, SEL_MSI1_0),
 	PINMUX_IPSR_MSEL(IP2_5_4, SCIFA0_TXD_B, SEL_SCIFA0_1),
-	PINMUX_IPSR_DATA(IP2_7_6, A10),
+	PINMUX_IPSR_GPSR(IP2_7_6, A10),
 	PINMUX_IPSR_MSEL(IP2_7_6, MSIOF1_SCK, SEL_MSI1_0),
 	PINMUX_IPSR_MSEL(IP2_7_6, IIC1_SCL_B, SEL_IIC01_1),
-	PINMUX_IPSR_DATA(IP2_9_8, A11),
+	PINMUX_IPSR_GPSR(IP2_9_8, A11),
 	PINMUX_IPSR_MSEL(IP2_9_8, MSIOF1_SYNC, SEL_MSI1_0),
 	PINMUX_IPSR_MSEL(IP2_9_8, IIC1_SDA_B, SEL_IIC01_1),
-	PINMUX_IPSR_DATA(IP2_11_10, A12),
+	PINMUX_IPSR_GPSR(IP2_11_10, A12),
 	PINMUX_IPSR_MSEL(IP2_11_10, MSIOF1_SS1, SEL_MSI1_0),
 	PINMUX_IPSR_MSEL(IP2_11_10, SCIFA5_RXD_B, SEL_SCIFA5_1),
-	PINMUX_IPSR_DATA(IP2_13_12, A13),
+	PINMUX_IPSR_GPSR(IP2_13_12, A13),
 	PINMUX_IPSR_MSEL(IP2_13_12, MSIOF1_SS2, SEL_MSI1_0),
 	PINMUX_IPSR_MSEL(IP2_13_12, SCIFA5_TXD_B, SEL_SCIFA5_1),
-	PINMUX_IPSR_DATA(IP2_15_14, A14),
+	PINMUX_IPSR_GPSR(IP2_15_14, A14),
 	PINMUX_IPSR_MSEL(IP2_15_14, MSIOF2_RXD, SEL_MSI2_0),
 	PINMUX_IPSR_MSEL(IP2_15_14, HSCIF0_HRX_B, SEL_HSCIF0_1),
 	PINMUX_IPSR_MSEL(IP2_15_14, DREQ1_N, SEL_LBS_0),
-	PINMUX_IPSR_DATA(IP2_17_16, A15),
+	PINMUX_IPSR_GPSR(IP2_17_16, A15),
 	PINMUX_IPSR_MSEL(IP2_17_16, MSIOF2_TXD, SEL_MSI2_0),
 	PINMUX_IPSR_MSEL(IP2_17_16, HSCIF0_HTX_B, SEL_HSCIF0_1),
 	PINMUX_IPSR_MSEL(IP2_17_16, DACK1, SEL_LBS_0),
-	PINMUX_IPSR_DATA(IP2_20_18, A16),
+	PINMUX_IPSR_GPSR(IP2_20_18, A16),
 	PINMUX_IPSR_MSEL(IP2_20_18, MSIOF2_SCK, SEL_MSI2_0),
 	PINMUX_IPSR_MSEL(IP2_20_18, HSCIF0_HSCK_B, SEL_HSCIF0_1),
 	PINMUX_IPSR_MSEL(IP2_20_18, SPEEDIN, SEL_RSP_0),
 	PINMUX_IPSR_MSEL(IP2_20_18, VSP, SEL_SPDM_0),
 	PINMUX_IPSR_MSEL(IP2_20_18, CAN_CLK_C, SEL_CAN_2),
-	PINMUX_IPSR_DATA(IP2_20_18, TPUTO2_B),
-	PINMUX_IPSR_DATA(IP2_23_21, A17),
+	PINMUX_IPSR_GPSR(IP2_20_18, TPUTO2_B),
+	PINMUX_IPSR_GPSR(IP2_23_21, A17),
 	PINMUX_IPSR_MSEL(IP2_23_21, MSIOF2_SYNC, SEL_MSI2_0),
 	PINMUX_IPSR_MSEL(IP2_23_21, SCIF4_RXD_E, SEL_SCIF4_4),
 	PINMUX_IPSR_MSEL(IP2_23_21, CAN1_RX_B, SEL_CAN1_1),
 	PINMUX_IPSR_MSEL(IP2_23_21, AVB_AVTP_CAPTURE_B, SEL_AVB_1),
-	PINMUX_IPSR_DATA(IP2_26_24, A18),
+	PINMUX_IPSR_GPSR(IP2_26_24, A18),
 	PINMUX_IPSR_MSEL(IP2_26_24, MSIOF2_SS1, SEL_MSI2_0),
 	PINMUX_IPSR_MSEL(IP2_26_24, SCIF4_TXD_E, SEL_SCIF4_4),
 	PINMUX_IPSR_MSEL(IP2_26_24, CAN1_TX_B, SEL_CAN1_1),
 	PINMUX_IPSR_MSEL(IP2_26_24, AVB_AVTP_MATCH_B, SEL_AVB_1),
-	PINMUX_IPSR_DATA(IP2_29_27, A19),
+	PINMUX_IPSR_GPSR(IP2_29_27, A19),
 	PINMUX_IPSR_MSEL(IP2_29_27, MSIOF2_SS2, SEL_MSI2_0),
-	PINMUX_IPSR_DATA(IP2_29_27, PWM4),
-	PINMUX_IPSR_DATA(IP2_29_27, TPUTO2),
-	PINMUX_IPSR_DATA(IP2_29_27, MOUT0),
-	PINMUX_IPSR_DATA(IP2_31_30, A20),
-	PINMUX_IPSR_DATA(IP2_31_30, SPCLK),
-	PINMUX_IPSR_DATA(IP2_29_27, MOUT1),
+	PINMUX_IPSR_GPSR(IP2_29_27, PWM4),
+	PINMUX_IPSR_GPSR(IP2_29_27, TPUTO2),
+	PINMUX_IPSR_GPSR(IP2_29_27, MOUT0),
+	PINMUX_IPSR_GPSR(IP2_31_30, A20),
+	PINMUX_IPSR_GPSR(IP2_31_30, SPCLK),
+	PINMUX_IPSR_GPSR(IP2_29_27, MOUT1),
 
 	/* IPSR3 */
-	PINMUX_IPSR_DATA(IP3_1_0, A21),
-	PINMUX_IPSR_DATA(IP3_1_0, MOSI_IO0),
-	PINMUX_IPSR_DATA(IP3_1_0, MOUT2),
-	PINMUX_IPSR_DATA(IP3_3_2, A22),
-	PINMUX_IPSR_DATA(IP3_3_2, MISO_IO1),
-	PINMUX_IPSR_DATA(IP3_3_2, MOUT5),
-	PINMUX_IPSR_DATA(IP3_3_2, ATADIR1_N),
-	PINMUX_IPSR_DATA(IP3_5_4, A23),
-	PINMUX_IPSR_DATA(IP3_5_4, IO2),
-	PINMUX_IPSR_DATA(IP3_5_4, MOUT6),
-	PINMUX_IPSR_DATA(IP3_5_4, ATAWR1_N),
-	PINMUX_IPSR_DATA(IP3_7_6, A24),
-	PINMUX_IPSR_DATA(IP3_7_6, IO3),
-	PINMUX_IPSR_DATA(IP3_7_6, EX_WAIT2),
-	PINMUX_IPSR_DATA(IP3_9_8, A25),
-	PINMUX_IPSR_DATA(IP3_9_8, SSL),
-	PINMUX_IPSR_DATA(IP3_9_8, ATARD1_N),
-	PINMUX_IPSR_DATA(IP3_10, CS0_N),
-	PINMUX_IPSR_DATA(IP3_10, VI1_DATA8),
-	PINMUX_IPSR_DATA(IP3_11, CS1_N_A26),
-	PINMUX_IPSR_DATA(IP3_11, VI1_DATA9),
-	PINMUX_IPSR_DATA(IP3_12, EX_CS0_N),
-	PINMUX_IPSR_DATA(IP3_12, VI1_DATA10),
-	PINMUX_IPSR_DATA(IP3_14_13, EX_CS1_N),
-	PINMUX_IPSR_DATA(IP3_14_13, TPUTO3_B),
-	PINMUX_IPSR_DATA(IP3_14_13, SCIFB2_RXD),
-	PINMUX_IPSR_DATA(IP3_14_13, VI1_DATA11),
-	PINMUX_IPSR_DATA(IP3_17_15, EX_CS2_N),
-	PINMUX_IPSR_DATA(IP3_17_15, PWM0),
+	PINMUX_IPSR_GPSR(IP3_1_0, A21),
+	PINMUX_IPSR_GPSR(IP3_1_0, MOSI_IO0),
+	PINMUX_IPSR_GPSR(IP3_1_0, MOUT2),
+	PINMUX_IPSR_GPSR(IP3_3_2, A22),
+	PINMUX_IPSR_GPSR(IP3_3_2, MISO_IO1),
+	PINMUX_IPSR_GPSR(IP3_3_2, MOUT5),
+	PINMUX_IPSR_GPSR(IP3_3_2, ATADIR1_N),
+	PINMUX_IPSR_GPSR(IP3_5_4, A23),
+	PINMUX_IPSR_GPSR(IP3_5_4, IO2),
+	PINMUX_IPSR_GPSR(IP3_5_4, MOUT6),
+	PINMUX_IPSR_GPSR(IP3_5_4, ATAWR1_N),
+	PINMUX_IPSR_GPSR(IP3_7_6, A24),
+	PINMUX_IPSR_GPSR(IP3_7_6, IO3),
+	PINMUX_IPSR_GPSR(IP3_7_6, EX_WAIT2),
+	PINMUX_IPSR_GPSR(IP3_9_8, A25),
+	PINMUX_IPSR_GPSR(IP3_9_8, SSL),
+	PINMUX_IPSR_GPSR(IP3_9_8, ATARD1_N),
+	PINMUX_IPSR_GPSR(IP3_10, CS0_N),
+	PINMUX_IPSR_GPSR(IP3_10, VI1_DATA8),
+	PINMUX_IPSR_GPSR(IP3_11, CS1_N_A26),
+	PINMUX_IPSR_GPSR(IP3_11, VI1_DATA9),
+	PINMUX_IPSR_GPSR(IP3_12, EX_CS0_N),
+	PINMUX_IPSR_GPSR(IP3_12, VI1_DATA10),
+	PINMUX_IPSR_GPSR(IP3_14_13, EX_CS1_N),
+	PINMUX_IPSR_GPSR(IP3_14_13, TPUTO3_B),
+	PINMUX_IPSR_GPSR(IP3_14_13, SCIFB2_RXD),
+	PINMUX_IPSR_GPSR(IP3_14_13, VI1_DATA11),
+	PINMUX_IPSR_GPSR(IP3_17_15, EX_CS2_N),
+	PINMUX_IPSR_GPSR(IP3_17_15, PWM0),
 	PINMUX_IPSR_MSEL(IP3_17_15, SCIF4_RXD_C, SEL_SCIF4_2),
 	PINMUX_IPSR_MSEL(IP3_17_15, TS_SDATA_B, SEL_TSIF0_1),
 	PINMUX_IPSR_MSEL(IP3_17_15, RIF0_SYNC, SEL_DR0_0),
-	PINMUX_IPSR_DATA(IP3_17_15, TPUTO3),
-	PINMUX_IPSR_DATA(IP3_17_15, SCIFB2_TXD),
+	PINMUX_IPSR_GPSR(IP3_17_15, TPUTO3),
+	PINMUX_IPSR_GPSR(IP3_17_15, SCIFB2_TXD),
 	PINMUX_IPSR_MSEL(IP3_17_15, SDATA_B, SEL_FSN_1),
-	PINMUX_IPSR_DATA(IP3_20_18, EX_CS3_N),
+	PINMUX_IPSR_GPSR(IP3_20_18, EX_CS3_N),
 	PINMUX_IPSR_MSEL(IP3_20_18, SCIFA2_SCK, SEL_SCIFA2_0),
 	PINMUX_IPSR_MSEL(IP3_20_18, SCIF4_TXD_C, SEL_SCIF4_2),
 	PINMUX_IPSR_MSEL(IP3_20_18, TS_SCK_B, SEL_TSIF0_1),
 	PINMUX_IPSR_MSEL(IP3_20_18, RIF0_CLK, SEL_DR0_0),
 	PINMUX_IPSR_MSEL(IP3_20_18, BPFCLK, SEL_DARC_0),
-	PINMUX_IPSR_DATA(IP3_20_18, SCIFB2_SCK),
+	PINMUX_IPSR_GPSR(IP3_20_18, SCIFB2_SCK),
 	PINMUX_IPSR_MSEL(IP3_20_18, MDATA_B, SEL_FSN_1),
-	PINMUX_IPSR_DATA(IP3_23_21, EX_CS4_N),
+	PINMUX_IPSR_GPSR(IP3_23_21, EX_CS4_N),
 	PINMUX_IPSR_MSEL(IP3_23_21, SCIFA2_RXD, SEL_SCIFA2_0),
 	PINMUX_IPSR_MSEL(IP3_23_21, I2C2_SCL_E, SEL_I2C02_4),
 	PINMUX_IPSR_MSEL(IP3_23_21, TS_SDEN_B, SEL_TSIF0_1),
 	PINMUX_IPSR_MSEL(IP3_23_21, RIF0_D0, SEL_DR0_0),
 	PINMUX_IPSR_MSEL(IP3_23_21, FMCLK, SEL_DARC_0),
-	PINMUX_IPSR_DATA(IP3_23_21, SCIFB2_CTS_N),
+	PINMUX_IPSR_GPSR(IP3_23_21, SCIFB2_CTS_N),
 	PINMUX_IPSR_MSEL(IP3_23_21, SCKZ_B, SEL_FSN_1),
-	PINMUX_IPSR_DATA(IP3_26_24, EX_CS5_N),
+	PINMUX_IPSR_GPSR(IP3_26_24, EX_CS5_N),
 	PINMUX_IPSR_MSEL(IP3_26_24, SCIFA2_TXD, SEL_SCIFA2_0),
 	PINMUX_IPSR_MSEL(IP3_26_24, I2C2_SDA_E, SEL_I2C02_4),
 	PINMUX_IPSR_MSEL(IP3_26_24, TS_SPSYNC_B, SEL_TSIF0_1),
 	PINMUX_IPSR_MSEL(IP3_26_24, RIF0_D1, SEL_DR1_0),
 	PINMUX_IPSR_MSEL(IP3_26_24, FMIN, SEL_DARC_0),
-	PINMUX_IPSR_DATA(IP3_26_24, SCIFB2_RTS_N),
+	PINMUX_IPSR_GPSR(IP3_26_24, SCIFB2_RTS_N),
 	PINMUX_IPSR_MSEL(IP3_26_24, STM_N_B, SEL_FSN_1),
-	PINMUX_IPSR_DATA(IP3_29_27, BS_N),
-	PINMUX_IPSR_DATA(IP3_29_27, DRACK0),
-	PINMUX_IPSR_DATA(IP3_29_27, PWM1_C),
-	PINMUX_IPSR_DATA(IP3_29_27, TPUTO0_C),
-	PINMUX_IPSR_DATA(IP3_29_27, ATACS01_N),
+	PINMUX_IPSR_GPSR(IP3_29_27, BS_N),
+	PINMUX_IPSR_GPSR(IP3_29_27, DRACK0),
+	PINMUX_IPSR_GPSR(IP3_29_27, PWM1_C),
+	PINMUX_IPSR_GPSR(IP3_29_27, TPUTO0_C),
+	PINMUX_IPSR_GPSR(IP3_29_27, ATACS01_N),
 	PINMUX_IPSR_MSEL(IP3_29_27, MTS_N_B, SEL_FSN_1),
-	PINMUX_IPSR_DATA(IP3_30, RD_N),
-	PINMUX_IPSR_DATA(IP3_30, ATACS11_N),
-	PINMUX_IPSR_DATA(IP3_31, RD_WR_N),
-	PINMUX_IPSR_DATA(IP3_31, ATAG1_N),
+	PINMUX_IPSR_GPSR(IP3_30, RD_N),
+	PINMUX_IPSR_GPSR(IP3_30, ATACS11_N),
+	PINMUX_IPSR_GPSR(IP3_31, RD_WR_N),
+	PINMUX_IPSR_GPSR(IP3_31, ATAG1_N),
 
 	/* IPSR4 */
-	PINMUX_IPSR_DATA(IP4_1_0, EX_WAIT0),
+	PINMUX_IPSR_GPSR(IP4_1_0, EX_WAIT0),
 	PINMUX_IPSR_MSEL(IP4_1_0, CAN_CLK_B, SEL_CAN_1),
 	PINMUX_IPSR_MSEL(IP4_1_0, SCIF_CLK, SEL_SCIF0_0),
-	PINMUX_IPSR_DATA(IP4_1_0, PWMFSW0),
-	PINMUX_IPSR_DATA(IP4_4_2, DU0_DR0),
-	PINMUX_IPSR_DATA(IP4_4_2, LCDOUT16),
+	PINMUX_IPSR_GPSR(IP4_1_0, PWMFSW0),
+	PINMUX_IPSR_GPSR(IP4_4_2, DU0_DR0),
+	PINMUX_IPSR_GPSR(IP4_4_2, LCDOUT16),
 	PINMUX_IPSR_MSEL(IP4_4_2, SCIF5_RXD_C, SEL_SCIF5_2),
 	PINMUX_IPSR_MSEL(IP4_4_2, I2C2_SCL_D, SEL_I2C02_3),
-	PINMUX_IPSR_DATA(IP4_4_2, CC50_STATE0),
-	PINMUX_IPSR_DATA(IP4_7_5, DU0_DR1),
-	PINMUX_IPSR_DATA(IP4_7_5, LCDOUT17),
+	PINMUX_IPSR_GPSR(IP4_4_2, CC50_STATE0),
+	PINMUX_IPSR_GPSR(IP4_7_5, DU0_DR1),
+	PINMUX_IPSR_GPSR(IP4_7_5, LCDOUT17),
 	PINMUX_IPSR_MSEL(IP4_7_5, SCIF5_TXD_C, SEL_SCIF5_2),
 	PINMUX_IPSR_MSEL(IP4_7_5, I2C2_SDA_D, SEL_I2C02_3),
-	PINMUX_IPSR_DATA(IP4_9_8, CC50_STATE1),
-	PINMUX_IPSR_DATA(IP4_9_8, DU0_DR2),
-	PINMUX_IPSR_DATA(IP4_9_8, LCDOUT18),
-	PINMUX_IPSR_DATA(IP4_9_8, CC50_STATE2),
-	PINMUX_IPSR_DATA(IP4_11_10, DU0_DR3),
-	PINMUX_IPSR_DATA(IP4_11_10, LCDOUT19),
-	PINMUX_IPSR_DATA(IP4_11_10, CC50_STATE3),
-	PINMUX_IPSR_DATA(IP4_13_12, DU0_DR4),
-	PINMUX_IPSR_DATA(IP4_13_12, LCDOUT20),
-	PINMUX_IPSR_DATA(IP4_13_12, CC50_STATE4),
-	PINMUX_IPSR_DATA(IP4_15_14, DU0_DR5),
-	PINMUX_IPSR_DATA(IP4_15_14, LCDOUT21),
-	PINMUX_IPSR_DATA(IP4_15_14, CC50_STATE5),
-	PINMUX_IPSR_DATA(IP4_17_16, DU0_DR6),
-	PINMUX_IPSR_DATA(IP4_17_16, LCDOUT22),
-	PINMUX_IPSR_DATA(IP4_17_16, CC50_STATE6),
-	PINMUX_IPSR_DATA(IP4_19_18, DU0_DR7),
-	PINMUX_IPSR_DATA(IP4_19_18, LCDOUT23),
-	PINMUX_IPSR_DATA(IP4_19_18, CC50_STATE7),
-	PINMUX_IPSR_DATA(IP4_22_20, DU0_DG0),
-	PINMUX_IPSR_DATA(IP4_22_20, LCDOUT8),
+	PINMUX_IPSR_GPSR(IP4_9_8, CC50_STATE1),
+	PINMUX_IPSR_GPSR(IP4_9_8, DU0_DR2),
+	PINMUX_IPSR_GPSR(IP4_9_8, LCDOUT18),
+	PINMUX_IPSR_GPSR(IP4_9_8, CC50_STATE2),
+	PINMUX_IPSR_GPSR(IP4_11_10, DU0_DR3),
+	PINMUX_IPSR_GPSR(IP4_11_10, LCDOUT19),
+	PINMUX_IPSR_GPSR(IP4_11_10, CC50_STATE3),
+	PINMUX_IPSR_GPSR(IP4_13_12, DU0_DR4),
+	PINMUX_IPSR_GPSR(IP4_13_12, LCDOUT20),
+	PINMUX_IPSR_GPSR(IP4_13_12, CC50_STATE4),
+	PINMUX_IPSR_GPSR(IP4_15_14, DU0_DR5),
+	PINMUX_IPSR_GPSR(IP4_15_14, LCDOUT21),
+	PINMUX_IPSR_GPSR(IP4_15_14, CC50_STATE5),
+	PINMUX_IPSR_GPSR(IP4_17_16, DU0_DR6),
+	PINMUX_IPSR_GPSR(IP4_17_16, LCDOUT22),
+	PINMUX_IPSR_GPSR(IP4_17_16, CC50_STATE6),
+	PINMUX_IPSR_GPSR(IP4_19_18, DU0_DR7),
+	PINMUX_IPSR_GPSR(IP4_19_18, LCDOUT23),
+	PINMUX_IPSR_GPSR(IP4_19_18, CC50_STATE7),
+	PINMUX_IPSR_GPSR(IP4_22_20, DU0_DG0),
+	PINMUX_IPSR_GPSR(IP4_22_20, LCDOUT8),
 	PINMUX_IPSR_MSEL(IP4_22_20, SCIFA0_RXD_C, SEL_SCIFA0_2),
 	PINMUX_IPSR_MSEL(IP4_22_20, I2C3_SCL_D, SEL_I2C03_3),
-	PINMUX_IPSR_DATA(IP4_22_20, CC50_STATE8),
-	PINMUX_IPSR_DATA(IP4_25_23, DU0_DG1),
-	PINMUX_IPSR_DATA(IP4_25_23, LCDOUT9),
+	PINMUX_IPSR_GPSR(IP4_22_20, CC50_STATE8),
+	PINMUX_IPSR_GPSR(IP4_25_23, DU0_DG1),
+	PINMUX_IPSR_GPSR(IP4_25_23, LCDOUT9),
 	PINMUX_IPSR_MSEL(IP4_25_23, SCIFA0_TXD_C, SEL_SCIFA0_2),
 	PINMUX_IPSR_MSEL(IP4_25_23, I2C3_SDA_D, SEL_I2C03_3),
-	PINMUX_IPSR_DATA(IP4_25_23, CC50_STATE9),
-	PINMUX_IPSR_DATA(IP4_27_26, DU0_DG2),
-	PINMUX_IPSR_DATA(IP4_27_26, LCDOUT10),
-	PINMUX_IPSR_DATA(IP4_27_26, CC50_STATE10),
-	PINMUX_IPSR_DATA(IP4_29_28, DU0_DG3),
-	PINMUX_IPSR_DATA(IP4_29_28, LCDOUT11),
-	PINMUX_IPSR_DATA(IP4_29_28, CC50_STATE11),
-	PINMUX_IPSR_DATA(IP4_31_30, DU0_DG4),
-	PINMUX_IPSR_DATA(IP4_31_30, LCDOUT12),
-	PINMUX_IPSR_DATA(IP4_31_30, CC50_STATE12),
+	PINMUX_IPSR_GPSR(IP4_25_23, CC50_STATE9),
+	PINMUX_IPSR_GPSR(IP4_27_26, DU0_DG2),
+	PINMUX_IPSR_GPSR(IP4_27_26, LCDOUT10),
+	PINMUX_IPSR_GPSR(IP4_27_26, CC50_STATE10),
+	PINMUX_IPSR_GPSR(IP4_29_28, DU0_DG3),
+	PINMUX_IPSR_GPSR(IP4_29_28, LCDOUT11),
+	PINMUX_IPSR_GPSR(IP4_29_28, CC50_STATE11),
+	PINMUX_IPSR_GPSR(IP4_31_30, DU0_DG4),
+	PINMUX_IPSR_GPSR(IP4_31_30, LCDOUT12),
+	PINMUX_IPSR_GPSR(IP4_31_30, CC50_STATE12),
 
 	/* IPSR5 */
-	PINMUX_IPSR_DATA(IP5_1_0, DU0_DG5),
-	PINMUX_IPSR_DATA(IP5_1_0, LCDOUT13),
-	PINMUX_IPSR_DATA(IP5_1_0, CC50_STATE13),
-	PINMUX_IPSR_DATA(IP5_3_2, DU0_DG6),
-	PINMUX_IPSR_DATA(IP5_3_2, LCDOUT14),
-	PINMUX_IPSR_DATA(IP5_3_2, CC50_STATE14),
-	PINMUX_IPSR_DATA(IP5_5_4, DU0_DG7),
-	PINMUX_IPSR_DATA(IP5_5_4, LCDOUT15),
-	PINMUX_IPSR_DATA(IP5_5_4, CC50_STATE15),
-	PINMUX_IPSR_DATA(IP5_8_6, DU0_DB0),
-	PINMUX_IPSR_DATA(IP5_8_6, LCDOUT0),
+	PINMUX_IPSR_GPSR(IP5_1_0, DU0_DG5),
+	PINMUX_IPSR_GPSR(IP5_1_0, LCDOUT13),
+	PINMUX_IPSR_GPSR(IP5_1_0, CC50_STATE13),
+	PINMUX_IPSR_GPSR(IP5_3_2, DU0_DG6),
+	PINMUX_IPSR_GPSR(IP5_3_2, LCDOUT14),
+	PINMUX_IPSR_GPSR(IP5_3_2, CC50_STATE14),
+	PINMUX_IPSR_GPSR(IP5_5_4, DU0_DG7),
+	PINMUX_IPSR_GPSR(IP5_5_4, LCDOUT15),
+	PINMUX_IPSR_GPSR(IP5_5_4, CC50_STATE15),
+	PINMUX_IPSR_GPSR(IP5_8_6, DU0_DB0),
+	PINMUX_IPSR_GPSR(IP5_8_6, LCDOUT0),
 	PINMUX_IPSR_MSEL(IP5_8_6, SCIFA4_RXD_C, SEL_SCIFA4_2),
 	PINMUX_IPSR_MSEL(IP5_8_6, I2C4_SCL_D, SEL_I2C04_3),
 	PINMUX_IPSR_MSEL(IP7_8_6, CAN0_RX_C, SEL_CAN0_2),
-	PINMUX_IPSR_DATA(IP5_8_6, CC50_STATE16),
-	PINMUX_IPSR_DATA(IP5_11_9, DU0_DB1),
-	PINMUX_IPSR_DATA(IP5_11_9, LCDOUT1),
+	PINMUX_IPSR_GPSR(IP5_8_6, CC50_STATE16),
+	PINMUX_IPSR_GPSR(IP5_11_9, DU0_DB1),
+	PINMUX_IPSR_GPSR(IP5_11_9, LCDOUT1),
 	PINMUX_IPSR_MSEL(IP5_11_9, SCIFA4_TXD_C, SEL_SCIFA4_2),
 	PINMUX_IPSR_MSEL(IP5_11_9, I2C4_SDA_D, SEL_I2C04_3),
 	PINMUX_IPSR_MSEL(IP5_11_9, CAN0_TX_C, SEL_CAN0_2),
-	PINMUX_IPSR_DATA(IP5_11_9, CC50_STATE17),
-	PINMUX_IPSR_DATA(IP5_13_12, DU0_DB2),
-	PINMUX_IPSR_DATA(IP5_13_12, LCDOUT2),
-	PINMUX_IPSR_DATA(IP5_13_12, CC50_STATE18),
-	PINMUX_IPSR_DATA(IP5_15_14, DU0_DB3),
-	PINMUX_IPSR_DATA(IP5_15_14, LCDOUT3),
-	PINMUX_IPSR_DATA(IP5_15_14, CC50_STATE19),
-	PINMUX_IPSR_DATA(IP5_17_16, DU0_DB4),
-	PINMUX_IPSR_DATA(IP5_17_16, LCDOUT4),
-	PINMUX_IPSR_DATA(IP5_17_16, CC50_STATE20),
-	PINMUX_IPSR_DATA(IP5_19_18, DU0_DB5),
-	PINMUX_IPSR_DATA(IP5_19_18, LCDOUT5),
-	PINMUX_IPSR_DATA(IP5_19_18, CC50_STATE21),
-	PINMUX_IPSR_DATA(IP5_21_20, DU0_DB6),
-	PINMUX_IPSR_DATA(IP5_21_20, LCDOUT6),
-	PINMUX_IPSR_DATA(IP5_21_20, CC50_STATE22),
-	PINMUX_IPSR_DATA(IP5_23_22, DU0_DB7),
-	PINMUX_IPSR_DATA(IP5_23_22, LCDOUT7),
-	PINMUX_IPSR_DATA(IP5_23_22, CC50_STATE23),
-	PINMUX_IPSR_DATA(IP5_25_24, DU0_DOTCLKIN),
-	PINMUX_IPSR_DATA(IP5_25_24, QSTVA_QVS),
-	PINMUX_IPSR_DATA(IP5_25_24, CC50_STATE24),
-	PINMUX_IPSR_DATA(IP5_27_26, DU0_DOTCLKOUT0),
-	PINMUX_IPSR_DATA(IP5_27_26, QCLK),
-	PINMUX_IPSR_DATA(IP5_27_26, CC50_STATE25),
-	PINMUX_IPSR_DATA(IP5_29_28, DU0_DOTCLKOUT1),
-	PINMUX_IPSR_DATA(IP5_29_28, QSTVB_QVE),
-	PINMUX_IPSR_DATA(IP5_29_28, CC50_STATE26),
-	PINMUX_IPSR_DATA(IP5_31_30, DU0_EXHSYNC_DU0_HSYNC),
-	PINMUX_IPSR_DATA(IP5_31_30, QSTH_QHS),
-	PINMUX_IPSR_DATA(IP5_31_30, CC50_STATE27),
+	PINMUX_IPSR_GPSR(IP5_11_9, CC50_STATE17),
+	PINMUX_IPSR_GPSR(IP5_13_12, DU0_DB2),
+	PINMUX_IPSR_GPSR(IP5_13_12, LCDOUT2),
+	PINMUX_IPSR_GPSR(IP5_13_12, CC50_STATE18),
+	PINMUX_IPSR_GPSR(IP5_15_14, DU0_DB3),
+	PINMUX_IPSR_GPSR(IP5_15_14, LCDOUT3),
+	PINMUX_IPSR_GPSR(IP5_15_14, CC50_STATE19),
+	PINMUX_IPSR_GPSR(IP5_17_16, DU0_DB4),
+	PINMUX_IPSR_GPSR(IP5_17_16, LCDOUT4),
+	PINMUX_IPSR_GPSR(IP5_17_16, CC50_STATE20),
+	PINMUX_IPSR_GPSR(IP5_19_18, DU0_DB5),
+	PINMUX_IPSR_GPSR(IP5_19_18, LCDOUT5),
+	PINMUX_IPSR_GPSR(IP5_19_18, CC50_STATE21),
+	PINMUX_IPSR_GPSR(IP5_21_20, DU0_DB6),
+	PINMUX_IPSR_GPSR(IP5_21_20, LCDOUT6),
+	PINMUX_IPSR_GPSR(IP5_21_20, CC50_STATE22),
+	PINMUX_IPSR_GPSR(IP5_23_22, DU0_DB7),
+	PINMUX_IPSR_GPSR(IP5_23_22, LCDOUT7),
+	PINMUX_IPSR_GPSR(IP5_23_22, CC50_STATE23),
+	PINMUX_IPSR_GPSR(IP5_25_24, DU0_DOTCLKIN),
+	PINMUX_IPSR_GPSR(IP5_25_24, QSTVA_QVS),
+	PINMUX_IPSR_GPSR(IP5_25_24, CC50_STATE24),
+	PINMUX_IPSR_GPSR(IP5_27_26, DU0_DOTCLKOUT0),
+	PINMUX_IPSR_GPSR(IP5_27_26, QCLK),
+	PINMUX_IPSR_GPSR(IP5_27_26, CC50_STATE25),
+	PINMUX_IPSR_GPSR(IP5_29_28, DU0_DOTCLKOUT1),
+	PINMUX_IPSR_GPSR(IP5_29_28, QSTVB_QVE),
+	PINMUX_IPSR_GPSR(IP5_29_28, CC50_STATE26),
+	PINMUX_IPSR_GPSR(IP5_31_30, DU0_EXHSYNC_DU0_HSYNC),
+	PINMUX_IPSR_GPSR(IP5_31_30, QSTH_QHS),
+	PINMUX_IPSR_GPSR(IP5_31_30, CC50_STATE27),
 
 	/* IPSR6 */
-	PINMUX_IPSR_DATA(IP6_1_0, DU0_EXVSYNC_DU0_VSYNC),
-	PINMUX_IPSR_DATA(IP6_1_0, QSTB_QHE),
-	PINMUX_IPSR_DATA(IP6_1_0, CC50_STATE28),
-	PINMUX_IPSR_DATA(IP6_3_2, DU0_EXODDF_DU0_ODDF_DISP_CDE),
-	PINMUX_IPSR_DATA(IP6_3_2, QCPV_QDE),
-	PINMUX_IPSR_DATA(IP6_3_2, CC50_STATE29),
-	PINMUX_IPSR_DATA(IP6_5_4, DU0_DISP),
-	PINMUX_IPSR_DATA(IP6_5_4, QPOLA),
-	PINMUX_IPSR_DATA(IP6_5_4, CC50_STATE30),
-	PINMUX_IPSR_DATA(IP6_7_6, DU0_CDE),
-	PINMUX_IPSR_DATA(IP6_7_6, QPOLB),
-	PINMUX_IPSR_DATA(IP6_7_6, CC50_STATE31),
-	PINMUX_IPSR_DATA(IP6_8, VI0_CLK),
-	PINMUX_IPSR_DATA(IP6_8, AVB_RX_CLK),
-	PINMUX_IPSR_DATA(IP6_9, VI0_DATA0_VI0_B0),
-	PINMUX_IPSR_DATA(IP6_9, AVB_RX_DV),
-	PINMUX_IPSR_DATA(IP6_10, VI0_DATA1_VI0_B1),
-	PINMUX_IPSR_DATA(IP6_10, AVB_RXD0),
-	PINMUX_IPSR_DATA(IP6_11, VI0_DATA2_VI0_B2),
-	PINMUX_IPSR_DATA(IP6_11, AVB_RXD1),
-	PINMUX_IPSR_DATA(IP6_12, VI0_DATA3_VI0_B3),
-	PINMUX_IPSR_DATA(IP6_12, AVB_RXD2),
-	PINMUX_IPSR_DATA(IP6_13, VI0_DATA4_VI0_B4),
-	PINMUX_IPSR_DATA(IP6_13, AVB_RXD3),
-	PINMUX_IPSR_DATA(IP6_14, VI0_DATA5_VI0_B5),
-	PINMUX_IPSR_DATA(IP6_14, AVB_RXD4),
-	PINMUX_IPSR_DATA(IP6_15, VI0_DATA6_VI0_B6),
-	PINMUX_IPSR_DATA(IP6_15, AVB_RXD5),
-	PINMUX_IPSR_DATA(IP6_16, VI0_DATA7_VI0_B7),
-	PINMUX_IPSR_DATA(IP6_16, AVB_RXD6),
-	PINMUX_IPSR_DATA(IP6_19_17, VI0_CLKENB),
+	PINMUX_IPSR_GPSR(IP6_1_0, DU0_EXVSYNC_DU0_VSYNC),
+	PINMUX_IPSR_GPSR(IP6_1_0, QSTB_QHE),
+	PINMUX_IPSR_GPSR(IP6_1_0, CC50_STATE28),
+	PINMUX_IPSR_GPSR(IP6_3_2, DU0_EXODDF_DU0_ODDF_DISP_CDE),
+	PINMUX_IPSR_GPSR(IP6_3_2, QCPV_QDE),
+	PINMUX_IPSR_GPSR(IP6_3_2, CC50_STATE29),
+	PINMUX_IPSR_GPSR(IP6_5_4, DU0_DISP),
+	PINMUX_IPSR_GPSR(IP6_5_4, QPOLA),
+	PINMUX_IPSR_GPSR(IP6_5_4, CC50_STATE30),
+	PINMUX_IPSR_GPSR(IP6_7_6, DU0_CDE),
+	PINMUX_IPSR_GPSR(IP6_7_6, QPOLB),
+	PINMUX_IPSR_GPSR(IP6_7_6, CC50_STATE31),
+	PINMUX_IPSR_GPSR(IP6_8, VI0_CLK),
+	PINMUX_IPSR_GPSR(IP6_8, AVB_RX_CLK),
+	PINMUX_IPSR_GPSR(IP6_9, VI0_DATA0_VI0_B0),
+	PINMUX_IPSR_GPSR(IP6_9, AVB_RX_DV),
+	PINMUX_IPSR_GPSR(IP6_10, VI0_DATA1_VI0_B1),
+	PINMUX_IPSR_GPSR(IP6_10, AVB_RXD0),
+	PINMUX_IPSR_GPSR(IP6_11, VI0_DATA2_VI0_B2),
+	PINMUX_IPSR_GPSR(IP6_11, AVB_RXD1),
+	PINMUX_IPSR_GPSR(IP6_12, VI0_DATA3_VI0_B3),
+	PINMUX_IPSR_GPSR(IP6_12, AVB_RXD2),
+	PINMUX_IPSR_GPSR(IP6_13, VI0_DATA4_VI0_B4),
+	PINMUX_IPSR_GPSR(IP6_13, AVB_RXD3),
+	PINMUX_IPSR_GPSR(IP6_14, VI0_DATA5_VI0_B5),
+	PINMUX_IPSR_GPSR(IP6_14, AVB_RXD4),
+	PINMUX_IPSR_GPSR(IP6_15, VI0_DATA6_VI0_B6),
+	PINMUX_IPSR_GPSR(IP6_15, AVB_RXD5),
+	PINMUX_IPSR_GPSR(IP6_16, VI0_DATA7_VI0_B7),
+	PINMUX_IPSR_GPSR(IP6_16, AVB_RXD6),
+	PINMUX_IPSR_GPSR(IP6_19_17, VI0_CLKENB),
 	PINMUX_IPSR_MSEL(IP6_19_17, I2C3_SCL, SEL_I2C03_0),
 	PINMUX_IPSR_MSEL(IP6_19_17, SCIFA5_RXD_C, SEL_SCIFA5_2),
 	PINMUX_IPSR_MSEL(IP6_19_17, IETX_C, SEL_IEB_2),
-	PINMUX_IPSR_DATA(IP6_19_17, AVB_RXD7),
-	PINMUX_IPSR_DATA(IP6_22_20, VI0_FIELD),
+	PINMUX_IPSR_GPSR(IP6_19_17, AVB_RXD7),
+	PINMUX_IPSR_GPSR(IP6_22_20, VI0_FIELD),
 	PINMUX_IPSR_MSEL(IP6_22_20, I2C3_SDA, SEL_I2C03_0),
 	PINMUX_IPSR_MSEL(IP6_22_20, SCIFA5_TXD_C, SEL_SCIFA5_2),
 	PINMUX_IPSR_MSEL(IP6_22_20, IECLK_C, SEL_IEB_2),
-	PINMUX_IPSR_DATA(IP6_22_20, AVB_RX_ER),
-	PINMUX_IPSR_DATA(IP6_25_23, VI0_HSYNC_N),
+	PINMUX_IPSR_GPSR(IP6_22_20, AVB_RX_ER),
+	PINMUX_IPSR_GPSR(IP6_25_23, VI0_HSYNC_N),
 	PINMUX_IPSR_MSEL(IP6_25_23, SCIF0_RXD_B, SEL_SCIF0_1),
 	PINMUX_IPSR_MSEL(IP6_25_23, I2C0_SCL_C, SEL_I2C00_2),
 	PINMUX_IPSR_MSEL(IP6_25_23, IERX_C, SEL_IEB_2),
-	PINMUX_IPSR_DATA(IP6_25_23, AVB_COL),
-	PINMUX_IPSR_DATA(IP6_28_26, VI0_VSYNC_N),
+	PINMUX_IPSR_GPSR(IP6_25_23, AVB_COL),
+	PINMUX_IPSR_GPSR(IP6_28_26, VI0_VSYNC_N),
 	PINMUX_IPSR_MSEL(IP6_28_26, SCIF0_TXD_B, SEL_SCIF0_1),
 	PINMUX_IPSR_MSEL(IP6_28_26, I2C0_SDA_C, SEL_I2C00_2),
 	PINMUX_IPSR_MSEL(IP6_28_26, AUDIO_CLKOUT_B, SEL_ADG_1),
-	PINMUX_IPSR_DATA(IP6_28_26, AVB_TX_EN),
+	PINMUX_IPSR_GPSR(IP6_28_26, AVB_TX_EN),
 	PINMUX_IPSR_MSEL(IP6_31_29, ETH_MDIO, SEL_ETH_0),
-	PINMUX_IPSR_DATA(IP6_31_29, VI0_G0),
+	PINMUX_IPSR_GPSR(IP6_31_29, VI0_G0),
 	PINMUX_IPSR_MSEL(IP6_31_29, MSIOF2_RXD_B, SEL_MSI2_1),
 	PINMUX_IPSR_MSEL(IP6_31_29, IIC0_SCL_D, SEL_IIC00_3),
-	PINMUX_IPSR_DATA(IP6_31_29, AVB_TX_CLK),
+	PINMUX_IPSR_GPSR(IP6_31_29, AVB_TX_CLK),
 	PINMUX_IPSR_MSEL(IP6_31_29, ADIDATA, SEL_RAD_0),
 	PINMUX_IPSR_MSEL(IP6_31_29, AD_DI, SEL_ADI_0),
 
 	/* IPSR7 */
 	PINMUX_IPSR_MSEL(IP7_2_0, ETH_CRS_DV, SEL_ETH_0),
-	PINMUX_IPSR_DATA(IP7_2_0, VI0_G1),
+	PINMUX_IPSR_GPSR(IP7_2_0, VI0_G1),
 	PINMUX_IPSR_MSEL(IP7_2_0, MSIOF2_TXD_B, SEL_MSI2_1),
 	PINMUX_IPSR_MSEL(IP7_2_0, IIC0_SDA_D, SEL_IIC00_3),
-	PINMUX_IPSR_DATA(IP7_2_0, AVB_TXD0),
+	PINMUX_IPSR_GPSR(IP7_2_0, AVB_TXD0),
 	PINMUX_IPSR_MSEL(IP7_2_0, ADICS_SAMP, SEL_RAD_0),
 	PINMUX_IPSR_MSEL(IP7_2_0, AD_DO, SEL_ADI_0),
 	PINMUX_IPSR_MSEL(IP7_5_3, ETH_RX_ER, SEL_ETH_0),
-	PINMUX_IPSR_DATA(IP7_5_3, VI0_G2),
+	PINMUX_IPSR_GPSR(IP7_5_3, VI0_G2),
 	PINMUX_IPSR_MSEL(IP7_5_3, MSIOF2_SCK_B, SEL_MSI2_1),
 	PINMUX_IPSR_MSEL(IP7_5_3, CAN0_RX_B, SEL_CAN0_1),
-	PINMUX_IPSR_DATA(IP7_5_3, AVB_TXD1),
+	PINMUX_IPSR_GPSR(IP7_5_3, AVB_TXD1),
 	PINMUX_IPSR_MSEL(IP7_5_3, ADICLK, SEL_RAD_0),
 	PINMUX_IPSR_MSEL(IP7_5_3, AD_CLK, SEL_ADI_0),
 	PINMUX_IPSR_MSEL(IP7_8_6, ETH_RXD0, SEL_ETH_0),
-	PINMUX_IPSR_DATA(IP7_8_6, VI0_G3),
+	PINMUX_IPSR_GPSR(IP7_8_6, VI0_G3),
 	PINMUX_IPSR_MSEL(IP7_8_6, MSIOF2_SYNC_B, SEL_MSI2_1),
 	PINMUX_IPSR_MSEL(IP7_8_6, CAN0_TX_B, SEL_CAN0_1),
-	PINMUX_IPSR_DATA(IP7_8_6, AVB_TXD2),
+	PINMUX_IPSR_GPSR(IP7_8_6, AVB_TXD2),
 	PINMUX_IPSR_MSEL(IP7_8_6, ADICHS0, SEL_RAD_0),
 	PINMUX_IPSR_MSEL(IP7_8_6, AD_NCS_N, SEL_ADI_0),
 	PINMUX_IPSR_MSEL(IP7_11_9, ETH_RXD1, SEL_ETH_0),
-	PINMUX_IPSR_DATA(IP7_11_9, VI0_G4),
+	PINMUX_IPSR_GPSR(IP7_11_9, VI0_G4),
 	PINMUX_IPSR_MSEL(IP7_11_9, MSIOF2_SS1_B, SEL_MSI2_1),
 	PINMUX_IPSR_MSEL(IP7_11_9, SCIF4_RXD_D, SEL_SCIF4_3),
-	PINMUX_IPSR_DATA(IP7_11_9, AVB_TXD3),
+	PINMUX_IPSR_GPSR(IP7_11_9, AVB_TXD3),
 	PINMUX_IPSR_MSEL(IP7_11_9, ADICHS1, SEL_RAD_0),
 	PINMUX_IPSR_MSEL(IP7_14_12, ETH_LINK, SEL_ETH_0),
-	PINMUX_IPSR_DATA(IP7_14_12, VI0_G5),
+	PINMUX_IPSR_GPSR(IP7_14_12, VI0_G5),
 	PINMUX_IPSR_MSEL(IP7_14_12, MSIOF2_SS2_B, SEL_MSI2_1),
 	PINMUX_IPSR_MSEL(IP7_14_12, SCIF4_TXD_D, SEL_SCIF4_3),
-	PINMUX_IPSR_DATA(IP7_14_12, AVB_TXD4),
+	PINMUX_IPSR_GPSR(IP7_14_12, AVB_TXD4),
 	PINMUX_IPSR_MSEL(IP7_14_12, ADICHS2, SEL_RAD_0),
 	PINMUX_IPSR_MSEL(IP7_17_15, ETH_REFCLK, SEL_ETH_0),
-	PINMUX_IPSR_DATA(IP7_17_15, VI0_G6),
+	PINMUX_IPSR_GPSR(IP7_17_15, VI0_G6),
 	PINMUX_IPSR_MSEL(IP7_17_15, SCIF2_SCK_C, SEL_SCIF2_2),
-	PINMUX_IPSR_DATA(IP7_17_15, AVB_TXD5),
+	PINMUX_IPSR_GPSR(IP7_17_15, AVB_TXD5),
 	PINMUX_IPSR_MSEL(IP7_17_15, SSI_SCK5_B, SEL_SSI5_1),
 	PINMUX_IPSR_MSEL(IP7_20_18, ETH_TXD1, SEL_ETH_0),
-	PINMUX_IPSR_DATA(IP7_20_18, VI0_G7),
+	PINMUX_IPSR_GPSR(IP7_20_18, VI0_G7),
 	PINMUX_IPSR_MSEL(IP7_20_18, SCIF2_RXD_C, SEL_SCIF2_2),
 	PINMUX_IPSR_MSEL(IP7_20_18, IIC1_SCL_D, SEL_IIC01_3),
-	PINMUX_IPSR_DATA(IP7_20_18, AVB_TXD6),
+	PINMUX_IPSR_GPSR(IP7_20_18, AVB_TXD6),
 	PINMUX_IPSR_MSEL(IP7_20_18, SSI_WS5_B, SEL_SSI5_1),
 	PINMUX_IPSR_MSEL(IP7_23_21, ETH_TX_EN, SEL_ETH_0),
-	PINMUX_IPSR_DATA(IP7_23_21, VI0_R0),
+	PINMUX_IPSR_GPSR(IP7_23_21, VI0_R0),
 	PINMUX_IPSR_MSEL(IP7_23_21, SCIF2_TXD_C, SEL_SCIF2_2),
 	PINMUX_IPSR_MSEL(IP7_23_21, IIC1_SDA_D, SEL_IIC01_3),
-	PINMUX_IPSR_DATA(IP7_23_21, AVB_TXD7),
+	PINMUX_IPSR_GPSR(IP7_23_21, AVB_TXD7),
 	PINMUX_IPSR_MSEL(IP7_23_21, SSI_SDATA5_B, SEL_SSI5_1),
 	PINMUX_IPSR_MSEL(IP7_26_24, ETH_MAGIC, SEL_ETH_0),
-	PINMUX_IPSR_DATA(IP7_26_24, VI0_R1),
+	PINMUX_IPSR_GPSR(IP7_26_24, VI0_R1),
 	PINMUX_IPSR_MSEL(IP7_26_24, SCIF3_SCK_B, SEL_SCIF3_1),
-	PINMUX_IPSR_DATA(IP7_26_24, AVB_TX_ER),
+	PINMUX_IPSR_GPSR(IP7_26_24, AVB_TX_ER),
 	PINMUX_IPSR_MSEL(IP7_26_24, SSI_SCK6_B, SEL_SSI6_1),
 	PINMUX_IPSR_MSEL(IP7_29_27, ETH_TXD0, SEL_ETH_0),
-	PINMUX_IPSR_DATA(IP7_29_27, VI0_R2),
+	PINMUX_IPSR_GPSR(IP7_29_27, VI0_R2),
 	PINMUX_IPSR_MSEL(IP7_29_27, SCIF3_RXD_B, SEL_SCIF3_1),
 	PINMUX_IPSR_MSEL(IP7_29_27, I2C4_SCL_E, SEL_I2C04_4),
-	PINMUX_IPSR_DATA(IP7_29_27, AVB_GTX_CLK),
+	PINMUX_IPSR_GPSR(IP7_29_27, AVB_GTX_CLK),
 	PINMUX_IPSR_MSEL(IP7_29_27, SSI_WS6_B, SEL_SSI6_1),
-	PINMUX_IPSR_DATA(IP7_31, DREQ0_N),
-	PINMUX_IPSR_DATA(IP7_31, SCIFB1_RXD),
+	PINMUX_IPSR_GPSR(IP7_31, DREQ0_N),
+	PINMUX_IPSR_GPSR(IP7_31, SCIFB1_RXD),
 
 	/* IPSR8 */
 	PINMUX_IPSR_MSEL(IP8_2_0, ETH_MDC, SEL_ETH_0),
-	PINMUX_IPSR_DATA(IP8_2_0, VI0_R3),
+	PINMUX_IPSR_GPSR(IP8_2_0, VI0_R3),
 	PINMUX_IPSR_MSEL(IP8_2_0, SCIF3_TXD_B, SEL_SCIF3_1),
 	PINMUX_IPSR_MSEL(IP8_2_0, I2C4_SDA_E, SEL_I2C04_4),
-	PINMUX_IPSR_DATA(IP8_2_0, AVB_MDC),
+	PINMUX_IPSR_GPSR(IP8_2_0, AVB_MDC),
 	PINMUX_IPSR_MSEL(IP8_2_0, SSI_SDATA6_B, SEL_SSI6_1),
 	PINMUX_IPSR_MSEL(IP8_5_3, HSCIF0_HRX, SEL_HSCIF0_0),
-	PINMUX_IPSR_DATA(IP8_5_3, VI0_R4),
+	PINMUX_IPSR_GPSR(IP8_5_3, VI0_R4),
 	PINMUX_IPSR_MSEL(IP8_5_3, I2C1_SCL_C, SEL_I2C01_2),
 	PINMUX_IPSR_MSEL(IP8_5_3, AUDIO_CLKA_B, SEL_ADG_1),
-	PINMUX_IPSR_DATA(IP8_5_3, AVB_MDIO),
+	PINMUX_IPSR_GPSR(IP8_5_3, AVB_MDIO),
 	PINMUX_IPSR_MSEL(IP8_5_3, SSI_SCK78_B, SEL_SSI7_1),
 	PINMUX_IPSR_MSEL(IP8_8_6, HSCIF0_HTX, SEL_HSCIF0_0),
-	PINMUX_IPSR_DATA(IP8_8_6, VI0_R5),
+	PINMUX_IPSR_GPSR(IP8_8_6, VI0_R5),
 	PINMUX_IPSR_MSEL(IP8_8_6, I2C1_SDA_C, SEL_I2C01_2),
 	PINMUX_IPSR_MSEL(IP8_8_6, AUDIO_CLKB_B, SEL_ADG_1),
-	PINMUX_IPSR_DATA(IP8_5_3, AVB_LINK),
+	PINMUX_IPSR_GPSR(IP8_5_3, AVB_LINK),
 	PINMUX_IPSR_MSEL(IP8_8_6, SSI_WS78_B, SEL_SSI7_1),
-	PINMUX_IPSR_DATA(IP8_11_9, HSCIF0_HCTS_N),
-	PINMUX_IPSR_DATA(IP8_11_9, VI0_R6),
+	PINMUX_IPSR_GPSR(IP8_11_9, HSCIF0_HCTS_N),
+	PINMUX_IPSR_GPSR(IP8_11_9, VI0_R6),
 	PINMUX_IPSR_MSEL(IP8_11_9, SCIF0_RXD_D, SEL_SCIF0_3),
 	PINMUX_IPSR_MSEL(IP8_11_9, I2C0_SCL_E, SEL_I2C00_4),
-	PINMUX_IPSR_DATA(IP8_11_9, AVB_MAGIC),
+	PINMUX_IPSR_GPSR(IP8_11_9, AVB_MAGIC),
 	PINMUX_IPSR_MSEL(IP8_11_9, SSI_SDATA7_B, SEL_SSI7_1),
-	PINMUX_IPSR_DATA(IP8_14_12, HSCIF0_HRTS_N),
-	PINMUX_IPSR_DATA(IP8_14_12, VI0_R7),
+	PINMUX_IPSR_GPSR(IP8_14_12, HSCIF0_HRTS_N),
+	PINMUX_IPSR_GPSR(IP8_14_12, VI0_R7),
 	PINMUX_IPSR_MSEL(IP8_14_12, SCIF0_TXD_D, SEL_SCIF0_3),
 	PINMUX_IPSR_MSEL(IP8_14_12, I2C0_SDA_E, SEL_I2C00_4),
-	PINMUX_IPSR_DATA(IP8_14_12, AVB_PHY_INT),
+	PINMUX_IPSR_GPSR(IP8_14_12, AVB_PHY_INT),
 	PINMUX_IPSR_MSEL(IP8_14_12, SSI_SDATA8_B, SEL_SSI8_1),
 	PINMUX_IPSR_MSEL(IP8_16_15, HSCIF0_HSCK, SEL_HSCIF0_0),
 	PINMUX_IPSR_MSEL(IP8_16_15, SCIF_CLK_B, SEL_SCIF0_1),
-	PINMUX_IPSR_DATA(IP8_16_15, AVB_CRS),
+	PINMUX_IPSR_GPSR(IP8_16_15, AVB_CRS),
 	PINMUX_IPSR_MSEL(IP8_16_15, AUDIO_CLKC_B, SEL_ADG_1),
 	PINMUX_IPSR_MSEL(IP8_19_17, I2C0_SCL, SEL_I2C00_0),
 	PINMUX_IPSR_MSEL(IP8_19_17, SCIF0_RXD_C, SEL_SCIF0_2),
-	PINMUX_IPSR_DATA(IP8_19_17, PWM5),
+	PINMUX_IPSR_GPSR(IP8_19_17, PWM5),
 	PINMUX_IPSR_MSEL(IP8_19_17, TCLK1_B, SEL_TMU_1),
-	PINMUX_IPSR_DATA(IP8_19_17, AVB_GTXREFCLK),
+	PINMUX_IPSR_GPSR(IP8_19_17, AVB_GTXREFCLK),
 	PINMUX_IPSR_MSEL(IP8_19_17, CAN1_RX_D, SEL_CAN1_3),
-	PINMUX_IPSR_DATA(IP8_19_17, TPUTO0_B),
+	PINMUX_IPSR_GPSR(IP8_19_17, TPUTO0_B),
 	PINMUX_IPSR_MSEL(IP8_22_20, I2C0_SDA, SEL_I2C00_0),
 	PINMUX_IPSR_MSEL(IP8_22_20, SCIF0_TXD_C, SEL_SCIF0_2),
-	PINMUX_IPSR_DATA(IP8_22_20, TPUTO0),
+	PINMUX_IPSR_GPSR(IP8_22_20, TPUTO0),
 	PINMUX_IPSR_MSEL(IP8_22_20, CAN_CLK, SEL_CAN_0),
-	PINMUX_IPSR_DATA(IP8_22_20, DVC_MUTE),
+	PINMUX_IPSR_GPSR(IP8_22_20, DVC_MUTE),
 	PINMUX_IPSR_MSEL(IP8_22_20, CAN1_TX_D, SEL_CAN1_3),
 	PINMUX_IPSR_MSEL(IP8_25_23, I2C1_SCL, SEL_I2C01_0),
 	PINMUX_IPSR_MSEL(IP8_25_23, SCIF4_RXD, SEL_SCIF4_0),
-	PINMUX_IPSR_DATA(IP8_25_23, PWM5_B),
-	PINMUX_IPSR_DATA(IP8_25_23, DU1_DR0),
+	PINMUX_IPSR_GPSR(IP8_25_23, PWM5_B),
+	PINMUX_IPSR_GPSR(IP8_25_23, DU1_DR0),
 	PINMUX_IPSR_MSEL(IP8_25_23, RIF1_SYNC_B, SEL_DR2_1),
 	PINMUX_IPSR_MSEL(IP8_25_23, TS_SDATA_D, SEL_TSIF0_3),
-	PINMUX_IPSR_DATA(IP8_25_23, TPUTO1_B),
+	PINMUX_IPSR_GPSR(IP8_25_23, TPUTO1_B),
 	PINMUX_IPSR_MSEL(IP8_28_26, I2C1_SDA, SEL_I2C01_0),
 	PINMUX_IPSR_MSEL(IP8_28_26, SCIF4_TXD, SEL_SCIF4_0),
-	PINMUX_IPSR_DATA(IP8_28_26, IRQ5),
-	PINMUX_IPSR_DATA(IP8_28_26, DU1_DR1),
+	PINMUX_IPSR_GPSR(IP8_28_26, IRQ5),
+	PINMUX_IPSR_GPSR(IP8_28_26, DU1_DR1),
 	PINMUX_IPSR_MSEL(IP8_28_26, RIF1_CLK_B, SEL_DR2_1),
 	PINMUX_IPSR_MSEL(IP8_28_26, TS_SCK_D, SEL_TSIF0_3),
 	PINMUX_IPSR_MSEL(IP8_28_26, BPFCLK_C, SEL_DARC_2),
-	PINMUX_IPSR_DATA(IP8_31_29, MSIOF0_RXD),
+	PINMUX_IPSR_GPSR(IP8_31_29, MSIOF0_RXD),
 	PINMUX_IPSR_MSEL(IP8_31_29, SCIF5_RXD, SEL_SCIF5_0),
 	PINMUX_IPSR_MSEL(IP8_31_29, I2C2_SCL_C, SEL_I2C02_2),
-	PINMUX_IPSR_DATA(IP8_31_29, DU1_DR2),
+	PINMUX_IPSR_GPSR(IP8_31_29, DU1_DR2),
 	PINMUX_IPSR_MSEL(IP8_31_29, RIF1_D0_B, SEL_DR2_1),
 	PINMUX_IPSR_MSEL(IP8_31_29, TS_SDEN_D, SEL_TSIF0_3),
 	PINMUX_IPSR_MSEL(IP8_31_29, FMCLK_C, SEL_DARC_2),
 	PINMUX_IPSR_MSEL(IP8_31_29, RDS_CLK, SEL_RDS_0),
 
 	/* IPSR9 */
-	PINMUX_IPSR_DATA(IP9_2_0, MSIOF0_TXD),
+	PINMUX_IPSR_GPSR(IP9_2_0, MSIOF0_TXD),
 	PINMUX_IPSR_MSEL(IP9_2_0, SCIF5_TXD, SEL_SCIF5_0),
 	PINMUX_IPSR_MSEL(IP9_2_0, I2C2_SDA_C, SEL_I2C02_2),
-	PINMUX_IPSR_DATA(IP9_2_0, DU1_DR3),
+	PINMUX_IPSR_GPSR(IP9_2_0, DU1_DR3),
 	PINMUX_IPSR_MSEL(IP9_2_0, RIF1_D1_B, SEL_DR3_1),
 	PINMUX_IPSR_MSEL(IP9_2_0, TS_SPSYNC_D, SEL_TSIF0_3),
 	PINMUX_IPSR_MSEL(IP9_2_0, FMIN_C, SEL_DARC_2),
 	PINMUX_IPSR_MSEL(IP9_2_0, RDS_DATA, SEL_RDS_0),
-	PINMUX_IPSR_DATA(IP9_5_3, MSIOF0_SCK),
-	PINMUX_IPSR_DATA(IP9_5_3, IRQ0),
+	PINMUX_IPSR_GPSR(IP9_5_3, MSIOF0_SCK),
+	PINMUX_IPSR_GPSR(IP9_5_3, IRQ0),
 	PINMUX_IPSR_MSEL(IP9_5_3, TS_SDATA, SEL_TSIF0_0),
-	PINMUX_IPSR_DATA(IP9_5_3, DU1_DR4),
+	PINMUX_IPSR_GPSR(IP9_5_3, DU1_DR4),
 	PINMUX_IPSR_MSEL(IP9_5_3, RIF1_SYNC, SEL_DR2_0),
-	PINMUX_IPSR_DATA(IP9_5_3, TPUTO1_C),
-	PINMUX_IPSR_DATA(IP9_8_6, MSIOF0_SYNC),
-	PINMUX_IPSR_DATA(IP9_8_6, PWM1),
+	PINMUX_IPSR_GPSR(IP9_5_3, TPUTO1_C),
+	PINMUX_IPSR_GPSR(IP9_8_6, MSIOF0_SYNC),
+	PINMUX_IPSR_GPSR(IP9_8_6, PWM1),
 	PINMUX_IPSR_MSEL(IP9_8_6, TS_SCK, SEL_TSIF0_0),
-	PINMUX_IPSR_DATA(IP9_8_6, DU1_DR5),
+	PINMUX_IPSR_GPSR(IP9_8_6, DU1_DR5),
 	PINMUX_IPSR_MSEL(IP9_8_6, RIF1_CLK, SEL_DR2_0),
 	PINMUX_IPSR_MSEL(IP9_8_6, BPFCLK_B, SEL_DARC_1),
-	PINMUX_IPSR_DATA(IP9_11_9, MSIOF0_SS1),
+	PINMUX_IPSR_GPSR(IP9_11_9, MSIOF0_SS1),
 	PINMUX_IPSR_MSEL(IP9_11_9, SCIFA0_RXD, SEL_SCIFA0_0),
 	PINMUX_IPSR_MSEL(IP9_11_9, TS_SDEN, SEL_TSIF0_0),
-	PINMUX_IPSR_DATA(IP9_11_9, DU1_DR6),
+	PINMUX_IPSR_GPSR(IP9_11_9, DU1_DR6),
 	PINMUX_IPSR_MSEL(IP9_11_9, RIF1_D0, SEL_DR2_0),
 	PINMUX_IPSR_MSEL(IP9_11_9, FMCLK_B, SEL_DARC_1),
 	PINMUX_IPSR_MSEL(IP9_11_9, RDS_CLK_B, SEL_RDS_1),
-	PINMUX_IPSR_DATA(IP9_14_12, MSIOF0_SS2),
+	PINMUX_IPSR_GPSR(IP9_14_12, MSIOF0_SS2),
 	PINMUX_IPSR_MSEL(IP9_14_12, SCIFA0_TXD, SEL_SCIFA0_0),
 	PINMUX_IPSR_MSEL(IP9_14_12, TS_SPSYNC, SEL_TSIF0_0),
-	PINMUX_IPSR_DATA(IP9_14_12, DU1_DR7),
+	PINMUX_IPSR_GPSR(IP9_14_12, DU1_DR7),
 	PINMUX_IPSR_MSEL(IP9_14_12, RIF1_D1, SEL_DR3_0),
 	PINMUX_IPSR_MSEL(IP9_14_12, FMIN_B, SEL_DARC_1),
 	PINMUX_IPSR_MSEL(IP9_14_12, RDS_DATA_B, SEL_RDS_1),
 	PINMUX_IPSR_MSEL(IP9_16_15, HSCIF1_HRX, SEL_HSCIF1_0),
 	PINMUX_IPSR_MSEL(IP9_16_15, I2C4_SCL, SEL_I2C04_0),
-	PINMUX_IPSR_DATA(IP9_16_15, PWM6),
-	PINMUX_IPSR_DATA(IP9_16_15, DU1_DG0),
+	PINMUX_IPSR_GPSR(IP9_16_15, PWM6),
+	PINMUX_IPSR_GPSR(IP9_16_15, DU1_DG0),
 	PINMUX_IPSR_MSEL(IP9_18_17, HSCIF1_HTX, SEL_HSCIF1_0),
 	PINMUX_IPSR_MSEL(IP9_18_17, I2C4_SDA, SEL_I2C04_0),
-	PINMUX_IPSR_DATA(IP9_18_17, TPUTO1),
-	PINMUX_IPSR_DATA(IP9_18_17, DU1_DG1),
-	PINMUX_IPSR_DATA(IP9_21_19, HSCIF1_HSCK),
-	PINMUX_IPSR_DATA(IP9_21_19, PWM2),
+	PINMUX_IPSR_GPSR(IP9_18_17, TPUTO1),
+	PINMUX_IPSR_GPSR(IP9_18_17, DU1_DG1),
+	PINMUX_IPSR_GPSR(IP9_21_19, HSCIF1_HSCK),
+	PINMUX_IPSR_GPSR(IP9_21_19, PWM2),
 	PINMUX_IPSR_MSEL(IP9_21_19, IETX, SEL_IEB_0),
-	PINMUX_IPSR_DATA(IP9_21_19, DU1_DG2),
+	PINMUX_IPSR_GPSR(IP9_21_19, DU1_DG2),
 	PINMUX_IPSR_MSEL(IP9_21_19, REMOCON_B, SEL_RCN_1),
 	PINMUX_IPSR_MSEL(IP9_21_19, SPEEDIN_B, SEL_RSP_1),
 	PINMUX_IPSR_MSEL(IP9_21_19, VSP_B, SEL_SPDM_1),
 	PINMUX_IPSR_MSEL(IP9_24_22, HSCIF1_HCTS_N, SEL_HSCIF1_0),
 	PINMUX_IPSR_MSEL(IP9_24_22, SCIFA4_RXD, SEL_SCIFA4_0),
 	PINMUX_IPSR_MSEL(IP9_24_22, IECLK, SEL_IEB_0),
-	PINMUX_IPSR_DATA(IP9_24_22, DU1_DG3),
+	PINMUX_IPSR_GPSR(IP9_24_22, DU1_DG3),
 	PINMUX_IPSR_MSEL(IP9_24_22, SSI_SCK1_B, SEL_SSI1_1),
-	PINMUX_IPSR_DATA(IP9_24_22, CAN_DEBUG_HW_TRIGGER),
-	PINMUX_IPSR_DATA(IP9_24_22, CC50_STATE32),
+	PINMUX_IPSR_GPSR(IP9_24_22, CAN_DEBUG_HW_TRIGGER),
+	PINMUX_IPSR_GPSR(IP9_24_22, CC50_STATE32),
 	PINMUX_IPSR_MSEL(IP9_27_25, HSCIF1_HRTS_N, SEL_HSCIF1_0),
 	PINMUX_IPSR_MSEL(IP9_27_25, SCIFA4_TXD, SEL_SCIFA4_0),
 	PINMUX_IPSR_MSEL(IP9_27_25, IERX, SEL_IEB_0),
-	PINMUX_IPSR_DATA(IP9_27_25, DU1_DG4),
+	PINMUX_IPSR_GPSR(IP9_27_25, DU1_DG4),
 	PINMUX_IPSR_MSEL(IP9_27_25, SSI_WS1_B, SEL_SSI1_1),
-	PINMUX_IPSR_DATA(IP9_27_25, CAN_STEP0),
-	PINMUX_IPSR_DATA(IP9_27_25, CC50_STATE33),
+	PINMUX_IPSR_GPSR(IP9_27_25, CAN_STEP0),
+	PINMUX_IPSR_GPSR(IP9_27_25, CC50_STATE33),
 	PINMUX_IPSR_MSEL(IP9_30_28, SCIF1_SCK, SEL_SCIF1_0),
-	PINMUX_IPSR_DATA(IP9_30_28, PWM3),
+	PINMUX_IPSR_GPSR(IP9_30_28, PWM3),
 	PINMUX_IPSR_MSEL(IP9_30_28, TCLK2, SEL_TMU_0),
-	PINMUX_IPSR_DATA(IP9_30_28, DU1_DG5),
+	PINMUX_IPSR_GPSR(IP9_30_28, DU1_DG5),
 	PINMUX_IPSR_MSEL(IP9_30_28, SSI_SDATA1_B, SEL_SSI1_1),
-	PINMUX_IPSR_DATA(IP9_30_28, CAN_TXCLK),
-	PINMUX_IPSR_DATA(IP9_30_28, CC50_STATE34),
+	PINMUX_IPSR_GPSR(IP9_30_28, CAN_TXCLK),
+	PINMUX_IPSR_GPSR(IP9_30_28, CC50_STATE34),
 
 	/* IPSR10 */
 	PINMUX_IPSR_MSEL(IP10_2_0, SCIF1_RXD, SEL_SCIF1_0),
 	PINMUX_IPSR_MSEL(IP10_2_0, IIC0_SCL, SEL_IIC00_0),
-	PINMUX_IPSR_DATA(IP10_2_0, DU1_DG6),
+	PINMUX_IPSR_GPSR(IP10_2_0, DU1_DG6),
 	PINMUX_IPSR_MSEL(IP10_2_0, SSI_SCK2_B, SEL_SSI2_1),
-	PINMUX_IPSR_DATA(IP10_2_0, CAN_DEBUGOUT0),
-	PINMUX_IPSR_DATA(IP10_2_0, CC50_STATE35),
+	PINMUX_IPSR_GPSR(IP10_2_0, CAN_DEBUGOUT0),
+	PINMUX_IPSR_GPSR(IP10_2_0, CC50_STATE35),
 	PINMUX_IPSR_MSEL(IP10_5_3, SCIF1_TXD, SEL_SCIF1_0),
 	PINMUX_IPSR_MSEL(IP10_5_3, IIC0_SDA, SEL_IIC00_0),
-	PINMUX_IPSR_DATA(IP10_5_3, DU1_DG7),
+	PINMUX_IPSR_GPSR(IP10_5_3, DU1_DG7),
 	PINMUX_IPSR_MSEL(IP10_5_3, SSI_WS2_B, SEL_SSI2_1),
-	PINMUX_IPSR_DATA(IP10_5_3, CAN_DEBUGOUT1),
-	PINMUX_IPSR_DATA(IP10_5_3, CC50_STATE36),
+	PINMUX_IPSR_GPSR(IP10_5_3, CAN_DEBUGOUT1),
+	PINMUX_IPSR_GPSR(IP10_5_3, CC50_STATE36),
 	PINMUX_IPSR_MSEL(IP10_8_6, SCIF2_RXD, SEL_SCIF2_0),
 	PINMUX_IPSR_MSEL(IP10_8_6, IIC1_SCL, SEL_IIC01_0),
-	PINMUX_IPSR_DATA(IP10_8_6, DU1_DB0),
+	PINMUX_IPSR_GPSR(IP10_8_6, DU1_DB0),
 	PINMUX_IPSR_MSEL(IP10_8_6, SSI_SDATA2_B, SEL_SSI2_1),
-	PINMUX_IPSR_DATA(IP10_8_6, USB0_EXTLP),
-	PINMUX_IPSR_DATA(IP10_8_6, CAN_DEBUGOUT2),
-	PINMUX_IPSR_DATA(IP10_8_6, CC50_STATE37),
+	PINMUX_IPSR_GPSR(IP10_8_6, USB0_EXTLP),
+	PINMUX_IPSR_GPSR(IP10_8_6, CAN_DEBUGOUT2),
+	PINMUX_IPSR_GPSR(IP10_8_6, CC50_STATE37),
 	PINMUX_IPSR_MSEL(IP10_11_9, SCIF2_TXD, SEL_SCIF2_0),
 	PINMUX_IPSR_MSEL(IP10_11_9, IIC1_SDA, SEL_IIC01_0),
-	PINMUX_IPSR_DATA(IP10_11_9, DU1_DB1),
+	PINMUX_IPSR_GPSR(IP10_11_9, DU1_DB1),
 	PINMUX_IPSR_MSEL(IP10_11_9, SSI_SCK9_B, SEL_SSI9_1),
-	PINMUX_IPSR_DATA(IP10_11_9, USB0_OVC1),
-	PINMUX_IPSR_DATA(IP10_11_9, CAN_DEBUGOUT3),
-	PINMUX_IPSR_DATA(IP10_11_9, CC50_STATE38),
+	PINMUX_IPSR_GPSR(IP10_11_9, USB0_OVC1),
+	PINMUX_IPSR_GPSR(IP10_11_9, CAN_DEBUGOUT3),
+	PINMUX_IPSR_GPSR(IP10_11_9, CC50_STATE38),
 	PINMUX_IPSR_MSEL(IP10_14_12, SCIF2_SCK, SEL_SCIF2_0),
-	PINMUX_IPSR_DATA(IP10_14_12, IRQ1),
-	PINMUX_IPSR_DATA(IP10_14_12, DU1_DB2),
+	PINMUX_IPSR_GPSR(IP10_14_12, IRQ1),
+	PINMUX_IPSR_GPSR(IP10_14_12, DU1_DB2),
 	PINMUX_IPSR_MSEL(IP10_14_12, SSI_WS9_B, SEL_SSI9_1),
-	PINMUX_IPSR_DATA(IP10_14_12, USB0_IDIN),
-	PINMUX_IPSR_DATA(IP10_14_12, CAN_DEBUGOUT4),
-	PINMUX_IPSR_DATA(IP10_14_12, CC50_STATE39),
+	PINMUX_IPSR_GPSR(IP10_14_12, USB0_IDIN),
+	PINMUX_IPSR_GPSR(IP10_14_12, CAN_DEBUGOUT4),
+	PINMUX_IPSR_GPSR(IP10_14_12, CC50_STATE39),
 	PINMUX_IPSR_MSEL(IP10_17_15, SCIF3_SCK, SEL_SCIF3_0),
-	PINMUX_IPSR_DATA(IP10_17_15, IRQ2),
+	PINMUX_IPSR_GPSR(IP10_17_15, IRQ2),
 	PINMUX_IPSR_MSEL(IP10_17_15, BPFCLK_D, SEL_DARC_3),
-	PINMUX_IPSR_DATA(IP10_17_15, DU1_DB3),
+	PINMUX_IPSR_GPSR(IP10_17_15, DU1_DB3),
 	PINMUX_IPSR_MSEL(IP10_17_15, SSI_SDATA9_B, SEL_SSI9_1),
-	PINMUX_IPSR_DATA(IP10_17_15, TANS2),
-	PINMUX_IPSR_DATA(IP10_17_15, CAN_DEBUGOUT5),
-	PINMUX_IPSR_DATA(IP10_17_15, CC50_OSCOUT),
+	PINMUX_IPSR_GPSR(IP10_17_15, TANS2),
+	PINMUX_IPSR_GPSR(IP10_17_15, CAN_DEBUGOUT5),
+	PINMUX_IPSR_GPSR(IP10_17_15, CC50_OSCOUT),
 	PINMUX_IPSR_MSEL(IP10_20_18, SCIF3_RXD, SEL_SCIF3_0),
 	PINMUX_IPSR_MSEL(IP10_20_18, I2C1_SCL_E, SEL_I2C01_4),
 	PINMUX_IPSR_MSEL(IP10_20_18, FMCLK_D, SEL_DARC_3),
-	PINMUX_IPSR_DATA(IP10_20_18, DU1_DB4),
+	PINMUX_IPSR_GPSR(IP10_20_18, DU1_DB4),
 	PINMUX_IPSR_MSEL(IP10_20_18, AUDIO_CLKA_C, SEL_ADG_2),
 	PINMUX_IPSR_MSEL(IP10_20_18, SSI_SCK4_B, SEL_SSI4_1),
-	PINMUX_IPSR_DATA(IP10_20_18, CAN_DEBUGOUT6),
+	PINMUX_IPSR_GPSR(IP10_20_18, CAN_DEBUGOUT6),
 	PINMUX_IPSR_MSEL(IP10_20_18, RDS_CLK_C, SEL_RDS_2),
 	PINMUX_IPSR_MSEL(IP10_23_21, SCIF3_TXD, SEL_SCIF3_0),
 	PINMUX_IPSR_MSEL(IP10_23_21, I2C1_SDA_E, SEL_I2C01_4),
 	PINMUX_IPSR_MSEL(IP10_23_21, FMIN_D, SEL_DARC_3),
-	PINMUX_IPSR_DATA(IP10_23_21, DU1_DB5),
+	PINMUX_IPSR_GPSR(IP10_23_21, DU1_DB5),
 	PINMUX_IPSR_MSEL(IP10_23_21, AUDIO_CLKB_C, SEL_ADG_2),
 	PINMUX_IPSR_MSEL(IP10_23_21, SSI_WS4_B, SEL_SSI4_1),
-	PINMUX_IPSR_DATA(IP10_23_21, CAN_DEBUGOUT7),
+	PINMUX_IPSR_GPSR(IP10_23_21, CAN_DEBUGOUT7),
 	PINMUX_IPSR_MSEL(IP10_23_21, RDS_DATA_C, SEL_RDS_2),
 	PINMUX_IPSR_MSEL(IP10_26_24, I2C2_SCL, SEL_I2C02_0),
 	PINMUX_IPSR_MSEL(IP10_26_24, SCIFA5_RXD, SEL_SCIFA5_0),
-	PINMUX_IPSR_DATA(IP10_26_24, DU1_DB6),
+	PINMUX_IPSR_GPSR(IP10_26_24, DU1_DB6),
 	PINMUX_IPSR_MSEL(IP10_26_24, AUDIO_CLKC_C, SEL_ADG_2),
 	PINMUX_IPSR_MSEL(IP10_26_24, SSI_SDATA4_B, SEL_SSI4_1),
-	PINMUX_IPSR_DATA(IP10_26_24, CAN_DEBUGOUT8),
+	PINMUX_IPSR_GPSR(IP10_26_24, CAN_DEBUGOUT8),
 	PINMUX_IPSR_MSEL(IP10_29_27, I2C2_SDA, SEL_I2C02_0),
 	PINMUX_IPSR_MSEL(IP10_29_27, SCIFA5_TXD, SEL_SCIFA5_0),
-	PINMUX_IPSR_DATA(IP10_29_27, DU1_DB7),
+	PINMUX_IPSR_GPSR(IP10_29_27, DU1_DB7),
 	PINMUX_IPSR_MSEL(IP10_29_27, AUDIO_CLKOUT_C, SEL_ADG_2),
-	PINMUX_IPSR_DATA(IP10_29_27, CAN_DEBUGOUT9),
+	PINMUX_IPSR_GPSR(IP10_29_27, CAN_DEBUGOUT9),
 	PINMUX_IPSR_MSEL(IP10_31_30, SSI_SCK5, SEL_SSI5_0),
 	PINMUX_IPSR_MSEL(IP10_31_30, SCIFA3_SCK, SEL_SCIFA3_0),
-	PINMUX_IPSR_DATA(IP10_31_30, DU1_DOTCLKIN),
-	PINMUX_IPSR_DATA(IP10_31_30, CAN_DEBUGOUT10),
+	PINMUX_IPSR_GPSR(IP10_31_30, DU1_DOTCLKIN),
+	PINMUX_IPSR_GPSR(IP10_31_30, CAN_DEBUGOUT10),
 
 	/* IPSR11 */
 	PINMUX_IPSR_MSEL(IP11_2_0, SSI_WS5, SEL_SSI5_0),
 	PINMUX_IPSR_MSEL(IP11_2_0, SCIFA3_RXD, SEL_SCIFA3_0),
 	PINMUX_IPSR_MSEL(IP11_2_0, I2C3_SCL_C, SEL_I2C03_2),
-	PINMUX_IPSR_DATA(IP11_2_0, DU1_DOTCLKOUT0),
-	PINMUX_IPSR_DATA(IP11_2_0, CAN_DEBUGOUT11),
+	PINMUX_IPSR_GPSR(IP11_2_0, DU1_DOTCLKOUT0),
+	PINMUX_IPSR_GPSR(IP11_2_0, CAN_DEBUGOUT11),
 	PINMUX_IPSR_MSEL(IP11_5_3, SSI_SDATA5, SEL_SSI5_0),
 	PINMUX_IPSR_MSEL(IP11_5_3, SCIFA3_TXD, SEL_SCIFA3_0),
 	PINMUX_IPSR_MSEL(IP11_5_3, I2C3_SDA_C, SEL_I2C03_2),
-	PINMUX_IPSR_DATA(IP11_5_3, DU1_DOTCLKOUT1),
-	PINMUX_IPSR_DATA(IP11_5_3, CAN_DEBUGOUT12),
+	PINMUX_IPSR_GPSR(IP11_5_3, DU1_DOTCLKOUT1),
+	PINMUX_IPSR_GPSR(IP11_5_3, CAN_DEBUGOUT12),
 	PINMUX_IPSR_MSEL(IP11_7_6, SSI_SCK6, SEL_SSI6_0),
 	PINMUX_IPSR_MSEL(IP11_7_6, SCIFA1_SCK_B, SEL_SCIFA1_1),
-	PINMUX_IPSR_DATA(IP11_7_6, DU1_EXHSYNC_DU1_HSYNC),
-	PINMUX_IPSR_DATA(IP11_7_6, CAN_DEBUGOUT13),
+	PINMUX_IPSR_GPSR(IP11_7_6, DU1_EXHSYNC_DU1_HSYNC),
+	PINMUX_IPSR_GPSR(IP11_7_6, CAN_DEBUGOUT13),
 	PINMUX_IPSR_MSEL(IP11_10_8, SSI_WS6, SEL_SSI6_0),
 	PINMUX_IPSR_MSEL(IP11_10_8, SCIFA1_RXD_B, SEL_SCIFA1_1),
 	PINMUX_IPSR_MSEL(IP11_10_8, I2C4_SCL_C, SEL_I2C04_2),
-	PINMUX_IPSR_DATA(IP11_10_8, DU1_EXVSYNC_DU1_VSYNC),
-	PINMUX_IPSR_DATA(IP11_10_8, CAN_DEBUGOUT14),
+	PINMUX_IPSR_GPSR(IP11_10_8, DU1_EXVSYNC_DU1_VSYNC),
+	PINMUX_IPSR_GPSR(IP11_10_8, CAN_DEBUGOUT14),
 	PINMUX_IPSR_MSEL(IP11_13_11, SSI_SDATA6, SEL_SSI6_0),
 	PINMUX_IPSR_MSEL(IP11_13_11, SCIFA1_TXD_B, SEL_SCIFA1_1),
 	PINMUX_IPSR_MSEL(IP11_13_11, I2C4_SDA_C, SEL_I2C04_2),
-	PINMUX_IPSR_DATA(IP11_13_11, DU1_EXODDF_DU1_ODDF_DISP_CDE),
-	PINMUX_IPSR_DATA(IP11_13_11, CAN_DEBUGOUT15),
+	PINMUX_IPSR_GPSR(IP11_13_11, DU1_EXODDF_DU1_ODDF_DISP_CDE),
+	PINMUX_IPSR_GPSR(IP11_13_11, CAN_DEBUGOUT15),
 	PINMUX_IPSR_MSEL(IP11_15_14, SSI_SCK78, SEL_SSI7_0),
 	PINMUX_IPSR_MSEL(IP11_15_14, SCIFA2_SCK_B, SEL_SCIFA2_1),
 	PINMUX_IPSR_MSEL(IP11_15_14, IIC0_SDA_C, SEL_IIC00_2),
-	PINMUX_IPSR_DATA(IP11_15_14, DU1_DISP),
+	PINMUX_IPSR_GPSR(IP11_15_14, DU1_DISP),
 	PINMUX_IPSR_MSEL(IP11_17_16, SSI_WS78, SEL_SSI7_0),
 	PINMUX_IPSR_MSEL(IP11_17_16, SCIFA2_RXD_B, SEL_SCIFA2_1),
 	PINMUX_IPSR_MSEL(IP11_17_16, IIC0_SCL_C, SEL_IIC00_2),
-	PINMUX_IPSR_DATA(IP11_17_16, DU1_CDE),
+	PINMUX_IPSR_GPSR(IP11_17_16, DU1_CDE),
 	PINMUX_IPSR_MSEL(IP11_20_18, SSI_SDATA7, SEL_SSI7_0),
 	PINMUX_IPSR_MSEL(IP11_20_18, SCIFA2_TXD_B, SEL_SCIFA2_1),
-	PINMUX_IPSR_DATA(IP11_20_18, IRQ8),
+	PINMUX_IPSR_GPSR(IP11_20_18, IRQ8),
 	PINMUX_IPSR_MSEL(IP11_20_18, AUDIO_CLKA_D, SEL_ADG_3),
 	PINMUX_IPSR_MSEL(IP11_20_18, CAN_CLK_D, SEL_CAN_3),
-	PINMUX_IPSR_DATA(IP11_20_18, PCMOE_N),
-	PINMUX_IPSR_DATA(IP11_23_21, SSI_SCK0129),
+	PINMUX_IPSR_GPSR(IP11_20_18, PCMOE_N),
+	PINMUX_IPSR_GPSR(IP11_23_21, SSI_SCK0129),
 	PINMUX_IPSR_MSEL(IP11_23_21, MSIOF1_RXD_B, SEL_MSI1_1),
 	PINMUX_IPSR_MSEL(IP11_23_21, SCIF5_RXD_D, SEL_SCIF5_3),
 	PINMUX_IPSR_MSEL(IP11_23_21, ADIDATA_B, SEL_RAD_1),
 	PINMUX_IPSR_MSEL(IP11_23_21, AD_DI_B, SEL_ADI_1),
-	PINMUX_IPSR_DATA(IP11_23_21, PCMWE_N),
-	PINMUX_IPSR_DATA(IP11_26_24, SSI_WS0129),
+	PINMUX_IPSR_GPSR(IP11_23_21, PCMWE_N),
+	PINMUX_IPSR_GPSR(IP11_26_24, SSI_WS0129),
 	PINMUX_IPSR_MSEL(IP11_26_24, MSIOF1_TXD_B, SEL_MSI1_1),
 	PINMUX_IPSR_MSEL(IP11_26_24, SCIF5_TXD_D, SEL_SCIF5_3),
 	PINMUX_IPSR_MSEL(IP11_26_24, ADICS_SAMP_B, SEL_RAD_1),
 	PINMUX_IPSR_MSEL(IP11_26_24, AD_DO_B, SEL_ADI_1),
-	PINMUX_IPSR_DATA(IP11_29_27, SSI_SDATA0),
+	PINMUX_IPSR_GPSR(IP11_29_27, SSI_SDATA0),
 	PINMUX_IPSR_MSEL(IP11_29_27, MSIOF1_SCK_B, SEL_MSI1_1),
-	PINMUX_IPSR_DATA(IP11_29_27, PWM0_B),
+	PINMUX_IPSR_GPSR(IP11_29_27, PWM0_B),
 	PINMUX_IPSR_MSEL(IP11_29_27, ADICLK_B, SEL_RAD_1),
 	PINMUX_IPSR_MSEL(IP11_29_27, AD_CLK_B, SEL_ADI_1),
 
 	/* IPSR12 */
-	PINMUX_IPSR_DATA(IP12_2_0, SSI_SCK34),
+	PINMUX_IPSR_GPSR(IP12_2_0, SSI_SCK34),
 	PINMUX_IPSR_MSEL(IP12_2_0, MSIOF1_SYNC_B, SEL_MSI1_1),
 	PINMUX_IPSR_MSEL(IP12_2_0, SCIFA1_SCK_C, SEL_SCIFA1_2),
 	PINMUX_IPSR_MSEL(IP12_2_0, ADICHS0_B, SEL_RAD_1),
 	PINMUX_IPSR_MSEL(IP12_2_0, AD_NCS_N_B, SEL_ADI_1),
 	PINMUX_IPSR_MSEL(IP12_2_0, DREQ1_N_B, SEL_LBS_1),
-	PINMUX_IPSR_DATA(IP12_5_3, SSI_WS34),
+	PINMUX_IPSR_GPSR(IP12_5_3, SSI_WS34),
 	PINMUX_IPSR_MSEL(IP12_5_3, MSIOF1_SS1_B, SEL_MSI1_1),
 	PINMUX_IPSR_MSEL(IP12_5_3, SCIFA1_RXD_C, SEL_SCIFA1_2),
 	PINMUX_IPSR_MSEL(IP12_5_3, ADICHS1_B, SEL_RAD_1),
 	PINMUX_IPSR_MSEL(IP12_5_3, CAN1_RX_C, SEL_CAN1_2),
 	PINMUX_IPSR_MSEL(IP12_5_3, DACK1_B, SEL_LBS_1),
-	PINMUX_IPSR_DATA(IP12_8_6, SSI_SDATA3),
+	PINMUX_IPSR_GPSR(IP12_8_6, SSI_SDATA3),
 	PINMUX_IPSR_MSEL(IP12_8_6, MSIOF1_SS2_B, SEL_MSI1_1),
 	PINMUX_IPSR_MSEL(IP12_8_6, SCIFA1_TXD_C, SEL_SCIFA1_2),
 	PINMUX_IPSR_MSEL(IP12_8_6, ADICHS2_B, SEL_RAD_1),
 	PINMUX_IPSR_MSEL(IP12_8_6, CAN1_TX_C, SEL_CAN1_2),
-	PINMUX_IPSR_DATA(IP12_8_6, DREQ2_N),
+	PINMUX_IPSR_GPSR(IP12_8_6, DREQ2_N),
 	PINMUX_IPSR_MSEL(IP12_10_9, SSI_SCK4, SEL_SSI4_0),
-	PINMUX_IPSR_DATA(IP12_10_9, MLB_CLK),
+	PINMUX_IPSR_GPSR(IP12_10_9, MLB_CLK),
 	PINMUX_IPSR_MSEL(IP12_10_9, IETX_B, SEL_IEB_1),
-	PINMUX_IPSR_DATA(IP12_10_9, IRD_TX),
+	PINMUX_IPSR_GPSR(IP12_10_9, IRD_TX),
 	PINMUX_IPSR_MSEL(IP12_12_11, SSI_WS4, SEL_SSI4_0),
-	PINMUX_IPSR_DATA(IP12_12_11, MLB_SIG),
+	PINMUX_IPSR_GPSR(IP12_12_11, MLB_SIG),
 	PINMUX_IPSR_MSEL(IP12_12_11, IECLK_B, SEL_IEB_1),
-	PINMUX_IPSR_DATA(IP12_12_11, IRD_RX),
+	PINMUX_IPSR_GPSR(IP12_12_11, IRD_RX),
 	PINMUX_IPSR_MSEL(IP12_14_13, SSI_SDATA4, SEL_SSI4_0),
-	PINMUX_IPSR_DATA(IP12_14_13, MLB_DAT),
+	PINMUX_IPSR_GPSR(IP12_14_13, MLB_DAT),
 	PINMUX_IPSR_MSEL(IP12_14_13, IERX_B, SEL_IEB_1),
-	PINMUX_IPSR_DATA(IP12_14_13, IRD_SCK),
+	PINMUX_IPSR_GPSR(IP12_14_13, IRD_SCK),
 	PINMUX_IPSR_MSEL(IP12_17_15, SSI_SDATA8, SEL_SSI8_0),
 	PINMUX_IPSR_MSEL(IP12_17_15, SCIF1_SCK_B, SEL_SCIF1_1),
-	PINMUX_IPSR_DATA(IP12_17_15, PWM1_B),
-	PINMUX_IPSR_DATA(IP12_17_15, IRQ9),
+	PINMUX_IPSR_GPSR(IP12_17_15, PWM1_B),
+	PINMUX_IPSR_GPSR(IP12_17_15, IRQ9),
 	PINMUX_IPSR_MSEL(IP12_17_15, REMOCON, SEL_RCN_0),
-	PINMUX_IPSR_DATA(IP12_17_15, DACK2),
+	PINMUX_IPSR_GPSR(IP12_17_15, DACK2),
 	PINMUX_IPSR_MSEL(IP12_17_15, ETH_MDIO_B, SEL_ETH_1),
 	PINMUX_IPSR_MSEL(IP12_20_18, SSI_SCK1, SEL_SSI1_0),
 	PINMUX_IPSR_MSEL(IP12_20_18, SCIF1_RXD_B, SEL_SCIF1_1),
 	PINMUX_IPSR_MSEL(IP12_20_18, IIC1_SCL_C, SEL_IIC01_2),
-	PINMUX_IPSR_DATA(IP12_20_18, VI1_CLK),
+	PINMUX_IPSR_GPSR(IP12_20_18, VI1_CLK),
 	PINMUX_IPSR_MSEL(IP12_20_18, CAN0_RX_D, SEL_CAN0_3),
 	PINMUX_IPSR_MSEL(IP12_20_18, AVB_AVTP_CAPTURE, SEL_AVB_0),
 	PINMUX_IPSR_MSEL(IP12_20_18, ETH_CRS_DV_B, SEL_ETH_1),
 	PINMUX_IPSR_MSEL(IP12_23_21, SSI_WS1, SEL_SSI1_0),
 	PINMUX_IPSR_MSEL(IP12_23_21, SCIF1_TXD_B, SEL_SCIF1_1),
 	PINMUX_IPSR_MSEL(IP12_23_21, IIC1_SDA_C, SEL_IIC01_2),
-	PINMUX_IPSR_DATA(IP12_23_21, VI1_DATA0),
+	PINMUX_IPSR_GPSR(IP12_23_21, VI1_DATA0),
 	PINMUX_IPSR_MSEL(IP12_23_21, CAN0_TX_D, SEL_CAN0_3),
 	PINMUX_IPSR_MSEL(IP12_23_21, AVB_AVTP_MATCH, SEL_AVB_0),
 	PINMUX_IPSR_MSEL(IP12_23_21, ETH_RX_ER_B, SEL_ETH_1),
 	PINMUX_IPSR_MSEL(IP12_26_24, SSI_SDATA1, SEL_SSI1_0),
 	PINMUX_IPSR_MSEL(IP12_26_24, HSCIF1_HRX_B, SEL_HSCIF1_1),
-	PINMUX_IPSR_DATA(IP12_26_24, VI1_DATA1),
+	PINMUX_IPSR_GPSR(IP12_26_24, VI1_DATA1),
 	PINMUX_IPSR_MSEL(IP12_26_24, SDATA, SEL_FSN_0),
-	PINMUX_IPSR_DATA(IP12_26_24, ATAG0_N),
+	PINMUX_IPSR_GPSR(IP12_26_24, ATAG0_N),
 	PINMUX_IPSR_MSEL(IP12_26_24, ETH_RXD0_B, SEL_ETH_1),
 	PINMUX_IPSR_MSEL(IP12_29_27, SSI_SCK2, SEL_SSI2_0),
 	PINMUX_IPSR_MSEL(IP12_29_27, HSCIF1_HTX_B, SEL_HSCIF1_1),
-	PINMUX_IPSR_DATA(IP12_29_27, VI1_DATA2),
+	PINMUX_IPSR_GPSR(IP12_29_27, VI1_DATA2),
 	PINMUX_IPSR_MSEL(IP12_29_27, MDATA, SEL_FSN_0),
-	PINMUX_IPSR_DATA(IP12_29_27, ATAWR0_N),
+	PINMUX_IPSR_GPSR(IP12_29_27, ATAWR0_N),
 	PINMUX_IPSR_MSEL(IP12_29_27, ETH_RXD1_B, SEL_ETH_1),
 
 	/* IPSR13 */
 	PINMUX_IPSR_MSEL(IP13_2_0, SSI_WS2, SEL_SSI2_0),
 	PINMUX_IPSR_MSEL(IP13_2_0, HSCIF1_HCTS_N_B, SEL_HSCIF1_1),
 	PINMUX_IPSR_MSEL(IP13_2_0, SCIFA0_RXD_D, SEL_SCIFA0_3),
-	PINMUX_IPSR_DATA(IP13_2_0, VI1_DATA3),
+	PINMUX_IPSR_GPSR(IP13_2_0, VI1_DATA3),
 	PINMUX_IPSR_MSEL(IP13_2_0, SCKZ, SEL_FSN_0),
-	PINMUX_IPSR_DATA(IP13_2_0, ATACS00_N),
+	PINMUX_IPSR_GPSR(IP13_2_0, ATACS00_N),
 	PINMUX_IPSR_MSEL(IP13_2_0, ETH_LINK_B, SEL_ETH_1),
 	PINMUX_IPSR_MSEL(IP13_5_3, SSI_SDATA2, SEL_SSI2_0),
 	PINMUX_IPSR_MSEL(IP13_5_3, HSCIF1_HRTS_N_B, SEL_HSCIF1_1),
 	PINMUX_IPSR_MSEL(IP13_5_3, SCIFA0_TXD_D, SEL_SCIFA0_3),
-	PINMUX_IPSR_DATA(IP13_5_3, VI1_DATA4),
+	PINMUX_IPSR_GPSR(IP13_5_3, VI1_DATA4),
 	PINMUX_IPSR_MSEL(IP13_5_3, STM_N, SEL_FSN_0),
-	PINMUX_IPSR_DATA(IP13_5_3, ATACS10_N),
+	PINMUX_IPSR_GPSR(IP13_5_3, ATACS10_N),
 	PINMUX_IPSR_MSEL(IP13_5_3, ETH_REFCLK_B, SEL_ETH_1),
 	PINMUX_IPSR_MSEL(IP13_8_6, SSI_SCK9, SEL_SSI9_0),
 	PINMUX_IPSR_MSEL(IP13_8_6, SCIF2_SCK_B, SEL_SCIF2_1),
-	PINMUX_IPSR_DATA(IP13_8_6, PWM2_B),
-	PINMUX_IPSR_DATA(IP13_8_6, VI1_DATA5),
+	PINMUX_IPSR_GPSR(IP13_8_6, PWM2_B),
+	PINMUX_IPSR_GPSR(IP13_8_6, VI1_DATA5),
 	PINMUX_IPSR_MSEL(IP13_8_6, MTS_N, SEL_FSN_0),
-	PINMUX_IPSR_DATA(IP13_8_6, EX_WAIT1),
+	PINMUX_IPSR_GPSR(IP13_8_6, EX_WAIT1),
 	PINMUX_IPSR_MSEL(IP13_8_6, ETH_TXD1_B, SEL_ETH_1),
 	PINMUX_IPSR_MSEL(IP13_11_9, SSI_WS9, SEL_SSI9_0),
 	PINMUX_IPSR_MSEL(IP13_11_9, SCIF2_RXD_B, SEL_SCIF2_1),
 	PINMUX_IPSR_MSEL(IP13_11_9, I2C3_SCL_E, SEL_I2C03_4),
-	PINMUX_IPSR_DATA(IP13_11_9, VI1_DATA6),
-	PINMUX_IPSR_DATA(IP13_11_9, ATARD0_N),
+	PINMUX_IPSR_GPSR(IP13_11_9, VI1_DATA6),
+	PINMUX_IPSR_GPSR(IP13_11_9, ATARD0_N),
 	PINMUX_IPSR_MSEL(IP13_11_9, ETH_TX_EN_B, SEL_ETH_1),
 	PINMUX_IPSR_MSEL(IP13_14_12, SSI_SDATA9, SEL_SSI9_0),
 	PINMUX_IPSR_MSEL(IP13_14_12, SCIF2_TXD_B, SEL_SCIF2_1),
 	PINMUX_IPSR_MSEL(IP13_14_12, I2C3_SDA_E, SEL_I2C03_4),
-	PINMUX_IPSR_DATA(IP13_14_12, VI1_DATA7),
-	PINMUX_IPSR_DATA(IP13_14_12, ATADIR0_N),
+	PINMUX_IPSR_GPSR(IP13_14_12, VI1_DATA7),
+	PINMUX_IPSR_GPSR(IP13_14_12, ATADIR0_N),
 	PINMUX_IPSR_MSEL(IP13_14_12, ETH_MAGIC_B, SEL_ETH_1),
 	PINMUX_IPSR_MSEL(IP13_17_15, AUDIO_CLKA, SEL_ADG_0),
 	PINMUX_IPSR_MSEL(IP13_17_15, I2C0_SCL_B, SEL_I2C00_1),
 	PINMUX_IPSR_MSEL(IP13_17_15, SCIFA4_RXD_D, SEL_SCIFA4_3),
-	PINMUX_IPSR_DATA(IP13_17_15, VI1_CLKENB),
+	PINMUX_IPSR_GPSR(IP13_17_15, VI1_CLKENB),
 	PINMUX_IPSR_MSEL(IP13_17_15, TS_SDATA_C, SEL_TSIF0_2),
 	PINMUX_IPSR_MSEL(IP13_17_15, RIF0_SYNC_B, SEL_DR0_1),
 	PINMUX_IPSR_MSEL(IP13_17_15, ETH_TXD0_B, SEL_ETH_1),
 	PINMUX_IPSR_MSEL(IP13_20_18, AUDIO_CLKB, SEL_ADG_0),
 	PINMUX_IPSR_MSEL(IP13_20_18, I2C0_SDA_B, SEL_I2C00_1),
 	PINMUX_IPSR_MSEL(IP13_20_18, SCIFA4_TXD_D, SEL_SCIFA4_3),
-	PINMUX_IPSR_DATA(IP13_20_18, VI1_FIELD),
+	PINMUX_IPSR_GPSR(IP13_20_18, VI1_FIELD),
 	PINMUX_IPSR_MSEL(IP13_20_18, TS_SCK_C, SEL_TSIF0_2),
 	PINMUX_IPSR_MSEL(IP13_20_18, RIF0_CLK_B, SEL_DR0_1),
 	PINMUX_IPSR_MSEL(IP13_20_18, BPFCLK_E, SEL_DARC_4),
@@ -1472,7 +1472,7 @@
 	PINMUX_IPSR_MSEL(IP13_23_21, AUDIO_CLKC, SEL_ADG_0),
 	PINMUX_IPSR_MSEL(IP13_23_21, I2C4_SCL_B, SEL_I2C04_1),
 	PINMUX_IPSR_MSEL(IP13_23_21, SCIFA5_RXD_D, SEL_SCIFA5_3),
-	PINMUX_IPSR_DATA(IP13_23_21, VI1_HSYNC_N),
+	PINMUX_IPSR_GPSR(IP13_23_21, VI1_HSYNC_N),
 	PINMUX_IPSR_MSEL(IP13_23_21, TS_SDEN_C, SEL_TSIF0_2),
 	PINMUX_IPSR_MSEL(IP13_23_21, RIF0_D0_B, SEL_DR0_1),
 	PINMUX_IPSR_MSEL(IP13_23_21, FMCLK_E, SEL_DARC_4),
@@ -1480,7 +1480,7 @@
 	PINMUX_IPSR_MSEL(IP13_26_24, AUDIO_CLKOUT, SEL_ADG_0),
 	PINMUX_IPSR_MSEL(IP13_26_24, I2C4_SDA_B, SEL_I2C04_1),
 	PINMUX_IPSR_MSEL(IP13_26_24, SCIFA5_TXD_D, SEL_SCIFA5_3),
-	PINMUX_IPSR_DATA(IP13_26_24, VI1_VSYNC_N),
+	PINMUX_IPSR_GPSR(IP13_26_24, VI1_VSYNC_N),
 	PINMUX_IPSR_MSEL(IP13_26_24, TS_SPSYNC_C, SEL_TSIF0_2),
 	PINMUX_IPSR_MSEL(IP13_26_24, RIF0_D1_B, SEL_DR1_1),
 	PINMUX_IPSR_MSEL(IP13_26_24, FMIN_E, SEL_DARC_4),
@@ -1491,6 +1491,197 @@
 	PINMUX_GPIO_GP_ALL(),
 };
 
+/* - Audio Clock ------------------------------------------------------------ */
+static const unsigned int audio_clka_pins[] = {
+	/* CLKA */
+	RCAR_GP_PIN(5, 20),
+};
+static const unsigned int audio_clka_mux[] = {
+	AUDIO_CLKA_MARK,
+};
+static const unsigned int audio_clka_b_pins[] = {
+	/* CLKA */
+	RCAR_GP_PIN(3, 25),
+};
+static const unsigned int audio_clka_b_mux[] = {
+	AUDIO_CLKA_B_MARK,
+};
+static const unsigned int audio_clka_c_pins[] = {
+	/* CLKA */
+	RCAR_GP_PIN(4, 20),
+};
+static const unsigned int audio_clka_c_mux[] = {
+	AUDIO_CLKA_C_MARK,
+};
+static const unsigned int audio_clka_d_pins[] = {
+	/* CLKA */
+	RCAR_GP_PIN(5, 0),
+};
+static const unsigned int audio_clka_d_mux[] = {
+	AUDIO_CLKA_D_MARK,
+};
+static const unsigned int audio_clkb_pins[] = {
+	/* CLKB */
+	RCAR_GP_PIN(5, 21),
+};
+static const unsigned int audio_clkb_mux[] = {
+	AUDIO_CLKB_MARK,
+};
+static const unsigned int audio_clkb_b_pins[] = {
+	/* CLKB */
+	RCAR_GP_PIN(3, 26),
+};
+static const unsigned int audio_clkb_b_mux[] = {
+	AUDIO_CLKB_B_MARK,
+};
+static const unsigned int audio_clkb_c_pins[] = {
+	/* CLKB */
+	RCAR_GP_PIN(4, 21),
+};
+static const unsigned int audio_clkb_c_mux[] = {
+	AUDIO_CLKB_C_MARK,
+};
+static const unsigned int audio_clkc_pins[] = {
+	/* CLKC */
+	RCAR_GP_PIN(5, 22),
+};
+static const unsigned int audio_clkc_mux[] = {
+	AUDIO_CLKC_MARK,
+};
+static const unsigned int audio_clkc_b_pins[] = {
+	/* CLKC */
+	RCAR_GP_PIN(3, 29),
+};
+static const unsigned int audio_clkc_b_mux[] = {
+	AUDIO_CLKC_B_MARK,
+};
+static const unsigned int audio_clkc_c_pins[] = {
+	/* CLKC */
+	RCAR_GP_PIN(4, 22),
+};
+static const unsigned int audio_clkc_c_mux[] = {
+	AUDIO_CLKC_C_MARK,
+};
+static const unsigned int audio_clkout_pins[] = {
+	/* CLKOUT */
+	RCAR_GP_PIN(5, 23),
+};
+static const unsigned int audio_clkout_mux[] = {
+	AUDIO_CLKOUT_MARK,
+};
+static const unsigned int audio_clkout_b_pins[] = {
+	/* CLKOUT */
+	RCAR_GP_PIN(3, 12),
+};
+static const unsigned int audio_clkout_b_mux[] = {
+	AUDIO_CLKOUT_B_MARK,
+};
+static const unsigned int audio_clkout_c_pins[] = {
+	/* CLKOUT */
+	RCAR_GP_PIN(4, 23),
+};
+static const unsigned int audio_clkout_c_mux[] = {
+	AUDIO_CLKOUT_C_MARK,
+};
+/* - AVB -------------------------------------------------------------------- */
+static const unsigned int avb_link_pins[] = {
+	RCAR_GP_PIN(3, 26),
+};
+static const unsigned int avb_link_mux[] = {
+	AVB_LINK_MARK,
+};
+static const unsigned int avb_magic_pins[] = {
+	RCAR_GP_PIN(3, 27),
+};
+static const unsigned int avb_magic_mux[] = {
+	AVB_MAGIC_MARK,
+};
+static const unsigned int avb_phy_int_pins[] = {
+	RCAR_GP_PIN(3, 28),
+};
+static const unsigned int avb_phy_int_mux[] = {
+	AVB_PHY_INT_MARK,
+};
+static const unsigned int avb_mdio_pins[] = {
+	RCAR_GP_PIN(3, 24), RCAR_GP_PIN(3, 25),
+};
+static const unsigned int avb_mdio_mux[] = {
+	AVB_MDC_MARK, AVB_MDIO_MARK,
+};
+static const unsigned int avb_mii_pins[] = {
+	RCAR_GP_PIN(3, 14), RCAR_GP_PIN(3, 15), RCAR_GP_PIN(3, 16),
+	RCAR_GP_PIN(3, 17),
+
+	RCAR_GP_PIN(3, 2), RCAR_GP_PIN(3, 3), RCAR_GP_PIN(3, 4),
+	RCAR_GP_PIN(3, 5),
+
+	RCAR_GP_PIN(3, 10), RCAR_GP_PIN(3, 0), RCAR_GP_PIN(3, 1),
+	RCAR_GP_PIN(3, 29), RCAR_GP_PIN(3, 12), RCAR_GP_PIN(3, 22),
+	RCAR_GP_PIN(3, 13), RCAR_GP_PIN(3, 11),
+};
+static const unsigned int avb_mii_mux[] = {
+	AVB_TXD0_MARK, AVB_TXD1_MARK, AVB_TXD2_MARK,
+	AVB_TXD3_MARK,
+
+	AVB_RXD0_MARK, AVB_RXD1_MARK, AVB_RXD2_MARK,
+	AVB_RXD3_MARK,
+
+	AVB_RX_ER_MARK, AVB_RX_CLK_MARK, AVB_RX_DV_MARK,
+	AVB_CRS_MARK, AVB_TX_EN_MARK, AVB_TX_ER_MARK,
+	AVB_TX_CLK_MARK, AVB_COL_MARK,
+};
+static const unsigned int avb_gmii_pins[] = {
+	RCAR_GP_PIN(3, 14), RCAR_GP_PIN(3, 15), RCAR_GP_PIN(3, 16),
+	RCAR_GP_PIN(3, 17), RCAR_GP_PIN(3, 18), RCAR_GP_PIN(3, 19),
+	RCAR_GP_PIN(3, 20), RCAR_GP_PIN(3, 21),
+
+	RCAR_GP_PIN(3, 2), RCAR_GP_PIN(3, 3), RCAR_GP_PIN(3, 4),
+	RCAR_GP_PIN(3, 5), RCAR_GP_PIN(3, 6), RCAR_GP_PIN(3, 7),
+	RCAR_GP_PIN(3, 8), RCAR_GP_PIN(3, 9),
+
+	RCAR_GP_PIN(3, 10), RCAR_GP_PIN(3, 0), RCAR_GP_PIN(3, 1),
+	RCAR_GP_PIN(3, 29), RCAR_GP_PIN(3, 23), RCAR_GP_PIN(3, 30),
+	RCAR_GP_PIN(3, 12), RCAR_GP_PIN(3, 22), RCAR_GP_PIN(3, 13),
+	RCAR_GP_PIN(3, 11),
+};
+static const unsigned int avb_gmii_mux[] = {
+	AVB_TXD0_MARK, AVB_TXD1_MARK, AVB_TXD2_MARK,
+	AVB_TXD3_MARK, AVB_TXD4_MARK, AVB_TXD5_MARK,
+	AVB_TXD6_MARK, AVB_TXD7_MARK,
+
+	AVB_RXD0_MARK, AVB_RXD1_MARK, AVB_RXD2_MARK,
+	AVB_RXD3_MARK, AVB_RXD4_MARK, AVB_RXD5_MARK,
+	AVB_RXD6_MARK, AVB_RXD7_MARK,
+
+	AVB_RX_ER_MARK, AVB_RX_CLK_MARK, AVB_RX_DV_MARK,
+	AVB_CRS_MARK, AVB_GTX_CLK_MARK, AVB_GTXREFCLK_MARK,
+	AVB_TX_EN_MARK, AVB_TX_ER_MARK, AVB_TX_CLK_MARK,
+	AVB_COL_MARK,
+};
+static const unsigned int avb_avtp_capture_pins[] = {
+	RCAR_GP_PIN(5, 11),
+};
+static const unsigned int avb_avtp_capture_mux[] = {
+	AVB_AVTP_CAPTURE_MARK,
+};
+static const unsigned int avb_avtp_match_pins[] = {
+	RCAR_GP_PIN(5, 12),
+};
+static const unsigned int avb_avtp_match_mux[] = {
+	AVB_AVTP_MATCH_MARK,
+};
+static const unsigned int avb_avtp_capture_b_pins[] = {
+	RCAR_GP_PIN(1, 1),
+};
+static const unsigned int avb_avtp_capture_b_mux[] = {
+	AVB_AVTP_CAPTURE_B_MARK,
+};
+static const unsigned int avb_avtp_match_b_pins[] = {
+	RCAR_GP_PIN(1, 2),
+};
+static const unsigned int avb_avtp_match_b_mux[] = {
+	AVB_AVTP_MATCH_B_MARK,
+};
 /* - ETH -------------------------------------------------------------------- */
 static const unsigned int eth_link_pins[] = {
 	/* LINK */
@@ -2751,6 +2942,245 @@
 static const unsigned int sdhi2_wp_mux[] = {
 	SD2_WP_MARK,
 };
+/* - SSI -------------------------------------------------------------------- */
+static const unsigned int ssi0_data_pins[] = {
+	/* SDATA0 */
+	RCAR_GP_PIN(5, 3),
+};
+static const unsigned int ssi0_data_mux[] = {
+	SSI_SDATA0_MARK,
+};
+static const unsigned int ssi0129_ctrl_pins[] = {
+	/* SCK0129, WS0129 */
+	RCAR_GP_PIN(5, 1), RCAR_GP_PIN(5, 2),
+};
+static const unsigned int ssi0129_ctrl_mux[] = {
+	SSI_SCK0129_MARK, SSI_WS0129_MARK,
+};
+static const unsigned int ssi1_data_pins[] = {
+	/* SDATA1 */
+	RCAR_GP_PIN(5, 13),
+};
+static const unsigned int ssi1_data_mux[] = {
+	SSI_SDATA1_MARK,
+};
+static const unsigned int ssi1_ctrl_pins[] = {
+	/* SCK1, WS1 */
+	RCAR_GP_PIN(5, 11), RCAR_GP_PIN(5, 12),
+};
+static const unsigned int ssi1_ctrl_mux[] = {
+	SSI_SCK1_MARK, SSI_WS1_MARK,
+};
+static const unsigned int ssi1_data_b_pins[] = {
+	/* SDATA1 */
+	RCAR_GP_PIN(4, 13),
+};
+static const unsigned int ssi1_data_b_mux[] = {
+	SSI_SDATA1_B_MARK,
+};
+static const unsigned int ssi1_ctrl_b_pins[] = {
+	/* SCK1, WS1 */
+	RCAR_GP_PIN(4, 11), RCAR_GP_PIN(4, 12),
+};
+static const unsigned int ssi1_ctrl_b_mux[] = {
+	SSI_SCK1_B_MARK, SSI_WS1_B_MARK,
+};
+static const unsigned int ssi2_data_pins[] = {
+	/* SDATA2 */
+	RCAR_GP_PIN(5, 16),
+};
+static const unsigned int ssi2_data_mux[] = {
+	SSI_SDATA2_MARK,
+};
+static const unsigned int ssi2_ctrl_pins[] = {
+	/* SCK2, WS2 */
+	RCAR_GP_PIN(5, 14), RCAR_GP_PIN(5, 15),
+};
+static const unsigned int ssi2_ctrl_mux[] = {
+	SSI_SCK2_MARK, SSI_WS2_MARK,
+};
+static const unsigned int ssi2_data_b_pins[] = {
+	/* SDATA2 */
+	RCAR_GP_PIN(4, 16),
+};
+static const unsigned int ssi2_data_b_mux[] = {
+	SSI_SDATA2_B_MARK,
+};
+static const unsigned int ssi2_ctrl_b_pins[] = {
+	/* SCK2, WS2 */
+	RCAR_GP_PIN(4, 14), RCAR_GP_PIN(4, 15),
+};
+static const unsigned int ssi2_ctrl_b_mux[] = {
+	SSI_SCK2_B_MARK, SSI_WS2_B_MARK,
+};
+static const unsigned int ssi3_data_pins[] = {
+	/* SDATA3 */
+	RCAR_GP_PIN(5, 6),
+};
+static const unsigned int ssi3_data_mux[] = {
+	SSI_SDATA3_MARK
+};
+static const unsigned int ssi34_ctrl_pins[] = {
+	/* SCK34, WS34 */
+	RCAR_GP_PIN(5, 4), RCAR_GP_PIN(5, 5),
+};
+static const unsigned int ssi34_ctrl_mux[] = {
+	SSI_SCK34_MARK, SSI_WS34_MARK,
+};
+static const unsigned int ssi4_data_pins[] = {
+	/* SDATA4 */
+	RCAR_GP_PIN(5, 9),
+};
+static const unsigned int ssi4_data_mux[] = {
+	SSI_SDATA4_MARK,
+};
+static const unsigned int ssi4_ctrl_pins[] = {
+	/* SCK4, WS4 */
+	RCAR_GP_PIN(5, 7), RCAR_GP_PIN(5, 8),
+};
+static const unsigned int ssi4_ctrl_mux[] = {
+	SSI_SCK4_MARK, SSI_WS4_MARK,
+};
+static const unsigned int ssi4_data_b_pins[] = {
+	/* SDATA4 */
+	RCAR_GP_PIN(4, 22),
+};
+static const unsigned int ssi4_data_b_mux[] = {
+	SSI_SDATA4_B_MARK,
+};
+static const unsigned int ssi4_ctrl_b_pins[] = {
+	/* SCK4, WS4 */
+	RCAR_GP_PIN(4, 20), RCAR_GP_PIN(4, 21),
+};
+static const unsigned int ssi4_ctrl_b_mux[] = {
+	SSI_SCK4_B_MARK, SSI_WS4_B_MARK,
+};
+static const unsigned int ssi5_data_pins[] = {
+	/* SDATA5 */
+	RCAR_GP_PIN(4, 26),
+};
+static const unsigned int ssi5_data_mux[] = {
+	SSI_SDATA5_MARK,
+};
+static const unsigned int ssi5_ctrl_pins[] = {
+	/* SCK5, WS5 */
+	RCAR_GP_PIN(4, 24), RCAR_GP_PIN(4, 25),
+};
+static const unsigned int ssi5_ctrl_mux[] = {
+	SSI_SCK5_MARK, SSI_WS5_MARK,
+};
+static const unsigned int ssi5_data_b_pins[] = {
+	/* SDATA5 */
+	RCAR_GP_PIN(3, 21),
+};
+static const unsigned int ssi5_data_b_mux[] = {
+	SSI_SDATA5_B_MARK,
+};
+static const unsigned int ssi5_ctrl_b_pins[] = {
+	/* SCK5, WS5 */
+	RCAR_GP_PIN(3, 19), RCAR_GP_PIN(3, 20),
+};
+static const unsigned int ssi5_ctrl_b_mux[] = {
+	SSI_SCK5_B_MARK, SSI_WS5_B_MARK,
+};
+static const unsigned int ssi6_data_pins[] = {
+	/* SDATA6 */
+	RCAR_GP_PIN(4, 29),
+};
+static const unsigned int ssi6_data_mux[] = {
+	SSI_SDATA6_MARK,
+};
+static const unsigned int ssi6_ctrl_pins[] = {
+	/* SCK6, WS6 */
+	RCAR_GP_PIN(4, 27), RCAR_GP_PIN(4, 28),
+};
+static const unsigned int ssi6_ctrl_mux[] = {
+	SSI_SCK6_MARK, SSI_WS6_MARK,
+};
+static const unsigned int ssi6_data_b_pins[] = {
+	/* SDATA6 */
+	RCAR_GP_PIN(3, 24),
+};
+static const unsigned int ssi6_data_b_mux[] = {
+	SSI_SDATA6_B_MARK,
+};
+static const unsigned int ssi6_ctrl_b_pins[] = {
+	/* SCK6, WS6 */
+	RCAR_GP_PIN(3, 22), RCAR_GP_PIN(3, 23),
+};
+static const unsigned int ssi6_ctrl_b_mux[] = {
+	SSI_SCK6_B_MARK, SSI_WS6_B_MARK,
+};
+static const unsigned int ssi7_data_pins[] = {
+	/* SDATA7 */
+	RCAR_GP_PIN(5, 0),
+};
+static const unsigned int ssi7_data_mux[] = {
+	SSI_SDATA7_MARK,
+};
+static const unsigned int ssi78_ctrl_pins[] = {
+	/* SCK78, WS78 */
+	RCAR_GP_PIN(4, 30), RCAR_GP_PIN(4, 31),
+};
+static const unsigned int ssi78_ctrl_mux[] = {
+	SSI_SCK78_MARK, SSI_WS78_MARK,
+};
+static const unsigned int ssi7_data_b_pins[] = {
+	/* SDATA7 */
+	RCAR_GP_PIN(3, 27),
+};
+static const unsigned int ssi7_data_b_mux[] = {
+	SSI_SDATA7_B_MARK,
+};
+static const unsigned int ssi78_ctrl_b_pins[] = {
+	/* SCK78, WS78 */
+	RCAR_GP_PIN(3, 25), RCAR_GP_PIN(3, 26),
+};
+static const unsigned int ssi78_ctrl_b_mux[] = {
+	SSI_SCK78_B_MARK, SSI_WS78_B_MARK,
+};
+static const unsigned int ssi8_data_pins[] = {
+	/* SDATA8 */
+	RCAR_GP_PIN(5, 10),
+};
+static const unsigned int ssi8_data_mux[] = {
+	SSI_SDATA8_MARK,
+};
+static const unsigned int ssi8_data_b_pins[] = {
+	/* SDATA8 */
+	RCAR_GP_PIN(3, 28),
+};
+static const unsigned int ssi8_data_b_mux[] = {
+	SSI_SDATA8_B_MARK,
+};
+static const unsigned int ssi9_data_pins[] = {
+	/* SDATA9 */
+	RCAR_GP_PIN(5, 19),
+};
+static const unsigned int ssi9_data_mux[] = {
+	SSI_SDATA9_MARK,
+};
+static const unsigned int ssi9_ctrl_pins[] = {
+	/* SCK9, WS9 */
+	RCAR_GP_PIN(5, 17), RCAR_GP_PIN(5, 18),
+};
+static const unsigned int ssi9_ctrl_mux[] = {
+	SSI_SCK9_MARK, SSI_WS9_MARK,
+};
+static const unsigned int ssi9_data_b_pins[] = {
+	/* SDATA9 */
+	RCAR_GP_PIN(4, 19),
+};
+static const unsigned int ssi9_data_b_mux[] = {
+	SSI_SDATA9_B_MARK,
+};
+static const unsigned int ssi9_ctrl_b_pins[] = {
+	/* SCK9, WS9 */
+	RCAR_GP_PIN(4, 17), RCAR_GP_PIN(4, 18),
+};
+static const unsigned int ssi9_ctrl_b_mux[] = {
+	SSI_SCK9_B_MARK, SSI_WS9_B_MARK,
+};
 /* - USB0 ------------------------------------------------------------------- */
 static const unsigned int usb0_pins[] = {
 	RCAR_GP_PIN(5, 24), /* PWEN */
@@ -2911,6 +3341,29 @@
 };
 
 static const struct sh_pfc_pin_group pinmux_groups[] = {
+	SH_PFC_PIN_GROUP(audio_clka),
+	SH_PFC_PIN_GROUP(audio_clka_b),
+	SH_PFC_PIN_GROUP(audio_clka_c),
+	SH_PFC_PIN_GROUP(audio_clka_d),
+	SH_PFC_PIN_GROUP(audio_clkb),
+	SH_PFC_PIN_GROUP(audio_clkb_b),
+	SH_PFC_PIN_GROUP(audio_clkb_c),
+	SH_PFC_PIN_GROUP(audio_clkc),
+	SH_PFC_PIN_GROUP(audio_clkc_b),
+	SH_PFC_PIN_GROUP(audio_clkc_c),
+	SH_PFC_PIN_GROUP(audio_clkout),
+	SH_PFC_PIN_GROUP(audio_clkout_b),
+	SH_PFC_PIN_GROUP(audio_clkout_c),
+	SH_PFC_PIN_GROUP(avb_link),
+	SH_PFC_PIN_GROUP(avb_magic),
+	SH_PFC_PIN_GROUP(avb_phy_int),
+	SH_PFC_PIN_GROUP(avb_mdio),
+	SH_PFC_PIN_GROUP(avb_mii),
+	SH_PFC_PIN_GROUP(avb_gmii),
+	SH_PFC_PIN_GROUP(avb_avtp_capture),
+	SH_PFC_PIN_GROUP(avb_avtp_match),
+	SH_PFC_PIN_GROUP(avb_avtp_capture_b),
+	SH_PFC_PIN_GROUP(avb_avtp_match_b),
 	SH_PFC_PIN_GROUP(eth_link),
 	SH_PFC_PIN_GROUP(eth_magic),
 	SH_PFC_PIN_GROUP(eth_mdio),
@@ -3084,6 +3537,40 @@
 	SH_PFC_PIN_GROUP(sdhi2_ctrl),
 	SH_PFC_PIN_GROUP(sdhi2_cd),
 	SH_PFC_PIN_GROUP(sdhi2_wp),
+	SH_PFC_PIN_GROUP(ssi0_data),
+	SH_PFC_PIN_GROUP(ssi0129_ctrl),
+	SH_PFC_PIN_GROUP(ssi1_data),
+	SH_PFC_PIN_GROUP(ssi1_ctrl),
+	SH_PFC_PIN_GROUP(ssi1_data_b),
+	SH_PFC_PIN_GROUP(ssi1_ctrl_b),
+	SH_PFC_PIN_GROUP(ssi2_data),
+	SH_PFC_PIN_GROUP(ssi2_ctrl),
+	SH_PFC_PIN_GROUP(ssi2_data_b),
+	SH_PFC_PIN_GROUP(ssi2_ctrl_b),
+	SH_PFC_PIN_GROUP(ssi3_data),
+	SH_PFC_PIN_GROUP(ssi34_ctrl),
+	SH_PFC_PIN_GROUP(ssi4_data),
+	SH_PFC_PIN_GROUP(ssi4_ctrl),
+	SH_PFC_PIN_GROUP(ssi4_data_b),
+	SH_PFC_PIN_GROUP(ssi4_ctrl_b),
+	SH_PFC_PIN_GROUP(ssi5_data),
+	SH_PFC_PIN_GROUP(ssi5_ctrl),
+	SH_PFC_PIN_GROUP(ssi5_data_b),
+	SH_PFC_PIN_GROUP(ssi5_ctrl_b),
+	SH_PFC_PIN_GROUP(ssi6_data),
+	SH_PFC_PIN_GROUP(ssi6_ctrl),
+	SH_PFC_PIN_GROUP(ssi6_data_b),
+	SH_PFC_PIN_GROUP(ssi6_ctrl_b),
+	SH_PFC_PIN_GROUP(ssi7_data),
+	SH_PFC_PIN_GROUP(ssi78_ctrl),
+	SH_PFC_PIN_GROUP(ssi7_data_b),
+	SH_PFC_PIN_GROUP(ssi78_ctrl_b),
+	SH_PFC_PIN_GROUP(ssi8_data),
+	SH_PFC_PIN_GROUP(ssi8_data_b),
+	SH_PFC_PIN_GROUP(ssi9_data),
+	SH_PFC_PIN_GROUP(ssi9_ctrl),
+	SH_PFC_PIN_GROUP(ssi9_data_b),
+	SH_PFC_PIN_GROUP(ssi9_ctrl_b),
 	SH_PFC_PIN_GROUP(usb0),
 	SH_PFC_PIN_GROUP(usb1),
 	VIN_DATA_PIN_GROUP(vin0_data, 24),
@@ -3106,6 +3593,35 @@
 	SH_PFC_PIN_GROUP(vin1_clk),
 };
 
+static const char * const audio_clk_groups[] = {
+	"audio_clka",
+	"audio_clka_b",
+	"audio_clka_c",
+	"audio_clka_d",
+	"audio_clkb",
+	"audio_clkb_b",
+	"audio_clkb_c",
+	"audio_clkc",
+	"audio_clkc_b",
+	"audio_clkc_c",
+	"audio_clkout",
+	"audio_clkout_b",
+	"audio_clkout_c",
+};
+
+static const char * const avb_groups[] = {
+	"avb_link",
+	"avb_magic",
+	"avb_phy_int",
+	"avb_mdio",
+	"avb_mii",
+	"avb_gmii",
+	"avb_avtp_capture",
+	"avb_avtp_match",
+	"avb_avtp_capture_b",
+	"avb_avtp_match_b",
+};
+
 static const char * const eth_groups[] = {
 	"eth_link",
 	"eth_magic",
@@ -3381,6 +3897,43 @@
 	"sdhi2_wp",
 };
 
+static const char * const ssi_groups[] = {
+	"ssi0_data",
+	"ssi0129_ctrl",
+	"ssi1_data",
+	"ssi1_ctrl",
+	"ssi1_data_b",
+	"ssi1_ctrl_b",
+	"ssi2_data",
+	"ssi2_ctrl",
+	"ssi2_data_b",
+	"ssi2_ctrl_b",
+	"ssi3_data",
+	"ssi34_ctrl",
+	"ssi4_data",
+	"ssi4_ctrl",
+	"ssi4_data_b",
+	"ssi4_ctrl_b",
+	"ssi5_data",
+	"ssi5_ctrl",
+	"ssi5_data_b",
+	"ssi5_ctrl_b",
+	"ssi6_data",
+	"ssi6_ctrl",
+	"ssi6_data_b",
+	"ssi6_ctrl_b",
+	"ssi7_data",
+	"ssi78_ctrl",
+	"ssi7_data_b",
+	"ssi78_ctrl_b",
+	"ssi8_data",
+	"ssi8_data_b",
+	"ssi9_data",
+	"ssi9_ctrl",
+	"ssi9_data_b",
+	"ssi9_ctrl_b",
+};
+
 static const char * const usb0_groups[] = {
 	"usb0",
 };
@@ -3414,6 +3967,8 @@
 };
 
 static const struct sh_pfc_function pinmux_functions[] = {
+	SH_PFC_FUNCTION(audio_clk),
+	SH_PFC_FUNCTION(avb),
 	SH_PFC_FUNCTION(eth),
 	SH_PFC_FUNCTION(hscif0),
 	SH_PFC_FUNCTION(hscif1),
@@ -3448,6 +4003,7 @@
 	SH_PFC_FUNCTION(sdhi0),
 	SH_PFC_FUNCTION(sdhi1),
 	SH_PFC_FUNCTION(sdhi2),
+	SH_PFC_FUNCTION(ssi),
 	SH_PFC_FUNCTION(usb0),
 	SH_PFC_FUNCTION(usb1),
 	SH_PFC_FUNCTION(vin0),
@@ -3974,6 +4530,7 @@
 		FN_DU0_DISP, FN_QPOLA, FN_CC50_STATE30, 0,
 		/* IP6_3_2 [2] */
 		FN_DU0_EXODDF_DU0_ODDF_DISP_CDE, FN_QCPV_QDE, FN_CC50_STATE29,
+		0,
 		/* IP6_1_0 [2] */
 		FN_DU0_EXVSYNC_DU0_VSYNC, FN_QSTB_QHE, FN_CC50_STATE28, 0, }
 	},
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7795.c b/drivers/pinctrl/sh-pfc/pfc-r8a7795.c
index ce4f5cd..5979dab 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a7795.c
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a7795.c
@@ -189,8 +189,8 @@
 #define GPSR6_4		F_(SSI_SDATA2_A,	IP14_7_4)
 #define GPSR6_3		F_(SSI_SDATA1_A,	IP14_3_0)
 #define GPSR6_2		F_(SSI_SDATA0,		IP13_31_28)
-#define GPSR6_1		F_(SSI_WS0129,		IP13_27_24)
-#define GPSR6_0		F_(SSI_SCK0129,		IP13_23_20)
+#define GPSR6_1		F_(SSI_WS01239,		IP13_27_24)
+#define GPSR6_0		F_(SSI_SCK01239,	IP13_23_20)
 
 /* GPSR7 */
 #define GPSR7_3		FM(HDMI1_CEC)
@@ -315,8 +315,8 @@
 #define IP13_11_8	FM(MLB_CLK)		F_(0, 0)	FM(MSIOF1_SCK_F)	F_(0, 0)			FM(SCL1_B)	F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
 #define IP13_15_12	FM(MLB_SIG)		FM(RX1_B)	FM(MSIOF1_SYNC_F)	F_(0, 0)			FM(SDA1_B)	F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
 #define IP13_19_16	FM(MLB_DAT)		FM(TX1_B)	FM(MSIOF1_RXD_F)	F_(0, 0)			F_(0, 0)	F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP13_23_20	FM(SSI_SCK0129)		F_(0, 0)	FM(MSIOF1_TXD_F)	F_(0, 0)			F_(0, 0)	F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP13_27_24	FM(SSI_WS0129)		F_(0, 0)	FM(MSIOF1_SS1_F)	F_(0, 0)			F_(0, 0)	F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP13_23_20	FM(SSI_SCK01239)	F_(0, 0)	FM(MSIOF1_TXD_F)	F_(0, 0)			F_(0, 0)	F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP13_27_24	FM(SSI_WS01239)		F_(0, 0)	FM(MSIOF1_SS1_F)	F_(0, 0)			F_(0, 0)	F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
 #define IP13_31_28	FM(SSI_SDATA0)		F_(0, 0)	FM(MSIOF1_SS2_F)	F_(0, 0)			F_(0, 0)	F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
 #define IP14_3_0	FM(SSI_SDATA1_A)	F_(0, 0)	F_(0, 0)		F_(0, 0)			F_(0, 0)	F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
 #define IP14_7_4	FM(SSI_SDATA2_A)	F_(0, 0)	F_(0, 0)		F_(0, 0)			FM(SSI_SCK1_B)	F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
@@ -478,7 +478,6 @@
 #define MOD_SEL2_31		FM(I2C_SEL_5_0)		FM(I2C_SEL_5_1)
 #define MOD_SEL2_30		FM(I2C_SEL_3_0)		FM(I2C_SEL_3_1)
 #define MOD_SEL2_29		FM(I2C_SEL_0_0)		FM(I2C_SEL_0_1)
-#define MOD_SEL2_2_1		FM(SEL_VSP_0)		FM(SEL_VSP_1)		FM(SEL_VSP_2)		FM(SEL_VSP_3)
 #define MOD_SEL2_0		FM(SEL_VIN4_0)		FM(SEL_VIN4_1)
 
 #define PINMUX_MOD_SELS\
@@ -512,7 +511,7 @@
 MOD_SEL0_5_4		MOD_SEL1_5 \
 			MOD_SEL1_4 \
 MOD_SEL0_3		MOD_SEL1_3 \
-MOD_SEL0_2_1		MOD_SEL1_2		MOD_SEL2_2_1 \
+MOD_SEL0_2_1		MOD_SEL1_2 \
 			MOD_SEL1_1 \
 			MOD_SEL1_0		MOD_SEL2_0
 
@@ -569,18 +568,18 @@
 	PINMUX_SINGLE(SSI_WS5),
 
 	/* IPSR0 */
-	PINMUX_IPSR_DATA(IP0_3_0,	AVB_MDC),
+	PINMUX_IPSR_GPSR(IP0_3_0,	AVB_MDC),
 	PINMUX_IPSR_MSEL(IP0_3_0,	MSIOF2_SS2_C,		SEL_MSIOF2_2),
 
-	PINMUX_IPSR_DATA(IP0_7_4,	AVB_MAGIC),
+	PINMUX_IPSR_GPSR(IP0_7_4,	AVB_MAGIC),
 	PINMUX_IPSR_MSEL(IP0_7_4,	MSIOF2_SS1_C,		SEL_MSIOF2_2),
 	PINMUX_IPSR_MSEL(IP0_7_4,	SCK4_A,			SEL_SCIF4_0),
 
-	PINMUX_IPSR_DATA(IP0_11_8,	AVB_PHY_INT),
+	PINMUX_IPSR_GPSR(IP0_11_8,	AVB_PHY_INT),
 	PINMUX_IPSR_MSEL(IP0_11_8,	MSIOF2_SYNC_C,		SEL_MSIOF2_2),
 	PINMUX_IPSR_MSEL(IP0_11_8,	RX4_A,			SEL_SCIF4_0),
 
-	PINMUX_IPSR_DATA(IP0_15_12,	AVB_LINK),
+	PINMUX_IPSR_GPSR(IP0_15_12,	AVB_LINK),
 	PINMUX_IPSR_MSEL(IP0_15_12,	MSIOF2_SCK_C,		SEL_MSIOF2_2),
 	PINMUX_IPSR_MSEL(IP0_15_12,	TX4_A,			SEL_SCIF4_0),
 
@@ -592,126 +591,126 @@
 	PINMUX_IPSR_MSEL(IP0_23_20,	MSIOF2_TXD_C,		SEL_MSIOF2_2),
 	PINMUX_IPSR_MSEL(IP0_23_20,	RTS4_N_TANS_A,		SEL_SCIF4_0),
 
-	PINMUX_IPSR_DATA(IP0_27_24,	IRQ0),
-	PINMUX_IPSR_DATA(IP0_27_24,	QPOLB),
-	PINMUX_IPSR_DATA(IP0_27_24,	DU_CDE),
+	PINMUX_IPSR_GPSR(IP0_27_24,	IRQ0),
+	PINMUX_IPSR_GPSR(IP0_27_24,	QPOLB),
+	PINMUX_IPSR_GPSR(IP0_27_24,	DU_CDE),
 	PINMUX_IPSR_MSEL(IP0_27_24,	VI4_DATA0_B,		SEL_VIN4_1),
 	PINMUX_IPSR_MSEL(IP0_27_24,	CAN0_TX_B,		SEL_RCAN0_1),
 	PINMUX_IPSR_MSEL(IP0_27_24,	CANFD0_TX_B,		SEL_CANFD0_1),
 
-	PINMUX_IPSR_DATA(IP0_31_28,	IRQ1),
-	PINMUX_IPSR_DATA(IP0_31_28,	QPOLA),
-	PINMUX_IPSR_DATA(IP0_31_28,	DU_DISP),
+	PINMUX_IPSR_GPSR(IP0_31_28,	IRQ1),
+	PINMUX_IPSR_GPSR(IP0_31_28,	QPOLA),
+	PINMUX_IPSR_GPSR(IP0_31_28,	DU_DISP),
 	PINMUX_IPSR_MSEL(IP0_31_28,	VI4_DATA1_B,		SEL_VIN4_1),
 	PINMUX_IPSR_MSEL(IP0_31_28,	CAN0_RX_B,		SEL_RCAN0_1),
 	PINMUX_IPSR_MSEL(IP0_31_28,	CANFD0_RX_B,		SEL_CANFD0_1),
 
 	/* IPSR1 */
-	PINMUX_IPSR_DATA(IP1_3_0,	IRQ2),
-	PINMUX_IPSR_DATA(IP1_3_0,	QCPV_QDE),
-	PINMUX_IPSR_DATA(IP1_3_0,	DU_EXODDF_DU_ODDF_DISP_CDE),
+	PINMUX_IPSR_GPSR(IP1_3_0,	IRQ2),
+	PINMUX_IPSR_GPSR(IP1_3_0,	QCPV_QDE),
+	PINMUX_IPSR_GPSR(IP1_3_0,	DU_EXODDF_DU_ODDF_DISP_CDE),
 	PINMUX_IPSR_MSEL(IP1_3_0,	VI4_DATA2_B,		SEL_VIN4_1),
 	PINMUX_IPSR_MSEL(IP1_3_0,	PWM3_B,			SEL_PWM3_1),
 
-	PINMUX_IPSR_DATA(IP1_7_4,	IRQ3),
-	PINMUX_IPSR_DATA(IP1_7_4,	QSTVB_QVE),
-	PINMUX_IPSR_DATA(IP1_7_4,	A25),
-	PINMUX_IPSR_DATA(IP1_7_4,	DU_DOTCLKOUT1),
+	PINMUX_IPSR_GPSR(IP1_7_4,	IRQ3),
+	PINMUX_IPSR_GPSR(IP1_7_4,	QSTVB_QVE),
+	PINMUX_IPSR_GPSR(IP1_7_4,	A25),
+	PINMUX_IPSR_GPSR(IP1_7_4,	DU_DOTCLKOUT1),
 	PINMUX_IPSR_MSEL(IP1_7_4,	VI4_DATA3_B,		SEL_VIN4_1),
 	PINMUX_IPSR_MSEL(IP1_7_4,	PWM4_B,			SEL_PWM4_1),
 
-	PINMUX_IPSR_DATA(IP1_11_8,	IRQ4),
-	PINMUX_IPSR_DATA(IP1_11_8,	QSTH_QHS),
-	PINMUX_IPSR_DATA(IP1_11_8,	A24),
-	PINMUX_IPSR_DATA(IP1_11_8,	DU_EXHSYNC_DU_HSYNC),
+	PINMUX_IPSR_GPSR(IP1_11_8,	IRQ4),
+	PINMUX_IPSR_GPSR(IP1_11_8,	QSTH_QHS),
+	PINMUX_IPSR_GPSR(IP1_11_8,	A24),
+	PINMUX_IPSR_GPSR(IP1_11_8,	DU_EXHSYNC_DU_HSYNC),
 	PINMUX_IPSR_MSEL(IP1_11_8,	VI4_DATA4_B,		SEL_VIN4_1),
 	PINMUX_IPSR_MSEL(IP1_11_8,	PWM5_B,			SEL_PWM5_1),
 
-	PINMUX_IPSR_DATA(IP1_15_12,	IRQ5),
-	PINMUX_IPSR_DATA(IP1_15_12,	QSTB_QHE),
-	PINMUX_IPSR_DATA(IP1_15_12,	A23),
-	PINMUX_IPSR_DATA(IP1_15_12,	DU_EXVSYNC_DU_VSYNC),
+	PINMUX_IPSR_GPSR(IP1_15_12,	IRQ5),
+	PINMUX_IPSR_GPSR(IP1_15_12,	QSTB_QHE),
+	PINMUX_IPSR_GPSR(IP1_15_12,	A23),
+	PINMUX_IPSR_GPSR(IP1_15_12,	DU_EXVSYNC_DU_VSYNC),
 	PINMUX_IPSR_MSEL(IP1_15_12,	VI4_DATA5_B,		SEL_VIN4_1),
 	PINMUX_IPSR_MSEL(IP1_15_12,	PWM6_B,			SEL_PWM6_1),
 
-	PINMUX_IPSR_DATA(IP1_19_16,	PWM0),
-	PINMUX_IPSR_DATA(IP1_19_16,	AVB_AVTP_PPS),
-	PINMUX_IPSR_DATA(IP1_19_16,	A22),
+	PINMUX_IPSR_GPSR(IP1_19_16,	PWM0),
+	PINMUX_IPSR_GPSR(IP1_19_16,	AVB_AVTP_PPS),
+	PINMUX_IPSR_GPSR(IP1_19_16,	A22),
 	PINMUX_IPSR_MSEL(IP1_19_16,	VI4_DATA6_B,		SEL_VIN4_1),
 	PINMUX_IPSR_MSEL(IP1_19_16,	IECLK_B,		SEL_IEBUS_1),
 
 	PINMUX_IPSR_MSEL(IP1_23_20,	PWM1_A,			SEL_PWM1_0),
-	PINMUX_IPSR_DATA(IP1_23_20,	A21),
+	PINMUX_IPSR_GPSR(IP1_23_20,	A21),
 	PINMUX_IPSR_MSEL(IP1_23_20,	HRX3_D,			SEL_HSCIF3_3),
 	PINMUX_IPSR_MSEL(IP1_23_20,	VI4_DATA7_B,		SEL_VIN4_1),
 	PINMUX_IPSR_MSEL(IP1_23_20,	IERX_B,			SEL_IEBUS_1),
 
 	PINMUX_IPSR_MSEL(IP1_27_24,	PWM2_A,			SEL_PWM2_0),
-	PINMUX_IPSR_DATA(IP1_27_24,	A20),
+	PINMUX_IPSR_GPSR(IP1_27_24,	A20),
 	PINMUX_IPSR_MSEL(IP1_27_24,	HTX3_D,			SEL_HSCIF3_3),
 	PINMUX_IPSR_MSEL(IP1_27_24,	IETX_B,			SEL_IEBUS_1),
 
-	PINMUX_IPSR_DATA(IP1_31_28,	A0),
-	PINMUX_IPSR_DATA(IP1_31_28,	LCDOUT16),
+	PINMUX_IPSR_GPSR(IP1_31_28,	A0),
+	PINMUX_IPSR_GPSR(IP1_31_28,	LCDOUT16),
 	PINMUX_IPSR_MSEL(IP1_31_28,	MSIOF3_SYNC_B,		SEL_MSIOF3_1),
-	PINMUX_IPSR_DATA(IP1_31_28,	VI4_DATA8),
-	PINMUX_IPSR_DATA(IP1_31_28,	DU_DB0),
+	PINMUX_IPSR_GPSR(IP1_31_28,	VI4_DATA8),
+	PINMUX_IPSR_GPSR(IP1_31_28,	DU_DB0),
 	PINMUX_IPSR_MSEL(IP1_31_28,	PWM3_A,			SEL_PWM3_0),
 
 	/* IPSR2 */
-	PINMUX_IPSR_DATA(IP2_3_0,	A1),
-	PINMUX_IPSR_DATA(IP2_3_0,	LCDOUT17),
+	PINMUX_IPSR_GPSR(IP2_3_0,	A1),
+	PINMUX_IPSR_GPSR(IP2_3_0,	LCDOUT17),
 	PINMUX_IPSR_MSEL(IP2_3_0,	MSIOF3_TXD_B,		SEL_MSIOF3_1),
-	PINMUX_IPSR_DATA(IP2_3_0,	VI4_DATA9),
-	PINMUX_IPSR_DATA(IP2_3_0,	DU_DB1),
+	PINMUX_IPSR_GPSR(IP2_3_0,	VI4_DATA9),
+	PINMUX_IPSR_GPSR(IP2_3_0,	DU_DB1),
 	PINMUX_IPSR_MSEL(IP2_3_0,	PWM4_A,			SEL_PWM4_0),
 
-	PINMUX_IPSR_DATA(IP2_7_4,	A2),
-	PINMUX_IPSR_DATA(IP2_7_4,	LCDOUT18),
+	PINMUX_IPSR_GPSR(IP2_7_4,	A2),
+	PINMUX_IPSR_GPSR(IP2_7_4,	LCDOUT18),
 	PINMUX_IPSR_MSEL(IP2_7_4,	MSIOF3_SCK_B,		SEL_MSIOF3_1),
-	PINMUX_IPSR_DATA(IP2_7_4,	VI4_DATA10),
-	PINMUX_IPSR_DATA(IP2_7_4,	DU_DB2),
+	PINMUX_IPSR_GPSR(IP2_7_4,	VI4_DATA10),
+	PINMUX_IPSR_GPSR(IP2_7_4,	DU_DB2),
 	PINMUX_IPSR_MSEL(IP2_7_4,	PWM5_A,			SEL_PWM5_0),
 
-	PINMUX_IPSR_DATA(IP2_11_8,	A3),
-	PINMUX_IPSR_DATA(IP2_11_8,	LCDOUT19),
+	PINMUX_IPSR_GPSR(IP2_11_8,	A3),
+	PINMUX_IPSR_GPSR(IP2_11_8,	LCDOUT19),
 	PINMUX_IPSR_MSEL(IP2_11_8,	MSIOF3_RXD_B,		SEL_MSIOF3_1),
-	PINMUX_IPSR_DATA(IP2_11_8,	VI4_DATA11),
-	PINMUX_IPSR_DATA(IP2_11_8,	DU_DB3),
+	PINMUX_IPSR_GPSR(IP2_11_8,	VI4_DATA11),
+	PINMUX_IPSR_GPSR(IP2_11_8,	DU_DB3),
 	PINMUX_IPSR_MSEL(IP2_11_8,	PWM6_A,			SEL_PWM6_0),
 
-	PINMUX_IPSR_DATA(IP2_15_12,	A4),
-	PINMUX_IPSR_DATA(IP2_15_12,	LCDOUT20),
+	PINMUX_IPSR_GPSR(IP2_15_12,	A4),
+	PINMUX_IPSR_GPSR(IP2_15_12,	LCDOUT20),
 	PINMUX_IPSR_MSEL(IP2_15_12,	MSIOF3_SS1_B,		SEL_MSIOF3_1),
-	PINMUX_IPSR_DATA(IP2_15_12,	VI4_DATA12),
-	PINMUX_IPSR_DATA(IP2_15_12,	VI5_DATA12),
-	PINMUX_IPSR_DATA(IP2_15_12,	DU_DB4),
+	PINMUX_IPSR_GPSR(IP2_15_12,	VI4_DATA12),
+	PINMUX_IPSR_GPSR(IP2_15_12,	VI5_DATA12),
+	PINMUX_IPSR_GPSR(IP2_15_12,	DU_DB4),
 
-	PINMUX_IPSR_DATA(IP2_19_16,	A5),
-	PINMUX_IPSR_DATA(IP2_19_16,	LCDOUT21),
+	PINMUX_IPSR_GPSR(IP2_19_16,	A5),
+	PINMUX_IPSR_GPSR(IP2_19_16,	LCDOUT21),
 	PINMUX_IPSR_MSEL(IP2_19_16,	MSIOF3_SS2_B,		SEL_MSIOF3_1),
 	PINMUX_IPSR_MSEL(IP2_19_16,	SCK4_B,			SEL_SCIF4_1),
-	PINMUX_IPSR_DATA(IP2_19_16,	VI4_DATA13),
-	PINMUX_IPSR_DATA(IP2_19_16,	VI5_DATA13),
-	PINMUX_IPSR_DATA(IP2_19_16,	DU_DB5),
+	PINMUX_IPSR_GPSR(IP2_19_16,	VI4_DATA13),
+	PINMUX_IPSR_GPSR(IP2_19_16,	VI5_DATA13),
+	PINMUX_IPSR_GPSR(IP2_19_16,	DU_DB5),
 
-	PINMUX_IPSR_DATA(IP2_23_20,	A6),
-	PINMUX_IPSR_DATA(IP2_23_20,	LCDOUT22),
+	PINMUX_IPSR_GPSR(IP2_23_20,	A6),
+	PINMUX_IPSR_GPSR(IP2_23_20,	LCDOUT22),
 	PINMUX_IPSR_MSEL(IP2_23_20,	MSIOF2_SS1_A,		SEL_MSIOF2_0),
 	PINMUX_IPSR_MSEL(IP2_23_20,	RX4_B,			SEL_SCIF4_1),
-	PINMUX_IPSR_DATA(IP2_23_20,	VI4_DATA14),
-	PINMUX_IPSR_DATA(IP2_23_20,	VI5_DATA14),
-	PINMUX_IPSR_DATA(IP2_23_20,	DU_DB6),
+	PINMUX_IPSR_GPSR(IP2_23_20,	VI4_DATA14),
+	PINMUX_IPSR_GPSR(IP2_23_20,	VI5_DATA14),
+	PINMUX_IPSR_GPSR(IP2_23_20,	DU_DB6),
 
-	PINMUX_IPSR_DATA(IP2_27_24,	A7),
-	PINMUX_IPSR_DATA(IP2_27_24,	LCDOUT23),
+	PINMUX_IPSR_GPSR(IP2_27_24,	A7),
+	PINMUX_IPSR_GPSR(IP2_27_24,	LCDOUT23),
 	PINMUX_IPSR_MSEL(IP2_27_24,	MSIOF2_SS2_A,		SEL_MSIOF2_0),
 	PINMUX_IPSR_MSEL(IP2_27_24,	TX4_B,			SEL_SCIF4_1),
-	PINMUX_IPSR_DATA(IP2_27_24,	VI4_DATA15),
-	PINMUX_IPSR_DATA(IP2_27_24,	VI5_DATA15),
-	PINMUX_IPSR_DATA(IP2_27_24,	DU_DB7),
+	PINMUX_IPSR_GPSR(IP2_27_24,	VI4_DATA15),
+	PINMUX_IPSR_GPSR(IP2_27_24,	VI5_DATA15),
+	PINMUX_IPSR_GPSR(IP2_27_24,	DU_DB7),
 
-	PINMUX_IPSR_DATA(IP2_31_28,	A8),
+	PINMUX_IPSR_GPSR(IP2_31_28,	A8),
 	PINMUX_IPSR_MSEL(IP2_31_28,	RX3_B,			SEL_SCIF3_1),
 	PINMUX_IPSR_MSEL(IP2_31_28,	MSIOF2_SYNC_A,		SEL_MSIOF2_0),
 	PINMUX_IPSR_MSEL(IP2_31_28,	HRX4_B,			SEL_HSCIF4_1),
@@ -720,99 +719,99 @@
 	PINMUX_IPSR_MSEL(IP2_31_28,	PWM1_B,			SEL_PWM1_1),
 
 	/* IPSR3 */
-	PINMUX_IPSR_DATA(IP3_3_0,	A9),
+	PINMUX_IPSR_GPSR(IP3_3_0,	A9),
 	PINMUX_IPSR_MSEL(IP3_3_0,	MSIOF2_SCK_A,		SEL_MSIOF2_0),
 	PINMUX_IPSR_MSEL(IP3_3_0,	CTS4_N_B,		SEL_SCIF4_1),
-	PINMUX_IPSR_DATA(IP3_3_0,	VI5_VSYNC_N),
+	PINMUX_IPSR_GPSR(IP3_3_0,	VI5_VSYNC_N),
 
-	PINMUX_IPSR_DATA(IP3_7_4,	A10),
+	PINMUX_IPSR_GPSR(IP3_7_4,	A10),
 	PINMUX_IPSR_MSEL(IP3_7_4,	MSIOF2_RXD_A,		SEL_MSIOF2_0),
 	PINMUX_IPSR_MSEL(IP3_7_4,	RTS4_N_TANS_B,		SEL_SCIF4_1),
-	PINMUX_IPSR_DATA(IP3_7_4,	VI5_HSYNC_N),
+	PINMUX_IPSR_GPSR(IP3_7_4,	VI5_HSYNC_N),
 
-	PINMUX_IPSR_DATA(IP3_11_8,	A11),
+	PINMUX_IPSR_GPSR(IP3_11_8,	A11),
 	PINMUX_IPSR_MSEL(IP3_11_8,	TX3_B,			SEL_SCIF3_1),
 	PINMUX_IPSR_MSEL(IP3_11_8,	MSIOF2_TXD_A,		SEL_MSIOF2_0),
 	PINMUX_IPSR_MSEL(IP3_11_8,	HTX4_B,			SEL_HSCIF4_1),
-	PINMUX_IPSR_DATA(IP3_11_8,	HSCK4),
-	PINMUX_IPSR_DATA(IP3_11_8,	VI5_FIELD),
+	PINMUX_IPSR_GPSR(IP3_11_8,	HSCK4),
+	PINMUX_IPSR_GPSR(IP3_11_8,	VI5_FIELD),
 	PINMUX_IPSR_MSEL(IP3_11_8,	SCL6_A,			SEL_I2C6_0),
 	PINMUX_IPSR_MSEL(IP3_11_8,	AVB_AVTP_CAPTURE_B,	SEL_ETHERAVB_1),
 	PINMUX_IPSR_MSEL(IP3_11_8,	PWM2_B,			SEL_PWM2_1),
 
-	PINMUX_IPSR_DATA(IP3_15_12,	A12),
-	PINMUX_IPSR_DATA(IP3_15_12,	LCDOUT12),
+	PINMUX_IPSR_GPSR(IP3_15_12,	A12),
+	PINMUX_IPSR_GPSR(IP3_15_12,	LCDOUT12),
 	PINMUX_IPSR_MSEL(IP3_15_12,	MSIOF3_SCK_C,		SEL_MSIOF3_2),
 	PINMUX_IPSR_MSEL(IP3_15_12,	HRX4_A,			SEL_HSCIF4_0),
-	PINMUX_IPSR_DATA(IP3_15_12,	VI5_DATA8),
-	PINMUX_IPSR_DATA(IP3_15_12,	DU_DG4),
+	PINMUX_IPSR_GPSR(IP3_15_12,	VI5_DATA8),
+	PINMUX_IPSR_GPSR(IP3_15_12,	DU_DG4),
 
-	PINMUX_IPSR_DATA(IP3_19_16,	A13),
-	PINMUX_IPSR_DATA(IP3_19_16,	LCDOUT13),
+	PINMUX_IPSR_GPSR(IP3_19_16,	A13),
+	PINMUX_IPSR_GPSR(IP3_19_16,	LCDOUT13),
 	PINMUX_IPSR_MSEL(IP3_19_16,	MSIOF3_SYNC_C,		SEL_MSIOF3_2),
 	PINMUX_IPSR_MSEL(IP3_19_16,	HTX4_A,			SEL_HSCIF4_0),
-	PINMUX_IPSR_DATA(IP3_19_16,	VI5_DATA9),
-	PINMUX_IPSR_DATA(IP3_19_16,	DU_DG5),
+	PINMUX_IPSR_GPSR(IP3_19_16,	VI5_DATA9),
+	PINMUX_IPSR_GPSR(IP3_19_16,	DU_DG5),
 
-	PINMUX_IPSR_DATA(IP3_23_20,	A14),
-	PINMUX_IPSR_DATA(IP3_23_20,	LCDOUT14),
+	PINMUX_IPSR_GPSR(IP3_23_20,	A14),
+	PINMUX_IPSR_GPSR(IP3_23_20,	LCDOUT14),
 	PINMUX_IPSR_MSEL(IP3_23_20,	MSIOF3_RXD_C,		SEL_MSIOF3_2),
-	PINMUX_IPSR_DATA(IP3_23_20,	HCTS4_N),
-	PINMUX_IPSR_DATA(IP3_23_20,	VI5_DATA10),
-	PINMUX_IPSR_DATA(IP3_23_20,	DU_DG6),
+	PINMUX_IPSR_GPSR(IP3_23_20,	HCTS4_N),
+	PINMUX_IPSR_GPSR(IP3_23_20,	VI5_DATA10),
+	PINMUX_IPSR_GPSR(IP3_23_20,	DU_DG6),
 
-	PINMUX_IPSR_DATA(IP3_27_24,	A15),
-	PINMUX_IPSR_DATA(IP3_27_24,	LCDOUT15),
+	PINMUX_IPSR_GPSR(IP3_27_24,	A15),
+	PINMUX_IPSR_GPSR(IP3_27_24,	LCDOUT15),
 	PINMUX_IPSR_MSEL(IP3_27_24,	MSIOF3_TXD_C,		SEL_MSIOF3_2),
-	PINMUX_IPSR_DATA(IP3_27_24,	HRTS4_N),
-	PINMUX_IPSR_DATA(IP3_27_24,	VI5_DATA11),
-	PINMUX_IPSR_DATA(IP3_27_24,	DU_DG7),
+	PINMUX_IPSR_GPSR(IP3_27_24,	HRTS4_N),
+	PINMUX_IPSR_GPSR(IP3_27_24,	VI5_DATA11),
+	PINMUX_IPSR_GPSR(IP3_27_24,	DU_DG7),
 
-	PINMUX_IPSR_DATA(IP3_31_28,	A16),
-	PINMUX_IPSR_DATA(IP3_31_28,	LCDOUT8),
-	PINMUX_IPSR_DATA(IP3_31_28,	VI4_FIELD),
-	PINMUX_IPSR_DATA(IP3_31_28,	DU_DG0),
+	PINMUX_IPSR_GPSR(IP3_31_28,	A16),
+	PINMUX_IPSR_GPSR(IP3_31_28,	LCDOUT8),
+	PINMUX_IPSR_GPSR(IP3_31_28,	VI4_FIELD),
+	PINMUX_IPSR_GPSR(IP3_31_28,	DU_DG0),
 
 	/* IPSR4 */
-	PINMUX_IPSR_DATA(IP4_3_0,	A17),
-	PINMUX_IPSR_DATA(IP4_3_0,	LCDOUT9),
-	PINMUX_IPSR_DATA(IP4_3_0,	VI4_VSYNC_N),
-	PINMUX_IPSR_DATA(IP4_3_0,	DU_DG1),
+	PINMUX_IPSR_GPSR(IP4_3_0,	A17),
+	PINMUX_IPSR_GPSR(IP4_3_0,	LCDOUT9),
+	PINMUX_IPSR_GPSR(IP4_3_0,	VI4_VSYNC_N),
+	PINMUX_IPSR_GPSR(IP4_3_0,	DU_DG1),
 
-	PINMUX_IPSR_DATA(IP4_7_4,	A18),
-	PINMUX_IPSR_DATA(IP4_7_4,	LCDOUT10),
-	PINMUX_IPSR_DATA(IP4_7_4,	VI4_HSYNC_N),
-	PINMUX_IPSR_DATA(IP4_7_4,	DU_DG2),
+	PINMUX_IPSR_GPSR(IP4_7_4,	A18),
+	PINMUX_IPSR_GPSR(IP4_7_4,	LCDOUT10),
+	PINMUX_IPSR_GPSR(IP4_7_4,	VI4_HSYNC_N),
+	PINMUX_IPSR_GPSR(IP4_7_4,	DU_DG2),
 
-	PINMUX_IPSR_DATA(IP4_11_8,	A19),
-	PINMUX_IPSR_DATA(IP4_11_8,	LCDOUT11),
-	PINMUX_IPSR_DATA(IP4_11_8,	VI4_CLKENB),
-	PINMUX_IPSR_DATA(IP4_11_8,	DU_DG3),
+	PINMUX_IPSR_GPSR(IP4_11_8,	A19),
+	PINMUX_IPSR_GPSR(IP4_11_8,	LCDOUT11),
+	PINMUX_IPSR_GPSR(IP4_11_8,	VI4_CLKENB),
+	PINMUX_IPSR_GPSR(IP4_11_8,	DU_DG3),
 
-	PINMUX_IPSR_DATA(IP4_15_12,	CS0_N),
-	PINMUX_IPSR_DATA(IP4_15_12,	VI5_CLKENB),
+	PINMUX_IPSR_GPSR(IP4_15_12,	CS0_N),
+	PINMUX_IPSR_GPSR(IP4_15_12,	VI5_CLKENB),
 
-	PINMUX_IPSR_DATA(IP4_19_16,	CS1_N_A26),
-	PINMUX_IPSR_DATA(IP4_19_16,	VI5_CLK),
+	PINMUX_IPSR_GPSR(IP4_19_16,	CS1_N_A26),
+	PINMUX_IPSR_GPSR(IP4_19_16,	VI5_CLK),
 	PINMUX_IPSR_MSEL(IP4_19_16,	EX_WAIT0_B,		SEL_LBSC_1),
 
-	PINMUX_IPSR_DATA(IP4_23_20,	BS_N),
-	PINMUX_IPSR_DATA(IP4_23_20,	QSTVA_QVS),
+	PINMUX_IPSR_GPSR(IP4_23_20,	BS_N),
+	PINMUX_IPSR_GPSR(IP4_23_20,	QSTVA_QVS),
 	PINMUX_IPSR_MSEL(IP4_23_20,	MSIOF3_SCK_D,		SEL_MSIOF3_3),
-	PINMUX_IPSR_DATA(IP4_23_20,	SCK3),
-	PINMUX_IPSR_DATA(IP4_23_20,	HSCK3),
-	PINMUX_IPSR_DATA(IP4_23_20,	CAN1_TX),
-	PINMUX_IPSR_DATA(IP4_23_20,	CANFD1_TX),
+	PINMUX_IPSR_GPSR(IP4_23_20,	SCK3),
+	PINMUX_IPSR_GPSR(IP4_23_20,	HSCK3),
+	PINMUX_IPSR_GPSR(IP4_23_20,	CAN1_TX),
+	PINMUX_IPSR_GPSR(IP4_23_20,	CANFD1_TX),
 	PINMUX_IPSR_MSEL(IP4_23_20,	IETX_A,			SEL_IEBUS_0),
 
-	PINMUX_IPSR_DATA(IP4_27_24,	RD_N),
+	PINMUX_IPSR_GPSR(IP4_27_24,	RD_N),
 	PINMUX_IPSR_MSEL(IP4_27_24,	MSIOF3_SYNC_D,		SEL_MSIOF3_3),
 	PINMUX_IPSR_MSEL(IP4_27_24,	RX3_A,			SEL_SCIF3_0),
 	PINMUX_IPSR_MSEL(IP4_27_24,	HRX3_A,			SEL_HSCIF3_0),
 	PINMUX_IPSR_MSEL(IP4_27_24,	CAN0_TX_A,		SEL_RCAN0_0),
 	PINMUX_IPSR_MSEL(IP4_27_24,	CANFD0_TX_A,		SEL_CANFD0_0),
 
-	PINMUX_IPSR_DATA(IP4_31_28,	RD_WR_N),
+	PINMUX_IPSR_GPSR(IP4_31_28,	RD_WR_N),
 	PINMUX_IPSR_MSEL(IP4_31_28,	MSIOF3_RXD_D,		SEL_MSIOF3_3),
 	PINMUX_IPSR_MSEL(IP4_31_28,	TX3_A,			SEL_SCIF3_0),
 	PINMUX_IPSR_MSEL(IP4_31_28,	HTX3_A,			SEL_HSCIF3_0),
@@ -820,236 +819,236 @@
 	PINMUX_IPSR_MSEL(IP4_31_28,	CANFD0_RX_A,		SEL_CANFD0_0),
 
 	/* IPSR5 */
-	PINMUX_IPSR_DATA(IP5_3_0,	WE0_N),
+	PINMUX_IPSR_GPSR(IP5_3_0,	WE0_N),
 	PINMUX_IPSR_MSEL(IP5_3_0,	MSIOF3_TXD_D,		SEL_MSIOF3_3),
-	PINMUX_IPSR_DATA(IP5_3_0,	CTS3_N),
-	PINMUX_IPSR_DATA(IP5_3_0,	HCTS3_N),
+	PINMUX_IPSR_GPSR(IP5_3_0,	CTS3_N),
+	PINMUX_IPSR_GPSR(IP5_3_0,	HCTS3_N),
 	PINMUX_IPSR_MSEL(IP5_3_0,	SCL6_B,			SEL_I2C6_1),
-	PINMUX_IPSR_DATA(IP5_3_0,	CAN_CLK),
+	PINMUX_IPSR_GPSR(IP5_3_0,	CAN_CLK),
 	PINMUX_IPSR_MSEL(IP5_3_0,	IECLK_A,		SEL_IEBUS_0),
 
-	PINMUX_IPSR_DATA(IP5_7_4,	WE1_N),
+	PINMUX_IPSR_GPSR(IP5_7_4,	WE1_N),
 	PINMUX_IPSR_MSEL(IP5_7_4,	MSIOF3_SS1_D,		SEL_MSIOF3_3),
-	PINMUX_IPSR_DATA(IP5_7_4,	RTS3_N_TANS),
-	PINMUX_IPSR_DATA(IP5_7_4,	HRTS3_N),
+	PINMUX_IPSR_GPSR(IP5_7_4,	RTS3_N_TANS),
+	PINMUX_IPSR_GPSR(IP5_7_4,	HRTS3_N),
 	PINMUX_IPSR_MSEL(IP5_7_4,	SDA6_B,			SEL_I2C6_1),
-	PINMUX_IPSR_DATA(IP5_7_4,	CAN1_RX),
-	PINMUX_IPSR_DATA(IP5_7_4,	CANFD1_RX),
+	PINMUX_IPSR_GPSR(IP5_7_4,	CAN1_RX),
+	PINMUX_IPSR_GPSR(IP5_7_4,	CANFD1_RX),
 	PINMUX_IPSR_MSEL(IP5_7_4,	IERX_A,			SEL_IEBUS_0),
 
 	PINMUX_IPSR_MSEL(IP5_11_8,	EX_WAIT0_A,		SEL_LBSC_0),
-	PINMUX_IPSR_DATA(IP5_11_8,	QCLK),
-	PINMUX_IPSR_DATA(IP5_11_8,	VI4_CLK),
-	PINMUX_IPSR_DATA(IP5_11_8,	DU_DOTCLKOUT0),
+	PINMUX_IPSR_GPSR(IP5_11_8,	QCLK),
+	PINMUX_IPSR_GPSR(IP5_11_8,	VI4_CLK),
+	PINMUX_IPSR_GPSR(IP5_11_8,	DU_DOTCLKOUT0),
 
-	PINMUX_IPSR_DATA(IP5_15_12,	D0),
+	PINMUX_IPSR_GPSR(IP5_15_12,	D0),
 	PINMUX_IPSR_MSEL(IP5_15_12,	MSIOF2_SS1_B,		SEL_MSIOF2_1),
 	PINMUX_IPSR_MSEL(IP5_15_12,	MSIOF3_SCK_A,		SEL_MSIOF3_0),
-	PINMUX_IPSR_DATA(IP5_15_12,	VI4_DATA16),
-	PINMUX_IPSR_DATA(IP5_15_12,	VI5_DATA0),
+	PINMUX_IPSR_GPSR(IP5_15_12,	VI4_DATA16),
+	PINMUX_IPSR_GPSR(IP5_15_12,	VI5_DATA0),
 
-	PINMUX_IPSR_DATA(IP5_19_16,	D1),
+	PINMUX_IPSR_GPSR(IP5_19_16,	D1),
 	PINMUX_IPSR_MSEL(IP5_19_16,	MSIOF2_SS2_B,		SEL_MSIOF2_1),
 	PINMUX_IPSR_MSEL(IP5_19_16,	MSIOF3_SYNC_A,		SEL_MSIOF3_0),
-	PINMUX_IPSR_DATA(IP5_19_16,	VI4_DATA17),
-	PINMUX_IPSR_DATA(IP5_19_16,	VI5_DATA1),
+	PINMUX_IPSR_GPSR(IP5_19_16,	VI4_DATA17),
+	PINMUX_IPSR_GPSR(IP5_19_16,	VI5_DATA1),
 
-	PINMUX_IPSR_DATA(IP5_23_20,	D2),
+	PINMUX_IPSR_GPSR(IP5_23_20,	D2),
 	PINMUX_IPSR_MSEL(IP5_23_20,	MSIOF3_RXD_A,		SEL_MSIOF3_0),
-	PINMUX_IPSR_DATA(IP5_23_20,	VI4_DATA18),
-	PINMUX_IPSR_DATA(IP5_23_20,	VI5_DATA2),
+	PINMUX_IPSR_GPSR(IP5_23_20,	VI4_DATA18),
+	PINMUX_IPSR_GPSR(IP5_23_20,	VI5_DATA2),
 
-	PINMUX_IPSR_DATA(IP5_27_24,	D3),
+	PINMUX_IPSR_GPSR(IP5_27_24,	D3),
 	PINMUX_IPSR_MSEL(IP5_27_24,	MSIOF3_TXD_A,		SEL_MSIOF3_0),
-	PINMUX_IPSR_DATA(IP5_27_24,	VI4_DATA19),
-	PINMUX_IPSR_DATA(IP5_27_24,	VI5_DATA3),
+	PINMUX_IPSR_GPSR(IP5_27_24,	VI4_DATA19),
+	PINMUX_IPSR_GPSR(IP5_27_24,	VI5_DATA3),
 
-	PINMUX_IPSR_DATA(IP5_31_28,	D4),
+	PINMUX_IPSR_GPSR(IP5_31_28,	D4),
 	PINMUX_IPSR_MSEL(IP5_31_28,	MSIOF2_SCK_B,		SEL_MSIOF2_1),
-	PINMUX_IPSR_DATA(IP5_31_28,	VI4_DATA20),
-	PINMUX_IPSR_DATA(IP5_31_28,	VI5_DATA4),
+	PINMUX_IPSR_GPSR(IP5_31_28,	VI4_DATA20),
+	PINMUX_IPSR_GPSR(IP5_31_28,	VI5_DATA4),
 
 	/* IPSR6 */
-	PINMUX_IPSR_DATA(IP6_3_0,	D5),
+	PINMUX_IPSR_GPSR(IP6_3_0,	D5),
 	PINMUX_IPSR_MSEL(IP6_3_0,	MSIOF2_SYNC_B,		SEL_MSIOF2_1),
-	PINMUX_IPSR_DATA(IP6_3_0,	VI4_DATA21),
-	PINMUX_IPSR_DATA(IP6_3_0,	VI5_DATA5),
+	PINMUX_IPSR_GPSR(IP6_3_0,	VI4_DATA21),
+	PINMUX_IPSR_GPSR(IP6_3_0,	VI5_DATA5),
 
-	PINMUX_IPSR_DATA(IP6_7_4,	D6),
+	PINMUX_IPSR_GPSR(IP6_7_4,	D6),
 	PINMUX_IPSR_MSEL(IP6_7_4,	MSIOF2_RXD_B,		SEL_MSIOF2_1),
-	PINMUX_IPSR_DATA(IP6_7_4,	VI4_DATA22),
-	PINMUX_IPSR_DATA(IP6_7_4,	VI5_DATA6),
+	PINMUX_IPSR_GPSR(IP6_7_4,	VI4_DATA22),
+	PINMUX_IPSR_GPSR(IP6_7_4,	VI5_DATA6),
 
-	PINMUX_IPSR_DATA(IP6_11_8,	D7),
+	PINMUX_IPSR_GPSR(IP6_11_8,	D7),
 	PINMUX_IPSR_MSEL(IP6_11_8,	MSIOF2_TXD_B,		SEL_MSIOF2_1),
-	PINMUX_IPSR_DATA(IP6_11_8,	VI4_DATA23),
-	PINMUX_IPSR_DATA(IP6_11_8,	VI5_DATA7),
+	PINMUX_IPSR_GPSR(IP6_11_8,	VI4_DATA23),
+	PINMUX_IPSR_GPSR(IP6_11_8,	VI5_DATA7),
 
-	PINMUX_IPSR_DATA(IP6_15_12,	D8),
-	PINMUX_IPSR_DATA(IP6_15_12,	LCDOUT0),
+	PINMUX_IPSR_GPSR(IP6_15_12,	D8),
+	PINMUX_IPSR_GPSR(IP6_15_12,	LCDOUT0),
 	PINMUX_IPSR_MSEL(IP6_15_12,	MSIOF2_SCK_D,		SEL_MSIOF2_3),
 	PINMUX_IPSR_MSEL(IP6_15_12,	SCK4_C,			SEL_SCIF4_2),
 	PINMUX_IPSR_MSEL(IP6_15_12,	VI4_DATA0_A,		SEL_VIN4_0),
-	PINMUX_IPSR_DATA(IP6_15_12,	DU_DR0),
+	PINMUX_IPSR_GPSR(IP6_15_12,	DU_DR0),
 
-	PINMUX_IPSR_DATA(IP6_19_16,	D9),
-	PINMUX_IPSR_DATA(IP6_19_16,	LCDOUT1),
+	PINMUX_IPSR_GPSR(IP6_19_16,	D9),
+	PINMUX_IPSR_GPSR(IP6_19_16,	LCDOUT1),
 	PINMUX_IPSR_MSEL(IP6_19_16,	MSIOF2_SYNC_D,		SEL_MSIOF2_3),
 	PINMUX_IPSR_MSEL(IP6_19_16,	VI4_DATA1_A,		SEL_VIN4_0),
-	PINMUX_IPSR_DATA(IP6_19_16,	DU_DR1),
+	PINMUX_IPSR_GPSR(IP6_19_16,	DU_DR1),
 
-	PINMUX_IPSR_DATA(IP6_23_20,	D10),
-	PINMUX_IPSR_DATA(IP6_23_20,	LCDOUT2),
+	PINMUX_IPSR_GPSR(IP6_23_20,	D10),
+	PINMUX_IPSR_GPSR(IP6_23_20,	LCDOUT2),
 	PINMUX_IPSR_MSEL(IP6_23_20,	MSIOF2_RXD_D,		SEL_MSIOF2_3),
 	PINMUX_IPSR_MSEL(IP6_23_20,	HRX3_B,			SEL_HSCIF3_1),
 	PINMUX_IPSR_MSEL(IP6_23_20,	VI4_DATA2_A,		SEL_VIN4_0),
 	PINMUX_IPSR_MSEL(IP6_23_20,	CTS4_N_C,		SEL_SCIF4_2),
-	PINMUX_IPSR_DATA(IP6_23_20,	DU_DR2),
+	PINMUX_IPSR_GPSR(IP6_23_20,	DU_DR2),
 
-	PINMUX_IPSR_DATA(IP6_27_24,	D11),
-	PINMUX_IPSR_DATA(IP6_27_24,	LCDOUT3),
+	PINMUX_IPSR_GPSR(IP6_27_24,	D11),
+	PINMUX_IPSR_GPSR(IP6_27_24,	LCDOUT3),
 	PINMUX_IPSR_MSEL(IP6_27_24,	MSIOF2_TXD_D,		SEL_MSIOF2_3),
 	PINMUX_IPSR_MSEL(IP6_27_24,	HTX3_B,			SEL_HSCIF3_1),
 	PINMUX_IPSR_MSEL(IP6_27_24,	VI4_DATA3_A,		SEL_VIN4_0),
 	PINMUX_IPSR_MSEL(IP6_27_24,	RTS4_N_TANS_C,		SEL_SCIF4_2),
-	PINMUX_IPSR_DATA(IP6_27_24,	DU_DR3),
+	PINMUX_IPSR_GPSR(IP6_27_24,	DU_DR3),
 
-	PINMUX_IPSR_DATA(IP6_31_28,	D12),
-	PINMUX_IPSR_DATA(IP6_31_28,	LCDOUT4),
+	PINMUX_IPSR_GPSR(IP6_31_28,	D12),
+	PINMUX_IPSR_GPSR(IP6_31_28,	LCDOUT4),
 	PINMUX_IPSR_MSEL(IP6_31_28,	MSIOF2_SS1_D,		SEL_MSIOF2_3),
 	PINMUX_IPSR_MSEL(IP6_31_28,	RX4_C,			SEL_SCIF4_2),
 	PINMUX_IPSR_MSEL(IP6_31_28,	VI4_DATA4_A,		SEL_VIN4_0),
-	PINMUX_IPSR_DATA(IP6_31_28,	DU_DR4),
+	PINMUX_IPSR_GPSR(IP6_31_28,	DU_DR4),
 
 	/* IPSR7 */
-	PINMUX_IPSR_DATA(IP7_3_0,	D13),
-	PINMUX_IPSR_DATA(IP7_3_0,	LCDOUT5),
+	PINMUX_IPSR_GPSR(IP7_3_0,	D13),
+	PINMUX_IPSR_GPSR(IP7_3_0,	LCDOUT5),
 	PINMUX_IPSR_MSEL(IP7_3_0,	MSIOF2_SS2_D,		SEL_MSIOF2_3),
 	PINMUX_IPSR_MSEL(IP7_3_0,	TX4_C,			SEL_SCIF4_2),
 	PINMUX_IPSR_MSEL(IP7_3_0,	VI4_DATA5_A,		SEL_VIN4_0),
-	PINMUX_IPSR_DATA(IP7_3_0,	DU_DR5),
+	PINMUX_IPSR_GPSR(IP7_3_0,	DU_DR5),
 
-	PINMUX_IPSR_DATA(IP7_7_4,	D14),
-	PINMUX_IPSR_DATA(IP7_7_4,	LCDOUT6),
+	PINMUX_IPSR_GPSR(IP7_7_4,	D14),
+	PINMUX_IPSR_GPSR(IP7_7_4,	LCDOUT6),
 	PINMUX_IPSR_MSEL(IP7_7_4,	MSIOF3_SS1_A,		SEL_MSIOF3_0),
 	PINMUX_IPSR_MSEL(IP7_7_4,	HRX3_C,			SEL_HSCIF3_2),
 	PINMUX_IPSR_MSEL(IP7_7_4,	VI4_DATA6_A,		SEL_VIN4_0),
-	PINMUX_IPSR_DATA(IP7_7_4,	DU_DR6),
+	PINMUX_IPSR_GPSR(IP7_7_4,	DU_DR6),
 	PINMUX_IPSR_MSEL(IP7_7_4,	SCL6_C,			SEL_I2C6_2),
 
-	PINMUX_IPSR_DATA(IP7_11_8,	D15),
-	PINMUX_IPSR_DATA(IP7_11_8,	LCDOUT7),
+	PINMUX_IPSR_GPSR(IP7_11_8,	D15),
+	PINMUX_IPSR_GPSR(IP7_11_8,	LCDOUT7),
 	PINMUX_IPSR_MSEL(IP7_11_8,	MSIOF3_SS2_A,		SEL_MSIOF3_0),
 	PINMUX_IPSR_MSEL(IP7_11_8,	HTX3_C,			SEL_HSCIF3_2),
 	PINMUX_IPSR_MSEL(IP7_11_8,	VI4_DATA7_A,		SEL_VIN4_0),
-	PINMUX_IPSR_DATA(IP7_11_8,	DU_DR7),
+	PINMUX_IPSR_GPSR(IP7_11_8,	DU_DR7),
 	PINMUX_IPSR_MSEL(IP7_11_8,	SDA6_C,			SEL_I2C6_2),
 
-	PINMUX_IPSR_DATA(IP7_15_12,	FSCLKST),
+	PINMUX_IPSR_GPSR(IP7_15_12,	FSCLKST),
 
-	PINMUX_IPSR_DATA(IP7_19_16,	SD0_CLK),
+	PINMUX_IPSR_GPSR(IP7_19_16,	SD0_CLK),
 	PINMUX_IPSR_MSEL(IP7_19_16,	MSIOF1_SCK_E,		SEL_MSIOF1_4),
 	PINMUX_IPSR_MSEL(IP7_19_16,	STP_OPWM_0_B,		SEL_SSP1_0_1),
 
-	PINMUX_IPSR_DATA(IP7_23_20,	SD0_CMD),
+	PINMUX_IPSR_GPSR(IP7_23_20,	SD0_CMD),
 	PINMUX_IPSR_MSEL(IP7_23_20,	MSIOF1_SYNC_E,		SEL_MSIOF1_4),
 	PINMUX_IPSR_MSEL(IP7_23_20,	STP_IVCXO27_0_B,	SEL_SSP1_0_1),
 
-	PINMUX_IPSR_DATA(IP7_27_24,	SD0_DAT0),
+	PINMUX_IPSR_GPSR(IP7_27_24,	SD0_DAT0),
 	PINMUX_IPSR_MSEL(IP7_27_24,	MSIOF1_RXD_E,		SEL_MSIOF1_4),
 	PINMUX_IPSR_MSEL(IP7_27_24,	TS_SCK0_B,		SEL_TSIF0_1),
 	PINMUX_IPSR_MSEL(IP7_27_24,	STP_ISCLK_0_B,		SEL_SSP1_0_1),
 
-	PINMUX_IPSR_DATA(IP7_31_28,	SD0_DAT1),
+	PINMUX_IPSR_GPSR(IP7_31_28,	SD0_DAT1),
 	PINMUX_IPSR_MSEL(IP7_31_28,	MSIOF1_TXD_E,		SEL_MSIOF1_4),
 	PINMUX_IPSR_MSEL(IP7_31_28,	TS_SPSYNC0_B,		SEL_TSIF0_1),
 	PINMUX_IPSR_MSEL(IP7_31_28,	STP_ISSYNC_0_B,		SEL_SSP1_0_1),
 
 	/* IPSR8 */
-	PINMUX_IPSR_DATA(IP8_3_0,	SD0_DAT2),
+	PINMUX_IPSR_GPSR(IP8_3_0,	SD0_DAT2),
 	PINMUX_IPSR_MSEL(IP8_3_0,	MSIOF1_SS1_E,		SEL_MSIOF1_4),
 	PINMUX_IPSR_MSEL(IP8_3_0,	TS_SDAT0_B,		SEL_TSIF0_1),
 	PINMUX_IPSR_MSEL(IP8_3_0,	STP_ISD_0_B,		SEL_SSP1_0_1),
 
-	PINMUX_IPSR_DATA(IP8_7_4,	SD0_DAT3),
+	PINMUX_IPSR_GPSR(IP8_7_4,	SD0_DAT3),
 	PINMUX_IPSR_MSEL(IP8_7_4,	MSIOF1_SS2_E,		SEL_MSIOF1_4),
 	PINMUX_IPSR_MSEL(IP8_7_4,	TS_SDEN0_B,		SEL_TSIF0_1),
 	PINMUX_IPSR_MSEL(IP8_7_4,	STP_ISEN_0_B,		SEL_SSP1_0_1),
 
-	PINMUX_IPSR_DATA(IP8_11_8,	SD1_CLK),
+	PINMUX_IPSR_GPSR(IP8_11_8,	SD1_CLK),
 	PINMUX_IPSR_MSEL(IP8_11_8,	MSIOF1_SCK_G,		SEL_MSIOF1_6),
 	PINMUX_IPSR_MSEL(IP8_11_8,	SIM0_CLK_A,		SEL_SIMCARD_0),
 
-	PINMUX_IPSR_DATA(IP8_15_12,	SD1_CMD),
+	PINMUX_IPSR_GPSR(IP8_15_12,	SD1_CMD),
 	PINMUX_IPSR_MSEL(IP8_15_12,	MSIOF1_SYNC_G,		SEL_MSIOF1_6),
 	PINMUX_IPSR_MSEL(IP8_15_12,	SIM0_D_A,		SEL_SIMCARD_0),
 	PINMUX_IPSR_MSEL(IP8_15_12,	STP_IVCXO27_1_B,	SEL_SSP1_1_1),
 
-	PINMUX_IPSR_DATA(IP8_19_16,	SD1_DAT0),
-	PINMUX_IPSR_DATA(IP8_19_16,	SD2_DAT4),
+	PINMUX_IPSR_GPSR(IP8_19_16,	SD1_DAT0),
+	PINMUX_IPSR_GPSR(IP8_19_16,	SD2_DAT4),
 	PINMUX_IPSR_MSEL(IP8_19_16,	MSIOF1_RXD_G,		SEL_MSIOF1_6),
 	PINMUX_IPSR_MSEL(IP8_19_16,	TS_SCK1_B,		SEL_TSIF1_1),
 	PINMUX_IPSR_MSEL(IP8_19_16,	STP_ISCLK_1_B,		SEL_SSP1_1_1),
 
-	PINMUX_IPSR_DATA(IP8_23_20,	SD1_DAT1),
-	PINMUX_IPSR_DATA(IP8_23_20,	SD2_DAT5),
+	PINMUX_IPSR_GPSR(IP8_23_20,	SD1_DAT1),
+	PINMUX_IPSR_GPSR(IP8_23_20,	SD2_DAT5),
 	PINMUX_IPSR_MSEL(IP8_23_20,	MSIOF1_TXD_G,		SEL_MSIOF1_6),
 	PINMUX_IPSR_MSEL(IP8_23_20,	TS_SPSYNC1_B,		SEL_TSIF1_1),
 	PINMUX_IPSR_MSEL(IP8_23_20,	STP_ISSYNC_1_B,		SEL_SSP1_1_1),
 
-	PINMUX_IPSR_DATA(IP8_27_24,	SD1_DAT2),
-	PINMUX_IPSR_DATA(IP8_27_24,	SD2_DAT6),
+	PINMUX_IPSR_GPSR(IP8_27_24,	SD1_DAT2),
+	PINMUX_IPSR_GPSR(IP8_27_24,	SD2_DAT6),
 	PINMUX_IPSR_MSEL(IP8_27_24,	MSIOF1_SS1_G,		SEL_MSIOF1_6),
 	PINMUX_IPSR_MSEL(IP8_27_24,	TS_SDAT1_B,		SEL_TSIF1_1),
 	PINMUX_IPSR_MSEL(IP8_27_24,	STP_ISD_1_B,		SEL_SSP1_1_1),
 
-	PINMUX_IPSR_DATA(IP8_31_28,	SD1_DAT3),
-	PINMUX_IPSR_DATA(IP8_31_28,	SD2_DAT7),
+	PINMUX_IPSR_GPSR(IP8_31_28,	SD1_DAT3),
+	PINMUX_IPSR_GPSR(IP8_31_28,	SD2_DAT7),
 	PINMUX_IPSR_MSEL(IP8_31_28,	MSIOF1_SS2_G,		SEL_MSIOF1_6),
 	PINMUX_IPSR_MSEL(IP8_31_28,	TS_SDEN1_B,		SEL_TSIF1_1),
 	PINMUX_IPSR_MSEL(IP8_31_28,	STP_ISEN_1_B,		SEL_SSP1_1_1),
 
 	/* IPSR9 */
-	PINMUX_IPSR_DATA(IP9_3_0,	SD2_CLK),
+	PINMUX_IPSR_GPSR(IP9_3_0,	SD2_CLK),
 
-	PINMUX_IPSR_DATA(IP9_7_4,	SD2_DAT0),
+	PINMUX_IPSR_GPSR(IP9_7_4,	SD2_DAT0),
 
-	PINMUX_IPSR_DATA(IP9_11_8,	SD2_DAT1),
+	PINMUX_IPSR_GPSR(IP9_11_8,	SD2_DAT1),
 
-	PINMUX_IPSR_DATA(IP9_15_12,	SD2_DAT2),
+	PINMUX_IPSR_GPSR(IP9_15_12,	SD2_DAT2),
 
-	PINMUX_IPSR_DATA(IP9_19_16,	SD2_DAT3),
+	PINMUX_IPSR_GPSR(IP9_19_16,	SD2_DAT3),
 
-	PINMUX_IPSR_DATA(IP9_23_20,	SD2_DS),
+	PINMUX_IPSR_GPSR(IP9_23_20,	SD2_DS),
 	PINMUX_IPSR_MSEL(IP9_23_20,	SATA_DEVSLP_B,		SEL_SATA_1),
 
-	PINMUX_IPSR_DATA(IP9_27_24,	SD3_DAT4),
+	PINMUX_IPSR_GPSR(IP9_27_24,	SD3_DAT4),
 	PINMUX_IPSR_MSEL(IP9_27_24,	SD2_CD_A,		SEL_SDHI2_0),
 
-	PINMUX_IPSR_DATA(IP9_31_28,	SD3_DAT5),
+	PINMUX_IPSR_GPSR(IP9_31_28,	SD3_DAT5),
 	PINMUX_IPSR_MSEL(IP9_31_28,	SD2_WP_A,		SEL_SDHI2_0),
 
 	/* IPSR10 */
-	PINMUX_IPSR_DATA(IP10_3_0,	SD3_DAT6),
-	PINMUX_IPSR_DATA(IP10_3_0,	SD3_CD),
+	PINMUX_IPSR_GPSR(IP10_3_0,	SD3_DAT6),
+	PINMUX_IPSR_GPSR(IP10_3_0,	SD3_CD),
 
-	PINMUX_IPSR_DATA(IP10_7_4,	SD3_DAT7),
-	PINMUX_IPSR_DATA(IP10_7_4,	SD3_WP),
+	PINMUX_IPSR_GPSR(IP10_7_4,	SD3_DAT7),
+	PINMUX_IPSR_GPSR(IP10_7_4,	SD3_WP),
 
-	PINMUX_IPSR_DATA(IP10_11_8,	SD0_CD),
+	PINMUX_IPSR_GPSR(IP10_11_8,	SD0_CD),
 	PINMUX_IPSR_MSEL(IP10_11_8,	SCL2_B,			SEL_I2C2_1),
 	PINMUX_IPSR_MSEL(IP10_11_8,	SIM0_RST_A,		SEL_SIMCARD_0),
 
-	PINMUX_IPSR_DATA(IP10_15_12,	SD0_WP),
+	PINMUX_IPSR_GPSR(IP10_15_12,	SD0_WP),
 	PINMUX_IPSR_MSEL(IP10_15_12,	SDA2_B,			SEL_I2C2_1),
 
-	PINMUX_IPSR_DATA(IP10_19_16,	SD1_CD),
+	PINMUX_IPSR_GPSR(IP10_19_16,	SD1_CD),
 	PINMUX_IPSR_MSEL(IP10_19_16,	SIM0_CLK_B,		SEL_SIMCARD_1),
 
-	PINMUX_IPSR_DATA(IP10_23_20,	SD1_WP),
+	PINMUX_IPSR_GPSR(IP10_23_20,	SD1_WP),
 	PINMUX_IPSR_MSEL(IP10_23_20,	SIM0_D_B,		SEL_SIMCARD_1),
 
-	PINMUX_IPSR_DATA(IP10_27_24,	SCK0),
+	PINMUX_IPSR_GPSR(IP10_27_24,	SCK0),
 	PINMUX_IPSR_MSEL(IP10_27_24,	HSCK1_B,		SEL_HSCIF1_1),
 	PINMUX_IPSR_MSEL(IP10_27_24,	MSIOF1_SS2_B,		SEL_MSIOF1_1),
 	PINMUX_IPSR_MSEL(IP10_27_24,	AUDIO_CLKC_B,		SEL_ADG_1),
@@ -1057,38 +1056,38 @@
 	PINMUX_IPSR_MSEL(IP10_27_24,	SIM0_RST_B,		SEL_SIMCARD_1),
 	PINMUX_IPSR_MSEL(IP10_27_24,	STP_OPWM_0_C,		SEL_SSP1_0_2),
 	PINMUX_IPSR_MSEL(IP10_27_24,	RIF0_CLK_B,		SEL_DRIF0_1),
-	PINMUX_IPSR_DATA(IP10_27_24,	ADICHS2),
+	PINMUX_IPSR_GPSR(IP10_27_24,	ADICHS2),
 
-	PINMUX_IPSR_DATA(IP10_31_28,	RX0),
+	PINMUX_IPSR_GPSR(IP10_31_28,	RX0),
 	PINMUX_IPSR_MSEL(IP10_31_28,	HRX1_B,			SEL_HSCIF1_1),
 	PINMUX_IPSR_MSEL(IP10_31_28,	TS_SCK0_C,		SEL_TSIF0_2),
 	PINMUX_IPSR_MSEL(IP10_31_28,	STP_ISCLK_0_C,		SEL_SSP1_0_2),
 	PINMUX_IPSR_MSEL(IP10_31_28,	RIF0_D0_B,		SEL_DRIF0_1),
 
 	/* IPSR11 */
-	PINMUX_IPSR_DATA(IP11_3_0,	TX0),
+	PINMUX_IPSR_GPSR(IP11_3_0,	TX0),
 	PINMUX_IPSR_MSEL(IP11_3_0,	HTX1_B,			SEL_HSCIF1_1),
 	PINMUX_IPSR_MSEL(IP11_3_0,	TS_SPSYNC0_C,		SEL_TSIF0_2),
 	PINMUX_IPSR_MSEL(IP11_3_0,	STP_ISSYNC_0_C,		SEL_SSP1_0_2),
 	PINMUX_IPSR_MSEL(IP11_3_0,	RIF0_D1_B,		SEL_DRIF0_1),
 
-	PINMUX_IPSR_DATA(IP11_7_4,	CTS0_N),
+	PINMUX_IPSR_GPSR(IP11_7_4,	CTS0_N),
 	PINMUX_IPSR_MSEL(IP11_7_4,	HCTS1_N_B,		SEL_HSCIF1_1),
 	PINMUX_IPSR_MSEL(IP11_7_4,	MSIOF1_SYNC_B,		SEL_MSIOF1_1),
 	PINMUX_IPSR_MSEL(IP11_7_4,	TS_SPSYNC1_C,		SEL_TSIF1_2),
 	PINMUX_IPSR_MSEL(IP11_7_4,	STP_ISSYNC_1_C,		SEL_SSP1_1_2),
 	PINMUX_IPSR_MSEL(IP11_7_4,	RIF1_SYNC_B,		SEL_DRIF1_1),
 	PINMUX_IPSR_MSEL(IP11_7_4,	AUDIO_CLKOUT_C,		SEL_ADG_2),
-	PINMUX_IPSR_DATA(IP11_7_4,	ADICS_SAMP),
+	PINMUX_IPSR_GPSR(IP11_7_4,	ADICS_SAMP),
 
-	PINMUX_IPSR_DATA(IP11_11_8,	RTS0_N_TANS),
+	PINMUX_IPSR_GPSR(IP11_11_8,	RTS0_N_TANS),
 	PINMUX_IPSR_MSEL(IP11_11_8,	HRTS1_N_B,		SEL_HSCIF1_1),
 	PINMUX_IPSR_MSEL(IP11_11_8,	MSIOF1_SS1_B,		SEL_MSIOF1_1),
 	PINMUX_IPSR_MSEL(IP11_11_8,	AUDIO_CLKA_B,		SEL_ADG_1),
 	PINMUX_IPSR_MSEL(IP11_11_8,	SCL2_A,			SEL_I2C2_0),
 	PINMUX_IPSR_MSEL(IP11_11_8,	STP_IVCXO27_1_C,	SEL_SSP1_1_2),
 	PINMUX_IPSR_MSEL(IP11_11_8,	RIF0_SYNC_B,		SEL_DRIF0_1),
-	PINMUX_IPSR_DATA(IP11_11_8,	ADICHS1),
+	PINMUX_IPSR_GPSR(IP11_11_8,	ADICHS1),
 
 	PINMUX_IPSR_MSEL(IP11_15_12,	RX1_A,			SEL_SCIF1_0),
 	PINMUX_IPSR_MSEL(IP11_15_12,	HRX1_A,			SEL_HSCIF1_0),
@@ -1102,29 +1101,29 @@
 	PINMUX_IPSR_MSEL(IP11_19_16,	STP_ISEN_0_C,		SEL_SSP1_0_2),
 	PINMUX_IPSR_MSEL(IP11_19_16,	RIF1_D0_C,		SEL_DRIF1_2),
 
-	PINMUX_IPSR_DATA(IP11_23_20,	CTS1_N),
+	PINMUX_IPSR_GPSR(IP11_23_20,	CTS1_N),
 	PINMUX_IPSR_MSEL(IP11_23_20,	HCTS1_N_A,		SEL_HSCIF1_0),
 	PINMUX_IPSR_MSEL(IP11_23_20,	MSIOF1_RXD_B,		SEL_MSIOF1_1),
 	PINMUX_IPSR_MSEL(IP11_23_20,	TS_SDEN1_C,		SEL_TSIF1_2),
 	PINMUX_IPSR_MSEL(IP11_23_20,	STP_ISEN_1_C,		SEL_SSP1_1_2),
 	PINMUX_IPSR_MSEL(IP11_23_20,	RIF1_D0_B,		SEL_DRIF1_1),
-	PINMUX_IPSR_DATA(IP11_23_20,	ADIDATA),
+	PINMUX_IPSR_GPSR(IP11_23_20,	ADIDATA),
 
-	PINMUX_IPSR_DATA(IP11_27_24,	RTS1_N_TANS),
+	PINMUX_IPSR_GPSR(IP11_27_24,	RTS1_N_TANS),
 	PINMUX_IPSR_MSEL(IP11_27_24,	HRTS1_N_A,		SEL_HSCIF1_0),
 	PINMUX_IPSR_MSEL(IP11_27_24,	MSIOF1_TXD_B,		SEL_MSIOF1_1),
 	PINMUX_IPSR_MSEL(IP11_27_24,	TS_SDAT1_C,		SEL_TSIF1_2),
 	PINMUX_IPSR_MSEL(IP11_27_24,	STP_ISD_1_C,		SEL_SSP1_1_2),
 	PINMUX_IPSR_MSEL(IP11_27_24,	RIF1_D1_B,		SEL_DRIF1_1),
-	PINMUX_IPSR_DATA(IP11_27_24,	ADICHS0),
+	PINMUX_IPSR_GPSR(IP11_27_24,	ADICHS0),
 
-	PINMUX_IPSR_DATA(IP11_31_28,	SCK2),
+	PINMUX_IPSR_GPSR(IP11_31_28,	SCK2),
 	PINMUX_IPSR_MSEL(IP11_31_28,	SCIF_CLK_B,		SEL_SCIF1_1),
 	PINMUX_IPSR_MSEL(IP11_31_28,	MSIOF1_SCK_B,		SEL_MSIOF1_1),
 	PINMUX_IPSR_MSEL(IP11_31_28,	TS_SCK1_C,		SEL_TSIF1_2),
 	PINMUX_IPSR_MSEL(IP11_31_28,	STP_ISCLK_1_C,		SEL_SSP1_1_2),
 	PINMUX_IPSR_MSEL(IP11_31_28,	RIF1_CLK_B,		SEL_DRIF1_1),
-	PINMUX_IPSR_DATA(IP11_31_28,	ADICLK),
+	PINMUX_IPSR_GPSR(IP11_31_28,	ADICLK),
 
 	/* IPSR12 */
 	PINMUX_IPSR_MSEL(IP12_3_0,	TX2_A,			SEL_SCIF2_0),
@@ -1141,7 +1140,7 @@
 	PINMUX_IPSR_MSEL(IP12_7_4,	RIF1_SYNC_C,		SEL_DRIF1_2),
 	PINMUX_IPSR_MSEL(IP12_7_4,	FSO_CFE_1_B,		SEL_FSO_1),
 
-	PINMUX_IPSR_DATA(IP12_11_8,	HSCK0),
+	PINMUX_IPSR_GPSR(IP12_11_8,	HSCK0),
 	PINMUX_IPSR_MSEL(IP12_11_8,	MSIOF1_SCK_D,		SEL_MSIOF1_3),
 	PINMUX_IPSR_MSEL(IP12_11_8,	AUDIO_CLKB_A,		SEL_ADG_0),
 	PINMUX_IPSR_MSEL(IP12_11_8,	SSI_SDATA1_B,		SEL_SSI_1),
@@ -1149,21 +1148,21 @@
 	PINMUX_IPSR_MSEL(IP12_11_8,	STP_ISCLK_0_D,		SEL_SSP1_0_3),
 	PINMUX_IPSR_MSEL(IP12_11_8,	RIF0_CLK_C,		SEL_DRIF0_2),
 
-	PINMUX_IPSR_DATA(IP12_15_12,	HRX0),
+	PINMUX_IPSR_GPSR(IP12_15_12,	HRX0),
 	PINMUX_IPSR_MSEL(IP12_15_12,	MSIOF1_RXD_D,		SEL_MSIOF1_3),
 	PINMUX_IPSR_MSEL(IP12_15_12,	SSI_SDATA2_B,		SEL_SSI_1),
 	PINMUX_IPSR_MSEL(IP12_15_12,	TS_SDEN0_D,		SEL_TSIF0_3),
 	PINMUX_IPSR_MSEL(IP12_15_12,	STP_ISEN_0_D,		SEL_SSP1_0_3),
 	PINMUX_IPSR_MSEL(IP12_15_12,	RIF0_D0_C,		SEL_DRIF0_2),
 
-	PINMUX_IPSR_DATA(IP12_19_16,	HTX0),
+	PINMUX_IPSR_GPSR(IP12_19_16,	HTX0),
 	PINMUX_IPSR_MSEL(IP12_19_16,	MSIOF1_TXD_D,		SEL_MSIOF1_3),
 	PINMUX_IPSR_MSEL(IP12_19_16,	SSI_SDATA9_B,		SEL_SSI_1),
 	PINMUX_IPSR_MSEL(IP12_19_16,	TS_SDAT0_D,		SEL_TSIF0_3),
 	PINMUX_IPSR_MSEL(IP12_19_16,	STP_ISD_0_D,		SEL_SSP1_0_3),
 	PINMUX_IPSR_MSEL(IP12_19_16,	RIF0_D1_C,		SEL_DRIF0_2),
 
-	PINMUX_IPSR_DATA(IP12_23_20,	HCTS0_N),
+	PINMUX_IPSR_GPSR(IP12_23_20,	HCTS0_N),
 	PINMUX_IPSR_MSEL(IP12_23_20,	RX2_B,			SEL_SCIF2_1),
 	PINMUX_IPSR_MSEL(IP12_23_20,	MSIOF1_SYNC_D,		SEL_MSIOF1_3),
 	PINMUX_IPSR_MSEL(IP12_23_20,	SSI_SCK9_A,		SEL_SSI_0),
@@ -1172,7 +1171,7 @@
 	PINMUX_IPSR_MSEL(IP12_23_20,	RIF0_SYNC_C,		SEL_DRIF0_2),
 	PINMUX_IPSR_MSEL(IP12_23_20,	AUDIO_CLKOUT1_A,	SEL_ADG_0),
 
-	PINMUX_IPSR_DATA(IP12_27_24,	HRTS0_N),
+	PINMUX_IPSR_GPSR(IP12_27_24,	HRTS0_N),
 	PINMUX_IPSR_MSEL(IP12_27_24,	TX2_B,			SEL_SCIF2_1),
 	PINMUX_IPSR_MSEL(IP12_27_24,	MSIOF1_SS1_D,		SEL_MSIOF1_3),
 	PINMUX_IPSR_MSEL(IP12_27_24,	SSI_WS9_A,		SEL_SSI_0),
@@ -1180,20 +1179,20 @@
 	PINMUX_IPSR_MSEL(IP12_27_24,	BPFCLK_A,		SEL_FM_0),
 	PINMUX_IPSR_MSEL(IP12_27_24,	AUDIO_CLKOUT2_A,	SEL_ADG_0),
 
-	PINMUX_IPSR_DATA(IP12_31_28,	MSIOF0_SYNC),
+	PINMUX_IPSR_GPSR(IP12_31_28,	MSIOF0_SYNC),
 	PINMUX_IPSR_MSEL(IP12_31_28,	AUDIO_CLKOUT_A,		SEL_ADG_0),
 
 	/* IPSR13 */
-	PINMUX_IPSR_DATA(IP13_3_0,	MSIOF0_SS1),
-	PINMUX_IPSR_DATA(IP13_3_0,	RX5),
+	PINMUX_IPSR_GPSR(IP13_3_0,	MSIOF0_SS1),
+	PINMUX_IPSR_GPSR(IP13_3_0,	RX5),
 	PINMUX_IPSR_MSEL(IP13_3_0,	AUDIO_CLKA_C,		SEL_ADG_2),
 	PINMUX_IPSR_MSEL(IP13_3_0,	SSI_SCK2_A,		SEL_SSI_0),
 	PINMUX_IPSR_MSEL(IP13_3_0,	STP_IVCXO27_0_C,	SEL_SSP1_0_2),
 	PINMUX_IPSR_MSEL(IP13_3_0,	AUDIO_CLKOUT3_A,	SEL_ADG_0),
 	PINMUX_IPSR_MSEL(IP13_3_0,	TCLK1_B,		SEL_TIMER_TMU_1),
 
-	PINMUX_IPSR_DATA(IP13_7_4,	MSIOF0_SS2),
-	PINMUX_IPSR_DATA(IP13_7_4,	TX5),
+	PINMUX_IPSR_GPSR(IP13_7_4,	MSIOF0_SS2),
+	PINMUX_IPSR_GPSR(IP13_7_4,	TX5),
 	PINMUX_IPSR_MSEL(IP13_7_4,	MSIOF1_SS2_D,		SEL_MSIOF1_3),
 	PINMUX_IPSR_MSEL(IP13_7_4,	AUDIO_CLKC_A,		SEL_ADG_0),
 	PINMUX_IPSR_MSEL(IP13_7_4,	SSI_WS2_A,		SEL_SSI_0),
@@ -1201,26 +1200,26 @@
 	PINMUX_IPSR_MSEL(IP13_7_4,	AUDIO_CLKOUT_D,		SEL_ADG_3),
 	PINMUX_IPSR_MSEL(IP13_7_4,	SPEEDIN_B,		SEL_SPEED_PULSE_1),
 
-	PINMUX_IPSR_DATA(IP13_11_8,	MLB_CLK),
+	PINMUX_IPSR_GPSR(IP13_11_8,	MLB_CLK),
 	PINMUX_IPSR_MSEL(IP13_11_8,	MSIOF1_SCK_F,		SEL_MSIOF1_5),
 	PINMUX_IPSR_MSEL(IP13_11_8,	SCL1_B,			SEL_I2C1_1),
 
-	PINMUX_IPSR_DATA(IP13_15_12,	MLB_SIG),
+	PINMUX_IPSR_GPSR(IP13_15_12,	MLB_SIG),
 	PINMUX_IPSR_MSEL(IP13_15_12,	RX1_B,			SEL_SCIF1_1),
 	PINMUX_IPSR_MSEL(IP13_15_12,	MSIOF1_SYNC_F,		SEL_MSIOF1_5),
 	PINMUX_IPSR_MSEL(IP13_15_12,	SDA1_B,			SEL_I2C1_1),
 
-	PINMUX_IPSR_DATA(IP13_19_16,	MLB_DAT),
+	PINMUX_IPSR_GPSR(IP13_19_16,	MLB_DAT),
 	PINMUX_IPSR_MSEL(IP13_19_16,	TX1_B,			SEL_SCIF1_1),
 	PINMUX_IPSR_MSEL(IP13_19_16,	MSIOF1_RXD_F,		SEL_MSIOF1_5),
 
-	PINMUX_IPSR_DATA(IP13_23_20,	SSI_SCK0129),
+	PINMUX_IPSR_GPSR(IP13_23_20,	SSI_SCK01239),
 	PINMUX_IPSR_MSEL(IP13_23_20,	MSIOF1_TXD_F,		SEL_MSIOF1_5),
 
-	PINMUX_IPSR_DATA(IP13_27_24,	SSI_WS0129),
+	PINMUX_IPSR_GPSR(IP13_27_24,	SSI_WS01239),
 	PINMUX_IPSR_MSEL(IP13_27_24,	MSIOF1_SS1_F,		SEL_MSIOF1_5),
 
-	PINMUX_IPSR_DATA(IP13_31_28,	SSI_SDATA0),
+	PINMUX_IPSR_GPSR(IP13_31_28,	SSI_SDATA0),
 	PINMUX_IPSR_MSEL(IP13_31_28,	MSIOF1_SS2_F,		SEL_MSIOF1_5),
 
 	/* IPSR14 */
@@ -1229,16 +1228,16 @@
 	PINMUX_IPSR_MSEL(IP14_7_4,	SSI_SDATA2_A,		SEL_SSI_0),
 	PINMUX_IPSR_MSEL(IP14_7_4,	SSI_SCK1_B,		SEL_SSI_1),
 
-	PINMUX_IPSR_DATA(IP14_11_8,	SSI_SCK34),
+	PINMUX_IPSR_GPSR(IP14_11_8,	SSI_SCK34),
 	PINMUX_IPSR_MSEL(IP14_11_8,	MSIOF1_SS1_A,		SEL_MSIOF1_0),
 	PINMUX_IPSR_MSEL(IP14_11_8,	STP_OPWM_0_A,		SEL_SSP1_0_0),
 
-	PINMUX_IPSR_DATA(IP14_15_12,	SSI_WS34),
+	PINMUX_IPSR_GPSR(IP14_15_12,	SSI_WS34),
 	PINMUX_IPSR_MSEL(IP14_15_12,	HCTS2_N_A,		SEL_HSCIF2_0),
 	PINMUX_IPSR_MSEL(IP14_15_12,	MSIOF1_SS2_A,		SEL_MSIOF1_0),
 	PINMUX_IPSR_MSEL(IP14_15_12,	STP_IVCXO27_0_A,	SEL_SSP1_0_0),
 
-	PINMUX_IPSR_DATA(IP14_19_16,	SSI_SDATA3),
+	PINMUX_IPSR_GPSR(IP14_19_16,	SSI_SDATA3),
 	PINMUX_IPSR_MSEL(IP14_19_16,	HRTS2_N_A,		SEL_HSCIF2_0),
 	PINMUX_IPSR_MSEL(IP14_19_16,	MSIOF1_TXD_A,		SEL_MSIOF1_0),
 	PINMUX_IPSR_MSEL(IP14_19_16,	TS_SCK0_A,		SEL_TSIF0_0),
@@ -1246,7 +1245,7 @@
 	PINMUX_IPSR_MSEL(IP14_19_16,	RIF0_D1_A,		SEL_DRIF0_0),
 	PINMUX_IPSR_MSEL(IP14_19_16,	RIF2_D0_A,		SEL_DRIF2_0),
 
-	PINMUX_IPSR_DATA(IP14_23_20,	SSI_SCK4),
+	PINMUX_IPSR_GPSR(IP14_23_20,	SSI_SCK4),
 	PINMUX_IPSR_MSEL(IP14_23_20,	HRX2_A,			SEL_HSCIF2_0),
 	PINMUX_IPSR_MSEL(IP14_23_20,	MSIOF1_SCK_A,		SEL_MSIOF1_0),
 	PINMUX_IPSR_MSEL(IP14_23_20,	TS_SDAT0_A,		SEL_TSIF0_0),
@@ -1254,7 +1253,7 @@
 	PINMUX_IPSR_MSEL(IP14_23_20,	RIF0_CLK_A,		SEL_DRIF0_0),
 	PINMUX_IPSR_MSEL(IP14_23_20,	RIF2_CLK_A,		SEL_DRIF2_0),
 
-	PINMUX_IPSR_DATA(IP14_27_24,	SSI_WS4),
+	PINMUX_IPSR_GPSR(IP14_27_24,	SSI_WS4),
 	PINMUX_IPSR_MSEL(IP14_27_24,	HTX2_A,			SEL_HSCIF2_0),
 	PINMUX_IPSR_MSEL(IP14_27_24,	MSIOF1_SYNC_A,		SEL_MSIOF1_0),
 	PINMUX_IPSR_MSEL(IP14_27_24,	TS_SDEN0_A,		SEL_TSIF0_0),
@@ -1262,7 +1261,7 @@
 	PINMUX_IPSR_MSEL(IP14_27_24,	RIF0_SYNC_A,		SEL_DRIF0_0),
 	PINMUX_IPSR_MSEL(IP14_27_24,	RIF2_SYNC_A,		SEL_DRIF2_0),
 
-	PINMUX_IPSR_DATA(IP14_31_28,	SSI_SDATA4),
+	PINMUX_IPSR_GPSR(IP14_31_28,	SSI_SDATA4),
 	PINMUX_IPSR_MSEL(IP14_31_28,	HSCK2_A,		SEL_HSCIF2_0),
 	PINMUX_IPSR_MSEL(IP14_31_28,	MSIOF1_RXD_A,		SEL_MSIOF1_0),
 	PINMUX_IPSR_MSEL(IP14_31_28,	TS_SPSYNC0_A,		SEL_TSIF0_0),
@@ -1271,19 +1270,19 @@
 	PINMUX_IPSR_MSEL(IP14_31_28,	RIF2_D1_A,		SEL_DRIF2_0),
 
 	/* IPSR15 */
-	PINMUX_IPSR_DATA(IP15_3_0,	SSI_SCK6),
-	PINMUX_IPSR_DATA(IP15_3_0,	USB2_PWEN),
+	PINMUX_IPSR_GPSR(IP15_3_0,	SSI_SCK6),
+	PINMUX_IPSR_GPSR(IP15_3_0,	USB2_PWEN),
 	PINMUX_IPSR_MSEL(IP15_3_0,	SIM0_RST_D,		SEL_SIMCARD_3),
 
-	PINMUX_IPSR_DATA(IP15_7_4,	SSI_WS6),
-	PINMUX_IPSR_DATA(IP15_7_4,	USB2_OVC),
+	PINMUX_IPSR_GPSR(IP15_7_4,	SSI_WS6),
+	PINMUX_IPSR_GPSR(IP15_7_4,	USB2_OVC),
 	PINMUX_IPSR_MSEL(IP15_7_4,	SIM0_D_D,		SEL_SIMCARD_3),
 
-	PINMUX_IPSR_DATA(IP15_11_8,	SSI_SDATA6),
+	PINMUX_IPSR_GPSR(IP15_11_8,	SSI_SDATA6),
 	PINMUX_IPSR_MSEL(IP15_11_8,	SIM0_CLK_D,		SEL_SIMCARD_3),
 	PINMUX_IPSR_MSEL(IP15_11_8,	SATA_DEVSLP_A,		SEL_SATA_0),
 
-	PINMUX_IPSR_DATA(IP15_15_12,	SSI_SCK78),
+	PINMUX_IPSR_GPSR(IP15_15_12,	SSI_SCK78),
 	PINMUX_IPSR_MSEL(IP15_15_12,	HRX2_B,			SEL_HSCIF2_1),
 	PINMUX_IPSR_MSEL(IP15_15_12,	MSIOF1_SCK_C,		SEL_MSIOF1_2),
 	PINMUX_IPSR_MSEL(IP15_15_12,	TS_SCK1_A,		SEL_TSIF1_0),
@@ -1291,7 +1290,7 @@
 	PINMUX_IPSR_MSEL(IP15_15_12,	RIF1_CLK_A,		SEL_DRIF1_0),
 	PINMUX_IPSR_MSEL(IP15_15_12,	RIF3_CLK_A,		SEL_DRIF3_0),
 
-	PINMUX_IPSR_DATA(IP15_19_16,	SSI_WS78),
+	PINMUX_IPSR_GPSR(IP15_19_16,	SSI_WS78),
 	PINMUX_IPSR_MSEL(IP15_19_16,	HTX2_B,			SEL_HSCIF2_1),
 	PINMUX_IPSR_MSEL(IP15_19_16,	MSIOF1_SYNC_C,		SEL_MSIOF1_2),
 	PINMUX_IPSR_MSEL(IP15_19_16,	TS_SDAT1_A,		SEL_TSIF1_0),
@@ -1299,7 +1298,7 @@
 	PINMUX_IPSR_MSEL(IP15_19_16,	RIF1_SYNC_A,		SEL_DRIF1_0),
 	PINMUX_IPSR_MSEL(IP15_19_16,	RIF3_SYNC_A,		SEL_DRIF3_0),
 
-	PINMUX_IPSR_DATA(IP15_23_20,	SSI_SDATA7),
+	PINMUX_IPSR_GPSR(IP15_23_20,	SSI_SDATA7),
 	PINMUX_IPSR_MSEL(IP15_23_20,	HCTS2_N_B,		SEL_HSCIF2_1),
 	PINMUX_IPSR_MSEL(IP15_23_20,	MSIOF1_RXD_C,		SEL_MSIOF1_2),
 	PINMUX_IPSR_MSEL(IP15_23_20,	TS_SDEN1_A,		SEL_TSIF1_0),
@@ -1308,7 +1307,7 @@
 	PINMUX_IPSR_MSEL(IP15_23_20,	RIF3_D0_A,		SEL_DRIF3_0),
 	PINMUX_IPSR_MSEL(IP15_23_20,	TCLK2_A,		SEL_TIMER_TMU_0),
 
-	PINMUX_IPSR_DATA(IP15_27_24,	SSI_SDATA8),
+	PINMUX_IPSR_GPSR(IP15_27_24,	SSI_SDATA8),
 	PINMUX_IPSR_MSEL(IP15_27_24,	HRTS2_N_B,		SEL_HSCIF2_1),
 	PINMUX_IPSR_MSEL(IP15_27_24,	MSIOF1_TXD_C,		SEL_MSIOF1_2),
 	PINMUX_IPSR_MSEL(IP15_27_24,	TS_SPSYNC1_A,		SEL_TSIF1_0),
@@ -1321,13 +1320,13 @@
 	PINMUX_IPSR_MSEL(IP15_31_28,	MSIOF1_SS1_C,		SEL_MSIOF1_2),
 	PINMUX_IPSR_MSEL(IP15_31_28,	HSCK1_A,		SEL_HSCIF1_0),
 	PINMUX_IPSR_MSEL(IP15_31_28,	SSI_WS1_B,		SEL_SSI_1),
-	PINMUX_IPSR_DATA(IP15_31_28,	SCK1),
+	PINMUX_IPSR_GPSR(IP15_31_28,	SCK1),
 	PINMUX_IPSR_MSEL(IP15_31_28,	STP_IVCXO27_1_A,	SEL_SSP1_1_0),
-	PINMUX_IPSR_DATA(IP15_31_28,	SCK5),
+	PINMUX_IPSR_GPSR(IP15_31_28,	SCK5),
 
 	/* IPSR16 */
 	PINMUX_IPSR_MSEL(IP16_3_0,	AUDIO_CLKA_A,		SEL_ADG_0),
-	PINMUX_IPSR_DATA(IP16_3_0,	CC5_OSCOUT),
+	PINMUX_IPSR_GPSR(IP16_3_0,	CC5_OSCOUT),
 
 	PINMUX_IPSR_MSEL(IP16_7_4,	AUDIO_CLKB_B,		SEL_ADG_1),
 	PINMUX_IPSR_MSEL(IP16_7_4,	SCIF_CLK_A,		SEL_SCIF1_0),
@@ -1335,20 +1334,20 @@
 	PINMUX_IPSR_MSEL(IP16_7_4,	REMOCON_A,		SEL_REMOCON_0),
 	PINMUX_IPSR_MSEL(IP16_7_4,	TCLK1_A,		SEL_TIMER_TMU_0),
 
-	PINMUX_IPSR_DATA(IP16_11_8,	USB0_PWEN),
+	PINMUX_IPSR_GPSR(IP16_11_8,	USB0_PWEN),
 	PINMUX_IPSR_MSEL(IP16_11_8,	SIM0_RST_C,		SEL_SIMCARD_2),
 	PINMUX_IPSR_MSEL(IP16_11_8,	TS_SCK1_D,		SEL_TSIF1_3),
 	PINMUX_IPSR_MSEL(IP16_11_8,	STP_ISCLK_1_D,		SEL_SSP1_1_3),
 	PINMUX_IPSR_MSEL(IP16_11_8,	BPFCLK_B,		SEL_FM_1),
 	PINMUX_IPSR_MSEL(IP16_11_8,	RIF3_CLK_B,		SEL_DRIF3_1),
 
-	PINMUX_IPSR_DATA(IP16_15_12,	USB0_OVC),
+	PINMUX_IPSR_GPSR(IP16_15_12,	USB0_OVC),
 	PINMUX_IPSR_MSEL(IP16_11_8,	SIM0_D_C,		SEL_SIMCARD_2),
 	PINMUX_IPSR_MSEL(IP16_11_8,	TS_SDAT1_D,		SEL_TSIF1_3),
 	PINMUX_IPSR_MSEL(IP16_11_8,	STP_ISD_1_D,		SEL_SSP1_1_3),
 	PINMUX_IPSR_MSEL(IP16_11_8,	RIF3_SYNC_B,		SEL_DRIF3_1),
 
-	PINMUX_IPSR_DATA(IP16_19_16,	USB1_PWEN),
+	PINMUX_IPSR_GPSR(IP16_19_16,	USB1_PWEN),
 	PINMUX_IPSR_MSEL(IP16_19_16,	SIM0_CLK_C,		SEL_SIMCARD_2),
 	PINMUX_IPSR_MSEL(IP16_19_16,	SSI_SCK1_A,		SEL_SSI_0),
 	PINMUX_IPSR_MSEL(IP16_19_16,	TS_SCK0_E,		SEL_TSIF0_4),
@@ -1357,7 +1356,7 @@
 	PINMUX_IPSR_MSEL(IP16_19_16,	RIF2_CLK_B,		SEL_DRIF2_1),
 	PINMUX_IPSR_MSEL(IP16_19_16,	SPEEDIN_A,		SEL_SPEED_PULSE_0),
 
-	PINMUX_IPSR_DATA(IP16_23_20,	USB1_OVC),
+	PINMUX_IPSR_GPSR(IP16_23_20,	USB1_OVC),
 	PINMUX_IPSR_MSEL(IP16_23_20,	MSIOF1_SS2_C,		SEL_MSIOF1_2),
 	PINMUX_IPSR_MSEL(IP16_23_20,	SSI_WS1_A,		SEL_SSI_0),
 	PINMUX_IPSR_MSEL(IP16_23_20,	TS_SDAT0_E,		SEL_TSIF0_4),
@@ -1366,7 +1365,7 @@
 	PINMUX_IPSR_MSEL(IP16_23_20,	RIF2_SYNC_B,		SEL_DRIF2_1),
 	PINMUX_IPSR_MSEL(IP16_23_20,	REMOCON_B,		SEL_REMOCON_1),
 
-	PINMUX_IPSR_DATA(IP16_27_24,	USB30_PWEN),
+	PINMUX_IPSR_GPSR(IP16_27_24,	USB30_PWEN),
 	PINMUX_IPSR_MSEL(IP16_27_24,	AUDIO_CLKOUT_B,		SEL_ADG_1),
 	PINMUX_IPSR_MSEL(IP16_27_24,	SSI_SCK2_B,		SEL_SSI_1),
 	PINMUX_IPSR_MSEL(IP16_27_24,	TS_SDEN1_D,		SEL_TSIF1_3),
@@ -1374,9 +1373,9 @@
 	PINMUX_IPSR_MSEL(IP16_27_24,	STP_OPWM_0_E,		SEL_SSP1_0_4),
 	PINMUX_IPSR_MSEL(IP16_27_24,	RIF3_D0_B,		SEL_DRIF3_1),
 	PINMUX_IPSR_MSEL(IP16_27_24,	TCLK2_B,		SEL_TIMER_TMU_1),
-	PINMUX_IPSR_DATA(IP16_27_24,	TPU0TO0),
+	PINMUX_IPSR_GPSR(IP16_27_24,	TPU0TO0),
 
-	PINMUX_IPSR_DATA(IP16_31_28,	USB30_OVC),
+	PINMUX_IPSR_GPSR(IP16_31_28,	USB30_OVC),
 	PINMUX_IPSR_MSEL(IP16_31_28,	AUDIO_CLKOUT1_B,	SEL_ADG_1),
 	PINMUX_IPSR_MSEL(IP16_31_28,	SSI_WS2_B,		SEL_SSI_1),
 	PINMUX_IPSR_MSEL(IP16_31_28,	TS_SPSYNC1_D,		SEL_TSIF1_3),
@@ -1384,24 +1383,24 @@
 	PINMUX_IPSR_MSEL(IP16_31_28,	STP_IVCXO27_0_E,	SEL_SSP1_0_4),
 	PINMUX_IPSR_MSEL(IP16_31_28,	RIF3_D1_B,		SEL_DRIF3_1),
 	PINMUX_IPSR_MSEL(IP16_31_28,	FSO_TOE_B,		SEL_FSO_1),
-	PINMUX_IPSR_DATA(IP16_31_28,	TPU0TO1),
+	PINMUX_IPSR_GPSR(IP16_31_28,	TPU0TO1),
 
 	/* IPSR17 */
-	PINMUX_IPSR_DATA(IP17_3_0,	USB31_PWEN),
+	PINMUX_IPSR_GPSR(IP17_3_0,	USB31_PWEN),
 	PINMUX_IPSR_MSEL(IP17_3_0,	AUDIO_CLKOUT2_B,	SEL_ADG_1),
 	PINMUX_IPSR_MSEL(IP17_3_0,	SSI_SCK9_B,		SEL_SSI_1),
 	PINMUX_IPSR_MSEL(IP17_3_0,	TS_SDEN0_E,		SEL_TSIF0_4),
 	PINMUX_IPSR_MSEL(IP17_3_0,	STP_ISEN_0_E,		SEL_SSP1_0_4),
 	PINMUX_IPSR_MSEL(IP17_3_0,	RIF2_D0_B,		SEL_DRIF2_1),
-	PINMUX_IPSR_DATA(IP17_3_0,	TPU0TO2),
+	PINMUX_IPSR_GPSR(IP17_3_0,	TPU0TO2),
 
-	PINMUX_IPSR_DATA(IP17_7_4,	USB31_OVC),
+	PINMUX_IPSR_GPSR(IP17_7_4,	USB31_OVC),
 	PINMUX_IPSR_MSEL(IP17_7_4,	AUDIO_CLKOUT3_B,	SEL_ADG_1),
 	PINMUX_IPSR_MSEL(IP17_7_4,	SSI_WS9_B,		SEL_SSI_1),
 	PINMUX_IPSR_MSEL(IP17_7_4,	TS_SPSYNC0_E,		SEL_TSIF0_4),
 	PINMUX_IPSR_MSEL(IP17_7_4,	STP_ISSYNC_0_E,		SEL_SSP1_0_4),
 	PINMUX_IPSR_MSEL(IP17_7_4,	RIF2_D1_B,		SEL_DRIF2_1),
-	PINMUX_IPSR_DATA(IP17_7_4,	TPU0TO3),
+	PINMUX_IPSR_GPSR(IP17_7_4,	TPU0TO3),
 
 	/* I2C */
 	PINMUX_IPSR_NOGP(0,		I2C_SEL_0_1),
@@ -1600,6 +1599,61 @@
 	AVB_AVTP_CAPTURE_B_MARK,
 };
 
+/* - CAN ------------------------------------------------------------------ */
+static const unsigned int can0_data_a_pins[] = {
+	/* TX, RX */
+	RCAR_GP_PIN(1, 23),	RCAR_GP_PIN(1, 24),
+};
+static const unsigned int can0_data_a_mux[] = {
+	CAN0_TX_A_MARK,		CAN0_RX_A_MARK,
+};
+static const unsigned int can0_data_b_pins[] = {
+	/* TX, RX */
+	RCAR_GP_PIN(2, 0),	RCAR_GP_PIN(2, 1),
+};
+static const unsigned int can0_data_b_mux[] = {
+	CAN0_TX_B_MARK,		CAN0_RX_B_MARK,
+};
+static const unsigned int can1_data_pins[] = {
+	/* TX, RX */
+	RCAR_GP_PIN(1, 22),	RCAR_GP_PIN(1, 26),
+};
+static const unsigned int can1_data_mux[] = {
+	CAN1_TX_MARK,		CAN1_RX_MARK,
+};
+
+/* - CAN Clock -------------------------------------------------------------- */
+static const unsigned int can_clk_pins[] = {
+	/* CLK */
+	RCAR_GP_PIN(1, 25),
+};
+static const unsigned int can_clk_mux[] = {
+	CAN_CLK_MARK,
+};
+
+/* - CAN FD --------------------------------------------------------------- */
+static const unsigned int canfd0_data_a_pins[] = {
+	/* TX, RX */
+	RCAR_GP_PIN(1, 23),     RCAR_GP_PIN(1, 24),
+};
+static const unsigned int canfd0_data_a_mux[] = {
+	CANFD0_TX_A_MARK,       CANFD0_RX_A_MARK,
+};
+static const unsigned int canfd0_data_b_pins[] = {
+	/* TX, RX */
+	RCAR_GP_PIN(2, 0),      RCAR_GP_PIN(2, 1),
+};
+static const unsigned int canfd0_data_b_mux[] = {
+	CANFD0_TX_B_MARK,       CANFD0_RX_B_MARK,
+};
+static const unsigned int canfd1_data_pins[] = {
+	/* TX, RX */
+	RCAR_GP_PIN(1, 22),     RCAR_GP_PIN(1, 26),
+};
+static const unsigned int canfd1_data_mux[] = {
+	CANFD1_TX_MARK,         CANFD1_RX_MARK,
+};
+
 /* - HSCIF0 ----------------------------------------------------------------- */
 static const unsigned int hscif0_data_pins[] = {
 	/* RX, TX */
@@ -1836,6 +1890,50 @@
 	SDA6_C_MARK, SCL6_C_MARK,
 };
 
+/* - INTC-EX ---------------------------------------------------------------- */
+static const unsigned int intc_ex_irq0_pins[] = {
+	/* IRQ0 */
+	RCAR_GP_PIN(2, 0),
+};
+static const unsigned int intc_ex_irq0_mux[] = {
+	IRQ0_MARK,
+};
+static const unsigned int intc_ex_irq1_pins[] = {
+	/* IRQ1 */
+	RCAR_GP_PIN(2, 1),
+};
+static const unsigned int intc_ex_irq1_mux[] = {
+	IRQ1_MARK,
+};
+static const unsigned int intc_ex_irq2_pins[] = {
+	/* IRQ2 */
+	RCAR_GP_PIN(2, 2),
+};
+static const unsigned int intc_ex_irq2_mux[] = {
+	IRQ2_MARK,
+};
+static const unsigned int intc_ex_irq3_pins[] = {
+	/* IRQ3 */
+	RCAR_GP_PIN(2, 3),
+};
+static const unsigned int intc_ex_irq3_mux[] = {
+	IRQ3_MARK,
+};
+static const unsigned int intc_ex_irq4_pins[] = {
+	/* IRQ4 */
+	RCAR_GP_PIN(2, 4),
+};
+static const unsigned int intc_ex_irq4_mux[] = {
+	IRQ4_MARK,
+};
+static const unsigned int intc_ex_irq5_pins[] = {
+	/* IRQ5 */
+	RCAR_GP_PIN(2, 5),
+};
+static const unsigned int intc_ex_irq5_mux[] = {
+	IRQ5_MARK,
+};
+
 /* - MSIOF0 ----------------------------------------------------------------- */
 static const unsigned int msiof0_clk_pins[] = {
 	/* SCK */
@@ -2492,6 +2590,105 @@
 	MSIOF3_RXD_D_MARK,
 };
 
+/* - PWM0 --------------------------------------------------------------------*/
+static const unsigned int pwm0_pins[] = {
+	/* PWM */
+	RCAR_GP_PIN(2, 6),
+};
+static const unsigned int pwm0_mux[] = {
+	PWM0_MARK,
+};
+/* - PWM1 --------------------------------------------------------------------*/
+static const unsigned int pwm1_a_pins[] = {
+	/* PWM */
+	RCAR_GP_PIN(2, 7),
+};
+static const unsigned int pwm1_a_mux[] = {
+	PWM1_A_MARK,
+};
+static const unsigned int pwm1_b_pins[] = {
+	/* PWM */
+	RCAR_GP_PIN(1, 8),
+};
+static const unsigned int pwm1_b_mux[] = {
+	PWM1_B_MARK,
+};
+/* - PWM2 --------------------------------------------------------------------*/
+static const unsigned int pwm2_a_pins[] = {
+	/* PWM */
+	RCAR_GP_PIN(2, 8),
+};
+static const unsigned int pwm2_a_mux[] = {
+	PWM2_A_MARK,
+};
+static const unsigned int pwm2_b_pins[] = {
+	/* PWM */
+	RCAR_GP_PIN(1, 11),
+};
+static const unsigned int pwm2_b_mux[] = {
+	PWM2_B_MARK,
+};
+/* - PWM3 --------------------------------------------------------------------*/
+static const unsigned int pwm3_a_pins[] = {
+	/* PWM */
+	RCAR_GP_PIN(1, 0),
+};
+static const unsigned int pwm3_a_mux[] = {
+	PWM3_A_MARK,
+};
+static const unsigned int pwm3_b_pins[] = {
+	/* PWM */
+	RCAR_GP_PIN(2, 2),
+};
+static const unsigned int pwm3_b_mux[] = {
+	PWM3_B_MARK,
+};
+/* - PWM4 --------------------------------------------------------------------*/
+static const unsigned int pwm4_a_pins[] = {
+	/* PWM */
+	RCAR_GP_PIN(1, 1),
+};
+static const unsigned int pwm4_a_mux[] = {
+	PWM4_A_MARK,
+};
+static const unsigned int pwm4_b_pins[] = {
+	/* PWM */
+	RCAR_GP_PIN(2, 3),
+};
+static const unsigned int pwm4_b_mux[] = {
+	PWM4_B_MARK,
+};
+/* - PWM5 --------------------------------------------------------------------*/
+static const unsigned int pwm5_a_pins[] = {
+	/* PWM */
+	RCAR_GP_PIN(1, 2),
+};
+static const unsigned int pwm5_a_mux[] = {
+	PWM5_A_MARK,
+};
+static const unsigned int pwm5_b_pins[] = {
+	/* PWM */
+	RCAR_GP_PIN(2, 4),
+};
+static const unsigned int pwm5_b_mux[] = {
+	PWM5_B_MARK,
+};
+/* - PWM6 --------------------------------------------------------------------*/
+static const unsigned int pwm6_a_pins[] = {
+	/* PWM */
+	RCAR_GP_PIN(1, 3),
+};
+static const unsigned int pwm6_a_mux[] = {
+	PWM6_A_MARK,
+};
+static const unsigned int pwm6_b_pins[] = {
+	/* PWM */
+	RCAR_GP_PIN(2, 5),
+};
+static const unsigned int pwm6_b_mux[] = {
+	PWM6_B_MARK,
+};
+
 /* - SATA --------------------------------------------------------------------*/
 static const unsigned int sata0_devslp_a_pins[] = {
 	/* DEVSLP */
@@ -2926,7 +3123,7 @@
 	RCAR_GP_PIN(6, 0), RCAR_GP_PIN(6, 1),
 };
 static const unsigned int ssi01239_ctrl_mux[] = {
-	SSI_SCK0129_MARK, SSI_WS0129_MARK,
+	SSI_SCK01239_MARK, SSI_WS01239_MARK,
 };
 static const unsigned int ssi1_data_a_pins[] = {
 	/* SDATA */
@@ -3090,6 +3287,31 @@
 	SSI_SCK9_B_MARK, SSI_WS9_B_MARK,
 };
 
+/* - USB0 ------------------------------------------------------------------- */
+static const unsigned int usb0_pins[] = {
+	/* PWEN, OVC */
+	RCAR_GP_PIN(6, 24), RCAR_GP_PIN(6, 25),
+};
+static const unsigned int usb0_mux[] = {
+	USB0_PWEN_MARK, USB0_OVC_MARK,
+};
+/* - USB1 ------------------------------------------------------------------- */
+static const unsigned int usb1_pins[] = {
+	/* PWEN, OVC */
+	RCAR_GP_PIN(6, 26), RCAR_GP_PIN(6, 27),
+};
+static const unsigned int usb1_mux[] = {
+	USB1_PWEN_MARK, USB1_OVC_MARK,
+};
+/* - USB2 ------------------------------------------------------------------- */
+static const unsigned int usb2_pins[] = {
+	/* PWEN, OVC */
+	RCAR_GP_PIN(6, 14), RCAR_GP_PIN(6, 15),
+};
+static const unsigned int usb2_mux[] = {
+	USB2_PWEN_MARK, USB2_OVC_MARK,
+};
+
 static const struct sh_pfc_pin_group pinmux_groups[] = {
 	SH_PFC_PIN_GROUP(audio_clk_a_a),
 	SH_PFC_PIN_GROUP(audio_clk_a_b),
@@ -3117,6 +3339,13 @@
 	SH_PFC_PIN_GROUP(avb_avtp_capture_a),
 	SH_PFC_PIN_GROUP(avb_avtp_match_b),
 	SH_PFC_PIN_GROUP(avb_avtp_capture_b),
+	SH_PFC_PIN_GROUP(can0_data_a),
+	SH_PFC_PIN_GROUP(can0_data_b),
+	SH_PFC_PIN_GROUP(can1_data),
+	SH_PFC_PIN_GROUP(can_clk),
+	SH_PFC_PIN_GROUP(canfd0_data_a),
+	SH_PFC_PIN_GROUP(canfd0_data_b),
+	SH_PFC_PIN_GROUP(canfd1_data),
 	SH_PFC_PIN_GROUP(hscif0_data),
 	SH_PFC_PIN_GROUP(hscif0_clk),
 	SH_PFC_PIN_GROUP(hscif0_ctrl),
@@ -3149,6 +3378,12 @@
 	SH_PFC_PIN_GROUP(i2c6_a),
 	SH_PFC_PIN_GROUP(i2c6_b),
 	SH_PFC_PIN_GROUP(i2c6_c),
+	SH_PFC_PIN_GROUP(intc_ex_irq0),
+	SH_PFC_PIN_GROUP(intc_ex_irq1),
+	SH_PFC_PIN_GROUP(intc_ex_irq2),
+	SH_PFC_PIN_GROUP(intc_ex_irq3),
+	SH_PFC_PIN_GROUP(intc_ex_irq4),
+	SH_PFC_PIN_GROUP(intc_ex_irq5),
 	SH_PFC_PIN_GROUP(msiof0_clk),
 	SH_PFC_PIN_GROUP(msiof0_sync),
 	SH_PFC_PIN_GROUP(msiof0_ss1),
@@ -3242,6 +3477,19 @@
 	SH_PFC_PIN_GROUP(msiof3_ss1_d),
 	SH_PFC_PIN_GROUP(msiof3_txd_d),
 	SH_PFC_PIN_GROUP(msiof3_rxd_d),
+	SH_PFC_PIN_GROUP(pwm0),
+	SH_PFC_PIN_GROUP(pwm1_a),
+	SH_PFC_PIN_GROUP(pwm1_b),
+	SH_PFC_PIN_GROUP(pwm2_a),
+	SH_PFC_PIN_GROUP(pwm2_b),
+	SH_PFC_PIN_GROUP(pwm3_a),
+	SH_PFC_PIN_GROUP(pwm3_b),
+	SH_PFC_PIN_GROUP(pwm4_a),
+	SH_PFC_PIN_GROUP(pwm4_b),
+	SH_PFC_PIN_GROUP(pwm5_a),
+	SH_PFC_PIN_GROUP(pwm5_b),
+	SH_PFC_PIN_GROUP(pwm6_a),
+	SH_PFC_PIN_GROUP(pwm6_b),
 	SH_PFC_PIN_GROUP(sata0_devslp_a),
 	SH_PFC_PIN_GROUP(sata0_devslp_b),
 	SH_PFC_PIN_GROUP(scif0_data),
@@ -3322,6 +3570,9 @@
 	SH_PFC_PIN_GROUP(ssi9_data_b),
 	SH_PFC_PIN_GROUP(ssi9_ctrl_a),
 	SH_PFC_PIN_GROUP(ssi9_ctrl_b),
+	SH_PFC_PIN_GROUP(usb0),
+	SH_PFC_PIN_GROUP(usb1),
+	SH_PFC_PIN_GROUP(usb2),
 };
 
 static const char * const audio_clk_groups[] = {
@@ -3356,6 +3607,28 @@
 	"avb_avtp_capture_b",
 };
 
+static const char * const can0_groups[] = {
+	"can0_data_a",
+	"can0_data_b",
+};
+
+static const char * const can1_groups[] = {
+	"can1_data",
+};
+
+static const char * const can_clk_groups[] = {
+	"can_clk",
+};
+
+static const char * const canfd0_groups[] = {
+	"canfd0_data_a",
+	"canfd0_data_b",
+};
+
+static const char * const canfd1_groups[] = {
+	"canfd1_data",
+};
+
 static const char * const hscif0_groups[] = {
 	"hscif0_data",
 	"hscif0_clk",
@@ -3412,6 +3685,15 @@
 	"i2c6_c",
 };
 
+static const char * const intc_ex_groups[] = {
+	"intc_ex_irq0",
+	"intc_ex_irq1",
+	"intc_ex_irq2",
+	"intc_ex_irq3",
+	"intc_ex_irq4",
+	"intc_ex_irq5",
+};
+
 static const char * const msiof0_groups[] = {
 	"msiof0_clk",
 	"msiof0_sync",
@@ -3517,6 +3799,40 @@
 	"msiof3_rxd_d",
 };
 
+static const char * const pwm0_groups[] = {
+	"pwm0",
+};
+
+static const char * const pwm1_groups[] = {
+	"pwm1_a",
+	"pwm1_b",
+};
+
+static const char * const pwm2_groups[] = {
+	"pwm2_a",
+	"pwm2_b",
+};
+
+static const char * const pwm3_groups[] = {
+	"pwm3_a",
+	"pwm3_b",
+};
+
+static const char * const pwm4_groups[] = {
+	"pwm4_a",
+	"pwm4_b",
+};
+
+static const char * const pwm5_groups[] = {
+	"pwm5_a",
+	"pwm5_b",
+};
+
+static const char * const pwm6_groups[] = {
+	"pwm6_a",
+	"pwm6_b",
+};
+
 static const char * const sata0_groups[] = {
 	"sata0_devslp_a",
 	"sata0_devslp_b",
@@ -3636,9 +3952,26 @@
 	"ssi9_ctrl_b",
 };
 
+static const char * const usb0_groups[] = {
+	"usb0",
+};
+
+static const char * const usb1_groups[] = {
+	"usb1",
+};
+
+static const char * const usb2_groups[] = {
+	"usb2",
+};
+
 static const struct sh_pfc_function pinmux_functions[] = {
 	SH_PFC_FUNCTION(audio_clk),
 	SH_PFC_FUNCTION(avb),
+	SH_PFC_FUNCTION(can0),
+	SH_PFC_FUNCTION(can1),
+	SH_PFC_FUNCTION(can_clk),
+	SH_PFC_FUNCTION(canfd0),
+	SH_PFC_FUNCTION(canfd1),
 	SH_PFC_FUNCTION(hscif0),
 	SH_PFC_FUNCTION(hscif1),
 	SH_PFC_FUNCTION(hscif2),
@@ -3647,10 +3980,18 @@
 	SH_PFC_FUNCTION(i2c1),
 	SH_PFC_FUNCTION(i2c2),
 	SH_PFC_FUNCTION(i2c6),
+	SH_PFC_FUNCTION(intc_ex),
 	SH_PFC_FUNCTION(msiof0),
 	SH_PFC_FUNCTION(msiof1),
 	SH_PFC_FUNCTION(msiof2),
 	SH_PFC_FUNCTION(msiof3),
+	SH_PFC_FUNCTION(pwm0),
+	SH_PFC_FUNCTION(pwm1),
+	SH_PFC_FUNCTION(pwm2),
+	SH_PFC_FUNCTION(pwm3),
+	SH_PFC_FUNCTION(pwm4),
+	SH_PFC_FUNCTION(pwm5),
+	SH_PFC_FUNCTION(pwm6),
 	SH_PFC_FUNCTION(sata0),
 	SH_PFC_FUNCTION(scif0),
 	SH_PFC_FUNCTION(scif1),
@@ -3664,6 +4005,9 @@
 	SH_PFC_FUNCTION(sdhi2),
 	SH_PFC_FUNCTION(sdhi3),
 	SH_PFC_FUNCTION(ssi),
+	SH_PFC_FUNCTION(usb0),
+	SH_PFC_FUNCTION(usb1),
+	SH_PFC_FUNCTION(usb2),
 };
 
 static const struct pinmux_cfg_reg pinmux_config_regs[] = {
@@ -4213,7 +4557,8 @@
 		0, 0, 0, 0, 0, 0, 0, 0,
 		/* RESERVED 3 */
 		0, 0,
-		MOD_SEL2_2_1
+		/* RESERVED 2, 1 */
+		0, 0, 0, 0,
 		MOD_SEL2_0 }
 	},
 	{ },
diff --git a/drivers/pinctrl/sh-pfc/pfc-sh7734.c b/drivers/pinctrl/sh-pfc/pfc-sh7734.c
index b0b328b..6502e67 100644
--- a/drivers/pinctrl/sh-pfc/pfc-sh7734.c
+++ b/drivers/pinctrl/sh-pfc/pfc-sh7734.c
@@ -591,547 +591,547 @@
 	PINMUX_SINGLE(IRQ3_B),
 
 	/* IPSR0 */
-	PINMUX_IPSR_DATA(IP0_1_0, A0),
-	PINMUX_IPSR_DATA(IP0_1_0, ST0_CLKIN),
+	PINMUX_IPSR_GPSR(IP0_1_0, A0),
+	PINMUX_IPSR_GPSR(IP0_1_0, ST0_CLKIN),
 	PINMUX_IPSR_MSEL(IP0_1_0, LCD_DATA0_A, SEL_LCDC_0),
 	PINMUX_IPSR_MSEL(IP0_1_0, TCLKA_C, SEL_MTU2_CLK_1),
 
-	PINMUX_IPSR_DATA(IP0_3_2, A1),
-	PINMUX_IPSR_DATA(IP0_3_2, ST0_REQ),
+	PINMUX_IPSR_GPSR(IP0_3_2, A1),
+	PINMUX_IPSR_GPSR(IP0_3_2, ST0_REQ),
 	PINMUX_IPSR_MSEL(IP0_3_2, LCD_DATA1_A, SEL_LCDC_0),
 	PINMUX_IPSR_MSEL(IP0_3_2, TCLKB_C, SEL_MTU2_CLK_1),
 
-	PINMUX_IPSR_DATA(IP0_5_4, A2),
-	PINMUX_IPSR_DATA(IP0_5_4, ST0_SYC),
+	PINMUX_IPSR_GPSR(IP0_5_4, A2),
+	PINMUX_IPSR_GPSR(IP0_5_4, ST0_SYC),
 	PINMUX_IPSR_MSEL(IP0_5_4, LCD_DATA2_A, SEL_LCDC_0),
 	PINMUX_IPSR_MSEL(IP0_5_4, TCLKC_C, SEL_MTU2_CLK_1),
 
-	PINMUX_IPSR_DATA(IP0_7_6, A3),
-	PINMUX_IPSR_DATA(IP0_7_6, ST0_VLD),
+	PINMUX_IPSR_GPSR(IP0_7_6, A3),
+	PINMUX_IPSR_GPSR(IP0_7_6, ST0_VLD),
 	PINMUX_IPSR_MSEL(IP0_7_6, LCD_DATA3_A, SEL_LCDC_0),
 	PINMUX_IPSR_MSEL(IP0_7_6, TCLKD_C, SEL_MTU2_CLK_1),
 
-	PINMUX_IPSR_DATA(IP0_9_8, A4),
-	PINMUX_IPSR_DATA(IP0_9_8, ST0_D0),
+	PINMUX_IPSR_GPSR(IP0_9_8, A4),
+	PINMUX_IPSR_GPSR(IP0_9_8, ST0_D0),
 	PINMUX_IPSR_MSEL(IP0_9_8, LCD_DATA4_A, SEL_LCDC_0),
 	PINMUX_IPSR_MSEL(IP0_9_8, TIOC0A_C, SEL_MTU2_CH0_1),
 
-	PINMUX_IPSR_DATA(IP0_11_10, A5),
-	PINMUX_IPSR_DATA(IP0_11_10, ST0_D1),
+	PINMUX_IPSR_GPSR(IP0_11_10, A5),
+	PINMUX_IPSR_GPSR(IP0_11_10, ST0_D1),
 	PINMUX_IPSR_MSEL(IP0_11_10, LCD_DATA5_A, SEL_LCDC_0),
 	PINMUX_IPSR_MSEL(IP0_11_10, TIOC0B_C, SEL_MTU2_CH0_1),
 
-	PINMUX_IPSR_DATA(IP0_13_12, A6),
-	PINMUX_IPSR_DATA(IP0_13_12, ST0_D2),
+	PINMUX_IPSR_GPSR(IP0_13_12, A6),
+	PINMUX_IPSR_GPSR(IP0_13_12, ST0_D2),
 	PINMUX_IPSR_MSEL(IP0_13_12, LCD_DATA6_A, SEL_LCDC_0),
 	PINMUX_IPSR_MSEL(IP0_13_12, TIOC0C_C, SEL_MTU2_CH0_1),
 
-	PINMUX_IPSR_DATA(IP0_15_14, A7),
-	PINMUX_IPSR_DATA(IP0_15_14, ST0_D3),
+	PINMUX_IPSR_GPSR(IP0_15_14, A7),
+	PINMUX_IPSR_GPSR(IP0_15_14, ST0_D3),
 	PINMUX_IPSR_MSEL(IP0_15_14, LCD_DATA7_A, SEL_LCDC_0),
 	PINMUX_IPSR_MSEL(IP0_15_14, TIOC0D_C, SEL_MTU2_CH0_1),
 
-	PINMUX_IPSR_DATA(IP0_17_16, A8),
-	PINMUX_IPSR_DATA(IP0_17_16, ST0_D4),
+	PINMUX_IPSR_GPSR(IP0_17_16, A8),
+	PINMUX_IPSR_GPSR(IP0_17_16, ST0_D4),
 	PINMUX_IPSR_MSEL(IP0_17_16, LCD_DATA8_A, SEL_LCDC_0),
 	PINMUX_IPSR_MSEL(IP0_17_16, TIOC1A_C, SEL_MTU2_CH1_2),
 
-	PINMUX_IPSR_DATA(IP0_19_18, A9),
-	PINMUX_IPSR_DATA(IP0_19_18, ST0_D5),
+	PINMUX_IPSR_GPSR(IP0_19_18, A9),
+	PINMUX_IPSR_GPSR(IP0_19_18, ST0_D5),
 	PINMUX_IPSR_MSEL(IP0_19_18, LCD_DATA9_A, SEL_LCDC_0),
 	PINMUX_IPSR_MSEL(IP0_19_18, TIOC1B_C, SEL_MTU2_CH1_2),
 
-	PINMUX_IPSR_DATA(IP0_21_20, A10),
-	PINMUX_IPSR_DATA(IP0_21_20, ST0_D6),
+	PINMUX_IPSR_GPSR(IP0_21_20, A10),
+	PINMUX_IPSR_GPSR(IP0_21_20, ST0_D6),
 	PINMUX_IPSR_MSEL(IP0_21_20, LCD_DATA10_A, SEL_LCDC_0),
 	PINMUX_IPSR_MSEL(IP0_21_20, TIOC2A_C, SEL_MTU2_CH2_2),
 
-	PINMUX_IPSR_DATA(IP0_23_22, A11),
-	PINMUX_IPSR_DATA(IP0_23_22, ST0_D7),
+	PINMUX_IPSR_GPSR(IP0_23_22, A11),
+	PINMUX_IPSR_GPSR(IP0_23_22, ST0_D7),
 	PINMUX_IPSR_MSEL(IP0_23_22, LCD_DATA11_A, SEL_LCDC_0),
 	PINMUX_IPSR_MSEL(IP0_23_22, TIOC2B_C, SEL_MTU2_CH2_2),
 
-	PINMUX_IPSR_DATA(IP0_25_24, A12),
+	PINMUX_IPSR_GPSR(IP0_25_24, A12),
 	PINMUX_IPSR_MSEL(IP0_25_24, LCD_DATA12_A, SEL_LCDC_0),
 	PINMUX_IPSR_MSEL(IP0_25_24, TIOC3A_C, SEL_MTU2_CH3_1),
 
-	PINMUX_IPSR_DATA(IP0_27_26, A13),
+	PINMUX_IPSR_GPSR(IP0_27_26, A13),
 	PINMUX_IPSR_MSEL(IP0_27_26, LCD_DATA13_A, SEL_LCDC_0),
 	PINMUX_IPSR_MSEL(IP0_27_26, TIOC3B_C, SEL_MTU2_CH3_1),
 
-	PINMUX_IPSR_DATA(IP0_29_28, A14),
+	PINMUX_IPSR_GPSR(IP0_29_28, A14),
 	PINMUX_IPSR_MSEL(IP0_29_28, LCD_DATA14_A, SEL_LCDC_0),
 	PINMUX_IPSR_MSEL(IP0_29_28, TIOC3C_C, SEL_MTU2_CH3_1),
 
-	PINMUX_IPSR_DATA(IP0_31_30, A15),
-	PINMUX_IPSR_DATA(IP0_31_30, ST0_VCO_CLKIN),
+	PINMUX_IPSR_GPSR(IP0_31_30, A15),
+	PINMUX_IPSR_GPSR(IP0_31_30, ST0_VCO_CLKIN),
 	PINMUX_IPSR_MSEL(IP0_31_30, LCD_DATA15_A, SEL_LCDC_0),
 	PINMUX_IPSR_MSEL(IP0_31_30, TIOC3D_C, SEL_MTU2_CH3_1),
 
 
 	/* IPSR1 */
-	PINMUX_IPSR_DATA(IP1_1_0, A16),
-	PINMUX_IPSR_DATA(IP1_1_0, ST0_PWM),
+	PINMUX_IPSR_GPSR(IP1_1_0, A16),
+	PINMUX_IPSR_GPSR(IP1_1_0, ST0_PWM),
 	PINMUX_IPSR_MSEL(IP1_1_0, LCD_DON_A, SEL_LCDC_0),
 	PINMUX_IPSR_MSEL(IP1_1_0, TIOC4A_C, SEL_MTU2_CH4_1),
 
-	PINMUX_IPSR_DATA(IP1_3_2, A17),
-	PINMUX_IPSR_DATA(IP1_3_2, ST1_VCO_CLKIN),
+	PINMUX_IPSR_GPSR(IP1_3_2, A17),
+	PINMUX_IPSR_GPSR(IP1_3_2, ST1_VCO_CLKIN),
 	PINMUX_IPSR_MSEL(IP1_3_2, LCD_CL1_A, SEL_LCDC_0),
 	PINMUX_IPSR_MSEL(IP1_3_2, TIOC4B_C, SEL_MTU2_CH4_1),
 
-	PINMUX_IPSR_DATA(IP1_5_4, A18),
-	PINMUX_IPSR_DATA(IP1_5_4, ST1_PWM),
+	PINMUX_IPSR_GPSR(IP1_5_4, A18),
+	PINMUX_IPSR_GPSR(IP1_5_4, ST1_PWM),
 	PINMUX_IPSR_MSEL(IP1_5_4, LCD_CL2_A, SEL_LCDC_0),
 	PINMUX_IPSR_MSEL(IP1_5_4, TIOC4C_C, SEL_MTU2_CH4_1),
 
-	PINMUX_IPSR_DATA(IP1_7_6, A19),
-	PINMUX_IPSR_DATA(IP1_7_6, ST1_CLKIN),
+	PINMUX_IPSR_GPSR(IP1_7_6, A19),
+	PINMUX_IPSR_GPSR(IP1_7_6, ST1_CLKIN),
 	PINMUX_IPSR_MSEL(IP1_7_6, LCD_CLK_A, SEL_LCDC_0),
 	PINMUX_IPSR_MSEL(IP1_7_6, TIOC4D_C, SEL_MTU2_CH4_1),
 
-	PINMUX_IPSR_DATA(IP1_9_8, A20),
-	PINMUX_IPSR_DATA(IP1_9_8, ST1_REQ),
+	PINMUX_IPSR_GPSR(IP1_9_8, A20),
+	PINMUX_IPSR_GPSR(IP1_9_8, ST1_REQ),
 	PINMUX_IPSR_MSEL(IP1_9_8, LCD_FLM_A, SEL_LCDC_0),
 
-	PINMUX_IPSR_DATA(IP1_11_10, A21),
-	PINMUX_IPSR_DATA(IP1_11_10, ST1_SYC),
+	PINMUX_IPSR_GPSR(IP1_11_10, A21),
+	PINMUX_IPSR_GPSR(IP1_11_10, ST1_SYC),
 	PINMUX_IPSR_MSEL(IP1_11_10, LCD_VCPWC_A, SEL_LCDC_0),
 
-	PINMUX_IPSR_DATA(IP1_13_12, A22),
-	PINMUX_IPSR_DATA(IP1_13_12, ST1_VLD),
+	PINMUX_IPSR_GPSR(IP1_13_12, A22),
+	PINMUX_IPSR_GPSR(IP1_13_12, ST1_VLD),
 	PINMUX_IPSR_MSEL(IP1_13_12, LCD_VEPWC_A, SEL_LCDC_0),
 
-	PINMUX_IPSR_DATA(IP1_15_14, A23),
-	PINMUX_IPSR_DATA(IP1_15_14, ST1_D0),
+	PINMUX_IPSR_GPSR(IP1_15_14, A23),
+	PINMUX_IPSR_GPSR(IP1_15_14, ST1_D0),
 	PINMUX_IPSR_MSEL(IP1_15_14, LCD_M_DISP_A, SEL_LCDC_0),
 
-	PINMUX_IPSR_DATA(IP1_17_16, A24),
+	PINMUX_IPSR_GPSR(IP1_17_16, A24),
 	PINMUX_IPSR_MSEL(IP1_17_16, RX2_D, SEL_SCIF2_3),
-	PINMUX_IPSR_DATA(IP1_17_16, ST1_D1),
+	PINMUX_IPSR_GPSR(IP1_17_16, ST1_D1),
 
-	PINMUX_IPSR_DATA(IP1_19_18, A25),
+	PINMUX_IPSR_GPSR(IP1_19_18, A25),
 	PINMUX_IPSR_MSEL(IP1_17_16, RX2_D, SEL_SCIF2_3),
-	PINMUX_IPSR_DATA(IP1_17_16, ST1_D2),
+	PINMUX_IPSR_GPSR(IP1_17_16, ST1_D2),
 
-	PINMUX_IPSR_DATA(IP1_22_20, D0),
+	PINMUX_IPSR_GPSR(IP1_22_20, D0),
 	PINMUX_IPSR_MSEL(IP1_22_20, SD0_DAT0_A, SEL_SDHI0_0),
 	PINMUX_IPSR_MSEL(IP1_22_20, MMC_D0_A, SEL_MMC_0),
-	PINMUX_IPSR_DATA(IP1_22_20, ST1_D3),
+	PINMUX_IPSR_GPSR(IP1_22_20, ST1_D3),
 	PINMUX_IPSR_MSEL(IP1_22_20, FD0_A, SEL_FLCTL_0),
 
-	PINMUX_IPSR_DATA(IP1_25_23, D1),
+	PINMUX_IPSR_GPSR(IP1_25_23, D1),
 	PINMUX_IPSR_MSEL(IP1_25_23, SD0_DAT0_A, SEL_SDHI0_0),
 	PINMUX_IPSR_MSEL(IP1_25_23, MMC_D1_A, SEL_MMC_0),
-	PINMUX_IPSR_DATA(IP1_25_23, ST1_D4),
+	PINMUX_IPSR_GPSR(IP1_25_23, ST1_D4),
 	PINMUX_IPSR_MSEL(IP1_25_23, FD1_A, SEL_FLCTL_0),
 
-	PINMUX_IPSR_DATA(IP1_28_26, D2),
+	PINMUX_IPSR_GPSR(IP1_28_26, D2),
 	PINMUX_IPSR_MSEL(IP1_28_26, SD0_DAT0_A, SEL_SDHI0_0),
 	PINMUX_IPSR_MSEL(IP1_28_26, MMC_D2_A, SEL_MMC_0),
-	PINMUX_IPSR_DATA(IP1_28_26, ST1_D5),
+	PINMUX_IPSR_GPSR(IP1_28_26, ST1_D5),
 	PINMUX_IPSR_MSEL(IP1_28_26, FD2_A, SEL_FLCTL_0),
 
-	PINMUX_IPSR_DATA(IP1_31_29, D3),
+	PINMUX_IPSR_GPSR(IP1_31_29, D3),
 	PINMUX_IPSR_MSEL(IP1_31_29, SD0_DAT0_A, SEL_SDHI0_0),
 	PINMUX_IPSR_MSEL(IP1_31_29, MMC_D3_A, SEL_MMC_0),
-	PINMUX_IPSR_DATA(IP1_31_29, ST1_D6),
+	PINMUX_IPSR_GPSR(IP1_31_29, ST1_D6),
 	PINMUX_IPSR_MSEL(IP1_31_29, FD3_A, SEL_FLCTL_0),
 
 	/* IPSR2 */
-	PINMUX_IPSR_DATA(IP2_2_0, D4),
+	PINMUX_IPSR_GPSR(IP2_2_0, D4),
 	PINMUX_IPSR_MSEL(IP2_2_0, SD0_CD_A, SEL_SDHI0_0),
 	PINMUX_IPSR_MSEL(IP2_2_0, MMC_D4_A, SEL_MMC_0),
-	PINMUX_IPSR_DATA(IP2_2_0, ST1_D7),
+	PINMUX_IPSR_GPSR(IP2_2_0, ST1_D7),
 	PINMUX_IPSR_MSEL(IP2_2_0, FD4_A, SEL_FLCTL_0),
 
-	PINMUX_IPSR_DATA(IP2_4_3, D5),
+	PINMUX_IPSR_GPSR(IP2_4_3, D5),
 	PINMUX_IPSR_MSEL(IP2_4_3, SD0_WP_A, SEL_SDHI0_0),
 	PINMUX_IPSR_MSEL(IP2_4_3, MMC_D5_A, SEL_MMC_0),
 	PINMUX_IPSR_MSEL(IP2_4_3, FD5_A, SEL_FLCTL_0),
 
-	PINMUX_IPSR_DATA(IP2_7_5, D6),
+	PINMUX_IPSR_GPSR(IP2_7_5, D6),
 	PINMUX_IPSR_MSEL(IP2_7_5, RSPI_RSPCK_A, SEL_RSPI_0),
 	PINMUX_IPSR_MSEL(IP2_7_5, MMC_D6_A, SEL_MMC_0),
 	PINMUX_IPSR_MSEL(IP2_7_5, QSPCLK_A, SEL_RQSPI_0),
 	PINMUX_IPSR_MSEL(IP2_7_5, FD6_A, SEL_FLCTL_0),
 
-	PINMUX_IPSR_DATA(IP2_10_8, D7),
+	PINMUX_IPSR_GPSR(IP2_10_8, D7),
 	PINMUX_IPSR_MSEL(IP2_10_8, RSPI_SSL_A, SEL_RSPI_0),
 	PINMUX_IPSR_MSEL(IP2_10_8, MMC_D7_A, SEL_MMC_0),
 	PINMUX_IPSR_MSEL(IP2_10_8, QSSL_A, SEL_RQSPI_0),
 	PINMUX_IPSR_MSEL(IP2_10_8, FD7_A, SEL_FLCTL_0),
 
-	PINMUX_IPSR_DATA(IP2_13_11, D8),
+	PINMUX_IPSR_GPSR(IP2_13_11, D8),
 	PINMUX_IPSR_MSEL(IP2_13_11, SD0_CLK_A, SEL_SDHI0_0),
 	PINMUX_IPSR_MSEL(IP2_13_11, MMC_CLK_A, SEL_MMC_0),
 	PINMUX_IPSR_MSEL(IP2_13_11, QIO2_A, SEL_RQSPI_0),
 	PINMUX_IPSR_MSEL(IP2_13_11, FCE_A, SEL_FLCTL_0),
 	PINMUX_IPSR_MSEL(IP2_13_11, ET0_GTX_CLK_B, SEL_ET0_1),
 
-	PINMUX_IPSR_DATA(IP2_16_14, D9),
+	PINMUX_IPSR_GPSR(IP2_16_14, D9),
 	PINMUX_IPSR_MSEL(IP2_16_14, SD0_CMD_A, SEL_SDHI0_0),
 	PINMUX_IPSR_MSEL(IP2_16_14, MMC_CMD_A, SEL_MMC_0),
 	PINMUX_IPSR_MSEL(IP2_16_14, QIO3_A, SEL_RQSPI_0),
 	PINMUX_IPSR_MSEL(IP2_16_14, FCLE_A, SEL_FLCTL_0),
 	PINMUX_IPSR_MSEL(IP2_16_14, ET0_ETXD1_B, SEL_ET0_1),
 
-	PINMUX_IPSR_DATA(IP2_19_17, D10),
+	PINMUX_IPSR_GPSR(IP2_19_17, D10),
 	PINMUX_IPSR_MSEL(IP2_19_17, RSPI_MOSI_A, SEL_RSPI_0),
 	PINMUX_IPSR_MSEL(IP2_19_17, QMO_QIO0_A, SEL_RQSPI_0),
 	PINMUX_IPSR_MSEL(IP2_19_17, FALE_A, SEL_FLCTL_0),
 	PINMUX_IPSR_MSEL(IP2_19_17, ET0_ETXD2_B, SEL_ET0_1),
 
-	PINMUX_IPSR_DATA(IP2_22_20, D11),
+	PINMUX_IPSR_GPSR(IP2_22_20, D11),
 	PINMUX_IPSR_MSEL(IP2_22_20, RSPI_MISO_A, SEL_RSPI_0),
 	PINMUX_IPSR_MSEL(IP2_22_20, QMI_QIO1_A, SEL_RQSPI_0),
 	PINMUX_IPSR_MSEL(IP2_22_20, FRE_A, SEL_FLCTL_0),
 
-	PINMUX_IPSR_DATA(IP2_24_23, D12),
+	PINMUX_IPSR_GPSR(IP2_24_23, D12),
 	PINMUX_IPSR_MSEL(IP2_24_23, FWE_A, SEL_FLCTL_0),
 	PINMUX_IPSR_MSEL(IP2_24_23, ET0_ETXD5_B, SEL_ET0_1),
 
-	PINMUX_IPSR_DATA(IP2_27_25, D13),
+	PINMUX_IPSR_GPSR(IP2_27_25, D13),
 	PINMUX_IPSR_MSEL(IP2_27_25, RX2_B, SEL_SCIF2_1),
 	PINMUX_IPSR_MSEL(IP2_27_25, FRB_A, SEL_FLCTL_0),
 	PINMUX_IPSR_MSEL(IP2_27_25, ET0_ETXD6_B, SEL_ET0_1),
 
-	PINMUX_IPSR_DATA(IP2_30_28, D14),
+	PINMUX_IPSR_GPSR(IP2_30_28, D14),
 	PINMUX_IPSR_MSEL(IP2_30_28, TX2_B, SEL_SCIF2_1),
 	PINMUX_IPSR_MSEL(IP2_30_28, FSE_A, SEL_FLCTL_0),
 	PINMUX_IPSR_MSEL(IP2_30_28, ET0_TX_CLK_B, SEL_ET0_1),
 
 	/* IPSR3 */
-	PINMUX_IPSR_DATA(IP3_1_0, D15),
+	PINMUX_IPSR_GPSR(IP3_1_0, D15),
 	PINMUX_IPSR_MSEL(IP3_1_0, SCK2_B, SEL_SCIF2_1),
 
-	PINMUX_IPSR_DATA(IP3_2, CS1_A26),
+	PINMUX_IPSR_GPSR(IP3_2, CS1_A26),
 	PINMUX_IPSR_MSEL(IP3_2, QIO3_B, SEL_RQSPI_1),
 
-	PINMUX_IPSR_DATA(IP3_5_3, EX_CS1),
+	PINMUX_IPSR_GPSR(IP3_5_3, EX_CS1),
 	PINMUX_IPSR_MSEL(IP3_5_3, RX3_B, SEL_SCIF2_1),
-	PINMUX_IPSR_DATA(IP3_5_3, ATACS0),
+	PINMUX_IPSR_GPSR(IP3_5_3, ATACS0),
 	PINMUX_IPSR_MSEL(IP3_5_3, QIO2_B, SEL_RQSPI_1),
-	PINMUX_IPSR_DATA(IP3_5_3, ET0_ETXD0),
+	PINMUX_IPSR_GPSR(IP3_5_3, ET0_ETXD0),
 
-	PINMUX_IPSR_DATA(IP3_8_6, EX_CS2),
+	PINMUX_IPSR_GPSR(IP3_8_6, EX_CS2),
 	PINMUX_IPSR_MSEL(IP3_8_6, TX3_B, SEL_SCIF3_1),
-	PINMUX_IPSR_DATA(IP3_8_6, ATACS1),
+	PINMUX_IPSR_GPSR(IP3_8_6, ATACS1),
 	PINMUX_IPSR_MSEL(IP3_8_6, QSPCLK_B, SEL_RQSPI_1),
 	PINMUX_IPSR_MSEL(IP3_8_6, ET0_GTX_CLK_A, SEL_ET0_0),
 
-	PINMUX_IPSR_DATA(IP3_11_9, EX_CS3),
+	PINMUX_IPSR_GPSR(IP3_11_9, EX_CS3),
 	PINMUX_IPSR_MSEL(IP3_11_9, SD1_CD_A, SEL_SDHI1_0),
-	PINMUX_IPSR_DATA(IP3_11_9, ATARD),
+	PINMUX_IPSR_GPSR(IP3_11_9, ATARD),
 	PINMUX_IPSR_MSEL(IP3_11_9, QMO_QIO0_B, SEL_RQSPI_1),
 	PINMUX_IPSR_MSEL(IP3_11_9, ET0_ETXD1_A, SEL_ET0_0),
 
-	PINMUX_IPSR_DATA(IP3_14_12, EX_CS4),
+	PINMUX_IPSR_GPSR(IP3_14_12, EX_CS4),
 	PINMUX_IPSR_MSEL(IP3_14_12, SD1_WP_A, SEL_SDHI1_0),
-	PINMUX_IPSR_DATA(IP3_14_12, ATAWR),
+	PINMUX_IPSR_GPSR(IP3_14_12, ATAWR),
 	PINMUX_IPSR_MSEL(IP3_14_12, QMI_QIO1_B, SEL_RQSPI_1),
 	PINMUX_IPSR_MSEL(IP3_14_12, ET0_ETXD2_A, SEL_ET0_0),
 
-	PINMUX_IPSR_DATA(IP3_17_15, EX_CS5),
+	PINMUX_IPSR_GPSR(IP3_17_15, EX_CS5),
 	PINMUX_IPSR_MSEL(IP3_17_15, SD1_CMD_A, SEL_SDHI1_0),
-	PINMUX_IPSR_DATA(IP3_17_15, ATADIR),
+	PINMUX_IPSR_GPSR(IP3_17_15, ATADIR),
 	PINMUX_IPSR_MSEL(IP3_17_15, QSSL_B, SEL_RQSPI_1),
 	PINMUX_IPSR_MSEL(IP3_17_15, ET0_ETXD3_A, SEL_ET0_0),
 
-	PINMUX_IPSR_DATA(IP3_19_18, RD_WR),
-	PINMUX_IPSR_DATA(IP3_19_18, TCLK0),
+	PINMUX_IPSR_GPSR(IP3_19_18, RD_WR),
+	PINMUX_IPSR_GPSR(IP3_19_18, TCLK0),
 	PINMUX_IPSR_MSEL(IP3_19_18, CAN_CLK_B, SEL_RCAN_CLK_1),
-	PINMUX_IPSR_DATA(IP3_19_18, ET0_ETXD4),
+	PINMUX_IPSR_GPSR(IP3_19_18, ET0_ETXD4),
 
-	PINMUX_IPSR_DATA(IP3_20, EX_WAIT0),
+	PINMUX_IPSR_GPSR(IP3_20, EX_WAIT0),
 	PINMUX_IPSR_MSEL(IP3_20, TCLK1_B, SEL_TMU_1),
 
-	PINMUX_IPSR_DATA(IP3_23_21, EX_WAIT1),
+	PINMUX_IPSR_GPSR(IP3_23_21, EX_WAIT1),
 	PINMUX_IPSR_MSEL(IP3_23_21, SD1_DAT0_A, SEL_SDHI1_0),
-	PINMUX_IPSR_DATA(IP3_23_21, DREQ2),
+	PINMUX_IPSR_GPSR(IP3_23_21, DREQ2),
 	PINMUX_IPSR_MSEL(IP3_23_21, CAN1_TX_C, SEL_RCAN1_2),
 	PINMUX_IPSR_MSEL(IP3_23_21, ET0_LINK_C, SEL_ET0_CTL_2),
 	PINMUX_IPSR_MSEL(IP3_23_21, ET0_ETXD5_A, SEL_ET0_0),
 
-	PINMUX_IPSR_DATA(IP3_26_24, EX_WAIT2),
+	PINMUX_IPSR_GPSR(IP3_26_24, EX_WAIT2),
 	PINMUX_IPSR_MSEL(IP3_26_24, SD1_DAT1_A, SEL_SDHI1_0),
-	PINMUX_IPSR_DATA(IP3_26_24, DACK2),
+	PINMUX_IPSR_GPSR(IP3_26_24, DACK2),
 	PINMUX_IPSR_MSEL(IP3_26_24, CAN1_RX_C, SEL_RCAN1_2),
 	PINMUX_IPSR_MSEL(IP3_26_24, ET0_MAGIC_C, SEL_ET0_CTL_2),
 	PINMUX_IPSR_MSEL(IP3_26_24, ET0_ETXD6_A, SEL_ET0_0),
 
-	PINMUX_IPSR_DATA(IP3_29_27, DRACK0),
+	PINMUX_IPSR_GPSR(IP3_29_27, DRACK0),
 	PINMUX_IPSR_MSEL(IP3_29_27, SD1_DAT2_A, SEL_SDHI1_0),
-	PINMUX_IPSR_DATA(IP3_29_27, ATAG),
+	PINMUX_IPSR_GPSR(IP3_29_27, ATAG),
 	PINMUX_IPSR_MSEL(IP3_29_27, TCLK1_A, SEL_TMU_0),
-	PINMUX_IPSR_DATA(IP3_29_27, ET0_ETXD7),
+	PINMUX_IPSR_GPSR(IP3_29_27, ET0_ETXD7),
 
 	/* IPSR4 */
 	PINMUX_IPSR_MSEL(IP4_2_0, HCTS0_A, SEL_HSCIF_0),
 	PINMUX_IPSR_MSEL(IP4_2_0, CTS1_A, SEL_SCIF1_0),
-	PINMUX_IPSR_DATA(IP4_2_0, VI0_FIELD),
+	PINMUX_IPSR_GPSR(IP4_2_0, VI0_FIELD),
 	PINMUX_IPSR_MSEL(IP4_2_0, RMII0_RXD1_A, SEL_RMII_0),
-	PINMUX_IPSR_DATA(IP4_2_0, ET0_ERXD7),
+	PINMUX_IPSR_GPSR(IP4_2_0, ET0_ERXD7),
 
 	PINMUX_IPSR_MSEL(IP4_5_3, HRTS0_A, SEL_HSCIF_0),
 	PINMUX_IPSR_MSEL(IP4_5_3, RTS1_A, SEL_SCIF1_0),
-	PINMUX_IPSR_DATA(IP4_5_3, VI0_HSYNC),
+	PINMUX_IPSR_GPSR(IP4_5_3, VI0_HSYNC),
 	PINMUX_IPSR_MSEL(IP4_5_3, RMII0_TXD_EN_A, SEL_RMII_0),
-	PINMUX_IPSR_DATA(IP4_5_3, ET0_RX_DV),
+	PINMUX_IPSR_GPSR(IP4_5_3, ET0_RX_DV),
 
 	PINMUX_IPSR_MSEL(IP4_8_6, HSCK0_A, SEL_HSCIF_0),
 	PINMUX_IPSR_MSEL(IP4_8_6, SCK1_A, SEL_SCIF1_0),
-	PINMUX_IPSR_DATA(IP4_8_6, VI0_VSYNC),
+	PINMUX_IPSR_GPSR(IP4_8_6, VI0_VSYNC),
 	PINMUX_IPSR_MSEL(IP4_8_6, RMII0_RX_ER_A, SEL_RMII_0),
-	PINMUX_IPSR_DATA(IP4_8_6, ET0_RX_ER),
+	PINMUX_IPSR_GPSR(IP4_8_6, ET0_RX_ER),
 
 	PINMUX_IPSR_MSEL(IP4_11_9, HRX0_A, SEL_HSCIF_0),
 	PINMUX_IPSR_MSEL(IP4_11_9, RX1_A, SEL_SCIF1_0),
-	PINMUX_IPSR_DATA(IP4_11_9, VI0_DATA0_VI0_B0),
+	PINMUX_IPSR_GPSR(IP4_11_9, VI0_DATA0_VI0_B0),
 	PINMUX_IPSR_MSEL(IP4_11_9, RMII0_CRS_DV_A, SEL_RMII_0),
-	PINMUX_IPSR_DATA(IP4_11_9, ET0_CRS),
+	PINMUX_IPSR_GPSR(IP4_11_9, ET0_CRS),
 
 	PINMUX_IPSR_MSEL(IP4_14_12, HTX0_A, SEL_HSCIF_0),
 	PINMUX_IPSR_MSEL(IP4_14_12, TX1_A, SEL_SCIF1_0),
-	PINMUX_IPSR_DATA(IP4_14_12, VI0_DATA1_VI0_B1),
+	PINMUX_IPSR_GPSR(IP4_14_12, VI0_DATA1_VI0_B1),
 	PINMUX_IPSR_MSEL(IP4_14_12, RMII0_MDC_A, SEL_RMII_0),
-	PINMUX_IPSR_DATA(IP4_14_12, ET0_COL),
+	PINMUX_IPSR_GPSR(IP4_14_12, ET0_COL),
 
 	PINMUX_IPSR_MSEL(IP4_17_15, CTS0_B, SEL_SCIF0_1),
-	PINMUX_IPSR_DATA(IP4_17_15, VI0_DATA2_VI0_B2),
+	PINMUX_IPSR_GPSR(IP4_17_15, VI0_DATA2_VI0_B2),
 	PINMUX_IPSR_MSEL(IP4_17_15, RMII0_MDIO_A, SEL_RMII_0),
-	PINMUX_IPSR_DATA(IP4_17_15, ET0_MDC),
+	PINMUX_IPSR_GPSR(IP4_17_15, ET0_MDC),
 
 	PINMUX_IPSR_MSEL(IP4_19_18, RTS0_B, SEL_SCIF0_1),
-	PINMUX_IPSR_DATA(IP4_19_18, VI0_DATA3_VI0_B3),
+	PINMUX_IPSR_GPSR(IP4_19_18, VI0_DATA3_VI0_B3),
 	PINMUX_IPSR_MSEL(IP4_19_18, ET0_MDIO_A, SEL_ET0_0),
 
 	PINMUX_IPSR_MSEL(IP4_21_20, SCK1_B, SEL_SCIF1_1),
-	PINMUX_IPSR_DATA(IP4_21_20, VI0_DATA4_VI0_B4),
+	PINMUX_IPSR_GPSR(IP4_21_20, VI0_DATA4_VI0_B4),
 	PINMUX_IPSR_MSEL(IP4_21_20, ET0_LINK_A, SEL_ET0_CTL_0),
 
 	PINMUX_IPSR_MSEL(IP4_23_22, RX1_B, SEL_SCIF1_1),
-	PINMUX_IPSR_DATA(IP4_23_22, VI0_DATA5_VI0_B5),
+	PINMUX_IPSR_GPSR(IP4_23_22, VI0_DATA5_VI0_B5),
 	PINMUX_IPSR_MSEL(IP4_23_22, ET0_MAGIC_A, SEL_ET0_CTL_0),
 
 	PINMUX_IPSR_MSEL(IP4_25_24, TX1_B, SEL_SCIF1_1),
-	PINMUX_IPSR_DATA(IP4_25_24, VI0_DATA6_VI0_G0),
+	PINMUX_IPSR_GPSR(IP4_25_24, VI0_DATA6_VI0_G0),
 	PINMUX_IPSR_MSEL(IP4_25_24, ET0_PHY_INT_A, SEL_ET0_CTL_0),
 
 	PINMUX_IPSR_MSEL(IP4_27_26, CTS1_B, SEL_SCIF1_1),
-	PINMUX_IPSR_DATA(IP4_27_26, VI0_DATA7_VI0_G1),
+	PINMUX_IPSR_GPSR(IP4_27_26, VI0_DATA7_VI0_G1),
 
 	PINMUX_IPSR_MSEL(IP4_29_28, RTS1_B, SEL_SCIF1_1),
-	PINMUX_IPSR_DATA(IP4_29_28, VI0_G2),
+	PINMUX_IPSR_GPSR(IP4_29_28, VI0_G2),
 
 	PINMUX_IPSR_MSEL(IP4_31_30, SCK2_A, SEL_SCIF2_0),
-	PINMUX_IPSR_DATA(IP4_31_30, VI0_G3),
+	PINMUX_IPSR_GPSR(IP4_31_30, VI0_G3),
 
 	/* IPSR5 */
 	PINMUX_IPSR_MSEL(IP5_2_0, SD2_CLK_A, SEL_SDHI2_0),
 	PINMUX_IPSR_MSEL(IP5_2_0, RX2_A, SEL_SCIF2_0),
-	PINMUX_IPSR_DATA(IP5_2_0, VI0_G4),
+	PINMUX_IPSR_GPSR(IP5_2_0, VI0_G4),
 	PINMUX_IPSR_MSEL(IP5_2_0, ET0_RX_CLK_B, SEL_ET0_1),
 
 	PINMUX_IPSR_MSEL(IP5_5_3, SD2_CMD_A, SEL_SDHI2_0),
 	PINMUX_IPSR_MSEL(IP5_5_3, TX2_A, SEL_SCIF2_0),
-	PINMUX_IPSR_DATA(IP5_5_3, VI0_G5),
+	PINMUX_IPSR_GPSR(IP5_5_3, VI0_G5),
 	PINMUX_IPSR_MSEL(IP5_5_3, ET0_ERXD2_B, SEL_ET0_1),
 
 	PINMUX_IPSR_MSEL(IP5_8_6, SD2_DAT0_A, SEL_SDHI2_0),
 	PINMUX_IPSR_MSEL(IP5_8_6, RX3_A, SEL_SCIF3_0),
-	PINMUX_IPSR_DATA(IP4_8_6, VI0_R0),
+	PINMUX_IPSR_GPSR(IP4_8_6, VI0_R0),
 	PINMUX_IPSR_MSEL(IP4_8_6, ET0_ERXD2_B, SEL_ET0_1),
 
 	PINMUX_IPSR_MSEL(IP5_11_9, SD2_DAT1_A, SEL_SDHI2_0),
 	PINMUX_IPSR_MSEL(IP5_11_9, TX3_A, SEL_SCIF3_0),
-	PINMUX_IPSR_DATA(IP5_11_9, VI0_R1),
+	PINMUX_IPSR_GPSR(IP5_11_9, VI0_R1),
 	PINMUX_IPSR_MSEL(IP5_11_9, ET0_MDIO_B, SEL_ET0_1),
 
 	PINMUX_IPSR_MSEL(IP5_14_12, SD2_DAT2_A, SEL_SDHI2_0),
 	PINMUX_IPSR_MSEL(IP5_14_12, RX4_A, SEL_SCIF4_0),
-	PINMUX_IPSR_DATA(IP5_14_12, VI0_R2),
+	PINMUX_IPSR_GPSR(IP5_14_12, VI0_R2),
 	PINMUX_IPSR_MSEL(IP5_14_12, ET0_LINK_B, SEL_ET0_CTL_1),
 
 	PINMUX_IPSR_MSEL(IP5_17_15, SD2_DAT3_A, SEL_SDHI2_0),
 	PINMUX_IPSR_MSEL(IP5_17_15, TX4_A, SEL_SCIF4_0),
-	PINMUX_IPSR_DATA(IP5_17_15, VI0_R3),
+	PINMUX_IPSR_GPSR(IP5_17_15, VI0_R3),
 	PINMUX_IPSR_MSEL(IP5_17_15, ET0_MAGIC_B, SEL_ET0_CTL_1),
 
 	PINMUX_IPSR_MSEL(IP5_20_18, SD2_CD_A, SEL_SDHI2_0),
 	PINMUX_IPSR_MSEL(IP5_20_18, RX5_A, SEL_SCIF5_0),
-	PINMUX_IPSR_DATA(IP5_20_18, VI0_R4),
+	PINMUX_IPSR_GPSR(IP5_20_18, VI0_R4),
 	PINMUX_IPSR_MSEL(IP5_20_18, ET0_PHY_INT_B, SEL_ET0_CTL_1),
 
 	PINMUX_IPSR_MSEL(IP5_22_21, SD2_WP_A, SEL_SDHI2_0),
 	PINMUX_IPSR_MSEL(IP5_22_21, TX5_A, SEL_SCIF5_0),
-	PINMUX_IPSR_DATA(IP5_22_21, VI0_R5),
+	PINMUX_IPSR_GPSR(IP5_22_21, VI0_R5),
 
-	PINMUX_IPSR_DATA(IP5_24_23, REF125CK),
-	PINMUX_IPSR_DATA(IP5_24_23, ADTRG),
+	PINMUX_IPSR_GPSR(IP5_24_23, REF125CK),
+	PINMUX_IPSR_GPSR(IP5_24_23, ADTRG),
 	PINMUX_IPSR_MSEL(IP5_24_23, RX5_C, SEL_SCIF5_2),
-	PINMUX_IPSR_DATA(IP5_26_25, REF50CK),
+	PINMUX_IPSR_GPSR(IP5_26_25, REF50CK),
 	PINMUX_IPSR_MSEL(IP5_26_25, CTS1_E, SEL_SCIF1_3),
 	PINMUX_IPSR_MSEL(IP5_26_25, HCTS0_D, SEL_HSCIF_3),
 
 	/* IPSR6 */
-	PINMUX_IPSR_DATA(IP6_2_0, DU0_DR0),
+	PINMUX_IPSR_GPSR(IP6_2_0, DU0_DR0),
 	PINMUX_IPSR_MSEL(IP6_2_0, SCIF_CLK_B, SEL_SCIF_CLK_1),
 	PINMUX_IPSR_MSEL(IP6_2_0, HRX0_D, SEL_HSCIF_3),
 	PINMUX_IPSR_MSEL(IP6_2_0, IETX_A, SEL_IEBUS_0),
 	PINMUX_IPSR_MSEL(IP6_2_0, TCLKA_A, SEL_MTU2_CLK_0),
-	PINMUX_IPSR_DATA(IP6_2_0, HIFD00),
+	PINMUX_IPSR_GPSR(IP6_2_0, HIFD00),
 
-	PINMUX_IPSR_DATA(IP6_5_3, DU0_DR1),
+	PINMUX_IPSR_GPSR(IP6_5_3, DU0_DR1),
 	PINMUX_IPSR_MSEL(IP6_5_3, SCK0_B, SEL_SCIF0_1),
 	PINMUX_IPSR_MSEL(IP6_5_3, HTX0_D, SEL_HSCIF_3),
 	PINMUX_IPSR_MSEL(IP6_5_3, IERX_A, SEL_IEBUS_0),
 	PINMUX_IPSR_MSEL(IP6_5_3, TCLKB_A, SEL_MTU2_CLK_0),
-	PINMUX_IPSR_DATA(IP6_5_3, HIFD01),
+	PINMUX_IPSR_GPSR(IP6_5_3, HIFD01),
 
-	PINMUX_IPSR_DATA(IP6_7_6, DU0_DR2),
+	PINMUX_IPSR_GPSR(IP6_7_6, DU0_DR2),
 	PINMUX_IPSR_MSEL(IP6_7_6, RX0_B, SEL_SCIF0_1),
 	PINMUX_IPSR_MSEL(IP6_7_6, TCLKC_A, SEL_MTU2_CLK_0),
-	PINMUX_IPSR_DATA(IP6_7_6, HIFD02),
+	PINMUX_IPSR_GPSR(IP6_7_6, HIFD02),
 
-	PINMUX_IPSR_DATA(IP6_9_8, DU0_DR3),
+	PINMUX_IPSR_GPSR(IP6_9_8, DU0_DR3),
 	PINMUX_IPSR_MSEL(IP6_9_8, TX0_B, SEL_SCIF0_1),
 	PINMUX_IPSR_MSEL(IP6_9_8, TCLKD_A, SEL_MTU2_CLK_0),
-	PINMUX_IPSR_DATA(IP6_9_8, HIFD03),
+	PINMUX_IPSR_GPSR(IP6_9_8, HIFD03),
 
-	PINMUX_IPSR_DATA(IP6_11_10, DU0_DR4),
+	PINMUX_IPSR_GPSR(IP6_11_10, DU0_DR4),
 	PINMUX_IPSR_MSEL(IP6_11_10, CTS0_C, SEL_SCIF0_2),
 	PINMUX_IPSR_MSEL(IP6_11_10, TIOC0A_A, SEL_MTU2_CH0_0),
-	PINMUX_IPSR_DATA(IP6_11_10, HIFD04),
+	PINMUX_IPSR_GPSR(IP6_11_10, HIFD04),
 
-	PINMUX_IPSR_DATA(IP6_13_12, DU0_DR5),
+	PINMUX_IPSR_GPSR(IP6_13_12, DU0_DR5),
 	PINMUX_IPSR_MSEL(IP6_13_12, RTS0_C, SEL_SCIF0_1),
 	PINMUX_IPSR_MSEL(IP6_13_12, TIOC0B_A, SEL_MTU2_CH0_0),
-	PINMUX_IPSR_DATA(IP6_13_12, HIFD05),
+	PINMUX_IPSR_GPSR(IP6_13_12, HIFD05),
 
-	PINMUX_IPSR_DATA(IP6_15_14, DU0_DR6),
+	PINMUX_IPSR_GPSR(IP6_15_14, DU0_DR6),
 	PINMUX_IPSR_MSEL(IP6_15_14, SCK1_C, SEL_SCIF1_2),
 	PINMUX_IPSR_MSEL(IP6_15_14, TIOC0C_A, SEL_MTU2_CH0_0),
-	PINMUX_IPSR_DATA(IP6_15_14, HIFD06),
+	PINMUX_IPSR_GPSR(IP6_15_14, HIFD06),
 
-	PINMUX_IPSR_DATA(IP6_17_16, DU0_DR7),
+	PINMUX_IPSR_GPSR(IP6_17_16, DU0_DR7),
 	PINMUX_IPSR_MSEL(IP6_17_16, RX1_C, SEL_SCIF1_2),
 	PINMUX_IPSR_MSEL(IP6_17_16, TIOC0D_A, SEL_MTU2_CH0_0),
-	PINMUX_IPSR_DATA(IP6_17_16, HIFD07),
+	PINMUX_IPSR_GPSR(IP6_17_16, HIFD07),
 
-	PINMUX_IPSR_DATA(IP6_20_18, DU0_DG0),
+	PINMUX_IPSR_GPSR(IP6_20_18, DU0_DG0),
 	PINMUX_IPSR_MSEL(IP6_20_18, TX1_C, SEL_SCIF1_2),
 	PINMUX_IPSR_MSEL(IP6_20_18, HSCK0_D, SEL_HSCIF_3),
 	PINMUX_IPSR_MSEL(IP6_20_18, IECLK_A, SEL_IEBUS_0),
 	PINMUX_IPSR_MSEL(IP6_20_18, TIOC1A_A, SEL_MTU2_CH1_0),
-	PINMUX_IPSR_DATA(IP6_20_18, HIFD08),
+	PINMUX_IPSR_GPSR(IP6_20_18, HIFD08),
 
-	PINMUX_IPSR_DATA(IP6_23_21, DU0_DG1),
+	PINMUX_IPSR_GPSR(IP6_23_21, DU0_DG1),
 	PINMUX_IPSR_MSEL(IP6_23_21, CTS1_C, SEL_SCIF1_2),
 	PINMUX_IPSR_MSEL(IP6_23_21, HRTS0_D, SEL_HSCIF_3),
 	PINMUX_IPSR_MSEL(IP6_23_21, TIOC1B_A, SEL_MTU2_CH1_0),
-	PINMUX_IPSR_DATA(IP6_23_21, HIFD09),
+	PINMUX_IPSR_GPSR(IP6_23_21, HIFD09),
 
 	/* IPSR7 */
-	PINMUX_IPSR_DATA(IP7_2_0, DU0_DG2),
+	PINMUX_IPSR_GPSR(IP7_2_0, DU0_DG2),
 	PINMUX_IPSR_MSEL(IP7_2_0, RTS1_C, SEL_SCIF1_2),
 	PINMUX_IPSR_MSEL(IP7_2_0, RMII0_MDC_B, SEL_RMII_1),
 	PINMUX_IPSR_MSEL(IP7_2_0, TIOC2A_A, SEL_MTU2_CH2_0),
-	PINMUX_IPSR_DATA(IP7_2_0, HIFD10),
+	PINMUX_IPSR_GPSR(IP7_2_0, HIFD10),
 
-	PINMUX_IPSR_DATA(IP7_5_3, DU0_DG3),
+	PINMUX_IPSR_GPSR(IP7_5_3, DU0_DG3),
 	PINMUX_IPSR_MSEL(IP7_5_3, SCK2_C, SEL_SCIF2_2),
 	PINMUX_IPSR_MSEL(IP7_5_3, RMII0_MDIO_B, SEL_RMII_1),
 	PINMUX_IPSR_MSEL(IP7_5_3, TIOC2B_A, SEL_MTU2_CH2_0),
-	PINMUX_IPSR_DATA(IP7_5_3, HIFD11),
+	PINMUX_IPSR_GPSR(IP7_5_3, HIFD11),
 
-	PINMUX_IPSR_DATA(IP7_8_6, DU0_DG4),
+	PINMUX_IPSR_GPSR(IP7_8_6, DU0_DG4),
 	PINMUX_IPSR_MSEL(IP7_8_6, RX2_C, SEL_SCIF2_2),
 	PINMUX_IPSR_MSEL(IP7_8_6, RMII0_CRS_DV_B, SEL_RMII_1),
 	PINMUX_IPSR_MSEL(IP7_8_6, TIOC3A_A, SEL_MTU2_CH3_0),
-	PINMUX_IPSR_DATA(IP7_8_6, HIFD12),
+	PINMUX_IPSR_GPSR(IP7_8_6, HIFD12),
 
-	PINMUX_IPSR_DATA(IP7_11_9, DU0_DG5),
+	PINMUX_IPSR_GPSR(IP7_11_9, DU0_DG5),
 	PINMUX_IPSR_MSEL(IP7_11_9, TX2_C, SEL_SCIF2_2),
 	PINMUX_IPSR_MSEL(IP7_11_9, RMII0_RX_ER_B, SEL_RMII_1),
 	PINMUX_IPSR_MSEL(IP7_11_9, TIOC3B_A, SEL_MTU2_CH3_0),
-	PINMUX_IPSR_DATA(IP7_11_9, HIFD13),
+	PINMUX_IPSR_GPSR(IP7_11_9, HIFD13),
 
-	PINMUX_IPSR_DATA(IP7_14_12, DU0_DG6),
+	PINMUX_IPSR_GPSR(IP7_14_12, DU0_DG6),
 	PINMUX_IPSR_MSEL(IP7_14_12, RX3_C, SEL_SCIF3_2),
 	PINMUX_IPSR_MSEL(IP7_14_12, RMII0_RXD0_B, SEL_RMII_1),
 	PINMUX_IPSR_MSEL(IP7_14_12, TIOC3C_A, SEL_MTU2_CH3_0),
-	PINMUX_IPSR_DATA(IP7_14_12, HIFD14),
+	PINMUX_IPSR_GPSR(IP7_14_12, HIFD14),
 
-	PINMUX_IPSR_DATA(IP7_17_15, DU0_DG7),
+	PINMUX_IPSR_GPSR(IP7_17_15, DU0_DG7),
 	PINMUX_IPSR_MSEL(IP7_17_15, TX3_C, SEL_SCIF3_2),
 	PINMUX_IPSR_MSEL(IP7_17_15, RMII0_RXD1_B, SEL_RMII_1),
 	PINMUX_IPSR_MSEL(IP7_17_15, TIOC3D_A, SEL_MTU2_CH3_0),
-	PINMUX_IPSR_DATA(IP7_17_15, HIFD15),
+	PINMUX_IPSR_GPSR(IP7_17_15, HIFD15),
 
-	PINMUX_IPSR_DATA(IP7_20_18, DU0_DB0),
+	PINMUX_IPSR_GPSR(IP7_20_18, DU0_DB0),
 	PINMUX_IPSR_MSEL(IP7_20_18, RX4_C, SEL_SCIF4_2),
 	PINMUX_IPSR_MSEL(IP7_20_18, RMII0_TXD_EN_B, SEL_RMII_1),
 	PINMUX_IPSR_MSEL(IP7_20_18, TIOC4A_A, SEL_MTU2_CH4_0),
-	PINMUX_IPSR_DATA(IP7_20_18, HIFCS),
+	PINMUX_IPSR_GPSR(IP7_20_18, HIFCS),
 
-	PINMUX_IPSR_DATA(IP7_23_21, DU0_DB1),
+	PINMUX_IPSR_GPSR(IP7_23_21, DU0_DB1),
 	PINMUX_IPSR_MSEL(IP7_23_21, TX4_C, SEL_SCIF4_2),
 	PINMUX_IPSR_MSEL(IP7_23_21, RMII0_TXD0_B, SEL_RMII_1),
 	PINMUX_IPSR_MSEL(IP7_23_21, TIOC4B_A, SEL_MTU2_CH4_0),
-	PINMUX_IPSR_DATA(IP7_23_21, HIFWR),
+	PINMUX_IPSR_GPSR(IP7_23_21, HIFWR),
 
-	PINMUX_IPSR_DATA(IP7_26_24, DU0_DB2),
+	PINMUX_IPSR_GPSR(IP7_26_24, DU0_DB2),
 	PINMUX_IPSR_MSEL(IP7_26_24, RX5_B, SEL_SCIF5_1),
 	PINMUX_IPSR_MSEL(IP7_26_24, RMII0_TXD1_B, SEL_RMII_1),
 	PINMUX_IPSR_MSEL(IP7_26_24, TIOC4C_A, SEL_MTU2_CH4_0),
 
-	PINMUX_IPSR_DATA(IP7_28_27, DU0_DB3),
+	PINMUX_IPSR_GPSR(IP7_28_27, DU0_DB3),
 	PINMUX_IPSR_MSEL(IP7_28_27, TX5_B, SEL_SCIF5_1),
 	PINMUX_IPSR_MSEL(IP7_28_27, TIOC4D_A, SEL_MTU2_CH4_0),
-	PINMUX_IPSR_DATA(IP7_28_27, HIFRD),
+	PINMUX_IPSR_GPSR(IP7_28_27, HIFRD),
 
-	PINMUX_IPSR_DATA(IP7_30_29, DU0_DB4),
-	PINMUX_IPSR_DATA(IP7_30_29, HIFINT),
+	PINMUX_IPSR_GPSR(IP7_30_29, DU0_DB4),
+	PINMUX_IPSR_GPSR(IP7_30_29, HIFINT),
 
 	/* IPSR8 */
-	PINMUX_IPSR_DATA(IP8_1_0, DU0_DB5),
-	PINMUX_IPSR_DATA(IP8_1_0, HIFDREQ),
+	PINMUX_IPSR_GPSR(IP8_1_0, DU0_DB5),
+	PINMUX_IPSR_GPSR(IP8_1_0, HIFDREQ),
 
-	PINMUX_IPSR_DATA(IP8_3_2, DU0_DB6),
-	PINMUX_IPSR_DATA(IP8_3_2, HIFRDY),
+	PINMUX_IPSR_GPSR(IP8_3_2, DU0_DB6),
+	PINMUX_IPSR_GPSR(IP8_3_2, HIFRDY),
 
-	PINMUX_IPSR_DATA(IP8_5_4, DU0_DB7),
+	PINMUX_IPSR_GPSR(IP8_5_4, DU0_DB7),
 	PINMUX_IPSR_MSEL(IP8_5_4, SSI_SCK0_B, SEL_SSI0_1),
 	PINMUX_IPSR_MSEL(IP8_5_4, HIFEBL_B, SEL_HIF_1),
 
-	PINMUX_IPSR_DATA(IP8_7_6, DU0_DOTCLKIN),
+	PINMUX_IPSR_GPSR(IP8_7_6, DU0_DOTCLKIN),
 	PINMUX_IPSR_MSEL(IP8_7_6, HSPI_CS0_C, SEL_HSPI_2),
 	PINMUX_IPSR_MSEL(IP8_7_6, SSI_WS0_B, SEL_SSI0_1),
 
-	PINMUX_IPSR_DATA(IP8_9_8, DU0_DOTCLKOUT),
+	PINMUX_IPSR_GPSR(IP8_9_8, DU0_DOTCLKOUT),
 	PINMUX_IPSR_MSEL(IP8_9_8, HSPI_CLK0_C, SEL_HSPI_2),
 	PINMUX_IPSR_MSEL(IP8_9_8, SSI_SDATA0_B, SEL_SSI0_1),
 
-	PINMUX_IPSR_DATA(IP8_11_10, DU0_EXHSYNC_DU0_HSYNC),
+	PINMUX_IPSR_GPSR(IP8_11_10, DU0_EXHSYNC_DU0_HSYNC),
 	PINMUX_IPSR_MSEL(IP8_11_10, HSPI_TX0_C, SEL_HSPI_2),
 	PINMUX_IPSR_MSEL(IP8_11_10, SSI_SCK1_B, SEL_SSI1_1),
 
-	PINMUX_IPSR_DATA(IP8_13_12, DU0_EXVSYNC_DU0_VSYNC),
+	PINMUX_IPSR_GPSR(IP8_13_12, DU0_EXVSYNC_DU0_VSYNC),
 	PINMUX_IPSR_MSEL(IP8_13_12, HSPI_RX0_C, SEL_HSPI_2),
 	PINMUX_IPSR_MSEL(IP8_13_12, SSI_WS1_B, SEL_SSI1_1),
 
-	PINMUX_IPSR_DATA(IP8_15_14, DU0_EXODDF_DU0_ODDF),
+	PINMUX_IPSR_GPSR(IP8_15_14, DU0_EXODDF_DU0_ODDF),
 	PINMUX_IPSR_MSEL(IP8_15_14, CAN0_RX_B, SEL_RCAN0_1),
 	PINMUX_IPSR_MSEL(IP8_15_14, HSCK0_B, SEL_HSCIF_1),
 	PINMUX_IPSR_MSEL(IP8_15_14, SSI_SDATA1_B, SEL_SSI1_1),
 
-	PINMUX_IPSR_DATA(IP8_17_16, DU0_DISP),
+	PINMUX_IPSR_GPSR(IP8_17_16, DU0_DISP),
 	PINMUX_IPSR_MSEL(IP8_17_16, CAN0_TX_B, SEL_RCAN0_1),
 	PINMUX_IPSR_MSEL(IP8_17_16, HRX0_B, SEL_HSCIF_1),
 	PINMUX_IPSR_MSEL(IP8_17_16, AUDIO_CLKA_B, SEL_AUDIO_CLKA_1),
 
-	PINMUX_IPSR_DATA(IP8_19_18, DU0_CDE),
+	PINMUX_IPSR_GPSR(IP8_19_18, DU0_CDE),
 	PINMUX_IPSR_MSEL(IP8_19_18, HTX0_B, SEL_HSCIF_1),
 	PINMUX_IPSR_MSEL(IP8_19_18, AUDIO_CLKB_B, SEL_AUDIO_CLKB_1),
 	PINMUX_IPSR_MSEL(IP8_19_18, LCD_VCPWC_B, SEL_LCDC_1),
@@ -1139,12 +1139,12 @@
 	PINMUX_IPSR_MSEL(IP8_22_20, IRQ0_A, SEL_INTC_0),
 	PINMUX_IPSR_MSEL(IP8_22_20, HSPI_TX_B, SEL_HSPI_1),
 	PINMUX_IPSR_MSEL(IP8_22_20, RX3_E, SEL_SCIF3_4),
-	PINMUX_IPSR_DATA(IP8_22_20, ET0_ERXD0),
+	PINMUX_IPSR_GPSR(IP8_22_20, ET0_ERXD0),
 
 	PINMUX_IPSR_MSEL(IP8_25_23, IRQ1_A, SEL_INTC_0),
 	PINMUX_IPSR_MSEL(IP8_25_23, HSPI_RX_B, SEL_HSPI_1),
 	PINMUX_IPSR_MSEL(IP8_25_23, TX3_E, SEL_SCIF3_4),
-	PINMUX_IPSR_DATA(IP8_25_23, ET0_ERXD1),
+	PINMUX_IPSR_GPSR(IP8_25_23, ET0_ERXD1),
 
 	PINMUX_IPSR_MSEL(IP8_27_26, IRQ2_A, SEL_INTC_0),
 	PINMUX_IPSR_MSEL(IP8_27_26, CTS0_A, SEL_SCIF0_0),
@@ -1220,26 +1220,26 @@
 	PINMUX_IPSR_MSEL(IP9_29_28, LCD_DATA14_B, SEL_LCDC_1),
 
 	/* IPSE10 */
-	PINMUX_IPSR_DATA(IP10_2_0, SSI_SCK23),
+	PINMUX_IPSR_GPSR(IP10_2_0, SSI_SCK23),
 	PINMUX_IPSR_MSEL(IP10_2_0, VI1_4_B, SEL_VIN1_1),
 	PINMUX_IPSR_MSEL(IP10_2_0, RX1_D, SEL_SCIF1_3),
 	PINMUX_IPSR_MSEL(IP10_2_0, FCLE_B, SEL_FLCTL_1),
 	PINMUX_IPSR_MSEL(IP10_2_0, LCD_DATA15_B, SEL_LCDC_1),
 
-	PINMUX_IPSR_DATA(IP10_5_3, SSI_WS23),
+	PINMUX_IPSR_GPSR(IP10_5_3, SSI_WS23),
 	PINMUX_IPSR_MSEL(IP10_5_3, VI1_5_B, SEL_VIN1_1),
 	PINMUX_IPSR_MSEL(IP10_5_3, TX1_D, SEL_SCIF1_3),
 	PINMUX_IPSR_MSEL(IP10_5_3, HSCK0_C, SEL_HSCIF_2),
 	PINMUX_IPSR_MSEL(IP10_5_3, FALE_B, SEL_FLCTL_1),
 	PINMUX_IPSR_MSEL(IP10_5_3, LCD_DON_B, SEL_LCDC_1),
 
-	PINMUX_IPSR_DATA(IP10_8_6, SSI_SDATA2),
+	PINMUX_IPSR_GPSR(IP10_8_6, SSI_SDATA2),
 	PINMUX_IPSR_MSEL(IP10_8_6, VI1_6_B, SEL_VIN1_1),
 	PINMUX_IPSR_MSEL(IP10_8_6, HRX0_C, SEL_HSCIF_2),
 	PINMUX_IPSR_MSEL(IP10_8_6, FRE_B, SEL_FLCTL_1),
 	PINMUX_IPSR_MSEL(IP10_8_6, LCD_CL1_B, SEL_LCDC_1),
 
-	PINMUX_IPSR_DATA(IP10_11_9, SSI_SDATA3),
+	PINMUX_IPSR_GPSR(IP10_11_9, SSI_SDATA3),
 	PINMUX_IPSR_MSEL(IP10_11_9, VI1_7_B, SEL_VIN1_1),
 	PINMUX_IPSR_MSEL(IP10_11_9, HTX0_C, SEL_HSCIF_2),
 	PINMUX_IPSR_MSEL(IP10_11_9, FWE_B, SEL_FLCTL_1),
@@ -1254,13 +1254,13 @@
 	PINMUX_IPSR_MSEL(IP10_15, AUDIO_CLKB_A, SEL_AUDIO_CLKB_0),
 	PINMUX_IPSR_MSEL(IP10_15, LCD_CLK_B, SEL_LCDC_1),
 
-	PINMUX_IPSR_DATA(IP10_18_16, AUDIO_CLKC),
+	PINMUX_IPSR_GPSR(IP10_18_16, AUDIO_CLKC),
 	PINMUX_IPSR_MSEL(IP10_18_16, SCK1_E, SEL_SCIF1_4),
 	PINMUX_IPSR_MSEL(IP10_18_16, HCTS0_C, SEL_HSCIF_2),
 	PINMUX_IPSR_MSEL(IP10_18_16, FRB_B, SEL_FLCTL_1),
 	PINMUX_IPSR_MSEL(IP10_18_16, LCD_VEPWC_B, SEL_LCDC_1),
 
-	PINMUX_IPSR_DATA(IP10_21_19, AUDIO_CLKOUT),
+	PINMUX_IPSR_GPSR(IP10_21_19, AUDIO_CLKOUT),
 	PINMUX_IPSR_MSEL(IP10_21_19, TX1_E, SEL_SCIF1_4),
 	PINMUX_IPSR_MSEL(IP10_21_19, HRTS0_C, SEL_HSCIF_2),
 	PINMUX_IPSR_MSEL(IP10_21_19, FSE_B, SEL_FLCTL_1),
@@ -1271,85 +1271,85 @@
 
 	PINMUX_IPSR_MSEL(IP10_24_23, CAN0_TX_A, SEL_RCAN0_0),
 	PINMUX_IPSR_MSEL(IP10_24_23, TX4_D, SEL_SCIF4_3),
-	PINMUX_IPSR_DATA(IP10_24_23, MLB_CLK),
+	PINMUX_IPSR_GPSR(IP10_24_23, MLB_CLK),
 
 	PINMUX_IPSR_MSEL(IP10_25, CAN1_RX_A, SEL_RCAN1_0),
 	PINMUX_IPSR_MSEL(IP10_25, IRQ1_B, SEL_INTC_1),
 
 	PINMUX_IPSR_MSEL(IP10_27_26, CAN0_RX_A, SEL_RCAN0_0),
 	PINMUX_IPSR_MSEL(IP10_27_26, IRQ0_B, SEL_INTC_1),
-	PINMUX_IPSR_DATA(IP10_27_26, MLB_SIG),
+	PINMUX_IPSR_GPSR(IP10_27_26, MLB_SIG),
 
 	PINMUX_IPSR_MSEL(IP10_29_28, CAN1_TX_A, SEL_RCAN1_0),
 	PINMUX_IPSR_MSEL(IP10_29_28, TX5_C, SEL_SCIF1_2),
-	PINMUX_IPSR_DATA(IP10_29_28, MLB_DAT),
+	PINMUX_IPSR_GPSR(IP10_29_28, MLB_DAT),
 
 	/* IPSR11 */
-	PINMUX_IPSR_DATA(IP11_0, SCL1),
+	PINMUX_IPSR_GPSR(IP11_0, SCL1),
 	PINMUX_IPSR_MSEL(IP11_0, SCIF_CLK_C, SEL_SCIF_CLK_2),
 
-	PINMUX_IPSR_DATA(IP11_1, SDA1),
+	PINMUX_IPSR_GPSR(IP11_1, SDA1),
 	PINMUX_IPSR_MSEL(IP11_0, RX1_E, SEL_SCIF1_4),
 
-	PINMUX_IPSR_DATA(IP11_2, SDA0),
+	PINMUX_IPSR_GPSR(IP11_2, SDA0),
 	PINMUX_IPSR_MSEL(IP11_2, HIFEBL_A, SEL_HIF_0),
 
-	PINMUX_IPSR_DATA(IP11_3, SDSELF),
+	PINMUX_IPSR_GPSR(IP11_3, SDSELF),
 	PINMUX_IPSR_MSEL(IP11_3, RTS1_E, SEL_SCIF1_3),
 
 	PINMUX_IPSR_MSEL(IP11_6_4, SCIF_CLK_A, SEL_SCIF_CLK_0),
 	PINMUX_IPSR_MSEL(IP11_6_4, HSPI_CLK_A, SEL_HSPI_0),
-	PINMUX_IPSR_DATA(IP11_6_4, VI0_CLK),
+	PINMUX_IPSR_GPSR(IP11_6_4, VI0_CLK),
 	PINMUX_IPSR_MSEL(IP11_6_4, RMII0_TXD0_A, SEL_RMII_0),
-	PINMUX_IPSR_DATA(IP11_6_4, ET0_ERXD4),
+	PINMUX_IPSR_GPSR(IP11_6_4, ET0_ERXD4),
 
 	PINMUX_IPSR_MSEL(IP11_9_7, SCK0_A, SEL_SCIF0_0),
 	PINMUX_IPSR_MSEL(IP11_9_7, HSPI_CS_A, SEL_HSPI_0),
-	PINMUX_IPSR_DATA(IP11_9_7, VI0_CLKENB),
+	PINMUX_IPSR_GPSR(IP11_9_7, VI0_CLKENB),
 	PINMUX_IPSR_MSEL(IP11_9_7, RMII0_TXD1_A, SEL_RMII_0),
-	PINMUX_IPSR_DATA(IP11_9_7, ET0_ERXD5),
+	PINMUX_IPSR_GPSR(IP11_9_7, ET0_ERXD5),
 
 	PINMUX_IPSR_MSEL(IP11_11_10, RX0_A, SEL_SCIF0_0),
 	PINMUX_IPSR_MSEL(IP11_11_10, HSPI_RX_A, SEL_HSPI_0),
 	PINMUX_IPSR_MSEL(IP11_11_10, RMII0_RXD0_A, SEL_RMII_0),
-	PINMUX_IPSR_DATA(IP11_11_10, ET0_ERXD6),
+	PINMUX_IPSR_GPSR(IP11_11_10, ET0_ERXD6),
 
 	PINMUX_IPSR_MSEL(IP11_12, TX0_A, SEL_SCIF0_0),
 	PINMUX_IPSR_MSEL(IP11_12, HSPI_TX_A, SEL_HSPI_0),
 
-	PINMUX_IPSR_DATA(IP11_15_13, PENC1),
+	PINMUX_IPSR_GPSR(IP11_15_13, PENC1),
 	PINMUX_IPSR_MSEL(IP11_15_13, TX3_D, SEL_SCIF3_3),
 	PINMUX_IPSR_MSEL(IP11_15_13, CAN1_TX_B,  SEL_RCAN1_1),
 	PINMUX_IPSR_MSEL(IP11_15_13, TX5_D, SEL_SCIF5_3),
 	PINMUX_IPSR_MSEL(IP11_15_13, IETX_B, SEL_IEBUS_1),
 
-	PINMUX_IPSR_DATA(IP11_18_16, USB_OVC1),
+	PINMUX_IPSR_GPSR(IP11_18_16, USB_OVC1),
 	PINMUX_IPSR_MSEL(IP11_18_16, RX3_D, SEL_SCIF3_3),
 	PINMUX_IPSR_MSEL(IP11_18_16, CAN1_RX_B, SEL_RCAN1_1),
 	PINMUX_IPSR_MSEL(IP11_18_16, RX5_D, SEL_SCIF5_3),
 	PINMUX_IPSR_MSEL(IP11_18_16, IERX_B, SEL_IEBUS_1),
 
-	PINMUX_IPSR_DATA(IP11_20_19, DREQ0),
+	PINMUX_IPSR_GPSR(IP11_20_19, DREQ0),
 	PINMUX_IPSR_MSEL(IP11_20_19, SD1_CLK_A, SEL_SDHI1_0),
-	PINMUX_IPSR_DATA(IP11_20_19, ET0_TX_EN),
+	PINMUX_IPSR_GPSR(IP11_20_19, ET0_TX_EN),
 
-	PINMUX_IPSR_DATA(IP11_22_21, DACK0),
+	PINMUX_IPSR_GPSR(IP11_22_21, DACK0),
 	PINMUX_IPSR_MSEL(IP11_22_21, SD1_DAT3_A, SEL_SDHI1_0),
-	PINMUX_IPSR_DATA(IP11_22_21, ET0_TX_ER),
+	PINMUX_IPSR_GPSR(IP11_22_21, ET0_TX_ER),
 
-	PINMUX_IPSR_DATA(IP11_25_23, DREQ1),
+	PINMUX_IPSR_GPSR(IP11_25_23, DREQ1),
 	PINMUX_IPSR_MSEL(IP11_25_23, HSPI_CLK_B, SEL_HSPI_1),
 	PINMUX_IPSR_MSEL(IP11_25_23, RX4_B, SEL_SCIF4_1),
 	PINMUX_IPSR_MSEL(IP11_25_23, ET0_PHY_INT_C, SEL_ET0_CTL_0),
 	PINMUX_IPSR_MSEL(IP11_25_23, ET0_TX_CLK_A, SEL_ET0_0),
 
-	PINMUX_IPSR_DATA(IP11_27_26, DACK1),
+	PINMUX_IPSR_GPSR(IP11_27_26, DACK1),
 	PINMUX_IPSR_MSEL(IP11_27_26, HSPI_CS_B, SEL_HSPI_1),
 	PINMUX_IPSR_MSEL(IP11_27_26, TX4_B, SEL_SCIF3_1),
 	PINMUX_IPSR_MSEL(IP11_27_26, ET0_RX_CLK_A, SEL_ET0_0),
 
-	PINMUX_IPSR_DATA(IP11_28, PRESETOUT),
-	PINMUX_IPSR_DATA(IP11_28, ST_CLKOUT),
+	PINMUX_IPSR_GPSR(IP11_28, PRESETOUT),
+	PINMUX_IPSR_GPSR(IP11_28, ST_CLKOUT),
 };
 
 static const struct sh_pfc_pin pinmux_pins[] = {
diff --git a/drivers/pinctrl/sh-pfc/sh_pfc.h b/drivers/pinctrl/sh-pfc/sh_pfc.h
index 2123ab4..a490834 100644
--- a/drivers/pinctrl/sh-pfc/sh_pfc.h
+++ b/drivers/pinctrl/sh-pfc/sh_pfc.h
@@ -100,10 +100,31 @@
 	const u8 *var_field_width;
 };
 
+/*
+ * Describe a config register consisting of several fields of the same width
+ *   - name: Register name (unused, for documentation purposes only)
+ *   - r: Physical register address
+ *   - r_width: Width of the register (in bits)
+ *   - f_width: Width of the fixed-width register fields (in bits)
+ * This macro must be followed by initialization data: For each register field
+ * (from left to right, i.e. MSB to LSB), 2^f_width enum IDs must be specified,
+ * one for each possible combination of the register field bit values.
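+ *
+ * Example (register name and address are hypothetical, shown only to
+ * illustrate how much initialization data is required): a 32-bit register
+ * split into 4-bit fields needs (32 / 4) * 2^4 = 128 enum IDs, e.g.
+ *
+ *	{ PINMUX_CFG_REG("IPSRn", 0xfffc0044, 32, 4) { <128 enum IDs> } },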
+ */
 #define PINMUX_CFG_REG(name, r, r_width, f_width) \
 	.reg = r, .reg_width = r_width, .field_width = f_width,		\
 	.enum_ids = (const u16 [(r_width / f_width) * (1 << f_width)])
 
+/*
+ * Describe a config register consisting of several fields of different widths
+ *   - name: Register name (unused, for documentation purposes only)
+ *   - r: Physical register address
+ *   - r_width: Width of the register (in bits)
+ *   - var_fw0, var_fwn...: List of widths of the register fields (in bits),
+ *                          from left to right (i.e. MSB to LSB)
+ * This macro must be followed by initialization data: For each register field
+ * (from left to right, i.e. MSB to LSB), 2^var_fwi enum IDs must be specified,
+ * one for each possible combination of the register field bit values.
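+ *
+ * Example (register name and address are hypothetical): for
+ *
+ *	{ PINMUX_CFG_REG_VAR("MOD_SELn", 0xe6060504, 32, 1, 2, ...) { ... } },
+ *
+ * the first (1-bit) field takes 2^1 = 2 enum IDs, the second (2-bit) field
+ * takes 2^2 = 4 enum IDs, and so on; reserved fields are listed with
+ * all-zero enum IDs.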
+ */
 #define PINMUX_CFG_REG_VAR(name, r, r_width, var_fw0, var_fwn...) \
 	.reg = r, .reg_width = r_width,	\
 	.var_field_width = (const u8 [r_width]) \
@@ -116,6 +137,14 @@
 	const u16 *enum_ids;
 };
 
+/*
+ * Describe a data register
+ *   - name: Register name (unused, for documentation purposes only)
+ *   - r: Physical register address
+ *   - r_width: Width of the register (in bits)
+ * This macro must be followed by initialization data: For each register bit
+ * (from left to right, i.e. MSB to LSB), one enum ID must be specified.
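+ *
+ * Example (register name and address are hypothetical):
+ *
+ *	{ PINMUX_DATA_REG("PORTL_DR", 0xe6054000, 32) { <32 enum IDs> } },
+ *
+ * i.e. exactly one enum ID per register bit, MSB first.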
+ */
 #define PINMUX_DATA_REG(name, r, r_width) \
 	.reg = r, .reg_width = r_width,	\
 	.enum_ids = (const u16 [r_width]) \
@@ -124,6 +153,10 @@
 	const short *gpios;
 };
 
+/*
+ * Describe the mapping from GPIOs to a single IRQ
+ *   - ids...: List of GPIOs that are mapped to the same IRQ
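+ *
+ * Example (GPIO numbers are hypothetical): PINMUX_IRQ(26, 27) maps GPIOs
+ * 26 and 27 to the same IRQ; the macro terminates the GPIO list with -1.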
+ */
 #define PINMUX_IRQ(ids...)			   \
 	{ .gpios = (const short []) { ids, -1 } }
 
@@ -185,18 +218,65 @@
  * sh_pfc_soc_info pinmux_data array macros
  */
 
+/*
+ * Describe generic pinmux data
+ *   - data_or_mark: *_DATA or *_MARK enum ID
+ *   - ids...: List of enum IDs to associate with data_or_mark
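+ *
+ * For example, PINMUX_DATA(A0_MARK, FN_A0, FN_IP0_1_0) expands to the
+ * sequence "A0_MARK, FN_A0, FN_IP0_1_0, 0", the appended 0 marking the
+ * end of the entry.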
+ */
 #define PINMUX_DATA(data_or_mark, ids...)	data_or_mark, ids, 0
 
-#define PINMUX_IPSR_NOGP(ispr, fn)					\
+/*
+ * Describe a pinmux configuration without GPIO function that needs
+ * configuration in a Peripheral Function Select Register (IPSR)
+ *   - ipsr: IPSR field (unused, for documentation purposes only)
+ *   - fn: Function name, referring to a field in the IPSR
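+ *
+ * For example, PINMUX_IPSR_NOGP(0, I2C_SEL_0_1) expands to
+ * "I2C_SEL_0_1_MARK, FN_I2C_SEL_0_1, 0".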
+ */
+#define PINMUX_IPSR_NOGP(ipsr, fn)					\
 	PINMUX_DATA(fn##_MARK, FN_##fn)
-#define PINMUX_IPSR_DATA(ipsr, fn)					\
+
+/*
+ * Describe a pinmux configuration with GPIO function that needs configuration
+ * in both a Peripheral Function Select Register (IPSR) and in a
+ * GPIO/Peripheral Function Select Register (GPSR)
+ *   - ipsr: IPSR field
+ *   - fn: Function name, also referring to the IPSR field
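+ *
+ * For example, PINMUX_IPSR_GPSR(IP0_1_0, A0) expands to
+ * "A0_MARK, FN_A0, FN_IP0_1_0, 0".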
+ */
+#define PINMUX_IPSR_GPSR(ipsr, fn)					\
 	PINMUX_DATA(fn##_MARK, FN_##fn, FN_##ipsr)
-#define PINMUX_IPSR_NOGM(ispr, fn, ms)					\
-	PINMUX_DATA(fn##_MARK, FN_##fn, FN_##ms)
-#define PINMUX_IPSR_NOFN(ipsr, fn, ms)					\
-	PINMUX_DATA(fn##_MARK, FN_##ipsr, FN_##ms)
-#define PINMUX_IPSR_MSEL(ipsr, fn, ms)					\
-	PINMUX_DATA(fn##_MARK, FN_##ms, FN_##ipsr, FN_##fn)
+
+/*
+ * Describe a pinmux configuration without GPIO function that needs
+ * configuration in a Peripheral Function Select Register (IPSR), and where the
+ * pinmux function has a representation in a Module Select Register (MOD_SEL).
+ *   - ipsr: IPSR field (unused, for documentation purposes only)
+ *   - fn: Function name, also referring to the IPSR field
+ *   - msel: Module selector
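+ *
+ * A hypothetical entry such as PINMUX_IPSR_NOGM(0, SCL1, SEL_I2C1_0)
+ * would expand to "SCL1_MARK, FN_SCL1, FN_SEL_I2C1_0, 0".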
+ */
+#define PINMUX_IPSR_NOGM(ipsr, fn, msel)				\
+	PINMUX_DATA(fn##_MARK, FN_##fn, FN_##msel)
+
+/*
+ * Describe a pinmux configuration with GPIO function where the pinmux function
+ * has no representation in a Peripheral Function Select Register (IPSR), but
+ * instead solely depends on a group selection.
+ *   - gpsr: GPSR field
+ *   - fn: Function name, also referring to the GPSR field
+ *   - gsel: Group selector
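+ *
+ * A hypothetical entry such as PINMUX_IPSR_NOFN(PORT2, VIO_CLK, SEL_VIO_0)
+ * would expand to "VIO_CLK_MARK, FN_PORT2, FN_SEL_VIO_0, 0".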
+ */
+#define PINMUX_IPSR_NOFN(gpsr, fn, gsel)				\
+	PINMUX_DATA(fn##_MARK, FN_##gpsr, FN_##gsel)
+
+/*
+ * Describe a pinmux configuration with GPIO function that needs configuration
+ * in both a Peripheral Function Select Register (IPSR) and a GPIO/Peripheral
+ * Function Select Register (GPSR), and where the pinmux function has a
+ * representation in a Module Select Register (MOD_SEL).
+ *   - ipsr: IPSR field
+ *   - fn: Function name, also referring to the IPSR field
+ *   - msel: Module selector
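+ *
+ * For example, PINMUX_IPSR_MSEL(IP0_1_0, LCD_DATA0_A, SEL_LCDC_0) expands
+ * to "LCD_DATA0_A_MARK, FN_SEL_LCDC_0, FN_IP0_1_0, FN_LCD_DATA0_A, 0".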
+ */
+#define PINMUX_IPSR_MSEL(ipsr, fn, msel)				\
+	PINMUX_DATA(fn##_MARK, FN_##msel, FN_##ipsr, FN_##fn)
 
 /*
  * Describe a pinmux configuration for a single-function pin with GPIO
@@ -381,7 +461,7 @@
 	PINMUX_GPIO_FN(GPIO_FN_##str, PINMUX_FN_BASE, str##_MARK)
 
 /*
- * PORTnCR macro
+ * PORTnCR helper macro for SH-Mobile/R-Mobile
  */
 #define PORTCR(nr, reg)							\
 	{								\
diff --git a/drivers/pinctrl/stm32/Kconfig b/drivers/pinctrl/stm32/Kconfig
new file mode 100644
index 0000000..0f28841
--- /dev/null
+++ b/drivers/pinctrl/stm32/Kconfig
@@ -0,0 +1,16 @@
+if ARCH_STM32 || COMPILE_TEST
+
+config PINCTRL_STM32
+	bool
+	depends on OF
+	select PINMUX
+	select GENERIC_PINCONF
+	select GPIOLIB
+
+config PINCTRL_STM32F429
+	bool "STMicroelectronics STM32F429 pin control" if COMPILE_TEST && !MACH_STM32F429
+	depends on OF
+	default MACH_STM32F429
+	select PINCTRL_STM32
+
+endif
diff --git a/drivers/pinctrl/stm32/Makefile b/drivers/pinctrl/stm32/Makefile
new file mode 100644
index 0000000..fc17d42
--- /dev/null
+++ b/drivers/pinctrl/stm32/Makefile
@@ -0,0 +1,5 @@
+# Core
+obj-$(CONFIG_PINCTRL_STM32) += pinctrl-stm32.o
+
+# SoC Drivers
+obj-$(CONFIG_PINCTRL_STM32F429)	+= pinctrl-stm32f429.o
diff --git a/drivers/pinctrl/stm32/pinctrl-stm32.c b/drivers/pinctrl/stm32/pinctrl-stm32.c
new file mode 100644
index 0000000..8deb566
--- /dev/null
+++ b/drivers/pinctrl/stm32/pinctrl-stm32.c
@@ -0,0 +1,829 @@
+/*
+ * Copyright (C) Maxime Coquelin 2015
+ * Author:  Maxime Coquelin <mcoquelin.stm32@gmail.com>
+ * License terms:  GNU General Public License (GPL), version 2
+ *
+ * Heavily based on Mediatek's pinctrl driver
+ */
+#include <linux/clk.h>
+#include <linux/gpio/driver.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/of_irq.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/pinctrl/machine.h>
+#include <linux/pinctrl/pinconf.h>
+#include <linux/pinctrl/pinconf-generic.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/pinctrl/pinmux.h>
+#include <linux/platform_device.h>
+#include <linux/reset.h>
+#include <linux/slab.h>
+
+#include "../core.h"
+#include "../pinconf.h"
+#include "../pinctrl-utils.h"
+#include "pinctrl-stm32.h"
+
+#define STM32_GPIO_MODER	0x00
+#define STM32_GPIO_TYPER	0x04
+#define STM32_GPIO_SPEEDR	0x08
+#define STM32_GPIO_PUPDR	0x0c
+#define STM32_GPIO_IDR		0x10
+#define STM32_GPIO_ODR		0x14
+#define STM32_GPIO_BSRR		0x18
+#define STM32_GPIO_LCKR		0x1c
+#define STM32_GPIO_AFRL		0x20
+#define STM32_GPIO_AFRH		0x24
+
+#define STM32_GPIO_PINS_PER_BANK 16
+
+#define gpio_range_to_bank(chip) \
+		container_of(chip, struct stm32_gpio_bank, range)
+
+static const char * const stm32_gpio_functions[] = {
+	"gpio", "af0", "af1",
+	"af2", "af3", "af4",
+	"af5", "af6", "af7",
+	"af8", "af9", "af10",
+	"af11", "af12", "af13",
+	"af14", "af15", "analog",
+};
+
+struct stm32_pinctrl_group {
+	const char *name;
+	unsigned long config;
+	unsigned pin;
+};
+
+struct stm32_gpio_bank {
+	void __iomem *base;
+	struct clk *clk;
+	spinlock_t lock;
+	struct gpio_chip gpio_chip;
+	struct pinctrl_gpio_range range;
+};
+
+struct stm32_pinctrl {
+	struct device *dev;
+	struct pinctrl_dev *pctl_dev;
+	struct pinctrl_desc pctl_desc;
+	struct stm32_pinctrl_group *groups;
+	unsigned ngroups;
+	const char **grp_names;
+	struct stm32_gpio_bank *banks;
+	unsigned nbanks;
+	const struct stm32_pinctrl_match_data *match_data;
+};
+
+static inline int stm32_gpio_pin(int gpio)
+{
+	return gpio % STM32_GPIO_PINS_PER_BANK;
+}
+
+static inline u32 stm32_gpio_get_mode(u32 function)
+{
+	switch (function) {
+	case STM32_PIN_GPIO:
+		return 0;
+	case STM32_PIN_AF(0) ... STM32_PIN_AF(15):
+		return 2;
+	case STM32_PIN_ANALOG:
+		return 3;
+	}
+
+	return 0;
+}
+
+static inline u32 stm32_gpio_get_alt(u32 function)
+{
+	switch (function) {
+	case STM32_PIN_GPIO:
+		return 0;
+	case STM32_PIN_AF(0) ... STM32_PIN_AF(15):
+		return function - 1;
+	case STM32_PIN_ANALOG:
+		return 0;
+	}
+
+	return 0;
+}
+
+/* GPIO functions */
+
+static inline void __stm32_gpio_set(struct stm32_gpio_bank *bank,
+	unsigned offset, int value)
+{
+	if (!value)
+		offset += STM32_GPIO_PINS_PER_BANK;
+
+	clk_enable(bank->clk);
+
+	writel_relaxed(BIT(offset), bank->base + STM32_GPIO_BSRR);
+
+	clk_disable(bank->clk);
+}
+
+static int stm32_gpio_request(struct gpio_chip *chip, unsigned offset)
+{
+	return pinctrl_request_gpio(chip->base + offset);
+}
+
+static void stm32_gpio_free(struct gpio_chip *chip, unsigned offset)
+{
+	pinctrl_free_gpio(chip->base + offset);
+}
+
+static int stm32_gpio_get(struct gpio_chip *chip, unsigned offset)
+{
+	struct stm32_gpio_bank *bank = gpiochip_get_data(chip);
+	int ret;
+
+	clk_enable(bank->clk);
+
+	ret = !!(readl_relaxed(bank->base + STM32_GPIO_IDR) & BIT(offset));
+
+	clk_disable(bank->clk);
+
+	return ret;
+}
+
+static void stm32_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
+{
+	struct stm32_gpio_bank *bank = gpiochip_get_data(chip);
+
+	__stm32_gpio_set(bank, offset, value);
+}
+
+static int stm32_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
+{
+	return pinctrl_gpio_direction_input(chip->base + offset);
+}
+
+static int stm32_gpio_direction_output(struct gpio_chip *chip,
+	unsigned offset, int value)
+{
+	struct stm32_gpio_bank *bank = gpiochip_get_data(chip);
+
+	__stm32_gpio_set(bank, offset, value);
+	pinctrl_gpio_direction_output(chip->base + offset);
+
+	return 0;
+}
+
+static struct gpio_chip stm32_gpio_template = {
+	.request		= stm32_gpio_request,
+	.free			= stm32_gpio_free,
+	.get			= stm32_gpio_get,
+	.set			= stm32_gpio_set,
+	.direction_input	= stm32_gpio_direction_input,
+	.direction_output	= stm32_gpio_direction_output,
+};
+
+/* Pinctrl functions */
+
+static struct stm32_pinctrl_group *
+stm32_pctrl_find_group_by_pin(struct stm32_pinctrl *pctl, u32 pin)
+{
+	int i;
+
+	for (i = 0; i < pctl->ngroups; i++) {
+		struct stm32_pinctrl_group *grp = pctl->groups + i;
+
+		if (grp->pin == pin)
+			return grp;
+	}
+
+	return NULL;
+}
+
+static bool stm32_pctrl_is_function_valid(struct stm32_pinctrl *pctl,
+		u32 pin_num, u32 fnum)
+{
+	int i;
+
+	for (i = 0; i < pctl->match_data->npins; i++) {
+		const struct stm32_desc_pin *pin = pctl->match_data->pins + i;
+		const struct stm32_desc_function *func = pin->functions;
+
+		if (pin->pin.number != pin_num)
+			continue;
+
+		while (func && func->name) {
+			if (func->num == fnum)
+				return true;
+			func++;
+		}
+
+		break;
+	}
+
+	return false;
+}
+
+static int stm32_pctrl_dt_node_to_map_func(struct stm32_pinctrl *pctl,
+		u32 pin, u32 fnum, struct stm32_pinctrl_group *grp,
+		struct pinctrl_map **map, unsigned *reserved_maps,
+		unsigned *num_maps)
+{
+	if (*num_maps == *reserved_maps)
+		return -ENOSPC;
+
+	(*map)[*num_maps].type = PIN_MAP_TYPE_MUX_GROUP;
+	(*map)[*num_maps].data.mux.group = grp->name;
+
+	if (!stm32_pctrl_is_function_valid(pctl, pin, fnum)) {
+		dev_err(pctl->dev, "invalid function %d on pin %d .\n",
+				fnum, pin);
+		return -EINVAL;
+	}
+
+	(*map)[*num_maps].data.mux.function = stm32_gpio_functions[fnum];
+	(*num_maps)++;
+
+	return 0;
+}
+
+static int stm32_pctrl_dt_subnode_to_map(struct pinctrl_dev *pctldev,
+				      struct device_node *node,
+				      struct pinctrl_map **map,
+				      unsigned *reserved_maps,
+				      unsigned *num_maps)
+{
+	struct stm32_pinctrl *pctl;
+	struct stm32_pinctrl_group *grp;
+	struct property *pins;
+	u32 pinfunc, pin, func;
+	unsigned long *configs;
+	unsigned int num_configs;
+	bool has_config = 0;
+	unsigned reserve = 0;
+	int num_pins, num_funcs, maps_per_pin, i, err;
+
+	pctl = pinctrl_dev_get_drvdata(pctldev);
+
+	pins = of_find_property(node, "pinmux", NULL);
+	if (!pins) {
+		dev_err(pctl->dev, "missing pins property in node %s .\n",
+				node->name);
+		return -EINVAL;
+	}
+
+	err = pinconf_generic_parse_dt_config(node, pctldev, &configs,
+		&num_configs);
+	if (err)
+		return err;
+
+	if (num_configs)
+		has_config = 1;
+
+	num_pins = pins->length / sizeof(u32);
+	num_funcs = num_pins;
+	maps_per_pin = 0;
+	if (num_funcs)
+		maps_per_pin++;
+	if (has_config && num_pins >= 1)
+		maps_per_pin++;
+
+	if (!num_pins || !maps_per_pin)
+		return -EINVAL;
+
+	reserve = num_pins * maps_per_pin;
+
+	err = pinctrl_utils_reserve_map(pctldev, map,
+			reserved_maps, num_maps, reserve);
+	if (err)
+		return err;
+
+	for (i = 0; i < num_pins; i++) {
+		err = of_property_read_u32_index(node, "pinmux",
+				i, &pinfunc);
+		if (err)
+			return err;
+
+		pin = STM32_GET_PIN_NO(pinfunc);
+		func = STM32_GET_PIN_FUNC(pinfunc);
+
+		if (pin >= pctl->match_data->npins) {
+			dev_err(pctl->dev, "invalid pin number.\n");
+			return -EINVAL;
+		}
+
+		if (!stm32_pctrl_is_function_valid(pctl, pin, func)) {
+			dev_err(pctl->dev, "invalid function.\n");
+			return -EINVAL;
+		}
+
+		grp = stm32_pctrl_find_group_by_pin(pctl, pin);
+		if (!grp) {
+			dev_err(pctl->dev, "unable to match pin %d to group\n",
+					pin);
+			return -EINVAL;
+		}
+
+		err = stm32_pctrl_dt_node_to_map_func(pctl, pin, func, grp, map,
+				reserved_maps, num_maps);
+		if (err)
+			return err;
+
+		if (has_config) {
+			err = pinctrl_utils_add_map_configs(pctldev, map,
+					reserved_maps, num_maps, grp->name,
+					configs, num_configs,
+					PIN_MAP_TYPE_CONFIGS_GROUP);
+			if (err)
+				return err;
+		}
+	}
+
+	return 0;
+}
+
+static int stm32_pctrl_dt_node_to_map(struct pinctrl_dev *pctldev,
+				 struct device_node *np_config,
+				 struct pinctrl_map **map, unsigned *num_maps)
+{
+	struct device_node *np;
+	unsigned reserved_maps;
+	int ret;
+
+	*map = NULL;
+	*num_maps = 0;
+	reserved_maps = 0;
+
+	for_each_child_of_node(np_config, np) {
+		ret = stm32_pctrl_dt_subnode_to_map(pctldev, np, map,
+				&reserved_maps, num_maps);
+		if (ret < 0) {
+			pinctrl_utils_dt_free_map(pctldev, *map, *num_maps);
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
+static int stm32_pctrl_get_groups_count(struct pinctrl_dev *pctldev)
+{
+	struct stm32_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev);
+
+	return pctl->ngroups;
+}
+
+static const char *stm32_pctrl_get_group_name(struct pinctrl_dev *pctldev,
+					      unsigned group)
+{
+	struct stm32_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev);
+
+	return pctl->groups[group].name;
+}
+
+static int stm32_pctrl_get_group_pins(struct pinctrl_dev *pctldev,
+				      unsigned group,
+				      const unsigned **pins,
+				      unsigned *num_pins)
+{
+	struct stm32_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev);
+
+	*pins = (unsigned *)&pctl->groups[group].pin;
+	*num_pins = 1;
+
+	return 0;
+}
+
+static const struct pinctrl_ops stm32_pctrl_ops = {
+	.dt_node_to_map		= stm32_pctrl_dt_node_to_map,
+	.dt_free_map		= pinctrl_utils_dt_free_map,
+	.get_groups_count	= stm32_pctrl_get_groups_count,
+	.get_group_name		= stm32_pctrl_get_group_name,
+	.get_group_pins		= stm32_pctrl_get_group_pins,
+};
+
+/* Pinmux functions */
+
+static int stm32_pmx_get_funcs_cnt(struct pinctrl_dev *pctldev)
+{
+	return ARRAY_SIZE(stm32_gpio_functions);
+}
+
+static const char *stm32_pmx_get_func_name(struct pinctrl_dev *pctldev,
+					   unsigned selector)
+{
+	return stm32_gpio_functions[selector];
+}
+
+static int stm32_pmx_get_func_groups(struct pinctrl_dev *pctldev,
+				     unsigned function,
+				     const char * const **groups,
+				     unsigned * const num_groups)
+{
+	struct stm32_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev);
+
+	*groups = pctl->grp_names;
+	*num_groups = pctl->ngroups;
+
+	return 0;
+}
+
+static void stm32_pmx_set_mode(struct stm32_gpio_bank *bank,
+		int pin, u32 mode, u32 alt)
+{
+	u32 val;
+	int alt_shift = (pin % 8) * 4;
+	int alt_offset = STM32_GPIO_AFRL + (pin / 8) * 4;
+	unsigned long flags;
+
+	clk_enable(bank->clk);
+	spin_lock_irqsave(&bank->lock, flags);
+
+	val = readl_relaxed(bank->base + alt_offset);
+	val &= ~GENMASK(alt_shift + 3, alt_shift);
+	val |= (alt << alt_shift);
+	writel_relaxed(val, bank->base + alt_offset);
+
+	val = readl_relaxed(bank->base + STM32_GPIO_MODER);
+	val &= ~GENMASK(pin * 2 + 1, pin * 2);
+	val |= mode << (pin * 2);
+	writel_relaxed(val, bank->base + STM32_GPIO_MODER);
+
+	spin_unlock_irqrestore(&bank->lock, flags);
+	clk_disable(bank->clk);
+}
+
+static int stm32_pmx_set_mux(struct pinctrl_dev *pctldev,
+			    unsigned function,
+			    unsigned group)
+{
+	bool ret;
+	struct stm32_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev);
+	struct stm32_pinctrl_group *g = pctl->groups + group;
+	struct pinctrl_gpio_range *range;
+	struct stm32_gpio_bank *bank;
+	u32 mode, alt;
+	int pin;
+
+	ret = stm32_pctrl_is_function_valid(pctl, g->pin, function);
+	if (!ret) {
+		dev_err(pctl->dev, "invalid function %d on group %d .\n",
+				function, group);
+		return -EINVAL;
+	}
+
+	range = pinctrl_find_gpio_range_from_pin(pctldev, g->pin);
+	bank = gpio_range_to_bank(range);
+	pin = stm32_gpio_pin(g->pin);
+
+	mode = stm32_gpio_get_mode(function);
+	alt = stm32_gpio_get_alt(function);
+
+	stm32_pmx_set_mode(bank, pin, mode, alt);
+
+	return 0;
+}
+
+static int stm32_pmx_gpio_set_direction(struct pinctrl_dev *pctldev,
+			struct pinctrl_gpio_range *range, unsigned gpio,
+			bool input)
+{
+	struct stm32_gpio_bank *bank = gpio_range_to_bank(range);
+	int pin = stm32_gpio_pin(gpio);
+
+	stm32_pmx_set_mode(bank, pin, !input, 0);
+
+	return 0;
+}
+
+static const struct pinmux_ops stm32_pmx_ops = {
+	.get_functions_count	= stm32_pmx_get_funcs_cnt,
+	.get_function_name	= stm32_pmx_get_func_name,
+	.get_function_groups	= stm32_pmx_get_func_groups,
+	.set_mux		= stm32_pmx_set_mux,
+	.gpio_set_direction	= stm32_pmx_gpio_set_direction,
+};
+
+/* Pinconf functions */
+
+static void stm32_pconf_set_driving(struct stm32_gpio_bank *bank,
+	unsigned offset, u32 drive)
+{
+	unsigned long flags;
+	u32 val;
+
+	clk_enable(bank->clk);
+	spin_lock_irqsave(&bank->lock, flags);
+
+	val = readl_relaxed(bank->base + STM32_GPIO_TYPER);
+	val &= ~BIT(offset);
+	val |= drive << offset;
+	writel_relaxed(val, bank->base + STM32_GPIO_TYPER);
+
+	spin_unlock_irqrestore(&bank->lock, flags);
+	clk_disable(bank->clk);
+}
+
+static void stm32_pconf_set_speed(struct stm32_gpio_bank *bank,
+	unsigned offset, u32 speed)
+{
+	unsigned long flags;
+	u32 val;
+
+	clk_enable(bank->clk);
+	spin_lock_irqsave(&bank->lock, flags);
+
+	val = readl_relaxed(bank->base + STM32_GPIO_SPEEDR);
+	val &= ~GENMASK(offset * 2 + 1, offset * 2);
+	val |= speed << (offset * 2);
+	writel_relaxed(val, bank->base + STM32_GPIO_SPEEDR);
+
+	spin_unlock_irqrestore(&bank->lock, flags);
+	clk_disable(bank->clk);
+}
+
+static void stm32_pconf_set_bias(struct stm32_gpio_bank *bank,
+	unsigned offset, u32 bias)
+{
+	unsigned long flags;
+	u32 val;
+
+	clk_enable(bank->clk);
+	spin_lock_irqsave(&bank->lock, flags);
+
+	val = readl_relaxed(bank->base + STM32_GPIO_PUPDR);
+	val &= ~GENMASK(offset * 2 + 1, offset * 2);
+	val |= bias << (offset * 2);
+	writel_relaxed(val, bank->base + STM32_GPIO_PUPDR);
+
+	spin_unlock_irqrestore(&bank->lock, flags);
+	clk_disable(bank->clk);
+}
+
+static int stm32_pconf_parse_conf(struct pinctrl_dev *pctldev,
+		unsigned int pin, enum pin_config_param param,
+		enum pin_config_param arg)
+{
+	struct pinctrl_gpio_range *range;
+	struct stm32_gpio_bank *bank;
+	int offset, ret = 0;
+
+	range = pinctrl_find_gpio_range_from_pin(pctldev, pin);
+	bank = gpio_range_to_bank(range);
+	offset = stm32_gpio_pin(pin);
+
+	switch (param) {
+	case PIN_CONFIG_DRIVE_PUSH_PULL:
+		stm32_pconf_set_driving(bank, offset, 0);
+		break;
+	case PIN_CONFIG_DRIVE_OPEN_DRAIN:
+		stm32_pconf_set_driving(bank, offset, 1);
+		break;
+	case PIN_CONFIG_SLEW_RATE:
+		stm32_pconf_set_speed(bank, offset, arg);
+		break;
+	case PIN_CONFIG_BIAS_DISABLE:
+		stm32_pconf_set_bias(bank, offset, 0);
+		break;
+	case PIN_CONFIG_BIAS_PULL_UP:
+		stm32_pconf_set_bias(bank, offset, 1);
+		break;
+	case PIN_CONFIG_BIAS_PULL_DOWN:
+		stm32_pconf_set_bias(bank, offset, 2);
+		break;
+	case PIN_CONFIG_OUTPUT:
+		__stm32_gpio_set(bank, offset, arg);
+		ret = stm32_pmx_gpio_set_direction(pctldev, NULL, pin, false);
+		break;
+	default:
+		ret = -EINVAL;
+	}
+
+	return ret;
+}
+
+static int stm32_pconf_group_get(struct pinctrl_dev *pctldev,
+				 unsigned group,
+				 unsigned long *config)
+{
+	struct stm32_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev);
+
+	*config = pctl->groups[group].config;
+
+	return 0;
+}
+
+static int stm32_pconf_group_set(struct pinctrl_dev *pctldev, unsigned group,
+				 unsigned long *configs, unsigned num_configs)
+{
+	struct stm32_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev);
+	struct stm32_pinctrl_group *g = &pctl->groups[group];
+	int i, ret;
+
+	for (i = 0; i < num_configs; i++) {
+		ret = stm32_pconf_parse_conf(pctldev, g->pin,
+			pinconf_to_config_param(configs[i]),
+			pinconf_to_config_argument(configs[i]));
+		if (ret < 0)
+			return ret;
+
+		g->config = configs[i];
+	}
+
+	return 0;
+}
+
+static const struct pinconf_ops stm32_pconf_ops = {
+	.pin_config_group_get	= stm32_pconf_group_get,
+	.pin_config_group_set	= stm32_pconf_group_set,
+};
+
+static int stm32_gpiolib_register_bank(struct stm32_pinctrl *pctl,
+	struct device_node *np)
+{
+	int bank_nr = pctl->nbanks;
+	struct stm32_gpio_bank *bank = &pctl->banks[bank_nr];
+	struct pinctrl_gpio_range *range = &bank->range;
+	struct device *dev = pctl->dev;
+	struct resource res;
+	struct reset_control *rstc;
+	int err, npins;
+
+	rstc = of_reset_control_get(np, NULL);
+	if (!IS_ERR(rstc))
+		reset_control_deassert(rstc);
+
+	if (of_address_to_resource(np, 0, &res))
+		return -ENODEV;
+
+	bank->base = devm_ioremap_resource(dev, &res);
+	if (IS_ERR(bank->base))
+		return PTR_ERR(bank->base);
+
+	bank->clk = of_clk_get_by_name(np, NULL);
+	if (IS_ERR(bank->clk)) {
+		dev_err(dev, "failed to get clk (%ld)\n", PTR_ERR(bank->clk));
+		return PTR_ERR(bank->clk);
+	}
+
+	err = clk_prepare(bank->clk);
+	if (err) {
+		dev_err(dev, "failed to prepare clk (%d)\n", err);
+		return err;
+	}
+
+	npins = pctl->match_data->npins;
+	npins -= bank_nr * STM32_GPIO_PINS_PER_BANK;
+	if (npins < 0)
+		return -EINVAL;
+	else if (npins > STM32_GPIO_PINS_PER_BANK)
+		npins = STM32_GPIO_PINS_PER_BANK;
+
+	bank->gpio_chip = stm32_gpio_template;
+	bank->gpio_chip.base = bank_nr * STM32_GPIO_PINS_PER_BANK;
+	bank->gpio_chip.ngpio = npins;
+	bank->gpio_chip.of_node = np;
+	bank->gpio_chip.parent = dev;
+	spin_lock_init(&bank->lock);
+
+	of_property_read_string(np, "st,bank-name", &range->name);
+	bank->gpio_chip.label = range->name;
+
+	range->id = bank_nr;
+	range->pin_base = range->base = range->id * STM32_GPIO_PINS_PER_BANK;
+	range->npins = bank->gpio_chip.ngpio;
+	range->gc = &bank->gpio_chip;
+	err = gpiochip_add_data(&bank->gpio_chip, bank);
+	if (err) {
+		dev_err(dev, "Failed to add gpiochip(%d)!\n", bank_nr);
+		return err;
+	}
+
+	dev_info(dev, "%s bank added\n", range->name);
+	return 0;
+}
+
+static int stm32_pctrl_build_state(struct platform_device *pdev)
+{
+	struct stm32_pinctrl *pctl = platform_get_drvdata(pdev);
+	int i;
+
+	pctl->ngroups = pctl->match_data->npins;
+
+	/* Allocate groups */
+	pctl->groups = devm_kcalloc(&pdev->dev, pctl->ngroups,
+				    sizeof(*pctl->groups), GFP_KERNEL);
+	if (!pctl->groups)
+		return -ENOMEM;
+
+	/* We assume one pin per group and use the pin name as the group name. */
+	pctl->grp_names = devm_kcalloc(&pdev->dev, pctl->ngroups,
+				       sizeof(*pctl->grp_names), GFP_KERNEL);
+	if (!pctl->grp_names)
+		return -ENOMEM;
+
+	for (i = 0; i < pctl->match_data->npins; i++) {
+		const struct stm32_desc_pin *pin = pctl->match_data->pins + i;
+		struct stm32_pinctrl_group *group = pctl->groups + i;
+
+		group->name = pin->pin.name;
+		group->pin = pin->pin.number;
+
+		pctl->grp_names[i] = pin->pin.name;
+	}
+
+	return 0;
+}
+
+int stm32_pctl_probe(struct platform_device *pdev)
+{
+	struct device_node *np = pdev->dev.of_node;
+	struct device_node *child;
+	const struct of_device_id *match;
+	struct device *dev = &pdev->dev;
+	struct stm32_pinctrl *pctl;
+	struct pinctrl_pin_desc *pins;
+	int i, ret, banks = 0;
+
+	if (!np)
+		return -EINVAL;
+
+	match = of_match_device(dev->driver->of_match_table, dev);
+	if (!match || !match->data)
+		return -EINVAL;
+
+	if (!of_find_property(np, "pins-are-numbered", NULL)) {
+		dev_err(dev, "only support pins-are-numbered format\n");
+		return -EINVAL;
+	}
+
+	pctl = devm_kzalloc(dev, sizeof(*pctl), GFP_KERNEL);
+	if (!pctl)
+		return -ENOMEM;
+
+	platform_set_drvdata(pdev, pctl);
+
+	pctl->dev = dev;
+	pctl->match_data = match->data;
+	ret = stm32_pctrl_build_state(pdev);
+	if (ret) {
+		dev_err(dev, "build state failed: %d\n", ret);
+		return -EINVAL;
+	}
+
+	for_each_child_of_node(np, child)
+		if (of_property_read_bool(child, "gpio-controller"))
+			banks++;
+
+	if (!banks) {
+		dev_err(dev, "at least one GPIO bank is required\n");
+		return -EINVAL;
+	}
+
+	pctl->banks = devm_kcalloc(dev, banks, sizeof(*pctl->banks),
+			GFP_KERNEL);
+	if (!pctl->banks)
+		return -ENOMEM;
+
+	for_each_child_of_node(np, child) {
+		if (of_property_read_bool(child, "gpio-controller")) {
+			ret = stm32_gpiolib_register_bank(pctl, child);
+			if (ret)
+				return ret;
+
+			pctl->nbanks++;
+		}
+	}
+
+	pins = devm_kcalloc(&pdev->dev, pctl->match_data->npins, sizeof(*pins),
+			    GFP_KERNEL);
+	if (!pins)
+		return -ENOMEM;
+
+	for (i = 0; i < pctl->match_data->npins; i++)
+		pins[i] = pctl->match_data->pins[i].pin;
+
+	pctl->pctl_desc.name = dev_name(&pdev->dev);
+	pctl->pctl_desc.owner = THIS_MODULE;
+	pctl->pctl_desc.pins = pins;
+	pctl->pctl_desc.npins = pctl->match_data->npins;
+	pctl->pctl_desc.confops = &stm32_pconf_ops;
+	pctl->pctl_desc.pctlops = &stm32_pctrl_ops;
+	pctl->pctl_desc.pmxops = &stm32_pmx_ops;
+	pctl->dev = &pdev->dev;
+
+	pctl->pctl_dev = pinctrl_register(&pctl->pctl_desc, &pdev->dev, pctl);
+	if (!pctl->pctl_dev) {
+		dev_err(&pdev->dev, "Failed pinctrl registration\n");
+		return -EINVAL;
+	}
+
+	for (i = 0; i < pctl->nbanks; i++)
+		pinctrl_add_gpio_range(pctl->pctl_dev, &pctl->banks[i].range);
+
+	dev_info(dev, "Pinctrl STM32 initialized\n");
+
+	return 0;
+}
+
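
As a worked example of the "pinmux" cell format decoded by stm32_pctrl_dt_subnode_to_map() above: a sketch only, assuming it is built alongside the driver so that the static stm32_gpio_get_mode()/stm32_gpio_get_alt() helpers and the STM32_PIN_* macros from pinctrl-stm32.h (next file) are in scope. The PA9/AF7 values are purely illustrative.

	static void stm32_pinmux_cell_example(void)
	{
		/* Pin PA9 (global pin 9) muxed to alternate function 7 */
		u32 cell = STM32_PIN_NO(9) | STM32_PIN_AF(7);	/* 0x908 */

		WARN_ON(STM32_GET_PIN_NO(cell) != 9);			/* pin number */
		WARN_ON(STM32_GET_PIN_FUNC(cell) != STM32_PIN_AF(7));	/* function */
		/* AF(7) translates to MODER = 2 (alternate) and an AFR field of 7 */
		WARN_ON(stm32_gpio_get_mode(STM32_PIN_AF(7)) != 2);
		WARN_ON(stm32_gpio_get_alt(STM32_PIN_AF(7)) != 7);
	}
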
diff --git a/drivers/pinctrl/stm32/pinctrl-stm32.h b/drivers/pinctrl/stm32/pinctrl-stm32.h
new file mode 100644
index 0000000..35ebc94
--- /dev/null
+++ b/drivers/pinctrl/stm32/pinctrl-stm32.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright (C) Maxime Coquelin 2015
+ * Author:  Maxime Coquelin <mcoquelin.stm32@gmail.com>
+ * License terms:  GNU General Public License (GPL), version 2
+ */
+#ifndef __PINCTRL_STM32_H
+#define __PINCTRL_STM32_H
+
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/pinctrl/pinconf-generic.h>
+
+#define STM32_PIN_NO(x) ((x) << 8)
+#define STM32_GET_PIN_NO(x) ((x) >> 8)
+#define STM32_GET_PIN_FUNC(x) ((x) & 0xff)
+
+#define STM32_PIN_GPIO		0
+#define STM32_PIN_AF(x)		((x) + 1)
+#define STM32_PIN_ANALOG	(STM32_PIN_AF(15) + 1)
+
+struct stm32_desc_function {
+	const char *name;
+	const unsigned char num;
+};
+
+struct stm32_desc_pin {
+	struct pinctrl_pin_desc pin;
+	const struct stm32_desc_function *functions;
+};
+
+#define STM32_PIN(_pin, ...)					\
+	{							\
+		.pin = _pin,					\
+		.functions = (struct stm32_desc_function[]){	\
+			__VA_ARGS__, { } },			\
+	}
+
+#define STM32_FUNCTION(_num, _name)		\
+	{							\
+		.num = _num,					\
+		.name = _name,					\
+	}
+
+struct stm32_pinctrl_match_data {
+	const struct stm32_desc_pin *pins;
+	const unsigned int npins;
+};
+
+int stm32_pctl_probe(struct platform_device *pdev);
+
+#endif /* __PINCTRL_STM32_H */
+
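
The header above only exports stm32_pctl_probe(); each per-SoC file, such as pinctrl-stm32f429.c below, supplies the pin table and wraps that probe in a platform driver. A minimal sketch of that glue follows, with placeholder stm32fxxx_* names and compatible string (the real registration sits at the end of each per-SoC file, beyond the excerpt shown here), assuming the per-SoC file already includes <linux/platform_device.h>, <linux/of.h> and "pinctrl-stm32.h":

	static const struct stm32_pinctrl_match_data stm32fxxx_match_data = {
		.pins = stm32fxxx_pins,
		.npins = ARRAY_SIZE(stm32fxxx_pins),
	};

	static const struct of_device_id stm32fxxx_pctrl_match[] = {
		{
			.compatible = "st,stm32fxxx-pinctrl",
			.data = &stm32fxxx_match_data,
		},
		{ }
	};

	static struct platform_driver stm32fxxx_pinctrl_driver = {
		.probe = stm32_pctl_probe,
		.driver = {
			.name = "stm32fxxx-pinctrl",
			.of_match_table = stm32fxxx_pctrl_match,
		},
	};

	builtin_platform_driver(stm32fxxx_pinctrl_driver);
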
diff --git a/drivers/pinctrl/stm32/pinctrl-stm32f429.c b/drivers/pinctrl/stm32/pinctrl-stm32f429.c
new file mode 100644
index 0000000..e9b15dc
--- /dev/null
+++ b/drivers/pinctrl/stm32/pinctrl-stm32f429.c
@@ -0,0 +1,1591 @@
+/*
+ * Copyright (C) Maxime Coquelin 2015
+ * Author:  Maxime Coquelin <mcoquelin.stm32@gmail.com>
+ * License terms:  GNU General Public License (GPL), version 2
+ */
+#include <linux/init.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+
+#include "pinctrl-stm32.h"
+
+static const struct stm32_desc_pin stm32f429_pins[] = {
+	STM32_PIN(
+		PINCTRL_PIN(0, "PA0"),
+		STM32_FUNCTION(0, "GPIOA0"),
+		STM32_FUNCTION(2, "TIM2_CH1 TIM2_ETR"),
+		STM32_FUNCTION(3, "TIM5_CH1"),
+		STM32_FUNCTION(4, "TIM8_ETR"),
+		STM32_FUNCTION(8, "USART2_CTS"),
+		STM32_FUNCTION(9, "UART4_TX"),
+		STM32_FUNCTION(12, "ETH_MII_CRS"),
+		STM32_FUNCTION(16, "EVENTOUT"),
+		STM32_FUNCTION(17, "ANALOG")
+	),
+	STM32_PIN(
+		PINCTRL_PIN(1, "PA1"),
+		STM32_FUNCTION(0, "GPIOA1"),
+		STM32_FUNCTION(2, "TIM2_CH2"),
+		STM32_FUNCTION(3, "TIM5_CH2"),
+		STM32_FUNCTION(8, "USART2_RTS"),
+		STM32_FUNCTION(9, "UART4_RX"),
+		STM32_FUNCTION(12, "ETH_MII_RX_CLK ETH_RMII_REF_CLK"),
+		STM32_FUNCTION(16, "EVENTOUT"),
+		STM32_FUNCTION(17, "ANALOG")
+	),
+	STM32_PIN(
+		PINCTRL_PIN(2, "PA2"),
+		STM32_FUNCTION(0, "GPIOA2"),
+		STM32_FUNCTION(2, "TIM2_CH3"),
+		STM32_FUNCTION(3, "TIM5_CH3"),
+		STM32_FUNCTION(4, "TIM9_CH1"),
+		STM32_FUNCTION(8, "USART2_TX"),
+		STM32_FUNCTION(12, "ETH_MDIO"),
+		STM32_FUNCTION(16, "EVENTOUT"),
+		STM32_FUNCTION(17, "ANALOG")
+	),
+	STM32_PIN(
+		PINCTRL_PIN(3, "PA3"),
+		STM32_FUNCTION(0, "GPIOA3"),
+		STM32_FUNCTION(2, "TIM2_CH4"),
+		STM32_FUNCTION(3, "TIM5_CH4"),
+		STM32_FUNCTION(4, "TIM9_CH2"),
+		STM32_FUNCTION(8, "USART2_RX"),
+		STM32_FUNCTION(11, "OTG_HS_ULPI_D0"),
+		STM32_FUNCTION(12, "ETH_MII_COL"),
+		STM32_FUNCTION(15, "LCD_B5"),
+		STM32_FUNCTION(16, "EVENTOUT"),
+		STM32_FUNCTION(17, "ANALOG")
+	),
+	STM32_PIN(
+		PINCTRL_PIN(4, "PA4"),
+		STM32_FUNCTION(0, "GPIOA4"),
+		STM32_FUNCTION(6, "SPI1_NSS"),
+		STM32_FUNCTION(7, "SPI3_NSS I2S3_WS"),
+		STM32_FUNCTION(8, "USART2_CK"),
+		STM32_FUNCTION(13, "OTG_HS_SOF"),
+		STM32_FUNCTION(14, "DCMI_HSYNC"),
+		STM32_FUNCTION(15, "LCD_VSYNC"),
+		STM32_FUNCTION(16, "EVENTOUT"),
+		STM32_FUNCTION(17, "ANALOG")
+	),
+	STM32_PIN(
+		PINCTRL_PIN(5, "PA5"),
+		STM32_FUNCTION(0, "GPIOA5"),
+		STM32_FUNCTION(2, "TIM2_CH1 TIM2_ETR"),
+		STM32_FUNCTION(4, "TIM8_CH1N"),
+		STM32_FUNCTION(6, "SPI1_SCK"),
+		STM32_FUNCTION(11, "OTG_HS_ULPI_CK"),
+		STM32_FUNCTION(16, "EVENTOUT"),
+		STM32_FUNCTION(17, "ANALOG")
+	),
+	STM32_PIN(
+		PINCTRL_PIN(6, "PA6"),
+		STM32_FUNCTION(0, "GPIOA6"),
+		STM32_FUNCTION(2, "TIM1_BKIN"),
+		STM32_FUNCTION(3, "TIM3_CH1"),
+		STM32_FUNCTION(4, "TIM8_BKIN"),
+		STM32_FUNCTION(6, "SPI1_MISO"),
+		STM32_FUNCTION(10, "TIM13_CH1"),
+		STM32_FUNCTION(14, "DCMI_PIXCLK"),
+		STM32_FUNCTION(15, "LCD_G2"),
+		STM32_FUNCTION(16, "EVENTOUT"),
+		STM32_FUNCTION(17, "ANALOG")
+	),
+	STM32_PIN(
+		PINCTRL_PIN(7, "PA7"),
+		STM32_FUNCTION(0, "GPIOA7"),
+		STM32_FUNCTION(2, "TIM1_CH1N"),
+		STM32_FUNCTION(3, "TIM3_CH2"),
+		STM32_FUNCTION(4, "TIM8_CH1N"),
+		STM32_FUNCTION(6, "SPI1_MOSI"),
+		STM32_FUNCTION(10, "TIM14_CH1"),
+		STM32_FUNCTION(12, "ETH_MII_RX_DV ETH_RMII_CRS_DV"),
+		STM32_FUNCTION(16, "EVENTOUT"),
+		STM32_FUNCTION(17, "ANALOG")
+	),
+	STM32_PIN(
+		PINCTRL_PIN(8, "PA8"),
+		STM32_FUNCTION(0, "GPIOA8"),
+		STM32_FUNCTION(1, "MCO1"),
+		STM32_FUNCTION(2, "TIM1_CH1"),
+		STM32_FUNCTION(5, "I2C3_SCL"),
+		STM32_FUNCTION(8, "USART1_CK"),
+		STM32_FUNCTION(11, "OTG_FS_SOF"),
+		STM32_FUNCTION(15, "LCD_R6"),
+		STM32_FUNCTION(16, "EVENTOUT"),
+		STM32_FUNCTION(17, "ANALOG")
+	),
+	STM32_PIN(
+		PINCTRL_PIN(9, "PA9"),
+		STM32_FUNCTION(0, "GPIOA9"),
+		STM32_FUNCTION(2, "TIM1_CH2"),
+		STM32_FUNCTION(5, "I2C3_SMBA"),
+		STM32_FUNCTION(8, "USART1_TX"),
+		STM32_FUNCTION(14, "DCMI_D0"),
+		STM32_FUNCTION(16, "EVENTOUT"),
+		STM32_FUNCTION(17, "ANALOG")
+	),
+	STM32_PIN(
+		PINCTRL_PIN(10, "PA10"),
+		STM32_FUNCTION(0, "GPIOA10"),
+		STM32_FUNCTION(2, "TIM1_CH3"),
+		STM32_FUNCTION(8, "USART1_RX"),
+		STM32_FUNCTION(11, "OTG_FS_ID"),
+		STM32_FUNCTION(14, "DCMI_D1"),
+		STM32_FUNCTION(16, "EVENTOUT"),
+		STM32_FUNCTION(17, "ANALOG")
+	),
+	STM32_PIN(
+		PINCTRL_PIN(11, "PA11"),
+		STM32_FUNCTION(0, "GPIOA11"),
+		STM32_FUNCTION(2, "TIM1_CH4"),
+		STM32_FUNCTION(8, "USART1_CTS"),
+		STM32_FUNCTION(10, "CAN1_RX"),
+		STM32_FUNCTION(11, "OTG_FS_DM"),
+		STM32_FUNCTION(15, "LCD_R4"),
+		STM32_FUNCTION(16, "EVENTOUT"),
+		STM32_FUNCTION(17, "ANALOG")
+	),
+	STM32_PIN(
+		PINCTRL_PIN(12, "PA12"),
+		STM32_FUNCTION(0, "GPIOA12"),
+		STM32_FUNCTION(2, "TIM1_ETR"),
+		STM32_FUNCTION(8, "USART1_RTS"),
+		STM32_FUNCTION(10, "CAN1_TX"),
+		STM32_FUNCTION(11, "OTG_FS_DP"),
+		STM32_FUNCTION(15, "LCD_R5"),
+		STM32_FUNCTION(16, "EVENTOUT"),
+		STM32_FUNCTION(17, "ANALOG")
+	),
+	STM32_PIN(
+		PINCTRL_PIN(13, "PA13"),
+		STM32_FUNCTION(0, "GPIOA13"),
+		STM32_FUNCTION(1, "JTMS SWDIO"),
+		STM32_FUNCTION(16, "EVENTOUT"),
+		STM32_FUNCTION(17, "ANALOG")
+	),
+	STM32_PIN(
+		PINCTRL_PIN(14, "PA14"),
+		STM32_FUNCTION(0, "GPIOA14"),
+		STM32_FUNCTION(1, "JTCK SWCLK"),
+		STM32_FUNCTION(16, "EVENTOUT"),
+		STM32_FUNCTION(17, "ANALOG")
+	),
+	STM32_PIN(
+		PINCTRL_PIN(15, "PA15"),
+		STM32_FUNCTION(0, "GPIOA15"),
+		STM32_FUNCTION(1, "JTDI"),
+		STM32_FUNCTION(2, "TIM2_CH1 TIM2_ETR"),
+		STM32_FUNCTION(6, "SPI1_NSS"),
+		STM32_FUNCTION(7, "SPI3_NSS I2S3_WS"),
+		STM32_FUNCTION(16, "EVENTOUT"),
+		STM32_FUNCTION(17, "ANALOG")
+	),
+	STM32_PIN(
+		PINCTRL_PIN(16, "PB0"),
+		STM32_FUNCTION(0, "GPIOB0"),
+		STM32_FUNCTION(2, "TIM1_CH2N"),
+		STM32_FUNCTION(3, "TIM3_CH3"),
+		STM32_FUNCTION(4, "TIM8_CH2N"),
+		STM32_FUNCTION(10, "LCD_R3"),
+		STM32_FUNCTION(11, "OTG_HS_ULPI_D1"),
+		STM32_FUNCTION(12, "ETH_MII_RXD2"),
+		STM32_FUNCTION(16, "EVENTOUT"),
+		STM32_FUNCTION(17, "ANALOG")
+	),
+	STM32_PIN(
+		PINCTRL_PIN(17, "PB1"),
+		STM32_FUNCTION(0, "GPIOB1"),
+		STM32_FUNCTION(2, "TIM1_CH3N"),
+		STM32_FUNCTION(3, "TIM3_CH4"),
+		STM32_FUNCTION(4, "TIM8_CH3N"),
+		STM32_FUNCTION(10, "LCD_R6"),
+		STM32_FUNCTION(11, "OTG_HS_ULPI_D2"),
+		STM32_FUNCTION(12, "ETH_MII_RXD3"),
+		STM32_FUNCTION(16, "EVENTOUT"),
+		STM32_FUNCTION(17, "ANALOG")
+	),
+	STM32_PIN(
+		PINCTRL_PIN(18, "PB2"),
+		STM32_FUNCTION(0, "GPIOB2"),
+		STM32_FUNCTION(16, "EVENTOUT"),
+		STM32_FUNCTION(17, "ANALOG")
+	),
+	STM32_PIN(
+		PINCTRL_PIN(19, "PB3"),
+		STM32_FUNCTION(0, "GPIOB3"),
+		STM32_FUNCTION(1, "JTDO TRACESWO"),
+		STM32_FUNCTION(2, "TIM2_CH2"),
+		STM32_FUNCTION(6, "SPI1_SCK"),
+		STM32_FUNCTION(7, "SPI3_SCK I2S3_CK"),
+		STM32_FUNCTION(16, "EVENTOUT"),
+		STM32_FUNCTION(17, "ANALOG")
+	),
+	STM32_PIN(
+		PINCTRL_PIN(20, "PB4"),
+		STM32_FUNCTION(0, "GPIOB4"),
+		STM32_FUNCTION(1, "NJTRST"),
+		STM32_FUNCTION(3, "TIM3_CH1"),
+		STM32_FUNCTION(6, "SPI1_MISO"),
+		STM32_FUNCTION(7, "SPI3_MISO"),
+		STM32_FUNCTION(8, "I2S3EXT_SD"),
+		STM32_FUNCTION(16, "EVENTOUT"),
+		STM32_FUNCTION(17, "ANALOG")
+	),
+	STM32_PIN(
+		PINCTRL_PIN(21, "PB5"),
+		STM32_FUNCTION(0, "GPIOB5"),
+		STM32_FUNCTION(3, "TIM3_CH2"),
+		STM32_FUNCTION(5, "I2C1_SMBA"),
+		STM32_FUNCTION(6, "SPI1_MOSI"),
+		STM32_FUNCTION(7, "SPI3_MOSI I2S3_SD"),
+		STM32_FUNCTION(10, "CAN2_RX"),
+		STM32_FUNCTION(11, "OTG_HS_ULPI_D7"),
+		STM32_FUNCTION(12, "ETH_PPS_OUT"),
+		STM32_FUNCTION(13, "FMC_SDCKE1"),
+		STM32_FUNCTION(14, "DCMI_D10"),
+		STM32_FUNCTION(16, "EVENTOUT"),
+		STM32_FUNCTION(17, "ANALOG")
+	),
+	STM32_PIN(
+		PINCTRL_PIN(22, "PB6"),
+		STM32_FUNCTION(0, "GPIOB6"),
+		STM32_FUNCTION(3, "TIM4_CH1"),
+		STM32_FUNCTION(5, "I2C1_SCL"),
+		STM32_FUNCTION(8, "USART1_TX"),
+		STM32_FUNCTION(10, "CAN2_TX"),
+		STM32_FUNCTION(13, "FMC_SDNE1"),
+		STM32_FUNCTION(14, "DCMI_D5"),
+		STM32_FUNCTION(16, "EVENTOUT"),
+		STM32_FUNCTION(17, "ANALOG")
+	),
+	STM32_PIN(
+		PINCTRL_PIN(23, "PB7"),
+		STM32_FUNCTION(0, "GPIOB7"),
+		STM32_FUNCTION(3, "TIM4_CH2"),
+		STM32_FUNCTION(5, "I2C1_SDA"),
+		STM32_FUNCTION(8, "USART1_RX"),
+		STM32_FUNCTION(13, "FMC_NL"),
+		STM32_FUNCTION(14, "DCMI_VSYNC"),
+		STM32_FUNCTION(16, "EVENTOUT"),
+		STM32_FUNCTION(17, "ANALOG")
+	),
+	STM32_PIN(
+		PINCTRL_PIN(24, "PB8"),
+		STM32_FUNCTION(0, "GPIOB8"),
+		STM32_FUNCTION(3, "TIM4_CH3"),
+		STM32_FUNCTION(4, "TIM10_CH1"),
+		STM32_FUNCTION(5, "I2C1_SCL"),
+		STM32_FUNCTION(10, "CAN1_RX"),
+		STM32_FUNCTION(12, "ETH_MII_TXD3"),
+		STM32_FUNCTION(13, "SDIO_D4"),
+		STM32_FUNCTION(14, "DCMI_D6"),
+		STM32_FUNCTION(15, "LCD_B6"),
+		STM32_FUNCTION(16, "EVENTOUT"),
+		STM32_FUNCTION(17, "ANALOG")
+	),
+	STM32_PIN(
+		PINCTRL_PIN(25, "PB9"),
+		STM32_FUNCTION(0, "GPIOB9"),
+		STM32_FUNCTION(3, "TIM4_CH4"),
+		STM32_FUNCTION(4, "TIM11_CH1"),
+		STM32_FUNCTION(5, "I2C1_SDA"),
+		STM32_FUNCTION(6, "SPI2_NSS I2S2_WS"),
+		STM32_FUNCTION(10, "CAN1_TX"),
+		STM32_FUNCTION(13, "SDIO_D5"),
+		STM32_FUNCTION(14, "DCMI_D7"),
+		STM32_FUNCTION(15, "LCD_B7"),
+		STM32_FUNCTION(16, "EVENTOUT"),
+		STM32_FUNCTION(17, "ANALOG")
+	),
+	STM32_PIN(
+		PINCTRL_PIN(26, "PB10"),
+		STM32_FUNCTION(0, "GPIOB10"),
+		STM32_FUNCTION(2, "TIM2_CH3"),
+		STM32_FUNCTION(5, "I2C2_SCL"),
+		STM32_FUNCTION(6, "SPI2_SCK I2S2_CK"),
+		STM32_FUNCTION(8, "USART3_TX"),
+		STM32_FUNCTION(11, "OTG_HS_ULPI_D3"),
+		STM32_FUNCTION(12, "ETH_MII_RX_ER"),
+		STM32_FUNCTION(15, "LCD_G4"),
+		STM32_FUNCTION(16, "EVENTOUT"),
+		STM32_FUNCTION(17, "ANALOG")
+	),
+	STM32_PIN(
+		PINCTRL_PIN(27, "PB11"),
+		STM32_FUNCTION(0, "GPIOB11"),
+		STM32_FUNCTION(2, "TIM2_CH4"),
+		STM32_FUNCTION(5, "I2C2_SDA"),
+		STM32_FUNCTION(8, "USART3_RX"),
+		STM32_FUNCTION(11, "OTG_HS_ULPI_D4"),
+		STM32_FUNCTION(12, "ETH_MII_TX_EN ETH_RMII_TX_EN"),
+		STM32_FUNCTION(15, "LCD_G5"),
+		STM32_FUNCTION(16, "EVENTOUT"),
+		STM32_FUNCTION(17, "ANALOG")
+	),
+	STM32_PIN(
+		PINCTRL_PIN(28, "PB12"),
+		STM32_FUNCTION(0, "GPIOB12"),
+		STM32_FUNCTION(2, "TIM1_BKIN"),
+		STM32_FUNCTION(5, "I2C2_SMBA"),
+		STM32_FUNCTION(6, "SPI2_NSS I2S2_WS"),
+		STM32_FUNCTION(8, "USART3_CK"),
+		STM32_FUNCTION(10, "CAN2_RX"),
+		STM32_FUNCTION(11, "OTG_HS_ULPI_D5"),
+		STM32_FUNCTION(12, "ETH_MII_TXD0 ETH_RMII_TXD0"),
+		STM32_FUNCTION(13, "OTG_HS_ID"),
+		STM32_FUNCTION(16, "EVENTOUT"),
+		STM32_FUNCTION(17, "ANALOG")
+	),
+	STM32_PIN(
+		PINCTRL_PIN(29, "PB13"),
+		STM32_FUNCTION(0, "GPIOB13"),
+		STM32_FUNCTION(2, "TIM1_CH1N"),
+		STM32_FUNCTION(6, "SPI2_SCK I2S2_CK"),
+		STM32_FUNCTION(8, "USART3_CTS"),
+		STM32_FUNCTION(10, "CAN2_TX"),
+		STM32_FUNCTION(11, "OTG_HS_ULPI_D6"),
+		STM32_FUNCTION(12, "ETH_MII_TXD1 ETH_RMII_TXD1"),
+		STM32_FUNCTION(16, "EVENTOUT"),
+		STM32_FUNCTION(17, "ANALOG")
+	),
+	STM32_PIN(
+		PINCTRL_PIN(30, "PB14"),
+		STM32_FUNCTION(0, "GPIOB14"),
+		STM32_FUNCTION(2, "TIM1_CH2N"),
+		STM32_FUNCTION(4, "TIM8_CH2N"),
+		STM32_FUNCTION(6, "SPI2_MISO"),
+		STM32_FUNCTION(7, "I2S2EXT_SD"),
+		STM32_FUNCTION(8, "USART3_RTS"),
+		STM32_FUNCTION(10, "TIM12_CH1"),
+		STM32_FUNCTION(13, "OTG_HS_DM"),
+		STM32_FUNCTION(16, "EVENTOUT"),
+		STM32_FUNCTION(17, "ANALOG")
+	),
+	STM32_PIN(
+		PINCTRL_PIN(31, "PB15"),
+		STM32_FUNCTION(0, "GPIOB15"),
+		STM32_FUNCTION(1, "RTC_REFIN"),
+		STM32_FUNCTION(2, "TIM1_CH3N"),
+		STM32_FUNCTION(4, "TIM8_CH3N"),
+		STM32_FUNCTION(6, "SPI2_MOSI I2S2_SD"),
+		STM32_FUNCTION(10, "TIM12_CH2"),
+		STM32_FUNCTION(13, "OTG_HS_DP"),
+		STM32_FUNCTION(16, "EVENTOUT"),
+		STM32_FUNCTION(17, "ANALOG")
+	),
+	STM32_PIN(
+		PINCTRL_PIN(32, "PC0"),
+		STM32_FUNCTION(0, "GPIOC0"),
+		STM32_FUNCTION(11, "OTG_HS_ULPI_STP"),
+		STM32_FUNCTION(13, "FMC_SDNWE"),
+		STM32_FUNCTION(16, "EVENTOUT"),
+		STM32_FUNCTION(17, "ANALOG")
+	),
+	STM32_PIN(
+		PINCTRL_PIN(33, "PC1"),
+		STM32_FUNCTION(0, "GPIOC1"),
+		STM32_FUNCTION(12, "ETH_MDC"),
+		STM32_FUNCTION(16, "EVENTOUT"),
+		STM32_FUNCTION(17, "ANALOG")
+	),
+	STM32_PIN(
+		PINCTRL_PIN(34, "PC2"),
+		STM32_FUNCTION(0, "GPIOC2"),
+		STM32_FUNCTION(6, "SPI2_MISO"),
+		STM32_FUNCTION(7, "I2S2EXT_SD"),
+		STM32_FUNCTION(11, "OTG_HS_ULPI_DIR"),
+		STM32_FUNCTION(12, "ETH_MII_TXD2"),
+		STM32_FUNCTION(13, "FMC_SDNE0"),
+		STM32_FUNCTION(16, "EVENTOUT"),
+		STM32_FUNCTION(17, "ANALOG")
+	),
+	STM32_PIN(
+		PINCTRL_PIN(35, "PC3"),
+		STM32_FUNCTION(0, "GPIOC3"),
+		STM32_FUNCTION(6, "SPI2_MOSI I2S2_SD"),
+		STM32_FUNCTION(11, "OTG_HS_ULPI_NXT"),
+		STM32_FUNCTION(12, "ETH_MII_TX_CLK"),
+		STM32_FUNCTION(13, "FMC_SDCKE0"),
+		STM32_FUNCTION(16, "EVENTOUT"),
+		STM32_FUNCTION(17, "ANALOG")
+	),
+	STM32_PIN(
+		PINCTRL_PIN(36, "PC4"),
+		STM32_FUNCTION(0, "GPIOC4"),
+		STM32_FUNCTION(12, "ETH_MII_RXD0 ETH_RMII_RXD0"),
+		STM32_FUNCTION(16, "EVENTOUT"),
+		STM32_FUNCTION(17, "ANALOG")
+	),
+	STM32_PIN(
+		PINCTRL_PIN(37, "PC5"),
+		STM32_FUNCTION(0, "GPIOC5"),
+		STM32_FUNCTION(12, "ETH_MII_RXD1 ETH_RMII_RXD1"),
+		STM32_FUNCTION(16, "EVENTOUT"),
+		STM32_FUNCTION(17, "ANALOG")
+	),
+	STM32_PIN(
+		PINCTRL_PIN(38, "PC6"),
+		STM32_FUNCTION(0, "GPIOC6"),
+		STM32_FUNCTION(3, "TIM3_CH1"),
+		STM32_FUNCTION(4, "TIM8_CH1"),
+		STM32_FUNCTION(6, "I2S2_MCK"),
+		STM32_FUNCTION(9, "USART6_TX"),
+		STM32_FUNCTION(13, "SDIO_D6"),
+		STM32_FUNCTION(14, "DCMI_D0"),
+		STM32_FUNCTION(15, "LCD_HSYNC"),
+		STM32_FUNCTION(16, "EVENTOUT"),
+		STM32_FUNCTION(17, "ANALOG")
+	),
+	STM32_PIN(
+		PINCTRL_PIN(39, "PC7"),
+		STM32_FUNCTION(0, "GPIOC7"),
+		STM32_FUNCTION(3, "TIM3_CH2"),
+		STM32_FUNCTION(4, "TIM8_CH2"),
+		STM32_FUNCTION(7, "I2S3_MCK"),
+		STM32_FUNCTION(9, "USART6_RX"),
+		STM32_FUNCTION(13, "SDIO_D7"),
+		STM32_FUNCTION(14, "DCMI_D1"),
+		STM32_FUNCTION(15, "LCD_G6"),
+		STM32_FUNCTION(16, "EVENTOUT"),
+		STM32_FUNCTION(17, "ANALOG")
+	),
+	STM32_PIN(
+		PINCTRL_PIN(40, "PC8"),
+		STM32_FUNCTION(0, "GPIOC8"),
+		STM32_FUNCTION(3, "TIM3_CH3"),
+		STM32_FUNCTION(4, "TIM8_CH3"),
+		STM32_FUNCTION(9, "USART6_CK"),
+		STM32_FUNCTION(13, "SDIO_D0"),
+		STM32_FUNCTION(14, "DCMI_D2"),
+		STM32_FUNCTION(16, "EVENTOUT"),
+		STM32_FUNCTION(17, "ANALOG")
+	),
+	STM32_PIN(
+		PINCTRL_PIN(41, "PC9"),
+		STM32_FUNCTION(0, "GPIOC9"),
+		STM32_FUNCTION(1, "MCO2"),
+		STM32_FUNCTION(3, "TIM3_CH4"),
+		STM32_FUNCTION(4, "TIM8_CH4"),
+		STM32_FUNCTION(5, "I2C3_SDA"),
+		STM32_FUNCTION(6, "I2S_CKIN"),
+		STM32_FUNCTION(13, "SDIO_D1"),
+		STM32_FUNCTION(14, "DCMI_D3"),
+		STM32_FUNCTION(16, "EVENTOUT"),
+		STM32_FUNCTION(17, "ANALOG")
+	),
+	STM32_PIN(
+		PINCTRL_PIN(42, "PC10"),
+		STM32_FUNCTION(0, "GPIOC10"),
+		STM32_FUNCTION(7, "SPI3_SCK I2S3_CK"),
+		STM32_FUNCTION(8, "USART3_TX"),
+		STM32_FUNCTION(9, "UART4_TX"),
+		STM32_FUNCTION(13, "SDIO_D2"),
+		STM32_FUNCTION(14, "DCMI_D8"),
+		STM32_FUNCTION(15, "LCD_R2"),
+		STM32_FUNCTION(16, "EVENTOUT"),
+		STM32_FUNCTION(17, "ANALOG")
+	),
+	STM32_PIN(
+		PINCTRL_PIN(43, "PC11"),
+		STM32_FUNCTION(0, "GPIOC11"),
+		STM32_FUNCTION(6, "I2S3EXT_SD"),
+		STM32_FUNCTION(7, "SPI3_MISO"),
+		STM32_FUNCTION(8, "USART3_RX"),
+		STM32_FUNCTION(9, "UART4_RX"),
+		STM32_FUNCTION(13, "SDIO_D3"),
+		STM32_FUNCTION(14, "DCMI_D4"),
+		STM32_FUNCTION(16, "EVENTOUT"),
+		STM32_FUNCTION(17, "ANALOG")
+	),
+	STM32_PIN(
+		PINCTRL_PIN(44, "PC12"),
+		STM32_FUNCTION(0, "GPIOC12"),
+		STM32_FUNCTION(7, "SPI3_MOSI I2S3_SD"),
+		STM32_FUNCTION(8, "USART3_CK"),
+		STM32_FUNCTION(9, "UART5_TX"),
+		STM32_FUNCTION(13, "SDIO_CK"),
+		STM32_FUNCTION(14, "DCMI_D9"),
+		STM32_FUNCTION(16, "EVENTOUT"),
+		STM32_FUNCTION(17, "ANALOG")
+	),
+	STM32_PIN(
+		PINCTRL_PIN(45, "PC13"),
+		STM32_FUNCTION(0, "GPIOC13"),
+		STM32_FUNCTION(16, "EVENTOUT"),
+		STM32_FUNCTION(17, "ANALOG")
+	),
+	STM32_PIN(
+		PINCTRL_PIN(46, "PC14"),
+		STM32_FUNCTION(0, "GPIOC14"),
+		STM32_FUNCTION(16, "EVENTOUT"),
+		STM32_FUNCTION(17, "ANALOG")
+	),
+	STM32_PIN(
+		PINCTRL_PIN(47, "PC15"),
+		STM32_FUNCTION(0, "GPIOC15"),
+		STM32_FUNCTION(16, "EVENTOUT"),
+		STM32_FUNCTION(17, "ANALOG")
+	),
+	STM32_PIN(
+		PINCTRL_PIN(48, "PD0"),
+		STM32_FUNCTION(0, "GPIOD0"),
+		STM32_FUNCTION(10, "CAN1_RX"),
+		STM32_FUNCTION(13, "FMC_D2"),
+		STM32_FUNCTION(16, "EVENTOUT"),
+		STM32_FUNCTION(17, "ANALOG")
+	),
+	STM32_PIN(
+		PINCTRL_PIN(49, "PD1"),
+		STM32_FUNCTION(0, "GPIOD1"),
+		STM32_FUNCTION(10, "CAN1_TX"),
+		STM32_FUNCTION(13, "FMC_D3"),
+		STM32_FUNCTION(16, "EVENTOUT"),
+		STM32_FUNCTION(17, "ANALOG")
+	),
+	STM32_PIN(
+		PINCTRL_PIN(50, "PD2"),
+		STM32_FUNCTION(0, "GPIOD2"),
+		STM32_FUNCTION(3, "TIM3_ETR"),
+		STM32_FUNCTION(9, "UART5_RX"),
+		STM32_FUNCTION(13, "SDIO_CMD"),
+		STM32_FUNCTION(14, "DCMI_D11"),
+		STM32_FUNCTION(16, "EVENTOUT"),
+		STM32_FUNCTION(17, "ANALOG")
+	),
+	STM32_PIN(
+		PINCTRL_PIN(51, "PD3"),
+		STM32_FUNCTION(0, "GPIOD3"),
+		STM32_FUNCTION(6, "SPI2_SCK I2S2_CK"),
+		STM32_FUNCTION(8, "USART2_CTS"),
+		STM32_FUNCTION(13, "FMC_CLK"),
+		STM32_FUNCTION(14, "DCMI_D5"),
+		STM32_FUNCTION(15, "LCD_G7"),
+		STM32_FUNCTION(16, "EVENTOUT"),
+		STM32_FUNCTION(17, "ANALOG")
+	),
+	STM32_PIN(
+		PINCTRL_PIN(52, "PD4"),
+		STM32_FUNCTION(0, "GPIOD4"),
+		STM32_FUNCTION(8, "USART2_RTS"),
+		STM32_FUNCTION(13, "FMC_NOE"),
+		STM32_FUNCTION(16, "EVENTOUT"),
+		STM32_FUNCTION(17, "ANALOG")
+	),
+	STM32_PIN(
+		PINCTRL_PIN(53, "PD5"),
+		STM32_FUNCTION(0, "GPIOD5"),
+		STM32_FUNCTION(8, "USART2_TX"),
+		STM32_FUNCTION(13, "FMC_NWE"),
+		STM32_FUNCTION(16, "EVENTOUT"),
+		STM32_FUNCTION(17, "ANALOG")
+	),
+	STM32_PIN(
+		PINCTRL_PIN(54, "PD6"),
+		STM32_FUNCTION(0, "GPIOD6"),
+		STM32_FUNCTION(6, "SPI3_MOSI I2S3_SD"),
+		STM32_FUNCTION(7, "SAI1_SD_A"),
+		STM32_FUNCTION(8, "USART2_RX"),
+		STM32_FUNCTION(13, "FMC_NWAIT"),
+		STM32_FUNCTION(14, "DCMI_D10"),
+		STM32_FUNCTION(15, "LCD_B2"),
+		STM32_FUNCTION(16, "EVENTOUT"),
+		STM32_FUNCTION(17, "ANALOG")
+	),
+	STM32_PIN(
+		PINCTRL_PIN(55, "PD7"),
+		STM32_FUNCTION(0, "GPIOD7"),
+		STM32_FUNCTION(8, "USART2_CK"),
+		STM32_FUNCTION(13, "FMC_NE1 FMC_NCE2"),
+		STM32_FUNCTION(16, "EVENTOUT"),
+		STM32_FUNCTION(17, "ANALOG")
+	),
+	STM32_PIN(
+		PINCTRL_PIN(56, "PD8"),
+		STM32_FUNCTION(0, "GPIOD8"),
+		STM32_FUNCTION(8, "USART3_TX"),
+		STM32_FUNCTION(13, "FMC_D13"),
+		STM32_FUNCTION(16, "EVENTOUT"),
+		STM32_FUNCTION(17, "ANALOG")
+	),
+	STM32_PIN(
+		PINCTRL_PIN(57, "PD9"),
+		STM32_FUNCTION(0, "GPIOD9"),
+		STM32_FUNCTION(8, "USART3_RX"),
+		STM32_FUNCTION(13, "FMC_D14"),
+		STM32_FUNCTION(16, "EVENTOUT"),
+		STM32_FUNCTION(17, "ANALOG")
+	),
+	STM32_PIN(
+		PINCTRL_PIN(58, "PD10"),
+		STM32_FUNCTION(0, "GPIOD10"),
+		STM32_FUNCTION(8, "USART3_CK"),
+		STM32_FUNCTION(13, "FMC_D15"),
+		STM32_FUNCTION(15, "LCD_B3"),
+		STM32_FUNCTION(16, "EVENTOUT"),
+		STM32_FUNCTION(17, "ANALOG")
+	),
+	STM32_PIN(
+		PINCTRL_PIN(59, "PD11"),
+		STM32_FUNCTION(0, "GPIOD11"),
+		STM32_FUNCTION(8, "USART3_CTS"),
+		STM32_FUNCTION(13, "FMC_A16"),
+		STM32_FUNCTION(16, "EVENTOUT"),
+		STM32_FUNCTION(17, "ANALOG")
+	),
+	STM32_PIN(
+		PINCTRL_PIN(60, "PD12"),
+		STM32_FUNCTION(0, "GPIOD12"),
+		STM32_FUNCTION(3, "TIM4_CH1"),
+		STM32_FUNCTION(8, "USART3_RTS"),
+		STM32_FUNCTION(13, "FMC_A17"),
+		STM32_FUNCTION(16, "EVENTOUT"),
+		STM32_FUNCTION(17, "ANALOG")
+	),
+	STM32_PIN(
+		PINCTRL_PIN(61, "PD13"),
+		STM32_FUNCTION(0, "GPIOD13"),
+		STM32_FUNCTION(3, "TIM4_CH2"),
+		STM32_FUNCTION(13, "FMC_A18"),
+		STM32_FUNCTION(16, "EVENTOUT"),
+		STM32_FUNCTION(17, "ANALOG")
+	),
+	STM32_PIN(
+		PINCTRL_PIN(62, "PD14"),
+		STM32_FUNCTION(0, "GPIOD14"),
+		STM32_FUNCTION(3, "TIM4_CH3"),
+		STM32_FUNCTION(13, "FMC_D0"),
+		STM32_FUNCTION(16, "EVENTOUT"),
+		STM32_FUNCTION(17, "ANALOG")
+	),
+	STM32_PIN(
+		PINCTRL_PIN(63, "PD15"),
+		STM32_FUNCTION(0, "GPIOD15"),
+		STM32_FUNCTION(3, "TIM4_CH4"),
+		STM32_FUNCTION(13, "FMC_D1"),
+		STM32_FUNCTION(16, "EVENTOUT"),
+		STM32_FUNCTION(17, "ANALOG")
+	),
+	STM32_PIN(
+		PINCTRL_PIN(64, "PE0"),
+		STM32_FUNCTION(0, "GPIOE0"),
+		STM32_FUNCTION(3, "TIM4_ETR"),
+		STM32_FUNCTION(9, "UART8_RX"),
+		STM32_FUNCTION(13, "FMC_NBL0"),
+		STM32_FUNCTION(14, "DCMI_D2"),
+		STM32_FUNCTION(16, "EVENTOUT"),
+		STM32_FUNCTION(17, "ANALOG")
+	),
+	STM32_PIN(
+		PINCTRL_PIN(65, "PE1"),
+		STM32_FUNCTION(0, "GPIOE1"),
+		STM32_FUNCTION(9, "UART8_TX"),
+		STM32_FUNCTION(13, "FMC_NBL1"),
+		STM32_FUNCTION(14, "DCMI_D3"),
+		STM32_FUNCTION(16, "EVENTOUT"),
+		STM32_FUNCTION(17, "ANALOG")
+	),
+	STM32_PIN(
+		PINCTRL_PIN(66, "PE2"),
+		STM32_FUNCTION(0, "GPIOE2"),
+		STM32_FUNCTION(1, "TRACECLK"),
+		STM32_FUNCTION(6, "SPI4_SCK"),
+		STM32_FUNCTION(7, "SAI1_MCLK_A"),
+		STM32_FUNCTION(12, "ETH_MII_TXD3"),
+		STM32_FUNCTION(13, "FMC_A23"),
+		STM32_FUNCTION(16, "EVENTOUT"),
+		STM32_FUNCTION(17, "ANALOG")
+	),
+	STM32_PIN(
+		PINCTRL_PIN(67, "PE3"),
+		STM32_FUNCTION(0, "GPIOE3"),
+		STM32_FUNCTION(1, "TRACED0"),
+		STM32_FUNCTION(7, "SAI1_SD_B"),
+		STM32_FUNCTION(13, "FMC_A19"),
+		STM32_FUNCTION(16, "EVENTOUT"),
+		STM32_FUNCTION(17, "ANALOG")
+	),
+	STM32_PIN(
+		PINCTRL_PIN(68, "PE4"),
+		STM32_FUNCTION(0, "GPIOE4"),
+		STM32_FUNCTION(1, "TRACED1"),
+		STM32_FUNCTION(6, "SPI4_NSS"),
+		STM32_FUNCTION(7, "SAI1_FS_A"),
+		STM32_FUNCTION(13, "FMC_A20"),
+		STM32_FUNCTION(14, "DCMI_D4"),
+		STM32_FUNCTION(15, "LCD_B0"),
+		STM32_FUNCTION(16, "EVENTOUT"),
+		STM32_FUNCTION(17, "ANALOG")
+	),
+	STM32_PIN(
+		PINCTRL_PIN(69, "PE5"),
+		STM32_FUNCTION(0, "GPIOE5"),
+		STM32_FUNCTION(1, "TRACED2"),
+		STM32_FUNCTION(4, "TIM9_CH1"),
+		STM32_FUNCTION(6, "SPI4_MISO"),
+		STM32_FUNCTION(7, "SAI1_SCK_A"),
+		STM32_FUNCTION(13, "FMC_A21"),
+		STM32_FUNCTION(14, "DCMI_D6"),
+		STM32_FUNCTION(15, "LCD_G0"),
+		STM32_FUNCTION(16, "EVENTOUT"),
+		STM32_FUNCTION(17, "ANALOG")
+	),
+	STM32_PIN(
+		PINCTRL_PIN(70, "PE6"),
+		STM32_FUNCTION(0, "GPIOE6"),
+		STM32_FUNCTION(1, "TRACED3"),
+		STM32_FUNCTION(4, "TIM9_CH2"),
+		STM32_FUNCTION(6, "SPI4_MOSI"),
+		STM32_FUNCTION(7, "SAI1_SD_A"),
+		STM32_FUNCTION(13, "FMC_A22"),
+		STM32_FUNCTION(14, "DCMI_D7"),
+		STM32_FUNCTION(15, "LCD_G1"),
+		STM32_FUNCTION(16, "EVENTOUT"),
+		STM32_FUNCTION(17, "ANALOG")
+	),
+	STM32_PIN(
+		PINCTRL_PIN(71, "PE7"),
+		STM32_FUNCTION(0, "GPIOE7"),
+		STM32_FUNCTION(2, "TIM1_ETR"),
+		STM32_FUNCTION(9, "UART7_RX"),
+		STM32_FUNCTION(13, "FMC_D4"),
+		STM32_FUNCTION(16, "EVENTOUT"),
+		STM32_FUNCTION(17, "ANALOG")
+	),
+	STM32_PIN(
+		PINCTRL_PIN(72, "PE8"),
+		STM32_FUNCTION(0, "GPIOE8"),
+		STM32_FUNCTION(2, "TIM1_CH1N"),
+		STM32_FUNCTION(9, "UART7_TX"),
+		STM32_FUNCTION(13, "FMC_D5"),
+		STM32_FUNCTION(16, "EVENTOUT"),
+		STM32_FUNCTION(17, "ANALOG")
+	),
+	STM32_PIN(
+		PINCTRL_PIN(73, "PE9"),
+		STM32_FUNCTION(0, "GPIOE9"),
+		STM32_FUNCTION(2, "TIM1_CH1"),
+		STM32_FUNCTION(13, "FMC_D6"),
+		STM32_FUNCTION(16, "EVENTOUT"),
+		STM32_FUNCTION(17, "ANALOG")
+	),
+	STM32_PIN(
+		PINCTRL_PIN(74, "PE10"),
+		STM32_FUNCTION(0, "GPIOE10"),
+		STM32_FUNCTION(2, "TIM1_CH2N"),
+		STM32_FUNCTION(13, "FMC_D7"),
+		STM32_FUNCTION(16, "EVENTOUT"),
+		STM32_FUNCTION(17, "ANALOG")
+	),
+	STM32_PIN(
+		PINCTRL_PIN(75, "PE11"),
+		STM32_FUNCTION(0, "GPIOE11"),
+		STM32_FUNCTION(2, "TIM1_CH2"),
+		STM32_FUNCTION(6, "SPI4_NSS"),
+		STM32_FUNCTION(13, "FMC_D8"),
+		STM32_FUNCTION(15, "LCD_G3"),
+		STM32_FUNCTION(16, "EVENTOUT"),
+		STM32_FUNCTION(17, "ANALOG")
+	),
+	STM32_PIN(
+		PINCTRL_PIN(76, "PE12"),
+		STM32_FUNCTION(0, "GPIOE12"),
+		STM32_FUNCTION(2, "TIM1_CH3N"),
+		STM32_FUNCTION(6, "SPI4_SCK"),
+		STM32_FUNCTION(13, "FMC_D9"),
+		STM32_FUNCTION(15, "LCD_B4"),
+		STM32_FUNCTION(16, "EVENTOUT"),
+		STM32_FUNCTION(17, "ANALOG")
+	),
+	STM32_PIN(
+		PINCTRL_PIN(77, "PE13"),
+		STM32_FUNCTION(0, "GPIOE13"),
+		STM32_FUNCTION(2, "TIM1_CH3"),
+		STM32_FUNCTION(6, "SPI4_MISO"),
+		STM32_FUNCTION(13, "FMC_D10"),
+		STM32_FUNCTION(15, "LCD_DE"),
+		STM32_FUNCTION(16, "EVENTOUT"),
+		STM32_FUNCTION(17, "ANALOG")
+	),
+	STM32_PIN(
+		PINCTRL_PIN(78, "PE14"),
+		STM32_FUNCTION(0, "GPIOE14"),
+		STM32_FUNCTION(2, "TIM1_CH4"),
+		STM32_FUNCTION(6, "SPI4_MOSI"),
+		STM32_FUNCTION(13, "FMC_D11"),
+		STM32_FUNCTION(15, "LCD_CLK"),
+		STM32_FUNCTION(16, "EVENTOUT"),
+		STM32_FUNCTION(17, "ANALOG")
+	),
+	STM32_PIN(
+		PINCTRL_PIN(79, "PE15"),
+		STM32_FUNCTION(0, "GPIOE15"),
+		STM32_FUNCTION(2, "TIM1_BKIN"),
+		STM32_FUNCTION(13, "FMC_D12"),
+		STM32_FUNCTION(15, "LCD_R7"),
+		STM32_FUNCTION(16, "EVENTOUT"),
+		STM32_FUNCTION(17, "ANALOG")
+	),
+	STM32_PIN(
+		PINCTRL_PIN(80, "PF0"),
+		STM32_FUNCTION(0, "GPIOF0"),
+		STM32_FUNCTION(5, "I2C2_SDA"),
+		STM32_FUNCTION(13, "FMC_A0"),
+		STM32_FUNCTION(16, "EVENTOUT"),
+		STM32_FUNCTION(17, "ANALOG")
+	),
+	STM32_PIN(
+		PINCTRL_PIN(81, "PF1"),
+		STM32_FUNCTION(0, "GPIOF1"),
+		STM32_FUNCTION(5, "I2C2_SCL"),
+		STM32_FUNCTION(13, "FMC_A1"),
+		STM32_FUNCTION(16, "EVENTOUT"),
+		STM32_FUNCTION(17, "ANALOG")
+	),
+	STM32_PIN(
+		PINCTRL_PIN(82, "PF2"),
+		STM32_FUNCTION(0, "GPIOF2"),
+		STM32_FUNCTION(5, "I2C2_SMBA"),
+		STM32_FUNCTION(13, "FMC_A2"),
+		STM32_FUNCTION(16, "EVENTOUT"),
+		STM32_FUNCTION(17, "ANALOG")
+	),
+	STM32_PIN(
+		PINCTRL_PIN(83, "PF3"),
+		STM32_FUNCTION(0, "GPIOF3"),
+		STM32_FUNCTION(13, "FMC_A3"),
+		STM32_FUNCTION(16, "EVENTOUT"),
+		STM32_FUNCTION(17, "ANALOG")
+	),
+	STM32_PIN(
+		PINCTRL_PIN(84, "PF4"),
+		STM32_FUNCTION(0, "GPIOF4"),
+		STM32_FUNCTION(13, "FMC_A4"),
+		STM32_FUNCTION(16, "EVENTOUT"),
+		STM32_FUNCTION(17, "ANALOG")
+	),
+	STM32_PIN(
+		PINCTRL_PIN(85, "PF5"),
+		STM32_FUNCTION(0, "GPIOF5"),
+		STM32_FUNCTION(13, "FMC_A5"),
+		STM32_FUNCTION(16, "EVENTOUT"),
+		STM32_FUNCTION(17, "ANALOG")
+	),
+	STM32_PIN(
+		PINCTRL_PIN(86, "PF6"),
+		STM32_FUNCTION(0, "GPIOF6"),
+		STM32_FUNCTION(4, "TIM10_CH1"),
+		STM32_FUNCTION(6, "SPI5_NSS"),
+		STM32_FUNCTION(7, "SAI1_SD_B"),
+		STM32_FUNCTION(9, "UART7_RX"),
+		STM32_FUNCTION(13, "FMC_NIORD"),
+		STM32_FUNCTION(16, "EVENTOUT"),
+		STM32_FUNCTION(17, "ANALOG")
+	),
+	STM32_PIN(
+		PINCTRL_PIN(87, "PF7"),
+		STM32_FUNCTION(0, "GPIOF7"),
+		STM32_FUNCTION(4, "TIM11_CH1"),
+		STM32_FUNCTION(6, "SPI5_SCK"),
+		STM32_FUNCTION(7, "SAI1_MCLK_B"),
+		STM32_FUNCTION(9, "UART7_TX"),
+		STM32_FUNCTION(13, "FMC_NREG"),
+		STM32_FUNCTION(16, "EVENTOUT"),
+		STM32_FUNCTION(17, "ANALOG")
+	),
+	STM32_PIN(
+		PINCTRL_PIN(88, "PF8"),
+		STM32_FUNCTION(0, "GPIOF8"),
+		STM32_FUNCTION(6, "SPI5_MISO"),
+		STM32_FUNCTION(7, "SAI1_SCK_B"),
+		STM32_FUNCTION(10, "TIM13_CH1"),
+		STM32_FUNCTION(13, "FMC_NIOWR"),
+		STM32_FUNCTION(16, "EVENTOUT"),
+		STM32_FUNCTION(17, "ANALOG")
+	),
+	STM32_PIN(
+		PINCTRL_PIN(89, "PF9"),
+		STM32_FUNCTION(0, "GPIOF9"),
+		STM32_FUNCTION(6, "SPI5_MOSI"),
+		STM32_FUNCTION(7, "SAI1_FS_B"),
+		STM32_FUNCTION(10, "TIM14_CH1"),
+		STM32_FUNCTION(13, "FMC_CD"),
+		STM32_FUNCTION(16, "EVENTOUT"),
+		STM32_FUNCTION(17, "ANALOG")
+	),
+	STM32_PIN(
+		PINCTRL_PIN(90, "PF10"),
+		STM32_FUNCTION(0, "GPIOF10"),
+		STM32_FUNCTION(13, "FMC_INTR"),
+		STM32_FUNCTION(14, "DCMI_D11"),
+		STM32_FUNCTION(15, "LCD_DE"),
+		STM32_FUNCTION(16, "EVENTOUT"),
+		STM32_FUNCTION(17, "ANALOG")
+	),
+	STM32_PIN(
+		PINCTRL_PIN(91, "PF11"),
+		STM32_FUNCTION(0, "GPIOF11"),
+		STM32_FUNCTION(6, "SPI5_MOSI"),
+		STM32_FUNCTION(13, "FMC_SDNRAS"),
+		STM32_FUNCTION(14, "DCMI_D12"),
+		STM32_FUNCTION(16, "EVENTOUT"),
+		STM32_FUNCTION(17, "ANALOG")
+	),
+	STM32_PIN(
+		PINCTRL_PIN(92, "PF12"),
+		STM32_FUNCTION(0, "GPIOF12"),
+		STM32_FUNCTION(13, "FMC_A6"),
+		STM32_FUNCTION(16, "EVENTOUT"),
+		STM32_FUNCTION(17, "ANALOG")
+	),
+	STM32_PIN(
+		PINCTRL_PIN(93, "PF13"),
+		STM32_FUNCTION(0, "GPIOF13"),
+		STM32_FUNCTION(13, "FMC_A7"),
+		STM32_FUNCTION(16, "EVENTOUT"),
+		STM32_FUNCTION(17, "ANALOG")
+	),
+	STM32_PIN(
+		PINCTRL_PIN(94, "PF14"),
+		STM32_FUNCTION(0, "GPIOF14"),
+		STM32_FUNCTION(13, "FMC_A8"),
+		STM32_FUNCTION(16, "EVENTOUT"),
+		STM32_FUNCTION(17, "ANALOG")
+	),
+	STM32_PIN(
+		PINCTRL_PIN(95, "PF15"),
+		STM32_FUNCTION(0, "GPIOF15"),
+		STM32_FUNCTION(13, "FMC_A9"),
+		STM32_FUNCTION(16, "EVENTOUT"),
+		STM32_FUNCTION(17, "ANALOG")
+	),
+	STM32_PIN(
+		PINCTRL_PIN(96, "PG0"),
+		STM32_FUNCTION(0, "GPIOG0"),
+		STM32_FUNCTION(13, "FMC_A10"),
+		STM32_FUNCTION(16, "EVENTOUT"),
+		STM32_FUNCTION(17, "ANALOG")
+	),
+	STM32_PIN(
+		PINCTRL_PIN(97, "PG1"),
+		STM32_FUNCTION(0, "GPIOG1"),
+		STM32_FUNCTION(13, "FMC_A11"),
+		STM32_FUNCTION(16, "EVENTOUT"),
+		STM32_FUNCTION(17, "ANALOG")
+	),
+	STM32_PIN(
+		PINCTRL_PIN(98, "PG2"),
+		STM32_FUNCTION(0, "GPIOG2"),
+		STM32_FUNCTION(13, "FMC_A12"),
+		STM32_FUNCTION(16, "EVENTOUT"),
+		STM32_FUNCTION(17, "ANALOG")
+	),
+	STM32_PIN(
+		PINCTRL_PIN(99, "PG3"),
+		STM32_FUNCTION(0, "GPIOG3"),
+		STM32_FUNCTION(13, "FMC_A13"),
+		STM32_FUNCTION(16, "EVENTOUT"),
+		STM32_FUNCTION(17, "ANALOG")
+	),
+	STM32_PIN(
+		PINCTRL_PIN(100, "PG4"),
+		STM32_FUNCTION(0, "GPIOG4"),
+		STM32_FUNCTION(13, "FMC_A14 FMC_BA0"),
+		STM32_FUNCTION(16, "EVENTOUT"),
+		STM32_FUNCTION(17, "ANALOG")
+	),
+	STM32_PIN(
+		PINCTRL_PIN(101, "PG5"),
+		STM32_FUNCTION(0, "GPIOG5"),
+		STM32_FUNCTION(13, "FMC_A15 FMC_BA1"),
+		STM32_FUNCTION(16, "EVENTOUT"),
+		STM32_FUNCTION(17, "ANALOG")
+	),
+	STM32_PIN(
+		PINCTRL_PIN(102, "PG6"),
+		STM32_FUNCTION(0, "GPIOG6"),
+		STM32_FUNCTION(13, "FMC_INT2"),
+		STM32_FUNCTION(14, "DCMI_D12"),
+		STM32_FUNCTION(15, "LCD_R7"),
+		STM32_FUNCTION(16, "EVENTOUT"),
+		STM32_FUNCTION(17, "ANALOG")
+	),
+	STM32_PIN(
+		PINCTRL_PIN(103, "PG7"),
+		STM32_FUNCTION(0, "GPIOG7"),
+		STM32_FUNCTION(9, "USART6_CK"),
+		STM32_FUNCTION(13, "FMC_INT3"),
+		STM32_FUNCTION(14, "DCMI_D13"),
+		STM32_FUNCTION(15, "LCD_CLK"),
+		STM32_FUNCTION(16, "EVENTOUT"),
+		STM32_FUNCTION(17, "ANALOG")
+	),
+	STM32_PIN(
+		PINCTRL_PIN(104, "PG8"),
+		STM32_FUNCTION(0, "GPIOG8"),
+		STM32_FUNCTION(6, "SPI6_NSS"),
+		STM32_FUNCTION(9, "USART6_RTS"),
+		STM32_FUNCTION(12, "ETH_PPS_OUT"),
+		STM32_FUNCTION(13, "FMC_SDCLK"),
+		STM32_FUNCTION(16, "EVENTOUT"),
+		STM32_FUNCTION(17, "ANALOG")
+	),
+	STM32_PIN(
+		PINCTRL_PIN(105, "PG9"),
+		STM32_FUNCTION(0, "GPIOG9"),
+		STM32_FUNCTION(9, "USART6_RX"),
+		STM32_FUNCTION(13, "FMC_NE2 FMC_NCE3"),
+		STM32_FUNCTION(14, "DCMI_VSYNC"),
+		STM32_FUNCTION(16, "EVENTOUT"),
+		STM32_FUNCTION(17, "ANALOG")
+	),
+	STM32_PIN(
+		PINCTRL_PIN(106, "PG10"),
+		STM32_FUNCTION(0, "GPIOG10"),
+		STM32_FUNCTION(10, "LCD_G3"),
+		STM32_FUNCTION(13, "FMC_NCE4_1 FMC_NE3"),
+		STM32_FUNCTION(14, "DCMI_D2"),
+		STM32_FUNCTION(15, "LCD_B2"),
+		STM32_FUNCTION(16, "EVENTOUT"),
+		STM32_FUNCTION(17, "ANALOG")
+	),
+	STM32_PIN(
+		PINCTRL_PIN(107, "PG11"),
+		STM32_FUNCTION(0, "GPIOG11"),
+		STM32_FUNCTION(12, "ETH_MII_TX_EN ETH_RMII_TX_EN"),
+		STM32_FUNCTION(13, "FMC_NCE4_2"),
+		STM32_FUNCTION(14, "DCMI_D3"),
+		STM32_FUNCTION(15, "LCD_B3"),
+		STM32_FUNCTION(16, "EVENTOUT"),
+		STM32_FUNCTION(17, "ANALOG")
+	),
+	STM32_PIN(
+		PINCTRL_PIN(108, "PG12"),
+		STM32_FUNCTION(0, "GPIOG12"),
+		STM32_FUNCTION(6, "SPI6_MISO"),
+		STM32_FUNCTION(9, "USART6_RTS"),
+		STM32_FUNCTION(10, "LCD_B4"),
+		STM32_FUNCTION(13, "FMC_NE4"),
+		STM32_FUNCTION(15, "LCD_B1"),
+		STM32_FUNCTION(16, "EVENTOUT"),
+		STM32_FUNCTION(17, "ANALOG")
+	),
+	STM32_PIN(
+		PINCTRL_PIN(109, "PG13"),
+		STM32_FUNCTION(0, "GPIOG13"),
+		STM32_FUNCTION(6, "SPI6_SCK"),
+		STM32_FUNCTION(9, "USART6_CTS"),
+		STM32_FUNCTION(12, "ETH_MII_TXD0 ETH_RMII_TXD0"),
+		STM32_FUNCTION(13, "FMC_A24"),
+		STM32_FUNCTION(16, "EVENTOUT"),
+		STM32_FUNCTION(17, "ANALOG")
+	),
+	STM32_PIN(
+		PINCTRL_PIN(110, "PG14"),
+		STM32_FUNCTION(0, "GPIOG14"),
+		STM32_FUNCTION(6, "SPI6_MOSI"),
+		STM32_FUNCTION(9, "USART6_TX"),
+		STM32_FUNCTION(12, "ETH_MII_TXD1 ETH_RMII_TXD1"),
+		STM32_FUNCTION(13, "FMC_A25"),
+		STM32_FUNCTION(16, "EVENTOUT"),
+		STM32_FUNCTION(17, "ANALOG")
+	),
+	STM32_PIN(
+		PINCTRL_PIN(111, "PG15"),
+		STM32_FUNCTION(0, "GPIOG15"),
+		STM32_FUNCTION(9, "USART6_CTS"),
+		STM32_FUNCTION(13, "FMC_SDNCAS"),
+		STM32_FUNCTION(14, "DCMI_D13"),
+		STM32_FUNCTION(16, "EVENTOUT"),
+		STM32_FUNCTION(17, "ANALOG")
+	),
+	STM32_PIN(
+		PINCTRL_PIN(112, "PH0"),
+		STM32_FUNCTION(0, "GPIOH0"),
+		STM32_FUNCTION(16, "EVENTOUT"),
+		STM32_FUNCTION(17, "ANALOG")
+	),
+	STM32_PIN(
+		PINCTRL_PIN(113, "PH1"),
+		STM32_FUNCTION(0, "GPIOH1"),
+		STM32_FUNCTION(16, "EVENTOUT"),
+		STM32_FUNCTION(17, "ANALOG")
+	),
+	STM32_PIN(
+		PINCTRL_PIN(114, "PH2"),
+		STM32_FUNCTION(0, "GPIOH2"),
+		STM32_FUNCTION(12, "ETH_MII_CRS"),
+		STM32_FUNCTION(13, "FMC_SDCKE0"),
+		STM32_FUNCTION(15, "LCD_R0"),
+		STM32_FUNCTION(16, "EVENTOUT"),
+		STM32_FUNCTION(17, "ANALOG")
+	),
+	STM32_PIN(
+		PINCTRL_PIN(115, "PH3"),
+		STM32_FUNCTION(0, "GPIOH3"),
+		STM32_FUNCTION(12, "ETH_MII_COL"),
+		STM32_FUNCTION(13, "FMC_SDNE0"),
+		STM32_FUNCTION(15, "LCD_R1"),
+		STM32_FUNCTION(16, "EVENTOUT"),
+		STM32_FUNCTION(17, "ANALOG")
+	),
+	STM32_PIN(
+		PINCTRL_PIN(116, "PH4"),
+		STM32_FUNCTION(0, "GPIOH4"),
+		STM32_FUNCTION(5, "I2C2_SCL"),
+		STM32_FUNCTION(11, "OTG_HS_ULPI_NXT"),
+		STM32_FUNCTION(16, "EVENTOUT"),
+		STM32_FUNCTION(17, "ANALOG")
+	),
+	STM32_PIN(
+		PINCTRL_PIN(117, "PH5"),
+		STM32_FUNCTION(0, "GPIOH5"),
+		STM32_FUNCTION(5, "I2C2_SDA"),
+		STM32_FUNCTION(6, "SPI5_NSS"),
+		STM32_FUNCTION(13, "FMC_SDNWE"),
+		STM32_FUNCTION(16, "EVENTOUT"),
+		STM32_FUNCTION(17, "ANALOG")
+	),
+	STM32_PIN(
+		PINCTRL_PIN(118, "PH6"),
+		STM32_FUNCTION(0, "GPIOH6"),
+		STM32_FUNCTION(5, "I2C2_SMBA"),
+		STM32_FUNCTION(6, "SPI5_SCK"),
+		STM32_FUNCTION(10, "TIM12_CH1"),
+		STM32_FUNCTION(12, "ETH_MII_RXD2"),
+		STM32_FUNCTION(13, "FMC_SDNE1"),
+		STM32_FUNCTION(14, "DCMI_D8"),
+		STM32_FUNCTION(16, "EVENTOUT"),
+		STM32_FUNCTION(17, "ANALOG")
+	),
+	STM32_PIN(
+		PINCTRL_PIN(119, "PH7"),
+		STM32_FUNCTION(0, "GPIOH7"),
+		STM32_FUNCTION(5, "I2C3_SCL"),
+		STM32_FUNCTION(6, "SPI5_MISO"),
+		STM32_FUNCTION(12, "ETH_MII_RXD3"),
+		STM32_FUNCTION(13, "FMC_SDCKE1"),
+		STM32_FUNCTION(14, "DCMI_D9"),
+		STM32_FUNCTION(16, "EVENTOUT"),
+		STM32_FUNCTION(17, "ANALOG")
+	),
+	STM32_PIN(
+		PINCTRL_PIN(120, "PH8"),
+		STM32_FUNCTION(0, "GPIOH8"),
+		STM32_FUNCTION(5, "I2C3_SDA"),
+		STM32_FUNCTION(13, "FMC_D16"),
+		STM32_FUNCTION(14, "DCMI_HSYNC"),
+		STM32_FUNCTION(15, "LCD_R2"),
+		STM32_FUNCTION(16, "EVENTOUT"),
+		STM32_FUNCTION(17, "ANALOG")
+	),
+	STM32_PIN(
+		PINCTRL_PIN(121, "PH9"),
+		STM32_FUNCTION(0, "GPIOH9"),
+		STM32_FUNCTION(5, "I2C3_SMBA"),
+		STM32_FUNCTION(10, "TIM12_CH2"),
+		STM32_FUNCTION(13, "FMC_D17"),
+		STM32_FUNCTION(14, "DCMI_D0"),
+		STM32_FUNCTION(15, "LCD_R3"),
+		STM32_FUNCTION(16, "EVENTOUT"),
+		STM32_FUNCTION(17, "ANALOG")
+	),
+	STM32_PIN(
+		PINCTRL_PIN(122, "PH10"),
+		STM32_FUNCTION(0, "GPIOH10"),
+		STM32_FUNCTION(3, "TIM5_CH1"),
+		STM32_FUNCTION(13, "FMC_D18"),
+		STM32_FUNCTION(14, "DCMI_D1"),
+		STM32_FUNCTION(15, "LCD_R4"),
+		STM32_FUNCTION(16, "EVENTOUT"),
+		STM32_FUNCTION(17, "ANALOG")
+	),
+	STM32_PIN(
+		PINCTRL_PIN(123, "PH11"),
+		STM32_FUNCTION(0, "GPIOH11"),
+		STM32_FUNCTION(3, "TIM5_CH2"),
+		STM32_FUNCTION(13, "FMC_D19"),
+		STM32_FUNCTION(14, "DCMI_D2"),
+		STM32_FUNCTION(15, "LCD_R5"),
+		STM32_FUNCTION(16, "EVENTOUT"),
+		STM32_FUNCTION(17, "ANALOG")
+	),
+	STM32_PIN(
+		PINCTRL_PIN(124, "PH12"),
+		STM32_FUNCTION(0, "GPIOH12"),
+		STM32_FUNCTION(3, "TIM5_CH3"),
+		STM32_FUNCTION(13, "FMC_D20"),
+		STM32_FUNCTION(14, "DCMI_D3"),
+		STM32_FUNCTION(15, "LCD_R6"),
+		STM32_FUNCTION(16, "EVENTOUT"),
+		STM32_FUNCTION(17, "ANALOG")
+	),
+	STM32_PIN(
+		PINCTRL_PIN(125, "PH13"),
+		STM32_FUNCTION(0, "GPIOH13"),
+		STM32_FUNCTION(4, "TIM8_CH1N"),
+		STM32_FUNCTION(10, "CAN1_TX"),
+		STM32_FUNCTION(13, "FMC_D21"),
+		STM32_FUNCTION(15, "LCD_G2"),
+		STM32_FUNCTION(16, "EVENTOUT"),
+		STM32_FUNCTION(17, "ANALOG")
+	),
+	STM32_PIN(
+		PINCTRL_PIN(126, "PH14"),
+		STM32_FUNCTION(0, "GPIOH14"),
+		STM32_FUNCTION(4, "TIM8_CH2N"),
+		STM32_FUNCTION(13, "FMC_D22"),
+		STM32_FUNCTION(14, "DCMI_D4"),
+		STM32_FUNCTION(15, "LCD_G3"),
+		STM32_FUNCTION(16, "EVENTOUT"),
+		STM32_FUNCTION(17, "ANALOG")
+	),
+	STM32_PIN(
+		PINCTRL_PIN(127, "PH15"),
+		STM32_FUNCTION(0, "GPIOH15"),
+		STM32_FUNCTION(4, "TIM8_CH3N"),
+		STM32_FUNCTION(13, "FMC_D23"),
+		STM32_FUNCTION(14, "DCMI_D11"),
+		STM32_FUNCTION(15, "LCD_G4"),
+		STM32_FUNCTION(16, "EVENTOUT"),
+		STM32_FUNCTION(17, "ANALOG")
+	),
+	STM32_PIN(
+		PINCTRL_PIN(128, "PI0"),
+		STM32_FUNCTION(0, "GPIOI0"),
+		STM32_FUNCTION(3, "TIM5_CH4"),
+		STM32_FUNCTION(6, "SPI2_NSS I2S2_WS"),
+		STM32_FUNCTION(13, "FMC_D24"),
+		STM32_FUNCTION(14, "DCMI_D13"),
+		STM32_FUNCTION(15, "LCD_G5"),
+		STM32_FUNCTION(16, "EVENTOUT"),
+		STM32_FUNCTION(17, "ANALOG")
+	),
+	STM32_PIN(
+		PINCTRL_PIN(129, "PI1"),
+		STM32_FUNCTION(0, "GPIOI1"),
+		STM32_FUNCTION(6, "SPI2_SCK I2S2_CK"),
+		STM32_FUNCTION(13, "FMC_D25"),
+		STM32_FUNCTION(14, "DCMI_D8"),
+		STM32_FUNCTION(15, "LCD_G6"),
+		STM32_FUNCTION(16, "EVENTOUT"),
+		STM32_FUNCTION(17, "ANALOG")
+	),
+	STM32_PIN(
+		PINCTRL_PIN(130, "PI2"),
+		STM32_FUNCTION(0, "GPIOI2"),
+		STM32_FUNCTION(4, "TIM8_CH4"),
+		STM32_FUNCTION(6, "SPI2_MISO"),
+		STM32_FUNCTION(7, "I2S2EXT_SD"),
+		STM32_FUNCTION(13, "FMC_D26"),
+		STM32_FUNCTION(14, "DCMI_D9"),
+		STM32_FUNCTION(15, "LCD_G7"),
+		STM32_FUNCTION(16, "EVENTOUT"),
+		STM32_FUNCTION(17, "ANALOG")
+	),
+	STM32_PIN(
+		PINCTRL_PIN(131, "PI3"),
+		STM32_FUNCTION(0, "GPIOI3"),
+		STM32_FUNCTION(4, "TIM8_ETR"),
+		STM32_FUNCTION(6, "SPI2_MOSI I2S2_SD"),
+		STM32_FUNCTION(13, "FMC_D27"),
+		STM32_FUNCTION(14, "DCMI_D10"),
+		STM32_FUNCTION(16, "EVENTOUT"),
+		STM32_FUNCTION(17, "ANALOG")
+	),
+	STM32_PIN(
+		PINCTRL_PIN(132, "PI4"),
+		STM32_FUNCTION(0, "GPIOI4"),
+		STM32_FUNCTION(4, "TIM8_BKIN"),
+		STM32_FUNCTION(13, "FMC_NBL2"),
+		STM32_FUNCTION(14, "DCMI_D5"),
+		STM32_FUNCTION(15, "LCD_B4"),
+		STM32_FUNCTION(16, "EVENTOUT"),
+		STM32_FUNCTION(17, "ANALOG")
+	),
+	STM32_PIN(
+		PINCTRL_PIN(133, "PI5"),
+		STM32_FUNCTION(0, "GPIOI5"),
+		STM32_FUNCTION(4, "TIM8_CH1"),
+		STM32_FUNCTION(13, "FMC_NBL3"),
+		STM32_FUNCTION(14, "DCMI_VSYNC"),
+		STM32_FUNCTION(15, "LCD_B5"),
+		STM32_FUNCTION(16, "EVENTOUT"),
+		STM32_FUNCTION(17, "ANALOG")
+	),
+	STM32_PIN(
+		PINCTRL_PIN(134, "PI6"),
+		STM32_FUNCTION(0, "GPIOI6"),
+		STM32_FUNCTION(4, "TIM8_CH2"),
+		STM32_FUNCTION(13, "FMC_D28"),
+		STM32_FUNCTION(14, "DCMI_D6"),
+		STM32_FUNCTION(15, "LCD_B6"),
+		STM32_FUNCTION(16, "EVENTOUT"),
+		STM32_FUNCTION(17, "ANALOG")
+	),
+	STM32_PIN(
+		PINCTRL_PIN(135, "PI7"),
+		STM32_FUNCTION(0, "GPIOI7"),
+		STM32_FUNCTION(4, "TIM8_CH3"),
+		STM32_FUNCTION(13, "FMC_D29"),
+		STM32_FUNCTION(14, "DCMI_D7"),
+		STM32_FUNCTION(15, "LCD_B7"),
+		STM32_FUNCTION(16, "EVENTOUT"),
+		STM32_FUNCTION(17, "ANALOG")
+	),
+	STM32_PIN(
+		PINCTRL_PIN(136, "PI8"),
+		STM32_FUNCTION(0, "GPIOI8"),
+		STM32_FUNCTION(16, "EVENTOUT"),
+		STM32_FUNCTION(17, "ANALOG")
+	),
+	STM32_PIN(
+		PINCTRL_PIN(137, "PI9"),
+		STM32_FUNCTION(0, "GPIOI9"),
+		STM32_FUNCTION(10, "CAN1_RX"),
+		STM32_FUNCTION(13, "FMC_D30"),
+		STM32_FUNCTION(15, "LCD_VSYNC"),
+		STM32_FUNCTION(16, "EVENTOUT"),
+		STM32_FUNCTION(17, "ANALOG")
+	),
+	STM32_PIN(
+		PINCTRL_PIN(138, "PI10"),
+		STM32_FUNCTION(0, "GPIOI10"),
+		STM32_FUNCTION(12, "ETH_MII_RX_ER"),
+		STM32_FUNCTION(13, "FMC_D31"),
+		STM32_FUNCTION(15, "LCD_HSYNC"),
+		STM32_FUNCTION(16, "EVENTOUT"),
+		STM32_FUNCTION(17, "ANALOG")
+	),
+	STM32_PIN(
+		PINCTRL_PIN(139, "PI11"),
+		STM32_FUNCTION(0, "GPIOI11"),
+		STM32_FUNCTION(11, "OTG_HS_ULPI_DIR"),
+		STM32_FUNCTION(16, "EVENTOUT"),
+		STM32_FUNCTION(17, "ANALOG")
+	),
+	STM32_PIN(
+		PINCTRL_PIN(140, "PI12"),
+		STM32_FUNCTION(0, "GPIOI12"),
+		STM32_FUNCTION(15, "LCD_HSYNC"),
+		STM32_FUNCTION(16, "EVENTOUT"),
+		STM32_FUNCTION(17, "ANALOG")
+	),
+	STM32_PIN(
+		PINCTRL_PIN(141, "PI13"),
+		STM32_FUNCTION(0, "GPIOI13"),
+		STM32_FUNCTION(15, "LCD_VSYNC"),
+		STM32_FUNCTION(16, "EVENTOUT"),
+		STM32_FUNCTION(17, "ANALOG")
+	),
+	STM32_PIN(
+		PINCTRL_PIN(142, "PI14"),
+		STM32_FUNCTION(0, "GPIOI14"),
+		STM32_FUNCTION(15, "LCD_CLK"),
+		STM32_FUNCTION(16, "EVENTOUT"),
+		STM32_FUNCTION(17, "ANALOG")
+	),
+	STM32_PIN(
+		PINCTRL_PIN(143, "PI15"),
+		STM32_FUNCTION(0, "GPIOI15"),
+		STM32_FUNCTION(15, "LCD_R0"),
+		STM32_FUNCTION(16, "EVENTOUT"),
+		STM32_FUNCTION(17, "ANALOG")
+	),
+	STM32_PIN(
+		PINCTRL_PIN(144, "PJ0"),
+		STM32_FUNCTION(0, "GPIOJ0"),
+		STM32_FUNCTION(15, "LCD_R1"),
+		STM32_FUNCTION(16, "EVENTOUT"),
+		STM32_FUNCTION(17, "ANALOG")
+	),
+	STM32_PIN(
+		PINCTRL_PIN(145, "PJ1"),
+		STM32_FUNCTION(0, "GPIOJ1"),
+		STM32_FUNCTION(15, "LCD_R2"),
+		STM32_FUNCTION(16, "EVENTOUT"),
+		STM32_FUNCTION(17, "ANALOG")
+	),
+	STM32_PIN(
+		PINCTRL_PIN(146, "PJ2"),
+		STM32_FUNCTION(0, "GPIOJ2"),
+		STM32_FUNCTION(15, "LCD_R3"),
+		STM32_FUNCTION(16, "EVENTOUT"),
+		STM32_FUNCTION(17, "ANALOG")
+	),
+	STM32_PIN(
+		PINCTRL_PIN(147, "PJ3"),
+		STM32_FUNCTION(0, "GPIOJ3"),
+		STM32_FUNCTION(15, "LCD_R4"),
+		STM32_FUNCTION(16, "EVENTOUT"),
+		STM32_FUNCTION(17, "ANALOG")
+	),
+	STM32_PIN(
+		PINCTRL_PIN(148, "PJ4"),
+		STM32_FUNCTION(0, "GPIOJ4"),
+		STM32_FUNCTION(15, "LCD_R5"),
+		STM32_FUNCTION(16, "EVENTOUT"),
+		STM32_FUNCTION(17, "ANALOG")
+	),
+	STM32_PIN(
+		PINCTRL_PIN(149, "PJ5"),
+		STM32_FUNCTION(0, "GPIOJ5"),
+		STM32_FUNCTION(15, "LCD_R6"),
+		STM32_FUNCTION(16, "EVENTOUT"),
+		STM32_FUNCTION(17, "ANALOG")
+	),
+	STM32_PIN(
+		PINCTRL_PIN(150, "PJ6"),
+		STM32_FUNCTION(0, "GPIOJ6"),
+		STM32_FUNCTION(15, "LCD_R7"),
+		STM32_FUNCTION(16, "EVENTOUT"),
+		STM32_FUNCTION(17, "ANALOG")
+	),
+	STM32_PIN(
+		PINCTRL_PIN(151, "PJ7"),
+		STM32_FUNCTION(0, "GPIOJ7"),
+		STM32_FUNCTION(15, "LCD_G0"),
+		STM32_FUNCTION(16, "EVENTOUT"),
+		STM32_FUNCTION(17, "ANALOG")
+	),
+	STM32_PIN(
+		PINCTRL_PIN(152, "PJ8"),
+		STM32_FUNCTION(0, "GPIOJ8"),
+		STM32_FUNCTION(15, "LCD_G1"),
+		STM32_FUNCTION(16, "EVENTOUT"),
+		STM32_FUNCTION(17, "ANALOG")
+	),
+	STM32_PIN(
+		PINCTRL_PIN(153, "PJ9"),
+		STM32_FUNCTION(0, "GPIOJ9"),
+		STM32_FUNCTION(15, "LCD_G2"),
+		STM32_FUNCTION(16, "EVENTOUT"),
+		STM32_FUNCTION(17, "ANALOG")
+	),
+	STM32_PIN(
+		PINCTRL_PIN(154, "PJ10"),
+		STM32_FUNCTION(0, "GPIOJ10"),
+		STM32_FUNCTION(15, "LCD_G3"),
+		STM32_FUNCTION(16, "EVENTOUT"),
+		STM32_FUNCTION(17, "ANALOG")
+	),
+	STM32_PIN(
+		PINCTRL_PIN(155, "PJ11"),
+		STM32_FUNCTION(0, "GPIOJ11"),
+		STM32_FUNCTION(15, "LCD_G4"),
+		STM32_FUNCTION(16, "EVENTOUT"),
+		STM32_FUNCTION(17, "ANALOG")
+	),
+	STM32_PIN(
+		PINCTRL_PIN(156, "PJ12"),
+		STM32_FUNCTION(0, "GPIOJ12"),
+		STM32_FUNCTION(15, "LCD_B0"),
+		STM32_FUNCTION(16, "EVENTOUT"),
+		STM32_FUNCTION(17, "ANALOG")
+	),
+	STM32_PIN(
+		PINCTRL_PIN(157, "PJ13"),
+		STM32_FUNCTION(0, "GPIOJ13"),
+		STM32_FUNCTION(15, "LCD_B1"),
+		STM32_FUNCTION(16, "EVENTOUT"),
+		STM32_FUNCTION(17, "ANALOG")
+	),
+	STM32_PIN(
+		PINCTRL_PIN(158, "PJ14"),
+		STM32_FUNCTION(0, "GPIOJ14"),
+		STM32_FUNCTION(15, "LCD_B2"),
+		STM32_FUNCTION(16, "EVENTOUT"),
+		STM32_FUNCTION(17, "ANALOG")
+	),
+	STM32_PIN(
+		PINCTRL_PIN(159, "PJ15"),
+		STM32_FUNCTION(0, "GPIOJ15"),
+		STM32_FUNCTION(15, "LCD_B3"),
+		STM32_FUNCTION(16, "EVENTOUT"),
+		STM32_FUNCTION(17, "ANALOG")
+	),
+	STM32_PIN(
+		PINCTRL_PIN(160, "PK0"),
+		STM32_FUNCTION(0, "GPIOK0"),
+		STM32_FUNCTION(15, "LCD_G5"),
+		STM32_FUNCTION(16, "EVENTOUT"),
+		STM32_FUNCTION(17, "ANALOG")
+	),
+	STM32_PIN(
+		PINCTRL_PIN(161, "PK1"),
+		STM32_FUNCTION(0, "GPIOK1"),
+		STM32_FUNCTION(15, "LCD_G6"),
+		STM32_FUNCTION(16, "EVENTOUT"),
+		STM32_FUNCTION(17, "ANALOG")
+	),
+	STM32_PIN(
+		PINCTRL_PIN(162, "PK2"),
+		STM32_FUNCTION(0, "GPIOK2"),
+		STM32_FUNCTION(15, "LCD_G7"),
+		STM32_FUNCTION(16, "EVENTOUT"),
+		STM32_FUNCTION(17, "ANALOG")
+	),
+	STM32_PIN(
+		PINCTRL_PIN(163, "PK3"),
+		STM32_FUNCTION(0, "GPIOK3"),
+		STM32_FUNCTION(15, "LCD_B4"),
+		STM32_FUNCTION(16, "EVENTOUT"),
+		STM32_FUNCTION(17, "ANALOG")
+	),
+	STM32_PIN(
+		PINCTRL_PIN(164, "PK4"),
+		STM32_FUNCTION(0, "GPIOK4"),
+		STM32_FUNCTION(15, "LCD_B5"),
+		STM32_FUNCTION(16, "EVENTOUT"),
+		STM32_FUNCTION(17, "ANALOG")
+	),
+	STM32_PIN(
+		PINCTRL_PIN(165, "PK5"),
+		STM32_FUNCTION(0, "GPIOK5"),
+		STM32_FUNCTION(15, "LCD_B6"),
+		STM32_FUNCTION(16, "EVENTOUT"),
+		STM32_FUNCTION(17, "ANALOG")
+	),
+	STM32_PIN(
+		PINCTRL_PIN(166, "PK6"),
+		STM32_FUNCTION(0, "GPIOK6"),
+		STM32_FUNCTION(15, "LCD_B7"),
+		STM32_FUNCTION(16, "EVENTOUT"),
+		STM32_FUNCTION(17, "ANALOG")
+	),
+	STM32_PIN(
+		PINCTRL_PIN(167, "PK7"),
+		STM32_FUNCTION(0, "GPIOK7"),
+		STM32_FUNCTION(15, "LCD_DE"),
+		STM32_FUNCTION(16, "EVENTOUT"),
+		STM32_FUNCTION(17, "ANALOG")
+	),
+};
+
+static struct stm32_pinctrl_match_data stm32f429_match_data = {
+	.pins = stm32f429_pins,
+	.npins = ARRAY_SIZE(stm32f429_pins),
+};
+
+static const struct of_device_id stm32f429_pctrl_match[] = {
+	{
+		.compatible = "st,stm32f429-pinctrl",
+		.data = &stm32f429_match_data,
+	},
+	{ }
+};
+
+static struct platform_driver stm32f429_pinctrl_driver = {
+	.probe = stm32_pctl_probe,
+	.driver = {
+		.name = "stm32f429-pinctrl",
+		.of_match_table = stm32f429_pctrl_match,
+	},
+};
+
+static int __init stm32f429_pinctrl_init(void)
+{
+	return platform_driver_register(&stm32f429_pinctrl_driver);
+}
+device_initcall(stm32f429_pinctrl_init);
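
The file above follows a simple per-SoC pattern: a pin/function table, a stm32_pinctrl_match_data wrapper, and a platform driver whose .probe is the shared stm32_pctl_probe(). Purely as a hypothetical sketch of how a further variant would plug into the same core (the stm32f746 names, the single pin entry and the struct stm32_desc_pin element type are placeholders assumed here, not something this series adds):

	/* Hypothetical sibling of pinctrl-stm32f429.c; relies on the same
	 * shared pinctrl-stm32 core as the file above. */
	static const struct stm32_desc_pin stm32f746_pins[] = {
		STM32_PIN(
			PINCTRL_PIN(0, "PA0"),
			STM32_FUNCTION(0, "GPIOA0"),
			STM32_FUNCTION(16, "EVENTOUT"),
			STM32_FUNCTION(17, "ANALOG")
		),
		/* ... one STM32_PIN() entry per pin ... */
	};

	static struct stm32_pinctrl_match_data stm32f746_match_data = {
		.pins = stm32f746_pins,
		.npins = ARRAY_SIZE(stm32f746_pins),
	};

	static const struct of_device_id stm32f746_pctrl_match[] = {
		{
			.compatible = "st,stm32f746-pinctrl",
			.data = &stm32f746_match_data,
		},
		{ }
	};

	static struct platform_driver stm32f746_pinctrl_driver = {
		.probe = stm32_pctl_probe,
		.driver = {
			.name = "stm32f746-pinctrl",
			.of_match_table = stm32f746_pctrl_match,
		},
	};

	static int __init stm32f746_pinctrl_init(void)
	{
		return platform_driver_register(&stm32f746_pinctrl_driver);
	}
	device_initcall(stm32f746_pinctrl_init);

Only the pin table and the compatible string differ; everything else is handled by the shared probe.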
diff --git a/drivers/pinctrl/sunxi/Kconfig b/drivers/pinctrl/sunxi/Kconfig
index f8dbc8b..aaf075b 100644
--- a/drivers/pinctrl/sunxi/Kconfig
+++ b/drivers/pinctrl/sunxi/Kconfig
@@ -1,67 +1,75 @@
 if ARCH_SUNXI
 
-config PINCTRL_SUNXI_COMMON
+config PINCTRL_SUNXI
 	bool
 	select PINMUX
 	select GENERIC_PINCONF
 
 config PINCTRL_SUN4I_A10
 	def_bool MACH_SUN4I
-	select PINCTRL_SUNXI_COMMON
+	select PINCTRL_SUNXI
 
 config PINCTRL_SUN5I_A10S
 	def_bool MACH_SUN5I
-	select PINCTRL_SUNXI_COMMON
+	select PINCTRL_SUNXI
 
 config PINCTRL_SUN5I_A13
 	def_bool MACH_SUN5I
-	select PINCTRL_SUNXI_COMMON
+	select PINCTRL_SUNXI
 
 config PINCTRL_SUN6I_A31
 	def_bool MACH_SUN6I
-	select PINCTRL_SUNXI_COMMON
+	select PINCTRL_SUNXI
 
 config PINCTRL_SUN6I_A31S
 	def_bool MACH_SUN6I
-	select PINCTRL_SUNXI_COMMON
+	select PINCTRL_SUNXI
 
 config PINCTRL_SUN6I_A31_R
 	def_bool MACH_SUN6I
 	depends on RESET_CONTROLLER
-	select PINCTRL_SUNXI_COMMON
+	select PINCTRL_SUNXI
 
 config PINCTRL_SUN7I_A20
 	def_bool MACH_SUN7I
-	select PINCTRL_SUNXI_COMMON
+	select PINCTRL_SUNXI
 
 config PINCTRL_SUN8I_A23
 	def_bool MACH_SUN8I
-	select PINCTRL_SUNXI_COMMON
+	select PINCTRL_SUNXI
 
 config PINCTRL_SUN8I_A33
 	def_bool MACH_SUN8I
-	select PINCTRL_SUNXI_COMMON
+	select PINCTRL_SUNXI
 
 config PINCTRL_SUN8I_A83T
 	def_bool MACH_SUN8I
-	select PINCTRL_SUNXI_COMMON
+	select PINCTRL_SUNXI
 
 config PINCTRL_SUN8I_A23_R
 	def_bool MACH_SUN8I
 	depends on RESET_CONTROLLER
-	select PINCTRL_SUNXI_COMMON
+	select PINCTRL_SUNXI
 
 config PINCTRL_SUN8I_H3
 	def_bool MACH_SUN8I
+	select PINCTRL_SUNXI
+
+config PINCTRL_SUN8I_H3_R
+	def_bool MACH_SUN8I
-	select PINCTRL_SUNXI_COMMON
+	select PINCTRL_SUNXI
 
 config PINCTRL_SUN9I_A80
 	def_bool MACH_SUN9I
-	select PINCTRL_SUNXI_COMMON
+	select PINCTRL_SUNXI
 
 config PINCTRL_SUN9I_A80_R
 	def_bool MACH_SUN9I
 	depends on RESET_CONTROLLER
-	select PINCTRL_SUNXI_COMMON
+	select PINCTRL_SUNXI
+
+config PINCTRL_SUN50I_A64
+	bool
+	select PINCTRL_SUNXI
 
 endif
diff --git a/drivers/pinctrl/sunxi/Makefile b/drivers/pinctrl/sunxi/Makefile
index ef82f22..2d8b64e 100644
--- a/drivers/pinctrl/sunxi/Makefile
+++ b/drivers/pinctrl/sunxi/Makefile
@@ -1,5 +1,5 @@
 # Core
-obj-$(CONFIG_PINCTRL_SUNXI_COMMON)	+= pinctrl-sunxi.o
+obj-y					+= pinctrl-sunxi.o
 
 # SoC Drivers
 obj-$(CONFIG_PINCTRL_SUN4I_A10)		+= pinctrl-sun4i-a10.o
@@ -12,7 +12,9 @@
 obj-$(CONFIG_PINCTRL_SUN8I_A23)		+= pinctrl-sun8i-a23.o
 obj-$(CONFIG_PINCTRL_SUN8I_A23_R)	+= pinctrl-sun8i-a23-r.o
 obj-$(CONFIG_PINCTRL_SUN8I_A33)		+= pinctrl-sun8i-a33.o
+obj-$(CONFIG_PINCTRL_SUN50I_A64)	+= pinctrl-sun50i-a64.o
 obj-$(CONFIG_PINCTRL_SUN8I_A83T)	+= pinctrl-sun8i-a83t.o
 obj-$(CONFIG_PINCTRL_SUN8I_H3)		+= pinctrl-sun8i-h3.o
+obj-$(CONFIG_PINCTRL_SUN8I_H3_R)	+= pinctrl-sun8i-h3-r.o
 obj-$(CONFIG_PINCTRL_SUN9I_A80)		+= pinctrl-sun9i-a80.o
 obj-$(CONFIG_PINCTRL_SUN9I_A80_R)	+= pinctrl-sun9i-a80-r.o
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun50i-a64.c b/drivers/pinctrl/sunxi/pinctrl-sun50i-a64.c
new file mode 100644
index 0000000..4f2a726
--- /dev/null
+++ b/drivers/pinctrl/sunxi/pinctrl-sun50i-a64.c
@@ -0,0 +1,601 @@
+/*
+ * Allwinner A64 SoCs pinctrl driver.
+ *
+ * Copyright (C) 2016 - ARM Ltd.
+ * Author: Andre Przywara <andre.przywara@arm.com>
+ *
+ * Based on pinctrl-sun7i-a20.c, which is:
+ * Copyright (C) 2014 Maxime Ripard <maxime.ripard@free-electrons.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2.  This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/pinctrl/pinctrl.h>
+
+#include "pinctrl-sunxi.h"
+
+static const struct sunxi_desc_pin a64_pins[] = {
+	SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 0),
+		  SUNXI_FUNCTION(0x0, "gpio_in"),
+		  SUNXI_FUNCTION(0x1, "gpio_out"),
+		  SUNXI_FUNCTION(0x2, "uart2"),		/* TX */
+		  SUNXI_FUNCTION(0x4, "jtag"),		/* MS0 */
+		  SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 0)),	/* EINT0 */
+	SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 1),
+		  SUNXI_FUNCTION(0x0, "gpio_in"),
+		  SUNXI_FUNCTION(0x1, "gpio_out"),
+		  SUNXI_FUNCTION(0x2, "uart2"),		/* RX */
+		  SUNXI_FUNCTION(0x4, "jtag"),		/* CK0 */
+		  SUNXI_FUNCTION(0x5, "sim"),		/* VCCEN */
+		  SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 1)),		/* EINT1 */
+	SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 2),
+		  SUNXI_FUNCTION(0x0, "gpio_in"),
+		  SUNXI_FUNCTION(0x1, "gpio_out"),
+		  SUNXI_FUNCTION(0x2, "uart2"),		/* RTS */
+		  SUNXI_FUNCTION(0x4, "jtag"),		/* DO0 */
+		  SUNXI_FUNCTION(0x5, "sim"),		/* VPPEN */
+		  SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 2)),		/* EINT2 */
+	SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 3),
+		  SUNXI_FUNCTION(0x0, "gpio_in"),
+		  SUNXI_FUNCTION(0x1, "gpio_out"),
+		  SUNXI_FUNCTION(0x2, "uart2"),		/* CTS */
+		  SUNXI_FUNCTION(0x3, "i2s0"),		/* MCLK */
+		  SUNXI_FUNCTION(0x4, "jtag"),		/* DI0 */
+		  SUNXI_FUNCTION(0x5, "sim"),		/* VPPPP */
+		  SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 3)),		/* EINT3 */
+	SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 4),
+		  SUNXI_FUNCTION(0x0, "gpio_in"),
+		  SUNXI_FUNCTION(0x1, "gpio_out"),
+		  SUNXI_FUNCTION(0x2, "aif2"),		/* SYNC */
+		  SUNXI_FUNCTION(0x3, "i2s0"),		/* SYNC */
+		  SUNXI_FUNCTION(0x5, "sim"),		/* CLK */
+		  SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 4)),		/* EINT4 */
+	SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 5),
+		  SUNXI_FUNCTION(0x0, "gpio_in"),
+		  SUNXI_FUNCTION(0x1, "gpio_out"),
+		  SUNXI_FUNCTION(0x2, "aif2"),		/* BCLK */
+		  SUNXI_FUNCTION(0x3, "i2s0"),		/* BCLK */
+		  SUNXI_FUNCTION(0x5, "sim"),		/* DATA */
+		  SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 5)),		/* EINT5 */
+	SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 6),
+		  SUNXI_FUNCTION(0x0, "gpio_in"),
+		  SUNXI_FUNCTION(0x1, "gpio_out"),
+		  SUNXI_FUNCTION(0x2, "aif2"),		/* DOUT */
+		  SUNXI_FUNCTION(0x3, "i2s0"),		/* DOUT */
+		  SUNXI_FUNCTION(0x5, "sim"),		/* RST */
+		  SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 6)),		/* EINT6 */
+	SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 7),
+		  SUNXI_FUNCTION(0x0, "gpio_in"),
+		  SUNXI_FUNCTION(0x1, "gpio_out"),
+		  SUNXI_FUNCTION(0x2, "aif2"),		/* DIN */
+		  SUNXI_FUNCTION(0x3, "i2s0"),		/* DIN */
+		  SUNXI_FUNCTION(0x5, "sim"),		/* DET */
+		  SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 7)),		/* EINT7 */
+	SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 8),
+		  SUNXI_FUNCTION(0x0, "gpio_in"),
+		  SUNXI_FUNCTION(0x1, "gpio_out"),
+		  SUNXI_FUNCTION(0x4, "uart0"),		/* TX */
+		  SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 8)),		/* EINT8 */
+	SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 9),
+		  SUNXI_FUNCTION(0x0, "gpio_in"),
+		  SUNXI_FUNCTION(0x1, "gpio_out"),
+		  SUNXI_FUNCTION(0x4, "uart0"),		/* RX */
+		  SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 9)),		/* EINT9 */
+	/* Hole */
+	SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 0),
+		  SUNXI_FUNCTION(0x0, "gpio_in"),
+		  SUNXI_FUNCTION(0x1, "gpio_out"),
+		  SUNXI_FUNCTION(0x2, "nand0"),		/* NWE */
+		  SUNXI_FUNCTION(0x4, "spi0")),		/* MOSI */
+	SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 1),
+		  SUNXI_FUNCTION(0x0, "gpio_in"),
+		  SUNXI_FUNCTION(0x1, "gpio_out"),
+		  SUNXI_FUNCTION(0x2, "nand0"),		/* NALE */
+		  SUNXI_FUNCTION(0x3, "mmc2"),		/* DS */
+		  SUNXI_FUNCTION(0x4, "spi0")),		/* MISO */
+	SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 2),
+		  SUNXI_FUNCTION(0x0, "gpio_in"),
+		  SUNXI_FUNCTION(0x1, "gpio_out"),
+		  SUNXI_FUNCTION(0x2, "nand0"),		/* NCLE */
+		  SUNXI_FUNCTION(0x4, "spi0")),		/* SCK */
+	SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 3),
+		  SUNXI_FUNCTION(0x0, "gpio_in"),
+		  SUNXI_FUNCTION(0x1, "gpio_out"),
+		  SUNXI_FUNCTION(0x2, "nand0"),		/* NCE1 */
+		  SUNXI_FUNCTION(0x4, "spi0")),		/* CS */
+	SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 4),
+		  SUNXI_FUNCTION(0x0, "gpio_in"),
+		  SUNXI_FUNCTION(0x1, "gpio_out"),
+		  SUNXI_FUNCTION(0x2, "nand0")),	/* NCE0 */
+	SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 5),
+		  SUNXI_FUNCTION(0x0, "gpio_in"),
+		  SUNXI_FUNCTION(0x1, "gpio_out"),
+		  SUNXI_FUNCTION(0x2, "nand0"),		/* NRE# */
+		  SUNXI_FUNCTION(0x3, "mmc2")),		/* CLK */
+	SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 6),
+		  SUNXI_FUNCTION(0x0, "gpio_in"),
+		  SUNXI_FUNCTION(0x1, "gpio_out"),
+		  SUNXI_FUNCTION(0x2, "nand0"),		/* NRB0 */
+		  SUNXI_FUNCTION(0x3, "mmc2")),		/* CMD */
+	SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 7),
+		  SUNXI_FUNCTION(0x0, "gpio_in"),
+		  SUNXI_FUNCTION(0x1, "gpio_out"),
+		  SUNXI_FUNCTION(0x2, "nand0")),	/* NRB1 */
+	SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 8),
+		  SUNXI_FUNCTION(0x0, "gpio_in"),
+		  SUNXI_FUNCTION(0x1, "gpio_out"),
+		  SUNXI_FUNCTION(0x2, "nand0"),		/* NDQ0 */
+		  SUNXI_FUNCTION(0x3, "mmc2")),		/* D0 */
+	SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 9),
+		  SUNXI_FUNCTION(0x0, "gpio_in"),
+		  SUNXI_FUNCTION(0x1, "gpio_out"),
+		  SUNXI_FUNCTION(0x2, "nand0"),		/* NDQ1 */
+		  SUNXI_FUNCTION(0x3, "mmc2")),		/* D1 */
+	SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 10),
+		  SUNXI_FUNCTION(0x0, "gpio_in"),
+		  SUNXI_FUNCTION(0x1, "gpio_out"),
+		  SUNXI_FUNCTION(0x2, "nand0"),		/* NDQ2 */
+		  SUNXI_FUNCTION(0x3, "mmc2")),		/* D2 */
+	SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 11),
+		  SUNXI_FUNCTION(0x0, "gpio_in"),
+		  SUNXI_FUNCTION(0x1, "gpio_out"),
+		  SUNXI_FUNCTION(0x2, "nand0"),		/* NDQ3 */
+		  SUNXI_FUNCTION(0x3, "mmc2")),		/* D3 */
+	SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 12),
+		  SUNXI_FUNCTION(0x0, "gpio_in"),
+		  SUNXI_FUNCTION(0x1, "gpio_out"),
+		  SUNXI_FUNCTION(0x2, "nand0"),		/* NDQ4 */
+		  SUNXI_FUNCTION(0x3, "mmc2")),		/* D4 */
+	SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 13),
+		  SUNXI_FUNCTION(0x0, "gpio_in"),
+		  SUNXI_FUNCTION(0x1, "gpio_out"),
+		  SUNXI_FUNCTION(0x2, "nand0"),		/* NDQ5 */
+		  SUNXI_FUNCTION(0x3, "mmc2")),		/* D5 */
+	SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 14),
+		  SUNXI_FUNCTION(0x0, "gpio_in"),
+		  SUNXI_FUNCTION(0x1, "gpio_out"),
+		  SUNXI_FUNCTION(0x2, "nand0"),		/* NDQ6 */
+		  SUNXI_FUNCTION(0x3, "mmc2")),		/* D6 */
+	SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 15),
+		  SUNXI_FUNCTION(0x0, "gpio_in"),
+		  SUNXI_FUNCTION(0x1, "gpio_out"),
+		  SUNXI_FUNCTION(0x2, "nand0"),		/* NDQ7 */
+		  SUNXI_FUNCTION(0x3, "mmc2")),		/* D7 */
+	SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 16),
+		  SUNXI_FUNCTION(0x0, "gpio_in"),
+		  SUNXI_FUNCTION(0x1, "gpio_out"),
+		  SUNXI_FUNCTION(0x2, "nand0"),		/* NDQS */
+		  SUNXI_FUNCTION(0x3, "mmc2")),		/* RST */
+	/* Hole */
+	SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 0),
+		  SUNXI_FUNCTION(0x0, "gpio_in"),
+		  SUNXI_FUNCTION(0x1, "gpio_out"),
+		  SUNXI_FUNCTION(0x2, "lcd0"),		/* D2 */
+		  SUNXI_FUNCTION(0x3, "uart3"),		/* TX */
+		  SUNXI_FUNCTION(0x4, "spi1"),		/* CS */
+		  SUNXI_FUNCTION(0x5, "ccir")),		/* CLK */
+	SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 1),
+		  SUNXI_FUNCTION(0x0, "gpio_in"),
+		  SUNXI_FUNCTION(0x1, "gpio_out"),
+		  SUNXI_FUNCTION(0x2, "lcd0"),		/* D3 */
+		  SUNXI_FUNCTION(0x3, "uart3"),		/* RX */
+		  SUNXI_FUNCTION(0x4, "spi1"),		/* CLK */
+		  SUNXI_FUNCTION(0x5, "ccir")),		/* DE */
+	SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 2),
+		  SUNXI_FUNCTION(0x0, "gpio_in"),
+		  SUNXI_FUNCTION(0x1, "gpio_out"),
+		  SUNXI_FUNCTION(0x2, "lcd0"),		/* D4 */
+		  SUNXI_FUNCTION(0x3, "uart4"),		/* TX */
+		  SUNXI_FUNCTION(0x4, "spi1"),		/* MOSI */
+		  SUNXI_FUNCTION(0x5, "ccir")),		/* HSYNC */
+	SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 3),
+		  SUNXI_FUNCTION(0x0, "gpio_in"),
+		  SUNXI_FUNCTION(0x1, "gpio_out"),
+		  SUNXI_FUNCTION(0x2, "lcd0"),		/* D5 */
+		  SUNXI_FUNCTION(0x3, "uart4"),		/* RX */
+		  SUNXI_FUNCTION(0x4, "spi1"),		/* MISO */
+		  SUNXI_FUNCTION(0x5, "ccir")),		/* VSYNC */
+	SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 4),
+		  SUNXI_FUNCTION(0x0, "gpio_in"),
+		  SUNXI_FUNCTION(0x1, "gpio_out"),
+		  SUNXI_FUNCTION(0x2, "lcd0"),		/* D6 */
+		  SUNXI_FUNCTION(0x3, "uart4"),		/* RTS */
+		  SUNXI_FUNCTION(0x5, "ccir")),		/* D0 */
+	SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 5),
+		  SUNXI_FUNCTION(0x0, "gpio_in"),
+		  SUNXI_FUNCTION(0x1, "gpio_out"),
+		  SUNXI_FUNCTION(0x2, "lcd0"),		/* D7 */
+		  SUNXI_FUNCTION(0x3, "uart4"),		/* CTS */
+		  SUNXI_FUNCTION(0x5, "ccir")),		/* D1 */
+	SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 6),
+		  SUNXI_FUNCTION(0x0, "gpio_in"),
+		  SUNXI_FUNCTION(0x1, "gpio_out"),
+		  SUNXI_FUNCTION(0x2, "lcd0"),		/* D10 */
+		  SUNXI_FUNCTION(0x5, "ccir")),		/* D2 */
+	SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 7),
+		  SUNXI_FUNCTION(0x0, "gpio_in"),
+		  SUNXI_FUNCTION(0x1, "gpio_out"),
+		  SUNXI_FUNCTION(0x2, "lcd0"),		/* D11 */
+		  SUNXI_FUNCTION(0x5, "ccir")),		/* D3 */
+	SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 8),
+		  SUNXI_FUNCTION(0x0, "gpio_in"),
+		  SUNXI_FUNCTION(0x1, "gpio_out"),
+		  SUNXI_FUNCTION(0x2, "lcd0"),		/* D12 */
+		  SUNXI_FUNCTION(0x4, "emac"),		/* ERXD3 */
+		  SUNXI_FUNCTION(0x5, "ccir")),		/* D4 */
+	SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 9),
+		  SUNXI_FUNCTION(0x0, "gpio_in"),
+		  SUNXI_FUNCTION(0x1, "gpio_out"),
+		  SUNXI_FUNCTION(0x2, "lcd0"),		/* D13 */
+		  SUNXI_FUNCTION(0x4, "emac"),		/* ERXD2 */
+		  SUNXI_FUNCTION(0x5, "ccir")),		/* D5 */
+	SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 10),
+		  SUNXI_FUNCTION(0x0, "gpio_in"),
+		  SUNXI_FUNCTION(0x1, "gpio_out"),
+		  SUNXI_FUNCTION(0x2, "lcd0"),		/* D14 */
+		  SUNXI_FUNCTION(0x4, "emac")),		/* ERXD1 */
+	SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 11),
+		  SUNXI_FUNCTION(0x0, "gpio_in"),
+		  SUNXI_FUNCTION(0x1, "gpio_out"),
+		  SUNXI_FUNCTION(0x2, "lcd0"),		/* D15 */
+		  SUNXI_FUNCTION(0x4, "emac")),		/* ERXD0 */
+	SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 12),
+		  SUNXI_FUNCTION(0x0, "gpio_in"),
+		  SUNXI_FUNCTION(0x1, "gpio_out"),
+		  SUNXI_FUNCTION(0x2, "lcd0"),		/* D18 */
+		  SUNXI_FUNCTION(0x3, "lvds0"),		/* VP0 */
+		  SUNXI_FUNCTION(0x4, "emac")),		/* ERXCK */
+	SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 13),
+		  SUNXI_FUNCTION(0x0, "gpio_in"),
+		  SUNXI_FUNCTION(0x1, "gpio_out"),
+		  SUNXI_FUNCTION(0x2, "lcd0"),		/* D19 */
+		  SUNXI_FUNCTION(0x3, "lvds0"),		/* VN0 */
+		  SUNXI_FUNCTION(0x4, "emac")),		/* ERXCTL */
+	SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 14),
+		  SUNXI_FUNCTION(0x0, "gpio_in"),
+		  SUNXI_FUNCTION(0x1, "gpio_out"),
+		  SUNXI_FUNCTION(0x2, "lcd0"),		/* D20 */
+		  SUNXI_FUNCTION(0x3, "lvds0"),		/* VP1 */
+		  SUNXI_FUNCTION(0x4, "emac")),		/* ENULL */
+	SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 15),
+		  SUNXI_FUNCTION(0x0, "gpio_in"),
+		  SUNXI_FUNCTION(0x1, "gpio_out"),
+		  SUNXI_FUNCTION(0x2, "lcd0"),		/* D21 */
+		  SUNXI_FUNCTION(0x3, "lvds0"),		/* VN1 */
+		  SUNXI_FUNCTION(0x4, "emac"),		/* ETXD3 */
+		  SUNXI_FUNCTION(0x5, "ccir")),		/* D6 */
+	SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 16),
+		  SUNXI_FUNCTION(0x0, "gpio_in"),
+		  SUNXI_FUNCTION(0x1, "gpio_out"),
+		  SUNXI_FUNCTION(0x2, "lcd0"),		/* D22 */
+		  SUNXI_FUNCTION(0x3, "lvds0"),		/* VP2 */
+		  SUNXI_FUNCTION(0x4, "emac"),		/* ETXD2 */
+		  SUNXI_FUNCTION(0x5, "ccir")),		/* D7 */
+	SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 17),
+		  SUNXI_FUNCTION(0x0, "gpio_in"),
+		  SUNXI_FUNCTION(0x1, "gpio_out"),
+		  SUNXI_FUNCTION(0x2, "lcd0"),		/* D23 */
+		  SUNXI_FUNCTION(0x3, "lvds0"),		/* VN2 */
+		  SUNXI_FUNCTION(0x4, "emac")),		/* ETXD1 */
+	SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 18),
+		  SUNXI_FUNCTION(0x0, "gpio_in"),
+		  SUNXI_FUNCTION(0x1, "gpio_out"),
+		  SUNXI_FUNCTION(0x2, "lcd0"),		/* CLK */
+		  SUNXI_FUNCTION(0x3, "lvds0"),		/* VPC */
+		  SUNXI_FUNCTION(0x4, "emac")),		/* ETXD0 */
+	SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 19),
+		  SUNXI_FUNCTION(0x0, "gpio_in"),
+		  SUNXI_FUNCTION(0x1, "gpio_out"),
+		  SUNXI_FUNCTION(0x2, "lcd0"),		/* DE */
+		  SUNXI_FUNCTION(0x3, "lvds0"),		/* VNC */
+		  SUNXI_FUNCTION(0x4, "emac")),		/* ETXCK */
+	SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 20),
+		  SUNXI_FUNCTION(0x0, "gpio_in"),
+		  SUNXI_FUNCTION(0x1, "gpio_out"),
+		  SUNXI_FUNCTION(0x2, "lcd0"),		/* HSYNC */
+		  SUNXI_FUNCTION(0x3, "lvds0"),		/* VP3 */
+		  SUNXI_FUNCTION(0x4, "emac")),		/* ETXCTL */
+	SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 21),
+		  SUNXI_FUNCTION(0x0, "gpio_in"),
+		  SUNXI_FUNCTION(0x1, "gpio_out"),
+		  SUNXI_FUNCTION(0x2, "lcd0"),		/* VSYNC */
+		  SUNXI_FUNCTION(0x3, "lvds0"),		/* VN3 */
+		  SUNXI_FUNCTION(0x4, "emac")),		/* ECLKIN */
+	SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 22),
+		  SUNXI_FUNCTION(0x0, "gpio_in"),
+		  SUNXI_FUNCTION(0x1, "gpio_out"),
+		  SUNXI_FUNCTION(0x2, "pwm"),		/* PWM0 */
+		  SUNXI_FUNCTION(0x4, "emac")),		/* EMDC */
+	SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 23),
+		  SUNXI_FUNCTION(0x0, "gpio_in"),
+		  SUNXI_FUNCTION(0x1, "gpio_out"),
+		  SUNXI_FUNCTION(0x4, "emac")),		/* EMDIO */
+	SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 24),
+		  SUNXI_FUNCTION(0x0, "gpio_in"),
+		  SUNXI_FUNCTION(0x1, "gpio_out")),
+	/* Hole */
+	SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 0),
+		  SUNXI_FUNCTION(0x0, "gpio_in"),
+		  SUNXI_FUNCTION(0x1, "gpio_out"),
+		  SUNXI_FUNCTION(0x2, "csi0"),		/* PCK */
+		  SUNXI_FUNCTION(0x4, "ts0")),		/* CLK */
+	SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 1),
+		  SUNXI_FUNCTION(0x0, "gpio_in"),
+		  SUNXI_FUNCTION(0x1, "gpio_out"),
+		  SUNXI_FUNCTION(0x2, "csi0"),		/* CK */
+		  SUNXI_FUNCTION(0x4, "ts0")),		/* ERR */
+	SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 2),
+		  SUNXI_FUNCTION(0x0, "gpio_in"),
+		  SUNXI_FUNCTION(0x1, "gpio_out"),
+		  SUNXI_FUNCTION(0x2, "csi0"),		/* HSYNC */
+		  SUNXI_FUNCTION(0x4, "ts0")),		/* SYNC */
+	SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 3),
+		  SUNXI_FUNCTION(0x0, "gpio_in"),
+		  SUNXI_FUNCTION(0x1, "gpio_out"),
+		  SUNXI_FUNCTION(0x2, "csi0"),		/* VSYNC */
+		  SUNXI_FUNCTION(0x4, "ts0")),		/* DVLD */
+	SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 4),
+		  SUNXI_FUNCTION(0x0, "gpio_in"),
+		  SUNXI_FUNCTION(0x1, "gpio_out"),
+		  SUNXI_FUNCTION(0x2, "csi0"),		/* D0 */
+		  SUNXI_FUNCTION(0x4, "ts0")),		/* D0 */
+	SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 5),
+		  SUNXI_FUNCTION(0x0, "gpio_in"),
+		  SUNXI_FUNCTION(0x1, "gpio_out"),
+		  SUNXI_FUNCTION(0x2, "csi0"),		/* D1 */
+		  SUNXI_FUNCTION(0x4, "ts0")),		/* D1 */
+	SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 6),
+		  SUNXI_FUNCTION(0x0, "gpio_in"),
+		  SUNXI_FUNCTION(0x1, "gpio_out"),
+		  SUNXI_FUNCTION(0x2, "csi0"),		/* D2 */
+		  SUNXI_FUNCTION(0x4, "ts0")),		/* D2 */
+	SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 7),
+		  SUNXI_FUNCTION(0x0, "gpio_in"),
+		  SUNXI_FUNCTION(0x1, "gpio_out"),
+		  SUNXI_FUNCTION(0x2, "csi0"),		/* D3 */
+		  SUNXI_FUNCTION(0x4, "ts0")),		/* D3 */
+	SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 8),
+		  SUNXI_FUNCTION(0x0, "gpio_in"),
+		  SUNXI_FUNCTION(0x1, "gpio_out"),
+		  SUNXI_FUNCTION(0x2, "csi0"),		/* D4 */
+		  SUNXI_FUNCTION(0x4, "ts0")),		/* D4 */
+	SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 9),
+		  SUNXI_FUNCTION(0x0, "gpio_in"),
+		  SUNXI_FUNCTION(0x1, "gpio_out"),
+		  SUNXI_FUNCTION(0x2, "csi0"),		/* D5 */
+		  SUNXI_FUNCTION(0x4, "ts0")),		/* D5 */
+	SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 10),
+		  SUNXI_FUNCTION(0x0, "gpio_in"),
+		  SUNXI_FUNCTION(0x1, "gpio_out"),
+		  SUNXI_FUNCTION(0x2, "csi0"),		/* D6 */
+		  SUNXI_FUNCTION(0x4, "ts0")),		/* D6 */
+	SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 11),
+		  SUNXI_FUNCTION(0x0, "gpio_in"),
+		  SUNXI_FUNCTION(0x1, "gpio_out"),
+		  SUNXI_FUNCTION(0x2, "csi0"),		/* D7 */
+		  SUNXI_FUNCTION(0x4, "ts0")),		/* D7 */
+	SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 12),
+		  SUNXI_FUNCTION(0x0, "gpio_in"),
+		  SUNXI_FUNCTION(0x1, "gpio_out"),
+		  SUNXI_FUNCTION(0x2, "csi0")),		/* SCK */
+	SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 13),
+		  SUNXI_FUNCTION(0x0, "gpio_in"),
+		  SUNXI_FUNCTION(0x1, "gpio_out"),
+		  SUNXI_FUNCTION(0x2, "csi0")),		/* SDA */
+	SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 14),
+		  SUNXI_FUNCTION(0x0, "gpio_in"),
+		  SUNXI_FUNCTION(0x1, "gpio_out"),
+		  SUNXI_FUNCTION(0x2, "pll"),		/* LOCK_DBG */
+		  SUNXI_FUNCTION(0x3, "i2c2")),		/* SCK */
+	SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 15),
+		  SUNXI_FUNCTION(0x0, "gpio_in"),
+		  SUNXI_FUNCTION(0x1, "gpio_out"),
+		  SUNXI_FUNCTION(0x3, "i2c2")),		/* SDA */
+	SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 16),
+		  SUNXI_FUNCTION(0x0, "gpio_in"),
+		  SUNXI_FUNCTION(0x1, "gpio_out")),
+	SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 17),
+		  SUNXI_FUNCTION(0x0, "gpio_in"),
+		  SUNXI_FUNCTION(0x1, "gpio_out")),
+	/* Hole */
+	SUNXI_PIN(SUNXI_PINCTRL_PIN(F, 0),
+		  SUNXI_FUNCTION(0x0, "gpio_in"),
+		  SUNXI_FUNCTION(0x1, "gpio_out"),
+		  SUNXI_FUNCTION(0x2, "mmc0"),		/* D1 */
+		  SUNXI_FUNCTION(0x3, "jtag")),		/* MS1 */
+	SUNXI_PIN(SUNXI_PINCTRL_PIN(F, 1),
+		  SUNXI_FUNCTION(0x0, "gpio_in"),
+		  SUNXI_FUNCTION(0x1, "gpio_out"),
+		  SUNXI_FUNCTION(0x2, "mmc0"),		/* D0 */
+		  SUNXI_FUNCTION(0x3, "jtag")),		/* DI1 */
+	SUNXI_PIN(SUNXI_PINCTRL_PIN(F, 2),
+		  SUNXI_FUNCTION(0x0, "gpio_in"),
+		  SUNXI_FUNCTION(0x1, "gpio_out"),
+		  SUNXI_FUNCTION(0x2, "mmc0"),		/* CLK */
+		  SUNXI_FUNCTION(0x3, "uart0")),	/* TX */
+	SUNXI_PIN(SUNXI_PINCTRL_PIN(F, 3),
+		  SUNXI_FUNCTION(0x0, "gpio_in"),
+		  SUNXI_FUNCTION(0x1, "gpio_out"),
+		  SUNXI_FUNCTION(0x2, "mmc0"),		/* CMD */
+		  SUNXI_FUNCTION(0x3, "jtag")),		/* DO1 */
+	SUNXI_PIN(SUNXI_PINCTRL_PIN(F, 4),
+		  SUNXI_FUNCTION(0x0, "gpio_in"),
+		  SUNXI_FUNCTION(0x1, "gpio_out"),
+		  SUNXI_FUNCTION(0x2, "mmc0"),		/* D3 */
+		  SUNXI_FUNCTION(0x4, "uart0")),	/* RX */
+	SUNXI_PIN(SUNXI_PINCTRL_PIN(F, 5),
+		  SUNXI_FUNCTION(0x0, "gpio_in"),
+		  SUNXI_FUNCTION(0x1, "gpio_out"),
+		  SUNXI_FUNCTION(0x2, "mmc0"),		/* D2 */
+		  SUNXI_FUNCTION(0x3, "jtag")),		/* CK1 */
+	SUNXI_PIN(SUNXI_PINCTRL_PIN(F, 6),
+		  SUNXI_FUNCTION(0x0, "gpio_in"),
+		  SUNXI_FUNCTION(0x1, "gpio_out")),
+	/* Hole */
+	SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 0),
+		  SUNXI_FUNCTION(0x0, "gpio_in"),
+		  SUNXI_FUNCTION(0x1, "gpio_out"),
+		  SUNXI_FUNCTION(0x2, "mmc1"),		/* CLK */
+		  SUNXI_FUNCTION_IRQ_BANK(0x6, 1, 0)),	/* EINT0 */
+	SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 1),
+		  SUNXI_FUNCTION(0x0, "gpio_in"),
+		  SUNXI_FUNCTION(0x1, "gpio_out"),
+		  SUNXI_FUNCTION(0x2, "mmc1"),		/* CMD */
+		  SUNXI_FUNCTION_IRQ_BANK(0x6, 1, 1)),	/* EINT1 */
+	SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 2),
+		  SUNXI_FUNCTION(0x0, "gpio_in"),
+		  SUNXI_FUNCTION(0x1, "gpio_out"),
+		  SUNXI_FUNCTION(0x2, "mmc1"),		/* D0 */
+		  SUNXI_FUNCTION_IRQ_BANK(0x6, 1, 2)),	/* EINT2 */
+	SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 3),
+		  SUNXI_FUNCTION(0x0, "gpio_in"),
+		  SUNXI_FUNCTION(0x1, "gpio_out"),
+		  SUNXI_FUNCTION(0x2, "mmc1"),		/* D1 */
+		  SUNXI_FUNCTION_IRQ_BANK(0x6, 1, 3)),	/* EINT3 */
+	SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 4),
+		  SUNXI_FUNCTION(0x0, "gpio_in"),
+		  SUNXI_FUNCTION(0x1, "gpio_out"),
+		  SUNXI_FUNCTION(0x2, "mmc1"),		/* D2 */
+		  SUNXI_FUNCTION_IRQ_BANK(0x6, 1, 4)),	/* EINT4 */
+	SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 5),
+		  SUNXI_FUNCTION(0x0, "gpio_in"),
+		  SUNXI_FUNCTION(0x1, "gpio_out"),
+		  SUNXI_FUNCTION(0x2, "mmc1"),		/* D3 */
+		  SUNXI_FUNCTION_IRQ_BANK(0x6, 1, 5)),	/* EINT5 */
+	SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 6),
+		  SUNXI_FUNCTION(0x0, "gpio_in"),
+		  SUNXI_FUNCTION(0x1, "gpio_out"),
+		  SUNXI_FUNCTION(0x2, "uart1"),		/* TX */
+		  SUNXI_FUNCTION_IRQ_BANK(0x6, 1, 6)),	/* EINT6 */
+	SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 7),
+		  SUNXI_FUNCTION(0x0, "gpio_in"),
+		  SUNXI_FUNCTION(0x1, "gpio_out"),
+		  SUNXI_FUNCTION(0x2, "uart1"),		/* RX */
+		  SUNXI_FUNCTION_IRQ_BANK(0x6, 1, 7)),	/* EINT7 */
+	SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 8),
+		  SUNXI_FUNCTION(0x0, "gpio_in"),
+		  SUNXI_FUNCTION(0x1, "gpio_out"),
+		  SUNXI_FUNCTION(0x2, "uart1"),		/* RTS */
+		  SUNXI_FUNCTION_IRQ_BANK(0x6, 1, 8)),	/* EINT8 */
+	SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 9),
+		  SUNXI_FUNCTION(0x0, "gpio_in"),
+		  SUNXI_FUNCTION(0x1, "gpio_out"),
+		  SUNXI_FUNCTION(0x2, "uart1"),		/* CTS */
+		  SUNXI_FUNCTION_IRQ_BANK(0x6, 1, 9)),	/* EINT9 */
+	SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 10),
+		  SUNXI_FUNCTION(0x0, "gpio_in"),
+		  SUNXI_FUNCTION(0x1, "gpio_out"),
+		  SUNXI_FUNCTION(0x2, "aif3"),		/* SYNC */
+		  SUNXI_FUNCTION(0x3, "i2s1"),		/* SYNC */
+		  SUNXI_FUNCTION_IRQ_BANK(0x6, 1, 10)),	/* EINT10 */
+	SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 11),
+		  SUNXI_FUNCTION(0x0, "gpio_in"),
+		  SUNXI_FUNCTION(0x1, "gpio_out"),
+		  SUNXI_FUNCTION(0x2, "aif3"),		/* BCLK */
+		  SUNXI_FUNCTION(0x3, "i2s1"),		/* BCLK */
+		  SUNXI_FUNCTION_IRQ_BANK(0x6, 1, 11)),	/* EINT11 */
+	SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 12),
+		  SUNXI_FUNCTION(0x0, "gpio_in"),
+		  SUNXI_FUNCTION(0x1, "gpio_out"),
+		  SUNXI_FUNCTION(0x2, "aif3"),		/* DOUT */
+		  SUNXI_FUNCTION(0x3, "i2s1"),		/* DOUT */
+		  SUNXI_FUNCTION_IRQ_BANK(0x6, 1, 12)),	/* EINT12 */
+	SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 13),
+		  SUNXI_FUNCTION(0x0, "gpio_in"),
+		  SUNXI_FUNCTION(0x1, "gpio_out"),
+		  SUNXI_FUNCTION(0x2, "aif3"),		/* DIN */
+		  SUNXI_FUNCTION(0x3, "i2s1"),		/* DIN */
+		  SUNXI_FUNCTION_IRQ_BANK(0x6, 1, 13)),	/* EINT13 */
+	/* Hole */
+	SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 0),
+		  SUNXI_FUNCTION(0x0, "gpio_in"),
+		  SUNXI_FUNCTION(0x1, "gpio_out"),
+		  SUNXI_FUNCTION(0x2, "i2c0"),		/* SCK */
+		  SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 0)),	/* EINT0 */
+	SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 1),
+		  SUNXI_FUNCTION(0x0, "gpio_in"),
+		  SUNXI_FUNCTION(0x1, "gpio_out"),
+		  SUNXI_FUNCTION(0x2, "i2c0"),		/* SDA */
+		  SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 1)),	/* EINT1 */
+	SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 2),
+		  SUNXI_FUNCTION(0x0, "gpio_in"),
+		  SUNXI_FUNCTION(0x1, "gpio_out"),
+		  SUNXI_FUNCTION(0x2, "i2c1"),		/* SCK */
+		  SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 2)),	/* EINT2 */
+	SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 3),
+		  SUNXI_FUNCTION(0x0, "gpio_in"),
+		  SUNXI_FUNCTION(0x1, "gpio_out"),
+		  SUNXI_FUNCTION(0x2, "i2c1"),		/* SDA */
+		  SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 3)),	/* EINT3 */
+	SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 4),
+		  SUNXI_FUNCTION(0x0, "gpio_in"),
+		  SUNXI_FUNCTION(0x1, "gpio_out"),
+		  SUNXI_FUNCTION(0x2, "uart3"),		/* TX */
+		  SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 4)),	/* EINT4 */
+	SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 5),
+		  SUNXI_FUNCTION(0x0, "gpio_in"),
+		  SUNXI_FUNCTION(0x1, "gpio_out"),
+		  SUNXI_FUNCTION(0x2, "uart3"),		/* RX */
+		  SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 5)),	/* EINT5 */
+	SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 6),
+		  SUNXI_FUNCTION(0x0, "gpio_in"),
+		  SUNXI_FUNCTION(0x1, "gpio_out"),
+		  SUNXI_FUNCTION(0x2, "uart3"),		/* RTS */
+		  SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 6)),	/* EINT6 */
+	SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 7),
+		  SUNXI_FUNCTION(0x0, "gpio_in"),
+		  SUNXI_FUNCTION(0x1, "gpio_out"),
+		  SUNXI_FUNCTION(0x2, "uart3"),		/* CTS */
+		  SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 7)),	/* EINT7 */
+	SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 8),
+		  SUNXI_FUNCTION(0x0, "gpio_in"),
+		  SUNXI_FUNCTION(0x1, "gpio_out"),
+		  SUNXI_FUNCTION(0x2, "spdif"),		/* OUT */
+		  SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 8)),	/* EINT8 */
+	SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 9),
+		  SUNXI_FUNCTION(0x0, "gpio_in"),
+		  SUNXI_FUNCTION(0x1, "gpio_out"),
+		  SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 9)),	/* EINT9 */
+	SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 10),
+		  SUNXI_FUNCTION(0x0, "gpio_in"),
+		  SUNXI_FUNCTION(0x1, "gpio_out"),
+		  SUNXI_FUNCTION(0x2, "mic"),		/* CLK */
+		  SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 10)),	/* EINT10 */
+	SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 11),
+		  SUNXI_FUNCTION(0x0, "gpio_in"),
+		  SUNXI_FUNCTION(0x1, "gpio_out"),
+		  SUNXI_FUNCTION(0x2, "mic"),		/* DATA */
+		  SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 11)),	/* EINT11 */
+};
+
+static const struct sunxi_pinctrl_desc a64_pinctrl_data = {
+	.pins = a64_pins,
+	.npins = ARRAY_SIZE(a64_pins),
+	.irq_banks = 3,
+};
+
+static int a64_pinctrl_probe(struct platform_device *pdev)
+{
+	return sunxi_pinctrl_init(pdev,
+				  &a64_pinctrl_data);
+}
+
+static const struct of_device_id a64_pinctrl_match[] = {
+	{ .compatible = "allwinner,sun50i-a64-pinctrl", },
+	{}
+};
+
+static struct platform_driver a64_pinctrl_driver = {
+	.probe	= a64_pinctrl_probe,
+	.driver	= {
+		.name		= "sun50i-a64-pinctrl",
+		.of_match_table	= a64_pinctrl_match,
+	},
+};
+builtin_platform_driver(a64_pinctrl_driver);
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun7i-a20.c b/drivers/pinctrl/sunxi/pinctrl-sun7i-a20.c
index cf1ce0c..435ad30 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sun7i-a20.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sun7i-a20.c
@@ -343,26 +343,22 @@
 		  SUNXI_FUNCTION(0x0, "gpio_in"),
 		  SUNXI_FUNCTION(0x1, "gpio_out"),
 		  SUNXI_FUNCTION(0x2, "nand0"),		/* NCE4 */
-		  SUNXI_FUNCTION(0x3, "spi2"),		/* CS0 */
-		  SUNXI_FUNCTION_IRQ(0x6, 12)),		/* EINT12 */
+		  SUNXI_FUNCTION(0x3, "spi2")),		/* CS0 */
 	SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 20),
 		  SUNXI_FUNCTION(0x0, "gpio_in"),
 		  SUNXI_FUNCTION(0x1, "gpio_out"),
 		  SUNXI_FUNCTION(0x2, "nand0"),		/* NCE5 */
-		  SUNXI_FUNCTION(0x3, "spi2"),		/* CLK */
-		  SUNXI_FUNCTION_IRQ(0x6, 13)),		/* EINT13 */
+		  SUNXI_FUNCTION(0x3, "spi2")),		/* CLK */
 	SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 21),
 		  SUNXI_FUNCTION(0x0, "gpio_in"),
 		  SUNXI_FUNCTION(0x1, "gpio_out"),
 		  SUNXI_FUNCTION(0x2, "nand0"),		/* NCE6 */
-		  SUNXI_FUNCTION(0x3, "spi2"),		/* MOSI */
-		  SUNXI_FUNCTION_IRQ(0x6, 14)),		/* EINT14 */
+		  SUNXI_FUNCTION(0x3, "spi2")),		/* MOSI */
 	SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 22),
 		  SUNXI_FUNCTION(0x0, "gpio_in"),
 		  SUNXI_FUNCTION(0x1, "gpio_out"),
 		  SUNXI_FUNCTION(0x2, "nand0"),		/* NCE7 */
-		  SUNXI_FUNCTION(0x3, "spi2"),		/* MISO */
-		  SUNXI_FUNCTION_IRQ(0x6, 15)),		/* EINT15 */
+		  SUNXI_FUNCTION(0x3, "spi2")),		/* MISO */
 	SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 23),
 		  SUNXI_FUNCTION(0x0, "gpio_in"),
 		  SUNXI_FUNCTION(0x1, "gpio_out"),
@@ -960,65 +956,65 @@
 		  SUNXI_FUNCTION(0x1, "gpio_out"),
 		  SUNXI_FUNCTION(0x2, "spi0"),		/* CS0 */
 		  SUNXI_FUNCTION(0x3, "uart5"),		/* TX */
-		  SUNXI_FUNCTION_IRQ(0x5, 22)),		/* EINT22 */
+		  SUNXI_FUNCTION_IRQ(0x6, 22)),		/* EINT22 */
 	SUNXI_PIN(SUNXI_PINCTRL_PIN(I, 11),
 		  SUNXI_FUNCTION(0x0, "gpio_in"),
 		  SUNXI_FUNCTION(0x1, "gpio_out"),
 		  SUNXI_FUNCTION(0x2, "spi0"),		/* CLK */
 		  SUNXI_FUNCTION(0x3, "uart5"),		/* RX */
-		  SUNXI_FUNCTION_IRQ(0x5, 23)),		/* EINT23 */
+		  SUNXI_FUNCTION_IRQ(0x6, 23)),		/* EINT23 */
 	SUNXI_PIN(SUNXI_PINCTRL_PIN(I, 12),
 		  SUNXI_FUNCTION(0x0, "gpio_in"),
 		  SUNXI_FUNCTION(0x1, "gpio_out"),
 		  SUNXI_FUNCTION(0x2, "spi0"),		/* MOSI */
 		  SUNXI_FUNCTION(0x3, "uart6"),		/* TX */
 		  SUNXI_FUNCTION(0x4, "clk_out_a"),	/* CLK_OUT_A */
-		  SUNXI_FUNCTION_IRQ(0x5, 24)),		/* EINT24 */
+		  SUNXI_FUNCTION_IRQ(0x6, 24)),		/* EINT24 */
 	SUNXI_PIN(SUNXI_PINCTRL_PIN(I, 13),
 		  SUNXI_FUNCTION(0x0, "gpio_in"),
 		  SUNXI_FUNCTION(0x1, "gpio_out"),
 		  SUNXI_FUNCTION(0x2, "spi0"),		/* MISO */
 		  SUNXI_FUNCTION(0x3, "uart6"),		/* RX */
 		  SUNXI_FUNCTION(0x4, "clk_out_b"),	/* CLK_OUT_B */
-		  SUNXI_FUNCTION_IRQ(0x5, 25)),		/* EINT25 */
+		  SUNXI_FUNCTION_IRQ(0x6, 25)),		/* EINT25 */
 	SUNXI_PIN(SUNXI_PINCTRL_PIN(I, 14),
 		  SUNXI_FUNCTION(0x0, "gpio_in"),
 		  SUNXI_FUNCTION(0x1, "gpio_out"),
 		  SUNXI_FUNCTION(0x2, "spi0"),		/* CS1 */
 		  SUNXI_FUNCTION(0x3, "ps2"),		/* SCK1 */
 		  SUNXI_FUNCTION(0x4, "timer4"),	/* TCLKIN0 */
-		  SUNXI_FUNCTION_IRQ(0x5, 26)),		/* EINT26 */
+		  SUNXI_FUNCTION_IRQ(0x6, 26)),		/* EINT26 */
 	SUNXI_PIN(SUNXI_PINCTRL_PIN(I, 15),
 		  SUNXI_FUNCTION(0x0, "gpio_in"),
 		  SUNXI_FUNCTION(0x1, "gpio_out"),
 		  SUNXI_FUNCTION(0x2, "spi1"),		/* CS1 */
 		  SUNXI_FUNCTION(0x3, "ps2"),		/* SDA1 */
 		  SUNXI_FUNCTION(0x4, "timer5"),	/* TCLKIN1 */
-		  SUNXI_FUNCTION_IRQ(0x5, 27)),		/* EINT27 */
+		  SUNXI_FUNCTION_IRQ(0x6, 27)),		/* EINT27 */
 	SUNXI_PIN(SUNXI_PINCTRL_PIN(I, 16),
 		  SUNXI_FUNCTION(0x0, "gpio_in"),
 		  SUNXI_FUNCTION(0x1, "gpio_out"),
 		  SUNXI_FUNCTION(0x2, "spi1"),		/* CS0 */
 		  SUNXI_FUNCTION(0x3, "uart2"),		/* RTS */
-		  SUNXI_FUNCTION_IRQ(0x5, 28)),		/* EINT28 */
+		  SUNXI_FUNCTION_IRQ(0x6, 28)),		/* EINT28 */
 	SUNXI_PIN(SUNXI_PINCTRL_PIN(I, 17),
 		  SUNXI_FUNCTION(0x0, "gpio_in"),
 		  SUNXI_FUNCTION(0x1, "gpio_out"),
 		  SUNXI_FUNCTION(0x2, "spi1"),		/* CLK */
 		  SUNXI_FUNCTION(0x3, "uart2"),		/* CTS */
-		  SUNXI_FUNCTION_IRQ(0x5, 29)),		/* EINT29 */
+		  SUNXI_FUNCTION_IRQ(0x6, 29)),		/* EINT29 */
 	SUNXI_PIN(SUNXI_PINCTRL_PIN(I, 18),
 		  SUNXI_FUNCTION(0x0, "gpio_in"),
 		  SUNXI_FUNCTION(0x1, "gpio_out"),
 		  SUNXI_FUNCTION(0x2, "spi1"),		/* MOSI */
 		  SUNXI_FUNCTION(0x3, "uart2"),		/* TX */
-		  SUNXI_FUNCTION_IRQ(0x5, 30)),		/* EINT30 */
+		  SUNXI_FUNCTION_IRQ(0x6, 30)),		/* EINT30 */
 	SUNXI_PIN(SUNXI_PINCTRL_PIN(I, 19),
 		  SUNXI_FUNCTION(0x0, "gpio_in"),
 		  SUNXI_FUNCTION(0x1, "gpio_out"),
 		  SUNXI_FUNCTION(0x2, "spi1"),		/* MISO */
 		  SUNXI_FUNCTION(0x3, "uart2"),		/* RX */
-		  SUNXI_FUNCTION_IRQ(0x5, 31)),		/* EINT31 */
+		  SUNXI_FUNCTION_IRQ(0x6, 31)),		/* EINT31 */
 	SUNXI_PIN(SUNXI_PINCTRL_PIN(I, 20),
 		  SUNXI_FUNCTION(0x0, "gpio_in"),
 		  SUNXI_FUNCTION(0x1, "gpio_out"),
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun8i-h3-r.c b/drivers/pinctrl/sunxi/pinctrl-sun8i-h3-r.c
new file mode 100644
index 0000000..686ec21
--- /dev/null
+++ b/drivers/pinctrl/sunxi/pinctrl-sun8i-h3-r.c
@@ -0,0 +1,106 @@
+/*
+ * Allwinner H3 SoCs pinctrl driver.
+ *
+ * Copyright (C) 2016 Krzysztof Adamski <k@japko.eu>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2.  This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/pinctrl/pinctrl.h>
+
+#include "pinctrl-sunxi.h"
+
+static const struct sunxi_desc_pin sun8i_h3_r_pins[] = {
+	SUNXI_PIN(SUNXI_PINCTRL_PIN(L, 0),
+		  SUNXI_FUNCTION(0x0, "gpio_in"),
+		  SUNXI_FUNCTION(0x1, "gpio_out"),
+		  SUNXI_FUNCTION(0x2, "s_twi"),         /* SCK */
+		  SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 0)),	/* PL_EINT0 */
+	SUNXI_PIN(SUNXI_PINCTRL_PIN(L, 1),
+		  SUNXI_FUNCTION(0x0, "gpio_in"),
+		  SUNXI_FUNCTION(0x1, "gpio_out"),
+		  SUNXI_FUNCTION(0x2, "s_twi"),         /* SDA */
+		  SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 1)),	/* PL_EINT1 */
+	SUNXI_PIN(SUNXI_PINCTRL_PIN(L, 2),
+		  SUNXI_FUNCTION(0x0, "gpio_in"),
+		  SUNXI_FUNCTION(0x1, "gpio_out"),
+		  SUNXI_FUNCTION(0x2, "s_uart"),        /* TX */
+		  SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 2)),	/* PL_EINT2 */
+	SUNXI_PIN(SUNXI_PINCTRL_PIN(L, 3),
+		  SUNXI_FUNCTION(0x0, "gpio_in"),
+		  SUNXI_FUNCTION(0x1, "gpio_out"),
+		  SUNXI_FUNCTION(0x2, "s_uart"),        /* RX */
+		  SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 3)),	/* PL_EINT3 */
+	SUNXI_PIN(SUNXI_PINCTRL_PIN(L, 4),
+		  SUNXI_FUNCTION(0x0, "gpio_in"),
+		  SUNXI_FUNCTION(0x1, "gpio_out"),
+		  SUNXI_FUNCTION(0x2, "s_jtag"),        /* MS */
+		  SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 4)),	/* PL_EINT4 */
+	SUNXI_PIN(SUNXI_PINCTRL_PIN(L, 5),
+		  SUNXI_FUNCTION(0x0, "gpio_in"),
+		  SUNXI_FUNCTION(0x1, "gpio_out"),
+		  SUNXI_FUNCTION(0x2, "s_jtag"),        /* CK */
+		  SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 5)),	/* PL_EINT5 */
+	SUNXI_PIN(SUNXI_PINCTRL_PIN(L, 6),
+		  SUNXI_FUNCTION(0x0, "gpio_in"),
+		  SUNXI_FUNCTION(0x1, "gpio_out"),
+		  SUNXI_FUNCTION(0x2, "s_jtag"),        /* DO */
+		  SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 6)),	/* PL_EINT6 */
+	SUNXI_PIN(SUNXI_PINCTRL_PIN(L, 7),
+		  SUNXI_FUNCTION(0x0, "gpio_in"),
+		  SUNXI_FUNCTION(0x1, "gpio_out"),
+		  SUNXI_FUNCTION(0x2, "s_jtag"),        /* DI */
+		  SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 7)),	/* PL_EINT7 */
+	SUNXI_PIN(SUNXI_PINCTRL_PIN(L, 8),
+		  SUNXI_FUNCTION(0x0, "gpio_in"),
+		  SUNXI_FUNCTION(0x1, "gpio_out"),
+		  SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 8)),	/* PL_EINT8 */
+	SUNXI_PIN(SUNXI_PINCTRL_PIN(L, 9),
+		  SUNXI_FUNCTION(0x0, "gpio_in"),
+		  SUNXI_FUNCTION(0x1, "gpio_out"),
+		  SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 9)),	/* PL_EINT9 */
+	SUNXI_PIN(SUNXI_PINCTRL_PIN(L, 10),
+		  SUNXI_FUNCTION(0x0, "gpio_in"),
+		  SUNXI_FUNCTION(0x1, "gpio_out"),
+		  SUNXI_FUNCTION(0x2, "s_pwm"),
+		  SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 10)),	/* PL_EINT10 */
+	SUNXI_PIN(SUNXI_PINCTRL_PIN(L, 11),
+		  SUNXI_FUNCTION(0x0, "gpio_in"),
+		  SUNXI_FUNCTION(0x1, "gpio_out"),
+		  SUNXI_FUNCTION(0x2, "s_cir_rx"),
+		  SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 11)),	/* PL_EINT11 */
+};
+
+static const struct sunxi_pinctrl_desc sun8i_h3_r_pinctrl_data = {
+	.pins = sun8i_h3_r_pins,
+	.npins = ARRAY_SIZE(sun8i_h3_r_pins),
+	.irq_banks = 1,
+	.pin_base = PL_BASE,
+	.irq_read_needs_mux = true
+};
+
+static int sun8i_h3_r_pinctrl_probe(struct platform_device *pdev)
+{
+	return sunxi_pinctrl_init(pdev,
+				  &sun8i_h3_r_pinctrl_data);
+}
+
+static const struct of_device_id sun8i_h3_r_pinctrl_match[] = {
+	{ .compatible = "allwinner,sun8i-h3-r-pinctrl", },
+	{}
+};
+
+static struct platform_driver sun8i_h3_r_pinctrl_driver = {
+	.probe	= sun8i_h3_r_pinctrl_probe,
+	.driver	= {
+		.name		= "sun8i-h3-r-pinctrl",
+		.of_match_table	= sun8i_h3_r_pinctrl_match,
+	},
+};
+builtin_platform_driver(sun8i_h3_r_pinctrl_driver);
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun9i-a80-r.c b/drivers/pinctrl/sunxi/pinctrl-sun9i-a80-r.c
index 42547ff..92a873f 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sun9i-a80-r.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sun9i-a80-r.c
@@ -9,7 +9,7 @@
  * warranty of any kind, whether express or implied.
  */
 
-#include <linux/module.h>
+#include <linux/init.h>
 #include <linux/platform_device.h>
 #include <linux/of.h>
 #include <linux/of_device.h>
@@ -164,7 +164,6 @@
 	{ .compatible = "allwinner,sun9i-a80-r-pinctrl", },
 	{}
 };
-MODULE_DEVICE_TABLE(of, sun9i_a80_r_pinctrl_match);
 
 static struct platform_driver sun9i_a80_r_pinctrl_driver = {
 	.probe	= sun9i_a80_r_pinctrl_probe,
@@ -174,8 +173,4 @@
 		.of_match_table	= sun9i_a80_r_pinctrl_match,
 	},
 };
-module_platform_driver(sun9i_a80_r_pinctrl_driver);
-
-MODULE_AUTHOR("Maxime Ripard <maxime.ripard@free-electrons.com");
-MODULE_DESCRIPTION("Allwinner A80 R_PIO pinctrl driver");
-MODULE_LICENSE("GPL");
+builtin_platform_driver(sun9i_a80_r_pinctrl_driver);
diff --git a/drivers/pinctrl/sunxi/pinctrl-sunxi.c b/drivers/pinctrl/sunxi/pinctrl-sunxi.c
index 7a2465f..3a2f561 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sunxi.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sunxi.c
@@ -15,7 +15,7 @@
 #include <linux/gpio/driver.h>
 #include <linux/irqdomain.h>
 #include <linux/irqchip/chained_irq.h>
-#include <linux/module.h>
+#include <linux/export.h>
 #include <linux/of.h>
 #include <linux/of_address.h>
 #include <linux/of_device.h>
@@ -459,15 +459,16 @@
 	u8 index = sunxi_data_offset(offset);
 	u32 set_mux = pctl->desc->irq_read_needs_mux &&
 			test_bit(FLAG_USED_AS_IRQ, &chip->desc[offset].flags);
+	u32 pin = offset + chip->base;
 	u32 val;
 
 	if (set_mux)
-		sunxi_pmx_set(pctl->pctl_dev, offset, SUN4I_FUNC_INPUT);
+		sunxi_pmx_set(pctl->pctl_dev, pin, SUN4I_FUNC_INPUT);
 
 	val = (readl(pctl->membase + reg) >> index) & DATA_PINS_MASK;
 
 	if (set_mux)
-		sunxi_pmx_set(pctl->pctl_dev, offset, SUN4I_FUNC_IRQ);
+		sunxi_pmx_set(pctl->pctl_dev, pin, SUN4I_FUNC_IRQ);
 
 	return !!val;
 }
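
The fix in this hunk is about numbering: "offset" is relative to the gpio_chip, while sunxi_pmx_set() takes a global pin number, so when irq_read_needs_mux forces a re-mux around the register read, controllers whose pins do not start at 0 (for instance the new H3 R_PIO descriptor above, with .pin_base = PL_BASE) would otherwise hand the wrong number to the mux helper. An illustrative helper, not part of this series, that captures the translation the patch open-codes:

	/* Illustrative only: turn a gpio_chip-relative offset into the
	 * global pin number that sunxi_pmx_set() expects; the hunk above
	 * open-codes this as "offset + chip->base".  struct gpio_chip
	 * comes from <linux/gpio/driver.h>, already included in this file. */
	static inline u32 sunxi_gpio_offset_to_pin(struct gpio_chip *chip,
						   unsigned int offset)
	{
		return offset + chip->base;
	}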
diff --git a/drivers/pinctrl/tegra/Kconfig b/drivers/pinctrl/tegra/Kconfig
new file mode 100644
index 0000000..24e20cc
--- /dev/null
+++ b/drivers/pinctrl/tegra/Kconfig
@@ -0,0 +1,30 @@
+config PINCTRL_TEGRA
+	bool
+	select PINMUX
+	select PINCONF
+
+config PINCTRL_TEGRA20
+	bool
+	select PINCTRL_TEGRA
+
+config PINCTRL_TEGRA30
+	bool
+	select PINCTRL_TEGRA
+
+config PINCTRL_TEGRA114
+	bool
+	select PINCTRL_TEGRA
+
+config PINCTRL_TEGRA124
+	bool
+	select PINCTRL_TEGRA
+
+config PINCTRL_TEGRA210
+	bool
+	select PINCTRL_TEGRA
+
+config PINCTRL_TEGRA_XUSB
+	def_bool y if ARCH_TEGRA
+	select GENERIC_PHY
+	select PINCONF
+	select PINMUX
diff --git a/drivers/pinctrl/tegra/Makefile b/drivers/pinctrl/tegra/Makefile
new file mode 100644
index 0000000..a927379
--- /dev/null
+++ b/drivers/pinctrl/tegra/Makefile
@@ -0,0 +1,7 @@
+obj-y					+= pinctrl-tegra.o
+obj-$(CONFIG_PINCTRL_TEGRA20)		+= pinctrl-tegra20.o
+obj-$(CONFIG_PINCTRL_TEGRA30)		+= pinctrl-tegra30.o
+obj-$(CONFIG_PINCTRL_TEGRA114)		+= pinctrl-tegra114.o
+obj-$(CONFIG_PINCTRL_TEGRA124)		+= pinctrl-tegra124.o
+obj-$(CONFIG_PINCTRL_TEGRA210)		+= pinctrl-tegra210.o
+obj-$(CONFIG_PINCTRL_TEGRA_XUSB)	+= pinctrl-tegra-xusb.o
diff --git a/drivers/pinctrl/pinctrl-tegra-xusb.c b/drivers/pinctrl/tegra/pinctrl-tegra-xusb.c
similarity index 99%
rename from drivers/pinctrl/pinctrl-tegra-xusb.c
rename to drivers/pinctrl/tegra/pinctrl-tegra-xusb.c
index bd3aa5a..2f06029 100644
--- a/drivers/pinctrl/pinctrl-tegra-xusb.c
+++ b/drivers/pinctrl/tegra/pinctrl-tegra-xusb.c
@@ -24,8 +24,8 @@
 
 #include <dt-bindings/pinctrl/pinctrl-tegra-xusb.h>
 
-#include "core.h"
-#include "pinctrl-utils.h"
+#include "../core.h"
+#include "../pinctrl-utils.h"
 
 #define XUSB_PADCTL_ELPG_PROGRAM 0x01c
 #define XUSB_PADCTL_ELPG_PROGRAM_AUX_MUX_LP0_VCORE_DOWN (1 << 26)
diff --git a/drivers/pinctrl/pinctrl-tegra.c b/drivers/pinctrl/tegra/pinctrl-tegra.c
similarity index 99%
rename from drivers/pinctrl/pinctrl-tegra.c
rename to drivers/pinctrl/tegra/pinctrl-tegra.c
index 9da4da2..4938882 100644
--- a/drivers/pinctrl/pinctrl-tegra.c
+++ b/drivers/pinctrl/tegra/pinctrl-tegra.c
@@ -30,9 +30,9 @@
 #include <linux/pinctrl/pinconf.h>
 #include <linux/slab.h>
 
-#include "core.h"
+#include "../core.h"
+#include "../pinctrl-utils.h"
 #include "pinctrl-tegra.h"
-#include "pinctrl-utils.h"
 
 struct tegra_pmx {
 	struct device *dev;
diff --git a/drivers/pinctrl/pinctrl-tegra.h b/drivers/pinctrl/tegra/pinctrl-tegra.h
similarity index 100%
rename from drivers/pinctrl/pinctrl-tegra.h
rename to drivers/pinctrl/tegra/pinctrl-tegra.h
diff --git a/drivers/pinctrl/pinctrl-tegra114.c b/drivers/pinctrl/tegra/pinctrl-tegra114.c
similarity index 100%
rename from drivers/pinctrl/pinctrl-tegra114.c
rename to drivers/pinctrl/tegra/pinctrl-tegra114.c
diff --git a/drivers/pinctrl/pinctrl-tegra124.c b/drivers/pinctrl/tegra/pinctrl-tegra124.c
similarity index 100%
rename from drivers/pinctrl/pinctrl-tegra124.c
rename to drivers/pinctrl/tegra/pinctrl-tegra124.c
diff --git a/drivers/pinctrl/pinctrl-tegra20.c b/drivers/pinctrl/tegra/pinctrl-tegra20.c
similarity index 100%
rename from drivers/pinctrl/pinctrl-tegra20.c
rename to drivers/pinctrl/tegra/pinctrl-tegra20.c
diff --git a/drivers/pinctrl/pinctrl-tegra210.c b/drivers/pinctrl/tegra/pinctrl-tegra210.c
similarity index 100%
rename from drivers/pinctrl/pinctrl-tegra210.c
rename to drivers/pinctrl/tegra/pinctrl-tegra210.c
diff --git a/drivers/pinctrl/pinctrl-tegra30.c b/drivers/pinctrl/tegra/pinctrl-tegra30.c
similarity index 100%
rename from drivers/pinctrl/pinctrl-tegra30.c
rename to drivers/pinctrl/tegra/pinctrl-tegra30.c
diff --git a/drivers/pinctrl/uniphier/Kconfig b/drivers/pinctrl/uniphier/Kconfig
index 7abd614..0b40ded 100644
--- a/drivers/pinctrl/uniphier/Kconfig
+++ b/drivers/pinctrl/uniphier/Kconfig
@@ -1,6 +1,6 @@
 menuconfig PINCTRL_UNIPHIER
 	bool "UniPhier SoC pinctrl drivers"
-	depends on ARCH_UNIPHIER
+	depends on ARCH_UNIPHIER || COMPILE_TEST
 	depends on OF && MFD_SYSCON
 	default y
 	select PINMUX
@@ -8,27 +8,27 @@
 
 if PINCTRL_UNIPHIER
 
-config PINCTRL_UNIPHIER_PH1_LD4
+config PINCTRL_UNIPHIER_LD4
 	tristate "UniPhier PH1-LD4 SoC pinctrl driver"
 	default y
 
-config PINCTRL_UNIPHIER_PH1_PRO4
+config PINCTRL_UNIPHIER_PRO4
 	tristate "UniPhier PH1-Pro4 SoC pinctrl driver"
 	default y
 
-config PINCTRL_UNIPHIER_PH1_SLD8
+config PINCTRL_UNIPHIER_SLD8
 	tristate "UniPhier PH1-sLD8 SoC pinctrl driver"
 	default y
 
-config PINCTRL_UNIPHIER_PH1_PRO5
+config PINCTRL_UNIPHIER_PRO5
 	tristate "UniPhier PH1-Pro5 SoC pinctrl driver"
 	default y
 
-config PINCTRL_UNIPHIER_PROXSTREAM2
+config PINCTRL_UNIPHIER_PXS2
 	tristate "UniPhier ProXstream2 SoC pinctrl driver"
 	default y
 
-config PINCTRL_UNIPHIER_PH1_LD6B
+config PINCTRL_UNIPHIER_LD6B
 	tristate "UniPhier PH1-LD6b SoC pinctrl driver"
 	default y
 
diff --git a/drivers/pinctrl/uniphier/Makefile b/drivers/pinctrl/uniphier/Makefile
index e7ce967..3b8f9ee 100644
--- a/drivers/pinctrl/uniphier/Makefile
+++ b/drivers/pinctrl/uniphier/Makefile
@@ -1,8 +1,8 @@
-obj-y						+= pinctrl-uniphier-core.o
+obj-y					+= pinctrl-uniphier-core.o
 
-obj-$(CONFIG_PINCTRL_UNIPHIER_PH1_LD4)		+= pinctrl-ph1-ld4.o
-obj-$(CONFIG_PINCTRL_UNIPHIER_PH1_PRO4)		+= pinctrl-ph1-pro4.o
-obj-$(CONFIG_PINCTRL_UNIPHIER_PH1_SLD8)		+= pinctrl-ph1-sld8.o
-obj-$(CONFIG_PINCTRL_UNIPHIER_PH1_PRO5)		+= pinctrl-ph1-pro5.o
-obj-$(CONFIG_PINCTRL_UNIPHIER_PROXSTREAM2)	+= pinctrl-proxstream2.o
-obj-$(CONFIG_PINCTRL_UNIPHIER_PH1_LD6B)		+= pinctrl-ph1-ld6b.o
+obj-$(CONFIG_PINCTRL_UNIPHIER_LD4)	+= pinctrl-uniphier-ld4.o
+obj-$(CONFIG_PINCTRL_UNIPHIER_PRO4)	+= pinctrl-uniphier-pro4.o
+obj-$(CONFIG_PINCTRL_UNIPHIER_SLD8)	+= pinctrl-uniphier-sld8.o
+obj-$(CONFIG_PINCTRL_UNIPHIER_PRO5)	+= pinctrl-uniphier-pro5.o
+obj-$(CONFIG_PINCTRL_UNIPHIER_PXS2)	+= pinctrl-uniphier-pxs2.o
+obj-$(CONFIG_PINCTRL_UNIPHIER_LD6B)	+= pinctrl-uniphier-ld6b.o
diff --git a/drivers/pinctrl/uniphier/pinctrl-ph1-ld4.c b/drivers/pinctrl/uniphier/pinctrl-uniphier-ld4.c
similarity index 100%
rename from drivers/pinctrl/uniphier/pinctrl-ph1-ld4.c
rename to drivers/pinctrl/uniphier/pinctrl-uniphier-ld4.c
diff --git a/drivers/pinctrl/uniphier/pinctrl-ph1-ld6b.c b/drivers/pinctrl/uniphier/pinctrl-uniphier-ld6b.c
similarity index 100%
rename from drivers/pinctrl/uniphier/pinctrl-ph1-ld6b.c
rename to drivers/pinctrl/uniphier/pinctrl-uniphier-ld6b.c
diff --git a/drivers/pinctrl/uniphier/pinctrl-ph1-pro4.c b/drivers/pinctrl/uniphier/pinctrl-uniphier-pro4.c
similarity index 100%
rename from drivers/pinctrl/uniphier/pinctrl-ph1-pro4.c
rename to drivers/pinctrl/uniphier/pinctrl-uniphier-pro4.c
diff --git a/drivers/pinctrl/uniphier/pinctrl-ph1-pro5.c b/drivers/pinctrl/uniphier/pinctrl-uniphier-pro5.c
similarity index 100%
rename from drivers/pinctrl/uniphier/pinctrl-ph1-pro5.c
rename to drivers/pinctrl/uniphier/pinctrl-uniphier-pro5.c
diff --git a/drivers/pinctrl/uniphier/pinctrl-proxstream2.c b/drivers/pinctrl/uniphier/pinctrl-uniphier-pxs2.c
similarity index 100%
rename from drivers/pinctrl/uniphier/pinctrl-proxstream2.c
rename to drivers/pinctrl/uniphier/pinctrl-uniphier-pxs2.c
diff --git a/drivers/pinctrl/uniphier/pinctrl-ph1-sld8.c b/drivers/pinctrl/uniphier/pinctrl-uniphier-sld8.c
similarity index 100%
rename from drivers/pinctrl/uniphier/pinctrl-ph1-sld8.c
rename to drivers/pinctrl/uniphier/pinctrl-uniphier-sld8.c
diff --git a/drivers/ptp/ptp_chardev.c b/drivers/ptp/ptp_chardev.c
index da7bae9..579fd65 100644
--- a/drivers/ptp/ptp_chardev.c
+++ b/drivers/ptp/ptp_chardev.c
@@ -22,6 +22,7 @@
 #include <linux/poll.h>
 #include <linux/sched.h>
 #include <linux/slab.h>
+#include <linux/timekeeping.h>
 
 #include "ptp_private.h"
 
@@ -120,11 +121,13 @@
 	struct ptp_clock_caps caps;
 	struct ptp_clock_request req;
 	struct ptp_sys_offset *sysoff = NULL;
+	struct ptp_sys_offset_precise precise_offset;
 	struct ptp_pin_desc pd;
 	struct ptp_clock *ptp = container_of(pc, struct ptp_clock, clock);
 	struct ptp_clock_info *ops = ptp->info;
 	struct ptp_clock_time *pct;
 	struct timespec64 ts;
+	struct system_device_crosststamp xtstamp;
 	int enable, err = 0;
 	unsigned int i, pin_index;
 
@@ -138,6 +141,7 @@
 		caps.n_per_out = ptp->info->n_per_out;
 		caps.pps = ptp->info->pps;
 		caps.n_pins = ptp->info->n_pins;
+		caps.cross_timestamping = ptp->info->getcrosststamp != NULL;
 		if (copy_to_user((void __user *)arg, &caps, sizeof(caps)))
 			err = -EFAULT;
 		break;
@@ -180,6 +184,29 @@
 		err = ops->enable(ops, &req, enable);
 		break;
 
+	case PTP_SYS_OFFSET_PRECISE:
+		if (!ptp->info->getcrosststamp) {
+			err = -EOPNOTSUPP;
+			break;
+		}
+		err = ptp->info->getcrosststamp(ptp->info, &xtstamp);
+		if (err)
+			break;
+
+		ts = ktime_to_timespec64(xtstamp.device);
+		precise_offset.device.sec = ts.tv_sec;
+		precise_offset.device.nsec = ts.tv_nsec;
+		ts = ktime_to_timespec64(xtstamp.sys_realtime);
+		precise_offset.sys_realtime.sec = ts.tv_sec;
+		precise_offset.sys_realtime.nsec = ts.tv_nsec;
+		ts = ktime_to_timespec64(xtstamp.sys_monoraw);
+		precise_offset.sys_monoraw.sec = ts.tv_sec;
+		precise_offset.sys_monoraw.nsec = ts.tv_nsec;
+		if (copy_to_user((void __user *)arg, &precise_offset,
+				 sizeof(precise_offset)))
+			err = -EFAULT;
+		break;
+
 	case PTP_SYS_OFFSET:
 		sysoff = kmalloc(sizeof(*sysoff), GFP_KERNEL);
 		if (!sysoff) {
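The new PTP_SYS_OFFSET_PRECISE ioctl above returns a device/system cross
timestamp taken via the driver's getcrosststamp() hook. A minimal userspace
sketch of how a caller might exercise it is shown below; it is illustrative
only, and assumes the clock is exposed as /dev/ptp0 and that the ioctl and
struct ptp_sys_offset_precise definitions come from <linux/ptp_clock.h>.

	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <linux/ptp_clock.h>

	int main(void)
	{
		struct ptp_sys_offset_precise off = { 0 };
		int fd = open("/dev/ptp0", O_RDONLY);	/* assumed device node */

		if (fd < 0 || ioctl(fd, PTP_SYS_OFFSET_PRECISE, &off)) {
			perror("PTP_SYS_OFFSET_PRECISE");
			return 1;
		}

		/* Device time and the system clocks sampled at the same instant. */
		printf("device    %lld.%09u\n",
		       (long long)off.device.sec, off.device.nsec);
		printf("realtime  %lld.%09u\n",
		       (long long)off.sys_realtime.sec, off.sys_realtime.nsec);
		printf("monoraw   %lld.%09u\n",
		       (long long)off.sys_monoraw.sec, off.sys_monoraw.nsec);
		return 0;
	}

Drivers that do not implement getcrosststamp() return -EOPNOTSUPP here,
matching the cross_timestamping capability bit added to PTP_CLOCK_GETCAPS.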
diff --git a/drivers/regulator/Kconfig b/drivers/regulator/Kconfig
index 8155e80..91040ec 100644
--- a/drivers/regulator/Kconfig
+++ b/drivers/regulator/Kconfig
@@ -78,6 +78,15 @@
 	  This driver controls an active-semi act8865 voltage output
 	  regulator via I2C bus.
 
+config REGULATOR_ACT8945A
+	tristate "Active-semi ACT8945A voltage regulator"
+	depends on MFD_ACT8945A
+	help
+	  This driver controls an active-semi ACT8945A voltage regulator
+	  via I2C bus. The ACT8945A features three step-down DC/DC converters
+	  and four low-dropout linear regulators, along with an ActivePath
+	  battery charger.
+
 config REGULATOR_AD5398
 	tristate "Analog Devices AD5398/AD5821 regulators"
 	depends on I2C
@@ -261,6 +270,14 @@
 	  21 general purpose LDOs, 3 dedicated LDOs, and 5 BUCKs. All
 	  of them come with support for either ECO (idle) or sleep mode.
 
+config REGULATOR_HI655X
+	tristate "Hisilicon HI655X PMIC regulators support"
+	depends on ARCH_HISI || COMPILE_TEST
+	depends on MFD_HI655X_PMIC && OF
+	help
+	  This driver provides support for the voltage regulators of the
+	  Hisilicon Hi655x PMIC device.
+
 config REGULATOR_ISL9305
 	tristate "Intersil ISL9305 regulator"
 	depends on I2C
@@ -343,6 +360,15 @@
 	  regulator via I2C bus. The provided regulator is suitable
 	  for PXA27x chips to control VCC_CORE and VCC_USIM voltages.
 
+config REGULATOR_MAX77620
+	tristate "Maxim 77620/MAX20024 voltage regulator"
+	depends on MFD_MAX77620
+	help
+	  This driver controls the Maxim MAX77620 voltage output regulators
+	  via I2C bus. The provided regulators are suitable for Tegra
+	  chips to control the step-down DC-DC converters and LDOs. Say Y
+	  here to enable the regulator driver.
+
 config REGULATOR_MAX8649
 	tristate "Maxim 8649 voltage regulator"
 	depends on I2C
diff --git a/drivers/regulator/Makefile b/drivers/regulator/Makefile
index 980b194..61bfbb9 100644
--- a/drivers/regulator/Makefile
+++ b/drivers/regulator/Makefile
@@ -15,6 +15,7 @@
 obj-$(CONFIG_REGULATOR_AB3100) += ab3100.o
 obj-$(CONFIG_REGULATOR_AB8500)	+= ab8500-ext.o ab8500.o
 obj-$(CONFIG_REGULATOR_ACT8865) += act8865-regulator.o
+obj-$(CONFIG_REGULATOR_ACT8945A) += act8945a-regulator.o
 obj-$(CONFIG_REGULATOR_AD5398) += ad5398.o
 obj-$(CONFIG_REGULATOR_ANATOP) += anatop-regulator.o
 obj-$(CONFIG_REGULATOR_ARIZONA) += arizona-micsupp.o arizona-ldo1.o
@@ -34,6 +35,7 @@
 obj-$(CONFIG_REGULATOR_FAN53555) += fan53555.o
 obj-$(CONFIG_REGULATOR_GPIO) += gpio-regulator.o
 obj-$(CONFIG_REGULATOR_HI6421) += hi6421-regulator.o
+obj-$(CONFIG_REGULATOR_HI655X) += hi655x-regulator.o
 obj-$(CONFIG_REGULATOR_ISL6271A) += isl6271a-regulator.o
 obj-$(CONFIG_REGULATOR_ISL9305) += isl9305.o
 obj-$(CONFIG_REGULATOR_LM363X) += lm363x-regulator.o
@@ -46,6 +48,7 @@
 obj-$(CONFIG_REGULATOR_LTC3589) += ltc3589.o
 obj-$(CONFIG_REGULATOR_MAX14577) += max14577.o
 obj-$(CONFIG_REGULATOR_MAX1586) += max1586.o
+obj-$(CONFIG_REGULATOR_MAX77620) += max77620-regulator.o
 obj-$(CONFIG_REGULATOR_MAX8649)	+= max8649.o
 obj-$(CONFIG_REGULATOR_MAX8660) += max8660.o
 obj-$(CONFIG_REGULATOR_MAX8907) += max8907-regulator.o
@@ -54,9 +57,9 @@
 obj-$(CONFIG_REGULATOR_MAX8973) += max8973-regulator.o
 obj-$(CONFIG_REGULATOR_MAX8997) += max8997.o
 obj-$(CONFIG_REGULATOR_MAX8998) += max8998.o
-obj-$(CONFIG_REGULATOR_MAX77686) += max77686.o
+obj-$(CONFIG_REGULATOR_MAX77686) += max77686-regulator.o
 obj-$(CONFIG_REGULATOR_MAX77693) += max77693.o
-obj-$(CONFIG_REGULATOR_MAX77802) += max77802.o
+obj-$(CONFIG_REGULATOR_MAX77802) += max77802-regulator.o
 obj-$(CONFIG_REGULATOR_MC13783) += mc13783-regulator.o
 obj-$(CONFIG_REGULATOR_MC13892) += mc13892-regulator.o
 obj-$(CONFIG_REGULATOR_MC13XXX_CORE) +=  mc13xxx-regulator-core.o
@@ -98,7 +101,7 @@
 obj-$(CONFIG_REGULATOR_TPS65912) += tps65912-regulator.o
 obj-$(CONFIG_REGULATOR_TPS80031) += tps80031-regulator.o
 obj-$(CONFIG_REGULATOR_TWL4030) += twl-regulator.o
-obj-$(CONFIG_REGULATOR_VEXPRESS) += vexpress.o
+obj-$(CONFIG_REGULATOR_VEXPRESS) += vexpress-regulator.o
 obj-$(CONFIG_REGULATOR_WM831X) += wm831x-dcdc.o
 obj-$(CONFIG_REGULATOR_WM831X) += wm831x-isink.o
 obj-$(CONFIG_REGULATOR_WM831X) += wm831x-ldo.o
diff --git a/drivers/regulator/act8865-regulator.c b/drivers/regulator/act8865-regulator.c
index f8d4cd3..000d566 100644
--- a/drivers/regulator/act8865-regulator.c
+++ b/drivers/regulator/act8865-regulator.c
@@ -218,7 +218,7 @@
 		.ops = &act8865_ldo_ops,
 		.type = REGULATOR_VOLTAGE,
 		.n_voltages = 1,
-		.fixed_uV = 1800000,
+		.fixed_uV = 3300000,
 		.enable_reg = ACT8600_LDO910_CTRL,
 		.enable_mask = ACT8865_ENA,
 		.owner = THIS_MODULE,
@@ -369,7 +369,7 @@
 	for (i = 0; i < num_matches; i++) {
 		regulator->id = i;
 		regulator->name = matches[i].name;
-		regulator->platform_data = matches[i].init_data;
+		regulator->init_data = matches[i].init_data;
 		of_node[i] = matches[i].of_node;
 		regulator++;
 	}
@@ -396,7 +396,7 @@
 
 	for (i = 0; i < pdata->num_regulators; i++) {
 		if (pdata->regulators[i].id == id)
-			return pdata->regulators[i].platform_data;
+			return pdata->regulators[i].init_data;
 	}
 
 	return NULL;
@@ -415,7 +415,7 @@
 static int act8865_pmic_probe(struct i2c_client *client,
 			      const struct i2c_device_id *i2c_id)
 {
-	static const struct regulator_desc *regulators;
+	const struct regulator_desc *regulators;
 	struct act8865_platform_data pdata_of, *pdata;
 	struct device *dev = &client->dev;
 	struct device_node **of_node;
diff --git a/drivers/regulator/act8945a-regulator.c b/drivers/regulator/act8945a-regulator.c
new file mode 100644
index 0000000..441864b
--- /dev/null
+++ b/drivers/regulator/act8945a-regulator.c
@@ -0,0 +1,165 @@
+/*
+ * Voltage regulation driver for active-semi ACT8945A PMIC
+ *
+ * Copyright (C) 2015 Atmel Corporation
+ *
+ * Author: Wenyou Yang <wenyou.yang@atmel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+
+/**
+ * ACT8945A Global Register Map.
+ */
+#define ACT8945A_SYS_MODE	0x00
+#define ACT8945A_SYS_CTRL	0x01
+#define ACT8945A_DCDC1_VSET1	0x20
+#define ACT8945A_DCDC1_VSET2	0x21
+#define ACT8945A_DCDC1_CTRL	0x22
+#define ACT8945A_DCDC2_VSET1	0x30
+#define ACT8945A_DCDC2_VSET2	0x31
+#define ACT8945A_DCDC2_CTRL	0x32
+#define ACT8945A_DCDC3_VSET1	0x40
+#define ACT8945A_DCDC3_VSET2	0x41
+#define ACT8945A_DCDC3_CTRL	0x42
+#define ACT8945A_LDO1_VSET	0x50
+#define ACT8945A_LDO1_CTRL	0x51
+#define ACT8945A_LDO2_VSET	0x54
+#define ACT8945A_LDO2_CTRL	0x55
+#define ACT8945A_LDO3_VSET	0x60
+#define ACT8945A_LDO3_CTRL	0x61
+#define ACT8945A_LDO4_VSET	0x64
+#define ACT8945A_LDO4_CTRL	0x65
+
+/**
+ * Field Definitions.
+ */
+#define ACT8945A_ENA		0x80	/* ON - [7] */
+#define ACT8945A_VSEL_MASK	0x3F	/* VSET - [5:0] */
+
+/**
+ * ACT8945A Voltage Number
+ */
+#define ACT8945A_VOLTAGE_NUM	64
+
+enum {
+	ACT8945A_ID_DCDC1,
+	ACT8945A_ID_DCDC2,
+	ACT8945A_ID_DCDC3,
+	ACT8945A_ID_LDO1,
+	ACT8945A_ID_LDO2,
+	ACT8945A_ID_LDO3,
+	ACT8945A_ID_LDO4,
+	ACT8945A_REG_NUM,
+};
+
+static const struct regulator_linear_range act8945a_voltage_ranges[] = {
+	REGULATOR_LINEAR_RANGE(600000, 0, 23, 25000),
+	REGULATOR_LINEAR_RANGE(1200000, 24, 47, 50000),
+	REGULATOR_LINEAR_RANGE(2400000, 48, 63, 100000),
+};
+
+static struct regulator_ops act8945a_ops = {
+	.list_voltage		= regulator_list_voltage_linear_range,
+	.map_voltage		= regulator_map_voltage_linear_range,
+	.get_voltage_sel	= regulator_get_voltage_sel_regmap,
+	.set_voltage_sel	= regulator_set_voltage_sel_regmap,
+	.enable			= regulator_enable_regmap,
+	.disable		= regulator_disable_regmap,
+	.is_enabled		= regulator_is_enabled_regmap,
+};
+
+#define ACT89xx_REG(_name, _family, _id, _vsel_reg, _supply)		\
+	[_family##_ID_##_id] = {					\
+		.name			= _name,			\
+		.supply_name		= _supply,			\
+		.of_match		= of_match_ptr("REG_"#_id),	\
+		.regulators_node	= of_match_ptr("regulators"),	\
+		.id			= _family##_ID_##_id,		\
+		.type			= REGULATOR_VOLTAGE,		\
+		.ops			= &act8945a_ops,		\
+		.n_voltages		= ACT8945A_VOLTAGE_NUM,		\
+		.linear_ranges		= act8945a_voltage_ranges,	\
+		.n_linear_ranges	= ARRAY_SIZE(act8945a_voltage_ranges), \
+		.vsel_reg		= _family##_##_id##_##_vsel_reg, \
+		.vsel_mask		= ACT8945A_VSEL_MASK,		\
+		.enable_reg		= _family##_##_id##_CTRL,	\
+		.enable_mask		= ACT8945A_ENA,			\
+		.owner			= THIS_MODULE,			\
+	}
+
+static const struct regulator_desc act8945a_regulators[] = {
+	ACT89xx_REG("DCDC_REG1", ACT8945A, DCDC1, VSET1, "vp1"),
+	ACT89xx_REG("DCDC_REG2", ACT8945A, DCDC2, VSET1, "vp2"),
+	ACT89xx_REG("DCDC_REG3", ACT8945A, DCDC3, VSET1, "vp3"),
+	ACT89xx_REG("LDO_REG1", ACT8945A, LDO1, VSET, "inl45"),
+	ACT89xx_REG("LDO_REG2", ACT8945A, LDO2, VSET, "inl45"),
+	ACT89xx_REG("LDO_REG3", ACT8945A, LDO3, VSET, "inl67"),
+	ACT89xx_REG("LDO_REG4", ACT8945A, LDO4, VSET, "inl67"),
+};
+
+static const struct regulator_desc act8945a_alt_regulators[] = {
+	ACT89xx_REG("DCDC_REG1", ACT8945A, DCDC1, VSET2, "vp1"),
+	ACT89xx_REG("DCDC_REG2", ACT8945A, DCDC2, VSET2, "vp2"),
+	ACT89xx_REG("DCDC_REG3", ACT8945A, DCDC3, VSET2, "vp3"),
+	ACT89xx_REG("LDO_REG1", ACT8945A, LDO1, VSET, "inl45"),
+	ACT89xx_REG("LDO_REG2", ACT8945A, LDO2, VSET, "inl45"),
+	ACT89xx_REG("LDO_REG3", ACT8945A, LDO3, VSET, "inl67"),
+	ACT89xx_REG("LDO_REG4", ACT8945A, LDO4, VSET, "inl67"),
+};
+
+static int act8945a_pmic_probe(struct platform_device *pdev)
+{
+	struct regulator_config config = { };
+	const struct regulator_desc *regulators;
+	struct regulator_dev *rdev;
+	int i, num_regulators;
+	bool voltage_select;
+
+	voltage_select = of_property_read_bool(pdev->dev.parent->of_node,
+					       "active-semi,vsel-high");
+
+	if (voltage_select) {
+		regulators = act8945a_alt_regulators;
+		num_regulators = ARRAY_SIZE(act8945a_alt_regulators);
+	} else {
+		regulators = act8945a_regulators;
+		num_regulators = ARRAY_SIZE(act8945a_regulators);
+	}
+
+	config.dev = &pdev->dev;
+	config.dev->of_node = pdev->dev.parent->of_node;
+	for (i = 0; i < num_regulators; i++) {
+		rdev = devm_regulator_register(&pdev->dev, &regulators[i], &config);
+		if (IS_ERR(rdev)) {
+			dev_err(&pdev->dev,
+				"failed to register %s regulator\n",
+				regulators[i].name);
+			return PTR_ERR(rdev);
+		}
+	}
+
+	return 0;
+}
+
+static struct platform_driver act8945a_pmic_driver = {
+	.driver = {
+		.name = "act8945a-regulator",
+	},
+	.probe = act8945a_pmic_probe,
+};
+module_platform_driver(act8945a_pmic_driver);
+
+MODULE_DESCRIPTION("Active-semi ACT8945A voltage regulator driver");
+MODULE_AUTHOR("Wenyou Yang <wenyou.yang@atmel.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/regulator/ad5398.c b/drivers/regulator/ad5398.c
index ea50a88..8b0f788 100644
--- a/drivers/regulator/ad5398.c
+++ b/drivers/regulator/ad5398.c
@@ -58,10 +58,12 @@
 
 	val = cpu_to_be16(data);
 	ret = i2c_master_send(client, (char *)&val, 2);
-	if (ret < 0)
+	if (ret != 2) {
 		dev_err(&client->dev, "I2C write error\n");
+		return ret < 0 ? ret : -EIO;
+	}
 
-	return ret;
+	return 0;
 }
 
 static int ad5398_get_current_limit(struct regulator_dev *rdev)
diff --git a/drivers/regulator/axp20x-regulator.c b/drivers/regulator/axp20x-regulator.c
index f2e1a39..214e815 100644
--- a/drivers/regulator/axp20x-regulator.c
+++ b/drivers/regulator/axp20x-regulator.c
@@ -39,7 +39,7 @@
 #define AXP_DESC_IO(_family, _id, _match, _supply, _min, _max, _step, _vreg,	\
 		    _vmask, _ereg, _emask, _enable_val, _disable_val)		\
 	[_family##_##_id] = {							\
-		.name		= #_id,						\
+		.name		= (_match),					\
 		.supply_name	= (_supply),					\
 		.of_match	= of_match_ptr(_match),				\
 		.regulators_node = of_match_ptr("regulators"),			\
@@ -61,7 +61,7 @@
 #define AXP_DESC(_family, _id, _match, _supply, _min, _max, _step, _vreg,	\
 		 _vmask, _ereg, _emask) 					\
 	[_family##_##_id] = {							\
-		.name		= #_id,						\
+		.name		= (_match),					\
 		.supply_name	= (_supply),					\
 		.of_match	= of_match_ptr(_match),				\
 		.regulators_node = of_match_ptr("regulators"),			\
@@ -78,21 +78,15 @@
 		.ops		= &axp20x_ops,					\
 	}
 
-#define AXP_DESC_SW(_family, _id, _match, _supply, _min, _max, _step, _vreg,	\
-		    _vmask, _ereg, _emask) 					\
+#define AXP_DESC_SW(_family, _id, _match, _supply, _ereg, _emask)		\
 	[_family##_##_id] = {							\
-		.name		= #_id,						\
+		.name		= (_match),					\
 		.supply_name	= (_supply),					\
 		.of_match	= of_match_ptr(_match),				\
 		.regulators_node = of_match_ptr("regulators"),			\
 		.type		= REGULATOR_VOLTAGE,				\
 		.id		= _family##_##_id,				\
-		.n_voltages	= (((_max) - (_min)) / (_step) + 1),		\
 		.owner		= THIS_MODULE,					\
-		.min_uV		= (_min) * 1000,				\
-		.uV_step	= (_step) * 1000,				\
-		.vsel_reg	= (_vreg),					\
-		.vsel_mask	= (_vmask),					\
 		.enable_reg	= (_ereg),					\
 		.enable_mask	= (_emask),					\
 		.ops		= &axp20x_ops_sw,				\
@@ -100,7 +94,7 @@
 
 #define AXP_DESC_FIXED(_family, _id, _match, _supply, _volt)			\
 	[_family##_##_id] = {							\
-		.name		= #_id,						\
+		.name		= (_match),					\
 		.supply_name	= (_supply),					\
 		.of_match	= of_match_ptr(_match),				\
 		.regulators_node = of_match_ptr("regulators"),			\
@@ -112,39 +106,34 @@
 		.ops		= &axp20x_ops_fixed				\
 	}
 
-#define AXP_DESC_TABLE(_family, _id, _match, _supply, _table, _vreg, _vmask,	\
-		       _ereg, _emask)						\
+#define AXP_DESC_RANGES(_family, _id, _match, _supply, _ranges, _n_voltages,	\
+			_vreg, _vmask, _ereg, _emask)				\
 	[_family##_##_id] = {							\
-		.name		= #_id,						\
+		.name		= (_match),					\
 		.supply_name	= (_supply),					\
 		.of_match	= of_match_ptr(_match),				\
 		.regulators_node = of_match_ptr("regulators"),			\
 		.type		= REGULATOR_VOLTAGE,				\
 		.id		= _family##_##_id,				\
-		.n_voltages	= ARRAY_SIZE(_table),				\
+		.n_voltages	= (_n_voltages),				\
 		.owner		= THIS_MODULE,					\
 		.vsel_reg	= (_vreg),					\
 		.vsel_mask	= (_vmask),					\
 		.enable_reg	= (_ereg),					\
 		.enable_mask	= (_emask),					\
-		.volt_table	= (_table),					\
-		.ops		= &axp20x_ops_table,				\
+		.linear_ranges	= (_ranges),					\
+		.n_linear_ranges = ARRAY_SIZE(_ranges),				\
+		.ops		= &axp20x_ops_range,				\
 	}
 
-static const int axp20x_ldo4_data[] = { 1250000, 1300000, 1400000, 1500000, 1600000,
-					1700000, 1800000, 1900000, 2000000, 2500000,
-					2700000, 2800000, 3000000, 3100000, 3200000,
-					3300000 };
-
 static struct regulator_ops axp20x_ops_fixed = {
 	.list_voltage		= regulator_list_voltage_linear,
 };
 
-static struct regulator_ops axp20x_ops_table = {
+static struct regulator_ops axp20x_ops_range = {
 	.set_voltage_sel	= regulator_set_voltage_sel_regmap,
 	.get_voltage_sel	= regulator_get_voltage_sel_regmap,
-	.list_voltage		= regulator_list_voltage_table,
-	.map_voltage		= regulator_map_voltage_ascend,
+	.list_voltage		= regulator_list_voltage_linear_range,
 	.enable			= regulator_enable_regmap,
 	.disable		= regulator_disable_regmap,
 	.is_enabled		= regulator_is_enabled_regmap,
@@ -160,13 +149,17 @@
 };
 
 static struct regulator_ops axp20x_ops_sw = {
-	.get_voltage_sel	= regulator_get_voltage_sel_regmap,
-	.list_voltage		= regulator_list_voltage_linear,
 	.enable			= regulator_enable_regmap,
 	.disable		= regulator_disable_regmap,
 	.is_enabled		= regulator_is_enabled_regmap,
 };
 
+static const struct regulator_linear_range axp20x_ldo4_ranges[] = {
+	REGULATOR_LINEAR_RANGE(1250000, 0x0, 0x0, 0),
+	REGULATOR_LINEAR_RANGE(1300000, 0x1, 0x8, 100000),
+	REGULATOR_LINEAR_RANGE(2500000, 0x9, 0xf, 100000),
+};
+
 static const struct regulator_desc axp20x_regulators[] = {
 	AXP_DESC(AXP20X, DCDC2, "dcdc2", "vin2", 700, 2275, 25,
 		 AXP20X_DCDC2_V_OUT, 0x3f, AXP20X_PWR_OUT_CTRL, 0x10),
@@ -177,8 +170,9 @@
 		 AXP20X_LDO24_V_OUT, 0xf0, AXP20X_PWR_OUT_CTRL, 0x04),
 	AXP_DESC(AXP20X, LDO3, "ldo3", "ldo3in", 700, 3500, 25,
 		 AXP20X_LDO3_V_OUT, 0x7f, AXP20X_PWR_OUT_CTRL, 0x40),
-	AXP_DESC_TABLE(AXP20X, LDO4, "ldo4", "ldo24in", axp20x_ldo4_data,
-		       AXP20X_LDO24_V_OUT, 0x0f, AXP20X_PWR_OUT_CTRL, 0x08),
+	AXP_DESC_RANGES(AXP20X, LDO4, "ldo4", "ldo24in", axp20x_ldo4_ranges,
+			16, AXP20X_LDO24_V_OUT, 0x0f, AXP20X_PWR_OUT_CTRL,
+			0x08),
 	AXP_DESC_IO(AXP20X, LDO5, "ldo5", "ldo5in", 1800, 3300, 100,
 		    AXP20X_LDO5_V_OUT, 0xf0, AXP20X_GPIO0_CTRL, 0x07,
 		    AXP20X_IO_ENABLED, AXP20X_IO_DISABLED),
@@ -196,8 +190,8 @@
 	AXP_DESC(AXP22X, DCDC5, "dcdc5", "vin5", 1000, 2550, 50,
 		 AXP22X_DCDC5_V_OUT, 0x1f, AXP22X_PWR_OUT_CTRL1, BIT(5)),
 	/* secondary switchable output of DCDC1 */
-	AXP_DESC_SW(AXP22X, DC1SW, "dc1sw", NULL, 1600, 3400, 100,
-		    AXP22X_DCDC1_V_OUT, 0x1f, AXP22X_PWR_OUT_CTRL2, BIT(7)),
+	AXP_DESC_SW(AXP22X, DC1SW, "dc1sw", NULL, AXP22X_PWR_OUT_CTRL2,
+		    BIT(7)),
 	/* LDO regulator internally chained to DCDC5 */
 	AXP_DESC(AXP22X, DC5LDO, "dc5ldo", NULL, 700, 1400, 100,
 		 AXP22X_DC5LDO_V_OUT, 0x7, AXP22X_PWR_OUT_CTRL1, BIT(0)),
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index 744c988..e0b7642 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -1057,18 +1057,18 @@
 
 	ret = machine_constraints_voltage(rdev, rdev->constraints);
 	if (ret != 0)
-		goto out;
+		return ret;
 
 	ret = machine_constraints_current(rdev, rdev->constraints);
 	if (ret != 0)
-		goto out;
+		return ret;
 
 	if (rdev->constraints->ilim_uA && ops->set_input_current_limit) {
 		ret = ops->set_input_current_limit(rdev,
 						   rdev->constraints->ilim_uA);
 		if (ret < 0) {
 			rdev_err(rdev, "failed to set input limit\n");
-			goto out;
+			return ret;
 		}
 	}
 
@@ -1077,21 +1077,20 @@
 		ret = suspend_prepare(rdev, rdev->constraints->initial_state);
 		if (ret < 0) {
 			rdev_err(rdev, "failed to set suspend state\n");
-			goto out;
+			return ret;
 		}
 	}
 
 	if (rdev->constraints->initial_mode) {
 		if (!ops->set_mode) {
 			rdev_err(rdev, "no set_mode operation\n");
-			ret = -EINVAL;
-			goto out;
+			return -EINVAL;
 		}
 
 		ret = ops->set_mode(rdev, rdev->constraints->initial_mode);
 		if (ret < 0) {
 			rdev_err(rdev, "failed to set initial mode: %d\n", ret);
-			goto out;
+			return ret;
 		}
 	}
 
@@ -1102,7 +1101,7 @@
 		ret = _regulator_do_enable(rdev);
 		if (ret < 0 && ret != -EINVAL) {
 			rdev_err(rdev, "failed to enable\n");
-			goto out;
+			return ret;
 		}
 	}
 
@@ -1111,7 +1110,7 @@
 		ret = ops->set_ramp_delay(rdev, rdev->constraints->ramp_delay);
 		if (ret < 0) {
 			rdev_err(rdev, "failed to set ramp_delay\n");
-			goto out;
+			return ret;
 		}
 	}
 
@@ -1119,7 +1118,7 @@
 		ret = ops->set_pull_down(rdev);
 		if (ret < 0) {
 			rdev_err(rdev, "failed to set pull down\n");
-			goto out;
+			return ret;
 		}
 	}
 
@@ -1127,7 +1126,7 @@
 		ret = ops->set_soft_start(rdev);
 		if (ret < 0) {
 			rdev_err(rdev, "failed to set soft start\n");
-			goto out;
+			return ret;
 		}
 	}
 
@@ -1136,16 +1135,23 @@
 		ret = ops->set_over_current_protection(rdev);
 		if (ret < 0) {
 			rdev_err(rdev, "failed to set over current protection\n");
-			goto out;
+			return ret;
+		}
+	}
+
+	if (rdev->constraints->active_discharge && ops->set_active_discharge) {
+		bool ad_state = (rdev->constraints->active_discharge ==
+			      REGULATOR_ACTIVE_DISCHARGE_ENABLE) ? true : false;
+
+		ret = ops->set_active_discharge(rdev, ad_state);
+		if (ret < 0) {
+			rdev_err(rdev, "failed to set active discharge\n");
+			return ret;
 		}
 	}
 
 	print_constraints(rdev);
 	return 0;
-out:
-	kfree(rdev->constraints);
-	rdev->constraints = NULL;
-	return ret;
 }
 
 /**
@@ -3918,6 +3935,16 @@
 			goto clean;
 	}
 
+	if ((config->ena_gpio || config->ena_gpio_initialized) &&
+	    gpio_is_valid(config->ena_gpio)) {
+		ret = regulator_ena_gpio_request(rdev, config);
+		if (ret != 0) {
+			rdev_err(rdev, "Failed to request enable GPIO%d: %d\n",
+				 config->ena_gpio, ret);
+			goto clean;
+		}
+	}
+
 	/* register with sysfs */
 	rdev->dev.class = &regulator_class;
 	rdev->dev.parent = dev;
@@ -3926,21 +3953,11 @@
 	ret = device_register(&rdev->dev);
 	if (ret != 0) {
 		put_device(&rdev->dev);
-		goto clean;
+		goto wash;
 	}
 
 	dev_set_drvdata(&rdev->dev, rdev);
 
-	if ((config->ena_gpio || config->ena_gpio_initialized) &&
-	    gpio_is_valid(config->ena_gpio)) {
-		ret = regulator_ena_gpio_request(rdev, config);
-		if (ret != 0) {
-			rdev_err(rdev, "Failed to request enable GPIO%d: %d\n",
-				 config->ena_gpio, ret);
-			goto wash;
-		}
-	}
-
 	/* set regulator constraints */
 	if (init_data)
 		constraints = &init_data->constraints;
@@ -3979,13 +3996,13 @@
 
 scrub:
 	regulator_ena_gpio_free(rdev);
-	kfree(rdev->constraints);
-wash:
 	device_unregister(&rdev->dev);
 	/* device core frees rdev */
 	rdev = ERR_PTR(ret);
 	goto out;
 
+wash:
+	regulator_ena_gpio_free(rdev);
 clean:
 	kfree(rdev);
 	rdev = ERR_PTR(ret);
diff --git a/drivers/regulator/da9210-regulator.c b/drivers/regulator/da9210-regulator.c
index 8b3cc9f..01c0e37 100644
--- a/drivers/regulator/da9210-regulator.c
+++ b/drivers/regulator/da9210-regulator.c
@@ -132,6 +132,8 @@
 	if (error < 0)
 		goto error_i2c;
 
+	mutex_lock(&chip->rdev->mutex);
+
 	if (val & DA9210_E_OVCURR) {
 		regulator_notifier_call_chain(chip->rdev,
 					      REGULATOR_EVENT_OVER_CURRENT,
@@ -155,6 +157,9 @@
 					      NULL);
 		handled |= DA9210_E_VMAX;
 	}
+
+	mutex_unlock(&chip->rdev->mutex);
+
 	if (handled) {
 		/* Clear handled events */
 		error = regmap_write(chip->regmap, DA9210_REG_EVENT_B, handled);
diff --git a/drivers/regulator/fan53555.c b/drivers/regulator/fan53555.c
index 4940e82..2cb5cc3 100644
--- a/drivers/regulator/fan53555.c
+++ b/drivers/regulator/fan53555.c
@@ -114,6 +114,22 @@
 	return 0;
 }
 
+static int fan53555_set_suspend_enable(struct regulator_dev *rdev)
+{
+	struct fan53555_device_info *di = rdev_get_drvdata(rdev);
+
+	return regmap_update_bits(di->regmap, di->sleep_reg,
+				  VSEL_BUCK_EN, VSEL_BUCK_EN);
+}
+
+static int fan53555_set_suspend_disable(struct regulator_dev *rdev)
+{
+	struct fan53555_device_info *di = rdev_get_drvdata(rdev);
+
+	return regmap_update_bits(di->regmap, di->sleep_reg,
+				  VSEL_BUCK_EN, 0);
+}
+
 static int fan53555_set_mode(struct regulator_dev *rdev, unsigned int mode)
 {
 	struct fan53555_device_info *di = rdev_get_drvdata(rdev);
@@ -192,6 +208,8 @@
 	.set_mode = fan53555_set_mode,
 	.get_mode = fan53555_get_mode,
 	.set_ramp_delay = fan53555_set_ramp,
+	.set_suspend_enable = fan53555_set_suspend_enable,
+	.set_suspend_disable = fan53555_set_suspend_disable,
 };
 
 static int fan53555_voltages_setup_fairchild(struct fan53555_device_info *di)
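The two new suspend hooks above simply toggle VSEL_BUCK_EN in the sleep
voltage-select register. They are driven by the regulator core's suspend
state handling: when a machine constraint (or a devicetree
regulator-state-mem node) marks the supply as enabled in suspend, the core
calls .set_suspend_enable() while applying that state. A hedged sketch of
such a constraint follows; the names are hypothetical and only illustrate
the plumbing, not anything added by this patch.

	static struct regulator_init_data fan53555_vdd_cpu_idata = {
		.constraints = {
			.name		= "vdd-cpu",	/* hypothetical rail */
			.always_on	= 1,
			.state_mem	= {
				/* keep the buck enabled across suspend-to-mem */
				.enabled = true,
			},
		},
	};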
diff --git a/drivers/regulator/gpio-regulator.c b/drivers/regulator/gpio-regulator.c
index 7bba8b7..a8718e9 100644
--- a/drivers/regulator/gpio-regulator.c
+++ b/drivers/regulator/gpio-regulator.c
@@ -283,8 +283,10 @@
 		drvdata->nr_gpios = config->nr_gpios;
 		ret = gpio_request_array(drvdata->gpios, drvdata->nr_gpios);
 		if (ret) {
-			dev_err(&pdev->dev,
-			"Could not obtain regulator setting GPIOs: %d\n", ret);
+			if (ret != -EPROBE_DEFER)
+				dev_err(&pdev->dev,
+					"Could not obtain regulator setting GPIOs: %d\n",
+					ret);
 			goto err_memstate;
 		}
 	}
diff --git a/drivers/regulator/helpers.c b/drivers/regulator/helpers.c
index 3bbb326..b1e32e7 100644
--- a/drivers/regulator/helpers.c
+++ b/drivers/regulator/helpers.c
@@ -465,3 +465,26 @@
 	return 0;
 }
 EXPORT_SYMBOL_GPL(regulator_get_bypass_regmap);
+
+/**
+ * regulator_set_active_discharge_regmap - Default set_active_discharge()
+ *					   using regmap
+ *
+ * @rdev: device to operate on.
+ * @enable: state to set, 0 to disable and 1 to enable.
+ */
+int regulator_set_active_discharge_regmap(struct regulator_dev *rdev,
+					  bool enable)
+{
+	unsigned int val;
+
+	if (enable)
+		val = rdev->desc->active_discharge_on;
+	else
+		val = rdev->desc->active_discharge_off;
+
+	return regmap_update_bits(rdev->regmap,
+				  rdev->desc->active_discharge_reg,
+				  rdev->desc->active_discharge_mask, val);
+}
+EXPORT_SYMBOL_GPL(regulator_set_active_discharge_regmap);
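A regulator driver opts into this helper by describing its active-discharge
control bit in the regulator_desc and pointing .set_active_discharge at the
helper, as the max77620 driver later in this patch does; the core change
above then applies rdev->constraints->active_discharge through it. The
sketch below is illustrative only, with a made-up register and bit:

	static const struct regulator_ops foo_ldo_ops = {
		.enable			= regulator_enable_regmap,
		.disable		= regulator_disable_regmap,
		.is_enabled		= regulator_is_enabled_regmap,
		.set_active_discharge	= regulator_set_active_discharge_regmap,
	};

	static const struct regulator_desc foo_ldo_desc = {
		.name			= "foo-ldo",	/* hypothetical */
		.type			= REGULATOR_VOLTAGE,
		.ops			= &foo_ldo_ops,
		.owner			= THIS_MODULE,
		.active_discharge_reg	= 0x10,		/* hypothetical */
		.active_discharge_mask	= BIT(1),
		.active_discharge_on	= BIT(1),
		.active_discharge_off	= 0,
	};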
diff --git a/drivers/regulator/hi655x-regulator.c b/drivers/regulator/hi655x-regulator.c
new file mode 100644
index 0000000..aca1846
--- /dev/null
+++ b/drivers/regulator/hi655x-regulator.c
@@ -0,0 +1,227 @@
+/*
+ * Device driver for regulators in Hi655x IC
+ *
+ * Copyright (c) 2016 Hisilicon.
+ *
+ * Authors:
+ * Chen Feng <puck.chen@hisilicon.com>
+ * Fei  Wang <w.f@huawei.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/bitops.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+#include <linux/regulator/of_regulator.h>
+#include <linux/mfd/hi655x-pmic.h>
+
+struct hi655x_regulator {
+	unsigned int disable_reg;
+	unsigned int status_reg;
+	unsigned int ctrl_regs;
+	unsigned int ctrl_mask;
+	struct regulator_desc rdesc;
+};
+
+/* LDO7 & LDO10 */
+static const unsigned int ldo7_voltages[] = {
+	1800000, 1850000, 2850000, 2900000,
+	3000000, 3100000, 3200000, 3300000,
+};
+
+static const unsigned int ldo19_voltages[] = {
+	1800000, 1850000, 1900000, 1750000,
+	2800000, 2850000, 2900000, 3000000,
+};
+
+static const unsigned int ldo22_voltages[] = {
+	 900000, 1000000, 1050000, 1100000,
+	1150000, 1175000, 1185000, 1200000,
+};
+
+enum hi655x_regulator_id {
+	HI655X_LDO0,
+	HI655X_LDO1,
+	HI655X_LDO2,
+	HI655X_LDO3,
+	HI655X_LDO4,
+	HI655X_LDO5,
+	HI655X_LDO6,
+	HI655X_LDO7,
+	HI655X_LDO8,
+	HI655X_LDO9,
+	HI655X_LDO10,
+	HI655X_LDO11,
+	HI655X_LDO12,
+	HI655X_LDO13,
+	HI655X_LDO14,
+	HI655X_LDO15,
+	HI655X_LDO16,
+	HI655X_LDO17,
+	HI655X_LDO18,
+	HI655X_LDO19,
+	HI655X_LDO20,
+	HI655X_LDO21,
+	HI655X_LDO22,
+};
+
+static int hi655x_is_enabled(struct regulator_dev *rdev)
+{
+	unsigned int value = 0;
+
+	struct hi655x_regulator *regulator = rdev_get_drvdata(rdev);
+
+	regmap_read(rdev->regmap, regulator->status_reg, &value);
+	return (value & BIT(regulator->ctrl_mask));
+}
+
+static int hi655x_disable(struct regulator_dev *rdev)
+{
+	int ret = 0;
+
+	struct hi655x_regulator *regulator = rdev_get_drvdata(rdev);
+
+	ret = regmap_write(rdev->regmap, regulator->disable_reg,
+			   BIT(regulator->ctrl_mask));
+	return ret;
+}
+
+static struct regulator_ops hi655x_regulator_ops = {
+	.enable = regulator_enable_regmap,
+	.disable = hi655x_disable,
+	.is_enabled = hi655x_is_enabled,
+	.list_voltage = regulator_list_voltage_table,
+	.get_voltage_sel = regulator_get_voltage_sel_regmap,
+	.set_voltage_sel = regulator_set_voltage_sel_regmap,
+};
+
+static struct regulator_ops hi655x_ldo_linear_ops = {
+	.enable = regulator_enable_regmap,
+	.disable = hi655x_disable,
+	.is_enabled = hi655x_is_enabled,
+	.list_voltage = regulator_list_voltage_linear,
+	.get_voltage_sel = regulator_get_voltage_sel_regmap,
+	.set_voltage_sel = regulator_set_voltage_sel_regmap,
+};
+
+#define HI655X_LDO(_ID, vreg, vmask, ereg, dreg,                 \
+		   sreg, cmask, vtable) {                        \
+	.rdesc = {                                               \
+		.name            = #_ID,                         \
+		.of_match        = of_match_ptr(#_ID),           \
+		.ops             = &hi655x_regulator_ops,        \
+		.regulators_node = of_match_ptr("regulators"),   \
+		.type            = REGULATOR_VOLTAGE,            \
+		.id              = HI655X_##_ID,                 \
+		.owner           = THIS_MODULE,                  \
+		.n_voltages      = ARRAY_SIZE(vtable),           \
+		.volt_table      = vtable,                       \
+		.vsel_reg        = HI655X_BUS_ADDR(vreg),        \
+		.vsel_mask       = vmask,                        \
+		.enable_reg      = HI655X_BUS_ADDR(ereg),        \
+		.enable_mask     = BIT(cmask),                   \
+	},                                                       \
+	.disable_reg = HI655X_BUS_ADDR(dreg),                    \
+	.status_reg = HI655X_BUS_ADDR(sreg),                     \
+	.ctrl_mask = cmask,                                      \
+}
+
+#define HI655X_LDO_LINEAR(_ID, vreg, vmask, ereg, dreg,          \
+			  sreg, cmask, minv, nvolt, vstep) {     \
+	.rdesc = {                                               \
+		.name            = #_ID,                         \
+		.of_match        = of_match_ptr(#_ID),           \
+		.ops             = &hi655x_ldo_linear_ops,       \
+		.regulators_node = of_match_ptr("regulators"),   \
+		.type            = REGULATOR_VOLTAGE,            \
+		.id              = HI655X_##_ID,                 \
+		.owner           = THIS_MODULE,                  \
+		.min_uV          = minv,                         \
+		.n_voltages      = nvolt,                        \
+		.uV_step         = vstep,                        \
+		.vsel_reg        = HI655X_BUS_ADDR(vreg),        \
+		.vsel_mask       = vmask,                        \
+		.enable_reg      = HI655X_BUS_ADDR(ereg),        \
+		.enable_mask     = BIT(cmask),                   \
+	},                                                       \
+	.disable_reg = HI655X_BUS_ADDR(dreg),                    \
+	.status_reg = HI655X_BUS_ADDR(sreg),                     \
+	.ctrl_mask = cmask,                                      \
+}
+
+static struct hi655x_regulator regulators[] = {
+	HI655X_LDO_LINEAR(LDO2, 0x72, 0x07, 0x29, 0x2a, 0x2b, 0x01,
+			  2500000, 8, 100000),
+	HI655X_LDO(LDO7, 0x78, 0x07, 0x29, 0x2a, 0x2b, 0x06, ldo7_voltages),
+	HI655X_LDO(LDO10, 0x78, 0x07, 0x29, 0x2a, 0x2b, 0x01, ldo7_voltages),
+	HI655X_LDO_LINEAR(LDO13, 0x7e, 0x07, 0x2c, 0x2d, 0x2e, 0x04,
+			  1600000, 8, 50000),
+	HI655X_LDO_LINEAR(LDO14, 0x7f, 0x07, 0x2c, 0x2d, 0x2e, 0x05,
+			  2500000, 8, 100000),
+	HI655X_LDO_LINEAR(LDO15, 0x80, 0x07, 0x2c, 0x2d, 0x2e, 0x06,
+			  1600000, 8, 50000),
+	HI655X_LDO_LINEAR(LDO17, 0x82, 0x07, 0x2f, 0x30, 0x31, 0x00,
+			  2500000, 8, 100000),
+	HI655X_LDO(LDO19, 0x84, 0x07, 0x2f, 0x30, 0x31, 0x02, ldo19_voltages),
+	HI655X_LDO_LINEAR(LDO21, 0x86, 0x07, 0x2f, 0x30, 0x31, 0x04,
+			  1650000, 8, 50000),
+	HI655X_LDO(LDO22, 0x87, 0x07, 0x2f, 0x30, 0x31, 0x05, ldo22_voltages),
+};
+
+static int hi655x_regulator_probe(struct platform_device *pdev)
+{
+	unsigned int i;
+	struct hi655x_regulator *regulator;
+	struct hi655x_pmic *pmic;
+	struct regulator_config config = { };
+	struct regulator_dev *rdev;
+
+	pmic = dev_get_drvdata(pdev->dev.parent);
+	if (!pmic) {
+		dev_err(&pdev->dev, "no pmic in the regulator parent node\n");
+		return -ENODEV;
+	}
+
+	regulator = devm_kzalloc(&pdev->dev, sizeof(*regulator), GFP_KERNEL);
+	if (!regulator)
+		return -ENOMEM;
+
+	platform_set_drvdata(pdev, regulator);
+
+	config.dev = pdev->dev.parent;
+	config.regmap = pmic->regmap;
+	config.driver_data = regulator;
+	for (i = 0; i < ARRAY_SIZE(regulators); i++) {
+		rdev = devm_regulator_register(&pdev->dev,
+					       &regulators[i].rdesc,
+					       &config);
+		if (IS_ERR(rdev)) {
+			dev_err(&pdev->dev, "failed to register regulator %s\n",
+				regulators[i].rdesc.name);
+			return PTR_ERR(rdev);
+		}
+	}
+	return 0;
+}
+
+static struct platform_driver hi655x_regulator_driver = {
+	.driver = {
+		.name	= "hi655x-regulator",
+	},
+	.probe	= hi655x_regulator_probe,
+};
+module_platform_driver(hi655x_regulator_driver);
+
+MODULE_AUTHOR("Chen Feng <puck.chen@hisilicon.com>");
+MODULE_DESCRIPTION("Hisilicon Hi655x regulator driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/regulator/lp872x.c b/drivers/regulator/lp872x.c
index 19d7584..3899211 100644
--- a/drivers/regulator/lp872x.c
+++ b/drivers/regulator/lp872x.c
@@ -15,6 +15,7 @@
 #include <linux/regmap.h>
 #include <linux/err.h>
 #include <linux/gpio.h>
+#include <linux/delay.h>
 #include <linux/regulator/lp872x.h>
 #include <linux/regulator/driver.h>
 #include <linux/platform_device.h>
@@ -738,10 +739,8 @@
 		goto set_default_dvs_mode;
 
 	gpio = dvs->gpio;
-	if (!gpio_is_valid(gpio)) {
-		dev_warn(lp->dev, "invalid gpio: %d\n", gpio);
+	if (!gpio_is_valid(gpio))
 		goto set_default_dvs_mode;
-	}
 
 	pinstate = dvs->init_state;
 	ret = devm_gpio_request_one(lp->dev, gpio, pinstate, "LP872X DVS");
@@ -759,6 +758,33 @@
 				default_dvs_mode[lp->chipid]);
 }
 
+static int lp872x_hw_enable(struct lp872x *lp)
+{
+	int ret, gpio;
+
+	if (!lp->pdata)
+		return -EINVAL;
+
+	gpio = lp->pdata->enable_gpio;
+	if (!gpio_is_valid(gpio))
+		return 0;
+
+	/* Always set enable GPIO high. */
+	ret = devm_gpio_request_one(lp->dev, gpio, GPIOF_OUT_INIT_HIGH, "LP872X EN");
+	if (ret) {
+		dev_err(lp->dev, "gpio request err: %d\n", ret);
+		return ret;
+	}
+
+	/* Each chip has a different enable delay. */
+	if (lp->chipid == LP8720)
+		usleep_range(LP8720_ENABLE_DELAY, 1.5 * LP8720_ENABLE_DELAY);
+	else
+		usleep_range(LP8725_ENABLE_DELAY, 1.5 * LP8725_ENABLE_DELAY);
+
+	return 0;
+}
+
 static int lp872x_config(struct lp872x *lp)
 {
 	struct lp872x_platform_data *pdata = lp->pdata;
@@ -877,6 +903,8 @@
 	of_property_read_u8(np, "ti,dvs-state", &dvs_state);
 	pdata->dvs->init_state = dvs_state ? DVS_HIGH : DVS_LOW;
 
+	pdata->enable_gpio = of_get_named_gpio(np, "enable-gpios", 0);
+
 	if (of_get_child_count(np) == 0)
 		goto out;
 
@@ -950,6 +978,10 @@
 	lp->chipid = id->driver_data;
 	i2c_set_clientdata(cl, lp);
 
+	ret = lp872x_hw_enable(lp);
+	if (ret)
+		return ret;
+
 	ret = lp872x_config(lp);
 	if (ret)
 		return ret;
diff --git a/drivers/regulator/ltc3589.c b/drivers/regulator/ltc3589.c
index 972c386..47bef32 100644
--- a/drivers/regulator/ltc3589.c
+++ b/drivers/regulator/ltc3589.c
@@ -520,12 +520,15 @@
 		}
 	}
 
-	ret = devm_request_threaded_irq(dev, client->irq, NULL, ltc3589_isr,
-					IRQF_TRIGGER_LOW | IRQF_ONESHOT,
-					client->name, ltc3589);
-	if (ret) {
-		dev_err(dev, "Failed to request IRQ: %d\n", ret);
-		return ret;
+	if (client->irq) {
+		ret = devm_request_threaded_irq(dev, client->irq, NULL,
+						ltc3589_isr,
+						IRQF_TRIGGER_LOW | IRQF_ONESHOT,
+						client->name, ltc3589);
+		if (ret) {
+			dev_err(dev, "Failed to request IRQ: %d\n", ret);
+			return ret;
+		}
 	}
 
 	return 0;
diff --git a/drivers/regulator/max77620-regulator.c b/drivers/regulator/max77620-regulator.c
new file mode 100644
index 0000000..73a3356
--- /dev/null
+++ b/drivers/regulator/max77620-regulator.c
@@ -0,0 +1,813 @@
+/*
+ * Maxim MAX77620 Regulator driver
+ *
+ * Copyright (c) 2016, NVIDIA CORPORATION.  All rights reserved.
+ *
+ * Author: Mallikarjun Kasoju <mkasoju@nvidia.com>
+ *	Laxman Dewangan <ldewangan@nvidia.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ */
+
+#include <linux/init.h>
+#include <linux/mfd/max77620.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+#include <linux/regulator/of_regulator.h>
+
+#define max77620_rails(_name)	"max77620-"#_name
+
+/* Power Mode */
+#define MAX77620_POWER_MODE_NORMAL		3
+#define MAX77620_POWER_MODE_LPM			2
+#define MAX77620_POWER_MODE_GLPM		1
+#define MAX77620_POWER_MODE_DISABLE		0
+
+/* SD Slew Rate */
+#define MAX77620_SD_SR_13_75			0
+#define MAX77620_SD_SR_27_5			1
+#define MAX77620_SD_SR_55			2
+#define MAX77620_SD_SR_100			3
+
+enum max77620_regulators {
+	MAX77620_REGULATOR_ID_SD0,
+	MAX77620_REGULATOR_ID_SD1,
+	MAX77620_REGULATOR_ID_SD2,
+	MAX77620_REGULATOR_ID_SD3,
+	MAX77620_REGULATOR_ID_SD4,
+	MAX77620_REGULATOR_ID_LDO0,
+	MAX77620_REGULATOR_ID_LDO1,
+	MAX77620_REGULATOR_ID_LDO2,
+	MAX77620_REGULATOR_ID_LDO3,
+	MAX77620_REGULATOR_ID_LDO4,
+	MAX77620_REGULATOR_ID_LDO5,
+	MAX77620_REGULATOR_ID_LDO6,
+	MAX77620_REGULATOR_ID_LDO7,
+	MAX77620_REGULATOR_ID_LDO8,
+	MAX77620_NUM_REGS,
+};
+
+/* Regulator types */
+enum max77620_regulator_type {
+	MAX77620_REGULATOR_TYPE_SD,
+	MAX77620_REGULATOR_TYPE_LDO_N,
+	MAX77620_REGULATOR_TYPE_LDO_P,
+};
+
+struct max77620_regulator_info {
+	u8 type;
+	u8 fps_addr;
+	u8 volt_addr;
+	u8 cfg_addr;
+	u8 power_mode_mask;
+	u8 power_mode_shift;
+	u8 remote_sense_addr;
+	u8 remote_sense_mask;
+	struct regulator_desc desc;
+};
+
+struct max77620_regulator_pdata {
+	struct regulator_init_data *reg_idata;
+	int active_fps_src;
+	int active_fps_pd_slot;
+	int active_fps_pu_slot;
+	int suspend_fps_src;
+	int suspend_fps_pd_slot;
+	int suspend_fps_pu_slot;
+	int current_mode;
+};
+
+struct max77620_regulator {
+	struct device *dev;
+	struct regmap *rmap;
+	struct max77620_regulator_info *rinfo[MAX77620_NUM_REGS];
+	struct max77620_regulator_pdata reg_pdata[MAX77620_NUM_REGS];
+	int enable_power_mode[MAX77620_NUM_REGS];
+	int current_power_mode[MAX77620_NUM_REGS];
+	int active_fps_src[MAX77620_NUM_REGS];
+};
+
+#define fps_src_name(fps_src)	\
+	(fps_src == MAX77620_FPS_SRC_0 ? "FPS_SRC_0" :	\
+	fps_src == MAX77620_FPS_SRC_1 ? "FPS_SRC_1" :	\
+	fps_src == MAX77620_FPS_SRC_2 ? "FPS_SRC_2" : "FPS_SRC_NONE")
+
+static int max77620_regulator_get_fps_src(struct max77620_regulator *pmic,
+					  int id)
+{
+	struct max77620_regulator_info *rinfo = pmic->rinfo[id];
+	unsigned int val;
+	int ret;
+
+	ret = regmap_read(pmic->rmap, rinfo->fps_addr, &val);
+	if (ret < 0) {
+		dev_err(pmic->dev, "Reg 0x%02x read failed %d\n",
+			rinfo->fps_addr, ret);
+		return ret;
+	}
+
+	return (val & MAX77620_FPS_SRC_MASK) >> MAX77620_FPS_SRC_SHIFT;
+}
+
+static int max77620_regulator_set_fps_src(struct max77620_regulator *pmic,
+					  int fps_src, int id)
+{
+	struct max77620_regulator_info *rinfo = pmic->rinfo[id];
+	unsigned int val;
+	int ret;
+
+	switch (fps_src) {
+	case MAX77620_FPS_SRC_0:
+	case MAX77620_FPS_SRC_1:
+	case MAX77620_FPS_SRC_2:
+	case MAX77620_FPS_SRC_NONE:
+		break;
+
+	case MAX77620_FPS_SRC_DEF:
+		ret = regmap_read(pmic->rmap, rinfo->fps_addr, &val);
+		if (ret < 0) {
+			dev_err(pmic->dev, "Reg 0x%02x read failed %d\n",
+				rinfo->fps_addr, ret);
+			return ret;
+		}
+		ret = (val & MAX77620_FPS_SRC_MASK) >> MAX77620_FPS_SRC_SHIFT;
+		pmic->active_fps_src[id] = ret;
+		return 0;
+
+	default:
+		dev_err(pmic->dev, "Invalid FPS %d for regulator %d\n",
+			fps_src, id);
+		return -EINVAL;
+	}
+
+	ret = regmap_update_bits(pmic->rmap, rinfo->fps_addr,
+				 MAX77620_FPS_SRC_MASK,
+				 fps_src << MAX77620_FPS_SRC_SHIFT);
+	if (ret < 0) {
+		dev_err(pmic->dev, "Reg 0x%02x update failed %d\n",
+			rinfo->fps_addr, ret);
+		return ret;
+	}
+	pmic->active_fps_src[id] = fps_src;
+
+	return 0;
+}
+
+static int max77620_regulator_set_fps_slots(struct max77620_regulator *pmic,
+					    int id, bool is_suspend)
+{
+	struct max77620_regulator_pdata *rpdata = &pmic->reg_pdata[id];
+	struct max77620_regulator_info *rinfo = pmic->rinfo[id];
+	unsigned int val = 0;
+	unsigned int mask = 0;
+	int pu = rpdata->active_fps_pu_slot;
+	int pd = rpdata->active_fps_pd_slot;
+	int ret = 0;
+
+	if (is_suspend) {
+		pu = rpdata->suspend_fps_pu_slot;
+		pd = rpdata->suspend_fps_pd_slot;
+	}
+
+	/* FPS power up period setting */
+	if (pu >= 0) {
+		val |= (pu << MAX77620_FPS_PU_PERIOD_SHIFT);
+		mask |= MAX77620_FPS_PU_PERIOD_MASK;
+	}
+
+	/* FPS power down period setting */
+	if (pd >= 0) {
+		val |= (pd << MAX77620_FPS_PD_PERIOD_SHIFT);
+		mask |= MAX77620_FPS_PD_PERIOD_MASK;
+	}
+
+	if (mask) {
+		ret = regmap_update_bits(pmic->rmap, rinfo->fps_addr,
+					 mask, val);
+		if (ret < 0) {
+			dev_err(pmic->dev, "Reg 0x%02x update failed: %d\n",
+				rinfo->fps_addr, ret);
+			return ret;
+		}
+	}
+
+	return ret;
+}
+
+static int max77620_regulator_set_power_mode(struct max77620_regulator *pmic,
+					     int power_mode, int id)
+{
+	struct max77620_regulator_info *rinfo = pmic->rinfo[id];
+	u8 mask = rinfo->power_mode_mask;
+	u8 shift = rinfo->power_mode_shift;
+	u8 addr;
+	int ret;
+
+	switch (rinfo->type) {
+	case MAX77620_REGULATOR_TYPE_SD:
+		addr = rinfo->cfg_addr;
+		break;
+	default:
+		addr = rinfo->volt_addr;
+		break;
+	}
+
+	ret = regmap_update_bits(pmic->rmap, addr, mask, power_mode << shift);
+	if (ret < 0) {
+		dev_err(pmic->dev, "Regulator %d mode set failed: %d\n",
+			id, ret);
+		return ret;
+	}
+	pmic->current_power_mode[id] = power_mode;
+
+	return ret;
+}
+
+static int max77620_regulator_get_power_mode(struct max77620_regulator *pmic,
+					     int id)
+{
+	struct max77620_regulator_info *rinfo = pmic->rinfo[id];
+	unsigned int val, addr;
+	u8 mask = rinfo->power_mode_mask;
+	u8 shift = rinfo->power_mode_shift;
+	int ret;
+
+	switch (rinfo->type) {
+	case MAX77620_REGULATOR_TYPE_SD:
+		addr = rinfo->cfg_addr;
+		break;
+	default:
+		addr = rinfo->volt_addr;
+		break;
+	}
+
+	ret = regmap_read(pmic->rmap, addr, &val);
+	if (ret < 0) {
+		dev_err(pmic->dev, "Regulator %d: Reg 0x%02x read failed: %d\n",
+			id, addr, ret);
+		return ret;
+	}
+
+	return (val & mask) >> shift;
+}
+
+static int max77620_read_slew_rate(struct max77620_regulator *pmic, int id)
+{
+	struct max77620_regulator_info *rinfo = pmic->rinfo[id];
+	unsigned int rval;
+	int slew_rate;
+	int ret;
+
+	ret = regmap_read(pmic->rmap, rinfo->cfg_addr, &rval);
+	if (ret < 0) {
+		dev_err(pmic->dev, "Register 0x%02x read failed: %d\n",
+			rinfo->cfg_addr, ret);
+		return ret;
+	}
+
+	switch (rinfo->type) {
+	case MAX77620_REGULATOR_TYPE_SD:
+		slew_rate = (rval >> MAX77620_SD_SR_SHIFT) & 0x3;
+		switch (slew_rate) {
+		case 0:
+			slew_rate = 13750;
+			break;
+		case 1:
+			slew_rate = 27500;
+			break;
+		case 2:
+			slew_rate = 55000;
+			break;
+		case 3:
+			slew_rate = 100000;
+			break;
+		}
+		rinfo->desc.ramp_delay = slew_rate;
+		break;
+	default:
+		slew_rate = rval & 0x1;
+		switch (slew_rate) {
+		case 0:
+			slew_rate = 100000;
+			break;
+		case 1:
+			slew_rate = 5000;
+			break;
+		}
+		rinfo->desc.ramp_delay = slew_rate;
+		break;
+	}
+
+	return 0;
+}
+
+static int max77620_init_pmic(struct max77620_regulator *pmic, int id)
+{
+	struct max77620_regulator_pdata *rpdata = &pmic->reg_pdata[id];
+	int ret;
+
+	/* Update power mode */
+	ret = max77620_regulator_get_power_mode(pmic, id);
+	if (ret < 0)
+		return ret;
+
+	pmic->current_power_mode[id] = ret;
+	pmic->enable_power_mode[id] = MAX77620_POWER_MODE_NORMAL;
+
+	if (rpdata->active_fps_src == MAX77620_FPS_SRC_DEF) {
+		ret = max77620_regulator_get_fps_src(pmic, id);
+		if (ret < 0)
+			return ret;
+		rpdata->active_fps_src = ret;
+	}
+
+	/* If the rail is externally controlled by FPS then always enable it. */
+	if (rpdata->active_fps_src == MAX77620_FPS_SRC_NONE) {
+		ret = max77620_regulator_set_power_mode(pmic,
+					pmic->enable_power_mode[id], id);
+		if (ret < 0)
+			return ret;
+	} else {
+		if (pmic->current_power_mode[id] !=
+		     pmic->enable_power_mode[id]) {
+			ret = max77620_regulator_set_power_mode(pmic,
+					pmic->enable_power_mode[id], id);
+			if (ret < 0)
+				return ret;
+		}
+	}
+
+	ret = max77620_regulator_set_fps_src(pmic, rpdata->active_fps_src, id);
+	if (ret < 0)
+		return ret;
+
+	ret = max77620_regulator_set_fps_slots(pmic, id, false);
+	if (ret < 0)
+		return ret;
+
+	return 0;
+}
+
+static int max77620_regulator_enable(struct regulator_dev *rdev)
+{
+	struct max77620_regulator *pmic = rdev_get_drvdata(rdev);
+	int id = rdev_get_id(rdev);
+
+	if (pmic->active_fps_src[id] != MAX77620_FPS_SRC_NONE)
+		return 0;
+
+	return max77620_regulator_set_power_mode(pmic,
+			pmic->enable_power_mode[id], id);
+}
+
+static int max77620_regulator_disable(struct regulator_dev *rdev)
+{
+	struct max77620_regulator *pmic = rdev_get_drvdata(rdev);
+	int id = rdev_get_id(rdev);
+
+	if (pmic->active_fps_src[id] != MAX77620_FPS_SRC_NONE)
+		return 0;
+
+	return max77620_regulator_set_power_mode(pmic,
+			MAX77620_POWER_MODE_DISABLE, id);
+}
+
+static int max77620_regulator_is_enabled(struct regulator_dev *rdev)
+{
+	struct max77620_regulator *pmic = rdev_get_drvdata(rdev);
+	int id = rdev_get_id(rdev);
+	int ret = 1;
+
+	if (pmic->active_fps_src[id] != MAX77620_FPS_SRC_NONE)
+		return 1;
+
+	ret = max77620_regulator_get_power_mode(pmic, id);
+	if (ret < 0)
+		return ret;
+
+	if (ret != MAX77620_POWER_MODE_DISABLE)
+		return 1;
+
+	return 0;
+}
+
+static int max77620_regulator_set_mode(struct regulator_dev *rdev,
+				       unsigned int mode)
+{
+	struct max77620_regulator *pmic = rdev_get_drvdata(rdev);
+	int id = rdev_get_id(rdev);
+	struct max77620_regulator_info *rinfo = pmic->rinfo[id];
+	struct max77620_regulator_pdata *rpdata = &pmic->reg_pdata[id];
+	bool fpwm = false;
+	int power_mode;
+	int ret;
+	u8 val;
+
+	switch (mode) {
+	case REGULATOR_MODE_FAST:
+		fpwm = true;
+		power_mode = MAX77620_POWER_MODE_NORMAL;
+		break;
+
+	case REGULATOR_MODE_NORMAL:
+		power_mode = MAX77620_POWER_MODE_NORMAL;
+		break;
+
+	case REGULATOR_MODE_IDLE:
+		power_mode = MAX77620_POWER_MODE_LPM;
+		break;
+
+	default:
+		dev_err(pmic->dev, "Regulator %d mode %d is invalid\n",
+			id, mode);
+		return -EINVAL;
+	}
+
+	if (rinfo->type != MAX77620_REGULATOR_TYPE_SD)
+		goto skip_fpwm;
+
+	val = (fpwm) ? MAX77620_SD_FPWM_MASK : 0;
+	ret = regmap_update_bits(pmic->rmap, rinfo->cfg_addr,
+				 MAX77620_SD_FPWM_MASK, val);
+	if (ret < 0) {
+		dev_err(pmic->dev, "Reg 0x%02x update failed: %d\n",
+			rinfo->cfg_addr, ret);
+		return ret;
+	}
+	rpdata->current_mode = mode;
+
+skip_fpwm:
+	ret = max77620_regulator_set_power_mode(pmic, power_mode, id);
+	if (ret < 0)
+		return ret;
+
+	pmic->enable_power_mode[id] = power_mode;
+
+	return 0;
+}
+
+static unsigned int max77620_regulator_get_mode(struct regulator_dev *rdev)
+{
+	struct max77620_regulator *pmic = rdev_get_drvdata(rdev);
+	int id = rdev_get_id(rdev);
+	struct max77620_regulator_info *rinfo = pmic->rinfo[id];
+	int fpwm = 0;
+	int ret;
+	int pm_mode, reg_mode;
+	unsigned int val;
+
+	ret = max77620_regulator_get_power_mode(pmic, id);
+	if (ret < 0)
+		return 0;
+
+	pm_mode = ret;
+
+	if (rinfo->type == MAX77620_REGULATOR_TYPE_SD) {
+		ret = regmap_read(pmic->rmap, rinfo->cfg_addr, &val);
+		if (ret < 0) {
+			dev_err(pmic->dev, "Reg 0x%02x read failed: %d\n",
+				rinfo->cfg_addr, ret);
+			return ret;
+		}
+		fpwm = !!(val & MAX77620_SD_FPWM_MASK);
+	}
+
+	switch (pm_mode) {
+	case MAX77620_POWER_MODE_NORMAL:
+	case MAX77620_POWER_MODE_DISABLE:
+		if (fpwm)
+			reg_mode = REGULATOR_MODE_FAST;
+		else
+			reg_mode = REGULATOR_MODE_NORMAL;
+		break;
+	case MAX77620_POWER_MODE_LPM:
+	case MAX77620_POWER_MODE_GLPM:
+		reg_mode = REGULATOR_MODE_IDLE;
+		break;
+	default:
+		return 0;
+	}
+
+	return reg_mode;
+}
+
+static int max77620_regulator_set_ramp_delay(struct regulator_dev *rdev,
+					     int ramp_delay)
+{
+	struct max77620_regulator *pmic = rdev_get_drvdata(rdev);
+	int id = rdev_get_id(rdev);
+	struct max77620_regulator_info *rinfo = pmic->rinfo[id];
+	int ret, val;
+	u8 mask;
+
+	if (rinfo->type == MAX77620_REGULATOR_TYPE_SD) {
+		if (ramp_delay <= 13750)
+			val = 0;
+		else if (ramp_delay <= 27500)
+			val = 1;
+		else if (ramp_delay <= 55000)
+			val = 2;
+		else
+			val = 3;
+		val <<= MAX77620_SD_SR_SHIFT;
+		mask = MAX77620_SD_SR_MASK;
+	} else {
+		if (ramp_delay <= 5000)
+			val = 1;
+		else
+			val = 0;
+		mask = MAX77620_LDO_SLEW_RATE_MASK;
+	}
+
+	ret = regmap_update_bits(pmic->rmap, rinfo->cfg_addr, mask, val);
+	if (ret < 0)
+		dev_err(pmic->dev, "Reg 0x%02x update failed: %d\n",
+			rinfo->cfg_addr, ret);
+
+	return ret;
+}
+
+static int max77620_of_parse_cb(struct device_node *np,
+				const struct regulator_desc *desc,
+				struct regulator_config *config)
+{
+	struct max77620_regulator *pmic = config->driver_data;
+	struct max77620_regulator_pdata *rpdata = &pmic->reg_pdata[desc->id];
+	u32 pval;
+	int ret;
+
+	ret = of_property_read_u32(np, "maxim,active-fps-source", &pval);
+	rpdata->active_fps_src = (!ret) ? pval : MAX77620_FPS_SRC_DEF;
+
+	ret = of_property_read_u32(np, "maxim,active-fps-power-up-slot", &pval);
+	rpdata->active_fps_pu_slot = (!ret) ? pval : -1;
+
+	ret = of_property_read_u32(
+			np, "maxim,active-fps-power-down-slot", &pval);
+	rpdata->active_fps_pd_slot = (!ret) ? pval : -1;
+
+	ret = of_property_read_u32(np, "maxim,suspend-fps-source", &pval);
+	rpdata->suspend_fps_src = (!ret) ? pval : -1;
+
+	ret = of_property_read_u32(
+			np, "maxim,suspend-fps-power-up-slot", &pval);
+	rpdata->suspend_fps_pu_slot = (!ret) ? pval : -1;
+
+	ret = of_property_read_u32(
+			np, "maxim,suspend-fps-power-down-slot", &pval);
+	rpdata->suspend_fps_pd_slot = (!ret) ? pval : -1;
+
+	return max77620_init_pmic(pmic, desc->id);
+}
+
+static struct regulator_ops max77620_regulator_ops = {
+	.is_enabled = max77620_regulator_is_enabled,
+	.enable = max77620_regulator_enable,
+	.disable = max77620_regulator_disable,
+	.list_voltage = regulator_list_voltage_linear,
+	.map_voltage = regulator_map_voltage_linear,
+	.get_voltage_sel = regulator_get_voltage_sel_regmap,
+	.set_voltage_sel = regulator_set_voltage_sel_regmap,
+	.set_mode = max77620_regulator_set_mode,
+	.get_mode = max77620_regulator_get_mode,
+	.set_ramp_delay = max77620_regulator_set_ramp_delay,
+	.set_voltage_time_sel = regulator_set_voltage_time_sel,
+	.set_active_discharge = regulator_set_active_discharge_regmap,
+};
+
+#define MAX77620_SD_CNF2_ROVS_EN_NONE	0
+#define RAIL_SD(_id, _name, _sname, _volt_mask, _min_uV, _max_uV,	\
+		_step_uV, _rs_add, _rs_mask)				\
+	[MAX77620_REGULATOR_ID_##_id] = {				\
+		.type = MAX77620_REGULATOR_TYPE_SD,			\
+		.volt_addr = MAX77620_REG_##_id,			\
+		.cfg_addr = MAX77620_REG_##_id##_CFG,			\
+		.fps_addr = MAX77620_REG_FPS_##_id,			\
+		.remote_sense_addr = _rs_add,				\
+		.remote_sense_mask = MAX77620_SD_CNF2_ROVS_EN_##_rs_mask, \
+		.power_mode_mask = MAX77620_SD_POWER_MODE_MASK,		\
+		.power_mode_shift = MAX77620_SD_POWER_MODE_SHIFT,	\
+		.desc = {						\
+			.name = max77620_rails(_name),			\
+			.of_match = of_match_ptr(#_name),		\
+			.regulators_node = of_match_ptr("regulators"),	\
+			.of_parse_cb = max77620_of_parse_cb,		\
+			.supply_name = _sname,				\
+			.id = MAX77620_REGULATOR_ID_##_id,		\
+			.ops = &max77620_regulator_ops,			\
+			.n_voltages = ((_max_uV - _min_uV) / _step_uV) + 1, \
+			.min_uV = _min_uV,				\
+			.uV_step = _step_uV,				\
+			.enable_time = 500,				\
+			.vsel_mask = MAX77620_##_volt_mask##_VOLT_MASK,	\
+			.vsel_reg = MAX77620_REG_##_id,			\
+			.active_discharge_off = 0,			\
+			.active_discharge_on = MAX77620_SD_CFG1_ADE_ENABLE, \
+			.active_discharge_mask = MAX77620_SD_CFG1_ADE_MASK, \
+			.active_discharge_reg = MAX77620_REG_##_id##_CFG, \
+			.type = REGULATOR_VOLTAGE,			\
+		},							\
+	}
+
+#define RAIL_LDO(_id, _name, _sname, _type, _min_uV, _max_uV, _step_uV) \
+	[MAX77620_REGULATOR_ID_##_id] = {				\
+		.type = MAX77620_REGULATOR_TYPE_LDO_##_type,		\
+		.volt_addr = MAX77620_REG_##_id##_CFG,			\
+		.cfg_addr = MAX77620_REG_##_id##_CFG2,			\
+		.fps_addr = MAX77620_REG_FPS_##_id,			\
+		.remote_sense_addr = 0xFF,				\
+		.power_mode_mask = MAX77620_LDO_POWER_MODE_MASK,	\
+		.power_mode_shift = MAX77620_LDO_POWER_MODE_SHIFT,	\
+		.desc = {						\
+			.name = max77620_rails(_name),			\
+			.of_match = of_match_ptr(#_name),		\
+			.regulators_node = of_match_ptr("regulators"),	\
+			.of_parse_cb = max77620_of_parse_cb,		\
+			.supply_name = _sname,				\
+			.id = MAX77620_REGULATOR_ID_##_id,		\
+			.ops = &max77620_regulator_ops,			\
+			.n_voltages = ((_max_uV - _min_uV) / _step_uV) + 1, \
+			.min_uV = _min_uV,				\
+			.uV_step = _step_uV,				\
+			.enable_time = 500,				\
+			.vsel_mask = MAX77620_LDO_VOLT_MASK,		\
+			.vsel_reg = MAX77620_REG_##_id##_CFG,		\
+			.active_discharge_off = 0,			\
+			.active_discharge_on = MAX77620_LDO_CFG2_ADE_ENABLE, \
+			.active_discharge_mask = MAX77620_LDO_CFG2_ADE_MASK, \
+			.active_discharge_reg = MAX77620_REG_##_id##_CFG2, \
+			.type = REGULATOR_VOLTAGE,			\
+		},							\
+	}
+
+static struct max77620_regulator_info max77620_regs_info[MAX77620_NUM_REGS] = {
+	RAIL_SD(SD0, sd0, "in-sd0", SD0, 600000, 1400000, 12500, 0x22, SD0),
+	RAIL_SD(SD1, sd1, "in-sd1", SD1, 600000, 1550000, 12500, 0x22, SD1),
+	RAIL_SD(SD2, sd2, "in-sd2", SDX, 600000, 3787500, 12500, 0xFF, NONE),
+	RAIL_SD(SD3, sd3, "in-sd3", SDX, 600000, 3787500, 12500, 0xFF, NONE),
+	RAIL_SD(SD4, sd4, "in-sd4", SDX, 600000, 3787500, 12500, 0xFF, NONE),
+
+	RAIL_LDO(LDO0, ldo0, "in-ldo0-1", N, 800000, 2375000, 25000),
+	RAIL_LDO(LDO1, ldo1, "in-ldo0-1", N, 800000, 2375000, 25000),
+	RAIL_LDO(LDO2, ldo2, "in-ldo2",   P, 800000, 3950000, 50000),
+	RAIL_LDO(LDO3, ldo3, "in-ldo3-5", P, 800000, 3950000, 50000),
+	RAIL_LDO(LDO4, ldo4, "in-ldo4-6", P, 800000, 1587500, 12500),
+	RAIL_LDO(LDO5, ldo5, "in-ldo3-5", P, 800000, 3950000, 50000),
+	RAIL_LDO(LDO6, ldo6, "in-ldo4-6", P, 800000, 3950000, 50000),
+	RAIL_LDO(LDO7, ldo7, "in-ldo7-8", N, 800000, 3950000, 50000),
+	RAIL_LDO(LDO8, ldo8, "in-ldo7-8", N, 800000, 3950000, 50000),
+};
+
+static struct max77620_regulator_info max20024_regs_info[MAX77620_NUM_REGS] = {
+	RAIL_SD(SD0, sd0, "in-sd0", SD0, 800000, 1587500, 12500, 0x22, SD0),
+	RAIL_SD(SD1, sd1, "in-sd1", SD1, 600000, 3387500, 12500, 0x22, SD1),
+	RAIL_SD(SD2, sd2, "in-sd2", SDX, 600000, 3787500, 12500, 0xFF, NONE),
+	RAIL_SD(SD3, sd3, "in-sd3", SDX, 600000, 3787500, 12500, 0xFF, NONE),
+	RAIL_SD(SD4, sd4, "in-sd4", SDX, 600000, 3787500, 12500, 0xFF, NONE),
+
+	RAIL_LDO(LDO0, ldo0, "in-ldo0-1", N, 800000, 2375000, 25000),
+	RAIL_LDO(LDO1, ldo1, "in-ldo0-1", N, 800000, 2375000, 25000),
+	RAIL_LDO(LDO2, ldo2, "in-ldo2",   P, 800000, 3950000, 50000),
+	RAIL_LDO(LDO3, ldo3, "in-ldo3-5", P, 800000, 3950000, 50000),
+	RAIL_LDO(LDO4, ldo4, "in-ldo4-6", P, 800000, 1587500, 12500),
+	RAIL_LDO(LDO5, ldo5, "in-ldo3-5", P, 800000, 3950000, 50000),
+	RAIL_LDO(LDO6, ldo6, "in-ldo4-6", P, 800000, 3950000, 50000),
+	RAIL_LDO(LDO7, ldo7, "in-ldo7-8", N, 800000, 3950000, 50000),
+	RAIL_LDO(LDO8, ldo8, "in-ldo7-8", N, 800000, 3950000, 50000),
+};
+
+static int max77620_regulator_probe(struct platform_device *pdev)
+{
+	struct max77620_chip *max77620_chip = dev_get_drvdata(pdev->dev.parent);
+	struct max77620_regulator_info *rinfo;
+	struct device *dev = &pdev->dev;
+	struct regulator_config config = { };
+	struct max77620_regulator *pmic;
+	int ret = 0;
+	int id;
+
+	pmic = devm_kzalloc(dev, sizeof(*pmic), GFP_KERNEL);
+	if (!pmic)
+		return -ENOMEM;
+
+	platform_set_drvdata(pdev, pmic);
+	pmic->dev = dev;
+	pmic->rmap = max77620_chip->rmap;
+	if (!dev->of_node)
+		dev->of_node = pdev->dev.parent->of_node;
+
+	switch (max77620_chip->chip_id) {
+	case MAX77620:
+		rinfo = max77620_regs_info;
+		break;
+	default:
+		rinfo = max20024_regs_info;
+		break;
+	}
+
+	config.regmap = pmic->rmap;
+	config.dev = dev;
+	config.driver_data = pmic;
+
+	for (id = 0; id < MAX77620_NUM_REGS; id++) {
+		struct regulator_dev *rdev;
+		struct regulator_desc *rdesc;
+
+		if ((max77620_chip->chip_id == MAX77620) &&
+		    (id == MAX77620_REGULATOR_ID_SD4))
+			continue;
+
+		rdesc = &rinfo[id].desc;
+		pmic->rinfo[id] = &max77620_regs_info[id];
+		pmic->enable_power_mode[id] = MAX77620_POWER_MODE_NORMAL;
+
+		ret = max77620_read_slew_rate(pmic, id);
+		if (ret < 0)
+			return ret;
+
+		rdev = devm_regulator_register(dev, rdesc, &config);
+		if (IS_ERR(rdev)) {
+			ret = PTR_ERR(rdev);
+			dev_err(dev, "Regulator registration %s failed: %d\n",
+				rdesc->name, ret);
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int max77620_regulator_suspend(struct device *dev)
+{
+	struct max77620_regulator *pmic = dev_get_drvdata(dev);
+	struct max77620_regulator_pdata *reg_pdata;
+	int id;
+
+	for (id = 0; id < MAX77620_NUM_REGS; id++) {
+		reg_pdata = &pmic->reg_pdata[id];
+
+		max77620_regulator_set_fps_slots(pmic, id, true);
+		if (reg_pdata->suspend_fps_src < 0)
+			continue;
+
+		max77620_regulator_set_fps_src(pmic, reg_pdata->suspend_fps_src,
+					       id);
+	}
+
+	return 0;
+}
+
+static int max77620_regulator_resume(struct device *dev)
+{
+	struct max77620_regulator *pmic = dev_get_drvdata(dev);
+	struct max77620_regulator_pdata *reg_pdata;
+	int id;
+
+	for (id = 0; id < MAX77620_NUM_REGS; id++) {
+		reg_pdata = &pmic->reg_pdata[id];
+
+		max77620_regulator_set_fps_slots(pmic, id, false);
+		if (reg_pdata->active_fps_src < 0)
+			continue;
+		max77620_regulator_set_fps_src(pmic, reg_pdata->active_fps_src,
+					       id);
+	}
+
+	return 0;
+}
+#endif
+
+static const struct dev_pm_ops max77620_regulator_pm_ops = {
+	SET_SYSTEM_SLEEP_PM_OPS(max77620_regulator_suspend,
+				max77620_regulator_resume)
+};
+
+static const struct platform_device_id max77620_regulator_devtype[] = {
+	{ .name = "max77620-pmic", },
+	{ .name = "max20024-pmic", },
+	{},
+};
+MODULE_DEVICE_TABLE(platform, max77620_regulator_devtype);
+
+static struct platform_driver max77620_regulator_driver = {
+	.probe = max77620_regulator_probe,
+	.id_table = max77620_regulator_devtype,
+	.driver = {
+		.name = "max77620-pmic",
+		.pm = &max77620_regulator_pm_ops,
+	},
+};
+
+module_platform_driver(max77620_regulator_driver);
+
+MODULE_DESCRIPTION("MAX77620/MAX20024 regulator driver");
+MODULE_AUTHOR("Mallikarjun Kasoju <mkasoju@nvidia.com>");
+MODULE_AUTHOR("Laxman Dewangan <ldewangan@nvidia.com>");
+MODULE_LICENSE("GPL v2");
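
The platform_device_id table above ("max77620-pmic"/"max20024-pmic") implies that the parent MFD driver creates a platform child with one of those names. As a rough, hypothetical sketch of that parent-side wiring (the cell array and helper below are illustrative, not part of this series):

	#include <linux/kernel.h>
	#include <linux/mfd/core.h>

	/* Hypothetical parent-side cell list; only the name matters for matching. */
	static const struct mfd_cell max77620_cells[] = {
		{ .name = "max77620-pmic" },	/* bound by max77620_regulator_devtype */
	};

	static int max77620_add_children(struct device *parent)
	{
		/* id -1: let the platform bus assign one; no resources, no IRQ base. */
		return mfd_add_devices(parent, -1, max77620_cells,
				       ARRAY_SIZE(max77620_cells), NULL, 0, NULL);
	}
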
diff --git a/drivers/regulator/max77686.c b/drivers/regulator/max77686-regulator.c
similarity index 100%
rename from drivers/regulator/max77686.c
rename to drivers/regulator/max77686-regulator.c
diff --git a/drivers/regulator/max77802.c b/drivers/regulator/max77802-regulator.c
similarity index 100%
rename from drivers/regulator/max77802.c
rename to drivers/regulator/max77802-regulator.c
diff --git a/drivers/regulator/mt6397-regulator.c b/drivers/regulator/mt6397-regulator.c
index a5b2f47..17a5b6c 100644
--- a/drivers/regulator/mt6397-regulator.c
+++ b/drivers/regulator/mt6397-regulator.c
@@ -317,11 +317,25 @@
 	return 0;
 }
 
+static const struct platform_device_id mt6397_platform_ids[] = {
+	{"mt6397-regulator", 0},
+	{ /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(platform, mt6397_platform_ids);
+
+static const struct of_device_id mt6397_of_match[] = {
+	{ .compatible = "mediatek,mt6397-regulator", },
+	{ /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, mt6397_of_match);
+
 static struct platform_driver mt6397_regulator_driver = {
 	.driver = {
 		.name = "mt6397-regulator",
+		.of_match_table = of_match_ptr(mt6397_of_match),
 	},
 	.probe = mt6397_regulator_probe,
+	.id_table = mt6397_platform_ids,
 };
 
 module_platform_driver(mt6397_regulator_driver);
@@ -329,4 +343,3 @@
 MODULE_AUTHOR("Flora Fu <flora.fu@mediatek.com>");
 MODULE_DESCRIPTION("Regulator Driver for MediaTek MT6397 PMIC");
 MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:mt6397-regulator");
diff --git a/drivers/regulator/of_regulator.c b/drivers/regulator/of_regulator.c
index 499e437..6b0aa80 100644
--- a/drivers/regulator/of_regulator.c
+++ b/drivers/regulator/of_regulator.c
@@ -28,7 +28,6 @@
 					struct regulator_init_data **init_data,
 					const struct regulator_desc *desc)
 {
-	const __be32 *min_uV, *max_uV;
 	struct regulation_constraints *constraints = &(*init_data)->constraints;
 	struct regulator_state *suspend_state;
 	struct device_node *suspend_np;
@@ -37,18 +36,18 @@
 
 	constraints->name = of_get_property(np, "regulator-name", NULL);
 
-	min_uV = of_get_property(np, "regulator-min-microvolt", NULL);
-	if (min_uV)
-		constraints->min_uV = be32_to_cpu(*min_uV);
-	max_uV = of_get_property(np, "regulator-max-microvolt", NULL);
-	if (max_uV)
-		constraints->max_uV = be32_to_cpu(*max_uV);
+	if (!of_property_read_u32(np, "regulator-min-microvolt", &pval))
+		constraints->min_uV = pval;
+
+	if (!of_property_read_u32(np, "regulator-max-microvolt", &pval))
+		constraints->max_uV = pval;
 
 	/* Voltage change possible? */
 	if (constraints->min_uV != constraints->max_uV)
 		constraints->valid_ops_mask |= REGULATOR_CHANGE_VOLTAGE;
 	/* Only one voltage?  Then make sure it's set. */
-	if (min_uV && max_uV && constraints->min_uV == constraints->max_uV)
+	if (constraints->min_uV && constraints->max_uV &&
+	    constraints->min_uV == constraints->max_uV)
 		constraints->apply_uV = true;
 
 	if (!of_property_read_u32(np, "regulator-microvolt-offset", &pval))
@@ -93,6 +92,12 @@
 
 	constraints->soft_start = of_property_read_bool(np,
 					"regulator-soft-start");
+	ret = of_property_read_u32(np, "regulator-active-discharge", &pval);
+	if (!ret) {
+		constraints->active_discharge =
+				(pval) ? REGULATOR_ACTIVE_DISCHARGE_ENABLE :
+					REGULATOR_ACTIVE_DISCHARGE_DISABLE;
+	}
 
 	if (!of_property_read_u32(np, "regulator-initial-mode", &pval)) {
 		if (desc && desc->of_map_mode) {
diff --git a/drivers/regulator/pv88060-regulator.c b/drivers/regulator/pv88060-regulator.c
index 094376c..c448b72 100644
--- a/drivers/regulator/pv88060-regulator.c
+++ b/drivers/regulator/pv88060-regulator.c
@@ -285,8 +285,8 @@
 			}
 		}
 
-		err = regmap_update_bits(chip->regmap, PV88060_REG_EVENT_A,
-			PV88060_E_VDD_FLT, PV88060_E_VDD_FLT);
+		err = regmap_write(chip->regmap, PV88060_REG_EVENT_A,
+			PV88060_E_VDD_FLT);
 		if (err < 0)
 			goto error_i2c;
 
@@ -302,8 +302,8 @@
 			}
 		}
 
-		err = regmap_update_bits(chip->regmap, PV88060_REG_EVENT_A,
-			PV88060_E_OVER_TEMP, PV88060_E_OVER_TEMP);
+		err = regmap_write(chip->regmap, PV88060_REG_EVENT_A,
+			PV88060_E_OVER_TEMP);
 		if (err < 0)
 			goto error_i2c;
 
diff --git a/drivers/regulator/pv88090-regulator.c b/drivers/regulator/pv88090-regulator.c
index ac15f31..0057c67 100644
--- a/drivers/regulator/pv88090-regulator.c
+++ b/drivers/regulator/pv88090-regulator.c
@@ -283,8 +283,8 @@
 			}
 		}
 
-		err = regmap_update_bits(chip->regmap, PV88090_REG_EVENT_A,
-			PV88090_E_VDD_FLT, PV88090_E_VDD_FLT);
+		err = regmap_write(chip->regmap, PV88090_REG_EVENT_A,
+			PV88090_E_VDD_FLT);
 		if (err < 0)
 			goto error_i2c;
 
@@ -300,8 +300,8 @@
 			}
 		}
 
-		err = regmap_update_bits(chip->regmap, PV88090_REG_EVENT_A,
-			PV88090_E_OVER_TEMP, PV88090_E_OVER_TEMP);
+		err = regmap_write(chip->regmap, PV88090_REG_EVENT_A,
+			PV88090_E_OVER_TEMP);
 		if (err < 0)
 			goto error_i2c;
 
diff --git a/drivers/regulator/pwm-regulator.c b/drivers/regulator/pwm-regulator.c
index 3aca067..4689d62 100644
--- a/drivers/regulator/pwm-regulator.c
+++ b/drivers/regulator/pwm-regulator.c
@@ -27,6 +27,13 @@
 
 	/* Voltage table */
 	struct pwm_voltages *duty_cycle_table;
+
+	/* regulator descriptor */
+	struct regulator_desc desc;
+
+	/* Regulator ops */
+	struct regulator_ops ops;
+
 	int state;
 
 	/* Continuous voltage */
@@ -115,7 +122,7 @@
 	int max_uV = rdev->constraints->max_uV;
 	int diff = max_uV - min_uV;
 
-	return 100 - (((req_uV * 100) - (min_uV * 100)) / diff);
+	return ((req_uV * 100) - (min_uV * 100)) / diff;
 }
 
 static int pwm_regulator_get_voltage(struct regulator_dev *rdev)
@@ -212,8 +219,10 @@
 	}
 
 	drvdata->duty_cycle_table	= duty_cycle_table;
-	pwm_regulator_desc.ops		= &pwm_regulator_voltage_table_ops;
-	pwm_regulator_desc.n_voltages	= length / sizeof(*duty_cycle_table);
+	memcpy(&drvdata->ops, &pwm_regulator_voltage_table_ops,
+	       sizeof(drvdata->ops));
+	drvdata->desc.ops = &drvdata->ops;
+	drvdata->desc.n_voltages	= length / sizeof(*duty_cycle_table);
 
 	return 0;
 }
@@ -221,8 +230,10 @@
 static int pwm_regulator_init_continuous(struct platform_device *pdev,
 					 struct pwm_regulator_data *drvdata)
 {
-	pwm_regulator_desc.ops = &pwm_regulator_voltage_continuous_ops;
-	pwm_regulator_desc.continuous_voltage_range = true;
+	memcpy(&drvdata->ops, &pwm_regulator_voltage_continuous_ops,
+	       sizeof(drvdata->ops));
+	drvdata->desc.ops = &drvdata->ops;
+	drvdata->desc.continuous_voltage_range = true;
 
 	return 0;
 }
@@ -245,6 +256,8 @@
 	if (!drvdata)
 		return -ENOMEM;
 
+	memcpy(&drvdata->desc, &pwm_regulator_desc, sizeof(drvdata->desc));
+
 	if (of_find_property(np, "voltage-table", NULL))
 		ret = pwm_regulator_init_table(pdev, drvdata);
 	else
@@ -253,7 +266,7 @@
 		return ret;
 
 	init_data = of_get_regulator_init_data(&pdev->dev, np,
-					       &pwm_regulator_desc);
+					       &drvdata->desc);
 	if (!init_data)
 		return -ENOMEM;
 
@@ -269,10 +282,10 @@
 	}
 
 	regulator = devm_regulator_register(&pdev->dev,
-					    &pwm_regulator_desc, &config);
+					    &drvdata->desc, &config);
 	if (IS_ERR(regulator)) {
 		dev_err(&pdev->dev, "Failed to register regulator %s\n",
-			pwm_regulator_desc.name);
+			drvdata->desc.name);
 		return PTR_ERR(regulator);
 	}
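
Moving the descriptor and ops into drvdata matters because a file-scope pwm_regulator_desc patched in probe would be shared by every instance; two PWM regulators with different voltage tables would overwrite each other. The general per-instance-copy pattern, as a small hedged sketch with invented names:

	/* Read-only template shared by all instances. */
	static const struct regulator_desc foo_desc_template = {
		.name	= "foo-reg",
		.type	= REGULATOR_VOLTAGE,
		.owner	= THIS_MODULE,
	};

	static void foo_init_instance(struct foo_drvdata *ddata, int n_voltages)
	{
		/* Each instance patches only its own private copy. */
		memcpy(&ddata->desc, &foo_desc_template, sizeof(ddata->desc));
		ddata->desc.n_voltages = n_voltages;
		ddata->desc.ops = &ddata->ops;
	}
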
 
diff --git a/drivers/regulator/s2mps11.c b/drivers/regulator/s2mps11.c
index 3242ffc..d24e2c7 100644
--- a/drivers/regulator/s2mps11.c
+++ b/drivers/regulator/s2mps11.c
@@ -38,7 +38,6 @@
 /* The highest number of possible regulators for supported devices. */
 #define S2MPS_REGULATOR_MAX		S2MPS13_REGULATOR_MAX
 struct s2mps11_info {
-	unsigned int rdev_num;
 	int ramp_delay2;
 	int ramp_delay34;
 	int ramp_delay5;
@@ -54,7 +53,10 @@
 	 */
 	DECLARE_BITMAP(suspend_state, S2MPS_REGULATOR_MAX);
 
-	/* Array of size rdev_num with GPIO-s for external sleep control */
+	/*
+	 * Array (size: number of regulators) with GPIO-s for external
+	 * sleep control.
+	 */
 	int *ext_control_gpio;
 };
 
@@ -819,7 +821,8 @@
 }
 
 static int s2mps11_pmic_dt_parse(struct platform_device *pdev,
-		struct of_regulator_match *rdata, struct s2mps11_info *s2mps11)
+		struct of_regulator_match *rdata, struct s2mps11_info *s2mps11,
+		unsigned int rdev_num)
 {
 	struct device_node *reg_np;
 
@@ -829,7 +832,7 @@
 		return -EINVAL;
 	}
 
-	of_regulator_match(&pdev->dev, reg_np, rdata, s2mps11->rdev_num);
+	of_regulator_match(&pdev->dev, reg_np, rdata, rdev_num);
 	if (s2mps11->dev_type == S2MPS14X)
 		s2mps14_pmic_dt_parse_ext_control_gpio(pdev, rdata, s2mps11);
 
@@ -1077,6 +1080,7 @@
 	struct of_regulator_match *rdata = NULL;
 	struct regulator_config config = { };
 	struct s2mps11_info *s2mps11;
+	unsigned int rdev_num = 0;
 	int i, ret = 0;
 	const struct regulator_desc *regulators;
 
@@ -1088,28 +1092,29 @@
 	s2mps11->dev_type = platform_get_device_id(pdev)->driver_data;
 	switch (s2mps11->dev_type) {
 	case S2MPS11X:
-		s2mps11->rdev_num = ARRAY_SIZE(s2mps11_regulators);
+		rdev_num = ARRAY_SIZE(s2mps11_regulators);
 		regulators = s2mps11_regulators;
-		BUILD_BUG_ON(S2MPS_REGULATOR_MAX < s2mps11->rdev_num);
+		BUILD_BUG_ON(S2MPS_REGULATOR_MAX < ARRAY_SIZE(s2mps11_regulators));
 		break;
 	case S2MPS13X:
-		s2mps11->rdev_num = ARRAY_SIZE(s2mps13_regulators);
+		rdev_num = ARRAY_SIZE(s2mps13_regulators);
 		regulators = s2mps13_regulators;
-		BUILD_BUG_ON(S2MPS_REGULATOR_MAX < s2mps11->rdev_num);
+		BUILD_BUG_ON(S2MPS_REGULATOR_MAX < ARRAY_SIZE(s2mps13_regulators));
 		break;
 	case S2MPS14X:
-		s2mps11->rdev_num = ARRAY_SIZE(s2mps14_regulators);
+		rdev_num = ARRAY_SIZE(s2mps14_regulators);
 		regulators = s2mps14_regulators;
-		BUILD_BUG_ON(S2MPS_REGULATOR_MAX < s2mps11->rdev_num);
+		BUILD_BUG_ON(S2MPS_REGULATOR_MAX < ARRAY_SIZE(s2mps14_regulators));
 		break;
 	case S2MPS15X:
-		s2mps11->rdev_num = ARRAY_SIZE(s2mps15_regulators);
+		rdev_num = ARRAY_SIZE(s2mps15_regulators);
 		regulators = s2mps15_regulators;
+		BUILD_BUG_ON(S2MPS_REGULATOR_MAX < ARRAY_SIZE(s2mps15_regulators));
 		break;
 	case S2MPU02:
-		s2mps11->rdev_num = ARRAY_SIZE(s2mpu02_regulators);
+		rdev_num = ARRAY_SIZE(s2mpu02_regulators);
 		regulators = s2mpu02_regulators;
-		BUILD_BUG_ON(S2MPS_REGULATOR_MAX < s2mps11->rdev_num);
+		BUILD_BUG_ON(S2MPS_REGULATOR_MAX < ARRAY_SIZE(s2mpu02_regulators));
 		break;
 	default:
 		dev_err(&pdev->dev, "Invalid device type: %u\n",
@@ -1118,7 +1123,7 @@
 	}
 
 	s2mps11->ext_control_gpio = devm_kmalloc(&pdev->dev,
-			sizeof(*s2mps11->ext_control_gpio) * s2mps11->rdev_num,
+			sizeof(*s2mps11->ext_control_gpio) * rdev_num,
 			GFP_KERNEL);
 	if (!s2mps11->ext_control_gpio)
 		return -ENOMEM;
@@ -1126,7 +1131,7 @@
 	 * 0 is a valid GPIO so initialize all GPIO-s to negative value
 	 * to indicate that external control won't be used for this regulator.
 	 */
-	for (i = 0; i < s2mps11->rdev_num; i++)
+	for (i = 0; i < rdev_num; i++)
 		s2mps11->ext_control_gpio[i] = -EINVAL;
 
 	if (!iodev->dev->of_node) {
@@ -1140,14 +1145,14 @@
 		}
 	}
 
-	rdata = kzalloc(sizeof(*rdata) * s2mps11->rdev_num, GFP_KERNEL);
+	rdata = kzalloc(sizeof(*rdata) * rdev_num, GFP_KERNEL);
 	if (!rdata)
 		return -ENOMEM;
 
-	for (i = 0; i < s2mps11->rdev_num; i++)
+	for (i = 0; i < rdev_num; i++)
 		rdata[i].name = regulators[i].name;
 
-	ret = s2mps11_pmic_dt_parse(pdev, rdata, s2mps11);
+	ret = s2mps11_pmic_dt_parse(pdev, rdata, s2mps11, rdev_num);
 	if (ret)
 		goto out;
 
@@ -1159,7 +1164,7 @@
 	config.driver_data = s2mps11;
 	config.ena_gpio_flags = GPIOF_OUT_INIT_HIGH;
 	config.ena_gpio_initialized = true;
-	for (i = 0; i < s2mps11->rdev_num; i++) {
+	for (i = 0; i < rdev_num; i++) {
 		struct regulator_dev *regulator;
 
 		if (pdata) {
diff --git a/drivers/regulator/s5m8767.c b/drivers/regulator/s5m8767.c
index 58f5d3b..27343e1 100644
--- a/drivers/regulator/s5m8767.c
+++ b/drivers/regulator/s5m8767.c
@@ -202,9 +202,10 @@
 		}
 	}
 
-	if (i < s5m8767->num_regulators)
-		*enable_ctrl =
-		s5m8767_opmode_reg[reg_id][mode] << S5M8767_ENCTRL_SHIFT;
+	if (i >= s5m8767->num_regulators)
+		return -EINVAL;
+
+	*enable_ctrl = s5m8767_opmode_reg[reg_id][mode] << S5M8767_ENCTRL_SHIFT;
 
 	return 0;
 }
@@ -937,8 +938,12 @@
 			else
 				regulators[id].vsel_mask = 0xff;
 
-			s5m8767_get_register(s5m8767, id, &enable_reg,
+			ret = s5m8767_get_register(s5m8767, id, &enable_reg,
 					     &enable_val);
+			if (ret) {
+				dev_err(s5m8767->dev, "error reading registers\n");
+				return ret;
+			}
 			regulators[id].enable_reg = enable_reg;
 			regulators[id].enable_mask = S5M8767_ENCTRL_MASK;
 			regulators[id].enable_val = enable_val;
diff --git a/drivers/regulator/vexpress.c b/drivers/regulator/vexpress-regulator.c
similarity index 100%
rename from drivers/regulator/vexpress.c
rename to drivers/regulator/vexpress-regulator.c
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
index 376322f..544bd34 100644
--- a/drivers/rtc/Kconfig
+++ b/drivers/rtc/Kconfig
@@ -140,7 +140,6 @@
 	  will be called rtc-test.
 
 comment "I2C RTC drivers"
-	depends on I2C
 
 if I2C
 
@@ -212,6 +211,15 @@
 	  This driver can also be built as a module. If so, the module
 	  will be called rtc-ds1307.
 
+config RTC_DRV_DS1307_HWMON
+	bool "HWMON support for rtc-ds1307"
+	depends on RTC_DRV_DS1307 && HWMON
+	depends on !(RTC_DRV_DS1307=y && HWMON=m)
+	default y
+	help
+	  Say Y here if you want to expose temperature sensor data on
+	  rtc-ds1307 (only the DS3231).
+
 config RTC_DRV_DS1374
 	tristate "Dallas/Maxim DS1374"
 	help
@@ -239,16 +247,6 @@
 	  This driver can also be built as a module. If so, the module
 	  will be called rtc-ds1672.
 
-config RTC_DRV_DS3232
-	tristate "Dallas/Maxim DS3232"
-	help
-	  If you say yes here you get support for Dallas Semiconductor
-	  DS3232 real-time clock chips. If an interrupt is associated
-	  with the device, the alarm functionality is supported.
-
-	  This driver can also be built as a module.  If so, the module
-	  will be called rtc-ds3232.
-
 config RTC_DRV_HYM8563
 	tristate "Haoyu Microelectronics HYM8563"
 	depends on OF
@@ -317,10 +315,10 @@
 
 config RTC_DRV_MAX77686
 	tristate "Maxim MAX77686"
-	depends on MFD_MAX77686
+	depends on MFD_MAX77686 || MFD_MAX77620
 	help
 	  If you say yes here you will get support for the
-	  RTC of Maxim MAX77686 PMIC.
+	  RTC of Maxim MAX77686/MAX77620/MAX77802 PMIC.
 
 	  This driver can also be built as a module. If so, the module
 	  will be called rtc-max77686.
@@ -335,16 +333,6 @@
 	  This driver can also be built as a module. If so, the module
 	  will be called rk808-rtc.
 
-config RTC_DRV_MAX77802
-	tristate "Maxim 77802 RTC"
-	depends on MFD_MAX77686
-	help
-	  If you say yes here you will get support for the
-	  RTC of Maxim MAX77802 PMIC.
-
-	  This driver can also be built as a module. If so, the module
-	  will be called rtc-max77802.
-
 config RTC_DRV_RS5C372
 	tristate "Ricoh R2025S/D, RS5C372A/B, RV5C386, RV5C387A"
 	help
@@ -391,25 +379,6 @@
 	  This driver can also be built as a module. If so, the module
 	  will be called rtc-x1205.
 
-config RTC_DRV_PALMAS
-	tristate "TI Palmas RTC driver"
-	depends on MFD_PALMAS
-	help
-	  If you say yes here you get support for the RTC of TI PALMA series PMIC
-	  chips.
-
-	  This driver can also be built as a module. If so, the module
-	  will be called rtc-palma.
-
-config RTC_DRV_PCF2127
-	tristate "NXP PCF2127"
-	help
-	  If you say yes here you get support for the NXP PCF2127/29 RTC
-	  chips.
-
-	  This driver can also be built as a module. If so, the module
-	  will be called rtc-pcf2127.
-
 config RTC_DRV_PCF8523
 	tristate "NXP PCF8523"
 	help
@@ -419,6 +388,14 @@
 	  This driver can also be built as a module. If so, the module
 	  will be called rtc-pcf8523.
 
+config RTC_DRV_PCF85063
+	tristate "NXP PCF85063"
+	help
+	  If you say yes here you get support for the PCF85063 RTC chip.
+
+	  This driver can also be built as a module. If so, the module
+	  will be called rtc-pcf85063.
+
 config RTC_DRV_PCF8563
 	tristate "Philips PCF8563/Epson RTC8564"
 	help
@@ -429,14 +406,6 @@
 	  This driver can also be built as a module. If so, the module
 	  will be called rtc-pcf8563.
 
-config RTC_DRV_PCF85063
-	tristate "nxp PCF85063"
-	help
-	  If you say yes here you get support for the PCF85063 RTC chip
-
-	  This driver can also be built as a module. If so, the module
-	  will be called rtc-pcf85063.
-
 config RTC_DRV_PCF8583
 	tristate "Philips PCF8583"
 	help
@@ -501,6 +470,16 @@
 	  This driver can also be built as a module. If so, the module
 	  will be called rtc-twl.
 
+config RTC_DRV_PALMAS
+	tristate "TI Palmas RTC driver"
+	depends on MFD_PALMAS
+	help
+	  If you say yes here you get support for the RTC of TI Palmas series
+	  PMIC chips.
+
+	  This driver can also be built as a module. If so, the module
+	  will be called rtc-palmas.
+
 config RTC_DRV_TPS6586X
 	tristate "TI TPS6586X RTC driver"
 	depends on MFD_TPS6586X
@@ -595,14 +574,23 @@
 	  will be called rtc-em3027.
 
 config RTC_DRV_RV3029C2
-	tristate "Micro Crystal RTC"
+	tristate "Micro Crystal RV3029"
 	help
 	  If you say yes here you get support for the Micro Crystal
-	  RV3029-C2 RTC chips.
+	  RV3029 RTC chips.
 
 	  This driver can also be built as a module. If so, the module
 	  will be called rtc-rv3029c2.
 
+config RTC_DRV_RV3029_HWMON
+	bool "HWMON support for RV3029"
+	depends on RTC_DRV_RV3029C2 && HWMON
+	depends on !(RTC_DRV_RV3029C2=y && HWMON=m)
+	default y
+	help
+	  Say Y here if you want to expose temperature sensor data on
+	  rtc-rv3029c2.
+
 config RTC_DRV_RV8803
 	tristate "Micro Crystal RV8803"
 	help
@@ -691,15 +679,6 @@
 	  This driver can also be built as a module. If so, the module
 	  will be called rtc-ds1390.
 
-config RTC_DRV_MAX6902
-	tristate "Maxim MAX6902"
-	help
-	  If you say yes here you will get support for the
-	  Maxim MAX6902 SPI RTC chip.
-
-	  This driver can also be built as a module. If so, the module
-	  will be called rtc-max6902.
-
 config RTC_DRV_R9701
 	tristate "Epson RTC-9701JE"
 	help
@@ -709,6 +688,23 @@
 	  This driver can also be built as a module. If so, the module
 	  will be called rtc-r9701.
 
+config RTC_DRV_RX4581
+	tristate "Epson RX-4581"
+	help
+	  If you say yes here you will get support for the Epson RX-4581.
+
+	  This driver can also be built as a module. If so the module
+	  will be called rtc-rx4581.
+
+config RTC_DRV_RX6110
+	tristate "Epson RX-6110"
+	select REGMAP_SPI
+	help
+	  If you say yes here you will get support for the Epson RX-6110.
+
+	  This driver can also be built as a module. If so the module
+	  will be called rtc-rx6110.
+
 config RTC_DRV_RS5C348
 	tristate "Ricoh RS5C348A/B"
 	help
@@ -718,14 +714,14 @@
 	  This driver can also be built as a module. If so, the module
 	  will be called rtc-rs5c348.
 
-config RTC_DRV_DS3234
-	tristate "Maxim/Dallas DS3234"
+config RTC_DRV_MAX6902
+	tristate "Maxim MAX6902"
 	help
-	  If you say yes here you get support for the
-	  Maxim/Dallas DS3234 SPI RTC chip.
+	  If you say yes here you will get support for the
+	  Maxim MAX6902 SPI RTC chip.
 
 	  This driver can also be built as a module. If so, the module
-	  will be called rtc-ds3234.
+	  will be called rtc-max6902.
 
 config RTC_DRV_PCF2123
 	tristate "NXP PCF2123"
@@ -736,14 +732,6 @@
 	  This driver can also be built as a module. If so, the module
 	  will be called rtc-pcf2123.
 
-config RTC_DRV_RX4581
-	tristate "Epson RX-4581"
-	help
-	  If you say yes here you will get support for the Epson RX-4581.
-
-	  This driver can also be built as a module. If so the module
-	  will be called rtc-rx4581.
-
 config RTC_DRV_MCP795
 	tristate "Microchip MCP795"
 	help
@@ -754,6 +742,41 @@
 
 endif # SPI_MASTER
 
+#
+# Helper to resolve issues with configs that have SPI enabled but I2C
+# modular.  See SND_SOC_I2C_AND_SPI for more information
+#
+config RTC_I2C_AND_SPI
+	tristate
+	default m if I2C=m
+	default y if I2C=y
+	default y if SPI_MASTER=y
+	select REGMAP_I2C if I2C
+	select REGMAP_SPI if SPI_MASTER
+
+comment "SPI and I2C RTC drivers"
+
+config RTC_DRV_DS3232
+	tristate "Dallas/Maxim DS3232/DS3234"
+	depends on RTC_I2C_AND_SPI
+	help
+	  If you say yes here you get support for Dallas Semiconductor
+	  DS3232 and DS3234 real-time clock chips. If an interrupt is associated
+	  with the device, the alarm functionality is supported.
+
+	  This driver can also be built as a module.  If so, the module
+	  will be called rtc-ds3232.
+
+config RTC_DRV_PCF2127
+	tristate "NXP PCF2127"
+	depends on RTC_I2C_AND_SPI
+	help
+	  If you say yes here you get support for the NXP PCF2127/29 RTC
+	  chips.
+
+	  This driver can also be built as a module. If so, the module
+	  will be called rtc-pcf2127.
+
 comment "Platform RTC drivers"
 
 # this 'CMOS' RTC driver is arch dependent because <asm-generic/rtc.h>
@@ -1087,7 +1110,7 @@
 
 config RTC_DRV_SPEAR
 	tristate "SPEAR ST RTC"
-	depends on PLAT_SPEAR
+	depends on PLAT_SPEAR || COMPILE_TEST
 	default y
 	help
 	 If you say Y here you will get support for the RTC found on
@@ -1119,7 +1142,7 @@
 
 config RTC_DRV_NUC900
 	tristate "NUC910/NUC920 RTC driver"
-	depends on ARCH_W90X900
+	depends on ARCH_W90X900 || COMPILE_TEST
 	help
 	  If you say yes here you get support for the RTC subsystem of the
 	  NUC910/NUC920 used in embedded systems.
@@ -1144,9 +1167,19 @@
 
 comment "on-CPU RTC drivers"
 
+config RTC_DRV_ASM9260
+	tristate "Alphascale asm9260 RTC"
+	depends on MACH_ASM9260
+	help
+	  If you say yes here you get support for the RTC on the
+	  Alphascale asm9260 SoC.
+
+	  This driver can also be built as a module. If so, the module
+	  will be called rtc-asm9260.
+
 config RTC_DRV_DAVINCI
 	tristate "TI DaVinci RTC"
-	depends on ARCH_DAVINCI_DM365
+	depends on ARCH_DAVINCI_DM365 || COMPILE_TEST
 	help
 	  If you say yes here you get support for the RTC on the
 	  DaVinci platforms (DM365).
@@ -1156,7 +1189,7 @@
 
 config RTC_DRV_DIGICOLOR
 	tristate "Conexant Digicolor RTC"
-	depends on ARCH_DIGICOLOR
+	depends on ARCH_DIGICOLOR || COMPILE_TEST
 	help
 	  If you say yes here you get support for the RTC on Conexant
 	  Digicolor platforms. This currently includes the CX92755 SoC.
@@ -1175,7 +1208,7 @@
 
 config RTC_DRV_OMAP
 	tristate "TI OMAP Real Time Clock"
-	depends on ARCH_OMAP || ARCH_DAVINCI
+	depends on ARCH_OMAP || ARCH_DAVINCI || COMPILE_TEST
 	help
 	  Say "yes" here to support the on chip real time clock
 	  present on TI OMAP1, AM33xx, DA8xx/OMAP-L13x, AM43xx and DRA7xx.
@@ -1192,7 +1225,7 @@
 
 config RTC_DRV_S3C
 	tristate "Samsung S3C series SoC RTC"
-	depends on ARCH_S3C64XX || HAVE_S3C_RTC
+	depends on ARCH_S3C64XX || HAVE_S3C_RTC || COMPILE_TEST
 	help
 	  RTC (Realtime Clock) driver for the clock inbuilt into the
 	  Samsung S3C24XX series of SoCs. This can provide periodic
@@ -1208,7 +1241,7 @@
 
 config RTC_DRV_EP93XX
 	tristate "Cirrus Logic EP93XX"
-	depends on ARCH_EP93XX
+	depends on ARCH_EP93XX || COMPILE_TEST
 	help
 	  If you say yes here you get support for the
 	  RTC embedded in the Cirrus Logic EP93XX processors.
@@ -1238,7 +1271,7 @@
 
 config RTC_DRV_VR41XX
 	tristate "NEC VR41XX"
-	depends on CPU_VR41XX
+	depends on CPU_VR41XX || COMPILE_TEST
 	help
 	  If you say Y here you will get access to the real time clock
 	  built into your NEC VR41XX CPU.
@@ -1268,14 +1301,14 @@
 
 config RTC_DRV_AT32AP700X
 	tristate "AT32AP700X series RTC"
-	depends on PLATFORM_AT32AP
+	depends on PLATFORM_AT32AP || COMPILE_TEST
 	help
 	  Driver for the internal RTC (Realtime Clock) on Atmel AVR32
 	  AT32AP700x family processors.
 
 config RTC_DRV_AT91RM9200
 	tristate "AT91RM9200 or some AT91SAM9 RTC"
-	depends on ARCH_AT91
+	depends on ARCH_AT91 || COMPILE_TEST
 	help
 	  Driver for the internal RTC (Realtime Clock) module found on
 	  Atmel AT91RM9200's and some  AT91SAM9 chips. On AT91SAM9 chips
@@ -1283,7 +1316,7 @@
 
 config RTC_DRV_AT91SAM9
 	tristate "AT91SAM9 RTT as RTC"
-	depends on ARCH_AT91
+	depends on ARCH_AT91 || COMPILE_TEST
 	select MFD_SYSCON
 	help
 	  Some AT91SAM9 SoCs provide an RTT (Real Time Timer) block which
@@ -1325,17 +1358,17 @@
 	tristate "Generic RTC support"
 	# Please consider writing a new RTC driver instead of using the generic
 	# RTC abstraction
-	depends on PARISC || M68K || PPC || SUPERH32
+	depends on PARISC || M68K || PPC || SUPERH32 || COMPILE_TEST
 	help
 	  Say Y or M here to enable RTC support on systems using the generic
 	  RTC abstraction. If you do not know what you are doing, you should
 	  just say Y.
 
 config RTC_DRV_PXA
-       tristate "PXA27x/PXA3xx"
-       depends on ARCH_PXA
-       select RTC_DRV_SA1100
-       help
+	tristate "PXA27x/PXA3xx"
+	depends on ARCH_PXA
+	select RTC_DRV_SA1100
+	help
          If you say Y here you will get access to the real time clock
          built into your PXA27x or PXA3xx CPU. This RTC is actually 2 RTCs
          consisting of an SA1100 compatible RTC and the extended PXA RTC.
@@ -1345,7 +1378,7 @@
 
 config RTC_DRV_VT8500
 	tristate "VIA/WonderMedia 85xx SoC RTC"
-	depends on ARCH_VT8500
+	depends on ARCH_VT8500 || COMPILE_TEST
 	help
 	  If you say Y here you will get access to the real time clock
 	  built into your VIA VT8500 SoC or its relatives.
@@ -1360,14 +1393,15 @@
 
 config RTC_DRV_SUN6I
 	tristate "Allwinner A31 RTC"
-	depends on MACH_SUN6I || MACH_SUN8I
+	default MACH_SUN6I || MACH_SUN8I || COMPILE_TEST
+	depends on ARCH_SUNXI
 	help
-	  If you say Y here you will get support for the RTC found on
-	  Allwinner A31.
+	  If you say Y here you will get support for the RTC found in
+	  some Allwinner SoCs like the A31 or the A64.
 
 config RTC_DRV_SUNXI
 	tristate "Allwinner sun4i/sun7i RTC"
-	depends on MACH_SUN4I || MACH_SUN7I
+	depends on MACH_SUN4I || MACH_SUN7I || COMPILE_TEST
 	help
 	  If you say Y here you will get support for the RTC found on
 	  Allwinner A10/A20.
@@ -1388,7 +1422,7 @@
 
 config RTC_DRV_MV
 	tristate "Marvell SoC RTC"
-	depends on ARCH_DOVE || ARCH_MVEBU
+	depends on ARCH_DOVE || ARCH_MVEBU || COMPILE_TEST
 	help
 	  If you say yes here you will get support for the in-chip RTC
 	  that can be found in some of Marvell's SoC devices, such as
@@ -1399,7 +1433,7 @@
 
 config RTC_DRV_ARMADA38X
 	tristate "Armada 38x Marvell SoC RTC"
-	depends on ARCH_MVEBU
+	depends on ARCH_MVEBU || COMPILE_TEST
 	help
 	  If you say yes here you will get support for the in-chip RTC
 	  that can be found in the Armada 38x Marvell's SoC device
@@ -1429,7 +1463,7 @@
 
 config RTC_DRV_COH901331
 	tristate "ST-Ericsson COH 901 331 RTC"
-	depends on ARCH_U300
+	depends on ARCH_U300 || COMPILE_TEST
 	help
 	  If you say Y here you will get access to ST-Ericsson
 	  COH 901 331 RTC clock found in some ST-Ericsson Mobile
@@ -1441,7 +1475,7 @@
 
 config RTC_DRV_STMP
 	tristate "Freescale STMP3xxx/i.MX23/i.MX28 RTC"
-	depends on ARCH_MXS
+	depends on ARCH_MXS || COMPILE_TEST
 	select STMP_DEVICE
 	help
 	  If you say yes here you will get support for the onboard
@@ -1476,7 +1510,7 @@
 
 config RTC_DRV_JZ4740
 	tristate "Ingenic JZ4740 SoC"
-	depends on MACH_JZ4740
+	depends on MACH_JZ4740 || COMPILE_TEST
 	help
 	  If you say yes here you get support for the Ingenic JZ4740 SoC RTC
 	  controller.
@@ -1497,7 +1531,7 @@
 	  so, the module will be called rtc-lpc24xx.
 
 config RTC_DRV_LPC32XX
-	depends on ARCH_LPC32XX
+	depends on ARCH_LPC32XX || COMPILE_TEST
 	tristate "NXP LPC32XX RTC"
 	help
 	  This enables support for the NXP RTC in the LPC32XX
@@ -1507,7 +1541,7 @@
 
 config RTC_DRV_PM8XXX
 	tristate "Qualcomm PMIC8XXX RTC"
-	depends on MFD_PM8XXX || MFD_SPMI_PMIC
+	depends on MFD_PM8XXX || MFD_SPMI_PMIC || COMPILE_TEST
 	help
 	  If you say yes here you get support for the
 	  Qualcomm PMIC8XXX RTC.
@@ -1517,7 +1551,7 @@
 
 config RTC_DRV_TEGRA
 	tristate "NVIDIA Tegra Internal RTC driver"
-	depends on ARCH_TEGRA
+	depends on ARCH_TEGRA || COMPILE_TEST
 	help
 	  If you say yes here you get support for the
 	  Tegra 200 series internal RTC module.
@@ -1603,7 +1637,7 @@
 
 config RTC_DRV_MT6397
 	tristate "Mediatek Real Time Clock driver"
-	depends on MFD_MT6397 || COMPILE_TEST
+	depends on MFD_MT6397 || (COMPILE_TEST && IRQ_DOMAIN)
 	help
 	  This selects the Mediatek(R) RTC driver. RTC is part of Mediatek
 	  MT6397 PMIC. You should enable MT6397 PMIC MFD before select
@@ -1622,6 +1656,16 @@
 	  This driver can also be built as a module, if so, the module
 	  will be called "rtc-xgene".
 
+config RTC_DRV_PIC32
+	tristate "Microchip PIC32 RTC"
+	depends on MACH_PIC32
+	default y
+	help
+	  If you say yes here you get support for the PIC32 RTC module.
+
+	  This driver can also be built as a module. If so, the module
+	  will be called rtc-pic32.
+
 comment "HID Sensor RTC drivers"
 
 config RTC_DRV_HID_SENSOR_TIME
diff --git a/drivers/rtc/Makefile b/drivers/rtc/Makefile
index 62d61b2..ea28337 100644
--- a/drivers/rtc/Makefile
+++ b/drivers/rtc/Makefile
@@ -28,6 +28,7 @@
 obj-$(CONFIG_RTC_DRV_ABX80X)	+= rtc-abx80x.o
 obj-$(CONFIG_RTC_DRV_ARMADA38X)	+= rtc-armada38x.o
 obj-$(CONFIG_RTC_DRV_AS3722)	+= rtc-as3722.o
+obj-$(CONFIG_RTC_DRV_ASM9260)	+= rtc-asm9260.o
 obj-$(CONFIG_RTC_DRV_AT32AP700X)+= rtc-at32ap700x.o
 obj-$(CONFIG_RTC_DRV_AT91RM9200)+= rtc-at91rm9200.o
 obj-$(CONFIG_RTC_DRV_AT91SAM9)	+= rtc-at91sam9.o
@@ -59,7 +60,6 @@
 obj-$(CONFIG_RTC_DRV_DS1742)	+= rtc-ds1742.o
 obj-$(CONFIG_RTC_DRV_DS2404)	+= rtc-ds2404.o
 obj-$(CONFIG_RTC_DRV_DS3232)	+= rtc-ds3232.o
-obj-$(CONFIG_RTC_DRV_DS3234)	+= rtc-ds3234.o
 obj-$(CONFIG_RTC_DRV_EFI)	+= rtc-efi.o
 obj-$(CONFIG_RTC_DRV_EM3027)	+= rtc-em3027.o
 obj-$(CONFIG_RTC_DRV_EP93XX)	+= rtc-ep93xx.o
@@ -86,7 +86,6 @@
 obj-$(CONFIG_RTC_DRV_MAX6900)	+= rtc-max6900.o
 obj-$(CONFIG_RTC_DRV_MAX6902)	+= rtc-max6902.o
 obj-$(CONFIG_RTC_DRV_MAX77686)	+= rtc-max77686.o
-obj-$(CONFIG_RTC_DRV_MAX77802)	+= rtc-max77802.o
 obj-$(CONFIG_RTC_DRV_MAX8907)	+= rtc-max8907.o
 obj-$(CONFIG_RTC_DRV_MAX8925)	+= rtc-max8925.o
 obj-$(CONFIG_RTC_DRV_MAX8997)	+= rtc-max8997.o
@@ -112,6 +111,7 @@
 obj-$(CONFIG_RTC_DRV_PCF8523)	+= rtc-pcf8523.o
 obj-$(CONFIG_RTC_DRV_PCF8563)	+= rtc-pcf8563.o
 obj-$(CONFIG_RTC_DRV_PCF8583)	+= rtc-pcf8583.o
+obj-$(CONFIG_RTC_DRV_PIC32)	+= rtc-pic32.o
 obj-$(CONFIG_RTC_DRV_PL030)	+= rtc-pl030.o
 obj-$(CONFIG_RTC_DRV_PL031)	+= rtc-pl031.o
 obj-$(CONFIG_RTC_DRV_PM8XXX)	+= rtc-pm8xxx.o
@@ -128,6 +128,7 @@
 obj-$(CONFIG_RTC_DRV_RV3029C2)	+= rtc-rv3029c2.o
 obj-$(CONFIG_RTC_DRV_RV8803)	+= rtc-rv8803.o
 obj-$(CONFIG_RTC_DRV_RX4581)	+= rtc-rx4581.o
+obj-$(CONFIG_RTC_DRV_RX6110)	+= rtc-rx6110.o
 obj-$(CONFIG_RTC_DRV_RX8010)	+= rtc-rx8010.o
 obj-$(CONFIG_RTC_DRV_RX8025)	+= rtc-rx8025.o
 obj-$(CONFIG_RTC_DRV_RX8581)	+= rtc-rx8581.o
diff --git a/drivers/rtc/class.c b/drivers/rtc/class.c
index de86578..74fd974 100644
--- a/drivers/rtc/class.c
+++ b/drivers/rtc/class.c
@@ -361,17 +361,4 @@
 	rtc_dev_init();
 	return 0;
 }
-
-static void __exit rtc_exit(void)
-{
-	rtc_dev_exit();
-	class_destroy(rtc_class);
-	ida_destroy(&rtc_ida);
-}
-
 subsys_initcall(rtc_init);
-module_exit(rtc_exit);
-
-MODULE_AUTHOR("Alessandro Zummo <a.zummo@towertech.it>");
-MODULE_DESCRIPTION("RTC class support");
-MODULE_LICENSE("GPL");
diff --git a/drivers/rtc/interface.c b/drivers/rtc/interface.c
index 5836751..9ef5f6f 100644
--- a/drivers/rtc/interface.c
+++ b/drivers/rtc/interface.c
@@ -939,4 +939,58 @@
 	mutex_unlock(&rtc->ops_lock);
 }
 
+/**
+ * rtc_read_offset - Read the amount of rtc offset in parts per billion
+ * @rtc: rtc device to be used
+ * @offset: the offset in parts per billion
+ *
+ * See rtc_set_offset() below for details of what the offset represents.
+ *
+ * Kernel interface to read the rtc clock offset.
+ * Returns 0 on success, or a negative number on error.
+ * If read_offset() is not implemented for the rtc, returns -EINVAL.
+ */
+int rtc_read_offset(struct rtc_device *rtc, long *offset)
+{
+	int ret;
 
+	if (!rtc->ops)
+		return -ENODEV;
+
+	if (!rtc->ops->read_offset)
+		return -EINVAL;
+
+	mutex_lock(&rtc->ops_lock);
+	ret = rtc->ops->read_offset(rtc->dev.parent, offset);
+	mutex_unlock(&rtc->ops_lock);
+	return ret;
+}
+
+/**
+ * rtc_set_offset - Adjusts the duration of the average second
+ * @rtc: rtc device to be used
+ * @offset: the offset in parts per billion
+ *
+ * Some RTCs allow an adjustment to the average duration of a second
+ * to compensate for differences in the actual clock rate due to temperature,
+ * the crystal, capacitor, etc.
+ *
+ * Kernel interface to adjust an rtc clock offset.
+ * Returns 0 on success, or a negative number on error.
+ * If the rtc offset is not settable (or not implemented), returns -EINVAL.
+ */
+int rtc_set_offset(struct rtc_device *rtc, long offset)
+{
+	int ret;
+
+	if (!rtc->ops)
+		return -ENODEV;
+
+	if (!rtc->ops->set_offset)
+		return -EINVAL;
+
+	mutex_lock(&rtc->ops_lock);
+	ret = rtc->ops->set_offset(rtc->dev.parent, offset);
+	mutex_unlock(&rtc->ops_lock);
+	return ret;
+}
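
These two helpers just forward to the driver, so support is opt-in: a driver fills the new read_offset/set_offset members of its rtc_class_ops. A hedged sketch with an invented 8-bit trim register (1 LSB assumed to equal 1000 ppb; the foo_* accessors and the read_time/set_time callbacks are hypothetical):

	static int foo_rtc_read_offset(struct device *dev, long *offset)
	{
		s8 trim = foo_read_trim(dev);		/* hypothetical register read */

		*offset = (long)trim * 1000;		/* assumed scale: 1 LSB = 1000 ppb */
		return 0;
	}

	static int foo_rtc_set_offset(struct device *dev, long offset)
	{
		if (offset < -128000 || offset > 127000)
			return -ERANGE;

		return foo_write_trim(dev, offset / 1000);	/* hypothetical register write */
	}

	static const struct rtc_class_ops foo_rtc_ops = {
		.read_time	= foo_rtc_read_time,	/* assumed to exist elsewhere */
		.set_time	= foo_rtc_set_time,
		.read_offset	= foo_rtc_read_offset,
		.set_offset	= foo_rtc_set_offset,
	};
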
diff --git a/drivers/rtc/rtc-as3722.c b/drivers/rtc/rtc-as3722.c
index 56cc582..6ef0c88 100644
--- a/drivers/rtc/rtc-as3722.c
+++ b/drivers/rtc/rtc-as3722.c
@@ -210,7 +210,7 @@
 	dev_info(&pdev->dev, "RTC interrupt %d\n", as3722_rtc->alarm_irq);
 
 	ret = devm_request_threaded_irq(&pdev->dev, as3722_rtc->alarm_irq, NULL,
-			as3722_alarm_irq, IRQF_ONESHOT | IRQF_EARLY_RESUME,
+			as3722_alarm_irq, IRQF_ONESHOT,
 			"rtc-alarm", as3722_rtc);
 	if (ret < 0) {
 		dev_err(&pdev->dev, "Failed to request alarm IRQ %d: %d\n",
diff --git a/drivers/rtc/rtc-asm9260.c b/drivers/rtc/rtc-asm9260.c
new file mode 100644
index 0000000..14e08c4
--- /dev/null
+++ b/drivers/rtc/rtc-asm9260.c
@@ -0,0 +1,355 @@
+/*
+ * Copyright (C) 2016 Oleksij Rempel <linux@rempel-privat.de>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ */
+
+#include <linux/clk.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/rtc.h>
+
+/* Miscellaneous registers */
+/* Interrupt Location Register */
+#define HW_ILR			0x00
+#define BM_RTCALF		BIT(1)
+#define BM_RTCCIF		BIT(0)
+
+/* Clock Control Register */
+#define HW_CCR			0x08
+/* Calibration counter disable */
+#define BM_CCALOFF		BIT(4)
+/* Reset internal oscillator divider */
+#define BM_CTCRST		BIT(1)
+/* Clock Enable */
+#define BM_CLKEN		BIT(0)
+
+/* Counter Increment Interrupt Register */
+#define HW_CIIR			0x0C
+#define BM_CIIR_IMYEAR		BIT(7)
+#define BM_CIIR_IMMON		BIT(6)
+#define BM_CIIR_IMDOY		BIT(5)
+#define BM_CIIR_IMDOW		BIT(4)
+#define BM_CIIR_IMDOM		BIT(3)
+#define BM_CIIR_IMHOUR		BIT(2)
+#define BM_CIIR_IMMIN		BIT(1)
+#define BM_CIIR_IMSEC		BIT(0)
+
+/* Alarm Mask Register */
+#define HW_AMR			0x10
+#define BM_AMR_IMYEAR		BIT(7)
+#define BM_AMR_IMMON		BIT(6)
+#define BM_AMR_IMDOY		BIT(5)
+#define BM_AMR_IMDOW		BIT(4)
+#define BM_AMR_IMDOM		BIT(3)
+#define BM_AMR_IMHOUR		BIT(2)
+#define BM_AMR_IMMIN		BIT(1)
+#define BM_AMR_IMSEC		BIT(0)
+#define BM_AMR_OFF		0xff
+
+/* Consolidated time registers */
+#define HW_CTIME0		0x14
+#define BM_CTIME0_DOW_S		24
+#define BM_CTIME0_DOW_M		0x7
+#define BM_CTIME0_HOUR_S	16
+#define BM_CTIME0_HOUR_M	0x1f
+#define BM_CTIME0_MIN_S		8
+#define BM_CTIME0_MIN_M		0x3f
+#define BM_CTIME0_SEC_S		0
+#define BM_CTIME0_SEC_M		0x3f
+
+#define HW_CTIME1		0x18
+#define BM_CTIME1_YEAR_S	16
+#define BM_CTIME1_YEAR_M	0xfff
+#define BM_CTIME1_MON_S		8
+#define BM_CTIME1_MON_M		0xf
+#define BM_CTIME1_DOM_S		0
+#define BM_CTIME1_DOM_M		0x1f
+
+#define HW_CTIME2		0x1C
+#define BM_CTIME2_DOY_S		0
+#define BM_CTIME2_DOY_M		0xfff
+
+/* Time counter registers */
+#define HW_SEC			0x20
+#define HW_MIN			0x24
+#define HW_HOUR			0x28
+#define HW_DOM			0x2C
+#define HW_DOW			0x30
+#define HW_DOY			0x34
+#define HW_MONTH		0x38
+#define HW_YEAR			0x3C
+
+#define HW_CALIBRATION		0x40
+#define BM_CALDIR_BACK		BIT(17)
+#define BM_CALVAL_M		0x1ffff
+
+/* General purpose registers */
+#define HW_GPREG0		0x44
+#define HW_GPREG1		0x48
+#define HW_GPREG2		0x4C
+#define HW_GPREG3		0x50
+#define HW_GPREG4		0x54
+
+/* Alarm register group */
+#define HW_ALSEC		0x60
+#define HW_ALMIN		0x64
+#define HW_ALHOUR		0x68
+#define HW_ALDOM		0x6C
+#define HW_ALDOW		0x70
+#define HW_ALDOY		0x74
+#define HW_ALMON		0x78
+#define HW_ALYEAR		0x7C
+
+struct asm9260_rtc_priv {
+	struct device		*dev;
+	void __iomem		*iobase;
+	struct rtc_device	*rtc;
+	struct clk		*clk;
+	/* io lock */
+	spinlock_t		lock;
+};
+
+static irqreturn_t asm9260_rtc_irq(int irq, void *dev_id)
+{
+	struct asm9260_rtc_priv *priv = dev_id;
+	u32 isr;
+	unsigned long events = 0;
+
+	isr = ioread32(priv->iobase + HW_CIIR);
+	if (!isr)
+		return IRQ_NONE;
+
+	iowrite32(0, priv->iobase + HW_CIIR);
+
+	events |= RTC_AF | RTC_IRQF;
+
+	rtc_update_irq(priv->rtc, 1, events);
+
+	return IRQ_HANDLED;
+}
+
+static int asm9260_rtc_read_time(struct device *dev, struct rtc_time *tm)
+{
+	struct asm9260_rtc_priv *priv = dev_get_drvdata(dev);
+	u32 ctime0, ctime1, ctime2;
+	unsigned long irq_flags;
+
+	spin_lock_irqsave(&priv->lock, irq_flags);
+	ctime0 = ioread32(priv->iobase + HW_CTIME0);
+	ctime1 = ioread32(priv->iobase + HW_CTIME1);
+	ctime2 = ioread32(priv->iobase + HW_CTIME2);
+
+	if (ctime1 != ioread32(priv->iobase + HW_CTIME1)) {
+		/*
+		 * Whoops, the counter flipped during the first read.
+		 * A wrap just happened, so it is safe to re-read now.
+		 */
+		ctime0 = ioread32(priv->iobase + HW_CTIME0);
+		ctime1 = ioread32(priv->iobase + HW_CTIME1);
+		ctime2 = ioread32(priv->iobase + HW_CTIME2);
+	}
+	spin_unlock_irqrestore(&priv->lock, irq_flags);
+
+	tm->tm_sec  = (ctime0 >> BM_CTIME0_SEC_S)  & BM_CTIME0_SEC_M;
+	tm->tm_min  = (ctime0 >> BM_CTIME0_MIN_S)  & BM_CTIME0_MIN_M;
+	tm->tm_hour = (ctime0 >> BM_CTIME0_HOUR_S) & BM_CTIME0_HOUR_M;
+	tm->tm_wday = (ctime0 >> BM_CTIME0_DOW_S)  & BM_CTIME0_DOW_M;
+
+	tm->tm_mday = (ctime1 >> BM_CTIME1_DOM_S)  & BM_CTIME1_DOM_M;
+	tm->tm_mon  = (ctime1 >> BM_CTIME1_MON_S)  & BM_CTIME1_MON_M;
+	tm->tm_year = (ctime1 >> BM_CTIME1_YEAR_S) & BM_CTIME1_YEAR_M;
+
+	tm->tm_yday = (ctime2 >> BM_CTIME2_DOY_S)  & BM_CTIME2_DOY_M;
+
+	return 0;
+}
+
+static int asm9260_rtc_set_time(struct device *dev, struct rtc_time *tm)
+{
+	struct asm9260_rtc_priv *priv = dev_get_drvdata(dev);
+	unsigned long irq_flags;
+
+	spin_lock_irqsave(&priv->lock, irq_flags);
+	/*
+	 * Make sure the SEC counter does not flip the other counters while
+	 * the time is written; the real value is written at the end of the
+	 * sequence.
+	 */
+	iowrite32(0, priv->iobase + HW_SEC);
+
+	iowrite32(tm->tm_year, priv->iobase + HW_YEAR);
+	iowrite32(tm->tm_mon,  priv->iobase + HW_MONTH);
+	iowrite32(tm->tm_mday, priv->iobase + HW_DOM);
+	iowrite32(tm->tm_wday, priv->iobase + HW_DOW);
+	iowrite32(tm->tm_yday, priv->iobase + HW_DOY);
+	iowrite32(tm->tm_hour, priv->iobase + HW_HOUR);
+	iowrite32(tm->tm_min,  priv->iobase + HW_MIN);
+	iowrite32(tm->tm_sec,  priv->iobase + HW_SEC);
+	spin_unlock_irqrestore(&priv->lock, irq_flags);
+
+	return 0;
+}
+
+static int asm9260_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
+{
+	struct asm9260_rtc_priv *priv = dev_get_drvdata(dev);
+	unsigned long irq_flags;
+
+	spin_lock_irqsave(&priv->lock, irq_flags);
+	alrm->time.tm_year = ioread32(priv->iobase + HW_ALYEAR);
+	alrm->time.tm_mon  = ioread32(priv->iobase + HW_ALMON);
+	alrm->time.tm_mday = ioread32(priv->iobase + HW_ALDOM);
+	alrm->time.tm_wday = ioread32(priv->iobase + HW_ALDOW);
+	alrm->time.tm_yday = ioread32(priv->iobase + HW_ALDOY);
+	alrm->time.tm_hour = ioread32(priv->iobase + HW_ALHOUR);
+	alrm->time.tm_min  = ioread32(priv->iobase + HW_ALMIN);
+	alrm->time.tm_sec  = ioread32(priv->iobase + HW_ALSEC);
+
+	alrm->enabled = ioread32(priv->iobase + HW_AMR) ? 1 : 0;
+	alrm->pending = ioread32(priv->iobase + HW_CIIR) ? 1 : 0;
+	spin_unlock_irqrestore(&priv->lock, irq_flags);
+
+	return rtc_valid_tm(&alrm->time);
+}
+
+static int asm9260_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
+{
+	struct asm9260_rtc_priv *priv = dev_get_drvdata(dev);
+	unsigned long irq_flags;
+
+	spin_lock_irqsave(&priv->lock, irq_flags);
+	iowrite32(alrm->time.tm_year, priv->iobase + HW_ALYEAR);
+	iowrite32(alrm->time.tm_mon,  priv->iobase + HW_ALMON);
+	iowrite32(alrm->time.tm_mday, priv->iobase + HW_ALDOM);
+	iowrite32(alrm->time.tm_wday, priv->iobase + HW_ALDOW);
+	iowrite32(alrm->time.tm_yday, priv->iobase + HW_ALDOY);
+	iowrite32(alrm->time.tm_hour, priv->iobase + HW_ALHOUR);
+	iowrite32(alrm->time.tm_min,  priv->iobase + HW_ALMIN);
+	iowrite32(alrm->time.tm_sec,  priv->iobase + HW_ALSEC);
+
+	iowrite32(alrm->enabled ? 0 : BM_AMR_OFF, priv->iobase + HW_AMR);
+	spin_unlock_irqrestore(&priv->lock, irq_flags);
+
+	return 0;
+}
+
+static int asm9260_alarm_irq_enable(struct device *dev, unsigned int enabled)
+{
+	struct asm9260_rtc_priv *priv = dev_get_drvdata(dev);
+
+	iowrite32(enabled ? 0 : BM_AMR_OFF, priv->iobase + HW_AMR);
+	return 0;
+}
+
+static const struct rtc_class_ops asm9260_rtc_ops = {
+	.read_time		= asm9260_rtc_read_time,
+	.set_time		= asm9260_rtc_set_time,
+	.read_alarm		= asm9260_rtc_read_alarm,
+	.set_alarm		= asm9260_rtc_set_alarm,
+	.alarm_irq_enable	= asm9260_alarm_irq_enable,
+};
+
+static int asm9260_rtc_probe(struct platform_device *pdev)
+{
+	struct asm9260_rtc_priv *priv;
+	struct device *dev = &pdev->dev;
+	struct resource	*res;
+	int irq_alarm, ret;
+	u32 ccr;
+
+	priv = devm_kzalloc(dev, sizeof(struct asm9260_rtc_priv), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+
+	priv->dev = &pdev->dev;
+	platform_set_drvdata(pdev, priv);
+
+	irq_alarm = platform_get_irq(pdev, 0);
+	if (irq_alarm < 0) {
+		dev_err(dev, "No alarm IRQ resource defined\n");
+		return irq_alarm;
+	}
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	priv->iobase = devm_ioremap_resource(dev, res);
+	if (IS_ERR(priv->iobase))
+		return PTR_ERR(priv->iobase);
+
+	priv->clk = devm_clk_get(dev, "ahb");
+	if (IS_ERR(priv->clk))
+		return PTR_ERR(priv->clk);
+
+	ret = clk_prepare_enable(priv->clk);
+	if (ret) {
+		dev_err(dev, "Failed to enable clk!\n");
+		return ret;
+	}
+
+	ccr = ioread32(priv->iobase + HW_CCR);
+	/* if dev is not enabled, reset it */
+	if ((ccr & (BM_CLKEN | BM_CTCRST)) != BM_CLKEN) {
+		iowrite32(BM_CTCRST, priv->iobase + HW_CCR);
+		ccr = 0;
+	}
+
+	iowrite32(BM_CLKEN | ccr, priv->iobase + HW_CCR);
+	iowrite32(0, priv->iobase + HW_CIIR);
+	iowrite32(BM_AMR_OFF, priv->iobase + HW_AMR);
+
+	priv->rtc = devm_rtc_device_register(dev, dev_name(dev),
+					     &asm9260_rtc_ops, THIS_MODULE);
+	if (IS_ERR(priv->rtc)) {
+		ret = PTR_ERR(priv->rtc);
+		dev_err(dev, "Failed to register RTC device: %d\n", ret);
+		goto err_return;
+	}
+
+	ret = devm_request_threaded_irq(dev, irq_alarm, NULL,
+					asm9260_rtc_irq, IRQF_ONESHOT,
+					dev_name(dev), priv);
+	if (ret < 0) {
+		dev_err(dev, "can't get irq %i, err %d\n",
+			irq_alarm, ret);
+		goto err_return;
+	}
+
+	return 0;
+
+err_return:
+	clk_disable_unprepare(priv->clk);
+	return ret;
+}
+
+static int asm9260_rtc_remove(struct platform_device *pdev)
+{
+	struct asm9260_rtc_priv *priv = platform_get_drvdata(pdev);
+
+	/* Disable alarm matching */
+	iowrite32(BM_AMR_OFF, priv->iobase + HW_AMR);
+	clk_disable_unprepare(priv->clk);
+	return 0;
+}
+
+static const struct of_device_id asm9260_dt_ids[] = {
+	{ .compatible = "alphascale,asm9260-rtc", },
+	{}
+};
+
+static struct platform_driver asm9260_rtc_driver = {
+	.probe		= asm9260_rtc_probe,
+	.remove		= asm9260_rtc_remove,
+	.driver		= {
+		.name	= "asm9260-rtc",
+		.owner	= THIS_MODULE,
+		.of_match_table = asm9260_dt_ids,
+	},
+};
+
+module_platform_driver(asm9260_rtc_driver);
+
+MODULE_AUTHOR("Oleksij Rempel <linux@rempel-privat.de>");
+MODULE_DESCRIPTION("Alphascale asm9260 SoC Realtime Clock Driver (RTC)");
+MODULE_LICENSE("GPL");
diff --git a/drivers/rtc/rtc-ds1305.c b/drivers/rtc/rtc-ds1305.c
index f39691e..8e41c46 100644
--- a/drivers/rtc/rtc-ds1305.c
+++ b/drivers/rtc/rtc-ds1305.c
@@ -532,7 +532,7 @@
 	struct spi_transfer	x[2];
 	int			status;
 
-	spi = container_of(kobj, struct spi_device, dev.kobj);
+	spi = to_spi_device(kobj_to_dev(kobj));
 
 	addr = DS1305_NVRAM + off;
 	msg_init(&m, x, &addr, count, NULL, buf);
@@ -554,7 +554,7 @@
 	struct spi_transfer	x[2];
 	int			status;
 
-	spi = container_of(kobj, struct spi_device, dev.kobj);
+	spi = to_spi_device(kobj_to_dev(kobj));
 
 	addr = (DS1305_WRITE | DS1305_NVRAM) + off;
 	msg_init(&m, x, &addr, count, buf, NULL);
diff --git a/drivers/rtc/rtc-ds1307.c b/drivers/rtc/rtc-ds1307.c
index cf685f6..b2156ee 100644
--- a/drivers/rtc/rtc-ds1307.c
+++ b/drivers/rtc/rtc-ds1307.c
@@ -19,6 +19,9 @@
 #include <linux/rtc.h>
 #include <linux/slab.h>
 #include <linux/string.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+#include <linux/clk-provider.h>
 
 /*
  * We can't determine type by probing, but if we expect pre-Linux code
@@ -89,6 +92,7 @@
 #	define DS1340_BIT_OSF		0x80
 #define DS1337_REG_STATUS	0x0f
 #	define DS1337_BIT_OSF		0x80
+#	define DS3231_BIT_EN32KHZ	0x08
 #	define DS1337_BIT_A2I		0x02
 #	define DS1337_BIT_A1I		0x01
 #define DS1339_REG_ALARM1_SECS	0x07
@@ -118,6 +122,9 @@
 			       u8 length, u8 *values);
 	s32 (*write_block_data)(const struct i2c_client *client, u8 command,
 				u8 length, const u8 *values);
+#ifdef CONFIG_COMMON_CLK
+	struct clk_hw		clks[2];
+#endif
 };
 
 struct chip_desc {
@@ -842,6 +849,378 @@
 	return;
 }
 
+/*----------------------------------------------------------------------*/
+
+#ifdef CONFIG_RTC_DRV_DS1307_HWMON
+
+/*
+ * Temperature sensor support for ds3231 devices.
+ */
+
+#define DS3231_REG_TEMPERATURE	0x11
+
+/*
+ * A user-initiated temperature conversion is not started by this function,
+ * so the temperature is updated once every 64 seconds.
+ */
+static int ds3231_hwmon_read_temp(struct device *dev, s16 *mC)
+{
+	struct ds1307 *ds1307 = dev_get_drvdata(dev);
+	u8 temp_buf[2];
+	s16 temp;
+	int ret;
+
+	ret = ds1307->read_block_data(ds1307->client, DS3231_REG_TEMPERATURE,
+					sizeof(temp_buf), temp_buf);
+	if (ret < 0)
+		return ret;
+	if (ret != sizeof(temp_buf))
+		return -EIO;
+
+	/*
+	 * Temperature is represented as a 10-bit code with a resolution of
+	 * 0.25 degrees Celsius and encoded in two's complement format.
+	 */
+	temp = (temp_buf[0] << 8) | temp_buf[1];
+	temp >>= 6;
+	*mC = temp * 250;
+
+	return 0;
+}
+
+static ssize_t ds3231_hwmon_show_temp(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	int ret;
+	s16 temp;
+
+	ret = ds3231_hwmon_read_temp(dev, &temp);
+	if (ret)
+		return ret;
+
+	return sprintf(buf, "%d\n", temp);
+}
+static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, ds3231_hwmon_show_temp,
+			NULL, 0);
+
+static struct attribute *ds3231_hwmon_attrs[] = {
+	&sensor_dev_attr_temp1_input.dev_attr.attr,
+	NULL,
+};
+ATTRIBUTE_GROUPS(ds3231_hwmon);
+
+static void ds1307_hwmon_register(struct ds1307 *ds1307)
+{
+	struct device *dev;
+
+	if (ds1307->type != ds_3231)
+		return;
+
+	dev = devm_hwmon_device_register_with_groups(&ds1307->client->dev,
+						ds1307->client->name,
+						ds1307, ds3231_hwmon_groups);
+	if (IS_ERR(dev)) {
+		dev_warn(&ds1307->client->dev,
+			"unable to register hwmon device %ld\n", PTR_ERR(dev));
+	}
+}
+
+#else
+
+static void ds1307_hwmon_register(struct ds1307 *ds1307)
+{
+}
+
+#endif /* CONFIG_RTC_DRV_DS1307_HWMON */
+
+/*----------------------------------------------------------------------*/
+
+/*
+ * Square-wave output support for DS3231
+ * Datasheet: https://datasheets.maximintegrated.com/en/ds/DS3231.pdf
+ */
+#ifdef CONFIG_COMMON_CLK
+
+enum {
+	DS3231_CLK_SQW = 0,
+	DS3231_CLK_32KHZ,
+};
+
+#define clk_sqw_to_ds1307(clk)	\
+	container_of(clk, struct ds1307, clks[DS3231_CLK_SQW])
+#define clk_32khz_to_ds1307(clk)	\
+	container_of(clk, struct ds1307, clks[DS3231_CLK_32KHZ])
+
+static int ds3231_clk_sqw_rates[] = {
+	1,
+	1024,
+	4096,
+	8192,
+};
+
+static int ds1337_write_control(struct ds1307 *ds1307, u8 mask, u8 value)
+{
+	struct i2c_client *client = ds1307->client;
+	struct mutex *lock = &ds1307->rtc->ops_lock;
+	int control;
+	int ret;
+
+	mutex_lock(lock);
+
+	control = i2c_smbus_read_byte_data(client, DS1337_REG_CONTROL);
+	if (control < 0) {
+		ret = control;
+		goto out;
+	}
+
+	control &= ~mask;
+	control |= value;
+
+	ret = i2c_smbus_write_byte_data(client, DS1337_REG_CONTROL, control);
+out:
+	mutex_unlock(lock);
+
+	return ret;
+}
+
+static unsigned long ds3231_clk_sqw_recalc_rate(struct clk_hw *hw,
+						unsigned long parent_rate)
+{
+	struct ds1307 *ds1307 = clk_sqw_to_ds1307(hw);
+	int control;
+	int rate_sel = 0;
+
+	control = i2c_smbus_read_byte_data(ds1307->client, DS1337_REG_CONTROL);
+	if (control < 0)
+		return control;
+	if (control & DS1337_BIT_RS1)
+		rate_sel += 1;
+	if (control & DS1337_BIT_RS2)
+		rate_sel += 2;
+
+	return ds3231_clk_sqw_rates[rate_sel];
+}
+
+static long ds3231_clk_sqw_round_rate(struct clk_hw *hw, unsigned long rate,
+					unsigned long *prate)
+{
+	int i;
+
+	for (i = ARRAY_SIZE(ds3231_clk_sqw_rates) - 1; i >= 0; i--) {
+		if (ds3231_clk_sqw_rates[i] <= rate)
+			return ds3231_clk_sqw_rates[i];
+	}
+
+	return 0;
+}
+
+static int ds3231_clk_sqw_set_rate(struct clk_hw *hw, unsigned long rate,
+					unsigned long parent_rate)
+{
+	struct ds1307 *ds1307 = clk_sqw_to_ds1307(hw);
+	int control = 0;
+	int rate_sel;
+
+	for (rate_sel = 0; rate_sel < ARRAY_SIZE(ds3231_clk_sqw_rates);
+			rate_sel++) {
+		if (ds3231_clk_sqw_rates[rate_sel] == rate)
+			break;
+	}
+
+	if (rate_sel == ARRAY_SIZE(ds3231_clk_sqw_rates))
+		return -EINVAL;
+
+	if (rate_sel & 1)
+		control |= DS1337_BIT_RS1;
+	if (rate_sel & 2)
+		control |= DS1337_BIT_RS2;
+
+	return ds1337_write_control(ds1307, DS1337_BIT_RS1 | DS1337_BIT_RS2,
+				control);
+}
+
+static int ds3231_clk_sqw_prepare(struct clk_hw *hw)
+{
+	struct ds1307 *ds1307 = clk_sqw_to_ds1307(hw);
+
+	return ds1337_write_control(ds1307, DS1337_BIT_INTCN, 0);
+}
+
+static void ds3231_clk_sqw_unprepare(struct clk_hw *hw)
+{
+	struct ds1307 *ds1307 = clk_sqw_to_ds1307(hw);
+
+	ds1337_write_control(ds1307, DS1337_BIT_INTCN, DS1337_BIT_INTCN);
+}
+
+static int ds3231_clk_sqw_is_prepared(struct clk_hw *hw)
+{
+	struct ds1307 *ds1307 = clk_sqw_to_ds1307(hw);
+	int control;
+
+	control = i2c_smbus_read_byte_data(ds1307->client, DS1337_REG_CONTROL);
+	if (control < 0)
+		return control;
+
+	return !(control & DS1337_BIT_INTCN);
+}
+
+static const struct clk_ops ds3231_clk_sqw_ops = {
+	.prepare = ds3231_clk_sqw_prepare,
+	.unprepare = ds3231_clk_sqw_unprepare,
+	.is_prepared = ds3231_clk_sqw_is_prepared,
+	.recalc_rate = ds3231_clk_sqw_recalc_rate,
+	.round_rate = ds3231_clk_sqw_round_rate,
+	.set_rate = ds3231_clk_sqw_set_rate,
+};
+
+static unsigned long ds3231_clk_32khz_recalc_rate(struct clk_hw *hw,
+						unsigned long parent_rate)
+{
+	return 32768;
+}
+
+static int ds3231_clk_32khz_control(struct ds1307 *ds1307, bool enable)
+{
+	struct i2c_client *client = ds1307->client;
+	struct mutex *lock = &ds1307->rtc->ops_lock;
+	int status;
+	int ret;
+
+	mutex_lock(lock);
+
+	status = i2c_smbus_read_byte_data(client, DS1337_REG_STATUS);
+	if (status < 0) {
+		ret = status;
+		goto out;
+	}
+
+	if (enable)
+		status |= DS3231_BIT_EN32KHZ;
+	else
+		status &= ~DS3231_BIT_EN32KHZ;
+
+	ret = i2c_smbus_write_byte_data(client, DS1337_REG_STATUS, status);
+out:
+	mutex_unlock(lock);
+
+	return ret;
+}
+
+static int ds3231_clk_32khz_prepare(struct clk_hw *hw)
+{
+	struct ds1307 *ds1307 = clk_32khz_to_ds1307(hw);
+
+	return ds3231_clk_32khz_control(ds1307, true);
+}
+
+static void ds3231_clk_32khz_unprepare(struct clk_hw *hw)
+{
+	struct ds1307 *ds1307 = clk_32khz_to_ds1307(hw);
+
+	ds3231_clk_32khz_control(ds1307, false);
+}
+
+static int ds3231_clk_32khz_is_prepared(struct clk_hw *hw)
+{
+	struct ds1307 *ds1307 = clk_32khz_to_ds1307(hw);
+	int status;
+
+	status = i2c_smbus_read_byte_data(ds1307->client, DS1337_REG_STATUS);
+	if (status < 0)
+		return status;
+
+	return !!(status & DS3231_BIT_EN32KHZ);
+}
+
+static const struct clk_ops ds3231_clk_32khz_ops = {
+	.prepare = ds3231_clk_32khz_prepare,
+	.unprepare = ds3231_clk_32khz_unprepare,
+	.is_prepared = ds3231_clk_32khz_is_prepared,
+	.recalc_rate = ds3231_clk_32khz_recalc_rate,
+};
+
+static struct clk_init_data ds3231_clks_init[] = {
+	[DS3231_CLK_SQW] = {
+		.name = "ds3231_clk_sqw",
+		.ops = &ds3231_clk_sqw_ops,
+		.flags = CLK_IS_ROOT,
+	},
+	[DS3231_CLK_32KHZ] = {
+		.name = "ds3231_clk_32khz",
+		.ops = &ds3231_clk_32khz_ops,
+		.flags = CLK_IS_ROOT,
+	},
+};
+
+static int ds3231_clks_register(struct ds1307 *ds1307)
+{
+	struct i2c_client *client = ds1307->client;
+	struct device_node *node = client->dev.of_node;
+	struct clk_onecell_data	*onecell;
+	int i;
+
+	onecell = devm_kzalloc(&client->dev, sizeof(*onecell), GFP_KERNEL);
+	if (!onecell)
+		return -ENOMEM;
+
+	onecell->clk_num = ARRAY_SIZE(ds3231_clks_init);
+	onecell->clks = devm_kcalloc(&client->dev, onecell->clk_num,
+					sizeof(onecell->clks[0]), GFP_KERNEL);
+	if (!onecell->clks)
+		return -ENOMEM;
+
+	for (i = 0; i < ARRAY_SIZE(ds3231_clks_init); i++) {
+		struct clk_init_data init = ds3231_clks_init[i];
+
+		/*
+		 * Interrupt signal due to alarm conditions and square-wave
+		 * output share the same pin, so don't initialize both.
+		 */
+		if (i == DS3231_CLK_SQW && test_bit(HAS_ALARM, &ds1307->flags))
+			continue;
+
+		/* optional override of the clockname */
+		of_property_read_string_index(node, "clock-output-names", i,
+						&init.name);
+		ds1307->clks[i].init = &init;
+
+		onecell->clks[i] = devm_clk_register(&client->dev,
+							&ds1307->clks[i]);
+		if (IS_ERR(onecell->clks[i]))
+			return PTR_ERR(onecell->clks[i]);
+	}
+
+	if (!node)
+		return 0;
+
+	of_clk_add_provider(node, of_clk_src_onecell_get, onecell);
+
+	return 0;
+}
+
+static void ds1307_clks_register(struct ds1307 *ds1307)
+{
+	int ret;
+
+	if (ds1307->type != ds_3231)
+		return;
+
+	ret = ds3231_clks_register(ds1307);
+	if (ret) {
+		dev_warn(&ds1307->client->dev,
+			"unable to register clock device %d\n", ret);
+	}
+}
+
+#else
+
+static void ds1307_clks_register(struct ds1307 *ds1307)
+{
+}
+
+#endif /* CONFIG_COMMON_CLK */
+
 static int ds1307_probe(struct i2c_client *client,
 			const struct i2c_device_id *id)
 {
@@ -851,6 +1230,7 @@
 	struct chip_desc	*chip = &chips[id->driver_data];
 	struct i2c_adapter	*adapter = to_i2c_adapter(client->dev.parent);
 	bool			want_irq = false;
+	bool			ds1307_can_wakeup_device = false;
 	unsigned char		*buf;
 	struct ds1307_platform_data *pdata = dev_get_platdata(&client->dev);
 	irq_handler_t	irq_handler = ds1307_irq;
@@ -898,6 +1278,20 @@
 		ds1307->write_block_data = ds1307_write_block_data;
 	}
 
+#ifdef CONFIG_OF
+/*
+ * For devices with no IRQ directly connected to the SoC, the RTC chip
+ * can be forced as a wakeup source by stating that explicitly in
+ * the device's .dts file using the "wakeup-source" boolean property.
+ * If the "wakeup-source" property is set, don't request an IRQ.
+ * This will guarantee the 'wakealarm' sysfs entry is available on the device,
+ * if supported by the RTC.
+ */
+	if (of_property_read_bool(client->dev.of_node, "wakeup-source"))
+		ds1307_can_wakeup_device = true;
+#endif
+
 	switch (ds1307->type) {
 	case ds_1337:
 	case ds_1339:
@@ -916,11 +1310,13 @@
 			ds1307->regs[0] &= ~DS1337_BIT_nEOSC;
 
 		/*
-		 * Using IRQ?  Disable the square wave and both alarms.
+		 * Using IRQ or defined as wakeup-source?
+		 * Disable the square wave and both alarms.
 		 * For some variants, be sure alarms can trigger when we're
 		 * running on Vbackup (BBSQI/BBSQW)
 		 */
-		if (ds1307->client->irq > 0 && chip->alarm) {
+		if (chip->alarm && (ds1307->client->irq > 0 ||
+						ds1307_can_wakeup_device)) {
 			ds1307->regs[0] |= DS1337_BIT_INTCN
 					| bbsqi_bitpos[ds1307->type];
 			ds1307->regs[0] &= ~(DS1337_BIT_A2IE | DS1337_BIT_A1IE);
@@ -1135,6 +1531,14 @@
 		return PTR_ERR(ds1307->rtc);
 	}
 
+	if (ds1307_can_wakeup_device) {
+		/* Disable request for an IRQ */
+		want_irq = false;
+		dev_info(&client->dev,
+			 "'wakeup-source' is set, not requesting an IRQ\n");
+		/* We cannot support UIE mode if we do not have an IRQ line */
+		ds1307->rtc->uie_unsupported = 1;
+	}
+
 	if (want_irq) {
 		err = devm_request_threaded_irq(&client->dev,
 						client->irq, NULL, irq_handler,
@@ -1182,6 +1586,9 @@
 		}
 	}
 
+	ds1307_hwmon_register(ds1307);
+	ds1307_clks_register(ds1307);
+
 	return 0;
 
 exit:
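
The ds1307 hunks above expose the DS3231's square-wave and 32 kHz outputs as common-clock-framework clocks. For context, a minimal consumer sketch, not part of this patch: the "xtal32k" lookup name and the consumer device are assumptions, and the consumer's DT node is assumed to reference the provider's DS3231_CLK_32KHZ cell.

	#include <linux/clk.h>
	#include <linux/device.h>
	#include <linux/err.h>

	/* Hypothetical consumer: enable the DS3231's 32 kHz output. */
	static int example_enable_32k(struct device *dev)
	{
		struct clk *clk;

		/* "xtal32k" is an assumed clock-names entry in the consumer's
		 * DT node pointing at the DS3231_CLK_32KHZ provider cell. */
		clk = devm_clk_get(dev, "xtal32k");
		if (IS_ERR(clk))
			return PTR_ERR(clk);

		/* this ends up in ds3231_clk_32khz_prepare() above */
		return clk_prepare_enable(clk);
	}
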
diff --git a/drivers/rtc/rtc-ds1685.c b/drivers/rtc/rtc-ds1685.c
index 535050f..1e6cfc8 100644
--- a/drivers/rtc/rtc-ds1685.c
+++ b/drivers/rtc/rtc-ds1685.c
@@ -187,9 +187,9 @@
  * Only use this where you are certain another lock will not be held.
  */
 static inline void
-ds1685_rtc_begin_ctrl_access(struct ds1685_priv *rtc, unsigned long flags)
+ds1685_rtc_begin_ctrl_access(struct ds1685_priv *rtc, unsigned long *flags)
 {
-	spin_lock_irqsave(&rtc->lock, flags);
+	spin_lock_irqsave(&rtc->lock, *flags);
 	ds1685_rtc_switch_to_bank1(rtc);
 }
 
@@ -1300,7 +1300,7 @@
 {
 	struct ds1685_priv *rtc = dev_get_drvdata(dev);
 	u8 reg = 0, bit = 0, tmp;
-	unsigned long flags = 0;
+	unsigned long flags;
 	long int val = 0;
 	const struct ds1685_rtc_ctrl_regs *reg_info =
 		ds1685_rtc_sysfs_ctrl_regs_lookup(attr->attr.name);
@@ -1321,7 +1321,7 @@
 	bit = reg_info->bit;
 
 	/* Safe to spinlock during a write. */
-	ds1685_rtc_begin_ctrl_access(rtc, flags);
+	ds1685_rtc_begin_ctrl_access(rtc, &flags);
 	tmp = rtc->read(rtc, reg);
 	rtc->write(rtc, reg, (val ? (tmp | bit) : (tmp & ~(bit))));
 	ds1685_rtc_end_ctrl_access(rtc, flags);
@@ -2161,6 +2161,7 @@
 	/* Check for valid RTC data, else, spin forever. */
 	if (unlikely(!pdev)) {
 		pr_emerg("platform device data not available, spinning forever ...\n");
+		while (1);
 		unreachable();
 	} else {
 		/* Get the rtc data. */
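
The rtc-ds1685 change above fixes a spin_lock_irqsave() pitfall: the macro writes the saved interrupt state into the variable it is handed, so a helper that receives flags by value saves into a throwaway copy and the caller later restores stale flags. A minimal sketch of the broken and fixed patterns; the helper names are illustrative only.

	#include <linux/spinlock.h>

	/* Broken: spin_lock_irqsave() stores the saved IRQ state into this
	 * local copy of 'flags'; the caller's variable is never updated. */
	static void example_bad_lock(spinlock_t *lock, unsigned long flags)
	{
		spin_lock_irqsave(lock, flags);
	}

	/* Fixed (the pattern adopted above): pass the caller's variable by
	 * reference so the saved state survives the helper. */
	static void example_good_lock(spinlock_t *lock, unsigned long *flags)
	{
		spin_lock_irqsave(lock, *flags);
	}
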
diff --git a/drivers/rtc/rtc-ds3232.c b/drivers/rtc/rtc-ds3232.c
index 4e99ace..7edc889 100644
--- a/drivers/rtc/rtc-ds3232.c
+++ b/drivers/rtc/rtc-ds3232.c
@@ -1,19 +1,15 @@
 /*
- * RTC client/driver for the Maxim/Dallas DS3232 Real-Time Clock over I2C
+ * RTC client/driver for the Maxim/Dallas DS3232/DS3234 Real-Time Clock
  *
  * Copyright (C) 2009-2011 Freescale Semiconductor.
  * Author: Jack Lan <jack.lan@freescale.com>
+ * Copyright (C) 2008 MIMOMax Wireless Ltd.
  *
  * This program is free software; you can redistribute  it and/or modify it
  * under  the terms of  the GNU General  Public License as published by the
  * Free Software Foundation;  either version 2 of the  License, or (at your
  * option) any later version.
  */
-/*
- * It would be more efficient to use i2c msgs/i2c_transfer directly but, as
- * recommened in .../Documentation/i2c/writing-clients section
- * "Sending and receiving", using SMBus level communication is preferred.
- */
 
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
@@ -21,10 +17,11 @@
 #include <linux/module.h>
 #include <linux/interrupt.h>
 #include <linux/i2c.h>
+#include <linux/spi/spi.h>
 #include <linux/rtc.h>
 #include <linux/bcd.h>
-#include <linux/workqueue.h>
 #include <linux/slab.h>
+#include <linux/regmap.h>
 
 #define DS3232_REG_SECONDS	0x00
 #define DS3232_REG_MINUTES	0x01
@@ -50,39 +47,33 @@
 #       define DS3232_REG_SR_A1F   0x01
 
 struct ds3232 {
-	struct i2c_client *client;
+	struct device *dev;
+	struct regmap *regmap;
+	int irq;
 	struct rtc_device *rtc;
-	struct work_struct work;
 
-	/* The mutex protects alarm operations, and prevents a race
-	 * between the enable_irq() in the workqueue and the free_irq()
-	 * in the remove function.
-	 */
-	struct mutex mutex;
 	bool suspended;
-	int exiting;
 };
 
-static struct i2c_driver ds3232_driver;
-
-static int ds3232_check_rtc_status(struct i2c_client *client)
+static int ds3232_check_rtc_status(struct device *dev)
 {
+	struct ds3232 *ds3232 = dev_get_drvdata(dev);
 	int ret = 0;
 	int control, stat;
 
-	stat = i2c_smbus_read_byte_data(client, DS3232_REG_SR);
-	if (stat < 0)
-		return stat;
+	ret = regmap_read(ds3232->regmap, DS3232_REG_SR, &stat);
+	if (ret)
+		return ret;
 
 	if (stat & DS3232_REG_SR_OSF)
-		dev_warn(&client->dev,
+		dev_warn(dev,
 				"oscillator discontinuity flagged, "
 				"time unreliable\n");
 
 	stat &= ~(DS3232_REG_SR_OSF | DS3232_REG_SR_A1F | DS3232_REG_SR_A2F);
 
-	ret = i2c_smbus_write_byte_data(client, DS3232_REG_SR, stat);
-	if (ret < 0)
+	ret = regmap_write(ds3232->regmap, DS3232_REG_SR, stat);
+	if (ret)
 		return ret;
 
 	/* If the alarm is pending, clear it before requesting
@@ -90,31 +81,28 @@
 	 * before everything is initialized.
 	 */
 
-	control = i2c_smbus_read_byte_data(client, DS3232_REG_CR);
-	if (control < 0)
-		return control;
+	ret = regmap_read(ds3232->regmap, DS3232_REG_CR, &control);
+	if (ret)
+		return ret;
 
 	control &= ~(DS3232_REG_CR_A1IE | DS3232_REG_CR_A2IE);
 	control |= DS3232_REG_CR_INTCN;
 
-	return i2c_smbus_write_byte_data(client, DS3232_REG_CR, control);
+	return regmap_write(ds3232->regmap, DS3232_REG_CR, control);
 }
 
 static int ds3232_read_time(struct device *dev, struct rtc_time *time)
 {
-	struct i2c_client *client = to_i2c_client(dev);
+	struct ds3232 *ds3232 = dev_get_drvdata(dev);
 	int ret;
 	u8 buf[7];
 	unsigned int year, month, day, hour, minute, second;
 	unsigned int week, twelve_hr, am_pm;
 	unsigned int century, add_century = 0;
 
-	ret = i2c_smbus_read_i2c_block_data(client, DS3232_REG_SECONDS, 7, buf);
-
-	if (ret < 0)
+	ret = regmap_bulk_read(ds3232->regmap, DS3232_REG_SECONDS, buf, 7);
+	if (ret)
 		return ret;
-	if (ret < 7)
-		return -EIO;
 
 	second = buf[0];
 	minute = buf[1];
@@ -159,7 +147,7 @@
 
 static int ds3232_set_time(struct device *dev, struct rtc_time *time)
 {
-	struct i2c_client *client = to_i2c_client(dev);
+	struct ds3232 *ds3232 = dev_get_drvdata(dev);
 	u8 buf[7];
 
 	/* Extract time from rtc_time and load into ds3232*/
@@ -179,8 +167,7 @@
 		buf[6] = bin2bcd(time->tm_year);
 	}
 
-	return i2c_smbus_write_i2c_block_data(client,
-					      DS3232_REG_SECONDS, 7, buf);
+	return regmap_bulk_write(ds3232->regmap, DS3232_REG_SECONDS, buf, 7);
 }
 
 /*
@@ -190,24 +177,19 @@
  */
 static int ds3232_read_alarm(struct device *dev, struct rtc_wkalrm *alarm)
 {
-	struct i2c_client *client = to_i2c_client(dev);
-	struct ds3232 *ds3232 = i2c_get_clientdata(client);
+	struct ds3232 *ds3232 = dev_get_drvdata(dev);
 	int control, stat;
 	int ret;
 	u8 buf[4];
 
-	mutex_lock(&ds3232->mutex);
-
-	ret = i2c_smbus_read_byte_data(client, DS3232_REG_SR);
-	if (ret < 0)
+	ret = regmap_read(ds3232->regmap, DS3232_REG_SR, &stat);
+	if (ret)
 		goto out;
-	stat = ret;
-	ret = i2c_smbus_read_byte_data(client, DS3232_REG_CR);
-	if (ret < 0)
+	ret = regmap_read(ds3232->regmap, DS3232_REG_CR, &control);
+	if (ret)
 		goto out;
-	control = ret;
-	ret = i2c_smbus_read_i2c_block_data(client, DS3232_REG_ALARM1, 4, buf);
-	if (ret < 0)
+	ret = regmap_bulk_read(ds3232->regmap, DS3232_REG_ALARM1, buf, 4);
+	if (ret)
 		goto out;
 
 	alarm->time.tm_sec = bcd2bin(buf[0] & 0x7F);
@@ -226,7 +208,6 @@
 
 	ret = 0;
 out:
-	mutex_unlock(&ds3232->mutex);
 	return ret;
 }
 
@@ -236,166 +217,129 @@
  */
 static int ds3232_set_alarm(struct device *dev, struct rtc_wkalrm *alarm)
 {
-	struct i2c_client *client = to_i2c_client(dev);
-	struct ds3232 *ds3232 = i2c_get_clientdata(client);
+	struct ds3232 *ds3232 = dev_get_drvdata(dev);
 	int control, stat;
 	int ret;
 	u8 buf[4];
 
-	if (client->irq <= 0)
+	if (ds3232->irq <= 0)
 		return -EINVAL;
 
-	mutex_lock(&ds3232->mutex);
-
 	buf[0] = bin2bcd(alarm->time.tm_sec);
 	buf[1] = bin2bcd(alarm->time.tm_min);
 	buf[2] = bin2bcd(alarm->time.tm_hour);
 	buf[3] = bin2bcd(alarm->time.tm_mday);
 
 	/* clear alarm interrupt enable bit */
-	ret = i2c_smbus_read_byte_data(client, DS3232_REG_CR);
-	if (ret < 0)
+	ret = regmap_read(ds3232->regmap, DS3232_REG_CR, &control);
+	if (ret)
 		goto out;
-	control = ret;
 	control &= ~(DS3232_REG_CR_A1IE | DS3232_REG_CR_A2IE);
-	ret = i2c_smbus_write_byte_data(client, DS3232_REG_CR, control);
-	if (ret < 0)
+	ret = regmap_write(ds3232->regmap, DS3232_REG_CR, control);
+	if (ret)
 		goto out;
 
 	/* clear any pending alarm flag */
-	ret = i2c_smbus_read_byte_data(client, DS3232_REG_SR);
-	if (ret < 0)
+	ret = regmap_read(ds3232->regmap, DS3232_REG_SR, &stat);
+	if (ret)
 		goto out;
-	stat = ret;
 	stat &= ~(DS3232_REG_SR_A1F | DS3232_REG_SR_A2F);
-	ret = i2c_smbus_write_byte_data(client, DS3232_REG_SR, stat);
-	if (ret < 0)
+	ret = regmap_write(ds3232->regmap, DS3232_REG_SR, stat);
+	if (ret)
 		goto out;
 
-	ret = i2c_smbus_write_i2c_block_data(client, DS3232_REG_ALARM1, 4, buf);
+	ret = regmap_bulk_write(ds3232->regmap, DS3232_REG_ALARM1, buf, 4);
+	if (ret)
+		goto out;
 
 	if (alarm->enabled) {
 		control |= DS3232_REG_CR_A1IE;
-		ret = i2c_smbus_write_byte_data(client, DS3232_REG_CR, control);
+		ret = regmap_write(ds3232->regmap, DS3232_REG_CR, control);
 	}
 out:
-	mutex_unlock(&ds3232->mutex);
 	return ret;
 }
 
-static void ds3232_update_alarm(struct i2c_client *client)
+static int ds3232_update_alarm(struct device *dev, unsigned int enabled)
 {
-	struct ds3232 *ds3232 = i2c_get_clientdata(client);
+	struct ds3232 *ds3232 = dev_get_drvdata(dev);
 	int control;
 	int ret;
-	u8 buf[4];
 
-	mutex_lock(&ds3232->mutex);
+	ret = regmap_read(ds3232->regmap, DS3232_REG_CR, &control);
+	if (ret)
+		return ret;
 
-	ret = i2c_smbus_read_i2c_block_data(client, DS3232_REG_ALARM1, 4, buf);
-	if (ret < 0)
-		goto unlock;
-
-	buf[0] = bcd2bin(buf[0]) < 0 || (ds3232->rtc->irq_data & RTC_UF) ?
-								0x80 : buf[0];
-	buf[1] = bcd2bin(buf[1]) < 0 || (ds3232->rtc->irq_data & RTC_UF) ?
-								0x80 : buf[1];
-	buf[2] = bcd2bin(buf[2]) < 0 || (ds3232->rtc->irq_data & RTC_UF) ?
-								0x80 : buf[2];
-	buf[3] = bcd2bin(buf[3]) < 0 || (ds3232->rtc->irq_data & RTC_UF) ?
-								0x80 : buf[3];
-
-	ret = i2c_smbus_write_i2c_block_data(client, DS3232_REG_ALARM1, 4, buf);
-	if (ret < 0)
-		goto unlock;
-
-	control = i2c_smbus_read_byte_data(client, DS3232_REG_CR);
-	if (control < 0)
-		goto unlock;
-
-	if (ds3232->rtc->irq_data & (RTC_AF | RTC_UF))
+	if (enabled)
 		/* enable alarm1 interrupt */
 		control |= DS3232_REG_CR_A1IE;
 	else
 		/* disable alarm1 interrupt */
 		control &= ~(DS3232_REG_CR_A1IE);
-	i2c_smbus_write_byte_data(client, DS3232_REG_CR, control);
+	ret = regmap_write(ds3232->regmap, DS3232_REG_CR, control);
 
-unlock:
-	mutex_unlock(&ds3232->mutex);
+	return ret;
 }
 
 static int ds3232_alarm_irq_enable(struct device *dev, unsigned int enabled)
 {
-	struct i2c_client *client = to_i2c_client(dev);
-	struct ds3232 *ds3232 = i2c_get_clientdata(client);
+	struct ds3232 *ds3232 = dev_get_drvdata(dev);
 
-	if (client->irq <= 0)
+	if (ds3232->irq <= 0)
 		return -EINVAL;
 
-	if (enabled)
-		ds3232->rtc->irq_data |= RTC_AF;
-	else
-		ds3232->rtc->irq_data &= ~RTC_AF;
-
-	ds3232_update_alarm(client);
-	return 0;
+	return ds3232_update_alarm(dev, enabled);
 }
 
 static irqreturn_t ds3232_irq(int irq, void *dev_id)
 {
-	struct i2c_client *client = dev_id;
-	struct ds3232 *ds3232 = i2c_get_clientdata(client);
-
-	disable_irq_nosync(irq);
-
-	/*
-	 * If rtc as a wakeup source, can't schedule the work
-	 * at system resume flow, because at this time the i2c bus
-	 * has not been resumed.
-	 */
-	if (!ds3232->suspended)
-		schedule_work(&ds3232->work);
-
-	return IRQ_HANDLED;
-}
-
-static void ds3232_work(struct work_struct *work)
-{
-	struct ds3232 *ds3232 = container_of(work, struct ds3232, work);
-	struct i2c_client *client = ds3232->client;
+	struct device *dev = dev_id;
+	struct ds3232 *ds3232 = dev_get_drvdata(dev);
+	struct mutex *lock = &ds3232->rtc->ops_lock;
+	int ret;
 	int stat, control;
 
-	mutex_lock(&ds3232->mutex);
+	mutex_lock(lock);
 
-	stat = i2c_smbus_read_byte_data(client, DS3232_REG_SR);
-	if (stat < 0)
+	ret = regmap_read(ds3232->regmap, DS3232_REG_SR, &stat);
+	if (ret)
 		goto unlock;
 
 	if (stat & DS3232_REG_SR_A1F) {
-		control = i2c_smbus_read_byte_data(client, DS3232_REG_CR);
-		if (control < 0) {
-			pr_warn("Read Control Register error - Disable IRQ%d\n",
-				client->irq);
+		ret = regmap_read(ds3232->regmap, DS3232_REG_CR, &control);
+		if (ret) {
+			dev_warn(ds3232->dev,
+				 "Read Control Register error %d\n", ret);
 		} else {
 			/* disable alarm1 interrupt */
 			control &= ~(DS3232_REG_CR_A1IE);
-			i2c_smbus_write_byte_data(client, DS3232_REG_CR,
-						control);
+			ret = regmap_write(ds3232->regmap, DS3232_REG_CR,
+					   control);
+			if (ret) {
+				dev_warn(ds3232->dev,
+					 "Write Control Register error %d\n",
+					 ret);
+				goto unlock;
+			}
 
 			/* clear the alarm pend flag */
 			stat &= ~DS3232_REG_SR_A1F;
-			i2c_smbus_write_byte_data(client, DS3232_REG_SR, stat);
+			ret = regmap_write(ds3232->regmap, DS3232_REG_SR, stat);
+			if (ret) {
+				dev_warn(ds3232->dev,
+					 "Write Status Register error %d\n",
+					 ret);
+				goto unlock;
+			}
 
 			rtc_update_irq(ds3232->rtc, 1, RTC_AF | RTC_IRQF);
-
-			if (!ds3232->exiting)
-				enable_irq(client->irq);
 		}
 	}
 
 unlock:
-	mutex_unlock(&ds3232->mutex);
+	mutex_unlock(lock);
+
+	return IRQ_HANDLED;
 }
 
 static const struct rtc_class_ops ds3232_rtc_ops = {
@@ -406,67 +350,50 @@
 	.alarm_irq_enable = ds3232_alarm_irq_enable,
 };
 
-static int ds3232_probe(struct i2c_client *client,
-			const struct i2c_device_id *id)
+static int ds3232_probe(struct device *dev, struct regmap *regmap, int irq,
+			const char *name)
 {
 	struct ds3232 *ds3232;
 	int ret;
 
-	ds3232 = devm_kzalloc(&client->dev, sizeof(struct ds3232), GFP_KERNEL);
+	ds3232 = devm_kzalloc(dev, sizeof(*ds3232), GFP_KERNEL);
 	if (!ds3232)
 		return -ENOMEM;
 
-	ds3232->client = client;
-	i2c_set_clientdata(client, ds3232);
+	ds3232->regmap = regmap;
+	ds3232->irq = irq;
+	ds3232->dev = dev;
+	dev_set_drvdata(dev, ds3232);
 
-	INIT_WORK(&ds3232->work, ds3232_work);
-	mutex_init(&ds3232->mutex);
-
-	ret = ds3232_check_rtc_status(client);
+	ret = ds3232_check_rtc_status(dev);
 	if (ret)
 		return ret;
 
-	if (client->irq > 0) {
-		ret = devm_request_irq(&client->dev, client->irq, ds3232_irq,
-				       IRQF_SHARED, "ds3232", client);
+	if (ds3232->irq > 0) {
+		ret = devm_request_threaded_irq(dev, ds3232->irq, NULL,
+						ds3232_irq,
+						IRQF_SHARED | IRQF_ONESHOT,
+						name, dev);
 		if (ret) {
-			dev_err(&client->dev, "unable to request IRQ\n");
-		}
-		device_init_wakeup(&client->dev, 1);
+			ds3232->irq = 0;
+			dev_err(dev, "unable to request IRQ\n");
+		} else {
+			device_init_wakeup(dev, 1);
+		}
 	}
-	ds3232->rtc = devm_rtc_device_register(&client->dev, client->name,
-					  &ds3232_rtc_ops, THIS_MODULE);
+	ds3232->rtc = devm_rtc_device_register(dev, name, &ds3232_rtc_ops,
+						THIS_MODULE);
+
 	return PTR_ERR_OR_ZERO(ds3232->rtc);
 }
 
-static int ds3232_remove(struct i2c_client *client)
-{
-	struct ds3232 *ds3232 = i2c_get_clientdata(client);
-
-	if (client->irq > 0) {
-		mutex_lock(&ds3232->mutex);
-		ds3232->exiting = 1;
-		mutex_unlock(&ds3232->mutex);
-
-		devm_free_irq(&client->dev, client->irq, client);
-		cancel_work_sync(&ds3232->work);
-	}
-
-	return 0;
-}
-
 #ifdef CONFIG_PM_SLEEP
 static int ds3232_suspend(struct device *dev)
 {
 	struct ds3232 *ds3232 = dev_get_drvdata(dev);
-	struct i2c_client *client = to_i2c_client(dev);
 
-	if (device_can_wakeup(dev)) {
-		ds3232->suspended = true;
-		if (irq_set_irq_wake(client->irq, 1)) {
+	if (device_may_wakeup(dev)) {
+		if (enable_irq_wake(ds3232->irq))
 			dev_warn_once(dev, "Cannot set wakeup source\n");
-			ds3232->suspended = false;
-		}
 	}
 
 	return 0;
@@ -475,16 +402,9 @@
 static int ds3232_resume(struct device *dev)
 {
 	struct ds3232 *ds3232 = dev_get_drvdata(dev);
-	struct i2c_client *client = to_i2c_client(dev);
 
-	if (ds3232->suspended) {
-		ds3232->suspended = false;
-
-		/* Clear the hardware alarm pend flag */
-		schedule_work(&ds3232->work);
-
-		irq_set_irq_wake(client->irq, 0);
-	}
+	if (device_may_wakeup(dev))
+		disable_irq_wake(ds3232->irq);
 
 	return 0;
 }
@@ -494,6 +414,27 @@
 	SET_SYSTEM_SLEEP_PM_OPS(ds3232_suspend, ds3232_resume)
 };
 
+#if IS_ENABLED(CONFIG_I2C)
+
+static int ds3232_i2c_probe(struct i2c_client *client,
+			    const struct i2c_device_id *id)
+{
+	struct regmap *regmap;
+	static const struct regmap_config config = {
+		.reg_bits = 8,
+		.val_bits = 8,
+	};
+
+	regmap = devm_regmap_init_i2c(client, &config);
+	if (IS_ERR(regmap)) {
+		dev_err(&client->dev, "%s: regmap allocation failed: %ld\n",
+			__func__, PTR_ERR(regmap));
+		return PTR_ERR(regmap);
+	}
+
+	return ds3232_probe(&client->dev, regmap, client->irq, client->name);
+}
+
 static const struct i2c_device_id ds3232_id[] = {
 	{ "ds3232", 0 },
 	{ }
@@ -505,13 +446,162 @@
 		.name = "rtc-ds3232",
 		.pm	= &ds3232_pm_ops,
 	},
-	.probe = ds3232_probe,
-	.remove = ds3232_remove,
+	.probe = ds3232_i2c_probe,
 	.id_table = ds3232_id,
 };
 
-module_i2c_driver(ds3232_driver);
+static int ds3232_register_driver(void)
+{
+	return i2c_add_driver(&ds3232_driver);
+}
+
+static void ds3232_unregister_driver(void)
+{
+	i2c_del_driver(&ds3232_driver);
+}
+
+#else
+
+static int ds3232_register_driver(void)
+{
+	return 0;
+}
+
+static void ds3232_unregister_driver(void)
+{
+}
+
+#endif
+
+#if IS_ENABLED(CONFIG_SPI_MASTER)
+
+static int ds3234_probe(struct spi_device *spi)
+{
+	int res;
+	unsigned int tmp;
+	static const struct regmap_config config = {
+		.reg_bits = 8,
+		.val_bits = 8,
+		.write_flag_mask = 0x80,
+	};
+	struct regmap *regmap;
+
+	regmap = devm_regmap_init_spi(spi, &config);
+	if (IS_ERR(regmap)) {
+		dev_err(&spi->dev, "%s: regmap allocation failed: %ld\n",
+			__func__, PTR_ERR(regmap));
+		return PTR_ERR(regmap);
+	}
+
+	spi->mode = SPI_MODE_3;
+	spi->bits_per_word = 8;
+	spi_setup(spi);
+
+	res = regmap_read(regmap, DS3232_REG_SECONDS, &tmp);
+	if (res)
+		return res;
+
+	/* Control settings
+	 *
+	 * CONTROL_REG
+	 * BIT 7	6	5	4	3	2	1	0
+	 *     EOSC	BBSQW	CONV	RS2	RS1	INTCN	A2IE	A1IE
+	 *
+	 *     0	0	0	1	1	1	0	0
+	 *
+	 * CONTROL_STAT_REG
+	 * BIT 7	6	5	4	3	2	1	0
+	 *     OSF	BB32kHz	CRATE1	CRATE0	EN32kHz	BSY	A2F	A1F
+	 *
+	 *     1	0	0	0	1	0	0	0
+	 */
+	res = regmap_read(regmap, DS3232_REG_CR, &tmp);
+	if (res)
+		return res;
+	res = regmap_write(regmap, DS3232_REG_CR, tmp & 0x1c);
+	if (res)
+		return res;
+
+	res = regmap_read(regmap, DS3232_REG_SR, &tmp);
+	if (res)
+		return res;
+	res = regmap_write(regmap, DS3232_REG_SR, tmp & 0x88);
+	if (res)
+		return res;
+
+	/* Print our settings */
+	res = regmap_read(regmap, DS3232_REG_CR, &tmp);
+	if (res)
+		return res;
+	dev_info(&spi->dev, "Control Reg: 0x%02x\n", tmp);
+
+	res = regmap_read(regmap, DS3232_REG_SR, &tmp);
+	if (res)
+		return res;
+	dev_info(&spi->dev, "Ctrl/Stat Reg: 0x%02x\n", tmp);
+
+	return ds3232_probe(&spi->dev, regmap, spi->irq, "ds3234");
+}
+
+static struct spi_driver ds3234_driver = {
+	.driver = {
+		.name	 = "ds3234",
+	},
+	.probe	 = ds3234_probe,
+};
+
+static int ds3234_register_driver(void)
+{
+	return spi_register_driver(&ds3234_driver);
+}
+
+static void ds3234_unregister_driver(void)
+{
+	spi_unregister_driver(&ds3234_driver);
+}
+
+#else
+
+static int ds3234_register_driver(void)
+{
+	return 0;
+}
+
+static void ds3234_unregister_driver(void)
+{
+}
+
+#endif
+
+static int __init ds323x_init(void)
+{
+	int ret;
+
+	ret = ds3232_register_driver();
+	if (ret) {
+		pr_err("Failed to register ds3232 driver: %d\n", ret);
+		return ret;
+	}
+
+	ret = ds3234_register_driver();
+	if (ret) {
+		pr_err("Failed to register ds3234 driver: %d\n", ret);
+		ds3232_unregister_driver();
+	}
+
+	return ret;
+}
+module_init(ds323x_init)
+
+static void __exit ds323x_exit(void)
+{
+	ds3234_unregister_driver();
+	ds3232_unregister_driver();
+}
+module_exit(ds323x_exit)
 
 MODULE_AUTHOR("Srikanth Srinivasan <srikanth.srinivasan@freescale.com>");
-MODULE_DESCRIPTION("Maxim/Dallas DS3232 RTC Driver");
+MODULE_AUTHOR("Dennis Aberilla <denzzzhome@yahoo.com>");
+MODULE_DESCRIPTION("Maxim/Dallas DS3232/DS3234 RTC Driver");
 MODULE_LICENSE("GPL");
+MODULE_ALIAS("spi:ds3234");
diff --git a/drivers/rtc/rtc-ds3234.c b/drivers/rtc/rtc-ds3234.c
deleted file mode 100644
index 570ab28..0000000
--- a/drivers/rtc/rtc-ds3234.c
+++ /dev/null
@@ -1,171 +0,0 @@
-/* rtc-ds3234.c
- *
- * Driver for Dallas Semiconductor (DS3234) SPI RTC with Integrated Crystal
- * and SRAM.
- *
- * Copyright (C) 2008 MIMOMax Wireless Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- */
-
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/device.h>
-#include <linux/platform_device.h>
-#include <linux/rtc.h>
-#include <linux/spi/spi.h>
-#include <linux/bcd.h>
-
-#define DS3234_REG_SECONDS	0x00
-#define DS3234_REG_MINUTES	0x01
-#define DS3234_REG_HOURS	0x02
-#define DS3234_REG_DAY		0x03
-#define DS3234_REG_DATE		0x04
-#define DS3234_REG_MONTH	0x05
-#define DS3234_REG_YEAR		0x06
-#define DS3234_REG_CENTURY	(1 << 7) /* Bit 7 of the Month register */
-
-#define DS3234_REG_CONTROL	0x0E
-#define DS3234_REG_CONT_STAT	0x0F
-
-static int ds3234_set_reg(struct device *dev, unsigned char address,
-				unsigned char data)
-{
-	struct spi_device *spi = to_spi_device(dev);
-	unsigned char buf[2];
-
-	/* MSB must be '1' to indicate write */
-	buf[0] = address | 0x80;
-	buf[1] = data;
-
-	return spi_write_then_read(spi, buf, 2, NULL, 0);
-}
-
-static int ds3234_get_reg(struct device *dev, unsigned char address,
-				unsigned char *data)
-{
-	struct spi_device *spi = to_spi_device(dev);
-
-	*data = address & 0x7f;
-
-	return spi_write_then_read(spi, data, 1, data, 1);
-}
-
-static int ds3234_read_time(struct device *dev, struct rtc_time *dt)
-{
-	int err;
-	unsigned char buf[8];
-	struct spi_device *spi = to_spi_device(dev);
-
-	buf[0] = 0x00; /* Start address */
-
-	err = spi_write_then_read(spi, buf, 1, buf, 8);
-	if (err != 0)
-		return err;
-
-	/* Seconds, Minutes, Hours, Day, Date, Month, Year */
-	dt->tm_sec	= bcd2bin(buf[0]);
-	dt->tm_min	= bcd2bin(buf[1]);
-	dt->tm_hour	= bcd2bin(buf[2] & 0x3f);
-	dt->tm_wday	= bcd2bin(buf[3]) - 1; /* 0 = Sun */
-	dt->tm_mday	= bcd2bin(buf[4]);
-	dt->tm_mon	= bcd2bin(buf[5] & 0x1f) - 1; /* 0 = Jan */
-	dt->tm_year	= bcd2bin(buf[6] & 0xff) + 100; /* Assume 20YY */
-
-	return rtc_valid_tm(dt);
-}
-
-static int ds3234_set_time(struct device *dev, struct rtc_time *dt)
-{
-	ds3234_set_reg(dev, DS3234_REG_SECONDS, bin2bcd(dt->tm_sec));
-	ds3234_set_reg(dev, DS3234_REG_MINUTES, bin2bcd(dt->tm_min));
-	ds3234_set_reg(dev, DS3234_REG_HOURS, bin2bcd(dt->tm_hour) & 0x3f);
-
-	/* 0 = Sun */
-	ds3234_set_reg(dev, DS3234_REG_DAY, bin2bcd(dt->tm_wday + 1));
-	ds3234_set_reg(dev, DS3234_REG_DATE, bin2bcd(dt->tm_mday));
-
-	/* 0 = Jan */
-	ds3234_set_reg(dev, DS3234_REG_MONTH, bin2bcd(dt->tm_mon + 1));
-
-	/* Assume 20YY although we just want to make sure not to go negative. */
-	if (dt->tm_year > 100)
-		dt->tm_year -= 100;
-
-	ds3234_set_reg(dev, DS3234_REG_YEAR, bin2bcd(dt->tm_year));
-
-	return 0;
-}
-
-static const struct rtc_class_ops ds3234_rtc_ops = {
-	.read_time	= ds3234_read_time,
-	.set_time	= ds3234_set_time,
-};
-
-static int ds3234_probe(struct spi_device *spi)
-{
-	struct rtc_device *rtc;
-	unsigned char tmp;
-	int res;
-
-	spi->mode = SPI_MODE_3;
-	spi->bits_per_word = 8;
-	spi_setup(spi);
-
-	res = ds3234_get_reg(&spi->dev, DS3234_REG_SECONDS, &tmp);
-	if (res != 0)
-		return res;
-
-	/* Control settings
-	 *
-	 * CONTROL_REG
-	 * BIT 7	6	5	4	3	2	1	0
-	 *     EOSC	BBSQW	CONV	RS2	RS1	INTCN	A2IE	A1IE
-	 *
-	 *     0	0	0	1	1	1	0	0
-	 *
-	 * CONTROL_STAT_REG
-	 * BIT 7	6	5	4	3	2	1	0
-	 *     OSF	BB32kHz	CRATE1	CRATE0	EN32kHz	BSY	A2F	A1F
-	 *
-	 *     1	0	0	0	1	0	0	0
-	 */
-	ds3234_get_reg(&spi->dev, DS3234_REG_CONTROL, &tmp);
-	ds3234_set_reg(&spi->dev, DS3234_REG_CONTROL, tmp & 0x1c);
-
-	ds3234_get_reg(&spi->dev, DS3234_REG_CONT_STAT, &tmp);
-	ds3234_set_reg(&spi->dev, DS3234_REG_CONT_STAT, tmp & 0x88);
-
-	/* Print our settings */
-	ds3234_get_reg(&spi->dev, DS3234_REG_CONTROL, &tmp);
-	dev_info(&spi->dev, "Control Reg: 0x%02x\n", tmp);
-
-	ds3234_get_reg(&spi->dev, DS3234_REG_CONT_STAT, &tmp);
-	dev_info(&spi->dev, "Ctrl/Stat Reg: 0x%02x\n", tmp);
-
-	rtc = devm_rtc_device_register(&spi->dev, "ds3234",
-				&ds3234_rtc_ops, THIS_MODULE);
-	if (IS_ERR(rtc))
-		return PTR_ERR(rtc);
-
-	spi_set_drvdata(spi, rtc);
-
-	return 0;
-}
-
-static struct spi_driver ds3234_driver = {
-	.driver = {
-		.name	 = "ds3234",
-	},
-	.probe	 = ds3234_probe,
-};
-
-module_spi_driver(ds3234_driver);
-
-MODULE_DESCRIPTION("DS3234 SPI RTC driver");
-MODULE_AUTHOR("Dennis Aberilla <denzzzhome@yahoo.com>");
-MODULE_LICENSE("GPL");
-MODULE_ALIAS("spi:ds3234");
diff --git a/drivers/rtc/rtc-generic.c b/drivers/rtc/rtc-generic.c
index e782ebd..d726c6a 100644
--- a/drivers/rtc/rtc-generic.c
+++ b/drivers/rtc/rtc-generic.c
@@ -9,6 +9,8 @@
 #include <linux/platform_device.h>
 #include <linux/rtc.h>
 
+#if defined(CONFIG_M68K) || defined(CONFIG_PARISC) || \
+    defined(CONFIG_PPC) || defined(CONFIG_SUPERH32)
 #include <asm/rtc.h>
 
 static int generic_get_time(struct device *dev, struct rtc_time *tm)
@@ -33,13 +35,21 @@
 	.read_time = generic_get_time,
 	.set_time = generic_set_time,
 };
+#else
+#define generic_rtc_ops *(struct rtc_class_ops*)NULL
+#endif
 
 static int __init generic_rtc_probe(struct platform_device *dev)
 {
 	struct rtc_device *rtc;
+	const struct rtc_class_ops *ops;
+
+	ops = dev_get_platdata(&dev->dev);
+	if (!ops)
+		ops = &generic_rtc_ops;
 
 	rtc = devm_rtc_device_register(&dev->dev, "rtc-generic",
-					&generic_rtc_ops, THIS_MODULE);
+					ops, THIS_MODULE);
 	if (IS_ERR(rtc))
 		return PTR_ERR(rtc);
 
diff --git a/drivers/rtc/rtc-hym8563.c b/drivers/rtc/rtc-hym8563.c
index 097325d..b1b4746 100644
--- a/drivers/rtc/rtc-hym8563.c
+++ b/drivers/rtc/rtc-hym8563.c
@@ -144,7 +144,7 @@
 	 * it does not seem to carry it over a subsequent write/read.
 	 * So we'll limit ourself to 100 years, starting at 2000 for now.
 	 */
-	buf[6] = tm->tm_year - 100;
+	buf[6] = bin2bcd(tm->tm_year - 100);
 
 	/*
 	 * CTL1 only contains TEST-mode bits apart from stop,
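
The one-line rtc-hym8563 fix above stores the year in BCD, as the chip expects. Without it, setting the time in 2016 would write the raw binary 16 (0x10), which the existing bcd2bin() read path decodes back as 10, so the clock would silently report 2010. A tiny sketch of the conversion; the helper name is illustrative.

	#include <linux/bcd.h>
	#include <linux/types.h>

	/* tm_year is years since 1900, so 2016 arrives as 116. */
	static u8 example_hym8563_year(int tm_year)
	{
		/* bin2bcd(16) == 0x16, which bcd2bin() turns back into 16.
		 * Writing the raw 16 (0x10) instead would read back as
		 * bcd2bin(0x10) == 10, i.e. the year 2010. */
		return bin2bcd(tm_year - 100);
	}
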
diff --git a/drivers/rtc/rtc-max77686.c b/drivers/rtc/rtc-max77686.c
index 7184a0e..182fdd0 100644
--- a/drivers/rtc/rtc-max77686.c
+++ b/drivers/rtc/rtc-max77686.c
@@ -1,5 +1,5 @@
 /*
- * RTC driver for Maxim MAX77686
+ * RTC driver for Maxim MAX77686 and MAX77802
  *
  * Copyright (C) 2012 Samsung Electronics Co.Ltd
  *
@@ -12,8 +12,7 @@
  *
  */
 
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
+#include <linux/i2c.h>
 #include <linux/slab.h>
 #include <linux/rtc.h>
 #include <linux/delay.h>
@@ -24,24 +23,38 @@
 #include <linux/irqdomain.h>
 #include <linux/regmap.h>
 
+#define MAX77686_I2C_ADDR_RTC		(0x0C >> 1)
+#define MAX77620_I2C_ADDR_RTC		0x68
+#define MAX77686_INVALID_I2C_ADDR	(-1)
+
+/* Define non existing register */
+#define MAX77686_INVALID_REG		(-1)
+
 /* RTC Control Register */
 #define BCD_EN_SHIFT			0
-#define BCD_EN_MASK			(1 << BCD_EN_SHIFT)
+#define BCD_EN_MASK			BIT(BCD_EN_SHIFT)
 #define MODEL24_SHIFT			1
-#define MODEL24_MASK			(1 << MODEL24_SHIFT)
+#define MODEL24_MASK			BIT(MODEL24_SHIFT)
 /* RTC Update Register1 */
 #define RTC_UDR_SHIFT			0
-#define RTC_UDR_MASK			(1 << RTC_UDR_SHIFT)
+#define RTC_UDR_MASK			BIT(RTC_UDR_SHIFT)
 #define RTC_RBUDR_SHIFT			4
-#define RTC_RBUDR_MASK			(1 << RTC_RBUDR_SHIFT)
+#define RTC_RBUDR_MASK			BIT(RTC_RBUDR_SHIFT)
 /* RTC Hour register */
 #define HOUR_PM_SHIFT			6
-#define HOUR_PM_MASK			(1 << HOUR_PM_SHIFT)
+#define HOUR_PM_MASK			BIT(HOUR_PM_SHIFT)
 /* RTC Alarm Enable */
 #define ALARM_ENABLE_SHIFT		7
-#define ALARM_ENABLE_MASK		(1 << ALARM_ENABLE_SHIFT)
+#define ALARM_ENABLE_MASK		BIT(ALARM_ENABLE_SHIFT)
 
-#define MAX77686_RTC_UPDATE_DELAY	16
+#define REG_RTC_NONE			0xdeadbeef
+
+/*
+ * MAX77802 has separate register (RTCAE1) for alarm enable instead
+ * using 1 bit from registers RTC{SEC,MIN,HOUR,DAY,MONTH,YEAR,DATE}
+ * as in done in MAX77686.
+ */
+#define MAX77802_ALARM_ENABLE_VALUE	0x77
 
 enum {
 	RTC_SEC = 0,
@@ -54,15 +67,38 @@
 	RTC_NR_TIME
 };
 
+struct max77686_rtc_driver_data {
+	/* Minimum usecs needed for a RTC update */
+	unsigned long		delay;
+	/* Mask used to read RTC registers value */
+	u8			mask;
+	/* Registers offset to I2C addresses map */
+	const unsigned int	*map;
+	/* Has a separate alarm enable register? */
+	bool			alarm_enable_reg;
+	/* I2C address for RTC block */
+	int			rtc_i2c_addr;
+	/* RTC interrupt via platform resource */
+	bool			rtc_irq_from_platform;
+	/* Pending alarm status register */
+	int			alarm_pending_status_reg;
+	/* RTC IRQ CHIP for regmap */
+	const struct regmap_irq_chip *rtc_irq_chip;
+};
+
 struct max77686_rtc_info {
 	struct device		*dev;
-	struct max77686_dev	*max77686;
 	struct i2c_client	*rtc;
 	struct rtc_device	*rtc_dev;
 	struct mutex		lock;
 
 	struct regmap		*regmap;
+	struct regmap		*rtc_regmap;
 
+	const struct max77686_rtc_driver_data *drv_data;
+	struct regmap_irq_chip_data *rtc_irq_data;
+
+	int rtc_irq;
 	int virq;
 	int rtc_24hr_mode;
 };
@@ -72,29 +108,190 @@
 	MAX77686_RTC_READ,
 };
 
+/* These are not registers but just offsets that are mapped to addresses */
+enum max77686_rtc_reg_offset {
+	REG_RTC_CONTROLM = 0,
+	REG_RTC_CONTROL,
+	REG_RTC_UPDATE0,
+	REG_WTSR_SMPL_CNTL,
+	REG_RTC_SEC,
+	REG_RTC_MIN,
+	REG_RTC_HOUR,
+	REG_RTC_WEEKDAY,
+	REG_RTC_MONTH,
+	REG_RTC_YEAR,
+	REG_RTC_DATE,
+	REG_ALARM1_SEC,
+	REG_ALARM1_MIN,
+	REG_ALARM1_HOUR,
+	REG_ALARM1_WEEKDAY,
+	REG_ALARM1_MONTH,
+	REG_ALARM1_YEAR,
+	REG_ALARM1_DATE,
+	REG_ALARM2_SEC,
+	REG_ALARM2_MIN,
+	REG_ALARM2_HOUR,
+	REG_ALARM2_WEEKDAY,
+	REG_ALARM2_MONTH,
+	REG_ALARM2_YEAR,
+	REG_ALARM2_DATE,
+	REG_RTC_AE1,
+	REG_RTC_END,
+};
+
+/* Maps RTC registers offset to the MAX77686 register addresses */
+static const unsigned int max77686_map[REG_RTC_END] = {
+	[REG_RTC_CONTROLM]   = MAX77686_RTC_CONTROLM,
+	[REG_RTC_CONTROL]    = MAX77686_RTC_CONTROL,
+	[REG_RTC_UPDATE0]    = MAX77686_RTC_UPDATE0,
+	[REG_WTSR_SMPL_CNTL] = MAX77686_WTSR_SMPL_CNTL,
+	[REG_RTC_SEC]        = MAX77686_RTC_SEC,
+	[REG_RTC_MIN]        = MAX77686_RTC_MIN,
+	[REG_RTC_HOUR]       = MAX77686_RTC_HOUR,
+	[REG_RTC_WEEKDAY]    = MAX77686_RTC_WEEKDAY,
+	[REG_RTC_MONTH]      = MAX77686_RTC_MONTH,
+	[REG_RTC_YEAR]       = MAX77686_RTC_YEAR,
+	[REG_RTC_DATE]       = MAX77686_RTC_DATE,
+	[REG_ALARM1_SEC]     = MAX77686_ALARM1_SEC,
+	[REG_ALARM1_MIN]     = MAX77686_ALARM1_MIN,
+	[REG_ALARM1_HOUR]    = MAX77686_ALARM1_HOUR,
+	[REG_ALARM1_WEEKDAY] = MAX77686_ALARM1_WEEKDAY,
+	[REG_ALARM1_MONTH]   = MAX77686_ALARM1_MONTH,
+	[REG_ALARM1_YEAR]    = MAX77686_ALARM1_YEAR,
+	[REG_ALARM1_DATE]    = MAX77686_ALARM1_DATE,
+	[REG_ALARM2_SEC]     = MAX77686_ALARM2_SEC,
+	[REG_ALARM2_MIN]     = MAX77686_ALARM2_MIN,
+	[REG_ALARM2_HOUR]    = MAX77686_ALARM2_HOUR,
+	[REG_ALARM2_WEEKDAY] = MAX77686_ALARM2_WEEKDAY,
+	[REG_ALARM2_MONTH]   = MAX77686_ALARM2_MONTH,
+	[REG_ALARM2_YEAR]    = MAX77686_ALARM2_YEAR,
+	[REG_ALARM2_DATE]    = MAX77686_ALARM2_DATE,
+	[REG_RTC_AE1]	     = REG_RTC_NONE,
+};
+
+static const struct regmap_irq max77686_rtc_irqs[] = {
+	/* RTC interrupts */
+	REGMAP_IRQ_REG(0, 0, MAX77686_RTCINT_RTC60S_MSK),
+	REGMAP_IRQ_REG(1, 0, MAX77686_RTCINT_RTCA1_MSK),
+	REGMAP_IRQ_REG(2, 0, MAX77686_RTCINT_RTCA2_MSK),
+	REGMAP_IRQ_REG(3, 0, MAX77686_RTCINT_SMPL_MSK),
+	REGMAP_IRQ_REG(4, 0, MAX77686_RTCINT_RTC1S_MSK),
+	REGMAP_IRQ_REG(5, 0, MAX77686_RTCINT_WTSR_MSK),
+};
+
+static const struct regmap_irq_chip max77686_rtc_irq_chip = {
+	.name		= "max77686-rtc",
+	.status_base	= MAX77686_RTC_INT,
+	.mask_base	= MAX77686_RTC_INTM,
+	.num_regs	= 1,
+	.irqs		= max77686_rtc_irqs,
+	.num_irqs	= ARRAY_SIZE(max77686_rtc_irqs),
+};
+
+static const struct max77686_rtc_driver_data max77686_drv_data = {
+	.delay = 16000,
+	.mask  = 0x7f,
+	.map   = max77686_map,
+	.alarm_enable_reg  = false,
+	.rtc_irq_from_platform = false,
+	.alarm_pending_status_reg = MAX77686_REG_STATUS2,
+	.rtc_i2c_addr = MAX77686_I2C_ADDR_RTC,
+	.rtc_irq_chip = &max77686_rtc_irq_chip,
+};
+
+static const struct max77686_rtc_driver_data max77620_drv_data = {
+	.delay = 16000,
+	.mask  = 0x7f,
+	.map   = max77686_map,
+	.alarm_enable_reg  = false,
+	.rtc_irq_from_platform = true,
+	.alarm_pending_status_reg = MAX77686_INVALID_REG,
+	.rtc_i2c_addr = MAX77620_I2C_ADDR_RTC,
+	.rtc_irq_chip = &max77686_rtc_irq_chip,
+};
+
+static const unsigned int max77802_map[REG_RTC_END] = {
+	[REG_RTC_CONTROLM]   = MAX77802_RTC_CONTROLM,
+	[REG_RTC_CONTROL]    = MAX77802_RTC_CONTROL,
+	[REG_RTC_UPDATE0]    = MAX77802_RTC_UPDATE0,
+	[REG_WTSR_SMPL_CNTL] = MAX77802_WTSR_SMPL_CNTL,
+	[REG_RTC_SEC]        = MAX77802_RTC_SEC,
+	[REG_RTC_MIN]        = MAX77802_RTC_MIN,
+	[REG_RTC_HOUR]       = MAX77802_RTC_HOUR,
+	[REG_RTC_WEEKDAY]    = MAX77802_RTC_WEEKDAY,
+	[REG_RTC_MONTH]      = MAX77802_RTC_MONTH,
+	[REG_RTC_YEAR]       = MAX77802_RTC_YEAR,
+	[REG_RTC_DATE]       = MAX77802_RTC_DATE,
+	[REG_ALARM1_SEC]     = MAX77802_ALARM1_SEC,
+	[REG_ALARM1_MIN]     = MAX77802_ALARM1_MIN,
+	[REG_ALARM1_HOUR]    = MAX77802_ALARM1_HOUR,
+	[REG_ALARM1_WEEKDAY] = MAX77802_ALARM1_WEEKDAY,
+	[REG_ALARM1_MONTH]   = MAX77802_ALARM1_MONTH,
+	[REG_ALARM1_YEAR]    = MAX77802_ALARM1_YEAR,
+	[REG_ALARM1_DATE]    = MAX77802_ALARM1_DATE,
+	[REG_ALARM2_SEC]     = MAX77802_ALARM2_SEC,
+	[REG_ALARM2_MIN]     = MAX77802_ALARM2_MIN,
+	[REG_ALARM2_HOUR]    = MAX77802_ALARM2_HOUR,
+	[REG_ALARM2_WEEKDAY] = MAX77802_ALARM2_WEEKDAY,
+	[REG_ALARM2_MONTH]   = MAX77802_ALARM2_MONTH,
+	[REG_ALARM2_YEAR]    = MAX77802_ALARM2_YEAR,
+	[REG_ALARM2_DATE]    = MAX77802_ALARM2_DATE,
+	[REG_RTC_AE1]	     = MAX77802_RTC_AE1,
+};
+
+static const struct regmap_irq_chip max77802_rtc_irq_chip = {
+	.name		= "max77802-rtc",
+	.status_base	= MAX77802_RTC_INT,
+	.mask_base	= MAX77802_RTC_INTM,
+	.num_regs	= 1,
+	.irqs		= max77686_rtc_irqs, /* same masks as 77686 */
+	.num_irqs	= ARRAY_SIZE(max77686_rtc_irqs),
+};
+
+static const struct max77686_rtc_driver_data max77802_drv_data = {
+	.delay = 200,
+	.mask  = 0xff,
+	.map   = max77802_map,
+	.alarm_enable_reg  = true,
+	.rtc_irq_from_platform = false,
+	.alarm_pending_status_reg = MAX77686_REG_STATUS2,
+	.rtc_i2c_addr = MAX77686_INVALID_I2C_ADDR,
+	.rtc_irq_chip = &max77802_rtc_irq_chip,
+};
+
 static void max77686_rtc_data_to_tm(u8 *data, struct rtc_time *tm,
-				   int rtc_24hr_mode)
+				    struct max77686_rtc_info *info)
 {
-	tm->tm_sec = data[RTC_SEC] & 0x7f;
-	tm->tm_min = data[RTC_MIN] & 0x7f;
-	if (rtc_24hr_mode)
+	u8 mask = info->drv_data->mask;
+
+	tm->tm_sec = data[RTC_SEC] & mask;
+	tm->tm_min = data[RTC_MIN] & mask;
+	if (info->rtc_24hr_mode) {
 		tm->tm_hour = data[RTC_HOUR] & 0x1f;
-	else {
+	} else {
 		tm->tm_hour = data[RTC_HOUR] & 0x0f;
 		if (data[RTC_HOUR] & HOUR_PM_MASK)
 			tm->tm_hour += 12;
 	}
 
 	/* Only a single bit is set in data[], so fls() would be equivalent */
-	tm->tm_wday = ffs(data[RTC_WEEKDAY] & 0x7f) - 1;
+	tm->tm_wday = ffs(data[RTC_WEEKDAY] & mask) - 1;
 	tm->tm_mday = data[RTC_DATE] & 0x1f;
 	tm->tm_mon = (data[RTC_MONTH] & 0x0f) - 1;
-	tm->tm_year = (data[RTC_YEAR] & 0x7f) + 100;
+	tm->tm_year = data[RTC_YEAR] & mask;
 	tm->tm_yday = 0;
 	tm->tm_isdst = 0;
+
+	/*
+	 * MAX77686 uses 1 bit from sec/min/hour/etc RTC registers and the
+	 * year values are just 0..99 so add 100 to support up to 2099.
+	 */
+	if (!info->drv_data->alarm_enable_reg)
+		tm->tm_year += 100;
 }
 
-static int max77686_rtc_tm_to_data(struct rtc_time *tm, u8 *data)
+static int max77686_rtc_tm_to_data(struct rtc_time *tm, u8 *data,
+				   struct max77686_rtc_info *info)
 {
 	data[RTC_SEC] = tm->tm_sec;
 	data[RTC_MIN] = tm->tm_min;
@@ -102,35 +299,44 @@
 	data[RTC_WEEKDAY] = 1 << tm->tm_wday;
 	data[RTC_DATE] = tm->tm_mday;
 	data[RTC_MONTH] = tm->tm_mon + 1;
+
+	if (info->drv_data->alarm_enable_reg) {
+		data[RTC_YEAR] = tm->tm_year;
+		return 0;
+	}
+
 	data[RTC_YEAR] = tm->tm_year > 100 ? (tm->tm_year - 100) : 0;
 
 	if (tm->tm_year < 100) {
-		pr_warn("RTC cannot handle the year %d.  Assume it's 2000.\n",
+		dev_err(info->dev, "RTC cannot handle the year %d.\n",
 			1900 + tm->tm_year);
 		return -EINVAL;
 	}
+
 	return 0;
 }
 
 static int max77686_rtc_update(struct max77686_rtc_info *info,
-	enum MAX77686_RTC_OP op)
+			       enum MAX77686_RTC_OP op)
 {
 	int ret;
 	unsigned int data;
+	unsigned long delay = info->drv_data->delay;
 
 	if (op == MAX77686_RTC_WRITE)
 		data = 1 << RTC_UDR_SHIFT;
 	else
 		data = 1 << RTC_RBUDR_SHIFT;
 
-	ret = regmap_update_bits(info->max77686->rtc_regmap,
-				 MAX77686_RTC_UPDATE0, data, data);
+	ret = regmap_update_bits(info->rtc_regmap,
+				 info->drv_data->map[REG_RTC_UPDATE0],
+				 data, data);
 	if (ret < 0)
-		dev_err(info->dev, "%s: fail to write update reg(ret=%d, data=0x%x)\n",
-				__func__, ret, data);
+		dev_err(info->dev, "Fail to write update reg(ret=%d, data=0x%x)\n",
+			ret, data);
 	else {
-		/* Minimum 16ms delay required before RTC update. */
-		msleep(MAX77686_RTC_UPDATE_DELAY);
+		/* Minimum delay required before RTC update. */
+		usleep_range(delay, delay * 2);
 	}
 
 	return ret;
@@ -148,14 +354,15 @@
 	if (ret < 0)
 		goto out;
 
-	ret = regmap_bulk_read(info->max77686->rtc_regmap,
-				MAX77686_RTC_SEC, data, RTC_NR_TIME);
+	ret = regmap_bulk_read(info->rtc_regmap,
+			       info->drv_data->map[REG_RTC_SEC],
+			       data, ARRAY_SIZE(data));
 	if (ret < 0) {
-		dev_err(info->dev, "%s: fail to read time reg(%d)\n", __func__,	ret);
+		dev_err(info->dev, "Fail to read time reg(%d)\n", ret);
 		goto out;
 	}
 
-	max77686_rtc_data_to_tm(data, tm, info->rtc_24hr_mode);
+	max77686_rtc_data_to_tm(data, tm, info);
 
 	ret = rtc_valid_tm(tm);
 
@@ -170,17 +377,17 @@
 	u8 data[RTC_NR_TIME];
 	int ret;
 
-	ret = max77686_rtc_tm_to_data(tm, data);
+	ret = max77686_rtc_tm_to_data(tm, data, info);
 	if (ret < 0)
 		return ret;
 
 	mutex_lock(&info->lock);
 
-	ret = regmap_bulk_write(info->max77686->rtc_regmap,
-				 MAX77686_RTC_SEC, data, RTC_NR_TIME);
+	ret = regmap_bulk_write(info->rtc_regmap,
+				info->drv_data->map[REG_RTC_SEC],
+				data, ARRAY_SIZE(data));
 	if (ret < 0) {
-		dev_err(info->dev, "%s: fail to write time reg(%d)\n", __func__,
-				ret);
+		dev_err(info->dev, "Fail to write time reg(%d)\n", ret);
 		goto out;
 	}
 
@@ -196,6 +403,7 @@
 	struct max77686_rtc_info *info = dev_get_drvdata(dev);
 	u8 data[RTC_NR_TIME];
 	unsigned int val;
+	const unsigned int *map = info->drv_data->map;
 	int i, ret;
 
 	mutex_lock(&info->lock);
@@ -204,29 +412,53 @@
 	if (ret < 0)
 		goto out;
 
-	ret = regmap_bulk_read(info->max77686->rtc_regmap,
-				 MAX77686_ALARM1_SEC, data, RTC_NR_TIME);
+	ret = regmap_bulk_read(info->rtc_regmap, map[REG_ALARM1_SEC],
+			       data, ARRAY_SIZE(data));
 	if (ret < 0) {
-		dev_err(info->dev, "%s:%d fail to read alarm reg(%d)\n",
-				__func__, __LINE__, ret);
+		dev_err(info->dev, "Fail to read alarm reg(%d)\n", ret);
 		goto out;
 	}
 
-	max77686_rtc_data_to_tm(data, &alrm->time, info->rtc_24hr_mode);
+	max77686_rtc_data_to_tm(data, &alrm->time, info);
 
 	alrm->enabled = 0;
-	for (i = 0; i < RTC_NR_TIME; i++) {
-		if (data[i] & ALARM_ENABLE_MASK) {
+
+	if (info->drv_data->alarm_enable_reg) {
+		if (map[REG_RTC_AE1] == REG_RTC_NONE) {
+			ret = -EINVAL;
+			dev_err(info->dev,
+				"alarm enable register not set(%d)\n", ret);
+			goto out;
+		}
+
+		ret = regmap_read(info->rtc_regmap, map[REG_RTC_AE1], &val);
+		if (ret < 0) {
+			dev_err(info->dev,
+				"fail to read alarm enable(%d)\n", ret);
+			goto out;
+		}
+
+		if (val)
 			alrm->enabled = 1;
-			break;
+	} else {
+		for (i = 0; i < ARRAY_SIZE(data); i++) {
+			if (data[i] & ALARM_ENABLE_MASK) {
+				alrm->enabled = 1;
+				break;
+			}
 		}
 	}
 
 	alrm->pending = 0;
-	ret = regmap_read(info->max77686->regmap, MAX77686_REG_STATUS2, &val);
+
+	if (info->drv_data->alarm_pending_status_reg == MAX77686_INVALID_REG)
+		goto out;
+
+	ret = regmap_read(info->regmap,
+			  info->drv_data->alarm_pending_status_reg, &val);
 	if (ret < 0) {
-		dev_err(info->dev, "%s:%d fail to read status2 reg(%d)\n",
-				__func__, __LINE__, ret);
+		dev_err(info->dev,
+			"Fail to read alarm pending status reg(%d)\n", ret);
 		goto out;
 	}
 
@@ -235,7 +467,7 @@
 
 out:
 	mutex_unlock(&info->lock);
-	return 0;
+	return ret;
 }
 
 static int max77686_rtc_stop_alarm(struct max77686_rtc_info *info)
@@ -243,6 +475,7 @@
 	u8 data[RTC_NR_TIME];
 	int ret, i;
 	struct rtc_time tm;
+	const unsigned int *map = info->drv_data->map;
 
 	if (!mutex_is_locked(&info->lock))
 		dev_warn(info->dev, "%s: should have mutex locked\n", __func__);
@@ -251,24 +484,34 @@
 	if (ret < 0)
 		goto out;
 
-	ret = regmap_bulk_read(info->max77686->rtc_regmap,
-				 MAX77686_ALARM1_SEC, data, RTC_NR_TIME);
-	if (ret < 0) {
-		dev_err(info->dev, "%s: fail to read alarm reg(%d)\n",
-				__func__, ret);
-		goto out;
+	if (info->drv_data->alarm_enable_reg) {
+		if (map[REG_RTC_AE1] == REG_RTC_NONE) {
+			ret = -EINVAL;
+			dev_err(info->dev,
+				"alarm enable register not set(%d)\n", ret);
+			goto out;
+		}
+
+		ret = regmap_write(info->rtc_regmap, map[REG_RTC_AE1], 0);
+	} else {
+		ret = regmap_bulk_read(info->rtc_regmap, map[REG_ALARM1_SEC],
+				       data, ARRAY_SIZE(data));
+		if (ret < 0) {
+			dev_err(info->dev, "Fail to read alarm reg(%d)\n", ret);
+			goto out;
+		}
+
+		max77686_rtc_data_to_tm(data, &tm, info);
+
+		for (i = 0; i < ARRAY_SIZE(data); i++)
+			data[i] &= ~ALARM_ENABLE_MASK;
+
+		ret = regmap_bulk_write(info->rtc_regmap, map[REG_ALARM1_SEC],
+					data, ARRAY_SIZE(data));
 	}
 
-	max77686_rtc_data_to_tm(data, &tm, info->rtc_24hr_mode);
-
-	for (i = 0; i < RTC_NR_TIME; i++)
-		data[i] &= ~ALARM_ENABLE_MASK;
-
-	ret = regmap_bulk_write(info->max77686->rtc_regmap,
-				 MAX77686_ALARM1_SEC, data, RTC_NR_TIME);
 	if (ret < 0) {
-		dev_err(info->dev, "%s: fail to write alarm reg(%d)\n",
-				__func__, ret);
+		dev_err(info->dev, "Fail to write alarm reg(%d)\n", ret);
 		goto out;
 	}
 
@@ -282,6 +525,7 @@
 	u8 data[RTC_NR_TIME];
 	int ret;
 	struct rtc_time tm;
+	const unsigned int *map = info->drv_data->map;
 
 	if (!mutex_is_locked(&info->lock))
 		dev_warn(info->dev, "%s: should have mutex locked\n", __func__);
@@ -290,32 +534,36 @@
 	if (ret < 0)
 		goto out;
 
-	ret = regmap_bulk_read(info->max77686->rtc_regmap,
-				 MAX77686_ALARM1_SEC, data, RTC_NR_TIME);
-	if (ret < 0) {
-		dev_err(info->dev, "%s: fail to read alarm reg(%d)\n",
-				__func__, ret);
-		goto out;
+	if (info->drv_data->alarm_enable_reg) {
+		ret = regmap_write(info->rtc_regmap, map[REG_RTC_AE1],
+				   MAX77802_ALARM_ENABLE_VALUE);
+	} else {
+		ret = regmap_bulk_read(info->rtc_regmap, map[REG_ALARM1_SEC],
+				       data, ARRAY_SIZE(data));
+		if (ret < 0) {
+			dev_err(info->dev, "Fail to read alarm reg(%d)\n", ret);
+			goto out;
+		}
+
+		max77686_rtc_data_to_tm(data, &tm, info);
+
+		data[RTC_SEC] |= (1 << ALARM_ENABLE_SHIFT);
+		data[RTC_MIN] |= (1 << ALARM_ENABLE_SHIFT);
+		data[RTC_HOUR] |= (1 << ALARM_ENABLE_SHIFT);
+		data[RTC_WEEKDAY] &= ~ALARM_ENABLE_MASK;
+		if (data[RTC_MONTH] & 0xf)
+			data[RTC_MONTH] |= (1 << ALARM_ENABLE_SHIFT);
+		if (data[RTC_YEAR] & info->drv_data->mask)
+			data[RTC_YEAR] |= (1 << ALARM_ENABLE_SHIFT);
+		if (data[RTC_DATE] & 0x1f)
+			data[RTC_DATE] |= (1 << ALARM_ENABLE_SHIFT);
+
+		ret = regmap_bulk_write(info->rtc_regmap, map[REG_ALARM1_SEC],
+					data, ARRAY_SIZE(data));
 	}
 
-	max77686_rtc_data_to_tm(data, &tm, info->rtc_24hr_mode);
-
-	data[RTC_SEC] |= (1 << ALARM_ENABLE_SHIFT);
-	data[RTC_MIN] |= (1 << ALARM_ENABLE_SHIFT);
-	data[RTC_HOUR] |= (1 << ALARM_ENABLE_SHIFT);
-	data[RTC_WEEKDAY] &= ~ALARM_ENABLE_MASK;
-	if (data[RTC_MONTH] & 0xf)
-		data[RTC_MONTH] |= (1 << ALARM_ENABLE_SHIFT);
-	if (data[RTC_YEAR] & 0x7f)
-		data[RTC_YEAR] |= (1 << ALARM_ENABLE_SHIFT);
-	if (data[RTC_DATE] & 0x1f)
-		data[RTC_DATE] |= (1 << ALARM_ENABLE_SHIFT);
-
-	ret = regmap_bulk_write(info->max77686->rtc_regmap,
-				 MAX77686_ALARM1_SEC, data, RTC_NR_TIME);
 	if (ret < 0) {
-		dev_err(info->dev, "%s: fail to write alarm reg(%d)\n",
-				__func__, ret);
+		dev_err(info->dev, "Fail to write alarm reg(%d)\n", ret);
 		goto out;
 	}
 
@@ -330,7 +578,7 @@
 	u8 data[RTC_NR_TIME];
 	int ret;
 
-	ret = max77686_rtc_tm_to_data(&alrm->time, data);
+	ret = max77686_rtc_tm_to_data(&alrm->time, data, info);
 	if (ret < 0)
 		return ret;
 
@@ -340,12 +588,12 @@
 	if (ret < 0)
 		goto out;
 
-	ret = regmap_bulk_write(info->max77686->rtc_regmap,
-				 MAX77686_ALARM1_SEC, data, RTC_NR_TIME);
+	ret = regmap_bulk_write(info->rtc_regmap,
+				info->drv_data->map[REG_ALARM1_SEC],
+				data, ARRAY_SIZE(data));
 
 	if (ret < 0) {
-		dev_err(info->dev, "%s: fail to write alarm reg(%d)\n",
-				__func__, ret);
+		dev_err(info->dev, "Fail to write alarm reg(%d)\n", ret);
 		goto out;
 	}
 
@@ -361,7 +609,7 @@
 }
 
 static int max77686_rtc_alarm_irq_enable(struct device *dev,
-					unsigned int enabled)
+					 unsigned int enabled)
 {
 	struct max77686_rtc_info *info = dev_get_drvdata(dev);
 	int ret;
@@ -380,7 +628,7 @@
 {
 	struct max77686_rtc_info *info = data;
 
-	dev_info(info->dev, "%s:irq(%d)\n", __func__, irq);
+	dev_dbg(info->dev, "RTC alarm IRQ: %d\n", irq);
 
 	rtc_update_irq(info->rtc_dev, 1, RTC_IRQF | RTC_AF);
 
@@ -406,10 +654,11 @@
 
 	info->rtc_24hr_mode = 1;
 
-	ret = regmap_bulk_write(info->max77686->rtc_regmap, MAX77686_RTC_CONTROLM, data, 2);
+	ret = regmap_bulk_write(info->rtc_regmap,
+				info->drv_data->map[REG_RTC_CONTROLM],
+				data, ARRAY_SIZE(data));
 	if (ret < 0) {
-		dev_err(info->dev, "%s: fail to write controlm reg(%d)\n",
-				__func__, ret);
+		dev_err(info->dev, "Fail to write controlm reg(%d)\n", ret);
 		return ret;
 	}
 
@@ -417,28 +666,97 @@
 	return ret;
 }
 
-static int max77686_rtc_probe(struct platform_device *pdev)
+static const struct regmap_config max77686_rtc_regmap_config = {
+	.reg_bits = 8,
+	.val_bits = 8,
+};
+
+static int max77686_init_rtc_regmap(struct max77686_rtc_info *info)
 {
-	struct max77686_dev *max77686 = dev_get_drvdata(pdev->dev.parent);
-	struct max77686_rtc_info *info;
+	struct device *parent = info->dev->parent;
+	struct i2c_client *parent_i2c = to_i2c_client(parent);
 	int ret;
 
-	dev_info(&pdev->dev, "%s\n", __func__);
+	if (info->drv_data->rtc_irq_from_platform) {
+		struct platform_device *pdev = to_platform_device(info->dev);
+
+		info->rtc_irq = platform_get_irq(pdev, 0);
+		if (info->rtc_irq < 0) {
+			dev_err(info->dev, "Failed to get rtc interrupts: %d\n",
+				info->rtc_irq);
+			return info->rtc_irq;
+		}
+	} else {
+		info->rtc_irq =  parent_i2c->irq;
+	}
+
+	info->regmap = dev_get_regmap(parent, NULL);
+	if (!info->regmap) {
+		dev_err(info->dev, "Failed to get rtc regmap\n");
+		return -ENODEV;
+	}
+
+	if (info->drv_data->rtc_i2c_addr == MAX77686_INVALID_I2C_ADDR) {
+		info->rtc_regmap = info->regmap;
+		goto add_rtc_irq;
+	}
+
+	info->rtc = i2c_new_dummy(parent_i2c->adapter,
+				  info->drv_data->rtc_i2c_addr);
+	if (!info->rtc) {
+		dev_err(info->dev, "Failed to allocate I2C device for RTC\n");
+		return -ENODEV;
+	}
+
+	info->rtc_regmap = devm_regmap_init_i2c(info->rtc,
+						&max77686_rtc_regmap_config);
+	if (IS_ERR(info->rtc_regmap)) {
+		ret = PTR_ERR(info->rtc_regmap);
+		dev_err(info->dev, "Failed to allocate RTC regmap: %d\n", ret);
+		goto err_unregister_i2c;
+	}
+
+add_rtc_irq:
+	ret = regmap_add_irq_chip(info->rtc_regmap, info->rtc_irq,
+				  IRQF_TRIGGER_FALLING | IRQF_ONESHOT |
+				  IRQF_SHARED, 0, info->drv_data->rtc_irq_chip,
+				  &info->rtc_irq_data);
+	if (ret < 0) {
+		dev_err(info->dev, "Failed to add RTC irq chip: %d\n", ret);
+		goto err_unregister_i2c;
+	}
+
+	return 0;
+
+err_unregister_i2c:
+	if (info->rtc)
+		i2c_unregister_device(info->rtc);
+	return ret;
+}
+
+static int max77686_rtc_probe(struct platform_device *pdev)
+{
+	struct max77686_rtc_info *info;
+	const struct platform_device_id *id = platform_get_device_id(pdev);
+	int ret;
 
 	info = devm_kzalloc(&pdev->dev, sizeof(struct max77686_rtc_info),
-				GFP_KERNEL);
+			    GFP_KERNEL);
 	if (!info)
 		return -ENOMEM;
 
 	mutex_init(&info->lock);
 	info->dev = &pdev->dev;
-	info->max77686 = max77686;
-	info->rtc = max77686->rtc;
+	info->drv_data = (const struct max77686_rtc_driver_data *)
+		id->driver_data;
+
+	ret = max77686_init_rtc_regmap(info);
+	if (ret < 0)
+		return ret;
 
 	platform_set_drvdata(pdev, info);
 
 	ret = max77686_rtc_init_reg(info);
-
 	if (ret < 0) {
 		dev_err(&pdev->dev, "Failed to initialize RTC reg:%d\n", ret);
 		goto err_rtc;
@@ -446,7 +764,7 @@
 
 	device_init_wakeup(&pdev->dev, 1);
 
-	info->rtc_dev = devm_rtc_device_register(&pdev->dev, "max77686-rtc",
+	info->rtc_dev = devm_rtc_device_register(&pdev->dev, id->name,
 					&max77686_rtc_ops, THIS_MODULE);
 
 	if (IS_ERR(info->rtc_dev)) {
@@ -457,29 +775,43 @@
 		goto err_rtc;
 	}
 
-	if (!max77686->rtc_irq_data) {
-		ret = -EINVAL;
-		dev_err(&pdev->dev, "%s: no RTC regmap IRQ chip\n", __func__);
-		goto err_rtc;
-	}
-
-	info->virq = regmap_irq_get_virq(max77686->rtc_irq_data,
+	info->virq = regmap_irq_get_virq(info->rtc_irq_data,
 					 MAX77686_RTCIRQ_RTCA1);
-	if (!info->virq) {
+	if (info->virq <= 0) {
 		ret = -ENXIO;
 		goto err_rtc;
 	}
 
-	ret = devm_request_threaded_irq(&pdev->dev, info->virq, NULL,
-				max77686_rtc_alarm_irq, 0, "rtc-alarm1", info);
-	if (ret < 0)
+	ret = request_threaded_irq(info->virq, NULL, max77686_rtc_alarm_irq, 0,
+				   "rtc-alarm1", info);
+	if (ret < 0) {
 		dev_err(&pdev->dev, "Failed to request alarm IRQ: %d: %d\n",
 			info->virq, ret);
+		goto err_rtc;
+	}
+
+	return 0;
 
 err_rtc:
+	regmap_del_irq_chip(info->rtc_irq, info->rtc_irq_data);
+	if (info->rtc)
+		i2c_unregister_device(info->rtc);
+
 	return ret;
 }
 
+static int max77686_rtc_remove(struct platform_device *pdev)
+{
+	struct max77686_rtc_info *info = platform_get_drvdata(pdev);
+
+	free_irq(info->virq, info);
+	regmap_del_irq_chip(info->rtc_irq, info->rtc_irq_data);
+	if (info->rtc)
+		i2c_unregister_device(info->rtc);
+
+	return 0;
+}
+
 #ifdef CONFIG_PM_SLEEP
 static int max77686_rtc_suspend(struct device *dev)
 {
@@ -508,7 +840,9 @@
 			 max77686_rtc_suspend, max77686_rtc_resume);
 
 static const struct platform_device_id rtc_id[] = {
-	{ "max77686-rtc", 0 },
+	{ "max77686-rtc", .driver_data = (kernel_ulong_t)&max77686_drv_data, },
+	{ "max77802-rtc", .driver_data = (kernel_ulong_t)&max77802_drv_data, },
+	{ "max77620-rtc", .driver_data = (kernel_ulong_t)&max77620_drv_data, },
 	{},
 };
 MODULE_DEVICE_TABLE(platform, rtc_id);
@@ -519,6 +853,7 @@
 		.pm	= &max77686_rtc_pm_ops,
 	},
 	.probe		= max77686_rtc_probe,
+	.remove		= max77686_rtc_remove,
 	.id_table	= rtc_id,
 };
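
The consolidated max77686 driver above selects per-chip behaviour through driver_data in its platform_device_id table. A minimal sketch of that pattern; the example_* names and fields are assumptions, not the driver's own.

	#include <linux/device.h>
	#include <linux/platform_device.h>

	struct example_chip_data {
		unsigned long	delay_us;	/* RTC update delay */
		bool		separate_ae;	/* has an alarm-enable register? */
	};

	static const struct example_chip_data example_77686_data = { 16000, false };
	static const struct example_chip_data example_77802_data = {   200, true };

	static const struct platform_device_id example_rtc_ids[] = {
		{ "example-77686", .driver_data = (kernel_ulong_t)&example_77686_data },
		{ "example-77802", .driver_data = (kernel_ulong_t)&example_77802_data },
		{ }
	};

	static int example_rtc_probe(struct platform_device *pdev)
	{
		const struct platform_device_id *id = platform_get_device_id(pdev);
		const struct example_chip_data *data =
			(const struct example_chip_data *)id->driver_data;

		dev_info(&pdev->dev, "RTC update delay %lu us\n", data->delay_us);
		return 0;
	}
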
 
diff --git a/drivers/rtc/rtc-max77802.c b/drivers/rtc/rtc-max77802.c
deleted file mode 100644
index 82ffcc5..0000000
--- a/drivers/rtc/rtc-max77802.c
+++ /dev/null
@@ -1,502 +0,0 @@
-/*
- * RTC driver for Maxim MAX77802
- *
- * Copyright (C) 2013 Google, Inc
- *
- * Copyright (C) 2012 Samsung Electronics Co.Ltd
- *
- *  based on rtc-max8997.c
- *
- *  This program is free software; you can redistribute  it and/or modify it
- *  under  the terms of  the GNU General  Public License as published by the
- *  Free Software Foundation;  either version 2 of the  License, or (at your
- *  option) any later version.
- *
- */
-
-#include <linux/slab.h>
-#include <linux/rtc.h>
-#include <linux/delay.h>
-#include <linux/mutex.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/mfd/max77686-private.h>
-#include <linux/irqdomain.h>
-#include <linux/regmap.h>
-
-/* RTC Control Register */
-#define BCD_EN_SHIFT			0
-#define BCD_EN_MASK			(1 << BCD_EN_SHIFT)
-#define MODEL24_SHIFT			1
-#define MODEL24_MASK			(1 << MODEL24_SHIFT)
-/* RTC Update Register1 */
-#define RTC_UDR_SHIFT			0
-#define RTC_UDR_MASK			(1 << RTC_UDR_SHIFT)
-#define RTC_RBUDR_SHIFT			4
-#define RTC_RBUDR_MASK			(1 << RTC_RBUDR_SHIFT)
-/* RTC Hour register */
-#define HOUR_PM_SHIFT			6
-#define HOUR_PM_MASK			(1 << HOUR_PM_SHIFT)
-/* RTC Alarm Enable */
-#define ALARM_ENABLE_SHIFT		7
-#define ALARM_ENABLE_MASK		(1 << ALARM_ENABLE_SHIFT)
-
-/* For the RTCAE1 register, we write this value to enable the alarm */
-#define ALARM_ENABLE_VALUE		0x77
-
-#define MAX77802_RTC_UPDATE_DELAY_US	200
-
-enum {
-	RTC_SEC = 0,
-	RTC_MIN,
-	RTC_HOUR,
-	RTC_WEEKDAY,
-	RTC_MONTH,
-	RTC_YEAR,
-	RTC_DATE,
-	RTC_NR_TIME
-};
-
-struct max77802_rtc_info {
-	struct device		*dev;
-	struct max77686_dev	*max77802;
-	struct i2c_client	*rtc;
-	struct rtc_device	*rtc_dev;
-	struct mutex		lock;
-
-	struct regmap		*regmap;
-
-	int virq;
-	int rtc_24hr_mode;
-};
-
-enum MAX77802_RTC_OP {
-	MAX77802_RTC_WRITE,
-	MAX77802_RTC_READ,
-};
-
-static void max77802_rtc_data_to_tm(u8 *data, struct rtc_time *tm,
-				   int rtc_24hr_mode)
-{
-	tm->tm_sec = data[RTC_SEC] & 0xff;
-	tm->tm_min = data[RTC_MIN] & 0xff;
-	if (rtc_24hr_mode)
-		tm->tm_hour = data[RTC_HOUR] & 0x1f;
-	else {
-		tm->tm_hour = data[RTC_HOUR] & 0x0f;
-		if (data[RTC_HOUR] & HOUR_PM_MASK)
-			tm->tm_hour += 12;
-	}
-
-	/* Only a single bit is set in data[], so fls() would be equivalent */
-	tm->tm_wday = ffs(data[RTC_WEEKDAY] & 0xff) - 1;
-	tm->tm_mday = data[RTC_DATE] & 0x1f;
-	tm->tm_mon = (data[RTC_MONTH] & 0x0f) - 1;
-
-	tm->tm_year = data[RTC_YEAR] & 0xff;
-	tm->tm_yday = 0;
-	tm->tm_isdst = 0;
-}
-
-static int max77802_rtc_tm_to_data(struct rtc_time *tm, u8 *data)
-{
-	data[RTC_SEC] = tm->tm_sec;
-	data[RTC_MIN] = tm->tm_min;
-	data[RTC_HOUR] = tm->tm_hour;
-	data[RTC_WEEKDAY] = 1 << tm->tm_wday;
-	data[RTC_DATE] = tm->tm_mday;
-	data[RTC_MONTH] = tm->tm_mon + 1;
-	data[RTC_YEAR] = tm->tm_year;
-
-	return 0;
-}
-
-static int max77802_rtc_update(struct max77802_rtc_info *info,
-	enum MAX77802_RTC_OP op)
-{
-	int ret;
-	unsigned int data;
-
-	if (op == MAX77802_RTC_WRITE)
-		data = 1 << RTC_UDR_SHIFT;
-	else
-		data = 1 << RTC_RBUDR_SHIFT;
-
-	ret = regmap_update_bits(info->max77802->regmap,
-				 MAX77802_RTC_UPDATE0, data, data);
-	if (ret < 0)
-		dev_err(info->dev, "%s: fail to write update reg(ret=%d, data=0x%x)\n",
-				__func__, ret, data);
-	else {
-		/* Minimum delay required before RTC update. */
-		usleep_range(MAX77802_RTC_UPDATE_DELAY_US,
-			     MAX77802_RTC_UPDATE_DELAY_US * 2);
-	}
-
-	return ret;
-}
-
-static int max77802_rtc_read_time(struct device *dev, struct rtc_time *tm)
-{
-	struct max77802_rtc_info *info = dev_get_drvdata(dev);
-	u8 data[RTC_NR_TIME];
-	int ret;
-
-	mutex_lock(&info->lock);
-
-	ret = max77802_rtc_update(info, MAX77802_RTC_READ);
-	if (ret < 0)
-		goto out;
-
-	ret = regmap_bulk_read(info->max77802->regmap,
-				MAX77802_RTC_SEC, data, RTC_NR_TIME);
-	if (ret < 0) {
-		dev_err(info->dev, "%s: fail to read time reg(%d)\n", __func__,
-			ret);
-		goto out;
-	}
-
-	max77802_rtc_data_to_tm(data, tm, info->rtc_24hr_mode);
-
-	ret = rtc_valid_tm(tm);
-
-out:
-	mutex_unlock(&info->lock);
-	return ret;
-}
-
-static int max77802_rtc_set_time(struct device *dev, struct rtc_time *tm)
-{
-	struct max77802_rtc_info *info = dev_get_drvdata(dev);
-	u8 data[RTC_NR_TIME];
-	int ret;
-
-	ret = max77802_rtc_tm_to_data(tm, data);
-	if (ret < 0)
-		return ret;
-
-	mutex_lock(&info->lock);
-
-	ret = regmap_bulk_write(info->max77802->regmap,
-				 MAX77802_RTC_SEC, data, RTC_NR_TIME);
-	if (ret < 0) {
-		dev_err(info->dev, "%s: fail to write time reg(%d)\n", __func__,
-			ret);
-		goto out;
-	}
-
-	ret = max77802_rtc_update(info, MAX77802_RTC_WRITE);
-
-out:
-	mutex_unlock(&info->lock);
-	return ret;
-}
-
-static int max77802_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
-{
-	struct max77802_rtc_info *info = dev_get_drvdata(dev);
-	u8 data[RTC_NR_TIME];
-	unsigned int val;
-	int ret;
-
-	mutex_lock(&info->lock);
-
-	ret = max77802_rtc_update(info, MAX77802_RTC_READ);
-	if (ret < 0)
-		goto out;
-
-	ret = regmap_bulk_read(info->max77802->regmap,
-				 MAX77802_ALARM1_SEC, data, RTC_NR_TIME);
-	if (ret < 0) {
-		dev_err(info->dev, "%s:%d fail to read alarm reg(%d)\n",
-				__func__, __LINE__, ret);
-		goto out;
-	}
-
-	max77802_rtc_data_to_tm(data, &alrm->time, info->rtc_24hr_mode);
-
-	alrm->enabled = 0;
-	ret = regmap_read(info->max77802->regmap,
-			  MAX77802_RTC_AE1, &val);
-	if (ret < 0) {
-		dev_err(info->dev, "%s:%d fail to read alarm enable(%d)\n",
-			__func__, __LINE__, ret);
-		goto out;
-	}
-	if (val)
-		alrm->enabled = 1;
-
-	alrm->pending = 0;
-	ret = regmap_read(info->max77802->regmap, MAX77802_REG_STATUS2, &val);
-	if (ret < 0) {
-		dev_err(info->dev, "%s:%d fail to read status2 reg(%d)\n",
-				__func__, __LINE__, ret);
-		goto out;
-	}
-
-	if (val & (1 << 2)) /* RTCA1 */
-		alrm->pending = 1;
-
-out:
-	mutex_unlock(&info->lock);
-	return 0;
-}
-
-static int max77802_rtc_stop_alarm(struct max77802_rtc_info *info)
-{
-	int ret;
-
-	if (!mutex_is_locked(&info->lock))
-		dev_warn(info->dev, "%s: should have mutex locked\n", __func__);
-
-	ret = max77802_rtc_update(info, MAX77802_RTC_READ);
-	if (ret < 0)
-		goto out;
-
-	ret = regmap_write(info->max77802->regmap,
-			   MAX77802_RTC_AE1, 0);
-	if (ret < 0) {
-		dev_err(info->dev, "%s: fail to write alarm reg(%d)\n",
-			__func__, ret);
-		goto out;
-	}
-
-	ret = max77802_rtc_update(info, MAX77802_RTC_WRITE);
-out:
-	return ret;
-}
-
-static int max77802_rtc_start_alarm(struct max77802_rtc_info *info)
-{
-	int ret;
-
-	if (!mutex_is_locked(&info->lock))
-		dev_warn(info->dev, "%s: should have mutex locked\n",
-			 __func__);
-
-	ret = max77802_rtc_update(info, MAX77802_RTC_READ);
-	if (ret < 0)
-		goto out;
-
-	ret = regmap_write(info->max77802->regmap,
-				   MAX77802_RTC_AE1,
-				   ALARM_ENABLE_VALUE);
-
-	if (ret < 0) {
-		dev_err(info->dev, "%s: fail to read alarm reg(%d)\n",
-				__func__, ret);
-		goto out;
-	}
-
-	ret = max77802_rtc_update(info, MAX77802_RTC_WRITE);
-out:
-	return ret;
-}
-
-static int max77802_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
-{
-	struct max77802_rtc_info *info = dev_get_drvdata(dev);
-	u8 data[RTC_NR_TIME];
-	int ret;
-
-	ret = max77802_rtc_tm_to_data(&alrm->time, data);
-	if (ret < 0)
-		return ret;
-
-	mutex_lock(&info->lock);
-
-	ret = max77802_rtc_stop_alarm(info);
-	if (ret < 0)
-		goto out;
-
-	ret = regmap_bulk_write(info->max77802->regmap,
-				 MAX77802_ALARM1_SEC, data, RTC_NR_TIME);
-
-	if (ret < 0) {
-		dev_err(info->dev, "%s: fail to write alarm reg(%d)\n",
-				__func__, ret);
-		goto out;
-	}
-
-	ret = max77802_rtc_update(info, MAX77802_RTC_WRITE);
-	if (ret < 0)
-		goto out;
-
-	if (alrm->enabled)
-		ret = max77802_rtc_start_alarm(info);
-out:
-	mutex_unlock(&info->lock);
-	return ret;
-}
-
-static int max77802_rtc_alarm_irq_enable(struct device *dev,
-					 unsigned int enabled)
-{
-	struct max77802_rtc_info *info = dev_get_drvdata(dev);
-	int ret;
-
-	mutex_lock(&info->lock);
-	if (enabled)
-		ret = max77802_rtc_start_alarm(info);
-	else
-		ret = max77802_rtc_stop_alarm(info);
-	mutex_unlock(&info->lock);
-
-	return ret;
-}
-
-static irqreturn_t max77802_rtc_alarm_irq(int irq, void *data)
-{
-	struct max77802_rtc_info *info = data;
-
-	dev_dbg(info->dev, "%s:irq(%d)\n", __func__, irq);
-
-	rtc_update_irq(info->rtc_dev, 1, RTC_IRQF | RTC_AF);
-
-	return IRQ_HANDLED;
-}
-
-static const struct rtc_class_ops max77802_rtc_ops = {
-	.read_time = max77802_rtc_read_time,
-	.set_time = max77802_rtc_set_time,
-	.read_alarm = max77802_rtc_read_alarm,
-	.set_alarm = max77802_rtc_set_alarm,
-	.alarm_irq_enable = max77802_rtc_alarm_irq_enable,
-};
-
-static int max77802_rtc_init_reg(struct max77802_rtc_info *info)
-{
-	u8 data[2];
-	int ret;
-
-	max77802_rtc_update(info, MAX77802_RTC_READ);
-
-	/* Set RTC control register : Binary mode, 24 hour mode */
-	data[0] = (1 << BCD_EN_SHIFT) | (1 << MODEL24_SHIFT);
-	data[1] = (0 << BCD_EN_SHIFT) | (1 << MODEL24_SHIFT);
-
-	info->rtc_24hr_mode = 1;
-
-	ret = regmap_bulk_write(info->max77802->regmap,
-				MAX77802_RTC_CONTROLM, data, ARRAY_SIZE(data));
-	if (ret < 0) {
-		dev_err(info->dev, "%s: fail to write controlm reg(%d)\n",
-				__func__, ret);
-		return ret;
-	}
-
-	ret = max77802_rtc_update(info, MAX77802_RTC_WRITE);
-	return ret;
-}
-
-static int max77802_rtc_probe(struct platform_device *pdev)
-{
-	struct max77686_dev *max77802 = dev_get_drvdata(pdev->dev.parent);
-	struct max77802_rtc_info *info;
-	int ret;
-
-	dev_dbg(&pdev->dev, "%s\n", __func__);
-
-	info = devm_kzalloc(&pdev->dev, sizeof(struct max77802_rtc_info),
-			    GFP_KERNEL);
-	if (!info)
-		return -ENOMEM;
-
-	mutex_init(&info->lock);
-	info->dev = &pdev->dev;
-	info->max77802 = max77802;
-	info->rtc = max77802->i2c;
-
-	platform_set_drvdata(pdev, info);
-
-	ret = max77802_rtc_init_reg(info);
-
-	if (ret < 0) {
-		dev_err(&pdev->dev, "Failed to initialize RTC reg:%d\n", ret);
-		return ret;
-	}
-
-	device_init_wakeup(&pdev->dev, 1);
-
-	info->rtc_dev = devm_rtc_device_register(&pdev->dev, "max77802-rtc",
-						 &max77802_rtc_ops, THIS_MODULE);
-
-	if (IS_ERR(info->rtc_dev)) {
-		ret = PTR_ERR(info->rtc_dev);
-		dev_err(&pdev->dev, "Failed to register RTC device: %d\n", ret);
-		if (ret == 0)
-			ret = -EINVAL;
-		return ret;
-	}
-
-	if (!max77802->rtc_irq_data) {
-		dev_err(&pdev->dev, "No RTC regmap IRQ chip\n");
-		return -EINVAL;
-	}
-
-	info->virq = regmap_irq_get_virq(max77802->rtc_irq_data,
-					 MAX77686_RTCIRQ_RTCA1);
-
-	if (info->virq <= 0) {
-		dev_err(&pdev->dev, "Failed to get virtual IRQ %d\n",
-			MAX77686_RTCIRQ_RTCA1);
-		return -EINVAL;
-	}
-
-	ret = devm_request_threaded_irq(&pdev->dev, info->virq, NULL,
-					max77802_rtc_alarm_irq, 0, "rtc-alarm1",
-					info);
-	if (ret < 0)
-		dev_err(&pdev->dev, "Failed to request alarm IRQ: %d: %d\n",
-			info->virq, ret);
-
-	return ret;
-}
-
-#ifdef CONFIG_PM_SLEEP
-static int max77802_rtc_suspend(struct device *dev)
-{
-	if (device_may_wakeup(dev)) {
-		struct max77802_rtc_info *info = dev_get_drvdata(dev);
-
-		return enable_irq_wake(info->virq);
-	}
-
-	return 0;
-}
-
-static int max77802_rtc_resume(struct device *dev)
-{
-	if (device_may_wakeup(dev)) {
-		struct max77802_rtc_info *info = dev_get_drvdata(dev);
-
-		return disable_irq_wake(info->virq);
-	}
-
-	return 0;
-}
-#endif
-
-static SIMPLE_DEV_PM_OPS(max77802_rtc_pm_ops,
-			 max77802_rtc_suspend, max77802_rtc_resume);
-
-static const struct platform_device_id rtc_id[] = {
-	{ "max77802-rtc", 0 },
-	{},
-};
-MODULE_DEVICE_TABLE(platform, rtc_id);
-
-static struct platform_driver max77802_rtc_driver = {
-	.driver		= {
-		.name	= "max77802-rtc",
-		.pm	= &max77802_rtc_pm_ops,
-	},
-	.probe		= max77802_rtc_probe,
-	.id_table	= rtc_id,
-};
-
-module_platform_driver(max77802_rtc_driver);
-
-MODULE_DESCRIPTION("Maxim MAX77802 RTC driver");
-MODULE_AUTHOR("Simon Glass <sjg@chromium.org>");
-MODULE_LICENSE("GPL");
diff --git a/drivers/rtc/rtc-mt6397.c b/drivers/rtc/rtc-mt6397.c
index 06a5c52..44f622c 100644
--- a/drivers/rtc/rtc-mt6397.c
+++ b/drivers/rtc/rtc-mt6397.c
@@ -419,4 +419,3 @@
 MODULE_LICENSE("GPL v2");
 MODULE_AUTHOR("Tianping Fang <tianping.fang@mediatek.com>");
 MODULE_DESCRIPTION("RTC Driver for MediaTek MT6397 PMIC");
-MODULE_ALIAS("platform:mt6397-rtc");
diff --git a/drivers/rtc/rtc-palmas.c b/drivers/rtc/rtc-palmas.c
index 7ea2c47..6080e0e 100644
--- a/drivers/rtc/rtc-palmas.c
+++ b/drivers/rtc/rtc-palmas.c
@@ -311,8 +311,7 @@
 
 	ret = devm_request_threaded_irq(&pdev->dev, palmas_rtc->irq, NULL,
 			palmas_rtc_interrupt,
-			IRQF_TRIGGER_LOW | IRQF_ONESHOT |
-			IRQF_EARLY_RESUME,
+			IRQF_TRIGGER_LOW | IRQF_ONESHOT,
 			dev_name(&pdev->dev), palmas_rtc);
 	if (ret < 0) {
 		dev_err(&pdev->dev, "IRQ request failed, err = %d\n", ret);
diff --git a/drivers/rtc/rtc-pcf2123.c b/drivers/rtc/rtc-pcf2123.c
index ea8a31c..da27738 100644
--- a/drivers/rtc/rtc-pcf2123.c
+++ b/drivers/rtc/rtc-pcf2123.c
@@ -48,6 +48,7 @@
 
 #define DRV_VERSION "0.6"
 
+/* REGISTERS */
 #define PCF2123_REG_CTRL1	(0x00)	/* Control Register 1 */
 #define PCF2123_REG_CTRL2	(0x01)	/* Control Register 2 */
 #define PCF2123_REG_SC		(0x02)	/* datetime */
@@ -57,10 +58,54 @@
 #define PCF2123_REG_DW		(0x06)
 #define PCF2123_REG_MO		(0x07)
 #define PCF2123_REG_YR		(0x08)
+#define PCF2123_REG_ALRM_MN	(0x09)	/* Alarm Registers */
+#define PCF2123_REG_ALRM_HR	(0x0a)
+#define PCF2123_REG_ALRM_DM	(0x0b)
+#define PCF2123_REG_ALRM_DW	(0x0c)
+#define PCF2123_REG_OFFSET	(0x0d)	/* Clock Rate Offset Register */
+#define PCF2123_REG_TMR_CLKOUT	(0x0e)	/* Timer Registers */
+#define PCF2123_REG_CTDWN_TMR	(0x0f)
 
-#define PCF2123_SUBADDR		(1 << 4)
-#define PCF2123_WRITE		((0 << 7) | PCF2123_SUBADDR)
-#define PCF2123_READ		((1 << 7) | PCF2123_SUBADDR)
+/* PCF2123_REG_CTRL1 BITS */
+#define CTRL1_CLEAR		(0)	/* Clear */
+#define CTRL1_CORR_INT		BIT(1)	/* Correction irq enable */
+#define CTRL1_12_HOUR		BIT(2)	/* 12 hour time */
+#define CTRL1_SW_RESET	(BIT(3) | BIT(4) | BIT(6))	/* Software reset */
+#define CTRL1_STOP		BIT(5)	/* Stop the clock */
+#define CTRL1_EXT_TEST		BIT(7)	/* External clock test mode */
+
+/* PCF2123_REG_CTRL2 BITS */
+#define CTRL2_TIE		BIT(0)	/* Countdown timer irq enable */
+#define CTRL2_AIE		BIT(1)	/* Alarm irq enable */
+#define CTRL2_TF		BIT(2)	/* Countdown timer flag */
+#define CTRL2_AF		BIT(3)	/* Alarm flag */
+#define CTRL2_TI_TP		BIT(4)	/* Irq pin generates pulse */
+#define CTRL2_MSF		BIT(5)	/* Minute or second irq flag */
+#define CTRL2_SI		BIT(6)	/* Second irq enable */
+#define CTRL2_MI		BIT(7)	/* Minute irq enable */
+
+/* PCF2123_REG_SC BITS */
+#define OSC_HAS_STOPPED		BIT(7)	/* Clock has been stopped */
+
+/* PCF2123_REG_ALRM_XX BITS */
+#define ALRM_ENABLE		BIT(7)	/* MN, HR, DM, or DW alarm enable */
+
+/* PCF2123_REG_TMR_CLKOUT BITS */
+#define CD_TMR_4096KHZ		(0)	/* 4096 KHz countdown timer */
+#define CD_TMR_64HZ		(1)	/* 64 Hz countdown timer */
+#define CD_TMR_1HZ		(2)	/* 1 Hz countdown timer */
+#define CD_TMR_60th_HZ		(3)	/* 60th Hz countdown timer */
+#define CD_TMR_TE		BIT(3)	/* Countdown timer enable */
+
+/* PCF2123_REG_OFFSET BITS */
+#define OFFSET_SIGN_BIT		BIT(6)	/* 2's complement sign bit */
+#define OFFSET_COARSE		BIT(7)	/* Coarse mode offset */
+#define OFFSET_STEP		(2170)	/* Offset step in parts per billion */
+
+/* READ/WRITE ADDRESS BITS */
+#define PCF2123_WRITE		BIT(4)
+#define PCF2123_READ		(BIT(4) | BIT(7))
+
 
 static struct spi_driver pcf2123_driver;
 
@@ -84,12 +129,44 @@
 	ndelay(30);
 }
 
+static int pcf2123_read(struct device *dev, u8 reg, u8 *rxbuf, size_t size)
+{
+	struct spi_device *spi = to_spi_device(dev);
+	int ret;
+
+	reg |= PCF2123_READ;
+	ret = spi_write_then_read(spi, &reg, 1, rxbuf, size);
+	pcf2123_delay_trec();
+
+	return ret;
+}
+
+static int pcf2123_write(struct device *dev, u8 *txbuf, size_t size)
+{
+	struct spi_device *spi = to_spi_device(dev);
+	int ret;
+
+	txbuf[0] |= PCF2123_WRITE;
+	ret = spi_write(spi, txbuf, size);
+	pcf2123_delay_trec();
+
+	return ret;
+}
+
+static int pcf2123_write_reg(struct device *dev, u8 reg, u8 val)
+{
+	u8 txbuf[2];
+
+	txbuf[0] = reg;
+	txbuf[1] = val;
+	return pcf2123_write(dev, txbuf, sizeof(txbuf));
+}
+
 static ssize_t pcf2123_show(struct device *dev, struct device_attribute *attr,
 			    char *buffer)
 {
-	struct spi_device *spi = to_spi_device(dev);
 	struct pcf2123_sysfs_reg *r;
-	u8 txbuf[1], rxbuf[1];
+	u8 rxbuf[1];
 	unsigned long reg;
 	int ret;
 
@@ -99,19 +176,16 @@
 	if (ret)
 		return ret;
 
-	txbuf[0] = PCF2123_READ | reg;
-	ret = spi_write_then_read(spi, txbuf, 1, rxbuf, 1);
+	ret = pcf2123_read(dev, reg, rxbuf, 1);
 	if (ret < 0)
 		return -EIO;
-	pcf2123_delay_trec();
+
 	return sprintf(buffer, "0x%x\n", rxbuf[0]);
 }
 
 static ssize_t pcf2123_store(struct device *dev, struct device_attribute *attr,
 			     const char *buffer, size_t count) {
-	struct spi_device *spi = to_spi_device(dev);
 	struct pcf2123_sysfs_reg *r;
-	u8 txbuf[2];
 	unsigned long reg;
 	unsigned long val;
 
@@ -127,27 +201,78 @@
 	if (ret)
 		return ret;
 
-	txbuf[0] = PCF2123_WRITE | reg;
-	txbuf[1] = val;
-	ret = spi_write(spi, txbuf, sizeof(txbuf));
+	ret = pcf2123_write_reg(dev, reg, val);
 	if (ret < 0)
 		return -EIO;
-	pcf2123_delay_trec();
 	return count;
 }
 
+static int pcf2123_read_offset(struct device *dev, long *offset)
+{
+	int ret;
+	s8 reg;
+
+	ret = pcf2123_read(dev, PCF2123_REG_OFFSET, &reg, 1);
+	if (ret < 0)
+		return ret;
+
+	if (reg & OFFSET_COARSE)
+		reg <<= 1; /* multiply by 2 and sign extend */
+	else
+		reg |= (reg & OFFSET_SIGN_BIT) << 1; /* sign extend only */
+
+	*offset = ((long)reg) * OFFSET_STEP;
+
+	return 0;
+}
+
+/*
+ * The offset register is a 7 bit signed value with a coarse-mode bit in
+ * bit 7. The main difference between the two modes is that the normal
+ * offset adjusts the first second of n minutes every other hour, with
+ * 61, 62 and 63 being folded into the 60th minute.
+ * The coarse adjustment does the same, but every hour.
+ * The two ranges overlap, with every even normal offset value also
+ * corresponding to a coarse offset. Based on this, it seems that despite
+ * the name, the coarse offset is the better fit for those overlapping
+ * values.
+ */
+static int pcf2123_set_offset(struct device *dev, long offset)
+{
+	s8 reg;
+
+	if (offset > OFFSET_STEP * 127)
+		reg = 127;
+	else if (offset < OFFSET_STEP * -128)
+		reg = -128;
+	else
+		reg = (s8)((offset + (OFFSET_STEP >> 1)) / OFFSET_STEP);
+
+	/* choose fine offset only for odd values in the normal range */
+	if (reg & 1 && reg <= 63 && reg >= -64) {
+		/* Normal offset. Clear the coarse bit */
+		reg &= ~OFFSET_COARSE;
+	} else {
+		/* Coarse offset. Divide by 2 and set the coarse bit */
+		reg >>= 1;
+		reg |= OFFSET_COARSE;
+	}
+
+	return pcf2123_write_reg(dev, PCF2123_REG_OFFSET, reg);
+}
+
 static int pcf2123_rtc_read_time(struct device *dev, struct rtc_time *tm)
 {
-	struct spi_device *spi = to_spi_device(dev);
-	u8 txbuf[1], rxbuf[7];
+	u8 rxbuf[7];
 	int ret;
 
-	txbuf[0] = PCF2123_READ | PCF2123_REG_SC;
-	ret = spi_write_then_read(spi, txbuf, sizeof(txbuf),
-			rxbuf, sizeof(rxbuf));
+	ret = pcf2123_read(dev, PCF2123_REG_SC, rxbuf, sizeof(rxbuf));
 	if (ret < 0)
 		return ret;
-	pcf2123_delay_trec();
+
+	if (rxbuf[0] & OSC_HAS_STOPPED) {
+		dev_info(dev, "clock was stopped. Time is not valid\n");
+		return -EINVAL;
+	}
 
 	tm->tm_sec = bcd2bin(rxbuf[0] & 0x7F);
 	tm->tm_min = bcd2bin(rxbuf[1] & 0x7F);
@@ -170,7 +295,6 @@
 
 static int pcf2123_rtc_set_time(struct device *dev, struct rtc_time *tm)
 {
-	struct spi_device *spi = to_spi_device(dev);
 	u8 txbuf[8];
 	int ret;
 
@@ -181,15 +305,12 @@
 			tm->tm_mday, tm->tm_mon, tm->tm_year, tm->tm_wday);
 
 	/* Stop the counter first */
-	txbuf[0] = PCF2123_WRITE | PCF2123_REG_CTRL1;
-	txbuf[1] = 0x20;
-	ret = spi_write(spi, txbuf, 2);
+	ret = pcf2123_write_reg(dev, PCF2123_REG_CTRL1, CTRL1_STOP);
 	if (ret < 0)
 		return ret;
-	pcf2123_delay_trec();
 
 	/* Set the new time */
-	txbuf[0] = PCF2123_WRITE | PCF2123_REG_SC;
+	txbuf[0] = PCF2123_REG_SC;
 	txbuf[1] = bin2bcd(tm->tm_sec & 0x7F);
 	txbuf[2] = bin2bcd(tm->tm_min & 0x7F);
 	txbuf[3] = bin2bcd(tm->tm_hour & 0x3F);
@@ -198,18 +319,48 @@
 	txbuf[6] = bin2bcd((tm->tm_mon + 1) & 0x1F); /* rtc mn 1-12 */
 	txbuf[7] = bin2bcd(tm->tm_year < 100 ? tm->tm_year : tm->tm_year - 100);
 
-	ret = spi_write(spi, txbuf, sizeof(txbuf));
+	ret = pcf2123_write(dev, txbuf, sizeof(txbuf));
 	if (ret < 0)
 		return ret;
-	pcf2123_delay_trec();
 
 	/* Start the counter */
-	txbuf[0] = PCF2123_WRITE | PCF2123_REG_CTRL1;
-	txbuf[1] = 0x00;
-	ret = spi_write(spi, txbuf, 2);
+	ret = pcf2123_write_reg(dev, PCF2123_REG_CTRL1, CTRL1_CLEAR);
 	if (ret < 0)
 		return ret;
-	pcf2123_delay_trec();
+
+	return 0;
+}
+
+static int pcf2123_reset(struct device *dev)
+{
+	int ret;
+	u8  rxbuf[2];
+
+	ret = pcf2123_write_reg(dev, PCF2123_REG_CTRL1, CTRL1_SW_RESET);
+	if (ret < 0)
+		return ret;
+
+	/* Stop the counter */
+	dev_dbg(dev, "stopping RTC\n");
+	ret = pcf2123_write_reg(dev, PCF2123_REG_CTRL1, CTRL1_STOP);
+	if (ret < 0)
+		return ret;
+
+	/* See if the counter was actually stopped */
+	dev_dbg(dev, "checking for presence of RTC\n");
+	ret = pcf2123_read(dev, PCF2123_REG_CTRL1, rxbuf, sizeof(rxbuf));
+	if (ret < 0)
+		return ret;
+
+	dev_dbg(dev, "received data from RTC (0x%02X 0x%02X)\n",
+		rxbuf[0], rxbuf[1]);
+	if (!(rxbuf[0] & CTRL1_STOP))
+		return -ENODEV;
+
+	/* Start the counter */
+	ret = pcf2123_write_reg(dev, PCF2123_REG_CTRL1, CTRL1_CLEAR);
+	if (ret < 0)
+		return ret;
 
 	return 0;
 }
@@ -217,13 +368,16 @@
 static const struct rtc_class_ops pcf2123_rtc_ops = {
 	.read_time	= pcf2123_rtc_read_time,
 	.set_time	= pcf2123_rtc_set_time,
+	.read_offset	= pcf2123_read_offset,
+	.set_offset	= pcf2123_set_offset,
+
 };
 
 static int pcf2123_probe(struct spi_device *spi)
 {
 	struct rtc_device *rtc;
+	struct rtc_time tm;
 	struct pcf2123_plat_data *pdata;
-	u8 txbuf[2], rxbuf[2];
 	int ret, i;
 
 	pdata = devm_kzalloc(&spi->dev, sizeof(struct pcf2123_plat_data),
@@ -232,56 +386,19 @@
 		return -ENOMEM;
 	spi->dev.platform_data = pdata;
 
-	/* Send a software reset command */
-	txbuf[0] = PCF2123_WRITE | PCF2123_REG_CTRL1;
-	txbuf[1] = 0x58;
-	dev_dbg(&spi->dev, "resetting RTC (0x%02X 0x%02X)\n",
-			txbuf[0], txbuf[1]);
-	ret = spi_write(spi, txbuf, 2 * sizeof(u8));
-	if (ret < 0)
-		goto kfree_exit;
-	pcf2123_delay_trec();
-
-	/* Stop the counter */
-	txbuf[0] = PCF2123_WRITE | PCF2123_REG_CTRL1;
-	txbuf[1] = 0x20;
-	dev_dbg(&spi->dev, "stopping RTC (0x%02X 0x%02X)\n",
-			txbuf[0], txbuf[1]);
-	ret = spi_write(spi, txbuf, 2 * sizeof(u8));
-	if (ret < 0)
-		goto kfree_exit;
-	pcf2123_delay_trec();
-
-	/* See if the counter was actually stopped */
-	txbuf[0] = PCF2123_READ | PCF2123_REG_CTRL1;
-	dev_dbg(&spi->dev, "checking for presence of RTC (0x%02X)\n",
-			txbuf[0]);
-	ret = spi_write_then_read(spi, txbuf, 1 * sizeof(u8),
-					rxbuf, 2 * sizeof(u8));
-	dev_dbg(&spi->dev, "received data from RTC (0x%02X 0x%02X)\n",
-			rxbuf[0], rxbuf[1]);
-	if (ret < 0)
-		goto kfree_exit;
-	pcf2123_delay_trec();
-
-	if (!(rxbuf[0] & 0x20)) {
-		dev_err(&spi->dev, "chip not found\n");
-		ret = -ENODEV;
-		goto kfree_exit;
+	ret = pcf2123_rtc_read_time(&spi->dev, &tm);
+	if (ret < 0) {
+		ret = pcf2123_reset(&spi->dev);
+		if (ret < 0) {
+			dev_err(&spi->dev, "chip not found\n");
+			goto kfree_exit;
+		}
 	}
 
 	dev_info(&spi->dev, "chip found, driver version " DRV_VERSION "\n");
 	dev_info(&spi->dev, "spiclk %u KHz.\n",
 			(spi->max_speed_hz + 500) / 1000);
 
-	/* Start the counter */
-	txbuf[0] = PCF2123_WRITE | PCF2123_REG_CTRL1;
-	txbuf[1] = 0x00;
-	ret = spi_write(spi, txbuf, sizeof(txbuf));
-	if (ret < 0)
-		goto kfree_exit;
-	pcf2123_delay_trec();
-
 	/* Finalize the initialization */
 	rtc = devm_rtc_device_register(&spi->dev, pcf2123_driver.driver.name,
 			&pcf2123_rtc_ops, THIS_MODULE);
diff --git a/drivers/rtc/rtc-pcf2127.c b/drivers/rtc/rtc-pcf2127.c
index 629bfdf..2bfdf63 100644
--- a/drivers/rtc/rtc-pcf2127.c
+++ b/drivers/rtc/rtc-pcf2127.c
@@ -1,12 +1,12 @@
 /*
- * An I2C driver for the NXP PCF2127 RTC
+ * An I2C and SPI driver for the NXP PCF2127/29 RTC
  * Copyright 2013 Til-Technologies
  *
  * Author: Renaud Cerrato <r.cerrato@til-technologies.fr>
  *
  * based on the other drivers in this same directory.
  *
- * http://www.nxp.com/documents/data_sheet/PCF2127AT.pdf
+ * Datasheet: http://cache.nxp.com/documents/data_sheet/PCF2127.pdf
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -14,11 +14,13 @@
  */
 
 #include <linux/i2c.h>
+#include <linux/spi/spi.h>
 #include <linux/bcd.h>
 #include <linux/rtc.h>
 #include <linux/slab.h>
 #include <linux/module.h>
 #include <linux/of.h>
+#include <linux/regmap.h>
 
 #define PCF2127_REG_CTRL1       (0x00)  /* Control Register 1 */
 #define PCF2127_REG_CTRL2       (0x01)  /* Control Register 2 */
@@ -36,29 +38,30 @@
 
 #define PCF2127_OSF             BIT(7)  /* Oscillator Fail flag */
 
-static struct i2c_driver pcf2127_driver;
-
 struct pcf2127 {
 	struct rtc_device *rtc;
+	struct regmap *regmap;
 };
 
 /*
  * In the routines that deal directly with the pcf2127 hardware, we use
  * rtc_time -- month 0-11, hour 0-23, yr = calendar year-epoch.
  */
-static int pcf2127_get_datetime(struct i2c_client *client, struct rtc_time *tm)
+static int pcf2127_rtc_read_time(struct device *dev, struct rtc_time *tm)
 {
-	unsigned char buf[10] = { PCF2127_REG_CTRL1 };
+	struct pcf2127 *pcf2127 = dev_get_drvdata(dev);
+	unsigned char buf[10];
+	int ret;
 
-	/* read registers */
-	if (i2c_master_send(client, buf, 1) != 1 ||
-		i2c_master_recv(client, buf, sizeof(buf)) != sizeof(buf)) {
-		dev_err(&client->dev, "%s: read error\n", __func__);
-		return -EIO;
+	ret = regmap_bulk_read(pcf2127->regmap, PCF2127_REG_CTRL1, buf,
+				sizeof(buf));
+	if (ret) {
+		dev_err(dev, "%s: read error\n", __func__);
+		return ret;
 	}
 
 	if (buf[PCF2127_REG_CTRL3] & PCF2127_REG_CTRL3_BLF)
-		dev_info(&client->dev,
+		dev_info(dev,
 			"low voltage detected, check/replace RTC battery.\n");
 
 	if (buf[PCF2127_REG_SC] & PCF2127_OSF) {
@@ -66,12 +69,12 @@
 		 * no need clear the flag here,
 		 * it will be cleared once the new date is saved
 		 */
-		dev_warn(&client->dev,
+		dev_warn(dev,
 			 "oscillator stop detected, date/time is not reliable\n");
 		return -EINVAL;
 	}
 
-	dev_dbg(&client->dev,
+	dev_dbg(dev,
 		"%s: raw data is cr1=%02x, cr2=%02x, cr3=%02x, "
 		"sec=%02x, min=%02x, hr=%02x, "
 		"mday=%02x, wday=%02x, mon=%02x, year=%02x\n",
@@ -91,7 +94,7 @@
 	if (tm->tm_year < 70)
 		tm->tm_year += 100;	/* assume we are in 1970...2069 */
 
-	dev_dbg(&client->dev, "%s: tm is secs=%d, mins=%d, hours=%d, "
+	dev_dbg(dev, "%s: tm is secs=%d, mins=%d, hours=%d, "
 		"mday=%d, mon=%d, year=%d, wday=%d\n",
 		__func__,
 		tm->tm_sec, tm->tm_min, tm->tm_hour,
@@ -100,20 +103,18 @@
 	return rtc_valid_tm(tm);
 }
 
-static int pcf2127_set_datetime(struct i2c_client *client, struct rtc_time *tm)
+static int pcf2127_rtc_set_time(struct device *dev, struct rtc_time *tm)
 {
-	unsigned char buf[8];
+	struct pcf2127 *pcf2127 = dev_get_drvdata(dev);
+	unsigned char buf[7];
 	int i = 0, err;
 
-	dev_dbg(&client->dev, "%s: secs=%d, mins=%d, hours=%d, "
+	dev_dbg(dev, "%s: secs=%d, mins=%d, hours=%d, "
 		"mday=%d, mon=%d, year=%d, wday=%d\n",
 		__func__,
 		tm->tm_sec, tm->tm_min, tm->tm_hour,
 		tm->tm_mday, tm->tm_mon, tm->tm_year, tm->tm_wday);
 
-	/* start register address */
-	buf[i++] = PCF2127_REG_SC;
-
 	/* hours, minutes and seconds */
 	buf[i++] = bin2bcd(tm->tm_sec);	/* this will also clear OSF flag */
 	buf[i++] = bin2bcd(tm->tm_min);
@@ -128,11 +129,11 @@
 	buf[i++] = bin2bcd(tm->tm_year % 100);
 
 	/* write register's data */
-	err = i2c_master_send(client, buf, i);
-	if (err != i) {
-		dev_err(&client->dev,
+	err = regmap_bulk_write(pcf2127->regmap, PCF2127_REG_SC, buf, i);
+	if (err) {
+		dev_err(dev,
 			"%s: err=%d", __func__, err);
-		return -EIO;
+		return err;
 	}
 
 	return 0;
@@ -142,26 +143,17 @@
 static int pcf2127_rtc_ioctl(struct device *dev,
 				unsigned int cmd, unsigned long arg)
 {
-	struct i2c_client *client = to_i2c_client(dev);
-	unsigned char buf = PCF2127_REG_CTRL3;
+	struct pcf2127 *pcf2127 = dev_get_drvdata(dev);
 	int touser;
 	int ret;
 
 	switch (cmd) {
 	case RTC_VL_READ:
-		ret = i2c_master_send(client, &buf, 1);
-		if (!ret)
-			ret = -EIO;
-		if (ret < 0)
+		ret = regmap_read(pcf2127->regmap, PCF2127_REG_CTRL3, &touser);
+		if (ret)
 			return ret;
 
-		ret = i2c_master_recv(client, &buf, 1);
-		if (!ret)
-			ret = -EIO;
-		if (ret < 0)
-			return ret;
-
-		touser = buf & PCF2127_REG_CTRL3_BLF ? 1 : 0;
+		touser = touser & PCF2127_REG_CTRL3_BLF ? 1 : 0;
 
 		if (copy_to_user((void __user *)arg, &touser, sizeof(int)))
 			return -EFAULT;
@@ -174,71 +166,270 @@
 #define pcf2127_rtc_ioctl NULL
 #endif
 
-static int pcf2127_rtc_read_time(struct device *dev, struct rtc_time *tm)
-{
-	return pcf2127_get_datetime(to_i2c_client(dev), tm);
-}
-
-static int pcf2127_rtc_set_time(struct device *dev, struct rtc_time *tm)
-{
-	return pcf2127_set_datetime(to_i2c_client(dev), tm);
-}
-
 static const struct rtc_class_ops pcf2127_rtc_ops = {
 	.ioctl		= pcf2127_rtc_ioctl,
 	.read_time	= pcf2127_rtc_read_time,
 	.set_time	= pcf2127_rtc_set_time,
 };
 
-static int pcf2127_probe(struct i2c_client *client,
-				const struct i2c_device_id *id)
+static int pcf2127_probe(struct device *dev, struct regmap *regmap,
+			const char *name)
 {
 	struct pcf2127 *pcf2127;
 
-	dev_dbg(&client->dev, "%s\n", __func__);
+	dev_dbg(dev, "%s\n", __func__);
 
-	if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C))
-		return -ENODEV;
-
-	pcf2127 = devm_kzalloc(&client->dev, sizeof(struct pcf2127),
-				GFP_KERNEL);
+	pcf2127 = devm_kzalloc(dev, sizeof(*pcf2127), GFP_KERNEL);
 	if (!pcf2127)
 		return -ENOMEM;
 
-	i2c_set_clientdata(client, pcf2127);
+	pcf2127->regmap = regmap;
 
-	pcf2127->rtc = devm_rtc_device_register(&client->dev,
-				pcf2127_driver.driver.name,
-				&pcf2127_rtc_ops, THIS_MODULE);
+	dev_set_drvdata(dev, pcf2127);
+
+	pcf2127->rtc = devm_rtc_device_register(dev, name, &pcf2127_rtc_ops,
+						THIS_MODULE);
 
 	return PTR_ERR_OR_ZERO(pcf2127->rtc);
 }
 
-static const struct i2c_device_id pcf2127_id[] = {
-	{ "pcf2127", 0 },
-	{ }
-};
-MODULE_DEVICE_TABLE(i2c, pcf2127_id);
-
 #ifdef CONFIG_OF
 static const struct of_device_id pcf2127_of_match[] = {
 	{ .compatible = "nxp,pcf2127" },
+	{ .compatible = "nxp,pcf2129" },
 	{}
 };
 MODULE_DEVICE_TABLE(of, pcf2127_of_match);
 #endif
 
-static struct i2c_driver pcf2127_driver = {
-	.driver		= {
-		.name	= "rtc-pcf2127",
-		.of_match_table = of_match_ptr(pcf2127_of_match),
-	},
-	.probe		= pcf2127_probe,
-	.id_table	= pcf2127_id,
+#if IS_ENABLED(CONFIG_I2C)
+
+static int pcf2127_i2c_write(void *context, const void *data, size_t count)
+{
+	struct device *dev = context;
+	struct i2c_client *client = to_i2c_client(dev);
+	int ret;
+
+	ret = i2c_master_send(client, data, count);
+	if (ret != count)
+		return ret < 0 ? ret : -EIO;
+
+	return 0;
+}
+
+static int pcf2127_i2c_gather_write(void *context,
+				const void *reg, size_t reg_size,
+				const void *val, size_t val_size)
+{
+	struct device *dev = context;
+	struct i2c_client *client = to_i2c_client(dev);
+	int ret;
+	void *buf;
+
+	if (WARN_ON(reg_size != 1))
+		return -EINVAL;
+
+	buf = kmalloc(val_size + 1, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	memcpy(buf, reg, 1);
+	memcpy(buf + 1, val, val_size);
+
+	ret = i2c_master_send(client, buf, val_size + 1);
+	kfree(buf);
+	if (ret != val_size + 1)
+		return ret < 0 ? ret : -EIO;
+
+	return 0;
+}
+
+static int pcf2127_i2c_read(void *context, const void *reg, size_t reg_size,
+				void *val, size_t val_size)
+{
+	struct device *dev = context;
+	struct i2c_client *client = to_i2c_client(dev);
+	int ret;
+
+	if (WARN_ON(reg_size != 1))
+		return -EINVAL;
+
+	ret = i2c_master_send(client, reg, 1);
+	if (ret != 1)
+		return ret < 0 ? ret : -EIO;
+
+	ret = i2c_master_recv(client, val, val_size);
+	if (ret != val_size)
+		return ret < 0 ? ret : -EIO;
+
+	return 0;
+}
+
+/*
+ * A custom regmap_bus is used here instead of regmap_init_i2c() because
+ * the chip requires a STOP condition between setting the register address
+ * and reading the register data back.
+ */
+static const struct regmap_bus pcf2127_i2c_regmap = {
+	.write = pcf2127_i2c_write,
+	.gather_write = pcf2127_i2c_gather_write,
+	.read = pcf2127_i2c_read,
 };
 
-module_i2c_driver(pcf2127_driver);
+static struct i2c_driver pcf2127_i2c_driver;
+
+static int pcf2127_i2c_probe(struct i2c_client *client,
+				const struct i2c_device_id *id)
+{
+	struct regmap *regmap;
+	static const struct regmap_config config = {
+		.reg_bits = 8,
+		.val_bits = 8,
+	};
+
+	if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C))
+		return -ENODEV;
+
+	regmap = devm_regmap_init(&client->dev, &pcf2127_i2c_regmap,
+					&client->dev, &config);
+	if (IS_ERR(regmap)) {
+		dev_err(&client->dev, "%s: regmap allocation failed: %ld\n",
+			__func__, PTR_ERR(regmap));
+		return PTR_ERR(regmap);
+	}
+
+	return pcf2127_probe(&client->dev, regmap,
+				pcf2127_i2c_driver.driver.name);
+}
+
+static const struct i2c_device_id pcf2127_i2c_id[] = {
+	{ "pcf2127", 0 },
+	{ "pcf2129", 0 },
+	{ }
+};
+MODULE_DEVICE_TABLE(i2c, pcf2127_i2c_id);
+
+static struct i2c_driver pcf2127_i2c_driver = {
+	.driver		= {
+		.name	= "rtc-pcf2127-i2c",
+		.of_match_table = of_match_ptr(pcf2127_of_match),
+	},
+	.probe		= pcf2127_i2c_probe,
+	.id_table	= pcf2127_i2c_id,
+};
+
+static int pcf2127_i2c_register_driver(void)
+{
+	return i2c_add_driver(&pcf2127_i2c_driver);
+}
+
+static void pcf2127_i2c_unregister_driver(void)
+{
+	i2c_del_driver(&pcf2127_i2c_driver);
+}
+
+#else
+
+static int pcf2127_i2c_register_driver(void)
+{
+	return 0;
+}
+
+static void pcf2127_i2c_unregister_driver(void)
+{
+}
+
+#endif
+
+#if IS_ENABLED(CONFIG_SPI_MASTER)
+
+static struct spi_driver pcf2127_spi_driver;
+
+static int pcf2127_spi_probe(struct spi_device *spi)
+{
+	static const struct regmap_config config = {
+		.reg_bits = 8,
+		.val_bits = 8,
+		.read_flag_mask = 0xa0,
+		.write_flag_mask = 0x20,
+	};
+	struct regmap *regmap;
+
+	regmap = devm_regmap_init_spi(spi, &config);
+	if (IS_ERR(regmap)) {
+		dev_err(&spi->dev, "%s: regmap allocation failed: %ld\n",
+			__func__, PTR_ERR(regmap));
+		return PTR_ERR(regmap);
+	}
+
+	return pcf2127_probe(&spi->dev, regmap, pcf2127_spi_driver.driver.name);
+}
+
+static const struct spi_device_id pcf2127_spi_id[] = {
+	{ "pcf2127", 0 },
+	{ "pcf2129", 0 },
+	{ }
+};
+MODULE_DEVICE_TABLE(spi, pcf2127_spi_id);
+
+static struct spi_driver pcf2127_spi_driver = {
+	.driver		= {
+		.name	= "rtc-pcf2127-spi",
+		.of_match_table = of_match_ptr(pcf2127_of_match),
+	},
+	.probe		= pcf2127_spi_probe,
+	.id_table	= pcf2127_spi_id,
+};
+
+static int pcf2127_spi_register_driver(void)
+{
+	return spi_register_driver(&pcf2127_spi_driver);
+}
+
+static void pcf2127_spi_unregister_driver(void)
+{
+	spi_unregister_driver(&pcf2127_spi_driver);
+}
+
+#else
+
+static int pcf2127_spi_register_driver(void)
+{
+	return 0;
+}
+
+static void pcf2127_spi_unregister_driver(void)
+{
+}
+
+#endif
+
+static int __init pcf2127_init(void)
+{
+	int ret;
+
+	ret = pcf2127_i2c_register_driver();
+	if (ret) {
+		pr_err("Failed to register pcf2127 i2c driver: %d\n", ret);
+		return ret;
+	}
+
+	ret = pcf2127_spi_register_driver();
+	if (ret) {
+		pr_err("Failed to register pcf2127 spi driver: %d\n", ret);
+		pcf2127_i2c_unregister_driver();
+	}
+
+	return ret;
+}
+module_init(pcf2127_init)
+
+static void __exit pcf2127_exit(void)
+{
+	pcf2127_spi_unregister_driver();
+	pcf2127_i2c_unregister_driver();
+}
+module_exit(pcf2127_exit)
 
 MODULE_AUTHOR("Renaud Cerrato <r.cerrato@til-technologies.fr>");
-MODULE_DESCRIPTION("NXP PCF2127 RTC driver");
+MODULE_DESCRIPTION("NXP PCF2127/29 RTC driver");
 MODULE_LICENSE("GPL v2");
diff --git a/drivers/rtc/rtc-pcf85063.c b/drivers/rtc/rtc-pcf85063.c
index 63334cb..e8ddbb359 100644
--- a/drivers/rtc/rtc-pcf85063.c
+++ b/drivers/rtc/rtc-pcf85063.c
@@ -16,12 +16,12 @@
 #include <linux/rtc.h>
 #include <linux/module.h>
 
-#define DRV_VERSION "0.0.1"
-
 #define PCF85063_REG_CTRL1		0x00 /* status */
+#define PCF85063_REG_CTRL1_STOP		BIT(5)
 #define PCF85063_REG_CTRL2		0x01
 
 #define PCF85063_REG_SC			0x04 /* datetime */
+#define PCF85063_REG_SC_OS		0x80
 #define PCF85063_REG_MN			0x05
 #define PCF85063_REG_HR			0x06
 #define PCF85063_REG_DM			0x07
@@ -29,15 +29,31 @@
 #define PCF85063_REG_MO			0x09
 #define PCF85063_REG_YR			0x0A
 
-#define PCF85063_MO_C			0x80 /* century */
-
 static struct i2c_driver pcf85063_driver;
 
-struct pcf85063 {
-	struct rtc_device *rtc;
-	int c_polarity;	/* 0: MO_C=1 means 19xx, otherwise MO_C=1 means 20xx */
-	int voltage_low; /* indicates if a low_voltage was detected */
-};
+static int pcf85063_stop_clock(struct i2c_client *client, u8 *ctrl1)
+{
+	s32 ret;
+	u8 reg;
+
+	ret = i2c_smbus_read_byte_data(client, PCF85063_REG_CTRL1);
+	if (ret < 0) {
+		dev_err(&client->dev, "Failed to stop the clock\n");
+		return -EIO;
+	}
+
+	/* stop the clock */
+	reg = ret | PCF85063_REG_CTRL1_STOP;
+
+	ret = i2c_smbus_write_byte_data(client, PCF85063_REG_CTRL1, reg);
+	if (ret < 0) {
+		dev_err(&client->dev, "Failed to stop the clock\n");
+		return -EIO;
+	}
+
+	/* hand the caller the CTRL1 value (with STOP set) for later restore */
+	*ctrl1 = reg;
+
+	return 0;
+}
 
 /*
  * In the routines that deal directly with the pcf85063 hardware, we use
@@ -45,81 +61,85 @@
  */
 static int pcf85063_get_datetime(struct i2c_client *client, struct rtc_time *tm)
 {
-	struct pcf85063 *pcf85063 = i2c_get_clientdata(client);
-	unsigned char buf[13] = { PCF85063_REG_CTRL1 };
-	struct i2c_msg msgs[] = {
-		{/* setup read ptr */
-			.addr = client->addr,
-			.len = 1,
-			.buf = buf
-		},
-		{/* read status + date */
-			.addr = client->addr,
-			.flags = I2C_M_RD,
-			.len = 13,
-			.buf = buf
-		},
-	};
+	int rc;
+	u8 regs[7];
 
-	/* read registers */
-	if ((i2c_transfer(client->adapter, msgs, 2)) != 2) {
-		dev_err(&client->dev, "%s: read error\n", __func__);
+	/*
+	 * While reading, the time/date registers are blocked and not updated
+	 * again until the access is finished. To avoid losing a second
+	 * event, the access must be finished within one second, so read all
+	 * time/date registers in a single transfer.
+	 */
+	rc = i2c_smbus_read_i2c_block_data(client, PCF85063_REG_SC,
+					   sizeof(regs), regs);
+	if (rc != sizeof(regs)) {
+		dev_err(&client->dev, "date/time register read error\n");
 		return -EIO;
 	}
 
-	tm->tm_sec = bcd2bin(buf[PCF85063_REG_SC] & 0x7F);
-	tm->tm_min = bcd2bin(buf[PCF85063_REG_MN] & 0x7F);
-	tm->tm_hour = bcd2bin(buf[PCF85063_REG_HR] & 0x3F); /* rtc hr 0-23 */
-	tm->tm_mday = bcd2bin(buf[PCF85063_REG_DM] & 0x3F);
-	tm->tm_wday = buf[PCF85063_REG_DW] & 0x07;
-	tm->tm_mon = bcd2bin(buf[PCF85063_REG_MO] & 0x1F) - 1; /* rtc mn 1-12 */
-	tm->tm_year = bcd2bin(buf[PCF85063_REG_YR]);
+	/* if the clock has lost its power it makes no sense to use its time */
+	if (regs[0] & PCF85063_REG_SC_OS) {
+		dev_warn(&client->dev, "Power loss detected, invalid time\n");
+		return -EINVAL;
+	}
+
+	tm->tm_sec = bcd2bin(regs[0] & 0x7F);
+	tm->tm_min = bcd2bin(regs[1] & 0x7F);
+	tm->tm_hour = bcd2bin(regs[2] & 0x3F); /* rtc hr 0-23 */
+	tm->tm_mday = bcd2bin(regs[3] & 0x3F);
+	tm->tm_wday = regs[4] & 0x07;
+	tm->tm_mon = bcd2bin(regs[5] & 0x1F) - 1; /* rtc mn 1-12 */
+	tm->tm_year = bcd2bin(regs[6]);
 	if (tm->tm_year < 70)
 		tm->tm_year += 100;	/* assume we are in 1970...2069 */
-	/* detect the polarity heuristically. see note above. */
-	pcf85063->c_polarity = (buf[PCF85063_REG_MO] & PCF85063_MO_C) ?
-		(tm->tm_year >= 100) : (tm->tm_year < 100);
 
 	return rtc_valid_tm(tm);
 }
 
 static int pcf85063_set_datetime(struct i2c_client *client, struct rtc_time *tm)
 {
-	int i = 0, err = 0;
-	unsigned char buf[11];
+	int rc;
+	u8 regs[8];
 
-	/* Control & status */
-	buf[PCF85063_REG_CTRL1] = 0;
-	buf[PCF85063_REG_CTRL2] = 5;
+	/*
+	 * To set the time accurately, reset the divider chain and keep it
+	 * in reset until all time/date registers have been written.
+	 */
+	rc = pcf85063_stop_clock(client, &regs[7]);
+	if (rc != 0)
+		return rc;
 
 	/* hours, minutes and seconds */
-	buf[PCF85063_REG_SC] = bin2bcd(tm->tm_sec) & 0x7F;
+	regs[0] = bin2bcd(tm->tm_sec) & 0x7F; /* clear OS flag */
 
-	buf[PCF85063_REG_MN] = bin2bcd(tm->tm_min);
-	buf[PCF85063_REG_HR] = bin2bcd(tm->tm_hour);
+	regs[1] = bin2bcd(tm->tm_min);
+	regs[2] = bin2bcd(tm->tm_hour);
 
 	/* Day of month, 1 - 31 */
-	buf[PCF85063_REG_DM] = bin2bcd(tm->tm_mday);
+	regs[3] = bin2bcd(tm->tm_mday);
 
 	/* Day, 0 - 6 */
-	buf[PCF85063_REG_DW] = tm->tm_wday & 0x07;
+	regs[4] = tm->tm_wday & 0x07;
 
 	/* month, 1 - 12 */
-	buf[PCF85063_REG_MO] = bin2bcd(tm->tm_mon + 1);
+	regs[5] = bin2bcd(tm->tm_mon + 1);
 
 	/* year and century */
-	buf[PCF85063_REG_YR] = bin2bcd(tm->tm_year % 100);
+	regs[6] = bin2bcd(tm->tm_year % 100);
 
-	/* write register's data */
-	for (i = 0; i < sizeof(buf); i++) {
-		unsigned char data[2] = { i, buf[i] };
+	/*
+	 * After all time/date registers are written, let the 'address auto
+	 * increment' feature wrap around and write register CTRL1 to
+	 * re-enable the clock divider chain.
+	 */
+	regs[7] &= ~PCF85063_REG_CTRL1_STOP;
 
-		err = i2c_master_send(client, data, sizeof(data));
-		if (err != sizeof(data)) {
-			dev_err(&client->dev, "%s: err=%d addr=%02x, data=%02x\n",
-					__func__, err, data[0], data[1]);
-			return -EIO;
-		}
+	/* write all registers at once */
+	rc = i2c_smbus_write_i2c_block_data(client, PCF85063_REG_SC,
+					    sizeof(regs), regs);
+	if (rc < 0) {
+		dev_err(&client->dev, "date/time register write error\n");
+		return rc;
 	}
 
 	return 0;
@@ -143,27 +163,18 @@
 static int pcf85063_probe(struct i2c_client *client,
 				const struct i2c_device_id *id)
 {
-	struct pcf85063 *pcf85063;
+	struct rtc_device *rtc;
 
 	dev_dbg(&client->dev, "%s\n", __func__);
 
 	if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C))
 		return -ENODEV;
 
-	pcf85063 = devm_kzalloc(&client->dev, sizeof(struct pcf85063),
-				GFP_KERNEL);
-	if (!pcf85063)
-		return -ENOMEM;
+	rtc = devm_rtc_device_register(&client->dev,
+				       pcf85063_driver.driver.name,
+				       &pcf85063_rtc_ops, THIS_MODULE);
 
-	dev_info(&client->dev, "chip found, driver version " DRV_VERSION "\n");
-
-	i2c_set_clientdata(client, pcf85063);
-
-	pcf85063->rtc = devm_rtc_device_register(&client->dev,
-				pcf85063_driver.driver.name,
-				&pcf85063_rtc_ops, THIS_MODULE);
-
-	return PTR_ERR_OR_ZERO(pcf85063->rtc);
+	return PTR_ERR_OR_ZERO(rtc);
 }
 
 static const struct i2c_device_id pcf85063_id[] = {
@@ -194,4 +205,3 @@
 MODULE_AUTHOR("Søren Andersen <san@rosetechnology.dk>");
 MODULE_DESCRIPTION("PCF85063 RTC driver");
 MODULE_LICENSE("GPL");
-MODULE_VERSION(DRV_VERSION);
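
All of the time/date register handling in this series, the pcf85063 changes above
included, relies on bcd2bin()/bin2bcd() from <linux/bcd.h>, which pack one decimal
digit per nibble. A standalone sketch of the same mapping (illustrative only, not part
of the patch):

#include <stdio.h>

static unsigned char bin2bcd(unsigned char val)
{
	return (unsigned char)(((val / 10) << 4) | (val % 10));
}

static unsigned char bcd2bin(unsigned char val)
{
	return (unsigned char)((val >> 4) * 10 + (val & 0x0f));
}

int main(void)
{
	unsigned char sec = 59;
	unsigned char bcd = bin2bcd(sec);

	/* 59 decimal is stored as 0x59, i.e. digit nibbles 5 and 9 */
	printf("%u -> 0x%02x -> %u\n", sec, bcd, bcd2bin(bcd));
	return 0;
}
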
diff --git a/drivers/rtc/rtc-pcf8523.c b/drivers/rtc/rtc-pcf8523.c
index 988566c..28c48b3 100644
--- a/drivers/rtc/rtc-pcf8523.c
+++ b/drivers/rtc/rtc-pcf8523.c
@@ -178,28 +178,8 @@
 	if (err < 0)
 		return err;
 
-	if (regs[0] & REG_SECONDS_OS) {
-		/*
-		 * If the oscillator was stopped, try to clear the flag. Upon
-		 * power-up the flag is always set, but if we cannot clear it
-		 * the oscillator isn't running properly for some reason. The
-		 * sensible thing therefore is to return an error, signalling
-		 * that the clock cannot be assumed to be correct.
-		 */
-
-		regs[0] &= ~REG_SECONDS_OS;
-
-		err = pcf8523_write(client, REG_SECONDS, regs[0]);
-		if (err < 0)
-			return err;
-
-		err = pcf8523_read(client, REG_SECONDS, &regs[0]);
-		if (err < 0)
-			return err;
-
-		if (regs[0] & REG_SECONDS_OS)
-			return -EAGAIN;
-	}
+	if (regs[0] & REG_SECONDS_OS)
+		return -EINVAL;
 
 	tm->tm_sec = bcd2bin(regs[0] & 0x7f);
 	tm->tm_min = bcd2bin(regs[1] & 0x7f);
@@ -235,6 +215,7 @@
 		return err;
 
 	regs[0] = REG_SECONDS;
+	/* This will purposely overwrite REG_SECONDS_OS */
 	regs[1] = bin2bcd(tm->tm_sec);
 	regs[2] = bin2bcd(tm->tm_min);
 	regs[3] = bin2bcd(tm->tm_hour);
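
The comment added above ("This will purposely overwrite REG_SECONDS_OS") holds because
seconds are 0..59, so their BCD encoding never sets bit 7; assuming REG_SECONDS_OS is
the top bit of the seconds register, as OSC_HAS_STOPPED and PCF85063_REG_SC_OS are in
the other drivers in this series, writing the new time always clears it. A small check
of that claim (illustrative only, not part of the patch):

#include <stdio.h>

int main(void)
{
	unsigned char sec, max_bcd = 0;

	for (sec = 0; sec <= 59; sec++) {
		unsigned char bcd = (unsigned char)(((sec / 10) << 4) | (sec % 10));

		if (bcd > max_bcd)
			max_bcd = bcd;
	}
	/* 0x59 is the largest value, so bit 7 is never set */
	printf("largest BCD seconds value: 0x%02x\n", max_bcd);
	return 0;
}
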
diff --git a/drivers/rtc/rtc-pic32.c b/drivers/rtc/rtc-pic32.c
new file mode 100644
index 0000000..64e1e45
--- /dev/null
+++ b/drivers/rtc/rtc-pic32.c
@@ -0,0 +1,411 @@
+/*
+ * PIC32 RTC driver
+ *
+ * Joshua Henderson <joshua.henderson@microchip.com>
+ * Copyright (C) 2016 Microchip Technology Inc.  All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ */
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/clk.h>
+#include <linux/rtc.h>
+#include <linux/bcd.h>
+
+#include <asm/mach-pic32/pic32.h>
+
+#define PIC32_RTCCON		0x00
+#define PIC32_RTCCON_ON		BIT(15)
+#define PIC32_RTCCON_SIDL	BIT(13)
+#define PIC32_RTCCON_RTCCLKSEL	(3 << 9)
+#define PIC32_RTCCON_RTCCLKON	BIT(6)
+#define PIC32_RTCCON_RTCWREN	BIT(3)
+#define PIC32_RTCCON_RTCSYNC	BIT(2)
+#define PIC32_RTCCON_HALFSEC	BIT(1)
+#define PIC32_RTCCON_RTCOE	BIT(0)
+
+#define PIC32_RTCALRM		0x10
+#define PIC32_RTCALRM_ALRMEN	BIT(15)
+#define PIC32_RTCALRM_CHIME	BIT(14)
+#define PIC32_RTCALRM_PIV	BIT(13)
+#define PIC32_RTCALRM_ALARMSYNC	BIT(12)
+#define PIC32_RTCALRM_AMASK	0x0F00
+#define PIC32_RTCALRM_ARPT	0xFF
+
+#define PIC32_RTCHOUR		0x23
+#define PIC32_RTCMIN		0x22
+#define PIC32_RTCSEC		0x21
+#define PIC32_RTCYEAR		0x33
+#define PIC32_RTCMON		0x32
+#define PIC32_RTCDAY		0x31
+
+#define PIC32_ALRMTIME		0x40
+#define PIC32_ALRMDATE		0x50
+
+#define PIC32_ALRMHOUR		0x43
+#define PIC32_ALRMMIN		0x42
+#define PIC32_ALRMSEC		0x41
+#define PIC32_ALRMYEAR		0x53
+#define PIC32_ALRMMON		0x52
+#define PIC32_ALRMDAY		0x51
+
+struct pic32_rtc_dev {
+	struct rtc_device	*rtc;
+	void __iomem		*reg_base;
+	struct clk		*clk;
+	spinlock_t		alarm_lock;
+	int			alarm_irq;
+	bool			alarm_clk_enabled;
+};
+
+static void pic32_rtc_alarm_clk_enable(struct pic32_rtc_dev *pdata,
+				       bool enable)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&pdata->alarm_lock, flags);
+	if (enable) {
+		if (!pdata->alarm_clk_enabled) {
+			clk_enable(pdata->clk);
+			pdata->alarm_clk_enabled = true;
+		}
+	} else {
+		if (pdata->alarm_clk_enabled) {
+			clk_disable(pdata->clk);
+			pdata->alarm_clk_enabled = false;
+		}
+	}
+	spin_unlock_irqrestore(&pdata->alarm_lock, flags);
+}
+
+static irqreturn_t pic32_rtc_alarmirq(int irq, void *id)
+{
+	struct pic32_rtc_dev *pdata = (struct pic32_rtc_dev *)id;
+
+	clk_enable(pdata->clk);
+	rtc_update_irq(pdata->rtc, 1, RTC_AF | RTC_IRQF);
+	clk_disable(pdata->clk);
+
+	pic32_rtc_alarm_clk_enable(pdata, false);
+
+	return IRQ_HANDLED;
+}
+
+static int pic32_rtc_setaie(struct device *dev, unsigned int enabled)
+{
+	struct pic32_rtc_dev *pdata = dev_get_drvdata(dev);
+	void __iomem *base = pdata->reg_base;
+
+	clk_enable(pdata->clk);
+
+	writel(PIC32_RTCALRM_ALRMEN,
+	       base + (enabled ? PIC32_SET(PIC32_RTCALRM) :
+		       PIC32_CLR(PIC32_RTCALRM)));
+
+	clk_disable(pdata->clk);
+
+	pic32_rtc_alarm_clk_enable(pdata, enabled);
+
+	return 0;
+}
+
+static int pic32_rtc_setfreq(struct device *dev, int freq)
+{
+	struct pic32_rtc_dev *pdata = dev_get_drvdata(dev);
+	void __iomem *base = pdata->reg_base;
+
+	clk_enable(pdata->clk);
+
+	writel(PIC32_RTCALRM_AMASK, base + PIC32_CLR(PIC32_RTCALRM));
+	writel(freq << 8, base + PIC32_SET(PIC32_RTCALRM));
+	writel(PIC32_RTCALRM_CHIME, base + PIC32_SET(PIC32_RTCALRM));
+
+	clk_disable(pdata->clk);
+
+	return 0;
+}
+
+static int pic32_rtc_gettime(struct device *dev, struct rtc_time *rtc_tm)
+{
+	struct pic32_rtc_dev *pdata = dev_get_drvdata(dev);
+	void __iomem *base = pdata->reg_base;
+	unsigned int tries = 0;
+
+	clk_enable(pdata->clk);
+
+	do {
+		rtc_tm->tm_hour = readb(base + PIC32_RTCHOUR);
+		rtc_tm->tm_min = readb(base + PIC32_RTCMIN);
+		rtc_tm->tm_mon  = readb(base + PIC32_RTCMON);
+		rtc_tm->tm_mday = readb(base + PIC32_RTCDAY);
+		rtc_tm->tm_year = readb(base + PIC32_RTCYEAR);
+		rtc_tm->tm_sec  = readb(base + PIC32_RTCSEC);
+
+		/*
+		 * The only way to work out whether the system was mid-update
+		 * when we read it is to check the second counter, and if it
+		 * is zero, then we re-try the entire read.
+		 */
+		tries += 1;
+	} while (rtc_tm->tm_sec == 0 && tries < 2);
+
+	rtc_tm->tm_sec = bcd2bin(rtc_tm->tm_sec);
+	rtc_tm->tm_min = bcd2bin(rtc_tm->tm_min);
+	rtc_tm->tm_hour = bcd2bin(rtc_tm->tm_hour);
+	rtc_tm->tm_mday = bcd2bin(rtc_tm->tm_mday);
+	rtc_tm->tm_mon = bcd2bin(rtc_tm->tm_mon) - 1;
+	rtc_tm->tm_year = bcd2bin(rtc_tm->tm_year);
+
+	rtc_tm->tm_year += 100;
+
+	dev_dbg(dev, "read time %04d.%02d.%02d %02d:%02d:%02d\n",
+		1900 + rtc_tm->tm_year, rtc_tm->tm_mon, rtc_tm->tm_mday,
+		rtc_tm->tm_hour, rtc_tm->tm_min, rtc_tm->tm_sec);
+
+	clk_disable(pdata->clk);
+	return rtc_valid_tm(rtc_tm);
+}
+
+static int pic32_rtc_settime(struct device *dev, struct rtc_time *tm)
+{
+	struct pic32_rtc_dev *pdata = dev_get_drvdata(dev);
+	void __iomem *base = pdata->reg_base;
+	int year = tm->tm_year - 100;
+
+	dev_dbg(dev, "set time %04d.%02d.%02d %02d:%02d:%02d\n",
+		1900 + tm->tm_year, tm->tm_mon, tm->tm_mday,
+		tm->tm_hour, tm->tm_min, tm->tm_sec);
+
+	if (year < 0 || year >= 100) {
+		dev_err(dev, "rtc only supports 100 years\n");
+		return -EINVAL;
+	}
+
+	clk_enable(pdata->clk);
+	writeb(bin2bcd(tm->tm_sec),  base + PIC32_RTCSEC);
+	writeb(bin2bcd(tm->tm_min),  base + PIC32_RTCMIN);
+	writeb(bin2bcd(tm->tm_hour), base + PIC32_RTCHOUR);
+	writeb(bin2bcd(tm->tm_mday), base + PIC32_RTCDAY);
+	writeb(bin2bcd(tm->tm_mon + 1), base + PIC32_RTCMON);
+	writeb(bin2bcd(year), base + PIC32_RTCYEAR);
+	clk_disable(pdata->clk);
+
+	return 0;
+}
+
+static int pic32_rtc_getalarm(struct device *dev, struct rtc_wkalrm *alrm)
+{
+	struct pic32_rtc_dev *pdata = dev_get_drvdata(dev);
+	struct rtc_time *alm_tm = &alrm->time;
+	void __iomem *base = pdata->reg_base;
+	unsigned int alm_en;
+
+	clk_enable(pdata->clk);
+	alm_tm->tm_sec  = readb(base + PIC32_ALRMSEC);
+	alm_tm->tm_min  = readb(base + PIC32_ALRMMIN);
+	alm_tm->tm_hour = readb(base + PIC32_ALRMHOUR);
+	alm_tm->tm_mon  = readb(base + PIC32_ALRMMON);
+	alm_tm->tm_mday = readb(base + PIC32_ALRMDAY);
+	alm_tm->tm_year = readb(base + PIC32_ALRMYEAR);
+
+	alm_en = readb(base + PIC32_RTCALRM);
+
+	alrm->enabled = (alm_en & PIC32_RTCALRM_ALRMEN) ? 1 : 0;
+
+	dev_dbg(dev, "getalarm: %d, %04d.%02d.%02d %02d:%02d:%02d\n",
+		alm_en,
+		1900 + alm_tm->tm_year, alm_tm->tm_mon, alm_tm->tm_mday,
+		alm_tm->tm_hour, alm_tm->tm_min, alm_tm->tm_sec);
+
+	alm_tm->tm_sec = bcd2bin(alm_tm->tm_sec);
+	alm_tm->tm_min = bcd2bin(alm_tm->tm_min);
+	alm_tm->tm_hour = bcd2bin(alm_tm->tm_hour);
+	alm_tm->tm_mday = bcd2bin(alm_tm->tm_mday);
+	alm_tm->tm_mon = bcd2bin(alm_tm->tm_mon) - 1;
+	alm_tm->tm_year = bcd2bin(alm_tm->tm_year);
+
+	clk_disable(pdata->clk);
+	return 0;
+}
+
+static int pic32_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm)
+{
+	struct pic32_rtc_dev *pdata = dev_get_drvdata(dev);
+	struct rtc_time *tm = &alrm->time;
+	void __iomem *base = pdata->reg_base;
+
+	clk_enable(pdata->clk);
+	dev_dbg(dev, "setalarm: %d, %04d.%02d.%02d %02d:%02d:%02d\n",
+		alrm->enabled,
+		1900 + tm->tm_year, tm->tm_mon + 1, tm->tm_mday,
+		tm->tm_hour, tm->tm_min, tm->tm_sec);
+
+	writel(0x00, base + PIC32_ALRMTIME);
+	writel(0x00, base + PIC32_ALRMDATE);
+
+	pic32_rtc_setaie(dev, alrm->enabled);
+
+	clk_disable(pdata->clk);
+	return 0;
+}
+
+static int pic32_rtc_proc(struct device *dev, struct seq_file *seq)
+{
+	struct pic32_rtc_dev *pdata = dev_get_drvdata(dev);
+	void __iomem *base = pdata->reg_base;
+	unsigned int repeat;
+
+	clk_enable(pdata->clk);
+
+	repeat = readw(base + PIC32_RTCALRM);
+	repeat &= PIC32_RTCALRM_ARPT;
+	seq_printf(seq, "periodic_IRQ\t: %s\n", repeat  ? "yes" : "no");
+
+	clk_disable(pdata->clk);
+	return 0;
+}
+
+static const struct rtc_class_ops pic32_rtcops = {
+	.read_time	  = pic32_rtc_gettime,
+	.set_time	  = pic32_rtc_settime,
+	.read_alarm	  = pic32_rtc_getalarm,
+	.set_alarm	  = pic32_rtc_setalarm,
+	.proc		  = pic32_rtc_proc,
+	.alarm_irq_enable = pic32_rtc_setaie,
+};
+
+static void pic32_rtc_enable(struct pic32_rtc_dev *pdata, int en)
+{
+	void __iomem *base = pdata->reg_base;
+
+	if (!base)
+		return;
+
+	clk_enable(pdata->clk);
+	if (!en) {
+		writel(PIC32_RTCCON_ON, base + PIC32_CLR(PIC32_RTCCON));
+	} else {
+		pic32_syskey_unlock();
+
+		writel(PIC32_RTCCON_RTCWREN, base + PIC32_SET(PIC32_RTCCON));
+		writel(3 << 9, base + PIC32_CLR(PIC32_RTCCON));
+
+		if (!(readl(base + PIC32_RTCCON) & PIC32_RTCCON_ON))
+			writel(PIC32_RTCCON_ON, base + PIC32_SET(PIC32_RTCCON));
+	}
+	clk_disable(pdata->clk);
+}
+
+static int pic32_rtc_remove(struct platform_device *pdev)
+{
+	struct pic32_rtc_dev *pdata = platform_get_drvdata(pdev);
+
+	pic32_rtc_setaie(&pdev->dev, 0);
+	clk_unprepare(pdata->clk);
+	pdata->clk = NULL;
+
+	return 0;
+}
+
+static int pic32_rtc_probe(struct platform_device *pdev)
+{
+	struct pic32_rtc_dev *pdata;
+	struct resource *res;
+	int ret;
+
+	pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
+	if (!pdata)
+		return -ENOMEM;
+
+	platform_set_drvdata(pdev, pdata);
+
+	pdata->alarm_irq = platform_get_irq(pdev, 0);
+	if (pdata->alarm_irq < 0) {
+		dev_err(&pdev->dev, "no irq for alarm\n");
+		return pdata->alarm_irq;
+	}
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	pdata->reg_base = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(pdata->reg_base))
+		return PTR_ERR(pdata->reg_base);
+
+	pdata->clk = devm_clk_get(&pdev->dev, NULL);
+	if (IS_ERR(pdata->clk)) {
+		dev_err(&pdev->dev, "failed to find rtc clock source\n");
+		ret = PTR_ERR(pdata->clk);
+		pdata->clk = NULL;
+		return ret;
+	}
+
+	spin_lock_init(&pdata->alarm_lock);
+
+	clk_prepare_enable(pdata->clk);
+
+	pic32_rtc_enable(pdata, 1);
+
+	device_init_wakeup(&pdev->dev, 1);
+
+	pdata->rtc = devm_rtc_device_register(&pdev->dev, pdev->name,
+						 &pic32_rtcops,
+						 THIS_MODULE);
+	if (IS_ERR(pdata->rtc)) {
+		ret = PTR_ERR(pdata->rtc);
+		goto err_nortc;
+	}
+
+	pdata->rtc->max_user_freq = 128;
+
+	pic32_rtc_setfreq(&pdev->dev, 1);
+	ret = devm_request_irq(&pdev->dev, pdata->alarm_irq,
+			       pic32_rtc_alarmirq, 0,
+			       dev_name(&pdev->dev), pdata);
+	if (ret) {
+		dev_err(&pdev->dev,
+			"IRQ %d error %d\n", pdata->alarm_irq, ret);
+		goto err_nortc;
+	}
+
+	clk_disable(pdata->clk);
+
+	return 0;
+
+err_nortc:
+	pic32_rtc_enable(pdata, 0);
+	clk_disable_unprepare(pdata->clk);
+
+	return ret;
+}
+
+static const struct of_device_id pic32_rtc_dt_ids[] = {
+	{ .compatible = "microchip,pic32mzda-rtc" },
+	{ /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, pic32_rtc_dt_ids);
+
+static struct platform_driver pic32_rtc_driver = {
+	.probe		= pic32_rtc_probe,
+	.remove		= pic32_rtc_remove,
+	.driver		= {
+		.name	= "pic32-rtc",
+		.owner	= THIS_MODULE,
+		.of_match_table	= of_match_ptr(pic32_rtc_dt_ids),
+	},
+};
+module_platform_driver(pic32_rtc_driver);
+
+MODULE_DESCRIPTION("Microchip PIC32 RTC Driver");
+MODULE_AUTHOR("Joshua Henderson <joshua.henderson@microchip.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/rtc/rtc-rv3029c2.c b/drivers/rtc/rtc-rv3029c2.c
index e9ac5a4..d0cbf08 100644
--- a/drivers/rtc/rtc-rv3029c2.c
+++ b/drivers/rtc/rtc-rv3029c2.c
@@ -1,7 +1,8 @@
 /*
- * Micro Crystal RV-3029C2 rtc class driver
+ * Micro Crystal RV-3029 rtc class driver
  *
  * Author: Gregory Hermant <gregory.hermant@calao-systems.com>
+ *         Michael Buesch <m@bues.ch>
  *
  * based on previously existing rtc class drivers
  *
@@ -9,89 +10,120 @@
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
  *
- * NOTE: Currently this driver only supports the bare minimum for read
- * and write the RTC and alarms. The extra features provided by this chip
- * (trickle charger, eeprom, T° compensation) are unavailable.
  */
 
 #include <linux/module.h>
 #include <linux/i2c.h>
 #include <linux/bcd.h>
 #include <linux/rtc.h>
+#include <linux/delay.h>
+#include <linux/of.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+
 
 /* Register map */
 /* control section */
-#define RV3029C2_ONOFF_CTRL		0x00
-#define RV3029C2_IRQ_CTRL		0x01
-#define RV3029C2_IRQ_CTRL_AIE		(1 << 0)
-#define RV3029C2_IRQ_FLAGS		0x02
-#define RV3029C2_IRQ_FLAGS_AF		(1 << 0)
-#define RV3029C2_STATUS			0x03
-#define RV3029C2_STATUS_VLOW1		(1 << 2)
-#define RV3029C2_STATUS_VLOW2		(1 << 3)
-#define RV3029C2_STATUS_SR		(1 << 4)
-#define RV3029C2_STATUS_PON		(1 << 5)
-#define RV3029C2_STATUS_EEBUSY		(1 << 7)
-#define RV3029C2_RST_CTRL		0x04
-#define RV3029C2_CONTROL_SECTION_LEN	0x05
+#define RV3029_ONOFF_CTRL		0x00
+#define RV3029_ONOFF_CTRL_WE		BIT(0)
+#define RV3029_ONOFF_CTRL_TE		BIT(1)
+#define RV3029_ONOFF_CTRL_TAR		BIT(2)
+#define RV3029_ONOFF_CTRL_EERE		BIT(3)
+#define RV3029_ONOFF_CTRL_SRON		BIT(4)
+#define RV3029_ONOFF_CTRL_TD0		BIT(5)
+#define RV3029_ONOFF_CTRL_TD1		BIT(6)
+#define RV3029_ONOFF_CTRL_CLKINT	BIT(7)
+#define RV3029_IRQ_CTRL			0x01
+#define RV3029_IRQ_CTRL_AIE		BIT(0)
+#define RV3029_IRQ_CTRL_TIE		BIT(1)
+#define RV3029_IRQ_CTRL_V1IE		BIT(2)
+#define RV3029_IRQ_CTRL_V2IE		BIT(3)
+#define RV3029_IRQ_CTRL_SRIE		BIT(4)
+#define RV3029_IRQ_FLAGS		0x02
+#define RV3029_IRQ_FLAGS_AF		BIT(0)
+#define RV3029_IRQ_FLAGS_TF		BIT(1)
+#define RV3029_IRQ_FLAGS_V1IF		BIT(2)
+#define RV3029_IRQ_FLAGS_V2IF		BIT(3)
+#define RV3029_IRQ_FLAGS_SRF		BIT(4)
+#define RV3029_STATUS			0x03
+#define RV3029_STATUS_VLOW1		BIT(2)
+#define RV3029_STATUS_VLOW2		BIT(3)
+#define RV3029_STATUS_SR		BIT(4)
+#define RV3029_STATUS_PON		BIT(5)
+#define RV3029_STATUS_EEBUSY		BIT(7)
+#define RV3029_RST_CTRL			0x04
+#define RV3029_RST_CTRL_SYSR		BIT(4)
+#define RV3029_CONTROL_SECTION_LEN	0x05
 
 /* watch section */
-#define RV3029C2_W_SEC			0x08
-#define RV3029C2_W_MINUTES		0x09
-#define RV3029C2_W_HOURS		0x0A
-#define RV3029C2_REG_HR_12_24		(1<<6)  /* 24h/12h mode */
-#define RV3029C2_REG_HR_PM		(1<<5)  /* PM/AM bit in 12h mode */
-#define RV3029C2_W_DATE			0x0B
-#define RV3029C2_W_DAYS			0x0C
-#define RV3029C2_W_MONTHS		0x0D
-#define RV3029C2_W_YEARS		0x0E
-#define RV3029C2_WATCH_SECTION_LEN	0x07
+#define RV3029_W_SEC			0x08
+#define RV3029_W_MINUTES		0x09
+#define RV3029_W_HOURS			0x0A
+#define RV3029_REG_HR_12_24		BIT(6) /* 24h/12h mode */
+#define RV3029_REG_HR_PM		BIT(5) /* PM/AM bit in 12h mode */
+#define RV3029_W_DATE			0x0B
+#define RV3029_W_DAYS			0x0C
+#define RV3029_W_MONTHS			0x0D
+#define RV3029_W_YEARS			0x0E
+#define RV3029_WATCH_SECTION_LEN	0x07
 
 /* alarm section */
-#define RV3029C2_A_SC			0x10
-#define RV3029C2_A_MN			0x11
-#define RV3029C2_A_HR			0x12
-#define RV3029C2_A_DT			0x13
-#define RV3029C2_A_DW			0x14
-#define RV3029C2_A_MO			0x15
-#define RV3029C2_A_YR			0x16
-#define RV3029C2_ALARM_SECTION_LEN	0x07
+#define RV3029_A_SC			0x10
+#define RV3029_A_MN			0x11
+#define RV3029_A_HR			0x12
+#define RV3029_A_DT			0x13
+#define RV3029_A_DW			0x14
+#define RV3029_A_MO			0x15
+#define RV3029_A_YR			0x16
+#define RV3029_ALARM_SECTION_LEN	0x07
 
 /* timer section */
-#define RV3029C2_TIMER_LOW		0x18
-#define RV3029C2_TIMER_HIGH		0x19
+#define RV3029_TIMER_LOW		0x18
+#define RV3029_TIMER_HIGH		0x19
 
 /* temperature section */
-#define RV3029C2_TEMP_PAGE		0x20
+#define RV3029_TEMP_PAGE		0x20
 
 /* eeprom data section */
-#define RV3029C2_E2P_EEDATA1		0x28
-#define RV3029C2_E2P_EEDATA2		0x29
+#define RV3029_E2P_EEDATA1		0x28
+#define RV3029_E2P_EEDATA2		0x29
+#define RV3029_E2PDATA_SECTION_LEN	0x02
 
 /* eeprom control section */
-#define RV3029C2_CONTROL_E2P_EECTRL	0x30
-#define RV3029C2_TRICKLE_1K		(1<<0)  /*  1K resistance */
-#define RV3029C2_TRICKLE_5K		(1<<1)  /*  5K resistance */
-#define RV3029C2_TRICKLE_20K		(1<<2)  /* 20K resistance */
-#define RV3029C2_TRICKLE_80K		(1<<3)  /* 80K resistance */
-#define RV3029C2_CONTROL_E2P_XTALOFFSET	0x31
-#define RV3029C2_CONTROL_E2P_QCOEF	0x32
-#define RV3029C2_CONTROL_E2P_TURNOVER	0x33
+#define RV3029_CONTROL_E2P_EECTRL	0x30
+#define RV3029_EECTRL_THP		BIT(0) /* temp scan interval */
+#define RV3029_EECTRL_THE		BIT(1) /* thermometer enable */
+#define RV3029_EECTRL_FD0		BIT(2) /* CLKOUT */
+#define RV3029_EECTRL_FD1		BIT(3) /* CLKOUT */
+#define RV3029_TRICKLE_1K		BIT(4) /* 1.5K resistance */
+#define RV3029_TRICKLE_5K		BIT(5) /* 5K   resistance */
+#define RV3029_TRICKLE_20K		BIT(6) /* 20K  resistance */
+#define RV3029_TRICKLE_80K		BIT(7) /* 80K  resistance */
+#define RV3029_TRICKLE_MASK		(RV3029_TRICKLE_1K |\
+					 RV3029_TRICKLE_5K |\
+					 RV3029_TRICKLE_20K |\
+					 RV3029_TRICKLE_80K)
+#define RV3029_TRICKLE_SHIFT		4
+#define RV3029_CONTROL_E2P_XOFFS	0x31 /* XTAL offset */
+#define RV3029_CONTROL_E2P_XOFFS_SIGN	BIT(7) /* Sign: 1->pos, 0->neg */
+#define RV3029_CONTROL_E2P_QCOEF	0x32 /* XTAL temp drift coef */
+#define RV3029_CONTROL_E2P_TURNOVER	0x33 /* XTAL turnover temp (in *C) */
+#define RV3029_CONTROL_E2P_TOV_MASK	0x3F /* XTAL turnover temp mask */
 
 /* user ram section */
-#define RV3029C2_USR1_RAM_PAGE		0x38
-#define RV3029C2_USR1_SECTION_LEN	0x04
-#define RV3029C2_USR2_RAM_PAGE		0x3C
-#define RV3029C2_USR2_SECTION_LEN	0x04
+#define RV3029_USR1_RAM_PAGE		0x38
+#define RV3029_USR1_SECTION_LEN		0x04
+#define RV3029_USR2_RAM_PAGE		0x3C
+#define RV3029_USR2_SECTION_LEN		0x04
 
 static int
-rv3029c2_i2c_read_regs(struct i2c_client *client, u8 reg, u8 *buf,
-	unsigned len)
+rv3029_i2c_read_regs(struct i2c_client *client, u8 reg, u8 *buf,
+		     unsigned len)
 {
 	int ret;
 
-	if ((reg > RV3029C2_USR1_RAM_PAGE + 7) ||
-		(reg + len > RV3029C2_USR1_RAM_PAGE + 8))
+	if ((reg > RV3029_USR1_RAM_PAGE + 7) ||
+		(reg + len > RV3029_USR1_RAM_PAGE + 8))
 		return -EINVAL;
 
 	ret = i2c_smbus_read_i2c_block_data(client, reg, len, buf);
@@ -103,20 +135,38 @@
 }
 
 static int
-rv3029c2_i2c_write_regs(struct i2c_client *client, u8 reg, u8 const buf[],
-			unsigned len)
+rv3029_i2c_write_regs(struct i2c_client *client, u8 reg, u8 const buf[],
+		      unsigned len)
 {
-	if ((reg > RV3029C2_USR1_RAM_PAGE + 7) ||
-		(reg + len > RV3029C2_USR1_RAM_PAGE + 8))
+	if ((reg > RV3029_USR1_RAM_PAGE + 7) ||
+		(reg + len > RV3029_USR1_RAM_PAGE + 8))
 		return -EINVAL;
 
 	return i2c_smbus_write_i2c_block_data(client, reg, len, buf);
 }
 
 static int
-rv3029c2_i2c_get_sr(struct i2c_client *client, u8 *buf)
+rv3029_i2c_update_bits(struct i2c_client *client, u8 reg, u8 mask, u8 set)
 {
-	int ret = rv3029c2_i2c_read_regs(client, RV3029C2_STATUS, buf, 1);
+	u8 buf;
+	int ret;
+
+	ret = rv3029_i2c_read_regs(client, reg, &buf, 1);
+	if (ret < 0)
+		return ret;
+	buf &= ~mask;
+	buf |= set & mask;
+	ret = rv3029_i2c_write_regs(client, reg, &buf, 1);
+	if (ret < 0)
+		return ret;
+
+	return 0;
+}
+
+static int
+rv3029_i2c_get_sr(struct i2c_client *client, u8 *buf)
+{
+	int ret = rv3029_i2c_read_regs(client, RV3029_STATUS, buf, 1);
 
 	if (ret < 0)
 		return -EIO;
@@ -125,83 +175,224 @@
 }
 
 static int
-rv3029c2_i2c_set_sr(struct i2c_client *client, u8 val)
+rv3029_i2c_set_sr(struct i2c_client *client, u8 val)
 {
 	u8 buf[1];
 	int sr;
 
 	buf[0] = val;
-	sr = rv3029c2_i2c_write_regs(client, RV3029C2_STATUS, buf, 1);
+	sr = rv3029_i2c_write_regs(client, RV3029_STATUS, buf, 1);
 	dev_dbg(&client->dev, "status = 0x%.2x (%d)\n", buf[0], buf[0]);
 	if (sr < 0)
 		return -EIO;
 	return 0;
 }
 
+static int rv3029_eeprom_busywait(struct i2c_client *client)
+{
+	int i, ret;
+	u8 sr;
+
+	for (i = 100; i > 0; i--) {
+		ret = rv3029_i2c_get_sr(client, &sr);
+		if (ret < 0)
+			break;
+		if (!(sr & RV3029_STATUS_EEBUSY))
+			break;
+		usleep_range(1000, 10000);
+	}
+	if (i <= 0) {
+		dev_err(&client->dev, "EEPROM busy wait timeout.\n");
+		return -ETIMEDOUT;
+	}
+
+	return ret;
+}
+
+static int rv3029_eeprom_exit(struct i2c_client *client)
+{
+	/* Re-enable eeprom refresh */
+	return rv3029_i2c_update_bits(client, RV3029_ONOFF_CTRL,
+				      RV3029_ONOFF_CTRL_EERE,
+				      RV3029_ONOFF_CTRL_EERE);
+}
+
+static int rv3029_eeprom_enter(struct i2c_client *client)
+{
+	int ret;
+	u8 sr;
+
+	/* Check whether we are in the allowed voltage range. */
+	ret = rv3029_i2c_get_sr(client, &sr);
+	if (ret < 0)
+		return ret;
+	if (sr & (RV3029_STATUS_VLOW1 | RV3029_STATUS_VLOW2)) {
+		/* We clear the bits and retry once just in case
+		 * we had a brown out in early startup.
+		 */
+		sr &= ~RV3029_STATUS_VLOW1;
+		sr &= ~RV3029_STATUS_VLOW2;
+		ret = rv3029_i2c_set_sr(client, sr);
+		if (ret < 0)
+			return ret;
+		usleep_range(1000, 10000);
+		ret = rv3029_i2c_get_sr(client, &sr);
+		if (ret < 0)
+			return ret;
+		if (sr & (RV3029_STATUS_VLOW1 | RV3029_STATUS_VLOW2)) {
+			dev_err(&client->dev,
+				"Supply voltage is too low to safely access the EEPROM.\n");
+			return -ENODEV;
+		}
+	}
+
+	/* Disable eeprom refresh. */
+	ret = rv3029_i2c_update_bits(client, RV3029_ONOFF_CTRL,
+				     RV3029_ONOFF_CTRL_EERE, 0);
+	if (ret < 0)
+		return ret;
+
+	/* Wait for any previous eeprom accesses to finish. */
+	ret = rv3029_eeprom_busywait(client);
+	if (ret < 0)
+		rv3029_eeprom_exit(client);
+
+	return ret;
+}
+
+static int rv3029_eeprom_read(struct i2c_client *client, u8 reg,
+			      u8 buf[], size_t len)
+{
+	int ret, err;
+
+	err = rv3029_eeprom_enter(client);
+	if (err < 0)
+		return err;
+
+	ret = rv3029_i2c_read_regs(client, reg, buf, len);
+
+	err = rv3029_eeprom_exit(client);
+	if (err < 0)
+		return err;
+
+	return ret;
+}
+
+static int rv3029_eeprom_write(struct i2c_client *client, u8 reg,
+			       u8 const buf[], size_t len)
+{
+	int ret, err;
+	size_t i;
+	u8 tmp;
+
+	err = rv3029_eeprom_enter(client);
+	if (err < 0)
+		return err;
+
+	for (i = 0; i < len; i++, reg++) {
+		ret = rv3029_i2c_read_regs(client, reg, &tmp, 1);
+		if (ret < 0)
+			break;
+		if (tmp != buf[i]) {
+			ret = rv3029_i2c_write_regs(client, reg, &buf[i], 1);
+			if (ret < 0)
+				break;
+		}
+		ret = rv3029_eeprom_busywait(client);
+		if (ret < 0)
+			break;
+	}
+
+	err = rv3029_eeprom_exit(client);
+	if (err < 0)
+		return err;
+
+	return ret;
+}
+
+static int rv3029_eeprom_update_bits(struct i2c_client *client,
+				     u8 reg, u8 mask, u8 set)
+{
+	u8 buf;
+	int ret;
+
+	ret = rv3029_eeprom_read(client, reg, &buf, 1);
+	if (ret < 0)
+		return ret;
+	buf &= ~mask;
+	buf |= set & mask;
+	ret = rv3029_eeprom_write(client, reg, &buf, 1);
+	if (ret < 0)
+		return ret;
+
+	return 0;
+}
+
 static int
-rv3029c2_i2c_read_time(struct i2c_client *client, struct rtc_time *tm)
+rv3029_i2c_read_time(struct i2c_client *client, struct rtc_time *tm)
 {
 	u8 buf[1];
 	int ret;
-	u8 regs[RV3029C2_WATCH_SECTION_LEN] = { 0, };
+	u8 regs[RV3029_WATCH_SECTION_LEN] = { 0, };
 
-	ret = rv3029c2_i2c_get_sr(client, buf);
+	ret = rv3029_i2c_get_sr(client, buf);
 	if (ret < 0) {
 		dev_err(&client->dev, "%s: reading SR failed\n", __func__);
 		return -EIO;
 	}
 
-	ret = rv3029c2_i2c_read_regs(client, RV3029C2_W_SEC , regs,
-					RV3029C2_WATCH_SECTION_LEN);
+	ret = rv3029_i2c_read_regs(client, RV3029_W_SEC, regs,
+				   RV3029_WATCH_SECTION_LEN);
 	if (ret < 0) {
 		dev_err(&client->dev, "%s: reading RTC section failed\n",
 			__func__);
 		return ret;
 	}
 
-	tm->tm_sec = bcd2bin(regs[RV3029C2_W_SEC-RV3029C2_W_SEC]);
-	tm->tm_min = bcd2bin(regs[RV3029C2_W_MINUTES-RV3029C2_W_SEC]);
+	tm->tm_sec = bcd2bin(regs[RV3029_W_SEC-RV3029_W_SEC]);
+	tm->tm_min = bcd2bin(regs[RV3029_W_MINUTES-RV3029_W_SEC]);
 
 	/* HR field has a more complex interpretation */
 	{
-		const u8 _hr = regs[RV3029C2_W_HOURS-RV3029C2_W_SEC];
-		if (_hr & RV3029C2_REG_HR_12_24) {
+		const u8 _hr = regs[RV3029_W_HOURS-RV3029_W_SEC];
+
+		if (_hr & RV3029_REG_HR_12_24) {
 			/* 12h format */
 			tm->tm_hour = bcd2bin(_hr & 0x1f);
-			if (_hr & RV3029C2_REG_HR_PM)	/* PM flag set */
+			if (_hr & RV3029_REG_HR_PM)	/* PM flag set */
 				tm->tm_hour += 12;
 		} else /* 24h format */
 			tm->tm_hour = bcd2bin(_hr & 0x3f);
 	}
 
-	tm->tm_mday = bcd2bin(regs[RV3029C2_W_DATE-RV3029C2_W_SEC]);
-	tm->tm_mon = bcd2bin(regs[RV3029C2_W_MONTHS-RV3029C2_W_SEC]) - 1;
-	tm->tm_year = bcd2bin(regs[RV3029C2_W_YEARS-RV3029C2_W_SEC]) + 100;
-	tm->tm_wday = bcd2bin(regs[RV3029C2_W_DAYS-RV3029C2_W_SEC]) - 1;
+	tm->tm_mday = bcd2bin(regs[RV3029_W_DATE-RV3029_W_SEC]);
+	tm->tm_mon = bcd2bin(regs[RV3029_W_MONTHS-RV3029_W_SEC]) - 1;
+	tm->tm_year = bcd2bin(regs[RV3029_W_YEARS-RV3029_W_SEC]) + 100;
+	tm->tm_wday = bcd2bin(regs[RV3029_W_DAYS-RV3029_W_SEC]) - 1;
 
 	return 0;
 }
 
-static int rv3029c2_rtc_read_time(struct device *dev, struct rtc_time *tm)
+static int rv3029_rtc_read_time(struct device *dev, struct rtc_time *tm)
 {
-	return rv3029c2_i2c_read_time(to_i2c_client(dev), tm);
+	return rv3029_i2c_read_time(to_i2c_client(dev), tm);
 }
 
 static int
-rv3029c2_i2c_read_alarm(struct i2c_client *client, struct rtc_wkalrm *alarm)
+rv3029_i2c_read_alarm(struct i2c_client *client, struct rtc_wkalrm *alarm)
 {
 	struct rtc_time *const tm = &alarm->time;
 	int ret;
 	u8 regs[8];
 
-	ret = rv3029c2_i2c_get_sr(client, regs);
+	ret = rv3029_i2c_get_sr(client, regs);
 	if (ret < 0) {
 		dev_err(&client->dev, "%s: reading SR failed\n", __func__);
 		return -EIO;
 	}
 
-	ret = rv3029c2_i2c_read_regs(client, RV3029C2_A_SC, regs,
-					RV3029C2_ALARM_SECTION_LEN);
+	ret = rv3029_i2c_read_regs(client, RV3029_A_SC, regs,
+				   RV3029_ALARM_SECTION_LEN);
 
 	if (ret < 0) {
 		dev_err(&client->dev, "%s: reading alarm section failed\n",
@@ -209,51 +400,42 @@
 		return ret;
 	}
 
-	tm->tm_sec = bcd2bin(regs[RV3029C2_A_SC-RV3029C2_A_SC] & 0x7f);
-	tm->tm_min = bcd2bin(regs[RV3029C2_A_MN-RV3029C2_A_SC] & 0x7f);
-	tm->tm_hour = bcd2bin(regs[RV3029C2_A_HR-RV3029C2_A_SC] & 0x3f);
-	tm->tm_mday = bcd2bin(regs[RV3029C2_A_DT-RV3029C2_A_SC] & 0x3f);
-	tm->tm_mon = bcd2bin(regs[RV3029C2_A_MO-RV3029C2_A_SC] & 0x1f) - 1;
-	tm->tm_year = bcd2bin(regs[RV3029C2_A_YR-RV3029C2_A_SC] & 0x7f) + 100;
-	tm->tm_wday = bcd2bin(regs[RV3029C2_A_DW-RV3029C2_A_SC] & 0x07) - 1;
+	tm->tm_sec = bcd2bin(regs[RV3029_A_SC-RV3029_A_SC] & 0x7f);
+	tm->tm_min = bcd2bin(regs[RV3029_A_MN-RV3029_A_SC] & 0x7f);
+	tm->tm_hour = bcd2bin(regs[RV3029_A_HR-RV3029_A_SC] & 0x3f);
+	tm->tm_mday = bcd2bin(regs[RV3029_A_DT-RV3029_A_SC] & 0x3f);
+	tm->tm_mon = bcd2bin(regs[RV3029_A_MO-RV3029_A_SC] & 0x1f) - 1;
+	tm->tm_year = bcd2bin(regs[RV3029_A_YR-RV3029_A_SC] & 0x7f) + 100;
+	tm->tm_wday = bcd2bin(regs[RV3029_A_DW-RV3029_A_SC] & 0x07) - 1;
 
 	return 0;
 }
 
 static int
-rv3029c2_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alarm)
+rv3029_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alarm)
 {
-	return rv3029c2_i2c_read_alarm(to_i2c_client(dev), alarm);
+	return rv3029_i2c_read_alarm(to_i2c_client(dev), alarm);
 }
 
-static int rv3029c2_rtc_i2c_alarm_set_irq(struct i2c_client *client,
+static int rv3029_rtc_i2c_alarm_set_irq(struct i2c_client *client,
 					int enable)
 {
 	int ret;
-	u8 buf[1];
 
-	/* enable AIE irq */
-	ret = rv3029c2_i2c_read_regs(client, RV3029C2_IRQ_CTRL,	buf, 1);
+	/* enable/disable AIE irq */
+	ret = rv3029_i2c_update_bits(client, RV3029_IRQ_CTRL,
+				     RV3029_IRQ_CTRL_AIE,
+				     (enable ? RV3029_IRQ_CTRL_AIE : 0));
 	if (ret < 0) {
-		dev_err(&client->dev, "can't read INT reg\n");
-		return ret;
-	}
-	if (enable)
-		buf[0] |= RV3029C2_IRQ_CTRL_AIE;
-	else
-		buf[0] &= ~RV3029C2_IRQ_CTRL_AIE;
-
-	ret = rv3029c2_i2c_write_regs(client, RV3029C2_IRQ_CTRL, buf, 1);
-	if (ret < 0) {
-		dev_err(&client->dev, "can't set INT reg\n");
+		dev_err(&client->dev, "can't update INT reg\n");
 		return ret;
 	}
 
 	return 0;
 }
 
-static int rv3029c2_rtc_i2c_set_alarm(struct i2c_client *client,
-					struct rtc_wkalrm *alarm)
+static int rv3029_rtc_i2c_set_alarm(struct i2c_client *client,
+				    struct rtc_wkalrm *alarm)
 {
 	struct rtc_time *const tm = &alarm->time;
 	int ret;
@@ -267,50 +449,41 @@
 	if (tm->tm_year < 100)
 		return -EINVAL;
 
-	ret = rv3029c2_i2c_get_sr(client, regs);
+	ret = rv3029_i2c_get_sr(client, regs);
 	if (ret < 0) {
 		dev_err(&client->dev, "%s: reading SR failed\n", __func__);
 		return -EIO;
 	}
-	regs[RV3029C2_A_SC-RV3029C2_A_SC] = bin2bcd(tm->tm_sec & 0x7f);
-	regs[RV3029C2_A_MN-RV3029C2_A_SC] = bin2bcd(tm->tm_min & 0x7f);
-	regs[RV3029C2_A_HR-RV3029C2_A_SC] = bin2bcd(tm->tm_hour & 0x3f);
-	regs[RV3029C2_A_DT-RV3029C2_A_SC] = bin2bcd(tm->tm_mday & 0x3f);
-	regs[RV3029C2_A_MO-RV3029C2_A_SC] = bin2bcd((tm->tm_mon & 0x1f) - 1);
-	regs[RV3029C2_A_DW-RV3029C2_A_SC] = bin2bcd((tm->tm_wday & 7) - 1);
-	regs[RV3029C2_A_YR-RV3029C2_A_SC] = bin2bcd((tm->tm_year & 0x7f) - 100);
+	regs[RV3029_A_SC-RV3029_A_SC] = bin2bcd(tm->tm_sec & 0x7f);
+	regs[RV3029_A_MN-RV3029_A_SC] = bin2bcd(tm->tm_min & 0x7f);
+	regs[RV3029_A_HR-RV3029_A_SC] = bin2bcd(tm->tm_hour & 0x3f);
+	regs[RV3029_A_DT-RV3029_A_SC] = bin2bcd(tm->tm_mday & 0x3f);
+	regs[RV3029_A_MO-RV3029_A_SC] = bin2bcd((tm->tm_mon & 0x1f) - 1);
+	regs[RV3029_A_DW-RV3029_A_SC] = bin2bcd((tm->tm_wday & 7) - 1);
+	regs[RV3029_A_YR-RV3029_A_SC] = bin2bcd((tm->tm_year & 0x7f) - 100);
 
-	ret = rv3029c2_i2c_write_regs(client, RV3029C2_A_SC, regs,
-					RV3029C2_ALARM_SECTION_LEN);
+	ret = rv3029_i2c_write_regs(client, RV3029_A_SC, regs,
+				    RV3029_ALARM_SECTION_LEN);
 	if (ret < 0)
 		return ret;
 
 	if (alarm->enabled) {
-		u8 buf[1];
-
 		/* clear AF flag */
-		ret = rv3029c2_i2c_read_regs(client, RV3029C2_IRQ_FLAGS,
-						buf, 1);
+		ret = rv3029_i2c_update_bits(client, RV3029_IRQ_FLAGS,
+					     RV3029_IRQ_FLAGS_AF, 0);
 		if (ret < 0) {
-			dev_err(&client->dev, "can't read alarm flag\n");
-			return ret;
-		}
-		buf[0] &= ~RV3029C2_IRQ_FLAGS_AF;
-		ret = rv3029c2_i2c_write_regs(client, RV3029C2_IRQ_FLAGS,
-						buf, 1);
-		if (ret < 0) {
-			dev_err(&client->dev, "can't set alarm flag\n");
+			dev_err(&client->dev, "can't clear alarm flag\n");
 			return ret;
 		}
 		/* enable AIE irq */
-		ret = rv3029c2_rtc_i2c_alarm_set_irq(client, 1);
+		ret = rv3029_rtc_i2c_alarm_set_irq(client, 1);
 		if (ret)
 			return ret;
 
 		dev_dbg(&client->dev, "alarm IRQ armed\n");
 	} else {
 		/* disable AIE irq */
-		ret = rv3029c2_rtc_i2c_alarm_set_irq(client, 0);
+		ret = rv3029_rtc_i2c_alarm_set_irq(client, 0);
 		if (ret)
 			return ret;
 
@@ -320,13 +493,13 @@
 	return 0;
 }
 
-static int rv3029c2_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alarm)
+static int rv3029_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alarm)
 {
-	return rv3029c2_rtc_i2c_set_alarm(to_i2c_client(dev), alarm);
+	return rv3029_rtc_i2c_set_alarm(to_i2c_client(dev), alarm);
 }
 
 static int
-rv3029c2_i2c_set_time(struct i2c_client *client, struct rtc_time const *tm)
+rv3029_i2c_set_time(struct i2c_client *client, struct rtc_time const *tm)
 {
 	u8 regs[8];
 	int ret;
@@ -339,26 +512,26 @@
 	if (tm->tm_year < 100)
 		return -EINVAL;
 
-	regs[RV3029C2_W_SEC-RV3029C2_W_SEC] = bin2bcd(tm->tm_sec);
-	regs[RV3029C2_W_MINUTES-RV3029C2_W_SEC] = bin2bcd(tm->tm_min);
-	regs[RV3029C2_W_HOURS-RV3029C2_W_SEC] = bin2bcd(tm->tm_hour);
-	regs[RV3029C2_W_DATE-RV3029C2_W_SEC] = bin2bcd(tm->tm_mday);
-	regs[RV3029C2_W_MONTHS-RV3029C2_W_SEC] = bin2bcd(tm->tm_mon+1);
-	regs[RV3029C2_W_DAYS-RV3029C2_W_SEC] = bin2bcd((tm->tm_wday & 7)+1);
-	regs[RV3029C2_W_YEARS-RV3029C2_W_SEC] = bin2bcd(tm->tm_year - 100);
+	regs[RV3029_W_SEC-RV3029_W_SEC] = bin2bcd(tm->tm_sec);
+	regs[RV3029_W_MINUTES-RV3029_W_SEC] = bin2bcd(tm->tm_min);
+	regs[RV3029_W_HOURS-RV3029_W_SEC] = bin2bcd(tm->tm_hour);
+	regs[RV3029_W_DATE-RV3029_W_SEC] = bin2bcd(tm->tm_mday);
+	regs[RV3029_W_MONTHS-RV3029_W_SEC] = bin2bcd(tm->tm_mon+1);
+	regs[RV3029_W_DAYS-RV3029_W_SEC] = bin2bcd((tm->tm_wday & 7)+1);
+	regs[RV3029_W_YEARS-RV3029_W_SEC] = bin2bcd(tm->tm_year - 100);
 
-	ret = rv3029c2_i2c_write_regs(client, RV3029C2_W_SEC, regs,
-					RV3029C2_WATCH_SECTION_LEN);
+	ret = rv3029_i2c_write_regs(client, RV3029_W_SEC, regs,
+				    RV3029_WATCH_SECTION_LEN);
 	if (ret < 0)
 		return ret;
 
-	ret = rv3029c2_i2c_get_sr(client, regs);
+	ret = rv3029_i2c_get_sr(client, regs);
 	if (ret < 0) {
 		dev_err(&client->dev, "%s: reading SR failed\n", __func__);
 		return ret;
 	}
 	/* clear PON bit */
-	ret = rv3029c2_i2c_set_sr(client, (regs[0] & ~RV3029C2_STATUS_PON));
+	ret = rv3029_i2c_set_sr(client, (regs[0] & ~RV3029_STATUS_PON));
 	if (ret < 0) {
 		dev_err(&client->dev, "%s: reading SR failed\n", __func__);
 		return ret;
@@ -367,26 +540,238 @@
 	return 0;
 }
 
-static int rv3029c2_rtc_set_time(struct device *dev, struct rtc_time *tm)
+static int rv3029_rtc_set_time(struct device *dev, struct rtc_time *tm)
 {
-	return rv3029c2_i2c_set_time(to_i2c_client(dev), tm);
+	return rv3029_i2c_set_time(to_i2c_client(dev), tm);
 }
 
-static const struct rtc_class_ops rv3029c2_rtc_ops = {
-	.read_time	= rv3029c2_rtc_read_time,
-	.set_time	= rv3029c2_rtc_set_time,
-	.read_alarm	= rv3029c2_rtc_read_alarm,
-	.set_alarm	= rv3029c2_rtc_set_alarm,
+static const struct rv3029_trickle_tab_elem {
+	u32 r;		/* resistance in ohms */
+	u8 conf;	/* trickle config bits */
+} rv3029_trickle_tab[] = {
+	{
+		.r	= 1076,
+		.conf	= RV3029_TRICKLE_1K | RV3029_TRICKLE_5K |
+			  RV3029_TRICKLE_20K | RV3029_TRICKLE_80K,
+	}, {
+		.r	= 1091,
+		.conf	= RV3029_TRICKLE_1K | RV3029_TRICKLE_5K |
+			  RV3029_TRICKLE_20K,
+	}, {
+		.r	= 1137,
+		.conf	= RV3029_TRICKLE_1K | RV3029_TRICKLE_5K |
+			  RV3029_TRICKLE_80K,
+	}, {
+		.r	= 1154,
+		.conf	= RV3029_TRICKLE_1K | RV3029_TRICKLE_5K,
+	}, {
+		.r	= 1371,
+		.conf	= RV3029_TRICKLE_1K | RV3029_TRICKLE_20K |
+			  RV3029_TRICKLE_80K,
+	}, {
+		.r	= 1395,
+		.conf	= RV3029_TRICKLE_1K | RV3029_TRICKLE_20K,
+	}, {
+		.r	= 1472,
+		.conf	= RV3029_TRICKLE_1K | RV3029_TRICKLE_80K,
+	}, {
+		.r	= 1500,
+		.conf	= RV3029_TRICKLE_1K,
+	}, {
+		.r	= 3810,
+		.conf	= RV3029_TRICKLE_5K | RV3029_TRICKLE_20K |
+			  RV3029_TRICKLE_80K,
+	}, {
+		.r	= 4000,
+		.conf	= RV3029_TRICKLE_5K | RV3029_TRICKLE_20K,
+	}, {
+		.r	= 4706,
+		.conf	= RV3029_TRICKLE_5K | RV3029_TRICKLE_80K,
+	}, {
+		.r	= 5000,
+		.conf	= RV3029_TRICKLE_5K,
+	}, {
+		.r	= 16000,
+		.conf	= RV3029_TRICKLE_20K | RV3029_TRICKLE_80K,
+	}, {
+		.r	= 20000,
+		.conf	= RV3029_TRICKLE_20K,
+	}, {
+		.r	= 80000,
+		.conf	= RV3029_TRICKLE_80K,
+	},
 };
 
-static struct i2c_device_id rv3029c2_id[] = {
+static void rv3029_trickle_config(struct i2c_client *client)
+{
+	struct device_node *of_node = client->dev.of_node;
+	const struct rv3029_trickle_tab_elem *elem;
+	int i, err;
+	u32 ohms;
+	u8 trickle_set_bits;
+
+	if (!of_node)
+		return;
+
+	/* Configure the trickle charger. */
+	err = of_property_read_u32(of_node, "trickle-resistor-ohms", &ohms);
+	if (err) {
+		/* Disable trickle charger. */
+		trickle_set_bits = 0;
+	} else {
+		/* Enable trickle charger. */
+		for (i = 0; i < ARRAY_SIZE(rv3029_trickle_tab); i++) {
+			elem = &rv3029_trickle_tab[i];
+			if (elem->r >= ohms)
+				break;
+		}
+		trickle_set_bits = elem->conf;
+		dev_info(&client->dev,
+			 "Trickle charger enabled at %d ohms resistance.\n",
+			 elem->r);
+	}
+	err = rv3029_eeprom_update_bits(client, RV3029_CONTROL_E2P_EECTRL,
+					RV3029_TRICKLE_MASK,
+					trickle_set_bits);
+	if (err < 0) {
+		dev_err(&client->dev,
+			"Failed to update trickle charger config\n");
+	}
+}
+
+#ifdef CONFIG_RTC_DRV_RV3029_HWMON
+
+static int rv3029_read_temp(struct i2c_client *client, int *temp_mC)
+{
+	int ret;
+	u8 temp;
+
+	ret = rv3029_i2c_read_regs(client, RV3029_TEMP_PAGE, &temp, 1);
+	if (ret < 0)
+		return ret;
+
+	*temp_mC = ((int)temp - 60) * 1000;
+
+	return 0;
+}
+
+static ssize_t rv3029_hwmon_show_temp(struct device *dev,
+				      struct device_attribute *attr,
+				      char *buf)
+{
+	struct i2c_client *client = dev_get_drvdata(dev);
+	int ret, temp_mC;
+
+	ret = rv3029_read_temp(client, &temp_mC);
+	if (ret < 0)
+		return ret;
+
+	return sprintf(buf, "%d\n", temp_mC);
+}
+
+static ssize_t rv3029_hwmon_set_update_interval(struct device *dev,
+						struct device_attribute *attr,
+						const char *buf,
+						size_t count)
+{
+	struct i2c_client *client = dev_get_drvdata(dev);
+	unsigned long interval_ms;
+	int ret;
+	u8 th_set_bits = 0;
+
+	ret = kstrtoul(buf, 10, &interval_ms);
+	if (ret < 0)
+		return ret;
+
+	if (interval_ms != 0) {
+		th_set_bits |= RV3029_EECTRL_THE;
+		if (interval_ms >= 16000)
+			th_set_bits |= RV3029_EECTRL_THP;
+	}
+	ret = rv3029_eeprom_update_bits(client, RV3029_CONTROL_E2P_EECTRL,
+					RV3029_EECTRL_THE | RV3029_EECTRL_THP,
+					th_set_bits);
+	if (ret < 0)
+		return ret;
+
+	return count;
+}
+
+static ssize_t rv3029_hwmon_show_update_interval(struct device *dev,
+						 struct device_attribute *attr,
+						 char *buf)
+{
+	struct i2c_client *client = dev_get_drvdata(dev);
+	int ret, interval_ms;
+	u8 eectrl;
+
+	ret = rv3029_eeprom_read(client, RV3029_CONTROL_E2P_EECTRL,
+				 &eectrl, 1);
+	if (ret < 0)
+		return ret;
+
+	if (eectrl & RV3029_EECTRL_THE) {
+		if (eectrl & RV3029_EECTRL_THP)
+			interval_ms = 16000;
+		else
+			interval_ms = 1000;
+	} else {
+		interval_ms = 0;
+	}
+
+	return sprintf(buf, "%d\n", interval_ms);
+}
+
+static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, rv3029_hwmon_show_temp,
+			  NULL, 0);
+static SENSOR_DEVICE_ATTR(update_interval, S_IWUSR | S_IRUGO,
+			  rv3029_hwmon_show_update_interval,
+			  rv3029_hwmon_set_update_interval, 0);
+
+static struct attribute *rv3029_hwmon_attrs[] = {
+	&sensor_dev_attr_temp1_input.dev_attr.attr,
+	&sensor_dev_attr_update_interval.dev_attr.attr,
+	NULL,
+};
+ATTRIBUTE_GROUPS(rv3029_hwmon);
+
+static void rv3029_hwmon_register(struct i2c_client *client)
+{
+	struct device *hwmon_dev;
+
+	hwmon_dev = devm_hwmon_device_register_with_groups(
+		&client->dev, client->name, client, rv3029_hwmon_groups);
+	if (IS_ERR(hwmon_dev)) {
+		dev_warn(&client->dev,
+			"unable to register hwmon device %ld\n",
+			PTR_ERR(hwmon_dev));
+	}
+}
+
+#else /* CONFIG_RTC_DRV_RV3029_HWMON */
+
+static void rv3029_hwmon_register(struct i2c_client *client)
+{
+}
+
+#endif /* CONFIG_RTC_DRV_RV3029_HWMON */
+
+static const struct rtc_class_ops rv3029_rtc_ops = {
+	.read_time	= rv3029_rtc_read_time,
+	.set_time	= rv3029_rtc_set_time,
+	.read_alarm	= rv3029_rtc_read_alarm,
+	.set_alarm	= rv3029_rtc_set_alarm,
+};
+
+static struct i2c_device_id rv3029_id[] = {
+	{ "rv3029", 0 },
 	{ "rv3029c2", 0 },
 	{ }
 };
-MODULE_DEVICE_TABLE(i2c, rv3029c2_id);
+MODULE_DEVICE_TABLE(i2c, rv3029_id);
 
-static int rv3029c2_probe(struct i2c_client *client,
-			  const struct i2c_device_id *id)
+static int rv3029_probe(struct i2c_client *client,
+			const struct i2c_device_id *id)
 {
 	struct rtc_device *rtc;
 	int rc = 0;
@@ -395,14 +780,17 @@
 	if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_EMUL))
 		return -ENODEV;
 
-	rc = rv3029c2_i2c_get_sr(client, buf);
+	rc = rv3029_i2c_get_sr(client, buf);
 	if (rc < 0) {
 		dev_err(&client->dev, "reading status failed\n");
 		return rc;
 	}
 
+	rv3029_trickle_config(client);
+	rv3029_hwmon_register(client);
+
 	rtc = devm_rtc_device_register(&client->dev, client->name,
-					&rv3029c2_rtc_ops, THIS_MODULE);
+				       &rv3029_rtc_ops, THIS_MODULE);
 
 	if (IS_ERR(rtc))
 		return PTR_ERR(rtc);
@@ -412,16 +800,17 @@
 	return 0;
 }
 
-static struct i2c_driver rv3029c2_driver = {
+static struct i2c_driver rv3029_driver = {
 	.driver = {
 		.name = "rtc-rv3029c2",
 	},
-	.probe = rv3029c2_probe,
-	.id_table = rv3029c2_id,
+	.probe		= rv3029_probe,
+	.id_table	= rv3029_id,
 };
 
-module_i2c_driver(rv3029c2_driver);
+module_i2c_driver(rv3029_driver);
 
 MODULE_AUTHOR("Gregory Hermant <gregory.hermant@calao-systems.com>");
-MODULE_DESCRIPTION("Micro Crystal RV3029C2 RTC driver");
+MODULE_AUTHOR("Michael Buesch <m@bues.ch>");
+MODULE_DESCRIPTION("Micro Crystal RV3029 RTC driver");
 MODULE_LICENSE("GPL");
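
As a side note on the trickle-charger setup above: rv3029_trickle_config() walks rv3029_trickle_tab and takes the first entry whose resistance is at least the requested trickle-resistor-ohms value (falling through to the 80K entry for larger requests). Below is a minimal user-space sketch of that selection rule; the resistance literals are copied from the table in the patch, while pick_trickle_resistance() and main() are purely illustrative and not part of the driver. For example, a board asking for 4500 ohms ends up on the 4706 ohm (5K | 80K) setting.

	#include <stdio.h>

	/* Resistance values from rv3029_trickle_tab, in ohms. */
	static const unsigned int trickle_r[] = {
		1076, 1091, 1137, 1154, 1371, 1395, 1472, 1500,
		3810, 4000, 4706, 5000, 16000, 20000, 80000,
	};

	#define N_TRICKLE (sizeof(trickle_r) / sizeof(trickle_r[0]))

	/* Pick the first entry with r >= ohms, mirroring the loop in
	 * rv3029_trickle_config(); out-of-range requests stay on the
	 * largest (80k) entry, just as the driver's loop does. */
	static unsigned int pick_trickle_resistance(unsigned int ohms)
	{
		unsigned int i;

		for (i = 0; i < N_TRICKLE; i++)
			if (trickle_r[i] >= ohms)
				return trickle_r[i];
		return trickle_r[N_TRICKLE - 1];
	}

	int main(void)
	{
		/* trickle-resistor-ohms = <4500> resolves to 4706 ohms. */
		printf("%u ohms\n", pick_trickle_resistance(4500));
		return 0;
	}
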
diff --git a/drivers/rtc/rtc-rv8803.c b/drivers/rtc/rtc-rv8803.c
index 7155c08..8d9f35c 100644
--- a/drivers/rtc/rtc-rv8803.c
+++ b/drivers/rtc/rtc-rv8803.c
@@ -52,7 +52,7 @@
 struct rv8803_data {
 	struct i2c_client *client;
 	struct rtc_device *rtc;
-	spinlock_t flags_lock;
+	struct mutex flags_lock;
 	u8 ctrl;
 };
 
@@ -63,11 +63,11 @@
 	unsigned long events = 0;
 	int flags;
 
-	spin_lock(&rv8803->flags_lock);
+	mutex_lock(&rv8803->flags_lock);
 
 	flags = i2c_smbus_read_byte_data(client, RV8803_FLAG);
 	if (flags <= 0) {
-		spin_unlock(&rv8803->flags_lock);
+		mutex_unlock(&rv8803->flags_lock);
 		return IRQ_NONE;
 	}
 
@@ -102,7 +102,7 @@
 					  rv8803->ctrl);
 	}
 
-	spin_unlock(&rv8803->flags_lock);
+	mutex_unlock(&rv8803->flags_lock);
 
 	return IRQ_HANDLED;
 }
@@ -155,7 +155,6 @@
 	struct rv8803_data *rv8803 = dev_get_drvdata(dev);
 	u8 date[7];
 	int flags, ret;
-	unsigned long irqflags;
 
 	if ((tm->tm_year < 100) || (tm->tm_year > 199))
 		return -EINVAL;
@@ -173,18 +172,18 @@
 	if (ret < 0)
 		return ret;
 
-	spin_lock_irqsave(&rv8803->flags_lock, irqflags);
+	mutex_lock(&rv8803->flags_lock);
 
 	flags = i2c_smbus_read_byte_data(rv8803->client, RV8803_FLAG);
 	if (flags < 0) {
-		spin_unlock_irqrestore(&rv8803->flags_lock, irqflags);
+		mutex_unlock(&rv8803->flags_lock);
 		return flags;
 	}
 
 	ret = i2c_smbus_write_byte_data(rv8803->client, RV8803_FLAG,
 					flags & ~RV8803_FLAG_V2F);
 
-	spin_unlock_irqrestore(&rv8803->flags_lock, irqflags);
+	mutex_unlock(&rv8803->flags_lock);
 
 	return ret;
 }
@@ -226,7 +225,6 @@
 	u8 alarmvals[3];
 	u8 ctrl[2];
 	int ret, err;
-	unsigned long irqflags;
 
 	/* The alarm has no seconds, round up to nearest minute */
 	if (alrm->time.tm_sec) {
@@ -236,11 +234,11 @@
 		rtc_time64_to_tm(alarm_time, &alrm->time);
 	}
 
-	spin_lock_irqsave(&rv8803->flags_lock, irqflags);
+	mutex_lock(&rv8803->flags_lock);
 
 	ret = i2c_smbus_read_i2c_block_data(client, RV8803_FLAG, 2, ctrl);
 	if (ret != 2) {
-		spin_unlock_irqrestore(&rv8803->flags_lock, irqflags);
+		mutex_unlock(&rv8803->flags_lock);
 		return ret < 0 ? ret : -EIO;
 	}
 
@@ -253,14 +251,14 @@
 		err = i2c_smbus_write_byte_data(rv8803->client, RV8803_CTRL,
 						rv8803->ctrl);
 		if (err) {
-			spin_unlock_irqrestore(&rv8803->flags_lock, irqflags);
+			mutex_unlock(&rv8803->flags_lock);
 			return err;
 		}
 	}
 
 	ctrl[1] &= ~RV8803_FLAG_AF;
 	err = i2c_smbus_write_byte_data(rv8803->client, RV8803_FLAG, ctrl[1]);
-	spin_unlock_irqrestore(&rv8803->flags_lock, irqflags);
+	mutex_unlock(&rv8803->flags_lock);
 	if (err)
 		return err;
 
@@ -289,7 +287,6 @@
 	struct i2c_client *client = to_i2c_client(dev);
 	struct rv8803_data *rv8803 = dev_get_drvdata(dev);
 	int ctrl, flags, err;
-	unsigned long irqflags;
 
 	ctrl = rv8803->ctrl;
 
@@ -305,15 +302,15 @@
 			ctrl &= ~RV8803_CTRL_AIE;
 	}
 
-	spin_lock_irqsave(&rv8803->flags_lock, irqflags);
+	mutex_lock(&rv8803->flags_lock);
 	flags = i2c_smbus_read_byte_data(client, RV8803_FLAG);
 	if (flags < 0) {
-		spin_unlock_irqrestore(&rv8803->flags_lock, irqflags);
+		mutex_unlock(&rv8803->flags_lock);
 		return flags;
 	}
 	flags &= ~(RV8803_FLAG_AF | RV8803_FLAG_UF);
 	err = i2c_smbus_write_byte_data(client, RV8803_FLAG, flags);
-	spin_unlock_irqrestore(&rv8803->flags_lock, irqflags);
+	mutex_unlock(&rv8803->flags_lock);
 	if (err)
 		return err;
 
@@ -333,7 +330,6 @@
 	struct i2c_client *client = to_i2c_client(dev);
 	struct rv8803_data *rv8803 = dev_get_drvdata(dev);
 	int flags, ret = 0;
-	unsigned long irqflags;
 
 	switch (cmd) {
 	case RTC_VL_READ:
@@ -355,16 +351,16 @@
 		return 0;
 
 	case RTC_VL_CLR:
-		spin_lock_irqsave(&rv8803->flags_lock, irqflags);
+		mutex_lock(&rv8803->flags_lock);
 		flags = i2c_smbus_read_byte_data(client, RV8803_FLAG);
 		if (flags < 0) {
-			spin_unlock_irqrestore(&rv8803->flags_lock, irqflags);
+			mutex_unlock(&rv8803->flags_lock);
 			return flags;
 		}
 
 		flags &= ~(RV8803_FLAG_V1F | RV8803_FLAG_V2F);
 		ret = i2c_smbus_write_byte_data(client, RV8803_FLAG, flags);
-		spin_unlock_irqrestore(&rv8803->flags_lock, irqflags);
+		mutex_unlock(&rv8803->flags_lock);
 		if (ret < 0)
 			return ret;
 
@@ -441,6 +437,7 @@
 	if (!rv8803)
 		return -ENOMEM;
 
+	mutex_init(&rv8803->flags_lock);
 	rv8803->client = client;
 	i2c_set_clientdata(client, rv8803);
 
diff --git a/drivers/rtc/rtc-rx6110.c b/drivers/rtc/rtc-rx6110.c
new file mode 100644
index 0000000..bbad00b
--- /dev/null
+++ b/drivers/rtc/rtc-rx6110.c
@@ -0,0 +1,402 @@
+/*
+ * Driver for the Epson RTC module RX-6110 SA
+ *
+ * Copyright(C) 2015 Pengutronix, Steffen Trumtrar <kernel@pengutronix.de>
+ * Copyright(C) SEIKO EPSON CORPORATION 2013. All rights reserved.
+ *
+ * This driver software is distributed as is, without any warranty of any kind,
+ * either express or implied as further specified in the GNU Public License.
+ * This software may be used and distributed according to the terms of the GNU
+ * Public License, version 2 as published by the Free Software Foundation.
+ * See the file COPYING in the main directory of this archive for more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/bcd.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_gpio.h>
+#include <linux/regmap.h>
+#include <linux/rtc.h>
+#include <linux/spi/spi.h>
+
+/* RX-6110 Register definitions */
+#define RX6110_REG_SEC		0x10
+#define RX6110_REG_MIN		0x11
+#define RX6110_REG_HOUR		0x12
+#define RX6110_REG_WDAY		0x13
+#define RX6110_REG_MDAY		0x14
+#define RX6110_REG_MONTH	0x15
+#define RX6110_REG_YEAR		0x16
+#define RX6110_REG_RES1		0x17
+#define RX6110_REG_ALMIN	0x18
+#define RX6110_REG_ALHOUR	0x19
+#define RX6110_REG_ALWDAY	0x1A
+#define RX6110_REG_TCOUNT0	0x1B
+#define RX6110_REG_TCOUNT1	0x1C
+#define RX6110_REG_EXT		0x1D
+#define RX6110_REG_FLAG		0x1E
+#define RX6110_REG_CTRL		0x1F
+#define RX6110_REG_USER0	0x20
+#define RX6110_REG_USER1	0x21
+#define RX6110_REG_USER2	0x22
+#define RX6110_REG_USER3	0x23
+#define RX6110_REG_USER4	0x24
+#define RX6110_REG_USER5	0x25
+#define RX6110_REG_USER6	0x26
+#define RX6110_REG_USER7	0x27
+#define RX6110_REG_USER8	0x28
+#define RX6110_REG_USER9	0x29
+#define RX6110_REG_USERA	0x2A
+#define RX6110_REG_USERB	0x2B
+#define RX6110_REG_USERC	0x2C
+#define RX6110_REG_USERD	0x2D
+#define RX6110_REG_USERE	0x2E
+#define RX6110_REG_USERF	0x2F
+#define RX6110_REG_RES2		0x30
+#define RX6110_REG_RES3		0x31
+#define RX6110_REG_IRQ		0x32
+
+#define RX6110_BIT_ALARM_EN		BIT(7)
+
+/* Extension Register (1Dh) bit positions */
+#define RX6110_BIT_EXT_TSEL0		BIT(0)
+#define RX6110_BIT_EXT_TSEL1		BIT(1)
+#define RX6110_BIT_EXT_TSEL2		BIT(2)
+#define RX6110_BIT_EXT_WADA		BIT(3)
+#define RX6110_BIT_EXT_TE		BIT(4)
+#define RX6110_BIT_EXT_USEL		BIT(5)
+#define RX6110_BIT_EXT_FSEL0		BIT(6)
+#define RX6110_BIT_EXT_FSEL1		BIT(7)
+
+/* Flag Register (1Eh) bit positions */
+#define RX6110_BIT_FLAG_VLF		BIT(1)
+#define RX6110_BIT_FLAG_AF		BIT(3)
+#define RX6110_BIT_FLAG_TF		BIT(4)
+#define RX6110_BIT_FLAG_UF		BIT(5)
+
+/* Control Register (1Fh) bit positions */
+#define RX6110_BIT_CTRL_TBKE		BIT(0)
+#define RX6110_BIT_CTRL_TBKON		BIT(1)
+#define RX6110_BIT_CTRL_TSTP		BIT(2)
+#define RX6110_BIT_CTRL_AIE		BIT(3)
+#define RX6110_BIT_CTRL_TIE		BIT(4)
+#define RX6110_BIT_CTRL_UIE		BIT(5)
+#define RX6110_BIT_CTRL_STOP		BIT(6)
+#define RX6110_BIT_CTRL_TEST		BIT(7)
+
+enum {
+	RTC_SEC = 0,
+	RTC_MIN,
+	RTC_HOUR,
+	RTC_WDAY,
+	RTC_MDAY,
+	RTC_MONTH,
+	RTC_YEAR,
+	RTC_NR_TIME
+};
+
+#define RX6110_DRIVER_NAME		"rx6110"
+
+struct rx6110_data {
+	struct rtc_device *rtc;
+	struct regmap *regmap;
+};
+
+/**
+ * rx6110_rtc_tm_to_data - convert rtc_time to native time encoding
+ *
+ * @tm: holds date and time
+ * @data: holds the encoding in rx6110 native form
+ */
+static int rx6110_rtc_tm_to_data(struct rtc_time *tm, u8 *data)
+{
+	pr_debug("%s: date %ds %dm %dh %dmd %dm %dy\n", __func__,
+		 tm->tm_sec, tm->tm_min, tm->tm_hour,
+		 tm->tm_mday, tm->tm_mon, tm->tm_year);
+
+	/*
+	 * The year in the RTC is a value between 0 and 99.
+	 * Assume that this represents the current century
+	 * and disregard all other values.
+	 */
+	if (tm->tm_year < 100 || tm->tm_year >= 200)
+		return -EINVAL;
+
+	data[RTC_SEC] = bin2bcd(tm->tm_sec);
+	data[RTC_MIN] = bin2bcd(tm->tm_min);
+	data[RTC_HOUR] = bin2bcd(tm->tm_hour);
+	data[RTC_WDAY] = BIT(bin2bcd(tm->tm_wday));
+	data[RTC_MDAY] = bin2bcd(tm->tm_mday);
+	data[RTC_MONTH] = bin2bcd(tm->tm_mon + 1);
+	data[RTC_YEAR] = bin2bcd(tm->tm_year % 100);
+
+	return 0;
+}
+
+/**
+ * rx6110_data_to_rtc_tm - convert native time encoding to rtc_time
+ *
+ * @data: holds the encoding in rx6110 native form
+ * @tm: holds date and time
+ */
+static int rx6110_data_to_rtc_tm(u8 *data, struct rtc_time *tm)
+{
+	tm->tm_sec = bcd2bin(data[RTC_SEC] & 0x7f);
+	tm->tm_min = bcd2bin(data[RTC_MIN] & 0x7f);
+	/* only 24-hour clock */
+	tm->tm_hour = bcd2bin(data[RTC_HOUR] & 0x3f);
+	tm->tm_wday = ffs(data[RTC_WDAY] & 0x7f);
+	tm->tm_mday = bcd2bin(data[RTC_MDAY] & 0x3f);
+	tm->tm_mon = bcd2bin(data[RTC_MONTH] & 0x1f) - 1;
+	tm->tm_year = bcd2bin(data[RTC_YEAR]) + 100;
+
+	pr_debug("%s: date %ds %dm %dh %dmd %dm %dy\n", __func__,
+		 tm->tm_sec, tm->tm_min, tm->tm_hour,
+		 tm->tm_mday, tm->tm_mon, tm->tm_year);
+
+	/*
+	 * The year in the RTC is a value between 0 and 99.
+	 * Assume that this represents the current century
+	 * and disregard all other values.
+	 */
+	if (tm->tm_year < 100 || tm->tm_year >= 200)
+		return -EINVAL;
+
+	return 0;
+}
+
+/**
+ * rx6110_set_time - set the current time in the rx6110 registers
+ *
+ * @dev: the rtc device in use
+ * @tm: holds date and time
+ *
+ * BUG: The HW assumes every year that is a multiple of 4 to be a leap
+ * year. The next time this is wrong is 2100, which will not be a leap year.
+ *
+ * Note: If STOP is not set/cleared, the clock will start when the seconds
+ *       register is written
+ *
+ */
+static int rx6110_set_time(struct device *dev, struct rtc_time *tm)
+{
+	struct rx6110_data *rx6110 = dev_get_drvdata(dev);
+	u8 data[RTC_NR_TIME];
+	int ret;
+
+	ret = rx6110_rtc_tm_to_data(tm, data);
+	if (ret < 0)
+		return ret;
+
+	/* set STOP bit before changing clock/calendar */
+	ret = regmap_update_bits(rx6110->regmap, RX6110_REG_CTRL,
+				 RX6110_BIT_CTRL_STOP, RX6110_BIT_CTRL_STOP);
+	if (ret)
+		return ret;
+
+	ret = regmap_bulk_write(rx6110->regmap, RX6110_REG_SEC, data,
+				RTC_NR_TIME);
+	if (ret)
+		return ret;
+
+	/* The time in the RTC is valid. Be sure to have VLF cleared. */
+	ret = regmap_update_bits(rx6110->regmap, RX6110_REG_FLAG,
+				 RX6110_BIT_FLAG_VLF, 0);
+	if (ret)
+		return ret;
+
+	/* clear STOP bit after changing clock/calendar */
+	ret = regmap_update_bits(rx6110->regmap, RX6110_REG_CTRL,
+				 RX6110_BIT_CTRL_STOP, 0);
+
+	return ret;
+}
+
+/**
+ * rx6110_get_time - get the current time from the rx6110 registers
+ * @dev: the rtc device in use
+ * @tm: holds date and time
+ */
+static int rx6110_get_time(struct device *dev, struct rtc_time *tm)
+{
+	struct rx6110_data *rx6110 = dev_get_drvdata(dev);
+	u8 data[RTC_NR_TIME];
+	int flags;
+	int ret;
+
+	ret = regmap_read(rx6110->regmap, RX6110_REG_FLAG, &flags);
+	if (ret)
+		return -EINVAL;
+
+	/* check for VLF Flag (set at power-on) */
+	if ((flags & RX6110_BIT_FLAG_VLF)) {
+		dev_warn(dev, "Voltage low, data is invalid.\n");
+		return -EINVAL;
+	}
+
+	/* read registers to date */
+	ret = regmap_bulk_read(rx6110->regmap, RX6110_REG_SEC, data,
+			       RTC_NR_TIME);
+	if (ret)
+		return ret;
+
+	ret = rx6110_data_to_rtc_tm(data, tm);
+	if (ret)
+		return ret;
+
+	dev_dbg(dev, "%s: date %ds %dm %dh %dmd %dm %dy\n", __func__,
+		tm->tm_sec, tm->tm_min, tm->tm_hour,
+		tm->tm_mday, tm->tm_mon, tm->tm_year);
+
+	return rtc_valid_tm(tm);
+}
+
+static const struct reg_sequence rx6110_default_regs[] = {
+	{ RX6110_REG_RES1,   0xB8 },
+	{ RX6110_REG_RES2,   0x00 },
+	{ RX6110_REG_RES3,   0x10 },
+	{ RX6110_REG_IRQ,    0x00 },
+	{ RX6110_REG_ALMIN,  0x00 },
+	{ RX6110_REG_ALHOUR, 0x00 },
+	{ RX6110_REG_ALWDAY, 0x00 },
+};
+
+/**
+ * rx6110_init - initialize the rx6110 registers
+ *
+ * @rx6110: pointer to the rx6110 struct in use
+ *
+ */
+static int rx6110_init(struct rx6110_data *rx6110)
+{
+	struct rtc_device *rtc = rx6110->rtc;
+	int flags;
+	int ret;
+
+	ret = regmap_update_bits(rx6110->regmap, RX6110_REG_EXT,
+				 RX6110_BIT_EXT_TE, 0);
+	if (ret)
+		return ret;
+
+	ret = regmap_register_patch(rx6110->regmap, rx6110_default_regs,
+				    ARRAY_SIZE(rx6110_default_regs));
+	if (ret)
+		return ret;
+
+	ret = regmap_read(rx6110->regmap, RX6110_REG_FLAG, &flags);
+	if (ret)
+		return ret;
+
+	/* check for VLF Flag (set at power-on) */
+	if ((flags & RX6110_BIT_FLAG_VLF))
+		dev_warn(&rtc->dev, "Voltage low, data loss detected.\n");
+
+	/* check for Alarm Flag */
+	if (flags & RX6110_BIT_FLAG_AF)
+		dev_warn(&rtc->dev, "An alarm may have been missed.\n");
+
+	/* check for Periodic Timer Flag */
+	if (flags & RX6110_BIT_FLAG_TF)
+		dev_warn(&rtc->dev, "Periodic timer was detected\n");
+
+	/* check for Update Timer Flag */
+	if (flags & RX6110_BIT_FLAG_UF)
+		dev_warn(&rtc->dev, "Update timer was detected\n");
+
+	/* clear all flags BUT VLF */
+	ret = regmap_update_bits(rx6110->regmap, RX6110_REG_FLAG,
+				 RX6110_BIT_FLAG_AF |
+				 RX6110_BIT_FLAG_UF |
+				 RX6110_BIT_FLAG_TF,
+				 0);
+
+	return ret;
+}
+
+static struct rtc_class_ops rx6110_rtc_ops = {
+	.read_time = rx6110_get_time,
+	.set_time = rx6110_set_time,
+};
+
+static struct regmap_config regmap_spi_config = {
+	.reg_bits = 8,
+	.val_bits = 8,
+	.max_register = RX6110_REG_IRQ,
+	.read_flag_mask = 0x80,
+};
+
+/**
+ * rx6110_probe - initialize rtc driver
+ * @spi: pointer to spi device
+ */
+static int rx6110_probe(struct spi_device *spi)
+{
+	struct rx6110_data *rx6110;
+	int err;
+
+	if ((spi->bits_per_word && spi->bits_per_word != 8) ||
+	    (spi->max_speed_hz > 2000000) ||
+	    (spi->mode != (SPI_CS_HIGH | SPI_CPOL | SPI_CPHA))) {
+		dev_warn(&spi->dev, "SPI settings: bits_per_word: %d, max_speed_hz: %d, mode: %xh\n",
+			 spi->bits_per_word, spi->max_speed_hz, spi->mode);
+		dev_warn(&spi->dev, "driving device in an unsupported mode");
+	}
+
+	rx6110 = devm_kzalloc(&spi->dev, sizeof(*rx6110), GFP_KERNEL);
+	if (!rx6110)
+		return -ENOMEM;
+
+	rx6110->regmap = devm_regmap_init_spi(spi, &regmap_spi_config);
+	if (IS_ERR(rx6110->regmap)) {
+		dev_err(&spi->dev, "regmap init failed for rtc rx6110\n");
+		return PTR_ERR(rx6110->regmap);
+	}
+
+	spi_set_drvdata(spi, rx6110);
+
+	rx6110->rtc = devm_rtc_device_register(&spi->dev,
+					       RX6110_DRIVER_NAME,
+					       &rx6110_rtc_ops, THIS_MODULE);
+
+	if (IS_ERR(rx6110->rtc))
+		return PTR_ERR(rx6110->rtc);
+
+	err = rx6110_init(rx6110);
+	if (err)
+		return err;
+
+	rx6110->rtc->max_user_freq = 1;
+
+	return 0;
+}
+
+static int rx6110_remove(struct spi_device *spi)
+{
+	return 0;
+}
+
+static const struct spi_device_id rx6110_id[] = {
+	{ "rx6110", 0 },
+	{ }
+};
+MODULE_DEVICE_TABLE(spi, rx6110_id);
+
+static struct spi_driver rx6110_driver = {
+	.driver = {
+		.name = RX6110_DRIVER_NAME,
+		.owner = THIS_MODULE,
+	},
+	.probe		= rx6110_probe,
+	.remove		= rx6110_remove,
+	.id_table	= rx6110_id,
+};
+
+module_spi_driver(rx6110_driver);
+
+MODULE_AUTHOR("Val Krutov <val.krutov@erd.epson.com>");
+MODULE_DESCRIPTION("RX-6110 SA RTC driver");
+MODULE_LICENSE("GPL");
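
The rx6110 conversion helpers above store the time fields as BCD via bin2bcd()/bcd2bin() from <linux/bcd.h> and keep the weekday as a one-hot bit. A small stand-alone sketch of those encodings follows; the helper bodies are re-spelled here only so the example builds outside the kernel.

	#include <stdio.h>

	/* bin2bcd()/bcd2bin() as in <linux/bcd.h>, re-spelled for user space. */
	static unsigned char bin2bcd(unsigned int val)
	{
		return ((val / 10) << 4) | (val % 10);
	}

	static unsigned int bcd2bin(unsigned char val)
	{
		return (val & 0x0f) + (val >> 4) * 10;
	}

	int main(void)
	{
		unsigned int sec = 59, wday = 3;	/* 59 s, Wednesday */

		/* Time fields go into the registers as BCD: 59 -> 0x59. */
		printf("sec reg 0x%02x -> %u\n",
		       bin2bcd(sec), bcd2bin(bin2bcd(sec)));

		/* The weekday register is one-hot, as in rx6110_rtc_tm_to_data(). */
		printf("wday reg 0x%02x\n", 1u << bin2bcd(wday));
		return 0;
	}
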
diff --git a/drivers/rtc/rtc-rx8025.c b/drivers/rtc/rtc-rx8025.c
index bd911ba..b69647e 100644
--- a/drivers/rtc/rtc-rx8025.c
+++ b/drivers/rtc/rtc-rx8025.c
@@ -65,7 +65,6 @@
 
 static const struct i2c_device_id rx8025_id[] = {
 	{ "rx8025", 0 },
-	{ "rv8803", 1 },
 	{ }
 };
 MODULE_DEVICE_TABLE(i2c, rx8025_id);
@@ -147,8 +146,10 @@
 {
 	struct i2c_client *client = dev_id;
 	struct rx8025_data *rx8025 = i2c_get_clientdata(client);
+	struct mutex *lock = &rx8025->rtc->ops_lock;
 	int status;
 
+	mutex_lock(lock);
 	status = rx8025_read_reg(client, RX8025_REG_CTRL2);
 	if (status < 0)
 		goto out;
@@ -173,6 +174,8 @@
 	}
 
 out:
+	mutex_unlock(lock);
+
 	return IRQ_HANDLED;
 }
 
@@ -341,7 +344,17 @@
 	if (client->irq <= 0)
 		return -EINVAL;
 
-	/* Hardware alarm precision is 1 minute! */
+	/*
+	 * Hardware alarm precision is 1 minute!
+	 * round up to nearest minute
+	 */
+	if (t->time.tm_sec) {
+		time64_t alarm_time = rtc_tm_to_time64(&t->time);
+
+		alarm_time += 60 - t->time.tm_sec;
+		rtc_time64_to_tm(alarm_time, &t->time);
+	}
+
 	ald[0] = bin2bcd(t->time.tm_min);
 	if (rx8025->ctrl1 & RX8025_BIT_CTRL1_1224)
 		ald[1] = bin2bcd(t->time.tm_hour);
@@ -539,8 +552,9 @@
 	if (client->irq > 0) {
 		dev_info(&client->dev, "IRQ %d supplied\n", client->irq);
 		err = devm_request_threaded_irq(&client->dev, client->irq, NULL,
-						rx8025_handle_irq, 0, "rx8025",
-						client);
+						rx8025_handle_irq,
+						IRQF_ONESHOT,
+						"rx8025", client);
 		if (err) {
 			dev_err(&client->dev, "unable to request IRQ, alarms disabled\n");
 			client->irq = 0;
@@ -549,6 +563,9 @@
 
 	rx8025->rtc->max_user_freq = 1;
 
+	/* the rx8025 alarm only supports a minute accuracy */
+	rx8025->rtc->uie_unsupported = 1;
+
 	err = rx8025_sysfs_register(&client->dev);
 	return err;
 }
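
The rx8025 set_alarm change (like the rv8803 one earlier) rounds an alarm with a non-zero seconds field up to the next full minute, because the hardware alarm has one-minute resolution. Here is a worked stand-alone version of just that rounding step, using a plain time_t in place of rtc_tm_to_time64()/rtc_time64_to_tm(); round_up_to_minute() is an illustrative helper, not a driver function.

	#include <stdio.h>
	#include <time.h>

	/* Round an alarm up to the next full minute when it carries
	 * seconds, the same arithmetic the set_alarm paths now apply. */
	static time_t round_up_to_minute(time_t alarm, int tm_sec)
	{
		if (tm_sec)
			alarm += 60 - tm_sec;
		return alarm;
	}

	int main(void)
	{
		/* An alarm at hh:mm:37 is pushed to hh:(mm+1):00, i.e. +23 s. */
		time_t requested = 1457000437;	/* a timestamp with tm_sec == 37 */
		time_t programmed = round_up_to_minute(requested, 37);

		printf("%+lld s\n", (long long)(programmed - requested));
		return 0;
	}
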
diff --git a/drivers/rtc/rtc-s5m.c b/drivers/rtc/rtc-s5m.c
index 7407d73..0477678 100644
--- a/drivers/rtc/rtc-s5m.c
+++ b/drivers/rtc/rtc-s5m.c
@@ -216,7 +216,7 @@
  * Read RTC_UDR_CON register and wait till UDR field is cleared.
  * This indicates that time/alarm update ended.
  */
-static inline int s5m8767_wait_for_udr_update(struct s5m_rtc_info *info)
+static int s5m8767_wait_for_udr_update(struct s5m_rtc_info *info)
 {
 	int ret, retry = UDR_READ_RETRY_CNT;
 	unsigned int data;
@@ -231,7 +231,7 @@
 	return ret;
 }
 
-static inline int s5m_check_peding_alarm_interrupt(struct s5m_rtc_info *info,
+static int s5m_check_peding_alarm_interrupt(struct s5m_rtc_info *info,
 		struct rtc_wkalrm *alarm)
 {
 	int ret;
@@ -264,7 +264,7 @@
 	return 0;
 }
 
-static inline int s5m8767_rtc_set_time_reg(struct s5m_rtc_info *info)
+static int s5m8767_rtc_set_time_reg(struct s5m_rtc_info *info)
 {
 	int ret;
 	unsigned int data;
@@ -288,7 +288,7 @@
 	return ret;
 }
 
-static inline int s5m8767_rtc_set_alarm_reg(struct s5m_rtc_info *info)
+static int s5m8767_rtc_set_alarm_reg(struct s5m_rtc_info *info)
 {
 	int ret;
 	unsigned int data;
diff --git a/drivers/rtc/rtc-sysfs.c b/drivers/rtc/rtc-sysfs.c
index 463e286..63b9fb1 100644
--- a/drivers/rtc/rtc-sysfs.c
+++ b/drivers/rtc/rtc-sysfs.c
@@ -218,6 +218,34 @@
 }
 static DEVICE_ATTR_RW(wakealarm);
 
+static ssize_t
+offset_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	ssize_t retval;
+	long offset;
+
+	retval = rtc_read_offset(to_rtc_device(dev), &offset);
+	if (retval == 0)
+		retval = sprintf(buf, "%ld\n", offset);
+
+	return retval;
+}
+
+static ssize_t
+offset_store(struct device *dev, struct device_attribute *attr,
+	     const char *buf, size_t n)
+{
+	ssize_t retval;
+	long offset;
+
+	retval = kstrtol(buf, 10, &offset);
+	if (retval == 0)
+		retval = rtc_set_offset(to_rtc_device(dev), offset);
+
+	return (retval < 0) ? retval : n;
+}
+static DEVICE_ATTR_RW(offset);
+
 static struct attribute *rtc_attrs[] = {
 	&dev_attr_name.attr,
 	&dev_attr_date.attr,
@@ -226,6 +254,7 @@
 	&dev_attr_max_user_freq.attr,
 	&dev_attr_hctosys.attr,
 	&dev_attr_wakealarm.attr,
+	&dev_attr_offset.attr,
 	NULL,
 };
 
@@ -249,9 +278,13 @@
 	struct rtc_device *rtc = to_rtc_device(dev);
 	umode_t mode = attr->mode;
 
-	if (attr == &dev_attr_wakealarm.attr)
+	if (attr == &dev_attr_wakealarm.attr) {
 		if (!rtc_does_wakealarm(rtc))
 			mode = 0;
+	} else if (attr == &dev_attr_offset.attr) {
+		if (!rtc->ops->set_offset)
+			mode = 0;
+	}
 
 	return mode;
 }
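
The new offset attribute shows up per RTC in sysfs and is hidden unless the underlying driver implements set_offset. A minimal user-space sketch of reading it back and writing it follows; the /sys/class/rtc/rtc0/offset path and the program itself are assumptions for illustration only.

	#include <stdio.h>
	#include <stdlib.h>

	int main(void)
	{
		/* Assumed sysfs location; the attribute only appears when
		 * the underlying driver implements set_offset. */
		const char *path = "/sys/class/rtc/rtc0/offset";
		long offset;
		FILE *f;

		f = fopen(path, "r");
		if (!f) {
			perror(path);
			return EXIT_FAILURE;
		}
		if (fscanf(f, "%ld", &offset) != 1)
			offset = 0;
		fclose(f);
		printf("current offset: %ld\n", offset);

		/* Writing stores a new correction via rtc_set_offset(). */
		f = fopen(path, "w");
		if (!f) {
			perror(path);
			return EXIT_FAILURE;
		}
		fprintf(f, "%ld\n", offset);
		fclose(f);
		return EXIT_SUCCESS;
	}
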
diff --git a/drivers/rtc/rtc-tps6586x.c b/drivers/rtc/rtc-tps6586x.c
index 3b6ce80..e404faa 100644
--- a/drivers/rtc/rtc-tps6586x.c
+++ b/drivers/rtc/rtc-tps6586x.c
@@ -286,7 +286,7 @@
 
 	ret = devm_request_threaded_irq(&pdev->dev, rtc->irq, NULL,
 				tps6586x_rtc_irq,
-				IRQF_ONESHOT | IRQF_EARLY_RESUME,
+				IRQF_ONESHOT,
 				dev_name(&pdev->dev), rtc);
 	if (ret < 0) {
 		dev_err(&pdev->dev, "request IRQ(%d) failed with ret %d\n",
diff --git a/drivers/rtc/rtc-tps65910.c b/drivers/rtc/rtc-tps65910.c
index f42aa2b..5a3d53c 100644
--- a/drivers/rtc/rtc-tps65910.c
+++ b/drivers/rtc/rtc-tps65910.c
@@ -268,7 +268,7 @@
 	}
 
 	ret = devm_request_threaded_irq(&pdev->dev, irq, NULL,
-		tps65910_rtc_interrupt, IRQF_TRIGGER_LOW | IRQF_EARLY_RESUME,
+		tps65910_rtc_interrupt, IRQF_TRIGGER_LOW,
 		dev_name(&pdev->dev), &pdev->dev);
 	if (ret < 0) {
 		dev_err(&pdev->dev, "IRQ is not free.\n");
diff --git a/drivers/rtc/rtc-tps80031.c b/drivers/rtc/rtc-tps80031.c
index 27e254c..737f26e 100644
--- a/drivers/rtc/rtc-tps80031.c
+++ b/drivers/rtc/rtc-tps80031.c
@@ -287,7 +287,7 @@
 
 	ret = devm_request_threaded_irq(&pdev->dev, rtc->irq, NULL,
 			tps80031_rtc_irq,
-			IRQF_ONESHOT | IRQF_EARLY_RESUME,
+			IRQF_ONESHOT,
 			dev_name(&pdev->dev), rtc);
 	if (ret < 0) {
 		dev_err(&pdev->dev, "request IRQ:%d failed, err = %d\n",
diff --git a/drivers/rtc/rtc-vr41xx.c b/drivers/rtc/rtc-vr41xx.c
index f64c282..e1b86bb 100644
--- a/drivers/rtc/rtc-vr41xx.c
+++ b/drivers/rtc/rtc-vr41xx.c
@@ -272,12 +272,13 @@
 }
 
 static const struct rtc_class_ops vr41xx_rtc_ops = {
-	.release	= vr41xx_rtc_release,
-	.ioctl		= vr41xx_rtc_ioctl,
-	.read_time	= vr41xx_rtc_read_time,
-	.set_time	= vr41xx_rtc_set_time,
-	.read_alarm	= vr41xx_rtc_read_alarm,
-	.set_alarm	= vr41xx_rtc_set_alarm,
+	.release		= vr41xx_rtc_release,
+	.ioctl			= vr41xx_rtc_ioctl,
+	.read_time		= vr41xx_rtc_read_time,
+	.set_time		= vr41xx_rtc_set_time,
+	.read_alarm		= vr41xx_rtc_read_alarm,
+	.set_alarm		= vr41xx_rtc_set_alarm,
+	.alarm_irq_enable	= vr41xx_rtc_alarm_irq_enable,
 };
 
 static int rtc_probe(struct platform_device *pdev)
diff --git a/drivers/s390/block/dasd_alias.c b/drivers/s390/block/dasd_alias.c
index 286782c..17ad574 100644
--- a/drivers/s390/block/dasd_alias.c
+++ b/drivers/s390/block/dasd_alias.c
@@ -185,14 +185,12 @@
  */
 int dasd_alias_make_device_known_to_lcu(struct dasd_device *device)
 {
-	struct dasd_eckd_private *private;
+	struct dasd_eckd_private *private = device->private;
 	unsigned long flags;
 	struct alias_server *server, *newserver;
 	struct alias_lcu *lcu, *newlcu;
 	struct dasd_uid uid;
 
-	private = (struct dasd_eckd_private *) device->private;
-
 	device->discipline->get_uid(device, &uid);
 	spin_lock_irqsave(&aliastree.lock, flags);
 	server = _find_server(&uid);
@@ -244,14 +242,13 @@
  */
 void dasd_alias_disconnect_device_from_lcu(struct dasd_device *device)
 {
-	struct dasd_eckd_private *private;
+	struct dasd_eckd_private *private = device->private;
 	unsigned long flags;
 	struct alias_lcu *lcu;
 	struct alias_server *server;
 	int was_pending;
 	struct dasd_uid uid;
 
-	private = (struct dasd_eckd_private *) device->private;
 	lcu = private->lcu;
 	/* nothing to do if already disconnected */
 	if (!lcu)
@@ -316,25 +313,15 @@
 			      struct dasd_device *pos)
 {
 
-	struct dasd_eckd_private *private;
+	struct dasd_eckd_private *private = device->private;
 	struct alias_pav_group *group;
 	struct dasd_uid uid;
-	unsigned long flags;
 
-	private = (struct dasd_eckd_private *) device->private;
-
-	/* only lock if not already locked */
-	if (device != pos)
-		spin_lock_irqsave_nested(get_ccwdev_lock(device->cdev), flags,
-					 CDEV_NESTED_SECOND);
 	private->uid.type = lcu->uac->unit[private->uid.real_unit_addr].ua_type;
 	private->uid.base_unit_addr =
 		lcu->uac->unit[private->uid.real_unit_addr].base_ua;
 	uid = private->uid;
 
-	if (device != pos)
-		spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
-
 	/* if we have no PAV anyway, we don't need to bother with PAV groups */
 	if (lcu->pav == NO_PAV) {
 		list_move(&device->alias_list, &lcu->active_devices);
@@ -370,10 +357,9 @@
 static void _remove_device_from_lcu(struct alias_lcu *lcu,
 				    struct dasd_device *device)
 {
-	struct dasd_eckd_private *private;
+	struct dasd_eckd_private *private = device->private;
 	struct alias_pav_group *group;
 
-	private = (struct dasd_eckd_private *) device->private;
 	list_move(&device->alias_list, &lcu->inactive_devices);
 	group = private->pavgroup;
 	if (!group)
@@ -411,6 +397,130 @@
 	return 0;
 }
 
+/*
+ * This function tries to lock all devices on an lcu via trylock.
+ * It returns NULL on success, otherwise the first device that failed.
+ */
+static struct dasd_device *_trylock_all_devices_on_lcu(struct alias_lcu *lcu,
+						      struct dasd_device *pos)
+
+{
+	struct alias_pav_group *pavgroup;
+	struct dasd_device *device;
+
+	list_for_each_entry(device, &lcu->active_devices, alias_list) {
+		if (device == pos)
+			continue;
+		if (!spin_trylock(get_ccwdev_lock(device->cdev)))
+			return device;
+	}
+	list_for_each_entry(device, &lcu->inactive_devices, alias_list) {
+		if (device == pos)
+			continue;
+		if (!spin_trylock(get_ccwdev_lock(device->cdev)))
+			return device;
+	}
+	list_for_each_entry(pavgroup, &lcu->grouplist, group) {
+		list_for_each_entry(device, &pavgroup->baselist, alias_list) {
+			if (device == pos)
+				continue;
+			if (!spin_trylock(get_ccwdev_lock(device->cdev)))
+				return device;
+		}
+		list_for_each_entry(device, &pavgroup->aliaslist, alias_list) {
+			if (device == pos)
+				continue;
+			if (!spin_trylock(get_ccwdev_lock(device->cdev)))
+				return device;
+		}
+	}
+	return NULL;
+}
+
+/*
+ * unlock all devices except the one that is specified as pos
+ * stop if enddev is specified and reached
+ */
+static void _unlock_all_devices_on_lcu(struct alias_lcu *lcu,
+				       struct dasd_device *pos,
+				       struct dasd_device *enddev)
+
+{
+	struct alias_pav_group *pavgroup;
+	struct dasd_device *device;
+
+	list_for_each_entry(device, &lcu->active_devices, alias_list) {
+		if (device == pos)
+			continue;
+		if (device == enddev)
+			return;
+		spin_unlock(get_ccwdev_lock(device->cdev));
+	}
+	list_for_each_entry(device, &lcu->inactive_devices, alias_list) {
+		if (device == pos)
+			continue;
+		if (device == enddev)
+			return;
+		spin_unlock(get_ccwdev_lock(device->cdev));
+	}
+	list_for_each_entry(pavgroup, &lcu->grouplist, group) {
+		list_for_each_entry(device, &pavgroup->baselist, alias_list) {
+			if (device == pos)
+				continue;
+			if (device == enddev)
+				return;
+			spin_unlock(get_ccwdev_lock(device->cdev));
+		}
+		list_for_each_entry(device, &pavgroup->aliaslist, alias_list) {
+			if (device == pos)
+				continue;
+			if (device == enddev)
+				return;
+			spin_unlock(get_ccwdev_lock(device->cdev));
+		}
+	}
+}
+
+/*
+ *  this function is needed because the locking order
+ *  device lock -> lcu lock
+ *  needs to be ensured when iterating over devices in an LCU
+ *
+ *  if a device is specified in pos then its device lock is already held
+ */
+static void _trylock_and_lock_lcu_irqsave(struct alias_lcu *lcu,
+					  struct dasd_device *pos,
+					  unsigned long *flags)
+{
+	struct dasd_device *failed;
+
+	do {
+		spin_lock_irqsave(&lcu->lock, *flags);
+		failed = _trylock_all_devices_on_lcu(lcu, pos);
+		if (failed) {
+			_unlock_all_devices_on_lcu(lcu, pos, failed);
+			spin_unlock_irqrestore(&lcu->lock, *flags);
+			cpu_relax();
+		}
+	} while (failed);
+}
+
+static void _trylock_and_lock_lcu(struct alias_lcu *lcu,
+				  struct dasd_device *pos)
+{
+	struct dasd_device *failed;
+
+	do {
+		spin_lock(&lcu->lock);
+		failed = _trylock_all_devices_on_lcu(lcu, pos);
+		if (failed) {
+			_unlock_all_devices_on_lcu(lcu, pos, failed);
+			spin_unlock(&lcu->lock);
+			cpu_relax();
+		}
+	} while (failed);
+}
+
 static int read_unit_address_configuration(struct dasd_device *device,
 					   struct alias_lcu *lcu)
 {
@@ -487,13 +597,13 @@
 		list_for_each_entry_safe(device, tempdev, &pavgroup->baselist,
 					 alias_list) {
 			list_move(&device->alias_list, &lcu->active_devices);
-			private = (struct dasd_eckd_private *) device->private;
+			private = device->private;
 			private->pavgroup = NULL;
 		}
 		list_for_each_entry_safe(device, tempdev, &pavgroup->aliaslist,
 					 alias_list) {
 			list_move(&device->alias_list, &lcu->active_devices);
-			private = (struct dasd_eckd_private *) device->private;
+			private = device->private;
 			private->pavgroup = NULL;
 		}
 		list_del(&pavgroup->group);
@@ -505,10 +615,7 @@
 	if (rc)
 		return rc;
 
-	/* need to take cdev lock before lcu lock */
-	spin_lock_irqsave_nested(get_ccwdev_lock(refdev->cdev), flags,
-				 CDEV_NESTED_FIRST);
-	spin_lock(&lcu->lock);
+	_trylock_and_lock_lcu_irqsave(lcu, NULL, &flags);
 	lcu->pav = NO_PAV;
 	for (i = 0; i < MAX_DEVICES_PER_LCU; ++i) {
 		switch (lcu->uac->unit[i].ua_type) {
@@ -527,8 +634,8 @@
 				 alias_list) {
 		_add_device_to_lcu(lcu, device, refdev);
 	}
-	spin_unlock(&lcu->lock);
-	spin_unlock_irqrestore(get_ccwdev_lock(refdev->cdev), flags);
+	_unlock_all_devices_on_lcu(lcu, NULL, NULL);
+	spin_unlock_irqrestore(&lcu->lock, flags);
 	return 0;
 }
 
@@ -608,16 +715,13 @@
 
 int dasd_alias_add_device(struct dasd_device *device)
 {
-	struct dasd_eckd_private *private;
+	struct dasd_eckd_private *private = device->private;
 	struct alias_lcu *lcu;
 	unsigned long flags;
 	int rc;
 
-	private = (struct dasd_eckd_private *) device->private;
 	lcu = private->lcu;
 	rc = 0;
-
-	/* need to take cdev lock before lcu lock */
 	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
 	spin_lock(&lcu->lock);
 	if (!(lcu->flags & UPDATE_PENDING)) {
@@ -636,20 +740,18 @@
 
 int dasd_alias_update_add_device(struct dasd_device *device)
 {
-	struct dasd_eckd_private *private;
-	private = (struct dasd_eckd_private *) device->private;
+	struct dasd_eckd_private *private = device->private;
+
 	private->lcu->flags |= UPDATE_PENDING;
 	return dasd_alias_add_device(device);
 }
 
 int dasd_alias_remove_device(struct dasd_device *device)
 {
-	struct dasd_eckd_private *private;
-	struct alias_lcu *lcu;
+	struct dasd_eckd_private *private = device->private;
+	struct alias_lcu *lcu = private->lcu;
 	unsigned long flags;
 
-	private = (struct dasd_eckd_private *) device->private;
-	lcu = private->lcu;
 	/* nothing to do if already removed */
 	if (!lcu)
 		return 0;
@@ -661,16 +763,12 @@
 
 struct dasd_device *dasd_alias_get_start_dev(struct dasd_device *base_device)
 {
-
+	struct dasd_eckd_private *alias_priv, *private = base_device->private;
+	struct alias_pav_group *group = private->pavgroup;
+	struct alias_lcu *lcu = private->lcu;
 	struct dasd_device *alias_device;
-	struct alias_pav_group *group;
-	struct alias_lcu *lcu;
-	struct dasd_eckd_private *private, *alias_priv;
 	unsigned long flags;
 
-	private = (struct dasd_eckd_private *) base_device->private;
-	group = private->pavgroup;
-	lcu = private->lcu;
 	if (!group || !lcu)
 		return NULL;
 	if (lcu->pav == NO_PAV ||
@@ -706,7 +804,7 @@
 		group->next = list_first_entry(&alias_device->alias_list,
 					       struct dasd_device, alias_list);
 	spin_unlock_irqrestore(&lcu->lock, flags);
-	alias_priv = (struct dasd_eckd_private *) alias_device->private;
+	alias_priv = alias_device->private;
 	if ((alias_priv->count < private->count) && !alias_device->stopped &&
 	    !test_bit(DASD_FLAG_OFFLINE, &alias_device->flags))
 		return alias_device;
@@ -754,30 +852,19 @@
 	struct alias_pav_group *pavgroup;
 	struct dasd_device *device;
 	struct dasd_eckd_private *private;
-	unsigned long flags;
 
 	/* active and inactive list can contain alias as well as base devices */
 	list_for_each_entry(device, &lcu->active_devices, alias_list) {
-		private = (struct dasd_eckd_private *) device->private;
-		spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
-		if (private->uid.type != UA_BASE_DEVICE) {
-			spin_unlock_irqrestore(get_ccwdev_lock(device->cdev),
-					       flags);
+		private = device->private;
+		if (private->uid.type != UA_BASE_DEVICE)
 			continue;
-		}
-		spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
 		dasd_schedule_block_bh(device->block);
 		dasd_schedule_device_bh(device);
 	}
 	list_for_each_entry(device, &lcu->inactive_devices, alias_list) {
-		private = (struct dasd_eckd_private *) device->private;
-		spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
-		if (private->uid.type != UA_BASE_DEVICE) {
-			spin_unlock_irqrestore(get_ccwdev_lock(device->cdev),
-					       flags);
+		private = device->private;
+		if (private->uid.type != UA_BASE_DEVICE)
 			continue;
-		}
-		spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
 		dasd_schedule_block_bh(device->block);
 		dasd_schedule_device_bh(device);
 	}
@@ -812,7 +899,7 @@
 	spin_lock_irqsave(&lcu->lock, flags);
 	list_for_each_entry_safe(device, temp, &lcu->active_devices,
 				 alias_list) {
-		private = (struct dasd_eckd_private *) device->private;
+		private = device->private;
 		if (private->uid.type == UA_BASE_DEVICE)
 			continue;
 		list_move(&device->alias_list, &active);
@@ -834,45 +921,27 @@
 		if (device == list_first_entry(&active,
 					       struct dasd_device, alias_list)) {
 			list_move(&device->alias_list, &lcu->active_devices);
-			private = (struct dasd_eckd_private *) device->private;
+			private = device->private;
 			private->pavgroup = NULL;
 		}
 	}
 	spin_unlock_irqrestore(&lcu->lock, flags);
 }
 
-static void __stop_device_on_lcu(struct dasd_device *device,
-				 struct dasd_device *pos)
-{
-	/* If pos == device then device is already locked! */
-	if (pos == device) {
-		dasd_device_set_stop_bits(pos, DASD_STOPPED_SU);
-		return;
-	}
-	spin_lock(get_ccwdev_lock(pos->cdev));
-	dasd_device_set_stop_bits(pos, DASD_STOPPED_SU);
-	spin_unlock(get_ccwdev_lock(pos->cdev));
-}
-
-/*
- * This function is called in interrupt context, so the
- * cdev lock for device is already locked!
- */
-static void _stop_all_devices_on_lcu(struct alias_lcu *lcu,
-				     struct dasd_device *device)
+static void _stop_all_devices_on_lcu(struct alias_lcu *lcu)
 {
 	struct alias_pav_group *pavgroup;
-	struct dasd_device *pos;
+	struct dasd_device *device;
 
-	list_for_each_entry(pos, &lcu->active_devices, alias_list)
-		__stop_device_on_lcu(device, pos);
-	list_for_each_entry(pos, &lcu->inactive_devices, alias_list)
-		__stop_device_on_lcu(device, pos);
+	list_for_each_entry(device, &lcu->active_devices, alias_list)
+		dasd_device_set_stop_bits(device, DASD_STOPPED_SU);
+	list_for_each_entry(device, &lcu->inactive_devices, alias_list)
+		dasd_device_set_stop_bits(device, DASD_STOPPED_SU);
 	list_for_each_entry(pavgroup, &lcu->grouplist, group) {
-		list_for_each_entry(pos, &pavgroup->baselist, alias_list)
-			__stop_device_on_lcu(device, pos);
-		list_for_each_entry(pos, &pavgroup->aliaslist, alias_list)
-			__stop_device_on_lcu(device, pos);
+		list_for_each_entry(device, &pavgroup->baselist, alias_list)
+			dasd_device_set_stop_bits(device, DASD_STOPPED_SU);
+		list_for_each_entry(device, &pavgroup->aliaslist, alias_list)
+			dasd_device_set_stop_bits(device, DASD_STOPPED_SU);
 	}
 }
 
@@ -880,33 +949,16 @@
 {
 	struct alias_pav_group *pavgroup;
 	struct dasd_device *device;
-	unsigned long flags;
 
-	list_for_each_entry(device, &lcu->active_devices, alias_list) {
-		spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
+	list_for_each_entry(device, &lcu->active_devices, alias_list)
 		dasd_device_remove_stop_bits(device, DASD_STOPPED_SU);
-		spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
-	}
-
-	list_for_each_entry(device, &lcu->inactive_devices, alias_list) {
-		spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
+	list_for_each_entry(device, &lcu->inactive_devices, alias_list)
 		dasd_device_remove_stop_bits(device, DASD_STOPPED_SU);
-		spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
-	}
-
 	list_for_each_entry(pavgroup, &lcu->grouplist, group) {
-		list_for_each_entry(device, &pavgroup->baselist, alias_list) {
-			spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
+		list_for_each_entry(device, &pavgroup->baselist, alias_list)
 			dasd_device_remove_stop_bits(device, DASD_STOPPED_SU);
-			spin_unlock_irqrestore(get_ccwdev_lock(device->cdev),
-					       flags);
-		}
-		list_for_each_entry(device, &pavgroup->aliaslist, alias_list) {
-			spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
+		list_for_each_entry(device, &pavgroup->aliaslist, alias_list)
 			dasd_device_remove_stop_bits(device, DASD_STOPPED_SU);
-			spin_unlock_irqrestore(get_ccwdev_lock(device->cdev),
-					       flags);
-		}
 	}
 }
 
@@ -932,13 +984,14 @@
 	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
 	reset_summary_unit_check(lcu, device, suc_data->reason);
 
-	spin_lock_irqsave(&lcu->lock, flags);
+	_trylock_and_lock_lcu_irqsave(lcu, NULL, &flags);
 	_unstop_all_devices_on_lcu(lcu);
 	_restart_all_base_devices_on_lcu(lcu);
 	/* 3. read new alias configuration */
 	_schedule_lcu_update(lcu, device);
 	lcu->suc_data.device = NULL;
 	dasd_put_device(device);
+	_unlock_all_devices_on_lcu(lcu, NULL, NULL);
 	spin_unlock_irqrestore(&lcu->lock, flags);
 }
 
@@ -948,13 +1001,11 @@
 void dasd_alias_handle_summary_unit_check(struct dasd_device *device,
 					  struct irb *irb)
 {
+	struct dasd_eckd_private *private = device->private;
 	struct alias_lcu *lcu;
 	char reason;
-	struct dasd_eckd_private *private;
 	char *sense;
 
-	private = (struct dasd_eckd_private *) device->private;
-
 	sense = dasd_get_sense(irb);
 	if (sense) {
 		reason = sense[8];
@@ -974,10 +1025,7 @@
 			    " unit check (no lcu structure)");
 		return;
 	}
-	spin_lock(&lcu->lock);
-	_stop_all_devices_on_lcu(lcu, device);
-	/* prepare for lcu_update */
-	private->lcu->flags |= NEED_UAC_UPDATE | UPDATE_PENDING;
+	_trylock_and_lock_lcu(lcu, device);
 	/* If this device is about to be removed just return and wait for
 	 * the next interrupt on a different device
 	 */
@@ -985,6 +1033,7 @@
 		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
 			    "device is in offline processing,"
 			    " don't do summary unit check handling");
+		_unlock_all_devices_on_lcu(lcu, device, NULL);
 		spin_unlock(&lcu->lock);
 		return;
 	}
@@ -993,12 +1042,17 @@
 		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
 			    "previous instance of summary unit check worker"
 			    " still pending");
+		_unlock_all_devices_on_lcu(lcu, device, NULL);
 		spin_unlock(&lcu->lock);
 		return ;
 	}
+	_stop_all_devices_on_lcu(lcu);
+	/* prepare for lcu_update */
+	private->lcu->flags |= NEED_UAC_UPDATE | UPDATE_PENDING;
 	lcu->suc_data.reason = reason;
 	lcu->suc_data.device = device;
 	dasd_get_device(device);
+	_unlock_all_devices_on_lcu(lcu, device, NULL);
 	spin_unlock(&lcu->lock);
 	if (!schedule_work(&lcu->suc_data.worker))
 		dasd_put_device(device);
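
Note: the summary unit check path above no longer takes each device's ccw-device lock inside the list walks; instead it brackets the whole operation with _trylock_and_lock_lcu()/_trylock_and_lock_lcu_irqsave() and _unlock_all_devices_on_lcu(). Those helpers are introduced in a part of this patch that is not shown here and presumably acquire lcu->lock together with the ccw-device locks of every device on the LCU. As a rough, purely illustrative sketch of the trylock-and-back-off pattern such a helper could use (the function name, the -EBUSY convention and the single-list scope are assumptions, not the patch's actual code):

	/* Illustrative only: try to take the ccwdev lock of every device on
	 * one LCU list, skipping the device whose lock the caller already
	 * holds; on contention release everything taken so far and report
	 * failure so the caller can drop lcu->lock and retry. */
	static int example_trylock_lcu_devices(struct alias_lcu *lcu,
					       struct dasd_device *skip)
	{
		struct dasd_device *pos, *undo;

		list_for_each_entry(pos, &lcu->active_devices, alias_list) {
			if (pos == skip)
				continue;
			if (spin_trylock(get_ccwdev_lock(pos->cdev)))
				continue;
			/* back out the locks taken so far */
			list_for_each_entry(undo, &lcu->active_devices, alias_list) {
				if (undo == pos)
					break;
				if (undo != skip)
					spin_unlock(get_ccwdev_lock(undo->cdev));
			}
			return -EBUSY;
		}
		return 0;
	}

The real helpers would also have to cover the inactive_devices list and the base/alias lists of every pavgroup, which is presumably why _stop_all_devices_on_lcu() and _unstop_all_devices_on_lcu() above can now call dasd_device_set_stop_bits()/dasd_device_remove_stop_bits() without any per-device locking of their own.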
diff --git a/drivers/s390/block/dasd_devmap.c b/drivers/s390/block/dasd_devmap.c
index 8286f74..2f18f61 100644
--- a/drivers/s390/block/dasd_devmap.c
+++ b/drivers/s390/block/dasd_devmap.c
@@ -214,8 +214,8 @@
 		else if (len == 8 && !strncmp(str, "failfast", 8))
 			features |= DASD_FEATURE_FAILFAST;
 		else {
-			pr_warning("%*s is not a supported device option\n",
-				   len, str);
+			pr_warn("%*s is not a supported device option\n",
+				len, str);
 			rc = -EINVAL;
 		}
 		str += len;
@@ -224,8 +224,7 @@
 		str++;
 	}
 	if (*str != ')') {
-		pr_warning("A closing parenthesis ')' is missing in the "
-			   "dasd= parameter\n");
+		pr_warn("A closing parenthesis ')' is missing in the dasd= parameter\n");
 		rc = -EINVAL;
 	} else
 		str++;
@@ -348,8 +347,7 @@
 		return str + 1;
 	if (*str == '\0')
 		return str;
-	pr_warning("The dasd= parameter value %s has an invalid ending\n",
-		   str);
+	pr_warn("The dasd= parameter value %s has an invalid ending\n", str);
 	return ERR_PTR(-EINVAL);
 }
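
Note: the message cleanups in this and the following s390 hunks follow one pattern: pr_warning() is replaced by the preferred short form pr_warn(), and format strings that were split across several source lines are joined into a single line so the messages stay greppable (checkpatch allows user-visible strings to exceed the line-length limit for exactly this reason). A minimal before/after illustration with a made-up message:

	/* before: deprecated name, string split across lines */
	pr_warning("device %s: setup failed "
		   "(rc=%d)\n", name, rc);

	/* after: pr_warn(), one greppable format string */
	pr_warn("device %s: setup failed (rc=%d)\n", name, rc);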
 
diff --git a/drivers/s390/block/dasd_diag.c b/drivers/s390/block/dasd_diag.c
index 277b5c8..5667146 100644
--- a/drivers/s390/block/dasd_diag.c
+++ b/drivers/s390/block/dasd_diag.c
@@ -104,12 +104,10 @@
 mdsk_init_io(struct dasd_device *device, unsigned int blocksize,
 	     blocknum_t offset, blocknum_t *end_block)
 {
-	struct dasd_diag_private *private;
-	struct dasd_diag_init_io *iib;
+	struct dasd_diag_private *private = device->private;
+	struct dasd_diag_init_io *iib = &private->iib;
 	int rc;
 
-	private = (struct dasd_diag_private *) device->private;
-	iib = &private->iib;
 	memset(iib, 0, sizeof (struct dasd_diag_init_io));
 
 	iib->dev_nr = private->dev_id.devno;
@@ -130,12 +128,10 @@
 static inline int
 mdsk_term_io(struct dasd_device * device)
 {
-	struct dasd_diag_private *private;
-	struct dasd_diag_init_io *iib;
+	struct dasd_diag_private *private = device->private;
+	struct dasd_diag_init_io *iib = &private->iib;
 	int rc;
 
-	private = (struct dasd_diag_private *) device->private;
-	iib = &private->iib;
 	memset(iib, 0, sizeof (struct dasd_diag_init_io));
 	iib->dev_nr = private->dev_id.devno;
 	rc = dia250(iib, TERM_BIO);
@@ -153,14 +149,13 @@
 	rc = mdsk_init_io(device, device->block->bp_block, 0, NULL);
 	if (rc == 4) {
 		if (!(test_and_set_bit(DASD_FLAG_DEVICE_RO, &device->flags)))
-			pr_warning("%s: The access mode of a DIAG device "
-				   "changed to read-only\n",
-				   dev_name(&device->cdev->dev));
+			pr_warn("%s: The access mode of a DIAG device changed to read-only\n",
+				dev_name(&device->cdev->dev));
 		rc = 0;
 	}
 	if (rc)
-		pr_warning("%s: DIAG ERP failed with "
-			    "rc=%d\n", dev_name(&device->cdev->dev), rc);
+		pr_warn("%s: DIAG ERP failed with rc=%d\n",
+			dev_name(&device->cdev->dev), rc);
 }
 
 /* Start a given request at the device. Return zero on success, non-zero
@@ -180,8 +175,8 @@
 		cqr->status = DASD_CQR_ERROR;
 		return -EIO;
 	}
-	private = (struct dasd_diag_private *) device->private;
-	dreq = (struct dasd_diag_req *) cqr->data;
+	private = device->private;
+	dreq = cqr->data;
 
 	private->iob.dev_nr = private->dev_id.devno;
 	private->iob.key = 0;
@@ -320,18 +315,17 @@
 static int
 dasd_diag_check_device(struct dasd_device *device)
 {
-	struct dasd_block *block;
-	struct dasd_diag_private *private;
+	struct dasd_diag_private *private = device->private;
 	struct dasd_diag_characteristics *rdc_data;
-	struct dasd_diag_bio bio;
 	struct vtoc_cms_label *label;
-	blocknum_t end_block;
+	struct dasd_block *block;
+	struct dasd_diag_bio bio;
 	unsigned int sb, bsize;
+	blocknum_t end_block;
 	int rc;
 
-	private = (struct dasd_diag_private *) device->private;
 	if (private == NULL) {
-		private = kzalloc(sizeof(struct dasd_diag_private),GFP_KERNEL);
+		private = kzalloc(sizeof(*private), GFP_KERNEL);
 		if (private == NULL) {
 			DBF_DEV_EVENT(DBF_WARNING, device, "%s",
 				"Allocating memory for private DASD data "
@@ -339,7 +333,7 @@
 			return -ENOMEM;
 		}
 		ccw_device_get_id(device->cdev, &private->dev_id);
-		device->private = (void *) private;
+		device->private = private;
 	}
 	block = dasd_alloc_block();
 	if (IS_ERR(block)) {
@@ -353,7 +347,7 @@
 	block->base = device;
 
 	/* Read Device Characteristics */
-	rdc_data = (void *) &(private->rdc_data);
+	rdc_data = &private->rdc_data;
 	rdc_data->dev_nr = private->dev_id.devno;
 	rdc_data->rdc_len = sizeof (struct dasd_diag_characteristics);
 
@@ -377,9 +371,9 @@
 		private->pt_block = 2;
 		break;
 	default:
-		pr_warning("%s: Device type %d is not supported "
-			   "in DIAG mode\n", dev_name(&device->cdev->dev),
-			   private->rdc_data.vdev_class);
+		pr_warn("%s: Device type %d is not supported in DIAG mode\n",
+			dev_name(&device->cdev->dev),
+			private->rdc_data.vdev_class);
 		rc = -EOPNOTSUPP;
 		goto out;
 	}
@@ -420,8 +414,8 @@
 		private->iob.flaga = DASD_DIAG_FLAGA_DEFAULT;
 		rc = dia250(&private->iob, RW_BIO);
 		if (rc == 3) {
-			pr_warning("%s: A 64-bit DIAG call failed\n",
-				   dev_name(&device->cdev->dev));
+			pr_warn("%s: A 64-bit DIAG call failed\n",
+				dev_name(&device->cdev->dev));
 			rc = -EOPNOTSUPP;
 			goto out_label;
 		}
@@ -430,9 +424,8 @@
 			break;
 	}
 	if (bsize > PAGE_SIZE) {
-		pr_warning("%s: Accessing the DASD failed because of an "
-			   "incorrect format (rc=%d)\n",
-			   dev_name(&device->cdev->dev), rc);
+		pr_warn("%s: Accessing the DASD failed because of an incorrect format (rc=%d)\n",
+			dev_name(&device->cdev->dev), rc);
 		rc = -EIO;
 		goto out_label;
 	}
@@ -450,8 +443,8 @@
 		block->s2b_shift++;
 	rc = mdsk_init_io(device, block->bp_block, 0, NULL);
 	if (rc && (rc != 4)) {
-		pr_warning("%s: DIAG initialization failed with rc=%d\n",
-			   dev_name(&device->cdev->dev), rc);
+		pr_warn("%s: DIAG initialization failed with rc=%d\n",
+			dev_name(&device->cdev->dev), rc);
 		rc = -EIO;
 	} else {
 		if (rc == 4)
@@ -601,16 +594,14 @@
 dasd_diag_fill_info(struct dasd_device * device,
 		    struct dasd_information2_t * info)
 {
-	struct dasd_diag_private *private;
+	struct dasd_diag_private *private = device->private;
 
-	private = (struct dasd_diag_private *) device->private;
 	info->label_block = (unsigned int) private->pt_block;
 	info->FBA_layout = 1;
 	info->format = DASD_FORMAT_LDL;
-	info->characteristics_size = sizeof (struct dasd_diag_characteristics);
-	memcpy(info->characteristics,
-	       &((struct dasd_diag_private *) device->private)->rdc_data,
-	       sizeof (struct dasd_diag_characteristics));
+	info->characteristics_size = sizeof(private->rdc_data);
+	memcpy(info->characteristics, &private->rdc_data,
+	       sizeof(private->rdc_data));
 	info->confdata_size = 0;
 	return 0;
 }
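
Note also the sizeof() changes in dasd_diag_fill_info() (and in the analogous dasd_eckd and dasd_fba hunks further down): the copy length is now taken from the object that is actually copied rather than from a spelled-out type name, so the memcpy() size cannot drift if the field's type is ever changed. A generic sketch of the idiom (the structure names here are made up for illustration):

	struct example_characteristics { unsigned int blk_size; };

	struct example_private {
		struct example_characteristics rdc_data;
	};

	static void example_fill_info(struct example_private *private, void *dst)
	{
		/* sizeof on the member, not the type: always matches rdc_data */
		memcpy(dst, &private->rdc_data, sizeof(private->rdc_data));
	}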
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index 9083247..75c032d 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -212,10 +212,9 @@
            struct DE_eckd_data *data,
            struct dasd_device  *device)
 {
-        struct dasd_eckd_private *private;
+	struct dasd_eckd_private *private = device->private;
 	int rc;
 
-        private = (struct dasd_eckd_private *) device->private;
 	if (!private->rdc_data.facilities.XRC_supported)
 		return 0;
 
@@ -237,13 +236,11 @@
 define_extent(struct ccw1 *ccw, struct DE_eckd_data *data, unsigned int trk,
 	      unsigned int totrk, int cmd, struct dasd_device *device)
 {
-	struct dasd_eckd_private *private;
+	struct dasd_eckd_private *private = device->private;
 	u32 begcyl, endcyl;
 	u16 heads, beghead, endhead;
 	int rc = 0;
 
-	private = (struct dasd_eckd_private *) device->private;
-
 	ccw->cmd_code = DASD_ECKD_CCW_DEFINE_EXTENT;
 	ccw->flags = 0;
 	ccw->count = 16;
@@ -322,10 +319,9 @@
 static int check_XRC_on_prefix(struct PFX_eckd_data *pfxdata,
 			       struct dasd_device  *device)
 {
-	struct dasd_eckd_private *private;
+	struct dasd_eckd_private *private = device->private;
 	int rc;
 
-	private = (struct dasd_eckd_private *) device->private;
 	if (!private->rdc_data.facilities.XRC_supported)
 		return 0;
 
@@ -346,12 +342,10 @@
 			  struct dasd_device *device, unsigned int reclen,
 			  unsigned int tlf)
 {
-	struct dasd_eckd_private *private;
+	struct dasd_eckd_private *private = device->private;
 	int sector;
 	int dn, d;
 
-	private = (struct dasd_eckd_private *) device->private;
-
 	memset(data, 0, sizeof(*data));
 	sector = 0;
 	if (rec_on_trk) {
@@ -488,8 +482,8 @@
 	u16 heads, beghead, endhead;
 	int rc = 0;
 
-	basepriv = (struct dasd_eckd_private *) basedev->private;
-	startpriv = (struct dasd_eckd_private *) startdev->private;
+	basepriv = basedev->private;
+	startpriv = startdev->private;
 	dedata = &pfxdata->define_extent;
 	lredata = &pfxdata->locate_record;
 
@@ -631,12 +625,10 @@
 	      unsigned int rec_on_trk, int no_rec, int cmd,
 	      struct dasd_device * device, int reclen)
 {
-	struct dasd_eckd_private *private;
+	struct dasd_eckd_private *private = device->private;
 	int sector;
 	int dn, d;
 
-	private = (struct dasd_eckd_private *) device->private;
-
 	DBF_DEV_EVENT(DBF_INFO, device,
 		  "Locate: trk %d, rec %d, no_rec %d, cmd %d, reclen %d",
 		  trk, rec_on_trk, no_rec, cmd, reclen);
@@ -800,10 +792,9 @@
  */
 static int dasd_eckd_generate_uid(struct dasd_device *device)
 {
-	struct dasd_eckd_private *private;
+	struct dasd_eckd_private *private = device->private;
 	unsigned long flags;
 
-	private = (struct dasd_eckd_private *) device->private;
 	if (!private)
 		return -ENODEV;
 	if (!private->ned || !private->gneq)
@@ -816,11 +807,10 @@
 
 static int dasd_eckd_get_uid(struct dasd_device *device, struct dasd_uid *uid)
 {
-	struct dasd_eckd_private *private;
+	struct dasd_eckd_private *private = device->private;
 	unsigned long flags;
 
-	if (device->private) {
-		private = (struct dasd_eckd_private *)device->private;
+	if (private) {
 		spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
 		*uid = private->uid;
 		spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
@@ -1034,10 +1024,9 @@
 
 static void dasd_eckd_clear_conf_data(struct dasd_device *device)
 {
-	struct dasd_eckd_private *private;
+	struct dasd_eckd_private *private = device->private;
 	int i;
 
-	private = (struct dasd_eckd_private *) device->private;
 	private->conf_data = NULL;
 	private->conf_len = 0;
 	for (i = 0; i < 8; i++) {
@@ -1058,7 +1047,7 @@
 	struct dasd_uid *uid;
 	char print_path_uid[60], print_device_uid[60];
 
-	private = (struct dasd_eckd_private *) device->private;
+	private = device->private;
 	path_data = &device->path_data;
 	opm = ccw_device_get_path_mask(device->cdev);
 	conf_data_saved = 0;
@@ -1191,11 +1180,10 @@
 
 static int verify_fcx_max_data(struct dasd_device *device, __u8 lpm)
 {
-	struct dasd_eckd_private *private;
+	struct dasd_eckd_private *private = device->private;
 	int mdc;
 	u32 fcx_max_data;
 
-	private = (struct dasd_eckd_private *) device->private;
 	if (private->fcx_max_data) {
 		mdc = ccw_device_get_mdc(device->cdev, lpm);
 		if ((mdc < 0)) {
@@ -1221,15 +1209,10 @@
 static int rebuild_device_uid(struct dasd_device *device,
 			      struct path_verification_work_data *data)
 {
-	struct dasd_eckd_private *private;
-	struct dasd_path *path_data;
-	__u8 lpm, opm;
-	int rc;
-
-	rc = -ENODEV;
-	private = (struct dasd_eckd_private *) device->private;
-	path_data = &device->path_data;
-	opm = device->path_data.opm;
+	struct dasd_eckd_private *private = device->private;
+	struct dasd_path *path_data = &device->path_data;
+	__u8 lpm, opm = path_data->opm;
+	int rc = -ENODEV;
 
 	for (lpm = 0x80; lpm; lpm >>= 1) {
 		if (!(lpm & opm))
@@ -1463,14 +1446,13 @@
 
 static int dasd_eckd_read_features(struct dasd_device *device)
 {
+	struct dasd_eckd_private *private = device->private;
 	struct dasd_psf_prssd_data *prssdp;
 	struct dasd_rssd_features *features;
 	struct dasd_ccw_req *cqr;
 	struct ccw1 *ccw;
 	int rc;
-	struct dasd_eckd_private *private;
 
-	private = (struct dasd_eckd_private *) device->private;
 	memset(&private->features, 0, sizeof(struct dasd_rssd_features));
 	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */	+ 1 /* RSSD */,
 				   (sizeof(struct dasd_psf_prssd_data) +
@@ -1605,11 +1587,9 @@
 static int dasd_eckd_validate_server(struct dasd_device *device,
 				     unsigned long flags)
 {
-	int rc;
-	struct dasd_eckd_private *private;
-	int enable_pav;
+	struct dasd_eckd_private *private = device->private;
+	int enable_pav, rc;
 
-	private = (struct dasd_eckd_private *) device->private;
 	if (private->uid.type == UA_BASE_PAV_ALIAS ||
 	    private->uid.type == UA_HYPER_PAV_ALIAS)
 		return 0;
@@ -1662,14 +1642,13 @@
 
 static u32 get_fcx_max_data(struct dasd_device *device)
 {
-	int tpm, mdc;
+	struct dasd_eckd_private *private = device->private;
 	int fcx_in_css, fcx_in_gneq, fcx_in_features;
-	struct dasd_eckd_private *private;
+	int tpm, mdc;
 
 	if (dasd_nofcx)
 		return 0;
 	/* is transport mode supported? */
-	private = (struct dasd_eckd_private *) device->private;
 	fcx_in_css = css_general_characteristics.fcx;
 	fcx_in_gneq = private->gneq->reserved2[7] & 0x04;
 	fcx_in_features = private->features.feature[40] & 0x80;
@@ -1694,7 +1673,7 @@
 static int
 dasd_eckd_check_characteristics(struct dasd_device *device)
 {
-	struct dasd_eckd_private *private;
+	struct dasd_eckd_private *private = device->private;
 	struct dasd_block *block;
 	struct dasd_uid temp_uid;
 	int rc, i;
@@ -1713,7 +1692,6 @@
 		dev_info(&device->cdev->dev,
 			 "The DASD is not operating in multipath mode\n");
 	}
-	private = (struct dasd_eckd_private *) device->private;
 	if (!private) {
 		private = kzalloc(sizeof(*private), GFP_KERNEL | GFP_DMA);
 		if (!private) {
@@ -1722,7 +1700,7 @@
 				 "failed\n");
 			return -ENOMEM;
 		}
-		device->private = (void *) private;
+		device->private = private;
 	} else {
 		memset(private, 0, sizeof(*private));
 	}
@@ -1837,10 +1815,9 @@
 
 static void dasd_eckd_uncheck_device(struct dasd_device *device)
 {
-	struct dasd_eckd_private *private;
+	struct dasd_eckd_private *private = device->private;
 	int i;
 
-	private = (struct dasd_eckd_private *) device->private;
 	dasd_alias_disconnect_device_from_lcu(device);
 	private->ned = NULL;
 	private->sneq = NULL;
@@ -1863,7 +1840,7 @@
 static struct dasd_ccw_req *
 dasd_eckd_analysis_ccw(struct dasd_device *device)
 {
-	struct dasd_eckd_private *private;
+	struct dasd_eckd_private *private = device->private;
 	struct eckd_count *count_data;
 	struct LO_eckd_data *LO_data;
 	struct dasd_ccw_req *cqr;
@@ -1871,8 +1848,6 @@
 	int cplength, datasize;
 	int i;
 
-	private = (struct dasd_eckd_private *) device->private;
-
 	cplength = 8;
 	datasize = sizeof(struct DE_eckd_data) + 2*sizeof(struct LO_eckd_data);
 	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, datasize, device);
@@ -1946,11 +1921,9 @@
 static void dasd_eckd_analysis_callback(struct dasd_ccw_req *init_cqr,
 					void *data)
 {
-	struct dasd_eckd_private *private;
-	struct dasd_device *device;
+	struct dasd_device *device = init_cqr->startdev;
+	struct dasd_eckd_private *private = device->private;
 
-	device = init_cqr->startdev;
-	private = (struct dasd_eckd_private *) device->private;
 	private->init_cqr_status = dasd_eckd_analysis_evaluation(init_cqr);
 	dasd_sfree_request(init_cqr, device);
 	dasd_kick_device(device);
@@ -1977,15 +1950,13 @@
 
 static int dasd_eckd_end_analysis(struct dasd_block *block)
 {
-	struct dasd_device *device;
-	struct dasd_eckd_private *private;
+	struct dasd_device *device = block->base;
+	struct dasd_eckd_private *private = device->private;
 	struct eckd_count *count_area;
 	unsigned int sb, blk_per_trk;
 	int status, i;
 	struct dasd_ccw_req *init_cqr;
 
-	device = block->base;
-	private = (struct dasd_eckd_private *) device->private;
 	status = private->init_cqr_status;
 	private->init_cqr_status = -1;
 	if (status == INIT_CQR_ERROR) {
@@ -2083,9 +2054,8 @@
 
 static int dasd_eckd_do_analysis(struct dasd_block *block)
 {
-	struct dasd_eckd_private *private;
+	struct dasd_eckd_private *private = block->base->private;
 
-	private = (struct dasd_eckd_private *) block->base->private;
 	if (private->init_cqr_status < 0)
 		return dasd_eckd_start_analysis(block);
 	else
@@ -2112,9 +2082,8 @@
 static int
 dasd_eckd_fill_geometry(struct dasd_block *block, struct hd_geometry *geo)
 {
-	struct dasd_eckd_private *private;
+	struct dasd_eckd_private *private = block->base->private;
 
-	private = (struct dasd_eckd_private *) block->base->private;
 	if (dasd_check_blocksize(block->bp_block) == 0) {
 		geo->sectors = recs_per_track(&private->rdc_data,
 					      0, block->bp_block);
@@ -2151,8 +2120,8 @@
 	if (!startdev)
 		startdev = base;
 
-	start_priv = (struct dasd_eckd_private *) startdev->private;
-	base_priv = (struct dasd_eckd_private *) base->private;
+	start_priv = startdev->private;
+	base_priv = base->private;
 
 	rpt = recs_per_track(&base_priv->rdc_data, 0, fdata->blksize);
 
@@ -2349,14 +2318,14 @@
 				 * when formatting CDL
 				 */
 				if ((intensity & 0x08) &&
-				    fdata->start_unit == 0) {
+				    address.cyl == 0 && address.head == 0) {
 					if (i < 3) {
 						ect->kl = 4;
 						ect->dl = sizes_trk0[i] - 4;
 					}
 				}
 				if ((intensity & 0x08) &&
-				    fdata->start_unit == 1) {
+				    address.cyl == 0 && address.head == 1) {
 					ect->kl = 44;
 					ect->dl = LABEL_SIZE - 44;
 				}
@@ -2386,23 +2355,24 @@
 	return fcp;
 }
 
-static int
-dasd_eckd_format_device(struct dasd_device *base,
-			struct format_data_t *fdata,
-			int enable_pav)
+/*
+ * Wrapper function to build a CCW request depending on input data
+ */
+static struct dasd_ccw_req *
+dasd_eckd_format_build_ccw_req(struct dasd_device *base,
+			       struct format_data_t *fdata, int enable_pav)
 {
-	struct dasd_ccw_req *cqr, *n;
-	struct dasd_block *block;
-	struct dasd_eckd_private *private;
-	struct list_head format_queue;
-	struct dasd_device *device;
-	int old_stop, format_step;
-	int step, rc = 0, sleep_rc;
+	return dasd_eckd_build_format(base, fdata, enable_pav);
+}
 
-	block = base->block;
-	private = (struct dasd_eckd_private *) base->private;
+/*
+ * Sanity checks on format_data
+ */
+static int dasd_eckd_format_sanity_checks(struct dasd_device *base,
+					  struct format_data_t *fdata)
+{
+	struct dasd_eckd_private *private = base->private;
 
-	/* Sanity checks. */
 	if (fdata->start_unit >=
 	    (private->real_cyl * private->rdc_data.trk_per_cyl)) {
 		dev_warn(&base->cdev->dev,
@@ -2429,75 +2399,98 @@
 			 fdata->blksize);
 		return -EINVAL;
 	}
+	return 0;
+}
+
+/*
+ * This function will process format_data originally coming from an IOCTL
+ */
+static int dasd_eckd_format_process_data(struct dasd_device *base,
+					 struct format_data_t *fdata,
+					 int enable_pav)
+{
+	struct dasd_eckd_private *private = base->private;
+	struct dasd_ccw_req *cqr, *n;
+	struct list_head format_queue;
+	struct dasd_device *device;
+	int old_start, old_stop, format_step;
+	int step, retry;
+	int rc;
+
+	rc = dasd_eckd_format_sanity_checks(base, fdata);
+	if (rc)
+		return rc;
 
 	INIT_LIST_HEAD(&format_queue);
 
+	old_start = fdata->start_unit;
 	old_stop = fdata->stop_unit;
-	while (fdata->start_unit <= 1) {
-		fdata->stop_unit = fdata->start_unit;
-		cqr = dasd_eckd_build_format(base, fdata, enable_pav);
-		list_add(&cqr->blocklist, &format_queue);
 
-		fdata->stop_unit = old_stop;
-		fdata->start_unit++;
+	format_step = DASD_CQR_MAX_CCW / recs_per_track(&private->rdc_data, 0,
+							fdata->blksize);
+	do {
+		retry = 0;
+		while (fdata->start_unit <= old_stop) {
+			step = fdata->stop_unit - fdata->start_unit + 1;
+			if (step > format_step) {
+				fdata->stop_unit =
+					fdata->start_unit + format_step - 1;
+			}
 
-		if (fdata->start_unit > fdata->stop_unit)
-			goto sleep;
-	}
+			cqr = dasd_eckd_format_build_ccw_req(base, fdata,
+							     enable_pav);
+			if (IS_ERR(cqr)) {
+				rc = PTR_ERR(cqr);
+				if (rc == -ENOMEM) {
+					if (list_empty(&format_queue))
+						goto out;
+					/*
+					 * not enough memory available: start
+					 * the requests built so far and
+					 * retry after they have finished
+					 */
+					retry = 1;
+					break;
+				}
+				goto out_err;
+			}
+			list_add_tail(&cqr->blocklist, &format_queue);
 
-retry:
-	format_step = 255 / recs_per_track(&private->rdc_data, 0,
-					   fdata->blksize);
-	while (fdata->start_unit <= old_stop) {
-		step = fdata->stop_unit - fdata->start_unit + 1;
-		if (step > format_step)
-			fdata->stop_unit = fdata->start_unit + format_step - 1;
-
-		cqr = dasd_eckd_build_format(base, fdata, enable_pav);
-		if (IS_ERR(cqr)) {
-			if (PTR_ERR(cqr) == -ENOMEM) {
-				/*
-				 * not enough memory available
-				 * go to out and start requests
-				 * retry after first requests were finished
-				 */
-				fdata->stop_unit = old_stop;
-				goto sleep;
-			} else
-				return PTR_ERR(cqr);
+			fdata->start_unit = fdata->stop_unit + 1;
+			fdata->stop_unit = old_stop;
 		}
-		list_add(&cqr->blocklist, &format_queue);
 
-		fdata->start_unit = fdata->stop_unit + 1;
-		fdata->stop_unit = old_stop;
-	}
+		rc = dasd_sleep_on_queue(&format_queue);
 
-sleep:
-	sleep_rc = dasd_sleep_on_queue(&format_queue);
+out_err:
+		list_for_each_entry_safe(cqr, n, &format_queue, blocklist) {
+			device = cqr->startdev;
+			private = device->private;
+			if (cqr->status == DASD_CQR_FAILED)
+				rc = -EIO;
+			list_del_init(&cqr->blocklist);
+			dasd_sfree_request(cqr, device);
+			private->count--;
+		}
 
-	list_for_each_entry_safe(cqr, n, &format_queue, blocklist) {
-		device = cqr->startdev;
-		private = (struct dasd_eckd_private *) device->private;
-		if (cqr->status == DASD_CQR_FAILED)
-			rc = -EIO;
-		list_del_init(&cqr->blocklist);
-		dasd_sfree_request(cqr, device);
-		private->count--;
-	}
+		if (rc)
+			goto out;
 
-	if (sleep_rc)
-		return sleep_rc;
+	} while (retry);
 
-	/*
-	 * in case of ENOMEM we need to retry after
-	 * first requests are finished
-	 */
-	if (fdata->start_unit <= fdata->stop_unit)
-		goto retry;
+out:
+	fdata->start_unit = old_start;
+	fdata->stop_unit = old_stop;
 
 	return rc;
 }
 
+static int dasd_eckd_format_device(struct dasd_device *base,
+				   struct format_data_t *fdata, int enable_pav)
+{
+	return dasd_eckd_format_process_data(base, fdata, enable_pav);
+}
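
Note: the rewritten path above builds one CQR per chunk of at most format_step tracks, where format_step = DASD_CQR_MAX_CCW / recs_per_track(), so no single request exceeds the 255-CCW limit; on -ENOMEM it submits what has already been queued and retries once those requests have completed. A small worked example of the chunking arithmetic (the records-per-track value is an assumption for illustration, it depends on device geometry and blocksize):

	/* Illustrative only: report the track ranges the loop above would
	 * cover for tracks [start, stop], given an assumed records-per-track
	 * value.  With recs_per_trk = 12, format_step is 255 / 12 = 21, so
	 * show_format_chunks(0, 99, 12) reports five ranges:
	 * 0-20, 21-41, 42-62, 63-83 and 84-99. */
	static void show_format_chunks(unsigned int start, unsigned int stop,
				       unsigned int recs_per_trk)
	{
		unsigned int step = DASD_CQR_MAX_CCW / recs_per_trk;

		while (start <= stop) {
			unsigned int end = min(start + step - 1, stop);

			pr_info("format CQR for tracks %u-%u\n", start, end);
			start = end + 1;
		}
	}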
+
 static void dasd_eckd_handle_terminated_request(struct dasd_ccw_req *cqr)
 {
 	if (cqr->retries < 0) {
@@ -2543,9 +2536,8 @@
 {
 	char mask;
 	char *sense = NULL;
-	struct dasd_eckd_private *private;
+	struct dasd_eckd_private *private = device->private;
 
-	private = (struct dasd_eckd_private *) device->private;
 	/* first of all check for state change pending interrupt */
 	mask = DEV_STAT_ATTENTION | DEV_STAT_DEV_END | DEV_STAT_UNIT_EXCEP;
 	if ((scsw_dstat(&irb->scsw) & mask) == mask) {
@@ -2634,7 +2626,7 @@
 	struct dasd_device *basedev;
 
 	basedev = block->base;
-	private = (struct dasd_eckd_private *) basedev->private;
+	private = basedev->private;
 	if (rq_data_dir(req) == READ)
 		cmd = DASD_ECKD_CCW_READ_MT;
 	else if (rq_data_dir(req) == WRITE)
@@ -2990,8 +2982,8 @@
 
 
 	/* setup prefix data */
-	basepriv = (struct dasd_eckd_private *) basedev->private;
-	startpriv = (struct dasd_eckd_private *) startdev->private;
+	basepriv = basedev->private;
+	startpriv = startdev->private;
 	dedata = &pfxdata.define_extent;
 	lredata = &pfxdata.locate_record;
 
@@ -3278,7 +3270,7 @@
 	struct dasd_ccw_req *cqr;
 
 	basedev = block->base;
-	private = (struct dasd_eckd_private *) basedev->private;
+	private = basedev->private;
 
 	/* Calculate number of blocks/records per track. */
 	blksize = block->bp_block;
@@ -3503,7 +3495,7 @@
 
 	if (!dasd_page_cache)
 		goto out;
-	private = (struct dasd_eckd_private *) cqr->block->base->private;
+	private = cqr->block->base->private;
 	blksize = cqr->block->bp_block;
 	blk_per_trk = recs_per_track(&private->rdc_data, 0, blksize);
 	recid = blk_rq_pos(req) >> cqr->block->s2b_shift;
@@ -3587,7 +3579,7 @@
 	startdev = dasd_alias_get_start_dev(base);
 	if (!startdev)
 		startdev = base;
-	private = (struct dasd_eckd_private *) startdev->private;
+	private = startdev->private;
 	if (private->count >= DASD_ECKD_CHANQ_MAX_SIZE)
 		return ERR_PTR(-EBUSY);
 
@@ -3610,7 +3602,7 @@
 	unsigned long flags;
 
 	spin_lock_irqsave(get_ccwdev_lock(cqr->memdev->cdev), flags);
-	private = (struct dasd_eckd_private *) cqr->memdev->private;
+	private = cqr->memdev->private;
 	private->count--;
 	spin_unlock_irqrestore(get_ccwdev_lock(cqr->memdev->cdev), flags);
 	return dasd_eckd_free_cp(cqr, req);
@@ -3620,15 +3612,14 @@
 dasd_eckd_fill_info(struct dasd_device * device,
 		    struct dasd_information2_t * info)
 {
-	struct dasd_eckd_private *private;
+	struct dasd_eckd_private *private = device->private;
 
-	private = (struct dasd_eckd_private *) device->private;
 	info->label_block = 2;
 	info->FBA_layout = private->uses_cdl ? 0 : 1;
 	info->format = private->uses_cdl ? DASD_FORMAT_CDL : DASD_FORMAT_LDL;
-	info->characteristics_size = sizeof(struct dasd_eckd_characteristics);
+	info->characteristics_size = sizeof(private->rdc_data);
 	memcpy(info->characteristics, &private->rdc_data,
-	       sizeof(struct dasd_eckd_characteristics));
+	       sizeof(private->rdc_data));
 	info->confdata_size = min((unsigned long)private->conf_len,
 				  sizeof(info->configuration_data));
 	memcpy(info->configuration_data, private->conf_data,
@@ -3941,8 +3932,7 @@
 static int
 dasd_eckd_get_attrib(struct dasd_device *device, void __user *argp)
 {
-	struct dasd_eckd_private *private =
-		(struct dasd_eckd_private *)device->private;
+	struct dasd_eckd_private *private = device->private;
 	struct attrib_data_t attrib = private->attrib;
 	int rc;
 
@@ -3966,8 +3956,7 @@
 static int
 dasd_eckd_set_attrib(struct dasd_device *device, void __user *argp)
 {
-	struct dasd_eckd_private *private =
-		(struct dasd_eckd_private *)device->private;
+	struct dasd_eckd_private *private = device->private;
 	struct attrib_data_t attrib;
 
 	if (!capable(CAP_SYS_ADMIN))
@@ -4430,15 +4419,13 @@
 
 static int dasd_eckd_restore_device(struct dasd_device *device)
 {
-	struct dasd_eckd_private *private;
+	struct dasd_eckd_private *private = device->private;
 	struct dasd_eckd_characteristics temp_rdc_data;
 	int rc;
 	struct dasd_uid temp_uid;
 	unsigned long flags;
 	unsigned long cqr_flags = 0;
 
-	private = (struct dasd_eckd_private *) device->private;
-
 	/* Read Configuration Data */
 	rc = dasd_eckd_read_conf(device);
 	if (rc) {
@@ -4502,14 +4489,12 @@
 
 static int dasd_eckd_reload_device(struct dasd_device *device)
 {
-	struct dasd_eckd_private *private;
+	struct dasd_eckd_private *private = device->private;
 	int rc, old_base;
 	char print_uid[60];
 	struct dasd_uid uid;
 	unsigned long flags;
 
-	private = (struct dasd_eckd_private *) device->private;
-
 	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
 	old_base = private->uid.base_unit_addr;
 	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
@@ -4556,12 +4541,10 @@
 {
 	struct dasd_rssd_messages *message_buf;
 	struct dasd_psf_prssd_data *prssdp;
-	struct dasd_eckd_private *private;
 	struct dasd_ccw_req *cqr;
 	struct ccw1 *ccw;
 	int rc;
 
-	private = (struct dasd_eckd_private *) device->private;
 	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */	+ 1 /* RSSD */,
 				   (sizeof(struct dasd_psf_prssd_data) +
 				    sizeof(struct dasd_rssd_messages)),
@@ -4686,11 +4669,10 @@
 						     __u8 lpum,
 						     struct dasd_cuir_message *cuir)
 {
-	struct dasd_eckd_private *private;
+	struct dasd_eckd_private *private = device->private;
 	struct dasd_conf_data *conf_data;
 	int path, pos;
 
-	private = (struct dasd_eckd_private *) device->private;
 	if (cuir->record_selector == 0)
 		goto out;
 	for (path = 0x80, pos = 0; path; path >>= 1, pos++) {
@@ -4715,9 +4697,9 @@
 static int dasd_eckd_cuir_scope(struct dasd_device *device, __u8 lpum,
 				struct dasd_cuir_message *cuir)
 {
+	struct dasd_eckd_private *private = device->private;
 	struct dasd_conf_data *ref_conf_data;
 	unsigned long bitmask = 0, mask = 0;
-	struct dasd_eckd_private *private;
 	struct dasd_conf_data *conf_data;
 	unsigned int pos, path;
 	char *ref_gneq, *gneq;
@@ -4730,7 +4712,6 @@
 	    !(cuir->neq_map[0] | cuir->neq_map[1] | cuir->neq_map[2]))
 		return lpum;
 
-	private = (struct dasd_eckd_private *) device->private;
 	/* get reference conf data */
 	ref_conf_data = dasd_eckd_get_ref_conf(device, lpum, cuir);
 	/* reference ned is determined by ned_map field */
@@ -4829,14 +4810,13 @@
 				  struct subchannel_id sch_id,
 				  struct dasd_cuir_message *cuir)
 {
+	struct dasd_eckd_private *private = device->private;
 	struct alias_pav_group *pavgroup, *tempgroup;
-	struct dasd_eckd_private *private;
 	struct dasd_device *dev, *n;
 	unsigned long paths = 0;
 	unsigned long flags;
 	int tbcpm;
 
-	private = (struct dasd_eckd_private *) device->private;
 	/* active devices */
 	list_for_each_entry_safe(dev, n, &private->lcu->active_devices,
 				 alias_list) {
@@ -4892,13 +4872,12 @@
 				 struct subchannel_id sch_id,
 				 struct dasd_cuir_message *cuir)
 {
+	struct dasd_eckd_private *private = device->private;
 	struct alias_pav_group *pavgroup, *tempgroup;
-	struct dasd_eckd_private *private;
 	struct dasd_device *dev, *n;
 	unsigned long paths = 0;
 	int tbcpm;
 
-	private = (struct dasd_eckd_private *) device->private;
 	/*
 	 * the path may have been added through a generic path event before
 	 * only trigger path verification if the path is not already in use
diff --git a/drivers/s390/block/dasd_fba.c b/drivers/s390/block/dasd_fba.c
index c9262e7..d7b5b55 100644
--- a/drivers/s390/block/dasd_fba.c
+++ b/drivers/s390/block/dasd_fba.c
@@ -125,13 +125,11 @@
 static int
 dasd_fba_check_characteristics(struct dasd_device *device)
 {
-	struct dasd_block *block;
-	struct dasd_fba_private *private;
+	struct dasd_fba_private *private = device->private;
 	struct ccw_device *cdev = device->cdev;
-	int rc;
-	int readonly;
+	struct dasd_block *block;
+	int readonly, rc;
 
-	private = (struct dasd_fba_private *) device->private;
 	if (!private) {
 		private = kzalloc(sizeof(*private), GFP_KERNEL | GFP_DMA);
 		if (!private) {
@@ -140,7 +138,7 @@
 				 "data failed\n");
 			return -ENOMEM;
 		}
-		device->private = (void *) private;
+		device->private = private;
 	} else {
 		memset(private, 0, sizeof(*private));
 	}
@@ -192,10 +190,9 @@
 
 static int dasd_fba_do_analysis(struct dasd_block *block)
 {
-	struct dasd_fba_private *private;
+	struct dasd_fba_private *private = block->base->private;
 	int sb, rc;
 
-	private = (struct dasd_fba_private *) block->base->private;
 	rc = dasd_check_blocksize(private->rdc_data.blk_size);
 	if (rc) {
 		DBF_DEV_EVENT(DBF_WARNING, block->base, "unknown blocksize %d",
@@ -254,7 +251,7 @@
 					      struct dasd_block *block,
 					      struct request *req)
 {
-	struct dasd_fba_private *private;
+	struct dasd_fba_private *private = block->base->private;
 	unsigned long *idaws;
 	struct LO_fba_data *LO_data;
 	struct dasd_ccw_req *cqr;
@@ -267,7 +264,6 @@
 	unsigned int blksize, off;
 	unsigned char cmd;
 
-	private = (struct dasd_fba_private *) block->base->private;
 	if (rq_data_dir(req) == READ) {
 		cmd = DASD_FBA_CCW_READ;
 	} else if (rq_data_dir(req) == WRITE) {
@@ -379,7 +375,7 @@
 static int
 dasd_fba_free_cp(struct dasd_ccw_req *cqr, struct request *req)
 {
-	struct dasd_fba_private *private;
+	struct dasd_fba_private *private = cqr->block->base->private;
 	struct ccw1 *ccw;
 	struct req_iterator iter;
 	struct bio_vec bv;
@@ -389,7 +385,6 @@
 
 	if (!dasd_page_cache)
 		goto out;
-	private = (struct dasd_fba_private *) cqr->block->base->private;
 	blksize = cqr->block->bp_block;
 	ccw = cqr->cpaddr;
 	/* Skip over define extent & locate record. */
@@ -436,13 +431,14 @@
 dasd_fba_fill_info(struct dasd_device * device,
 		   struct dasd_information2_t * info)
 {
+	struct dasd_fba_private *private = device->private;
+
 	info->label_block = 1;
 	info->FBA_layout = 1;
 	info->format = DASD_FORMAT_LDL;
-	info->characteristics_size = sizeof(struct dasd_fba_characteristics);
-	memcpy(info->characteristics,
-	       &((struct dasd_fba_private *) device->private)->rdc_data,
-	       sizeof (struct dasd_fba_characteristics));
+	info->characteristics_size = sizeof(private->rdc_data);
+	memcpy(info->characteristics, &private->rdc_data,
+	       sizeof(private->rdc_data));
 	info->confdata_size = 0;
 	return 0;
 }
diff --git a/drivers/s390/block/dasd_genhd.c b/drivers/s390/block/dasd_genhd.c
index ef1d9fb..31d544a 100644
--- a/drivers/s390/block/dasd_genhd.c
+++ b/drivers/s390/block/dasd_genhd.c
@@ -178,8 +178,8 @@
 	/* Register to static dasd major 94 */
 	rc = register_blkdev(DASD_MAJOR, "dasd");
 	if (rc != 0) {
-		pr_warning("Registering the device driver with major number "
-			   "%d failed\n", DASD_MAJOR);
+		pr_warn("Registering the device driver with major number %d failed\n",
+			DASD_MAJOR);
 		return rc;
 	}
 	return 0;
diff --git a/drivers/s390/block/dasd_int.h b/drivers/s390/block/dasd_int.h
index 4aed5ed..8de29be 100644
--- a/drivers/s390/block/dasd_int.h
+++ b/drivers/s390/block/dasd_int.h
@@ -241,6 +241,13 @@
 typedef struct dasd_ccw_req *(*dasd_erp_fn_t) (struct dasd_ccw_req *);
 
 /*
+ * A single CQR can contain a maximum of 255 CCWs. It is limited by the
+ * count field of the locate record and locate record extended commands,
+ * which can only hold one byte.
+ */
+#define DASD_CQR_MAX_CCW 255
+
+/*
  * Unique identifier for dasd device.
  */
 #define UA_NOT_CONFIGURED  0x00
@@ -438,7 +445,7 @@
 	/* Device discipline stuff. */
 	struct dasd_discipline *discipline;
 	struct dasd_discipline *base_discipline;
-	char *private;
+	void *private;
 	struct dasd_path path_data;
 
 	/* Device state and target state. */
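
Note: turning dasd_device->private from char * into void * is what makes the many "(struct dasd_xxx_private *) device->private" casts removed earlier in this patch unnecessary: a void pointer converts implicitly to any object pointer type in C, while a char pointer does not. A reduced illustration (stub types, not the real structures):

	struct eckd_private_stub { int count; };

	struct dasd_device_stub {
		void *private;			/* was: char *private */
	};

	static void example_use(struct dasd_device_stub *device)
	{
		/* old: the cast was required while private was a char * */
		/* struct eckd_private_stub *p =
		 *	(struct eckd_private_stub *) device->private; */

		/* new: plain assignment from void * is valid C */
		struct eckd_private_stub *p = device->private;

		p->count++;
	}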
diff --git a/drivers/s390/block/dasd_ioctl.c b/drivers/s390/block/dasd_ioctl.c
index 02837d0..90f30cc 100644
--- a/drivers/s390/block/dasd_ioctl.c
+++ b/drivers/s390/block/dasd_ioctl.c
@@ -203,9 +203,7 @@
 dasd_format(struct dasd_block *block, struct format_data_t *fdata)
 {
 	struct dasd_device *base;
-	int enable_pav = 1;
-	int rc, retries;
-	int start, stop;
+	int rc;
 
 	base = block->base;
 	if (base->discipline->format_device == NULL)
@@ -233,30 +231,11 @@
 		bdput(bdev);
 	}
 
-	retries = 255;
-	/* backup start- and endtrack for retries */
-	start = fdata->start_unit;
-	stop = fdata->stop_unit;
-	do {
-		rc = base->discipline->format_device(base, fdata, enable_pav);
-		if (rc) {
-			if (rc == -EAGAIN) {
-				retries--;
-				/* disable PAV in case of errors */
-				enable_pav = 0;
-				fdata->start_unit = start;
-				fdata->stop_unit = stop;
-			} else
-				return rc;
-		} else
-			/* success */
-			break;
-	} while (retries);
+	rc = base->discipline->format_device(base, fdata, 1);
+	if (rc == -EAGAIN)
+		rc = base->discipline->format_device(base, fdata, 0);
 
-	if (!retries)
-		return -EIO;
-	else
-		return 0;
+	return rc;
 }
 
 /*
@@ -286,9 +265,8 @@
 		return -EFAULT;
 	}
 	if (bdev != bdev->bd_contains) {
-		pr_warning("%s: The specified DASD is a partition and cannot "
-			   "be formatted\n",
-			   dev_name(&base->cdev->dev));
+		pr_warn("%s: The specified DASD is a partition and cannot be formatted\n",
+			dev_name(&base->cdev->dev));
 		dasd_put_device(base);
 		return -EINVAL;
 	}
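
Note: dasd_format() above collapses the old loop of up to 255 retries into at most one retry. The only thing that changed between iterations was the enable_pav flag (plus the restored start/stop values), so once the second attempt with PAV disabled has run there is presumably nothing left to vary, and any remaining error, including -EAGAIN, is now returned to the caller instead of being turned into -EIO. A hypothetical caller-side sketch of a format request as this ioctl path would see it (field names are the ones used above, the values are illustrative assumptions):

	/* Illustrative only: format tracks 0..149 with 4k blocks; the
	 * meaning of intensity bit 0x08 (CDL formatting) is inferred from
	 * the comments in the dasd_eckd hunks above. */
	struct format_data_t fdata = {
		.start_unit = 0,
		.stop_unit  = 149,
		.blksize    = 4096,
		.intensity  = 0x08,
	};
	int rc = dasd_format(block, &fdata);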
diff --git a/drivers/s390/block/dasd_proc.c b/drivers/s390/block/dasd_proc.c
index aa7bb2d..bad7a19 100644
--- a/drivers/s390/block/dasd_proc.c
+++ b/drivers/s390/block/dasd_proc.c
@@ -322,13 +322,12 @@
 	return user_len;
 out_parse_error:
 	rc = -EINVAL;
-	pr_warning("%s is not a supported value for /proc/dasd/statistics\n",
-		str);
+	pr_warn("%s is not a supported value for /proc/dasd/statistics\n", str);
 out_error:
 	vfree(buffer);
 	return rc;
 #else
-	pr_warning("/proc/dasd/statistics: is not activated in this kernel\n");
+	pr_warn("/proc/dasd/statistics: is not activated in this kernel\n");
 	return user_len;
 #endif				/* CONFIG_DASD_PROFILE */
 }
diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c
index ce7b701..1bce9cf 100644
--- a/drivers/s390/block/dcssblk.c
+++ b/drivers/s390/block/dcssblk.c
@@ -738,15 +738,15 @@
 	dev_info = dcssblk_get_device_by_name(local_buf);
 	if (dev_info == NULL) {
 		up_write(&dcssblk_devices_sem);
-		pr_warning("Device %s cannot be removed because it is not a "
-			   "known device\n", local_buf);
+		pr_warn("Device %s cannot be removed because it is not a known device\n",
+			local_buf);
 		rc = -ENODEV;
 		goto out_buf;
 	}
 	if (atomic_read(&dev_info->use_count) != 0) {
 		up_write(&dcssblk_devices_sem);
-		pr_warning("Device %s cannot be removed while it is in "
-			   "use\n", local_buf);
+		pr_warn("Device %s cannot be removed while it is in use\n",
+			local_buf);
 		rc = -EBUSY;
 		goto out_buf;
 	}
@@ -850,9 +850,8 @@
 		case SEG_TYPE_SC:
 			/* cannot write to these segments */
 			if (bio_data_dir(bio) == WRITE) {
-				pr_warning("Writing to %s failed because it "
-					   "is a read-only device\n",
-					   dev_name(&dev_info->dev));
+				pr_warn("Writing to %s failed because it is a read-only device\n",
+					dev_name(&dev_info->dev));
 				goto fail;
 			}
 		}
diff --git a/drivers/s390/char/monreader.c b/drivers/s390/char/monreader.c
index fc94bfd..ebdeaa5 100644
--- a/drivers/s390/char/monreader.c
+++ b/drivers/s390/char/monreader.c
@@ -257,7 +257,7 @@
 	memcpy(&monpriv->msg_array[monpriv->write_index]->msg,
 	       msg, sizeof(*msg));
 	if (atomic_inc_return(&monpriv->msglim_count) == MON_MSGLIM) {
-		pr_warning("The read queue for monitor data is full\n");
+		pr_warn("The read queue for monitor data is full\n");
 		monpriv->msg_array[monpriv->write_index]->msglim_reached = 1;
 	}
 	monpriv->write_index = (monpriv->write_index + 1) % MON_MSGLIM;
@@ -342,8 +342,8 @@
 	if (monpriv->path) {
 		rc = iucv_path_sever(monpriv->path, user_data_sever);
 		if (rc)
-			pr_warning("Disconnecting the z/VM *MONITOR system "
-				   "service failed with rc=%i\n", rc);
+			pr_warn("Disconnecting the z/VM *MONITOR system service failed with rc=%i\n",
+				rc);
 		iucv_path_free(monpriv->path);
 	}
 
@@ -469,8 +469,8 @@
 	if (monpriv->path) {
 		rc = iucv_path_sever(monpriv->path, user_data_sever);
 		if (rc)
-			pr_warning("Disconnecting the z/VM *MONITOR system "
-				   "service failed with rc=%i\n", rc);
+			pr_warn("Disconnecting the z/VM *MONITOR system service failed with rc=%i\n",
+				rc);
 		iucv_path_free(monpriv->path);
 	}
 	atomic_set(&monpriv->iucv_severed, 0);
diff --git a/drivers/s390/char/sclp_cmd.c b/drivers/s390/char/sclp_cmd.c
index 806239c..d3947ea 100644
--- a/drivers/s390/char/sclp_cmd.c
+++ b/drivers/s390/char/sclp_cmd.c
@@ -67,8 +67,8 @@
 
 	/* Check response. */
 	if (request->status != SCLP_REQ_DONE) {
-		pr_warning("sync request failed (cmd=0x%08x, "
-			   "status=0x%02x)\n", cmd, request->status);
+		pr_warn("sync request failed (cmd=0x%08x, status=0x%02x)\n",
+			cmd, request->status);
 		rc = -EIO;
 	}
 out:
@@ -122,8 +122,8 @@
 	if (rc)
 		goto out;
 	if (sccb->header.response_code != 0x0010) {
-		pr_warning("readcpuinfo failed (response=0x%04x)\n",
-			   sccb->header.response_code);
+		pr_warn("readcpuinfo failed (response=0x%04x)\n",
+			sccb->header.response_code);
 		rc = -EIO;
 		goto out;
 	}
@@ -160,9 +160,8 @@
 	case 0x0120:
 		break;
 	default:
-		pr_warning("configure cpu failed (cmd=0x%08x, "
-			   "response=0x%04x)\n", cmd,
-			   sccb->header.response_code);
+		pr_warn("configure cpu failed (cmd=0x%08x, response=0x%04x)\n",
+			cmd, sccb->header.response_code);
 		rc = -EIO;
 		break;
 	}
@@ -230,9 +229,8 @@
 	case 0x0120:
 		break;
 	default:
-		pr_warning("assign storage failed (cmd=0x%08x, "
-			   "response=0x%04x, rn=0x%04x)\n", cmd,
-			   sccb->header.response_code, rn);
+		pr_warn("assign storage failed (cmd=0x%08x, response=0x%04x, rn=0x%04x)\n",
+			cmd, sccb->header.response_code, rn);
 		rc = -EIO;
 		break;
 	}
@@ -675,9 +673,8 @@
 	case 0x0450:
 		break;
 	default:
-		pr_warning("configure channel-path failed "
-			   "(cmd=0x%08x, response=0x%04x)\n", cmd,
-			   sccb->header.response_code);
+		pr_warn("configure channel-path failed (cmd=0x%08x, response=0x%04x)\n",
+			cmd, sccb->header.response_code);
 		rc = -EIO;
 		break;
 	}
@@ -744,8 +741,8 @@
 	if (rc)
 		goto out;
 	if (sccb->header.response_code != 0x0010) {
-		pr_warning("read channel-path info failed "
-			   "(response=0x%04x)\n", sccb->header.response_code);
+		pr_warn("read channel-path info failed (response=0x%04x)\n",
+			sccb->header.response_code);
 		rc = -EIO;
 		goto out;
 	}
diff --git a/drivers/s390/char/sclp_cpi_sys.c b/drivers/s390/char/sclp_cpi_sys.c
index 2acea80..f344e5b 100644
--- a/drivers/s390/char/sclp_cpi_sys.c
+++ b/drivers/s390/char/sclp_cpi_sys.c
@@ -154,16 +154,14 @@
 	wait_for_completion(&completion);
 
 	if (req->status != SCLP_REQ_DONE) {
-		pr_warning("request failed (status=0x%02x)\n",
-			   req->status);
+		pr_warn("request failed (status=0x%02x)\n", req->status);
 		rc = -EIO;
 		goto out_free_req;
 	}
 
 	response = ((struct cpi_sccb *) req->sccb)->header.response_code;
 	if (response != 0x0020) {
-		pr_warning("request failed with response code 0x%x\n",
-			   response);
+		pr_warn("request failed with response code 0x%x\n", response);
 		rc = -EIO;
 	}
 
diff --git a/drivers/s390/char/tape_core.c b/drivers/s390/char/tape_core.c
index f3b5123..3c379da 100644
--- a/drivers/s390/char/tape_core.c
+++ b/drivers/s390/char/tape_core.c
@@ -699,8 +699,8 @@
 			 */
 			DBF_EVENT(3, "(%08x): Drive in use vanished!\n",
 				device->cdev_id);
-			pr_warning("%s: A tape unit was detached while in "
-				   "use\n", dev_name(&device->cdev->dev));
+			pr_warn("%s: A tape unit was detached while in use\n",
+				dev_name(&device->cdev->dev));
 			tape_state_set(device, TS_NOT_OPER);
 			__tape_discard_requests(device);
 			spin_unlock_irq(get_ccwdev_lock(device->cdev));
diff --git a/drivers/s390/char/vmlogrdr.c b/drivers/s390/char/vmlogrdr.c
index 799c152..e883063 100644
--- a/drivers/s390/char/vmlogrdr.c
+++ b/drivers/s390/char/vmlogrdr.c
@@ -343,8 +343,7 @@
 	if (logptr->autorecording) {
 		ret = vmlogrdr_recording(logptr,1,logptr->autopurge);
 		if (ret)
-			pr_warning("vmlogrdr: failed to start "
-				   "recording automatically\n");
+			pr_warn("vmlogrdr: failed to start recording automatically\n");
 	}
 
 	/* create connection to the system service */
@@ -396,8 +395,7 @@
 	if (logptr->autorecording) {
 		ret = vmlogrdr_recording(logptr,0,logptr->autopurge);
 		if (ret)
-			pr_warning("vmlogrdr: failed to stop "
-				   "recording automatically\n");
+			pr_warn("vmlogrdr: failed to stop recording automatically\n");
 	}
 	logptr->dev_in_use = 0;
 
diff --git a/drivers/s390/cio/blacklist.c b/drivers/s390/cio/blacklist.c
index 20314aa..9082476 100644
--- a/drivers/s390/cio/blacklist.c
+++ b/drivers/s390/cio/blacklist.c
@@ -51,9 +51,8 @@
 {
 	if ((from_ssid > to_ssid) || ((from_ssid == to_ssid) && (from > to))) {
 		if (msgtrigger)
-			pr_warning("0.%x.%04x to 0.%x.%04x is not a valid "
-				   "range for cio_ignore\n", from_ssid, from,
-				   to_ssid, to);
+			pr_warn("0.%x.%04x to 0.%x.%04x is not a valid range for cio_ignore\n",
+				from_ssid, from, to_ssid, to);
 
 		return 1;
 	}
@@ -140,8 +139,8 @@
 	rc = 0;
 out:
 	if (rc && msgtrigger)
-		pr_warning("%s is not a valid device for the cio_ignore "
-			   "kernel parameter\n", str);
+		pr_warn("%s is not a valid device for the cio_ignore kernel parameter\n",
+			str);
 
 	return rc;
 }
diff --git a/drivers/s390/cio/ccwreq.c b/drivers/s390/cio/ccwreq.c
index 79f5991..2782100 100644
--- a/drivers/s390/cio/ccwreq.c
+++ b/drivers/s390/cio/ccwreq.c
@@ -333,13 +333,12 @@
 
 	for (chp = 0; chp < 8; chp++) {
 		if ((0x80 >> chp) & sch->schib.pmcw.lpum)
-			pr_warning("%s: No interrupt was received within %lus "
-				   "(CS=%02x, DS=%02x, CHPID=%x.%02x)\n",
-				   dev_name(&cdev->dev), req->timeout / HZ,
-				   scsw_cstat(&sch->schib.scsw),
-				   scsw_dstat(&sch->schib.scsw),
-				   sch->schid.cssid,
-				   sch->schib.pmcw.chpid[chp]);
+			pr_warn("%s: No interrupt was received within %lus (CS=%02x, DS=%02x, CHPID=%x.%02x)\n",
+				dev_name(&cdev->dev), req->timeout / HZ,
+				scsw_cstat(&sch->schib.scsw),
+				scsw_dstat(&sch->schib.scsw),
+				sch->schid.cssid,
+				sch->schib.pmcw.chpid[chp]);
 	}
 
 	if (!ccwreq_next_path(cdev)) {
diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c
index 39a8ae5..de6fccc 100644
--- a/drivers/s390/cio/cio.c
+++ b/drivers/s390/cio/cio.c
@@ -656,7 +656,7 @@
 
 	sch_no = cio_get_console_sch_no();
 	if (sch_no == -1) {
-		pr_warning("No CCW console was found\n");
+		pr_warn("No CCW console was found\n");
 		return ERR_PTR(-ENODEV);
 	}
 	init_subchannel_id(&schid);
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c
index 6aae684..7ada078 100644
--- a/drivers/s390/cio/device.c
+++ b/drivers/s390/cio/device.c
@@ -364,11 +364,11 @@
 		   cdev->private->state == DEV_STATE_DISCONNECTED));
 	/* Inform the user if set offline failed. */
 	if (cdev->private->state == DEV_STATE_BOXED) {
-		pr_warning("%s: The device entered boxed state while "
-			   "being set offline\n", dev_name(&cdev->dev));
+		pr_warn("%s: The device entered boxed state while being set offline\n",
+			dev_name(&cdev->dev));
 	} else if (cdev->private->state == DEV_STATE_NOT_OPER) {
-		pr_warning("%s: The device stopped operating while "
-			   "being set offline\n", dev_name(&cdev->dev));
+		pr_warn("%s: The device stopped operating while being set offline\n",
+			dev_name(&cdev->dev));
 	}
 	/* Give up reference from ccw_device_set_online(). */
 	put_device(&cdev->dev);
@@ -429,13 +429,11 @@
 		spin_unlock_irq(cdev->ccwlock);
 		/* Inform the user that set online failed. */
 		if (cdev->private->state == DEV_STATE_BOXED) {
-			pr_warning("%s: Setting the device online failed "
-				   "because it is boxed\n",
-				   dev_name(&cdev->dev));
+			pr_warn("%s: Setting the device online failed because it is boxed\n",
+				dev_name(&cdev->dev));
 		} else if (cdev->private->state == DEV_STATE_NOT_OPER) {
-			pr_warning("%s: Setting the device online failed "
-				   "because it is not operational\n",
-				   dev_name(&cdev->dev));
+			pr_warn("%s: Setting the device online failed because it is not operational\n",
+				dev_name(&cdev->dev));
 		}
 		/* Give up online reference since onlining failed. */
 		put_device(&cdev->dev);
@@ -619,9 +617,8 @@
 
 	rc = chsc_siosl(sch->schid);
 	if (rc < 0) {
-		pr_warning("Logging for subchannel 0.%x.%04x failed with "
-			   "errno=%d\n",
-			   sch->schid.ssid, sch->schid.sch_no, rc);
+		pr_warn("Logging for subchannel 0.%x.%04x failed with errno=%d\n",
+			sch->schid.ssid, sch->schid.sch_no, rc);
 		return rc;
 	}
 	pr_notice("Logging for subchannel 0.%x.%04x was triggered\n",
diff --git a/drivers/s390/net/lcs.c b/drivers/s390/net/lcs.c
index 2f5b518..251db0a 100644
--- a/drivers/s390/net/lcs.c
+++ b/drivers/s390/net/lcs.c
@@ -1761,8 +1761,8 @@
 			lcs_schedule_recovery(card);
 			break;
 		case LCS_CMD_STOPLAN:
-			pr_warning("Stoplan for %s initiated by LGW.\n",
-				   card->dev->name);
+			pr_warn("Stoplan for %s initiated by LGW\n",
+				card->dev->name);
 			if (card->dev)
 				netif_carrier_off(card->dev);
 			break;
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index 7c8c68c..ac54433 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -3624,7 +3624,7 @@
 		return rc;
 	}
 #else
-	pr_warning("There is no IPv6 support for the layer 3 discipline\n");
+	pr_warn("There is no IPv6 support for the layer 3 discipline\n");
 #endif
 	return 0;
 }
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index 7706416..9d8c84b 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -75,11 +75,26 @@
 	  This selects a driver for the Atmel SPI Controller, present on
 	  many AT32 (AVR32) and AT91 (ARM) chips.
 
+config SPI_AU1550
+	tristate "Au1550/Au1200/Au1300 SPI Controller"
+	depends on MIPS_ALCHEMY
+	select SPI_BITBANG
+	help
+	  If you say yes to this option, support will be included for the
+	  PSC SPI controller found on Au1550, Au1200 and Au1300 series.
+
+config SPI_AXI_SPI_ENGINE
+	tristate "Analog Devices AXI SPI Engine controller"
+	depends on HAS_IOMEM
+	help
+	  This enables support for the Analog Devices AXI SPI Engine SPI controller.
+	  It is part of the SPI Engine framework that is used in some Analog Devices
+	  reference designs for FPGAs.
+
 config SPI_BCM2835
 	tristate "BCM2835 SPI controller"
 	depends on GPIOLIB
 	depends on ARCH_BCM2835 || COMPILE_TEST
-	depends on GPIOLIB
 	help
 	  This selects a driver for the Broadcom BCM2835 SPI master.
 
@@ -90,8 +105,7 @@
 
 config SPI_BCM2835AUX
 	tristate "BCM2835 SPI auxiliary controller"
-	depends on ARCH_BCM2835 || COMPILE_TEST
-	depends on GPIOLIB
+	depends on (ARCH_BCM2835 && GPIOLIB) || COMPILE_TEST
 	help
 	  This selects a driver for the Broadcom BCM2835 SPI aux master.
 
@@ -118,14 +132,6 @@
 	help
 	  Enable support for a SPI bus via the Blackfin SPORT peripheral.
 
-config SPI_AU1550
-	tristate "Au1550/Au1200/Au1300 SPI Controller"
-	depends on MIPS_ALCHEMY
-	select SPI_BITBANG
-	help
-	  If you say yes to this option, support will be included for the
-	  PSC SPI controller found on Au1550, Au1200 and Au1300 series.
-
 config SPI_BCM53XX
 	tristate "Broadcom BCM53xx SPI controller"
 	depends on ARCH_BCM_5301X
@@ -197,6 +203,23 @@
 	help
 	  SPI master controller for DaVinci/DA8x/OMAP-L/AM1x SPI modules.
 
+config SPI_DESIGNWARE
+	tristate "DesignWare SPI controller core support"
+	help
+	  general driver for SPI controller core from DesignWare
+
+config SPI_DW_PCI
+	tristate "PCI interface driver for DW SPI core"
+	depends on SPI_DESIGNWARE && PCI
+
+config SPI_DW_MID_DMA
+	bool "DMA support for DW SPI controller on Intel MID platform"
+	depends on SPI_DW_PCI && DW_DMAC_PCI
+
+config SPI_DW_MMIO
+	tristate "Memory-mapped io interface driver for DW SPI core"
+	depends on SPI_DESIGNWARE
+
 config SPI_DLN2
        tristate "Diolan DLN-2 USB SPI adapter"
        depends on MFD_DLN2
@@ -271,6 +294,16 @@
 	  which interfaces to an LM70 temperature sensor using
 	  a parallel port.
 
+config SPI_LP8841_RTC
+	tristate "ICP DAS LP-8841 SPI Controller for RTC"
+	depends on MACH_PXA27X_DT || COMPILE_TEST
+	help
+	  This driver provides an SPI master device to drive Maxim
+	  DS-1302 real time clock.
+
+	  Say N here unless you plan to run the kernel on an ICP DAS
+	  LP-8x4x industrial computer.
+
 config SPI_MPC52xx
 	tristate "Freescale MPC52xx SPI (non-PSC) controller support"
 	depends on PPC_MPC52xx
@@ -346,6 +379,13 @@
 	  say Y or M here.If you are not sure, say N.
 	  SPI drivers for Mediatek MT65XX and MT81XX series ARM SoCs.
 
+config SPI_NUC900
+	tristate "Nuvoton NUC900 series SPI"
+	depends on ARCH_W90X900
+	select SPI_BITBANG
+	help
+	  SPI driver for Nuvoton NUC900 series ARM SoCs
+
 config SPI_OC_TINY
 	tristate "OpenCores tiny SPI"
 	depends on GPIOLIB || COMPILE_TEST
@@ -415,10 +455,6 @@
 	help
 	  This selects a driver for the PPC4xx SPI Controller.
 
-config SPI_PXA2XX_DMA
-	def_bool y
-	depends on SPI_PXA2XX
-
 config SPI_PXA2XX
 	tristate "PXA2xx SSP SPI master"
 	depends on (ARCH_PXA || PCI || ACPI)
@@ -451,7 +487,7 @@
 
 config SPI_RSPI
 	tristate "Renesas RSPI/QSPI controller"
-	depends on SUPERH || ARCH_SHMOBILE || COMPILE_TEST
+	depends on SUPERH || ARCH_RENESAS || COMPILE_TEST
 	help
 	  SPI driver for Renesas RSPI and QSPI blocks.
 
@@ -501,7 +537,7 @@
 config SPI_SH_MSIOF
 	tristate "SuperH MSIOF SPI controller"
 	depends on HAVE_CLK && HAS_DMA
-	depends on SUPERH || ARCH_SHMOBILE || COMPILE_TEST
+	depends on SUPERH || ARCH_RENESAS || COMPILE_TEST
 	help
 	  SPI driver for SuperH and SH Mobile MSIOF blocks.
 
@@ -520,7 +556,7 @@
 
 config SPI_SH_HSPI
 	tristate "SuperH HSPI controller"
-	depends on ARCH_SHMOBILE || COMPILE_TEST
+	depends on ARCH_RENESAS || COMPILE_TEST
 	help
 	  SPI driver for SuperH HSPI blocks.
 
@@ -647,34 +683,10 @@
 	help
 	  Enables Xilinx GQSPI controller driver for Zynq UltraScale+ MPSoC.
 
-config SPI_NUC900
-	tristate "Nuvoton NUC900 series SPI"
-	depends on ARCH_W90X900
-	select SPI_BITBANG
-	help
-	  SPI driver for Nuvoton NUC900 series ARM SoCs
-
 #
 # Add new SPI master controllers in alphabetical order above this line
 #
 
-config SPI_DESIGNWARE
-	tristate "DesignWare SPI controller core support"
-	help
-	  general driver for SPI controller core from DesignWare
-
-config SPI_DW_PCI
-	tristate "PCI interface driver for DW SPI core"
-	depends on SPI_DESIGNWARE && PCI
-
-config SPI_DW_MID_DMA
-	bool "DMA support for DW SPI controller on Intel MID platform"
-	depends on SPI_DW_PCI && DW_DMAC_PCI
-
-config SPI_DW_MMIO
-	tristate "Memory-mapped io interface driver for DW SPI core"
-	depends on SPI_DESIGNWARE
-
 #
 # There are lots of SPI device types, with sensors and memory
 # being probably the most widely used ones.
diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile
index 8991ffc..fbb255c 100644
--- a/drivers/spi/Makefile
+++ b/drivers/spi/Makefile
@@ -15,6 +15,7 @@
 obj-$(CONFIG_SPI_ATMEL)			+= spi-atmel.o
 obj-$(CONFIG_SPI_ATH79)			+= spi-ath79.o
 obj-$(CONFIG_SPI_AU1550)		+= spi-au1550.o
+obj-$(CONFIG_SPI_AXI_SPI_ENGINE)	+= spi-axi-spi-engine.o
 obj-$(CONFIG_SPI_BCM2835)		+= spi-bcm2835.o
 obj-$(CONFIG_SPI_BCM2835AUX)		+= spi-bcm2835aux.o
 obj-$(CONFIG_SPI_BCM53XX)		+= spi-bcm53xx.o
@@ -46,6 +47,7 @@
 obj-$(CONFIG_SPI_IMG_SPFI)		+= spi-img-spfi.o
 obj-$(CONFIG_SPI_IMX)			+= spi-imx.o
 obj-$(CONFIG_SPI_LM70_LLP)		+= spi-lm70llp.o
+obj-$(CONFIG_SPI_LP8841_RTC)		+= spi-lp8841-rtc.o
 obj-$(CONFIG_SPI_MESON_SPIFC)		+= spi-meson-spifc.o
 obj-$(CONFIG_SPI_MPC512x_PSC)		+= spi-mpc512x-psc.o
 obj-$(CONFIG_SPI_MPC52xx_PSC)		+= spi-mpc52xx-psc.o
@@ -62,8 +64,7 @@
 obj-$(CONFIG_SPI_ORION)			+= spi-orion.o
 obj-$(CONFIG_SPI_PL022)			+= spi-pl022.o
 obj-$(CONFIG_SPI_PPC4xx)		+= spi-ppc4xx.o
-spi-pxa2xx-platform-objs		:= spi-pxa2xx.o
-spi-pxa2xx-platform-$(CONFIG_SPI_PXA2XX_DMA)	+= spi-pxa2xx-dma.o
+spi-pxa2xx-platform-objs		:= spi-pxa2xx.o spi-pxa2xx-dma.o
 obj-$(CONFIG_SPI_PXA2XX)		+= spi-pxa2xx-platform.o
 obj-$(CONFIG_SPI_PXA2XX_PCI)		+= spi-pxa2xx-pci.o
 obj-$(CONFIG_SPI_QUP)			+= spi-qup.o
diff --git a/drivers/spi/spi-axi-spi-engine.c b/drivers/spi/spi-axi-spi-engine.c
new file mode 100644
index 0000000..c968ab2
--- /dev/null
+++ b/drivers/spi/spi-axi-spi-engine.c
@@ -0,0 +1,591 @@
+/*
+ * SPI-Engine SPI controller driver
+ * Copyright 2015 Analog Devices Inc.
+ *  Author: Lars-Peter Clausen <lars@metafoo.de>
+ *
+ * Licensed under the GPL-2.
+ */
+
+#include <linux/clk.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/spi/spi.h>
+
+#define SPI_ENGINE_VERSION_MAJOR(x)	((x >> 16) & 0xff)
+#define SPI_ENGINE_VERSION_MINOR(x)	((x >> 8) & 0xff)
+#define SPI_ENGINE_VERSION_PATCH(x)	(x & 0xff)
+
+#define SPI_ENGINE_REG_VERSION			0x00
+
+#define SPI_ENGINE_REG_RESET			0x40
+
+#define SPI_ENGINE_REG_INT_ENABLE		0x80
+#define SPI_ENGINE_REG_INT_PENDING		0x84
+#define SPI_ENGINE_REG_INT_SOURCE		0x88
+
+#define SPI_ENGINE_REG_SYNC_ID			0xc0
+
+#define SPI_ENGINE_REG_CMD_FIFO_ROOM		0xd0
+#define SPI_ENGINE_REG_SDO_FIFO_ROOM		0xd4
+#define SPI_ENGINE_REG_SDI_FIFO_LEVEL		0xd8
+
+#define SPI_ENGINE_REG_CMD_FIFO			0xe0
+#define SPI_ENGINE_REG_SDO_DATA_FIFO		0xe4
+#define SPI_ENGINE_REG_SDI_DATA_FIFO		0xe8
+#define SPI_ENGINE_REG_SDI_DATA_FIFO_PEEK	0xec
+
+#define SPI_ENGINE_INT_CMD_ALMOST_EMPTY		BIT(0)
+#define SPI_ENGINE_INT_SDO_ALMOST_EMPTY		BIT(1)
+#define SPI_ENGINE_INT_SDI_ALMOST_FULL		BIT(2)
+#define SPI_ENGINE_INT_SYNC			BIT(3)
+
+#define SPI_ENGINE_CONFIG_CPHA			BIT(0)
+#define SPI_ENGINE_CONFIG_CPOL			BIT(1)
+#define SPI_ENGINE_CONFIG_3WIRE			BIT(2)
+
+#define SPI_ENGINE_INST_TRANSFER		0x0
+#define SPI_ENGINE_INST_ASSERT			0x1
+#define SPI_ENGINE_INST_WRITE			0x2
+#define SPI_ENGINE_INST_MISC			0x3
+
+#define SPI_ENGINE_CMD_REG_CLK_DIV		0x0
+#define SPI_ENGINE_CMD_REG_CONFIG		0x1
+
+#define SPI_ENGINE_MISC_SYNC			0x0
+#define SPI_ENGINE_MISC_SLEEP			0x1
+
+#define SPI_ENGINE_TRANSFER_WRITE		0x1
+#define SPI_ENGINE_TRANSFER_READ		0x2
+
+#define SPI_ENGINE_CMD(inst, arg1, arg2) \
+	(((inst) << 12) | ((arg1) << 8) | (arg2))
+
+#define SPI_ENGINE_CMD_TRANSFER(flags, n) \
+	SPI_ENGINE_CMD(SPI_ENGINE_INST_TRANSFER, (flags), (n))
+#define SPI_ENGINE_CMD_ASSERT(delay, cs) \
+	SPI_ENGINE_CMD(SPI_ENGINE_INST_ASSERT, (delay), (cs))
+#define SPI_ENGINE_CMD_WRITE(reg, val) \
+	SPI_ENGINE_CMD(SPI_ENGINE_INST_WRITE, (reg), (val))
+#define SPI_ENGINE_CMD_SLEEP(delay) \
+	SPI_ENGINE_CMD(SPI_ENGINE_INST_MISC, SPI_ENGINE_MISC_SLEEP, (delay))
+#define SPI_ENGINE_CMD_SYNC(id) \
+	SPI_ENGINE_CMD(SPI_ENGINE_INST_MISC, SPI_ENGINE_MISC_SYNC, (id))
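+
+/*
+ * Instructions are 16 bits wide: the opcode sits in bits 15:12, arg1 in
+ * bits 11:8 and arg2 in bits 7:0. For example,
+ * SPI_ENGINE_CMD_TRANSFER(SPI_ENGINE_TRANSFER_WRITE, 7) encodes to 0x0107,
+ * i.e. a write-only transfer whose length field is 7 (length - 1).
+ */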
+
+struct spi_engine_program {
+	unsigned int length;
+	uint16_t instructions[];
+};
+
+struct spi_engine {
+	struct clk *clk;
+	struct clk *ref_clk;
+
+	spinlock_t lock;
+
+	void __iomem *base;
+
+	struct spi_message *msg;
+	struct spi_engine_program *p;
+	unsigned cmd_length;
+	const uint16_t *cmd_buf;
+
+	struct spi_transfer *tx_xfer;
+	unsigned int tx_length;
+	const uint8_t *tx_buf;
+
+	struct spi_transfer *rx_xfer;
+	unsigned int rx_length;
+	uint8_t *rx_buf;
+
+	unsigned int sync_id;
+	unsigned int completed_id;
+
+	unsigned int int_enable;
+};
+
+static void spi_engine_program_add_cmd(struct spi_engine_program *p,
+	bool dry, uint16_t cmd)
+{
+	if (!dry)
+		p->instructions[p->length] = cmd;
+	p->length++;
+}
+
+static unsigned int spi_engine_get_config(struct spi_device *spi)
+{
+	unsigned int config = 0;
+
+	if (spi->mode & SPI_CPOL)
+		config |= SPI_ENGINE_CONFIG_CPOL;
+	if (spi->mode & SPI_CPHA)
+		config |= SPI_ENGINE_CONFIG_CPHA;
+	if (spi->mode & SPI_3WIRE)
+		config |= SPI_ENGINE_CONFIG_3WIRE;
+
+	return config;
+}
+
+static unsigned int spi_engine_get_clk_div(struct spi_engine *spi_engine,
+	struct spi_device *spi, struct spi_transfer *xfer)
+{
+	unsigned int clk_div;
+
+	clk_div = DIV_ROUND_UP(clk_get_rate(spi_engine->ref_clk),
+		xfer->speed_hz * 2);
+	if (clk_div > 255)
+		clk_div = 255;
+	else if (clk_div > 0)
+		clk_div -= 1;
+
+	return clk_div;
+}
+
+static void spi_engine_gen_xfer(struct spi_engine_program *p, bool dry,
+	struct spi_transfer *xfer)
+{
+	unsigned int len = xfer->len;
+
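+	/*
+	 * The transfer instruction carries an 8-bit length field (encoded as
+	 * length - 1), so longer transfers are split into chunks of at most
+	 * 256 bytes.
+	 */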
+	while (len) {
+		unsigned int n = min(len, 256U);
+		unsigned int flags = 0;
+
+		if (xfer->tx_buf)
+			flags |= SPI_ENGINE_TRANSFER_WRITE;
+		if (xfer->rx_buf)
+			flags |= SPI_ENGINE_TRANSFER_READ;
+
+		spi_engine_program_add_cmd(p, dry,
+			SPI_ENGINE_CMD_TRANSFER(flags, n - 1));
+		len -= n;
+	}
+}
+
+static void spi_engine_gen_sleep(struct spi_engine_program *p, bool dry,
+	struct spi_engine *spi_engine, unsigned int clk_div, unsigned int delay)
+{
+	unsigned int spi_clk = clk_get_rate(spi_engine->ref_clk);
+	unsigned int t;
+
+	if (delay == 0)
+		return;
+
+	t = DIV_ROUND_UP(delay * spi_clk, (clk_div + 1) * 2);
+	while (t) {
+		unsigned int n = min(t, 256U);
+
+		spi_engine_program_add_cmd(p, dry, SPI_ENGINE_CMD_SLEEP(n - 1));
+		t -= n;
+	}
+}
+
+static void spi_engine_gen_cs(struct spi_engine_program *p, bool dry,
+		struct spi_device *spi, bool assert)
+{
+	unsigned int mask = 0xff;
+
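+	/*
+	 * All mask bits set means no chip select is asserted; clearing the
+	 * bit of this device's chip select asserts it.
+	 */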
+	if (assert)
+		mask ^= BIT(spi->chip_select);
+
+	spi_engine_program_add_cmd(p, dry, SPI_ENGINE_CMD_ASSERT(1, mask));
+}
+
+static int spi_engine_compile_message(struct spi_engine *spi_engine,
+	struct spi_message *msg, bool dry, struct spi_engine_program *p)
+{
+	struct spi_device *spi = msg->spi;
+	struct spi_transfer *xfer;
+	int clk_div, new_clk_div;
+	bool cs_change = true;
+
+	clk_div = -1;
+
+	spi_engine_program_add_cmd(p, dry,
+		SPI_ENGINE_CMD_WRITE(SPI_ENGINE_CMD_REG_CONFIG,
+			spi_engine_get_config(spi)));
+
+	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
+		new_clk_div = spi_engine_get_clk_div(spi_engine, spi, xfer);
+		if (new_clk_div != clk_div) {
+			clk_div = new_clk_div;
+			spi_engine_program_add_cmd(p, dry,
+				SPI_ENGINE_CMD_WRITE(SPI_ENGINE_CMD_REG_CLK_DIV,
+					clk_div));
+		}
+
+		if (cs_change)
+			spi_engine_gen_cs(p, dry, spi, true);
+
+		spi_engine_gen_xfer(p, dry, xfer);
+		spi_engine_gen_sleep(p, dry, spi_engine, clk_div,
+			xfer->delay_usecs);
+
+		cs_change = xfer->cs_change;
+		if (list_is_last(&xfer->transfer_list, &msg->transfers))
+			cs_change = !cs_change;
+
+		if (cs_change)
+			spi_engine_gen_cs(p, dry, spi, false);
+	}
+
+	return 0;
+}
+
+static void spi_engine_xfer_next(struct spi_engine *spi_engine,
+	struct spi_transfer **_xfer)
+{
+	struct spi_message *msg = spi_engine->msg;
+	struct spi_transfer *xfer = *_xfer;
+
+	if (!xfer) {
+		xfer = list_first_entry(&msg->transfers,
+			struct spi_transfer, transfer_list);
+	} else if (list_is_last(&xfer->transfer_list, &msg->transfers)) {
+		xfer = NULL;
+	} else {
+		xfer = list_next_entry(xfer, transfer_list);
+	}
+
+	*_xfer = xfer;
+}
+
+static void spi_engine_tx_next(struct spi_engine *spi_engine)
+{
+	struct spi_transfer *xfer = spi_engine->tx_xfer;
+
+	do {
+		spi_engine_xfer_next(spi_engine, &xfer);
+	} while (xfer && !xfer->tx_buf);
+
+	spi_engine->tx_xfer = xfer;
+	if (xfer) {
+		spi_engine->tx_length = xfer->len;
+		spi_engine->tx_buf = xfer->tx_buf;
+	} else {
+		spi_engine->tx_buf = NULL;
+	}
+}
+
+static void spi_engine_rx_next(struct spi_engine *spi_engine)
+{
+	struct spi_transfer *xfer = spi_engine->rx_xfer;
+
+	do {
+		spi_engine_xfer_next(spi_engine, &xfer);
+	} while (xfer && !xfer->rx_buf);
+
+	spi_engine->rx_xfer = xfer;
+	if (xfer) {
+		spi_engine->rx_length = xfer->len;
+		spi_engine->rx_buf = xfer->rx_buf;
+	} else {
+		spi_engine->rx_buf = NULL;
+	}
+}
+
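+/*
+ * Fill the command FIFO with as many instructions as there is room for.
+ * Returns true if instructions are still pending, i.e. the command
+ * almost-empty interrupt must stay enabled.
+ */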
+static bool spi_engine_write_cmd_fifo(struct spi_engine *spi_engine)
+{
+	void __iomem *addr = spi_engine->base + SPI_ENGINE_REG_CMD_FIFO;
+	unsigned int n, m, i;
+	const uint16_t *buf;
+
+	n = readl_relaxed(spi_engine->base + SPI_ENGINE_REG_CMD_FIFO_ROOM);
+	while (n && spi_engine->cmd_length) {
+		m = min(n, spi_engine->cmd_length);
+		buf = spi_engine->cmd_buf;
+		for (i = 0; i < m; i++)
+			writel_relaxed(buf[i], addr);
+		spi_engine->cmd_buf += m;
+		spi_engine->cmd_length -= m;
+		n -= m;
+	}
+
+	return spi_engine->cmd_length != 0;
+}
+
+static bool spi_engine_write_tx_fifo(struct spi_engine *spi_engine)
+{
+	void __iomem *addr = spi_engine->base + SPI_ENGINE_REG_SDO_DATA_FIFO;
+	unsigned int n, m, i;
+	const uint8_t *buf;
+
+	n = readl_relaxed(spi_engine->base + SPI_ENGINE_REG_SDO_FIFO_ROOM);
+	while (n && spi_engine->tx_length) {
+		m = min(n, spi_engine->tx_length);
+		buf = spi_engine->tx_buf;
+		for (i = 0; i < m; i++)
+			writel_relaxed(buf[i], addr);
+		spi_engine->tx_buf += m;
+		spi_engine->tx_length -= m;
+		n -= m;
+		if (spi_engine->tx_length == 0)
+			spi_engine_tx_next(spi_engine);
+	}
+
+	return spi_engine->tx_length != 0;
+}
+
+static bool spi_engine_read_rx_fifo(struct spi_engine *spi_engine)
+{
+	void __iomem *addr = spi_engine->base + SPI_ENGINE_REG_SDI_DATA_FIFO;
+	unsigned int n, m, i;
+	uint8_t *buf;
+
+	n = readl_relaxed(spi_engine->base + SPI_ENGINE_REG_SDI_FIFO_LEVEL);
+	while (n && spi_engine->rx_length) {
+		m = min(n, spi_engine->rx_length);
+		buf = spi_engine->rx_buf;
+		for (i = 0; i < m; i++)
+			buf[i] = readl_relaxed(addr);
+		spi_engine->rx_buf += m;
+		spi_engine->rx_length -= m;
+		n -= m;
+		if (spi_engine->rx_length == 0)
+			spi_engine_rx_next(spi_engine);
+	}
+
+	return spi_engine->rx_length != 0;
+}
+
+static irqreturn_t spi_engine_irq(int irq, void *devid)
+{
+	struct spi_master *master = devid;
+	struct spi_engine *spi_engine = spi_master_get_devdata(master);
+	unsigned int disable_int = 0;
+	unsigned int pending;
+
+	pending = readl_relaxed(spi_engine->base + SPI_ENGINE_REG_INT_PENDING);
+
+	if (pending & SPI_ENGINE_INT_SYNC) {
+		writel_relaxed(SPI_ENGINE_INT_SYNC,
+			spi_engine->base + SPI_ENGINE_REG_INT_PENDING);
+		spi_engine->completed_id = readl_relaxed(
+			spi_engine->base + SPI_ENGINE_REG_SYNC_ID);
+	}
+
+	spin_lock(&spi_engine->lock);
+
+	if (pending & SPI_ENGINE_INT_CMD_ALMOST_EMPTY) {
+		if (!spi_engine_write_cmd_fifo(spi_engine))
+			disable_int |= SPI_ENGINE_INT_CMD_ALMOST_EMPTY;
+	}
+
+	if (pending & SPI_ENGINE_INT_SDO_ALMOST_EMPTY) {
+		if (!spi_engine_write_tx_fifo(spi_engine))
+			disable_int |= SPI_ENGINE_INT_SDO_ALMOST_EMPTY;
+	}
+
+	if (pending & (SPI_ENGINE_INT_SDI_ALMOST_FULL | SPI_ENGINE_INT_SYNC)) {
+		if (!spi_engine_read_rx_fifo(spi_engine))
+			disable_int |= SPI_ENGINE_INT_SDI_ALMOST_FULL;
+	}
+
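+	/*
+	 * A sync interrupt carrying the expected id means the program queued
+	 * for the current message has fully executed, so the message can be
+	 * finalized.
+	 */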
+	if (pending & SPI_ENGINE_INT_SYNC) {
+		if (spi_engine->msg &&
+		    spi_engine->completed_id == spi_engine->sync_id) {
+			struct spi_message *msg = spi_engine->msg;
+
+			kfree(spi_engine->p);
+			msg->status = 0;
+			msg->actual_length = msg->frame_length;
+			spi_engine->msg = NULL;
+			spi_finalize_current_message(master);
+			disable_int |= SPI_ENGINE_INT_SYNC;
+		}
+	}
+
+	if (disable_int) {
+		spi_engine->int_enable &= ~disable_int;
+		writel_relaxed(spi_engine->int_enable,
+			spi_engine->base + SPI_ENGINE_REG_INT_ENABLE);
+	}
+
+	spin_unlock(&spi_engine->lock);
+
+	return IRQ_HANDLED;
+}
+
+static int spi_engine_transfer_one_message(struct spi_master *master,
+	struct spi_message *msg)
+{
+	struct spi_engine_program p_dry, *p;
+	struct spi_engine *spi_engine = spi_master_get_devdata(master);
+	unsigned int int_enable = 0;
+	unsigned long flags;
+	size_t size;
+
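+	/*
+	 * Compile the message twice: the first (dry) pass only counts the
+	 * instructions so the program buffer can be sized, the second pass
+	 * actually emits them.
+	 */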
+	p_dry.length = 0;
+	spi_engine_compile_message(spi_engine, msg, true, &p_dry);
+
+	size = sizeof(*p->instructions) * (p_dry.length + 1);
+	p = kzalloc(sizeof(*p) + size, GFP_KERNEL);
+	if (!p)
+		return -ENOMEM;
+	spi_engine_compile_message(spi_engine, msg, false, p);
+
+	spin_lock_irqsave(&spi_engine->lock, flags);
+	spi_engine->sync_id = (spi_engine->sync_id + 1) & 0xff;
+	spi_engine_program_add_cmd(p, false,
+		SPI_ENGINE_CMD_SYNC(spi_engine->sync_id));
+
+	spi_engine->msg = msg;
+	spi_engine->p = p;
+
+	spi_engine->cmd_buf = p->instructions;
+	spi_engine->cmd_length = p->length;
+	if (spi_engine_write_cmd_fifo(spi_engine))
+		int_enable |= SPI_ENGINE_INT_CMD_ALMOST_EMPTY;
+
+	spi_engine_tx_next(spi_engine);
+	if (spi_engine_write_tx_fifo(spi_engine))
+		int_enable |= SPI_ENGINE_INT_SDO_ALMOST_EMPTY;
+
+	spi_engine_rx_next(spi_engine);
+	if (spi_engine->rx_length != 0)
+		int_enable |= SPI_ENGINE_INT_SDI_ALMOST_FULL;
+
+	int_enable |= SPI_ENGINE_INT_SYNC;
+
+	writel_relaxed(int_enable,
+		spi_engine->base + SPI_ENGINE_REG_INT_ENABLE);
+	spi_engine->int_enable = int_enable;
+	spin_unlock_irqrestore(&spi_engine->lock, flags);
+
+	return 0;
+}
+
+static int spi_engine_probe(struct platform_device *pdev)
+{
+	struct spi_engine *spi_engine;
+	struct spi_master *master;
+	unsigned int version;
+	struct resource *res;
+	int irq;
+	int ret;
+
+	irq = platform_get_irq(pdev, 0);
+	if (irq <= 0)
+		return -ENXIO;
+
+	spi_engine = devm_kzalloc(&pdev->dev, sizeof(*spi_engine), GFP_KERNEL);
+	if (!spi_engine)
+		return -ENOMEM;
+
+	master = spi_alloc_master(&pdev->dev, 0);
+	if (!master)
+		return -ENOMEM;
+
+	spi_master_set_devdata(master, spi_engine);
+
+	spin_lock_init(&spi_engine->lock);
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	spi_engine->base = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(spi_engine->base)) {
+		ret = PTR_ERR(spi_engine->base);
+		goto err_put_master;
+	}
+
+	version = readl(spi_engine->base + SPI_ENGINE_REG_VERSION);
+	if (SPI_ENGINE_VERSION_MAJOR(version) != 1) {
+		dev_err(&pdev->dev, "Unsupported peripheral version %u.%u.%u\n",
+			SPI_ENGINE_VERSION_MAJOR(version),
+			SPI_ENGINE_VERSION_MINOR(version),
+			SPI_ENGINE_VERSION_PATCH(version));
+		ret = -ENODEV;
+		goto err_put_master;
+	}
+
+	spi_engine->clk = devm_clk_get(&pdev->dev, "s_axi_aclk");
+	if (IS_ERR(spi_engine->clk)) {
+		ret = PTR_ERR(spi_engine->clk);
+		goto err_put_master;
+	}
+
+	spi_engine->ref_clk = devm_clk_get(&pdev->dev, "spi_clk");
+	if (IS_ERR(spi_engine->ref_clk)) {
+		ret = PTR_ERR(spi_engine->ref_clk);
+		goto err_put_master;
+	}
+
+	ret = clk_prepare_enable(spi_engine->clk);
+	if (ret)
+		goto err_put_master;
+
+	ret = clk_prepare_enable(spi_engine->ref_clk);
+	if (ret)
+		goto err_clk_disable;
+
+	writel_relaxed(0x00, spi_engine->base + SPI_ENGINE_REG_RESET);
+	writel_relaxed(0xff, spi_engine->base + SPI_ENGINE_REG_INT_PENDING);
+	writel_relaxed(0x00, spi_engine->base + SPI_ENGINE_REG_INT_ENABLE);
+
+	ret = request_irq(irq, spi_engine_irq, 0, pdev->name, master);
+	if (ret)
+		goto err_ref_clk_disable;
+
+	master->dev.parent = &pdev->dev;
+	master->dev.of_node = pdev->dev.of_node;
+	master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_3WIRE;
+	master->bits_per_word_mask = SPI_BPW_MASK(8);
+	master->max_speed_hz = clk_get_rate(spi_engine->ref_clk) / 2;
+	master->transfer_one_message = spi_engine_transfer_one_message;
+	master->num_chipselect = 8;
+
+	ret = spi_register_master(master);
+	if (ret)
+		goto err_free_irq;
+
+	platform_set_drvdata(pdev, master);
+
+	return 0;
+err_free_irq:
+	free_irq(irq, master);
+err_ref_clk_disable:
+	clk_disable_unprepare(spi_engine->ref_clk);
+err_clk_disable:
+	clk_disable_unprepare(spi_engine->clk);
+err_put_master:
+	spi_master_put(master);
+	return ret;
+}
+
+static int spi_engine_remove(struct platform_device *pdev)
+{
+	struct spi_master *master = platform_get_drvdata(pdev);
+	struct spi_engine *spi_engine = spi_master_get_devdata(master);
+	int irq = platform_get_irq(pdev, 0);
+
+	spi_unregister_master(master);
+
+	free_irq(irq, master);
+
+	writel_relaxed(0xff, spi_engine->base + SPI_ENGINE_REG_INT_PENDING);
+	writel_relaxed(0x00, spi_engine->base + SPI_ENGINE_REG_INT_ENABLE);
+	writel_relaxed(0x01, spi_engine->base + SPI_ENGINE_REG_RESET);
+
+	clk_disable_unprepare(spi_engine->ref_clk);
+	clk_disable_unprepare(spi_engine->clk);
+
+	return 0;
+}
+
+static const struct of_device_id spi_engine_match_table[] = {
+	{ .compatible = "adi,axi-spi-engine-1.00.a" },
+	{ },
+};
+
+static struct platform_driver spi_engine_driver = {
+	.probe = spi_engine_probe,
+	.remove = spi_engine_remove,
+	.driver = {
+		.name = "spi-engine",
+		.of_match_table = spi_engine_match_table,
+	},
+};
+module_platform_driver(spi_engine_driver);
+
+MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
+MODULE_DESCRIPTION("Analog Devices SPI engine peripheral driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/spi/spi-bcm2835.c b/drivers/spi/spi-bcm2835.c
index cf04960..f35cc10 100644
--- a/drivers/spi/spi-bcm2835.c
+++ b/drivers/spi/spi-bcm2835.c
@@ -727,11 +727,6 @@
 			spi->chip_select, spi->cs_gpio, err);
 		return err;
 	}
-	/* the implementation of pinctrl-bcm2835 currently does not
-	 * set the GPIO value when using gpio_direction_output
-	 * so we are setting it here explicitly
-	 */
-	gpio_set_value(spi->cs_gpio, (spi->mode & SPI_CS_HIGH) ? 0 : 1);
 
 	return 0;
 }
diff --git a/drivers/spi/spi-bcm2835aux.c b/drivers/spi/spi-bcm2835aux.c
index ecc73c0..7428091 100644
--- a/drivers/spi/spi-bcm2835aux.c
+++ b/drivers/spi/spi-bcm2835aux.c
@@ -64,9 +64,9 @@
 #define BCM2835_AUX_SPI_CNTL0_VAR_WIDTH	0x00004000
 #define BCM2835_AUX_SPI_CNTL0_DOUTHOLD	0x00003000
 #define BCM2835_AUX_SPI_CNTL0_ENABLE	0x00000800
-#define BCM2835_AUX_SPI_CNTL0_CPHA_IN	0x00000400
+#define BCM2835_AUX_SPI_CNTL0_IN_RISING	0x00000400
 #define BCM2835_AUX_SPI_CNTL0_CLEARFIFO	0x00000200
-#define BCM2835_AUX_SPI_CNTL0_CPHA_OUT	0x00000100
+#define BCM2835_AUX_SPI_CNTL0_OUT_RISING	0x00000100
 #define BCM2835_AUX_SPI_CNTL0_CPOL	0x00000080
 #define BCM2835_AUX_SPI_CNTL0_MSBF_OUT	0x00000040
 #define BCM2835_AUX_SPI_CNTL0_SHIFTLEN	0x0000003F
@@ -92,9 +92,6 @@
 #define BCM2835_AUX_SPI_POLLING_LIMIT_US	30
 #define BCM2835_AUX_SPI_POLLING_JIFFIES		2
 
-#define BCM2835_AUX_SPI_MODE_BITS (SPI_CPOL | SPI_CPHA | SPI_CS_HIGH \
-				  | SPI_NO_CS)
-
 struct bcm2835aux_spi {
 	void __iomem *regs;
 	struct clk *clk;
@@ -212,9 +209,15 @@
 		ret = IRQ_HANDLED;
 	}
 
-	/* and if rx_len is 0 then wake up completion and disable spi */
+	if (!bs->tx_len) {
+		/* disable tx fifo empty interrupt */
+		bcm2835aux_wr(bs, BCM2835_AUX_SPI_CNTL1, bs->cntl[1] |
+			BCM2835_AUX_SPI_CNTL1_IDLE);
+	}
+
+	/* and if rx_len is 0 then disable interrupts and wake up completion */
 	if (!bs->rx_len) {
-		bcm2835aux_spi_reset_hw(bs);
+		bcm2835aux_wr(bs, BCM2835_AUX_SPI_CNTL1, bs->cntl[1]);
 		complete(&master->xfer_completion);
 	}
 
@@ -307,9 +310,6 @@
 		}
 	}
 
-	/* Transfer complete - reset SPI HW */
-	bcm2835aux_spi_reset_hw(bs);
-
 	/* and return without waiting for completion */
 	return 0;
 }
@@ -330,10 +330,6 @@
 	 * resulting (potentially) in more interrupts when transferring
 	 * more than 12 bytes
 	 */
-	bs->cntl[0] = BCM2835_AUX_SPI_CNTL0_ENABLE |
-		      BCM2835_AUX_SPI_CNTL0_VAR_WIDTH |
-		      BCM2835_AUX_SPI_CNTL0_MSBF_OUT;
-	bs->cntl[1] = BCM2835_AUX_SPI_CNTL1_MSBF_IN;
 
 	/* set clock */
 	spi_hz = tfr->speed_hz;
@@ -348,17 +344,13 @@
 	} else { /* the slowest we can go */
 		speed = BCM2835_AUX_SPI_CNTL0_SPEED_MAX;
 	}
+	/* mask out old speed from previous spi_transfer */
+	bs->cntl[0] &= ~(BCM2835_AUX_SPI_CNTL0_SPEED);
+	/* set the new speed */
 	bs->cntl[0] |= speed << BCM2835_AUX_SPI_CNTL0_SPEED_SHIFT;
 
 	spi_used_hz = clk_hz / (2 * (speed + 1));
 
-	/* handle all the modes */
-	if (spi->mode & SPI_CPOL)
-		bs->cntl[0] |= BCM2835_AUX_SPI_CNTL0_CPOL;
-	if (spi->mode & SPI_CPHA)
-		bs->cntl[0] |= BCM2835_AUX_SPI_CNTL0_CPHA_OUT |
-			       BCM2835_AUX_SPI_CNTL0_CPHA_IN;
-
 	/* set transmit buffers and length */
 	bs->tx_buf = tfr->tx_buf;
 	bs->rx_buf = tfr->rx_buf;
@@ -382,6 +374,40 @@
 	return bcm2835aux_spi_transfer_one_irq(master, spi, tfr);
 }
 
+static int bcm2835aux_spi_prepare_message(struct spi_master *master,
+					  struct spi_message *msg)
+{
+	struct spi_device *spi = msg->spi;
+	struct bcm2835aux_spi *bs = spi_master_get_devdata(master);
+
+	bs->cntl[0] = BCM2835_AUX_SPI_CNTL0_ENABLE |
+		      BCM2835_AUX_SPI_CNTL0_VAR_WIDTH |
+		      BCM2835_AUX_SPI_CNTL0_MSBF_OUT;
+	bs->cntl[1] = BCM2835_AUX_SPI_CNTL1_MSBF_IN;
+
+	/* handle all the modes */
+	if (spi->mode & SPI_CPOL) {
+		bs->cntl[0] |= BCM2835_AUX_SPI_CNTL0_CPOL;
+		bs->cntl[0] |= BCM2835_AUX_SPI_CNTL0_OUT_RISING;
+	} else {
+		bs->cntl[0] |= BCM2835_AUX_SPI_CNTL0_IN_RISING;
+	}
+	bcm2835aux_wr(bs, BCM2835_AUX_SPI_CNTL1, bs->cntl[1]);
+	bcm2835aux_wr(bs, BCM2835_AUX_SPI_CNTL0, bs->cntl[0]);
+
+	return 0;
+}
+
+static int bcm2835aux_spi_unprepare_message(struct spi_master *master,
+					    struct spi_message *msg)
+{
+	struct bcm2835aux_spi *bs = spi_master_get_devdata(master);
+
+	bcm2835aux_spi_reset_hw(bs);
+
+	return 0;
+}
+
 static void bcm2835aux_spi_handle_err(struct spi_master *master,
 				      struct spi_message *msg)
 {
@@ -405,11 +431,13 @@
 	}
 
 	platform_set_drvdata(pdev, master);
-	master->mode_bits = BCM2835_AUX_SPI_MODE_BITS;
+	master->mode_bits = (SPI_CPOL | SPI_CS_HIGH | SPI_NO_CS);
 	master->bits_per_word_mask = SPI_BPW_MASK(8);
 	master->num_chipselect = -1;
 	master->transfer_one = bcm2835aux_spi_transfer_one;
 	master->handle_err = bcm2835aux_spi_handle_err;
+	master->prepare_message = bcm2835aux_spi_prepare_message;
+	master->unprepare_message = bcm2835aux_spi_unprepare_message;
 	master->dev.of_node = pdev->dev.of_node;
 
 	bs = spi_master_get_devdata(master);
diff --git a/drivers/spi/spi-dw-mid.c b/drivers/spi/spi-dw-mid.c
index 9185f6c..e31971f9 100644
--- a/drivers/spi/spi-dw-mid.c
+++ b/drivers/spi/spi-dw-mid.c
@@ -89,10 +89,10 @@
 	if (!dws->dma_inited)
 		return;
 
-	dmaengine_terminate_all(dws->txchan);
+	dmaengine_terminate_sync(dws->txchan);
 	dma_release_channel(dws->txchan);
 
-	dmaengine_terminate_all(dws->rxchan);
+	dmaengine_terminate_sync(dws->rxchan);
 	dma_release_channel(dws->rxchan);
 }
 
diff --git a/drivers/spi/spi-dw-mmio.c b/drivers/spi/spi-dw-mmio.c
index a6d7029..447497e 100644
--- a/drivers/spi/spi-dw-mmio.c
+++ b/drivers/spi/spi-dw-mmio.c
@@ -47,11 +47,6 @@
 
 	/* Get basic io resource and map it */
 	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	if (!mem) {
-		dev_err(&pdev->dev, "no mem resource?\n");
-		return -EINVAL;
-	}
-
 	dws->regs = devm_ioremap_resource(&pdev->dev, mem);
 	if (IS_ERR(dws->regs)) {
 		dev_err(&pdev->dev, "SPI region map failed\n");
diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c
index c688efa..e7a19be 100644
--- a/drivers/spi/spi-imx.c
+++ b/drivers/spi/spi-imx.c
@@ -56,7 +56,6 @@
 
 /* The maximum number of bytes that an SDMA BD can transfer. */
 #define MAX_SDMA_BD_BYTES  (1 << 15)
-#define IMX_DMA_TIMEOUT (msecs_to_jiffies(3000))
 struct spi_imx_config {
 	unsigned int speed_hz;
 	unsigned int bpw;
@@ -86,12 +85,18 @@
 
 struct spi_imx_data {
 	struct spi_bitbang bitbang;
+	struct device *dev;
 
 	struct completion xfer_done;
 	void __iomem *base;
+	unsigned long base_phys;
+
 	struct clk *clk_per;
 	struct clk *clk_ipg;
 	unsigned long spi_clk;
+	unsigned int spi_bus_clk;
+
+	unsigned int bytes_per_word;
 
 	unsigned int count;
 	void (*tx)(struct spi_imx_data *);
@@ -101,8 +106,6 @@
 	unsigned int txfifo; /* number of words pushed in tx FIFO */
 
 	/* DMA */
-	unsigned int dma_is_inited;
-	unsigned int dma_finished;
 	bool usedma;
 	u32 wml;
 	struct completion dma_rx_completion;
@@ -199,15 +202,35 @@
 	return 7;
 }
 
+static int spi_imx_bytes_per_word(const int bpw)
+{
+	return DIV_ROUND_UP(bpw, BITS_PER_BYTE);
+}
+
 static bool spi_imx_can_dma(struct spi_master *master, struct spi_device *spi,
 			 struct spi_transfer *transfer)
 {
 	struct spi_imx_data *spi_imx = spi_master_get_devdata(master);
+	unsigned int bpw = transfer->bits_per_word;
 
-	if (spi_imx->dma_is_inited && transfer->len >= spi_imx->wml &&
-	    (transfer->len % spi_imx->wml) == 0)
-		return true;
-	return false;
+	if (!master->dma_rx)
+		return false;
+
+	if (!bpw)
+		bpw = spi->bits_per_word;
+
+	bpw = spi_imx_bytes_per_word(bpw);
+
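+	/*
+	 * DMA is only used for 1, 2 or 4 byte words and for lengths that are
+	 * at least, and a multiple of, the watermark level in bytes.
+	 */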
+	if (bpw != 1 && bpw != 2 && bpw != 4)
+		return false;
+
+	if (transfer->len < spi_imx->wml * bpw)
+		return false;
+
+	if (transfer->len % (spi_imx->wml * bpw))
+		return false;
+
+	return true;
 }
 
 #define MX51_ECSPI_CTRL		0x08
@@ -232,16 +255,13 @@
 #define MX51_ECSPI_INT_RREN		(1 <<  3)
 
 #define MX51_ECSPI_DMA      0x14
-#define MX51_ECSPI_DMA_TX_WML_OFFSET	0
-#define MX51_ECSPI_DMA_TX_WML_MASK	0x3F
-#define MX51_ECSPI_DMA_RX_WML_OFFSET	16
-#define MX51_ECSPI_DMA_RX_WML_MASK	(0x3F << 16)
-#define MX51_ECSPI_DMA_RXT_WML_OFFSET	24
-#define MX51_ECSPI_DMA_RXT_WML_MASK	(0x3F << 24)
+#define MX51_ECSPI_DMA_TX_WML(wml)	((wml) & 0x3f)
+#define MX51_ECSPI_DMA_RX_WML(wml)	(((wml) & 0x3f) << 16)
+#define MX51_ECSPI_DMA_RXT_WML(wml)	(((wml) & 0x3f) << 24)
 
-#define MX51_ECSPI_DMA_TEDEN_OFFSET	7
-#define MX51_ECSPI_DMA_RXDEN_OFFSET	23
-#define MX51_ECSPI_DMA_RXTDEN_OFFSET	31
+#define MX51_ECSPI_DMA_TEDEN		(1 << 7)
+#define MX51_ECSPI_DMA_RXDEN		(1 << 23)
+#define MX51_ECSPI_DMA_RXTDEN		(1 << 31)
 
 #define MX51_ECSPI_STAT		0x18
 #define MX51_ECSPI_STAT_RR		(1 <<  3)
@@ -250,14 +270,15 @@
 #define MX51_ECSPI_TESTREG_LBC	BIT(31)
 
 /* MX51 eCSPI */
-static unsigned int mx51_ecspi_clkdiv(unsigned int fin, unsigned int fspi,
-				      unsigned int *fres)
+static unsigned int mx51_ecspi_clkdiv(struct spi_imx_data *spi_imx,
+				      unsigned int fspi, unsigned int *fres)
 {
 	/*
 	 * there are two 4-bit dividers, the pre-divider divides by
 	 * $pre, the post-divider by 2^$post
 	 */
 	unsigned int pre, post;
+	unsigned int fin = spi_imx->spi_clk;
 
 	if (unlikely(fspi > fin))
 		return 0;
@@ -270,14 +291,14 @@
 
 	post = max(4U, post) - 4;
 	if (unlikely(post > 0xf)) {
-		pr_err("%s: cannot set clock freq: %u (base freq: %u)\n",
-				__func__, fspi, fin);
+		dev_err(spi_imx->dev, "cannot set clock freq: %u (base freq: %u)\n",
+				fspi, fin);
 		return 0xff;
 	}
 
 	pre = DIV_ROUND_UP(fin, fspi << post) - 1;
 
-	pr_debug("%s: fin: %u, fspi: %u, post: %u, pre: %u\n",
+	dev_dbg(spi_imx->dev, "%s: fin: %u, fspi: %u, post: %u, pre: %u\n",
 			__func__, fin, fspi, post, pre);
 
 	/* Resulting frequency for the SCLK line. */
@@ -302,22 +323,17 @@
 
 static void __maybe_unused mx51_ecspi_trigger(struct spi_imx_data *spi_imx)
 {
-	u32 reg = readl(spi_imx->base + MX51_ECSPI_CTRL);
+	u32 reg;
 
-	if (!spi_imx->usedma)
-		reg |= MX51_ECSPI_CTRL_XCH;
-	else if (!spi_imx->dma_finished)
-		reg |= MX51_ECSPI_CTRL_SMC;
-	else
-		reg &= ~MX51_ECSPI_CTRL_SMC;
+	reg = readl(spi_imx->base + MX51_ECSPI_CTRL);
+	reg |= MX51_ECSPI_CTRL_XCH;
 	writel(reg, spi_imx->base + MX51_ECSPI_CTRL);
 }
 
 static int __maybe_unused mx51_ecspi_config(struct spi_imx_data *spi_imx,
 		struct spi_imx_config *config)
 {
-	u32 ctrl = MX51_ECSPI_CTRL_ENABLE, cfg = 0, dma = 0;
-	u32 tx_wml_cfg, rx_wml_cfg, rxt_wml_cfg;
+	u32 ctrl = MX51_ECSPI_CTRL_ENABLE, cfg = 0;
 	u32 clk = config->speed_hz, delay, reg;
 
 	/*
@@ -330,7 +346,8 @@
 	ctrl |= MX51_ECSPI_CTRL_MODE_MASK;
 
 	/* set clock speed */
-	ctrl |= mx51_ecspi_clkdiv(spi_imx->spi_clk, config->speed_hz, &clk);
+	ctrl |= mx51_ecspi_clkdiv(spi_imx, config->speed_hz, &clk);
+	spi_imx->spi_bus_clk = clk;
 
 	/* set chip select to use */
 	ctrl |= MX51_ECSPI_CTRL_CS(config->cs);
@@ -341,20 +358,16 @@
 
 	if (config->mode & SPI_CPHA)
 		cfg |= MX51_ECSPI_CONFIG_SCLKPHA(config->cs);
-	else
-		cfg &= ~MX51_ECSPI_CONFIG_SCLKPHA(config->cs);
 
 	if (config->mode & SPI_CPOL) {
 		cfg |= MX51_ECSPI_CONFIG_SCLKPOL(config->cs);
 		cfg |= MX51_ECSPI_CONFIG_SCLKCTL(config->cs);
-	} else {
-		cfg &= ~MX51_ECSPI_CONFIG_SCLKPOL(config->cs);
-		cfg &= ~MX51_ECSPI_CONFIG_SCLKCTL(config->cs);
 	}
 	if (config->mode & SPI_CS_HIGH)
 		cfg |= MX51_ECSPI_CONFIG_SSBPOL(config->cs);
-	else
-		cfg &= ~MX51_ECSPI_CONFIG_SSBPOL(config->cs);
+
+	if (spi_imx->usedma)
+		ctrl |= MX51_ECSPI_CTRL_SMC;
 
 	/* The CTRL register always goes first to bring the controller out of reset */
 	writel(ctrl, spi_imx->base + MX51_ECSPI_CTRL);
@@ -389,22 +402,12 @@
 	 * Configure the DMA register: setup the watermark
 	 * and enable DMA request.
 	 */
-	if (spi_imx->dma_is_inited) {
-		dma = readl(spi_imx->base + MX51_ECSPI_DMA);
 
-		rx_wml_cfg = spi_imx->wml << MX51_ECSPI_DMA_RX_WML_OFFSET;
-		tx_wml_cfg = spi_imx->wml << MX51_ECSPI_DMA_TX_WML_OFFSET;
-		rxt_wml_cfg = spi_imx->wml << MX51_ECSPI_DMA_RXT_WML_OFFSET;
-		dma = (dma & ~MX51_ECSPI_DMA_TX_WML_MASK
-			   & ~MX51_ECSPI_DMA_RX_WML_MASK
-			   & ~MX51_ECSPI_DMA_RXT_WML_MASK)
-			   | rx_wml_cfg | tx_wml_cfg | rxt_wml_cfg
-			   |(1 << MX51_ECSPI_DMA_TEDEN_OFFSET)
-			   |(1 << MX51_ECSPI_DMA_RXDEN_OFFSET)
-			   |(1 << MX51_ECSPI_DMA_RXTDEN_OFFSET);
-
-		writel(dma, spi_imx->base + MX51_ECSPI_DMA);
-	}
+	writel(MX51_ECSPI_DMA_RX_WML(spi_imx->wml) |
+		MX51_ECSPI_DMA_TX_WML(spi_imx->wml) |
+		MX51_ECSPI_DMA_RXT_WML(spi_imx->wml) |
+		MX51_ECSPI_DMA_TEDEN | MX51_ECSPI_DMA_RXDEN |
+		MX51_ECSPI_DMA_RXTDEN, spi_imx->base + MX51_ECSPI_DMA);
 
 	return 0;
 }
@@ -784,11 +787,63 @@
 	return IRQ_HANDLED;
 }
 
+static int spi_imx_dma_configure(struct spi_master *master,
+				 int bytes_per_word)
+{
+	int ret;
+	enum dma_slave_buswidth buswidth;
+	struct dma_slave_config rx = {}, tx = {};
+	struct spi_imx_data *spi_imx = spi_master_get_devdata(master);
+
+	if (bytes_per_word == spi_imx->bytes_per_word)
+		/* Same as last time */
+		return 0;
+
+	switch (bytes_per_word) {
+	case 4:
+		buswidth = DMA_SLAVE_BUSWIDTH_4_BYTES;
+		break;
+	case 2:
+		buswidth = DMA_SLAVE_BUSWIDTH_2_BYTES;
+		break;
+	case 1:
+		buswidth = DMA_SLAVE_BUSWIDTH_1_BYTE;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	tx.direction = DMA_MEM_TO_DEV;
+	tx.dst_addr = spi_imx->base_phys + MXC_CSPITXDATA;
+	tx.dst_addr_width = buswidth;
+	tx.dst_maxburst = spi_imx->wml;
+	ret = dmaengine_slave_config(master->dma_tx, &tx);
+	if (ret) {
+		dev_err(spi_imx->dev, "TX dma configuration failed with %d\n", ret);
+		return ret;
+	}
+
+	rx.direction = DMA_DEV_TO_MEM;
+	rx.src_addr = spi_imx->base_phys + MXC_CSPIRXDATA;
+	rx.src_addr_width = buswidth;
+	rx.src_maxburst = spi_imx->wml;
+	ret = dmaengine_slave_config(master->dma_rx, &rx);
+	if (ret) {
+		dev_err(spi_imx->dev, "RX dma configuration failed with %d\n", ret);
+		return ret;
+	}
+
+	spi_imx->bytes_per_word = bytes_per_word;
+
+	return 0;
+}
+
 static int spi_imx_setupxfer(struct spi_device *spi,
 				 struct spi_transfer *t)
 {
 	struct spi_imx_data *spi_imx = spi_master_get_devdata(spi->master);
 	struct spi_imx_config config;
+	int ret;
 
 	config.bpw = t ? t->bits_per_word : spi->bits_per_word;
 	config.speed_hz  = t ? t->speed_hz : spi->max_speed_hz;
@@ -812,6 +867,18 @@
 		spi_imx->tx = spi_imx_buf_tx_u32;
 	}
 
+	if (spi_imx_can_dma(spi_imx->bitbang.master, spi, t))
+		spi_imx->usedma = 1;
+	else
+		spi_imx->usedma = 0;
+
+	if (spi_imx->usedma) {
+		ret = spi_imx_dma_configure(spi->master,
+					    spi_imx_bytes_per_word(config.bpw));
+		if (ret)
+			return ret;
+	}
+
 	spi_imx->devtype_data->config(spi_imx, &config);
 
 	return 0;
@@ -830,15 +897,11 @@
 		dma_release_channel(master->dma_tx);
 		master->dma_tx = NULL;
 	}
-
-	spi_imx->dma_is_inited = 0;
 }
 
 static int spi_imx_sdma_init(struct device *dev, struct spi_imx_data *spi_imx,
-			     struct spi_master *master,
-			     const struct resource *res)
+			     struct spi_master *master)
 {
-	struct dma_slave_config slave_config = {};
 	int ret;
 
 	/* use pio mode for i.mx6dl chip TKT238285 */
@@ -856,16 +919,6 @@
 		goto err;
 	}
 
-	slave_config.direction = DMA_MEM_TO_DEV;
-	slave_config.dst_addr = res->start + MXC_CSPITXDATA;
-	slave_config.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
-	slave_config.dst_maxburst = spi_imx->wml;
-	ret = dmaengine_slave_config(master->dma_tx, &slave_config);
-	if (ret) {
-		dev_err(dev, "error in TX dma configuration.\n");
-		goto err;
-	}
-
 	/* Prepare for RX : */
 	master->dma_rx = dma_request_slave_channel_reason(dev, "rx");
 	if (IS_ERR(master->dma_rx)) {
@@ -875,15 +928,7 @@
 		goto err;
 	}
 
-	slave_config.direction = DMA_DEV_TO_MEM;
-	slave_config.src_addr = res->start + MXC_CSPIRXDATA;
-	slave_config.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
-	slave_config.src_maxburst = spi_imx->wml;
-	ret = dmaengine_slave_config(master->dma_rx, &slave_config);
-	if (ret) {
-		dev_err(dev, "error in RX dma configuration.\n");
-		goto err;
-	}
+	spi_imx_dma_configure(master, 1);
 
 	init_completion(&spi_imx->dma_rx_completion);
 	init_completion(&spi_imx->dma_tx_completion);
@@ -891,7 +936,6 @@
 	master->max_dma_len = MAX_SDMA_BD_BYTES;
 	spi_imx->bitbang.master->flags = SPI_MASTER_MUST_RX |
 					 SPI_MASTER_MUST_TX;
-	spi_imx->dma_is_inited = 1;
 
 	return 0;
 err:
@@ -913,95 +957,81 @@
 	complete(&spi_imx->dma_tx_completion);
 }
 
+static int spi_imx_calculate_timeout(struct spi_imx_data *spi_imx, int size)
+{
+	unsigned long timeout = 0;
+
+	/* Time for the actual data transfer plus the HW-related CS change delay */
+	timeout = (8 + 4) * size / spi_imx->spi_bus_clk;
+
+	/* Add extra second for scheduler related activities */
+	timeout += 1;
+
+	/* Double the calculated timeout */
+	return msecs_to_jiffies(2 * timeout * MSEC_PER_SEC);
+}
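+
+/*
+ * Illustrative example for spi_imx_calculate_timeout(): a 4096 byte
+ * transfer at a 10 MHz bus clock gives (8 + 4) * 4096 / 10000000 = 0
+ * seconds of data time, the one second scheduling margin dominates and
+ * doubling yields a 2000 ms timeout.
+ */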
+
 static int spi_imx_dma_transfer(struct spi_imx_data *spi_imx,
 				struct spi_transfer *transfer)
 {
-	struct dma_async_tx_descriptor *desc_tx = NULL, *desc_rx = NULL;
-	int ret;
+	struct dma_async_tx_descriptor *desc_tx, *desc_rx;
+	unsigned long transfer_timeout;
 	unsigned long timeout;
 	struct spi_master *master = spi_imx->bitbang.master;
 	struct sg_table *tx = &transfer->tx_sg, *rx = &transfer->rx_sg;
 
-	if (tx) {
-		desc_tx = dmaengine_prep_slave_sg(master->dma_tx,
-					tx->sgl, tx->nents, DMA_MEM_TO_DEV,
-					DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
-		if (!desc_tx)
-			goto tx_nodma;
-
-		desc_tx->callback = spi_imx_dma_tx_callback;
-		desc_tx->callback_param = (void *)spi_imx;
-		dmaengine_submit(desc_tx);
-	}
-
-	if (rx) {
-		desc_rx = dmaengine_prep_slave_sg(master->dma_rx,
-					rx->sgl, rx->nents, DMA_DEV_TO_MEM,
-					DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
-		if (!desc_rx)
-			goto rx_nodma;
-
-		desc_rx->callback = spi_imx_dma_rx_callback;
-		desc_rx->callback_param = (void *)spi_imx;
-		dmaengine_submit(desc_rx);
-	}
-
-	reinit_completion(&spi_imx->dma_rx_completion);
-	reinit_completion(&spi_imx->dma_tx_completion);
-
-	/* Trigger the cspi module. */
-	spi_imx->dma_finished = 0;
-
 	/*
-	 * Set these order to avoid potential RX overflow. The overflow may
-	 * happen if we enable SPI HW before starting RX DMA due to rescheduling
-	 * for another task and/or interrupt.
-	 * So RX DMA enabled first to make sure data would be read out from FIFO
-	 * ASAP. TX DMA enabled next to start filling TX FIFO with new data.
-	 * And finaly SPI HW enabled to start actual data transfer.
+	 * The TX DMA setup starts the transfer, so make sure RX is configured
+	 * before TX.
 	 */
+	desc_rx = dmaengine_prep_slave_sg(master->dma_rx,
+				rx->sgl, rx->nents, DMA_DEV_TO_MEM,
+				DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+	if (!desc_rx)
+		return -EINVAL;
+
+	desc_rx->callback = spi_imx_dma_rx_callback;
+	desc_rx->callback_param = (void *)spi_imx;
+	dmaengine_submit(desc_rx);
+	reinit_completion(&spi_imx->dma_rx_completion);
 	dma_async_issue_pending(master->dma_rx);
+
+	desc_tx = dmaengine_prep_slave_sg(master->dma_tx,
+				tx->sgl, tx->nents, DMA_MEM_TO_DEV,
+				DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+	if (!desc_tx) {
+		dmaengine_terminate_all(master->dma_tx);
+		return -EINVAL;
+	}
+
+	desc_tx->callback = spi_imx_dma_tx_callback;
+	desc_tx->callback_param = (void *)spi_imx;
+	dmaengine_submit(desc_tx);
+	reinit_completion(&spi_imx->dma_tx_completion);
 	dma_async_issue_pending(master->dma_tx);
-	spi_imx->devtype_data->trigger(spi_imx);
+
+	transfer_timeout = spi_imx_calculate_timeout(spi_imx, transfer->len);
 
 	/* Wait for SDMA to finish the data transfer. */
 	timeout = wait_for_completion_timeout(&spi_imx->dma_tx_completion,
-						IMX_DMA_TIMEOUT);
+						transfer_timeout);
 	if (!timeout) {
-		pr_warn("%s %s: I/O Error in DMA TX\n",
-			dev_driver_string(&master->dev),
-			dev_name(&master->dev));
+		dev_err(spi_imx->dev, "I/O Error in DMA TX\n");
 		dmaengine_terminate_all(master->dma_tx);
 		dmaengine_terminate_all(master->dma_rx);
-	} else {
-		timeout = wait_for_completion_timeout(
-				&spi_imx->dma_rx_completion, IMX_DMA_TIMEOUT);
-		if (!timeout) {
-			pr_warn("%s %s: I/O Error in DMA RX\n",
-				dev_driver_string(&master->dev),
-				dev_name(&master->dev));
-			spi_imx->devtype_data->reset(spi_imx);
-			dmaengine_terminate_all(master->dma_rx);
-		}
+		return -ETIMEDOUT;
 	}
 
-	spi_imx->dma_finished = 1;
-	spi_imx->devtype_data->trigger(spi_imx);
+	timeout = wait_for_completion_timeout(&spi_imx->dma_rx_completion,
+					      transfer_timeout);
+	if (!timeout) {
+		dev_err(&master->dev, "I/O Error in DMA RX\n");
+		spi_imx->devtype_data->reset(spi_imx);
+		dmaengine_terminate_all(master->dma_rx);
+		return -ETIMEDOUT;
+	}
 
-	if (!timeout)
-		ret = -ETIMEDOUT;
-	else
-		ret = transfer->len;
-
-	return ret;
-
-rx_nodma:
-	dmaengine_terminate_all(master->dma_tx);
-tx_nodma:
-	pr_warn_once("%s %s: DMA not available, falling back to PIO\n",
-		     dev_driver_string(&master->dev),
-		     dev_name(&master->dev));
-	return -EAGAIN;
+	return transfer->len;
 }
 
 static int spi_imx_pio_transfer(struct spi_device *spi,
@@ -1028,19 +1058,12 @@
 static int spi_imx_transfer(struct spi_device *spi,
 				struct spi_transfer *transfer)
 {
-	int ret;
 	struct spi_imx_data *spi_imx = spi_master_get_devdata(spi->master);
 
-	if (spi_imx->bitbang.master->can_dma &&
-	    spi_imx_can_dma(spi_imx->bitbang.master, spi, transfer)) {
-		spi_imx->usedma = true;
-		ret = spi_imx_dma_transfer(spi_imx, transfer);
-		if (ret != -EAGAIN)
-			return ret;
-	}
-	spi_imx->usedma = false;
-
-	return spi_imx_pio_transfer(spi, transfer);
+	if (spi_imx->usedma)
+		return spi_imx_dma_transfer(spi_imx, transfer);
+	else
+		return spi_imx_pio_transfer(spi, transfer);
 }
 
 static int spi_imx_setup(struct spi_device *spi)
@@ -1130,6 +1153,7 @@
 
 	spi_imx = spi_master_get_devdata(master);
 	spi_imx->bitbang.master = master;
+	spi_imx->dev = &pdev->dev;
 
 	spi_imx->devtype_data = of_id ? of_id->data :
 		(struct spi_imx_devtype_data *)pdev->id_entry->driver_data;
@@ -1170,6 +1194,7 @@
 		ret = PTR_ERR(spi_imx->base);
 		goto out_master_put;
 	}
+	spi_imx->base_phys = res->start;
 
 	irq = platform_get_irq(pdev, 0);
 	if (irq < 0) {
@@ -1210,7 +1235,7 @@
 	 * other chips.
 	 */
 	if (is_imx51_ecspi(spi_imx)) {
-		ret = spi_imx_sdma_init(&pdev->dev, spi_imx, master, res);
+		ret = spi_imx_sdma_init(&pdev->dev, spi_imx, master);
 		if (ret == -EPROBE_DEFER)
 			goto out_clk_put;
 
diff --git a/drivers/spi/spi-lp8841-rtc.c b/drivers/spi/spi-lp8841-rtc.c
new file mode 100644
index 0000000..faa577d
--- /dev/null
+++ b/drivers/spi/spi-lp8841-rtc.c
@@ -0,0 +1,256 @@
+/*
+ * SPI master driver for ICP DAS LP-8841 RTC
+ *
+ * Copyright (C) 2016 Sergei Ianovich
+ *
+ * based on
+ *
+ * Dallas DS1302 RTC Support
+ * Copyright (C) 2002 David McCullough
+ * Copyright (C) 2003 - 2007 Paul Mundt
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/spi/spi.h>
+
+#define DRIVER_NAME	"spi_lp8841_rtc"
+
+#define SPI_LP8841_RTC_CE	0x01
+#define SPI_LP8841_RTC_CLK	0x02
+#define SPI_LP8841_RTC_nWE	0x04
+#define SPI_LP8841_RTC_MOSI	0x08
+#define SPI_LP8841_RTC_MISO	0x01
+
+/*
+ * REVISIT: if the SPI GPIO driver gains support for SPI_3WIRE and
+ * SPI_LSB_FIRST, this driver can be replaced by a simple GPIO driver
+ * providing 3 GPIO pins.
+ */
+
+struct spi_lp8841_rtc {
+	void		*iomem;
+	unsigned long	state;
+};
+
+static inline void
+setsck(struct spi_lp8841_rtc *data, int is_on)
+{
+	if (is_on)
+		data->state |= SPI_LP8841_RTC_CLK;
+	else
+		data->state &= ~SPI_LP8841_RTC_CLK;
+	writeb(data->state, data->iomem);
+}
+
+static inline void
+setmosi(struct spi_lp8841_rtc *data, int is_on)
+{
+	if (is_on)
+		data->state |= SPI_LP8841_RTC_MOSI;
+	else
+		data->state &= ~SPI_LP8841_RTC_MOSI;
+	writeb(data->state, data->iomem);
+}
+
+static inline int
+getmiso(struct spi_lp8841_rtc *data)
+{
+	return ioread8(data->iomem) & SPI_LP8841_RTC_MISO;
+}
+
+static inline u32
+bitbang_txrx_be_cpha0_lsb(struct spi_lp8841_rtc *data,
+		unsigned usecs, unsigned cpol, unsigned flags,
+		u32 word, u8 bits)
+{
+	/* if (cpol == 0) this is SPI_MODE_0; else this is SPI_MODE_2 */
+
+	u32 shift = 32 - bits;
+	/* clock starts at inactive polarity */
+	for (; likely(bits); bits--) {
+
+		/* setup LSB (to slave) on leading edge */
+		if ((flags & SPI_MASTER_NO_TX) == 0)
+			setmosi(data, (word & 1));
+
+		usleep_range(usecs, usecs + 1);	/* T(setup) */
+
+		/* sample LSB (from slave) on trailing edge */
+		word >>= 1;
+		if ((flags & SPI_MASTER_NO_RX) == 0)
+			word |= (getmiso(data) << 31);
+
+		setsck(data, !cpol);
+		usleep_range(usecs, usecs + 1);
+
+		setsck(data, cpol);
+	}
+
+	word >>= shift;
+	return word;
+}
+
+static int
+spi_lp8841_rtc_transfer_one(struct spi_master *master,
+			    struct spi_device *spi,
+			    struct spi_transfer *t)
+{
+	struct spi_lp8841_rtc	*data = spi_master_get_devdata(master);
+	unsigned		count = t->len;
+	const u8		*tx = t->tx_buf;
+	u8			*rx = t->rx_buf;
+	u8			word = 0;
+	int			ret = 0;
+
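+	/*
+	 * The link is half duplex: nWE is driven low while shifting data out
+	 * to the RTC and high while shifting data in.
+	 */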
+	if (tx) {
+		data->state &= ~SPI_LP8841_RTC_nWE;
+		writeb(data->state, data->iomem);
+		while (likely(count > 0)) {
+			word = *tx++;
+			bitbang_txrx_be_cpha0_lsb(data, 1, 0,
+					SPI_MASTER_NO_RX, word, 8);
+			count--;
+		}
+	} else if (rx) {
+		data->state |= SPI_LP8841_RTC_nWE;
+		writeb(data->state, data->iomem);
+		while (likely(count > 0)) {
+			word = bitbang_txrx_be_cpha0_lsb(data, 1, 0,
+					SPI_MASTER_NO_TX, word, 8);
+			*rx++ = word;
+			count--;
+		}
+	} else {
+		ret = -EINVAL;
+	}
+
+	spi_finalize_current_transfer(master);
+
+	return ret;
+}
+
+static void
+spi_lp8841_rtc_set_cs(struct spi_device *spi, bool enable)
+{
+	struct spi_lp8841_rtc *data = spi_master_get_devdata(spi->master);
+
+	data->state = 0;
+	writeb(data->state, data->iomem);
+	if (enable) {
+		usleep_range(4, 5);
+		data->state |= SPI_LP8841_RTC_CE;
+		writeb(data->state, data->iomem);
+		usleep_range(4, 5);
+	}
+}
+
+static int
+spi_lp8841_rtc_setup(struct spi_device *spi)
+{
+	if ((spi->mode & SPI_CS_HIGH) == 0) {
+		dev_err(&spi->dev, "unsupported active low chip select\n");
+		return -EINVAL;
+	}
+
+	if ((spi->mode & SPI_LSB_FIRST) == 0) {
+		dev_err(&spi->dev, "unsupported MSB first mode\n");
+		return -EINVAL;
+	}
+
+	if ((spi->mode & SPI_3WIRE) == 0) {
+		dev_err(&spi->dev, "unsupported wiring. 3 wires required\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+#ifdef CONFIG_OF
+static const struct of_device_id spi_lp8841_rtc_dt_ids[] = {
+	{ .compatible = "icpdas,lp8841-spi-rtc" },
+	{ }
+};
+
+MODULE_DEVICE_TABLE(of, spi_lp8841_rtc_dt_ids);
+#endif
+
+static int
+spi_lp8841_rtc_probe(struct platform_device *pdev)
+{
+	int				ret;
+	struct spi_master		*master;
+	struct spi_lp8841_rtc		*data;
+	void				*iomem;
+
+	master = spi_alloc_master(&pdev->dev, sizeof(*data));
+	if (!master)
+		return -ENOMEM;
+	platform_set_drvdata(pdev, master);
+
+	master->flags = SPI_MASTER_HALF_DUPLEX;
+	master->mode_bits = SPI_CS_HIGH | SPI_3WIRE | SPI_LSB_FIRST;
+
+	master->bus_num = pdev->id;
+	master->num_chipselect = 1;
+	master->setup = spi_lp8841_rtc_setup;
+	master->set_cs = spi_lp8841_rtc_set_cs;
+	master->transfer_one = spi_lp8841_rtc_transfer_one;
+	master->bits_per_word_mask = SPI_BPW_MASK(8);
+#ifdef CONFIG_OF
+	master->dev.of_node = pdev->dev.of_node;
+#endif
+
+	data = spi_master_get_devdata(master);
+
+	iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	data->iomem = devm_ioremap_resource(&pdev->dev, iomem);
+	ret = PTR_ERR_OR_ZERO(data->iomem);
+	if (ret) {
+		dev_err(&pdev->dev, "failed to get IO address\n");
+		goto err_put_master;
+	}
+
+	/* register with the SPI framework */
+	ret = devm_spi_register_master(&pdev->dev, master);
+	if (ret) {
+		dev_err(&pdev->dev, "cannot register spi master\n");
+		goto err_put_master;
+	}
+
+	return ret;
+
+err_put_master:
+	spi_master_put(master);
+
+	return ret;
+}
+
+MODULE_ALIAS("platform:" DRIVER_NAME);
+
+static struct platform_driver spi_lp8841_rtc_driver = {
+	.driver = {
+		.name	= DRIVER_NAME,
+		.of_match_table = of_match_ptr(spi_lp8841_rtc_dt_ids),
+	},
+	.probe		= spi_lp8841_rtc_probe,
+};
+module_platform_driver(spi_lp8841_rtc_driver);
+
+MODULE_DESCRIPTION("SPI master driver for ICP DAS LP-8841 RTC");
+MODULE_AUTHOR("Sergei Ianovich");
+MODULE_LICENSE("GPL");
diff --git a/drivers/spi/spi-pl022.c b/drivers/spi/spi-pl022.c
index 5e5fd77..f7f7ba1 100644
--- a/drivers/spi/spi-pl022.c
+++ b/drivers/spi/spi-pl022.c
@@ -346,13 +346,6 @@
  * @clk: outgoing clock "SPICLK" for the SPI bus
  * @master: SPI framework hookup
  * @master_info: controller-specific data from machine setup
- * @kworker: thread struct for message pump
- * @kworker_task: pointer to task for message pump kworker thread
- * @pump_messages: work struct for scheduling work to the message pump
- * @queue_lock: spinlock to syncronise access to message queue
- * @queue: message queue
- * @busy: message pump is busy
- * @running: message pump is running
  * @pump_transfers: Tasklet used in Interrupt Transfer mode
  * @cur_msg: Pointer to current spi_message being processed
  * @cur_transfer: Pointer to current spi_transfer
diff --git a/drivers/spi/spi-pxa2xx-dma.c b/drivers/spi/spi-pxa2xx-dma.c
index bd8b369..365fc22 100644
--- a/drivers/spi/spi-pxa2xx-dma.c
+++ b/drivers/spi/spi-pxa2xx-dma.c
@@ -254,8 +254,8 @@
 	if (status & SSSR_ROR) {
 		dev_err(&drv_data->pdev->dev, "FIFO overrun\n");
 
-		dmaengine_terminate_all(drv_data->rx_chan);
-		dmaengine_terminate_all(drv_data->tx_chan);
+		dmaengine_terminate_async(drv_data->rx_chan);
+		dmaengine_terminate_async(drv_data->tx_chan);
 
 		pxa2xx_spi_dma_transfer_complete(drv_data, true);
 		return IRQ_HANDLED;
@@ -331,13 +331,13 @@
 void pxa2xx_spi_dma_release(struct driver_data *drv_data)
 {
 	if (drv_data->rx_chan) {
-		dmaengine_terminate_all(drv_data->rx_chan);
+		dmaengine_terminate_sync(drv_data->rx_chan);
 		dma_release_channel(drv_data->rx_chan);
 		sg_free_table(&drv_data->rx_sgt);
 		drv_data->rx_chan = NULL;
 	}
 	if (drv_data->tx_chan) {
-		dmaengine_terminate_all(drv_data->tx_chan);
+		dmaengine_terminate_sync(drv_data->tx_chan);
 		dma_release_channel(drv_data->tx_chan);
 		sg_free_table(&drv_data->tx_sgt);
 		drv_data->tx_chan = NULL;
diff --git a/drivers/spi/spi-pxa2xx-pci.c b/drivers/spi/spi-pxa2xx-pci.c
index d19d7f2..520ed1d 100644
--- a/drivers/spi/spi-pxa2xx-pci.c
+++ b/drivers/spi/spi-pxa2xx-pci.c
@@ -19,6 +19,7 @@
 	PORT_BSW1,
 	PORT_BSW2,
 	PORT_QUARK_X1000,
+	PORT_LPT,
 };
 
 struct pxa_spi_info {
@@ -42,6 +43,9 @@
 static struct dw_dma_slave bsw2_tx_param = { .dst_id = 8 };
 static struct dw_dma_slave bsw2_rx_param = { .src_id = 9 };
 
+static struct dw_dma_slave lpt_tx_param = { .dst_id = 0 };
+static struct dw_dma_slave lpt_rx_param = { .src_id = 1 };
+
 static bool lpss_dma_filter(struct dma_chan *chan, void *param)
 {
 	struct dw_dma_slave *dws = param;
@@ -98,6 +102,14 @@
 		.num_chipselect = 1,
 		.max_clk_rate = 50000000,
 	},
+	[PORT_LPT] = {
+		.type = LPSS_LPT_SSP,
+		.port_id = 0,
+		.num_chipselect = 1,
+		.max_clk_rate = 50000000,
+		.tx_param = &lpt_tx_param,
+		.rx_param = &lpt_rx_param,
+	},
 };
 
 static int pxa2xx_spi_pci_probe(struct pci_dev *dev,
@@ -202,6 +214,7 @@
 	{ PCI_VDEVICE(INTEL, 0x228e), PORT_BSW0 },
 	{ PCI_VDEVICE(INTEL, 0x2290), PORT_BSW1 },
 	{ PCI_VDEVICE(INTEL, 0x22ac), PORT_BSW2 },
+	{ PCI_VDEVICE(INTEL, 0x9ce6), PORT_LPT },
 	{ },
 };
 MODULE_DEVICE_TABLE(pci, pxa2xx_spi_pci_devices);
diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c
index ab9914a..85e59a4 100644
--- a/drivers/spi/spi-pxa2xx.c
+++ b/drivers/spi/spi-pxa2xx.c
@@ -65,8 +65,6 @@
 #define LPSS_GENERAL_REG_RXTO_HOLDOFF_DISABLE	BIT(24)
 #define LPSS_CS_CONTROL_SW_MODE			BIT(0)
 #define LPSS_CS_CONTROL_CS_HIGH			BIT(1)
-#define LPSS_CS_CONTROL_CS_SEL_SHIFT		8
-#define LPSS_CS_CONTROL_CS_SEL_MASK		(3 << LPSS_CS_CONTROL_CS_SEL_SHIFT)
 #define LPSS_CAPS_CS_EN_SHIFT			9
 #define LPSS_CAPS_CS_EN_MASK			(0xf << LPSS_CAPS_CS_EN_SHIFT)
 
@@ -82,6 +80,10 @@
 	u32 rx_threshold;
 	u32 tx_threshold_lo;
 	u32 tx_threshold_hi;
+	/* Chip select control */
+	unsigned cs_sel_shift;
+	unsigned cs_sel_mask;
+	unsigned cs_num;
 };
 
 /* Keep these sorted with enum pxa_ssp_type */
@@ -106,6 +108,19 @@
 		.tx_threshold_lo = 160,
 		.tx_threshold_hi = 224,
 	},
+	{	/* LPSS_BSW_SSP */
+		.offset = 0x400,
+		.reg_general = 0x08,
+		.reg_ssp = 0x0c,
+		.reg_cs_ctrl = 0x18,
+		.reg_capabilities = -1,
+		.rx_threshold = 64,
+		.tx_threshold_lo = 160,
+		.tx_threshold_hi = 224,
+		.cs_sel_shift = 2,
+		.cs_sel_mask = 1 << 2,
+		.cs_num = 2,
+	},
 	{	/* LPSS_SPT_SSP */
 		.offset = 0x200,
 		.reg_general = -1,
@@ -125,6 +140,8 @@
 		.rx_threshold = 1,
 		.tx_threshold_lo = 16,
 		.tx_threshold_hi = 48,
+		.cs_sel_shift = 8,
+		.cs_sel_mask = 3 << 8,
 	},
 };
 
@@ -139,6 +156,7 @@
 	switch (drv_data->ssp_type) {
 	case LPSS_LPT_SSP:
 	case LPSS_BYT_SSP:
+	case LPSS_BSW_SSP:
 	case LPSS_SPT_SSP:
 	case LPSS_BXT_SSP:
 		return true;
@@ -288,37 +306,50 @@
 	}
 }
 
+static void lpss_ssp_select_cs(struct driver_data *drv_data,
+			       const struct lpss_config *config)
+{
+	u32 value, cs;
+
+	if (!config->cs_sel_mask)
+		return;
+
+	value = __lpss_ssp_read_priv(drv_data, config->reg_cs_ctrl);
+
+	cs = drv_data->cur_msg->spi->chip_select;
+	cs <<= config->cs_sel_shift;
+	if (cs != (value & config->cs_sel_mask)) {
+		/*
+		 * When switching to another chip select output, the new
+		 * output must be selected first and 2 ssp_clk cycles must
+		 * pass before it is driven active. Otherwise a short
+		 * glitch will occur on the previous chip select since
+		 * output select is latched but state control is not.
+		 */
+		value &= ~config->cs_sel_mask;
+		value |= cs;
+		__lpss_ssp_write_priv(drv_data,
+				      config->reg_cs_ctrl, value);
+		ndelay(1000000000 /
+		       (drv_data->master->max_speed_hz / 2));
+	}
+}
+
 static void lpss_ssp_cs_control(struct driver_data *drv_data, bool enable)
 {
 	const struct lpss_config *config;
-	u32 value, cs;
+	u32 value;
 
 	config = lpss_get_config(drv_data);
 
+	if (enable)
+		lpss_ssp_select_cs(drv_data, config);
+
 	value = __lpss_ssp_read_priv(drv_data, config->reg_cs_ctrl);
-	if (enable) {
-		cs = drv_data->cur_msg->spi->chip_select;
-		cs <<= LPSS_CS_CONTROL_CS_SEL_SHIFT;
-		if (cs != (value & LPSS_CS_CONTROL_CS_SEL_MASK)) {
-			/*
-			 * When switching another chip select output active
-			 * the output must be selected first and wait 2 ssp_clk
-			 * cycles before changing state to active. Otherwise
-			 * a short glitch will occur on the previous chip
-			 * select since output select is latched but state
-			 * control is not.
-			 */
-			value &= ~LPSS_CS_CONTROL_CS_SEL_MASK;
-			value |= cs;
-			__lpss_ssp_write_priv(drv_data,
-					      config->reg_cs_ctrl, value);
-			ndelay(1000000000 /
-			       (drv_data->master->max_speed_hz / 2));
-		}
+	if (enable)
 		value &= ~LPSS_CS_CONTROL_CS_HIGH;
-	} else {
+	else
 		value |= LPSS_CS_CONTROL_CS_HIGH;
-	}
 	__lpss_ssp_write_priv(drv_data, config->reg_cs_ctrl, value);
 }
 
@@ -496,6 +527,7 @@
 {
 	struct spi_transfer* last_transfer;
 	struct spi_message *msg;
+	unsigned long timeout;
 
 	msg = drv_data->cur_msg;
 	drv_data->cur_msg = NULL;
@@ -508,6 +540,12 @@
 	if (last_transfer->delay_usecs)
 		udelay(last_transfer->delay_usecs);
 
+	/* Wait until SSP becomes idle before deasserting the CS */
+	timeout = jiffies + msecs_to_jiffies(10);
+	while (pxa2xx_spi_read(drv_data, SSSR) & SSSR_BSY &&
+	       !time_after(jiffies, timeout))
+		cpu_relax();
+
 	/* Drop chip select UNLESS cs_change is true or we are returning
 	 * a message with an error, or next message is for another chip
 	 */
@@ -572,7 +610,7 @@
 
 static void int_transfer_complete(struct driver_data *drv_data)
 {
-	/* Stop SSP */
+	/* Clear and disable interrupts */
 	write_SSSR_CS(drv_data, drv_data->clear_sr);
 	reset_sccr1(drv_data);
 	if (!pxa25x_ssp_comp(drv_data))
@@ -957,8 +995,6 @@
 	drv_data->tx_end = drv_data->tx + transfer->len;
 	drv_data->rx = transfer->rx_buf;
 	drv_data->rx_end = drv_data->rx + transfer->len;
-	drv_data->rx_dma = transfer->rx_dma;
-	drv_data->tx_dma = transfer->tx_dma;
 	drv_data->len = transfer->len;
 	drv_data->write = drv_data->tx ? chip->write : null_writer;
 	drv_data->read = drv_data->rx ? chip->read : null_reader;
@@ -1001,19 +1037,6 @@
 					     "pump_transfers: DMA burst size reduced to match bits_per_word\n");
 	}
 
-	/* NOTE:  PXA25x_SSP _could_ use external clocking ... */
-	cr0 = pxa2xx_configure_sscr0(drv_data, clk_div, bits);
-	if (!pxa25x_ssp_comp(drv_data))
-		dev_dbg(&message->spi->dev, "%u Hz actual, %s\n",
-			drv_data->master->max_speed_hz
-				/ (1 + ((cr0 & SSCR0_SCR(0xfff)) >> 8)),
-			chip->enable_dma ? "DMA" : "PIO");
-	else
-		dev_dbg(&message->spi->dev, "%u Hz actual, %s\n",
-			drv_data->master->max_speed_hz / 2
-				/ (1 + ((cr0 & SSCR0_SCR(0x0ff)) >> 8)),
-			chip->enable_dma ? "DMA" : "PIO");
-
 	message->state = RUNNING_STATE;
 
 	drv_data->dma_mapped = 0;
@@ -1040,6 +1063,19 @@
 		write_SSSR_CS(drv_data, drv_data->clear_sr);
 	}
 
+	/* NOTE:  PXA25x_SSP _could_ use external clocking ... */
+	cr0 = pxa2xx_configure_sscr0(drv_data, clk_div, bits);
+	if (!pxa25x_ssp_comp(drv_data))
+		dev_dbg(&message->spi->dev, "%u Hz actual, %s\n",
+			drv_data->master->max_speed_hz
+				/ (1 + ((cr0 & SSCR0_SCR(0xfff)) >> 8)),
+			drv_data->dma_mapped ? "DMA" : "PIO");
+	else
+		dev_dbg(&message->spi->dev, "%u Hz actual, %s\n",
+			drv_data->master->max_speed_hz / 2
+				/ (1 + ((cr0 & SSCR0_SCR(0x0ff)) >> 8)),
+			drv_data->dma_mapped ? "DMA" : "PIO");
+
 	if (is_lpss_ssp(drv_data)) {
 		if ((pxa2xx_spi_read(drv_data, SSIRF) & 0xff)
 		    != chip->lpss_rx_threshold)
@@ -1166,6 +1202,7 @@
 		break;
 	case LPSS_LPT_SSP:
 	case LPSS_BYT_SSP:
+	case LPSS_BSW_SSP:
 	case LPSS_SPT_SSP:
 	case LPSS_BXT_SSP:
 		config = lpss_get_config(drv_data);
@@ -1313,7 +1350,7 @@
 	{ "INT3430", LPSS_LPT_SSP },
 	{ "INT3431", LPSS_LPT_SSP },
 	{ "80860F0E", LPSS_BYT_SSP },
-	{ "8086228E", LPSS_BYT_SSP },
+	{ "8086228E", LPSS_BSW_SSP },
 	{ },
 };
 MODULE_DEVICE_TABLE(acpi, pxa2xx_spi_acpi_match);
@@ -1347,10 +1384,14 @@
 	/* SPT-H */
 	{ PCI_VDEVICE(INTEL, 0xa129), LPSS_SPT_SSP },
 	{ PCI_VDEVICE(INTEL, 0xa12a), LPSS_SPT_SSP },
-	/* BXT */
+	/* BXT A-Step */
 	{ PCI_VDEVICE(INTEL, 0x0ac2), LPSS_BXT_SSP },
 	{ PCI_VDEVICE(INTEL, 0x0ac4), LPSS_BXT_SSP },
 	{ PCI_VDEVICE(INTEL, 0x0ac6), LPSS_BXT_SSP },
+	/* BXT B-Step */
+	{ PCI_VDEVICE(INTEL, 0x1ac2), LPSS_BXT_SSP },
+	{ PCI_VDEVICE(INTEL, 0x1ac4), LPSS_BXT_SSP },
+	{ PCI_VDEVICE(INTEL, 0x1ac6), LPSS_BXT_SSP },
 	/* APL */
 	{ PCI_VDEVICE(INTEL, 0x5ac2), LPSS_BXT_SSP },
 	{ PCI_VDEVICE(INTEL, 0x5ac4), LPSS_BXT_SSP },
@@ -1438,6 +1479,29 @@
 }
 #endif
 
+static int pxa2xx_spi_fw_translate_cs(struct spi_master *master, unsigned cs)
+{
+	struct driver_data *drv_data = spi_master_get_devdata(master);
+
+	if (has_acpi_companion(&drv_data->pdev->dev)) {
+		switch (drv_data->ssp_type) {
+		/*
+		 * For Atoms the ACPI DeviceSelection used by the Windows
+		 * driver starts from 1 instead of 0 so translate it here
+		 * to match what Linux expects.
+		 */
+		case LPSS_BYT_SSP:
+		case LPSS_BSW_SSP:
+			return cs - 1;
+
+		default:
+			break;
+		}
+	}
+
+	return cs;
+}
+
 static int pxa2xx_spi_probe(struct platform_device *pdev)
 {
 	struct device *dev = &pdev->dev;
@@ -1490,6 +1554,7 @@
 	master->setup = setup;
 	master->transfer_one_message = pxa2xx_spi_transfer_one_message;
 	master->unprepare_transfer_hardware = pxa2xx_spi_unprepare_transfer;
+	master->fw_translate_cs = pxa2xx_spi_fw_translate_cs;
 	master->auto_runtime_pm = true;
 
 	drv_data->ssp_type = ssp->type;
@@ -1576,6 +1641,8 @@
 			tmp &= LPSS_CAPS_CS_EN_MASK;
 			tmp >>= LPSS_CAPS_CS_EN_SHIFT;
 			platform_info->num_chipselect = ffz(tmp);
+		} else if (config->cs_num) {
+			platform_info->num_chipselect = config->cs_num;
 		}
 	}
 	master->num_chipselect = platform_info->num_chipselect;
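
The fw_translate_cs hook added above lets the SPI core ask the controller driver to convert firmware chip-select numbering into the 0-based numbering Linux uses; on Baytrail/Braswell the ACPI DeviceSelection counts from 1. A minimal sketch of such a hook follows — the example_* names are placeholders; only spi_master::fw_translate_cs and the subtract-one rule for ACPI-enumerated Atoms come from this patch.

/*
 * Sketch only (assumed driver names): translate 1-based firmware chip
 * selects into the 0-based numbering expected by the SPI core.
 */
#include <linux/acpi.h>
#include <linux/spi/spi.h>

static int example_fw_translate_cs(struct spi_master *master, unsigned cs)
{
	/* Firmware on this (hypothetical) platform counts from 1. */
	if (has_acpi_companion(master->dev.parent))
		return cs >= 1 ? cs - 1 : -EINVAL;

	return cs;	/* DT/platform data already use 0-based numbering */
}

static void example_hook_up(struct spi_master *master)
{
	/* The core invokes the hook while parsing ACPI SpiSerialBus resources. */
	master->fw_translate_cs = example_fw_translate_cs;
}
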
diff --git a/drivers/spi/spi-pxa2xx.h b/drivers/spi/spi-pxa2xx.h
index 58efa98..a1ef889 100644
--- a/drivers/spi/spi-pxa2xx.h
+++ b/drivers/spi/spi-pxa2xx.h
@@ -69,8 +69,6 @@
 	void *rx;
 	void *rx_end;
 	int dma_mapped;
-	dma_addr_t rx_dma;
-	dma_addr_t tx_dma;
 	size_t rx_map_len;
 	size_t tx_map_len;
 	u8 n_bytes;
@@ -147,20 +145,9 @@
 extern int pxa2xx_spi_flush(struct driver_data *drv_data);
 extern void *pxa2xx_spi_next_transfer(struct driver_data *drv_data);
 
-/*
- * Select the right DMA implementation.
- */
-#if defined(CONFIG_SPI_PXA2XX_DMA)
-#define SPI_PXA2XX_USE_DMA	1
 #define MAX_DMA_LEN		SZ_64K
 #define DEFAULT_DMA_CR1		(SSCR1_TSRE | SSCR1_RSRE | SSCR1_TRAIL)
-#else
-#undef SPI_PXA2XX_USE_DMA
-#define MAX_DMA_LEN		0
-#define DEFAULT_DMA_CR1		0
-#endif
 
-#ifdef SPI_PXA2XX_USE_DMA
 extern bool pxa2xx_spi_dma_is_possible(size_t len);
 extern int pxa2xx_spi_map_dma_buffers(struct driver_data *drv_data);
 extern irqreturn_t pxa2xx_spi_dma_transfer(struct driver_data *drv_data);
@@ -173,29 +160,5 @@
 						  u8 bits_per_word,
 						  u32 *burst_code,
 						  u32 *threshold);
-#else
-static inline bool pxa2xx_spi_dma_is_possible(size_t len) { return false; }
-static inline int pxa2xx_spi_map_dma_buffers(struct driver_data *drv_data)
-{
-	return 0;
-}
-#define pxa2xx_spi_dma_transfer NULL
-static inline void pxa2xx_spi_dma_prepare(struct driver_data *drv_data,
-					  u32 dma_burst) {}
-static inline void pxa2xx_spi_dma_start(struct driver_data *drv_data) {}
-static inline int pxa2xx_spi_dma_setup(struct driver_data *drv_data)
-{
-	return 0;
-}
-static inline void pxa2xx_spi_dma_release(struct driver_data *drv_data) {}
-static inline int pxa2xx_spi_set_dma_burst_and_threshold(struct chip_data *chip,
-							 struct spi_device *spi,
-							 u8 bits_per_word,
-							 u32 *burst_code,
-							 u32 *threshold)
-{
-	return -ENODEV;
-}
-#endif
 
 #endif /* SPI_PXA2XX_H */
diff --git a/drivers/spi/spi-rockchip.c b/drivers/spi/spi-rockchip.c
index 7cb1b2d..26e2688 100644
--- a/drivers/spi/spi-rockchip.c
+++ b/drivers/spi/spi-rockchip.c
@@ -13,20 +13,14 @@
  *
  */
 
-#include <linux/init.h>
-#include <linux/module.h>
 #include <linux/clk.h>
-#include <linux/err.h>
-#include <linux/delay.h>
-#include <linux/interrupt.h>
-#include <linux/platform_device.h>
-#include <linux/slab.h>
-#include <linux/spi/spi.h>
-#include <linux/scatterlist.h>
-#include <linux/of.h>
-#include <linux/pm_runtime.h>
-#include <linux/io.h>
 #include <linux/dmaengine.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/spi/spi.h>
+#include <linux/pm_runtime.h>
+#include <linux/scatterlist.h>
 
 #define DRIVER_NAME "rockchip-spi"
 
@@ -179,7 +173,7 @@
 	u8 tmode;
 	u8 bpw;
 	u8 n_bytes;
-	u8 rsd_nsecs;
+	u32 rsd_nsecs;
 	unsigned len;
 	u32 speed;
 
@@ -192,8 +186,6 @@
 	/* protect state */
 	spinlock_t lock;
 
-	struct completion xfer_completion;
-
 	u32 use_dma;
 	struct sg_table tx_sg;
 	struct sg_table rx_sg;
@@ -265,7 +257,10 @@
 static void rockchip_spi_set_cs(struct spi_device *spi, bool enable)
 {
 	u32 ser;
-	struct rockchip_spi *rs = spi_master_get_devdata(spi->master);
+	struct spi_master *master = spi->master;
+	struct rockchip_spi *rs = spi_master_get_devdata(master);
+
+	pm_runtime_get_sync(rs->dev);
 
 	ser = readl_relaxed(rs->regs + ROCKCHIP_SPI_SER) & SER_MASK;
 
@@ -290,6 +285,8 @@
 		ser &= ~(1 << spi->chip_select);
 
 	writel_relaxed(ser, rs->regs + ROCKCHIP_SPI_SER);
+
+	pm_runtime_put_sync(rs->dev);
 }
 
 static int rockchip_spi_prepare_message(struct spi_master *master,
@@ -319,12 +316,12 @@
 	 */
 	if (rs->use_dma) {
 		if (rs->state & RXBUSY) {
-			dmaengine_terminate_all(rs->dma_rx.ch);
+			dmaengine_terminate_async(rs->dma_rx.ch);
 			flush_fifo(rs);
 		}
 
 		if (rs->state & TXBUSY)
-			dmaengine_terminate_all(rs->dma_tx.ch);
+			dmaengine_terminate_async(rs->dma_tx.ch);
 	}
 
 	spin_unlock_irqrestore(&rs->lock, flags);
@@ -433,7 +430,7 @@
 	spin_unlock_irqrestore(&rs->lock, flags);
 }
 
-static void rockchip_spi_prepare_dma(struct rockchip_spi *rs)
+static int rockchip_spi_prepare_dma(struct rockchip_spi *rs)
 {
 	unsigned long flags;
 	struct dma_slave_config rxconf, txconf;
@@ -456,6 +453,8 @@
 				rs->dma_rx.ch,
 				rs->rx_sg.sgl, rs->rx_sg.nents,
 				rs->dma_rx.direction, DMA_PREP_INTERRUPT);
+		if (!rxdesc)
+			return -EINVAL;
 
 		rxdesc->callback = rockchip_spi_dma_rxcb;
 		rxdesc->callback_param = rs;
@@ -473,6 +472,11 @@
 				rs->dma_tx.ch,
 				rs->tx_sg.sgl, rs->tx_sg.nents,
 				rs->dma_tx.direction, DMA_PREP_INTERRUPT);
+		if (!txdesc) {
+			if (rxdesc)
+				dmaengine_terminate_sync(rs->dma_rx.ch);
+			return -EINVAL;
+		}
 
 		txdesc->callback = rockchip_spi_dma_txcb;
 		txdesc->callback_param = rs;
@@ -494,6 +498,8 @@
 		dmaengine_submit(txdesc);
 		dma_async_issue_pending(rs->dma_tx.ch);
 	}
+
+	return 0;
 }
 
 static void rockchip_spi_config(struct rockchip_spi *rs)
@@ -503,7 +509,8 @@
 	int rsd = 0;
 
 	u32 cr0 = (CR0_BHT_8BIT << CR0_BHT_OFFSET)
-		| (CR0_SSD_ONE << CR0_SSD_OFFSET);
+		| (CR0_SSD_ONE << CR0_SSD_OFFSET)
+		| (CR0_EM_BIG << CR0_EM_OFFSET);
 
 	cr0 |= (rs->n_bytes << CR0_DFS_OFFSET);
 	cr0 |= ((rs->mode & 0x3) << CR0_SCPH_OFFSET);
@@ -606,12 +613,12 @@
 	if (rs->use_dma) {
 		if (rs->tmode == CR0_XFM_RO) {
 			/* rx: dma must be prepared first */
-			rockchip_spi_prepare_dma(rs);
+			ret = rockchip_spi_prepare_dma(rs);
 			spi_enable_chip(rs, 1);
 		} else {
 			/* tx or tr: spi must be enabled first */
 			spi_enable_chip(rs, 1);
-			rockchip_spi_prepare_dma(rs);
+			ret = rockchip_spi_prepare_dma(rs);
 		}
 	} else {
 		spi_enable_chip(rs, 1);
@@ -717,8 +724,14 @@
 	master->handle_err = rockchip_spi_handle_err;
 
 	rs->dma_tx.ch = dma_request_slave_channel(rs->dev, "tx");
-	if (!rs->dma_tx.ch)
+	if (IS_ERR_OR_NULL(rs->dma_tx.ch)) {
+		/* Check tx to see if we need to defer probing the driver */
+		if (PTR_ERR(rs->dma_tx.ch) == -EPROBE_DEFER) {
+			ret = -EPROBE_DEFER;
+			goto err_get_fifo_len;
+		}
 		dev_warn(rs->dev, "Failed to request TX DMA channel\n");
+	}
 
 	rs->dma_rx.ch = dma_request_slave_channel(rs->dev, "rx");
 	if (!rs->dma_rx.ch) {
@@ -871,6 +884,7 @@
 	{ .compatible = "rockchip,rk3066-spi", },
 	{ .compatible = "rockchip,rk3188-spi", },
 	{ .compatible = "rockchip,rk3288-spi", },
+	{ .compatible = "rockchip,rk3399-spi", },
 	{ },
 };
 MODULE_DEVICE_TABLE(of, rockchip_spi_dt_match);
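
rockchip_spi_prepare_dma() now propagates descriptor-preparation failures instead of dereferencing a NULL descriptor, and tears down the already-prepared RX side when TX preparation fails. A condensed sketch of that error-handling shape, with placeholder names rather than the driver's own helpers:

/* Sketch: prepare RX first, then TX; unwind RX if TX preparation fails. */
#include <linux/dmaengine.h>
#include <linux/errno.h>
#include <linux/scatterlist.h>

static int example_prepare_dma(struct dma_chan *rx, struct sg_table *rx_sg,
			       struct dma_chan *tx, struct sg_table *tx_sg)
{
	struct dma_async_tx_descriptor *rxdesc = NULL, *txdesc = NULL;

	if (rx) {
		rxdesc = dmaengine_prep_slave_sg(rx, rx_sg->sgl, rx_sg->nents,
						 DMA_DEV_TO_MEM,
						 DMA_PREP_INTERRUPT);
		if (!rxdesc)
			return -EINVAL;
	}

	if (tx) {
		txdesc = dmaengine_prep_slave_sg(tx, tx_sg->sgl, tx_sg->nents,
						 DMA_MEM_TO_DEV,
						 DMA_PREP_INTERRUPT);
		if (!txdesc) {
			if (rxdesc)
				dmaengine_terminate_sync(rx);
			return -EINVAL;
		}
	}

	/* dmaengine_submit()/dma_async_issue_pending() would follow here. */
	return 0;
}
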
diff --git a/drivers/spi/spi-ti-qspi.c b/drivers/spi/spi-ti-qspi.c
index 64318fc..eac3c96 100644
--- a/drivers/spi/spi-ti-qspi.c
+++ b/drivers/spi/spi-ti-qspi.c
@@ -31,6 +31,8 @@
 #include <linux/of.h>
 #include <linux/of_device.h>
 #include <linux/pinctrl/consumer.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
 
 #include <linux/spi/spi.h>
 
@@ -44,8 +46,9 @@
 
 	struct spi_master	*master;
 	void __iomem            *base;
-	void __iomem            *ctrl_base;
 	void __iomem            *mmap_base;
+	struct regmap		*ctrl_base;
+	unsigned int		ctrl_reg;
 	struct clk		*fclk;
 	struct device           *dev;
 
@@ -55,7 +58,7 @@
 	u32 cmd;
 	u32 dc;
 
-	bool ctrl_mod;
+	bool mmap_enabled;
 };
 
 #define QSPI_PID			(0x0)
@@ -65,11 +68,8 @@
 #define QSPI_SPI_CMD_REG		(0x48)
 #define QSPI_SPI_STATUS_REG		(0x4c)
 #define QSPI_SPI_DATA_REG		(0x50)
-#define QSPI_SPI_SETUP0_REG		(0x54)
+#define QSPI_SPI_SETUP_REG(n)		((0x54 + 4 * n))
 #define QSPI_SPI_SWITCH_REG		(0x64)
-#define QSPI_SPI_SETUP1_REG		(0x58)
-#define QSPI_SPI_SETUP2_REG		(0x5c)
-#define QSPI_SPI_SETUP3_REG		(0x60)
 #define QSPI_SPI_DATA_REG_1		(0x68)
 #define QSPI_SPI_DATA_REG_2		(0x6c)
 #define QSPI_SPI_DATA_REG_3		(0x70)
@@ -109,6 +109,17 @@
 
 #define QSPI_AUTOSUSPEND_TIMEOUT         2000
 
+#define MEM_CS_EN(n)			((n + 1) << 8)
+#define MEM_CS_MASK			(7 << 8)
+
+#define MM_SWITCH			0x1
+
+#define QSPI_SETUP_RD_NORMAL		(0x0 << 12)
+#define QSPI_SETUP_RD_DUAL		(0x1 << 12)
+#define QSPI_SETUP_RD_QUAD		(0x3 << 12)
+#define QSPI_SETUP_ADDR_SHIFT		8
+#define QSPI_SETUP_DUMMY_SHIFT		10
+
 static inline unsigned long ti_qspi_read(struct ti_qspi *qspi,
 		unsigned long reg)
 {
@@ -366,6 +377,72 @@
 	return 0;
 }
 
+static void ti_qspi_enable_memory_map(struct spi_device *spi)
+{
+	struct ti_qspi  *qspi = spi_master_get_devdata(spi->master);
+
+	ti_qspi_write(qspi, MM_SWITCH, QSPI_SPI_SWITCH_REG);
+	if (qspi->ctrl_base) {
+		regmap_update_bits(qspi->ctrl_base, qspi->ctrl_reg,
+				   MEM_CS_EN(spi->chip_select),
+				   MEM_CS_MASK);
+	}
+	qspi->mmap_enabled = true;
+}
+
+static void ti_qspi_disable_memory_map(struct spi_device *spi)
+{
+	struct ti_qspi  *qspi = spi_master_get_devdata(spi->master);
+
+	ti_qspi_write(qspi, 0, QSPI_SPI_SWITCH_REG);
+	if (qspi->ctrl_base)
+		regmap_update_bits(qspi->ctrl_base, qspi->ctrl_reg,
+				   0, MEM_CS_MASK);
+	qspi->mmap_enabled = false;
+}
+
+static void ti_qspi_setup_mmap_read(struct spi_device *spi,
+				    struct spi_flash_read_message *msg)
+{
+	struct ti_qspi  *qspi = spi_master_get_devdata(spi->master);
+	u32 memval = msg->read_opcode;
+
+	switch (msg->data_nbits) {
+	case SPI_NBITS_QUAD:
+		memval |= QSPI_SETUP_RD_QUAD;
+		break;
+	case SPI_NBITS_DUAL:
+		memval |= QSPI_SETUP_RD_DUAL;
+		break;
+	default:
+		memval |= QSPI_SETUP_RD_NORMAL;
+		break;
+	}
+	memval |= ((msg->addr_width - 1) << QSPI_SETUP_ADDR_SHIFT |
+		   msg->dummy_bytes << QSPI_SETUP_DUMMY_SHIFT);
+	ti_qspi_write(qspi, memval,
+		      QSPI_SPI_SETUP_REG(spi->chip_select));
+}
+
+static int ti_qspi_spi_flash_read(struct  spi_device *spi,
+				  struct spi_flash_read_message *msg)
+{
+	struct ti_qspi *qspi = spi_master_get_devdata(spi->master);
+	int ret = 0;
+
+	mutex_lock(&qspi->list_lock);
+
+	if (!qspi->mmap_enabled)
+		ti_qspi_enable_memory_map(spi);
+	ti_qspi_setup_mmap_read(spi, msg);
+	memcpy_fromio(msg->buf, qspi->mmap_base + msg->from, msg->len);
+	msg->retlen = msg->len;
+
+	mutex_unlock(&qspi->list_lock);
+
+	return ret;
+}
+
 static int ti_qspi_start_transfer_one(struct spi_master *master,
 		struct spi_message *m)
 {
@@ -398,6 +475,9 @@
 
 	mutex_lock(&qspi->list_lock);
 
+	if (qspi->mmap_enabled)
+		ti_qspi_disable_memory_map(spi);
+
 	list_for_each_entry(t, &m->transfers, transfer_list) {
 		qspi->cmd |= QSPI_WLEN(t->bits_per_word);
 
@@ -441,7 +521,7 @@
 {
 	struct  ti_qspi *qspi;
 	struct spi_master *master;
-	struct resource         *r, *res_ctrl, *res_mmap;
+	struct resource         *r, *res_mmap;
 	struct device_node *np = pdev->dev.of_node;
 	u32 max_freq;
 	int ret = 0, num_cs, irq;
@@ -487,16 +567,6 @@
 		}
 	}
 
-	res_ctrl = platform_get_resource_byname(pdev,
-			IORESOURCE_MEM, "qspi_ctrlmod");
-	if (res_ctrl == NULL) {
-		res_ctrl = platform_get_resource(pdev, IORESOURCE_MEM, 2);
-		if (res_ctrl == NULL) {
-			dev_dbg(&pdev->dev,
-				"control module resources not required\n");
-		}
-	}
-
 	irq = platform_get_irq(pdev, 0);
 	if (irq < 0) {
 		dev_err(&pdev->dev, "no irq resource?\n");
@@ -511,20 +581,31 @@
 		goto free_master;
 	}
 
-	if (res_ctrl) {
-		qspi->ctrl_mod = true;
-		qspi->ctrl_base = devm_ioremap_resource(&pdev->dev, res_ctrl);
-		if (IS_ERR(qspi->ctrl_base)) {
-			ret = PTR_ERR(qspi->ctrl_base);
-			goto free_master;
+	if (res_mmap) {
+		qspi->mmap_base = devm_ioremap_resource(&pdev->dev,
+							res_mmap);
+		master->spi_flash_read = ti_qspi_spi_flash_read;
+		if (IS_ERR(qspi->mmap_base)) {
+			dev_err(&pdev->dev,
+				"falling back to PIO mode\n");
+			master->spi_flash_read = NULL;
 		}
 	}
+	qspi->mmap_enabled = false;
 
-	if (res_mmap) {
-		qspi->mmap_base = devm_ioremap_resource(&pdev->dev, res_mmap);
-		if (IS_ERR(qspi->mmap_base)) {
-			ret = PTR_ERR(qspi->mmap_base);
-			goto free_master;
+	if (of_property_read_bool(np, "syscon-chipselects")) {
+		qspi->ctrl_base =
+		syscon_regmap_lookup_by_phandle(np,
+						"syscon-chipselects");
+		if (IS_ERR(qspi->ctrl_base))
+			return PTR_ERR(qspi->ctrl_base);
+		ret = of_property_read_u32_index(np,
+						 "syscon-chipselects",
+						 1, &qspi->ctrl_reg);
+		if (ret) {
+			dev_err(&pdev->dev,
+				"couldn't get ctrl_mod reg index\n");
+			return ret;
 		}
 	}
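
The memory-mapped path above implements the new spi_flash_read() interface that this series adds to the SPI core (see the drivers/spi/spi.c hunks later in this patch). Below is a hedged sketch of how a flash driver might fill in spi_flash_read_message; the opcode, address width and dummy count are illustrative values, not taken from any particular flash part.

/* Sketch: request an accelerated (memory-mapped) read from the master. */
#include <linux/spi/spi.h>

static int example_flash_read(struct spi_device *spi, loff_t from,
			      size_t len, void *buf)
{
	struct spi_flash_read_message msg = { };

	msg.buf = buf;
	msg.from = from;			/* flash offset to read from */
	msg.len = len;
	msg.read_opcode = 0x6b;			/* e.g. quad-output fast read */
	msg.addr_width = 3;			/* 3-byte addressing (assumed) */
	msg.dummy_bytes = 1;
	msg.opcode_nbits = SPI_NBITS_SINGLE;
	msg.addr_nbits = SPI_NBITS_SINGLE;
	msg.data_nbits = SPI_NBITS_QUAD;	/* needs SPI_RX_QUAD in spi->mode */

	return spi_flash_read(spi, &msg);	/* msg.retlen is set on success */
}

spi_flash_read() rejects the message with -EINVAL when the requested widths are not covered by spi->mode, so the caller only needs to check the return value.
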
 
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index 47eff80..de2f2f9 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -144,6 +144,8 @@
 SPI_STATISTICS_TRANSFER_BYTES_HISTO(15, "32768-65535");
 SPI_STATISTICS_TRANSFER_BYTES_HISTO(16, "65536+");
 
+SPI_STATISTICS_SHOW(transfers_split_maxsize, "%lu");
+
 static struct attribute *spi_dev_attrs[] = {
 	&dev_attr_modalias.attr,
 	NULL,
@@ -181,6 +183,7 @@
 	&dev_attr_spi_device_transfer_bytes_histo14.attr,
 	&dev_attr_spi_device_transfer_bytes_histo15.attr,
 	&dev_attr_spi_device_transfer_bytes_histo16.attr,
+	&dev_attr_spi_device_transfers_split_maxsize.attr,
 	NULL,
 };
 
@@ -223,6 +226,7 @@
 	&dev_attr_spi_master_transfer_bytes_histo14.attr,
 	&dev_attr_spi_master_transfer_bytes_histo15.attr,
 	&dev_attr_spi_master_transfer_bytes_histo16.attr,
+	&dev_attr_spi_master_transfers_split_maxsize.attr,
 	NULL,
 };
 
@@ -702,6 +706,7 @@
 		       enum dma_data_direction dir)
 {
 	const bool vmalloced_buf = is_vmalloc_addr(buf);
+	unsigned int max_seg_size = dma_get_max_seg_size(dev);
 	int desc_len;
 	int sgs;
 	struct page *vm_page;
@@ -710,10 +715,10 @@
 	int i, ret;
 
 	if (vmalloced_buf) {
-		desc_len = PAGE_SIZE;
+		desc_len = min_t(int, max_seg_size, PAGE_SIZE);
 		sgs = DIV_ROUND_UP(len + offset_in_page(buf), desc_len);
 	} else {
-		desc_len = master->max_dma_len;
+		desc_len = min_t(int, max_seg_size, master->max_dma_len);
 		sgs = DIV_ROUND_UP(len, desc_len);
 	}
 
@@ -739,7 +744,6 @@
 			sg_set_buf(&sgt->sgl[i], sg_buf, min);
 		}
 
-
 		buf += min;
 		len -= min;
 	}
@@ -1024,6 +1028,8 @@
 	if (msg->status && master->handle_err)
 		master->handle_err(master, msg);
 
+	spi_res_release(master, msg);
+
 	spi_finalize_current_message(master);
 
 	return ret;
@@ -1047,6 +1053,7 @@
  * __spi_pump_messages - function which processes spi message queue
  * @master: master to process queue for
  * @in_kthread: true if we are in the context of the message pump thread
+ * @bus_locked: true if the bus mutex is held when calling this function
  *
  * This function checks if there is any spi message in the queue that
  * needs processing and if so call out to the driver to initialize hardware
@@ -1056,7 +1063,8 @@
  * inside spi_sync(); the queue extraction handling at the top of the
  * function should deal with this safely.
  */
-static void __spi_pump_messages(struct spi_master *master, bool in_kthread)
+static void __spi_pump_messages(struct spi_master *master, bool in_kthread,
+				bool bus_locked)
 {
 	unsigned long flags;
 	bool was_busy = false;
@@ -1152,6 +1160,9 @@
 		}
 	}
 
+	if (!bus_locked)
+		mutex_lock(&master->bus_lock_mutex);
+
 	trace_spi_message_start(master->cur_msg);
 
 	if (master->prepare_message) {
@@ -1161,7 +1172,7 @@
 				"failed to prepare message: %d\n", ret);
 			master->cur_msg->status = ret;
 			spi_finalize_current_message(master);
-			return;
+			goto out;
 		}
 		master->cur_msg_prepared = true;
 	}
@@ -1170,15 +1181,23 @@
 	if (ret) {
 		master->cur_msg->status = ret;
 		spi_finalize_current_message(master);
-		return;
+		goto out;
 	}
 
 	ret = master->transfer_one_message(master, master->cur_msg);
 	if (ret) {
 		dev_err(&master->dev,
 			"failed to transfer one message from queue\n");
-		return;
+		goto out;
 	}
+
+out:
+	if (!bus_locked)
+		mutex_unlock(&master->bus_lock_mutex);
+
+	/* Prod the scheduler in case transfer_one() was busy waiting */
+	if (!ret)
+		cond_resched();
 }
 
 /**
@@ -1190,7 +1209,7 @@
 	struct spi_master *master =
 		container_of(work, struct spi_master, pump_messages);
 
-	__spi_pump_messages(master, true);
+	__spi_pump_messages(master, true, false);
 }
 
 static int spi_init_queue(struct spi_master *master)
@@ -1581,13 +1600,30 @@
 static int acpi_spi_add_resource(struct acpi_resource *ares, void *data)
 {
 	struct spi_device *spi = data;
+	struct spi_master *master = spi->master;
 
 	if (ares->type == ACPI_RESOURCE_TYPE_SERIAL_BUS) {
 		struct acpi_resource_spi_serialbus *sb;
 
 		sb = &ares->data.spi_serial_bus;
 		if (sb->type == ACPI_RESOURCE_SERIAL_TYPE_SPI) {
-			spi->chip_select = sb->device_selection;
+			/*
+			 * ACPI DeviceSelection numbering is handled by the
+			 * host controller driver in Windows and can vary
+			 * from driver to driver. In Linux we always expect
+			 * 0 .. max - 1 so we need to ask the driver to
+			 * translate between the two schemes.
+			 */
+			if (master->fw_translate_cs) {
+				int cs = master->fw_translate_cs(master,
+						sb->device_selection);
+				if (cs < 0)
+					return cs;
+				spi->chip_select = cs;
+			} else {
+				spi->chip_select = sb->device_selection;
+			}
+
 			spi->max_speed_hz = sb->connection_speed;
 
 			if (sb->clock_phase == ACPI_SPI_SECOND_PHASE)
@@ -2013,6 +2049,336 @@
 }
 EXPORT_SYMBOL_GPL(spi_busnum_to_master);
 
+/*-------------------------------------------------------------------------*/
+
+/* Core methods for SPI resource management */
+
+/**
+ * spi_res_alloc - allocate a spi resource that is life-cycle managed
+ *                 during the processing of a spi_message while using
+ *                 spi_transfer_one
+ * @spi:     the spi device for which we allocate memory
+ * @release: the release code to execute for this resource
+ * @size:    size to alloc and return
+ * @gfp:     GFP allocation flags
+ *
+ * Return: the pointer to the allocated data
+ *
+ * This may get enhanced in the future to allocate from a memory pool
+ * of the @spi_device or @spi_master to avoid repeated allocations.
+ */
+void *spi_res_alloc(struct spi_device *spi,
+		    spi_res_release_t release,
+		    size_t size, gfp_t gfp)
+{
+	struct spi_res *sres;
+
+	sres = kzalloc(sizeof(*sres) + size, gfp);
+	if (!sres)
+		return NULL;
+
+	INIT_LIST_HEAD(&sres->entry);
+	sres->release = release;
+
+	return sres->data;
+}
+EXPORT_SYMBOL_GPL(spi_res_alloc);
+
+/**
+ * spi_res_free - free an spi resource
+ * @res: pointer to the custom data of a resource
+ *
+ */
+void spi_res_free(void *res)
+{
+	struct spi_res *sres = container_of(res, struct spi_res, data);
+
+	if (!res)
+		return;
+
+	WARN_ON(!list_empty(&sres->entry));
+	kfree(sres);
+}
+EXPORT_SYMBOL_GPL(spi_res_free);
+
+/**
+ * spi_res_add - add a spi_res to the spi_message
+ * @message: the spi message
+ * @res:     the spi_resource
+ */
+void spi_res_add(struct spi_message *message, void *res)
+{
+	struct spi_res *sres = container_of(res, struct spi_res, data);
+
+	WARN_ON(!list_empty(&sres->entry));
+	list_add_tail(&sres->entry, &message->resources);
+}
+EXPORT_SYMBOL_GPL(spi_res_add);
+
+/**
+ * spi_res_release - release all spi resources for this message
+ * @master:  the @spi_master
+ * @message: the @spi_message
+ */
+void spi_res_release(struct spi_master *master,
+		     struct spi_message *message)
+{
+	struct spi_res *res;
+
+	while (!list_empty(&message->resources)) {
+		res = list_last_entry(&message->resources,
+				      struct spi_res, entry);
+
+		if (res->release)
+			res->release(master, message, res->data);
+
+		list_del(&res->entry);
+
+		kfree(res);
+	}
+}
+EXPORT_SYMBOL_GPL(spi_res_release);
+
+/*-------------------------------------------------------------------------*/
+
+/* Core methods for spi_message alterations */
+
+static void __spi_replace_transfers_release(struct spi_master *master,
+					    struct spi_message *msg,
+					    void *res)
+{
+	struct spi_replaced_transfers *rxfer = res;
+	size_t i;
+
+	/* call extra callback if requested */
+	if (rxfer->release)
+		rxfer->release(master, msg, res);
+
+	/* insert replaced transfers back into the message */
+	list_splice(&rxfer->replaced_transfers, rxfer->replaced_after);
+
+	/* remove the formerly inserted entries */
+	for (i = 0; i < rxfer->inserted; i++)
+		list_del(&rxfer->inserted_transfers[i].transfer_list);
+}
+
+/**
+ * spi_replace_transfers - replace transfers with several transfers
+ *                         and register change with spi_message.resources
+ * @msg:           the spi_message we work upon
+ * @xfer_first:    the first spi_transfer we want to replace
+ * @remove:        number of transfers to remove
+ * @insert:        the number of transfers we want to insert instead
+ * @release:       extra release code necessary in some circumstances
+ * @extradatasize: extra data to allocate (with alignment guarantees
+ *                 of struct @spi_transfer)
+ * @gfp:           gfp flags
+ *
+ * Return: pointer to @spi_replaced_transfers,
+ *         PTR_ERR(...) in case of errors.
+ */
+struct spi_replaced_transfers *spi_replace_transfers(
+	struct spi_message *msg,
+	struct spi_transfer *xfer_first,
+	size_t remove,
+	size_t insert,
+	spi_replaced_release_t release,
+	size_t extradatasize,
+	gfp_t gfp)
+{
+	struct spi_replaced_transfers *rxfer;
+	struct spi_transfer *xfer;
+	size_t i;
+
+	/* allocate the structure using spi_res */
+	rxfer = spi_res_alloc(msg->spi, __spi_replace_transfers_release,
+			      insert * sizeof(struct spi_transfer)
+			      + sizeof(struct spi_replaced_transfers)
+			      + extradatasize,
+			      gfp);
+	if (!rxfer)
+		return ERR_PTR(-ENOMEM);
+
+	/* the release code to invoke before running the generic release */
+	rxfer->release = release;
+
+	/* assign extradata */
+	if (extradatasize)
+		rxfer->extradata =
+			&rxfer->inserted_transfers[insert];
+
+	/* init the replaced_transfers list */
+	INIT_LIST_HEAD(&rxfer->replaced_transfers);
+
+	/* assign the list_entry after which we should reinsert
+	 * the @replaced_transfers - it may be spi_message.transfers!
+	 */
+	rxfer->replaced_after = xfer_first->transfer_list.prev;
+
+	/* remove the requested number of transfers */
+	for (i = 0; i < remove; i++) {
+		/* if the entry after replaced_after is msg->transfers
+		 * then we have been requested to remove more transfers
+		 * than are in the list
+		 */
+		if (rxfer->replaced_after->next == &msg->transfers) {
+			dev_err(&msg->spi->dev,
+				"requested to remove more spi_transfers than are available\n");
+			/* insert replaced transfers back into the message */
+			list_splice(&rxfer->replaced_transfers,
+				    rxfer->replaced_after);
+
+			/* free the spi_replace_transfer structure */
+			spi_res_free(rxfer);
+
+			/* and return with an error */
+			return ERR_PTR(-EINVAL);
+		}
+
+		/* remove the entry after replaced_after from list of
+		 * transfers and add it to list of replaced_transfers
+		 */
+		list_move_tail(rxfer->replaced_after->next,
+			       &rxfer->replaced_transfers);
+	}
+
+	/* create copies of the given xfer with identical settings,
+	 * based on the first transfer that gets removed
+	 */
+	for (i = 0; i < insert; i++) {
+		/* we need to run in reverse order */
+		xfer = &rxfer->inserted_transfers[insert - 1 - i];
+
+		/* copy all spi_transfer data */
+		memcpy(xfer, xfer_first, sizeof(*xfer));
+
+		/* add to list */
+		list_add(&xfer->transfer_list, rxfer->replaced_after);
+
+		/* clear cs_change and delay_usecs for all but the last */
+		if (i) {
+			xfer->cs_change = false;
+			xfer->delay_usecs = 0;
+		}
+	}
+
+	/* set up inserted */
+	rxfer->inserted = insert;
+
+	/* and register it with spi_res/spi_message */
+	spi_res_add(msg, rxfer);
+
+	return rxfer;
+}
+EXPORT_SYMBOL_GPL(spi_replace_transfers);
+
+static int __spi_split_transfer_maxsize(struct spi_master *master,
+					struct spi_message *msg,
+					struct spi_transfer **xferp,
+					size_t maxsize,
+					gfp_t gfp)
+{
+	struct spi_transfer *xfer = *xferp, *xfers;
+	struct spi_replaced_transfers *srt;
+	size_t offset;
+	size_t count, i;
+
+	/* warn once that we are splitting a transfer */
+	dev_warn_once(&msg->spi->dev,
+		      "spi_transfer of length %i exceeds max length of %zu - needed to split transfers\n",
+		      xfer->len, maxsize);
+
+	/* calculate how many we have to replace */
+	count = DIV_ROUND_UP(xfer->len, maxsize);
+
+	/* create replacement */
+	srt = spi_replace_transfers(msg, xfer, 1, count, NULL, 0, gfp);
+	if (IS_ERR(srt))
+		return PTR_ERR(srt);
+	xfers = srt->inserted_transfers;
+
+	/* now handle each of those newly inserted spi_transfers.
+	 * note that the replacement spi_transfers are all preset
+	 * to the same values as *xferp, so tx_buf, rx_buf and len
+	 * are all identical (as well as most others),
+	 * so we just have to fix up len and the pointers.
+	 *
+	 * this also includes support for the deprecated
+	 * spi_message.is_dma_mapped interface
+	 */
+
+	/* the first transfer just needs the length modified, so we
+	 * run it outside the loop
+	 */
+	xfers[0].len = min_t(size_t, maxsize, xfer[0].len);
+
+	/* all the others need rx_buf/tx_buf also set */
+	for (i = 1, offset = maxsize; i < count; offset += maxsize, i++) {
+		/* update rx_buf, tx_buf and dma */
+		if (xfers[i].rx_buf)
+			xfers[i].rx_buf += offset;
+		if (xfers[i].rx_dma)
+			xfers[i].rx_dma += offset;
+		if (xfers[i].tx_buf)
+			xfers[i].tx_buf += offset;
+		if (xfers[i].tx_dma)
+			xfers[i].tx_dma += offset;
+
+		/* update length */
+		xfers[i].len = min(maxsize, xfers[i].len - offset);
+	}
+
+	/* we set up xferp to the last entry we have inserted,
+	 * so that we skip those already split transfers
+	 */
+	*xferp = &xfers[count - 1];
+
+	/* increment statistics counters */
+	SPI_STATISTICS_INCREMENT_FIELD(&master->statistics,
+				       transfers_split_maxsize);
+	SPI_STATISTICS_INCREMENT_FIELD(&msg->spi->statistics,
+				       transfers_split_maxsize);
+
+	return 0;
+}
+
+/**
+ * spi_split_transfers_maxsize - split spi transfers into multiple transfers
+ *                               when an individual transfer exceeds a
+ *                               certain size
+ * @master:  the @spi_master for this transfer
+ * @msg:     the @spi_message to transform
+ * @maxsize: the maximum length above which a transfer gets split
+ * @gfp:     GFP allocation flags
+ *
+ * Return: status of transformation
+ */
+int spi_split_transfers_maxsize(struct spi_master *master,
+				struct spi_message *msg,
+				size_t maxsize,
+				gfp_t gfp)
+{
+	struct spi_transfer *xfer;
+	int ret;
+
+	/* iterate over the transfer_list,
+	 * but note that xfer is advanced to the last transfer inserted
+	 * to avoid checking sizes again unnecessarily (also, xfer may
+	 * belong to a different list by the time the replacement has
+	 * happened)
+	 */
+	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
+		if (xfer->len > maxsize) {
+			ret = __spi_split_transfer_maxsize(
+				master, msg, &xfer, maxsize, gfp);
+			if (ret)
+				return ret;
+		}
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(spi_split_transfers_maxsize);
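
spi_split_transfers_maxsize() is aimed at controller drivers with a hard per-transfer length limit. A minimal, assumed usage from a prepare_message callback could look like this; the 64 KiB cap and the example_* names are placeholders.

/* Sketch: cap every transfer of a message at a hypothetical 64 KiB limit. */
#include <linux/gfp.h>
#include <linux/sizes.h>
#include <linux/spi/spi.h>

static int example_prepare_message(struct spi_master *master,
				   struct spi_message *msg)
{
	/*
	 * Oversized transfers are replaced in place by several shorter
	 * ones; the originals are restored automatically when the
	 * message's spi_res list is released after the transfer.
	 */
	return spi_split_transfers_maxsize(master, msg, SZ_64K, GFP_KERNEL);
}

static void example_setup(struct spi_master *master)
{
	master->prepare_message = example_prepare_message;
}
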
 
 /*-------------------------------------------------------------------------*/
 
@@ -2351,6 +2717,46 @@
 EXPORT_SYMBOL_GPL(spi_async_locked);
 
 
+int spi_flash_read(struct spi_device *spi,
+		   struct spi_flash_read_message *msg)
+{
+	struct spi_master *master = spi->master;
+	int ret;
+
+	if ((msg->opcode_nbits == SPI_NBITS_DUAL ||
+	     msg->addr_nbits == SPI_NBITS_DUAL) &&
+	    !(spi->mode & (SPI_TX_DUAL | SPI_TX_QUAD)))
+		return -EINVAL;
+	if ((msg->opcode_nbits == SPI_NBITS_QUAD ||
+	     msg->addr_nbits == SPI_NBITS_QUAD) &&
+	    !(spi->mode & SPI_TX_QUAD))
+		return -EINVAL;
+	if (msg->data_nbits == SPI_NBITS_DUAL &&
+	    !(spi->mode & (SPI_RX_DUAL | SPI_RX_QUAD)))
+		return -EINVAL;
+	if (msg->data_nbits == SPI_NBITS_QUAD &&
+	    !(spi->mode &  SPI_RX_QUAD))
+		return -EINVAL;
+
+	if (master->auto_runtime_pm) {
+		ret = pm_runtime_get_sync(master->dev.parent);
+		if (ret < 0) {
+			dev_err(&master->dev, "Failed to power device: %d\n",
+				ret);
+			return ret;
+		}
+	}
+	mutex_lock(&master->bus_lock_mutex);
+	ret = master->spi_flash_read(spi, msg);
+	mutex_unlock(&master->bus_lock_mutex);
+	if (master->auto_runtime_pm)
+		pm_runtime_put(master->dev.parent);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(spi_flash_read);
+
 /*-------------------------------------------------------------------------*/
 
 /* Utility methods for SPI master protocol drivers, layered on
@@ -2414,7 +2820,7 @@
 						       spi_sync_immediate);
 			SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics,
 						       spi_sync_immediate);
-			__spi_pump_messages(master, false);
+			__spi_pump_messages(master, false, bus_locked);
 		}
 
 		wait_for_completion(&done);
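
The spi_res helpers introduced earlier in this file give message-scoped allocations a defined lifetime: anything registered with spi_res_add() is freed — and its optional release callback run — by spi_res_release() once the message has been transferred. A small, assumed example that attaches a scratch buffer to a message:

/* Sketch: a message-scoped scratch buffer managed through spi_res. */
#include <linux/gfp.h>
#include <linux/spi/spi.h>
#include <linux/string.h>

struct example_scratch {
	u8 bounce[64];			/* illustrative payload */
};

static void example_scratch_release(struct spi_master *master,
				    struct spi_message *msg, void *res)
{
	struct example_scratch *s = res;

	/* Called from spi_res_release() when the message is finished. */
	memset(s->bounce, 0, sizeof(s->bounce));
}

static struct example_scratch *example_scratch_get(struct spi_message *msg)
{
	struct example_scratch *s;

	s = spi_res_alloc(msg->spi, example_scratch_release,
			  sizeof(*s), GFP_KERNEL);
	if (!s)
		return NULL;

	spi_res_add(msg, s);	/* released together with the message */
	return s;
}
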
diff --git a/drivers/zorro/zorro-sysfs.c b/drivers/zorro/zorro-sysfs.c
index 36b210f..9282dbf 100644
--- a/drivers/zorro/zorro-sysfs.c
+++ b/drivers/zorro/zorro-sysfs.c
@@ -65,8 +65,7 @@
 				 struct bin_attribute *bin_attr,
 				 char *buf, loff_t off, size_t count)
 {
-	struct zorro_dev *z = to_zorro_dev(container_of(kobj, struct device,
-					   kobj));
+	struct zorro_dev *z = to_zorro_dev(kobj_to_dev(kobj));
 	struct ConfigDev cd;
 
 	/* Construct a ConfigDev */
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 4545e2e..5699bbc 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -931,7 +931,7 @@
 	if (bio_flags & EXTENT_BIO_TREE_LOG)
 		return 0;
 #ifdef CONFIG_X86
-	if (static_cpu_has_safe(X86_FEATURE_XMM4_2))
+	if (static_cpu_has(X86_FEATURE_XMM4_2))
 		return 0;
 #endif
 	return 1;
diff --git a/include/dt-bindings/pinctrl/mt7623-pinfunc.h b/include/dt-bindings/pinctrl/mt7623-pinfunc.h
new file mode 100644
index 0000000..2f00bdc
--- /dev/null
+++ b/include/dt-bindings/pinctrl/mt7623-pinfunc.h
@@ -0,0 +1,520 @@
+#ifndef __DTS_MT7623_PINFUNC_H
+#define __DTS_MT7623_PINFUNC_H
+
+#include <dt-bindings/pinctrl/mt65xx.h>
+
+#define MT7623_PIN_0_PWRAP_SPI0_MI_FUNC_GPIO0 (MTK_PIN_NO(0) | 0)
+#define MT7623_PIN_0_PWRAP_SPI0_MI_FUNC_PWRAP_SPIDO (MTK_PIN_NO(0) | 1)
+#define MT7623_PIN_0_PWRAP_SPI0_MI_FUNC_PWRAP_SPIDI (MTK_PIN_NO(0) | 2)
+
+#define MT7623_PIN_1_PWRAP_SPI0_MO_FUNC_GPIO1 (MTK_PIN_NO(1) | 0)
+#define MT7623_PIN_1_PWRAP_SPI0_MO_FUNC_PWRAP_SPIDI (MTK_PIN_NO(1) | 1)
+#define MT7623_PIN_1_PWRAP_SPI0_MO_FUNC_PWRAP_SPIDO (MTK_PIN_NO(1) | 2)
+
+#define MT7623_PIN_2_PWRAP_INT_FUNC_GPIO2 (MTK_PIN_NO(2) | 0)
+#define MT7623_PIN_2_PWRAP_INT_FUNC_PWRAP_INT (MTK_PIN_NO(2) | 1)
+
+#define MT7623_PIN_3_PWRAP_SPI0_CK_FUNC_GPIO3 (MTK_PIN_NO(3) | 0)
+#define MT7623_PIN_3_PWRAP_SPI0_CK_FUNC_PWRAP_SPICK_I (MTK_PIN_NO(3) | 1)
+
+#define MT7623_PIN_4_PWRAP_SPI0_CSN_FUNC_GPIO4 (MTK_PIN_NO(4) | 0)
+#define MT7623_PIN_4_PWRAP_SPI0_CSN_FUNC_PWRAP_SPICS_B_I (MTK_PIN_NO(4) | 1)
+
+#define MT7623_PIN_5_PWRAP_SPI0_CK2_FUNC_GPIO5 (MTK_PIN_NO(5) | 0)
+#define MT7623_PIN_5_PWRAP_SPI0_CK2_FUNC_PWRAP_SPICK2_I (MTK_PIN_NO(5) | 1)
+
+#define MT7623_PIN_6_PWRAP_SPI0_CSN2_FUNC_GPIO6 (MTK_PIN_NO(6) | 0)
+#define MT7623_PIN_6_PWRAP_SPI0_CSN2_FUNC_PWRAP_SPICS2_B_I (MTK_PIN_NO(6) | 1)
+
+#define MT7623_PIN_7_SPI1_CSN_FUNC_GPIO7 (MTK_PIN_NO(7) | 0)
+#define MT7623_PIN_7_SPI1_CSN_FUNC_SPI1_CS (MTK_PIN_NO(7) | 1)
+
+#define MT7623_PIN_8_SPI1_MI_FUNC_GPIO8 (MTK_PIN_NO(8) | 0)
+#define MT7623_PIN_8_SPI1_MI_FUNC_SPI1_MI (MTK_PIN_NO(8) | 1)
+#define MT7623_PIN_8_SPI1_MI_FUNC_SPI1_MO (MTK_PIN_NO(8) | 2)
+
+#define MT7623_PIN_9_SPI1_MO_FUNC_GPIO9 (MTK_PIN_NO(9) | 0)
+#define MT7623_PIN_9_SPI1_MO_FUNC_SPI1_MO (MTK_PIN_NO(9) | 1)
+#define MT7623_PIN_9_SPI1_MO_FUNC_SPI1_MI (MTK_PIN_NO(9) | 2)
+
+#define MT7623_PIN_10_RTC32K_CK_FUNC_GPIO10 (MTK_PIN_NO(10) | 0)
+#define MT7623_PIN_10_RTC32K_CK_FUNC_RTC32K_CK (MTK_PIN_NO(10) | 1)
+
+#define MT7623_PIN_11_WATCHDOG_FUNC_GPIO11 (MTK_PIN_NO(11) | 0)
+#define MT7623_PIN_11_WATCHDOG_FUNC_WATCHDOG (MTK_PIN_NO(11) | 1)
+
+#define MT7623_PIN_12_SRCLKENA_FUNC_GPIO12 (MTK_PIN_NO(12) | 0)
+#define MT7623_PIN_12_SRCLKENA_FUNC_SRCLKENA (MTK_PIN_NO(12) | 1)
+
+#define MT7623_PIN_13_SRCLKENAI_FUNC_GPIO13 (MTK_PIN_NO(13) | 0)
+#define MT7623_PIN_13_SRCLKENAI_FUNC_SRCLKENAI (MTK_PIN_NO(13) | 1)
+
+#define MT7623_PIN_14_GPIO14_FUNC_GPIO14 (MTK_PIN_NO(14) | 0)
+#define MT7623_PIN_14_GPIO14_FUNC_URXD2 (MTK_PIN_NO(14) | 1)
+#define MT7623_PIN_14_GPIO14_FUNC_UTXD2 (MTK_PIN_NO(14) | 2)
+
+#define MT7623_PIN_15_GPIO15_FUNC_GPIO15 (MTK_PIN_NO(15) | 0)
+#define MT7623_PIN_15_GPIO15_FUNC_UTXD2 (MTK_PIN_NO(15) | 1)
+#define MT7623_PIN_15_GPIO15_FUNC_URXD2 (MTK_PIN_NO(15) | 2)
+
+#define MT7623_PIN_18_PCM_CLK_FUNC_GPIO18 (MTK_PIN_NO(18) | 0)
+#define MT7623_PIN_18_PCM_CLK_FUNC_PCM_CLK0 (MTK_PIN_NO(18) | 1)
+#define MT7623_PIN_18_PCM_CLK_FUNC_AP_PCM_CLKO (MTK_PIN_NO(18) | 6)
+
+#define MT7623_PIN_19_PCM_SYNC_FUNC_GPIO19 (MTK_PIN_NO(19) | 0)
+#define MT7623_PIN_19_PCM_SYNC_FUNC_PCM_SYNC (MTK_PIN_NO(19) | 1)
+#define MT7623_PIN_19_PCM_SYNC_FUNC_AP_PCM_SYNC (MTK_PIN_NO(19) | 6)
+
+#define MT7623_PIN_20_PCM_RX_FUNC_GPIO20 (MTK_PIN_NO(20) | 0)
+#define MT7623_PIN_20_PCM_RX_FUNC_PCM_RX (MTK_PIN_NO(20) | 1)
+#define MT7623_PIN_20_PCM_RX_FUNC_PCM_TX (MTK_PIN_NO(20) | 4)
+#define MT7623_PIN_20_PCM_RX_FUNC_AP_PCM_RX (MTK_PIN_NO(20) | 6)
+
+#define MT7623_PIN_21_PCM_TX_FUNC_GPIO21 (MTK_PIN_NO(21) | 0)
+#define MT7623_PIN_21_PCM_TX_FUNC_PCM_TX (MTK_PIN_NO(21) | 1)
+#define MT7623_PIN_21_PCM_TX_FUNC_PCM_RX (MTK_PIN_NO(21) | 4)
+#define MT7623_PIN_21_PCM_TX_FUNC_AP_PCM_TX (MTK_PIN_NO(21) | 6)
+
+#define MT7623_PIN_22_EINT0_FUNC_GPIO22 (MTK_PIN_NO(22) | 0)
+#define MT7623_PIN_22_EINT0_FUNC_UCTS0 (MTK_PIN_NO(22) | 1)
+#define MT7623_PIN_22_EINT0_FUNC_PCIE0_PERST_N (MTK_PIN_NO(22) | 2)
+
+#define MT7623_PIN_23_EINT1_FUNC_GPIO23 (MTK_PIN_NO(23) | 0)
+#define MT7623_PIN_23_EINT1_FUNC_URTS0 (MTK_PIN_NO(23) | 1)
+#define MT7623_PIN_23_EINT1_FUNC_PCIE1_PERST_N (MTK_PIN_NO(23) | 2)
+
+#define MT7623_PIN_24_EINT2_FUNC_GPIO24 (MTK_PIN_NO(24) | 0)
+#define MT7623_PIN_24_EINT2_FUNC_UCTS1 (MTK_PIN_NO(24) | 1)
+#define MT7623_PIN_24_EINT2_FUNC_PCIE2_PERST_N (MTK_PIN_NO(24) | 2)
+
+#define MT7623_PIN_25_EINT3_FUNC_GPIO25 (MTK_PIN_NO(25) | 0)
+#define MT7623_PIN_25_EINT3_FUNC_URTS1 (MTK_PIN_NO(25) | 1)
+
+#define MT7623_PIN_26_EINT4_FUNC_GPIO26 (MTK_PIN_NO(26) | 0)
+#define MT7623_PIN_26_EINT4_FUNC_UCTS3 (MTK_PIN_NO(26) | 1)
+#define MT7623_PIN_26_EINT4_FUNC_PCIE2_WAKE_N (MTK_PIN_NO(26) | 6)
+
+#define MT7623_PIN_27_EINT5_FUNC_GPIO27 (MTK_PIN_NO(27) | 0)
+#define MT7623_PIN_27_EINT5_FUNC_URTS3 (MTK_PIN_NO(27) | 1)
+#define MT7623_PIN_27_EINT5_FUNC_PCIE1_WAKE_N (MTK_PIN_NO(27) | 6)
+
+#define MT7623_PIN_28_EINT6_FUNC_GPIO28 (MTK_PIN_NO(28) | 0)
+#define MT7623_PIN_28_EINT6_FUNC_DRV_VBUS (MTK_PIN_NO(28) | 1)
+#define MT7623_PIN_28_EINT6_FUNC_PCIE0_WAKE_N (MTK_PIN_NO(28) | 6)
+
+#define MT7623_PIN_29_EINT7_FUNC_GPIO29 (MTK_PIN_NO(29) | 0)
+#define MT7623_PIN_29_EINT7_FUNC_IDDIG (MTK_PIN_NO(29) | 1)
+#define MT7623_PIN_29_EINT7_FUNC_MSDC1_WP (MTK_PIN_NO(29) | 2)
+#define MT7623_PIN_29_EINT7_FUNC_PCIE2_PERST_N (MTK_PIN_NO(29) | 6)
+
+#define MT7623_PIN_33_I2S1_DATA_FUNC_GPIO33 (MTK_PIN_NO(33) | 0)
+#define MT7623_PIN_33_I2S1_DATA_FUNC_I2S1_DATA (MTK_PIN_NO(33) | 1)
+#define MT7623_PIN_33_I2S1_DATA_FUNC_PCM_TX (MTK_PIN_NO(33) | 3)
+#define MT7623_PIN_33_I2S1_DATA_FUNC_AP_PCM_TX (MTK_PIN_NO(33) | 6)
+
+#define MT7623_PIN_34_I2S1_DATA_IN_FUNC_GPIO34 (MTK_PIN_NO(34) | 0)
+#define MT7623_PIN_34_I2S1_DATA_IN_FUNC_I2S1_DATA_IN (MTK_PIN_NO(34) | 1)
+#define MT7623_PIN_34_I2S1_DATA_IN_FUNC_PCM_RX (MTK_PIN_NO(34) | 3)
+#define MT7623_PIN_34_I2S1_DATA_IN_FUNC_AP_PCM_RX (MTK_PIN_NO(34) | 6)
+
+#define MT7623_PIN_35_I2S1_BCK_FUNC_GPIO35 (MTK_PIN_NO(35) | 0)
+#define MT7623_PIN_35_I2S1_BCK_FUNC_I2S1_BCK (MTK_PIN_NO(35) | 1)
+#define MT7623_PIN_35_I2S1_BCK_FUNC_PCM_CLK0 (MTK_PIN_NO(35) | 3)
+#define MT7623_PIN_35_I2S1_BCK_FUNC_AP_PCM_CLKO (MTK_PIN_NO(35) | 6)
+
+#define MT7623_PIN_36_I2S1_LRCK_FUNC_GPIO36 (MTK_PIN_NO(36) | 0)
+#define MT7623_PIN_36_I2S1_LRCK_FUNC_I2S1_LRCK (MTK_PIN_NO(36) | 1)
+#define MT7623_PIN_36_I2S1_LRCK_FUNC_PCM_SYNC (MTK_PIN_NO(36) | 3)
+#define MT7623_PIN_36_I2S1_LRCK_FUNC_AP_PCM_SYNC (MTK_PIN_NO(36) | 6)
+
+#define MT7623_PIN_37_I2S1_MCLK_FUNC_GPIO37 (MTK_PIN_NO(37) | 0)
+#define MT7623_PIN_37_I2S1_MCLK_FUNC_I2S1_MCLK (MTK_PIN_NO(37) | 1)
+
+#define MT7623_PIN_39_JTMS_FUNC_GPIO39 (MTK_PIN_NO(39) | 0)
+#define MT7623_PIN_39_JTMS_FUNC_JTMS (MTK_PIN_NO(39) | 1)
+
+#define MT7623_PIN_40_JTCK_FUNC_GPIO40 (MTK_PIN_NO(40) | 0)
+#define MT7623_PIN_40_JTCK_FUNC_JTCK (MTK_PIN_NO(40) | 1)
+
+#define MT7623_PIN_41_JTDI_FUNC_GPIO41 (MTK_PIN_NO(41) | 0)
+#define MT7623_PIN_41_JTDI_FUNC_JTDI (MTK_PIN_NO(41) | 1)
+
+#define MT7623_PIN_42_JTDO_FUNC_GPIO42 (MTK_PIN_NO(42) | 0)
+#define MT7623_PIN_42_JTDO_FUNC_JTDO (MTK_PIN_NO(42) | 1)
+
+#define MT7623_PIN_43_NCLE_FUNC_GPIO43 (MTK_PIN_NO(43) | 0)
+#define MT7623_PIN_43_NCLE_FUNC_NCLE (MTK_PIN_NO(43) | 1)
+#define MT7623_PIN_43_NCLE_FUNC_EXT_XCS2 (MTK_PIN_NO(43) | 2)
+
+#define MT7623_PIN_44_NCEB1_FUNC_GPIO44 (MTK_PIN_NO(44) | 0)
+#define MT7623_PIN_44_NCEB1_FUNC_NCEB1 (MTK_PIN_NO(44) | 1)
+#define MT7623_PIN_44_NCEB1_FUNC_IDDIG (MTK_PIN_NO(44) | 2)
+
+#define MT7623_PIN_45_NCEB0_FUNC_GPIO45 (MTK_PIN_NO(45) | 0)
+#define MT7623_PIN_45_NCEB0_FUNC_NCEB0 (MTK_PIN_NO(45) | 1)
+#define MT7623_PIN_45_NCEB0_FUNC_DRV_VBUS (MTK_PIN_NO(45) | 2)
+
+#define MT7623_PIN_46_IR_FUNC_GPIO46 (MTK_PIN_NO(46) | 0)
+#define MT7623_PIN_46_IR_FUNC_IR (MTK_PIN_NO(46) | 1)
+
+#define MT7623_PIN_47_NREB_FUNC_GPIO47 (MTK_PIN_NO(47) | 0)
+#define MT7623_PIN_47_NREB_FUNC_NREB (MTK_PIN_NO(47) | 1)
+
+#define MT7623_PIN_48_NRNB_FUNC_GPIO48 (MTK_PIN_NO(48) | 0)
+#define MT7623_PIN_48_NRNB_FUNC_NRNB (MTK_PIN_NO(48) | 1)
+
+#define MT7623_PIN_49_I2S0_DATA_FUNC_GPIO49 (MTK_PIN_NO(49) | 0)
+#define MT7623_PIN_49_I2S0_DATA_FUNC_I2S0_DATA (MTK_PIN_NO(49) | 1)
+#define MT7623_PIN_49_I2S0_DATA_FUNC_PCM_TX (MTK_PIN_NO(49) | 3)
+#define MT7623_PIN_49_I2S0_DATA_FUNC_AP_I2S_DO (MTK_PIN_NO(49) | 6)
+
+#define MT7623_PIN_53_SPI0_CSN_FUNC_GPIO53 (MTK_PIN_NO(53) | 0)
+#define MT7623_PIN_53_SPI0_CSN_FUNC_SPI0_CS (MTK_PIN_NO(53) | 1)
+#define MT7623_PIN_53_SPI0_CSN_FUNC_PWM1 (MTK_PIN_NO(53) | 5)
+
+#define MT7623_PIN_54_SPI0_CK_FUNC_GPIO54 (MTK_PIN_NO(54) | 0)
+#define MT7623_PIN_54_SPI0_CK_FUNC_SPI0_CK (MTK_PIN_NO(54) | 1)
+
+#define MT7623_PIN_55_SPI0_MI_FUNC_GPIO55 (MTK_PIN_NO(55) | 0)
+#define MT7623_PIN_55_SPI0_MI_FUNC_SPI0_MI (MTK_PIN_NO(55) | 1)
+#define MT7623_PIN_55_SPI0_MI_FUNC_SPI0_MO (MTK_PIN_NO(55) | 2)
+#define MT7623_PIN_55_SPI0_MI_FUNC_MSDC1_WP (MTK_PIN_NO(55) | 3)
+#define MT7623_PIN_55_SPI0_MI_FUNC_PWM2 (MTK_PIN_NO(55) | 5)
+
+#define MT7623_PIN_56_SPI0_MO_FUNC_GPIO56 (MTK_PIN_NO(56) | 0)
+#define MT7623_PIN_56_SPI0_MO_FUNC_SPI0_MO (MTK_PIN_NO(56) | 1)
+#define MT7623_PIN_56_SPI0_MO_FUNC_SPI0_MI (MTK_PIN_NO(56) | 2)
+
+#define MT7623_PIN_60_WB_RSTB_FUNC_GPIO60 (MTK_PIN_NO(60) | 0)
+#define MT7623_PIN_60_WB_RSTB_FUNC_WB_RSTB (MTK_PIN_NO(60) | 1)
+
+#define MT7623_PIN_61_GPIO61_FUNC_GPIO61 (MTK_PIN_NO(61) | 0)
+#define MT7623_PIN_61_GPIO61_FUNC_TEST_FD (MTK_PIN_NO(61) | 1)
+
+#define MT7623_PIN_62_GPIO62_FUNC_GPIO62 (MTK_PIN_NO(62) | 0)
+#define MT7623_PIN_62_GPIO62_FUNC_TEST_FC (MTK_PIN_NO(62) | 1)
+
+#define MT7623_PIN_63_WB_SCLK_FUNC_GPIO63 (MTK_PIN_NO(63) | 0)
+#define MT7623_PIN_63_WB_SCLK_FUNC_WB_SCLK (MTK_PIN_NO(63) | 1)
+
+#define MT7623_PIN_64_WB_SDATA_FUNC_GPIO64 (MTK_PIN_NO(64) | 0)
+#define MT7623_PIN_64_WB_SDATA_FUNC_WB_SDATA (MTK_PIN_NO(64) | 1)
+
+#define MT7623_PIN_65_WB_SEN_FUNC_GPIO65 (MTK_PIN_NO(65) | 0)
+#define MT7623_PIN_65_WB_SEN_FUNC_WB_SEN (MTK_PIN_NO(65) | 1)
+
+#define MT7623_PIN_66_WB_CRTL0_FUNC_GPIO66 (MTK_PIN_NO(66) | 0)
+#define MT7623_PIN_66_WB_CRTL0_FUNC_WB_CRTL0 (MTK_PIN_NO(66) | 1)
+
+#define MT7623_PIN_67_WB_CRTL1_FUNC_GPIO67 (MTK_PIN_NO(67) | 0)
+#define MT7623_PIN_67_WB_CRTL1_FUNC_WB_CRTL1 (MTK_PIN_NO(67) | 1)
+
+#define MT7623_PIN_68_WB_CRTL2_FUNC_GPIO68 (MTK_PIN_NO(68) | 0)
+#define MT7623_PIN_68_WB_CRTL2_FUNC_WB_CRTL2 (MTK_PIN_NO(68) | 1)
+
+#define MT7623_PIN_69_WB_CRTL3_FUNC_GPIO69 (MTK_PIN_NO(69) | 0)
+#define MT7623_PIN_69_WB_CRTL3_FUNC_WB_CRTL3 (MTK_PIN_NO(69) | 1)
+
+#define MT7623_PIN_70_WB_CRTL4_FUNC_GPIO70 (MTK_PIN_NO(70) | 0)
+#define MT7623_PIN_70_WB_CRTL4_FUNC_WB_CRTL4 (MTK_PIN_NO(70) | 1)
+
+#define MT7623_PIN_71_WB_CRTL5_FUNC_GPIO71 (MTK_PIN_NO(71) | 0)
+#define MT7623_PIN_71_WB_CRTL5_FUNC_WB_CRTL5 (MTK_PIN_NO(71) | 1)
+
+#define MT7623_PIN_72_I2S0_DATA_IN_FUNC_GPIO72 (MTK_PIN_NO(72) | 0)
+#define MT7623_PIN_72_I2S0_DATA_IN_FUNC_I2S0_DATA_IN (MTK_PIN_NO(72) | 1)
+#define MT7623_PIN_72_I2S0_DATA_IN_FUNC_PCM_RX (MTK_PIN_NO(72) | 3)
+#define MT7623_PIN_72_I2S0_DATA_IN_FUNC_PWM0 (MTK_PIN_NO(72) | 4)
+#define MT7623_PIN_72_I2S0_DATA_IN_FUNC_DISP_PWM (MTK_PIN_NO(72) | 5)
+#define MT7623_PIN_72_I2S0_DATA_IN_FUNC_AP_I2S_DI (MTK_PIN_NO(72) | 6)
+
+#define MT7623_PIN_73_I2S0_LRCK_FUNC_GPIO73 (MTK_PIN_NO(73) | 0)
+#define MT7623_PIN_73_I2S0_LRCK_FUNC_I2S0_LRCK (MTK_PIN_NO(73) | 1)
+#define MT7623_PIN_73_I2S0_LRCK_FUNC_PCM_SYNC (MTK_PIN_NO(73) | 3)
+#define MT7623_PIN_73_I2S0_LRCK_FUNC_AP_I2S_LRCK (MTK_PIN_NO(73) | 6)
+
+#define MT7623_PIN_74_I2S0_BCK_FUNC_GPIO74 (MTK_PIN_NO(74) | 0)
+#define MT7623_PIN_74_I2S0_BCK_FUNC_I2S0_BCK (MTK_PIN_NO(74) | 1)
+#define MT7623_PIN_74_I2S0_BCK_FUNC_PCM_CLK0 (MTK_PIN_NO(74) | 3)
+#define MT7623_PIN_74_I2S0_BCK_FUNC_AP_I2S_BCK (MTK_PIN_NO(74) | 6)
+
+#define MT7623_PIN_75_SDA0_FUNC_GPIO75 (MTK_PIN_NO(75) | 0)
+#define MT7623_PIN_75_SDA0_FUNC_SDA0 (MTK_PIN_NO(75) | 1)
+
+#define MT7623_PIN_76_SCL0_FUNC_GPIO76 (MTK_PIN_NO(76) | 0)
+#define MT7623_PIN_76_SCL0_FUNC_SCL0 (MTK_PIN_NO(76) | 1)
+
+#define MT7623_PIN_83_LCM_RST_FUNC_GPIO83 (MTK_PIN_NO(83) | 0)
+#define MT7623_PIN_83_LCM_RST_FUNC_LCM_RST (MTK_PIN_NO(83) | 1)
+
+#define MT7623_PIN_84_DSI_TE_FUNC_GPIO84 (MTK_PIN_NO(84) | 0)
+#define MT7623_PIN_84_DSI_TE_FUNC_DSI_TE (MTK_PIN_NO(84) | 1)
+
+#define MT7623_PIN_95_MIPI_TCN_FUNC_GPIO95 (MTK_PIN_NO(95) | 0)
+#define MT7623_PIN_95_MIPI_TCN_FUNC_TCN (MTK_PIN_NO(95) | 1)
+
+#define MT7623_PIN_96_MIPI_TCP_FUNC_GPIO96 (MTK_PIN_NO(96) | 0)
+#define MT7623_PIN_96_MIPI_TCP_FUNC_TCP (MTK_PIN_NO(96) | 1)
+
+#define MT7623_PIN_97_MIPI_TDN1_FUNC_GPIO97 (MTK_PIN_NO(97) | 0)
+#define MT7623_PIN_97_MIPI_TDN1_FUNC_TDN1 (MTK_PIN_NO(97) | 1)
+
+#define MT7623_PIN_98_MIPI_TDP1_FUNC_GPIO98 (MTK_PIN_NO(98) | 0)
+#define MT7623_PIN_98_MIPI_TDP1_FUNC_TDP1 (MTK_PIN_NO(98) | 1)
+
+#define MT7623_PIN_99_MIPI_TDN0_FUNC_GPIO99 (MTK_PIN_NO(99) | 0)
+#define MT7623_PIN_99_MIPI_TDN0_FUNC_TDN0 (MTK_PIN_NO(99) | 1)
+
+#define MT7623_PIN_100_MIPI_TDP0_FUNC_GPIO100 (MTK_PIN_NO(100) | 0)
+#define MT7623_PIN_100_MIPI_TDP0_FUNC_TDP0 (MTK_PIN_NO(100) | 1)
+
+#define MT7623_PIN_105_MSDC1_CMD_FUNC_GPIO105 (MTK_PIN_NO(105) | 0)
+#define MT7623_PIN_105_MSDC1_CMD_FUNC_MSDC1_CMD (MTK_PIN_NO(105) | 1)
+#define MT7623_PIN_105_MSDC1_CMD_FUNC_SDA1 (MTK_PIN_NO(105) | 3)
+#define MT7623_PIN_105_MSDC1_CMD_FUNC_I2SOUT_BCK (MTK_PIN_NO(105) | 6)
+
+#define MT7623_PIN_106_MSDC1_CLK_FUNC_GPIO106 (MTK_PIN_NO(106) | 0)
+#define MT7623_PIN_106_MSDC1_CLK_FUNC_MSDC1_CLK (MTK_PIN_NO(106) | 1)
+#define MT7623_PIN_106_MSDC1_CLK_FUNC_SCL1 (MTK_PIN_NO(106) | 3)
+#define MT7623_PIN_106_MSDC1_CLK_FUNC_I2SOUT_LRCK (MTK_PIN_NO(106) | 6)
+
+#define MT7623_PIN_107_MSDC1_DAT0_FUNC_GPIO107 (MTK_PIN_NO(107) | 0)
+#define MT7623_PIN_107_MSDC1_DAT0_FUNC_MSDC1_DAT0 (MTK_PIN_NO(107) | 1)
+#define MT7623_PIN_107_MSDC1_DAT0_FUNC_UTXD0 (MTK_PIN_NO(107) | 5)
+#define MT7623_PIN_107_MSDC1_DAT0_FUNC_I2SOUT_DATA_OUT (MTK_PIN_NO(107) | 6)
+
+#define MT7623_PIN_108_MSDC1_DAT1_FUNC_GPIO108 (MTK_PIN_NO(108) | 0)
+#define MT7623_PIN_108_MSDC1_DAT1_FUNC_MSDC1_DAT1 (MTK_PIN_NO(108) | 1)
+#define MT7623_PIN_108_MSDC1_DAT1_FUNC_PWM0 (MTK_PIN_NO(108) | 3)
+#define MT7623_PIN_108_MSDC1_DAT1_FUNC_URXD0 (MTK_PIN_NO(108) | 5)
+#define MT7623_PIN_108_MSDC1_DAT1_FUNC_PWM1 (MTK_PIN_NO(108) | 6)
+
+#define MT7623_PIN_109_MSDC1_DAT2_FUNC_GPIO109 (MTK_PIN_NO(109) | 0)
+#define MT7623_PIN_109_MSDC1_DAT2_FUNC_MSDC1_DAT2 (MTK_PIN_NO(109) | 1)
+#define MT7623_PIN_109_MSDC1_DAT2_FUNC_SDA2 (MTK_PIN_NO(109) | 3)
+#define MT7623_PIN_109_MSDC1_DAT2_FUNC_UTXD1 (MTK_PIN_NO(109) | 5)
+#define MT7623_PIN_109_MSDC1_DAT2_FUNC_PWM2 (MTK_PIN_NO(109) | 6)
+
+#define MT7623_PIN_110_MSDC1_DAT3_FUNC_GPIO110 (MTK_PIN_NO(110) | 0)
+#define MT7623_PIN_110_MSDC1_DAT3_FUNC_MSDC1_DAT3 (MTK_PIN_NO(110) | 1)
+#define MT7623_PIN_110_MSDC1_DAT3_FUNC_SCL2 (MTK_PIN_NO(110) | 3)
+#define MT7623_PIN_110_MSDC1_DAT3_FUNC_URXD1 (MTK_PIN_NO(110) | 5)
+#define MT7623_PIN_110_MSDC1_DAT3_FUNC_PWM3 (MTK_PIN_NO(110) | 6)
+
+#define MT7623_PIN_111_MSDC0_DAT7_FUNC_GPIO111 (MTK_PIN_NO(111) | 0)
+#define MT7623_PIN_111_MSDC0_DAT7_FUNC_MSDC0_DAT7 (MTK_PIN_NO(111) | 1)
+#define MT7623_PIN_111_MSDC0_DAT7_FUNC_NLD7 (MTK_PIN_NO(111) | 4)
+
+#define MT7623_PIN_112_MSDC0_DAT6_FUNC_GPIO112 (MTK_PIN_NO(112) | 0)
+#define MT7623_PIN_112_MSDC0_DAT6_FUNC_MSDC0_DAT6 (MTK_PIN_NO(112) | 1)
+#define MT7623_PIN_112_MSDC0_DAT6_FUNC_NLD6 (MTK_PIN_NO(112) | 4)
+
+#define MT7623_PIN_113_MSDC0_DAT5_FUNC_GPIO113 (MTK_PIN_NO(113) | 0)
+#define MT7623_PIN_113_MSDC0_DAT5_FUNC_MSDC0_DAT5 (MTK_PIN_NO(113) | 1)
+#define MT7623_PIN_113_MSDC0_DAT5_FUNC_NLD5 (MTK_PIN_NO(113) | 4)
+
+#define MT7623_PIN_114_MSDC0_DAT4_FUNC_GPIO114 (MTK_PIN_NO(114) | 0)
+#define MT7623_PIN_114_MSDC0_DAT4_FUNC_MSDC0_DAT4 (MTK_PIN_NO(114) | 1)
+#define MT7623_PIN_114_MSDC0_DAT4_FUNC_NLD4 (MTK_PIN_NO(114) | 4)
+
+#define MT7623_PIN_115_MSDC0_RSTB_FUNC_GPIO115 (MTK_PIN_NO(115) | 0)
+#define MT7623_PIN_115_MSDC0_RSTB_FUNC_MSDC0_RSTB (MTK_PIN_NO(115) | 1)
+#define MT7623_PIN_115_MSDC0_RSTB_FUNC_NLD8 (MTK_PIN_NO(115) | 4)
+
+#define MT7623_PIN_116_MSDC0_CMD_FUNC_GPIO116 (MTK_PIN_NO(116) | 0)
+#define MT7623_PIN_116_MSDC0_CMD_FUNC_MSDC0_CMD (MTK_PIN_NO(116) | 1)
+#define MT7623_PIN_116_MSDC0_CMD_FUNC_NALE (MTK_PIN_NO(116) | 4)
+
+#define MT7623_PIN_117_MSDC0_CLK_FUNC_GPIO117 (MTK_PIN_NO(117) | 0)
+#define MT7623_PIN_117_MSDC0_CLK_FUNC_MSDC0_CLK (MTK_PIN_NO(117) | 1)
+#define MT7623_PIN_117_MSDC0_CLK_FUNC_NWEB (MTK_PIN_NO(117) | 4)
+
+#define MT7623_PIN_118_MSDC0_DAT3_FUNC_GPIO118 (MTK_PIN_NO(118) | 0)
+#define MT7623_PIN_118_MSDC0_DAT3_FUNC_MSDC0_DAT3 (MTK_PIN_NO(118) | 1)
+#define MT7623_PIN_118_MSDC0_DAT3_FUNC_NLD3 (MTK_PIN_NO(118) | 4)
+
+#define MT7623_PIN_119_MSDC0_DAT2_FUNC_GPIO119 (MTK_PIN_NO(119) | 0)
+#define MT7623_PIN_119_MSDC0_DAT2_FUNC_MSDC0_DAT2 (MTK_PIN_NO(119) | 1)
+#define MT7623_PIN_119_MSDC0_DAT2_FUNC_NLD2 (MTK_PIN_NO(119) | 4)
+
+#define MT7623_PIN_120_MSDC0_DAT1_FUNC_GPIO120 (MTK_PIN_NO(120) | 0)
+#define MT7623_PIN_120_MSDC0_DAT1_FUNC_MSDC0_DAT1 (MTK_PIN_NO(120) | 1)
+#define MT7623_PIN_120_MSDC0_DAT1_FUNC_NLD1 (MTK_PIN_NO(120) | 4)
+
+#define MT7623_PIN_121_MSDC0_DAT0_FUNC_GPIO121 (MTK_PIN_NO(121) | 0)
+#define MT7623_PIN_121_MSDC0_DAT0_FUNC_MSDC0_DAT0 (MTK_PIN_NO(121) | 1)
+#define MT7623_PIN_121_MSDC0_DAT0_FUNC_NLD0 (MTK_PIN_NO(121) | 4)
+#define MT7623_PIN_121_MSDC0_DAT0_FUNC_WATCHDOG (MTK_PIN_NO(121) | 5)
+
+#define MT7623_PIN_122_GPIO122_FUNC_GPIO122 (MTK_PIN_NO(122) | 0)
+#define MT7623_PIN_122_GPIO122_FUNC_TEST (MTK_PIN_NO(122) | 1)
+#define MT7623_PIN_122_GPIO122_FUNC_SDA2 (MTK_PIN_NO(122) | 4)
+#define MT7623_PIN_122_GPIO122_FUNC_URXD0 (MTK_PIN_NO(122) | 5)
+
+#define MT7623_PIN_123_GPIO123_FUNC_GPIO123 (MTK_PIN_NO(123) | 0)
+#define MT7623_PIN_123_GPIO123_FUNC_TEST (MTK_PIN_NO(123) | 1)
+#define MT7623_PIN_123_GPIO123_FUNC_SCL2 (MTK_PIN_NO(123) | 4)
+#define MT7623_PIN_123_GPIO123_FUNC_UTXD0 (MTK_PIN_NO(123) | 5)
+
+#define MT7623_PIN_124_GPIO124_FUNC_GPIO124 (MTK_PIN_NO(124) | 0)
+#define MT7623_PIN_124_GPIO124_FUNC_TEST (MTK_PIN_NO(124) | 1)
+#define MT7623_PIN_124_GPIO124_FUNC_SDA1 (MTK_PIN_NO(124) | 4)
+#define MT7623_PIN_124_GPIO124_FUNC_PWM3 (MTK_PIN_NO(124) | 5)
+
+#define MT7623_PIN_125_GPIO125_FUNC_GPIO125 (MTK_PIN_NO(125) | 0)
+#define MT7623_PIN_125_GPIO125_FUNC_TEST (MTK_PIN_NO(125) | 1)
+#define MT7623_PIN_125_GPIO125_FUNC_SCL1 (MTK_PIN_NO(125) | 4)
+#define MT7623_PIN_125_GPIO125_FUNC_PWM4 (MTK_PIN_NO(125) | 5)
+
+#define MT7623_PIN_126_I2S0_MCLK_FUNC_GPIO126 (MTK_PIN_NO(126) | 0)
+#define MT7623_PIN_126_I2S0_MCLK_FUNC_I2S0_MCLK (MTK_PIN_NO(126) | 1)
+#define MT7623_PIN_126_I2S0_MCLK_FUNC_AP_I2S_MCLK (MTK_PIN_NO(126) | 6)
+
+#define MT7623_PIN_199_SPI1_CK_FUNC_GPIO199 (MTK_PIN_NO(199) | 0)
+#define MT7623_PIN_199_SPI1_CK_FUNC_SPI1_CK (MTK_PIN_NO(199) | 1)
+
+#define MT7623_PIN_200_URXD2_FUNC_GPIO200 (MTK_PIN_NO(200) | 0)
+#define MT7623_PIN_200_URXD2_FUNC_URXD2 (MTK_PIN_NO(200) | 6)
+
+#define MT7623_PIN_201_UTXD2_FUNC_GPIO201 (MTK_PIN_NO(201) | 0)
+#define MT7623_PIN_201_UTXD2_FUNC_UTXD2 (MTK_PIN_NO(201) | 6)
+
+#define MT7623_PIN_203_PWM0_FUNC_GPIO203 (MTK_PIN_NO(203) | 0)
+#define MT7623_PIN_203_PWM0_FUNC_PWM0 (MTK_PIN_NO(203) | 1)
+#define MT7623_PIN_203_PWM0_FUNC_DISP_PWM (MTK_PIN_NO(203) | 2)
+
+#define MT7623_PIN_204_PWM1_FUNC_GPIO204 (MTK_PIN_NO(204) | 0)
+#define MT7623_PIN_204_PWM1_FUNC_PWM1 (MTK_PIN_NO(204) | 1)
+
+#define MT7623_PIN_205_PWM2_FUNC_GPIO205 (MTK_PIN_NO(205) | 0)
+#define MT7623_PIN_205_PWM2_FUNC_PWM2 (MTK_PIN_NO(205) | 1)
+
+#define MT7623_PIN_206_PWM3_FUNC_GPIO206 (MTK_PIN_NO(206) | 0)
+#define MT7623_PIN_206_PWM3_FUNC_PWM3 (MTK_PIN_NO(206) | 1)
+
+#define MT7623_PIN_207_PWM4_FUNC_GPIO207 (MTK_PIN_NO(207) | 0)
+#define MT7623_PIN_207_PWM4_FUNC_PWM4 (MTK_PIN_NO(207) | 1)
+
+#define MT7623_PIN_208_AUD_EXT_CK1_FUNC_GPIO208 (MTK_PIN_NO(208) | 0)
+#define MT7623_PIN_208_AUD_EXT_CK1_FUNC_AUD_EXT_CK1 (MTK_PIN_NO(208) | 1)
+#define MT7623_PIN_208_AUD_EXT_CK1_FUNC_PWM0 (MTK_PIN_NO(208) | 2)
+#define MT7623_PIN_208_AUD_EXT_CK1_FUNC_PCIE0_PERST_N (MTK_PIN_NO(208) | 3)
+#define MT7623_PIN_208_AUD_EXT_CK1_FUNC_DISP_PWM (MTK_PIN_NO(208) | 5)
+
+#define MT7623_PIN_209_AUD_EXT_CK2_FUNC_GPIO209 (MTK_PIN_NO(209) | 0)
+#define MT7623_PIN_209_AUD_EXT_CK2_FUNC_AUD_EXT_CK2 (MTK_PIN_NO(209) | 1)
+#define MT7623_PIN_209_AUD_EXT_CK2_FUNC_MSDC1_WP (MTK_PIN_NO(209) | 2)
+#define MT7623_PIN_209_AUD_EXT_CK2_FUNC_PCIE1_PERST_N (MTK_PIN_NO(209) | 3)
+#define MT7623_PIN_209_AUD_EXT_CK2_FUNC_PWM1 (MTK_PIN_NO(209) | 5)
+
+#define MT7623_PIN_236_EXT_SDIO3_FUNC_GPIO236 (MTK_PIN_NO(236) | 0)
+#define MT7623_PIN_236_EXT_SDIO3_FUNC_EXT_SDIO3 (MTK_PIN_NO(236) | 1)
+#define MT7623_PIN_236_EXT_SDIO3_FUNC_IDDIG (MTK_PIN_NO(236) | 2)
+
+#define MT7623_PIN_237_EXT_SDIO2_FUNC_GPIO237 (MTK_PIN_NO(237) | 0)
+#define MT7623_PIN_237_EXT_SDIO2_FUNC_EXT_SDIO2 (MTK_PIN_NO(237) | 1)
+#define MT7623_PIN_237_EXT_SDIO2_FUNC_DRV_VBUS (MTK_PIN_NO(237) | 2)
+
+#define MT7623_PIN_238_EXT_SDIO1_FUNC_GPIO238 (MTK_PIN_NO(238) | 0)
+#define MT7623_PIN_238_EXT_SDIO1_FUNC_EXT_SDIO1 (MTK_PIN_NO(238) | 1)
+
+#define MT7623_PIN_239_EXT_SDIO0_FUNC_GPIO239 (MTK_PIN_NO(239) | 0)
+#define MT7623_PIN_239_EXT_SDIO0_FUNC_EXT_SDIO0 (MTK_PIN_NO(239) | 1)
+
+#define MT7623_PIN_240_EXT_XCS_FUNC_GPIO240 (MTK_PIN_NO(240) | 0)
+#define MT7623_PIN_240_EXT_XCS_FUNC_EXT_XCS (MTK_PIN_NO(240) | 1)
+
+#define MT7623_PIN_241_EXT_SCK_FUNC_GPIO241 (MTK_PIN_NO(241) | 0)
+#define MT7623_PIN_241_EXT_SCK_FUNC_EXT_SCK (MTK_PIN_NO(241) | 1)
+
+#define MT7623_PIN_242_URTS2_FUNC_GPIO242 (MTK_PIN_NO(242) | 0)
+#define MT7623_PIN_242_URTS2_FUNC_URTS2 (MTK_PIN_NO(242) | 1)
+#define MT7623_PIN_242_URTS2_FUNC_UTXD3 (MTK_PIN_NO(242) | 2)
+#define MT7623_PIN_242_URTS2_FUNC_URXD3 (MTK_PIN_NO(242) | 3)
+#define MT7623_PIN_242_URTS2_FUNC_SCL1 (MTK_PIN_NO(242) | 4)
+
+#define MT7623_PIN_243_UCTS2_FUNC_GPIO243 (MTK_PIN_NO(243) | 0)
+#define MT7623_PIN_243_UCTS2_FUNC_UCTS2 (MTK_PIN_NO(243) | 1)
+#define MT7623_PIN_243_UCTS2_FUNC_URXD3 (MTK_PIN_NO(243) | 2)
+#define MT7623_PIN_243_UCTS2_FUNC_UTXD3 (MTK_PIN_NO(243) | 3)
+#define MT7623_PIN_243_UCTS2_FUNC_SDA1 (MTK_PIN_NO(243) | 4)
+
+#define MT7623_PIN_250_GPIO250_FUNC_GPIO250 (MTK_PIN_NO(250) | 0)
+#define MT7623_PIN_250_GPIO250_FUNC_TEST_MD7 (MTK_PIN_NO(250) | 1)
+#define MT7623_PIN_250_GPIO250_FUNC_PCIE0_CLKREQ_N (MTK_PIN_NO(250) | 6)
+
+#define MT7623_PIN_251_GPIO251_FUNC_GPIO251 (MTK_PIN_NO(251) | 0)
+#define MT7623_PIN_251_GPIO251_FUNC_TEST_MD6 (MTK_PIN_NO(251) | 1)
+#define MT7623_PIN_251_GPIO251_FUNC_PCIE0_WAKE_N (MTK_PIN_NO(251) | 6)
+
+#define MT7623_PIN_252_GPIO252_FUNC_GPIO252 (MTK_PIN_NO(252) | 0)
+#define MT7623_PIN_252_GPIO252_FUNC_TEST_MD5 (MTK_PIN_NO(252) | 1)
+#define MT7623_PIN_252_GPIO252_FUNC_PCIE1_CLKREQ_N (MTK_PIN_NO(252) | 6)
+
+#define MT7623_PIN_253_GPIO253_FUNC_GPIO253 (MTK_PIN_NO(253) | 0)
+#define MT7623_PIN_253_GPIO253_FUNC_TEST_MD4 (MTK_PIN_NO(253) | 1)
+#define MT7623_PIN_253_GPIO253_FUNC_PCIE1_WAKE_N (MTK_PIN_NO(253) | 6)
+
+#define MT7623_PIN_254_GPIO254_FUNC_GPIO254 (MTK_PIN_NO(254) | 0)
+#define MT7623_PIN_254_GPIO254_FUNC_TEST_MD3 (MTK_PIN_NO(254) | 1)
+#define MT7623_PIN_254_GPIO254_FUNC_PCIE2_CLKREQ_N (MTK_PIN_NO(254) | 6)
+
+#define MT7623_PIN_255_GPIO255_FUNC_GPIO255 (MTK_PIN_NO(255) | 0)
+#define MT7623_PIN_255_GPIO255_FUNC_TEST_MD2 (MTK_PIN_NO(255) | 1)
+#define MT7623_PIN_255_GPIO255_FUNC_PCIE2_WAKE_N (MTK_PIN_NO(255) | 6)
+
+#define MT7623_PIN_256_GPIO256_FUNC_GPIO256 (MTK_PIN_NO(256) | 0)
+#define MT7623_PIN_256_GPIO256_FUNC_TEST_MD1 (MTK_PIN_NO(256) | 1)
+
+#define MT7623_PIN_257_GPIO257_FUNC_GPIO257 (MTK_PIN_NO(257) | 0)
+#define MT7623_PIN_257_GPIO257_FUNC_TEST_MD0 (MTK_PIN_NO(257) | 1)
+
+#define MT7623_PIN_261_MSDC1_INS_FUNC_GPIO261 (MTK_PIN_NO(261) | 0)
+#define MT7623_PIN_261_MSDC1_INS_FUNC_MSDC1_INS (MTK_PIN_NO(261) | 1)
+
+#define MT7623_PIN_262_G2_TXEN_FUNC_GPIO262 (MTK_PIN_NO(262) | 0)
+#define MT7623_PIN_262_G2_TXEN_FUNC_G2_TXEN (MTK_PIN_NO(262) | 1)
+
+#define MT7623_PIN_263_G2_TXD3_FUNC_GPIO263 (MTK_PIN_NO(263) | 0)
+#define MT7623_PIN_263_G2_TXD3_FUNC_G2_TXD3 (MTK_PIN_NO(263) | 1)
+
+#define MT7623_PIN_264_G2_TXD2_FUNC_GPIO264 (MTK_PIN_NO(264) | 0)
+#define MT7623_PIN_264_G2_TXD2_FUNC_G2_TXD2 (MTK_PIN_NO(264) | 1)
+
+#define MT7623_PIN_265_G2_TXD1_FUNC_GPIO265 (MTK_PIN_NO(265) | 0)
+#define MT7623_PIN_265_G2_TXD1_FUNC_G2_TXD1 (MTK_PIN_NO(265) | 1)
+
+#define MT7623_PIN_266_G2_TXD0_FUNC_GPIO266 (MTK_PIN_NO(266) | 0)
+#define MT7623_PIN_266_G2_TXD0_FUNC_G2_TXD0 (MTK_PIN_NO(266) | 1)
+
+#define MT7623_PIN_267_G2_TXCLK_FUNC_GPIO267 (MTK_PIN_NO(267) | 0)
+#define MT7623_PIN_267_G2_TXCLK_FUNC_G2_TXC (MTK_PIN_NO(267) | 1)
+
+#define MT7623_PIN_268_G2_RXCLK_FUNC_GPIO268 (MTK_PIN_NO(268) | 0)
+#define MT7623_PIN_268_G2_RXCLK_FUNC_G2_RXC (MTK_PIN_NO(268) | 1)
+
+#define MT7623_PIN_269_G2_RXD0_FUNC_GPIO269 (MTK_PIN_NO(269) | 0)
+#define MT7623_PIN_269_G2_RXD0_FUNC_G2_RXD0 (MTK_PIN_NO(269) | 1)
+
+#define MT7623_PIN_270_G2_RXD1_FUNC_GPIO270 (MTK_PIN_NO(270) | 0)
+#define MT7623_PIN_270_G2_RXD1_FUNC_G2_RXD1 (MTK_PIN_NO(270) | 1)
+
+#define MT7623_PIN_271_G2_RXD2_FUNC_GPIO271 (MTK_PIN_NO(271) | 0)
+#define MT7623_PIN_271_G2_RXD2_FUNC_G2_RXD2 (MTK_PIN_NO(271) | 1)
+
+#define MT7623_PIN_272_G2_RXD3_FUNC_GPIO272 (MTK_PIN_NO(272) | 0)
+#define MT7623_PIN_272_G2_RXD3_FUNC_G2_RXD3 (MTK_PIN_NO(272) | 1)
+
+#define MT7623_PIN_274_G2_RXDV_FUNC_GPIO274 (MTK_PIN_NO(274) | 0)
+#define MT7623_PIN_274_G2_RXDV_FUNC_G2_RXDV (MTK_PIN_NO(274) | 1)
+
+#define MT7623_PIN_275_G2_MDC_FUNC_GPIO275 (MTK_PIN_NO(275) | 0)
+#define MT7623_PIN_275_G2_MDC_FUNC_MDC (MTK_PIN_NO(275) | 1)
+
+#define MT7623_PIN_276_G2_MDIO_FUNC_GPIO276 (MTK_PIN_NO(276) | 0)
+#define MT7623_PIN_276_G2_MDIO_FUNC_MDIO (MTK_PIN_NO(276) | 1)
+
+#define MT7623_PIN_278_JTAG_RESET_FUNC_GPIO278 (MTK_PIN_NO(278) | 0)
+#define MT7623_PIN_278_JTAG_RESET_FUNC_JTAG_RESET (MTK_PIN_NO(278) | 1)
+
+#endif /* __DTS_MT7623_PINFUNC_H */
diff --git a/include/kvm/arm_arch_timer.h b/include/kvm/arm_arch_timer.h
index 1800227..b651aed 100644
--- a/include/kvm/arm_arch_timer.h
+++ b/include/kvm/arm_arch_timer.h
@@ -55,6 +55,9 @@
 
 	/* VGIC mapping */
 	struct irq_phys_map		*map;
+
+	/* Active IRQ state caching */
+	bool				active_cleared_last;
 };
 
 int kvm_timer_hyp_init(void);
@@ -74,4 +77,6 @@
 void kvm_timer_schedule(struct kvm_vcpu *vcpu);
 void kvm_timer_unschedule(struct kvm_vcpu *vcpu);
 
+void kvm_timer_vcpu_put(struct kvm_vcpu *vcpu);
+
 #endif
diff --git a/include/kvm/arm_pmu.h b/include/kvm/arm_pmu.h
new file mode 100644
index 0000000..fe389ac
--- /dev/null
+++ b/include/kvm/arm_pmu.h
@@ -0,0 +1,110 @@
+/*
+ * Copyright (C) 2015 Linaro Ltd.
+ * Author: Shannon Zhao <shannon.zhao@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __ASM_ARM_KVM_PMU_H
+#define __ASM_ARM_KVM_PMU_H
+
+#ifdef CONFIG_KVM_ARM_PMU
+
+#include <linux/perf_event.h>
+#include <asm/perf_event.h>
+
+#define ARMV8_PMU_CYCLE_IDX		(ARMV8_PMU_MAX_COUNTERS - 1)
+
+struct kvm_pmc {
+	u8 idx;	/* index into the pmu->pmc array */
+	struct perf_event *perf_event;
+	u64 bitmask;
+};
+
+struct kvm_pmu {
+	int irq_num;
+	struct kvm_pmc pmc[ARMV8_PMU_MAX_COUNTERS];
+	bool ready;
+	bool irq_level;
+};
+
+#define kvm_arm_pmu_v3_ready(v)		((v)->arch.pmu.ready)
+#define kvm_arm_pmu_irq_initialized(v)	((v)->arch.pmu.irq_num >= VGIC_NR_SGIS)
+u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu, u64 select_idx);
+void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu, u64 select_idx, u64 val);
+u64 kvm_pmu_valid_counter_mask(struct kvm_vcpu *vcpu);
+void kvm_pmu_vcpu_reset(struct kvm_vcpu *vcpu);
+void kvm_pmu_vcpu_destroy(struct kvm_vcpu *vcpu);
+void kvm_pmu_disable_counter(struct kvm_vcpu *vcpu, u64 val);
+void kvm_pmu_enable_counter(struct kvm_vcpu *vcpu, u64 val);
+void kvm_pmu_overflow_set(struct kvm_vcpu *vcpu, u64 val);
+void kvm_pmu_flush_hwstate(struct kvm_vcpu *vcpu);
+void kvm_pmu_sync_hwstate(struct kvm_vcpu *vcpu);
+void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u64 val);
+void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val);
+void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u64 data,
+				    u64 select_idx);
+bool kvm_arm_support_pmu_v3(void);
+int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu,
+			    struct kvm_device_attr *attr);
+int kvm_arm_pmu_v3_get_attr(struct kvm_vcpu *vcpu,
+			    struct kvm_device_attr *attr);
+int kvm_arm_pmu_v3_has_attr(struct kvm_vcpu *vcpu,
+			    struct kvm_device_attr *attr);
+#else
+struct kvm_pmu {
+};
+
+#define kvm_arm_pmu_v3_ready(v)		(false)
+#define kvm_arm_pmu_irq_initialized(v)	(false)
+static inline u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu,
+					    u64 select_idx)
+{
+	return 0;
+}
+static inline void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu,
+					     u64 select_idx, u64 val) {}
+static inline u64 kvm_pmu_valid_counter_mask(struct kvm_vcpu *vcpu)
+{
+	return 0;
+}
+static inline void kvm_pmu_vcpu_reset(struct kvm_vcpu *vcpu) {}
+static inline void kvm_pmu_vcpu_destroy(struct kvm_vcpu *vcpu) {}
+static inline void kvm_pmu_disable_counter(struct kvm_vcpu *vcpu, u64 val) {}
+static inline void kvm_pmu_enable_counter(struct kvm_vcpu *vcpu, u64 val) {}
+static inline void kvm_pmu_overflow_set(struct kvm_vcpu *vcpu, u64 val) {}
+static inline void kvm_pmu_flush_hwstate(struct kvm_vcpu *vcpu) {}
+static inline void kvm_pmu_sync_hwstate(struct kvm_vcpu *vcpu) {}
+static inline void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u64 val) {}
+static inline void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val) {}
+static inline void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu,
+						  u64 data, u64 select_idx) {}
+static inline bool kvm_arm_support_pmu_v3(void) { return false; }
+static inline int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu,
+					  struct kvm_device_attr *attr)
+{
+	return -ENXIO;
+}
+static inline int kvm_arm_pmu_v3_get_attr(struct kvm_vcpu *vcpu,
+					  struct kvm_device_attr *attr)
+{
+	return -ENXIO;
+}
+static inline int kvm_arm_pmu_v3_has_attr(struct kvm_vcpu *vcpu,
+					  struct kvm_device_attr *attr)
+{
+	return -ENXIO;
+}
+#endif
+
+#endif
diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h
index 13a3d53..281caf8 100644
--- a/include/kvm/arm_vgic.h
+++ b/include/kvm/arm_vgic.h
@@ -279,12 +279,6 @@
 	u32		vgic_lr[VGIC_V2_MAX_LRS];
 };
 
-/*
- * LRs are stored in reverse order in memory. make sure we index them
- * correctly.
- */
-#define VGIC_V3_LR_INDEX(lr)		(VGIC_V3_MAX_LRS - 1 - lr)
-
 struct vgic_v3_cpu_if {
 #ifdef CONFIG_KVM_ARM_VGIC_V3
 	u32		vgic_hcr;
@@ -321,6 +315,8 @@
 
 	/* Protected by the distributor's irq_phys_map_lock */
 	struct list_head	irq_phys_map_list;
+
+	u64		live_lrs;
 };
 
 #define LR_EMPTY	0xff
diff --git a/include/linux/clockchips.h b/include/linux/clockchips.h
index bdcf358..0d442e3 100644
--- a/include/linux/clockchips.h
+++ b/include/linux/clockchips.h
@@ -190,9 +190,9 @@
 extern int clockevents_update_freq(struct clock_event_device *ce, u32 freq);
 
 static inline void
-clockevents_calc_mult_shift(struct clock_event_device *ce, u32 freq, u32 minsec)
+clockevents_calc_mult_shift(struct clock_event_device *ce, u32 freq, u32 maxsec)
 {
-	return clocks_calc_mult_shift(&ce->mult, &ce->shift, NSEC_PER_SEC, freq, minsec);
+	return clocks_calc_mult_shift(&ce->mult, &ce->shift, NSEC_PER_SEC, freq, maxsec);
 }
 
 extern void clockevents_suspend(void);
diff --git a/include/linux/clocksource.h b/include/linux/clocksource.h
index 6013021..a307bf6 100644
--- a/include/linux/clocksource.h
+++ b/include/linux/clocksource.h
@@ -118,6 +118,23 @@
 /* simplify initialization of mask field */
 #define CLOCKSOURCE_MASK(bits) (cycle_t)((bits) < 64 ? ((1ULL<<(bits))-1) : -1)
 
+static inline u32 clocksource_freq2mult(u32 freq, u32 shift_constant, u64 from)
+{
+	/*  freq = cyc/from
+	 *  mult/2^shift  = ns/cyc
+	 *  mult = ns/cyc * 2^shift
+	 *  mult = from/freq * 2^shift
+	 *  mult = from * 2^shift / freq
+	 *  mult = (from<<shift) / freq
+	 */
+	u64 tmp = ((u64)from) << shift_constant;
+
+	tmp += freq/2; /* round for do_div */
+	do_div(tmp, freq);
+
+	return (u32)tmp;
+}
+
 /**
  * clocksource_khz2mult - calculates mult from khz and shift
  * @khz:		Clocksource frequency in KHz
@@ -128,19 +145,7 @@
  */
 static inline u32 clocksource_khz2mult(u32 khz, u32 shift_constant)
 {
-	/*  khz = cyc/(Million ns)
-	 *  mult/2^shift  = ns/cyc
-	 *  mult = ns/cyc * 2^shift
-	 *  mult = 1Million/khz * 2^shift
-	 *  mult = 1000000 * 2^shift / khz
-	 *  mult = (1000000<<shift) / khz
-	 */
-	u64 tmp = ((u64)1000000) << shift_constant;
-
-	tmp += khz/2; /* round for do_div */
-	do_div(tmp, khz);
-
-	return (u32)tmp;
+	return clocksource_freq2mult(khz, shift_constant, NSEC_PER_MSEC);
 }
 
 /**
@@ -154,19 +159,7 @@
  */
 static inline u32 clocksource_hz2mult(u32 hz, u32 shift_constant)
 {
-	/*  hz = cyc/(Billion ns)
-	 *  mult/2^shift  = ns/cyc
-	 *  mult = ns/cyc * 2^shift
-	 *  mult = 1Billion/hz * 2^shift
-	 *  mult = 1000000000 * 2^shift / hz
-	 *  mult = (1000000000<<shift) / hz
-	 */
-	u64 tmp = ((u64)1000000000) << shift_constant;
-
-	tmp += hz/2; /* round for do_div */
-	do_div(tmp, hz);
-
-	return (u32)tmp;
+	return clocksource_freq2mult(hz, shift_constant, NSEC_PER_SEC);
 }
 
 /**
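The refactored clocksource_freq2mult() above makes the khz/hz variants one-line wrappers. As a rough illustration of the arithmetic only (a userspace sketch with a plain 64-bit divide standing in for do_div, and made-up frequency/shift values):

#include <stdint.h>
#include <stdio.h>

/* Same arithmetic as clocksource_freq2mult(): mult = (from << shift) / freq */
static uint32_t freq2mult(uint32_t freq, uint32_t shift, uint64_t from)
{
	uint64_t tmp = from << shift;

	tmp += freq / 2;		/* round to nearest */
	return (uint32_t)(tmp / freq);
}

int main(void)
{
	/* illustrative: 1 MHz clock, shift of 20 -> mult = (1e9 << 20) / 1e6 */
	uint32_t mult = freq2mult(1000000, 20, 1000000000ULL);
	uint64_t cycles = 1000000;	/* one second worth of cycles */

	/* converting cycles to ns: ns = (cycles * mult) >> shift */
	printf("mult=%u ns=%llu\n", mult,
	       (unsigned long long)((cycles * mult) >> 20));
	return 0;
}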
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index a27f4f1..b5ff988 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -20,12 +20,14 @@
 # define __pmem		__attribute__((noderef, address_space(5)))
 #ifdef CONFIG_SPARSE_RCU_POINTER
 # define __rcu		__attribute__((noderef, address_space(4)))
-#else
+#else /* CONFIG_SPARSE_RCU_POINTER */
 # define __rcu
-#endif
+#endif /* CONFIG_SPARSE_RCU_POINTER */
+# define __private	__attribute__((noderef))
 extern void __chk_user_ptr(const volatile void __user *);
 extern void __chk_io_ptr(const volatile void __iomem *);
-#else
+# define ACCESS_PRIVATE(p, member) (*((typeof((p)->member) __force *) &(p)->member))
+#else /* __CHECKER__ */
 # define __user
 # define __kernel
 # define __safe
@@ -44,7 +46,9 @@
 # define __percpu
 # define __rcu
 # define __pmem
-#endif
+# define __private
+# define ACCESS_PRIVATE(p, member) ((p)->member)
+#endif /* __CHECKER__ */
 
 /* Indirect macros required for expanded argument pasting, eg. __LINE__. */
 #define ___PASTE(a,b) a##b
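The new __private annotation lets sparse flag direct access to fields that should only be touched through helpers, while ACCESS_PRIVATE() casts the attribute away at sanctioned call sites. A minimal illustrative sketch (the struct and field names below are invented):

struct counter_state {
	unsigned int	__private hits;	/* only touch via the helpers below */
};

static inline unsigned int counter_read(struct counter_state *c)
{
	/* sanctioned accessor: strips the noderef attribute for sparse */
	return ACCESS_PRIVATE(c, hits);
}

static inline void counter_bump(struct counter_state *c)
{
	ACCESS_PRIVATE(c, hits)++;
}

When built without __CHECKER__ both macros compile away to a plain member access, so the annotation has no runtime cost.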
diff --git a/include/linux/cpu.h b/include/linux/cpu.h
index d2ca8c3..f9b1fab 100644
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
@@ -16,6 +16,7 @@
 #include <linux/node.h>
 #include <linux/compiler.h>
 #include <linux/cpumask.h>
+#include <linux/cpuhotplug.h>
 
 struct device;
 struct device_node;
@@ -27,6 +28,9 @@
 	struct device dev;
 };
 
+extern void boot_cpu_init(void);
+extern void boot_cpu_state_init(void);
+
 extern int register_cpu(struct cpu *cpu, int num);
 extern struct device *get_cpu_device(unsigned cpu);
 extern bool cpu_is_hotpluggable(unsigned cpu);
@@ -74,7 +78,7 @@
 	/* migration should happen before other stuff but after perf */
 	CPU_PRI_PERF		= 20,
 	CPU_PRI_MIGRATION	= 10,
-	CPU_PRI_SMPBOOT		= 9,
+
 	/* bring up workqueues before normal notifiers and down after */
 	CPU_PRI_WORKQUEUE_UP	= 5,
 	CPU_PRI_WORKQUEUE_DOWN	= -5,
@@ -97,9 +101,7 @@
 					* Called on the new cpu, just before
 					* enabling interrupts. Must not sleep,
 					* must not fail */
-#define CPU_DYING_IDLE		0x000B /* CPU (unsigned)v dying, reached
-					* idle loop. */
-#define CPU_BROKEN		0x000C /* CPU (unsigned)v did not die properly,
+#define CPU_BROKEN		0x000B /* CPU (unsigned)v did not die properly,
 					* perhaps due to preemption. */
 
 /* Used for CPU hotplug events occurring while tasks are frozen due to a suspend
@@ -118,6 +120,7 @@
 
 
 #ifdef CONFIG_SMP
+extern bool cpuhp_tasks_frozen;
 /* Need to know about CPUs going up/down? */
 #if defined(CONFIG_HOTPLUG_CPU) || !defined(MODULE)
 #define cpu_notifier(fn, pri) {					\
@@ -167,7 +170,6 @@
 }
 #endif
 
-void smpboot_thread_init(void);
 int cpu_up(unsigned int cpu);
 void notify_cpu_starting(unsigned int cpu);
 extern void cpu_maps_update_begin(void);
@@ -177,6 +179,7 @@
 #define cpu_notifier_register_done	cpu_maps_update_done
 
 #else	/* CONFIG_SMP */
+#define cpuhp_tasks_frozen	0
 
 #define cpu_notifier(fn, pri)	do { (void)(fn); } while (0)
 #define __cpu_notifier(fn, pri)	do { (void)(fn); } while (0)
@@ -215,10 +218,6 @@
 {
 }
 
-static inline void smpboot_thread_init(void)
-{
-}
-
 #endif /* CONFIG_SMP */
 extern struct bus_type cpu_subsys;
 
@@ -265,11 +264,6 @@
 static inline void enable_nonboot_cpus(void) {}
 #endif /* !CONFIG_PM_SLEEP_SMP */
 
-enum cpuhp_state {
-	CPUHP_OFFLINE,
-	CPUHP_ONLINE,
-};
-
 void cpu_startup_entry(enum cpuhp_state state);
 
 void cpu_idle_poll_ctrl(bool enable);
@@ -280,14 +274,15 @@
 void arch_cpu_idle_exit(void);
 void arch_cpu_idle_dead(void);
 
-DECLARE_PER_CPU(bool, cpu_dead_idle);
-
 int cpu_report_state(int cpu);
 int cpu_check_up_prepare(int cpu);
 void cpu_set_state_online(int cpu);
 #ifdef CONFIG_HOTPLUG_CPU
 bool cpu_wait_death(unsigned int cpu, int seconds);
 bool cpu_report_death(void);
+void cpuhp_report_idle_dead(void);
+#else
+static inline void cpuhp_report_idle_dead(void) { }
 #endif /* #ifdef CONFIG_HOTPLUG_CPU */
 
 #endif /* _LINUX_CPU_H_ */
diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h
new file mode 100644
index 0000000..5d68e15
--- /dev/null
+++ b/include/linux/cpuhotplug.h
@@ -0,0 +1,93 @@
+#ifndef __CPUHOTPLUG_H
+#define __CPUHOTPLUG_H
+
+enum cpuhp_state {
+	CPUHP_OFFLINE,
+	CPUHP_CREATE_THREADS,
+	CPUHP_NOTIFY_PREPARE,
+	CPUHP_BRINGUP_CPU,
+	CPUHP_AP_IDLE_DEAD,
+	CPUHP_AP_OFFLINE,
+	CPUHP_AP_NOTIFY_STARTING,
+	CPUHP_AP_ONLINE,
+	CPUHP_TEARDOWN_CPU,
+	CPUHP_AP_ONLINE_IDLE,
+	CPUHP_AP_SMPBOOT_THREADS,
+	CPUHP_AP_NOTIFY_ONLINE,
+	CPUHP_AP_ONLINE_DYN,
+	CPUHP_AP_ONLINE_DYN_END		= CPUHP_AP_ONLINE_DYN + 30,
+	CPUHP_ONLINE,
+};
+
+int __cpuhp_setup_state(enum cpuhp_state state,	const char *name, bool invoke,
+			int (*startup)(unsigned int cpu),
+			int (*teardown)(unsigned int cpu));
+
+/**
+ * cpuhp_setup_state - Setup hotplug state callbacks and invoke the startup callback
+ * @state:	The state for which the calls are installed
+ * @name:	Name of the callback (will be used in debug output)
+ * @startup:	startup callback function
+ * @teardown:	teardown callback function
+ *
+ * Installs the callback functions and invokes the startup callback on
+ * the present cpus which have already reached the @state.
+ */
+static inline int cpuhp_setup_state(enum cpuhp_state state,
+				    const char *name,
+				    int (*startup)(unsigned int cpu),
+				    int (*teardown)(unsigned int cpu))
+{
+	return __cpuhp_setup_state(state, name, true, startup, teardown);
+}
+
+/**
+ * cpuhp_setup_state_nocalls - Setup hotplug state callbacks without calling the
+ *			       callbacks
+ * @state:	The state for which the calls are installed
+ * @name:	Name of the callback.
+ * @startup:	startup callback function
+ * @teardown:	teardown callback function
+ *
+ * Same as @cpuhp_setup_state except that no calls are invoked
+ * during installation of this callback. NOP if SMP=n or HOTPLUG_CPU=n.
+ */
+static inline int cpuhp_setup_state_nocalls(enum cpuhp_state state,
+					    const char *name,
+					    int (*startup)(unsigned int cpu),
+					    int (*teardown)(unsigned int cpu))
+{
+	return __cpuhp_setup_state(state, name, false, startup, teardown);
+}
+
+void __cpuhp_remove_state(enum cpuhp_state state, bool invoke);
+
+/**
+ * cpuhp_remove_state - Remove hotplug state callbacks and invoke the teardown
+ * @state:	The state for which the calls are removed
+ *
+ * Removes the callback functions and invokes the teardown callback on
+ * the present cpus which have already reached the @state.
+ */
+static inline void cpuhp_remove_state(enum cpuhp_state state)
+{
+	__cpuhp_remove_state(state, true);
+}
+
+/**
+ * cpuhp_remove_state_nocalls - Remove hotplug state callbacks without invoking
+ *				teardown
+ * @state:	The state for which the calls are removed
+ */
+static inline void cpuhp_remove_state_nocalls(enum cpuhp_state state)
+{
+	__cpuhp_remove_state(state, false);
+}
+
+#ifdef CONFIG_SMP
+void cpuhp_online_idle(enum cpuhp_state state);
+#else
+static inline void cpuhp_online_idle(enum cpuhp_state state) { }
+#endif
+
+#endif
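For a driver, the usual pattern is to install a startup/teardown pair once at init time. The sketch below is illustrative only: the "mydrv" names are invented, a dynamic AP state is assumed to be acceptable, and error handling is minimal.

static int mydrv_cpu_online(unsigned int cpu)
{
	/* allocate/enable per-CPU resources for @cpu */
	return 0;
}

static int mydrv_cpu_offline(unsigned int cpu)
{
	/* quiesce and release per-CPU resources for @cpu */
	return 0;
}

static int __init mydrv_init(void)
{
	int ret;

	/*
	 * Install the callbacks and run mydrv_cpu_online() on every CPU
	 * that is already up; the _nocalls variant would skip that.
	 */
	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "mydrv:online",
				mydrv_cpu_online, mydrv_cpu_offline);
	return ret < 0 ? ret : 0;
}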
diff --git a/include/linux/irq.h b/include/linux/irq.h
index 3c1c967..c4de623 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -133,17 +133,23 @@
  *			Use accessor functions to deal with it
  * @node:		node index useful for balancing
  * @handler_data:	per-IRQ data for the irq_chip methods
- * @affinity:		IRQ affinity on SMP
+ * @affinity:		IRQ affinity on SMP. If this is an IPI
+ *			related irq, then this is the mask of the
+ *			CPUs to which an IPI can be sent.
  * @msi_desc:		MSI descriptor
+ * @ipi_offset:		Offset of first IPI target cpu in @affinity. Optional.
  */
 struct irq_common_data {
-	unsigned int		state_use_accessors;
+	unsigned int		__private state_use_accessors;
 #ifdef CONFIG_NUMA
 	unsigned int		node;
 #endif
 	void			*handler_data;
 	struct msi_desc		*msi_desc;
 	cpumask_var_t		affinity;
+#ifdef CONFIG_GENERIC_IRQ_IPI
+	unsigned int		ipi_offset;
+#endif
 };
 
 /**
@@ -208,7 +214,7 @@
 	IRQD_FORWARDED_TO_VCPU		= (1 << 20),
 };
 
-#define __irqd_to_state(d)		((d)->common->state_use_accessors)
+#define __irqd_to_state(d) ACCESS_PRIVATE((d)->common, state_use_accessors)
 
 static inline bool irqd_is_setaffinity_pending(struct irq_data *d)
 {
@@ -299,6 +305,8 @@
 	__irqd_to_state(d) &= ~IRQD_FORWARDED_TO_VCPU;
 }
 
+#undef __irqd_to_state
+
 static inline irq_hw_number_t irqd_to_hwirq(struct irq_data *d)
 {
 	return d->hwirq;
@@ -341,6 +349,8 @@
  * @irq_get_irqchip_state:	return the internal state of an interrupt
  * @irq_set_irqchip_state:	set the internal state of a interrupt
  * @irq_set_vcpu_affinity:	optional to target a vCPU in a virtual machine
+ * @ipi_send_single:	send a single IPI to destination cpus
+ * @ipi_send_mask:	send an IPI to destination cpus in cpumask
  * @flags:		chip specific flags
  */
 struct irq_chip {
@@ -385,6 +395,9 @@
 
 	int		(*irq_set_vcpu_affinity)(struct irq_data *data, void *vcpu_info);
 
+	void		(*ipi_send_single)(struct irq_data *data, unsigned int cpu);
+	void		(*ipi_send_mask)(struct irq_data *data, const struct cpumask *dest);
+
 	unsigned long	flags;
 };
 
@@ -934,4 +947,12 @@
 		return readl(gc->reg_base + reg_offset);
 }
 
+/* Contrary to Linux irqs, for hardware irqs the irq number 0 is valid */
+#define INVALID_HWIRQ	(~0UL)
+irq_hw_number_t ipi_get_hwirq(unsigned int irq, unsigned int cpu);
+int __ipi_send_single(struct irq_desc *desc, unsigned int cpu);
+int __ipi_send_mask(struct irq_desc *desc, const struct cpumask *dest);
+int ipi_send_single(unsigned int virq, unsigned int cpu);
+int ipi_send_mask(unsigned int virq, const struct cpumask *dest);
+
 #endif /* _LINUX_IRQ_H */
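On the irqchip side, the two new callbacks are what a driver fills in so the core can deliver IPIs through it. A hedged sketch only; the register base, doorbell offset and chip name are invented:

static void __iomem *mychip_base;		/* placeholder mapping */
#define MYCHIP_IPI_DOORBELL	0x10		/* invented register offset */

static void mychip_ipi_send_single(struct irq_data *data, unsigned int cpu)
{
	/* hypothetical doorbell register: one bit per target CPU */
	writel(BIT(cpu), mychip_base + MYCHIP_IPI_DOORBELL);
}

static void mychip_ipi_send_mask(struct irq_data *data,
				 const struct cpumask *dest)
{
	unsigned int cpu;

	for_each_cpu(cpu, dest)
		mychip_ipi_send_single(data, cpu);
}

static struct irq_chip mychip = {
	.name			= "mychip",
	.ipi_send_single	= mychip_ipi_send_single,
	.ipi_send_mask		= mychip_ipi_send_mask,
};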
diff --git a/include/linux/irqchip/mips-gic.h b/include/linux/irqchip/mips-gic.h
index ce824db..80f89e4 100644
--- a/include/linux/irqchip/mips-gic.h
+++ b/include/linux/irqchip/mips-gic.h
@@ -261,9 +261,6 @@
 extern void gic_write_cpu_compare(cycle_t cnt, int cpu);
 extern void gic_start_count(void);
 extern void gic_stop_count(void);
-extern void gic_send_ipi(unsigned int intr);
-extern unsigned int plat_ipi_call_int_xlate(unsigned int);
-extern unsigned int plat_ipi_resched_int_xlate(unsigned int);
 extern int gic_get_c0_compare_int(void);
 extern int gic_get_c0_perfcount_int(void);
 extern int gic_get_c0_fdc_int(void);
diff --git a/include/linux/irqdomain.h b/include/linux/irqdomain.h
index 04579d9..ed48594 100644
--- a/include/linux/irqdomain.h
+++ b/include/linux/irqdomain.h
@@ -74,6 +74,7 @@
 	DOMAIN_BUS_PCI_MSI,
 	DOMAIN_BUS_PLATFORM_MSI,
 	DOMAIN_BUS_NEXUS,
+	DOMAIN_BUS_IPI,
 };
 
 /**
@@ -172,6 +173,12 @@
 	/* Core calls alloc/free recursive through the domain hierarchy. */
 	IRQ_DOMAIN_FLAG_AUTO_RECURSIVE	= (1 << 1),
 
+	/* Irq domain is an IPI domain with virq per cpu */
+	IRQ_DOMAIN_FLAG_IPI_PER_CPU	= (1 << 2),
+
+	/* Irq domain is an IPI domain with single virq */
+	IRQ_DOMAIN_FLAG_IPI_SINGLE	= (1 << 3),
+
 	/*
 	 * Flags starting from IRQ_DOMAIN_FLAG_NONCORE are reserved
 	 * for implementation specific purposes and ignored by the
@@ -206,6 +213,8 @@
 extern struct irq_domain *irq_find_matching_fwnode(struct fwnode_handle *fwnode,
 						   enum irq_domain_bus_token bus_token);
 extern void irq_set_default_host(struct irq_domain *host);
+extern int irq_domain_alloc_descs(int virq, unsigned int nr_irqs,
+				  irq_hw_number_t hwirq, int node);
 
 static inline struct fwnode_handle *of_node_to_fwnode(struct device_node *node)
 {
@@ -335,6 +344,11 @@
 			const u32 *intspec, unsigned int intsize,
 			irq_hw_number_t *out_hwirq, unsigned int *out_type);
 
+/* IPI functions */
+unsigned int irq_reserve_ipi(struct irq_domain *domain,
+			     const struct cpumask *dest);
+void irq_destroy_ipi(unsigned int irq);
+
 /* V2 interfaces to support hierarchy IRQ domains. */
 extern struct irq_data *irq_domain_get_irq_data(struct irq_domain *domain,
 						unsigned int virq);
@@ -400,6 +414,22 @@
 {
 	return domain->flags & IRQ_DOMAIN_FLAG_HIERARCHY;
 }
+
+static inline bool irq_domain_is_ipi(struct irq_domain *domain)
+{
+	return domain->flags &
+		(IRQ_DOMAIN_FLAG_IPI_PER_CPU | IRQ_DOMAIN_FLAG_IPI_SINGLE);
+}
+
+static inline bool irq_domain_is_ipi_per_cpu(struct irq_domain *domain)
+{
+	return domain->flags & IRQ_DOMAIN_FLAG_IPI_PER_CPU;
+}
+
+static inline bool irq_domain_is_ipi_single(struct irq_domain *domain)
+{
+	return domain->flags & IRQ_DOMAIN_FLAG_IPI_SINGLE;
+}
 #else	/* CONFIG_IRQ_DOMAIN_HIERARCHY */
 static inline void irq_domain_activate_irq(struct irq_data *data) { }
 static inline void irq_domain_deactivate_irq(struct irq_data *data) { }
@@ -413,6 +443,21 @@
 {
 	return false;
 }
+
+static inline bool irq_domain_is_ipi(struct irq_domain *domain)
+{
+	return false;
+}
+
+static inline bool irq_domain_is_ipi_per_cpu(struct irq_domain *domain)
+{
+	return false;
+}
+
+static inline bool irq_domain_is_ipi_single(struct irq_domain *domain)
+{
+	return false;
+}
 #endif	/* CONFIG_IRQ_DOMAIN_HIERARCHY */
 
 #else /* CONFIG_IRQ_DOMAIN */
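From the consumer side, an architecture reserves a virq for a set of CPUs from an IPI domain and then fires it with the ipi_send_*() helpers added to linux/irq.h above. Roughly, assuming an already-located IPI domain and eliding error handling:

static unsigned int resched_virq;

static void __init my_smp_ipi_init(struct irq_domain *ipi_domain)
{
	/* one reservation covering all possible CPUs */
	resched_virq = irq_reserve_ipi(ipi_domain, cpu_possible_mask);
}

static void my_send_resched_ipi(unsigned int cpu)
{
	ipi_send_single(resched_virq, cpu);
}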
diff --git a/include/linux/leds.h b/include/linux/leds.h
index bc1476f..f203a8f 100644
--- a/include/linux/leds.h
+++ b/include/linux/leds.h
@@ -39,6 +39,7 @@
 
 	/* Lower 16 bits reflect status */
 #define LED_SUSPENDED		(1 << 0)
+#define LED_UNREGISTERING	(1 << 1)
 	/* Upper 16 bits reflect control information */
 #define LED_CORE_SUSPENDRESUME	(1 << 16)
 #define LED_BLINK_ONESHOT	(1 << 17)
@@ -48,9 +49,12 @@
 #define LED_BLINK_DISABLE	(1 << 21)
 #define LED_SYSFS_DISABLE	(1 << 22)
 #define LED_DEV_CAP_FLASH	(1 << 23)
+#define LED_HW_PLUGGABLE	(1 << 24)
 
-	/* Set LED brightness level */
-	/* Must not sleep, use a workqueue if needed */
+	/* Set LED brightness level
+	 * Must not sleep. Use brightness_set_blocking for drivers
+	 * that can sleep while setting brightness.
+	 */
 	void		(*brightness_set)(struct led_classdev *led_cdev,
 					  enum led_brightness brightness);
 	/*
diff --git a/include/linux/mfd/max77686-private.h b/include/linux/mfd/max77686-private.h
index f504349..643dae7 100644
--- a/include/linux/mfd/max77686-private.h
+++ b/include/linux/mfd/max77686-private.h
@@ -437,14 +437,11 @@
 struct max77686_dev {
 	struct device *dev;
 	struct i2c_client *i2c; /* 0xcc / PMIC, Battery Control, and FLASH */
-	struct i2c_client *rtc; /* slave addr 0x0c */
 
 	unsigned long type;
 
 	struct regmap *regmap;		/* regmap for mfd */
-	struct regmap *rtc_regmap;	/* regmap for rtc */
 	struct regmap_irq_chip_data *irq_data;
-	struct regmap_irq_chip_data *rtc_irq_data;
 
 	int irq;
 	struct mutex irqlock;
diff --git a/include/linux/mm.h b/include/linux/mm.h
index a862b4f..dbf1edd 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -2129,6 +2129,8 @@
 int vm_insert_page(struct vm_area_struct *, unsigned long addr, struct page *);
 int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
 			unsigned long pfn);
+int vm_insert_pfn_prot(struct vm_area_struct *vma, unsigned long addr,
+			unsigned long pfn, pgprot_t pgprot);
 int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
 			pfn_t pfn);
 int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len);
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 624b78b..944b2b3 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -566,10 +566,26 @@
 }
 #endif
 
-struct vm_special_mapping
-{
-	const char *name;
+struct vm_fault;
+
+struct vm_special_mapping {
+	const char *name;	/* The name, e.g. "[vdso]". */
+
+	/*
+	 * If .fault is not provided, this points to a
+	 * NULL-terminated array of pages that back the special mapping.
+	 *
+	 * This must not be NULL unless .fault is provided.
+	 */
 	struct page **pages;
+
+	/*
+	 * If non-NULL, then this is called to resolve page faults
+	 * on the special mapping.  If used, .pages is not checked.
+	 */
+	int (*fault)(const struct vm_special_mapping *sm,
+		     struct vm_area_struct *vma,
+		     struct vm_fault *vmf);
 };
 
 enum tlb_flush_reason {
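With the new hook, a special mapping can resolve its own faults instead of supplying a static page array. A rough sketch of a vdso-style user (names and the backing page source are hypothetical):

static struct page *my_backing_page;	/* assumed to be allocated elsewhere */

static int my_special_fault(const struct vm_special_mapping *sm,
			    struct vm_area_struct *vma,
			    struct vm_fault *vmf)
{
	/* hypothetical: back every fault with one driver-owned page */
	if (!my_backing_page)
		return VM_FAULT_SIGBUS;

	get_page(my_backing_page);
	vmf->page = my_backing_page;
	return 0;
}

static const struct vm_special_mapping my_mapping = {
	.name	= "[mydrv]",
	.fault	= my_special_fault,	/* .pages may stay NULL when .fault is set */
};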
diff --git a/include/linux/notifier.h b/include/linux/notifier.h
index d14a4c3..4149868 100644
--- a/include/linux/notifier.h
+++ b/include/linux/notifier.h
@@ -47,6 +47,8 @@
  * runtime initialization.
  */
 
+struct notifier_block;
+
 typedef	int (*notifier_fn_t)(struct notifier_block *nb,
 			unsigned long action, void *data);
 
diff --git a/include/linux/platform_data/ntc_thermistor.h b/include/linux/platform_data/ntc_thermistor.h
index aed1705..698d0d5 100644
--- a/include/linux/platform_data/ntc_thermistor.h
+++ b/include/linux/platform_data/ntc_thermistor.h
@@ -28,6 +28,7 @@
 	TYPE_NCPXXWL333,
 	TYPE_B57330V2103,
 	TYPE_NCPXXWF104,
+	TYPE_NCPXXXH103,
 };
 
 struct ntc_thermistor_platform_data {
diff --git a/include/linux/pps_kernel.h b/include/linux/pps_kernel.h
index 54bf148..35ac903 100644
--- a/include/linux/pps_kernel.h
+++ b/include/linux/pps_kernel.h
@@ -111,22 +111,17 @@
 	kt->nsec = ts.tv_nsec;
 }
 
+static inline void pps_get_ts(struct pps_event_time *ts)
+{
+	struct system_time_snapshot snap;
+
+	ktime_get_snapshot(&snap);
+	ts->ts_real = ktime_to_timespec64(snap.real);
 #ifdef CONFIG_NTP_PPS
-
-static inline void pps_get_ts(struct pps_event_time *ts)
-{
-	ktime_get_raw_and_real_ts64(&ts->ts_raw, &ts->ts_real);
+	ts->ts_raw = ktime_to_timespec64(snap.raw);
+#endif
 }
 
-#else /* CONFIG_NTP_PPS */
-
-static inline void pps_get_ts(struct pps_event_time *ts)
-{
-	ktime_get_real_ts64(&ts->ts_real);
-}
-
-#endif /* CONFIG_NTP_PPS */
-
 /* Subtract known time delay from PPS event time(s) */
 static inline void pps_sub_ts(struct pps_event_time *ts, struct timespec64 delta)
 {
diff --git a/include/linux/ptp_clock_kernel.h b/include/linux/ptp_clock_kernel.h
index b8b7306..6b15e16 100644
--- a/include/linux/ptp_clock_kernel.h
+++ b/include/linux/ptp_clock_kernel.h
@@ -38,6 +38,7 @@
 	};
 };
 
+struct system_device_crosststamp;
 /**
  * struct ptp_clock_info - describes a PTP hardware clock
  *
@@ -67,6 +68,11 @@
  * @gettime64:  Reads the current time from the hardware clock.
  *              parameter ts: Holds the result.
  *
+ * @getcrosststamp:  Reads the current time from the hardware clock and
+ *                   system clock simultaneously.
+ *                   parameter cts: Contains timestamp (device,system) pair,
+ *                   where system time is realtime and monotonic raw.
+ *
  * @settime64:  Set the current time on the hardware clock.
  *              parameter ts: Time value to set.
  *
@@ -105,6 +111,8 @@
 	int (*adjfreq)(struct ptp_clock_info *ptp, s32 delta);
 	int (*adjtime)(struct ptp_clock_info *ptp, s64 delta);
 	int (*gettime64)(struct ptp_clock_info *ptp, struct timespec64 *ts);
+	int (*getcrosststamp)(struct ptp_clock_info *ptp,
+			      struct system_device_crosststamp *cts);
 	int (*settime64)(struct ptp_clock_info *p, const struct timespec64 *ts);
 	int (*enable)(struct ptp_clock_info *ptp,
 		      struct ptp_clock_request *request, int on);
diff --git a/include/linux/pxa2xx_ssp.h b/include/linux/pxa2xx_ssp.h
index c2f2574..2a097d1 100644
--- a/include/linux/pxa2xx_ssp.h
+++ b/include/linux/pxa2xx_ssp.h
@@ -197,6 +197,7 @@
 	QUARK_X1000_SSP,
 	LPSS_LPT_SSP, /* Keep LPSS types sorted with lpss_platforms[] */
 	LPSS_BYT_SSP,
+	LPSS_BSW_SSP,
 	LPSS_SPT_SSP,
 	LPSS_BXT_SSP,
 };
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 14e6f47..2657aff 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -332,9 +332,7 @@
 void rcu_sched_qs(void);
 void rcu_bh_qs(void);
 void rcu_check_callbacks(int user);
-struct notifier_block;
-int rcu_cpu_notify(struct notifier_block *self,
-		   unsigned long action, void *hcpu);
+void rcu_report_dead(unsigned int cpu);
 
 #ifndef CONFIG_TINY_RCU
 void rcu_end_inkernel_boot(void);
@@ -360,8 +358,6 @@
 #else
 static inline void rcu_user_enter(void) { }
 static inline void rcu_user_exit(void) { }
-static inline void rcu_user_hooks_switch(struct task_struct *prev,
-					 struct task_struct *next) { }
 #endif /* CONFIG_NO_HZ_FULL */
 
 #ifdef CONFIG_RCU_NOCB_CPU
diff --git a/include/linux/regmap.h b/include/linux/regmap.h
index 1839434..3dc08ce 100644
--- a/include/linux/regmap.h
+++ b/include/linux/regmap.h
@@ -65,6 +65,36 @@
 	unsigned int delay_us;
 };
 
+#define	regmap_update_bits(map, reg, mask, val) \
+	regmap_update_bits_base(map, reg, mask, val, NULL, false, false)
+#define	regmap_update_bits_async(map, reg, mask, val)\
+	regmap_update_bits_base(map, reg, mask, val, NULL, true, false)
+#define	regmap_update_bits_check(map, reg, mask, val, change)\
+	regmap_update_bits_base(map, reg, mask, val, change, false, false)
+#define	regmap_update_bits_check_async(map, reg, mask, val, change)\
+	regmap_update_bits_base(map, reg, mask, val, change, true, false)
+
+#define	regmap_write_bits(map, reg, mask, val) \
+	regmap_update_bits_base(map, reg, mask, val, NULL, false, true)
+
+#define	regmap_field_write(field, val) \
+	regmap_field_update_bits_base(field, ~0, val, NULL, false, false)
+#define	regmap_field_force_write(field, val) \
+	regmap_field_update_bits_base(field, ~0, val, NULL, false, true)
+#define	regmap_field_update_bits(field, mask, val)\
+	regmap_field_update_bits_base(field, mask, val, NULL, false, false)
+#define	regmap_field_force_update_bits(field, mask, val) \
+	regmap_field_update_bits_base(field, mask, val, NULL, false, true)
+
+#define	regmap_fields_write(field, id, val) \
+	regmap_fields_update_bits_base(field, id, ~0, val, NULL, false, false)
+#define	regmap_fields_force_write(field, id, val) \
+	regmap_fields_update_bits_base(field, id, ~0, val, NULL, false, true)
+#define	regmap_fields_update_bits(field, id, mask, val)\
+	regmap_fields_update_bits_base(field, id, mask, val, NULL, false, false)
+#define	regmap_fields_force_update_bits(field, id, mask, val) \
+	regmap_fields_update_bits_base(field, id, mask, val, NULL, false, true)
+
 #ifdef CONFIG_REGMAP
 
 enum regmap_endian {
@@ -162,7 +192,7 @@
  *		  This field is a duplicate of a similar file in
  *		  'struct regmap_bus' and serves exact same purpose.
  *		   Use it only for "no-bus" cases.
- * @max_register: Optional, specifies the maximum valid register index.
+ * @max_register: Optional, specifies the maximum valid register address.
  * @wr_table:     Optional, points to a struct regmap_access_table specifying
  *                valid ranges for write access.
  * @rd_table:     As above, for read access.
@@ -691,18 +721,9 @@
 		    void *val, size_t val_len);
 int regmap_bulk_read(struct regmap *map, unsigned int reg, void *val,
 		     size_t val_count);
-int regmap_update_bits(struct regmap *map, unsigned int reg,
-		       unsigned int mask, unsigned int val);
-int regmap_write_bits(struct regmap *map, unsigned int reg,
-		       unsigned int mask, unsigned int val);
-int regmap_update_bits_async(struct regmap *map, unsigned int reg,
-			     unsigned int mask, unsigned int val);
-int regmap_update_bits_check(struct regmap *map, unsigned int reg,
-			     unsigned int mask, unsigned int val,
-			     bool *change);
-int regmap_update_bits_check_async(struct regmap *map, unsigned int reg,
-				   unsigned int mask, unsigned int val,
-				   bool *change);
+int regmap_update_bits_base(struct regmap *map, unsigned int reg,
+			    unsigned int mask, unsigned int val,
+			    bool *change, bool async, bool force);
 int regmap_get_val_bytes(struct regmap *map);
 int regmap_get_max_register(struct regmap *map);
 int regmap_get_reg_stride(struct regmap *map);
@@ -770,18 +791,14 @@
 void devm_regmap_field_free(struct device *dev,	struct regmap_field *field);
 
 int regmap_field_read(struct regmap_field *field, unsigned int *val);
-int regmap_field_write(struct regmap_field *field, unsigned int val);
-int regmap_field_update_bits(struct regmap_field *field,
-			     unsigned int mask, unsigned int val);
-
-int regmap_fields_write(struct regmap_field *field, unsigned int id,
-			unsigned int val);
-int regmap_fields_force_write(struct regmap_field *field, unsigned int id,
-			unsigned int val);
+int regmap_field_update_bits_base(struct regmap_field *field,
+				  unsigned int mask, unsigned int val,
+				  bool *change, bool async, bool force);
 int regmap_fields_read(struct regmap_field *field, unsigned int id,
 		       unsigned int *val);
-int regmap_fields_update_bits(struct regmap_field *field,  unsigned int id,
-			      unsigned int mask, unsigned int val);
+int regmap_fields_update_bits_base(struct regmap_field *field,  unsigned int id,
+				   unsigned int mask, unsigned int val,
+				   bool *change, bool async, bool force);
 
 /**
  * Description of an IRQ for the generic regmap irq_chip.
@@ -868,6 +885,14 @@
 			int irq_base, const struct regmap_irq_chip *chip,
 			struct regmap_irq_chip_data **data);
 void regmap_del_irq_chip(int irq, struct regmap_irq_chip_data *data);
+
+int devm_regmap_add_irq_chip(struct device *dev, struct regmap *map, int irq,
+			     int irq_flags, int irq_base,
+			     const struct regmap_irq_chip *chip,
+			     struct regmap_irq_chip_data **data);
+void devm_regmap_del_irq_chip(struct device *dev, int irq,
+			      struct regmap_irq_chip_data *data);
+
 int regmap_irq_chip_get_base(struct regmap_irq_chip_data *data);
 int regmap_irq_get_virq(struct regmap_irq_chip_data *data, int irq);
 struct irq_domain *regmap_irq_get_domain(struct regmap_irq_chip_data *data);
@@ -937,42 +962,26 @@
 	return -EINVAL;
 }
 
-static inline int regmap_update_bits(struct regmap *map, unsigned int reg,
-				     unsigned int mask, unsigned int val)
+static inline int regmap_update_bits_base(struct regmap *map, unsigned int reg,
+					  unsigned int mask, unsigned int val,
+					  bool *change, bool async, bool force)
 {
 	WARN_ONCE(1, "regmap API is disabled");
 	return -EINVAL;
 }
 
-static inline int regmap_write_bits(struct regmap *map, unsigned int reg,
-				     unsigned int mask, unsigned int val)
+static inline int regmap_field_update_bits_base(struct regmap_field *field,
+					unsigned int mask, unsigned int val,
+					bool *change, bool async, bool force)
 {
 	WARN_ONCE(1, "regmap API is disabled");
 	return -EINVAL;
 }
 
-static inline int regmap_update_bits_async(struct regmap *map,
-					   unsigned int reg,
-					   unsigned int mask, unsigned int val)
-{
-	WARN_ONCE(1, "regmap API is disabled");
-	return -EINVAL;
-}
-
-static inline int regmap_update_bits_check(struct regmap *map,
-					   unsigned int reg,
-					   unsigned int mask, unsigned int val,
-					   bool *change)
-{
-	WARN_ONCE(1, "regmap API is disabled");
-	return -EINVAL;
-}
-
-static inline int regmap_update_bits_check_async(struct regmap *map,
-						 unsigned int reg,
-						 unsigned int mask,
-						 unsigned int val,
-						 bool *change)
+static inline int regmap_fields_update_bits_base(struct regmap_field *field,
+				   unsigned int id,
+				   unsigned int mask, unsigned int val,
+				   bool *change, bool async, bool force)
 {
 	WARN_ONCE(1, "regmap API is disabled");
 	return -EINVAL;
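All of the old update/write-bits entry points are now thin wrappers around a single regmap_update_bits_base(); callers keep using the familiar names. For example (register and mask values below are illustrative only):

static int mydev_enable(struct regmap *map)
{
	bool changed;
	int ret;

	/* set bit 0 of an (illustrative) control register, leave the rest alone */
	ret = regmap_update_bits(map, 0x10 /* CTRL */, BIT(0), BIT(0));
	if (ret)
		return ret;

	/* clear it again, and learn whether the register actually changed */
	return regmap_update_bits_check(map, 0x10, BIT(0), 0, &changed);
}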
diff --git a/include/linux/regulator/act8865.h b/include/linux/regulator/act8865.h
index 15fa8f2..2eb3860 100644
--- a/include/linux/regulator/act8865.h
+++ b/include/linux/regulator/act8865.h
@@ -68,12 +68,12 @@
  * act8865_regulator_data - regulator data
  * @id: regulator id
  * @name: regulator name
- * @platform_data: regulator init data
+ * @init_data: regulator init data
  */
 struct act8865_regulator_data {
 	int id;
 	const char *name;
-	struct regulator_init_data *platform_data;
+	struct regulator_init_data *init_data;
 };
 
 /**
diff --git a/include/linux/regulator/driver.h b/include/linux/regulator/driver.h
index 16ac9e1..cd271e8 100644
--- a/include/linux/regulator/driver.h
+++ b/include/linux/regulator/driver.h
@@ -93,6 +93,8 @@
  * @get_current_limit: Get the configured limit for a current-limited regulator.
  * @set_input_current_limit: Configure an input limit.
  *
+ * @set_active_discharge: Set active discharge enable/disable of regulators.
+ *
  * @set_mode: Set the configured operating mode for the regulator.
  * @get_mode: Get the configured operating mode for the regulator.
  * @get_status: Return actual (not as-configured) status of regulator, as a
@@ -149,6 +151,7 @@
 
 	int (*set_input_current_limit) (struct regulator_dev *, int lim_uA);
 	int (*set_over_current_protection) (struct regulator_dev *);
+	int (*set_active_discharge) (struct regulator_dev *, bool enable);
 
 	/* enable/disable regulator */
 	int (*enable) (struct regulator_dev *);
@@ -266,6 +269,14 @@
  * @bypass_mask: Mask for control when using regmap set_bypass
  * @bypass_val_on: Enabling value for control when using regmap set_bypass
  * @bypass_val_off: Disabling value for control when using regmap set_bypass
+ * @active_discharge_off: Disabling value for control when using regmap
+ *			  set_active_discharge
+ * @active_discharge_on: Enabling value for control when using regmap
+ *			 set_active_discharge
+ * @active_discharge_mask: Mask for control when using regmap
+ *			   set_active_discharge
+ * @active_discharge_reg: Register for control when using regmap
+ *			  set_active_discharge
  *
  * @enable_time: Time taken for initial enable of regulator (in uS).
  * @off_on_delay: guard time (in uS), before re-enabling a regulator
@@ -315,6 +326,10 @@
 	unsigned int bypass_mask;
 	unsigned int bypass_val_on;
 	unsigned int bypass_val_off;
+	unsigned int active_discharge_on;
+	unsigned int active_discharge_off;
+	unsigned int active_discharge_mask;
+	unsigned int active_discharge_reg;
 
 	unsigned int enable_time;
 
@@ -447,6 +462,8 @@
 int regulator_set_bypass_regmap(struct regulator_dev *rdev, bool enable);
 int regulator_get_bypass_regmap(struct regulator_dev *rdev, bool *enable);
 
+int regulator_set_active_discharge_regmap(struct regulator_dev *rdev,
+					  bool enable);
 void *regulator_get_init_drvdata(struct regulator_init_data *reg_init_data);
 
 #endif
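A regulator driver whose hardware exposes active discharge as a simple register bit can describe it in the desc and point .set_active_discharge at the new regmap helper. A sketch with invented register values:

static const struct regulator_ops my_ldo_ops = {
	/* .enable/.disable/.list_voltage etc. elided */
	.set_active_discharge	= regulator_set_active_discharge_regmap,
};

static const struct regulator_desc my_ldo_desc = {
	.name			= "my-ldo",
	.ops			= &my_ldo_ops,
	.active_discharge_reg	= 0x41,		/* invented register address */
	.active_discharge_mask	= BIT(3),
	.active_discharge_on	= BIT(3),	/* value that enables discharge */
	.active_discharge_off	= 0,
};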
diff --git a/include/linux/regulator/lp872x.h b/include/linux/regulator/lp872x.h
index 132e05c..6029279 100644
--- a/include/linux/regulator/lp872x.h
+++ b/include/linux/regulator/lp872x.h
@@ -18,6 +18,9 @@
 
 #define LP872X_MAX_REGULATORS		9
 
+#define LP8720_ENABLE_DELAY		200
+#define LP8725_ENABLE_DELAY		30000
+
 enum lp872x_regulator_id {
 	LP8720_ID_BASE,
 	LP8720_ID_LDO1 = LP8720_ID_BASE,
@@ -79,12 +82,14 @@
  * @update_config     : if LP872X_GENERAL_CFG register is updated, set true
  * @regulator_data    : platform regulator id and init data
  * @dvs               : dvs data for buck voltage control
+ * @enable_gpio       : gpio pin number for enable control
  */
 struct lp872x_platform_data {
 	u8 general_config;
 	bool update_config;
 	struct lp872x_regulator_data regulator_data[LP872X_MAX_REGULATORS];
 	struct lp872x_dvs *dvs;
+	int enable_gpio;
 };
 
 #endif
diff --git a/include/linux/regulator/machine.h b/include/linux/regulator/machine.h
index a1067d0..5d627c8 100644
--- a/include/linux/regulator/machine.h
+++ b/include/linux/regulator/machine.h
@@ -42,6 +42,13 @@
 #define REGULATOR_CHANGE_DRMS		0x10
 #define REGULATOR_CHANGE_BYPASS		0x20
 
+/* Regulator active discharge flags */
+enum regulator_active_discharge {
+	REGULATOR_ACTIVE_DISCHARGE_DEFAULT,
+	REGULATOR_ACTIVE_DISCHARGE_DISABLE,
+	REGULATOR_ACTIVE_DISCHARGE_ENABLE,
+};
+
 /**
  * struct regulator_state - regulator state during low power system states
  *
@@ -100,6 +107,9 @@
  * @initial_state: Suspend state to set by default.
  * @initial_mode: Mode to set at startup.
  * @ramp_delay: Time to settle down after voltage change (unit: uV/us)
+ * @active_discharge: Enable/disable active discharge. The enum
+ *		      regulator_active_discharge values are used for
+ *		      initialisation.
  * @enable_time: Turn-on time of the rails (unit: microseconds)
  */
 struct regulation_constraints {
@@ -140,6 +150,8 @@
 	unsigned int ramp_delay;
 	unsigned int enable_time;
 
+	unsigned int active_discharge;
+
 	/* constraint flags */
 	unsigned always_on:1;	/* regulator never off when system is on */
 	unsigned boot_on:1;	/* bootloader/firmware enabled regulator */
diff --git a/include/linux/rtc.h b/include/linux/rtc.h
index 3359f04..b693ada 100644
--- a/include/linux/rtc.h
+++ b/include/linux/rtc.h
@@ -89,6 +89,8 @@
 	int (*set_mmss)(struct device *, unsigned long secs);
 	int (*read_callback)(struct device *, int data);
 	int (*alarm_irq_enable)(struct device *, unsigned int enabled);
+	int (*read_offset)(struct device *, long *offset);
+	int (*set_offset)(struct device *, long offset);
 };
 
 #define RTC_DEVICE_NAME_SIZE 20
@@ -208,6 +210,8 @@
 int rtc_timer_start(struct rtc_device *rtc, struct rtc_timer *timer,
 		    ktime_t expires, ktime_t period);
 void rtc_timer_cancel(struct rtc_device *rtc, struct rtc_timer *timer);
+int rtc_read_offset(struct rtc_device *rtc, long *offset);
+int rtc_set_offset(struct rtc_device *rtc, long offset);
 void rtc_timer_do_work(struct work_struct *work);
 
 static inline bool is_leap_year(unsigned int year)
diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h
index 53be3a4..857a9a1 100644
--- a/include/linux/spi/spi.h
+++ b/include/linux/spi/spi.h
@@ -25,6 +25,7 @@
 struct dma_chan;
 struct spi_master;
 struct spi_transfer;
+struct spi_flash_read_message;
 
 /*
  * INTERFACES between SPI master-side drivers and SPI infrastructure.
@@ -53,6 +54,10 @@
  *
  * @transfer_bytes_histo:
  *                 transfer bytes histogram
+ *
+ * @transfers_split_maxsize:
+ *                 number of transfers that have been split because of
+ *                 maxsize limit
  */
 struct spi_statistics {
 	spinlock_t		lock; /* lock for the whole structure */
@@ -72,6 +77,8 @@
 
 #define SPI_STATISTICS_HISTO_SIZE 17
 	unsigned long transfer_bytes_histo[SPI_STATISTICS_HISTO_SIZE];
+
+	unsigned long transfers_split_maxsize;
 };
 
 void spi_statistics_add_transfer_stats(struct spi_statistics *stats,
@@ -303,6 +310,8 @@
  * @min_speed_hz: Lowest supported transfer speed
  * @max_speed_hz: Highest supported transfer speed
  * @flags: other constraints relevant to this driver
+ * @max_transfer_size: function that returns the max transfer size for
+ *	a &spi_device; may be %NULL, so the default %SIZE_MAX will be used.
  * @bus_lock_spinlock: spinlock for SPI bus locking
  * @bus_lock_mutex: mutex for SPI bus locking
  * @bus_lock_flag: indicates that the SPI bus is locked for exclusive use
@@ -361,6 +370,8 @@
  * @handle_err: the subsystem calls the driver to handle an error that occurs
  *		in the generic implementation of transfer_one_message().
  * @unprepare_message: undo any work done by prepare_message().
+ * @spi_flash_read: to support spi-controller hardware that provides an
+ *                  accelerated interface to read from flash devices.
  * @cs_gpios: Array of GPIOs to use as chip select lines; one per CS
  *	number. Any individual value may be -ENOENT for CS lines that
  *	are not GPIOs (driven by the SPI controller itself).
@@ -369,6 +380,9 @@
  * @dma_rx: DMA receive channel
  * @dummy_rx: dummy receive buffer for full-duplex devices
  * @dummy_tx: dummy transmit buffer for full-duplex devices
+ * @fw_translate_cs: If the boot firmware uses a different numbering scheme
+ *	than Linux expects, this optional hook can be used to translate
+ *	between the two.
  *
  * Each SPI master controller can communicate with one or more @spi_device
  * children.  These make a small bus, sharing MOSI, MISO and SCK signals
@@ -513,6 +527,8 @@
 			       struct spi_message *message);
 	int (*unprepare_message)(struct spi_master *master,
 				 struct spi_message *message);
+	int (*spi_flash_read)(struct  spi_device *spi,
+			      struct spi_flash_read_message *msg);
 
 	/*
 	 * These hooks are for drivers that use a generic implementation
@@ -537,6 +553,8 @@
 	/* dummy data for full duplex devices */
 	void			*dummy_rx;
 	void			*dummy_tx;
+
+	int (*fw_translate_cs)(struct spi_master *master, unsigned cs);
 };
 
 static inline void *spi_master_get_devdata(struct spi_master *master)
@@ -582,6 +600,38 @@
 
 extern struct spi_master *spi_busnum_to_master(u16 busnum);
 
+/*
+ * SPI resource management while processing a SPI message
+ */
+
+typedef void (*spi_res_release_t)(struct spi_master *master,
+				  struct spi_message *msg,
+				  void *res);
+
+/**
+ * struct spi_res - spi resource management structure
+ * @entry:   list entry
+ * @release: release code called prior to freeing this resource
+ * @data:    extra data allocated for the specific use-case
+ *
+ * this is based on ideas from devres, but focused on life-cycle
+ * management during spi_message processing
+ */
+struct spi_res {
+	struct list_head        entry;
+	spi_res_release_t       release;
+	unsigned long long      data[]; /* guarantee ull alignment */
+};
+
+extern void *spi_res_alloc(struct spi_device *spi,
+			   spi_res_release_t release,
+			   size_t size, gfp_t gfp);
+extern void spi_res_add(struct spi_message *message, void *res);
+extern void spi_res_free(void *res);
+
+extern void spi_res_release(struct spi_master *master,
+			    struct spi_message *message);
+
 /*---------------------------------------------------------------------------*/
 
 /*
@@ -720,6 +770,7 @@
  * @status: zero for success, else negative errno
  * @queue: for use by whichever driver currently owns the message
  * @state: for use by whichever driver currently owns the message
+ * @resources: for resource management when the spi message is processed
  *
  * A @spi_message is used to execute an atomic sequence of data transfers,
  * each represented by a struct spi_transfer.  The sequence is "atomic"
@@ -766,11 +817,15 @@
 	 */
 	struct list_head	queue;
 	void			*state;
+
+	/* list of spi_res resources when the spi message is processed */
+	struct list_head        resources;
 };
 
 static inline void spi_message_init_no_memset(struct spi_message *m)
 {
 	INIT_LIST_HEAD(&m->transfers);
+	INIT_LIST_HEAD(&m->resources);
 }
 
 static inline void spi_message_init(struct spi_message *m)
@@ -854,6 +909,60 @@
 
 /*---------------------------------------------------------------------------*/
 
+/* SPI transfer replacement methods which make use of spi_res */
+
+struct spi_replaced_transfers;
+typedef void (*spi_replaced_release_t)(struct spi_master *master,
+				       struct spi_message *msg,
+				       struct spi_replaced_transfers *res);
+/**
+ * struct spi_replaced_transfers - structure describing the spi_transfer
+ *                                 replacements that have occurred
+ *                                 so that they can get reverted
+ * @release:            some extra release code to get executed prior to
+ *                      releasing this structure
+ * @extradata:          pointer to some extra data if requested or NULL
+ * @replaced_transfers: transfers that have been replaced and which need
+ *                      to get restored
+ * @replaced_after:     the transfer after which the @replaced_transfers
+ *                      are to get re-inserted
+ * @inserted:           number of transfers inserted
+ * @inserted_transfers: array of spi_transfers of array-size @inserted,
+ *                      that have been replacing replaced_transfers
+ *
+ * Note that @extradata will point to @inserted_transfers[@inserted]
+ * if some extra allocation is requested, so alignment will be the same
+ * as for spi_transfers
+ */
+struct spi_replaced_transfers {
+	spi_replaced_release_t release;
+	void *extradata;
+	struct list_head replaced_transfers;
+	struct list_head *replaced_after;
+	size_t inserted;
+	struct spi_transfer inserted_transfers[];
+};
+
+extern struct spi_replaced_transfers *spi_replace_transfers(
+	struct spi_message *msg,
+	struct spi_transfer *xfer_first,
+	size_t remove,
+	size_t insert,
+	spi_replaced_release_t release,
+	size_t extradatasize,
+	gfp_t gfp);
+
+/*---------------------------------------------------------------------------*/
+
+/* SPI transfer transformation methods */
+
+extern int spi_split_transfers_maxsize(struct spi_master *master,
+				       struct spi_message *msg,
+				       size_t maxsize,
+				       gfp_t gfp);
+
+/*---------------------------------------------------------------------------*/
+
 /* All these synchronous SPI transfer routines are utilities layered
  * over the core async transfer primitive.  Here, "synchronous" means
  * they will sleep uninterruptibly until the async transfer completes.
@@ -1019,6 +1128,42 @@
 	return be16_to_cpu(result);
 }
 
+/**
+ * struct spi_flash_read_message - flash specific information for
+ * spi-masters that provide accelerated flash read interfaces
+ * @buf: buffer to read data
+ * @from: offset within the flash from where data is to be read
+ * @len: length of data to be read
+ * @retlen: actual length of data read
+ * @read_opcode: read_opcode to be used to communicate with flash
+ * @addr_width: number of address bytes
+ * @dummy_bytes: number of dummy bytes
+ * @opcode_nbits: number of lines to send opcode
+ * @addr_nbits: number of lines to send address
+ * @data_nbits: number of lines for data
+ */
+struct spi_flash_read_message {
+	void *buf;
+	loff_t from;
+	size_t len;
+	size_t retlen;
+	u8 read_opcode;
+	u8 addr_width;
+	u8 dummy_bytes;
+	u8 opcode_nbits;
+	u8 addr_nbits;
+	u8 data_nbits;
+};
+
+/* SPI core interface for flash read support */
+static inline bool spi_flash_read_supported(struct spi_device *spi)
+{
+	return spi->master->spi_flash_read ? true : false;
+}
+
+int spi_flash_read(struct spi_device *spi,
+		   struct spi_flash_read_message *msg);
+
 /*---------------------------------------------------------------------------*/
 
 /*
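A flash driver can probe for the accelerated path and fall back to plain transfers otherwise. Roughly, with illustrative opcode and line-width values:

static int my_flash_read(struct spi_device *spi, loff_t from, size_t len,
			 size_t *retlen, void *buf)
{
	struct spi_flash_read_message msg = {
		.buf		= buf,
		.from		= from,
		.len		= len,
		.read_opcode	= 0x03,	/* classic READ, for illustration */
		.addr_width	= 3,
		.dummy_bytes	= 0,
		.opcode_nbits	= 1,
		.addr_nbits	= 1,
		.data_nbits	= 1,
	};
	int ret;

	if (!spi_flash_read_supported(spi))
		return -EOPNOTSUPP;	/* caller falls back to normal transfers */

	ret = spi_flash_read(spi, &msg);
	*retlen = msg.retlen;
	return ret;
}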
diff --git a/include/linux/srcu.h b/include/linux/srcu.h
index f5f80c5..dc8eb63 100644
--- a/include/linux/srcu.h
+++ b/include/linux/srcu.h
@@ -99,8 +99,23 @@
 	}
 
 /*
- * define and init a srcu struct at build time.
- * dont't call init_srcu_struct() nor cleanup_srcu_struct() on it.
+ * Define and initialize a srcu struct at build time.
+ * Do -not- call init_srcu_struct() nor cleanup_srcu_struct() on it.
+ *
+ * Note that although DEFINE_STATIC_SRCU() hides the name from other
+ * files, the per-CPU variable rules nevertheless require that the
+ * chosen name be globally unique.  These rules also prohibit use of
+ * DEFINE_STATIC_SRCU() within a function.  If these rules are too
+ * restrictive, declare the srcu_struct manually.  For example, in
+ * each file:
+ *
+ *	static struct srcu_struct my_srcu;
+ *
+ * Then, before the first use of each my_srcu, manually initialize it:
+ *
+ *	init_srcu_struct(&my_srcu);
+ *
+ * See include/linux/percpu-defs.h for the rules on per-CPU variables.
  */
 #define __DEFINE_SRCU(name, is_static)					\
 	static DEFINE_PER_CPU(struct srcu_struct_array, name##_srcu_array);\
diff --git a/include/linux/timekeeper_internal.h b/include/linux/timekeeper_internal.h
index 2524722..e880054 100644
--- a/include/linux/timekeeper_internal.h
+++ b/include/linux/timekeeper_internal.h
@@ -50,6 +50,7 @@
  * @offs_tai:		Offset clock monotonic -> clock tai
  * @tai_offset:		The current UTC to TAI offset in seconds
  * @clock_was_set_seq:	The sequence number of clock was set events
+ * @cs_was_changed_seq:	The sequence number of clocksource change events
  * @next_leap_ktime:	CLOCK_MONOTONIC time value of a pending leap-second
  * @raw_time:		Monotonic raw base time in timespec64 format
  * @cycle_interval:	Number of clock cycles in one NTP interval
@@ -91,6 +92,7 @@
 	ktime_t			offs_tai;
 	s32			tai_offset;
 	unsigned int		clock_was_set_seq;
+	u8			cs_was_changed_seq;
 	ktime_t			next_leap_ktime;
 	struct timespec64	raw_time;
 
diff --git a/include/linux/timekeeping.h b/include/linux/timekeeping.h
index ec89d84..96f37be 100644
--- a/include/linux/timekeeping.h
+++ b/include/linux/timekeeping.h
@@ -267,6 +267,64 @@
 				        struct timespec64 *ts_real);
 
 /*
+ * struct system_time_snapshot - simultaneous raw/real time capture with
+ *	counter value
+ * @cycles:	Clocksource counter value to produce the system times
+ * @real:	Realtime system time
+ * @raw:	Monotonic raw system time
+ * @clock_was_set_seq:	The sequence number of clock was set events
+ * @cs_was_changed_seq:	The sequence number of clocksource change events
+ */
+struct system_time_snapshot {
+	cycle_t		cycles;
+	ktime_t		real;
+	ktime_t		raw;
+	unsigned int	clock_was_set_seq;
+	u8		cs_was_changed_seq;
+};
+
+/*
+ * struct system_device_crosststamp - system/device cross-timestamp
+ *	(synchronized capture)
+ * @device:		Device time
+ * @sys_realtime:	Realtime simultaneous with device time
+ * @sys_monoraw:	Monotonic raw simultaneous with device time
+ */
+struct system_device_crosststamp {
+	ktime_t device;
+	ktime_t sys_realtime;
+	ktime_t sys_monoraw;
+};
+
+/*
+ * struct system_counterval_t - system counter value with the pointer to the
+ *	corresponding clocksource
+ * @cycles:	System counter value
+ * @cs:		Clocksource corresponding to system counter value. Used by
+ *	timekeeping code to verify comparability of two cycle values
+ */
+struct system_counterval_t {
+	cycle_t			cycles;
+	struct clocksource	*cs;
+};
+
+/*
+ * Get cross timestamp between system clock and device clock
+ */
+extern int get_device_system_crosststamp(
+			int (*get_time_fn)(ktime_t *device_time,
+				struct system_counterval_t *system_counterval,
+				void *ctx),
+			void *ctx,
+			struct system_time_snapshot *history,
+			struct system_device_crosststamp *xtstamp);
+
+/*
+ * Simultaneously snapshot realtime and monotonic raw clocks
+ */
+extern void ktime_get_snapshot(struct system_time_snapshot *systime_snapshot);
+
+/*
  * Persistent clock related interfaces
  */
 extern int persistent_clock_is_local;
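A device driver supplies get_device_system_crosststamp() with a callback that reads its device clock and the system counter as close together as possible. The shape of such a callback, with the actual hardware reads stubbed out as hypothetical helpers:

static int mydev_get_time_fn(ktime_t *device_time,
			     struct system_counterval_t *system_counterval,
			     void *ctx)
{
	struct mydev *dev = ctx;

	/*
	 * Hypothetical hardware support: latch the device clock and the
	 * system clocksource counter atomically, then report both.
	 */
	*device_time = mydev_read_latched_time(dev);
	*system_counterval = mydev_read_latched_syscount(dev);
	return 0;
}

static int mydev_getcrosststamp(struct mydev *dev,
				struct system_device_crosststamp *xtstamp)
{
	/* no history snapshot: pass NULL and let the core use the current time */
	return get_device_system_crosststamp(mydev_get_time_fn, dev,
					     NULL, xtstamp);
}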
diff --git a/include/trace/events/cpuhp.h b/include/trace/events/cpuhp.h
new file mode 100644
index 0000000..a72bd93
--- /dev/null
+++ b/include/trace/events/cpuhp.h
@@ -0,0 +1,66 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM cpuhp
+
+#if !defined(_TRACE_CPUHP_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_CPUHP_H
+
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(cpuhp_enter,
+
+	TP_PROTO(unsigned int cpu,
+		 int target,
+		 int idx,
+		 int (*fun)(unsigned int)),
+
+	TP_ARGS(cpu, target, idx, fun),
+
+	TP_STRUCT__entry(
+		__field( unsigned int,	cpu		)
+		__field( int,		target		)
+		__field( int,		idx		)
+		__field( void *,	fun		)
+	),
+
+	TP_fast_assign(
+		__entry->cpu	= cpu;
+		__entry->target	= target;
+		__entry->idx	= idx;
+		__entry->fun	= fun;
+	),
+
+	TP_printk("cpu: %04u target: %3d step: %3d (%pf)",
+		  __entry->cpu, __entry->target, __entry->idx, __entry->fun)
+);
+
+TRACE_EVENT(cpuhp_exit,
+
+	TP_PROTO(unsigned int cpu,
+		 int state,
+		 int idx,
+		 int ret),
+
+	TP_ARGS(cpu, state, idx, ret),
+
+	TP_STRUCT__entry(
+		__field( unsigned int,	cpu		)
+		__field( int,		state		)
+		__field( int,		idx		)
+		__field( int,		ret		)
+	),
+
+	TP_fast_assign(
+		__entry->cpu	= cpu;
+		__entry->state	= state;
+		__entry->idx	= idx;
+		__entry->ret	= ret;
+	),
+
+	TP_printk(" cpu: %04u  state: %3d step: %3d ret: %d",
+		  __entry->cpu, __entry->state, __entry->idx,  __entry->ret)
+);
+
+#endif
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/include/trace/events/kvm.h b/include/trace/events/kvm.h
index d6f8322..aa69253 100644
--- a/include/trace/events/kvm.h
+++ b/include/trace/events/kvm.h
@@ -359,14 +359,15 @@
 #endif
 
 TRACE_EVENT(kvm_halt_poll_ns,
-	TP_PROTO(bool grow, unsigned int vcpu_id, int new, int old),
+	TP_PROTO(bool grow, unsigned int vcpu_id, unsigned int new,
+		 unsigned int old),
 	TP_ARGS(grow, vcpu_id, new, old),
 
 	TP_STRUCT__entry(
 		__field(bool, grow)
 		__field(unsigned int, vcpu_id)
-		__field(int, new)
-		__field(int, old)
+		__field(unsigned int, new)
+		__field(unsigned int, old)
 	),
 
 	TP_fast_assign(
@@ -376,7 +377,7 @@
 		__entry->old            = old;
 	),
 
-	TP_printk("vcpu %u: halt_poll_ns %d (%s %d)",
+	TP_printk("vcpu %u: halt_poll_ns %u (%s %u)",
 			__entry->vcpu_id,
 			__entry->new,
 			__entry->grow ? "grow" : "shrink",
diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h
index 9da9051..a7f1f80 100644
--- a/include/uapi/linux/kvm.h
+++ b/include/uapi/linux/kvm.h
@@ -157,6 +157,7 @@
 
 struct kvm_hyperv_exit {
 #define KVM_EXIT_HYPERV_SYNIC          1
+#define KVM_EXIT_HYPERV_HCALL          2
 	__u32 type;
 	union {
 		struct {
@@ -165,6 +166,11 @@
 			__u64 evt_page;
 			__u64 msg_page;
 		} synic;
+		struct {
+			__u64 input;
+			__u64 result;
+			__u64 params[2];
+		} hcall;
 	} u;
 };
 
@@ -541,7 +547,13 @@
 	__u8 exc_access_id;
 	__u8 per_access_id;
 	__u8 op_access_id;
-	__u8 pad[3];
+#define KVM_S390_PGM_FLAGS_ILC_VALID	0x01
+#define KVM_S390_PGM_FLAGS_ILC_0	0x02
+#define KVM_S390_PGM_FLAGS_ILC_1	0x04
+#define KVM_S390_PGM_FLAGS_ILC_MASK	0x06
+#define KVM_S390_PGM_FLAGS_NO_REWIND	0x08
+	__u8 flags;
+	__u8 pad[2];
 };
 
 struct kvm_s390_prefix_info {
@@ -850,6 +862,9 @@
 #define KVM_CAP_IOEVENTFD_ANY_LENGTH 122
 #define KVM_CAP_HYPERV_SYNIC 123
 #define KVM_CAP_S390_RI 124
+#define KVM_CAP_SPAPR_TCE_64 125
+#define KVM_CAP_ARM_PMU_V3 126
+#define KVM_CAP_VCPU_ATTRIBUTES 127
 
 #ifdef KVM_CAP_IRQ_ROUTING
 
@@ -1142,6 +1157,8 @@
 /* Available with KVM_CAP_PPC_ALLOC_HTAB */
 #define KVM_PPC_ALLOCATE_HTAB	  _IOWR(KVMIO, 0xa7, __u32)
 #define KVM_CREATE_SPAPR_TCE	  _IOW(KVMIO,  0xa8, struct kvm_create_spapr_tce)
+#define KVM_CREATE_SPAPR_TCE_64	  _IOW(KVMIO,  0xa8, \
+				       struct kvm_create_spapr_tce_64)
 /* Available with KVM_CAP_RMA */
 #define KVM_ALLOCATE_RMA	  _IOR(KVMIO,  0xa9, struct kvm_allocate_rma)
 /* Available with KVM_CAP_PPC_HTAB_FD */
diff --git a/include/uapi/linux/ptp_clock.h b/include/uapi/linux/ptp_clock.h
index f0b7bfe..ac6dded 100644
--- a/include/uapi/linux/ptp_clock.h
+++ b/include/uapi/linux/ptp_clock.h
@@ -51,7 +51,9 @@
 	int n_per_out; /* Number of programmable periodic signals. */
 	int pps;       /* Whether the clock supports a PPS callback. */
 	int n_pins;    /* Number of input/output pins. */
-	int rsv[14];   /* Reserved for future use. */
+	/* Whether the clock supports precise system-device cross timestamps */
+	int cross_timestamping;
+	int rsv[13];   /* Reserved for future use. */
 };
 
 struct ptp_extts_request {
@@ -81,6 +83,13 @@
 	struct ptp_clock_time ts[2 * PTP_MAX_SAMPLES + 1];
 };
 
+struct ptp_sys_offset_precise {
+	struct ptp_clock_time device;
+	struct ptp_clock_time sys_realtime;
+	struct ptp_clock_time sys_monoraw;
+	unsigned int rsv[4];    /* Reserved for future use. */
+};
+
 enum ptp_pin_function {
 	PTP_PF_NONE,
 	PTP_PF_EXTTS,
@@ -124,6 +133,8 @@
 #define PTP_SYS_OFFSET     _IOW(PTP_CLK_MAGIC, 5, struct ptp_sys_offset)
 #define PTP_PIN_GETFUNC    _IOWR(PTP_CLK_MAGIC, 6, struct ptp_pin_desc)
 #define PTP_PIN_SETFUNC    _IOW(PTP_CLK_MAGIC, 7, struct ptp_pin_desc)
+#define PTP_SYS_OFFSET_PRECISE \
+	_IOWR(PTP_CLK_MAGIC, 8, struct ptp_sys_offset_precise)
 
 struct ptp_extts_event {
 	struct ptp_clock_time t; /* Time event occurred. */
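From userspace the new capability is advertised via cross_timestamping in struct ptp_clock_caps and exercised with the new ioctl. A small test program, assuming the clock is exposed as /dev/ptp0:

#include <stdio.h>
#include <fcntl.h>
#include <sys/ioctl.h>
#include <linux/ptp_clock.h>

int main(void)
{
	struct ptp_sys_offset_precise off;
	int fd = open("/dev/ptp0", O_RDWR);

	if (fd < 0)
		return 1;
	if (ioctl(fd, PTP_SYS_OFFSET_PRECISE, &off)) {
		perror("PTP_SYS_OFFSET_PRECISE");
		return 1;
	}
	printf("device   %lld.%09u\nrealtime %lld.%09u\nmonoraw  %lld.%09u\n",
	       (long long)off.device.sec, off.device.nsec,
	       (long long)off.sys_realtime.sec, off.sys_realtime.nsec,
	       (long long)off.sys_monoraw.sec, off.sys_monoraw.nsec);
	return 0;
}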
diff --git a/init/main.c b/init/main.c
index 031a608..b3c6e36 100644
--- a/init/main.c
+++ b/init/main.c
@@ -385,7 +385,6 @@
 	int pid;
 
 	rcu_scheduler_starting();
-	smpboot_thread_init();
 	/*
 	 * We need to spawn init first so that it obtains pid 1, however
 	 * the init task will end up wanting to create kthreads, which, if
@@ -449,20 +448,6 @@
 	done = 1;
 }
 
-/*
- *	Activate the first processor.
- */
-
-static void __init boot_cpu_init(void)
-{
-	int cpu = smp_processor_id();
-	/* Mark the boot cpu "present", "online" etc for SMP and UP case */
-	set_cpu_online(cpu, true);
-	set_cpu_active(cpu, true);
-	set_cpu_present(cpu, true);
-	set_cpu_possible(cpu, true);
-}
-
 void __init __weak smp_setup_processor_id(void)
 {
 }
@@ -522,6 +507,7 @@
 	setup_command_line(command_line);
 	setup_nr_cpu_ids();
 	setup_per_cpu_areas();
+	boot_cpu_state_init();
 	smp_prepare_boot_cpu();	/* arch-specific boot-cpu hooks */
 
 	build_all_zonelists(NULL, NULL);
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 5b9d396..6ea42e8 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -22,13 +22,88 @@
 #include <linux/lockdep.h>
 #include <linux/tick.h>
 #include <linux/irq.h>
+#include <linux/smpboot.h>
+
 #include <trace/events/power.h>
+#define CREATE_TRACE_POINTS
+#include <trace/events/cpuhp.h>
 
 #include "smpboot.h"
 
+/**
+ * cpuhp_cpu_state - Per cpu hotplug state storage
+ * @state:	The current cpu state
+ * @target:	The target state
+ * @thread:	Pointer to the hotplug thread
+ * @should_run:	Thread should execute
+ * @cb_state:	The state for a single callback (install/uninstall)
+ * @cb:		Single callback function (install/uninstall)
+ * @result:	Result of the operation
+ * @done:	Signal completion to the issuer of the task
+ */
+struct cpuhp_cpu_state {
+	enum cpuhp_state	state;
+	enum cpuhp_state	target;
+#ifdef CONFIG_SMP
+	struct task_struct	*thread;
+	bool			should_run;
+	enum cpuhp_state	cb_state;
+	int			(*cb)(unsigned int cpu);
+	int			result;
+	struct completion	done;
+#endif
+};
+
+static DEFINE_PER_CPU(struct cpuhp_cpu_state, cpuhp_state);
+
+/**
+ * cpuhp_step - Hotplug state machine step
+ * @name:	Name of the step
+ * @startup:	Startup function of the step
+ * @teardown:	Teardown function of the step
+ * @skip_onerr:	Do not invoke the functions on error rollback
+ *		Will go away once the notifiers	are gone
+ * @cant_stop:	Bringup/teardown can't be stopped at this step
+ */
+struct cpuhp_step {
+	const char	*name;
+	int		(*startup)(unsigned int cpu);
+	int		(*teardown)(unsigned int cpu);
+	bool		skip_onerr;
+	bool		cant_stop;
+};
+
+static DEFINE_MUTEX(cpuhp_state_mutex);
+static struct cpuhp_step cpuhp_bp_states[];
+static struct cpuhp_step cpuhp_ap_states[];
+
+/**
+ * cpuhp_invoke_callback - Invoke the callbacks for a given state
+ * @cpu:	The cpu for which the callback should be invoked
+ * @step:	The step in the state machine
+ * @cb:		The callback function to invoke
+ *
+ * Called from cpu hotplug and from the state register machinery
+ */
+static int cpuhp_invoke_callback(unsigned int cpu, enum cpuhp_state step,
+				 int (*cb)(unsigned int))
+{
+	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
+	int ret = 0;
+
+	if (cb) {
+		trace_cpuhp_enter(cpu, st->target, step, cb);
+		ret = cb(cpu);
+		trace_cpuhp_exit(cpu, st->state, step, ret);
+	}
+	return ret;
+}
+
 #ifdef CONFIG_SMP
 /* Serializes the updates to cpu_online_mask, cpu_present_mask */
 static DEFINE_MUTEX(cpu_add_remove_lock);
+bool cpuhp_tasks_frozen;
+EXPORT_SYMBOL_GPL(cpuhp_tasks_frozen);
 
 /*
  * The following two APIs (cpu_maps_update_begin/done) must be used when
@@ -207,31 +282,281 @@
 	return raw_notifier_chain_register(&cpu_chain, nb);
 }
 
-static int __cpu_notify(unsigned long val, void *v, int nr_to_call,
+static int __cpu_notify(unsigned long val, unsigned int cpu, int nr_to_call,
 			int *nr_calls)
 {
+	unsigned long mod = cpuhp_tasks_frozen ? CPU_TASKS_FROZEN : 0;
+	void *hcpu = (void *)(long)cpu;
+
 	int ret;
 
-	ret = __raw_notifier_call_chain(&cpu_chain, val, v, nr_to_call,
+	ret = __raw_notifier_call_chain(&cpu_chain, val | mod, hcpu, nr_to_call,
 					nr_calls);
 
 	return notifier_to_errno(ret);
 }
 
-static int cpu_notify(unsigned long val, void *v)
+static int cpu_notify(unsigned long val, unsigned int cpu)
 {
-	return __cpu_notify(val, v, -1, NULL);
+	return __cpu_notify(val, cpu, -1, NULL);
+}
+
+/* Notifier wrappers for transitioning to state machine */
+static int notify_prepare(unsigned int cpu)
+{
+	int nr_calls = 0;
+	int ret;
+
+	ret = __cpu_notify(CPU_UP_PREPARE, cpu, -1, &nr_calls);
+	if (ret) {
+		nr_calls--;
+		pr_warn("%s: attempt to bring up CPU %u failed\n",
+				__func__, cpu);
+		__cpu_notify(CPU_UP_CANCELED, cpu, nr_calls, NULL);
+	}
+	return ret;
+}
+
+static int notify_online(unsigned int cpu)
+{
+	cpu_notify(CPU_ONLINE, cpu);
+	return 0;
+}
+
+static int notify_starting(unsigned int cpu)
+{
+	cpu_notify(CPU_STARTING, cpu);
+	return 0;
+}
+
+static int bringup_wait_for_ap(unsigned int cpu)
+{
+	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
+
+	wait_for_completion(&st->done);
+	return st->result;
+}
+
+static int bringup_cpu(unsigned int cpu)
+{
+	struct task_struct *idle = idle_thread_get(cpu);
+	int ret;
+
+	/* Arch-specific enabling code. */
+	ret = __cpu_up(cpu, idle);
+	if (ret) {
+		cpu_notify(CPU_UP_CANCELED, cpu);
+		return ret;
+	}
+	ret = bringup_wait_for_ap(cpu);
+	BUG_ON(!cpu_online(cpu));
+	return ret;
+}
+
+/*
+ * Hotplug state machine related functions
+ */
+static void undo_cpu_down(unsigned int cpu, struct cpuhp_cpu_state *st,
+			  struct cpuhp_step *steps)
+{
+	for (st->state++; st->state < st->target; st->state++) {
+		struct cpuhp_step *step = steps + st->state;
+
+		if (!step->skip_onerr)
+			cpuhp_invoke_callback(cpu, st->state, step->startup);
+	}
+}
+
+static int cpuhp_down_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
+				struct cpuhp_step *steps, enum cpuhp_state target)
+{
+	enum cpuhp_state prev_state = st->state;
+	int ret = 0;
+
+	for (; st->state > target; st->state--) {
+		struct cpuhp_step *step = steps + st->state;
+
+		ret = cpuhp_invoke_callback(cpu, st->state, step->teardown);
+		if (ret) {
+			st->target = prev_state;
+			undo_cpu_down(cpu, st, steps);
+			break;
+		}
+	}
+	return ret;
+}
+
+static void undo_cpu_up(unsigned int cpu, struct cpuhp_cpu_state *st,
+			struct cpuhp_step *steps)
+{
+	for (st->state--; st->state > st->target; st->state--) {
+		struct cpuhp_step *step = steps + st->state;
+
+		if (!step->skip_onerr)
+			cpuhp_invoke_callback(cpu, st->state, step->teardown);
+	}
+}
+
+static int cpuhp_up_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
+			      struct cpuhp_step *steps, enum cpuhp_state target)
+{
+	enum cpuhp_state prev_state = st->state;
+	int ret = 0;
+
+	while (st->state < target) {
+		struct cpuhp_step *step;
+
+		st->state++;
+		step = steps + st->state;
+		ret = cpuhp_invoke_callback(cpu, st->state, step->startup);
+		if (ret) {
+			st->target = prev_state;
+			undo_cpu_up(cpu, st, steps);
+			break;
+		}
+	}
+	return ret;
+}
+
+/*
+ * The cpu hotplug threads manage the bringup and teardown of the cpus
+ */
+static void cpuhp_create(unsigned int cpu)
+{
+	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
+
+	init_completion(&st->done);
+}
+
+static int cpuhp_should_run(unsigned int cpu)
+{
+	struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
+
+	return st->should_run;
+}
+
+/* Execute the teardown callbacks. Used to be CPU_DOWN_PREPARE */
+static int cpuhp_ap_offline(unsigned int cpu, struct cpuhp_cpu_state *st)
+{
+	enum cpuhp_state target = max((int)st->target, CPUHP_TEARDOWN_CPU);
+
+	return cpuhp_down_callbacks(cpu, st, cpuhp_ap_states, target);
+}
+
+/* Execute the online startup callbacks. Used to be CPU_ONLINE */
+static int cpuhp_ap_online(unsigned int cpu, struct cpuhp_cpu_state *st)
+{
+	return cpuhp_up_callbacks(cpu, st, cpuhp_ap_states, st->target);
+}
+
+/*
+ * Execute teardown/startup callbacks on the plugged cpu. Also used to invoke
+ * callbacks when a state gets [un]installed at runtime.
+ */
+static void cpuhp_thread_fun(unsigned int cpu)
+{
+	struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
+	int ret = 0;
+
+	/*
+	 * Paired with the mb() in cpuhp_kick_ap_work and
+	 * cpuhp_invoke_ap_callback, so the work set is consistently visible.
+	 */
+	smp_mb();
+	if (!st->should_run)
+		return;
+
+	st->should_run = false;
+
+	/* Single callback invocation for [un]install ? */
+	if (st->cb) {
+		if (st->cb_state < CPUHP_AP_ONLINE) {
+			local_irq_disable();
+			ret = cpuhp_invoke_callback(cpu, st->cb_state, st->cb);
+			local_irq_enable();
+		} else {
+			ret = cpuhp_invoke_callback(cpu, st->cb_state, st->cb);
+		}
+	} else {
+		/* Cannot happen .... */
+		BUG_ON(st->state < CPUHP_AP_ONLINE_IDLE);
+
+		/* Regular hotplug work */
+		if (st->state < st->target)
+			ret = cpuhp_ap_online(cpu, st);
+		else if (st->state > st->target)
+			ret = cpuhp_ap_offline(cpu, st);
+	}
+	st->result = ret;
+	complete(&st->done);
+}
+
+/* Invoke a single callback on a remote cpu */
+static int cpuhp_invoke_ap_callback(int cpu, enum cpuhp_state state,
+				    int (*cb)(unsigned int))
+{
+	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
+
+	if (!cpu_online(cpu))
+		return 0;
+
+	st->cb_state = state;
+	st->cb = cb;
+	/*
+	 * Make sure the above stores are visible before should_run becomes
+	 * true. Paired with the mb() above in cpuhp_thread_fun()
+	 */
+	smp_mb();
+	st->should_run = true;
+	wake_up_process(st->thread);
+	wait_for_completion(&st->done);
+	return st->result;
+}
+
+/* Regular hotplug invocation of the AP hotplug thread */
+static void __cpuhp_kick_ap_work(struct cpuhp_cpu_state *st)
+{
+	st->result = 0;
+	st->cb = NULL;
+	/*
+	 * Make sure the above stores are visible before should_run becomes
+	 * true. Paired with the mb() above in cpuhp_thread_fun()
+	 */
+	smp_mb();
+	st->should_run = true;
+	wake_up_process(st->thread);
+}
+
+static int cpuhp_kick_ap_work(unsigned int cpu)
+{
+	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
+	enum cpuhp_state state = st->state;
+
+	trace_cpuhp_enter(cpu, st->target, state, cpuhp_kick_ap_work);
+	__cpuhp_kick_ap_work(st);
+	wait_for_completion(&st->done);
+	trace_cpuhp_exit(cpu, st->state, state, st->result);
+	return st->result;
+}
+
+static struct smp_hotplug_thread cpuhp_threads = {
+	.store			= &cpuhp_state.thread,
+	.create			= &cpuhp_create,
+	.thread_should_run	= cpuhp_should_run,
+	.thread_fn		= cpuhp_thread_fun,
+	.thread_comm		= "cpuhp/%u",
+	.selfparking		= true,
+};
+
+void __init cpuhp_threads_init(void)
+{
+	BUG_ON(smpboot_register_percpu_thread(&cpuhp_threads));
+	kthread_unpark(this_cpu_read(cpuhp_state.thread));
 }
 
 #ifdef CONFIG_HOTPLUG_CPU
-
-static void cpu_notify_nofail(unsigned long val, void *v)
-{
-	BUG_ON(cpu_notify(val, v));
-}
 EXPORT_SYMBOL(register_cpu_notifier);
 EXPORT_SYMBOL(__register_cpu_notifier);
-
 void unregister_cpu_notifier(struct notifier_block *nb)
 {
 	cpu_maps_update_begin();
@@ -311,57 +636,60 @@
 	read_unlock(&tasklist_lock);
 }
 
-struct take_cpu_down_param {
-	unsigned long mod;
-	void *hcpu;
-};
+static void cpu_notify_nofail(unsigned long val, unsigned int cpu)
+{
+	BUG_ON(cpu_notify(val, cpu));
+}
+
+static int notify_down_prepare(unsigned int cpu)
+{
+	int err, nr_calls = 0;
+
+	err = __cpu_notify(CPU_DOWN_PREPARE, cpu, -1, &nr_calls);
+	if (err) {
+		nr_calls--;
+		__cpu_notify(CPU_DOWN_FAILED, cpu, nr_calls, NULL);
+		pr_warn("%s: attempt to take down CPU %u failed\n",
+				__func__, cpu);
+	}
+	return err;
+}
+
+static int notify_dying(unsigned int cpu)
+{
+	cpu_notify(CPU_DYING, cpu);
+	return 0;
+}
 
 /* Take this CPU down. */
 static int take_cpu_down(void *_param)
 {
-	struct take_cpu_down_param *param = _param;
-	int err;
+	struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
+	enum cpuhp_state target = max((int)st->target, CPUHP_AP_OFFLINE);
+	int err, cpu = smp_processor_id();
 
 	/* Ensure this CPU doesn't handle any more interrupts. */
 	err = __cpu_disable();
 	if (err < 0)
 		return err;
 
-	cpu_notify(CPU_DYING | param->mod, param->hcpu);
+	/* Invoke the former CPU_DYING callbacks */
+	for (; st->state > target; st->state--) {
+		struct cpuhp_step *step = cpuhp_ap_states + st->state;
+
+		cpuhp_invoke_callback(cpu, st->state, step->teardown);
+	}
 	/* Give up timekeeping duties */
 	tick_handover_do_timer();
 	/* Park the stopper thread */
-	stop_machine_park((long)param->hcpu);
+	stop_machine_park(cpu);
 	return 0;
 }
 
-/* Requires cpu_add_remove_lock to be held */
-static int _cpu_down(unsigned int cpu, int tasks_frozen)
+static int takedown_cpu(unsigned int cpu)
 {
-	int err, nr_calls = 0;
-	void *hcpu = (void *)(long)cpu;
-	unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;
-	struct take_cpu_down_param tcd_param = {
-		.mod = mod,
-		.hcpu = hcpu,
-	};
-
-	if (num_online_cpus() == 1)
-		return -EBUSY;
-
-	if (!cpu_online(cpu))
-		return -EINVAL;
-
-	cpu_hotplug_begin();
-
-	err = __cpu_notify(CPU_DOWN_PREPARE | mod, hcpu, -1, &nr_calls);
-	if (err) {
-		nr_calls--;
-		__cpu_notify(CPU_DOWN_FAILED | mod, hcpu, nr_calls, NULL);
-		pr_warn("%s: attempt to take down CPU %u failed\n",
-			__func__, cpu);
-		goto out_release;
-	}
+	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
+	int err;
 
 	/*
 	 * By now we've cleared cpu_active_mask, wait for all preempt-disabled
@@ -378,6 +706,8 @@
 	else
 		synchronize_rcu();
 
+	/* Park the smpboot threads */
+	kthread_park(per_cpu_ptr(&cpuhp_state, cpu)->thread);
 	smpboot_park_threads(cpu);
 
 	/*
@@ -389,12 +719,12 @@
 	/*
 	 * So now all preempt/rcu users must observe !cpu_active().
 	 */
-	err = stop_machine(take_cpu_down, &tcd_param, cpumask_of(cpu));
+	err = stop_machine(take_cpu_down, NULL, cpumask_of(cpu));
 	if (err) {
 		/* CPU didn't die: tell everyone.  Can't complain. */
-		cpu_notify_nofail(CPU_DOWN_FAILED | mod, hcpu);
+		cpu_notify_nofail(CPU_DOWN_FAILED, cpu);
 		irq_unlock_sparse();
-		goto out_release;
+		return err;
 	}
 	BUG_ON(cpu_online(cpu));
 
@@ -405,10 +735,8 @@
 	 *
 	 * Wait for the stop thread to go away.
 	 */
-	while (!per_cpu(cpu_dead_idle, cpu))
-		cpu_relax();
-	smp_mb(); /* Read from cpu_dead_idle before __cpu_die(). */
-	per_cpu(cpu_dead_idle, cpu) = false;
+	wait_for_completion(&st->done);
+	BUG_ON(st->state != CPUHP_AP_IDLE_DEAD);
 
 	/* Interrupts are moved away from the dying cpu, reenable alloc/free */
 	irq_unlock_sparse();
@@ -417,20 +745,104 @@
 	/* This actually kills the CPU. */
 	__cpu_die(cpu);
 
-	/* CPU is completely dead: tell everyone.  Too late to complain. */
 	tick_cleanup_dead_cpu(cpu);
-	cpu_notify_nofail(CPU_DEAD | mod, hcpu);
-
-	check_for_tasks(cpu);
-
-out_release:
-	cpu_hotplug_done();
-	if (!err)
-		cpu_notify_nofail(CPU_POST_DEAD | mod, hcpu);
-	return err;
+	return 0;
 }
 
-int cpu_down(unsigned int cpu)
+static int notify_dead(unsigned int cpu)
+{
+	cpu_notify_nofail(CPU_DEAD, cpu);
+	check_for_tasks(cpu);
+	return 0;
+}
+
+static void cpuhp_complete_idle_dead(void *arg)
+{
+	struct cpuhp_cpu_state *st = arg;
+
+	complete(&st->done);
+}
+
+void cpuhp_report_idle_dead(void)
+{
+	struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
+
+	BUG_ON(st->state != CPUHP_AP_OFFLINE);
+	rcu_report_dead(smp_processor_id());
+	st->state = CPUHP_AP_IDLE_DEAD;
+	/*
+	 * We cannot call complete after rcu_report_dead() so we delegate it
+	 * to an online cpu.
+	 */
+	smp_call_function_single(cpumask_first(cpu_online_mask),
+				 cpuhp_complete_idle_dead, st, 0);
+}
+
+#else
+#define notify_down_prepare	NULL
+#define takedown_cpu		NULL
+#define notify_dead		NULL
+#define notify_dying		NULL
+#endif
+
+#ifdef CONFIG_HOTPLUG_CPU
+
+/* Requires cpu_add_remove_lock to be held */
+static int __ref _cpu_down(unsigned int cpu, int tasks_frozen,
+			   enum cpuhp_state target)
+{
+	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
+	int prev_state, ret = 0;
+	bool hasdied = false;
+
+	if (num_online_cpus() == 1)
+		return -EBUSY;
+
+	if (!cpu_present(cpu))
+		return -EINVAL;
+
+	cpu_hotplug_begin();
+
+	cpuhp_tasks_frozen = tasks_frozen;
+
+	prev_state = st->state;
+	st->target = target;
+	/*
+	 * If the current CPU state is in the range of the AP hotplug thread,
+	 * then we need to kick the thread.
+	 */
+	if (st->state > CPUHP_TEARDOWN_CPU) {
+		ret = cpuhp_kick_ap_work(cpu);
+		/*
+		 * The AP side has done the error rollback already. Just
+		 * return the error code.
+		 */
+		if (ret)
+			goto out;
+
+		/*
+		 * We might have stopped still in the range of the AP hotplug
+		 * thread. Nothing to do anymore.
+		 */
+		if (st->state > CPUHP_TEARDOWN_CPU)
+			goto out;
+	}
+	/*
+	 * The AP brought itself down to CPUHP_TEARDOWN_CPU. So we need
+	 * to do the further cleanups.
+	 */
+	ret = cpuhp_down_callbacks(cpu, st, cpuhp_bp_states, target);
+
+	hasdied = prev_state != st->state && st->state == CPUHP_OFFLINE;
+out:
+	cpu_hotplug_done();
+	/* This post dead nonsense must die */
+	if (!ret && hasdied)
+		cpu_notify_nofail(CPU_POST_DEAD, cpu);
+	return ret;
+}
+
+static int do_cpu_down(unsigned int cpu, enum cpuhp_state target)
 {
 	int err;
 
@@ -441,100 +853,131 @@
 		goto out;
 	}
 
-	err = _cpu_down(cpu, 0);
+	err = _cpu_down(cpu, 0, target);
 
 out:
 	cpu_maps_update_done();
 	return err;
 }
+int cpu_down(unsigned int cpu)
+{
+	return do_cpu_down(cpu, CPUHP_OFFLINE);
+}
 EXPORT_SYMBOL(cpu_down);
 #endif /*CONFIG_HOTPLUG_CPU*/
 
-/*
- * Unpark per-CPU smpboot kthreads at CPU-online time.
+/**
+ * notify_cpu_starting(cpu) - call the CPU_STARTING notifiers
+ * @cpu: cpu that just started
+ *
+ * This function calls the cpu_chain notifiers with CPU_STARTING.
+ * It must be called by the arch code on the new cpu, before the new cpu
+ * enables interrupts and before the "boot" cpu returns from __cpu_up().
  */
-static int smpboot_thread_call(struct notifier_block *nfb,
-			       unsigned long action, void *hcpu)
+void notify_cpu_starting(unsigned int cpu)
 {
-	int cpu = (long)hcpu;
+	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
+	enum cpuhp_state target = min((int)st->target, CPUHP_AP_ONLINE);
 
-	switch (action & ~CPU_TASKS_FROZEN) {
+	while (st->state < target) {
+		struct cpuhp_step *step;
 
-	case CPU_DOWN_FAILED:
-	case CPU_ONLINE:
-		smpboot_unpark_threads(cpu);
-		break;
-
-	default:
-		break;
+		st->state++;
+		step = cpuhp_ap_states + st->state;
+		cpuhp_invoke_callback(cpu, st->state, step->startup);
 	}
-
-	return NOTIFY_OK;
 }
 
-static struct notifier_block smpboot_thread_notifier = {
-	.notifier_call = smpboot_thread_call,
-	.priority = CPU_PRI_SMPBOOT,
-};
-
-void smpboot_thread_init(void)
+/*
+ * Called from the idle task. We need to set active here, so we can kick off
+ * the stopper thread and unpark the smpboot threads. If the target state is
+ * beyond CPUHP_AP_ONLINE_IDLE, we kick the cpuhp thread and let it bring up
+ * cpu further.
+ */
+void cpuhp_online_idle(enum cpuhp_state state)
 {
-	register_cpu_notifier(&smpboot_thread_notifier);
+	struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
+	unsigned int cpu = smp_processor_id();
+
+	/* Happens for the boot cpu */
+	if (state != CPUHP_AP_ONLINE_IDLE)
+		return;
+
+	st->state = CPUHP_AP_ONLINE_IDLE;
+
+	/* The cpu is marked online, set it active now */
+	set_cpu_active(cpu, true);
+	/* Unpark the stopper thread and the hotplug thread of this cpu */
+	stop_machine_unpark(cpu);
+	kthread_unpark(st->thread);
+
+	/* Should we go further up ? */
+	if (st->target > CPUHP_AP_ONLINE_IDLE)
+		__cpuhp_kick_ap_work(st);
+	else
+		complete(&st->done);
 }
 
 /* Requires cpu_add_remove_lock to be held */
-static int _cpu_up(unsigned int cpu, int tasks_frozen)
+static int _cpu_up(unsigned int cpu, int tasks_frozen, enum cpuhp_state target)
 {
-	int ret, nr_calls = 0;
-	void *hcpu = (void *)(long)cpu;
-	unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;
+	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
 	struct task_struct *idle;
+	int ret = 0;
 
 	cpu_hotplug_begin();
 
-	if (cpu_online(cpu) || !cpu_present(cpu)) {
+	if (!cpu_present(cpu)) {
 		ret = -EINVAL;
 		goto out;
 	}
 
-	idle = idle_thread_get(cpu);
-	if (IS_ERR(idle)) {
-		ret = PTR_ERR(idle);
-		goto out;
-	}
-
-	ret = smpboot_create_threads(cpu);
-	if (ret)
+	/*
+	 * The caller of do_cpu_up might have raced with another
+	 * caller. Ignore it for now.
+	 */
+	if (st->state >= target)
 		goto out;
 
-	ret = __cpu_notify(CPU_UP_PREPARE | mod, hcpu, -1, &nr_calls);
-	if (ret) {
-		nr_calls--;
-		pr_warn("%s: attempt to bring up CPU %u failed\n",
-			__func__, cpu);
-		goto out_notify;
+	if (st->state == CPUHP_OFFLINE) {
+		/* Let it fail before we try to bring the cpu up */
+		idle = idle_thread_get(cpu);
+		if (IS_ERR(idle)) {
+			ret = PTR_ERR(idle);
+			goto out;
+		}
 	}
 
-	/* Arch-specific enabling code. */
-	ret = __cpu_up(cpu, idle);
+	cpuhp_tasks_frozen = tasks_frozen;
 
-	if (ret != 0)
-		goto out_notify;
-	BUG_ON(!cpu_online(cpu));
+	st->target = target;
+	/*
+	 * If the current CPU state is in the range of the AP hotplug thread,
+	 * then we need to kick the thread once more.
+	 */
+	if (st->state > CPUHP_BRINGUP_CPU) {
+		ret = cpuhp_kick_ap_work(cpu);
+		/*
+		 * The AP side has done the error rollback already. Just
+		 * return the error code.
+		 */
+		if (ret)
+			goto out;
+	}
 
-	/* Now call notifier in preparation. */
-	cpu_notify(CPU_ONLINE | mod, hcpu);
-
-out_notify:
-	if (ret != 0)
-		__cpu_notify(CPU_UP_CANCELED | mod, hcpu, nr_calls, NULL);
+	/*
+	 * Try to reach the target state. We max out on the BP at
+	 * CPUHP_BRINGUP_CPU. After that the AP hotplug thread is
+	 * responsible for bringing it up to the target state.
+	 */
+	target = min((int)target, CPUHP_BRINGUP_CPU);
+	ret = cpuhp_up_callbacks(cpu, st, cpuhp_bp_states, target);
 out:
 	cpu_hotplug_done();
-
 	return ret;
 }
 
-int cpu_up(unsigned int cpu)
+static int do_cpu_up(unsigned int cpu, enum cpuhp_state target)
 {
 	int err = 0;
 
@@ -558,12 +1001,16 @@
 		goto out;
 	}
 
-	err = _cpu_up(cpu, 0);
-
+	err = _cpu_up(cpu, 0, target);
 out:
 	cpu_maps_update_done();
 	return err;
 }
+
+int cpu_up(unsigned int cpu)
+{
+	return do_cpu_up(cpu, CPUHP_ONLINE);
+}
 EXPORT_SYMBOL_GPL(cpu_up);
 
 #ifdef CONFIG_PM_SLEEP_SMP
@@ -586,7 +1033,7 @@
 		if (cpu == first_cpu)
 			continue;
 		trace_suspend_resume(TPS("CPU_OFF"), cpu, true);
-		error = _cpu_down(cpu, 1);
+		error = _cpu_down(cpu, 1, CPUHP_OFFLINE);
 		trace_suspend_resume(TPS("CPU_OFF"), cpu, false);
 		if (!error)
 			cpumask_set_cpu(cpu, frozen_cpus);
@@ -636,7 +1083,7 @@
 
 	for_each_cpu(cpu, frozen_cpus) {
 		trace_suspend_resume(TPS("CPU_ON"), cpu, true);
-		error = _cpu_up(cpu, 1);
+		error = _cpu_up(cpu, 1, CPUHP_ONLINE);
 		trace_suspend_resume(TPS("CPU_ON"), cpu, false);
 		if (!error) {
 			pr_info("CPU%d is up\n", cpu);
@@ -709,26 +1156,463 @@
 
 #endif /* CONFIG_PM_SLEEP_SMP */
 
-/**
- * notify_cpu_starting(cpu) - call the CPU_STARTING notifiers
- * @cpu: cpu that just started
- *
- * This function calls the cpu_chain notifiers with CPU_STARTING.
- * It must be called by the arch code on the new cpu, before the new cpu
- * enables interrupts and before the "boot" cpu returns from __cpu_up().
- */
-void notify_cpu_starting(unsigned int cpu)
-{
-	unsigned long val = CPU_STARTING;
+#endif /* CONFIG_SMP */
 
-#ifdef CONFIG_PM_SLEEP_SMP
-	if (frozen_cpus != NULL && cpumask_test_cpu(cpu, frozen_cpus))
-		val = CPU_STARTING_FROZEN;
-#endif /* CONFIG_PM_SLEEP_SMP */
-	cpu_notify(val, (void *)(long)cpu);
+/* Boot processor state steps */
+static struct cpuhp_step cpuhp_bp_states[] = {
+	[CPUHP_OFFLINE] = {
+		.name			= "offline",
+		.startup		= NULL,
+		.teardown		= NULL,
+	},
+#ifdef CONFIG_SMP
+	[CPUHP_CREATE_THREADS] = {
+		.name			= "threads:create",
+		.startup		= smpboot_create_threads,
+		.teardown		= NULL,
+		.cant_stop		= true,
+	},
+	/*
+	 * Preparatory and dead notifiers. Will be replaced once the notifiers
+	 * are converted to states.
+	 */
+	[CPUHP_NOTIFY_PREPARE] = {
+		.name			= "notify:prepare",
+		.startup		= notify_prepare,
+		.teardown		= notify_dead,
+		.skip_onerr		= true,
+		.cant_stop		= true,
+	},
+	/* Kicks the plugged cpu into life */
+	[CPUHP_BRINGUP_CPU] = {
+		.name			= "cpu:bringup",
+		.startup		= bringup_cpu,
+		.teardown		= NULL,
+		.cant_stop		= true,
+	},
+	/*
+	 * Handled on the control processor until the plugged processor manages
+	 * this itself.
+	 */
+	[CPUHP_TEARDOWN_CPU] = {
+		.name			= "cpu:teardown",
+		.startup		= NULL,
+		.teardown		= takedown_cpu,
+		.cant_stop		= true,
+	},
+#endif
+};
+
+/* Application processor state steps */
+static struct cpuhp_step cpuhp_ap_states[] = {
+#ifdef CONFIG_SMP
+	/* Final state before CPU kills itself */
+	[CPUHP_AP_IDLE_DEAD] = {
+		.name			= "idle:dead",
+	},
+	/*
+	 * Last state before CPU enters the idle loop to die. Transient state
+	 * for synchronization.
+	 */
+	[CPUHP_AP_OFFLINE] = {
+		.name			= "ap:offline",
+		.cant_stop		= true,
+	},
+	/*
+	 * Low level startup/teardown notifiers. Run with interrupts
+	 * disabled. Will be removed once the notifiers are converted to
+	 * states.
+	 */
+	[CPUHP_AP_NOTIFY_STARTING] = {
+		.name			= "notify:starting",
+		.startup		= notify_starting,
+		.teardown		= notify_dying,
+		.skip_onerr		= true,
+		.cant_stop		= true,
+	},
+	/* Entry state on starting. Interrupts enabled from here on. Transient
+	 * state for synchronization */
+	[CPUHP_AP_ONLINE] = {
+		.name			= "ap:online",
+	},
+	/* Handle smpboot threads park/unpark */
+	[CPUHP_AP_SMPBOOT_THREADS] = {
+		.name			= "smpboot:threads",
+		.startup		= smpboot_unpark_threads,
+		.teardown		= NULL,
+	},
+	/*
+	 * Online/down_prepare notifiers. Will be removed once the notifiers
+	 * are converted to states.
+	 */
+	[CPUHP_AP_NOTIFY_ONLINE] = {
+		.name			= "notify:online",
+		.startup		= notify_online,
+		.teardown		= notify_down_prepare,
+	},
+#endif
+	/*
+	 * The dynamically registered state space is here
+	 */
+
+	/* CPU is fully up and running. */
+	[CPUHP_ONLINE] = {
+		.name			= "online",
+		.startup		= NULL,
+		.teardown		= NULL,
+	},
+};
+
+/* Sanity check for callbacks */
+static int cpuhp_cb_check(enum cpuhp_state state)
+{
+	if (state <= CPUHP_OFFLINE || state >= CPUHP_ONLINE)
+		return -EINVAL;
+	return 0;
 }
 
-#endif /* CONFIG_SMP */
+static bool cpuhp_is_ap_state(enum cpuhp_state state)
+{
+	/*
+	 * The extra check for CPUHP_TEARDOWN_CPU is only for documentation
+	 * purposes as that state is handled explicitly in cpu_down.
+	 */
+	return state > CPUHP_BRINGUP_CPU && state != CPUHP_TEARDOWN_CPU;
+}
+
+static struct cpuhp_step *cpuhp_get_step(enum cpuhp_state state)
+{
+	struct cpuhp_step *sp;
+
+	sp = cpuhp_is_ap_state(state) ? cpuhp_ap_states : cpuhp_bp_states;
+	return sp + state;
+}
+
+static void cpuhp_store_callbacks(enum cpuhp_state state,
+				  const char *name,
+				  int (*startup)(unsigned int cpu),
+				  int (*teardown)(unsigned int cpu))
+{
+	/* (Un)Install the callbacks for further cpu hotplug operations */
+	struct cpuhp_step *sp;
+
+	mutex_lock(&cpuhp_state_mutex);
+	sp = cpuhp_get_step(state);
+	sp->startup = startup;
+	sp->teardown = teardown;
+	sp->name = name;
+	mutex_unlock(&cpuhp_state_mutex);
+}
+
+static void *cpuhp_get_teardown_cb(enum cpuhp_state state)
+{
+	return cpuhp_get_step(state)->teardown;
+}
+
+/*
+ * Call the startup/teardown function for a step either on the AP or
+ * on the current CPU.
+ */
+static int cpuhp_issue_call(int cpu, enum cpuhp_state state,
+			    int (*cb)(unsigned int), bool bringup)
+{
+	int ret;
+
+	if (!cb)
+		return 0;
+	/*
+	 * The non-AP-bound callbacks can fail on bringup. On teardown,
+	 * e.g. module removal, we crash for now.
+	 */
+#ifdef CONFIG_SMP
+	if (cpuhp_is_ap_state(state))
+		ret = cpuhp_invoke_ap_callback(cpu, state, cb);
+	else
+		ret = cpuhp_invoke_callback(cpu, state, cb);
+#else
+	ret = cpuhp_invoke_callback(cpu, state, cb);
+#endif
+	BUG_ON(ret && !bringup);
+	return ret;
+}
+
+/*
+ * Called from __cpuhp_setup_state on a recoverable failure.
+ *
+ * Note: The teardown callbacks for rollback are not allowed to fail!
+ */
+static void cpuhp_rollback_install(int failedcpu, enum cpuhp_state state,
+				   int (*teardown)(unsigned int cpu))
+{
+	int cpu;
+
+	if (!teardown)
+		return;
+
+	/* Roll back the already executed steps on the other cpus */
+	for_each_present_cpu(cpu) {
+		struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
+		int cpustate = st->state;
+
+		if (cpu >= failedcpu)
+			break;
+
+		/* Did we invoke the startup call on that cpu ? */
+		if (cpustate >= state)
+			cpuhp_issue_call(cpu, state, teardown, false);
+	}
+}
+
+/*
+ * Returns a free slot for dynamic assignment in the Online state range. The
+ * states are protected by the cpuhp_state_mutex and an empty slot is
+ * identified by having no name assigned.
+ */
+static int cpuhp_reserve_state(enum cpuhp_state state)
+{
+	enum cpuhp_state i;
+
+	mutex_lock(&cpuhp_state_mutex);
+	for (i = CPUHP_AP_ONLINE_DYN; i <= CPUHP_AP_ONLINE_DYN_END; i++) {
+		if (cpuhp_ap_states[i].name)
+			continue;
+
+		cpuhp_ap_states[i].name = "Reserved";
+		mutex_unlock(&cpuhp_state_mutex);
+		return i;
+	}
+	mutex_unlock(&cpuhp_state_mutex);
+	WARN(1, "No more dynamic states available for CPU hotplug\n");
+	return -ENOSPC;
+}
+
+/**
+ * __cpuhp_setup_state - Set up the callbacks for a hotplug machine state
+ * @state:	The state to setup
+ * @invoke:	If true, the startup function is invoked for cpus where
+ *		cpu state >= @state
+ * @startup:	startup callback function
+ * @teardown:	teardown callback function
+ *
+ * Returns 0 if successful, otherwise a proper error code
+ */
+int __cpuhp_setup_state(enum cpuhp_state state,
+			const char *name, bool invoke,
+			int (*startup)(unsigned int cpu),
+			int (*teardown)(unsigned int cpu))
+{
+	int cpu, ret = 0;
+	int dyn_state = 0;
+
+	if (cpuhp_cb_check(state) || !name)
+		return -EINVAL;
+
+	get_online_cpus();
+
+	/* Currently, only assignments for the dynamic ONLINE state are possible */
+	if (state == CPUHP_AP_ONLINE_DYN) {
+		dyn_state = 1;
+		ret = cpuhp_reserve_state(state);
+		if (ret < 0)
+			goto out;
+		state = ret;
+	}
+
+	cpuhp_store_callbacks(state, name, startup, teardown);
+
+	if (!invoke || !startup)
+		goto out;
+
+	/*
+	 * Try to call the startup callback for each present cpu
+	 * depending on the hotplug state of the cpu.
+	 */
+	for_each_present_cpu(cpu) {
+		struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
+		int cpustate = st->state;
+
+		if (cpustate < state)
+			continue;
+
+		ret = cpuhp_issue_call(cpu, state, startup, true);
+		if (ret) {
+			cpuhp_rollback_install(cpu, state, teardown);
+			cpuhp_store_callbacks(state, NULL, NULL, NULL);
+			goto out;
+		}
+	}
+out:
+	put_online_cpus();
+	if (!ret && dyn_state)
+		return state;
+	return ret;
+}
+EXPORT_SYMBOL(__cpuhp_setup_state);
+
+/**
+ * __cpuhp_remove_state - Remove the callbacks for a hotplug machine state
+ * @state:	The state to remove
+ * @invoke:	If true, the teardown function is invoked for cpus where
+ *		cpu state >= @state
+ *
+ * The teardown callback is currently not allowed to fail. Think
+ * about module removal!
+ */
+void __cpuhp_remove_state(enum cpuhp_state state, bool invoke)
+{
+	int (*teardown)(unsigned int cpu) = cpuhp_get_teardown_cb(state);
+	int cpu;
+
+	BUG_ON(cpuhp_cb_check(state));
+
+	get_online_cpus();
+
+	if (!invoke || !teardown)
+		goto remove;
+
+	/*
+	 * Call the teardown callback for each present cpu depending
+	 * on the hotplug state of the cpu. This function is not
+	 * allowed to fail currently!
+	 */
+	for_each_present_cpu(cpu) {
+		struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
+		int cpustate = st->state;
+
+		if (cpustate >= state)
+			cpuhp_issue_call(cpu, state, teardown, false);
+	}
+remove:
+	cpuhp_store_callbacks(state, NULL, NULL, NULL);
+	put_online_cpus();
+}
+EXPORT_SYMBOL(__cpuhp_remove_state);
+
+#if defined(CONFIG_SYSFS) && defined(CONFIG_HOTPLUG_CPU)
+static ssize_t show_cpuhp_state(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
+
+	return sprintf(buf, "%d\n", st->state);
+}
+static DEVICE_ATTR(state, 0444, show_cpuhp_state, NULL);
+
+static ssize_t write_cpuhp_target(struct device *dev,
+				  struct device_attribute *attr,
+				  const char *buf, size_t count)
+{
+	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
+	struct cpuhp_step *sp;
+	int target, ret;
+
+	ret = kstrtoint(buf, 10, &target);
+	if (ret)
+		return ret;
+
+#ifdef CONFIG_CPU_HOTPLUG_STATE_CONTROL
+	if (target < CPUHP_OFFLINE || target > CPUHP_ONLINE)
+		return -EINVAL;
+#else
+	if (target != CPUHP_OFFLINE && target != CPUHP_ONLINE)
+		return -EINVAL;
+#endif
+
+	ret = lock_device_hotplug_sysfs();
+	if (ret)
+		return ret;
+
+	mutex_lock(&cpuhp_state_mutex);
+	sp = cpuhp_get_step(target);
+	ret = !sp->name || sp->cant_stop ? -EINVAL : 0;
+	mutex_unlock(&cpuhp_state_mutex);
+	if (ret)
+		return ret;
+
+	if (st->state < target)
+		ret = do_cpu_up(dev->id, target);
+	else
+		ret = do_cpu_down(dev->id, target);
+
+	unlock_device_hotplug();
+	return ret ? ret : count;
+}
+
+static ssize_t show_cpuhp_target(struct device *dev,
+				 struct device_attribute *attr, char *buf)
+{
+	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
+
+	return sprintf(buf, "%d\n", st->target);
+}
+static DEVICE_ATTR(target, 0644, show_cpuhp_target, write_cpuhp_target);
+
+static struct attribute *cpuhp_cpu_attrs[] = {
+	&dev_attr_state.attr,
+	&dev_attr_target.attr,
+	NULL
+};
+
+static struct attribute_group cpuhp_cpu_attr_group = {
+	.attrs = cpuhp_cpu_attrs,
+	.name = "hotplug",
+	NULL
+};
+
+static ssize_t show_cpuhp_states(struct device *dev,
+				 struct device_attribute *attr, char *buf)
+{
+	ssize_t cur, res = 0;
+	int i;
+
+	mutex_lock(&cpuhp_state_mutex);
+	for (i = CPUHP_OFFLINE; i <= CPUHP_ONLINE; i++) {
+		struct cpuhp_step *sp = cpuhp_get_step(i);
+
+		if (sp->name) {
+			cur = sprintf(buf, "%3d: %s\n", i, sp->name);
+			buf += cur;
+			res += cur;
+		}
+	}
+	mutex_unlock(&cpuhp_state_mutex);
+	return res;
+}
+static DEVICE_ATTR(states, 0444, show_cpuhp_states, NULL);
+
+static struct attribute *cpuhp_cpu_root_attrs[] = {
+	&dev_attr_states.attr,
+	NULL
+};
+
+static struct attribute_group cpuhp_cpu_root_attr_group = {
+	.attrs = cpuhp_cpu_root_attrs,
+	.name = "hotplug",
+	NULL
+};
+
+static int __init cpuhp_sysfs_init(void)
+{
+	int cpu, ret;
+
+	ret = sysfs_create_group(&cpu_subsys.dev_root->kobj,
+				 &cpuhp_cpu_root_attr_group);
+	if (ret)
+		return ret;
+
+	for_each_possible_cpu(cpu) {
+		struct device *dev = get_cpu_device(cpu);
+
+		if (!dev)
+			continue;
+		ret = sysfs_create_group(&dev->kobj, &cpuhp_cpu_attr_group);
+		if (ret)
+			return ret;
+	}
+	return 0;
+}
+device_initcall(cpuhp_sysfs_init);
+#endif
 
 /*
  * cpu_bit_bitmap[] is a special, "compressed" data structure that
@@ -789,3 +1673,25 @@
 {
 	cpumask_copy(&__cpu_online_mask, src);
 }
+
+/*
+ * Activate the first processor.
+ */
+void __init boot_cpu_init(void)
+{
+	int cpu = smp_processor_id();
+
+	/* Mark the boot cpu "present", "online" etc for SMP and UP case */
+	set_cpu_online(cpu, true);
+	set_cpu_active(cpu, true);
+	set_cpu_present(cpu, true);
+	set_cpu_possible(cpu, true);
+}
+
+/*
+ * Must be called _AFTER_ setting up the per_cpu areas
+ */
+void __init boot_cpu_state_init(void)
+{
+	per_cpu_ptr(&cpuhp_state, smp_processor_id())->state = CPUHP_ONLINE;
+}
diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
index 0167679..5f6ce93 100644
--- a/kernel/events/uprobes.c
+++ b/kernel/events/uprobes.c
@@ -1178,6 +1178,7 @@
 		goto free_area;
 
 	area->xol_mapping.name = "[uprobes]";
+	area->xol_mapping.fault = NULL;
 	area->xol_mapping.pages = area->pages;
 	area->pages[0] = alloc_page(GFP_HIGHUSER);
 	if (!area->pages[0])
diff --git a/kernel/irq/Kconfig b/kernel/irq/Kconfig
index 3b48dab..3bbfd6a 100644
--- a/kernel/irq/Kconfig
+++ b/kernel/irq/Kconfig
@@ -64,6 +64,10 @@
 	bool
 	select IRQ_DOMAIN
 
+# Generic IRQ IPI support
+config GENERIC_IRQ_IPI
+	bool
+
 # Generic MSI interrupt support
 config GENERIC_MSI_IRQ
 	bool
diff --git a/kernel/irq/Makefile b/kernel/irq/Makefile
index 2fc9cbd..2ee42e9 100644
--- a/kernel/irq/Makefile
+++ b/kernel/irq/Makefile
@@ -8,3 +8,4 @@
 obj-$(CONFIG_GENERIC_IRQ_MIGRATION) += cpuhotplug.o
 obj-$(CONFIG_PM_SLEEP) += pm.o
 obj-$(CONFIG_GENERIC_MSI_IRQ) += msi.o
+obj-$(CONFIG_GENERIC_IRQ_IPI) += ipi.o
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
index 5797909..2f9f2b0 100644
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -961,6 +961,7 @@
 	data = data->parent_data;
 	data->chip->irq_mask(data);
 }
+EXPORT_SYMBOL_GPL(irq_chip_mask_parent);
 
 /**
  * irq_chip_unmask_parent - Unmask the parent interrupt
@@ -971,6 +972,7 @@
 	data = data->parent_data;
 	data->chip->irq_unmask(data);
 }
+EXPORT_SYMBOL_GPL(irq_chip_unmask_parent);
 
 /**
  * irq_chip_eoi_parent - Invoke EOI on the parent interrupt
@@ -981,6 +983,7 @@
 	data = data->parent_data;
 	data->chip->irq_eoi(data);
 }
+EXPORT_SYMBOL_GPL(irq_chip_eoi_parent);
 
 /**
  * irq_chip_set_affinity_parent - Set affinity on the parent interrupt
@@ -1016,6 +1019,7 @@
 
 	return -ENOSYS;
 }
+EXPORT_SYMBOL_GPL(irq_chip_set_type_parent);
 
 /**
  * irq_chip_retrigger_hierarchy - Retrigger an interrupt in hardware
diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c
index 57bff78..a15b548 100644
--- a/kernel/irq/handle.c
+++ b/kernel/irq/handle.c
@@ -136,10 +136,9 @@
 {
 	irqreturn_t retval = IRQ_NONE;
 	unsigned int flags = 0, irq = desc->irq_data.irq;
-	struct irqaction *action = desc->action;
+	struct irqaction *action;
 
-	/* action might have become NULL since we dropped the lock */
-	while (action) {
+	for_each_action_of_desc(desc, action) {
 		irqreturn_t res;
 
 		trace_irq_handler_entry(irq, action);
@@ -173,7 +172,6 @@
 		}
 
 		retval |= res;
-		action = action->next;
 	}
 
 	add_interrupt_randomness(irq, flags);
diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h
index fcab63c..09be2c9 100644
--- a/kernel/irq/internals.h
+++ b/kernel/irq/internals.h
@@ -131,6 +131,9 @@
 #define IRQ_GET_DESC_CHECK_GLOBAL	(_IRQ_DESC_CHECK)
 #define IRQ_GET_DESC_CHECK_PERCPU	(_IRQ_DESC_CHECK | _IRQ_DESC_PERCPU)
 
+#define for_each_action_of_desc(desc, act)			\
+	for (act = desc->action; act; act = act->next)
+
 struct irq_desc *
 __irq_get_desc_lock(unsigned int irq, unsigned long *flags, bool bus,
 		    unsigned int check);
@@ -160,6 +163,8 @@
 	__irq_put_desc_unlock(desc, flags, false);
 }
 
+#define __irqd_to_state(d) ACCESS_PRIVATE((d)->common, state_use_accessors)
+
 /*
  * Manipulation functions for irq_data.state
  */
@@ -188,6 +193,8 @@
 	return __irqd_to_state(d) & mask;
 }
 
+#undef __irqd_to_state
+
 static inline void kstat_incr_irqs_this_cpu(struct irq_desc *desc)
 {
 	__this_cpu_inc(*desc->kstat_irqs);
diff --git a/kernel/irq/ipi.c b/kernel/irq/ipi.c
new file mode 100644
index 0000000..c37f34b
--- /dev/null
+++ b/kernel/irq/ipi.c
@@ -0,0 +1,326 @@
+/*
+ * linux/kernel/irq/ipi.c
+ *
+ * Copyright (C) 2015 Imagination Technologies Ltd
+ * Author: Qais Yousef <qais.yousef@imgtec.com>
+ *
+ * This file contains driver APIs to the IPI subsystem.
+ */
+
+#define pr_fmt(fmt) "genirq/ipi: " fmt
+
+#include <linux/irqdomain.h>
+#include <linux/irq.h>
+
+/**
+ * irq_reserve_ipi() - Set up an IPI to a destination cpumask
+ * @domain:	IPI domain
+ * @dest:	cpumask of cpus which can receive the IPI
+ *
+ * Allocate a virq that can be used to send an IPI to any CPU in the dest mask.
+ *
+ * Returns the Linux irq number on success and 0 on failure.
+ */
+unsigned int irq_reserve_ipi(struct irq_domain *domain,
+			     const struct cpumask *dest)
+{
+	unsigned int nr_irqs, offset;
+	struct irq_data *data;
+	int virq, i;
+
+	if (!domain || !irq_domain_is_ipi(domain)) {
+		pr_warn("Reservation on a non IPI domain\n");
+		return 0;
+	}
+
+	if (!cpumask_subset(dest, cpu_possible_mask)) {
+		pr_warn("Reservation is not in possible_cpu_mask\n");
+		return 0;
+	}
+
+	nr_irqs = cpumask_weight(dest);
+	if (!nr_irqs) {
+		pr_warn("Reservation for empty destination mask\n");
+		return 0;
+	}
+
+	if (irq_domain_is_ipi_single(domain)) {
+		/*
+		 * If the underlying implementation uses a single HW irq on
+		 * all cpus then we only need a single Linux irq number for
+		 * it. We have no restrictions vs. the destination mask. The
+		 * underlying implementation can deal with holes nicely.
+		 */
+		nr_irqs = 1;
+		offset = 0;
+	} else {
+		unsigned int next;
+
+		/*
+		 * The IPI requires a separate HW irq on each CPU. We require
+		 * that the destination mask is consecutive. If an
+		 * implementation needs to support holes, it can reserve
+		 * several IPI ranges.
+		 */
+		offset = cpumask_first(dest);
+		/*
+		 * Find a hole and if found look for another set bit after the
+		 * hole. For now we don't support this scenario.
+		 */
+		next = cpumask_next_zero(offset, dest);
+		if (next < nr_cpu_ids)
+			next = cpumask_next(next, dest);
+		if (next < nr_cpu_ids) {
+			pr_warn("Destination mask has holes\n");
+			return 0;
+		}
+	}
+
+	virq = irq_domain_alloc_descs(-1, nr_irqs, 0, NUMA_NO_NODE);
+	if (virq <= 0) {
+		pr_warn("Can't reserve IPI, failed to alloc descs\n");
+		return 0;
+	}
+
+	virq = __irq_domain_alloc_irqs(domain, virq, nr_irqs, NUMA_NO_NODE,
+				       (void *) dest, true);
+
+	if (virq <= 0) {
+		pr_warn("Can't reserve IPI, failed to alloc hw irqs\n");
+		goto free_descs;
+	}
+
+	for (i = 0; i < nr_irqs; i++) {
+		data = irq_get_irq_data(virq + i);
+		cpumask_copy(data->common->affinity, dest);
+		data->common->ipi_offset = offset;
+	}
+	return virq;
+
+free_descs:
+	irq_free_descs(virq, nr_irqs);
+	return 0;
+}
+
+/**
+ * irq_destroy_ipi() - unreserve an IPI that was previously allocated
+ * @irq:	linux irq number to be destroyed
+ *
+ * Return the IPIs allocated with irq_reserve_ipi() to the system destroying
+ * all virqs associated with them.
+ */
+void irq_destroy_ipi(unsigned int irq)
+{
+	struct irq_data *data = irq_get_irq_data(irq);
+	struct cpumask *ipimask = data ? irq_data_get_affinity_mask(data) : NULL;
+	struct irq_domain *domain;
+	unsigned int nr_irqs;
+
+	if (!irq || !data || !ipimask)
+		return;
+
+	domain = data->domain;
+	if (WARN_ON(domain == NULL))
+		return;
+
+	if (!irq_domain_is_ipi(domain)) {
+		pr_warn("Trying to destroy a non IPI domain!\n");
+		return;
+	}
+
+	if (irq_domain_is_ipi_per_cpu(domain))
+		nr_irqs = cpumask_weight(ipimask);
+	else
+		nr_irqs = 1;
+
+	irq_domain_free_irqs(irq, nr_irqs);
+}
+
+/**
+ * ipi_get_hwirq - Get the hwirq associated with an IPI to a cpu
+ * @irq:	linux irq number
+ * @cpu:	the target cpu
+ *
+ * When dealing with coprocessor IPIs, we need to inform the coprocessor of
+ * the hwirq it needs to use to receive and send IPIs.
+ *
+ * Returns hwirq value on success and INVALID_HWIRQ on failure.
+ */
+irq_hw_number_t ipi_get_hwirq(unsigned int irq, unsigned int cpu)
+{
+	struct irq_data *data = irq_get_irq_data(irq);
+	struct cpumask *ipimask = data ? irq_data_get_affinity_mask(data) : NULL;
+
+	if (!data || !ipimask || cpu >= nr_cpu_ids)
+		return INVALID_HWIRQ;
+
+	if (!cpumask_test_cpu(cpu, ipimask))
+		return INVALID_HWIRQ;
+
+	/*
+	 * Get the real hardware irq number if the underlying implementation
+	 * uses a separate irq per cpu. If the underlying implementation uses
+	 * a single hardware irq for all cpus then the IPI send mechanism
+	 * needs to take care of the cpu destinations.
+	 */
+	if (irq_domain_is_ipi_per_cpu(data->domain))
+		data = irq_get_irq_data(irq + cpu - data->common->ipi_offset);
+
+	return data ? irqd_to_hwirq(data) : INVALID_HWIRQ;
+}
+EXPORT_SYMBOL_GPL(ipi_get_hwirq);
+
+static int ipi_send_verify(struct irq_chip *chip, struct irq_data *data,
+			   const struct cpumask *dest, unsigned int cpu)
+{
+	struct cpumask *ipimask = irq_data_get_affinity_mask(data);
+
+	if (!chip || !ipimask)
+		return -EINVAL;
+
+	if (!chip->ipi_send_single && !chip->ipi_send_mask)
+		return -EINVAL;
+
+	if (cpu >= nr_cpu_ids)
+		return -EINVAL;
+
+	if (dest) {
+		if (!cpumask_subset(dest, ipimask))
+			return -EINVAL;
+	} else {
+		if (!cpumask_test_cpu(cpu, ipimask))
+			return -EINVAL;
+	}
+	return 0;
+}
+
+/**
+ * __ipi_send_single - send an IPI to a target Linux SMP CPU
+ * @desc:	pointer to irq_desc of the IRQ
+ * @cpu:	destination CPU, must be in the destination mask passed to
+ *		irq_reserve_ipi()
+ *
+ * This function is for architecture or core code to speed up IPI sending. Not
+ * usable from driver code.
+ *
+ * Returns zero on success and negative error number on failure.
+ */
+int __ipi_send_single(struct irq_desc *desc, unsigned int cpu)
+{
+	struct irq_data *data = irq_desc_get_irq_data(desc);
+	struct irq_chip *chip = irq_data_get_irq_chip(data);
+
+#ifdef DEBUG
+	/*
+	 * Minimise the overhead by omitting the checks for Linux SMP IPIs.
+	 * Since the callers should be arch or core code which is generally
+	 * trusted, only check for errors when debugging.
+	 */
+	if (WARN_ON_ONCE(ipi_send_verify(chip, data, NULL, cpu)))
+		return -EINVAL;
+#endif
+	if (!chip->ipi_send_single) {
+		chip->ipi_send_mask(data, cpumask_of(cpu));
+		return 0;
+	}
+
+	/* FIXME: Store this information in irqdata flags */
+	if (irq_domain_is_ipi_per_cpu(data->domain) &&
+	    cpu != data->common->ipi_offset) {
+		/* use the correct data for that cpu */
+		unsigned int irq = data->irq + cpu - data->common->ipi_offset;
+
+		data = irq_get_irq_data(irq);
+	}
+	chip->ipi_send_single(data, cpu);
+	return 0;
+}
+
+/**
+ * __ipi_send_mask - send an IPI to target Linux SMP CPU(s)
+ * @desc:	pointer to irq_desc of the IRQ
+ * @dest:	dest CPU(s), must be a subset of the mask passed to
+ *		irq_reserve_ipi()
+ *
+ * This function is for architecture or core code to speed up IPI sending. Not
+ * usable from driver code.
+ *
+ * Returns zero on success and negative error number on failure.
+ */
+int __ipi_send_mask(struct irq_desc *desc, const struct cpumask *dest)
+{
+	struct irq_data *data = irq_desc_get_irq_data(desc);
+	struct irq_chip *chip = irq_data_get_irq_chip(data);
+	unsigned int cpu;
+
+#ifdef DEBUG
+	/*
+	 * Minimise the overhead by omitting the checks for Linux SMP IPIs.
+	 * Since the callers should be arch or core code which is generally
+	 * trusted, only check for errors when debugging.
+	 */
+	if (WARN_ON_ONCE(ipi_send_verify(chip, data, dest, 0)))
+		return -EINVAL;
+#endif
+	if (chip->ipi_send_mask) {
+		chip->ipi_send_mask(data, dest);
+		return 0;
+	}
+
+	if (irq_domain_is_ipi_per_cpu(data->domain)) {
+		unsigned int base = data->irq;
+
+		for_each_cpu(cpu, dest) {
+			unsigned int irq = base + cpu - data->common->ipi_offset;
+
+			data = irq_get_irq_data(irq);
+			chip->ipi_send_single(data, cpu);
+		}
+	} else {
+		for_each_cpu(cpu, dest)
+			chip->ipi_send_single(data, cpu);
+	}
+	return 0;
+}
+
+/**
+ * ipi_send_single - Send an IPI to a single CPU
+ * @virq:	linux irq number from irq_reserve_ipi()
+ * @cpu:	destination CPU, must be in the destination mask passed to
+ *		irq_reserve_ipi()
+ *
+ * Returns zero on success and negative error number on failure.
+ */
+int ipi_send_single(unsigned int virq, unsigned int cpu)
+{
+	struct irq_desc *desc = irq_to_desc(virq);
+	struct irq_data *data = desc ? irq_desc_get_irq_data(desc) : NULL;
+	struct irq_chip *chip = data ? irq_data_get_irq_chip(data) : NULL;
+
+	if (WARN_ON_ONCE(ipi_send_verify(chip, data, NULL, cpu)))
+		return -EINVAL;
+
+	return __ipi_send_single(desc, cpu);
+}
+EXPORT_SYMBOL_GPL(ipi_send_single);
+
+/**
+ * ipi_send_mask - Send an IPI to target CPU(s)
+ * @virq:	linux irq number from irq_reserve_ipi()
+ * @dest:	dest CPU(s), must be a subset of the mask passed to
+ *		irq_reserve_ipi()
+ *
+ * Returns zero on success and negative error number on failure.
+ */
+int ipi_send_mask(unsigned int virq, const struct cpumask *dest)
+{
+	struct irq_desc *desc = irq_to_desc(virq);
+	struct irq_data *data = desc ? irq_desc_get_irq_data(desc) : NULL;
+	struct irq_chip *chip = data ? irq_data_get_irq_chip(data) : NULL;
+
+	if (WARN_ON_ONCE(ipi_send_verify(chip, data, dest, 0)))
+		return -EINVAL;
+
+	return __ipi_send_mask(desc, dest);
+}
+EXPORT_SYMBOL_GPL(ipi_send_mask);
diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c
index 0409da0..0ccd028 100644
--- a/kernel/irq/irqdesc.c
+++ b/kernel/irq/irqdesc.c
@@ -24,10 +24,27 @@
 static struct lock_class_key irq_desc_lock_class;
 
 #if defined(CONFIG_SMP)
+static int __init irq_affinity_setup(char *str)
+{
+	zalloc_cpumask_var(&irq_default_affinity, GFP_NOWAIT);
+	cpulist_parse(str, irq_default_affinity);
+	/*
+	 * Set at least the boot cpu. We don't want to end up with
+	 * bug reports caused by random command line masks
+	 */
+	cpumask_set_cpu(smp_processor_id(), irq_default_affinity);
+	return 1;
+}
+__setup("irqaffinity=", irq_affinity_setup);
+
 static void __init init_irq_default_affinity(void)
 {
-	alloc_cpumask_var(&irq_default_affinity, GFP_NOWAIT);
-	cpumask_setall(irq_default_affinity);
+#ifdef CONFIG_CPUMASK_OFFSTACK
+	if (!irq_default_affinity)
+		zalloc_cpumask_var(&irq_default_affinity, GFP_NOWAIT);
+#endif
+	if (cpumask_empty(irq_default_affinity))
+		cpumask_setall(irq_default_affinity);
 }
 #else
 static void __init init_irq_default_affinity(void)
diff --git a/kernel/irq/irqdomain.c b/kernel/irq/irqdomain.c
index 3e56d2f0..3a519a0 100644
--- a/kernel/irq/irqdomain.c
+++ b/kernel/irq/irqdomain.c
@@ -23,8 +23,6 @@
 static DEFINE_MUTEX(revmap_trees_mutex);
 static struct irq_domain *irq_default_domain;
 
-static int irq_domain_alloc_descs(int virq, unsigned int nr_irqs,
-				  irq_hw_number_t hwirq, int node);
 static void irq_domain_check_hierarchy(struct irq_domain *domain);
 
 struct irqchip_fwid {
@@ -840,8 +838,8 @@
 };
 EXPORT_SYMBOL_GPL(irq_domain_simple_ops);
 
-static int irq_domain_alloc_descs(int virq, unsigned int cnt,
-				  irq_hw_number_t hwirq, int node)
+int irq_domain_alloc_descs(int virq, unsigned int cnt, irq_hw_number_t hwirq,
+			   int node)
 {
 	unsigned int hint;
 
@@ -895,6 +893,7 @@
 
 	return domain;
 }
+EXPORT_SYMBOL_GPL(irq_domain_create_hierarchy);
 
 static void irq_domain_insert_irq(int virq)
 {
@@ -1045,6 +1044,7 @@
 
 	return 0;
 }
+EXPORT_SYMBOL_GPL(irq_domain_set_hwirq_and_chip);
 
 /**
  * irq_domain_set_info - Set the complete data for a @virq in @domain
@@ -1078,6 +1078,7 @@
 	irq_data->chip = &no_irq_chip;
 	irq_data->chip_data = NULL;
 }
+EXPORT_SYMBOL_GPL(irq_domain_reset_irq_data);
 
 /**
  * irq_domain_free_irqs_common - Clear irq_data and free the parent
@@ -1275,6 +1276,7 @@
 						       nr_irqs, arg);
 	return -ENOSYS;
 }
+EXPORT_SYMBOL_GPL(irq_domain_alloc_irqs_parent);
 
 /**
  * irq_domain_free_irqs_parent - Free interrupts from parent domain
@@ -1292,6 +1294,7 @@
 		irq_domain_free_irqs_recursive(domain->parent, irq_base,
 					       nr_irqs);
 }
+EXPORT_SYMBOL_GPL(irq_domain_free_irqs_parent);
 
 /**
  * irq_domain_activate_irq - Call domain_ops->activate recursively to activate
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 8411872..3ddd229 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -144,13 +144,11 @@
  */
 void irq_set_thread_affinity(struct irq_desc *desc)
 {
-	struct irqaction *action = desc->action;
+	struct irqaction *action;
 
-	while (action) {
+	for_each_action_of_desc(desc, action)
 		if (action->thread)
 			set_bit(IRQTF_AFFINITY, &action->thread_flags);
-		action = action->next;
-	}
 }
 
 #ifdef CONFIG_GENERIC_PENDING_IRQ
@@ -994,7 +992,7 @@
 		return;
 
 	raw_spin_lock_irqsave(&desc->lock, flags);
-	for (action = desc->action; action; action = action->next) {
+	for_each_action_of_desc(desc, action) {
 		if (action->dev_id == dev_id) {
 			if (action->thread)
 				__irq_wake_thread(desc, action);
diff --git a/kernel/irq/proc.c b/kernel/irq/proc.c
index a2c02fd..4e1b947 100644
--- a/kernel/irq/proc.c
+++ b/kernel/irq/proc.c
@@ -291,7 +291,7 @@
 	int ret = 1;
 
 	raw_spin_lock_irqsave(&desc->lock, flags);
-	for (action = desc->action ; action; action = action->next) {
+	for_each_action_of_desc(desc, action) {
 		if ((action != new_action) && action->name &&
 				!strcmp(new_action->name, action->name)) {
 			ret = 0;
diff --git a/kernel/irq/spurious.c b/kernel/irq/spurious.c
index 3214417..5707f97 100644
--- a/kernel/irq/spurious.c
+++ b/kernel/irq/spurious.c
@@ -211,14 +211,12 @@
 	 * desc->lock here. See synchronize_irq().
 	 */
 	raw_spin_lock_irqsave(&desc->lock, flags);
-	action = desc->action;
-	while (action) {
+	for_each_action_of_desc(desc, action) {
 		printk(KERN_ERR "[<%p>] %pf", action->handler, action->handler);
 		if (action->thread_fn)
 			printk(KERN_CONT " threaded [<%p>] %pf",
 					action->thread_fn, action->thread_fn);
 		printk(KERN_CONT "\n");
-		action = action->next;
 	}
 	raw_spin_unlock_irqrestore(&desc->lock, flags);
 }
diff --git a/kernel/rcu/rcutorture.c b/kernel/rcu/rcutorture.c
index 4d5cc6a..250ea67 100644
--- a/kernel/rcu/rcutorture.c
+++ b/kernel/rcu/rcutorture.c
@@ -930,12 +930,14 @@
 	int nsynctypes = 0;
 
 	VERBOSE_TOROUT_STRING("rcu_torture_writer task started");
-	pr_alert("%s" TORTURE_FLAG
-		 " Grace periods expedited from boot/sysfs for %s,\n",
-		 torture_type, cur_ops->name);
-	pr_alert("%s" TORTURE_FLAG
-		 " Testing of dynamic grace-period expediting diabled.\n",
-		 torture_type);
+	if (!can_expedite) {
+		pr_alert("%s" TORTURE_FLAG
+			 " Grace periods expedited from boot/sysfs for %s,\n",
+			 torture_type, cur_ops->name);
+		pr_alert("%s" TORTURE_FLAG
+			 " Disabled dynamic grace-period expediting.\n",
+			 torture_type);
+	}
 
 	/* Initialize synctype[] array.  If none set, take default. */
 	if (!gp_cond1 && !gp_exp1 && !gp_normal1 && !gp_sync1)
diff --git a/kernel/rcu/tiny_plugin.h b/kernel/rcu/tiny_plugin.h
index e492a52..196f030 100644
--- a/kernel/rcu/tiny_plugin.h
+++ b/kernel/rcu/tiny_plugin.h
@@ -23,7 +23,7 @@
  */
 
 #include <linux/kthread.h>
-#include <linux/module.h>
+#include <linux/init.h>
 #include <linux/debugfs.h>
 #include <linux/seq_file.h>
 
@@ -122,18 +122,7 @@
 	debugfs_remove_recursive(rcudir);
 	return 1;
 }
-
-static void __exit rcutiny_trace_cleanup(void)
-{
-	debugfs_remove_recursive(rcudir);
-}
-
-module_init(rcutiny_trace_init);
-module_exit(rcutiny_trace_cleanup);
-
-MODULE_AUTHOR("Paul E. McKenney");
-MODULE_DESCRIPTION("Read-Copy Update tracing for tiny implementation");
-MODULE_LICENSE("GPL");
+device_initcall(rcutiny_trace_init);
 
 static void check_cpu_stall(struct rcu_ctrlblk *rcp)
 {
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 9fd5b62..9a535a8 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -108,7 +108,6 @@
 RCU_STATE_INITIALIZER(rcu_bh, 'b', call_rcu_bh);
 
 static struct rcu_state *const rcu_state_p;
-static struct rcu_data __percpu *const rcu_data_p;
 LIST_HEAD(rcu_struct_flavors);
 
 /* Dump rcu_node combining tree at boot to verify correct setup. */
@@ -1083,13 +1082,12 @@
 	rcu_sysidle_check_cpu(rdp, isidle, maxj);
 	if ((rdp->dynticks_snap & 0x1) == 0) {
 		trace_rcu_fqs(rdp->rsp->name, rdp->gpnum, rdp->cpu, TPS("dti"));
-		return 1;
-	} else {
 		if (ULONG_CMP_LT(READ_ONCE(rdp->gpnum) + ULONG_MAX / 4,
 				 rdp->mynode->gpnum))
 			WRITE_ONCE(rdp->gpwrap, true);
-		return 0;
+		return 1;
 	}
+	return 0;
 }
 
 /*
@@ -1173,15 +1171,16 @@
 			smp_mb(); /* ->cond_resched_completed before *rcrmp. */
 			WRITE_ONCE(*rcrmp,
 				   READ_ONCE(*rcrmp) + rdp->rsp->flavor_mask);
-			resched_cpu(rdp->cpu);  /* Force CPU into scheduler. */
-			rdp->rsp->jiffies_resched += 5; /* Enable beating. */
-		} else if (ULONG_CMP_GE(jiffies, rdp->rsp->jiffies_resched)) {
-			/* Time to beat on that CPU again! */
-			resched_cpu(rdp->cpu);  /* Force CPU into scheduler. */
-			rdp->rsp->jiffies_resched += 5; /* Re-enable beating. */
 		}
+		rdp->rsp->jiffies_resched += 5; /* Re-enable beating. */
 	}
 
+	/* And if it has been a really long time, kick the CPU as well. */
+	if (ULONG_CMP_GE(jiffies,
+			 rdp->rsp->gp_start + 2 * jiffies_till_sched_qs) ||
+	    ULONG_CMP_GE(jiffies, rdp->rsp->gp_start + jiffies_till_sched_qs))
+		resched_cpu(rdp->cpu);  /* Force CPU into scheduler. */
+
 	return 0;
 }
 
@@ -1246,7 +1245,7 @@
 				if (rnp->qsmask & (1UL << cpu))
 					dump_cpu_task(rnp->grplo + cpu);
 		}
-		raw_spin_unlock_irqrestore(&rnp->lock, flags);
+		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 	}
 }
 
@@ -1266,12 +1265,12 @@
 	raw_spin_lock_irqsave_rcu_node(rnp, flags);
 	delta = jiffies - READ_ONCE(rsp->jiffies_stall);
 	if (delta < RCU_STALL_RAT_DELAY || !rcu_gp_in_progress(rsp)) {
-		raw_spin_unlock_irqrestore(&rnp->lock, flags);
+		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 		return;
 	}
 	WRITE_ONCE(rsp->jiffies_stall,
 		   jiffies + 3 * rcu_jiffies_till_stall_check() + 3);
-	raw_spin_unlock_irqrestore(&rnp->lock, flags);
+	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 
 	/*
 	 * OK, time to rat on our buddy...
@@ -1292,7 +1291,7 @@
 					ndetected++;
 				}
 		}
-		raw_spin_unlock_irqrestore(&rnp->lock, flags);
+		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 	}
 
 	print_cpu_stall_info_end();
@@ -1357,7 +1356,7 @@
 	if (ULONG_CMP_GE(jiffies, READ_ONCE(rsp->jiffies_stall)))
 		WRITE_ONCE(rsp->jiffies_stall,
 			   jiffies + 3 * rcu_jiffies_till_stall_check() + 3);
-	raw_spin_unlock_irqrestore(&rnp->lock, flags);
+	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 
 	/*
 	 * Attempt to revive the RCU machinery by forcing a context switch.
@@ -1595,7 +1594,7 @@
 	}
 unlock_out:
 	if (rnp != rnp_root)
-		raw_spin_unlock(&rnp_root->lock);
+		raw_spin_unlock_rcu_node(rnp_root);
 out:
 	if (c_out != NULL)
 		*c_out = c;
@@ -1814,7 +1813,7 @@
 		return;
 	}
 	needwake = __note_gp_changes(rsp, rnp, rdp);
-	raw_spin_unlock_irqrestore(&rnp->lock, flags);
+	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 	if (needwake)
 		rcu_gp_kthread_wake(rsp);
 }
@@ -1839,7 +1838,7 @@
 	raw_spin_lock_irq_rcu_node(rnp);
 	if (!READ_ONCE(rsp->gp_flags)) {
 		/* Spurious wakeup, tell caller to go back to sleep.  */
-		raw_spin_unlock_irq(&rnp->lock);
+		raw_spin_unlock_irq_rcu_node(rnp);
 		return false;
 	}
 	WRITE_ONCE(rsp->gp_flags, 0); /* Clear all flags: New grace period. */
@@ -1849,7 +1848,7 @@
 		 * Grace period already in progress, don't start another.
 		 * Not supposed to be able to happen.
 		 */
-		raw_spin_unlock_irq(&rnp->lock);
+		raw_spin_unlock_irq_rcu_node(rnp);
 		return false;
 	}
 
@@ -1858,7 +1857,7 @@
 	/* Record GP times before starting GP, hence smp_store_release(). */
 	smp_store_release(&rsp->gpnum, rsp->gpnum + 1);
 	trace_rcu_grace_period(rsp->name, rsp->gpnum, TPS("start"));
-	raw_spin_unlock_irq(&rnp->lock);
+	raw_spin_unlock_irq_rcu_node(rnp);
 
 	/*
 	 * Apply per-leaf buffered online and offline operations to the
@@ -1872,7 +1871,7 @@
 		if (rnp->qsmaskinit == rnp->qsmaskinitnext &&
 		    !rnp->wait_blkd_tasks) {
 			/* Nothing to do on this leaf rcu_node structure. */
-			raw_spin_unlock_irq(&rnp->lock);
+			raw_spin_unlock_irq_rcu_node(rnp);
 			continue;
 		}
 
@@ -1906,7 +1905,7 @@
 			rcu_cleanup_dead_rnp(rnp);
 		}
 
-		raw_spin_unlock_irq(&rnp->lock);
+		raw_spin_unlock_irq_rcu_node(rnp);
 	}
 
 	/*
@@ -1937,7 +1936,7 @@
 		trace_rcu_grace_period_init(rsp->name, rnp->gpnum,
 					    rnp->level, rnp->grplo,
 					    rnp->grphi, rnp->qsmask);
-		raw_spin_unlock_irq(&rnp->lock);
+		raw_spin_unlock_irq_rcu_node(rnp);
 		cond_resched_rcu_qs();
 		WRITE_ONCE(rsp->gp_activity, jiffies);
 	}
@@ -1995,7 +1994,7 @@
 		raw_spin_lock_irq_rcu_node(rnp);
 		WRITE_ONCE(rsp->gp_flags,
 			   READ_ONCE(rsp->gp_flags) & ~RCU_GP_FLAG_FQS);
-		raw_spin_unlock_irq(&rnp->lock);
+		raw_spin_unlock_irq_rcu_node(rnp);
 	}
 }
 
@@ -2025,7 +2024,7 @@
 	 * safe for us to drop the lock in order to mark the grace
 	 * period as completed in all of the rcu_node structures.
 	 */
-	raw_spin_unlock_irq(&rnp->lock);
+	raw_spin_unlock_irq_rcu_node(rnp);
 
 	/*
 	 * Propagate new ->completed value to rcu_node structures so
@@ -2047,7 +2046,7 @@
 		/* smp_mb() provided by prior unlock-lock pair. */
 		nocb += rcu_future_gp_cleanup(rsp, rnp);
 		sq = rcu_nocb_gp_get(rnp);
-		raw_spin_unlock_irq(&rnp->lock);
+		raw_spin_unlock_irq_rcu_node(rnp);
 		rcu_nocb_gp_cleanup(sq);
 		cond_resched_rcu_qs();
 		WRITE_ONCE(rsp->gp_activity, jiffies);
@@ -2070,7 +2069,7 @@
 				       READ_ONCE(rsp->gpnum),
 				       TPS("newreq"));
 	}
-	raw_spin_unlock_irq(&rnp->lock);
+	raw_spin_unlock_irq_rcu_node(rnp);
 }
 
 /*
@@ -2236,18 +2235,20 @@
 }
 
 /*
- * Report a full set of quiescent states to the specified rcu_state
- * data structure.  This involves cleaning up after the prior grace
- * period and letting rcu_start_gp() start up the next grace period
- * if one is needed.  Note that the caller must hold rnp->lock, which
- * is released before return.
+ * Report a full set of quiescent states to the specified rcu_state data
+ * structure.  Invoke rcu_gp_kthread_wake() to awaken the grace-period
+ * kthread if another grace period is required.  Whether we wake
+ * the grace-period kthread or it awakens itself for the next round
+ * of quiescent-state forcing, that kthread will clean up after the
+ * just-completed grace period.  Note that the caller must hold rnp->lock,
+ * which is released before return.
  */
 static void rcu_report_qs_rsp(struct rcu_state *rsp, unsigned long flags)
 	__releases(rcu_get_root(rsp)->lock)
 {
 	WARN_ON_ONCE(!rcu_gp_in_progress(rsp));
 	WRITE_ONCE(rsp->gp_flags, READ_ONCE(rsp->gp_flags) | RCU_GP_FLAG_FQS);
-	raw_spin_unlock_irqrestore(&rcu_get_root(rsp)->lock, flags);
+	raw_spin_unlock_irqrestore_rcu_node(rcu_get_root(rsp), flags);
 	swake_up(&rsp->gp_wq);  /* Memory barrier implied by swake_up() path. */
 }
 
@@ -2277,7 +2278,7 @@
 			 * Our bit has already been cleared, or the
 			 * relevant grace period is already over, so done.
 			 */
-			raw_spin_unlock_irqrestore(&rnp->lock, flags);
+			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 			return;
 		}
 		WARN_ON_ONCE(oldmask); /* Any child must be all zeroed! */
@@ -2289,7 +2290,7 @@
 		if (rnp->qsmask != 0 || rcu_preempt_blocked_readers_cgp(rnp)) {
 
 			/* Other bits still set at this level, so done. */
-			raw_spin_unlock_irqrestore(&rnp->lock, flags);
+			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 			return;
 		}
 		mask = rnp->grpmask;
@@ -2299,7 +2300,7 @@
 
 			break;
 		}
-		raw_spin_unlock_irqrestore(&rnp->lock, flags);
+		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 		rnp_c = rnp;
 		rnp = rnp->parent;
 		raw_spin_lock_irqsave_rcu_node(rnp, flags);
@@ -2331,7 +2332,7 @@
 
 	if (rcu_state_p == &rcu_sched_state || rsp != rcu_state_p ||
 	    rnp->qsmask != 0 || rcu_preempt_blocked_readers_cgp(rnp)) {
-		raw_spin_unlock_irqrestore(&rnp->lock, flags);
+		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 		return;  /* Still need more quiescent states! */
 	}
 
@@ -2348,19 +2349,14 @@
 	/* Report up the rest of the hierarchy, tracking current ->gpnum. */
 	gps = rnp->gpnum;
 	mask = rnp->grpmask;
-	raw_spin_unlock(&rnp->lock);	/* irqs remain disabled. */
+	raw_spin_unlock_rcu_node(rnp);	/* irqs remain disabled. */
 	raw_spin_lock_rcu_node(rnp_p);	/* irqs already disabled. */
 	rcu_report_qs_rnp(mask, rsp, rnp_p, gps, flags);
 }
 
 /*
  * Record a quiescent state for the specified CPU to that CPU's rcu_data
- * structure.  This must be either called from the specified CPU, or
- * called when the specified CPU is known to be offline (and when it is
- * also known that no other CPU is concurrently trying to help the offline
- * CPU).  The lastcomp argument is used to make sure we are still in the
- * grace period of interest.  We don't want to end the current grace period
- * based on quiescent states detected in an earlier grace period!
+ * structure.  This must be called from the specified CPU.
  */
 static void
 rcu_report_qs_rdp(int cpu, struct rcu_state *rsp, struct rcu_data *rdp)
@@ -2385,14 +2381,14 @@
 		 */
 		rdp->cpu_no_qs.b.norm = true;	/* need qs for new gp. */
 		rdp->rcu_qs_ctr_snap = __this_cpu_read(rcu_qs_ctr);
-		raw_spin_unlock_irqrestore(&rnp->lock, flags);
+		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 		return;
 	}
 	mask = rdp->grpmask;
 	if ((rnp->qsmask & mask) == 0) {
-		raw_spin_unlock_irqrestore(&rnp->lock, flags);
+		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 	} else {
-		rdp->core_needs_qs = 0;
+		rdp->core_needs_qs = false;
 
 		/*
 		 * This GP can't end until cpu checks in, so all of our
@@ -2601,36 +2597,15 @@
 		rnp->qsmaskinit &= ~mask;
 		rnp->qsmask &= ~mask;
 		if (rnp->qsmaskinit) {
-			raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
+			raw_spin_unlock_rcu_node(rnp);
+			/* irqs remain disabled. */
 			return;
 		}
-		raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
+		raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */
 	}
 }
 
 /*
- * The CPU is exiting the idle loop into the arch_cpu_idle_dead()
- * function.  We now remove it from the rcu_node tree's ->qsmaskinit
- * bit masks.
- */
-static void rcu_cleanup_dying_idle_cpu(int cpu, struct rcu_state *rsp)
-{
-	unsigned long flags;
-	unsigned long mask;
-	struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
-	struct rcu_node *rnp = rdp->mynode;  /* Outgoing CPU's rdp & rnp. */
-
-	if (!IS_ENABLED(CONFIG_HOTPLUG_CPU))
-		return;
-
-	/* Remove outgoing CPU from mask in the leaf rcu_node structure. */
-	mask = rdp->grpmask;
-	raw_spin_lock_irqsave_rcu_node(rnp, flags); /* Enforce GP memory-order guarantee. */
-	rnp->qsmaskinitnext &= ~mask;
-	raw_spin_unlock_irqrestore(&rnp->lock, flags);
-}
-
-/*
  * The CPU has been completely removed, and some other CPU is reporting
  * this fact from process context.  Do the remainder of the cleanup,
  * including orphaning the outgoing CPU's RCU callbacks, and also
@@ -2861,7 +2836,7 @@
 			rcu_report_qs_rnp(mask, rsp, rnp, rnp->gpnum, flags);
 		} else {
 			/* Nothing to do here, so just drop the lock. */
-			raw_spin_unlock_irqrestore(&rnp->lock, flags);
+			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 		}
 	}
 }
@@ -2897,11 +2872,11 @@
 	raw_spin_unlock(&rnp_old->fqslock);
 	if (READ_ONCE(rsp->gp_flags) & RCU_GP_FLAG_FQS) {
 		rsp->n_force_qs_lh++;
-		raw_spin_unlock_irqrestore(&rnp_old->lock, flags);
+		raw_spin_unlock_irqrestore_rcu_node(rnp_old, flags);
 		return;  /* Someone beat us to it. */
 	}
 	WRITE_ONCE(rsp->gp_flags, READ_ONCE(rsp->gp_flags) | RCU_GP_FLAG_FQS);
-	raw_spin_unlock_irqrestore(&rnp_old->lock, flags);
+	raw_spin_unlock_irqrestore_rcu_node(rnp_old, flags);
 	swake_up(&rsp->gp_wq); /* Memory barrier implied by swake_up() path. */
 }
 
@@ -2927,7 +2902,7 @@
 	if (cpu_needs_another_gp(rsp, rdp)) {
 		raw_spin_lock_rcu_node(rcu_get_root(rsp)); /* irqs disabled. */
 		needwake = rcu_start_gp(rsp);
-		raw_spin_unlock_irqrestore(&rcu_get_root(rsp)->lock, flags);
+		raw_spin_unlock_irqrestore_rcu_node(rcu_get_root(rsp), flags);
 		if (needwake)
 			rcu_gp_kthread_wake(rsp);
 	} else {
@@ -3018,7 +2993,7 @@
 
 			raw_spin_lock_rcu_node(rnp_root);
 			needwake = rcu_start_gp(rsp);
-			raw_spin_unlock(&rnp_root->lock);
+			raw_spin_unlock_rcu_node(rnp_root);
 			if (needwake)
 				rcu_gp_kthread_wake(rsp);
 		} else {
@@ -3438,14 +3413,14 @@
 	rcu_for_each_leaf_node(rsp, rnp) {
 		raw_spin_lock_irqsave_rcu_node(rnp, flags);
 		if (rnp->expmaskinit == rnp->expmaskinitnext) {
-			raw_spin_unlock_irqrestore(&rnp->lock, flags);
+			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 			continue;  /* No new CPUs, nothing to do. */
 		}
 
 		/* Update this node's mask, track old value for propagation. */
 		oldmask = rnp->expmaskinit;
 		rnp->expmaskinit = rnp->expmaskinitnext;
-		raw_spin_unlock_irqrestore(&rnp->lock, flags);
+		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 
 		/* If was already nonzero, nothing to propagate. */
 		if (oldmask)
@@ -3460,7 +3435,7 @@
 			if (rnp_up->expmaskinit)
 				done = true;
 			rnp_up->expmaskinit |= mask;
-			raw_spin_unlock_irqrestore(&rnp_up->lock, flags);
+			raw_spin_unlock_irqrestore_rcu_node(rnp_up, flags);
 			if (done)
 				break;
 			mask = rnp_up->grpmask;
@@ -3483,7 +3458,7 @@
 		raw_spin_lock_irqsave_rcu_node(rnp, flags);
 		WARN_ON_ONCE(rnp->expmask);
 		rnp->expmask = rnp->expmaskinit;
-		raw_spin_unlock_irqrestore(&rnp->lock, flags);
+		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 	}
 }
 
@@ -3524,11 +3499,11 @@
 			if (!rnp->expmask)
 				rcu_initiate_boost(rnp, flags);
 			else
-				raw_spin_unlock_irqrestore(&rnp->lock, flags);
+				raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 			break;
 		}
 		if (rnp->parent == NULL) {
-			raw_spin_unlock_irqrestore(&rnp->lock, flags);
+			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 			if (wake) {
 				smp_mb(); /* EGP done before wake_up(). */
 				swake_up(&rsp->expedited_wq);
@@ -3536,7 +3511,7 @@
 			break;
 		}
 		mask = rnp->grpmask;
-		raw_spin_unlock(&rnp->lock); /* irqs remain disabled */
+		raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled */
 		rnp = rnp->parent;
 		raw_spin_lock_rcu_node(rnp); /* irqs already disabled */
 		WARN_ON_ONCE(!(rnp->expmask & mask));
@@ -3571,7 +3546,7 @@
 
 	raw_spin_lock_irqsave_rcu_node(rnp, flags);
 	if (!(rnp->expmask & mask)) {
-		raw_spin_unlock_irqrestore(&rnp->lock, flags);
+		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 		return;
 	}
 	rnp->expmask &= ~mask;
@@ -3732,7 +3707,7 @@
 		 */
 		if (rcu_preempt_has_tasks(rnp))
 			rnp->exp_tasks = rnp->blkd_tasks.next;
-		raw_spin_unlock_irqrestore(&rnp->lock, flags);
+		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 
 		/* IPI the remaining CPUs for expedited quiescent state. */
 		mask = 1;
@@ -3749,7 +3724,7 @@
 			raw_spin_lock_irqsave_rcu_node(rnp, flags);
 			if (cpu_online(cpu) &&
 			    (rnp->expmask & mask)) {
-				raw_spin_unlock_irqrestore(&rnp->lock, flags);
+				raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 				schedule_timeout_uninterruptible(1);
 				if (cpu_online(cpu) &&
 				    (rnp->expmask & mask))
@@ -3758,7 +3733,7 @@
 			}
 			if (!(rnp->expmask & mask))
 				mask_ofl_ipi &= ~mask;
-			raw_spin_unlock_irqrestore(&rnp->lock, flags);
+			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 		}
 		/* Report quiescent states for those that went offline. */
 		mask_ofl_test |= mask_ofl_ipi;
@@ -4165,7 +4140,7 @@
 			return;
 		raw_spin_lock_rcu_node(rnp); /* Interrupts already disabled. */
 		rnp->qsmaskinit |= mask;
-		raw_spin_unlock(&rnp->lock); /* Interrupts remain disabled. */
+		raw_spin_unlock_rcu_node(rnp); /* Interrupts remain disabled. */
 	}
 }
 
@@ -4189,7 +4164,7 @@
 	rdp->rsp = rsp;
 	mutex_init(&rdp->exp_funnel_mutex);
 	rcu_boot_init_nocb_percpu_data(rdp);
-	raw_spin_unlock_irqrestore(&rnp->lock, flags);
+	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 }
 
 /*
@@ -4217,7 +4192,7 @@
 	rcu_sysidle_init_percpu_data(rdp->dynticks);
 	atomic_set(&rdp->dynticks->dynticks,
 		   (atomic_read(&rdp->dynticks->dynticks) & ~0x1) + 1);
-	raw_spin_unlock(&rnp->lock);		/* irqs remain disabled. */
+	raw_spin_unlock_rcu_node(rnp);		/* irqs remain disabled. */
 
 	/*
 	 * Add CPU to leaf rcu_node pending-online bitmask.  Any needed
@@ -4238,7 +4213,7 @@
 	rdp->rcu_qs_ctr_snap = per_cpu(rcu_qs_ctr, cpu);
 	rdp->core_needs_qs = false;
 	trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("cpuonl"));
-	raw_spin_unlock_irqrestore(&rnp->lock, flags);
+	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 }
 
 static void rcu_prepare_cpu(int cpu)
@@ -4249,6 +4224,46 @@
 		rcu_init_percpu_data(cpu, rsp);
 }
 
+#ifdef CONFIG_HOTPLUG_CPU
+/*
+ * The CPU is exiting the idle loop into the arch_cpu_idle_dead()
+ * function.  We now remove it from the rcu_node tree's ->qsmaskinit
+ * bit masks.
+ */
+static void rcu_cleanup_dying_idle_cpu(int cpu, struct rcu_state *rsp)
+{
+	unsigned long flags;
+	unsigned long mask;
+	struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
+	struct rcu_node *rnp = rdp->mynode;  /* Outgoing CPU's rdp & rnp. */
+
+	if (!IS_ENABLED(CONFIG_HOTPLUG_CPU))
+		return;
+
+	/* Remove outgoing CPU from mask in the leaf rcu_node structure. */
+	mask = rdp->grpmask;
+	raw_spin_lock_irqsave_rcu_node(rnp, flags); /* Enforce GP memory-order guarantee. */
+	rnp->qsmaskinitnext &= ~mask;
+	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
+}
+
+void rcu_report_dead(unsigned int cpu)
+{
+	struct rcu_state *rsp;
+
+	/* QS for any half-done expedited RCU-sched GP. */
+	preempt_disable();
+	rcu_report_exp_rdp(&rcu_sched_state,
+			   this_cpu_ptr(rcu_sched_state.rda), true);
+	preempt_enable();
+	for_each_rcu_flavor(rsp)
+		rcu_cleanup_dying_idle_cpu(cpu, rsp);
+}
+#endif
+
 /*
  * Handle CPU online/offline notification events.
  */
@@ -4280,17 +4295,6 @@
 		for_each_rcu_flavor(rsp)
 			rcu_cleanup_dying_cpu(rsp);
 		break;
-	case CPU_DYING_IDLE:
-		/* QS for any half-done expedited RCU-sched GP. */
-		preempt_disable();
-		rcu_report_exp_rdp(&rcu_sched_state,
-				   this_cpu_ptr(rcu_sched_state.rda), true);
-		preempt_enable();
-
-		for_each_rcu_flavor(rsp) {
-			rcu_cleanup_dying_idle_cpu(cpu, rsp);
-		}
-		break;
 	case CPU_DEAD:
 	case CPU_DEAD_FROZEN:
 	case CPU_UP_CANCELED:
@@ -4360,7 +4364,7 @@
 			sp.sched_priority = kthread_prio;
 			sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
 		}
-		raw_spin_unlock_irqrestore(&rnp->lock, flags);
+		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 		wake_up_process(t);
 	}
 	rcu_spawn_nocb_kthreads();
@@ -4451,8 +4455,8 @@
 		cpustride *= levelspread[i];
 		rnp = rsp->level[i];
 		for (j = 0; j < levelcnt[i]; j++, rnp++) {
-			raw_spin_lock_init(&rnp->lock);
-			lockdep_set_class_and_name(&rnp->lock,
+			raw_spin_lock_init(&ACCESS_PRIVATE(rnp, lock));
+			lockdep_set_class_and_name(&ACCESS_PRIVATE(rnp, lock),
 						   &rcu_node_class[i], buf[i]);
 			raw_spin_lock_init(&rnp->fqslock);
 			lockdep_set_class_and_name(&rnp->fqslock,
diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h
index bbd235d..df668c0 100644
--- a/kernel/rcu/tree.h
+++ b/kernel/rcu/tree.h
@@ -150,8 +150,9 @@
  * Definition for node within the RCU grace-period-detection hierarchy.
  */
 struct rcu_node {
-	raw_spinlock_t lock;	/* Root rcu_node's lock protects some */
-				/*  rcu_state fields as well as following. */
+	raw_spinlock_t __private lock;	/* Root rcu_node's lock protects */
+					/*  some rcu_state fields as well as */
+					/*  following. */
 	unsigned long gpnum;	/* Current grace period for this node. */
 				/*  This will either be equal to or one */
 				/*  behind the root rcu_node's gpnum. */
@@ -682,7 +683,7 @@
 #endif /* #else #ifdef CONFIG_PPC */
 
 /*
- * Wrappers for the rcu_node::lock acquire.
+ * Wrappers for the rcu_node::lock acquire and release.
  *
  * Because the rcu_nodes form a tree, the tree traversal locking will observe
  * different lock values, this in turn means that an UNLOCK of one level
@@ -691,29 +692,48 @@
  *
  * In order to restore full ordering between tree levels, augment the regular
  * lock acquire functions with smp_mb__after_unlock_lock().
+ *
+ * Because ->lock of struct rcu_node is a __private field, one should use
+ * these wrappers rather than directly calling raw_spin_{lock,unlock}* on ->lock.
  */
 static inline void raw_spin_lock_rcu_node(struct rcu_node *rnp)
 {
-	raw_spin_lock(&rnp->lock);
+	raw_spin_lock(&ACCESS_PRIVATE(rnp, lock));
 	smp_mb__after_unlock_lock();
 }
 
+static inline void raw_spin_unlock_rcu_node(struct rcu_node *rnp)
+{
+	raw_spin_unlock(&ACCESS_PRIVATE(rnp, lock));
+}
+
 static inline void raw_spin_lock_irq_rcu_node(struct rcu_node *rnp)
 {
-	raw_spin_lock_irq(&rnp->lock);
+	raw_spin_lock_irq(&ACCESS_PRIVATE(rnp, lock));
 	smp_mb__after_unlock_lock();
 }
 
-#define raw_spin_lock_irqsave_rcu_node(rnp, flags)	\
-do {							\
-	typecheck(unsigned long, flags);		\
-	raw_spin_lock_irqsave(&(rnp)->lock, flags);	\
-	smp_mb__after_unlock_lock();			\
+static inline void raw_spin_unlock_irq_rcu_node(struct rcu_node *rnp)
+{
+	raw_spin_unlock_irq(&ACCESS_PRIVATE(rnp, lock));
+}
+
+#define raw_spin_lock_irqsave_rcu_node(rnp, flags)			\
+do {									\
+	typecheck(unsigned long, flags);				\
+	raw_spin_lock_irqsave(&ACCESS_PRIVATE(rnp, lock), flags);	\
+	smp_mb__after_unlock_lock();					\
+} while (0)
+
+#define raw_spin_unlock_irqrestore_rcu_node(rnp, flags)			\
+do {									\
+	typecheck(unsigned long, flags);				\
+	raw_spin_unlock_irqrestore(&ACCESS_PRIVATE(rnp, lock), flags);	\
 } while (0)
 
 static inline bool raw_spin_trylock_rcu_node(struct rcu_node *rnp)
 {
-	bool locked = raw_spin_trylock(&rnp->lock);
+	bool locked = raw_spin_trylock(&ACCESS_PRIVATE(rnp, lock));
 
 	if (locked)
 		smp_mb__after_unlock_lock();
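
With ->lock now marked __private, callers are expected to go through the
wrappers above rather than touch the field directly. A minimal usage sketch
(illustrative only; rnp_example() is not part of this series):

	/*
	 * Take and release an rcu_node lock through the wrappers so the
	 * smp_mb__after_unlock_lock() ordering is applied on every
	 * acquisition and the lock field is never dereferenced outside
	 * these helpers.
	 */
	static void rnp_example(struct rcu_node *rnp)
	{
		unsigned long flags;

		raw_spin_lock_irqsave_rcu_node(rnp, flags);
		/* ... update fields protected by the rcu_node's lock ... */
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
	}
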
diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
index 080bd20..efdf7b6 100644
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
@@ -235,7 +235,7 @@
 		rnp->gp_tasks = &t->rcu_node_entry;
 	if (!rnp->exp_tasks && (blkd_state & RCU_EXP_BLKD))
 		rnp->exp_tasks = &t->rcu_node_entry;
-	raw_spin_unlock(&rnp->lock); /* rrupts remain disabled. */
+	raw_spin_unlock_rcu_node(rnp); /* interrupts remain disabled. */
 
 	/*
 	 * Report the quiescent state for the expedited GP.  This expedited
@@ -489,7 +489,7 @@
 							 !!rnp->gp_tasks);
 			rcu_report_unblock_qs_rnp(rcu_state_p, rnp, flags);
 		} else {
-			raw_spin_unlock_irqrestore(&rnp->lock, flags);
+			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 		}
 
 		/* Unboost if we were boosted. */
@@ -518,14 +518,14 @@
 
 	raw_spin_lock_irqsave_rcu_node(rnp, flags);
 	if (!rcu_preempt_blocked_readers_cgp(rnp)) {
-		raw_spin_unlock_irqrestore(&rnp->lock, flags);
+		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 		return;
 	}
 	t = list_entry(rnp->gp_tasks->prev,
 		       struct task_struct, rcu_node_entry);
 	list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry)
 		sched_show_task(t);
-	raw_spin_unlock_irqrestore(&rnp->lock, flags);
+	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 }
 
 /*
@@ -807,7 +807,6 @@
 #else /* #ifdef CONFIG_PREEMPT_RCU */
 
 static struct rcu_state *const rcu_state_p = &rcu_sched_state;
-static struct rcu_data __percpu *const rcu_data_p = &rcu_sched_data;
 
 /*
  * Tell them what RCU they are running.
@@ -991,7 +990,7 @@
 	 * might exit their RCU read-side critical sections on their own.
 	 */
 	if (rnp->exp_tasks == NULL && rnp->boost_tasks == NULL) {
-		raw_spin_unlock_irqrestore(&rnp->lock, flags);
+		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 		return 0;
 	}
 
@@ -1028,7 +1027,7 @@
 	 */
 	t = container_of(tb, struct task_struct, rcu_node_entry);
 	rt_mutex_init_proxy_locked(&rnp->boost_mtx, t);
-	raw_spin_unlock_irqrestore(&rnp->lock, flags);
+	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 	/* Lock only for side effect: boosts task t's priority. */
 	rt_mutex_lock(&rnp->boost_mtx);
 	rt_mutex_unlock(&rnp->boost_mtx);  /* Then keep lockdep happy. */
@@ -1088,7 +1087,7 @@
 
 	if (!rcu_preempt_blocked_readers_cgp(rnp) && rnp->exp_tasks == NULL) {
 		rnp->n_balk_exp_gp_tasks++;
-		raw_spin_unlock_irqrestore(&rnp->lock, flags);
+		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 		return;
 	}
 	if (rnp->exp_tasks != NULL ||
@@ -1098,13 +1097,13 @@
 	     ULONG_CMP_GE(jiffies, rnp->boost_time))) {
 		if (rnp->exp_tasks == NULL)
 			rnp->boost_tasks = rnp->gp_tasks;
-		raw_spin_unlock_irqrestore(&rnp->lock, flags);
+		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 		t = rnp->boost_kthread_task;
 		if (t)
 			rcu_wake_cond(t, rnp->boost_kthread_status);
 	} else {
 		rcu_initiate_boost_trace(rnp);
-		raw_spin_unlock_irqrestore(&rnp->lock, flags);
+		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 	}
 }
 
@@ -1172,7 +1171,7 @@
 		return PTR_ERR(t);
 	raw_spin_lock_irqsave_rcu_node(rnp, flags);
 	rnp->boost_kthread_task = t;
-	raw_spin_unlock_irqrestore(&rnp->lock, flags);
+	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 	sp.sched_priority = kthread_prio;
 	sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
 	wake_up_process(t); /* get to TASK_INTERRUPTIBLE quickly. */
@@ -1308,7 +1307,7 @@
 static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags)
 	__releases(rnp->lock)
 {
-	raw_spin_unlock_irqrestore(&rnp->lock, flags);
+	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 }
 
 static void invoke_rcu_callbacks_kthread(void)
@@ -1559,7 +1558,7 @@
 		rnp = rdp->mynode;
 		raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */
 		needwake = rcu_accelerate_cbs(rsp, rnp, rdp);
-		raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
+		raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */
 		if (needwake)
 			rcu_gp_kthread_wake(rsp);
 	}
@@ -2064,7 +2063,7 @@
 
 	raw_spin_lock_irqsave_rcu_node(rnp, flags);
 	needwake = rcu_start_future_gp(rnp, rdp, &c);
-	raw_spin_unlock_irqrestore(&rnp->lock, flags);
+	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 	if (needwake)
 		rcu_gp_kthread_wake(rdp->rsp);
 
diff --git a/kernel/rcu/update.c b/kernel/rcu/update.c
index 76b94e1..ca828b4 100644
--- a/kernel/rcu/update.c
+++ b/kernel/rcu/update.c
@@ -128,6 +128,7 @@
 {
 	return READ_ONCE(rcu_normal);
 }
+EXPORT_SYMBOL_GPL(rcu_gp_is_normal);
 
 static atomic_t rcu_expedited_nesting =
 	ATOMIC_INIT(IS_ENABLED(CONFIG_RCU_EXPEDITE_BOOT) ? 1 : 0);
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index e5725b9..ea8f49a 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -5434,16 +5434,6 @@
 		set_cpu_rq_start_time();
 		return NOTIFY_OK;
 
-	case CPU_ONLINE:
-		/*
-		 * At this point a starting CPU has marked itself as online via
-		 * set_cpu_online(). But it might not yet have marked itself
-		 * as active, which is essential from here on.
-		 */
-		set_cpu_active(cpu, true);
-		stop_machine_unpark(cpu);
-		return NOTIFY_OK;
-
 	case CPU_DOWN_FAILED:
 		set_cpu_active(cpu, true);
 		return NOTIFY_OK;
diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c
index 544a713..bd12c6c 100644
--- a/kernel/sched/idle.c
+++ b/kernel/sched/idle.c
@@ -4,6 +4,7 @@
 #include <linux/sched.h>
 #include <linux/cpu.h>
 #include <linux/cpuidle.h>
+#include <linux/cpuhotplug.h>
 #include <linux/tick.h>
 #include <linux/mm.h>
 #include <linux/stackprotector.h>
@@ -193,8 +194,6 @@
 	rcu_idle_exit();
 }
 
-DEFINE_PER_CPU(bool, cpu_dead_idle);
-
 /*
  * Generic idle loop implementation
  *
@@ -221,10 +220,7 @@
 			rmb();
 
 			if (cpu_is_offline(smp_processor_id())) {
-				rcu_cpu_notify(NULL, CPU_DYING_IDLE,
-					       (void *)(long)smp_processor_id());
-				smp_mb(); /* all activity before dead. */
-				this_cpu_write(cpu_dead_idle, true);
+				cpuhp_report_idle_dead();
 				arch_cpu_idle_dead();
 			}
 
@@ -291,5 +287,6 @@
 	boot_init_stack_canary();
 #endif
 	arch_cpu_idle_prepare();
+	cpuhp_online_idle(state);
 	cpu_idle_loop();
 }
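
The idle loop now hands the dying-CPU bookkeeping to the hotplug core.
cpuhp_report_idle_dead() lives in kernel/cpu.c and is not part of this
excerpt; as a rough outline (an assumption, not the verbatim implementation)
it performs the work the removed CPU_DYING_IDLE notifier used to do, using
the rcu_report_dead() helper added in kernel/rcu/tree.c above:

	/* Assumed outline only -- see kernel/cpu.c for the real function. */
	void cpuhp_report_idle_dead(void)
	{
		/* Tell RCU this CPU is gone (replaces CPU_DYING_IDLE). */
		rcu_report_dead(smp_processor_id());
		/* ... then notify the hotplug control side that this CPU is dead ... */
	}
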
diff --git a/kernel/smp.c b/kernel/smp.c
index 300d293..7416544 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -568,6 +568,7 @@
 	unsigned int cpu;
 
 	idle_threads_init();
+	cpuhp_threads_init();
 
 	/* FIXME: This should be done in userspace --RR */
 	for_each_present_cpu(cpu) {
diff --git a/kernel/smpboot.c b/kernel/smpboot.c
index d264f59..13bc43d 100644
--- a/kernel/smpboot.c
+++ b/kernel/smpboot.c
@@ -226,7 +226,7 @@
 		kthread_unpark(tsk);
 }
 
-void smpboot_unpark_threads(unsigned int cpu)
+int smpboot_unpark_threads(unsigned int cpu)
 {
 	struct smp_hotplug_thread *cur;
 
@@ -235,6 +235,7 @@
 		if (cpumask_test_cpu(cpu, cur->cpumask))
 			smpboot_unpark_thread(cur, cpu);
 	mutex_unlock(&smpboot_threads_lock);
+	return 0;
 }
 
 static void smpboot_park_thread(struct smp_hotplug_thread *ht, unsigned int cpu)
@@ -245,7 +246,7 @@
 		kthread_park(tsk);
 }
 
-void smpboot_park_threads(unsigned int cpu)
+int smpboot_park_threads(unsigned int cpu)
 {
 	struct smp_hotplug_thread *cur;
 
@@ -253,6 +254,7 @@
 	list_for_each_entry_reverse(cur, &hotplug_threads, list)
 		smpboot_park_thread(cur, cpu);
 	mutex_unlock(&smpboot_threads_lock);
+	return 0;
 }
 
 static void smpboot_destroy_threads(struct smp_hotplug_thread *ht)
diff --git a/kernel/smpboot.h b/kernel/smpboot.h
index 72415a0..485b81c 100644
--- a/kernel/smpboot.h
+++ b/kernel/smpboot.h
@@ -14,7 +14,9 @@
 #endif
 
 int smpboot_create_threads(unsigned int cpu);
-void smpboot_park_threads(unsigned int cpu);
-void smpboot_unpark_threads(unsigned int cpu);
+int smpboot_park_threads(unsigned int cpu);
+int smpboot_unpark_threads(unsigned int cpu);
+
+void __init cpuhp_threads_init(void);
 
 #endif
diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c
index 664de53..56ece14 100644
--- a/kernel/time/clocksource.c
+++ b/kernel/time/clocksource.c
@@ -323,13 +323,42 @@
 		/* cs is a watchdog. */
 		if (cs->flags & CLOCK_SOURCE_IS_CONTINUOUS)
 			cs->flags |= CLOCK_SOURCE_VALID_FOR_HRES;
-		/* Pick the best watchdog. */
-		if (!watchdog || cs->rating > watchdog->rating) {
-			watchdog = cs;
-			/* Reset watchdog cycles */
-			clocksource_reset_watchdog();
-		}
 	}
+	spin_unlock_irqrestore(&watchdog_lock, flags);
+}
+
+static void clocksource_select_watchdog(bool fallback)
+{
+	struct clocksource *cs, *old_wd;
+	unsigned long flags;
+
+	spin_lock_irqsave(&watchdog_lock, flags);
+	/* save current watchdog */
+	old_wd = watchdog;
+	if (fallback)
+		watchdog = NULL;
+
+	list_for_each_entry(cs, &clocksource_list, list) {
+		/* cs is a clocksource to be watched. */
+		if (cs->flags & CLOCK_SOURCE_MUST_VERIFY)
+			continue;
+
+		/* Skip the current watchdog if a fallback was requested. */
+		if (fallback && cs == old_wd)
+			continue;
+
+		/* Pick the best watchdog. */
+		if (!watchdog || cs->rating > watchdog->rating)
+			watchdog = cs;
+	}
+	/* If we failed to find a fallback, restore the old one. */
+	if (!watchdog)
+		watchdog = old_wd;
+
+	/* If we changed the watchdog we need to reset cycles. */
+	if (watchdog != old_wd)
+		clocksource_reset_watchdog();
+
 	/* Check if the watchdog timer needs to be started. */
 	clocksource_start_watchdog();
 	spin_unlock_irqrestore(&watchdog_lock, flags);
@@ -404,6 +433,7 @@
 		cs->flags |= CLOCK_SOURCE_VALID_FOR_HRES;
 }
 
+static void clocksource_select_watchdog(bool fallback) { }
 static inline void clocksource_dequeue_watchdog(struct clocksource *cs) { }
 static inline void clocksource_resume_watchdog(void) { }
 static inline int __clocksource_watchdog_kthread(void) { return 0; }
@@ -736,6 +766,7 @@
 	clocksource_enqueue(cs);
 	clocksource_enqueue_watchdog(cs);
 	clocksource_select();
+	clocksource_select_watchdog(false);
 	mutex_unlock(&clocksource_mutex);
 	return 0;
 }
@@ -758,6 +789,7 @@
 	mutex_lock(&clocksource_mutex);
 	__clocksource_change_rating(cs, rating);
 	clocksource_select();
+	clocksource_select_watchdog(false);
 	mutex_unlock(&clocksource_mutex);
 }
 EXPORT_SYMBOL(clocksource_change_rating);
@@ -767,12 +799,12 @@
  */
 static int clocksource_unbind(struct clocksource *cs)
 {
-	/*
-	 * I really can't convince myself to support this on hardware
-	 * designed by lobotomized monkeys.
-	 */
-	if (clocksource_is_watchdog(cs))
-		return -EBUSY;
+	if (clocksource_is_watchdog(cs)) {
+		/* Select and try to install a replacement watchdog. */
+		clocksource_select_watchdog(true);
+		if (clocksource_is_watchdog(cs))
+			return -EBUSY;
+	}
 
 	if (cs == curr_clocksource) {
 		/* Select and try to install a replacement clock source */
diff --git a/kernel/time/jiffies.c b/kernel/time/jiffies.c
index 347fecf..555e21f 100644
--- a/kernel/time/jiffies.c
+++ b/kernel/time/jiffies.c
@@ -68,7 +68,7 @@
 	.name		= "jiffies",
 	.rating		= 1, /* lowest valid rating*/
 	.read		= jiffies_read,
-	.mask		= 0xffffffff, /*32bits*/
+	.mask		= CLOCKSOURCE_MASK(32),
 	.mult		= NSEC_PER_JIFFY << JIFFIES_SHIFT, /* details above */
 	.shift		= JIFFIES_SHIFT,
 	.max_cycles	= 10,
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index 34b4ced..9c629bb 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -233,6 +233,7 @@
 	u64 tmp, ntpinterval;
 	struct clocksource *old_clock;
 
+	++tk->cs_was_changed_seq;
 	old_clock = tk->tkr_mono.clock;
 	tk->tkr_mono.clock = clock;
 	tk->tkr_mono.read = clock->read;
@@ -298,19 +299,36 @@
 static inline u32 arch_gettimeoffset(void) { return 0; }
 #endif
 
-static inline s64 timekeeping_get_ns(struct tk_read_base *tkr)
+static inline s64 timekeeping_delta_to_ns(struct tk_read_base *tkr,
+					  cycle_t delta)
 {
-	cycle_t delta;
 	s64 nsec;
 
-	delta = timekeeping_get_delta(tkr);
-
-	nsec = (delta * tkr->mult + tkr->xtime_nsec) >> tkr->shift;
+	nsec = delta * tkr->mult + tkr->xtime_nsec;
+	nsec >>= tkr->shift;
 
 	/* If arch requires, add in get_arch_timeoffset() */
 	return nsec + arch_gettimeoffset();
 }
 
+static inline s64 timekeeping_get_ns(struct tk_read_base *tkr)
+{
+	cycle_t delta;
+
+	delta = timekeeping_get_delta(tkr);
+	return timekeeping_delta_to_ns(tkr, delta);
+}
+
+static inline s64 timekeeping_cycles_to_ns(struct tk_read_base *tkr,
+					    cycle_t cycles)
+{
+	cycle_t delta;
+
+	/* calculate the delta since the last update_wall_time */
+	delta = clocksource_delta(cycles, tkr->cycle_last, tkr->mask);
+	return timekeeping_delta_to_ns(tkr, delta);
+}
+
 /**
  * update_fast_timekeeper - Update the fast and NMI safe monotonic timekeeper.
  * @tkr: Timekeeping readout base from which we take the update
@@ -857,44 +875,262 @@
 	return tk->xtime_sec;
 }
 
-
-#ifdef CONFIG_NTP_PPS
-
 /**
- * ktime_get_raw_and_real_ts64 - get day and raw monotonic time in timespec format
- * @ts_raw:	pointer to the timespec to be set to raw monotonic time
- * @ts_real:	pointer to the timespec to be set to the time of day
- *
- * This function reads both the time of day and raw monotonic time at the
- * same time atomically and stores the resulting timestamps in timespec
- * format.
+ * ktime_get_snapshot - snapshots the realtime/monotonic raw clocks with counter
+ * @systime_snapshot:	pointer to struct receiving the system time snapshot
  */
-void ktime_get_raw_and_real_ts64(struct timespec64 *ts_raw, struct timespec64 *ts_real)
+void ktime_get_snapshot(struct system_time_snapshot *systime_snapshot)
 {
 	struct timekeeper *tk = &tk_core.timekeeper;
 	unsigned long seq;
-	s64 nsecs_raw, nsecs_real;
+	ktime_t base_raw;
+	ktime_t base_real;
+	s64 nsec_raw;
+	s64 nsec_real;
+	cycle_t now;
 
 	WARN_ON_ONCE(timekeeping_suspended);
 
 	do {
 		seq = read_seqcount_begin(&tk_core.seq);
 
-		*ts_raw = tk->raw_time;
-		ts_real->tv_sec = tk->xtime_sec;
-		ts_real->tv_nsec = 0;
-
-		nsecs_raw  = timekeeping_get_ns(&tk->tkr_raw);
-		nsecs_real = timekeeping_get_ns(&tk->tkr_mono);
-
+		now = tk->tkr_mono.read(tk->tkr_mono.clock);
+		systime_snapshot->cs_was_changed_seq = tk->cs_was_changed_seq;
+		systime_snapshot->clock_was_set_seq = tk->clock_was_set_seq;
+		base_real = ktime_add(tk->tkr_mono.base,
+				      tk_core.timekeeper.offs_real);
+		base_raw = tk->tkr_raw.base;
+		nsec_real = timekeeping_cycles_to_ns(&tk->tkr_mono, now);
+		nsec_raw  = timekeeping_cycles_to_ns(&tk->tkr_raw, now);
 	} while (read_seqcount_retry(&tk_core.seq, seq));
 
-	timespec64_add_ns(ts_raw, nsecs_raw);
-	timespec64_add_ns(ts_real, nsecs_real);
+	systime_snapshot->cycles = now;
+	systime_snapshot->real = ktime_add_ns(base_real, nsec_real);
+	systime_snapshot->raw = ktime_add_ns(base_raw, nsec_raw);
 }
-EXPORT_SYMBOL(ktime_get_raw_and_real_ts64);
+EXPORT_SYMBOL_GPL(ktime_get_snapshot);
 
-#endif /* CONFIG_NTP_PPS */
+/* Scale base by mult/div checking for overflow */
+static int scale64_check_overflow(u64 mult, u64 div, u64 *base)
+{
+	u64 tmp, rem;
+
+	tmp = div64_u64_rem(*base, div, &rem);
+
+	if (((int)sizeof(u64)*8 - fls64(mult) < fls64(tmp)) ||
+	    ((int)sizeof(u64)*8 - fls64(mult) < fls64(rem)))
+		return -EOVERFLOW;
+	tmp *= mult;
+	rem *= mult;
+
+	do_div(rem, div);
+	*base = tmp + rem;
+	return 0;
+}
+
+/**
+ * adjust_historical_crosststamp - adjust crosstimestamp previous to current interval
+ * @history:			Snapshot representing start of history
+ * @partial_history_cycles:	Cycle offset into history (fractional part)
+ * @total_history_cycles:	Total history length in cycles
+ * @discontinuity:		True indicates the clock was set during the history period
+ * @ts:				Cross timestamp that should be adjusted using
+ *	partial/total ratio
+ *
+ * Helper function used by get_device_system_crosststamp() to correct the
+ * crosstimestamp corresponding to the start of the current interval to the
+ * system counter value (timestamp point) provided by the driver. The
+ * total_history_* quantities are the total history starting at the provided
+ * reference point and ending at the start of the current interval. The cycle
+ * count between the driver timestamp point and the start of the current
+ * interval is partial_history_cycles.
+ */
+static int adjust_historical_crosststamp(struct system_time_snapshot *history,
+					 cycle_t partial_history_cycles,
+					 cycle_t total_history_cycles,
+					 bool discontinuity,
+					 struct system_device_crosststamp *ts)
+{
+	struct timekeeper *tk = &tk_core.timekeeper;
+	u64 corr_raw, corr_real;
+	bool interp_forward;
+	int ret;
+
+	if (total_history_cycles == 0 || partial_history_cycles == 0)
+		return 0;
+
+	/* Interpolate shortest distance from beginning or end of history */
+	interp_forward = partial_history_cycles > total_history_cycles/2 ?
+		true : false;
+	partial_history_cycles = interp_forward ?
+		total_history_cycles - partial_history_cycles :
+		partial_history_cycles;
+
+	/*
+	 * Scale the monotonic raw time delta by:
+	 *	partial_history_cycles / total_history_cycles
+	 */
+	corr_raw = (u64)ktime_to_ns(
+		ktime_sub(ts->sys_monoraw, history->raw));
+	ret = scale64_check_overflow(partial_history_cycles,
+				     total_history_cycles, &corr_raw);
+	if (ret)
+		return ret;
+
+	/*
+	 * If there is a discontinuity in the history, scale monotonic raw
+	 *	correction by:
+	 *	mult(real)/mult(raw) yielding the realtime correction
+	 * Otherwise, calculate the realtime correction similar to monotonic
+	 *	raw calculation
+	 */
+	if (discontinuity) {
+		corr_real = mul_u64_u32_div
+			(corr_raw, tk->tkr_mono.mult, tk->tkr_raw.mult);
+	} else {
+		corr_real = (u64)ktime_to_ns(
+			ktime_sub(ts->sys_realtime, history->real));
+		ret = scale64_check_overflow(partial_history_cycles,
+					     total_history_cycles, &corr_real);
+		if (ret)
+			return ret;
+	}
+
+	/* Fix up the monotonic raw and real time values */
+	if (interp_forward) {
+		ts->sys_monoraw = ktime_add_ns(history->raw, corr_raw);
+		ts->sys_realtime = ktime_add_ns(history->real, corr_real);
+	} else {
+		ts->sys_monoraw = ktime_sub_ns(ts->sys_monoraw, corr_raw);
+		ts->sys_realtime = ktime_sub_ns(ts->sys_realtime, corr_real);
+	}
+
+	return 0;
+}
+
+/*
+ * cycle_between - true if test occurs chronologically between before and after
+ */
+static bool cycle_between(cycle_t before, cycle_t test, cycle_t after)
+{
+	if (test > before && test < after)
+		return true;
+	if (test < before && before > after)
+		return true;
+	return false;
+}
+
+/**
+ * get_device_system_crosststamp - Synchronously capture system/device timestamp
+ * @get_time_fn:	Callback to get simultaneous device time and
+ *	system counter from the device driver
+ * @ctx:		Context passed to get_time_fn()
+ * @history_begin:	Historical reference point used to interpolate system
+ *	time when the counter provided by the driver is before the current interval
+ * @xtstamp:		Receives simultaneously captured system and device time
+ *
+ * Reads a timestamp from a device and correlates it to system time
+ */
+int get_device_system_crosststamp(int (*get_time_fn)
+				  (ktime_t *device_time,
+				   struct system_counterval_t *sys_counterval,
+				   void *ctx),
+				  void *ctx,
+				  struct system_time_snapshot *history_begin,
+				  struct system_device_crosststamp *xtstamp)
+{
+	struct system_counterval_t system_counterval;
+	struct timekeeper *tk = &tk_core.timekeeper;
+	cycle_t cycles, now, interval_start;
+	unsigned int clock_was_set_seq = 0;
+	ktime_t base_real, base_raw;
+	s64 nsec_real, nsec_raw;
+	u8 cs_was_changed_seq;
+	unsigned long seq;
+	bool do_interp;
+	int ret;
+
+	do {
+		seq = read_seqcount_begin(&tk_core.seq);
+		/*
+		 * Try to synchronously capture device time and a system
+		 * counter value by calling back into the device driver
+		 */
+		ret = get_time_fn(&xtstamp->device, &system_counterval, ctx);
+		if (ret)
+			return ret;
+
+		/*
+		 * Verify that the clocksource associated with the captured
+		 * system counter value is the same as the currently installed
+		 * timekeeper clocksource
+		 */
+		if (tk->tkr_mono.clock != system_counterval.cs)
+			return -ENODEV;
+		cycles = system_counterval.cycles;
+
+		/*
+		 * Check whether the system counter value provided by the
+		 * device driver is on the current timekeeping interval.
+		 */
+		now = tk->tkr_mono.read(tk->tkr_mono.clock);
+		interval_start = tk->tkr_mono.cycle_last;
+		if (!cycle_between(interval_start, cycles, now)) {
+			clock_was_set_seq = tk->clock_was_set_seq;
+			cs_was_changed_seq = tk->cs_was_changed_seq;
+			cycles = interval_start;
+			do_interp = true;
+		} else {
+			do_interp = false;
+		}
+
+		base_real = ktime_add(tk->tkr_mono.base,
+				      tk_core.timekeeper.offs_real);
+		base_raw = tk->tkr_raw.base;
+
+		nsec_real = timekeeping_cycles_to_ns(&tk->tkr_mono,
+						     system_counterval.cycles);
+		nsec_raw = timekeeping_cycles_to_ns(&tk->tkr_raw,
+						    system_counterval.cycles);
+	} while (read_seqcount_retry(&tk_core.seq, seq));
+
+	xtstamp->sys_realtime = ktime_add_ns(base_real, nsec_real);
+	xtstamp->sys_monoraw = ktime_add_ns(base_raw, nsec_raw);
+
+	/*
+	 * Interpolate if necessary, adjusting back from the start of the
+	 * current interval
+	 */
+	if (do_interp) {
+		cycle_t partial_history_cycles, total_history_cycles;
+		bool discontinuity;
+
+		/*
+		 * Check that the counter value occurs after the provided
+		 * history reference and that the history doesn't cross a
+		 * clocksource change
+		 */
+		if (!history_begin ||
+		    !cycle_between(history_begin->cycles,
+				   system_counterval.cycles, cycles) ||
+		    history_begin->cs_was_changed_seq != cs_was_changed_seq)
+			return -EINVAL;
+		partial_history_cycles = cycles - system_counterval.cycles;
+		total_history_cycles = cycles - history_begin->cycles;
+		discontinuity =
+			history_begin->clock_was_set_seq != clock_was_set_seq;
+
+		ret = adjust_historical_crosststamp(history_begin,
+						    partial_history_cycles,
+						    total_history_cycles,
+						    discontinuity, xtstamp);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(get_device_system_crosststamp);
 
 /**
  * do_gettimeofday - Returns the time of day in a timeval
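
For context, a driver-side sketch of the new cross-timestamp API. All mydev_*
names, struct mydev and the art_cs member are hypothetical; only
get_device_system_crosststamp(), ktime_get_snapshot() and the
system_counterval_t/system_device_crosststamp fields used below come from the
patch above:

	/*
	 * Hypothetical callback: read the device clock and the shared system
	 * counter as close to simultaneously as the hardware allows.
	 */
	static int mydev_get_time(ktime_t *device_time,
				  struct system_counterval_t *sys_counterval,
				  void *ctx)
	{
		struct mydev *md = ctx;				/* hypothetical type */

		*device_time = mydev_read_device_clock(md);	/* hypothetical helper */
		sys_counterval->cycles = mydev_read_sys_counter(md); /* hypothetical helper */
		sys_counterval->cs = md->art_cs;  /* must match the timekeeper's clocksource */
		return 0;
	}

	/* Caller side, e.g. in an ioctl handler (also hypothetical): */
	struct system_device_crosststamp xtstamp;
	int err = get_device_system_crosststamp(mydev_get_time, md, NULL, &xtstamp);
	/*
	 * On success, xtstamp.device, xtstamp.sys_realtime and
	 * xtstamp.sys_monoraw hold correlated device/system timestamps; pass a
	 * history snapshot taken earlier with ktime_get_snapshot() instead of
	 * NULL if the counter value may predate the current timekeeping
	 * interval.
	 */
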
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 8bfd1ac..f28f7fa 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -1442,6 +1442,19 @@
 
 	  Say N if you are unsure.
 
+config CPU_HOTPLUG_STATE_CONTROL
+	bool "Enable CPU hotplug state control"
+	depends on DEBUG_KERNEL
+	depends on HOTPLUG_CPU
+	default n
+	help
+	  Allows writing steps between "offline" and "online" to the CPUs
+	  sysfs target file so states can be stepped through one at a time.
+	  This is a debug option for now as the hotplug machinery cannot be
+	  stopped and restarted at arbitrary points yet.
+
+	  Say N if you are unsure.
+
 config NOTIFIER_ERROR_INJECTION
 	tristate "Notifier error injection"
 	depends on DEBUG_KERNEL
diff --git a/lib/atomic64_test.c b/lib/atomic64_test.c
index d62de8b..1234818 100644
--- a/lib/atomic64_test.c
+++ b/lib/atomic64_test.c
@@ -17,7 +17,7 @@
 #include <linux/atomic.h>
 
 #ifdef CONFIG_X86
-#include <asm/processor.h>	/* for boot_cpu_has below */
+#include <asm/cpufeature.h>	/* for boot_cpu_has below */
 #endif
 
 #define TEST(bit, op, c_op, val)				\
diff --git a/mm/memory.c b/mm/memory.c
index 032f05c..0e24764 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1551,8 +1551,29 @@
 int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
 			unsigned long pfn)
 {
+	return vm_insert_pfn_prot(vma, addr, pfn, vma->vm_page_prot);
+}
+EXPORT_SYMBOL(vm_insert_pfn);
+
+/**
+ * vm_insert_pfn_prot - insert single pfn into user vma with specified pgprot
+ * @vma: user vma to map to
+ * @addr: target user address of this page
+ * @pfn: source kernel pfn
+ * @pgprot: pgprot flags for the inserted page
+ *
+ * This is exactly like vm_insert_pfn, except that it allows drivers
+ * to override pgprot on a per-page basis.
+ *
+ * This only makes sense for IO mappings, and it makes no sense for
+ * cow mappings.  In general, using multiple vmas is preferable;
+ * vm_insert_pfn_prot should only be used if using multiple VMAs is
+ * impractical.
+ */
+int vm_insert_pfn_prot(struct vm_area_struct *vma, unsigned long addr,
+			unsigned long pfn, pgprot_t pgprot)
+{
 	int ret;
-	pgprot_t pgprot = vma->vm_page_prot;
 	/*
 	 * Technically, architectures with pte_special can avoid all these
 	 * restrictions (same for remap_pfn_range).  However we would like
@@ -1574,7 +1595,7 @@
 
 	return ret;
 }
-EXPORT_SYMBOL(vm_insert_pfn);
+EXPORT_SYMBOL(vm_insert_pfn_prot);
 
 int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
 			pfn_t pfn)
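
For illustration, the sort of ->fault handler this export is aimed at.
mydrv_fault() and mydrv_pfn_for() are hypothetical; only vm_insert_pfn_prot()
itself comes from the patch:

	/*
	 * Hypothetical driver fault handler mapping one PFN with a caching
	 * attribute different from the VMA-wide vma->vm_page_prot.
	 */
	static int mydrv_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
	{
		unsigned long pfn = mydrv_pfn_for(vmf->pgoff);	/* hypothetical */
		int ret;

		ret = vm_insert_pfn_prot(vma, (unsigned long)vmf->virtual_address,
					 pfn, pgprot_noncached(vma->vm_page_prot));
		return (ret == 0 || ret == -EBUSY) ? VM_FAULT_NOPAGE : VM_FAULT_SIGBUS;
	}
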
diff --git a/mm/mmap.c b/mm/mmap.c
index 76d1ec2..90e3b86 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -3066,11 +3066,16 @@
 	pgoff_t pgoff;
 	struct page **pages;
 
-	if (vma->vm_ops == &legacy_special_mapping_vmops)
+	if (vma->vm_ops == &legacy_special_mapping_vmops) {
 		pages = vma->vm_private_data;
-	else
-		pages = ((struct vm_special_mapping *)vma->vm_private_data)->
-			pages;
+	} else {
+		struct vm_special_mapping *sm = vma->vm_private_data;
+
+		if (sm->fault)
+			return sm->fault(sm, vma, vmf);
+
+		pages = sm->pages;
+	}
 
 	for (pgoff = vmf->pgoff; pgoff && *pages; ++pages)
 		pgoff--;
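
An illustrative consumer of the new hook. special_fault(), my_dynamic_pfn()
and my_mapping are hypothetical; the callback prototype is inferred from the
sm->fault(sm, vma, vmf) call above and from struct vm_special_mapping in
include/linux/mm_types.h, which is outside this excerpt:

	/*
	 * Hypothetical special mapping whose page is chosen at fault time
	 * instead of coming from a fixed pages[] array.
	 */
	static int special_fault(const struct vm_special_mapping *sm,
				 struct vm_area_struct *vma, struct vm_fault *vmf)
	{
		int ret;

		if (vmf->pgoff != 0)
			return VM_FAULT_SIGBUS;

		ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address,
				    my_dynamic_pfn());	/* hypothetical helper */
		return (ret == 0 || ret == -EBUSY) ? VM_FAULT_NOPAGE : VM_FAULT_SIGBUS;
	}

	static const struct vm_special_mapping my_mapping = {
		.name	= "[my_mapping]",
		.fault	= special_fault,
	};
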
diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl
index c64e604..d574d13 100755
--- a/scripts/checkpatch.pl
+++ b/scripts/checkpatch.pl
@@ -269,7 +269,8 @@
 			__init_refok|
 			__kprobes|
 			__ref|
-			__rcu
+			__rcu|
+			__private
 		}x;
 our $InitAttributePrefix = qr{__(?:mem|cpu|dev|net_|)};
 our $InitAttributeData = qr{$InitAttributePrefix(?:initdata\b)};
diff --git a/sound/soc/codecs/cs4271.c b/sound/soc/codecs/cs4271.c
index e770ee6..0c0010b 100644
--- a/sound/soc/codecs/cs4271.c
+++ b/sound/soc/codecs/cs4271.c
@@ -26,6 +26,7 @@
 #include <linux/of.h>
 #include <linux/of_device.h>
 #include <linux/of_gpio.h>
+#include <linux/regulator/consumer.h>
 #include <sound/pcm.h>
 #include <sound/soc.h>
 #include <sound/tlv.h>
@@ -157,6 +158,10 @@
 	return reg == CS4271_CHIPID;
 }
 
+static const char * const supply_names[] = {
+	"vd", "vl", "va"
+};
+
 struct cs4271_private {
 	unsigned int			mclk;
 	bool				master;
@@ -170,6 +175,7 @@
 	int				gpio_disable;
 	/* enable soft reset workaround */
 	bool				enable_soft_reset;
+	struct regulator_bulk_data      supplies[ARRAY_SIZE(supply_names)];
 };
 
 static const struct snd_soc_dapm_widget cs4271_dapm_widgets[] = {
@@ -487,6 +493,20 @@
 	.symmetric_rates = 1,
 };
 
+static int cs4271_reset(struct snd_soc_codec *codec)
+{
+	struct cs4271_private *cs4271 = snd_soc_codec_get_drvdata(codec);
+
+	if (gpio_is_valid(cs4271->gpio_nreset)) {
+		gpio_set_value(cs4271->gpio_nreset, 0);
+		mdelay(1);
+		gpio_set_value(cs4271->gpio_nreset, 1);
+		mdelay(1);
+	}
+
+	return 0;
+}
+
 #ifdef CONFIG_PM
 static int cs4271_soc_suspend(struct snd_soc_codec *codec)
 {
@@ -499,6 +519,9 @@
 	if (ret < 0)
 		return ret;
 
+	regcache_mark_dirty(cs4271->regmap);
+	regulator_bulk_disable(ARRAY_SIZE(cs4271->supplies), cs4271->supplies);
+
 	return 0;
 }
 
@@ -507,6 +530,16 @@
 	int ret;
 	struct cs4271_private *cs4271 = snd_soc_codec_get_drvdata(codec);
 
+	ret = regulator_bulk_enable(ARRAY_SIZE(cs4271->supplies),
+				    cs4271->supplies);
+	if (ret < 0) {
+		dev_err(codec->dev, "Failed to enable regulators: %d\n", ret);
+		return ret;
+	}
+
+	/* Do a proper reset after power up */
+	cs4271_reset(codec);
+
 	/* Restore codec state */
 	ret = regcache_sync(cs4271->regmap);
 	if (ret < 0)
@@ -553,19 +586,24 @@
 	}
 #endif
 
+	ret = regulator_bulk_enable(ARRAY_SIZE(cs4271->supplies),
+				    cs4271->supplies);
+	if (ret < 0) {
+		dev_err(codec->dev, "Failed to enable regulators: %d\n", ret);
+		return ret;
+	}
+
 	if (cs4271plat) {
 		amutec_eq_bmutec = cs4271plat->amutec_eq_bmutec;
 		cs4271->enable_soft_reset = cs4271plat->enable_soft_reset;
 	}
 
-	if (gpio_is_valid(cs4271->gpio_nreset)) {
-		/* Reset codec */
-		gpio_direction_output(cs4271->gpio_nreset, 0);
-		mdelay(1);
-		gpio_set_value(cs4271->gpio_nreset, 1);
-		/* Give the codec time to wake up */
-		mdelay(1);
-	}
+	/* Reset codec */
+	cs4271_reset(codec);
+
+	ret = regcache_sync(cs4271->regmap);
+	if (ret < 0)
+		return ret;
 
 	ret = regmap_update_bits(cs4271->regmap, CS4271_MODE2,
 				 CS4271_MODE2_PDN | CS4271_MODE2_CPEN,
@@ -595,6 +633,9 @@
 		/* Set codec to the reset state */
 		gpio_set_value(cs4271->gpio_nreset, 0);
 
+	regcache_mark_dirty(cs4271->regmap);
+	regulator_bulk_disable(ARRAY_SIZE(cs4271->supplies), cs4271->supplies);
+
 	return 0;
 };
 
@@ -617,6 +658,7 @@
 {
 	struct cs4271_platform_data *cs4271plat = dev->platform_data;
 	struct cs4271_private *cs4271;
+	int i, ret;
 
 	cs4271 = devm_kzalloc(dev, sizeof(*cs4271), GFP_KERNEL);
 	if (!cs4271)
@@ -638,6 +680,17 @@
 			return ret;
 	}
 
+	for (i = 0; i < ARRAY_SIZE(supply_names); i++)
+		cs4271->supplies[i].supply = supply_names[i];
+
+	ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(cs4271->supplies),
+					cs4271->supplies);
+
+	if (ret < 0) {
+		dev_err(dev, "Failed to get regulators: %d\n", ret);
+		return ret;
+	}
+
 	*c = cs4271;
 	return 0;
 }
diff --git a/tools/testing/selftests/rcutorture/bin/parse-console.sh b/tools/testing/selftests/rcutorture/bin/parse-console.sh
index 844787a..5eb49b7 100755
--- a/tools/testing/selftests/rcutorture/bin/parse-console.sh
+++ b/tools/testing/selftests/rcutorture/bin/parse-console.sh
@@ -33,7 +33,7 @@
 then
 	print_warning Console output contains nul bytes, old qemu still running?
 fi
-egrep 'Badness|WARNING:|Warn|BUG|===========|Call Trace:|Oops:|detected stalls on CPUs/tasks:|Stall ended before state dump start' < $file | grep -v 'ODEBUG: ' | grep -v 'Warning: unable to open an initial console' > $1.diags
+egrep 'Badness|WARNING:|Warn|BUG|===========|Call Trace:|Oops:|detected stalls on CPUs/tasks:|self-detected stall on CPU|Stall ended before state dump start|\?\?\? Writer stall state' < $file | grep -v 'ODEBUG: ' | grep -v 'Warning: unable to open an initial console' > $1.diags
 if test -s $1.diags
 then
 	print_warning Assertion failure in $file $title
@@ -64,10 +64,12 @@
 	then
 		summary="$summary  lockdep: $n_badness"
 	fi
-	n_stalls=`egrep -c 'detected stalls on CPUs/tasks:|Stall ended before state dump start' $1`
+	n_stalls=`egrep -c 'detected stalls on CPUs/tasks:|self-detected stall on CPU|Stall ended before state dump start|\?\?\? Writer stall state' $1`
 	if test "$n_stalls" -ne 0
 	then
 		summary="$summary  Stalls: $n_stalls"
 	fi
 	print_warning Summary: $summary
+else
+	rm $1.diags
 fi
diff --git a/tools/testing/selftests/x86/Makefile b/tools/testing/selftests/x86/Makefile
index d0c473f..d5ce7d7 100644
--- a/tools/testing/selftests/x86/Makefile
+++ b/tools/testing/selftests/x86/Makefile
@@ -4,15 +4,16 @@
 
 .PHONY: all all_32 all_64 warn_32bit_failure clean
 
-TARGETS_C_BOTHBITS := single_step_syscall sysret_ss_attrs syscall_nt ptrace_syscall
-TARGETS_C_32BIT_ONLY := entry_from_vm86 syscall_arg_fault sigreturn test_syscall_vdso unwind_vdso \
+TARGETS_C_BOTHBITS := single_step_syscall sysret_ss_attrs syscall_nt ptrace_syscall \
+			check_initial_reg_state sigreturn ldt_gdt
+TARGETS_C_32BIT_ONLY := entry_from_vm86 syscall_arg_fault test_syscall_vdso unwind_vdso \
 			test_FCMOV test_FCOMI test_FISTTP \
-			ldt_gdt \
 			vdso_restorer
 
 TARGETS_C_32BIT_ALL := $(TARGETS_C_BOTHBITS) $(TARGETS_C_32BIT_ONLY)
+TARGETS_C_64BIT_ALL := $(TARGETS_C_BOTHBITS) $(TARGETS_C_64BIT_ONLY)
 BINARIES_32 := $(TARGETS_C_32BIT_ALL:%=%_32)
-BINARIES_64 := $(TARGETS_C_BOTHBITS:%=%_64)
+BINARIES_64 := $(TARGETS_C_64BIT_ALL:%=%_64)
 
 CFLAGS := -O2 -g -std=gnu99 -pthread -Wall
 
@@ -40,7 +41,7 @@
 $(TARGETS_C_32BIT_ALL:%=%_32): %_32: %.c
 	$(CC) -m32 -o $@ $(CFLAGS) $(EXTRA_CFLAGS) $^ -lrt -ldl -lm
 
-$(TARGETS_C_BOTHBITS:%=%_64): %_64: %.c
+$(TARGETS_C_64BIT_ALL:%=%_64): %_64: %.c
 	$(CC) -m64 -o $@ $(CFLAGS) $(EXTRA_CFLAGS) $^ -lrt -ldl
 
 # x86_64 users should be encouraged to install 32-bit libraries
@@ -65,3 +66,9 @@
 sysret_ss_attrs_64: thunks.S
 ptrace_syscall_32: raw_syscall_helper_32.S
 test_syscall_vdso_32: thunks_32.S
+
+# check_initial_reg_state is special: it needs a custom entry, and it
+# needs to be static so that its interpreter doesn't destroy its initial
+# state.
+check_initial_reg_state_32: CFLAGS += -Wl,-ereal_start -static
+check_initial_reg_state_64: CFLAGS += -Wl,-ereal_start -static
diff --git a/tools/testing/selftests/x86/check_initial_reg_state.c b/tools/testing/selftests/x86/check_initial_reg_state.c
new file mode 100644
index 0000000..6aaed9b8
--- /dev/null
+++ b/tools/testing/selftests/x86/check_initial_reg_state.c
@@ -0,0 +1,109 @@
+/*
+ * check_initial_reg_state.c - check that execve sets the correct state
+ * Copyright (c) 2014-2016 Andrew Lutomirski
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ */
+
+#define _GNU_SOURCE
+
+#include <stdio.h>
+
+unsigned long ax, bx, cx, dx, si, di, bp, sp, flags;
+unsigned long r8, r9, r10, r11, r12, r13, r14, r15;
+
+asm (
+	".pushsection .text\n\t"
+	".type real_start, @function\n\t"
+	".global real_start\n\t"
+	"real_start:\n\t"
+#ifdef __x86_64__
+	"mov %rax, ax\n\t"
+	"mov %rbx, bx\n\t"
+	"mov %rcx, cx\n\t"
+	"mov %rdx, dx\n\t"
+	"mov %rsi, si\n\t"
+	"mov %rdi, di\n\t"
+	"mov %rbp, bp\n\t"
+	"mov %rsp, sp\n\t"
+	"mov %r8, r8\n\t"
+	"mov %r9, r9\n\t"
+	"mov %r10, r10\n\t"
+	"mov %r11, r11\n\t"
+	"mov %r12, r12\n\t"
+	"mov %r13, r13\n\t"
+	"mov %r14, r14\n\t"
+	"mov %r15, r15\n\t"
+	"pushfq\n\t"
+	"popq flags\n\t"
+#else
+	"mov %eax, ax\n\t"
+	"mov %ebx, bx\n\t"
+	"mov %ecx, cx\n\t"
+	"mov %edx, dx\n\t"
+	"mov %esi, si\n\t"
+	"mov %edi, di\n\t"
+	"mov %ebp, bp\n\t"
+	"mov %esp, sp\n\t"
+	"pushfl\n\t"
+	"popl flags\n\t"
+#endif
+	"jmp _start\n\t"
+	".size real_start, . - real_start\n\t"
+	".popsection");
+
+int main()
+{
+	int nerrs = 0;
+
+	if (sp == 0) {
+		printf("[FAIL]\tTest was built incorrectly\n");
+		return 1;
+	}
+
+	if (ax || bx || cx || dx || si || di || bp
+#ifdef __x86_64__
+	    || r8 || r9 || r10 || r11 || r12 || r13 || r14 || r15
+#endif
+		) {
+		printf("[FAIL]\tAll GPRs except SP should be 0\n");
+#define SHOW(x) printf("\t" #x " = 0x%lx\n", x);
+		SHOW(ax);
+		SHOW(bx);
+		SHOW(cx);
+		SHOW(dx);
+		SHOW(si);
+		SHOW(di);
+		SHOW(bp);
+		SHOW(sp);
+#ifdef __x86_64__
+		SHOW(r8);
+		SHOW(r9);
+		SHOW(r10);
+		SHOW(r11);
+		SHOW(r12);
+		SHOW(r13);
+		SHOW(r14);
+		SHOW(r15);
+#endif
+		nerrs++;
+	} else {
+		printf("[OK]\tAll GPRs except SP are 0\n");
+	}
+
+	if (flags != 0x202) {
+		printf("[FAIL]\tFLAGS is 0x%lx, but it should be 0x202\n", flags);
+		nerrs++;
+	} else {
+		printf("[OK]\tFLAGS is 0x202\n");
+	}
+
+	return nerrs ? 1 : 0;
+}
diff --git a/tools/testing/selftests/x86/ptrace_syscall.c b/tools/testing/selftests/x86/ptrace_syscall.c
index 5105b49..4214567 100644
--- a/tools/testing/selftests/x86/ptrace_syscall.c
+++ b/tools/testing/selftests/x86/ptrace_syscall.c
@@ -103,6 +103,17 @@
 		err(1, "sigaction");
 }
 
+static void setsigign(int sig, int flags)
+{
+	struct sigaction sa;
+	memset(&sa, 0, sizeof(sa));
+	sa.sa_sigaction = (void *)SIG_IGN;
+	sa.sa_flags = flags;
+	sigemptyset(&sa.sa_mask);
+	if (sigaction(sig, &sa, 0))
+		err(1, "sigaction");
+}
+
 static void clearhandler(int sig)
 {
 	struct sigaction sa;
@@ -187,7 +198,7 @@
 
 	printf("[RUN]\tSYSEMU\n");
 	if (ptrace(PTRACE_SYSEMU, chld, 0, 0) != 0)
-		err(1, "PTRACE_SYSCALL");
+		err(1, "PTRACE_SYSEMU");
 	wait_trap(chld);
 
 	if (ptrace(PTRACE_GETREGS, chld, 0, &regs) != 0)
@@ -218,7 +229,7 @@
 		err(1, "PTRACE_SETREGS");
 
 	if (ptrace(PTRACE_SYSEMU, chld, 0, 0) != 0)
-		err(1, "PTRACE_SYSCALL");
+		err(1, "PTRACE_SYSEMU");
 	wait_trap(chld);
 
 	if (ptrace(PTRACE_GETREGS, chld, 0, &regs) != 0)
@@ -250,7 +261,7 @@
 		err(1, "PTRACE_SETREGS");
 
 	if (ptrace(PTRACE_SYSEMU, chld, 0, 0) != 0)
-		err(1, "PTRACE_SYSCALL");
+		err(1, "PTRACE_SYSEMU");
 	wait_trap(chld);
 
 	if (ptrace(PTRACE_GETREGS, chld, 0, &regs) != 0)
@@ -277,6 +288,119 @@
 	}
 }
 
+static void test_restart_under_ptrace(void)
+{
+	printf("[RUN]\tkernel syscall restart under ptrace\n");
+	pid_t chld = fork();
+	if (chld < 0)
+		err(1, "fork");
+
+	if (chld == 0) {
+		if (ptrace(PTRACE_TRACEME, 0, 0, 0) != 0)
+			err(1, "PTRACE_TRACEME");
+
+		printf("\tChild will take a nap until signaled\n");
+		setsigign(SIGUSR1, SA_RESTART);
+		raise(SIGSTOP);
+
+		syscall(SYS_pause, 0, 0, 0, 0, 0, 0);
+		_exit(0);
+	}
+
+	int status;
+
+	/* Wait for SIGSTOP. */
+	if (waitpid(chld, &status, 0) != chld || !WIFSTOPPED(status))
+		err(1, "waitpid");
+
+	struct user_regs_struct regs;
+
+	printf("[RUN]\tSYSCALL\n");
+	if (ptrace(PTRACE_SYSCALL, chld, 0, 0) != 0)
+		err(1, "PTRACE_SYSCALL");
+	wait_trap(chld);
+
+	/* We should be stopped at pause(2) entry. */
+
+	if (ptrace(PTRACE_GETREGS, chld, 0, &regs) != 0)
+		err(1, "PTRACE_GETREGS");
+
+	if (regs.user_syscall_nr != SYS_pause ||
+	    regs.user_arg0 != 0 || regs.user_arg1 != 0 ||
+	    regs.user_arg2 != 0 || regs.user_arg3 != 0 ||
+	    regs.user_arg4 != 0 || regs.user_arg5 != 0) {
+		printf("[FAIL]\tInitial args are wrong (nr=%lu, args=%lu %lu %lu %lu %lu %lu)\n",
+		       (unsigned long)regs.user_syscall_nr,
+		       (unsigned long)regs.user_arg0, (unsigned long)regs.user_arg1,
+		       (unsigned long)regs.user_arg2, (unsigned long)regs.user_arg3,
+		       (unsigned long)regs.user_arg4, (unsigned long)regs.user_arg5);
+		nerrs++;
+	} else {
+		printf("[OK]\tInitial nr and args are correct\n");
+	}
+
+	/* Interrupt it. */
+	kill(chld, SIGUSR1);
+
+	/* Advance.  We should be stopped at exit. */
+	printf("[RUN]\tSYSCALL\n");
+	if (ptrace(PTRACE_SYSCALL, chld, 0, 0) != 0)
+		err(1, "PTRACE_SYSCALL");
+	wait_trap(chld);
+
+	if (ptrace(PTRACE_GETREGS, chld, 0, &regs) != 0)
+		err(1, "PTRACE_GETREGS");
+
+	if (regs.user_syscall_nr != SYS_pause ||
+	    regs.user_arg0 != 0 || regs.user_arg1 != 0 ||
+	    regs.user_arg2 != 0 || regs.user_arg3 != 0 ||
+	    regs.user_arg4 != 0 || regs.user_arg5 != 0) {
+		printf("[FAIL]\tArgs after SIGUSR1 are wrong (nr=%lu, args=%lu %lu %lu %lu %lu %lu)\n",
+		       (unsigned long)regs.user_syscall_nr,
+		       (unsigned long)regs.user_arg0, (unsigned long)regs.user_arg1,
+		       (unsigned long)regs.user_arg2, (unsigned long)regs.user_arg3,
+		       (unsigned long)regs.user_arg4, (unsigned long)regs.user_arg5);
+		nerrs++;
+	} else {
+		printf("[OK]\tArgs after SIGUSR1 are correct (ax = %ld)\n",
+		       (long)regs.user_ax);
+	}
+
+	/* Poke the regs back in.  This must not break anything. */
+	if (ptrace(PTRACE_SETREGS, chld, 0, &regs) != 0)
+		err(1, "PTRACE_SETREGS");
+
+	/* Catch the (ignored) SIGUSR1. */
+	if (ptrace(PTRACE_CONT, chld, 0, 0) != 0)
+		err(1, "PTRACE_CONT");
+	if (waitpid(chld, &status, 0) != chld)
+		err(1, "waitpid");
+	if (!WIFSTOPPED(status)) {
+		printf("[FAIL]\tChild was not stopped for SIGUSR1 (status = 0x%x)\n", status);
+		nerrs++;
+	} else {
+		printf("[OK]\tChild got SIGUSR1\n");
+	}
+
+	/* The next event should be pause(2) again. */
+	printf("[RUN]\tStep again\n");
+	if (ptrace(PTRACE_SYSCALL, chld, 0, 0) != 0)
+		err(1, "PTRACE_SYSCALL");
+	wait_trap(chld);
+
+	/* We should be stopped at pause(2) entry. */
+
+	if (ptrace(PTRACE_GETREGS, chld, 0, &regs) != 0)
+		err(1, "PTRACE_GETREGS");
+
+	if (regs.user_syscall_nr != SYS_pause ||
+	    regs.user_arg0 != 0 || regs.user_arg1 != 0 ||
+	    regs.user_arg2 != 0 || regs.user_arg3 != 0 ||
+	    regs.user_arg4 != 0 || regs.user_arg5 != 0) {
+		printf("[FAIL]\tpause did not restart (nr=%lu, args=%lu %lu %lu %lu %lu %lu)\n",
+		       (unsigned long)regs.user_syscall_nr,
+		       (unsigned long)regs.user_arg0, (unsigned long)regs.user_arg1,
+		       (unsigned long)regs.user_arg2, (unsigned long)regs.user_arg3,
+		       (unsigned long)regs.user_arg4, (unsigned long)regs.user_arg5);
+		nerrs++;
+	} else {
+		printf("[OK]\tpause(2) restarted correctly\n");
+	}
+
+	/* Kill it. */
+	kill(chld, SIGKILL);
+	if (waitpid(chld, &status, 0) != chld)
+		err(1, "waitpid");
+}
+
 int main()
 {
 	printf("[RUN]\tCheck int80 return regs\n");
@@ -290,5 +414,7 @@
 
 	test_ptrace_syscall_restart();
 
+	test_restart_under_ptrace();
+
 	return 0;
 }
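
An illustrative, x86_64-only sketch (not part of the patch) of the PTRACE_SYSCALL plus PTRACE_GETREGS pattern the test above relies on: stop a traced child at its next syscall entry and read the syscall number. The register field name (orig_rax) comes from glibc's <sys/user.h>; the traced syscall here is just getpid as a stand-in.

#include <err.h>
#include <signal.h>
#include <stdio.h>
#include <sys/ptrace.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <sys/user.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	int status;
	pid_t chld = fork();

	if (chld < 0)
		err(1, "fork");

	if (chld == 0) {
		if (ptrace(PTRACE_TRACEME, 0, 0, 0) != 0)
			err(1, "PTRACE_TRACEME");
		raise(SIGSTOP);			/* hand control to the tracer */
		syscall(SYS_getpid);		/* the syscall we expect to observe */
		_exit(0);
	}

	/* Wait for the SIGSTOP, then run to the next syscall entry. */
	if (waitpid(chld, &status, 0) != chld || !WIFSTOPPED(status))
		err(1, "waitpid");
	if (ptrace(PTRACE_SYSCALL, chld, 0, 0) != 0)
		err(1, "PTRACE_SYSCALL");
	if (waitpid(chld, &status, 0) != chld || !WIFSTOPPED(status))
		err(1, "waitpid");

	struct user_regs_struct regs;
	if (ptrace(PTRACE_GETREGS, chld, 0, &regs) != 0)
		err(1, "PTRACE_GETREGS");

	printf("stopped at syscall %llu (SYS_getpid is %d)\n",
	       (unsigned long long)regs.orig_rax, (int)SYS_getpid);

	kill(chld, SIGKILL);
	waitpid(chld, &status, 0);
	return 0;
}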
diff --git a/tools/testing/selftests/x86/sigreturn.c b/tools/testing/selftests/x86/sigreturn.c
index b5aa1ba..8a577e7 100644
--- a/tools/testing/selftests/x86/sigreturn.c
+++ b/tools/testing/selftests/x86/sigreturn.c
@@ -54,6 +54,37 @@
 #include <sys/ptrace.h>
 #include <sys/user.h>
 
+/* Pull in AR_xyz defines. */
+typedef unsigned int u32;
+typedef unsigned short u16;
+#include "../../../../arch/x86/include/asm/desc_defs.h"
+
+/*
+ * Copied from asm/ucontext.h, as asm/ucontext.h conflicts badly with the glibc
+ * headers.
+ */
+#ifdef __x86_64__
+/*
+ * UC_SIGCONTEXT_SS will be set when delivering 64-bit or x32 signals on
+ * kernels that save SS in the sigcontext.  All kernels that set
+ * UC_SIGCONTEXT_SS will correctly restore at least the low 32 bits of esp
+ * regardless of SS (i.e. they implement espfix).
+ *
+ * Kernels that set UC_SIGCONTEXT_SS will also set UC_STRICT_RESTORE_SS
+ * when delivering a signal that came from 64-bit code.
+ *
+ * Sigreturn restores SS as follows:
+ *
+ * if (saved SS is valid || UC_STRICT_RESTORE_SS is set ||
+ *     saved CS is not 64-bit)
+ *         new SS = saved SS  (will fail IRET and signal if invalid)
+ * else
+ *         new SS = a flat 32-bit data segment
+ */
+#define UC_SIGCONTEXT_SS       0x2
+#define UC_STRICT_RESTORE_SS   0x4
+#endif
+
 /*
  * In principle, this test can run on Linux emulation layers (e.g.
  * Illumos "LX branded zones").  Solaris-based kernels reserve LDT
@@ -267,6 +298,9 @@
 /* Instructions for the SIGUSR1 handler. */
 static volatile unsigned short sig_cs, sig_ss;
 static volatile sig_atomic_t sig_trapped, sig_err, sig_trapno;
+#ifdef __x86_64__
+static volatile sig_atomic_t sig_corrupt_final_ss;
+#endif
 
 /* Abstractions for some 32-bit vs 64-bit differences. */
 #ifdef __x86_64__
@@ -305,62 +339,6 @@
 }
 #endif
 
-/* Number of errors in the current test case. */
-static volatile sig_atomic_t nerrs;
-
-/*
- * SIGUSR1 handler.  Sets CS and SS as requested and points IP to the
- * int3 trampoline.  Sets SP to a large known value so that we can see
- * whether the value round-trips back to user mode correctly.
- */
-static void sigusr1(int sig, siginfo_t *info, void *ctx_void)
-{
-	ucontext_t *ctx = (ucontext_t*)ctx_void;
-
-	memcpy(&initial_regs, &ctx->uc_mcontext.gregs, sizeof(gregset_t));
-
-	*csptr(ctx) = sig_cs;
-	*ssptr(ctx) = sig_ss;
-
-	ctx->uc_mcontext.gregs[REG_IP] =
-		sig_cs == code16_sel ? 0 : (unsigned long)&int3;
-	ctx->uc_mcontext.gregs[REG_SP] = (unsigned long)0x8badf00d5aadc0deULL;
-	ctx->uc_mcontext.gregs[REG_AX] = 0;
-
-	memcpy(&requested_regs, &ctx->uc_mcontext.gregs, sizeof(gregset_t));
-	requested_regs[REG_AX] = *ssptr(ctx);	/* The asm code does this. */
-
-	return;
-}
-
-/*
- * Called after a successful sigreturn.  Restores our state so that
- * the original raise(SIGUSR1) returns.
- */
-static void sigtrap(int sig, siginfo_t *info, void *ctx_void)
-{
-	ucontext_t *ctx = (ucontext_t*)ctx_void;
-
-	sig_err = ctx->uc_mcontext.gregs[REG_ERR];
-	sig_trapno = ctx->uc_mcontext.gregs[REG_TRAPNO];
-
-	unsigned short ss;
-	asm ("mov %%ss,%0" : "=r" (ss));
-
-	greg_t asm_ss = ctx->uc_mcontext.gregs[REG_AX];
-	if (asm_ss != sig_ss && sig == SIGTRAP) {
-		/* Sanity check failure. */
-		printf("[FAIL]\tSIGTRAP: ss = %hx, frame ss = %hx, ax = %llx\n",
-		       ss, *ssptr(ctx), (unsigned long long)asm_ss);
-		nerrs++;
-	}
-
-	memcpy(&resulting_regs, &ctx->uc_mcontext.gregs, sizeof(gregset_t));
-	memcpy(&ctx->uc_mcontext.gregs, &initial_regs, sizeof(gregset_t));
-
-	sig_trapped = sig;
-}
-
 /*
  * Checks a given selector for its code bitness or returns -1 if it's not
  * a usable code segment selector.
@@ -394,6 +372,184 @@
 		return -1;	/* Unknown bitness. */
 }
 
+/*
+ * Checks whether a given selector is a usable SS selector: a present,
+ * read/write data segment (normal or expand-down).
+ */
+bool is_valid_ss(unsigned short cs)
+{
+	uint32_t valid = 0, ar;
+	asm ("lar %[cs], %[ar]\n\t"
+	     "jnz 1f\n\t"
+	     "mov $1, %[valid]\n\t"
+	     "1:"
+	     : [ar] "=r" (ar), [valid] "+rm" (valid)
+	     : [cs] "r" (cs));
+
+	if (!valid)
+		return false;
+
+	if ((ar & AR_TYPE_MASK) != AR_TYPE_RWDATA &&
+	    (ar & AR_TYPE_MASK) != AR_TYPE_RWDATA_EXPDOWN)
+		return false;
+
+	return (ar & AR_P);
+}
+
+/* Number of errors in the current test case. */
+static volatile sig_atomic_t nerrs;
+
+static void validate_signal_ss(int sig, ucontext_t *ctx)
+{
+#ifdef __x86_64__
+	bool was_64bit = (cs_bitness(*csptr(ctx)) == 64);
+
+	if (!(ctx->uc_flags & UC_SIGCONTEXT_SS)) {
+		printf("[FAIL]\tUC_SIGCONTEXT_SS was not set\n");
+		nerrs++;
+
+		/*
+		 * This happens on Linux 4.1.  The rest will fail, too, so
+		 * return now to reduce the noise.
+		 */
+		return;
+	}
+
+	/* UC_STRICT_RESTORE_SS is set iff we came from 64-bit mode. */
+	if (!!(ctx->uc_flags & UC_STRICT_RESTORE_SS) != was_64bit) {
+		printf("[FAIL]\tUC_STRICT_RESTORE_SS was wrong in signal %d\n",
+		       sig);
+		nerrs++;
+	}
+
+	if (is_valid_ss(*ssptr(ctx))) {
+		/*
+		 * DOSEMU was written before 64-bit sigcontext had SS, and
+		 * it tries to figure out the signal source SS by looking at
+		 * the physical register.  Make sure that keeps working.
+		 */
+		unsigned short hw_ss;
+		asm ("mov %%ss, %0" : "=rm" (hw_ss));
+		if (hw_ss != *ssptr(ctx)) {
+			printf("[FAIL]\tHW SS didn't match saved SS\n");
+			nerrs++;
+		}
+	}
+#endif
+}
+
+/*
+ * SIGUSR1 handler.  Sets CS and SS as requested and points IP to the
+ * int3 trampoline.  Sets SP to a large known value so that we can see
+ * whether the value round-trips back to user mode correctly.
+ */
+static void sigusr1(int sig, siginfo_t *info, void *ctx_void)
+{
+	ucontext_t *ctx = (ucontext_t*)ctx_void;
+
+	validate_signal_ss(sig, ctx);
+
+	memcpy(&initial_regs, &ctx->uc_mcontext.gregs, sizeof(gregset_t));
+
+	*csptr(ctx) = sig_cs;
+	*ssptr(ctx) = sig_ss;
+
+	ctx->uc_mcontext.gregs[REG_IP] =
+		sig_cs == code16_sel ? 0 : (unsigned long)&int3;
+	ctx->uc_mcontext.gregs[REG_SP] = (unsigned long)0x8badf00d5aadc0deULL;
+	ctx->uc_mcontext.gregs[REG_AX] = 0;
+
+	memcpy(&requested_regs, &ctx->uc_mcontext.gregs, sizeof(gregset_t));
+	requested_regs[REG_AX] = *ssptr(ctx);	/* The asm code does this. */
+
+	return;
+}
+
+/*
+ * Called after a successful sigreturn (via int3) or from a failed
+ * sigreturn (directly by kernel).  Restores our state so that the
+ * original raise(SIGUSR1) returns.
+ */
+static void sigtrap(int sig, siginfo_t *info, void *ctx_void)
+{
+	ucontext_t *ctx = (ucontext_t*)ctx_void;
+
+	validate_signal_ss(sig, ctx);
+
+	sig_err = ctx->uc_mcontext.gregs[REG_ERR];
+	sig_trapno = ctx->uc_mcontext.gregs[REG_TRAPNO];
+
+	unsigned short ss;
+	asm ("mov %%ss,%0" : "=r" (ss));
+
+	greg_t asm_ss = ctx->uc_mcontext.gregs[REG_AX];
+	if (asm_ss != sig_ss && sig == SIGTRAP) {
+		/* Sanity check failure. */
+		printf("[FAIL]\tSIGTRAP: ss = %hx, frame ss = %hx, ax = %llx\n",
+		       ss, *ssptr(ctx), (unsigned long long)asm_ss);
+		nerrs++;
+	}
+
+	memcpy(&resulting_regs, &ctx->uc_mcontext.gregs, sizeof(gregset_t));
+	memcpy(&ctx->uc_mcontext.gregs, &initial_regs, sizeof(gregset_t));
+
+#ifdef __x86_64__
+	if (sig_corrupt_final_ss) {
+		if (ctx->uc_flags & UC_STRICT_RESTORE_SS) {
+			printf("[FAIL]\tUC_STRICT_RESTORE_SS was set inappropriately\n");
+			nerrs++;
+		} else {
+			/*
+			 * DOSEMU transitions from 32-bit to 64-bit mode by
+			 * adjusting sigcontext, and it requires that this work
+			 * even if the saved SS is bogus.
+			 */
+			printf("\tCorrupting SS on return to 64-bit mode\n");
+			*ssptr(ctx) = 0;
+		}
+	}
+#endif
+
+	sig_trapped = sig;
+}
+
+#ifdef __x86_64__
+/* Tests recovery if !UC_STRICT_RESTORE_SS */
+static void sigusr2(int sig, siginfo_t *info, void *ctx_void)
+{
+	ucontext_t *ctx = (ucontext_t*)ctx_void;
+
+	if (!(ctx->uc_flags & UC_STRICT_RESTORE_SS)) {
+		printf("[FAIL]\traise(2) didn't set UC_STRICT_RESTORE_SS\n");
+		nerrs++;
+		return;  /* We can't do the rest. */
+	}
+
+	ctx->uc_flags &= ~UC_STRICT_RESTORE_SS;
+	*ssptr(ctx) = 0;
+
+	/* Return.  The kernel should recover without sending another signal. */
+}
+
+static int test_nonstrict_ss(void)
+{
+	clearhandler(SIGUSR1);
+	clearhandler(SIGTRAP);
+	clearhandler(SIGSEGV);
+	clearhandler(SIGILL);
+	sethandler(SIGUSR2, sigusr2, 0);
+
+	nerrs = 0;
+
+	printf("[RUN]\tClear UC_STRICT_RESTORE_SS and corrupt SS\n");
+	raise(SIGUSR2);
+	if (!nerrs)
+		printf("[OK]\tIt worked\n");
+
+	return nerrs;
+}
+#endif
+
 /* Finds a usable code segment of the requested bitness. */
 int find_cs(int bitness)
 {
@@ -576,6 +732,12 @@
 		       errdesc, strsignal(sig_trapped));
 		return 0;
 	} else {
+		/*
+		 * This also implicitly tests UC_STRICT_RESTORE_SS:
+		 * We check that these signals set UC_STRICT_RESTORE_SS and,
+		 * if UC_STRICT_RESTORE_SS doesn't cause strict behavior,
+		 * then we won't get SIGSEGV.
+		 */
 		printf("[FAIL]\tDid not get SIGSEGV\n");
 		return 1;
 	}
@@ -632,6 +794,14 @@
 						    GDT3(gdt_data16_idx));
 	}
 
+#ifdef __x86_64__
+	/* Nasty ABI case: check SS corruption handling. */
+	sig_corrupt_final_ss = 1;
+	total_nerrs += test_valid_sigreturn(32, false, -1);
+	total_nerrs += test_valid_sigreturn(32, true, -1);
+	sig_corrupt_final_ss = 0;
+#endif
+
 	/*
 	 * We're done testing valid sigreturn cases.  Now we test states
 	 * for which sigreturn itself will succeed but the subsequent
@@ -680,5 +850,9 @@
 	if (gdt_npdata32_idx)
 		test_bad_iret(32, GDT3(gdt_npdata32_idx), -1);
 
+#ifdef __x86_64__
+	total_nerrs += test_nonstrict_ss();
+#endif
+
 	return total_nerrs ? 1 : 0;
 }
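
An illustrative standalone sketch (not part of the patch): a program can probe which of the two new uc_flags bits its kernel sets by inspecting the flags from inside a handler. The two constants are the same ones copied into the test above; everything else here is just example code.

#define _GNU_SOURCE
#include <err.h>
#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <ucontext.h>

#define UC_SIGCONTEXT_SS	0x2
#define UC_STRICT_RESTORE_SS	0x4

static void handler(int sig, siginfo_t *info, void *ctx_void)
{
	ucontext_t *ctx = (ucontext_t *)ctx_void;

	/* Raised from 64-bit code, so new kernels should set both bits. */
	printf("uc_flags = 0x%lx (UC_SIGCONTEXT_SS=%d, UC_STRICT_RESTORE_SS=%d)\n",
	       (unsigned long)ctx->uc_flags,
	       !!(ctx->uc_flags & UC_SIGCONTEXT_SS),
	       !!(ctx->uc_flags & UC_STRICT_RESTORE_SS));
}

int main(void)
{
	struct sigaction sa;

	memset(&sa, 0, sizeof(sa));
	sa.sa_sigaction = handler;
	sa.sa_flags = SA_SIGINFO;
	sigemptyset(&sa.sa_mask);
	if (sigaction(SIGUSR1, &sa, 0))
		err(1, "sigaction");

	raise(SIGUSR1);
	return 0;
}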
diff --git a/tools/testing/selftests/x86/syscall_nt.c b/tools/testing/selftests/x86/syscall_nt.c
index 60c06af4..43fcab36 100644
--- a/tools/testing/selftests/x86/syscall_nt.c
+++ b/tools/testing/selftests/x86/syscall_nt.c
@@ -17,6 +17,9 @@
 
 #include <stdio.h>
 #include <unistd.h>
+#include <string.h>
+#include <signal.h>
+#include <err.h>
 #include <sys/syscall.h>
 #include <asm/processor-flags.h>
 
@@ -26,6 +29,8 @@
 # define WIDTH "l"
 #endif
 
+static unsigned int nerrs;
+
 static unsigned long get_eflags(void)
 {
 	unsigned long eflags;
@@ -39,16 +44,52 @@
 		      : : "rm" (eflags) : "flags");
 }
 
-int main()
+static void sethandler(int sig, void (*handler)(int, siginfo_t *, void *),
+		       int flags)
+{
+	struct sigaction sa;
+	memset(&sa, 0, sizeof(sa));
+	sa.sa_sigaction = handler;
+	sa.sa_flags = SA_SIGINFO | flags;
+	sigemptyset(&sa.sa_mask);
+	if (sigaction(sig, &sa, 0))
+		err(1, "sigaction");
+}
+
+static void sigtrap(int sig, siginfo_t *si, void *ctx_void)
+{
+}
+
+static void do_it(unsigned long extraflags)
+{
+	unsigned long flags;
+
+	set_eflags(get_eflags() | extraflags);
+	syscall(SYS_getpid);
+	flags = get_eflags();
+	if ((flags & extraflags) == extraflags) {
+		printf("[OK]\tThe syscall worked and flags are still set\n");
+	} else {
+		printf("[FAIL]\tThe syscall worked but flags were cleared (flags = 0x%lx but expected 0x%lx set)\n",
+		       flags, extraflags);
+		nerrs++;
+	}
+}
+
+int main(void)
 {
 	printf("[RUN]\tSet NT and issue a syscall\n");
-	set_eflags(get_eflags() | X86_EFLAGS_NT);
-	syscall(SYS_getpid);
-	if (get_eflags() & X86_EFLAGS_NT) {
-		printf("[OK]\tThe syscall worked and NT is still set\n");
-		return 0;
-	} else {
-		printf("[FAIL]\tThe syscall worked but NT was cleared\n");
-		return 1;
-	}
+	do_it(X86_EFLAGS_NT);
+
+	/*
+	 * Now try it again with TF set -- TF forces returns via IRET in all
+	 * cases except non-ptregs-using 64-bit full fast path syscalls.
+	 */
+
+	sethandler(SIGTRAP, sigtrap, 0);
+
+	printf("[RUN]\tSet NT|TF and issue a syscall\n");
+	do_it(X86_EFLAGS_NT | X86_EFLAGS_TF);
+
+	return nerrs == 0 ? 0 : 1;
 }
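
For reference (not part of the patch), the two flag bits the test exercises and the containment check used in do_it() above, written out as a standalone snippet; the numeric values are the standard EFLAGS bit positions (TF is bit 8, NT is bit 14).

#include <stdio.h>

#define X86_EFLAGS_TF	0x00000100	/* bit 8: single-step trap flag */
#define X86_EFLAGS_NT	0x00004000	/* bit 14: nested task flag */

int main(void)
{
	unsigned long extraflags = X86_EFLAGS_NT | X86_EFLAGS_TF;
	unsigned long flags = 0x202 | extraflags;	/* pretend post-syscall EFLAGS */

	/* The test passes only if every requested bit survived the syscall. */
	printf("bits preserved: %s\n",
	       (flags & extraflags) == extraflags ? "yes" : "no");
	return 0;
}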
diff --git a/virt/kvm/arm/arch_timer.c b/virt/kvm/arm/arch_timer.c
index ea60646..a9ad4fe 100644
--- a/virt/kvm/arm/arch_timer.c
+++ b/virt/kvm/arm/arch_timer.c
@@ -34,6 +34,11 @@
 static struct workqueue_struct *wqueue;
 static unsigned int host_vtimer_irq;
 
+void kvm_timer_vcpu_put(struct kvm_vcpu *vcpu)
+{
+	vcpu->arch.timer_cpu.active_cleared_last = false;
+}
+
 static cycle_t kvm_phys_timer_read(void)
 {
 	return timecounter->cc->read(timecounter->cc);
@@ -130,6 +135,7 @@
 
 	BUG_ON(!vgic_initialized(vcpu->kvm));
 
+	timer->active_cleared_last = false;
 	timer->irq.level = new_level;
 	trace_kvm_timer_update_irq(vcpu->vcpu_id, timer->map->virt_irq,
 				   timer->irq.level);
@@ -245,10 +251,35 @@
 	else
 		phys_active = false;
 
+	/*
+	 * We want to avoid hitting the (re)distributor as much as
+	 * possible, as this is a potentially expensive MMIO access
+	 * (not to mention locks in the irq layer), and a solution for
+	 * this is to cache the "active" state in memory.
+	 *
+	 * Things to consider: we cannot cache an "active set" state,
+	 * because the HW can change this behind our back (it becomes
+	 * "clear" in the HW). We must then restrict the caching to
+	 * the "clear" state.
+	 *
+	 * The cache is invalidated on:
+	 * - vcpu put, indicating that the HW cannot be trusted to be
+	 *   in a sane state on the next vcpu load,
+	 * - any change in the interrupt state
+	 *
+	 * Usage conditions:
+	 * - cached value is "active clear"
+	 * - value to be programmed is "active clear"
+	 */
+	if (timer->active_cleared_last && !phys_active)
+		return;
+
 	ret = irq_set_irqchip_state(timer->map->irq,
 				    IRQCHIP_STATE_ACTIVE,
 				    phys_active);
 	WARN_ON(ret);
+
+	timer->active_cleared_last = !phys_active;
 }
 
 /**
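
The comment block above states the caching rule in prose; here is a generic userspace sketch of the same skip-redundant-write pattern (illustrative only, the names are made up and this is not kernel code). Only the "cleared" state may be cached, because the hardware can clear the active bit behind our back but never sets it on its own.

#include <stdbool.h>
#include <stdio.h>

struct timer_state {
	bool active_cleared_last;	/* cache: the last value written was "not active" */
};

static void expensive_hw_write(bool active)
{
	/* Stands in for the irq_set_irqchip_state() MMIO access. */
	printf("MMIO write: active=%d\n", active);
}

static void set_phys_active(struct timer_state *t, bool phys_active)
{
	/* Cached "clear" and asked to write "clear" again: skip the MMIO. */
	if (t->active_cleared_last && !phys_active)
		return;

	expensive_hw_write(phys_active);
	t->active_cleared_last = !phys_active;
}

/* On vcpu put, or on any interrupt state change, the cache must be dropped. */
static void invalidate_cache(struct timer_state *t)
{
	t->active_cleared_last = false;
}

int main(void)
{
	struct timer_state t = { .active_cleared_last = false };

	set_phys_active(&t, false);	/* writes */
	set_phys_active(&t, false);	/* skipped: cached clear */
	set_phys_active(&t, true);	/* writes */
	invalidate_cache(&t);
	set_phys_active(&t, false);	/* writes again after invalidation */
	return 0;
}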
diff --git a/arch/arm64/kvm/hyp/timer-sr.c b/virt/kvm/arm/hyp/timer-sr.c
similarity index 86%
rename from arch/arm64/kvm/hyp/timer-sr.c
rename to virt/kvm/arm/hyp/timer-sr.c
index 1051e5d..ea00d69 100644
--- a/arch/arm64/kvm/hyp/timer-sr.c
+++ b/virt/kvm/arm/hyp/timer-sr.c
@@ -19,9 +19,7 @@
 #include <linux/compiler.h>
 #include <linux/kvm_host.h>
 
-#include <asm/kvm_mmu.h>
-
-#include "hyp.h"
+#include <asm/kvm_hyp.h>
 
 /* vcpu is already in the HYP VA space */
 void __hyp_text __timer_save_state(struct kvm_vcpu *vcpu)
@@ -31,12 +29,12 @@
 	u64 val;
 
 	if (kvm->arch.timer.enabled) {
-		timer->cntv_ctl = read_sysreg(cntv_ctl_el0);
-		timer->cntv_cval = read_sysreg(cntv_cval_el0);
+		timer->cntv_ctl = read_sysreg_el0(cntv_ctl);
+		timer->cntv_cval = read_sysreg_el0(cntv_cval);
 	}
 
 	/* Disable the virtual timer */
-	write_sysreg(0, cntv_ctl_el0);
+	write_sysreg_el0(0, cntv_ctl);
 
 	/* Allow physical timer/counter access for the host */
 	val = read_sysreg(cnthctl_el2);
@@ -64,8 +62,8 @@
 
 	if (kvm->arch.timer.enabled) {
 		write_sysreg(kvm->arch.timer.cntvoff, cntvoff_el2);
-		write_sysreg(timer->cntv_cval, cntv_cval_el0);
+		write_sysreg_el0(timer->cntv_cval, cntv_cval);
 		isb();
-		write_sysreg(timer->cntv_ctl, cntv_ctl_el0);
+		write_sysreg_el0(timer->cntv_ctl, cntv_ctl);
 	}
 }
diff --git a/virt/kvm/arm/hyp/vgic-v2-sr.c b/virt/kvm/arm/hyp/vgic-v2-sr.c
new file mode 100644
index 0000000..674bdf8
--- /dev/null
+++ b/virt/kvm/arm/hyp/vgic-v2-sr.c
@@ -0,0 +1,170 @@
+/*
+ * Copyright (C) 2012-2015 - ARM Ltd
+ * Author: Marc Zyngier <marc.zyngier@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/compiler.h>
+#include <linux/irqchip/arm-gic.h>
+#include <linux/kvm_host.h>
+
+#include <asm/kvm_hyp.h>
+
+static void __hyp_text save_maint_int_state(struct kvm_vcpu *vcpu,
+					    void __iomem *base)
+{
+	struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;
+	int nr_lr = vcpu->arch.vgic_cpu.nr_lr;
+	u32 eisr0, eisr1;
+	int i;
+	bool expect_mi;
+
+	expect_mi = !!(cpu_if->vgic_hcr & GICH_HCR_UIE);
+
+	for (i = 0; i < nr_lr; i++) {
+		if (!(vcpu->arch.vgic_cpu.live_lrs & (1UL << i)))
+			continue;
+
+		expect_mi |= (!(cpu_if->vgic_lr[i] & GICH_LR_HW) &&
+			      (cpu_if->vgic_lr[i] & GICH_LR_EOI));
+	}
+
+	if (expect_mi) {
+		cpu_if->vgic_misr = readl_relaxed(base + GICH_MISR);
+
+		if (cpu_if->vgic_misr & GICH_MISR_EOI) {
+			eisr0  = readl_relaxed(base + GICH_EISR0);
+			if (unlikely(nr_lr > 32))
+				eisr1  = readl_relaxed(base + GICH_EISR1);
+			else
+				eisr1 = 0;
+		} else {
+			eisr0 = eisr1 = 0;
+		}
+	} else {
+		cpu_if->vgic_misr = 0;
+		eisr0 = eisr1 = 0;
+	}
+
+#ifdef CONFIG_CPU_BIG_ENDIAN
+	cpu_if->vgic_eisr = ((u64)eisr0 << 32) | eisr1;
+#else
+	cpu_if->vgic_eisr = ((u64)eisr1 << 32) | eisr0;
+#endif
+}
+
+static void __hyp_text save_elrsr(struct kvm_vcpu *vcpu, void __iomem *base)
+{
+	struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;
+	int nr_lr = vcpu->arch.vgic_cpu.nr_lr;
+	u32 elrsr0, elrsr1;
+
+	elrsr0 = readl_relaxed(base + GICH_ELRSR0);
+	if (unlikely(nr_lr > 32))
+		elrsr1 = readl_relaxed(base + GICH_ELRSR1);
+	else
+		elrsr1 = 0;
+
+#ifdef CONFIG_CPU_BIG_ENDIAN
+	cpu_if->vgic_elrsr = ((u64)elrsr0 << 32) | elrsr1;
+#else
+	cpu_if->vgic_elrsr = ((u64)elrsr1 << 32) | elrsr0;
+#endif
+}
+
+static void __hyp_text save_lrs(struct kvm_vcpu *vcpu, void __iomem *base)
+{
+	struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;
+	int nr_lr = vcpu->arch.vgic_cpu.nr_lr;
+	int i;
+
+	for (i = 0; i < nr_lr; i++) {
+		if (!(vcpu->arch.vgic_cpu.live_lrs & (1UL << i)))
+			continue;
+
+		if (cpu_if->vgic_elrsr & (1UL << i)) {
+			cpu_if->vgic_lr[i] &= ~GICH_LR_STATE;
+			continue;
+		}
+
+		cpu_if->vgic_lr[i] = readl_relaxed(base + GICH_LR0 + (i * 4));
+		writel_relaxed(0, base + GICH_LR0 + (i * 4));
+	}
+}
+
+/* vcpu is already in the HYP VA space */
+void __hyp_text __vgic_v2_save_state(struct kvm_vcpu *vcpu)
+{
+	struct kvm *kvm = kern_hyp_va(vcpu->kvm);
+	struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;
+	struct vgic_dist *vgic = &kvm->arch.vgic;
+	void __iomem *base = kern_hyp_va(vgic->vctrl_base);
+
+	if (!base)
+		return;
+
+	cpu_if->vgic_vmcr = readl_relaxed(base + GICH_VMCR);
+
+	if (vcpu->arch.vgic_cpu.live_lrs) {
+		cpu_if->vgic_apr = readl_relaxed(base + GICH_APR);
+
+		save_maint_int_state(vcpu, base);
+		save_elrsr(vcpu, base);
+		save_lrs(vcpu, base);
+
+		writel_relaxed(0, base + GICH_HCR);
+
+		vcpu->arch.vgic_cpu.live_lrs = 0;
+	} else {
+		cpu_if->vgic_eisr = 0;
+		cpu_if->vgic_elrsr = ~0UL;
+		cpu_if->vgic_misr = 0;
+		cpu_if->vgic_apr = 0;
+	}
+}
+
+/* vcpu is already in the HYP VA space */
+void __hyp_text __vgic_v2_restore_state(struct kvm_vcpu *vcpu)
+{
+	struct kvm *kvm = kern_hyp_va(vcpu->kvm);
+	struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;
+	struct vgic_dist *vgic = &kvm->arch.vgic;
+	void __iomem *base = kern_hyp_va(vgic->vctrl_base);
+	int i, nr_lr;
+	u64 live_lrs = 0;
+
+	if (!base)
+		return;
+
+	nr_lr = vcpu->arch.vgic_cpu.nr_lr;
+
+	for (i = 0; i < nr_lr; i++)
+		if (cpu_if->vgic_lr[i] & GICH_LR_STATE)
+			live_lrs |= 1UL << i;
+
+	if (live_lrs) {
+		writel_relaxed(cpu_if->vgic_hcr, base + GICH_HCR);
+		writel_relaxed(cpu_if->vgic_apr, base + GICH_APR);
+		for (i = 0; i < nr_lr; i++) {
+			if (!(live_lrs & (1UL << i)))
+				continue;
+
+			writel_relaxed(cpu_if->vgic_lr[i],
+				       base + GICH_LR0 + (i * 4));
+		}
+	}
+
+	writel_relaxed(cpu_if->vgic_vmcr, base + GICH_VMCR);
+	vcpu->arch.vgic_cpu.live_lrs = live_lrs;
+}
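
A small userspace model (not part of the patch) of the packing done in save_elrsr() and save_maint_int_state() above: two 32-bit per-LR status words are folded into one u64 whose word order depends on CPU endianness, so that when the field is viewed in memory as two consecutive 32-bit words the low-numbered word always comes first.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint64_t pack_status(uint32_t word0, uint32_t word1)
{
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
	return ((uint64_t)word0 << 32) | word1;
#else
	return ((uint64_t)word1 << 32) | word0;
#endif
}

int main(void)
{
	uint64_t packed = pack_status(0xaa, 0xbb);
	uint32_t halves[2];

	/* On either endianness, the first 32-bit word in memory is word0. */
	memcpy(halves, &packed, sizeof(packed));
	printf("halves[0] = 0x%x, halves[1] = 0x%x\n",
	       (unsigned)halves[0], (unsigned)halves[1]);
	return 0;
}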
diff --git a/virt/kvm/arm/pmu.c b/virt/kvm/arm/pmu.c
new file mode 100644
index 0000000..b5754c6
--- /dev/null
+++ b/virt/kvm/arm/pmu.c
@@ -0,0 +1,529 @@
+/*
+ * Copyright (C) 2015 Linaro Ltd.
+ * Author: Shannon Zhao <shannon.zhao@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/cpu.h>
+#include <linux/kvm.h>
+#include <linux/kvm_host.h>
+#include <linux/perf_event.h>
+#include <linux/uaccess.h>
+#include <asm/kvm_emulate.h>
+#include <kvm/arm_pmu.h>
+#include <kvm/arm_vgic.h>
+
+/**
+ * kvm_pmu_get_counter_value - get PMU counter value
+ * @vcpu: The vcpu pointer
+ * @select_idx: The counter index
+ */
+u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu, u64 select_idx)
+{
+	u64 counter, reg, enabled, running;
+	struct kvm_pmu *pmu = &vcpu->arch.pmu;
+	struct kvm_pmc *pmc = &pmu->pmc[select_idx];
+
+	reg = (select_idx == ARMV8_PMU_CYCLE_IDX)
+	      ? PMCCNTR_EL0 : PMEVCNTR0_EL0 + select_idx;
+	counter = vcpu_sys_reg(vcpu, reg);
+
+	/* The real counter value is equal to the value of counter register plus
+	 * the value perf event counts.
+	 */
+	if (pmc->perf_event)
+		counter += perf_event_read_value(pmc->perf_event, &enabled,
+						 &running);
+
+	return counter & pmc->bitmask;
+}
+
+/**
+ * kvm_pmu_set_counter_value - set PMU counter value
+ * @vcpu: The vcpu pointer
+ * @select_idx: The counter index
+ * @val: The counter value
+ */
+void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu, u64 select_idx, u64 val)
+{
+	u64 reg;
+
+	reg = (select_idx == ARMV8_PMU_CYCLE_IDX)
+	      ? PMCCNTR_EL0 : PMEVCNTR0_EL0 + select_idx;
+	vcpu_sys_reg(vcpu, reg) += (s64)val - kvm_pmu_get_counter_value(vcpu, select_idx);
+}
+
+/**
+ * kvm_pmu_stop_counter - stop PMU counter
+ * @pmc: The PMU counter pointer
+ *
+ * If this counter has been configured to monitor some event, release it here.
+ */
+static void kvm_pmu_stop_counter(struct kvm_vcpu *vcpu, struct kvm_pmc *pmc)
+{
+	u64 counter, reg;
+
+	if (pmc->perf_event) {
+		counter = kvm_pmu_get_counter_value(vcpu, pmc->idx);
+		reg = (pmc->idx == ARMV8_PMU_CYCLE_IDX)
+		       ? PMCCNTR_EL0 : PMEVCNTR0_EL0 + pmc->idx;
+		vcpu_sys_reg(vcpu, reg) = counter;
+		perf_event_disable(pmc->perf_event);
+		perf_event_release_kernel(pmc->perf_event);
+		pmc->perf_event = NULL;
+	}
+}
+
+/**
+ * kvm_pmu_vcpu_reset - reset pmu state for cpu
+ * @vcpu: The vcpu pointer
+ *
+ */
+void kvm_pmu_vcpu_reset(struct kvm_vcpu *vcpu)
+{
+	int i;
+	struct kvm_pmu *pmu = &vcpu->arch.pmu;
+
+	for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++) {
+		kvm_pmu_stop_counter(vcpu, &pmu->pmc[i]);
+		pmu->pmc[i].idx = i;
+		pmu->pmc[i].bitmask = 0xffffffffUL;
+	}
+}
+
+/**
+ * kvm_pmu_vcpu_destroy - free perf event of PMU for cpu
+ * @vcpu: The vcpu pointer
+ *
+ */
+void kvm_pmu_vcpu_destroy(struct kvm_vcpu *vcpu)
+{
+	int i;
+	struct kvm_pmu *pmu = &vcpu->arch.pmu;
+
+	for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++) {
+		struct kvm_pmc *pmc = &pmu->pmc[i];
+
+		if (pmc->perf_event) {
+			perf_event_disable(pmc->perf_event);
+			perf_event_release_kernel(pmc->perf_event);
+			pmc->perf_event = NULL;
+		}
+	}
+}
+
+u64 kvm_pmu_valid_counter_mask(struct kvm_vcpu *vcpu)
+{
+	u64 val = vcpu_sys_reg(vcpu, PMCR_EL0) >> ARMV8_PMU_PMCR_N_SHIFT;
+
+	val &= ARMV8_PMU_PMCR_N_MASK;
+	if (val == 0)
+		return BIT(ARMV8_PMU_CYCLE_IDX);
+	else
+		return GENMASK(val - 1, 0) | BIT(ARMV8_PMU_CYCLE_IDX);
+}
+
+/**
+ * kvm_pmu_enable_counter - enable selected PMU counter
+ * @vcpu: The vcpu pointer
+ * @val: the value guest writes to PMCNTENSET register
+ *
+ * Call perf_event_enable to start counting the perf event
+ */
+void kvm_pmu_enable_counter(struct kvm_vcpu *vcpu, u64 val)
+{
+	int i;
+	struct kvm_pmu *pmu = &vcpu->arch.pmu;
+	struct kvm_pmc *pmc;
+
+	if (!(vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E) || !val)
+		return;
+
+	for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++) {
+		if (!(val & BIT(i)))
+			continue;
+
+		pmc = &pmu->pmc[i];
+		if (pmc->perf_event) {
+			perf_event_enable(pmc->perf_event);
+			if (pmc->perf_event->state != PERF_EVENT_STATE_ACTIVE)
+				kvm_debug("fail to enable perf event\n");
+		}
+	}
+}
+
+/**
+ * kvm_pmu_disable_counter - disable selected PMU counter
+ * @vcpu: The vcpu pointer
+ * @val: the value guest writes to PMCNTENCLR register
+ *
+ * Call perf_event_disable to stop counting the perf event
+ */
+void kvm_pmu_disable_counter(struct kvm_vcpu *vcpu, u64 val)
+{
+	int i;
+	struct kvm_pmu *pmu = &vcpu->arch.pmu;
+	struct kvm_pmc *pmc;
+
+	if (!val)
+		return;
+
+	for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++) {
+		if (!(val & BIT(i)))
+			continue;
+
+		pmc = &pmu->pmc[i];
+		if (pmc->perf_event)
+			perf_event_disable(pmc->perf_event);
+	}
+}
+
+static u64 kvm_pmu_overflow_status(struct kvm_vcpu *vcpu)
+{
+	u64 reg = 0;
+
+	if ((vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E)) {
+		reg = vcpu_sys_reg(vcpu, PMOVSSET_EL0);
+		reg &= vcpu_sys_reg(vcpu, PMCNTENSET_EL0);
+		reg &= vcpu_sys_reg(vcpu, PMINTENSET_EL1);
+		reg &= kvm_pmu_valid_counter_mask(vcpu);
+	}
+
+	return reg;
+}
+
+/**
+ * kvm_pmu_overflow_set - set PMU overflow interrupt
+ * @vcpu: The vcpu pointer
+ * @val: the value guest writes to PMOVSSET register
+ */
+void kvm_pmu_overflow_set(struct kvm_vcpu *vcpu, u64 val)
+{
+	u64 reg;
+
+	if (val == 0)
+		return;
+
+	vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= val;
+	reg = kvm_pmu_overflow_status(vcpu);
+	if (reg != 0)
+		kvm_vcpu_kick(vcpu);
+}
+
+static void kvm_pmu_update_state(struct kvm_vcpu *vcpu)
+{
+	struct kvm_pmu *pmu = &vcpu->arch.pmu;
+	bool overflow;
+
+	if (!kvm_arm_pmu_v3_ready(vcpu))
+		return;
+
+	overflow = !!kvm_pmu_overflow_status(vcpu);
+	if (pmu->irq_level != overflow) {
+		pmu->irq_level = overflow;
+		kvm_vgic_inject_irq(vcpu->kvm, vcpu->vcpu_id,
+				    pmu->irq_num, overflow);
+	}
+}
+
+/**
+ * kvm_pmu_flush_hwstate - flush pmu state to cpu
+ * @vcpu: The vcpu pointer
+ *
+ * Check if the PMU has overflowed while we were running in the host, and inject
+ * an interrupt if that was the case.
+ */
+void kvm_pmu_flush_hwstate(struct kvm_vcpu *vcpu)
+{
+	kvm_pmu_update_state(vcpu);
+}
+
+/**
+ * kvm_pmu_sync_hwstate - sync pmu state from cpu
+ * @vcpu: The vcpu pointer
+ *
+ * Check if the PMU has overflowed while we were running in the guest, and
+ * inject an interrupt if that was the case.
+ */
+void kvm_pmu_sync_hwstate(struct kvm_vcpu *vcpu)
+{
+	kvm_pmu_update_state(vcpu);
+}
+
+static inline struct kvm_vcpu *kvm_pmc_to_vcpu(struct kvm_pmc *pmc)
+{
+	struct kvm_pmu *pmu;
+	struct kvm_vcpu_arch *vcpu_arch;
+
+	pmc -= pmc->idx;
+	pmu = container_of(pmc, struct kvm_pmu, pmc[0]);
+	vcpu_arch = container_of(pmu, struct kvm_vcpu_arch, pmu);
+	return container_of(vcpu_arch, struct kvm_vcpu, arch);
+}
+
+/**
+ * When perf event overflows, call kvm_pmu_overflow_set to set overflow status.
+ */
+static void kvm_pmu_perf_overflow(struct perf_event *perf_event,
+				  struct perf_sample_data *data,
+				  struct pt_regs *regs)
+{
+	struct kvm_pmc *pmc = perf_event->overflow_handler_context;
+	struct kvm_vcpu *vcpu = kvm_pmc_to_vcpu(pmc);
+	int idx = pmc->idx;
+
+	kvm_pmu_overflow_set(vcpu, BIT(idx));
+}
+
+/**
+ * kvm_pmu_software_increment - do software increment
+ * @vcpu: The vcpu pointer
+ * @val: the value guest writes to PMSWINC register
+ */
+void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u64 val)
+{
+	int i;
+	u64 type, enable, reg;
+
+	if (val == 0)
+		return;
+
+	enable = vcpu_sys_reg(vcpu, PMCNTENSET_EL0);
+	for (i = 0; i < ARMV8_PMU_CYCLE_IDX; i++) {
+		if (!(val & BIT(i)))
+			continue;
+		type = vcpu_sys_reg(vcpu, PMEVTYPER0_EL0 + i)
+		       & ARMV8_PMU_EVTYPE_EVENT;
+		if ((type == ARMV8_PMU_EVTYPE_EVENT_SW_INCR)
+		    && (enable & BIT(i))) {
+			reg = vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i) + 1;
+			reg = lower_32_bits(reg);
+			vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i) = reg;
+			if (!reg)
+				kvm_pmu_overflow_set(vcpu, BIT(i));
+		}
+	}
+}
+
+/**
+ * kvm_pmu_handle_pmcr - handle PMCR register
+ * @vcpu: The vcpu pointer
+ * @val: the value guest writes to PMCR register
+ */
+void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val)
+{
+	struct kvm_pmu *pmu = &vcpu->arch.pmu;
+	struct kvm_pmc *pmc;
+	u64 mask;
+	int i;
+
+	mask = kvm_pmu_valid_counter_mask(vcpu);
+	if (val & ARMV8_PMU_PMCR_E) {
+		kvm_pmu_enable_counter(vcpu,
+				vcpu_sys_reg(vcpu, PMCNTENSET_EL0) & mask);
+	} else {
+		kvm_pmu_disable_counter(vcpu, mask);
+	}
+
+	if (val & ARMV8_PMU_PMCR_C)
+		kvm_pmu_set_counter_value(vcpu, ARMV8_PMU_CYCLE_IDX, 0);
+
+	if (val & ARMV8_PMU_PMCR_P) {
+		for (i = 0; i < ARMV8_PMU_CYCLE_IDX; i++)
+			kvm_pmu_set_counter_value(vcpu, i, 0);
+	}
+
+	if (val & ARMV8_PMU_PMCR_LC) {
+		pmc = &pmu->pmc[ARMV8_PMU_CYCLE_IDX];
+		pmc->bitmask = 0xffffffffffffffffUL;
+	}
+}
+
+static bool kvm_pmu_counter_is_enabled(struct kvm_vcpu *vcpu, u64 select_idx)
+{
+	return (vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E) &&
+	       (vcpu_sys_reg(vcpu, PMCNTENSET_EL0) & BIT(select_idx));
+}
+
+/**
+ * kvm_pmu_set_counter_event_type - set selected counter to monitor some event
+ * @vcpu: The vcpu pointer
+ * @data: The data guest writes to PMXEVTYPER_EL0
+ * @select_idx: The number of selected counter
+ *
+ * When OS accesses PMXEVTYPER_EL0, that means it wants to set a PMC to count an
+ * event with given hardware event number. Here we call perf_event API to
+ * emulate this action and create a kernel perf event for it.
+ */
+void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u64 data,
+				    u64 select_idx)
+{
+	struct kvm_pmu *pmu = &vcpu->arch.pmu;
+	struct kvm_pmc *pmc = &pmu->pmc[select_idx];
+	struct perf_event *event;
+	struct perf_event_attr attr;
+	u64 eventsel, counter;
+
+	kvm_pmu_stop_counter(vcpu, pmc);
+	eventsel = data & ARMV8_PMU_EVTYPE_EVENT;
+
+	/* Software increment event doesn't need to be backed by a perf event */
+	if (eventsel == ARMV8_PMU_EVTYPE_EVENT_SW_INCR)
+		return;
+
+	memset(&attr, 0, sizeof(struct perf_event_attr));
+	attr.type = PERF_TYPE_RAW;
+	attr.size = sizeof(attr);
+	attr.pinned = 1;
+	attr.disabled = !kvm_pmu_counter_is_enabled(vcpu, select_idx);
+	attr.exclude_user = data & ARMV8_PMU_EXCLUDE_EL0 ? 1 : 0;
+	attr.exclude_kernel = data & ARMV8_PMU_EXCLUDE_EL1 ? 1 : 0;
+	attr.exclude_hv = 1; /* Don't count EL2 events */
+	attr.exclude_host = 1; /* Don't count host events */
+	attr.config = eventsel;
+
+	counter = kvm_pmu_get_counter_value(vcpu, select_idx);
+	/* The initial sample period (overflow count) of an event. */
+	attr.sample_period = (-counter) & pmc->bitmask;
+
+	event = perf_event_create_kernel_counter(&attr, -1, current,
+						 kvm_pmu_perf_overflow, pmc);
+	if (IS_ERR(event)) {
+		pr_err_once("kvm: pmu event creation failed %ld\n",
+			    PTR_ERR(event));
+		return;
+	}
+
+	pmc->perf_event = event;
+}
+
+bool kvm_arm_support_pmu_v3(void)
+{
+	/*
+	 * Check if HW_PERF_EVENTS are supported by checking the number of
+	 * hardware performance counters. This ensures that a physical PMU
+	 * is present and that CONFIG_PERF_EVENTS is enabled.
+	 */
+	return (perf_num_counters() > 0);
+}
+
+static int kvm_arm_pmu_v3_init(struct kvm_vcpu *vcpu)
+{
+	if (!kvm_arm_support_pmu_v3())
+		return -ENODEV;
+
+	if (!test_bit(KVM_ARM_VCPU_PMU_V3, vcpu->arch.features) ||
+	    !kvm_arm_pmu_irq_initialized(vcpu))
+		return -ENXIO;
+
+	if (kvm_arm_pmu_v3_ready(vcpu))
+		return -EBUSY;
+
+	kvm_pmu_vcpu_reset(vcpu);
+	vcpu->arch.pmu.ready = true;
+
+	return 0;
+}
+
+static bool irq_is_valid(struct kvm *kvm, int irq, bool is_ppi)
+{
+	int i;
+	struct kvm_vcpu *vcpu;
+
+	kvm_for_each_vcpu(i, vcpu, kvm) {
+		if (!kvm_arm_pmu_irq_initialized(vcpu))
+			continue;
+
+		if (is_ppi) {
+			if (vcpu->arch.pmu.irq_num != irq)
+				return false;
+		} else {
+			if (vcpu->arch.pmu.irq_num == irq)
+				return false;
+		}
+	}
+
+	return true;
+}
+
+
+int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
+{
+	switch (attr->attr) {
+	case KVM_ARM_VCPU_PMU_V3_IRQ: {
+		int __user *uaddr = (int __user *)(long)attr->addr;
+		int irq;
+
+		if (!test_bit(KVM_ARM_VCPU_PMU_V3, vcpu->arch.features))
+			return -ENODEV;
+
+		if (get_user(irq, uaddr))
+			return -EFAULT;
+
+		/*
+		 * The PMU overflow interrupt can be a PPI or an SPI, but for one
+		 * VM the interrupt type must be the same for each vcpu. As a PPI,
+		 * the interrupt number is the same for all vcpus, while as an
+		 * SPI it must be a separate number per vcpu.
+		 */
+		if (irq < VGIC_NR_SGIS || irq >= vcpu->kvm->arch.vgic.nr_irqs ||
+		    !irq_is_valid(vcpu->kvm, irq, irq < VGIC_NR_PRIVATE_IRQS))
+			return -EINVAL;
+
+		if (kvm_arm_pmu_irq_initialized(vcpu))
+			return -EBUSY;
+
+		kvm_debug("Set kvm ARM PMU irq: %d\n", irq);
+		vcpu->arch.pmu.irq_num = irq;
+		return 0;
+	}
+	case KVM_ARM_VCPU_PMU_V3_INIT:
+		return kvm_arm_pmu_v3_init(vcpu);
+	}
+
+	return -ENXIO;
+}
+
+int kvm_arm_pmu_v3_get_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
+{
+	switch (attr->attr) {
+	case KVM_ARM_VCPU_PMU_V3_IRQ: {
+		int __user *uaddr = (int __user *)(long)attr->addr;
+		int irq;
+
+		if (!test_bit(KVM_ARM_VCPU_PMU_V3, vcpu->arch.features))
+			return -ENODEV;
+
+		if (!kvm_arm_pmu_irq_initialized(vcpu))
+			return -ENXIO;
+
+		irq = vcpu->arch.pmu.irq_num;
+		return put_user(irq, uaddr);
+	}
+	}
+
+	return -ENXIO;
+}
+
+int kvm_arm_pmu_v3_has_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
+{
+	switch (attr->attr) {
+	case KVM_ARM_VCPU_PMU_V3_IRQ:
+	case KVM_ARM_VCPU_PMU_V3_INIT:
+		if (kvm_arm_support_pmu_v3() &&
+		    test_bit(KVM_ARM_VCPU_PMU_V3, vcpu->arch.features))
+			return 0;
+	}
+
+	return -ENXIO;
+}
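
A worked example (not part of the patch) of the sample period chosen in kvm_pmu_set_counter_event_type() above: attr.sample_period = (-counter) & bitmask is the number of events left before a counter of that width wraps, so the host perf event overflows exactly when the emulated guest counter would.

#include <stdint.h>
#include <stdio.h>

static uint64_t sample_period(uint64_t counter, uint64_t bitmask)
{
	return (-counter) & bitmask;
}

int main(void)
{
	uint64_t mask32 = 0xffffffffULL;
	uint64_t mask64 = 0xffffffffffffffffULL;

	/* 32-bit counter at 0xfffffff0: 0x10 more events until it wraps. */
	printf("0x%llx\n", (unsigned long long)sample_period(0xfffffff0ULL, mask32));
	/* 64-bit cycle counter (PMCR_LC set) at 10: wraps after 2^64 - 10 events. */
	printf("0x%llx\n", (unsigned long long)sample_period(10ULL, mask64));
	return 0;
}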
diff --git a/virt/kvm/arm/vgic-v2-emul.c b/virt/kvm/arm/vgic-v2-emul.c
index 1390797..1b0bee0 100644
--- a/virt/kvm/arm/vgic-v2-emul.c
+++ b/virt/kvm/arm/vgic-v2-emul.c
@@ -321,6 +321,11 @@
 
 static const struct vgic_io_range vgic_dist_ranges[] = {
 	{
+		.base		= GIC_DIST_SOFTINT,
+		.len		= 4,
+		.handle_mmio	= handle_mmio_sgi_reg,
+	},
+	{
 		.base		= GIC_DIST_CTRL,
 		.len		= 12,
 		.bits_per_irq	= 0,
@@ -387,11 +392,6 @@
 		.handle_mmio	= handle_mmio_cfg_reg,
 	},
 	{
-		.base		= GIC_DIST_SOFTINT,
-		.len		= 4,
-		.handle_mmio	= handle_mmio_sgi_reg,
-	},
-	{
 		.base		= GIC_DIST_SGI_PENDING_CLEAR,
 		.len		= VGIC_NR_SGIS,
 		.handle_mmio	= handle_mmio_sgi_clear,
diff --git a/virt/kvm/arm/vgic-v2.c b/virt/kvm/arm/vgic-v2.c
index ff02f08..67ec334 100644
--- a/virt/kvm/arm/vgic-v2.c
+++ b/virt/kvm/arm/vgic-v2.c
@@ -176,6 +176,15 @@
 
 static struct vgic_params vgic_v2_params;
 
+static void vgic_cpu_init_lrs(void *params)
+{
+	struct vgic_params *vgic = params;
+	int i;
+
+	for (i = 0; i < vgic->nr_lr; i++)
+		writel_relaxed(0, vgic->vctrl_base + GICH_LR0 + (i * 4));
+}
+
 /**
  * vgic_v2_probe - probe for a GICv2 compatible interrupt controller in DT
  * @node:	pointer to the DT node
@@ -257,6 +266,9 @@
 
 	vgic->type = VGIC_V2;
 	vgic->max_gic_vcpus = VGIC_V2_MAX_CPUS;
+
+	on_each_cpu(vgic_cpu_init_lrs, vgic, 1);
+
 	*ops = &vgic_v2_ops;
 	*params = vgic;
 	goto out;
diff --git a/virt/kvm/arm/vgic-v3.c b/virt/kvm/arm/vgic-v3.c
index 453eafd..999bdc6 100644
--- a/virt/kvm/arm/vgic-v3.c
+++ b/virt/kvm/arm/vgic-v3.c
@@ -42,7 +42,7 @@
 static struct vgic_lr vgic_v3_get_lr(const struct kvm_vcpu *vcpu, int lr)
 {
 	struct vgic_lr lr_desc;
-	u64 val = vcpu->arch.vgic_cpu.vgic_v3.vgic_lr[VGIC_V3_LR_INDEX(lr)];
+	u64 val = vcpu->arch.vgic_cpu.vgic_v3.vgic_lr[lr];
 
 	if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3)
 		lr_desc.irq = val & ICH_LR_VIRTUALID_MASK;
@@ -106,7 +106,7 @@
 		lr_val |= ((u64)lr_desc.hwirq) << ICH_LR_PHYS_ID_SHIFT;
 	}
 
-	vcpu->arch.vgic_cpu.vgic_v3.vgic_lr[VGIC_V3_LR_INDEX(lr)] = lr_val;
+	vcpu->arch.vgic_cpu.vgic_v3.vgic_lr[lr] = lr_val;
 
 	if (!(lr_desc.state & LR_STATE_MASK))
 		vcpu->arch.vgic_cpu.vgic_v3.vgic_elrsr |= (1U << lr);
@@ -216,6 +216,11 @@
 
 static struct vgic_params vgic_v3_params;
 
+static void vgic_cpu_init_lrs(void *params)
+{
+	kvm_call_hyp(__vgic_v3_init_lrs);
+}
+
 /**
  * vgic_v3_probe - probe for a GICv3 compatible interrupt controller in DT
  * @node:	pointer to the DT node
@@ -284,6 +289,8 @@
 	kvm_info("%s@%llx IRQ%d\n", vgic_node->name,
 		 vcpu_res.start, vgic->maint_irq);
 
+	on_each_cpu(vgic_cpu_init_lrs, vgic, 1);
+
 	*ops = &vgic_v3_ops;
 	*params = vgic;
 
diff --git a/virt/kvm/async_pf.c b/virt/kvm/async_pf.c
index 65da997..f0d061f 100644
--- a/virt/kvm/async_pf.c
+++ b/virt/kvm/async_pf.c
@@ -109,8 +109,8 @@
 	/* cancel outstanding work queue item */
 	while (!list_empty(&vcpu->async_pf.queue)) {
 		struct kvm_async_pf *work =
-			list_entry(vcpu->async_pf.queue.next,
-				   typeof(*work), queue);
+			list_first_entry(&vcpu->async_pf.queue,
+					 typeof(*work), queue);
 		list_del(&work->queue);
 
 #ifdef CONFIG_KVM_ASYNC_PF_SYNC
@@ -127,8 +127,8 @@
 	spin_lock(&vcpu->async_pf.lock);
 	while (!list_empty(&vcpu->async_pf.done)) {
 		struct kvm_async_pf *work =
-			list_entry(vcpu->async_pf.done.next,
-				   typeof(*work), link);
+			list_first_entry(&vcpu->async_pf.done,
+					 typeof(*work), link);
 		list_del(&work->link);
 		kmem_cache_free(async_pf_cache, work);
 	}
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 5af50c3..7ba1d10 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -72,11 +72,11 @@
 
 /* Default doubles per-vcpu halt_poll_ns. */
 static unsigned int halt_poll_ns_grow = 2;
-module_param(halt_poll_ns_grow, int, S_IRUGO);
+module_param(halt_poll_ns_grow, uint, S_IRUGO | S_IWUSR);
 
 /* Default resets per-vcpu halt_poll_ns . */
 static unsigned int halt_poll_ns_shrink;
-module_param(halt_poll_ns_shrink, int, S_IRUGO);
+module_param(halt_poll_ns_shrink, uint, S_IRUGO | S_IWUSR);
 
 /*
  * Ordering of locks:
@@ -619,13 +619,10 @@
 
 static void kvm_destroy_devices(struct kvm *kvm)
 {
-	struct list_head *node, *tmp;
+	struct kvm_device *dev, *tmp;
 
-	list_for_each_safe(node, tmp, &kvm->devices) {
-		struct kvm_device *dev =
-			list_entry(node, struct kvm_device, vm_node);
-
-		list_del(node);
+	list_for_each_entry_safe(dev, tmp, &kvm->devices, vm_node) {
+		list_del(&dev->vm_node);
 		dev->ops->destroy(dev);
 	}
 }
@@ -1436,11 +1433,17 @@
 {
 	unsigned long addr = __gfn_to_hva_many(slot, gfn, NULL, write_fault);
 
-	if (addr == KVM_HVA_ERR_RO_BAD)
+	if (addr == KVM_HVA_ERR_RO_BAD) {
+		if (writable)
+			*writable = false;
 		return KVM_PFN_ERR_RO_FAULT;
+	}
 
-	if (kvm_is_error_hva(addr))
+	if (kvm_is_error_hva(addr)) {
+		if (writable)
+			*writable = false;
 		return KVM_PFN_NOSLOT;
+	}
 
 	/* Do not map writable pfn in the readonly memslot. */
 	if (writable && memslot_is_readonly(slot)) {
@@ -1942,14 +1945,15 @@
 
 static void grow_halt_poll_ns(struct kvm_vcpu *vcpu)
 {
-	int old, val;
+	unsigned int old, val, grow;
 
 	old = val = vcpu->halt_poll_ns;
+	grow = READ_ONCE(halt_poll_ns_grow);
 	/* 10us base */
-	if (val == 0 && halt_poll_ns_grow)
+	if (val == 0 && grow)
 		val = 10000;
 	else
-		val *= halt_poll_ns_grow;
+		val *= grow;
 
 	if (val > halt_poll_ns)
 		val = halt_poll_ns;
@@ -1960,13 +1964,14 @@
 
 static void shrink_halt_poll_ns(struct kvm_vcpu *vcpu)
 {
-	int old, val;
+	unsigned int old, val, shrink;
 
 	old = val = vcpu->halt_poll_ns;
-	if (halt_poll_ns_shrink == 0)
+	shrink = READ_ONCE(halt_poll_ns_shrink);
+	if (shrink == 0)
 		val = 0;
 	else
-		val /= halt_poll_ns_shrink;
+		val /= shrink;
 
 	vcpu->halt_poll_ns = val;
 	trace_kvm_halt_poll_ns_shrink(vcpu->vcpu_id, val, old);
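
A userspace model (not part of the patch) of the grow/shrink arithmetic in the two hunks above after the switch to unsigned parameters; the 10us base comes from the code itself, while the cap value here is just an example constant standing in for halt_poll_ns.

#include <stdio.h>

static const unsigned int halt_poll_ns_cap = 500000;	/* example cap */

static unsigned int grow(unsigned int val, unsigned int grow_factor)
{
	if (val == 0 && grow_factor)
		val = 10000;		/* 10us base */
	else
		val *= grow_factor;

	if (val > halt_poll_ns_cap)
		val = halt_poll_ns_cap;
	return val;
}

static unsigned int shrink(unsigned int val, unsigned int shrink_factor)
{
	return shrink_factor == 0 ? 0 : val / shrink_factor;
}

int main(void)
{
	unsigned int v = 0;

	v = grow(v, 2);		/*     0 -> 10000 */
	v = grow(v, 2);		/* 10000 -> 20000 */
	v = shrink(v, 0);	/* 20000 -> 0: shrink factor 0 resets polling */
	printf("final value: %u\n", v);
	return 0;
}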