Merge branches 'cleanup', 'fixes', 'misc', 'omap-barrier' and 'uaccess' into for-linus
diff --git a/Documentation/arm/sunxi/README b/Documentation/arm/sunxi/README
index 1fe2d7f..5e38e158 100644
--- a/Documentation/arm/sunxi/README
+++ b/Documentation/arm/sunxi/README
@@ -36,7 +36,7 @@
+ User Manual
http://dl.linux-sunxi.org/A20/A20%20User%20Manual%202013-03-22.pdf
- - Allwinner A23
+ - Allwinner A23 (sun8i)
+ Datasheet
http://dl.linux-sunxi.org/A23/A23%20Datasheet%20V1.0%2020130830.pdf
+ User Manual
@@ -55,7 +55,23 @@
+ User Manual
http://dl.linux-sunxi.org/A31/A3x_release_document/A31s/IC/A31s%20User%20Manual%20%20V1.0%2020130322.pdf
+ - Allwinner A33 (sun8i)
+ + Datasheet
+ http://dl.linux-sunxi.org/A33/A33%20Datasheet%20release%201.1.pdf
+ + User Manual
+ http://dl.linux-sunxi.org/A33/A33%20user%20manual%20release%201.1.pdf
+
+ - Allwinner H3 (sun8i)
+ + Datasheet
+ http://dl.linux-sunxi.org/H3/Allwinner_H3_Datasheet_V1.0.pdf
+
* Quad ARM Cortex-A15, Quad ARM Cortex-A7 based SoCs
- Allwinner A80
+ Datasheet
http://dl.linux-sunxi.org/A80/A80_Datasheet_Revision_1.0_0404.pdf
+
+ * Octa ARM Cortex-A7 based SoCs
+ - Allwinner A83T
+ + Not Supported
+ + Datasheet
+ http://dl.linux-sunxi.org/A83T/A83T_datasheet_Revision_1.1.pdf
diff --git a/Documentation/devicetree/bindings/arm/l2cc.txt b/Documentation/devicetree/bindings/arm/l2cc.txt
index 2251dcc..06c88a4 100644
--- a/Documentation/devicetree/bindings/arm/l2cc.txt
+++ b/Documentation/devicetree/bindings/arm/l2cc.txt
@@ -67,6 +67,12 @@
disable if zero.
- arm,prefetch-offset : Override prefetch offset value. Valid values are
0-7, 15, 23, and 31.
+- arm,shared-override : The default behavior of the pl310 cache controller with
+ respect to the shareable attribute is to transform "normal memory
+ non-cacheable transactions" into "cacheable no allocate" (for reads) or
+ "write through no write allocate" (for writes).
+ On systems where this may cause DMA buffer corruption, this property must be
+ specified to indicate that such transforms are precluded.
- prefetch-data : Data prefetch. Value: <0> (forcibly disable), <1>
(forcibly enable), property absent (retain settings set by firmware)
- prefetch-instr : Instruction prefetch. Value: <0> (forcibly disable),
diff --git a/Documentation/devicetree/bindings/arm/sunxi.txt b/Documentation/devicetree/bindings/arm/sunxi.txt
index 42941fd..67da205 100644
--- a/Documentation/devicetree/bindings/arm/sunxi.txt
+++ b/Documentation/devicetree/bindings/arm/sunxi.txt
@@ -9,4 +9,6 @@
allwinner,sun6i-a31
allwinner,sun7i-a20
allwinner,sun8i-a23
+ allwinner,sun8i-a33
+ allwinner,sun8i-h3
allwinner,sun9i-a80
diff --git a/Documentation/devicetree/bindings/memory-controllers/ti/emif.txt b/Documentation/devicetree/bindings/memory-controllers/ti/emif.txt
index 938f8e1..0db6047 100644
--- a/Documentation/devicetree/bindings/memory-controllers/ti/emif.txt
+++ b/Documentation/devicetree/bindings/memory-controllers/ti/emif.txt
@@ -8,6 +8,7 @@
Required properties:
- compatible : Should be of the form "ti,emif-<ip-rev>" where <ip-rev>
is the IP revision of the specific EMIF instance.
+ For am437x, it should be "ti,emif-am4372".
- phy-type : <u32> indicating the DDR phy type. Following are the
allowed values
diff --git a/Documentation/power/swsusp.txt b/Documentation/power/swsusp.txt
index f732a83..8cc17ca 100644
--- a/Documentation/power/swsusp.txt
+++ b/Documentation/power/swsusp.txt
@@ -410,8 +410,17 @@
Q: Can I suspend-to-disk using a swap partition under LVM?
-A: No. You can suspend successfully, but you'll not be able to
-resume. uswsusp should be able to work with LVM. See suspend.sf.net.
+A: Yes and No. You can suspend successfully, but the kernel will not be able
+to resume on its own. You need an initramfs that can recognize the resume
+situation, activate the logical volume containing the swap volume (but not
+touch any filesystems!), and eventually call
+
+echo -n "$major:$minor" > /sys/power/resume
+
+where $major and $minor are the respective major and minor device numbers of
+the swap volume.
+
+uswsusp works with LVM, too. See http://suspend.sourceforge.net/
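
To make the shape of that initramfs step concrete, here is a minimal
userspace sketch of a resume helper; the logical-volume path is purely
an example:

    /* resume-helper.c: write "$major:$minor" to /sys/power/resume */
    #include <stdio.h>
    #include <stdlib.h>
    #include <sys/stat.h>
    #include <sys/sysmacros.h>

    int main(void)
    {
        struct stat st;
        FILE *f;

        /* hypothetical path to the already-activated swap LV */
        if (stat("/dev/vg0/swap", &st) < 0) {
            perror("stat");
            return EXIT_FAILURE;
        }

        f = fopen("/sys/power/resume", "w");
        if (!f) {
            perror("fopen");
            return EXIT_FAILURE;
        }

        /* same format as the echo above: no trailing newline needed */
        fprintf(f, "%u:%u", major(st.st_rdev), minor(st.st_rdev));
        fclose(f);
        return EXIT_SUCCESS;
    }
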
Q: I upgraded the kernel from 2.6.15 to 2.6.16. Both kernels were
 compiled with similar configuration files. Anyway I found that
diff --git a/MAINTAINERS b/MAINTAINERS
index 8133cef..fd60784 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1614,6 +1614,7 @@
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
F: arch/arm/boot/dts/vexpress*
+F: arch/arm64/boot/dts/arm/vexpress*
F: arch/arm/mach-vexpress/
F: */*/vexpress*
F: */*/*/vexpress*
@@ -2562,19 +2563,31 @@
F: arch/powerpc/oprofile/*cell*
F: arch/powerpc/platforms/cell/
-CEPH DISTRIBUTED FILE SYSTEM CLIENT
+CEPH COMMON CODE (LIBCEPH)
+M: Ilya Dryomov <idryomov@gmail.com>
M: "Yan, Zheng" <zyan@redhat.com>
M: Sage Weil <sage@redhat.com>
L: ceph-devel@vger.kernel.org
W: http://ceph.com/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/sage/ceph-client.git
+T: git git://github.com/ceph/ceph-client.git
S: Supported
-F: Documentation/filesystems/ceph.txt
-F: fs/ceph/
F: net/ceph/
F: include/linux/ceph/
F: include/linux/crush/
+CEPH DISTRIBUTED FILE SYSTEM CLIENT (CEPH)
+M: "Yan, Zheng" <zyan@redhat.com>
+M: Sage Weil <sage@redhat.com>
+M: Ilya Dryomov <idryomov@gmail.com>
+L: ceph-devel@vger.kernel.org
+W: http://ceph.com/
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/sage/ceph-client.git
+T: git git://github.com/ceph/ceph-client.git
+S: Supported
+F: Documentation/filesystems/ceph.txt
+F: fs/ceph/
+
CERTIFIED WIRELESS USB (WUSB) SUBSYSTEM:
L: linux-usb@vger.kernel.org
S: Orphan
@@ -6147,6 +6160,7 @@
Q: https://patchwork.kernel.org/project/linux-nvdimm/list/
S: Supported
F: drivers/nvdimm/pmem.c
+F: include/linux/pmem.h
LINUX FOR IBM pSERIES (RS/6000)
M: Paul Mackerras <paulus@au.ibm.com>
@@ -6161,7 +6175,7 @@
W: http://www.penguinppc.org/
L: linuxppc-dev@lists.ozlabs.org
Q: http://patchwork.ozlabs.org/project/linuxppc-dev/list/
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/benh/powerpc.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux.git
S: Supported
F: Documentation/powerpc/
F: arch/powerpc/
@@ -8366,10 +8380,12 @@
M: Ilya Dryomov <idryomov@gmail.com>
M: Sage Weil <sage@redhat.com>
M: Alex Elder <elder@kernel.org>
-M: ceph-devel@vger.kernel.org
+L: ceph-devel@vger.kernel.org
W: http://ceph.com/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/sage/ceph-client.git
+T: git git://github.com/ceph/ceph-client.git
S: Supported
+F: Documentation/ABI/testing/sysfs-bus-rbd
F: drivers/block/rbd.c
F: drivers/block/rbd_types.h
diff --git a/Makefile b/Makefile
index 13270c0..257ef58 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
VERSION = 4
PATCHLEVEL = 2
SUBLEVEL = 0
-EXTRAVERSION = -rc1
+EXTRAVERSION = -rc2
NAME = Hurr durr I'ma sheep
# *DOCUMENTATION*
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index a750c14..3458828 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -188,6 +188,9 @@
config ARCH_HAS_BANDGAP
bool
+config FIX_EARLYCON_MEM
+ def_bool y if MMU
+
config GENERIC_HWEIGHT
bool
default y
@@ -1693,6 +1696,27 @@
config HIGHPTE
bool "Allocate 2nd-level pagetables from highmem"
depends on HIGHMEM
+ help
+ The VM uses one page of physical memory for each page table.
+ For systems with a lot of processes, this can use a lot of
+ precious low memory, eventually leading to low memory being
+ consumed by page tables. Setting this option will allow
+ user-space 2nd level page tables to reside in high memory.
+
+config CPU_SW_DOMAIN_PAN
+ bool "Enable use of CPU domains to implement privileged no-access"
+ depends on MMU && !ARM_LPAE
+ default y
+ help
+ Increase kernel security by ensuring that normal kernel accesses
+ are unable to access userspace addresses. This can help prevent
+ use-after-free bugs from becoming an exploitable privilege escalation
+ by ensuring that magic values (such as LIST_POISON) will always
+ fault when dereferenced.
+
+ CPUs with low-vector mappings use a best-effort implementation.
+ Their lower 1MB needs to remain accessible for the vectors, but
+ the remainder of userspace will become appropriately inaccessible.
config HW_PERF_EVENTS
bool "Enable hardware performance counter support for perf events"
diff --git a/arch/arm/Kconfig.debug b/arch/arm/Kconfig.debug
index f1b1579..a2e16f9 100644
--- a/arch/arm/Kconfig.debug
+++ b/arch/arm/Kconfig.debug
@@ -1635,7 +1635,7 @@
config DEBUG_SET_MODULE_RONX
bool "Set loadable kernel module data as NX and text as RO"
- depends on MODULES
+ depends on MODULES && MMU
---help---
This option helps catch unintended modifications to loadable
kernel module's text and read-only data. It also prevents execution
diff --git a/arch/arm/Makefile b/arch/arm/Makefile
index 07ab3d2..7451b44 100644
--- a/arch/arm/Makefile
+++ b/arch/arm/Makefile
@@ -312,6 +312,9 @@
PHONY += bzImage $(BOOT_TARGETS) $(INSTALL_TARGETS)
+bootpImage uImage: zImage
+zImage: Image
+
$(BOOT_TARGETS): vmlinux
$(Q)$(MAKE) $(build)=$(boot) MACHINE=$(MACHINE) $(boot)/$@
diff --git a/arch/arm/boot/dts/am335x-boneblack.dts b/arch/arm/boot/dts/am335x-boneblack.dts
index 901739f..5c42d25 100644
--- a/arch/arm/boot/dts/am335x-boneblack.dts
+++ b/arch/arm/boot/dts/am335x-boneblack.dts
@@ -80,3 +80,7 @@
status = "okay";
};
};
+
+&rtc {
+ system-power-controller;
+};
diff --git a/arch/arm/boot/dts/am4372.dtsi b/arch/arm/boot/dts/am4372.dtsi
index c80a3e2..ade28c79 100644
--- a/arch/arm/boot/dts/am4372.dtsi
+++ b/arch/arm/boot/dts/am4372.dtsi
@@ -132,6 +132,12 @@
};
};
+ emif: emif@4c000000 {
+ compatible = "ti,emif-am4372";
+ reg = <0x4c000000 0x1000000>;
+ ti,hwmods = "emif";
+ };
+
edma: edma@49000000 {
compatible = "ti,edma3";
ti,hwmods = "tpcc", "tptc0", "tptc1", "tptc2";
@@ -941,6 +947,7 @@
ti,hwmods = "dss_rfbi";
clocks = <&disp_clk>;
clock-names = "fck";
+ status = "disabled";
};
};
diff --git a/arch/arm/boot/dts/am57xx-beagle-x15.dts b/arch/arm/boot/dts/am57xx-beagle-x15.dts
index a42cc37..a63bf78 100644
--- a/arch/arm/boot/dts/am57xx-beagle-x15.dts
+++ b/arch/arm/boot/dts/am57xx-beagle-x15.dts
@@ -605,6 +605,10 @@
phy-supply = <&ldousb_reg>;
};
+&usb2_phy2 {
+ phy-supply = <&ldousb_reg>;
+};
+
&usb1 {
dr_mode = "host";
pinctrl-names = "default";
diff --git a/arch/arm/boot/dts/atlas7.dtsi b/arch/arm/boot/dts/atlas7.dtsi
index 5dfd3a4..3e21311 100644
--- a/arch/arm/boot/dts/atlas7.dtsi
+++ b/arch/arm/boot/dts/atlas7.dtsi
@@ -135,6 +135,1025 @@
compatible = "sirf,atlas7-ioc";
reg = <0x18880000 0x1000>,
<0x10E40000 0x1000>;
+
+ audio_ac97_pmx: audio_ac97@0 {
+ audio_ac97 {
+ groups = "audio_ac97_grp";
+ function = "audio_ac97";
+ };
+ };
+
+ audio_func_dbg_pmx: audio_func_dbg@0 {
+ audio_func_dbg {
+ groups = "audio_func_dbg_grp";
+ function = "audio_func_dbg";
+ };
+ };
+
+ audio_i2s_pmx: audio_i2s@0 {
+ audio_i2s {
+ groups = "audio_i2s_grp";
+ function = "audio_i2s";
+ };
+ };
+
+ audio_i2s_2ch_pmx: audio_i2s_2ch@0 {
+ audio_i2s_2ch {
+ groups = "audio_i2s_2ch_grp";
+ function = "audio_i2s_2ch";
+ };
+ };
+
+ audio_i2s_extclk_pmx: audio_i2s_extclk@0 {
+ audio_i2s_extclk {
+ groups = "audio_i2s_extclk_grp";
+ function = "audio_i2s_extclk";
+ };
+ };
+
+ audio_uart0_pmx: audio_uart0@0 {
+ audio_uart0 {
+ groups = "audio_uart0_grp";
+ function = "audio_uart0";
+ };
+ };
+
+ audio_uart1_pmx: audio_uart1@0 {
+ audio_uart1 {
+ groups = "audio_uart1_grp";
+ function = "audio_uart1";
+ };
+ };
+
+ audio_uart2_pmx0: audio_uart2@0 {
+ audio_uart2_0 {
+ groups = "audio_uart2_grp0";
+ function = "audio_uart2_m0";
+ };
+ };
+
+ audio_uart2_pmx1: audio_uart2@1 {
+ audio_uart2_1 {
+ groups = "audio_uart2_grp1";
+ function = "audio_uart2_m1";
+ };
+ };
+
+ c_can_trnsvr_pmx: c_can_trnsvr@0 {
+ c_can_trnsvr {
+ groups = "c_can_trnsvr_grp";
+ function = "c_can_trnsvr";
+ };
+ };
+
+ c0_can_pmx0: c0_can@0 {
+ c0_can_0 {
+ groups = "c0_can_grp0";
+ function = "c0_can_m0";
+ };
+ };
+
+ c0_can_pmx1: c0_can@1 {
+ c0_can_1 {
+ groups = "c0_can_grp1";
+ function = "c0_can_m1";
+ };
+ };
+
+ c1_can_pmx0: c1_can@0 {
+ c1_can_0 {
+ groups = "c1_can_grp0";
+ function = "c1_can_m0";
+ };
+ };
+
+ c1_can_pmx1: c1_can@1 {
+ c1_can_1 {
+ groups = "c1_can_grp1";
+ function = "c1_can_m1";
+ };
+ };
+
+ c1_can_pmx2: c1_can@2 {
+ c1_can_2 {
+ groups = "c1_can_grp2";
+ function = "c1_can_m2";
+ };
+ };
+
+ ca_audio_lpc_pmx: ca_audio_lpc@0 {
+ ca_audio_lpc {
+ groups = "ca_audio_lpc_grp";
+ function = "ca_audio_lpc";
+ };
+ };
+
+ ca_bt_lpc_pmx: ca_bt_lpc@0 {
+ ca_bt_lpc {
+ groups = "ca_bt_lpc_grp";
+ function = "ca_bt_lpc";
+ };
+ };
+
+ ca_coex_pmx: ca_coex@0 {
+ ca_coex {
+ groups = "ca_coex_grp";
+ function = "ca_coex";
+ };
+ };
+
+ ca_curator_lpc_pmx: ca_curator_lpc@0 {
+ ca_curator_lpc {
+ groups = "ca_curator_lpc_grp";
+ function = "ca_curator_lpc";
+ };
+ };
+
+ ca_pcm_debug_pmx: ca_pcm_debug@0 {
+ ca_pcm_debug {
+ groups = "ca_pcm_debug_grp";
+ function = "ca_pcm_debug";
+ };
+ };
+
+ ca_pio_pmx: ca_pio@0 {
+ ca_pio {
+ groups = "ca_pio_grp";
+ function = "ca_pio";
+ };
+ };
+
+ ca_sdio_debug_pmx: ca_sdio_debug@0 {
+ ca_sdio_debug {
+ groups = "ca_sdio_debug_grp";
+ function = "ca_sdio_debug";
+ };
+ };
+
+ ca_spi_pmx: ca_spi@0 {
+ ca_spi {
+ groups = "ca_spi_grp";
+ function = "ca_spi";
+ };
+ };
+
+ ca_trb_pmx: ca_trb@0 {
+ ca_trb {
+ groups = "ca_trb_grp";
+ function = "ca_trb";
+ };
+ };
+
+ ca_uart_debug_pmx: ca_uart_debug@0 {
+ ca_uart_debug {
+ groups = "ca_uart_debug_grp";
+ function = "ca_uart_debug";
+ };
+ };
+
+ clkc_pmx0: clkc@0 {
+ clkc_0 {
+ groups = "clkc_grp0";
+ function = "clkc_m0";
+ };
+ };
+
+ clkc_pmx1: clkc@1 {
+ clkc_1 {
+ groups = "clkc_grp1";
+ function = "clkc_m1";
+ };
+ };
+
+ gn_gnss_i2c_pmx: gn_gnss_i2c@0 {
+ gn_gnss_i2c {
+ groups = "gn_gnss_i2c_grp";
+ function = "gn_gnss_i2c";
+ };
+ };
+
+ gn_gnss_uart_nopause_pmx: gn_gnss_uart_nopause@0 {
+ gn_gnss_uart_nopause {
+ groups = "gn_gnss_uart_nopause_grp";
+ function = "gn_gnss_uart_nopause";
+ };
+ };
+
+ gn_gnss_uart_pmx: gn_gnss_uart@0 {
+ gn_gnss_uart {
+ groups = "gn_gnss_uart_grp";
+ function = "gn_gnss_uart";
+ };
+ };
+
+ gn_trg_spi_pmx0: gn_trg_spi@0 {
+ gn_trg_spi_0 {
+ groups = "gn_trg_spi_grp0";
+ function = "gn_trg_spi_m0";
+ };
+ };
+
+ gn_trg_spi_pmx1: gn_trg_spi@1 {
+ gn_trg_spi_1 {
+ groups = "gn_trg_spi_grp1";
+ function = "gn_trg_spi_m1";
+ };
+ };
+
+ cvbs_dbg_pmx: cvbs_dbg@0 {
+ cvbs_dbg {
+ groups = "cvbs_dbg_grp";
+ function = "cvbs_dbg";
+ };
+ };
+
+ cvbs_dbg_test_pmx0: cvbs_dbg_test@0 {
+ cvbs_dbg_test_0 {
+ groups = "cvbs_dbg_test_grp0";
+ function = "cvbs_dbg_test_m0";
+ };
+ };
+
+ cvbs_dbg_test_pmx1: cvbs_dbg_test@1 {
+ cvbs_dbg_test_1 {
+ groups = "cvbs_dbg_test_grp1";
+ function = "cvbs_dbg_test_m1";
+ };
+ };
+
+ cvbs_dbg_test_pmx2: cvbs_dbg_test@2 {
+ cvbs_dbg_test_2 {
+ groups = "cvbs_dbg_test_grp2";
+ function = "cvbs_dbg_test_m2";
+ };
+ };
+
+ cvbs_dbg_test_pmx3: cvbs_dbg_test@3 {
+ cvbs_dbg_test_3 {
+ groups = "cvbs_dbg_test_grp3";
+ function = "cvbs_dbg_test_m3";
+ };
+ };
+
+ cvbs_dbg_test_pmx4: cvbs_dbg_test@4 {
+ cvbs_dbg_test_4 {
+ groups = "cvbs_dbg_test_grp4";
+ function = "cvbs_dbg_test_m4";
+ };
+ };
+
+ cvbs_dbg_test_pmx5: cvbs_dbg_test@5 {
+ cvbs_dbg_test_5 {
+ groups = "cvbs_dbg_test_grp5";
+ function = "cvbs_dbg_test_m5";
+ };
+ };
+
+ cvbs_dbg_test_pmx6: cvbs_dbg_test@6 {
+ cvbs_dbg_test_6 {
+ groups = "cvbs_dbg_test_grp6";
+ function = "cvbs_dbg_test_m6";
+ };
+ };
+
+ cvbs_dbg_test_pmx7: cvbs_dbg_test@7 {
+ cvbs_dbg_test_7 {
+ groups = "cvbs_dbg_test_grp7";
+ function = "cvbs_dbg_test_m7";
+ };
+ };
+
+ cvbs_dbg_test_pmx8: cvbs_dbg_test@8 {
+ cvbs_dbg_test_8 {
+ groups = "cvbs_dbg_test_grp8";
+ function = "cvbs_dbg_test_m8";
+ };
+ };
+
+ cvbs_dbg_test_pmx9: cvbs_dbg_test@9 {
+ cvbs_dbg_test_9 {
+ groups = "cvbs_dbg_test_grp9";
+ function = "cvbs_dbg_test_m9";
+ };
+ };
+
+ cvbs_dbg_test_pmx10: cvbs_dbg_test@10 {
+ cvbs_dbg_test_10 {
+ groups = "cvbs_dbg_test_grp10";
+ function = "cvbs_dbg_test_m10";
+ };
+ };
+
+ cvbs_dbg_test_pmx11: cvbs_dbg_test@11 {
+ cvbs_dbg_test_11 {
+ groups = "cvbs_dbg_test_grp11";
+ function = "cvbs_dbg_test_m11";
+ };
+ };
+
+ cvbs_dbg_test_pmx12: cvbs_dbg_test@12 {
+ cvbs_dbg_test_12 {
+ groups = "cvbs_dbg_test_grp12";
+ function = "cvbs_dbg_test_m12";
+ };
+ };
+
+ cvbs_dbg_test_pmx13: cvbs_dbg_test@13 {
+ cvbs_dbg_test_13 {
+ groups = "cvbs_dbg_test_grp13";
+ function = "cvbs_dbg_test_m13";
+ };
+ };
+
+ cvbs_dbg_test_pmx14: cvbs_dbg_test@14 {
+ cvbs_dbg_test_14 {
+ groups = "cvbs_dbg_test_grp14";
+ function = "cvbs_dbg_test_m14";
+ };
+ };
+
+ cvbs_dbg_test_pmx15: cvbs_dbg_test@15 {
+ cvbs_dbg_test_15 {
+ groups = "cvbs_dbg_test_grp15";
+ function = "cvbs_dbg_test_m15";
+ };
+ };
+
+ gn_gnss_power_pmx: gn_gnss_power@0 {
+ gn_gnss_power {
+ groups = "gn_gnss_power_grp";
+ function = "gn_gnss_power";
+ };
+ };
+
+ gn_gnss_sw_status_pmx: gn_gnss_sw_status@0 {
+ gn_gnss_sw_status {
+ groups = "gn_gnss_sw_status_grp";
+ function = "gn_gnss_sw_status";
+ };
+ };
+
+ gn_gnss_eclk_pmx: gn_gnss_eclk@0 {
+ gn_gnss_eclk {
+ groups = "gn_gnss_eclk_grp";
+ function = "gn_gnss_eclk";
+ };
+ };
+
+ gn_gnss_irq1_pmx0: gn_gnss_irq1@0 {
+ gn_gnss_irq1_0 {
+ groups = "gn_gnss_irq1_grp0";
+ function = "gn_gnss_irq1_m0";
+ };
+ };
+
+ gn_gnss_irq2_pmx0: gn_gnss_irq2@0 {
+ gn_gnss_irq2_0 {
+ groups = "gn_gnss_irq2_grp0";
+ function = "gn_gnss_irq2_m0";
+ };
+ };
+
+ gn_gnss_tm_pmx: gn_gnss_tm@0 {
+ gn_gnss_tm {
+ groups = "gn_gnss_tm_grp";
+ function = "gn_gnss_tm";
+ };
+ };
+
+ gn_gnss_tsync_pmx: gn_gnss_tsync@0 {
+ gn_gnss_tsync {
+ groups = "gn_gnss_tsync_grp";
+ function = "gn_gnss_tsync";
+ };
+ };
+
+ gn_io_gnsssys_sw_cfg_pmx: gn_io_gnsssys_sw_cfg@0 {
+ gn_io_gnsssys_sw_cfg {
+ groups = "gn_io_gnsssys_sw_cfg_grp";
+ function = "gn_io_gnsssys_sw_cfg";
+ };
+ };
+
+ gn_trg_pmx0: gn_trg@0 {
+ gn_trg_0 {
+ groups = "gn_trg_grp0";
+ function = "gn_trg_m0";
+ };
+ };
+
+ gn_trg_pmx1: gn_trg@1 {
+ gn_trg_1 {
+ groups = "gn_trg_grp1";
+ function = "gn_trg_m1";
+ };
+ };
+
+ gn_trg_shutdown_pmx0: gn_trg_shutdown@0 {
+ gn_trg_shutdown_0 {
+ groups = "gn_trg_shutdown_grp0";
+ function = "gn_trg_shutdown_m0";
+ };
+ };
+
+ gn_trg_shutdown_pmx1: gn_trg_shutdown@1 {
+ gn_trg_shutdown_1 {
+ groups = "gn_trg_shutdown_grp1";
+ function = "gn_trg_shutdown_m1";
+ };
+ };
+
+ gn_trg_shutdown_pmx2: gn_trg_shutdown@2 {
+ gn_trg_shutdown_2 {
+ groups = "gn_trg_shutdown_grp2";
+ function = "gn_trg_shutdown_m2";
+ };
+ };
+
+ gn_trg_shutdown_pmx3: gn_trg_shutdown@3 {
+ gn_trg_shutdown_3 {
+ groups = "gn_trg_shutdown_grp3";
+ function = "gn_trg_shutdown_m3";
+ };
+ };
+
+ i2c0_pmx: i2c0@0 {
+ i2c0 {
+ groups = "i2c0_grp";
+ function = "i2c0";
+ };
+ };
+
+ i2c1_pmx: i2c1@0 {
+ i2c1 {
+ groups = "i2c1_grp";
+ function = "i2c1";
+ };
+ };
+
+ jtag_pmx0: jtag@0 {
+ jtag_0 {
+ groups = "jtag_grp0";
+ function = "jtag_m0";
+ };
+ };
+
+ ks_kas_spi_pmx0: ks_kas_spi@0 {
+ ks_kas_spi_0 {
+ groups = "ks_kas_spi_grp0";
+ function = "ks_kas_spi_m0";
+ };
+ };
+
+ ld_ldd_pmx: ld_ldd@0 {
+ ld_ldd {
+ groups = "ld_ldd_grp";
+ function = "ld_ldd";
+ };
+ };
+
+ ld_ldd_16bit_pmx: ld_ldd_16bit@0 {
+ ld_ldd_16bit {
+ groups = "ld_ldd_16bit_grp";
+ function = "ld_ldd_16bit";
+ };
+ };
+
+ ld_ldd_fck_pmx: ld_ldd_fck@0 {
+ ld_ldd_fck {
+ groups = "ld_ldd_fck_grp";
+ function = "ld_ldd_fck";
+ };
+ };
+
+ ld_ldd_lck_pmx: ld_ldd_lck@0 {
+ ld_ldd_lck {
+ groups = "ld_ldd_lck_grp";
+ function = "ld_ldd_lck";
+ };
+ };
+
+ lr_lcdrom_pmx: lr_lcdrom@0 {
+ lr_lcdrom {
+ groups = "lr_lcdrom_grp";
+ function = "lr_lcdrom";
+ };
+ };
+
+ lvds_analog_pmx: lvds_analog@0 {
+ lvds_analog {
+ groups = "lvds_analog_grp";
+ function = "lvds_analog";
+ };
+ };
+
+ nd_df_pmx: nd_df@0 {
+ nd_df {
+ groups = "nd_df_grp";
+ function = "nd_df";
+ };
+ };
+
+ nd_df_nowp_pmx: nd_df_nowp@0 {
+ nd_df_nowp {
+ groups = "nd_df_nowp_grp";
+ function = "nd_df_nowp";
+ };
+ };
+
+ ps_pmx: ps@0 {
+ ps {
+ groups = "ps_grp";
+ function = "ps";
+ };
+ };
+
+ pwc_core_on_pmx: pwc_core_on@0 {
+ pwc_core_on {
+ groups = "pwc_core_on_grp";
+ function = "pwc_core_on";
+ };
+ };
+
+ pwc_ext_on_pmx: pwc_ext_on@0 {
+ pwc_ext_on {
+ groups = "pwc_ext_on_grp";
+ function = "pwc_ext_on";
+ };
+ };
+
+ pwc_gpio3_clk_pmx: pwc_gpio3_clk@0 {
+ pwc_gpio3_clk {
+ groups = "pwc_gpio3_clk_grp";
+ function = "pwc_gpio3_clk";
+ };
+ };
+
+ pwc_io_on_pmx: pwc_io_on@0 {
+ pwc_io_on {
+ groups = "pwc_io_on_grp";
+ function = "pwc_io_on";
+ };
+ };
+
+ pwc_lowbatt_b_pmx0: pwc_lowbatt_b@0 {
+ pwc_lowbatt_b_0 {
+ groups = "pwc_lowbatt_b_grp0";
+ function = "pwc_lowbatt_b_m0";
+ };
+ };
+
+ pwc_mem_on_pmx: pwc_mem_on@0 {
+ pwc_mem_on {
+ groups = "pwc_mem_on_grp";
+ function = "pwc_mem_on";
+ };
+ };
+
+ pwc_on_key_b_pmx0: pwc_on_key_b@0 {
+ pwc_on_key_b_0 {
+ groups = "pwc_on_key_b_grp0";
+ function = "pwc_on_key_b_m0";
+ };
+ };
+
+ pwc_wakeup_src0_pmx: pwc_wakeup_src0@0 {
+ pwc_wakeup_src0 {
+ groups = "pwc_wakeup_src0_grp";
+ function = "pwc_wakeup_src0";
+ };
+ };
+
+ pwc_wakeup_src1_pmx: pwc_wakeup_src1@0 {
+ pwc_wakeup_src1 {
+ groups = "pwc_wakeup_src1_grp";
+ function = "pwc_wakeup_src1";
+ };
+ };
+
+ pwc_wakeup_src2_pmx: pwc_wakeup_src2@0 {
+ pwc_wakeup_src2 {
+ groups = "pwc_wakeup_src2_grp";
+ function = "pwc_wakeup_src2";
+ };
+ };
+
+ pwc_wakeup_src3_pmx: pwc_wakeup_src3@0 {
+ pwc_wakeup_src3 {
+ groups = "pwc_wakeup_src3_grp";
+ function = "pwc_wakeup_src3";
+ };
+ };
+
+ pw_cko0_pmx0: pw_cko0@0 {
+ pw_cko0_0 {
+ groups = "pw_cko0_grp0";
+ function = "pw_cko0_m0";
+ };
+ };
+
+ pw_cko0_pmx1: pw_cko0@1 {
+ pw_cko0_1 {
+ groups = "pw_cko0_grp1";
+ function = "pw_cko0_m1";
+ };
+ };
+
+ pw_cko0_pmx2: pw_cko0@2 {
+ pw_cko0_2 {
+ groups = "pw_cko0_grp2";
+ function = "pw_cko0_m2";
+ };
+ };
+
+ pw_cko1_pmx0: pw_cko1@0 {
+ pw_cko1_0 {
+ groups = "pw_cko1_grp0";
+ function = "pw_cko1_m0";
+ };
+ };
+
+ pw_cko1_pmx1: pw_cko1@1 {
+ pw_cko1_1 {
+ groups = "pw_cko1_grp1";
+ function = "pw_cko1_m1";
+ };
+ };
+
+ pw_i2s01_clk_pmx0: pw_i2s01_clk@0 {
+ pw_i2s01_clk_0 {
+ groups = "pw_i2s01_clk_grp0";
+ function = "pw_i2s01_clk_m0";
+ };
+ };
+
+ pw_i2s01_clk_pmx1: pw_i2s01_clk@1 {
+ pw_i2s01_clk_1 {
+ groups = "pw_i2s01_clk_grp1";
+ function = "pw_i2s01_clk_m1";
+ };
+ };
+
+ pw_pwm0_pmx: pw_pwm0@0 {
+ pw_pwm0 {
+ groups = "pw_pwm0_grp";
+ function = "pw_pwm0";
+ };
+ };
+
+ pw_pwm1_pmx: pw_pwm1@0 {
+ pw_pwm1 {
+ groups = "pw_pwm1_grp";
+ function = "pw_pwm1";
+ };
+ };
+
+ pw_pwm2_pmx0: pw_pwm2@0 {
+ pw_pwm2_0 {
+ groups = "pw_pwm2_grp0";
+ function = "pw_pwm2_m0";
+ };
+ };
+
+ pw_pwm2_pmx1: pw_pwm2@1 {
+ pw_pwm2_1 {
+ groups = "pw_pwm2_grp1";
+ function = "pw_pwm2_m1";
+ };
+ };
+
+ pw_pwm3_pmx0: pw_pwm3@0 {
+ pw_pwm3_0 {
+ groups = "pw_pwm3_grp0";
+ function = "pw_pwm3_m0";
+ };
+ };
+
+ pw_pwm3_pmx1: pw_pwm3@1 {
+ pw_pwm3_1 {
+ groups = "pw_pwm3_grp1";
+ function = "pw_pwm3_m1";
+ };
+ };
+
+ pw_pwm_cpu_vol_pmx0: pw_pwm_cpu_vol@0 {
+ pw_pwm_cpu_vol_0 {
+ groups = "pw_pwm_cpu_vol_grp0";
+ function = "pw_pwm_cpu_vol_m0";
+ };
+ };
+
+ pw_pwm_cpu_vol_pmx1: pw_pwm_cpu_vol@1 {
+ pw_pwm_cpu_vol_1 {
+ groups = "pw_pwm_cpu_vol_grp1";
+ function = "pw_pwm_cpu_vol_m1";
+ };
+ };
+
+ pw_backlight_pmx0: pw_backlight@0 {
+ pw_backlight_0 {
+ groups = "pw_backlight_grp0";
+ function = "pw_backlight_m0";
+ };
+ };
+
+ pw_backlight_pmx1: pw_backlight@1 {
+ pw_backlight_1 {
+ groups = "pw_backlight_grp1";
+ function = "pw_backlight_m1";
+ };
+ };
+
+ rg_eth_mac_pmx: rg_eth_mac@0 {
+ rg_eth_mac {
+ groups = "rg_eth_mac_grp";
+ function = "rg_eth_mac";
+ };
+ };
+
+ rg_gmac_phy_intr_n_pmx: rg_gmac_phy_intr_n@0 {
+ rg_gmac_phy_intr_n {
+ groups = "rg_gmac_phy_intr_n_grp";
+ function = "rg_gmac_phy_intr_n";
+ };
+ };
+
+ rg_rgmii_mac_pmx: rg_rgmii_mac@0 {
+ rg_rgmii_mac {
+ groups = "rg_rgmii_mac_grp";
+ function = "rg_rgmii_mac";
+ };
+ };
+
+ rg_rgmii_phy_ref_clk_pmx0: rg_rgmii_phy_ref_clk@0 {
+ rg_rgmii_phy_ref_clk_0 {
+ groups =
+ "rg_rgmii_phy_ref_clk_grp0";
+ function =
+ "rg_rgmii_phy_ref_clk_m0";
+ };
+ };
+
+ rg_rgmii_phy_ref_clk_pmx1: rg_rgmii_phy_ref_clk@1 {
+ rg_rgmii_phy_ref_clk_1 {
+ groups =
+ "rg_rgmii_phy_ref_clk_grp1";
+ function =
+ "rg_rgmii_phy_ref_clk_m1";
+ };
+ };
+
+ sd0_pmx: sd0@0 {
+ sd0 {
+ groups = "sd0_grp";
+ function = "sd0";
+ };
+ };
+
+ sd0_4bit_pmx: sd0_4bit@0 {
+ sd0_4bit {
+ groups = "sd0_4bit_grp";
+ function = "sd0_4bit";
+ };
+ };
+
+ sd1_pmx: sd1@0 {
+ sd1 {
+ groups = "sd1_grp";
+ function = "sd1";
+ };
+ };
+
+ sd1_4bit_pmx0: sd1_4bit@0 {
+ sd1_4bit_0 {
+ groups = "sd1_4bit_grp0";
+ function = "sd1_4bit_m0";
+ };
+ };
+
+ sd1_4bit_pmx1: sd1_4bit@1 {
+ sd1_4bit_1 {
+ groups = "sd1_4bit_grp1";
+ function = "sd1_4bit_m1";
+ };
+ };
+
+ sd2_pmx0: sd2@0 {
+ sd2_0 {
+ groups = "sd2_grp0";
+ function = "sd2_m0";
+ };
+ };
+
+ sd2_no_cdb_pmx0: sd2_no_cdb@0 {
+ sd2_no_cdb_0 {
+ groups = "sd2_no_cdb_grp0";
+ function = "sd2_no_cdb_m0";
+ };
+ };
+
+ sd3_pmx: sd3@0 {
+ sd3 {
+ groups = "sd3_grp";
+ function = "sd3";
+ };
+ };
+
+ sd5_pmx: sd5@0 {
+ sd5 {
+ groups = "sd5_grp";
+ function = "sd5";
+ };
+ };
+
+ sd6_pmx0: sd6@0 {
+ sd6_0 {
+ groups = "sd6_grp0";
+ function = "sd6_m0";
+ };
+ };
+
+ sd6_pmx1: sd6@1 {
+ sd6_1 {
+ groups = "sd6_grp1";
+ function = "sd6_m1";
+ };
+ };
+
+ sp0_ext_ldo_on_pmx: sp0_ext_ldo_on@0 {
+ sp0_ext_ldo_on {
+ groups = "sp0_ext_ldo_on_grp";
+ function = "sp0_ext_ldo_on";
+ };
+ };
+
+ sp0_qspi_pmx: sp0_qspi@0 {
+ sp0_qspi {
+ groups = "sp0_qspi_grp";
+ function = "sp0_qspi";
+ };
+ };
+
+ sp1_spi_pmx: sp1_spi@0 {
+ sp1_spi {
+ groups = "sp1_spi_grp";
+ function = "sp1_spi";
+ };
+ };
+
+ tpiu_trace_pmx: tpiu_trace@0 {
+ tpiu_trace {
+ groups = "tpiu_trace_grp";
+ function = "tpiu_trace";
+ };
+ };
+
+ uart0_pmx: uart0@0 {
+ uart0 {
+ groups = "uart0_grp";
+ function = "uart0";
+ };
+ };
+
+ uart0_nopause_pmx: uart0_nopause@0 {
+ uart0_nopause {
+ groups = "uart0_nopause_grp";
+ function = "uart0_nopause";
+ };
+ };
+
+ uart1_pmx: uart1@0 {
+ uart1 {
+ groups = "uart1_grp";
+ function = "uart1";
+ };
+ };
+
+ uart2_pmx: uart2@0 {
+ uart2 {
+ groups = "uart2_grp";
+ function = "uart2";
+ };
+ };
+
+ uart3_pmx0: uart3@0 {
+ uart3_0 {
+ groups = "uart3_grp0";
+ function = "uart3_m0";
+ };
+ };
+
+ uart3_pmx1: uart3@1 {
+ uart3_1 {
+ groups = "uart3_grp1";
+ function = "uart3_m1";
+ };
+ };
+
+ uart3_pmx2: uart3@2 {
+ uart3_2 {
+ groups = "uart3_grp2";
+ function = "uart3_m2";
+ };
+ };
+
+ uart3_pmx3: uart3@3 {
+ uart3_3 {
+ groups = "uart3_grp3";
+ function = "uart3_m3";
+ };
+ };
+
+ uart3_nopause_pmx0: uart3_nopause@0 {
+ uart3_nopause_0 {
+ groups = "uart3_nopause_grp0";
+ function = "uart3_nopause_m0";
+ };
+ };
+
+ uart3_nopause_pmx1: uart3_nopause@1 {
+ uart3_nopause_1 {
+ groups = "uart3_nopause_grp1";
+ function = "uart3_nopause_m1";
+ };
+ };
+
+ uart4_pmx0: uart4@0 {
+ uart4_0 {
+ groups = "uart4_grp0";
+ function = "uart4_m0";
+ };
+ };
+
+ uart4_pmx1: uart4@1 {
+ uart4_1 {
+ groups = "uart4_grp1";
+ function = "uart4_m1";
+ };
+ };
+
+ uart4_pmx2: uart4@2 {
+ uart4_2 {
+ groups = "uart4_grp2";
+ function = "uart4_m2";
+ };
+ };
+
+ uart4_nopause_pmx: uart4_nopause@0 {
+ uart4_nopause {
+ groups = "uart4_nopause_grp";
+ function = "uart4_nopause";
+ };
+ };
+
+ usb0_drvvbus_pmx: usb0_drvvbus@0 {
+ usb0_drvvbus {
+ groups = "usb0_drvvbus_grp";
+ function = "usb0_drvvbus";
+ };
+ };
+
+ usb1_drvvbus_pmx: usb1_drvvbus@0 {
+ usb1_drvvbus {
+ groups = "usb1_drvvbus_grp";
+ function = "usb1_drvvbus";
+ };
+ };
+
+ visbus_dout_pmx: visbus_dout@0 {
+ visbus_dout {
+ groups = "visbus_dout_grp";
+ function = "visbus_dout";
+ };
+ };
+
+ vi_vip1_pmx: vi_vip1@0 {
+ vi_vip1 {
+ groups = "vi_vip1_grp";
+ function = "vi_vip1";
+ };
+ };
+
+ vi_vip1_ext_pmx: vi_vip1_ext@0 {
+ vi_vip1_ext {
+ groups = "vi_vip1_ext_grp";
+ function = "vi_vip1_ext";
+ };
+ };
+
+ vi_vip1_low8bit_pmx: vi_vip1_low8bit@0 {
+ vi_vip1_low8bit {
+ groups = "vi_vip1_low8bit_grp";
+ function = "vi_vip1_low8bit";
+ };
+ };
+
+ vi_vip1_high8bit_pmx: vi_vip1_high8bit@0 {
+ vi_vip1_high8bit {
+ groups = "vi_vip1_high8bit_grp";
+ function = "vi_vip1_high8bit";
+ };
+ };
};
pmipc {
@@ -356,6 +1375,12 @@
clock-names = "gpio0_io";
gpio-controller;
interrupt-controller;
+
+ gpio-banks = <2>;
+ gpio-ranges = <&pinctrl 0 0 0>,
+ <&pinctrl 32 0 0>;
+ gpio-ranges-group-names = "lvds_gpio_grp",
+ "uart_nand_gpio_grp";
};
nand@17050000 {
@@ -461,11 +1486,22 @@
#interrupt-cells = <2>;
compatible = "sirf,atlas7-gpio";
reg = <0x13300000 0x1000>;
- interrupts = <0 43 0>, <0 44 0>, <0 45 0>;
+ interrupts = <0 43 0>, <0 44 0>,
+ <0 45 0>, <0 46 0>;
clocks = <&car 84>;
clock-names = "gpio1_io";
gpio-controller;
interrupt-controller;
+
+ gpio-banks = <4>;
+ gpio-ranges = <&pinctrl 0 0 0>,
+ <&pinctrl 32 0 0>,
+ <&pinctrl 64 0 0>,
+ <&pinctrl 96 0 0>;
+ gpio-ranges-group-names = "gnss_gpio_grp",
+ "lcd_vip_gpio_grp",
+ "sdio_i2s_gpio_grp",
+ "sp_rgmii_gpio_grp";
};
sd2: sdhci@14200000 {
@@ -744,6 +1780,10 @@
interrupts = <0 47 0>;
gpio-controller;
interrupt-controller;
+
+ gpio-banks = <1>;
+ gpio-ranges = <&pinctrl 0 0 0>;
+ gpio-ranges-group-names = "rtc_gpio_grp";
};
rtc-iobg@18840000 {
diff --git a/arch/arm/boot/dts/vexpress-v2p-ca15_a7.dts b/arch/arm/boot/dts/vexpress-v2p-ca15_a7.dts
index 107395c..17f63f7 100644
--- a/arch/arm/boot/dts/vexpress-v2p-ca15_a7.dts
+++ b/arch/arm/boot/dts/vexpress-v2p-ca15_a7.dts
@@ -150,6 +150,16 @@
interface-type = "ace";
reg = <0x5000 0x1000>;
};
+
+ pmu@9000 {
+ compatible = "arm,cci-400-pmu,r0";
+ reg = <0x9000 0x5000>;
+ interrupts = <0 105 4>,
+ <0 101 4>,
+ <0 102 4>,
+ <0 103 4>,
+ <0 104 4>;
+ };
};
memory-controller@7ffd0000 {
@@ -187,11 +197,22 @@
<1 10 0xf08>;
};
- pmu {
+ pmu_a15 {
compatible = "arm,cortex-a15-pmu";
interrupts = <0 68 4>,
<0 69 4>;
- interrupt-affinity = <&cpu0>, <&cpu1>;
+ interrupt-affinity = <&cpu0>,
+ <&cpu1>;
+ };
+
+ pmu_a7 {
+ compatible = "arm,cortex-a7-pmu";
+ interrupts = <0 128 4>,
+ <0 129 4>,
+ <0 130 4>;
+ interrupt-affinity = <&cpu2>,
+ <&cpu3>,
+ <&cpu4>;
};
oscclk6a: oscclk6a {
diff --git a/arch/arm/common/mcpm_platsmp.c b/arch/arm/common/mcpm_platsmp.c
index 92e54d7..2b25b60 100644
--- a/arch/arm/common/mcpm_platsmp.c
+++ b/arch/arm/common/mcpm_platsmp.c
@@ -65,14 +65,10 @@
return !mcpm_wait_for_cpu_powerdown(pcpu, pcluster);
}
-static int mcpm_cpu_disable(unsigned int cpu)
+static bool mcpm_cpu_can_disable(unsigned int cpu)
{
- /*
- * We assume all CPUs may be shut down.
- * This would be the hook to use for eventual Secure
- * OS migration requests as described in the PSCI spec.
- */
- return 0;
+ /* We assume all CPUs may be shut down. */
+ return true;
}
static void mcpm_cpu_die(unsigned int cpu)
@@ -92,7 +88,7 @@
.smp_secondary_init = mcpm_secondary_init,
#ifdef CONFIG_HOTPLUG_CPU
.cpu_kill = mcpm_cpu_kill,
- .cpu_disable = mcpm_cpu_disable,
+ .cpu_can_disable = mcpm_cpu_can_disable,
.cpu_die = mcpm_cpu_die,
#endif
};
diff --git a/arch/arm/configs/multi_v7_defconfig b/arch/arm/configs/multi_v7_defconfig
index 6d83a1b..5fd8df6 100644
--- a/arch/arm/configs/multi_v7_defconfig
+++ b/arch/arm/configs/multi_v7_defconfig
@@ -353,7 +353,6 @@
CONFIG_POWER_RESET_GPIO=y
CONFIG_POWER_RESET_GPIO_RESTART=y
CONFIG_POWER_RESET_KEYSTONE=y
-CONFIG_POWER_RESET_SUN6I=y
CONFIG_POWER_RESET_RMOBILE=y
CONFIG_SENSORS_LM90=y
CONFIG_SENSORS_LM95245=y
diff --git a/arch/arm/configs/sunxi_defconfig b/arch/arm/configs/sunxi_defconfig
index 8ecba00..7ebc346b 100644
--- a/arch/arm/configs/sunxi_defconfig
+++ b/arch/arm/configs/sunxi_defconfig
@@ -2,6 +2,7 @@
CONFIG_HIGH_RES_TIMERS=y
CONFIG_BLK_DEV_INITRD=y
CONFIG_PERF_EVENTS=y
+CONFIG_MODULES=y
CONFIG_ARCH_SUNXI=y
CONFIG_SMP=y
CONFIG_NR_CPUS=8
@@ -77,7 +78,6 @@
CONFIG_GPIO_SYSFS=y
CONFIG_POWER_SUPPLY=y
CONFIG_POWER_RESET=y
-CONFIG_POWER_RESET_SUN6I=y
CONFIG_THERMAL=y
CONFIG_CPU_THERMAL=y
CONFIG_WATCHDOG=y
@@ -87,6 +87,10 @@
CONFIG_REGULATOR_FIXED_VOLTAGE=y
CONFIG_REGULATOR_AXP20X=y
CONFIG_REGULATOR_GPIO=y
+CONFIG_FB=y
+CONFIG_FB_SIMPLE=y
+CONFIG_FRAMEBUFFER_CONSOLE=y
+CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y
CONFIG_USB=y
CONFIG_USB_EHCI_HCD=y
CONFIG_USB_EHCI_HCD_PLATFORM=y
diff --git a/arch/arm/include/asm/Kbuild b/arch/arm/include/asm/Kbuild
index 83c5019..517ef6d 100644
--- a/arch/arm/include/asm/Kbuild
+++ b/arch/arm/include/asm/Kbuild
@@ -12,7 +12,6 @@
generic-y += kdebug.h
generic-y += local.h
generic-y += local64.h
-generic-y += mcs_spinlock.h
generic-y += msgbuf.h
generic-y += param.h
generic-y += parport.h
diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h
index 4abe572..7bbf325 100644
--- a/arch/arm/include/asm/assembler.h
+++ b/arch/arm/include/asm/assembler.h
@@ -108,33 +108,37 @@
.endm
#endif
- .macro asm_trace_hardirqs_off
+ .macro asm_trace_hardirqs_off, save=1
#if defined(CONFIG_TRACE_IRQFLAGS)
+ .if \save
stmdb sp!, {r0-r3, ip, lr}
+ .endif
bl trace_hardirqs_off
+ .if \save
ldmia sp!, {r0-r3, ip, lr}
+ .endif
#endif
.endm
- .macro asm_trace_hardirqs_on_cond, cond
+ .macro asm_trace_hardirqs_on, cond=al, save=1
#if defined(CONFIG_TRACE_IRQFLAGS)
/*
* actually the registers should be pushed and pop'd conditionally, but
* after bl the flags are certainly clobbered
*/
+ .if \save
stmdb sp!, {r0-r3, ip, lr}
+ .endif
bl\cond trace_hardirqs_on
+ .if \save
ldmia sp!, {r0-r3, ip, lr}
+ .endif
#endif
.endm
- .macro asm_trace_hardirqs_on
- asm_trace_hardirqs_on_cond al
- .endm
-
- .macro disable_irq
+ .macro disable_irq, save=1
disable_irq_notrace
- asm_trace_hardirqs_off
+ asm_trace_hardirqs_off \save
.endm
.macro enable_irq
@@ -173,7 +177,7 @@
.macro restore_irqs, oldcpsr
tst \oldcpsr, #PSR_I_BIT
- asm_trace_hardirqs_on_cond eq
+ asm_trace_hardirqs_on cond=eq
restore_irqs_notrace \oldcpsr
.endm
@@ -445,6 +449,53 @@
#endif
.endm
+ .macro uaccess_disable, tmp, isb=1
+#ifdef CONFIG_CPU_SW_DOMAIN_PAN
+ /*
+ * Whenever we re-enter userspace, the domains should always be
+ * set appropriately.
+ */
+ mov \tmp, #DACR_UACCESS_DISABLE
+ mcr p15, 0, \tmp, c3, c0, 0 @ Set domain register
+ .if \isb
+ instr_sync
+ .endif
+#endif
+ .endm
+
+ .macro uaccess_enable, tmp, isb=1
+#ifdef CONFIG_CPU_SW_DOMAIN_PAN
+ /*
+ * Whenever we re-enter userspace, the domains should always be
+ * set appropriately.
+ */
+ mov \tmp, #DACR_UACCESS_ENABLE
+ mcr p15, 0, \tmp, c3, c0, 0
+ .if \isb
+ instr_sync
+ .endif
+#endif
+ .endm
+
+ .macro uaccess_save, tmp
+#ifdef CONFIG_CPU_SW_DOMAIN_PAN
+ mrc p15, 0, \tmp, c3, c0, 0
+ str \tmp, [sp, #S_FRAME_SIZE]
+#endif
+ .endm
+
+ .macro uaccess_restore
+#ifdef CONFIG_CPU_SW_DOMAIN_PAN
+ ldr r0, [sp, #S_FRAME_SIZE]
+ mcr p15, 0, r0, c3, c0, 0
+#endif
+ .endm
+
+ .macro uaccess_save_and_disable, tmp
+ uaccess_save \tmp
+ uaccess_disable \tmp
+ .endm
+
.irp c,,eq,ne,cs,cc,mi,pl,vs,vc,hi,ls,ge,lt,gt,le,hs,lo
.macro ret\c, reg
#if __LINUX_ARM_ARCH__ < 6
diff --git a/arch/arm/include/asm/bitops.h b/arch/arm/include/asm/bitops.h
index 5638099..e943e6c 100644
--- a/arch/arm/include/asm/bitops.h
+++ b/arch/arm/include/asm/bitops.h
@@ -35,9 +35,9 @@
static inline void ____atomic_set_bit(unsigned int bit, volatile unsigned long *p)
{
unsigned long flags;
- unsigned long mask = 1UL << (bit & 31);
+ unsigned long mask = BIT_MASK(bit);
- p += bit >> 5;
+ p += BIT_WORD(bit);
raw_local_irq_save(flags);
*p |= mask;
@@ -47,9 +47,9 @@
static inline void ____atomic_clear_bit(unsigned int bit, volatile unsigned long *p)
{
unsigned long flags;
- unsigned long mask = 1UL << (bit & 31);
+ unsigned long mask = BIT_MASK(bit);
- p += bit >> 5;
+ p += BIT_WORD(bit);
raw_local_irq_save(flags);
*p &= ~mask;
@@ -59,9 +59,9 @@
static inline void ____atomic_change_bit(unsigned int bit, volatile unsigned long *p)
{
unsigned long flags;
- unsigned long mask = 1UL << (bit & 31);
+ unsigned long mask = BIT_MASK(bit);
- p += bit >> 5;
+ p += BIT_WORD(bit);
raw_local_irq_save(flags);
*p ^= mask;
@@ -73,9 +73,9 @@
{
unsigned long flags;
unsigned int res;
- unsigned long mask = 1UL << (bit & 31);
+ unsigned long mask = BIT_MASK(bit);
- p += bit >> 5;
+ p += BIT_WORD(bit);
raw_local_irq_save(flags);
res = *p;
@@ -90,9 +90,9 @@
{
unsigned long flags;
unsigned int res;
- unsigned long mask = 1UL << (bit & 31);
+ unsigned long mask = BIT_MASK(bit);
- p += bit >> 5;
+ p += BIT_WORD(bit);
raw_local_irq_save(flags);
res = *p;
@@ -107,9 +107,9 @@
{
unsigned long flags;
unsigned int res;
- unsigned long mask = 1UL << (bit & 31);
+ unsigned long mask = BIT_MASK(bit);
- p += bit >> 5;
+ p += BIT_WORD(bit);
raw_local_irq_save(flags);
res = *p;
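
For reference, BIT_MASK() and BIT_WORD() are the generic helpers from
<linux/bitops.h>; on a 32-bit build they expand to exactly the
open-coded forms removed above. A standalone check, restating the
macros under that assumption:

    #include <assert.h>
    #include <limits.h>
    #include <stdio.h>

    #define BITS_PER_LONG   (CHAR_BIT * sizeof(long))
    #define BIT_MASK(nr)    (1UL << ((nr) % BITS_PER_LONG))
    #define BIT_WORD(nr)    ((nr) / BITS_PER_LONG)

    int main(void)
    {
        unsigned int bit;

        for (bit = 0; bit < 256; bit++) {
            if (BITS_PER_LONG == 32) {
                /* the forms this patch replaces */
                assert(BIT_MASK(bit) == 1UL << (bit & 31));
                assert(BIT_WORD(bit) == bit >> 5);
            }
        }
        printf("BIT_MASK/BIT_WORD match the open-coded forms\n");
        return 0;
    }
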
diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
index 4812cda..d5525bf 100644
--- a/arch/arm/include/asm/cacheflush.h
+++ b/arch/arm/include/asm/cacheflush.h
@@ -140,8 +140,6 @@
* is visible to DMA, or data written by DMA to system memory is
* visible to the CPU.
*/
-#define dmac_map_area cpu_cache.dma_map_area
-#define dmac_unmap_area cpu_cache.dma_unmap_area
#define dmac_flush_range cpu_cache.dma_flush_range
#else
@@ -161,8 +159,6 @@
* is visible to DMA, or data written by DMA to system memory is
* visible to the CPU.
*/
-extern void dmac_map_area(const void *, size_t, int);
-extern void dmac_unmap_area(const void *, size_t, int);
extern void dmac_flush_range(const void *, const void *);
#endif
@@ -506,4 +502,21 @@
void flush_uprobe_xol_access(struct page *page, unsigned long uaddr,
void *kaddr, unsigned long len);
+/**
+ * secure_flush_area - ensure coherency across the secure boundary
+ * @addr: virtual address
+ * @size: size of region
+ *
+ * Ensure that the specified area of memory is coherent across the secure
+ * boundary from the non-secure side. This is used when calling secure
+ * firmware where the secure firmware does not ensure coherency.
+ */
+static inline void secure_flush_area(const void *addr, size_t size)
+{
+ phys_addr_t phys = __pa(addr);
+
+ __cpuc_flush_dcache_area((void *)addr, size);
+ outer_flush_range(phys, phys + size);
+}
+
#endif
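
A hypothetical caller, to show where secure_flush_area() sits in a
firmware call path; invoke_smc() and struct fw_params are stand-ins,
not real kernel symbols:

    #include <asm/cacheflush.h>
    #include <asm/memory.h>
    #include <linux/types.h>

    struct fw_params {          /* illustrative parameter block */
        u32 cmd;
        u32 arg;
    };

    /* stand-in for a platform's real SMC trampoline */
    extern int invoke_smc(phys_addr_t pa);

    static int example_fw_call(struct fw_params *p)
    {
        /* push the block past L1 and the outer cache before the call */
        secure_flush_area(p, sizeof(*p));
        return invoke_smc(__pa(p));
    }
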
diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h
index b52101d..a68b9d8 100644
--- a/arch/arm/include/asm/dma-mapping.h
+++ b/arch/arm/include/asm/dma-mapping.h
@@ -14,7 +14,7 @@
#include <xen/xen.h>
#include <asm/xen/hypervisor.h>
-#define DMA_ERROR_CODE (~0)
+#define DMA_ERROR_CODE (~(dma_addr_t)0x0)
extern struct dma_map_ops arm_dma_ops;
extern struct dma_map_ops arm_coherent_dma_ops;
diff --git a/arch/arm/include/asm/domain.h b/arch/arm/include/asm/domain.h
index 6ddbe44..e878129 100644
--- a/arch/arm/include/asm/domain.h
+++ b/arch/arm/include/asm/domain.h
@@ -34,15 +34,14 @@
*/
#ifndef CONFIG_IO_36
#define DOMAIN_KERNEL 0
-#define DOMAIN_TABLE 0
#define DOMAIN_USER 1
#define DOMAIN_IO 2
#else
#define DOMAIN_KERNEL 2
-#define DOMAIN_TABLE 2
#define DOMAIN_USER 1
#define DOMAIN_IO 0
#endif
+#define DOMAIN_VECTORS 3
/*
* Domain types
@@ -55,11 +54,46 @@
#define DOMAIN_MANAGER 1
#endif
-#define domain_val(dom,type) ((type) << (2*(dom)))
+#define domain_mask(dom) ((3) << (2 * (dom)))
+#define domain_val(dom,type) ((type) << (2 * (dom)))
+
+#ifdef CONFIG_CPU_SW_DOMAIN_PAN
+#define DACR_INIT \
+ (domain_val(DOMAIN_USER, DOMAIN_NOACCESS) | \
+ domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
+ domain_val(DOMAIN_IO, DOMAIN_CLIENT) | \
+ domain_val(DOMAIN_VECTORS, DOMAIN_CLIENT))
+#else
+#define DACR_INIT \
+ (domain_val(DOMAIN_USER, DOMAIN_CLIENT) | \
+ domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
+ domain_val(DOMAIN_IO, DOMAIN_CLIENT) | \
+ domain_val(DOMAIN_VECTORS, DOMAIN_CLIENT))
+#endif
+
+#define __DACR_DEFAULT \
+ domain_val(DOMAIN_KERNEL, DOMAIN_CLIENT) | \
+ domain_val(DOMAIN_IO, DOMAIN_CLIENT) | \
+ domain_val(DOMAIN_VECTORS, DOMAIN_CLIENT)
+
+#define DACR_UACCESS_DISABLE \
+ (__DACR_DEFAULT | domain_val(DOMAIN_USER, DOMAIN_NOACCESS))
+#define DACR_UACCESS_ENABLE \
+ (__DACR_DEFAULT | domain_val(DOMAIN_USER, DOMAIN_CLIENT))
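
Since each domain occupies a two-bit field in the DACR, the values
above can be checked by hand. A standalone sketch mirroring the
non-CONFIG_IO_36 domain numbers (and assuming the manager encoding
of 3):

    #include <stdio.h>

    #define DOMAIN_KERNEL   0
    #define DOMAIN_USER     1
    #define DOMAIN_IO       2
    #define DOMAIN_VECTORS  3

    #define DOMAIN_NOACCESS 0
    #define DOMAIN_CLIENT   1
    #define DOMAIN_MANAGER  3   /* 3 with CONFIG_CPU_USE_DOMAINS, else aliases client (1) */

    #define domain_mask(dom)        (3u << (2 * (dom)))
    #define domain_val(dom, type)   ((unsigned)(type) << (2 * (dom)))

    int main(void)
    {
        unsigned dacr = domain_val(DOMAIN_USER, DOMAIN_NOACCESS) |
                        domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) |
                        domain_val(DOMAIN_IO, DOMAIN_CLIENT) |
                        domain_val(DOMAIN_VECTORS, DOMAIN_CLIENT);

        printf("DACR_INIT (PAN) = 0x%02x\n", dacr);

        /* uaccess_enable: flip only the user domain to client access */
        dacr = (dacr & ~domain_mask(DOMAIN_USER)) |
               domain_val(DOMAIN_USER, DOMAIN_CLIENT);
        printf("uaccess enabled = 0x%02x\n", dacr);
        return 0;
    }
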
#ifndef __ASSEMBLY__
-#ifdef CONFIG_CPU_USE_DOMAINS
+static inline unsigned int get_domain(void)
+{
+ unsigned int domain;
+
+ asm(
+ "mrc p15, 0, %0, c3, c0 @ get domain"
+ : "=r" (domain));
+
+ return domain;
+}
+
static inline void set_domain(unsigned val)
{
asm volatile(
@@ -68,17 +102,16 @@
isb();
}
+#ifdef CONFIG_CPU_USE_DOMAINS
#define modify_domain(dom,type) \
do { \
- struct thread_info *thread = current_thread_info(); \
- unsigned int domain = thread->cpu_domain; \
- domain &= ~domain_val(dom, DOMAIN_MANAGER); \
- thread->cpu_domain = domain | domain_val(dom, type); \
- set_domain(thread->cpu_domain); \
+ unsigned int domain = get_domain(); \
+ domain &= ~domain_mask(dom); \
+ domain = domain | domain_val(dom, type); \
+ set_domain(domain); \
} while (0)
#else
-static inline void set_domain(unsigned val) { }
static inline void modify_domain(unsigned dom, unsigned type) { }
#endif
diff --git a/arch/arm/include/asm/fixmap.h b/arch/arm/include/asm/fixmap.h
index 0415eae..58cfe9f 100644
--- a/arch/arm/include/asm/fixmap.h
+++ b/arch/arm/include/asm/fixmap.h
@@ -6,9 +6,13 @@
#define FIXADDR_TOP (FIXADDR_END - PAGE_SIZE)
#include <asm/kmap_types.h>
+#include <asm/pgtable.h>
enum fixed_addresses {
- FIX_KMAP_BEGIN,
+ FIX_EARLYCON_MEM_BASE,
+ __end_of_permanent_fixed_addresses,
+
+ FIX_KMAP_BEGIN = __end_of_permanent_fixed_addresses,
FIX_KMAP_END = FIX_KMAP_BEGIN + (KM_TYPE_NR * NR_CPUS) - 1,
/* Support writing RO kernel text via kprobes, jump labels, etc. */
@@ -18,7 +22,16 @@
__end_of_fixed_addresses
};
+#define FIXMAP_PAGE_COMMON (L_PTE_YOUNG | L_PTE_PRESENT | L_PTE_XN | L_PTE_DIRTY)
+
+#define FIXMAP_PAGE_NORMAL (FIXMAP_PAGE_COMMON | L_PTE_MT_WRITEBACK)
+
+/* Used by set_fixmap_(io|nocache), both meant for mapping a device */
+#define FIXMAP_PAGE_IO (FIXMAP_PAGE_COMMON | L_PTE_MT_DEV_SHARED | L_PTE_SHARED)
+#define FIXMAP_PAGE_NOCACHE FIXMAP_PAGE_IO
+
void __set_fixmap(enum fixed_addresses idx, phys_addr_t phys, pgprot_t prot);
+void __init early_fixmap_init(void);
#include <asm-generic/fixmap.h>
diff --git a/arch/arm/include/asm/futex.h b/arch/arm/include/asm/futex.h
index 5eed828..6795368 100644
--- a/arch/arm/include/asm/futex.h
+++ b/arch/arm/include/asm/futex.h
@@ -22,8 +22,11 @@
#ifdef CONFIG_SMP
#define __futex_atomic_op(insn, ret, oldval, tmp, uaddr, oparg) \
+({ \
+ unsigned int __ua_flags; \
smp_mb(); \
prefetchw(uaddr); \
+ __ua_flags = uaccess_save_and_enable(); \
__asm__ __volatile__( \
"1: ldrex %1, [%3]\n" \
" " insn "\n" \
@@ -34,12 +37,15 @@
__futex_atomic_ex_table("%5") \
: "=&r" (ret), "=&r" (oldval), "=&r" (tmp) \
: "r" (uaddr), "r" (oparg), "Ir" (-EFAULT) \
- : "cc", "memory")
+ : "cc", "memory"); \
+ uaccess_restore(__ua_flags); \
+})
static inline int
futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
u32 oldval, u32 newval)
{
+ unsigned int __ua_flags;
int ret;
u32 val;
@@ -49,6 +55,7 @@
smp_mb();
/* Prefetching cannot fault */
prefetchw(uaddr);
+ __ua_flags = uaccess_save_and_enable();
__asm__ __volatile__("@futex_atomic_cmpxchg_inatomic\n"
"1: ldrex %1, [%4]\n"
" teq %1, %2\n"
@@ -61,6 +68,7 @@
: "=&r" (ret), "=&r" (val)
: "r" (oldval), "r" (newval), "r" (uaddr), "Ir" (-EFAULT)
: "cc", "memory");
+ uaccess_restore(__ua_flags);
smp_mb();
*uval = val;
@@ -73,6 +81,8 @@
#include <asm/domain.h>
#define __futex_atomic_op(insn, ret, oldval, tmp, uaddr, oparg) \
+({ \
+ unsigned int __ua_flags = uaccess_save_and_enable(); \
__asm__ __volatile__( \
"1: " TUSER(ldr) " %1, [%3]\n" \
" " insn "\n" \
@@ -81,12 +91,15 @@
__futex_atomic_ex_table("%5") \
: "=&r" (ret), "=&r" (oldval), "=&r" (tmp) \
: "r" (uaddr), "r" (oparg), "Ir" (-EFAULT) \
- : "cc", "memory")
+ : "cc", "memory"); \
+ uaccess_restore(__ua_flags); \
+})
static inline int
futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
u32 oldval, u32 newval)
{
+ unsigned int __ua_flags;
int ret = 0;
u32 val;
@@ -94,6 +107,7 @@
return -EFAULT;
preempt_disable();
+ __ua_flags = uaccess_save_and_enable();
__asm__ __volatile__("@futex_atomic_cmpxchg_inatomic\n"
"1: " TUSER(ldr) " %1, [%4]\n"
" teq %1, %2\n"
@@ -103,6 +117,7 @@
: "+r" (ret), "=&r" (val)
: "r" (oldval), "r" (newval), "r" (uaddr), "Ir" (-EFAULT)
: "cc", "memory");
+ uaccess_restore(__ua_flags);
*uval = val;
preempt_enable();
diff --git a/arch/arm/include/asm/glue-cache.h b/arch/arm/include/asm/glue-cache.h
index a3c24cd..cab07f6 100644
--- a/arch/arm/include/asm/glue-cache.h
+++ b/arch/arm/include/asm/glue-cache.h
@@ -158,8 +158,6 @@
#define __cpuc_coherent_user_range __glue(_CACHE,_coherent_user_range)
#define __cpuc_flush_dcache_area __glue(_CACHE,_flush_kern_dcache_area)
-#define dmac_map_area __glue(_CACHE,_dma_map_area)
-#define dmac_unmap_area __glue(_CACHE,_dma_unmap_area)
#define dmac_flush_range __glue(_CACHE,_dma_flush_range)
#endif
diff --git a/arch/arm/include/asm/io.h b/arch/arm/include/asm/io.h
index 1c3938f..4859820 100644
--- a/arch/arm/include/asm/io.h
+++ b/arch/arm/include/asm/io.h
@@ -140,16 +140,11 @@
* The _caller variety takes a __builtin_return_address(0) value for
* /proc/vmalloc to use - and should only be used in non-inline functions.
*/
-extern void __iomem *__arm_ioremap_pfn_caller(unsigned long, unsigned long,
- size_t, unsigned int, void *);
extern void __iomem *__arm_ioremap_caller(phys_addr_t, size_t, unsigned int,
void *);
-
extern void __iomem *__arm_ioremap_pfn(unsigned long, unsigned long, size_t, unsigned int);
-extern void __iomem *__arm_ioremap(phys_addr_t, size_t, unsigned int);
extern void __iomem *__arm_ioremap_exec(phys_addr_t, size_t, bool cached);
extern void __iounmap(volatile void __iomem *addr);
-extern void __arm_iounmap(volatile void __iomem *addr);
extern void __iomem * (*arch_ioremap_caller)(phys_addr_t, size_t,
unsigned int, void *);
@@ -321,21 +316,24 @@
static inline void memset_io(volatile void __iomem *dst, unsigned c,
size_t count)
{
- memset((void __force *)dst, c, count);
+ extern void mmioset(void *, unsigned int, size_t);
+ mmioset((void __force *)dst, c, count);
}
#define memset_io(dst,c,count) memset_io(dst,c,count)
static inline void memcpy_fromio(void *to, const volatile void __iomem *from,
size_t count)
{
- memcpy(to, (const void __force *)from, count);
+ extern void mmiocpy(void *, const void *, size_t);
+ mmiocpy(to, (const void __force *)from, count);
}
#define memcpy_fromio(to,from,count) memcpy_fromio(to,from,count)
static inline void memcpy_toio(volatile void __iomem *to, const void *from,
size_t count)
{
- memcpy((void __force *)to, from, count);
+ extern void mmiocpy(void *, const void *, size_t);
+ mmiocpy((void __force *)to, from, count);
}
#define memcpy_toio(to,from,count) memcpy_toio(to,from,count)
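
A hypothetical driver fragment showing the intended use: with the
change above these copies go through mmiocpy()/mmioset() rather than
the compiler's builtin memcpy/memset, whose code generation (e.g.
unaligned accesses) is unsuitable for Device memory. All names here
are illustrative:

    #include <linux/io.h>
    #include <linux/types.h>

    struct pkt_buf {
        u8 data[256];
    };

    static void example_rx(void __iomem *win, struct pkt_buf *pkt)
    {
        /* copy a packet out of a device window */
        memcpy_fromio(pkt->data, win, sizeof(pkt->data));
    }

    static void example_tx(void __iomem *win, const struct pkt_buf *pkt)
    {
        memcpy_toio(win, pkt->data, sizeof(pkt->data));
    }
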
@@ -348,18 +346,61 @@
#endif /* readl */
/*
- * ioremap and friends.
+ * ioremap() and friends.
*
- * ioremap takes a PCI memory address, as specified in
- * Documentation/io-mapping.txt.
+ * ioremap() takes a resource address, and size. Due to the ARM memory
+ * types, it is important to use the correct ioremap() function as each
+ * mapping has specific properties.
*
+ * Function Memory type Cacheability Cache hint
+ * ioremap() Device n/a n/a
+ * ioremap_nocache() Device n/a n/a
+ * ioremap_cache() Normal Writeback Read allocate
+ * ioremap_wc() Normal Non-cacheable n/a
+ * ioremap_wt() Normal Non-cacheable n/a
+ *
+ * All device mappings have the following properties:
+ * - no access speculation
+ * - no repetition (eg, on return from an exception)
+ * - number, order and size of accesses are maintained
+ * - unaligned accesses are "unpredictable"
+ * - writes may be delayed before they hit the endpoint device
+ *
+ * ioremap_nocache() is the same as ioremap() because too many device
+ * drivers use it for device registers, and too much documentation tells
+ * people to use it that way, for it to behave any differently. This is not a
+ * safe fallback for memory-like mappings, or memory regions where the
+ * compiler may generate unaligned accesses - eg, via inlining its own
+ * memcpy.
+ *
+ * All normal memory mappings have the following properties:
+ * - reads can be repeated with no side effects
+ * - repeated reads return the last value written
+ * - reads can fetch additional locations without side effects
+ * - writes can be repeated (in certain cases) with no side effects
+ * - writes can be merged before accessing the target
+ * - unaligned accesses can be supported
+ * - ordering is not guaranteed without explicit dependencies or barrier
+ * instructions
+ * - writes may be delayed before they hit the endpoint memory
+ *
+ * The cache hint is only a performance hint: CPUs may alias these hints.
+ * Eg, a CPU not implementing read allocate but implementing write allocate
+ * will provide a write allocate mapping instead.
*/
-#define ioremap(cookie,size) __arm_ioremap((cookie), (size), MT_DEVICE)
-#define ioremap_nocache(cookie,size) __arm_ioremap((cookie), (size), MT_DEVICE)
-#define ioremap_cache(cookie,size) __arm_ioremap((cookie), (size), MT_DEVICE_CACHED)
-#define ioremap_wc(cookie,size) __arm_ioremap((cookie), (size), MT_DEVICE_WC)
-#define ioremap_wt(cookie,size) __arm_ioremap((cookie), (size), MT_DEVICE)
-#define iounmap __arm_iounmap
+void __iomem *ioremap(resource_size_t res_cookie, size_t size);
+#define ioremap ioremap
+#define ioremap_nocache ioremap
+
+void __iomem *ioremap_cache(resource_size_t res_cookie, size_t size);
+#define ioremap_cache ioremap_cache
+
+void __iomem *ioremap_wc(resource_size_t res_cookie, size_t size);
+#define ioremap_wc ioremap_wc
+#define ioremap_wt ioremap_wc
+
+void iounmap(volatile void __iomem *iomem_cookie);
+#define iounmap iounmap
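
An illustrative pairing of the variants from the table above; the
physical addresses and sizes are invented:

    #include <linux/errno.h>
    #include <linux/io.h>
    #include <linux/sizes.h>

    static void __iomem *regs, *fb;

    static int example_map(void)
    {
        regs = ioremap(0x10000000, SZ_4K);      /* device registers */
        if (!regs)
            return -ENOMEM;

        fb = ioremap_wc(0x20000000, SZ_8M);     /* write-combined framebuffer */
        if (!fb) {
            iounmap(regs);
            return -ENOMEM;
        }
        return 0;
    }

    static void example_unmap(void)
    {
        iounmap(fb);
        iounmap(regs);
    }
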
/*
* io{read,write}{16,32}be() macros
diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h
index 3a72d69..b7f6fb4 100644
--- a/arch/arm/include/asm/memory.h
+++ b/arch/arm/include/asm/memory.h
@@ -275,7 +275,7 @@
*/
#define __pa(x) __virt_to_phys((unsigned long)(x))
#define __va(x) ((void *)__phys_to_virt((phys_addr_t)(x)))
-#define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT)
+#define pfn_to_kaddr(pfn) __va((phys_addr_t)(pfn) << PAGE_SHIFT)
extern phys_addr_t (*arch_virt_to_idmap)(unsigned long x);
@@ -286,7 +286,7 @@
*/
static inline phys_addr_t __virt_to_idmap(unsigned long x)
{
- if (arch_virt_to_idmap)
+ if (IS_ENABLED(CONFIG_MMU) && arch_virt_to_idmap)
return arch_virt_to_idmap(x);
else
return __virt_to_phys(x);
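
The pfn_to_kaddr() cast above matters once physical memory can sit
above 4GiB (as with LPAE): shifting a PFN as a 32-bit unsigned long
silently truncates. A standalone demonstration of the wrap:

    #include <stdio.h>

    #define PAGE_SHIFT 12

    int main(void)
    {
        unsigned int pfn = 0x140000;            /* page at physical 0x1_4000_0000 */
        unsigned int wrong = pfn << PAGE_SHIFT; /* 32-bit shift wraps to 0x40000000 */
        unsigned long long right = (unsigned long long)pfn << PAGE_SHIFT;

        printf("truncated: 0x%08x\n", wrong);
        printf("correct:   0x%llx\n", right);
        return 0;
    }
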
diff --git a/arch/arm/include/asm/pgtable-2level-hwdef.h b/arch/arm/include/asm/pgtable-2level-hwdef.h
index 5e68278..d0131ee 100644
--- a/arch/arm/include/asm/pgtable-2level-hwdef.h
+++ b/arch/arm/include/asm/pgtable-2level-hwdef.h
@@ -23,6 +23,7 @@
#define PMD_PXNTABLE (_AT(pmdval_t, 1) << 2) /* v7 */
#define PMD_BIT4 (_AT(pmdval_t, 1) << 4)
#define PMD_DOMAIN(x) (_AT(pmdval_t, (x)) << 5)
+#define PMD_DOMAIN_MASK PMD_DOMAIN(0x0f)
#define PMD_PROTECTION (_AT(pmdval_t, 1) << 9) /* v5 */
/*
* - section
diff --git a/arch/arm/include/asm/pgtable-2level.h b/arch/arm/include/asm/pgtable-2level.h
index bfd662e..aeddd28 100644
--- a/arch/arm/include/asm/pgtable-2level.h
+++ b/arch/arm/include/asm/pgtable-2level.h
@@ -129,7 +129,36 @@
/*
* These are the memory types, defined to be compatible with
- * pre-ARMv6 CPUs cacheable and bufferable bits: XXCB
+ * pre-ARMv6 CPUs cacheable and bufferable bits: n/a,n/a,C,B
+ * ARMv6+ without TEX remapping, they are a table index.
+ * ARMv6+ with TEX remapping, they correspond to n/a,TEX(0),C,B
+ *
+ * MT type Pre-ARMv6 ARMv6+ type / cacheable status
+ * UNCACHED Uncached Strongly ordered
+ * BUFFERABLE Bufferable Normal memory / non-cacheable
+ * WRITETHROUGH Writethrough Normal memory / write through
+ * WRITEBACK Writeback Normal memory / write back, read alloc
+ * MINICACHE Minicache N/A
+ * WRITEALLOC Writeback Normal memory / write back, write alloc
+ * DEV_SHARED Uncached Device memory (shared)
+ * DEV_NONSHARED Uncached Device memory (non-shared)
+ * DEV_WC Bufferable Normal memory / non-cacheable
+ * DEV_CACHED Writeback Normal memory / write back, read alloc
+ * VECTORS Variable Normal memory / variable
+ *
+ * All normal memory mappings have the following properties:
+ * - reads can be repeated with no side effects
+ * - repeated reads return the last value written
+ * - reads can fetch additional locations without side effects
+ * - writes can be repeated (in certain cases) with no side effects
+ * - writes can be merged before accessing the target
+ * - unaligned accesses can be supported
+ *
+ * All device mappings have the following properties:
+ * - no access speculation
+ * - no repetition (eg, on return from an exception)
+ * - number, order and size of accesses are maintained
+ * - unaligned accesses are "unpredictable"
*/
#define L_PTE_MT_UNCACHED (_AT(pteval_t, 0x00) << 2) /* 0000 */
#define L_PTE_MT_BUFFERABLE (_AT(pteval_t, 0x01) << 2) /* 0001 */
diff --git a/arch/arm/include/asm/smp.h b/arch/arm/include/asm/smp.h
index 2f3ac1b..ef35665 100644
--- a/arch/arm/include/asm/smp.h
+++ b/arch/arm/include/asm/smp.h
@@ -74,7 +74,6 @@
extern int __cpu_disable(void);
extern void __cpu_die(unsigned int cpu);
-extern void cpu_die(void);
extern void arch_send_call_function_single_ipi(int cpu);
extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
@@ -105,6 +104,7 @@
#ifdef CONFIG_HOTPLUG_CPU
int (*cpu_kill)(unsigned int cpu);
void (*cpu_die)(unsigned int cpu);
+ bool (*cpu_can_disable)(unsigned int cpu);
int (*cpu_disable)(unsigned int cpu);
#endif
#endif
diff --git a/arch/arm/include/asm/smp_plat.h b/arch/arm/include/asm/smp_plat.h
index 993e522..f908071 100644
--- a/arch/arm/include/asm/smp_plat.h
+++ b/arch/arm/include/asm/smp_plat.h
@@ -107,4 +107,13 @@
extern int platform_can_secondary_boot(void);
extern int platform_can_cpu_hotplug(void);
+#ifdef CONFIG_HOTPLUG_CPU
+extern int platform_can_hotplug_cpu(unsigned int cpu);
+#else
+static inline int platform_can_hotplug_cpu(unsigned int cpu)
+{
+ return 0;
+}
+#endif
+
#endif
diff --git a/arch/arm/include/asm/switch_to.h b/arch/arm/include/asm/switch_to.h
index c99e259..12ebfcc 100644
--- a/arch/arm/include/asm/switch_to.h
+++ b/arch/arm/include/asm/switch_to.h
@@ -10,7 +10,9 @@
* CPU.
*/
#if defined(CONFIG_PREEMPT) && defined(CONFIG_SMP) && defined(CONFIG_CPU_V7)
-#define finish_arch_switch(prev) dsb(ish)
+#define __complete_pending_tlbi() dsb(ish)
+#else
+#define __complete_pending_tlbi()
#endif
/*
@@ -22,6 +24,7 @@
#define switch_to(prev,next,last) \
do { \
+ __complete_pending_tlbi(); \
last = __switch_to(prev,task_thread_info(prev), task_thread_info(next)); \
} while (0)
diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
index bd32ede..d0a1119 100644
--- a/arch/arm/include/asm/thread_info.h
+++ b/arch/arm/include/asm/thread_info.h
@@ -74,9 +74,6 @@
.flags = 0, \
.preempt_count = INIT_PREEMPT_COUNT, \
.addr_limit = KERNEL_DS, \
- .cpu_domain = domain_val(DOMAIN_USER, DOMAIN_MANAGER) | \
- domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
- domain_val(DOMAIN_IO, DOMAIN_CLIENT), \
}
#define init_thread_info (init_thread_union.thread_info)
@@ -136,22 +133,18 @@
/*
* thread information flags:
- * TIF_SYSCALL_TRACE - syscall trace active
- * TIF_SYSCAL_AUDIT - syscall auditing active
- * TIF_SIGPENDING - signal pending
- * TIF_NEED_RESCHED - rescheduling necessary
- * TIF_NOTIFY_RESUME - callback before returning to user
* TIF_USEDFPU - FPU was used by this task this quantum (SMP)
* TIF_POLLING_NRFLAG - true if poll_idle() is polling TIF_NEED_RESCHED
*/
-#define TIF_SIGPENDING 0
-#define TIF_NEED_RESCHED 1
+#define TIF_SIGPENDING 0 /* signal pending */
+#define TIF_NEED_RESCHED 1 /* rescheduling necessary */
#define TIF_NOTIFY_RESUME 2 /* callback before returning to user */
-#define TIF_UPROBE 7
-#define TIF_SYSCALL_TRACE 8
-#define TIF_SYSCALL_AUDIT 9
-#define TIF_SYSCALL_TRACEPOINT 10
-#define TIF_SECCOMP 11 /* seccomp syscall filtering active */
+#define TIF_UPROBE 3 /* breakpointed or singlestepping */
+#define TIF_SYSCALL_TRACE 4 /* syscall trace active */
+#define TIF_SYSCALL_AUDIT 5 /* syscall auditing active */
+#define TIF_SYSCALL_TRACEPOINT 6 /* syscall tracepoint instrumentation */
+#define TIF_SECCOMP 7 /* seccomp syscall filtering active */
+
#define TIF_NOHZ 12 /* in adaptive nohz mode */
#define TIF_USING_IWMMXT 17
#define TIF_MEMDIE 18 /* is terminating due to OOM killer */
diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
index 74b17d0..8cc85a4 100644
--- a/arch/arm/include/asm/uaccess.h
+++ b/arch/arm/include/asm/uaccess.h
@@ -50,6 +50,35 @@
extern int fixup_exception(struct pt_regs *regs);
/*
+ * These two functions allow hooking accesses to userspace to increase
+ * system integrity by ensuring that the kernel cannot inadvertently
+ * perform such accesses (eg, via list poison values) which could then
+ * be exploited for privilege escalation.
+ */
+static inline unsigned int uaccess_save_and_enable(void)
+{
+#ifdef CONFIG_CPU_SW_DOMAIN_PAN
+ unsigned int old_domain = get_domain();
+
+ /* Set the current domain access to permit user accesses */
+ set_domain((old_domain & ~domain_mask(DOMAIN_USER)) |
+ domain_val(DOMAIN_USER, DOMAIN_CLIENT));
+
+ return old_domain;
+#else
+ return 0;
+#endif
+}
+
+static inline void uaccess_restore(unsigned int flags)
+{
+#ifdef CONFIG_CPU_SW_DOMAIN_PAN
+ /* Restore the user access mask */
+ set_domain(flags);
+#endif
+}
+
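
The idiom the rest of the series applies inside each accessor is then,
in sketch form (the body is a placeholder for the real ldrt/strt or
exclusive-access sequence):

    static inline int example_user_access(void)
    {
        unsigned int ua_flags;
        int ret = 0;

        ua_flags = uaccess_save_and_enable();   /* user domain -> client */
        /*
         * ... the single user-accessing instruction sequence goes
         * here; nothing else may touch userspace ...
         */
        uaccess_restore(ua_flags);              /* back to no-access */

        return ret;
    }
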
+/*
* These two are intentionally not defined anywhere - if the kernel
* code generates any references to them, that's a bug.
*/
@@ -165,6 +194,7 @@
register typeof(x) __r2 asm("r2"); \
register unsigned long __l asm("r1") = __limit; \
register int __e asm("r0"); \
+ unsigned int __ua_flags = uaccess_save_and_enable(); \
switch (sizeof(*(__p))) { \
case 1: \
if (sizeof((x)) >= 8) \
@@ -192,6 +222,7 @@
break; \
default: __e = __get_user_bad(); break; \
} \
+ uaccess_restore(__ua_flags); \
x = (typeof(*(p))) __r2; \
__e; \
})
@@ -224,6 +255,7 @@
register const typeof(*(p)) __user *__p asm("r0") = __tmp_p; \
register unsigned long __l asm("r1") = __limit; \
register int __e asm("r0"); \
+ unsigned int __ua_flags = uaccess_save_and_enable(); \
switch (sizeof(*(__p))) { \
case 1: \
__put_user_x(__r2, __p, __e, __l, 1); \
@@ -239,6 +271,7 @@
break; \
default: __e = __put_user_bad(); break; \
} \
+ uaccess_restore(__ua_flags); \
__e; \
})
@@ -300,20 +333,23 @@
do { \
unsigned long __gu_addr = (unsigned long)(ptr); \
unsigned long __gu_val; \
+ unsigned int __ua_flags; \
__chk_user_ptr(ptr); \
might_fault(); \
+ __ua_flags = uaccess_save_and_enable(); \
switch (sizeof(*(ptr))) { \
case 1: __get_user_asm_byte(__gu_val, __gu_addr, err); break; \
case 2: __get_user_asm_half(__gu_val, __gu_addr, err); break; \
case 4: __get_user_asm_word(__gu_val, __gu_addr, err); break; \
default: (__gu_val) = __get_user_bad(); \
} \
+ uaccess_restore(__ua_flags); \
(x) = (__typeof__(*(ptr)))__gu_val; \
} while (0)
-#define __get_user_asm_byte(x, addr, err) \
+#define __get_user_asm(x, addr, err, instr) \
__asm__ __volatile__( \
- "1: " TUSER(ldrb) " %1,[%2],#0\n" \
+ "1: " TUSER(instr) " %1, [%2], #0\n" \
"2:\n" \
" .pushsection .text.fixup,\"ax\"\n" \
" .align 2\n" \
@@ -329,6 +365,9 @@
: "r" (addr), "i" (-EFAULT) \
: "cc")
+#define __get_user_asm_byte(x, addr, err) \
+ __get_user_asm(x, addr, err, ldrb)
+
#ifndef __ARMEB__
#define __get_user_asm_half(x, __gu_addr, err) \
({ \
@@ -348,22 +387,7 @@
#endif
#define __get_user_asm_word(x, addr, err) \
- __asm__ __volatile__( \
- "1: " TUSER(ldr) " %1,[%2],#0\n" \
- "2:\n" \
- " .pushsection .text.fixup,\"ax\"\n" \
- " .align 2\n" \
- "3: mov %0, %3\n" \
- " mov %1, #0\n" \
- " b 2b\n" \
- " .popsection\n" \
- " .pushsection __ex_table,\"a\"\n" \
- " .align 3\n" \
- " .long 1b, 3b\n" \
- " .popsection" \
- : "+r" (err), "=&r" (x) \
- : "r" (addr), "i" (-EFAULT) \
- : "cc")
+ __get_user_asm(x, addr, err, ldr)
#define __put_user(x, ptr) \
({ \
@@ -381,9 +405,11 @@
#define __put_user_err(x, ptr, err) \
do { \
unsigned long __pu_addr = (unsigned long)(ptr); \
+ unsigned int __ua_flags; \
__typeof__(*(ptr)) __pu_val = (x); \
__chk_user_ptr(ptr); \
might_fault(); \
+ __ua_flags = uaccess_save_and_enable(); \
switch (sizeof(*(ptr))) { \
case 1: __put_user_asm_byte(__pu_val, __pu_addr, err); break; \
case 2: __put_user_asm_half(__pu_val, __pu_addr, err); break; \
@@ -391,11 +417,12 @@
case 8: __put_user_asm_dword(__pu_val, __pu_addr, err); break; \
default: __put_user_bad(); \
} \
+ uaccess_restore(__ua_flags); \
} while (0)
-#define __put_user_asm_byte(x, __pu_addr, err) \
+#define __put_user_asm(x, __pu_addr, err, instr) \
__asm__ __volatile__( \
- "1: " TUSER(strb) " %1,[%2],#0\n" \
+ "1: " TUSER(instr) " %1, [%2], #0\n" \
"2:\n" \
" .pushsection .text.fixup,\"ax\"\n" \
" .align 2\n" \
@@ -410,6 +437,9 @@
: "r" (x), "r" (__pu_addr), "i" (-EFAULT) \
: "cc")
+#define __put_user_asm_byte(x, __pu_addr, err) \
+ __put_user_asm(x, __pu_addr, err, strb)
+
#ifndef __ARMEB__
#define __put_user_asm_half(x, __pu_addr, err) \
({ \
@@ -427,21 +457,7 @@
#endif
#define __put_user_asm_word(x, __pu_addr, err) \
- __asm__ __volatile__( \
- "1: " TUSER(str) " %1,[%2],#0\n" \
- "2:\n" \
- " .pushsection .text.fixup,\"ax\"\n" \
- " .align 2\n" \
- "3: mov %0, %3\n" \
- " b 2b\n" \
- " .popsection\n" \
- " .pushsection __ex_table,\"a\"\n" \
- " .align 3\n" \
- " .long 1b, 3b\n" \
- " .popsection" \
- : "+r" (err) \
- : "r" (x), "r" (__pu_addr), "i" (-EFAULT) \
- : "cc")
+ __put_user_asm(x, __pu_addr, err, str)
#ifndef __ARMEB__
#define __reg_oper0 "%R2"
@@ -474,11 +490,46 @@
#ifdef CONFIG_MMU
-extern unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n);
-extern unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n);
-extern unsigned long __must_check __copy_to_user_std(void __user *to, const void *from, unsigned long n);
-extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
-extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned long n);
+extern unsigned long __must_check
+arm_copy_from_user(void *to, const void __user *from, unsigned long n);
+
+static inline unsigned long __must_check
+__copy_from_user(void *to, const void __user *from, unsigned long n)
+{
+ unsigned int __ua_flags = uaccess_save_and_enable();
+ n = arm_copy_from_user(to, from, n);
+ uaccess_restore(__ua_flags);
+ return n;
+}
+
+extern unsigned long __must_check
+arm_copy_to_user(void __user *to, const void *from, unsigned long n);
+extern unsigned long __must_check
+__copy_to_user_std(void __user *to, const void *from, unsigned long n);
+
+static inline unsigned long __must_check
+__copy_to_user(void __user *to, const void *from, unsigned long n)
+{
+ unsigned int __ua_flags = uaccess_save_and_enable();
+ n = arm_copy_to_user(to, from, n);
+ uaccess_restore(__ua_flags);
+ return n;
+}
+
+extern unsigned long __must_check
+arm_clear_user(void __user *addr, unsigned long n);
+extern unsigned long __must_check
+__clear_user_std(void __user *addr, unsigned long n);
+
+static inline unsigned long __must_check
+__clear_user(void __user *addr, unsigned long n)
+{
+ unsigned int __ua_flags = uaccess_save_and_enable();
+ n = arm_clear_user(addr, n);
+ uaccess_restore(__ua_flags);
+ return n;
+}
+
#else
#define __copy_from_user(to, from, n) (memcpy(to, (void __force *)from, n), 0)
#define __copy_to_user(to, from, n) (memcpy((void __force *)to, from, n), 0)
@@ -511,6 +562,7 @@
return n;
}
+/* These are from lib/ code, and use __get_user() and friends */
extern long strncpy_from_user(char *dest, const char __user *src, long count);
extern __must_check long strlen_user(const char __user *str);
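
[For reference, the DACR arithmetic behind uaccess_save_and_enable(): each
domain owns a two-bit field, so flipping DOMAIN_USER between client and
no-access is plain mask-and-or work. A minimal sketch, assuming the
domain_mask()/domain_val() definitions from asm/domain.h:

	#define domain_mask(dom)	(3 << (2 * (dom)))
	#define domain_val(dom, type)	((type) << (2 * (dom)))

	static inline unsigned int dacr_set_user(unsigned int dacr,
						 unsigned int type)
	{
		/* clear DOMAIN_USER's two bits, install the new access type */
		return (dacr & ~domain_mask(DOMAIN_USER)) |
		       domain_val(DOMAIN_USER, type);
	}
]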
diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c
index a88671c..f89811f 100644
--- a/arch/arm/kernel/armksyms.c
+++ b/arch/arm/kernel/armksyms.c
@@ -50,6 +50,9 @@
extern void fpundefinstr(void);
+void mmioset(void *, unsigned int, size_t);
+void mmiocpy(void *, const void *, size_t);
+
/* platform dependent support */
EXPORT_SYMBOL(arm_delay_ops);
@@ -88,12 +91,15 @@
EXPORT_SYMBOL(memchr);
EXPORT_SYMBOL(__memzero);
+EXPORT_SYMBOL(mmioset);
+EXPORT_SYMBOL(mmiocpy);
+
#ifdef CONFIG_MMU
EXPORT_SYMBOL(copy_page);
-EXPORT_SYMBOL(__copy_from_user);
-EXPORT_SYMBOL(__copy_to_user);
-EXPORT_SYMBOL(__clear_user);
+EXPORT_SYMBOL(arm_copy_from_user);
+EXPORT_SYMBOL(arm_copy_to_user);
+EXPORT_SYMBOL(arm_clear_user);
EXPORT_SYMBOL(__get_user_1);
EXPORT_SYMBOL(__get_user_2);
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index 7dac308..3e1c26e 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -149,10 +149,10 @@
#define SPFIX(code...)
#endif
- .macro svc_entry, stack_hole=0, trace=1
+ .macro svc_entry, stack_hole=0, trace=1, uaccess=1
UNWIND(.fnstart )
UNWIND(.save {r0 - pc} )
- sub sp, sp, #(S_FRAME_SIZE + \stack_hole - 4)
+ sub sp, sp, #(S_FRAME_SIZE + 8 + \stack_hole - 4)
#ifdef CONFIG_THUMB2_KERNEL
SPFIX( str r0, [sp] ) @ temporarily saved
SPFIX( mov r0, sp )
@@ -167,7 +167,7 @@
ldmia r0, {r3 - r5}
add r7, sp, #S_SP - 4 @ here for interlock avoidance
mov r6, #-1 @ "" "" "" ""
- add r2, sp, #(S_FRAME_SIZE + \stack_hole - 4)
+ add r2, sp, #(S_FRAME_SIZE + 8 + \stack_hole - 4)
SPFIX( addeq r2, r2, #4 )
str r3, [sp, #-4]! @ save the "real" r0 copied
@ from the exception stack
@@ -185,6 +185,11 @@
@
stmia r7, {r2 - r6}
+ uaccess_save r0
+ .if \uaccess
+ uaccess_disable r0
+ .endif
+
.if \trace
#ifdef CONFIG_TRACE_IRQFLAGS
bl trace_hardirqs_off
@@ -194,7 +199,7 @@
.align 5
__dabt_svc:
- svc_entry
+ svc_entry uaccess=0
mov r2, sp
dabt_helper
THUMB( ldr r5, [sp, #S_PSR] ) @ potentially updated CPSR
@@ -368,7 +373,7 @@
#error "sizeof(struct pt_regs) must be a multiple of 8"
#endif
- .macro usr_entry, trace=1
+ .macro usr_entry, trace=1, uaccess=1
UNWIND(.fnstart )
UNWIND(.cantunwind ) @ don't unwind the user space
sub sp, sp, #S_FRAME_SIZE
@@ -400,6 +405,10 @@
ARM( stmdb r0, {sp, lr}^ )
THUMB( store_user_sp_lr r0, r1, S_SP - S_PC )
+ .if \uaccess
+ uaccess_disable ip
+ .endif
+
@ Enable the alignment trap while in kernel mode
ATRAP( teq r8, r7)
ATRAP( mcrne p15, 0, r8, c1, c0, 0)
@@ -410,7 +419,7 @@
zero_fp
.if \trace
-#ifdef CONFIG_IRQSOFF_TRACER
+#ifdef CONFIG_TRACE_IRQFLAGS
bl trace_hardirqs_off
#endif
ct_user_exit save = 0
@@ -435,7 +444,7 @@
.align 5
__dabt_usr:
- usr_entry
+ usr_entry uaccess=0
kuser_cmpxchg_check
mov r2, sp
dabt_helper
@@ -458,7 +467,7 @@
.align 5
__und_usr:
- usr_entry
+ usr_entry uaccess=0
mov r2, r4
mov r3, r5
@@ -484,6 +493,8 @@
1: ldrt r0, [r4]
ARM_BE8(rev r0, r0) @ little endian instruction
+ uaccess_disable ip
+
@ r0 = 32-bit ARM instruction which caused the exception
@ r2 = PC value for the following instruction (:= regs->ARM_pc)
@ r4 = PC value for the faulting instruction
@@ -518,9 +529,10 @@
2: ldrht r5, [r4]
ARM_BE8(rev16 r5, r5) @ little endian instruction
cmp r5, #0xe800 @ 32bit instruction if xx != 0
- blo __und_usr_fault_16 @ 16bit undefined instruction
+ blo __und_usr_fault_16_pan @ 16bit undefined instruction
3: ldrht r0, [r2]
ARM_BE8(rev16 r0, r0) @ little endian instruction
+ uaccess_disable ip
add r2, r2, #2 @ r2 is PC + 2, make it PC + 4
str r2, [sp, #S_PC] @ it's a 2x16bit instr, update
orr r0, r0, r5, lsl #16
@@ -715,6 +727,8 @@
__und_usr_fault_32:
mov r1, #4
b 1f
+__und_usr_fault_16_pan:
+ uaccess_disable ip
__und_usr_fault_16:
mov r1, #2
1: mov r0, sp
@@ -770,6 +784,8 @@
ldr r4, [r2, #TI_TP_VALUE]
ldr r5, [r2, #TI_TP_VALUE + 4]
#ifdef CONFIG_CPU_USE_DOMAINS
+ mrc p15, 0, r6, c3, c0, 0 @ Get domain register
+ str r6, [r1, #TI_CPU_DOMAIN] @ Save old domain register
ldr r6, [r2, #TI_CPU_DOMAIN]
#endif
switch_tls r1, r4, r5, r3, r7
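
[The S_FRAME_SIZE + 8 adjustments above make room for two extra words
beyond the saved pt_regs; uaccess_save stores the entry DACR there so the
exit path (and __show_regs()) can recover it. A hypothetical C view of the
resulting SVC stack layout, for illustration only:

	struct svc_stack_frame {
		struct pt_regs	regs;		/* S_FRAME_SIZE bytes */
		unsigned int	saved_dacr;	/* written by uaccess_save */
		unsigned int	pad;		/* keeps sp 8-byte aligned */
	};
]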
diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
index 92828a1..30a7228 100644
--- a/arch/arm/kernel/entry-common.S
+++ b/arch/arm/kernel/entry-common.S
@@ -24,35 +24,55 @@
.align 5
+#if !(IS_ENABLED(CONFIG_TRACE_IRQFLAGS) || IS_ENABLED(CONFIG_CONTEXT_TRACKING))
/*
- * This is the fast syscall return path. We do as little as
- * possible here, and this includes saving r0 back into the SVC
- * stack.
+ * This is the fast syscall return path. We do as little as possible here,
+ * such as avoiding writing r0 to the stack. We only use this path if we
+ * have tracing and context tracking disabled - the overheads from those
+ * features make this path too inefficient.
*/
ret_fast_syscall:
UNWIND(.fnstart )
UNWIND(.cantunwind )
- disable_irq @ disable interrupts
+ disable_irq_notrace @ disable interrupts
ldr r1, [tsk, #TI_FLAGS] @ re-check for syscall tracing
- tst r1, #_TIF_SYSCALL_WORK
- bne __sys_trace_return
- tst r1, #_TIF_WORK_MASK
+ tst r1, #_TIF_SYSCALL_WORK | _TIF_WORK_MASK
bne fast_work_pending
- asm_trace_hardirqs_on
/* perform architecture specific actions before user return */
arch_ret_to_user r1, lr
- ct_user_enter
restore_user_regs fast = 1, offset = S_OFF
UNWIND(.fnend )
+ENDPROC(ret_fast_syscall)
-/*
- * Ok, we need to do extra processing, enter the slow path.
- */
+ /* Ok, we need to do extra processing, enter the slow path. */
fast_work_pending:
str r0, [sp, #S_R0+S_OFF]! @ returned r0
-work_pending:
+ /* fall through to work_pending */
+#else
+/*
+ * The "replacement" ret_fast_syscall for when tracing or context tracking
+ * is enabled. As we will need to call out to some C functions, we save
+ * r0 first to avoid needing to save registers around each C function call.
+ */
+ret_fast_syscall:
+ UNWIND(.fnstart )
+ UNWIND(.cantunwind )
+ str r0, [sp, #S_R0 + S_OFF]! @ save returned r0
+ disable_irq_notrace @ disable interrupts
+ ldr r1, [tsk, #TI_FLAGS] @ re-check for syscall tracing
+ tst r1, #_TIF_SYSCALL_WORK | _TIF_WORK_MASK
+ beq no_work_pending
+ UNWIND(.fnend )
+ENDPROC(ret_fast_syscall)
+
+ /* Slower path - fall through to work_pending */
+#endif
+
+ tst r1, #_TIF_SYSCALL_WORK
+ bne __sys_trace_return_nosave
+slow_work_pending:
mov r0, sp @ 'regs'
mov r2, why @ 'syscall'
bl do_work_pending
@@ -61,19 +81,23 @@
movlt scno, #(__NR_restart_syscall - __NR_SYSCALL_BASE)
ldmia sp, {r0 - r6} @ have to reload r0 - r6
b local_restart @ ... and off we go
+ENDPROC(ret_fast_syscall)
/*
* "slow" syscall return path. "why" tells us if this was a real syscall.
+ * IRQs may be enabled here, so always disable them. Note that we use the
+ * "notrace" version to avoid calling into the tracing code unnecessarily.
+ * do_work_pending() will update this state if necessary.
*/
ENTRY(ret_to_user)
ret_slow_syscall:
- disable_irq @ disable interrupts
+ disable_irq_notrace @ disable interrupts
ENTRY(ret_to_user_from_irq)
ldr r1, [tsk, #TI_FLAGS]
tst r1, #_TIF_WORK_MASK
- bne work_pending
+ bne slow_work_pending
no_work_pending:
- asm_trace_hardirqs_on
+ asm_trace_hardirqs_on save = 0
/* perform architecture specific actions before user return */
arch_ret_to_user r1, lr
@@ -173,6 +197,8 @@
USER( ldr scno, [lr, #-4] ) @ get SWI instruction
#endif
+ uaccess_disable tbl
+
adr tbl, sys_call_table @ load syscall table pointer
#if defined(CONFIG_OABI_COMPAT)
@@ -251,6 +277,12 @@
bl syscall_trace_exit
b ret_slow_syscall
+__sys_trace_return_nosave:
+ enable_irq_notrace
+ mov r0, sp
+ bl syscall_trace_exit
+ b ret_slow_syscall
+
.align 5
#ifdef CONFIG_ALIGNMENT_TRAP
.type __cr_alignment, #object
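
[The restructured return paths above condense to the following C-like
sketch; the helper names are hypothetical stand-ins for the assembly
labels:

	/* ret_fast_syscall, conceptually */
	local_irq_disable();			/* disable_irq_notrace */
	if (!(ti_flags & (_TIF_SYSCALL_WORK | _TIF_WORK_MASK)))
		return_to_user_fast();		/* r0 never written to the stack */
	save_r0_to_frame();			/* fast_work_pending */
	if (ti_flags & _TIF_SYSCALL_WORK)
		sys_trace_return_nosave();
	else
		slow_work_pending();
]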
diff --git a/arch/arm/kernel/entry-header.S b/arch/arm/kernel/entry-header.S
index 1a0045a..0d22ad2 100644
--- a/arch/arm/kernel/entry-header.S
+++ b/arch/arm/kernel/entry-header.S
@@ -196,7 +196,7 @@
msr cpsr_c, \rtemp @ switch back to the SVC mode
.endm
-#ifndef CONFIG_THUMB2_KERNEL
+
.macro svc_exit, rpsr, irq = 0
.if \irq != 0
@ IRQs already off
@@ -215,6 +215,10 @@
blne trace_hardirqs_off
#endif
.endif
+ uaccess_restore
+
+#ifndef CONFIG_THUMB2_KERNEL
+ @ ARM mode SVC restore
msr spsr_cxsf, \rpsr
#if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_32v6K)
@ We must avoid clrex due to Cortex-A15 erratum #830321
@@ -222,6 +226,20 @@
strex r1, r2, [r0] @ clear the exclusive monitor
#endif
ldmia sp, {r0 - pc}^ @ load r0 - pc, cpsr
+#else
+ @ Thumb mode SVC restore
+ ldr lr, [sp, #S_SP] @ top of the stack
+ ldrd r0, r1, [sp, #S_LR] @ calling lr and pc
+
+ @ We must avoid clrex due to Cortex-A15 erratum #830321
+ strex r2, r1, [sp, #S_LR] @ clear the exclusive monitor
+
+ stmdb lr!, {r0, r1, \rpsr} @ calling lr and rfe context
+ ldmia sp, {r0 - r12}
+ mov sp, lr
+ ldr lr, [sp], #4
+ rfeia sp!
+#endif
.endm
@
@@ -241,6 +259,9 @@
@ on the stack remains correct).
@
.macro svc_exit_via_fiq
+ uaccess_restore
+#ifndef CONFIG_THUMB2_KERNEL
+ @ ARM mode restore
mov r0, sp
ldmib r0, {r1 - r14} @ abort is deadly from here onward (it will
@ clobber state restored below)
@@ -250,9 +271,27 @@
msr spsr_cxsf, r9
ldr r0, [r0, #S_R0]
ldmia r8, {pc}^
+#else
+ @ Thumb mode restore
+ add r0, sp, #S_R2
+ ldr lr, [sp, #S_LR]
+ ldr sp, [sp, #S_SP] @ abort is deadly from here onward (it will
+ @ clobber state restored below)
+ ldmia r0, {r2 - r12}
+ mov r1, #FIQ_MODE | PSR_I_BIT | PSR_F_BIT
+ msr cpsr_c, r1
+ sub r0, #S_R2
+ add r8, r0, #S_PC
+ ldmia r0, {r0 - r1}
+ rfeia r8
+#endif
.endm
+
.macro restore_user_regs, fast = 0, offset = 0
+ uaccess_enable r1, isb=0
+#ifndef CONFIG_THUMB2_KERNEL
+ @ ARM mode restore
mov r2, sp
ldr r1, [r2, #\offset + S_PSR] @ get calling cpsr
ldr lr, [r2, #\offset + S_PC]! @ get pc
@@ -270,72 +309,16 @@
@ after ldm {}^
add sp, sp, #\offset + S_FRAME_SIZE
movs pc, lr @ return & move spsr_svc into cpsr
- .endm
-
-#else /* CONFIG_THUMB2_KERNEL */
- .macro svc_exit, rpsr, irq = 0
- .if \irq != 0
- @ IRQs already off
-#ifdef CONFIG_TRACE_IRQFLAGS
- @ The parent context IRQs must have been enabled to get here in
- @ the first place, so there's no point checking the PSR I bit.
- bl trace_hardirqs_on
-#endif
- .else
- @ IRQs off again before pulling preserved data off the stack
- disable_irq_notrace
-#ifdef CONFIG_TRACE_IRQFLAGS
- tst \rpsr, #PSR_I_BIT
- bleq trace_hardirqs_on
- tst \rpsr, #PSR_I_BIT
- blne trace_hardirqs_off
-#endif
- .endif
- ldr lr, [sp, #S_SP] @ top of the stack
- ldrd r0, r1, [sp, #S_LR] @ calling lr and pc
-
- @ We must avoid clrex due to Cortex-A15 erratum #830321
- strex r2, r1, [sp, #S_LR] @ clear the exclusive monitor
-
- stmdb lr!, {r0, r1, \rpsr} @ calling lr and rfe context
- ldmia sp, {r0 - r12}
- mov sp, lr
- ldr lr, [sp], #4
- rfeia sp!
- .endm
-
- @
- @ svc_exit_via_fiq - like svc_exit but switches to FIQ mode before exit
- @
- @ For full details see non-Thumb implementation above.
- @
- .macro svc_exit_via_fiq
- add r0, sp, #S_R2
- ldr lr, [sp, #S_LR]
- ldr sp, [sp, #S_SP] @ abort is deadly from here onward (it will
- @ clobber state restored below)
- ldmia r0, {r2 - r12}
- mov r1, #FIQ_MODE | PSR_I_BIT | PSR_F_BIT
- msr cpsr_c, r1
- sub r0, #S_R2
- add r8, r0, #S_PC
- ldmia r0, {r0 - r1}
- rfeia r8
- .endm
-
-#ifdef CONFIG_CPU_V7M
- /*
- * Note we don't need to do clrex here as clearing the local monitor is
- * part of each exception entry and exit sequence.
- */
- .macro restore_user_regs, fast = 0, offset = 0
+#elif defined(CONFIG_CPU_V7M)
+ @ V7M restore.
+ @ Note that we don't need to do clrex here as clearing the local
+ @ monitor is part of the exception entry and exit sequence.
.if \offset
add sp, #\offset
.endif
v7m_exception_slow_exit ret_r0 = \fast
- .endm
-#else /* ifdef CONFIG_CPU_V7M */
- .macro restore_user_regs, fast = 0, offset = 0
+#else
+ @ Thumb mode restore
mov r2, sp
load_user_sp_lr r2, r3, \offset + S_SP @ calling sp, lr
ldr r1, [sp, #\offset + S_PSR] @ get calling cpsr
@@ -353,9 +336,8 @@
.endif
add sp, sp, #S_FRAME_SIZE - S_SP
movs pc, lr @ return & move spsr_svc into cpsr
- .endm
-#endif /* ifdef CONFIG_CPU_V7M / else */
#endif /* !CONFIG_THUMB2_KERNEL */
+ .endm
/*
* Context tracking subsystem. Used to instrument transitions
diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
index bd755d9..04286fd 100644
--- a/arch/arm/kernel/head.S
+++ b/arch/arm/kernel/head.S
@@ -399,6 +399,9 @@
sub lr, r4, r5 @ mmu has been enabled
add r3, r7, lr
ldrd r4, [r3, #0] @ get secondary_data.pgdir
+ARM_BE8(eor r4, r4, r5) @ Swap r5 and r4 in BE:
+ARM_BE8(eor r5, r4, r5) @ it can be done in 3 steps
+ARM_BE8(eor r4, r4, r5) @ without using a temp reg.
ldr r8, [r3, #8] @ get secondary_data.swapper_pg_dir
badr lr, __enable_mmu @ return address
mov r13, r12 @ __secondary_switched address
@@ -461,10 +464,7 @@
#ifdef CONFIG_ARM_LPAE
mcrr p15, 0, r4, r5, c2 @ load TTBR0
#else
- mov r5, #(domain_val(DOMAIN_USER, DOMAIN_MANAGER) | \
- domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
- domain_val(DOMAIN_TABLE, DOMAIN_MANAGER) | \
- domain_val(DOMAIN_IO, DOMAIN_CLIENT))
+ mov r5, #DACR_INIT
mcr p15, 0, r5, c3, c0, 0 @ load domain access register
mcr p15, 0, r4, c2, c0, 0 @ load page table pointer
#endif
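
[The three ARM_BE8 eor instructions are the classic XOR swap; the same
trick in C, for reference (valid only while the two operands do not alias,
as with r4/r5 above):

	static inline void xor_swap(u32 *a, u32 *b)
	{
		*a ^= *b;	/* a = a ^ b           */
		*b ^= *a;	/* b = b ^ (a ^ b) = a */
		*a ^= *b;	/* a = (a ^ b) ^ a = b */
	}
]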
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index 357f57e..7d5379c 100644
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
@@ -795,8 +795,10 @@
/* Don't bother with PPIs; they're already affine */
irq = platform_get_irq(pdev, 0);
- if (irq >= 0 && irq_is_percpu(irq))
+ if (irq >= 0 && irq_is_percpu(irq)) {
+ cpumask_setall(&pmu->supported_cpus);
return 0;
+ }
irqs = kcalloc(pdev->num_resources, sizeof(*irqs), GFP_KERNEL);
if (!irqs)
@@ -818,12 +820,13 @@
if (arch_find_n_match_cpu_physical_id(dn, cpu, NULL))
break;
- of_node_put(dn);
if (cpu >= nr_cpu_ids) {
pr_warn("Failed to find logical CPU for %s\n",
dn->name);
+ of_node_put(dn);
break;
}
+ of_node_put(dn);
irqs[i] = cpu;
cpumask_set_cpu(cpu, &pmu->supported_cpus);
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
index f192a2a..a3089ba 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -91,13 +91,6 @@
ledtrig_cpu(CPU_LED_IDLE_END);
}
-#ifdef CONFIG_HOTPLUG_CPU
-void arch_cpu_idle_dead(void)
-{
- cpu_die();
-}
-#endif
-
void __show_regs(struct pt_regs *regs)
{
unsigned long flags;
@@ -129,12 +122,36 @@
buf[4] = '\0';
#ifndef CONFIG_CPU_V7M
- printk("Flags: %s IRQs o%s FIQs o%s Mode %s ISA %s Segment %s\n",
- buf, interrupts_enabled(regs) ? "n" : "ff",
- fast_interrupts_enabled(regs) ? "n" : "ff",
- processor_modes[processor_mode(regs)],
- isa_modes[isa_mode(regs)],
- get_fs() == get_ds() ? "kernel" : "user");
+ {
+ unsigned int domain = get_domain();
+ const char *segment;
+
+#ifdef CONFIG_CPU_SW_DOMAIN_PAN
+ /*
+ * Get the domain register for the parent context. In user
+ * mode, we don't save the DACR, so lets use what it should
+	 * mode, we don't save the DACR, so let's use what it should
+ */
+ if (user_mode(regs))
+ domain = DACR_UACCESS_ENABLE;
+ else
+ domain = *(unsigned int *)(regs + 1);
+#endif
+
+ if ((domain & domain_mask(DOMAIN_USER)) ==
+ domain_val(DOMAIN_USER, DOMAIN_NOACCESS))
+ segment = "none";
+ else if (get_fs() == get_ds())
+ segment = "kernel";
+ else
+ segment = "user";
+
+ printk("Flags: %s IRQs o%s FIQs o%s Mode %s ISA %s Segment %s\n",
+ buf, interrupts_enabled(regs) ? "n" : "ff",
+ fast_interrupts_enabled(regs) ? "n" : "ff",
+ processor_modes[processor_mode(regs)],
+ isa_modes[isa_mode(regs)], segment);
+ }
#else
printk("xPSR: %08lx\n", regs->ARM_cpsr);
#endif
@@ -146,10 +163,9 @@
buf[0] = '\0';
#ifdef CONFIG_CPU_CP15_MMU
{
- unsigned int transbase, dac;
+ unsigned int transbase, dac = get_domain();
asm("mrc p15, 0, %0, c2, c0\n\t"
- "mrc p15, 0, %1, c3, c0\n"
- : "=r" (transbase), "=r" (dac));
+ : "=r" (transbase));
snprintf(buf, sizeof(buf), " Table: %08x DAC: %08x",
transbase, dac);
}
@@ -210,6 +226,14 @@
memset(&thread->cpu_context, 0, sizeof(struct cpu_context_save));
+ /*
+ * Copy the initial value of the domain access control register
+ * from the current thread: thread->addr_limit will have been
+ * copied from the current thread via setup_thread_stack() in
+ * kernel/fork.c
+ */
+ thread->cpu_domain = get_domain();
+
if (likely(!(p->flags & PF_KTHREAD))) {
*childregs = *current_pt_regs();
childregs->ARM_r0 = 0;
diff --git a/arch/arm/kernel/reboot.c b/arch/arm/kernel/reboot.c
index 1a4d232..3826935 100644
--- a/arch/arm/kernel/reboot.c
+++ b/arch/arm/kernel/reboot.c
@@ -50,7 +50,7 @@
flush_cache_all();
/* Switch to the identity mapping. */
- phys_reset = (phys_reset_t)(unsigned long)virt_to_phys(cpu_reset);
+ phys_reset = (phys_reset_t)(unsigned long)virt_to_idmap(cpu_reset);
phys_reset((unsigned long)addr);
/* Should never get here. */
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index 36c18b7..e2ecee6 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -37,6 +37,7 @@
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/elf.h>
+#include <asm/fixmap.h>
#include <asm/procinfo.h>
#include <asm/psci.h>
#include <asm/sections.h>
@@ -954,6 +955,9 @@
strlcpy(cmd_line, boot_command_line, COMMAND_LINE_SIZE);
*cmdline_p = cmd_line;
+ if (IS_ENABLED(CONFIG_FIX_EARLYCON_MEM))
+ early_fixmap_init();
+
parse_early_param();
#ifdef CONFIG_MMU
@@ -1015,7 +1019,7 @@
for_each_possible_cpu(cpu) {
struct cpuinfo_arm *cpuinfo = &per_cpu(cpu_data, cpu);
- cpuinfo->cpu.hotpluggable = 1;
+ cpuinfo->cpu.hotpluggable = platform_can_hotplug_cpu(cpu);
register_cpu(&cpuinfo->cpu, cpu);
}
diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
index 423663e..b6cda06 100644
--- a/arch/arm/kernel/signal.c
+++ b/arch/arm/kernel/signal.c
@@ -562,6 +562,12 @@
asmlinkage int
do_work_pending(struct pt_regs *regs, unsigned int thread_flags, int syscall)
{
+ /*
+ * The assembly code enters us with IRQs off, but it hasn't
+ * informed the tracing code of that for efficiency reasons.
+ * Update the trace code with the current status.
+ */
+ trace_hardirqs_off();
do {
if (likely(thread_flags & _TIF_NEED_RESCHED)) {
schedule();
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index 90dfbed..ba0063c 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -175,13 +175,26 @@
if (smp_ops.cpu_disable)
return smp_ops.cpu_disable(cpu);
+ return 0;
+}
+
+int platform_can_hotplug_cpu(unsigned int cpu)
+{
+ /* cpu_die must be specified to support hotplug */
+ if (!smp_ops.cpu_die)
+ return 0;
+
+ if (smp_ops.cpu_can_disable)
+ return smp_ops.cpu_can_disable(cpu);
+
/*
* By default, allow disabling all CPUs except the first one,
* since this is special on a lot of platforms, e.g. because
* of clock tick interrupts.
*/
- return cpu == 0 ? -EPERM : 0;
+ return cpu != 0;
}
+
/*
* __cpu_disable runs on the processor to be shutdown.
*/
@@ -253,7 +266,7 @@
* of the other hotplug-cpu capable cores, so presumably coming
* out of idle fixes this.
*/
-void __ref cpu_die(void)
+void arch_cpu_idle_dead(void)
{
unsigned int cpu = smp_processor_id();
@@ -578,7 +591,7 @@
struct pt_regs *old_regs = set_irq_regs(regs);
if ((unsigned)ipinr < NR_IPI) {
- trace_ipi_entry(ipi_types[ipinr]);
+ trace_ipi_entry_rcuidle(ipi_types[ipinr]);
__inc_irq_stat(cpu, ipi_irqs[ipinr]);
}
@@ -637,7 +650,7 @@
}
if ((unsigned)ipinr < NR_IPI)
- trace_ipi_exit(ipi_types[ipinr]);
+ trace_ipi_exit_rcuidle(ipi_types[ipinr]);
set_irq_regs(old_regs);
}
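
[With the new cpu_can_disable hook, a platform that really can unplug any
CPU states so explicitly instead of overloading cpu_disable. A sketch; all
myplat_* names are hypothetical:

	static bool myplat_cpu_can_disable(unsigned int cpu)
	{
		return true;		/* every CPU may be unplugged */
	}

	static void myplat_cpu_die(unsigned int cpu)
	{
		/* platform power-down; cpu_die is required for hotplug */
	}

	static const struct smp_operations myplat_smp_ops = {
		.cpu_can_disable = myplat_cpu_can_disable,
		.cpu_die	 = myplat_cpu_die,
	};
]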
diff --git a/arch/arm/kernel/swp_emulate.c b/arch/arm/kernel/swp_emulate.c
index 1361756..5b26e7e 100644
--- a/arch/arm/kernel/swp_emulate.c
+++ b/arch/arm/kernel/swp_emulate.c
@@ -141,11 +141,14 @@
while (1) {
unsigned long temp;
+ unsigned int __ua_flags;
+ __ua_flags = uaccess_save_and_enable();
if (type == TYPE_SWPB)
__user_swpb_asm(*data, address, res, temp);
else
__user_swp_asm(*data, address, res, temp);
+ uaccess_restore(__ua_flags);
if (likely(res != -EAGAIN) || signal_pending(current))
break;
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
index d358226..969f9d9 100644
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -870,7 +870,6 @@
kuser_init(vectors_base);
flush_icache_range(vectors, vectors + PAGE_SIZE * 2);
- modify_domain(DOMAIN_USER, DOMAIN_CLIENT);
#else /* ifndef CONFIG_CPU_V7M */
/*
* on V7-M there is no need to copy the vector table to a dedicated
diff --git a/arch/arm/kernel/vdso.c b/arch/arm/kernel/vdso.c
index efe17dd..54a5aea 100644
--- a/arch/arm/kernel/vdso.c
+++ b/arch/arm/kernel/vdso.c
@@ -296,7 +296,6 @@
*/
void update_vsyscall(struct timekeeper *tk)
{
- struct timespec xtime_coarse;
struct timespec64 *wtm = &tk->wall_to_monotonic;
if (!cntvct_ok) {
@@ -308,10 +307,10 @@
vdso_write_begin(vdso_data);
- xtime_coarse = __current_kernel_time();
vdso_data->tk_is_cntvct = tk_is_cntvct(tk);
- vdso_data->xtime_coarse_sec = xtime_coarse.tv_sec;
- vdso_data->xtime_coarse_nsec = xtime_coarse.tv_nsec;
+ vdso_data->xtime_coarse_sec = tk->xtime_sec;
+ vdso_data->xtime_coarse_nsec = (u32)(tk->tkr_mono.xtime_nsec >>
+ tk->tkr_mono.shift);
vdso_data->wtm_clock_sec = wtm->tv_sec;
vdso_data->wtm_clock_nsec = wtm->tv_nsec;
diff --git a/arch/arm/lib/clear_user.S b/arch/arm/lib/clear_user.S
index 1710fd7..970d6c0 100644
--- a/arch/arm/lib/clear_user.S
+++ b/arch/arm/lib/clear_user.S
@@ -12,14 +12,14 @@
.text
-/* Prototype: int __clear_user(void *addr, size_t sz)
+/* Prototype: unsigned long arm_clear_user(void *addr, size_t sz)
* Purpose : clear some user memory
* Params : addr - user memory address to clear
* : sz - number of bytes to clear
* Returns : number of bytes NOT cleared
*/
ENTRY(__clear_user_std)
-WEAK(__clear_user)
+WEAK(arm_clear_user)
stmfd sp!, {r1, lr}
mov r2, #0
cmp r1, #4
@@ -44,7 +44,7 @@
USER( strnebt r2, [r0])
mov r0, #0
ldmfd sp!, {r1, pc}
-ENDPROC(__clear_user)
+ENDPROC(arm_clear_user)
ENDPROC(__clear_user_std)
.pushsection .text.fixup,"ax"
diff --git a/arch/arm/lib/copy_from_user.S b/arch/arm/lib/copy_from_user.S
index 7a235b9..1512beb 100644
--- a/arch/arm/lib/copy_from_user.S
+++ b/arch/arm/lib/copy_from_user.S
@@ -17,7 +17,7 @@
/*
* Prototype:
*
- * size_t __copy_from_user(void *to, const void *from, size_t n)
+ * size_t arm_copy_from_user(void *to, const void *from, size_t n)
*
* Purpose:
*
@@ -89,11 +89,11 @@
.text
-ENTRY(__copy_from_user)
+ENTRY(arm_copy_from_user)
#include "copy_template.S"
-ENDPROC(__copy_from_user)
+ENDPROC(arm_copy_from_user)
.pushsection .fixup,"ax"
.align 0
diff --git a/arch/arm/lib/copy_to_user.S b/arch/arm/lib/copy_to_user.S
index 9648b06..caf5019 100644
--- a/arch/arm/lib/copy_to_user.S
+++ b/arch/arm/lib/copy_to_user.S
@@ -17,7 +17,7 @@
/*
* Prototype:
*
- * size_t __copy_to_user(void *to, const void *from, size_t n)
+ * size_t arm_copy_to_user(void *to, const void *from, size_t n)
*
* Purpose:
*
@@ -93,11 +93,11 @@
.text
ENTRY(__copy_to_user_std)
-WEAK(__copy_to_user)
+WEAK(arm_copy_to_user)
#include "copy_template.S"
-ENDPROC(__copy_to_user)
+ENDPROC(arm_copy_to_user)
ENDPROC(__copy_to_user_std)
.pushsection .text.fixup,"ax"
diff --git a/arch/arm/lib/csumpartialcopyuser.S b/arch/arm/lib/csumpartialcopyuser.S
index 1d0957e..1712f13 100644
--- a/arch/arm/lib/csumpartialcopyuser.S
+++ b/arch/arm/lib/csumpartialcopyuser.S
@@ -17,6 +17,19 @@
.text
+#ifdef CONFIG_CPU_SW_DOMAIN_PAN
+ .macro save_regs
+ mrc p15, 0, ip, c3, c0, 0
+ stmfd sp!, {r1, r2, r4 - r8, ip, lr}
+ uaccess_enable ip
+ .endm
+
+ .macro load_regs
+ ldmfd sp!, {r1, r2, r4 - r8, ip, lr}
+ mcr p15, 0, ip, c3, c0, 0
+ ret lr
+ .endm
+#else
.macro save_regs
stmfd sp!, {r1, r2, r4 - r8, lr}
.endm
@@ -24,6 +37,7 @@
.macro load_regs
ldmfd sp!, {r1, r2, r4 - r8, pc}
.endm
+#endif
.macro load1b, reg1
ldrusr \reg1, r0, 1
diff --git a/arch/arm/lib/memcpy.S b/arch/arm/lib/memcpy.S
index 7797e81..64111bd 100644
--- a/arch/arm/lib/memcpy.S
+++ b/arch/arm/lib/memcpy.S
@@ -61,8 +61,10 @@
/* Prototype: void *memcpy(void *dest, const void *src, size_t n); */
+ENTRY(mmiocpy)
ENTRY(memcpy)
#include "copy_template.S"
ENDPROC(memcpy)
+ENDPROC(mmiocpy)
diff --git a/arch/arm/lib/memset.S b/arch/arm/lib/memset.S
index a4ee97b..3c65e3b 100644
--- a/arch/arm/lib/memset.S
+++ b/arch/arm/lib/memset.S
@@ -16,6 +16,7 @@
.text
.align 5
+ENTRY(mmioset)
ENTRY(memset)
UNWIND( .fnstart )
ands r3, r0, #3 @ 1 unaligned?
@@ -133,3 +134,4 @@
b 1b
UNWIND( .fnend )
ENDPROC(memset)
+ENDPROC(mmioset)
diff --git a/arch/arm/lib/uaccess_with_memcpy.c b/arch/arm/lib/uaccess_with_memcpy.c
index 3e58d71..d72b909 100644
--- a/arch/arm/lib/uaccess_with_memcpy.c
+++ b/arch/arm/lib/uaccess_with_memcpy.c
@@ -96,7 +96,7 @@
}
/* the mmap semaphore is taken only if not in an atomic context */
- atomic = in_atomic();
+ atomic = faulthandler_disabled();
if (!atomic)
		down_read(&current->mm->mmap_sem);
@@ -136,7 +136,7 @@
}
unsigned long
-__copy_to_user(void __user *to, const void *from, unsigned long n)
+arm_copy_to_user(void __user *to, const void *from, unsigned long n)
{
/*
* This test is stubbed out of the main function above to keep
@@ -190,7 +190,7 @@
return n;
}
-unsigned long __clear_user(void __user *addr, unsigned long n)
+unsigned long arm_clear_user(void __user *addr, unsigned long n)
{
	/* See rationale for this in __copy_to_user() above. */
if (n < 64)
diff --git a/arch/arm/mach-omap2/dma.c b/arch/arm/mach-omap2/dma.c
index e1a56d8..1ed4be1 100644
--- a/arch/arm/mach-omap2/dma.c
+++ b/arch/arm/mach-omap2/dma.c
@@ -117,7 +117,6 @@
u8 revision = dma_read(REVISION, 0) & 0xff;
printk(KERN_INFO "OMAP DMA hardware revision %d.%d\n",
revision >> 4, revision & 0xf);
- return;
}
static unsigned configure_dma_errata(void)
diff --git a/arch/arm/mach-prima2/Kconfig b/arch/arm/mach-prima2/Kconfig
index e03d8b5..9ab8932 100644
--- a/arch/arm/mach-prima2/Kconfig
+++ b/arch/arm/mach-prima2/Kconfig
@@ -4,6 +4,7 @@
select ARCH_REQUIRE_GPIOLIB
select GENERIC_IRQ_CHIP
select NO_IOPORT_MAP
+ select REGMAP
select PINCTRL
select PINCTRL_SIRF
help
diff --git a/arch/arm/mach-prima2/rtciobrg.c b/arch/arm/mach-prima2/rtciobrg.c
index 8f66d8f..d4852d2 100644
--- a/arch/arm/mach-prima2/rtciobrg.c
+++ b/arch/arm/mach-prima2/rtciobrg.c
@@ -1,5 +1,5 @@
/*
- * RTC I/O Bridge interfaces for CSR SiRFprimaII
+ * RTC I/O Bridge interfaces for CSR SiRFprimaII/atlas7
* ARM access the registers of SYSRTC, GPSRTC and PWRC through this module
*
* Copyright (c) 2011 Cambridge Silicon Radio Limited, a CSR plc group company.
@@ -10,6 +10,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/io.h>
+#include <linux/regmap.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
@@ -66,6 +67,7 @@
{
unsigned long flags, val;
+ /* TODO: add hwspinlock to sync with M3 */
spin_lock_irqsave(&rtciobrg_lock, flags);
val = __sirfsoc_rtc_iobrg_readl(addr);
@@ -90,6 +92,7 @@
{
unsigned long flags;
+ /* TODO: add hwspinlock to sync with M3 */
spin_lock_irqsave(&rtciobrg_lock, flags);
sirfsoc_rtc_iobrg_pre_writel(val, addr);
@@ -102,6 +105,45 @@
}
EXPORT_SYMBOL_GPL(sirfsoc_rtc_iobrg_writel);
+
+static int regmap_iobg_regwrite(void *context, unsigned int reg,
+ unsigned int val)
+{
+ sirfsoc_rtc_iobrg_writel(val, reg);
+ return 0;
+}
+
+static int regmap_iobg_regread(void *context, unsigned int reg,
+ unsigned int *val)
+{
+ *val = (u32)sirfsoc_rtc_iobrg_readl(reg);
+ return 0;
+}
+
+static struct regmap_bus regmap_iobg = {
+ .reg_write = regmap_iobg_regwrite,
+ .reg_read = regmap_iobg_regread,
+};
+
+/**
+ * devm_regmap_init_iobg(): Initialise managed register map
+ *
+ * @dev: Device that will be interacted with
+ * @config: Configuration for register map
+ *
+ * The return value will be an ERR_PTR() on error or a valid pointer
+ * to a struct regmap. The regmap will be automatically freed by the
+ * device management code.
+ */
+struct regmap *devm_regmap_init_iobg(struct device *dev,
+ const struct regmap_config *config)
+{
+	const struct regmap_bus *bus = &regmap_iobg;
+
+ return devm_regmap_init(dev, bus, dev, config);
+}
+EXPORT_SYMBOL_GPL(devm_regmap_init_iobg);
+
static const struct of_device_id rtciobrg_ids[] = {
{ .compatible = "sirf,prima2-rtciobg" },
{}
@@ -132,7 +174,7 @@
}
postcore_initcall(sirfsoc_rtciobrg_init);
-MODULE_AUTHOR("Zhiwu Song <zhiwu.song@csr.com>, "
- "Barry Song <baohua.song@csr.com>");
+MODULE_AUTHOR("Zhiwu Song <zhiwu.song@csr.com>");
+MODULE_AUTHOR("Barry Song <baohua.song@csr.com>");
MODULE_DESCRIPTION("CSR SiRFprimaII rtc io bridge");
MODULE_LICENSE("GPL v2");
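
[A consumer then builds a regmap over the bridge like any other regmap
bus. A sketch; the config values and register offset are illustrative
assumptions, not part of this patch:

	static const struct regmap_config iobg_regmap_config = {
		.reg_bits	= 32,
		.val_bits	= 32,
		.reg_stride	= 4,
	};

	static int example_probe(struct platform_device *pdev)
	{
		struct regmap *map;

		map = devm_regmap_init_iobg(&pdev->dev, &iobg_regmap_config);
		if (IS_ERR(map))
			return PTR_ERR(map);

		return regmap_write(map, 0x04, 0x1);	/* offset is made up */
	}
]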
diff --git a/arch/arm/mach-shmobile/common.h b/arch/arm/mach-shmobile/common.h
index 476092b..8d27ec5 100644
--- a/arch/arm/mach-shmobile/common.h
+++ b/arch/arm/mach-shmobile/common.h
@@ -13,7 +13,7 @@
extern void shmobile_smp_sleep(void);
extern void shmobile_smp_hook(unsigned int cpu, unsigned long fn,
unsigned long arg);
-extern int shmobile_smp_cpu_disable(unsigned int cpu);
+extern bool shmobile_smp_cpu_can_disable(unsigned int cpu);
extern void shmobile_boot_scu(void);
extern void shmobile_smp_scu_prepare_cpus(unsigned int max_cpus);
extern void shmobile_smp_scu_cpu_die(unsigned int cpu);
diff --git a/arch/arm/mach-shmobile/platsmp.c b/arch/arm/mach-shmobile/platsmp.c
index 3923e09..b23378f 100644
--- a/arch/arm/mach-shmobile/platsmp.c
+++ b/arch/arm/mach-shmobile/platsmp.c
@@ -31,8 +31,8 @@
}
#ifdef CONFIG_HOTPLUG_CPU
-int shmobile_smp_cpu_disable(unsigned int cpu)
+bool shmobile_smp_cpu_can_disable(unsigned int cpu)
{
- return 0; /* Hotplug of any CPU is supported */
+ return true; /* Hotplug of any CPU is supported */
}
#endif
diff --git a/arch/arm/mach-shmobile/smp-r8a7790.c b/arch/arm/mach-shmobile/smp-r8a7790.c
index 930f45c..947e437 100644
--- a/arch/arm/mach-shmobile/smp-r8a7790.c
+++ b/arch/arm/mach-shmobile/smp-r8a7790.c
@@ -64,7 +64,7 @@
.smp_prepare_cpus = r8a7790_smp_prepare_cpus,
.smp_boot_secondary = shmobile_smp_apmu_boot_secondary,
#ifdef CONFIG_HOTPLUG_CPU
- .cpu_disable = shmobile_smp_cpu_disable,
+ .cpu_can_disable = shmobile_smp_cpu_can_disable,
.cpu_die = shmobile_smp_apmu_cpu_die,
.cpu_kill = shmobile_smp_apmu_cpu_kill,
#endif
diff --git a/arch/arm/mach-shmobile/smp-r8a7791.c b/arch/arm/mach-shmobile/smp-r8a7791.c
index 5e2d1db7..b2508c0 100644
--- a/arch/arm/mach-shmobile/smp-r8a7791.c
+++ b/arch/arm/mach-shmobile/smp-r8a7791.c
@@ -58,7 +58,7 @@
.smp_prepare_cpus = r8a7791_smp_prepare_cpus,
.smp_boot_secondary = r8a7791_smp_boot_secondary,
#ifdef CONFIG_HOTPLUG_CPU
- .cpu_disable = shmobile_smp_cpu_disable,
+ .cpu_can_disable = shmobile_smp_cpu_can_disable,
.cpu_die = shmobile_smp_apmu_cpu_die,
.cpu_kill = shmobile_smp_apmu_cpu_kill,
#endif
diff --git a/arch/arm/mach-shmobile/smp-sh73a0.c b/arch/arm/mach-shmobile/smp-sh73a0.c
index 2106d6b..ae7c764 100644
--- a/arch/arm/mach-shmobile/smp-sh73a0.c
+++ b/arch/arm/mach-shmobile/smp-sh73a0.c
@@ -68,7 +68,7 @@
.smp_prepare_cpus = sh73a0_smp_prepare_cpus,
.smp_boot_secondary = sh73a0_boot_secondary,
#ifdef CONFIG_HOTPLUG_CPU
- .cpu_disable = shmobile_smp_cpu_disable,
+ .cpu_can_disable = shmobile_smp_cpu_can_disable,
.cpu_die = shmobile_smp_scu_cpu_die,
.cpu_kill = shmobile_smp_scu_cpu_kill,
#endif
diff --git a/arch/arm/mach-sunxi/Kconfig b/arch/arm/mach-sunxi/Kconfig
index 81502b9..4efe2d4 100644
--- a/arch/arm/mach-sunxi/Kconfig
+++ b/arch/arm/mach-sunxi/Kconfig
@@ -35,7 +35,7 @@
select SUN5I_HSTIMER
config MACH_SUN8I
- bool "Allwinner A23 (sun8i) SoCs support"
+ bool "Allwinner sun8i Family SoCs support"
default ARCH_SUNXI
select ARM_GIC
select MFD_SUN6I_PRCM
diff --git a/arch/arm/mach-sunxi/sunxi.c b/arch/arm/mach-sunxi/sunxi.c
index 1bc811a..65bab28 100644
--- a/arch/arm/mach-sunxi/sunxi.c
+++ b/arch/arm/mach-sunxi/sunxi.c
@@ -67,10 +67,13 @@
static const char * const sun8i_board_dt_compat[] = {
"allwinner,sun8i-a23",
+ "allwinner,sun8i-a33",
+ "allwinner,sun8i-h3",
NULL,
};
-DT_MACHINE_START(SUN8I_DT, "Allwinner sun8i (A23) Family")
+DT_MACHINE_START(SUN8I_DT, "Allwinner sun8i Family")
+ .init_time = sun6i_timer_init,
.dt_compat = sun8i_board_dt_compat,
.init_late = sunxi_dt_cpufreq_init,
MACHINE_END
diff --git a/arch/arm/mm/abort-ev4.S b/arch/arm/mm/abort-ev4.S
index 54473cd4..b3b31e3 100644
--- a/arch/arm/mm/abort-ev4.S
+++ b/arch/arm/mm/abort-ev4.S
@@ -19,6 +19,7 @@
mrc p15, 0, r1, c5, c0, 0 @ get FSR
mrc p15, 0, r0, c6, c0, 0 @ get FAR
ldr r3, [r4] @ read aborted ARM instruction
+ uaccess_disable ip @ disable userspace access
bic r1, r1, #1 << 11 | 1 << 10 @ clear bits 11 and 10 of FSR
tst r3, #1 << 20 @ L = 1 -> write?
orreq r1, r1, #1 << 11 @ yes.
diff --git a/arch/arm/mm/abort-ev5t.S b/arch/arm/mm/abort-ev5t.S
index a0908d4..a6a381a 100644
--- a/arch/arm/mm/abort-ev5t.S
+++ b/arch/arm/mm/abort-ev5t.S
@@ -21,8 +21,10 @@
mrc p15, 0, r0, c6, c0, 0 @ get FAR
do_thumb_abort fsr=r1, pc=r4, psr=r5, tmp=r3
ldreq r3, [r4] @ read aborted ARM instruction
+ uaccess_disable ip @ disable user access
bic r1, r1, #1 << 11 @ clear bits 11 of FSR
- do_ldrd_abort tmp=ip, insn=r3
+ teq_ldrd tmp=ip, insn=r3 @ insn was LDRD?
+ beq do_DataAbort @ yes
tst r3, #1 << 20 @ check write
orreq r1, r1, #1 << 11
b do_DataAbort
diff --git a/arch/arm/mm/abort-ev5tj.S b/arch/arm/mm/abort-ev5tj.S
index 4006b7a..00ab011 100644
--- a/arch/arm/mm/abort-ev5tj.S
+++ b/arch/arm/mm/abort-ev5tj.S
@@ -24,7 +24,9 @@
bne do_DataAbort
do_thumb_abort fsr=r1, pc=r4, psr=r5, tmp=r3
ldreq r3, [r4] @ read aborted ARM instruction
- do_ldrd_abort tmp=ip, insn=r3
+ uaccess_disable ip @ disable userspace access
+ teq_ldrd tmp=ip, insn=r3 @ insn was LDRD?
+ beq do_DataAbort @ yes
tst r3, #1 << 20 @ L = 0 -> write
orreq r1, r1, #1 << 11 @ yes.
b do_DataAbort
diff --git a/arch/arm/mm/abort-ev6.S b/arch/arm/mm/abort-ev6.S
index 8c48c5c..8801a15 100644
--- a/arch/arm/mm/abort-ev6.S
+++ b/arch/arm/mm/abort-ev6.S
@@ -26,16 +26,18 @@
ldr ip, =0x4107b36
mrc p15, 0, r3, c0, c0, 0 @ get processor id
teq ip, r3, lsr #4 @ r0 ARM1136?
- bne do_DataAbort
+ bne 1f
tst r5, #PSR_J_BIT @ Java?
tsteq r5, #PSR_T_BIT @ Thumb?
- bne do_DataAbort
+ bne 1f
bic r1, r1, #1 << 11 @ clear bit 11 of FSR
ldr r3, [r4] @ read aborted ARM instruction
ARM_BE8(rev r3, r3)
- do_ldrd_abort tmp=ip, insn=r3
+ teq_ldrd tmp=ip, insn=r3 @ insn was LDRD?
+ beq 1f @ yes
tst r3, #1 << 20 @ L = 0 -> write
orreq r1, r1, #1 << 11 @ yes.
#endif
+1: uaccess_disable ip @ disable userspace access
b do_DataAbort
diff --git a/arch/arm/mm/abort-ev7.S b/arch/arm/mm/abort-ev7.S
index 4812ad0..e8d0e08 100644
--- a/arch/arm/mm/abort-ev7.S
+++ b/arch/arm/mm/abort-ev7.S
@@ -15,6 +15,7 @@
ENTRY(v7_early_abort)
mrc p15, 0, r1, c5, c0, 0 @ get FSR
mrc p15, 0, r0, c6, c0, 0 @ get FAR
+ uaccess_disable ip @ disable userspace access
/*
* V6 code adjusts the returned DFSR.
diff --git a/arch/arm/mm/abort-lv4t.S b/arch/arm/mm/abort-lv4t.S
index f398258..6d8e8e3 100644
--- a/arch/arm/mm/abort-lv4t.S
+++ b/arch/arm/mm/abort-lv4t.S
@@ -26,6 +26,7 @@
#endif
bne .data_thumb_abort
ldr r8, [r4] @ read arm instruction
+ uaccess_disable ip @ disable userspace access
tst r8, #1 << 20 @ L = 1 -> write?
orreq r1, r1, #1 << 11 @ yes.
and r7, r8, #15 << 24
@@ -155,6 +156,7 @@
.data_thumb_abort:
ldrh r8, [r4] @ read instruction
+ uaccess_disable ip @ disable userspace access
tst r8, #1 << 11 @ L = 1 -> write?
orreq r1, r1, #1 << 8 @ yes
and r7, r8, #15 << 12
diff --git a/arch/arm/mm/abort-macro.S b/arch/arm/mm/abort-macro.S
index 2cbf68e..4509bee 100644
--- a/arch/arm/mm/abort-macro.S
+++ b/arch/arm/mm/abort-macro.S
@@ -13,6 +13,7 @@
tst \psr, #PSR_T_BIT
beq not_thumb
ldrh \tmp, [\pc] @ Read aborted Thumb instruction
+ uaccess_disable ip @ disable userspace access
and \tmp, \tmp, # 0xfe00 @ Mask opcode field
cmp \tmp, # 0x5600 @ Is it ldrsb?
orreq \tmp, \tmp, #1 << 11 @ Set L-bit if yes
@@ -29,12 +30,9 @@
* [7:4] == 1101
* [20] == 0
*/
- .macro do_ldrd_abort, tmp, insn
- tst \insn, #0x0e100000 @ [27:25,20] == 0
- bne not_ldrd
- and \tmp, \insn, #0x000000f0 @ [7:4] == 1101
- cmp \tmp, #0x000000d0
- beq do_DataAbort
-not_ldrd:
+ .macro teq_ldrd, tmp, insn
+ mov \tmp, #0x0e100000
+ orr \tmp, #0x000000f0
+ and \tmp, \insn, \tmp
+ teq \tmp, #0x000000d0
.endm
-
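
[The rewritten macro folds the two tests into a single mask compare; its C
equivalent, derived from the encoding comment above:

	static inline bool insn_is_ldrd(u32 insn)
	{
		/* [27:25,20] == 0 and [7:4] == 1101, checked in one compare */
		return (insn & 0x0e1000f0) == 0x000000d0;
	}
]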
diff --git a/arch/arm/mm/cache-feroceon-l2.c b/arch/arm/mm/cache-feroceon-l2.c
index 097181e..5c1b7a7 100644
--- a/arch/arm/mm/cache-feroceon-l2.c
+++ b/arch/arm/mm/cache-feroceon-l2.c
@@ -368,7 +368,6 @@
struct device_node *node;
void __iomem *base;
bool l2_wt_override = false;
- struct resource res;
#if defined(CONFIG_CACHE_FEROCEON_L2_WRITETHROUGH)
l2_wt_override = true;
@@ -376,10 +375,7 @@
node = of_find_matching_node(NULL, feroceon_ids);
if (node && of_device_is_compatible(node, "marvell,kirkwood-cache")) {
- if (of_address_to_resource(node, 0, &res))
- return -ENODEV;
-
- base = ioremap(res.start, resource_size(&res));
+ base = of_iomap(node, 0);
if (!base)
return -ENOMEM;
diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c
index 71b3d33..493692d 100644
--- a/arch/arm/mm/cache-l2x0.c
+++ b/arch/arm/mm/cache-l2x0.c
@@ -1171,6 +1171,11 @@
}
}
+ if (of_property_read_bool(np, "arm,shared-override")) {
+ *aux_val |= L2C_AUX_CTRL_SHARED_OVERRIDE;
+ *aux_mask &= ~L2C_AUX_CTRL_SHARED_OVERRIDE;
+ }
+
prefetch = l2x0_saved_regs.prefetch_ctrl;
ret = of_property_read_u32(np, "arm,double-linefill", &val);
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 1ced8a0..9f509b2 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -39,6 +39,7 @@
#include <asm/system_info.h>
#include <asm/dma-contiguous.h>
+#include "dma.h"
#include "mm.h"
/*
@@ -648,14 +649,18 @@
size = PAGE_ALIGN(size);
want_vaddr = !dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs);
- if (is_coherent || nommu())
+ if (nommu())
+ addr = __alloc_simple_buffer(dev, size, gfp, &page);
+ else if (dev_get_cma_area(dev) && (gfp & __GFP_WAIT))
+ addr = __alloc_from_contiguous(dev, size, prot, &page,
+ caller, want_vaddr);
+ else if (is_coherent)
addr = __alloc_simple_buffer(dev, size, gfp, &page);
else if (!(gfp & __GFP_WAIT))
addr = __alloc_from_pool(size, &page);
- else if (!dev_get_cma_area(dev))
- addr = __alloc_remap_buffer(dev, size, gfp, prot, &page, caller, want_vaddr);
else
- addr = __alloc_from_contiguous(dev, size, prot, &page, caller, want_vaddr);
+ addr = __alloc_remap_buffer(dev, size, gfp, prot, &page,
+ caller, want_vaddr);
if (page)
*handle = pfn_to_dma(dev, page_to_pfn(page));
@@ -683,13 +688,12 @@
static void *arm_coherent_dma_alloc(struct device *dev, size_t size,
dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs)
{
- pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL);
void *memory;
if (dma_alloc_from_coherent(dev, size, handle, &memory))
return memory;
- return __dma_alloc(dev, size, handle, gfp, prot, true,
+ return __dma_alloc(dev, size, handle, gfp, PAGE_KERNEL, true,
attrs, __builtin_return_address(0));
}
@@ -753,12 +757,12 @@
size = PAGE_ALIGN(size);
- if (is_coherent || nommu()) {
+ if (nommu()) {
__dma_free_buffer(page, size);
- } else if (__free_from_pool(cpu_addr, size)) {
+ } else if (!is_coherent && __free_from_pool(cpu_addr, size)) {
return;
} else if (!dev_get_cma_area(dev)) {
- if (want_vaddr)
+ if (want_vaddr && !is_coherent)
__dma_free_remap(cpu_addr, size);
__dma_free_buffer(page, size);
} else {
@@ -1971,7 +1975,7 @@
{
int next_bitmap;
- if (mapping->nr_bitmaps > mapping->extensions)
+ if (mapping->nr_bitmaps >= mapping->extensions)
return -EINVAL;
next_bitmap = mapping->nr_bitmaps;
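
[The reordered __dma_alloc() above now honours CMA before the coherent
shortcut. The decision ladder, condensed into a sketch where strings stand
in for the real allocators:

	static const char *pick_allocator(bool is_nommu, bool has_cma,
					  bool can_wait, bool is_coherent)
	{
		if (is_nommu)			return "simple buffer";
		if (has_cma && can_wait)	return "contiguous (CMA)";
		if (is_coherent)		return "simple buffer";
		if (!can_wait)			return "atomic pool";
		return "remapped buffer";
	}
]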
diff --git a/arch/arm/mm/dma.h b/arch/arm/mm/dma.h
new file mode 100644
index 0000000..70ea6852
--- /dev/null
+++ b/arch/arm/mm/dma.h
@@ -0,0 +1,32 @@
+#ifndef DMA_H
+#define DMA_H
+
+#include <asm/glue-cache.h>
+
+#ifndef MULTI_CACHE
+#define dmac_map_area __glue(_CACHE,_dma_map_area)
+#define dmac_unmap_area __glue(_CACHE,_dma_unmap_area)
+
+/*
+ * These are private to the dma-mapping API. Do not use directly.
+ * Their sole purpose is to ensure that data held in the cache
+ * is visible to DMA, or data written by DMA to system memory is
+ * visible to the CPU.
+ */
+extern void dmac_map_area(const void *, size_t, int);
+extern void dmac_unmap_area(const void *, size_t, int);
+
+#else
+
+/*
+ * These are private to the dma-mapping API. Do not use directly.
+ * Their sole purpose is to ensure that data held in the cache
+ * is visible to DMA, or data written by DMA to system memory is
+ * visible to the CPU.
+ */
+#define dmac_map_area cpu_cache.dma_map_area
+#define dmac_unmap_area cpu_cache.dma_unmap_area
+
+#endif
+
+#endif
diff --git a/arch/arm/mm/highmem.c b/arch/arm/mm/highmem.c
index ee8dfa7..9df5f09 100644
--- a/arch/arm/mm/highmem.c
+++ b/arch/arm/mm/highmem.c
@@ -79,7 +79,7 @@
type = kmap_atomic_idx_push();
- idx = type + KM_TYPE_NR * smp_processor_id();
+ idx = FIX_KMAP_BEGIN + type + KM_TYPE_NR * smp_processor_id();
vaddr = __fix_to_virt(idx);
#ifdef CONFIG_DEBUG_HIGHMEM
/*
@@ -106,7 +106,7 @@
if (kvaddr >= (void *)FIXADDR_START) {
type = kmap_atomic_idx();
- idx = type + KM_TYPE_NR * smp_processor_id();
+ idx = FIX_KMAP_BEGIN + type + KM_TYPE_NR * smp_processor_id();
if (cache_is_vivt())
__cpuc_flush_dcache_area((void *)vaddr, PAGE_SIZE);
@@ -138,7 +138,7 @@
return page_address(page);
type = kmap_atomic_idx_push();
- idx = type + KM_TYPE_NR * smp_processor_id();
+ idx = FIX_KMAP_BEGIN + type + KM_TYPE_NR * smp_processor_id();
vaddr = __fix_to_virt(idx);
#ifdef CONFIG_DEBUG_HIGHMEM
BUG_ON(!pte_none(get_fixmap_pte(vaddr)));
diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c
index d1e5ad7..0c81056 100644
--- a/arch/arm/mm/ioremap.c
+++ b/arch/arm/mm/ioremap.c
@@ -255,7 +255,7 @@
}
#endif
-void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn,
+static void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn,
unsigned long offset, size_t size, unsigned int mtype, void *caller)
{
const struct mem_type *type;
@@ -363,7 +363,7 @@
unsigned int mtype)
{
return __arm_ioremap_pfn_caller(pfn, offset, size, mtype,
- __builtin_return_address(0));
+ __builtin_return_address(0));
}
EXPORT_SYMBOL(__arm_ioremap_pfn);
@@ -371,13 +371,26 @@
unsigned int, void *) =
__arm_ioremap_caller;
-void __iomem *
-__arm_ioremap(phys_addr_t phys_addr, size_t size, unsigned int mtype)
+void __iomem *ioremap(resource_size_t res_cookie, size_t size)
{
- return arch_ioremap_caller(phys_addr, size, mtype,
- __builtin_return_address(0));
+ return arch_ioremap_caller(res_cookie, size, MT_DEVICE,
+ __builtin_return_address(0));
}
-EXPORT_SYMBOL(__arm_ioremap);
+EXPORT_SYMBOL(ioremap);
+
+void __iomem *ioremap_cache(resource_size_t res_cookie, size_t size)
+{
+ return arch_ioremap_caller(res_cookie, size, MT_DEVICE_CACHED,
+ __builtin_return_address(0));
+}
+EXPORT_SYMBOL(ioremap_cache);
+
+void __iomem *ioremap_wc(resource_size_t res_cookie, size_t size)
+{
+ return arch_ioremap_caller(res_cookie, size, MT_DEVICE_WC,
+ __builtin_return_address(0));
+}
+EXPORT_SYMBOL(ioremap_wc);
/*
* Remap an arbitrary physical address space into the kernel virtual
@@ -431,11 +444,11 @@
void (*arch_iounmap)(volatile void __iomem *) = __iounmap;
-void __arm_iounmap(volatile void __iomem *io_addr)
+void iounmap(volatile void __iomem *cookie)
{
- arch_iounmap(io_addr);
+ arch_iounmap(cookie);
}
-EXPORT_SYMBOL(__arm_iounmap);
+EXPORT_SYMBOL(iounmap);
#ifdef CONFIG_PCI
static int pci_ioremap_mem_type = MT_DEVICE;
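
[With ioremap()/iounmap() now provided as first-class functions rather
than macros over __arm_ioremap(), driver usage is unchanged; a typical
sketch for completeness:

	static int example_map_regs(struct platform_device *pdev)
	{
		struct resource *res;
		void __iomem *regs;

		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
		if (!res)
			return -ENODEV;

		regs = ioremap(res->start, resource_size(res));
		if (!regs)
			return -ENOMEM;

		writel(0x1, regs);	/* illustrative access */
		iounmap(regs);
		return 0;
	}
]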
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 6ca7d9a..7cd1514 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -291,13 +291,13 @@
.prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
L_PTE_RDONLY,
.prot_l1 = PMD_TYPE_TABLE,
- .domain = DOMAIN_USER,
+ .domain = DOMAIN_VECTORS,
},
[MT_HIGH_VECTORS] = {
.prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
L_PTE_USER | L_PTE_RDONLY,
.prot_l1 = PMD_TYPE_TABLE,
- .domain = DOMAIN_USER,
+ .domain = DOMAIN_VECTORS,
},
[MT_MEMORY_RWX] = {
.prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
@@ -357,6 +357,47 @@
}
EXPORT_SYMBOL(get_mem_type);
+static pte_t *(*pte_offset_fixmap)(pmd_t *dir, unsigned long addr);
+
+static pte_t bm_pte[PTRS_PER_PTE + PTE_HWTABLE_PTRS]
+ __aligned(PTE_HWTABLE_OFF + PTE_HWTABLE_SIZE) __initdata;
+
+static pte_t * __init pte_offset_early_fixmap(pmd_t *dir, unsigned long addr)
+{
+ return &bm_pte[pte_index(addr)];
+}
+
+static pte_t *pte_offset_late_fixmap(pmd_t *dir, unsigned long addr)
+{
+ return pte_offset_kernel(dir, addr);
+}
+
+static inline pmd_t * __init fixmap_pmd(unsigned long addr)
+{
+ pgd_t *pgd = pgd_offset_k(addr);
+ pud_t *pud = pud_offset(pgd, addr);
+ pmd_t *pmd = pmd_offset(pud, addr);
+
+ return pmd;
+}
+
+void __init early_fixmap_init(void)
+{
+ pmd_t *pmd;
+
+ /*
+ * The early fixmap range spans multiple pmds, for which
+ * we are not prepared:
+ */
+ BUILD_BUG_ON((__fix_to_virt(__end_of_permanent_fixed_addresses) >> PMD_SHIFT)
+ != FIXADDR_TOP >> PMD_SHIFT);
+
+ pmd = fixmap_pmd(FIXADDR_TOP);
+ pmd_populate_kernel(&init_mm, pmd, bm_pte);
+
+ pte_offset_fixmap = pte_offset_early_fixmap;
+}
+
/*
* To avoid TLB flush broadcasts, this uses local_flush_tlb_kernel_range().
* As a result, this can only be called with preemption disabled, as under
@@ -365,7 +406,7 @@
void __set_fixmap(enum fixed_addresses idx, phys_addr_t phys, pgprot_t prot)
{
unsigned long vaddr = __fix_to_virt(idx);
- pte_t *pte = pte_offset_kernel(pmd_off_k(vaddr), vaddr);
+ pte_t *pte = pte_offset_fixmap(pmd_off_k(vaddr), vaddr);
/* Make sure fixmap region does not exceed available allocation. */
BUILD_BUG_ON(FIXADDR_START + (__end_of_fixed_addresses * PAGE_SIZE) >
@@ -855,7 +896,7 @@
}
if ((md->type == MT_DEVICE || md->type == MT_ROM) &&
- md->virtual >= PAGE_OFFSET &&
+ md->virtual >= PAGE_OFFSET && md->virtual < FIXADDR_START &&
(md->virtual < VMALLOC_START || md->virtual >= VMALLOC_END)) {
pr_warn("BUG: mapping for 0x%08llx at 0x%08lx out of vmalloc space\n",
(long long)__pfn_to_phys((u64)md->pfn), md->virtual);
@@ -1072,6 +1113,7 @@
int highmem = 0;
phys_addr_t vmalloc_limit = __pa(vmalloc_min - 1) + 1;
struct memblock_region *reg;
+ bool should_use_highmem = false;
for_each_memblock(memory, reg) {
phys_addr_t block_start = reg->base;
@@ -1090,6 +1132,7 @@
pr_notice("Ignoring RAM at %pa-%pa (!CONFIG_HIGHMEM)\n",
&block_start, &block_end);
memblock_remove(reg->base, reg->size);
+ should_use_highmem = true;
continue;
}
@@ -1100,6 +1143,7 @@
&block_start, &block_end, &vmalloc_limit);
memblock_remove(vmalloc_limit, overlap_size);
block_end = vmalloc_limit;
+ should_use_highmem = true;
}
}
@@ -1134,6 +1178,9 @@
}
}
+ if (should_use_highmem)
+ pr_notice("Consider using a HIGHMEM enabled kernel.\n");
+
high_memory = __va(arm_lowmem_limit - 1) + 1;
/*
@@ -1213,10 +1260,10 @@
/*
* Set up the device mappings. Since we clear out the page tables for all
- * mappings above VMALLOC_START, we will remove any debug device mappings.
- * This means you have to be careful how you debug this function, or any
- * called function. This means you can't use any function or debugging
- * method which may touch any device, otherwise the kernel _will_ crash.
+ * mappings above VMALLOC_START, except early fixmap, we might remove debug
+ * device mappings. This means earlycon can be used to debug this function.
+ * Any other function or debugging method which may touch any device _will_
+ * crash the kernel.
*/
static void __init devicemaps_init(const struct machine_desc *mdesc)
{
@@ -1231,7 +1278,10 @@
early_trap_init(vectors);
- for (addr = VMALLOC_START; addr; addr += PMD_SIZE)
+ /*
+ * Clear page table except top pmd used by early fixmaps
+ */
+ for (addr = VMALLOC_START; addr < (FIXADDR_TOP & PMD_MASK); addr += PMD_SIZE)
pmd_clear(pmd_off_k(addr));
/*
@@ -1483,6 +1533,35 @@
#endif
+static void __init early_fixmap_shutdown(void)
+{
+ int i;
+ unsigned long va = fix_to_virt(__end_of_permanent_fixed_addresses - 1);
+
+ pte_offset_fixmap = pte_offset_late_fixmap;
+ pmd_clear(fixmap_pmd(va));
+ local_flush_tlb_kernel_page(va);
+
+ for (i = 0; i < __end_of_permanent_fixed_addresses; i++) {
+ pte_t *pte;
+ struct map_desc map;
+
+ map.virtual = fix_to_virt(i);
+ pte = pte_offset_early_fixmap(pmd_off_k(map.virtual), map.virtual);
+
+ /* Only i/o device mappings are supported ATM */
+ if (pte_none(*pte) ||
+ (pte_val(*pte) & L_PTE_MT_MASK) != L_PTE_MT_DEV_SHARED)
+ continue;
+
+ map.pfn = pte_pfn(*pte);
+ map.type = MT_DEVICE;
+ map.length = PAGE_SIZE;
+
+ create_mapping(&map);
+ }
+}
+
/*
* paging_init() sets up the page tables, initialises the zone memory
* maps, and sets up the zero page, bad page and bad page tables.
@@ -1494,7 +1573,9 @@
build_mem_type_table();
prepare_page_table();
map_lowmem();
+ memblock_set_current_limit(arm_lowmem_limit);
dma_contiguous_remap();
+ early_fixmap_shutdown();
devicemaps_init(mdesc);
kmap_init();
tcm_init();
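
[The payoff of early_fixmap_init() is that a fixmap slot is usable before
paging_init() finishes; earlycon-style code can map its device page along
these lines. A sketch that assumes the generic FIX_EARLYCON_MEM_BASE slot
and FIXMAP_PAGE_IO protection from asm-generic/fixmap.h:

	static void __iomem *early_map_device(phys_addr_t phys)
	{
		/* installs the pte through the early bm_pte-backed table */
		__set_fixmap(FIX_EARLYCON_MEM_BASE, phys & PAGE_MASK,
			     FIXMAP_PAGE_IO);
		return (void __iomem *)(fix_to_virt(FIX_EARLYCON_MEM_BASE) +
					(phys & ~PAGE_MASK));
	}
]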
diff --git a/arch/arm/mm/nommu.c b/arch/arm/mm/nommu.c
index afd7e05..1dd1093 100644
--- a/arch/arm/mm/nommu.c
+++ b/arch/arm/mm/nommu.c
@@ -351,30 +351,43 @@
}
EXPORT_SYMBOL(__arm_ioremap_pfn);
-void __iomem *__arm_ioremap_pfn_caller(unsigned long pfn, unsigned long offset,
- size_t size, unsigned int mtype, void *caller)
-{
- return __arm_ioremap_pfn(pfn, offset, size, mtype);
-}
-
-void __iomem *__arm_ioremap(phys_addr_t phys_addr, size_t size,
- unsigned int mtype)
-{
- return (void __iomem *)phys_addr;
-}
-EXPORT_SYMBOL(__arm_ioremap);
-
-void __iomem * (*arch_ioremap_caller)(phys_addr_t, size_t, unsigned int, void *);
-
void __iomem *__arm_ioremap_caller(phys_addr_t phys_addr, size_t size,
unsigned int mtype, void *caller)
{
- return __arm_ioremap(phys_addr, size, mtype);
+ return (void __iomem *)phys_addr;
}
+void __iomem * (*arch_ioremap_caller)(phys_addr_t, size_t, unsigned int, void *);
+
+void __iomem *ioremap(resource_size_t res_cookie, size_t size)
+{
+ return __arm_ioremap_caller(res_cookie, size, MT_DEVICE,
+ __builtin_return_address(0));
+}
+EXPORT_SYMBOL(ioremap);
+
+void __iomem *ioremap_cache(resource_size_t res_cookie, size_t size)
+{
+ return __arm_ioremap_caller(res_cookie, size, MT_DEVICE_CACHED,
+ __builtin_return_address(0));
+}
+EXPORT_SYMBOL(ioremap_cache);
+
+void __iomem *ioremap_wc(resource_size_t res_cookie, size_t size)
+{
+ return __arm_ioremap_caller(res_cookie, size, MT_DEVICE_WC,
+ __builtin_return_address(0));
+}
+EXPORT_SYMBOL(ioremap_wc);
+
+void __iounmap(volatile void __iomem *addr)
+{
+}
+EXPORT_SYMBOL(__iounmap);
+
void (*arch_iounmap)(volatile void __iomem *);
-void __arm_iounmap(volatile void __iomem *addr)
+void iounmap(volatile void __iomem *addr)
{
}
-EXPORT_SYMBOL(__arm_iounmap);
+EXPORT_SYMBOL(iounmap);
diff --git a/arch/arm/mm/pgd.c b/arch/arm/mm/pgd.c
index a3681f1..e683db1 100644
--- a/arch/arm/mm/pgd.c
+++ b/arch/arm/mm/pgd.c
@@ -84,6 +84,16 @@
if (!new_pte)
goto no_pte;
+#ifndef CONFIG_ARM_LPAE
+ /*
+ * Modify the PTE pointer to have the correct domain. This
+ * needs to be the vectors domain to avoid the low vectors
+ * being unmapped.
+ */
+ pmd_val(*new_pmd) &= ~PMD_DOMAIN_MASK;
+ pmd_val(*new_pmd) |= PMD_DOMAIN(DOMAIN_VECTORS);
+#endif
+
init_pud = pud_offset(init_pgd, 0);
init_pmd = pmd_offset(init_pud, 0);
init_pte = pte_offset_map(init_pmd, 0);
diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S
index 0716bbe..de2b246 100644
--- a/arch/arm/mm/proc-v7.S
+++ b/arch/arm/mm/proc-v7.S
@@ -274,7 +274,10 @@
__v7_b15mp_setup:
__v7_ca17mp_setup:
mov r10, #0
-1:
+1: adr r12, __v7_setup_stack @ the local stack
+ stmia r12, {r0-r5, lr} @ v7_invalidate_l1 touches r0-r6
+ bl v7_invalidate_l1
+ ldmia r12, {r0-r5, lr}
#ifdef CONFIG_SMP
ALT_SMP(mrc p15, 0, r0, c1, c0, 1)
ALT_UP(mov r0, #(1 << 6)) @ fake it for UP
@@ -283,7 +286,7 @@
orreq r0, r0, r10 @ Enable CPU-specific SMP bits
mcreq p15, 0, r0, c1, c0, 1
#endif
- b __v7_setup
+ b __v7_setup_cont
/*
* Errata:
@@ -413,10 +416,11 @@
__v7_setup:
adr r12, __v7_setup_stack @ the local stack
- stmia r12, {r0-r5, r7, r9, r11, lr}
+ stmia r12, {r0-r5, lr} @ v7_invalidate_l1 touches r0-r6
bl v7_invalidate_l1
- ldmia r12, {r0-r5, r7, r9, r11, lr}
+ ldmia r12, {r0-r5, lr}
+__v7_setup_cont:
and r0, r9, #0xff000000 @ ARM?
teq r0, #0x41000000
bne __errata_finish
@@ -480,7 +484,7 @@
.align 2
__v7_setup_stack:
- .space 4 * 11 @ 11 registers
+ .space 4 * 7 @ 7 registers
__INITDATA
diff --git a/arch/arm/vdso/Makefile b/arch/arm/vdso/Makefile
index 9d259d9..1160434 100644
--- a/arch/arm/vdso/Makefile
+++ b/arch/arm/vdso/Makefile
@@ -14,7 +14,7 @@
VDSO_LDFLAGS += -nostdlib -shared
VDSO_LDFLAGS += $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
VDSO_LDFLAGS += $(call cc-ldoption, -Wl$(comma)--build-id)
-VDSO_LDFLAGS += $(call cc-option, -fuse-ld=bfd)
+VDSO_LDFLAGS += $(call cc-ldoption, -fuse-ld=bfd)
obj-$(CONFIG_VDSO) += vdso.o
extra-$(CONFIG_VDSO) += vdso.lds
diff --git a/arch/arm/vdso/vdsomunge.c b/arch/arm/vdso/vdsomunge.c
index 9005b07..aedec81 100644
--- a/arch/arm/vdso/vdsomunge.c
+++ b/arch/arm/vdso/vdsomunge.c
@@ -45,13 +45,11 @@
* it does.
*/
-#define _GNU_SOURCE
-
#include <byteswap.h>
#include <elf.h>
#include <errno.h>
-#include <error.h>
#include <fcntl.h>
+#include <stdarg.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
@@ -82,11 +80,25 @@
#define EF_ARM_ABI_FLOAT_HARD 0x400
#endif
+static int failed;
+static const char *argv0;
static const char *outfile;
+static void fail(const char *fmt, ...)
+{
+ va_list ap;
+
+ failed = 1;
+ fprintf(stderr, "%s: ", argv0);
+ va_start(ap, fmt);
+ vfprintf(stderr, fmt, ap);
+ va_end(ap);
+ exit(EXIT_FAILURE);
+}
+
static void cleanup(void)
{
- if (error_message_count > 0 && outfile != NULL)
+ if (failed && outfile != NULL)
unlink(outfile);
}
@@ -119,68 +131,66 @@
int infd;
atexit(cleanup);
+ argv0 = argv[0];
if (argc != 3)
- error(EXIT_FAILURE, 0, "Usage: %s [infile] [outfile]", argv[0]);
+ fail("Usage: %s [infile] [outfile]\n", argv[0]);
infile = argv[1];
outfile = argv[2];
infd = open(infile, O_RDONLY);
if (infd < 0)
- error(EXIT_FAILURE, errno, "Cannot open %s", infile);
+ fail("Cannot open %s: %s\n", infile, strerror(errno));
if (fstat(infd, &stat) != 0)
- error(EXIT_FAILURE, errno, "Failed stat for %s", infile);
+ fail("Failed stat for %s: %s\n", infile, strerror(errno));
inbuf = mmap(NULL, stat.st_size, PROT_READ, MAP_PRIVATE, infd, 0);
if (inbuf == MAP_FAILED)
- error(EXIT_FAILURE, errno, "Failed to map %s", infile);
+ fail("Failed to map %s: %s\n", infile, strerror(errno));
close(infd);
inhdr = inbuf;
if (memcmp(&inhdr->e_ident, ELFMAG, SELFMAG) != 0)
- error(EXIT_FAILURE, 0, "Not an ELF file");
+ fail("Not an ELF file\n");
if (inhdr->e_ident[EI_CLASS] != ELFCLASS32)
- error(EXIT_FAILURE, 0, "Unsupported ELF class");
+ fail("Unsupported ELF class\n");
swap = inhdr->e_ident[EI_DATA] != HOST_ORDER;
if (read_elf_half(inhdr->e_type, swap) != ET_DYN)
- error(EXIT_FAILURE, 0, "Not a shared object");
+ fail("Not a shared object\n");
- if (read_elf_half(inhdr->e_machine, swap) != EM_ARM) {
- error(EXIT_FAILURE, 0, "Unsupported architecture %#x",
- inhdr->e_machine);
- }
+ if (read_elf_half(inhdr->e_machine, swap) != EM_ARM)
+ fail("Unsupported architecture %#x\n", inhdr->e_machine);
e_flags = read_elf_word(inhdr->e_flags, swap);
if (EF_ARM_EABI_VERSION(e_flags) != EF_ARM_EABI_VER5) {
- error(EXIT_FAILURE, 0, "Unsupported EABI version %#x",
- EF_ARM_EABI_VERSION(e_flags));
+ fail("Unsupported EABI version %#x\n",
+ EF_ARM_EABI_VERSION(e_flags));
}
if (e_flags & EF_ARM_ABI_FLOAT_HARD)
- error(EXIT_FAILURE, 0,
- "Unexpected hard-float flag set in e_flags");
+ fail("Unexpected hard-float flag set in e_flags\n");
clear_soft_float = !!(e_flags & EF_ARM_ABI_FLOAT_SOFT);
outfd = open(outfile, O_RDWR | O_CREAT | O_TRUNC, S_IRUSR | S_IWUSR);
if (outfd < 0)
- error(EXIT_FAILURE, errno, "Cannot open %s", outfile);
+ fail("Cannot open %s: %s\n", outfile, strerror(errno));
if (ftruncate(outfd, stat.st_size) != 0)
- error(EXIT_FAILURE, errno, "Cannot truncate %s", outfile);
+ fail("Cannot truncate %s: %s\n", outfile, strerror(errno));
outbuf = mmap(NULL, stat.st_size, PROT_READ | PROT_WRITE, MAP_SHARED,
outfd, 0);
if (outbuf == MAP_FAILED)
- error(EXIT_FAILURE, errno, "Failed to map %s", outfile);
+ fail("Failed to map %s: %s\n", outfile, strerror(errno));
close(outfd);
@@ -195,7 +205,7 @@
}
if (msync(outbuf, stat.st_size, MS_SYNC) != 0)
- error(EXIT_FAILURE, errno, "Failed to sync %s", outfile);
+ fail("Failed to sync %s: %s\n", outfile, strerror(errno));
return EXIT_SUCCESS;
}
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 0f6edb1..318175f 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -23,9 +23,9 @@
select BUILDTIME_EXTABLE_SORT
select CLONE_BACKWARDS
select COMMON_CLK
- select EDAC_SUPPORT
select CPU_PM if (SUSPEND || CPU_IDLE)
select DCACHE_WORD_ACCESS
+ select EDAC_SUPPORT
select GENERIC_ALLOCATOR
select GENERIC_CLOCKEVENTS
select GENERIC_CLOCKEVENTS_BROADCAST if SMP
diff --git a/arch/arm64/boot/dts/apm/apm-mustang.dts b/arch/arm64/boot/dts/apm/apm-mustang.dts
index 83578e7..4c55833 100644
--- a/arch/arm64/boot/dts/apm/apm-mustang.dts
+++ b/arch/arm64/boot/dts/apm/apm-mustang.dts
@@ -23,6 +23,16 @@
device_type = "memory";
reg = < 0x1 0x00000000 0x0 0x80000000 >; /* Updated by bootloader */
};
+
+ gpio-keys {
+ compatible = "gpio-keys";
+ button@1 {
+ label = "POWER";
+ linux,code = <116>;
+ linux,input-type = <0x1>;
+ interrupts = <0x0 0x2d 0x1>;
+ };
+ };
};
&pcie0clk {
diff --git a/arch/arm64/boot/dts/arm/Makefile b/arch/arm64/boot/dts/arm/Makefile
index c5c98b9..bb3c072 100644
--- a/arch/arm64/boot/dts/arm/Makefile
+++ b/arch/arm64/boot/dts/arm/Makefile
@@ -1,6 +1,7 @@
dtb-$(CONFIG_ARCH_VEXPRESS) += foundation-v8.dtb
dtb-$(CONFIG_ARCH_VEXPRESS) += juno.dtb juno-r1.dtb
dtb-$(CONFIG_ARCH_VEXPRESS) += rtsm_ve-aemv8a.dtb
+dtb-$(CONFIG_ARCH_VEXPRESS) += vexpress-v2f-1xv7-ca53x2.dtb
always := $(dtb-y)
subdir-y := $(dts-dirs)
diff --git a/arch/arm64/boot/dts/arm/vexpress-v2f-1xv7-ca53x2.dts b/arch/arm64/boot/dts/arm/vexpress-v2f-1xv7-ca53x2.dts
new file mode 100644
index 0000000..5b1d018
--- /dev/null
+++ b/arch/arm64/boot/dts/arm/vexpress-v2f-1xv7-ca53x2.dts
@@ -0,0 +1,191 @@
+/*
+ * ARM Ltd. Versatile Express
+ *
+ * LogicTile Express 20MG
+ * V2F-1XV7
+ *
+ * Cortex-A53 (2 cores) Soft Macrocell Model
+ *
+ * HBI-0247C
+ */
+
+/dts-v1/;
+
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+
+/ {
+ model = "V2F-1XV7 Cortex-A53x2 SMM";
+ arm,hbi = <0x247>;
+ arm,vexpress,site = <0xf>;
+ compatible = "arm,vexpress,v2f-1xv7,ca53x2", "arm,vexpress,v2f-1xv7", "arm,vexpress";
+ interrupt-parent = <&gic>;
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ chosen {
+ stdout-path = "serial0:38400n8";
+ };
+
+ aliases {
+ serial0 = &v2m_serial0;
+ serial1 = &v2m_serial1;
+ serial2 = &v2m_serial2;
+ serial3 = &v2m_serial3;
+ i2c0 = &v2m_i2c_dvi;
+ i2c1 = &v2m_i2c_pcie;
+ };
+
+ cpus {
+ #address-cells = <2>;
+ #size-cells = <0>;
+
+ cpu@0 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a53", "arm,armv8";
+ reg = <0 0>;
+ next-level-cache = <&L2_0>;
+ };
+
+ cpu@1 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a53", "arm,armv8";
+ reg = <0 1>;
+ next-level-cache = <&L2_0>;
+ };
+
+ L2_0: l2-cache0 {
+ compatible = "cache";
+ };
+ };
+
+ memory@80000000 {
+ device_type = "memory";
+ reg = <0 0x80000000 0 0x80000000>; /* 2GB @ 2GB */
+ };
+
+ gic: interrupt-controller@2c001000 {
+ compatible = "arm,gic-400";
+ #interrupt-cells = <3>;
+ #address-cells = <0>;
+ interrupt-controller;
+ reg = <0 0x2c001000 0 0x1000>,
+ <0 0x2c002000 0 0x2000>,
+ <0 0x2c004000 0 0x2000>,
+ <0 0x2c006000 0 0x2000>;
+ interrupts = <GIC_PPI 9 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_HIGH)>;
+ };
+
+ timer {
+ compatible = "arm,armv8-timer";
+ interrupts = <GIC_PPI 13 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_LOW)>,
+ <GIC_PPI 14 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_LOW)>,
+ <GIC_PPI 11 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_LOW)>,
+ <GIC_PPI 10 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_LOW)>;
+ };
+
+ pmu {
+ compatible = "arm,armv8-pmuv3";
+ interrupts = <GIC_SPI 68 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 69 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ dcc {
+ compatible = "arm,vexpress,config-bus";
+ arm,vexpress,config-bridge = <&v2m_sysreg>;
+
+ smbclk: osc@4 {
+ /* SMC clock */
+ compatible = "arm,vexpress-osc";
+ arm,vexpress-sysreg,func = <1 4>;
+ freq-range = <40000000 40000000>;
+ #clock-cells = <0>;
+ clock-output-names = "smclk";
+ };
+
+ volt@0 {
+ /* VIO to expansion board above */
+ compatible = "arm,vexpress-volt";
+ arm,vexpress-sysreg,func = <2 0>;
+ regulator-name = "VIO_UP";
+ regulator-min-microvolt = <800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-always-on;
+ };
+
+ volt@1 {
+ /* 12V from power connector J6 */
+ compatible = "arm,vexpress-volt";
+ arm,vexpress-sysreg,func = <2 1>;
+ regulator-name = "12";
+ regulator-always-on;
+ };
+
+ temp@0 {
+ /* FPGA temperature */
+ compatible = "arm,vexpress-temp";
+ arm,vexpress-sysreg,func = <4 0>;
+ label = "FPGA";
+ };
+ };
+
+ smb {
+ compatible = "simple-bus";
+
+ #address-cells = <2>;
+ #size-cells = <1>;
+ ranges = <0 0 0 0x08000000 0x04000000>,
+ <1 0 0 0x14000000 0x04000000>,
+ <2 0 0 0x18000000 0x04000000>,
+ <3 0 0 0x1c000000 0x04000000>,
+ <4 0 0 0x0c000000 0x04000000>,
+ <5 0 0 0x10000000 0x04000000>;
+
+ #interrupt-cells = <1>;
+ interrupt-map-mask = <0 0 63>;
+ interrupt-map = <0 0 0 &gic GIC_SPI 0 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 1 &gic GIC_SPI 1 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 2 &gic GIC_SPI 2 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 3 &gic GIC_SPI 3 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 4 &gic GIC_SPI 4 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 5 &gic GIC_SPI 5 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 6 &gic GIC_SPI 6 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 7 &gic GIC_SPI 7 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 8 &gic GIC_SPI 8 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 9 &gic GIC_SPI 9 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 10 &gic GIC_SPI 10 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 11 &gic GIC_SPI 11 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 12 &gic GIC_SPI 12 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 13 &gic GIC_SPI 13 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 14 &gic GIC_SPI 14 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 15 &gic GIC_SPI 15 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 16 &gic GIC_SPI 16 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 17 &gic GIC_SPI 17 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 18 &gic GIC_SPI 18 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 19 &gic GIC_SPI 19 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 20 &gic GIC_SPI 20 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 21 &gic GIC_SPI 21 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 22 &gic GIC_SPI 22 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 23 &gic GIC_SPI 23 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 24 &gic GIC_SPI 24 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 25 &gic GIC_SPI 25 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 26 &gic GIC_SPI 26 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 27 &gic GIC_SPI 27 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 28 &gic GIC_SPI 28 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 29 &gic GIC_SPI 29 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 30 &gic GIC_SPI 30 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 31 &gic GIC_SPI 31 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 32 &gic GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 33 &gic GIC_SPI 33 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 34 &gic GIC_SPI 34 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 35 &gic GIC_SPI 35 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 36 &gic GIC_SPI 36 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 37 &gic GIC_SPI 37 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 38 &gic GIC_SPI 38 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 39 &gic GIC_SPI 39 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 40 &gic GIC_SPI 40 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 41 &gic GIC_SPI 41 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 42 &gic GIC_SPI 42 IRQ_TYPE_LEVEL_HIGH>;
+
+ /include/ "../../../../arm/boot/dts/vexpress-v2m-rs1.dtsi"
+ };
+};
diff --git a/arch/arm64/boot/dts/cavium/thunder-88xx.dtsi b/arch/arm64/boot/dts/cavium/thunder-88xx.dtsi
index d8c0bdc..9cb7cf9 100644
--- a/arch/arm64/boot/dts/cavium/thunder-88xx.dtsi
+++ b/arch/arm64/boot/dts/cavium/thunder-88xx.dtsi
@@ -376,10 +376,19 @@
gic0: interrupt-controller@8010,00000000 {
compatible = "arm,gic-v3";
#interrupt-cells = <3>;
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
interrupt-controller;
reg = <0x8010 0x00000000 0x0 0x010000>, /* GICD */
<0x8010 0x80000000 0x0 0x600000>; /* GICR */
interrupts = <1 9 0xf04>;
+
+ its: gic-its@8010,00020000 {
+ compatible = "arm,gic-v3-its";
+ msi-controller;
+ reg = <0x8010 0x20000 0x0 0x200000>;
+ };
};
uaa0: serial@87e0,24000000 {
diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig
index f38c94f..4e17e7e 100644
--- a/arch/arm64/configs/defconfig
+++ b/arch/arm64/configs/defconfig
@@ -83,6 +83,7 @@
CONFIG_ATA=y
CONFIG_SATA_AHCI=y
CONFIG_SATA_AHCI_PLATFORM=y
+CONFIG_AHCI_CEVA=y
CONFIG_AHCI_XGENE=y
CONFIG_PATA_PLATFORM=y
CONFIG_PATA_OF_PLATFORM=y
diff --git a/arch/arm64/include/asm/acpi.h b/arch/arm64/include/asm/acpi.h
index 39248d3..406485e 100644
--- a/arch/arm64/include/asm/acpi.h
+++ b/arch/arm64/include/asm/acpi.h
@@ -19,6 +19,14 @@
#include <asm/psci.h>
#include <asm/smp_plat.h>
+/* Macros for consistency checks of the GICC subtable of MADT */
+#define ACPI_MADT_GICC_LENGTH \
+ (acpi_gbl_FADT.header.revision < 6 ? 76 : 80)
+
+#define BAD_MADT_GICC_ENTRY(entry, end) \
+ (!(entry) || (unsigned long)(entry) + sizeof(*(entry)) > (end) || \
+ (entry)->header.length != ACPI_MADT_GICC_LENGTH)
+
/* Basic configuration for ACPI */
#ifdef CONFIG_ACPI
/* ACPI table mapping after acpi_gbl_permanent_mmap is set */
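ACPI 5.1 defines the MADT GICC subtable as 76 bytes while ACPI 6.0 grows it
to 80, so the generic BAD_MADT_ENTRY() check, which compares against
sizeof(), can reject firmware that is valid for the other revision;
ACPI_MADT_GICC_LENGTH pins the expected length to the FADT revision instead.
A minimal sketch of a parser callback applying it, mirroring the arm64
smp.c hunk later in this merge (the scaffolding around the check is
illustrative only):

	static int __init
	acpi_parse_gic_cpu_interface(struct acpi_subtable_header *header,
				     const unsigned long end)
	{
		struct acpi_madt_generic_interrupt *processor =
			(struct acpi_madt_generic_interrupt *)header;

		/* reject NULL, truncated or wrong-length GICC entries */
		if (BAD_MADT_GICC_ENTRY(processor, end))
			return -EINVAL;

		acpi_table_print_madt_entry(header);
		/* ... per-CPU bring-up would continue here ... */
		return 0;
	}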
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index a7691a3..f860bfd 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -352,8 +352,8 @@
// TODO: add support for undefined instructions in kernel mode
enable_dbg
mov x0, sp
+ mov x2, x1
mov x1, #BAD_SYNC
- mrs x2, esr_el1
b bad_mode
ENDPROC(el1_sync)
@@ -553,7 +553,7 @@
ct_user_exit
mov x0, sp
mov x1, #BAD_SYNC
- mrs x2, esr_el1
+ mov x2, x25
bl bad_mode
b ret_to_user
ENDPROC(el0_sync)
diff --git a/arch/arm64/kernel/entry32.S b/arch/arm64/kernel/entry32.S
index bd9bfaa..f332d5d 100644
--- a/arch/arm64/kernel/entry32.S
+++ b/arch/arm64/kernel/entry32.S
@@ -32,13 +32,11 @@
ENTRY(compat_sys_sigreturn_wrapper)
mov x0, sp
- mov x27, #0 // prevent syscall restart handling (why)
b compat_sys_sigreturn
ENDPROC(compat_sys_sigreturn_wrapper)
ENTRY(compat_sys_rt_sigreturn_wrapper)
mov x0, sp
- mov x27, #0 // prevent syscall restart handling (why)
b compat_sys_rt_sigreturn
ENDPROC(compat_sys_rt_sigreturn_wrapper)
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index 695801a..50fb469 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -438,7 +438,7 @@
struct acpi_madt_generic_interrupt *processor;
processor = (struct acpi_madt_generic_interrupt *)header;
- if (BAD_MADT_ENTRY(processor, end))
+ if (BAD_MADT_GICC_ENTRY(processor, end))
return -EINVAL;
acpi_table_print_madt_entry(header);
diff --git a/arch/arm64/mm/Makefile b/arch/arm64/mm/Makefile
index 9d84feb..773d37a 100644
--- a/arch/arm64/mm/Makefile
+++ b/arch/arm64/mm/Makefile
@@ -4,5 +4,3 @@
context.o proc.o pageattr.o
obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
obj-$(CONFIG_ARM64_PTDUMP) += dump.o
-
-CFLAGS_mmu.o := -I$(srctree)/scripts/dtc/libfdt/
diff --git a/arch/cris/arch-v32/drivers/sync_serial.c b/arch/cris/arch-v32/drivers/sync_serial.c
index 4dda9bd..e989cee 100644
--- a/arch/cris/arch-v32/drivers/sync_serial.c
+++ b/arch/cris/arch-v32/drivers/sync_serial.c
@@ -1464,7 +1464,7 @@
if (port->write_ts_idx == NBR_IN_DESCR)
port->write_ts_idx = 0;
idx = port->write_ts_idx++;
- do_posix_clock_monotonic_gettime(&port->timestamp[idx]);
+ ktime_get_ts(&port->timestamp[idx]);
port->in_buffer_len += port->inbufchunk;
}
spin_unlock_irqrestore(&port->lock, flags);
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index 2a14585..aab7e46 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -2231,7 +2231,7 @@
config MIPS_CPS
bool "MIPS Coherent Processing System support"
- depends on SYS_SUPPORTS_MIPS_CPS && !64BIT
+ depends on SYS_SUPPORTS_MIPS_CPS
select MIPS_CM
select MIPS_CPC
select MIPS_CPS_PM if HOTPLUG_CPU
diff --git a/arch/mips/include/asm/mach-loongson64/mmzone.h b/arch/mips/include/asm/mach-loongson64/mmzone.h
index 37c08a2..c9f7e23 100644
--- a/arch/mips/include/asm/mach-loongson64/mmzone.h
+++ b/arch/mips/include/asm/mach-loongson64/mmzone.h
@@ -1,6 +1,6 @@
/*
* Copyright (C) 2010 Loongson Inc. & Lemote Inc. &
- * Insititute of Computing Technology
+ * Institute of Computing Technology
* Author: Xiang Gao, gaoxiang@ict.ac.cn
* Huacai Chen, chenhc@lemote.com
* Xiaofu Meng, Shuangshuang Zhang
diff --git a/arch/mips/include/asm/smp.h b/arch/mips/include/asm/smp.h
index 2b25d1b..16f1ea9 100644
--- a/arch/mips/include/asm/smp.h
+++ b/arch/mips/include/asm/smp.h
@@ -23,6 +23,7 @@
extern int smp_num_siblings;
extern cpumask_t cpu_sibling_map[];
extern cpumask_t cpu_core_map[];
+extern cpumask_t cpu_foreign_map;
#define raw_smp_processor_id() (current_thread_info()->cpu)
diff --git a/arch/mips/kernel/branch.c b/arch/mips/kernel/branch.c
index c0c5e59..d8f9b35 100644
--- a/arch/mips/kernel/branch.c
+++ b/arch/mips/kernel/branch.c
@@ -600,7 +600,7 @@
break;
case blezl_op: /* not really i_format */
- if (NO_R6EMU)
+ if (!insn.i_format.rt && NO_R6EMU)
goto sigill_r6;
case blez_op:
/*
@@ -635,7 +635,7 @@
break;
case bgtzl_op:
- if (NO_R6EMU)
+ if (!insn.i_format.rt && NO_R6EMU)
goto sigill_r6;
case bgtz_op:
/*
diff --git a/arch/mips/kernel/cps-vec.S b/arch/mips/kernel/cps-vec.S
index 55b759a..1b6ca63 100644
--- a/arch/mips/kernel/cps-vec.S
+++ b/arch/mips/kernel/cps-vec.S
@@ -60,7 +60,7 @@
nop
/* This is an NMI */
- la k0, nmi_handler
+ PTR_LA k0, nmi_handler
jr k0
nop
@@ -107,10 +107,10 @@
mul t1, t1, t0
mul t1, t1, t2
- li a0, KSEG0
- add a1, a0, t1
+ li a0, CKSEG0
+ PTR_ADD a1, a0, t1
1: cache Index_Store_Tag_I, 0(a0)
- add a0, a0, t0
+ PTR_ADD a0, a0, t0
bne a0, a1, 1b
nop
icache_done:
@@ -134,12 +134,12 @@
mul t1, t1, t0
mul t1, t1, t2
- li a0, KSEG0
- addu a1, a0, t1
- subu a1, a1, t0
+ li a0, CKSEG0
+ PTR_ADDU a1, a0, t1
+ PTR_SUBU a1, a1, t0
1: cache Index_Store_Tag_D, 0(a0)
bne a0, a1, 1b
- add a0, a0, t0
+ PTR_ADD a0, a0, t0
dcache_done:
/* Set Kseg0 CCA to that in s0 */
@@ -152,11 +152,11 @@
/* Enter the coherent domain */
li t0, 0xff
- sw t0, GCR_CL_COHERENCE_OFS(v1)
+ PTR_S t0, GCR_CL_COHERENCE_OFS(v1)
ehb
/* Jump to kseg0 */
- la t0, 1f
+ PTR_LA t0, 1f
jr t0
nop
@@ -178,9 +178,9 @@
nop
/* Off we go! */
- lw t1, VPEBOOTCFG_PC(v0)
- lw gp, VPEBOOTCFG_GP(v0)
- lw sp, VPEBOOTCFG_SP(v0)
+ PTR_L t1, VPEBOOTCFG_PC(v0)
+ PTR_L gp, VPEBOOTCFG_GP(v0)
+ PTR_L sp, VPEBOOTCFG_SP(v0)
jr t1
nop
END(mips_cps_core_entry)
@@ -217,7 +217,7 @@
.org 0x480
LEAF(excep_ejtag)
- la k0, ejtag_debug_handler
+ PTR_LA k0, ejtag_debug_handler
jr k0
nop
END(excep_ejtag)
@@ -229,7 +229,7 @@
nop
.set push
- .set mips32r2
+ .set mips64r2
.set mt
/* Only allow 1 TC per VPE to execute... */
@@ -237,7 +237,7 @@
/* ...and for the moment only 1 VPE */
dvpe
- la t1, 1f
+ PTR_LA t1, 1f
jr.hb t1
nop
@@ -250,25 +250,25 @@
mfc0 t0, CP0_MVPCONF0
srl t0, t0, MVPCONF0_PVPE_SHIFT
andi t0, t0, (MVPCONF0_PVPE >> MVPCONF0_PVPE_SHIFT)
- addiu t7, t0, 1
+ addiu ta3, t0, 1
/* If there's only 1, we're done */
beqz t0, 2f
nop
/* Loop through each VPE within this core */
- li t5, 1
+ li ta1, 1
1: /* Operate on the appropriate TC */
- mtc0 t5, CP0_VPECONTROL
+ mtc0 ta1, CP0_VPECONTROL
ehb
/* Bind TC to VPE (1:1 TC:VPE mapping) */
- mttc0 t5, CP0_TCBIND
+ mttc0 ta1, CP0_TCBIND
/* Set exclusive TC, non-active, master */
li t0, VPECONF0_MVP
- sll t1, t5, VPECONF0_XTC_SHIFT
+ sll t1, ta1, VPECONF0_XTC_SHIFT
or t0, t0, t1
mttc0 t0, CP0_VPECONF0
@@ -280,8 +280,8 @@
mttc0 t0, CP0_TCHALT
/* Next VPE */
- addiu t5, t5, 1
- slt t0, t5, t7
+ addiu ta1, ta1, 1
+ slt t0, ta1, ta3
bnez t0, 1b
nop
@@ -298,19 +298,19 @@
LEAF(mips_cps_boot_vpes)
/* Retrieve CM base address */
- la t0, mips_cm_base
- lw t0, 0(t0)
+ PTR_LA t0, mips_cm_base
+ PTR_L t0, 0(t0)
/* Calculate a pointer to this cores struct core_boot_config */
- lw t0, GCR_CL_ID_OFS(t0)
+ PTR_L t0, GCR_CL_ID_OFS(t0)
li t1, COREBOOTCFG_SIZE
mul t0, t0, t1
- la t1, mips_cps_core_bootcfg
- lw t1, 0(t1)
- addu t0, t0, t1
+ PTR_LA t1, mips_cps_core_bootcfg
+ PTR_L t1, 0(t1)
+ PTR_ADDU t0, t0, t1
/* Calculate this VPEs ID. If the core doesn't support MT use 0 */
- has_mt t6, 1f
+ has_mt ta2, 1f
li t9, 0
/* Find the number of VPEs present in the core */
@@ -334,24 +334,24 @@
1: /* Calculate a pointer to this VPEs struct vpe_boot_config */
li t1, VPEBOOTCFG_SIZE
mul v0, t9, t1
- lw t7, COREBOOTCFG_VPECONFIG(t0)
- addu v0, v0, t7
+ PTR_L ta3, COREBOOTCFG_VPECONFIG(t0)
+ PTR_ADDU v0, v0, ta3
#ifdef CONFIG_MIPS_MT
/* If the core doesn't support MT then return */
- bnez t6, 1f
+ bnez ta2, 1f
nop
jr ra
nop
.set push
- .set mips32r2
+ .set mips64r2
.set mt
1: /* Enter VPE configuration state */
dvpe
- la t1, 1f
+ PTR_LA t1, 1f
jr.hb t1
nop
1: mfc0 t1, CP0_MVPCONTROL
@@ -360,12 +360,12 @@
ehb
/* Loop through each VPE */
- lw t6, COREBOOTCFG_VPEMASK(t0)
- move t8, t6
- li t5, 0
+ PTR_L ta2, COREBOOTCFG_VPEMASK(t0)
+ move t8, ta2
+ li ta1, 0
/* Check whether the VPE should be running. If not, skip it */
-1: andi t0, t6, 1
+1: andi t0, ta2, 1
beqz t0, 2f
nop
@@ -373,7 +373,7 @@
mfc0 t0, CP0_VPECONTROL
ori t0, t0, VPECONTROL_TARGTC
xori t0, t0, VPECONTROL_TARGTC
- or t0, t0, t5
+ or t0, t0, ta1
mtc0 t0, CP0_VPECONTROL
ehb
@@ -384,8 +384,8 @@
/* Calculate a pointer to the VPEs struct vpe_boot_config */
li t0, VPEBOOTCFG_SIZE
- mul t0, t0, t5
- addu t0, t0, t7
+ mul t0, t0, ta1
+ addu t0, t0, ta3
/* Set the TC restart PC */
lw t1, VPEBOOTCFG_PC(t0)
@@ -423,9 +423,9 @@
mttc0 t0, CP0_VPECONF0
/* Next VPE */
-2: srl t6, t6, 1
- addiu t5, t5, 1
- bnez t6, 1b
+2: srl ta2, ta2, 1
+ addiu ta1, ta1, 1
+ bnez ta2, 1b
nop
/* Leave VPE configuration state */
@@ -445,7 +445,7 @@
/* This VPE should be offline, halt the TC */
li t0, TCHALT_H
mtc0 t0, CP0_TCHALT
- la t0, 1f
+ PTR_LA t0, 1f
1: jr.hb t0
nop
@@ -466,10 +466,10 @@
.set noat
lw $1, TI_CPU(gp)
sll $1, $1, LONGLOG
- la \dest, __per_cpu_offset
+ PTR_LA \dest, __per_cpu_offset
addu $1, $1, \dest
lw $1, 0($1)
- la \dest, cps_cpu_state
+ PTR_LA \dest, cps_cpu_state
addu \dest, \dest, $1
.set pop
.endm
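The register renames above are what let cps-vec.S build for 64-bit
kernels: the t4-t7 names only exist under the o32 ABI, and la/lw/add(u)
only handle 32-bit quantities, hence the PTR_LA/PTR_L/PTR_ADD macros.
The mapping below is my reading of asm/regdef.h and should be treated as
an assumption rather than part of the patch:

	/*
	 *            o32                  n64/n32
	 *   $8-$11   t0-t3                a4-a7 (= ta0-ta3)
	 *   $12-$15  t4-t7 (= ta0-ta3)    t0-t3
	 *
	 * t4-t7 simply are not defined under n64, so code that must
	 * assemble for both ABIs uses ta0-ta3 and accepts that the
	 * alias names different physical registers per ABI.
	 */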
diff --git a/arch/mips/kernel/scall32-o32.S b/arch/mips/kernel/scall32-o32.S
index 6e8de80..4cc1350 100644
--- a/arch/mips/kernel/scall32-o32.S
+++ b/arch/mips/kernel/scall32-o32.S
@@ -73,10 +73,11 @@
.set noreorder
.set nomacro
-1: user_lw(t5, 16(t0)) # argument #5 from usp
-4: user_lw(t6, 20(t0)) # argument #6 from usp
-3: user_lw(t7, 24(t0)) # argument #7 from usp
-2: user_lw(t8, 28(t0)) # argument #8 from usp
+load_a4: user_lw(t5, 16(t0)) # argument #5 from usp
+load_a5: user_lw(t6, 20(t0)) # argument #6 from usp
+load_a6: user_lw(t7, 24(t0)) # argument #7 from usp
+load_a7: user_lw(t8, 28(t0)) # argument #8 from usp
+loads_done:
sw t5, 16(sp) # argument #5 to ksp
sw t6, 20(sp) # argument #6 to ksp
@@ -85,10 +86,10 @@
.set pop
.section __ex_table,"a"
- PTR 1b,bad_stack
- PTR 2b,bad_stack
- PTR 3b,bad_stack
- PTR 4b,bad_stack
+ PTR load_a4, bad_stack_a4
+ PTR load_a5, bad_stack_a5
+ PTR load_a6, bad_stack_a6
+ PTR load_a7, bad_stack_a7
.previous
lw t0, TI_FLAGS($28) # syscall tracing enabled?
@@ -153,8 +154,8 @@
/* ------------------------------------------------------------------------ */
/*
- * The stackpointer for a call with more than 4 arguments is bad.
- * We probably should handle this case a bit more drastic.
+ * Our open-coded access area sanity test for the stack pointer
+ * failed. We probably should handle this case a bit more drastically.
*/
bad_stack:
li v0, EFAULT
@@ -163,6 +164,22 @@
sw t0, PT_R7(sp)
j o32_syscall_exit
+bad_stack_a4:
+ li t5, 0
+ b load_a5
+
+bad_stack_a5:
+ li t6, 0
+ b load_a6
+
+bad_stack_a6:
+ li t7, 0
+ b load_a7
+
+bad_stack_a7:
+ li t8, 0
+ b loads_done
+
/*
* The system call does not exist in this kernel
*/
diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S
index d07b210..f543ff4 100644
--- a/arch/mips/kernel/scall64-o32.S
+++ b/arch/mips/kernel/scall64-o32.S
@@ -69,16 +69,17 @@
daddu t1, t0, 32
bltz t1, bad_stack
-1: lw a4, 16(t0) # argument #5 from usp
-2: lw a5, 20(t0) # argument #6 from usp
-3: lw a6, 24(t0) # argument #7 from usp
-4: lw a7, 28(t0) # argument #8 from usp (for indirect syscalls)
+load_a4: lw a4, 16(t0) # argument #5 from usp
+load_a5: lw a5, 20(t0) # argument #6 from usp
+load_a6: lw a6, 24(t0) # argument #7 from usp
+load_a7: lw a7, 28(t0) # argument #8 from usp
+loads_done:
.section __ex_table,"a"
- PTR 1b, bad_stack
- PTR 2b, bad_stack
- PTR 3b, bad_stack
- PTR 4b, bad_stack
+ PTR load_a4, bad_stack_a4
+ PTR load_a5, bad_stack_a5
+ PTR load_a6, bad_stack_a6
+ PTR load_a7, bad_stack_a7
.previous
li t1, _TIF_WORK_SYSCALL_ENTRY
@@ -167,6 +168,22 @@
sd t0, PT_R7(sp)
j o32_syscall_exit
+bad_stack_a4:
+ li a4, 0
+ b load_a5
+
+bad_stack_a5:
+ li a5, 0
+ b load_a6
+
+bad_stack_a6:
+ li a6, 0
+ b load_a7
+
+bad_stack_a7:
+ li a7, 0
+ b loads_done
+
not_o32_scall:
/*
* This is not an o32 compatibility syscall, pass it on
@@ -383,7 +400,7 @@
PTR sys_connect /* 4170 */
PTR sys_getpeername
PTR sys_getsockname
- PTR sys_getsockopt
+ PTR compat_sys_getsockopt
PTR sys_listen
PTR compat_sys_recv /* 4175 */
PTR compat_sys_recvfrom
diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c
index be73c49..008b337 100644
--- a/arch/mips/kernel/setup.c
+++ b/arch/mips/kernel/setup.c
@@ -337,6 +337,11 @@
min_low_pfn = start;
if (end <= reserved_end)
continue;
+#ifdef CONFIG_BLK_DEV_INITRD
+ /* mapstart should be after initrd_end */
+ if (initrd_end && end <= (unsigned long)PFN_UP(__pa(initrd_end)))
+ continue;
+#endif
if (start >= mapstart)
continue;
mapstart = max(reserved_end, start);
@@ -366,14 +371,6 @@
max_low_pfn = PFN_DOWN(HIGHMEM_START);
}
-#ifdef CONFIG_BLK_DEV_INITRD
- /*
- * mapstart should be after initrd_end
- */
- if (initrd_end)
- mapstart = max(mapstart, (unsigned long)PFN_UP(__pa(initrd_end)));
-#endif
-
/*
* Initialize the boot-time allocator with low memory only.
*/
diff --git a/arch/mips/kernel/smp-cps.c b/arch/mips/kernel/smp-cps.c
index 4251d39..c889377 100644
--- a/arch/mips/kernel/smp-cps.c
+++ b/arch/mips/kernel/smp-cps.c
@@ -133,7 +133,7 @@
/*
* Patch the start of mips_cps_core_entry to provide:
*
- * v0 = CM base address
+ * v1 = CM base address
* s0 = kseg0 CCA
*/
entry_code = (u32 *)&mips_cps_core_entry;
@@ -369,7 +369,7 @@
static void wait_for_sibling_halt(void *ptr_cpu)
{
- unsigned cpu = (unsigned)ptr_cpu;
+ unsigned cpu = (unsigned long)ptr_cpu;
unsigned vpe_id = cpu_vpe_id(&cpu_data[cpu]);
unsigned halted;
unsigned long flags;
@@ -430,7 +430,7 @@
*/
err = smp_call_function_single(cpu_death_sibling,
wait_for_sibling_halt,
- (void *)cpu, 1);
+ (void *)(unsigned long)cpu, 1);
if (err)
panic("Failed to call remote sibling CPU\n");
}
diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c
index faa46eb..d0744cc 100644
--- a/arch/mips/kernel/smp.c
+++ b/arch/mips/kernel/smp.c
@@ -63,6 +63,13 @@
cpumask_t cpu_core_map[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(cpu_core_map);
+/*
+ * A logical CPU mask containing only one VPE per core to
+ * reduce the number of IPIs on large MT systems.
+ */
+cpumask_t cpu_foreign_map __read_mostly;
+EXPORT_SYMBOL(cpu_foreign_map);
+
/* representing cpus for which sibling maps can be computed */
static cpumask_t cpu_sibling_setup_map;
@@ -103,6 +110,29 @@
}
}
+/*
+ * Calculate a new cpu_foreign_map mask whenever a
+ * new cpu appears or disappears.
+ */
+static inline void calculate_cpu_foreign_map(void)
+{
+ int i, k, core_present;
+ cpumask_t temp_foreign_map;
+
+ /* Re-calculate the mask */
+ cpumask_clear(&temp_foreign_map);
+ for_each_online_cpu(i) {
+ core_present = 0;
+ for_each_cpu(k, &temp_foreign_map)
+ if (cpu_data[i].package == cpu_data[k].package &&
+ cpu_data[i].core == cpu_data[k].core)
+ core_present = 1;
+ if (!core_present)
+ cpumask_set_cpu(i, &temp_foreign_map);
+ }
+
+ cpumask_copy(&cpu_foreign_map, &temp_foreign_map);
+}
+
struct plat_smp_ops *mp_ops;
EXPORT_SYMBOL(mp_ops);
@@ -146,6 +176,8 @@
set_cpu_sibling_map(cpu);
set_cpu_core_map(cpu);
+ calculate_cpu_foreign_map();
+
cpumask_set_cpu(cpu, &cpu_callin_map);
synchronise_count_slave(cpu);
@@ -173,9 +205,18 @@
static void stop_this_cpu(void *dummy)
{
/*
- * Remove this CPU:
+ * Remove this CPU. Be a bit slow here and
+ * set the bits for every online CPU so we don't miss
+ * any IPI whilst taking this VPE down.
*/
+
+ cpumask_copy(&cpu_foreign_map, cpu_online_mask);
+
+ /* Make it visible to every other CPU */
+ smp_mb();
+
set_cpu_online(smp_processor_id(), false);
+ calculate_cpu_foreign_map();
local_irq_disable();
while (1);
}
@@ -197,6 +238,7 @@
mp_ops->prepare_cpus(max_cpus);
set_cpu_sibling_map(0);
set_cpu_core_map(0);
+ calculate_cpu_foreign_map();
#ifndef CONFIG_HOTPLUG_CPU
init_cpu_present(cpu_possible_mask);
#endif
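A worked example of what calculate_cpu_foreign_map() produces, for a
hypothetical MT system with two cores of two VPEs each, all online:

	/*
	 *   cpu   package  core   first VPE seen for its core?
	 *    0       0      0     yes -> set in temp_foreign_map
	 *    1       0      0     no  (core 0 already has cpu 0)
	 *    2       0      1     yes -> set in temp_foreign_map
	 *    3       0      1     no
	 *
	 * cpu_foreign_map = {0, 2}: one VPE per core.
	 */

The c-r4k.c hunk further below then broadcasts cache ops over this mask,
so each core is interrupted once rather than once per VPE.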
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index 2a7b38e..e207a43 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -2130,10 +2130,10 @@
BUG_ON(current->mm);
enter_lazy_tlb(&init_mm, current);
- /* Boot CPU's cache setup in setup_arch(). */
- if (!is_boot_cpu)
- cpu_cache_init();
- tlb_init();
+ /* Boot CPU's cache setup in setup_arch(). */
+ if (!is_boot_cpu)
+ cpu_cache_init();
+ tlb_init();
TLBMISS_HANDLER_SETUP();
}
diff --git a/arch/mips/loongson64/common/bonito-irq.c b/arch/mips/loongson64/common/bonito-irq.c
index cc0e4fd..4e116d2 100644
--- a/arch/mips/loongson64/common/bonito-irq.c
+++ b/arch/mips/loongson64/common/bonito-irq.c
@@ -3,7 +3,7 @@
* Author: Jun Sun, jsun@mvista.com or jsun@junsun.net
* Copyright (C) 2000, 2001 Ralf Baechle (ralf@gnu.org)
*
- * Copyright (C) 2007 Lemote Inc. & Insititute of Computing Technology
+ * Copyright (C) 2007 Lemote Inc. & Institute of Computing Technology
* Author: Fuxin Zhang, zhangfx@lemote.com
*
* This program is free software; you can redistribute it and/or modify it
diff --git a/arch/mips/loongson64/common/cmdline.c b/arch/mips/loongson64/common/cmdline.c
index 72fed00..01fbed1 100644
--- a/arch/mips/loongson64/common/cmdline.c
+++ b/arch/mips/loongson64/common/cmdline.c
@@ -6,7 +6,7 @@
* Copyright 2003 ICT CAS
* Author: Michael Guo <guoyi@ict.ac.cn>
*
- * Copyright (C) 2007 Lemote Inc. & Insititute of Computing Technology
+ * Copyright (C) 2007 Lemote Inc. & Institute of Computing Technology
* Author: Fuxin Zhang, zhangfx@lemote.com
*
* Copyright (C) 2009 Lemote Inc.
diff --git a/arch/mips/loongson64/common/cs5536/cs5536_mfgpt.c b/arch/mips/loongson64/common/cs5536/cs5536_mfgpt.c
index 12c75db..8750370 100644
--- a/arch/mips/loongson64/common/cs5536/cs5536_mfgpt.c
+++ b/arch/mips/loongson64/common/cs5536/cs5536_mfgpt.c
@@ -1,7 +1,7 @@
/*
* CS5536 General timer functions
*
- * Copyright (C) 2007 Lemote Inc. & Insititute of Computing Technology
+ * Copyright (C) 2007 Lemote Inc. & Institute of Computing Technology
* Author: Yanhua, yanh@lemote.com
*
* Copyright (C) 2009 Lemote Inc.
diff --git a/arch/mips/loongson64/common/env.c b/arch/mips/loongson64/common/env.c
index 22f04ca..f6c44dd 100644
--- a/arch/mips/loongson64/common/env.c
+++ b/arch/mips/loongson64/common/env.c
@@ -6,7 +6,7 @@
* Copyright 2003 ICT CAS
* Author: Michael Guo <guoyi@ict.ac.cn>
*
- * Copyright (C) 2007 Lemote Inc. & Insititute of Computing Technology
+ * Copyright (C) 2007 Lemote Inc. & Institute of Computing Technology
* Author: Fuxin Zhang, zhangfx@lemote.com
*
* Copyright (C) 2009 Lemote Inc.
diff --git a/arch/mips/loongson64/common/irq.c b/arch/mips/loongson64/common/irq.c
index 687003b..d36d969 100644
--- a/arch/mips/loongson64/common/irq.c
+++ b/arch/mips/loongson64/common/irq.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2007 Lemote Inc. & Insititute of Computing Technology
+ * Copyright (C) 2007 Lemote Inc. & Institute of Computing Technology
* Author: Fuxin Zhang, zhangfx@lemote.com
*
* This program is free software; you can redistribute it and/or modify it
diff --git a/arch/mips/loongson64/common/setup.c b/arch/mips/loongson64/common/setup.c
index d477dd6..2dc5122 100644
--- a/arch/mips/loongson64/common/setup.c
+++ b/arch/mips/loongson64/common/setup.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2007 Lemote Inc. & Insititute of Computing Technology
+ * Copyright (C) 2007 Lemote Inc. & Institute of Computing Technology
* Author: Fuxin Zhang, zhangfx@lemote.com
*
* This program is free software; you can redistribute it and/or modify it
diff --git a/arch/mips/loongson64/fuloong-2e/irq.c b/arch/mips/loongson64/fuloong-2e/irq.c
index ef5ec8f..892963f8 100644
--- a/arch/mips/loongson64/fuloong-2e/irq.c
+++ b/arch/mips/loongson64/fuloong-2e/irq.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2007 Lemote Inc. & Insititute of Computing Technology
+ * Copyright (C) 2007 Lemote Inc. & Institute of Computing Technology
* Author: Fuxin Zhang, zhangfx@lemote.com
*
* This program is free software; you can redistribute it and/or modify it
diff --git a/arch/mips/loongson64/lemote-2f/clock.c b/arch/mips/loongson64/lemote-2f/clock.c
index 462e34d..a78fb65 100644
--- a/arch/mips/loongson64/lemote-2f/clock.c
+++ b/arch/mips/loongson64/lemote-2f/clock.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2006 - 2008 Lemote Inc. & Insititute of Computing Technology
+ * Copyright (C) 2006 - 2008 Lemote Inc. & Institute of Computing Technology
* Author: Yanhua, yanh@lemote.com
*
* This file is subject to the terms and conditions of the GNU General Public
@@ -15,7 +15,7 @@
#include <linux/spinlock.h>
#include <asm/clock.h>
-#include <asm/mach-loongson/loongson.h>
+#include <asm/mach-loongson64/loongson.h>
static LIST_HEAD(clock_list);
static DEFINE_SPINLOCK(clock_lock);
diff --git a/arch/mips/loongson64/loongson-3/numa.c b/arch/mips/loongson64/loongson-3/numa.c
index 12d14ed..6f9e010 100644
--- a/arch/mips/loongson64/loongson-3/numa.c
+++ b/arch/mips/loongson64/loongson-3/numa.c
@@ -1,6 +1,6 @@
/*
* Copyright (C) 2010 Loongson Inc. & Lemote Inc. &
- * Insititute of Computing Technology
+ * Institute of Computing Technology
* Author: Xiang Gao, gaoxiang@ict.ac.cn
* Huacai Chen, chenhc@lemote.com
* Xiaofu Meng, Shuangshuang Zhang
diff --git a/arch/mips/math-emu/cp1emu.c b/arch/mips/math-emu/cp1emu.c
index 22b9b2c..712f17a 100644
--- a/arch/mips/math-emu/cp1emu.c
+++ b/arch/mips/math-emu/cp1emu.c
@@ -451,7 +451,7 @@
/* Fall through */
case jr_op:
/* For R6, JR already emulated in jalr_op */
- if (NO_R6EMU && insn.r_format.opcode == jr_op)
+ if (NO_R6EMU && insn.r_format.func == jr_op)
break;
*contpc = regs->regs[insn.r_format.rs];
return 1;
@@ -551,7 +551,7 @@
dec_insn.next_pc_inc;
return 1;
case blezl_op:
- if (NO_R6EMU)
+ if (!insn.i_format.rt && NO_R6EMU)
break;
case blez_op:
@@ -588,7 +588,7 @@
dec_insn.next_pc_inc;
return 1;
case bgtzl_op:
- if (NO_R6EMU)
+ if (!insn.i_format.rt && NO_R6EMU)
break;
case bgtz_op:
/*
diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c
index 7f660dc..fbea443 100644
--- a/arch/mips/mm/c-r4k.c
+++ b/arch/mips/mm/c-r4k.c
@@ -37,6 +37,7 @@
#include <asm/cacheflush.h> /* for run_uncached() */
#include <asm/traps.h>
#include <asm/dma-coherence.h>
+#include <asm/mips-cm.h>
/*
* Special Variant of smp_call_function for use by cache functions:
@@ -51,9 +52,16 @@
{
preempt_disable();
-#ifndef CONFIG_MIPS_MT_SMP
- smp_call_function(func, info, 1);
-#endif
+ /*
+ * The Coherent Manager propagates address-based cache ops to other
+ * cores but not index-based ops. However, r4k_on_each_cpu is used
+ * in both cases so there is no easy way to tell what kind of op is
+ * executed on the other cores. The best we can probably do is
+ * to make the cross-CPU call only when a CM is not present, because
+ * both CM-based SMP protocols (CMP & CPS) restrict index-based cache ops.
+ */
+ if (!mips_cm_present())
+ smp_call_function_many(&cpu_foreign_map, func, info, 1);
func(info);
preempt_enable();
}
@@ -937,7 +945,9 @@
}
static char *way_string[] = { NULL, "direct mapped", "2-way",
- "3-way", "4-way", "5-way", "6-way", "7-way", "8-way"
+ "3-way", "4-way", "5-way", "6-way", "7-way", "8-way",
+ "9-way", "10-way", "11-way", "12-way",
+ "13-way", "14-way", "15-way", "16-way",
};
static void probe_pcache(void)
diff --git a/arch/mips/mti-malta/malta-time.c b/arch/mips/mti-malta/malta-time.c
index 185e682..5625b19 100644
--- a/arch/mips/mti-malta/malta-time.c
+++ b/arch/mips/mti-malta/malta-time.c
@@ -119,18 +119,24 @@
int get_c0_fdc_int(void)
{
- int mips_cpu_fdc_irq;
+ /*
+ * Some cores claim the FDC is routable through the GIC, but it doesn't
+ * actually seem to be connected for those Malta bitstreams.
+ */
+ switch (current_cpu_type()) {
+ case CPU_INTERAPTIV:
+ case CPU_PROAPTIV:
+ return -1;
+ };
if (cpu_has_veic)
- mips_cpu_fdc_irq = -1;
+ return -1;
else if (gic_present)
- mips_cpu_fdc_irq = gic_get_c0_fdc_int();
+ return gic_get_c0_fdc_int();
else if (cp0_fdc_irq >= 0)
- mips_cpu_fdc_irq = MIPS_CPU_IRQ_BASE + cp0_fdc_irq;
+ return MIPS_CPU_IRQ_BASE + cp0_fdc_irq;
else
- mips_cpu_fdc_irq = -1;
-
- return mips_cpu_fdc_irq;
+ return -1;
}
int get_c0_perfcount_int(void)
diff --git a/arch/mips/pistachio/init.c b/arch/mips/pistachio/init.c
index d2dc836..8bd8ebb 100644
--- a/arch/mips/pistachio/init.c
+++ b/arch/mips/pistachio/init.c
@@ -63,13 +63,19 @@
plat_setup_iocoherency();
}
-#define DEFAULT_CPC_BASE_ADDR 0x1bde0000
+#define DEFAULT_CPC_BASE_ADDR 0x1bde0000
+#define DEFAULT_CDMM_BASE_ADDR 0x1bdd0000
phys_addr_t mips_cpc_default_phys_base(void)
{
return DEFAULT_CPC_BASE_ADDR;
}
+phys_addr_t mips_cdmm_phys_base(void)
+{
+ return DEFAULT_CDMM_BASE_ADDR;
+}
+
static void __init mips_nmi_setup(void)
{
void *base;
diff --git a/arch/mips/pistachio/time.c b/arch/mips/pistachio/time.c
index 67889fc..7c73fcb 100644
--- a/arch/mips/pistachio/time.c
+++ b/arch/mips/pistachio/time.c
@@ -27,6 +27,11 @@
return gic_get_c0_perfcount_int();
}
+int get_c0_fdc_int(void)
+{
+ return gic_get_c0_fdc_int();
+}
+
void __init plat_time_init(void)
{
struct device_node *np;
diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h
index 0a18375..f93c4a4 100644
--- a/arch/parisc/include/asm/pgtable.h
+++ b/arch/parisc/include/asm/pgtable.h
@@ -16,7 +16,7 @@
#include <asm/processor.h>
#include <asm/cache.h>
-extern spinlock_t pa_dbit_lock;
+extern spinlock_t pa_tlb_lock;
/*
* kern_addr_valid(ADDR) tests if ADDR is pointing to valid kernel
@@ -33,6 +33,19 @@
*/
#define kern_addr_valid(addr) (1)
+/* Purge data and instruction TLB entries. Must be called holding
+ * the pa_tlb_lock. The TLB purge instructions are slow on SMP
+ * machines since the purge must be broadcast to all CPUs.
+ */
+
+static inline void purge_tlb_entries(struct mm_struct *mm, unsigned long addr)
+{
+ mtsp(mm->context, 1);
+ pdtlb(addr);
+ if (unlikely(split_tlb))
+ pitlb(addr);
+}
+
/* Certain architectures need to do special things when PTEs
* within a page table are directly modified. Thus, the following
* hook is made available.
@@ -42,15 +55,20 @@
*(pteptr) = (pteval); \
} while(0)
-extern void purge_tlb_entries(struct mm_struct *, unsigned long);
+#define pte_inserted(x) \
+ ((pte_val(x) & (_PAGE_PRESENT|_PAGE_ACCESSED)) \
+ == (_PAGE_PRESENT|_PAGE_ACCESSED))
-#define set_pte_at(mm, addr, ptep, pteval) \
- do { \
+#define set_pte_at(mm, addr, ptep, pteval) \
+ do { \
+ pte_t old_pte; \
unsigned long flags; \
- spin_lock_irqsave(&pa_dbit_lock, flags); \
- set_pte(ptep, pteval); \
- purge_tlb_entries(mm, addr); \
- spin_unlock_irqrestore(&pa_dbit_lock, flags); \
+ spin_lock_irqsave(&pa_tlb_lock, flags); \
+ old_pte = *ptep; \
+ set_pte(ptep, pteval); \
+ if (pte_inserted(old_pte)) \
+ purge_tlb_entries(mm, addr); \
+ spin_unlock_irqrestore(&pa_tlb_lock, flags); \
} while (0)
#endif /* !__ASSEMBLY__ */
@@ -268,7 +286,7 @@
#define pte_none(x) (pte_val(x) == 0)
#define pte_present(x) (pte_val(x) & _PAGE_PRESENT)
-#define pte_clear(mm,addr,xp) do { pte_val(*(xp)) = 0; } while (0)
+#define pte_clear(mm, addr, xp) set_pte_at(mm, addr, xp, __pte(0))
#define pmd_flag(x) (pmd_val(x) & PxD_FLAG_MASK)
#define pmd_address(x) ((unsigned long)(pmd_val(x) &~ PxD_FLAG_MASK) << PxD_VALUE_SHIFT)
@@ -435,15 +453,15 @@
if (!pte_young(*ptep))
return 0;
- spin_lock_irqsave(&pa_dbit_lock, flags);
+ spin_lock_irqsave(&pa_tlb_lock, flags);
pte = *ptep;
if (!pte_young(pte)) {
- spin_unlock_irqrestore(&pa_dbit_lock, flags);
+ spin_unlock_irqrestore(&pa_tlb_lock, flags);
return 0;
}
set_pte(ptep, pte_mkold(pte));
purge_tlb_entries(vma->vm_mm, addr);
- spin_unlock_irqrestore(&pa_dbit_lock, flags);
+ spin_unlock_irqrestore(&pa_tlb_lock, flags);
return 1;
}
@@ -453,11 +471,12 @@
pte_t old_pte;
unsigned long flags;
- spin_lock_irqsave(&pa_dbit_lock, flags);
+ spin_lock_irqsave(&pa_tlb_lock, flags);
old_pte = *ptep;
- pte_clear(mm,addr,ptep);
- purge_tlb_entries(mm, addr);
- spin_unlock_irqrestore(&pa_dbit_lock, flags);
+ set_pte(ptep, __pte(0));
+ if (pte_inserted(old_pte))
+ purge_tlb_entries(mm, addr);
+ spin_unlock_irqrestore(&pa_tlb_lock, flags);
return old_pte;
}
@@ -465,10 +484,10 @@
static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
unsigned long flags;
- spin_lock_irqsave(&pa_dbit_lock, flags);
+ spin_lock_irqsave(&pa_tlb_lock, flags);
set_pte(ptep, pte_wrprotect(*ptep));
purge_tlb_entries(mm, addr);
- spin_unlock_irqrestore(&pa_dbit_lock, flags);
+ spin_unlock_irqrestore(&pa_tlb_lock, flags);
}
#define pte_same(A,B) (pte_val(A) == pte_val(B))
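Why _PAGE_PRESENT|_PAGE_ACCESSED is the right pte_inserted() test (a
reading of the patch, not an authoritative statement): the software TLB
miss handlers reworked in the entry.S hunks below only insert a
translation after setting _PAGE_ACCESSED under pa_tlb_lock, so a PTE
lacking either bit can never be live in any TLB and may be overwritten
without the slow broadcast purge:

	if (pte_inserted(old_pte))            /* could be cached in a TLB */
		purge_tlb_entries(mm, addr);  /* so do the broadcast purge */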
diff --git a/arch/parisc/include/asm/tlbflush.h b/arch/parisc/include/asm/tlbflush.h
index 9d086a5..e84b964 100644
--- a/arch/parisc/include/asm/tlbflush.h
+++ b/arch/parisc/include/asm/tlbflush.h
@@ -13,6 +13,9 @@
* active at any one time on the Merced bus. This tlb purge
* synchronisation is fairly lightweight and harmless so we activate
* it on all systems not just the N class.
+ *
+ * It is also used to ensure PTE updates are atomic and consistent
+ * with the TLB.
*/
extern spinlock_t pa_tlb_lock;
@@ -24,20 +27,24 @@
#define smp_flush_tlb_all() flush_tlb_all()
+int __flush_tlb_range(unsigned long sid,
+ unsigned long start, unsigned long end);
+
+#define flush_tlb_range(vma, start, end) \
+ __flush_tlb_range((vma)->vm_mm->context, start, end)
+
+#define flush_tlb_kernel_range(start, end) \
+ __flush_tlb_range(0, start, end)
+
/*
* flush_tlb_mm()
*
- * XXX This code is NOT valid for HP-UX compatibility processes,
- * (although it will probably work 99% of the time). HP-UX
- * processes are free to play with the space id's and save them
- * over long periods of time, etc. so we have to preserve the
- * space and just flush the entire tlb. We need to check the
- * personality in order to do that, but the personality is not
- * currently being set correctly.
- *
- * Of course, Linux processes could do the same thing, but
- * we don't support that (and the compilers, dynamic linker,
- * etc. do not do that).
+ * The code to switch to a new context is NOT valid for processes
+ * which play with the space id's. Thus, we have to preserve the
+ * space and just flush the entire tlb. However, the compilers,
+ * dynamic linker, etc, do not manipulate space id's, so there
+ * could be a significant performance benefit in switching contexts
+ * and not flushing the whole tlb.
*/
static inline void flush_tlb_mm(struct mm_struct *mm)
@@ -45,10 +52,18 @@
BUG_ON(mm == &init_mm); /* Should never happen */
#if 1 || defined(CONFIG_SMP)
+ /* Except for very small threads, flushing the whole TLB is
+ * faster than using __flush_tlb_range. The pdtlb and pitlb
+ * instructions are very slow because of the TLB broadcast.
+ * It might be faster to do local range flushes on all CPUs
+ * on PA 2.0 systems.
+ */
flush_tlb_all();
#else
/* FIXME: currently broken, causing space id and protection ids
- * to go out of sync, resulting in faults on userspace accesses.
+ * to go out of sync, resulting in faults on userspace accesses.
+ * This approach needs further investigation since running many
+ * small applications (e.g., GCC testsuite) is faster on HP-UX.
*/
if (mm) {
if (mm->context != 0)
@@ -65,22 +80,12 @@
{
unsigned long flags, sid;
- /* For one page, it's not worth testing the split_tlb variable */
-
- mb();
sid = vma->vm_mm->context;
purge_tlb_start(flags);
mtsp(sid, 1);
pdtlb(addr);
- pitlb(addr);
+ if (unlikely(split_tlb))
+ pitlb(addr);
purge_tlb_end(flags);
}
-
-void __flush_tlb_range(unsigned long sid,
- unsigned long start, unsigned long end);
-
-#define flush_tlb_range(vma,start,end) __flush_tlb_range((vma)->vm_mm->context,start,end)
-
-#define flush_tlb_kernel_range(start, end) __flush_tlb_range(0,start,end)
-
#endif
diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c
index f6448c7..cda6dbb 100644
--- a/arch/parisc/kernel/cache.c
+++ b/arch/parisc/kernel/cache.c
@@ -342,12 +342,15 @@
EXPORT_SYMBOL(flush_kernel_icache_range_asm);
#define FLUSH_THRESHOLD 0x80000 /* 0.5MB */
-int parisc_cache_flush_threshold __read_mostly = FLUSH_THRESHOLD;
+static unsigned long parisc_cache_flush_threshold __read_mostly = FLUSH_THRESHOLD;
+
+#define FLUSH_TLB_THRESHOLD (2*1024*1024) /* 2MB initial TLB threshold */
+static unsigned long parisc_tlb_flush_threshold __read_mostly = FLUSH_TLB_THRESHOLD;
void __init parisc_setup_cache_timing(void)
{
unsigned long rangetime, alltime;
- unsigned long size;
+ unsigned long size, start;
alltime = mfctl(16);
flush_data_cache();
@@ -364,14 +367,43 @@
/* Racy, but if we see an intermediate value, it's ok too... */
parisc_cache_flush_threshold = size * alltime / rangetime;
- parisc_cache_flush_threshold = (parisc_cache_flush_threshold + L1_CACHE_BYTES - 1) &~ (L1_CACHE_BYTES - 1);
+ parisc_cache_flush_threshold = L1_CACHE_ALIGN(parisc_cache_flush_threshold);
if (!parisc_cache_flush_threshold)
parisc_cache_flush_threshold = FLUSH_THRESHOLD;
if (parisc_cache_flush_threshold > cache_info.dc_size)
parisc_cache_flush_threshold = cache_info.dc_size;
- printk(KERN_INFO "Setting cache flush threshold to %x (%d CPUs online)\n", parisc_cache_flush_threshold, num_online_cpus());
+ printk(KERN_INFO "Setting cache flush threshold to %lu kB\n",
+ parisc_cache_flush_threshold/1024);
+
+ /* calculate TLB flush threshold */
+
+ alltime = mfctl(16);
+ flush_tlb_all();
+ alltime = mfctl(16) - alltime;
+
+ size = PAGE_SIZE;
+ start = (unsigned long) _text;
+ rangetime = mfctl(16);
+ while (start < (unsigned long) _end) {
+ flush_tlb_kernel_range(start, start + PAGE_SIZE);
+ start += PAGE_SIZE;
+ size += PAGE_SIZE;
+ }
+ rangetime = mfctl(16) - rangetime;
+
+ printk(KERN_DEBUG "Whole TLB flush %lu cycles, flushing %lu bytes took %lu cycles\n",
+ alltime, size, rangetime);
+
+ parisc_tlb_flush_threshold = size * alltime / rangetime;
+ parisc_tlb_flush_threshold *= num_online_cpus();
+ parisc_tlb_flush_threshold = PAGE_ALIGN(parisc_tlb_flush_threshold);
+ if (!parisc_tlb_flush_threshold)
+ parisc_tlb_flush_threshold = FLUSH_TLB_THRESHOLD;
+
+ printk(KERN_INFO "Setting TLB flush threshold to %lu kB\n",
+ parisc_tlb_flush_threshold/1024);
}
extern void purge_kernel_dcache_page_asm(unsigned long);
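The threshold arithmetic with illustrative numbers (made up for the
example; real values vary per machine): if flush_tlb_all() costs
alltime = 250,000 cycles and purging size = 2 MB of kernel text page by
page costs rangetime = 1,000,000 cycles, then

	threshold = size * alltime / rangetime;  /* 2 MB / 4 = 512 kB  */
	threshold *= num_online_cpus();          /* broadcasts scale up */
	threshold = PAGE_ALIGN(threshold);

and __flush_tlb_range() falls back to a full flush for any range of at
least that size.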
@@ -403,48 +435,45 @@
}
EXPORT_SYMBOL(copy_user_page);
-void purge_tlb_entries(struct mm_struct *mm, unsigned long addr)
+/* __flush_tlb_range()
+ *
+ * returns 1 if all TLBs were flushed.
+ */
+int __flush_tlb_range(unsigned long sid, unsigned long start,
+ unsigned long end)
{
- unsigned long flags;
+ unsigned long flags, size;
- /* Note: purge_tlb_entries can be called at startup with
- no context. */
-
- purge_tlb_start(flags);
- mtsp(mm->context, 1);
- pdtlb(addr);
- pitlb(addr);
- purge_tlb_end(flags);
-}
-EXPORT_SYMBOL(purge_tlb_entries);
-
-void __flush_tlb_range(unsigned long sid, unsigned long start,
- unsigned long end)
-{
- unsigned long npages;
-
- npages = ((end - (start & PAGE_MASK)) + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
- if (npages >= 512) /* 2MB of space: arbitrary, should be tuned */
+ size = (end - start);
+ if (size >= parisc_tlb_flush_threshold) {
flush_tlb_all();
- else {
- unsigned long flags;
+ return 1;
+ }
+ /* Purge TLB entries for small ranges using the pdtlb and
+ pitlb instructions. These instructions execute locally
+ but cause a purge request to be broadcast to other TLBs. */
+ if (likely(!split_tlb)) {
+ while (start < end) {
+ purge_tlb_start(flags);
+ mtsp(sid, 1);
+ pdtlb(start);
+ purge_tlb_end(flags);
+ start += PAGE_SIZE;
+ }
+ return 0;
+ }
+
+ /* split TLB case */
+ while (start < end) {
purge_tlb_start(flags);
mtsp(sid, 1);
- if (split_tlb) {
- while (npages--) {
- pdtlb(start);
- pitlb(start);
- start += PAGE_SIZE;
- }
- } else {
- while (npages--) {
- pdtlb(start);
- start += PAGE_SIZE;
- }
- }
+ pdtlb(start);
+ pitlb(start);
purge_tlb_end(flags);
+ start += PAGE_SIZE;
}
+ return 0;
}
static void cacheflush_h_tmp_function(void *dummy)
diff --git a/arch/parisc/kernel/entry.S b/arch/parisc/kernel/entry.S
index 7581961..c5ef408 100644
--- a/arch/parisc/kernel/entry.S
+++ b/arch/parisc/kernel/entry.S
@@ -45,7 +45,7 @@
.level 2.0
#endif
- .import pa_dbit_lock,data
+ .import pa_tlb_lock,data
/* space_to_prot macro creates a prot id from a space id */
@@ -420,8 +420,8 @@
SHLREG %r9,PxD_VALUE_SHIFT,\pmd
extru \va,31-PAGE_SHIFT,ASM_BITS_PER_PTE,\index
dep %r0,31,PAGE_SHIFT,\pmd /* clear offset */
- shladd \index,BITS_PER_PTE_ENTRY,\pmd,\pmd
- LDREG %r0(\pmd),\pte /* pmd is now pte */
+ shladd \index,BITS_PER_PTE_ENTRY,\pmd,\pmd /* pmd is now pte */
+ LDREG %r0(\pmd),\pte
bb,>=,n \pte,_PAGE_PRESENT_BIT,\fault
.endm
@@ -453,57 +453,53 @@
L2_ptep \pgd,\pte,\index,\va,\fault
.endm
- /* Acquire pa_dbit_lock lock. */
- .macro dbit_lock spc,tmp,tmp1
+ /* Acquire pa_tlb_lock lock and recheck page is still present. */
+ .macro tlb_lock spc,ptp,pte,tmp,tmp1,fault
#ifdef CONFIG_SMP
cmpib,COND(=),n 0,\spc,2f
- load32 PA(pa_dbit_lock),\tmp
+ load32 PA(pa_tlb_lock),\tmp
1: LDCW 0(\tmp),\tmp1
cmpib,COND(=) 0,\tmp1,1b
nop
+ LDREG 0(\ptp),\pte
+ bb,<,n \pte,_PAGE_PRESENT_BIT,2f
+ b \fault
+ stw \spc,0(\tmp)
2:
#endif
.endm
- /* Release pa_dbit_lock lock without reloading lock address. */
- .macro dbit_unlock0 spc,tmp
+ /* Release pa_tlb_lock lock without reloading lock address. */
+ .macro tlb_unlock0 spc,tmp
#ifdef CONFIG_SMP
or,COND(=) %r0,\spc,%r0
stw \spc,0(\tmp)
#endif
.endm
- /* Release pa_dbit_lock lock. */
- .macro dbit_unlock1 spc,tmp
+ /* Release pa_tlb_lock lock. */
+ .macro tlb_unlock1 spc,tmp
#ifdef CONFIG_SMP
- load32 PA(pa_dbit_lock),\tmp
- dbit_unlock0 \spc,\tmp
+ load32 PA(pa_tlb_lock),\tmp
+ tlb_unlock0 \spc,\tmp
#endif
.endm
/* Set the _PAGE_ACCESSED bit of the PTE. Be clever and
* don't needlessly dirty the cache line if it was already set */
- .macro update_ptep spc,ptep,pte,tmp,tmp1
-#ifdef CONFIG_SMP
- or,COND(=) %r0,\spc,%r0
- LDREG 0(\ptep),\pte
-#endif
+ .macro update_accessed ptp,pte,tmp,tmp1
ldi _PAGE_ACCESSED,\tmp1
or \tmp1,\pte,\tmp
and,COND(<>) \tmp1,\pte,%r0
- STREG \tmp,0(\ptep)
+ STREG \tmp,0(\ptp)
.endm
/* Set the dirty bit (and accessed bit). No need to be
* clever, this is only used from the dirty fault */
- .macro update_dirty spc,ptep,pte,tmp
-#ifdef CONFIG_SMP
- or,COND(=) %r0,\spc,%r0
- LDREG 0(\ptep),\pte
-#endif
+ .macro update_dirty ptp,pte,tmp
ldi _PAGE_ACCESSED|_PAGE_DIRTY,\tmp
or \tmp,\pte,\pte
- STREG \pte,0(\ptep)
+ STREG \pte,0(\ptp)
.endm
/* bitshift difference between a PFN (based on kernel's PAGE_SIZE)
@@ -1148,14 +1144,14 @@
L3_ptep ptp,pte,t0,va,dtlb_check_alias_20w
- dbit_lock spc,t0,t1
- update_ptep spc,ptp,pte,t0,t1
+ tlb_lock spc,ptp,pte,t0,t1,dtlb_check_alias_20w
+ update_accessed ptp,pte,t0,t1
make_insert_tlb spc,pte,prot
idtlbt pte,prot
- dbit_unlock1 spc,t0
+ tlb_unlock1 spc,t0
rfir
nop
@@ -1174,14 +1170,14 @@
L3_ptep ptp,pte,t0,va,nadtlb_check_alias_20w
- dbit_lock spc,t0,t1
- update_ptep spc,ptp,pte,t0,t1
+ tlb_lock spc,ptp,pte,t0,t1,nadtlb_check_alias_20w
+ update_accessed ptp,pte,t0,t1
make_insert_tlb spc,pte,prot
idtlbt pte,prot
- dbit_unlock1 spc,t0
+ tlb_unlock1 spc,t0
rfir
nop
@@ -1202,20 +1198,20 @@
L2_ptep ptp,pte,t0,va,dtlb_check_alias_11
- dbit_lock spc,t0,t1
- update_ptep spc,ptp,pte,t0,t1
+ tlb_lock spc,ptp,pte,t0,t1,dtlb_check_alias_11
+ update_accessed ptp,pte,t0,t1
make_insert_tlb_11 spc,pte,prot
- mfsp %sr1,t0 /* Save sr1 so we can use it in tlb inserts */
+ mfsp %sr1,t1 /* Save sr1 so we can use it in tlb inserts */
mtsp spc,%sr1
idtlba pte,(%sr1,va)
idtlbp prot,(%sr1,va)
- mtsp t0, %sr1 /* Restore sr1 */
- dbit_unlock1 spc,t0
+ mtsp t1, %sr1 /* Restore sr1 */
+ tlb_unlock1 spc,t0
rfir
nop
@@ -1235,21 +1231,20 @@
L2_ptep ptp,pte,t0,va,nadtlb_check_alias_11
- dbit_lock spc,t0,t1
- update_ptep spc,ptp,pte,t0,t1
+ tlb_lock spc,ptp,pte,t0,t1,nadtlb_check_alias_11
+ update_accessed ptp,pte,t0,t1
make_insert_tlb_11 spc,pte,prot
-
- mfsp %sr1,t0 /* Save sr1 so we can use it in tlb inserts */
+ mfsp %sr1,t1 /* Save sr1 so we can use it in tlb inserts */
mtsp spc,%sr1
idtlba pte,(%sr1,va)
idtlbp prot,(%sr1,va)
- mtsp t0, %sr1 /* Restore sr1 */
- dbit_unlock1 spc,t0
+ mtsp t1, %sr1 /* Restore sr1 */
+ tlb_unlock1 spc,t0
rfir
nop
@@ -1269,16 +1264,16 @@
L2_ptep ptp,pte,t0,va,dtlb_check_alias_20
- dbit_lock spc,t0,t1
- update_ptep spc,ptp,pte,t0,t1
+ tlb_lock spc,ptp,pte,t0,t1,dtlb_check_alias_20
+ update_accessed ptp,pte,t0,t1
make_insert_tlb spc,pte,prot
- f_extend pte,t0
+ f_extend pte,t1
idtlbt pte,prot
- dbit_unlock1 spc,t0
+ tlb_unlock1 spc,t0
rfir
nop
@@ -1297,16 +1292,16 @@
L2_ptep ptp,pte,t0,va,nadtlb_check_alias_20
- dbit_lock spc,t0,t1
- update_ptep spc,ptp,pte,t0,t1
+ tlb_lock spc,ptp,pte,t0,t1,nadtlb_check_alias_20
+ update_accessed ptp,pte,t0,t1
make_insert_tlb spc,pte,prot
- f_extend pte,t0
+ f_extend pte,t1
- idtlbt pte,prot
- dbit_unlock1 spc,t0
+ idtlbt pte,prot
+ tlb_unlock1 spc,t0
rfir
nop
@@ -1406,14 +1401,14 @@
L3_ptep ptp,pte,t0,va,itlb_fault
- dbit_lock spc,t0,t1
- update_ptep spc,ptp,pte,t0,t1
+ tlb_lock spc,ptp,pte,t0,t1,itlb_fault
+ update_accessed ptp,pte,t0,t1
make_insert_tlb spc,pte,prot
iitlbt pte,prot
- dbit_unlock1 spc,t0
+ tlb_unlock1 spc,t0
rfir
nop
@@ -1430,14 +1425,14 @@
L3_ptep ptp,pte,t0,va,naitlb_check_alias_20w
- dbit_lock spc,t0,t1
- update_ptep spc,ptp,pte,t0,t1
+ tlb_lock spc,ptp,pte,t0,t1,naitlb_check_alias_20w
+ update_accessed ptp,pte,t0,t1
make_insert_tlb spc,pte,prot
iitlbt pte,prot
- dbit_unlock1 spc,t0
+ tlb_unlock1 spc,t0
rfir
nop
@@ -1458,20 +1453,20 @@
L2_ptep ptp,pte,t0,va,itlb_fault
- dbit_lock spc,t0,t1
- update_ptep spc,ptp,pte,t0,t1
+ tlb_lock spc,ptp,pte,t0,t1,itlb_fault
+ update_accessed ptp,pte,t0,t1
make_insert_tlb_11 spc,pte,prot
- mfsp %sr1,t0 /* Save sr1 so we can use it in tlb inserts */
+ mfsp %sr1,t1 /* Save sr1 so we can use it in tlb inserts */
mtsp spc,%sr1
iitlba pte,(%sr1,va)
iitlbp prot,(%sr1,va)
- mtsp t0, %sr1 /* Restore sr1 */
- dbit_unlock1 spc,t0
+ mtsp t1, %sr1 /* Restore sr1 */
+ tlb_unlock1 spc,t0
rfir
nop
@@ -1482,20 +1477,20 @@
L2_ptep ptp,pte,t0,va,naitlb_check_alias_11
- dbit_lock spc,t0,t1
- update_ptep spc,ptp,pte,t0,t1
+ tlb_lock spc,ptp,pte,t0,t1,naitlb_check_alias_11
+ update_accessed ptp,pte,t0,t1
make_insert_tlb_11 spc,pte,prot
- mfsp %sr1,t0 /* Save sr1 so we can use it in tlb inserts */
+ mfsp %sr1,t1 /* Save sr1 so we can use it in tlb inserts */
mtsp spc,%sr1
iitlba pte,(%sr1,va)
iitlbp prot,(%sr1,va)
- mtsp t0, %sr1 /* Restore sr1 */
- dbit_unlock1 spc,t0
+ mtsp t1, %sr1 /* Restore sr1 */
+ tlb_unlock1 spc,t0
rfir
nop
@@ -1516,16 +1511,16 @@
L2_ptep ptp,pte,t0,va,itlb_fault
- dbit_lock spc,t0,t1
- update_ptep spc,ptp,pte,t0,t1
+ tlb_lock spc,ptp,pte,t0,t1,itlb_fault
+ update_accessed ptp,pte,t0,t1
make_insert_tlb spc,pte,prot
- f_extend pte,t0
+ f_extend pte,t1
iitlbt pte,prot
- dbit_unlock1 spc,t0
+ tlb_unlock1 spc,t0
rfir
nop
@@ -1536,16 +1531,16 @@
L2_ptep ptp,pte,t0,va,naitlb_check_alias_20
- dbit_lock spc,t0,t1
- update_ptep spc,ptp,pte,t0,t1
+ tlb_lock spc,ptp,pte,t0,t1,naitlb_check_alias_20
+ update_accessed ptp,pte,t0,t1
make_insert_tlb spc,pte,prot
- f_extend pte,t0
+ f_extend pte,t1
iitlbt pte,prot
- dbit_unlock1 spc,t0
+ tlb_unlock1 spc,t0
rfir
nop
@@ -1568,14 +1563,14 @@
L3_ptep ptp,pte,t0,va,dbit_fault
- dbit_lock spc,t0,t1
- update_dirty spc,ptp,pte,t1
+ tlb_lock spc,ptp,pte,t0,t1,dbit_fault
+ update_dirty ptp,pte,t1
make_insert_tlb spc,pte,prot
idtlbt pte,prot
- dbit_unlock0 spc,t0
+ tlb_unlock0 spc,t0
rfir
nop
#else
@@ -1588,8 +1583,8 @@
L2_ptep ptp,pte,t0,va,dbit_fault
- dbit_lock spc,t0,t1
- update_dirty spc,ptp,pte,t1
+ tlb_lock spc,ptp,pte,t0,t1,dbit_fault
+ update_dirty ptp,pte,t1
make_insert_tlb_11 spc,pte,prot
@@ -1600,8 +1595,8 @@
idtlbp prot,(%sr1,va)
mtsp t1, %sr1 /* Restore sr1 */
- dbit_unlock0 spc,t0
+ tlb_unlock0 spc,t0
rfir
nop
@@ -1612,16 +1607,16 @@
L2_ptep ptp,pte,t0,va,dbit_fault
- dbit_lock spc,t0,t1
- update_dirty spc,ptp,pte,t1
+ tlb_lock spc,ptp,pte,t0,t1,dbit_fault
+ update_dirty ptp,pte,t1
make_insert_tlb spc,pte,prot
f_extend pte,t1
- idtlbt pte,prot
- dbit_unlock0 spc,t0
+ idtlbt pte,prot
+ tlb_unlock0 spc,t0
rfir
nop
#endif
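
The hunks above replace each dbit_lock/update_ptep pair with a single tlb_lock/update_accessed (or update_dirty) sequence, so the spinlock is now held across both the PTE update and the TLB insertion instead of only the PTE write. A minimal C sketch of the protocol the assembly implements, assuming an SMP build (pa_tlb_lock and insert_tlb_entry() stand in for the real lock macros and the idtlbt/iitlbt instructions):

    /* Sketch only: the real code is hand-written assembly and the
     * lock/unlock steps are the tlb_lock/tlb_unlock macros used above. */
    static inline void fault_update_and_insert(pte_t *ptep, unsigned long va)
    {
            spin_lock(&pa_tlb_lock);            /* tlb_lock */
            set_pte(ptep, pte_mkyoung(*ptep));  /* update_accessed */
            insert_tlb_entry(*ptep, va);        /* idtlbt / iitlbt */
            spin_unlock(&pa_tlb_lock);          /* tlb_unlock1 */
    }

Holding the lock across the insert prevents a concurrent PTE clear-and-flush from racing in between the update and the TLB write, which is why the standalone pa_dbit_lock definition in traps.c below can be dropped.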
diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c
index 6548fd1..b99b39f 100644
--- a/arch/parisc/kernel/traps.c
+++ b/arch/parisc/kernel/traps.c
@@ -43,10 +43,6 @@
#include "../math-emu/math-emu.h" /* for handle_fpe() */
-#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
-DEFINE_SPINLOCK(pa_dbit_lock);
-#endif
-
static void parisc_show_stack(struct task_struct *task, unsigned long *sp,
struct pt_regs *regs);
diff --git a/arch/powerpc/kernel/idle_power7.S b/arch/powerpc/kernel/idle_power7.S
index ccde8f0..112ccf4 100644
--- a/arch/powerpc/kernel/idle_power7.S
+++ b/arch/powerpc/kernel/idle_power7.S
@@ -52,6 +52,22 @@
.text
/*
+ * Used by threads when the lock bit of core_idle_state is set.
+ * Threads will spin in HMT_LOW until the lock bit is cleared.
+ * r14 - pointer to core_idle_state
+ * r15 - used to load contents of core_idle_state
+ */
+
+core_idle_lock_held:
+ HMT_LOW
+3: lwz r15,0(r14)
+ andi. r15,r15,PNV_CORE_IDLE_LOCK_BIT
+ bne 3b
+ HMT_MEDIUM
+ lwarx r15,0,r14
+ blr
+
+/*
* Pass requested state in r3:
* r3 - PNV_THREAD_NAP/SLEEP/WINKLE
*
@@ -150,6 +166,10 @@
ld r14,PACA_CORE_IDLE_STATE_PTR(r13)
lwarx_loop1:
lwarx r15,0,r14
+
+ andi. r9,r15,PNV_CORE_IDLE_LOCK_BIT
+ bnel core_idle_lock_held
+
andc r15,r15,r7 /* Clear thread bit */
andi. r15,r15,PNV_CORE_IDLE_THREAD_BITS
@@ -294,7 +314,7 @@
* workaround undo code or resyncing timebase or restoring context
* In either case loop until the lock bit is cleared.
*/
- bne core_idle_lock_held
+ bnel core_idle_lock_held
cmpwi cr2,r15,0
lbz r4,PACA_SUBCORE_SIBLING_MASK(r13)
@@ -319,15 +339,6 @@
isync
b common_exit
-core_idle_lock_held:
- HMT_LOW
-core_idle_lock_loop:
- lwz r15,0(14)
- andi. r9,r15,PNV_CORE_IDLE_LOCK_BIT
- bne core_idle_lock_loop
- HMT_MEDIUM
- b lwarx_loop2
-
first_thread_in_subcore:
/* First thread in subcore to wakeup */
ori r15,r15,PNV_CORE_IDLE_LOCK_BIT
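
Calling the helper with bnel (branch and link) instead of bne lets it return to whichever lwarx loop invoked it, so one copy now serves both wakeup paths; it also reloads the word with a fresh lwarx reservation before returning, where the removed copy at the bottom of the file branched back to one hard-coded label. A rough C equivalent, with illustrative names:

    /* Spin at low thread priority while the lock bit is held, then
     * return the freshly reloaded word so the caller can retry its
     * store-conditional. The asm uses lwarx to also set a reservation. */
    static u32 core_idle_lock_held(u32 *core_idle_state)
    {
            u32 val;

            do {
                    cpu_relax();                    /* HMT_LOW */
                    val = READ_ONCE(*core_idle_state);
            } while (val & PNV_CORE_IDLE_LOCK_BIT);

            return val;                             /* HMT_MEDIUM, lwarx */
    }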
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index 6530f1b..37de90f 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -297,6 +297,8 @@
__this_cpu_inc(irq_stat.mce_exceptions);
+ add_taint(TAINT_MACHINE_CHECK, LOCKDEP_NOW_UNRELIABLE);
+
if (cur_cpu_spec && cur_cpu_spec->machine_check_early)
handled = cur_cpu_spec->machine_check_early(regs);
return handled;
diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
index 6d53597..a67c6d7 100644
--- a/arch/powerpc/mm/fault.c
+++ b/arch/powerpc/mm/fault.c
@@ -529,6 +529,10 @@
printk(KERN_ALERT "Unable to handle kernel paging request for "
"instruction fetch\n");
break;
+ case 0x600:
+ printk(KERN_ALERT "Unable to handle kernel paging request for "
+ "unaligned access at address 0x%08lx\n", regs->dar);
+ break;
default:
printk(KERN_ALERT "Unable to handle kernel paging request for "
"unknown fault\n");
diff --git a/arch/powerpc/perf/hv-24x7.c b/arch/powerpc/perf/hv-24x7.c
index ec2eb20..df95629 100644
--- a/arch/powerpc/perf/hv-24x7.c
+++ b/arch/powerpc/perf/hv-24x7.c
@@ -320,6 +320,8 @@
if (!attr)
return NULL;
+ sysfs_attr_init(&attr->attr.attr);
+
attr->var = str;
attr->attr.attr.name = name;
attr->attr.attr.mode = 0444;
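
Dynamically allocated sysfs attributes must be passed through sysfs_attr_init() so lockdep is given a valid key for the attribute; without it, kernels built with CONFIG_DEBUG_LOCK_ALLOC warn when the attribute is registered. The general pattern, sketched for a hypothetical show-only device attribute:

    struct device_attribute *attr;

    attr = kzalloc(sizeof(*attr), GFP_KERNEL);
    if (!attr)
            return NULL;

    sysfs_attr_init(&attr->attr);   /* required for dynamic attributes */
    attr->attr.name = name;
    attr->attr.mode = 0444;
    attr->show = my_show;           /* hypothetical callback */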
diff --git a/arch/powerpc/platforms/powernv/opal-elog.c b/arch/powerpc/platforms/powernv/opal-elog.c
index 4949ef0..37f959b 100644
--- a/arch/powerpc/platforms/powernv/opal-elog.c
+++ b/arch/powerpc/platforms/powernv/opal-elog.c
@@ -237,7 +237,7 @@
return elog;
}
-static void elog_work_fn(struct work_struct *work)
+static irqreturn_t elog_event(int irq, void *data)
{
__be64 size;
__be64 id;
@@ -251,7 +251,7 @@
rc = opal_get_elog_size(&id, &size, &type);
if (rc != OPAL_SUCCESS) {
pr_err("ELOG: OPAL log info read failed\n");
- return;
+ return IRQ_HANDLED;
}
elog_size = be64_to_cpu(size);
@@ -270,16 +270,10 @@
* entries.
*/
if (kset_find_obj(elog_kset, name))
- return;
+ return IRQ_HANDLED;
create_elog_obj(log_id, elog_size, elog_type);
-}
-static DECLARE_WORK(elog_work, elog_work_fn);
-
-static irqreturn_t elog_event(int irq, void *data)
-{
- schedule_work(&elog_work);
return IRQ_HANDLED;
}
@@ -304,8 +298,8 @@
return irq;
}
- rc = request_irq(irq, elog_event,
- IRQ_TYPE_LEVEL_HIGH, "opal-elog", NULL);
+ rc = request_threaded_irq(irq, NULL, elog_event,
+ IRQF_TRIGGER_HIGH | IRQF_ONESHOT, "opal-elog", NULL);
if (rc) {
pr_err("%s: Can't request OPAL event irq (%d)\n",
__func__, rc);
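
Folding the workqueue bounce into a threaded interrupt handler removes a scheduling hop: the thread function already runs in sleepable process context, so the ELOG code can allocate memory and call OPAL directly. IRQF_ONESHOT keeps the level-triggered line masked until the thread returns, which is required when no hard-irq handler is supplied. The shape of the conversion, with hypothetical names:

    /* No primary handler (NULL): the core masks the line and wakes the
     * thread; IRQF_ONESHOT unmasks only after my_event() returns. */
    static irqreturn_t my_event(int irq, void *data)
    {
            /* process context: may sleep, allocate, take mutexes */
            return IRQ_HANDLED;
    }

    rc = request_threaded_irq(irq, NULL, my_event,
                              IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
                              "my-event", NULL);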
diff --git a/arch/powerpc/platforms/powernv/opal-prd.c b/arch/powerpc/platforms/powernv/opal-prd.c
index 46cb3fe..4ece8e4 100644
--- a/arch/powerpc/platforms/powernv/opal-prd.c
+++ b/arch/powerpc/platforms/powernv/opal-prd.c
@@ -112,6 +112,7 @@
static int opal_prd_mmap(struct file *file, struct vm_area_struct *vma)
{
size_t addr, size;
+ pgprot_t page_prot;
int rc;
pr_devel("opal_prd_mmap(0x%016lx, 0x%016lx, 0x%lx, 0x%lx)\n",
@@ -125,13 +126,11 @@
if (!opal_prd_range_is_valid(addr, size))
return -EINVAL;
- vma->vm_page_prot = __pgprot(pgprot_val(phys_mem_access_prot(file,
- vma->vm_pgoff,
- size, vma->vm_page_prot))
- | _PAGE_SPECIAL);
+ page_prot = phys_mem_access_prot(file, vma->vm_pgoff,
+ size, vma->vm_page_prot);
rc = remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, size,
- vma->vm_page_prot);
+ page_prot);
return rc;
}
diff --git a/arch/powerpc/sysdev/ppc4xx_hsta_msi.c b/arch/powerpc/sysdev/ppc4xx_hsta_msi.c
index 2bc3367..87f9623 100644
--- a/arch/powerpc/sysdev/ppc4xx_hsta_msi.c
+++ b/arch/powerpc/sysdev/ppc4xx_hsta_msi.c
@@ -18,6 +18,7 @@
#include <linux/pci.h>
#include <linux/semaphore.h>
#include <asm/msi_bitmap.h>
+#include <asm/ppc-pci.h>
struct ppc4xx_hsta_msi {
struct device *dev;
diff --git a/arch/tile/lib/memcpy_user_64.c b/arch/tile/lib/memcpy_user_64.c
index 88c7016..97bbb60 100644
--- a/arch/tile/lib/memcpy_user_64.c
+++ b/arch/tile/lib/memcpy_user_64.c
@@ -28,7 +28,7 @@
#define _ST(p, inst, v) \
({ \
asm("1: " #inst " %0, %1;" \
- ".pushsection .coldtext.memcpy,\"ax\";" \
+ ".pushsection .coldtext,\"ax\";" \
"2: { move r0, %2; jrp lr };" \
".section __ex_table,\"a\";" \
".align 8;" \
@@ -41,7 +41,7 @@
({ \
unsigned long __v; \
asm("1: " #inst " %0, %1;" \
- ".pushsection .coldtext.memcpy,\"ax\";" \
+ ".pushsection .coldtext,\"ax\";" \
"2: { move r0, %2; jrp lr };" \
".section __ex_table,\"a\";" \
".align 8;" \
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 55bced1..3dbb7e7 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -254,6 +254,11 @@
config ARCH_SUPPORTS_DEBUG_PAGEALLOC
def_bool y
+config KASAN_SHADOW_OFFSET
+ hex
+ depends on KASAN
+ default 0xdffffc0000000000
+
config HAVE_INTEL_TXT
def_bool y
depends on INTEL_IOMMU && ACPI
@@ -2015,7 +2020,7 @@
To compile command line arguments into the kernel,
set this option to 'Y', then fill in the
- the boot arguments in CONFIG_CMDLINE.
+ boot arguments in CONFIG_CMDLINE.
Systems with fully functional boot loaders (i.e. non-embedded)
should leave this option set to 'N'.
diff --git a/arch/x86/include/asm/espfix.h b/arch/x86/include/asm/espfix.h
index 99efebb..ca3ce9a 100644
--- a/arch/x86/include/asm/espfix.h
+++ b/arch/x86/include/asm/espfix.h
@@ -9,7 +9,7 @@
DECLARE_PER_CPU_READ_MOSTLY(unsigned long, espfix_waddr);
extern void init_espfix_bsp(void);
-extern void init_espfix_ap(void);
+extern void init_espfix_ap(int cpu);
#endif /* CONFIG_X86_64 */
diff --git a/arch/x86/include/asm/kasan.h b/arch/x86/include/asm/kasan.h
index 8b22422..74a2a8d 100644
--- a/arch/x86/include/asm/kasan.h
+++ b/arch/x86/include/asm/kasan.h
@@ -14,15 +14,11 @@
#ifndef __ASSEMBLY__
-extern pte_t kasan_zero_pte[];
-extern pte_t kasan_zero_pmd[];
-extern pte_t kasan_zero_pud[];
-
#ifdef CONFIG_KASAN
-void __init kasan_map_early_shadow(pgd_t *pgd);
+void __init kasan_early_init(void);
void __init kasan_init(void);
#else
-static inline void kasan_map_early_shadow(pgd_t *pgd) { }
+static inline void kasan_early_init(void) { }
static inline void kasan_init(void) { }
#endif
diff --git a/arch/x86/kernel/apic/vector.c b/arch/x86/kernel/apic/vector.c
index 28eba2d..f813261 100644
--- a/arch/x86/kernel/apic/vector.c
+++ b/arch/x86/kernel/apic/vector.c
@@ -409,12 +409,6 @@
int irq, vector;
struct apic_chip_data *data;
- /*
- * vector_lock will make sure that we don't run into irq vector
- * assignments that might be happening on another cpu in parallel,
- * while we setup our initial vector to irq mappings.
- */
- raw_spin_lock(&vector_lock);
/* Mark the inuse vectors */
for_each_active_irq(irq) {
data = apic_chip_data(irq_get_irq_data(irq));
@@ -436,16 +430,16 @@
if (!cpumask_test_cpu(cpu, data->domain))
per_cpu(vector_irq, cpu)[vector] = VECTOR_UNDEFINED;
}
- raw_spin_unlock(&vector_lock);
}
/*
- * Setup the vector to irq mappings.
+ * Setup the vector to irq mappings. Must be called with vector_lock held.
*/
void setup_vector_irq(int cpu)
{
int irq;
+ lockdep_assert_held(&vector_lock);
/*
* On most of the platforms, legacy PIC delivers the interrupts on the
* boot cpu. But there are certain platforms where PIC interrupts are
diff --git a/arch/x86/kernel/early_printk.c b/arch/x86/kernel/early_printk.c
index 89427d8..eec40f5 100644
--- a/arch/x86/kernel/early_printk.c
+++ b/arch/x86/kernel/early_printk.c
@@ -175,7 +175,9 @@
}
if (*s) {
- if (kstrtoul(s, 0, &baud) < 0 || baud == 0)
+ baud = simple_strtoull(s, &e, 0);
+
+ if (baud == 0 || s == e)
baud = DEFAULT_BAUD;
}
diff --git a/arch/x86/kernel/espfix_64.c b/arch/x86/kernel/espfix_64.c
index f5d0730..ce95676 100644
--- a/arch/x86/kernel/espfix_64.c
+++ b/arch/x86/kernel/espfix_64.c
@@ -131,25 +131,24 @@
init_espfix_random();
/* The rest is the same as for any other processor */
- init_espfix_ap();
+ init_espfix_ap(0);
}
-void init_espfix_ap(void)
+void init_espfix_ap(int cpu)
{
- unsigned int cpu, page;
+ unsigned int page;
unsigned long addr;
pud_t pud, *pud_p;
pmd_t pmd, *pmd_p;
pte_t pte, *pte_p;
- int n;
+ int n, node;
void *stack_page;
pteval_t ptemask;
/* We only have to do this once... */
- if (likely(this_cpu_read(espfix_stack)))
+ if (likely(per_cpu(espfix_stack, cpu)))
return; /* Already initialized */
- cpu = smp_processor_id();
addr = espfix_base_addr(cpu);
page = cpu/ESPFIX_STACKS_PER_PAGE;
@@ -165,12 +164,15 @@
if (stack_page)
goto unlock_done;
+ node = cpu_to_node(cpu);
ptemask = __supported_pte_mask;
pud_p = &espfix_pud_page[pud_index(addr)];
pud = *pud_p;
if (!pud_present(pud)) {
- pmd_p = (pmd_t *)__get_free_page(PGALLOC_GFP);
+ struct page *page = alloc_pages_node(node, PGALLOC_GFP, 0);
+
+ pmd_p = (pmd_t *)page_address(page);
pud = __pud(__pa(pmd_p) | (PGTABLE_PROT & ptemask));
paravirt_alloc_pmd(&init_mm, __pa(pmd_p) >> PAGE_SHIFT);
for (n = 0; n < ESPFIX_PUD_CLONES; n++)
@@ -180,7 +182,9 @@
pmd_p = pmd_offset(&pud, addr);
pmd = *pmd_p;
if (!pmd_present(pmd)) {
- pte_p = (pte_t *)__get_free_page(PGALLOC_GFP);
+ struct page *page = alloc_pages_node(node, PGALLOC_GFP, 0);
+
+ pte_p = (pte_t *)page_address(page);
pmd = __pmd(__pa(pte_p) | (PGTABLE_PROT & ptemask));
paravirt_alloc_pte(&init_mm, __pa(pte_p) >> PAGE_SHIFT);
for (n = 0; n < ESPFIX_PMD_CLONES; n++)
@@ -188,7 +192,7 @@
}
pte_p = pte_offset_kernel(&pmd, addr);
- stack_page = (void *)__get_free_page(GFP_KERNEL);
+ stack_page = page_address(alloc_pages_node(node, GFP_KERNEL, 0));
pte = __pte(__pa(stack_page) | (__PAGE_KERNEL_RO & ptemask));
for (n = 0; n < ESPFIX_PTE_CLONES; n++)
set_pte(&pte_p[n*PTE_STRIDE], pte);
@@ -199,7 +203,7 @@
unlock_done:
mutex_unlock(&espfix_init_mutex);
done:
- this_cpu_write(espfix_stack, addr);
- this_cpu_write(espfix_waddr, (unsigned long)stack_page
- + (addr & ~PAGE_MASK));
+ per_cpu(espfix_stack, cpu) = addr;
+ per_cpu(espfix_waddr, cpu) = (unsigned long)stack_page
+ + (addr & ~PAGE_MASK);
}
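
Passing the target CPU into init_espfix_ap() lets the boot CPU set up the espfix area before the AP ever runs, which in turn allows the page-table and stack pages to be placed on the AP's home node rather than wherever the caller happens to execute. The node-local allocation idiom used above, sketched in isolation:

    /* Allocate a page on the cpu's home node and use its linear
     * address; error handling omitted for brevity. */
    int node = cpu_to_node(cpu);
    struct page *page = alloc_pages_node(node, GFP_KERNEL, 0);
    void *vaddr = page ? page_address(page) : NULL;

Because the initializing CPU is no longer the target CPU, the this_cpu_{read,write}() accessors also have to become per_cpu() accesses keyed by the explicit cpu argument, as the final hunk shows.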
diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c
index 5a46681..f129a9a 100644
--- a/arch/x86/kernel/head64.c
+++ b/arch/x86/kernel/head64.c
@@ -161,11 +161,12 @@
/* Kill off the identity-map trampoline */
reset_early_page_tables();
- kasan_map_early_shadow(early_level4_pgt);
-
- /* clear bss before set_intr_gate with early_idt_handler */
clear_bss();
+ clear_page(init_level4_pgt);
+
+ kasan_early_init();
+
for (i = 0; i < NUM_EXCEPTION_VECTORS; i++)
set_intr_gate(i, early_idt_handler_array[i]);
load_idt((const struct desc_ptr *)&idt_descr);
@@ -177,12 +178,9 @@
*/
load_ucode_bsp();
- clear_page(init_level4_pgt);
/* set init_level4_pgt kernel high mapping*/
init_level4_pgt[511] = early_level4_pgt[511];
- kasan_map_early_shadow(init_level4_pgt);
-
x86_64_start_reservations(real_mode_data);
}
diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
index e5c27f7..1d40ca8 100644
--- a/arch/x86/kernel/head_64.S
+++ b/arch/x86/kernel/head_64.S
@@ -516,38 +516,9 @@
/* This must match the first entry in level2_kernel_pgt */
.quad 0x0000000000000000
-#ifdef CONFIG_KASAN
-#define FILL(VAL, COUNT) \
- .rept (COUNT) ; \
- .quad (VAL) ; \
- .endr
-
-NEXT_PAGE(kasan_zero_pte)
- FILL(kasan_zero_page - __START_KERNEL_map + _KERNPG_TABLE, 512)
-NEXT_PAGE(kasan_zero_pmd)
- FILL(kasan_zero_pte - __START_KERNEL_map + _KERNPG_TABLE, 512)
-NEXT_PAGE(kasan_zero_pud)
- FILL(kasan_zero_pmd - __START_KERNEL_map + _KERNPG_TABLE, 512)
-
-#undef FILL
-#endif
-
-
#include "../../x86/xen/xen-head.S"
__PAGE_ALIGNED_BSS
NEXT_PAGE(empty_zero_page)
.skip PAGE_SIZE
-#ifdef CONFIG_KASAN
-/*
- * This page used as early shadow. We don't use empty_zero_page
- * at early stages, stack instrumentation could write some garbage
- * to this page.
- * Latter we reuse it as zero shadow for large ranges of memory
- * that allowed to access, but not instrumented by kasan
- * (vmalloc/vmemmap ...).
- */
-NEXT_PAGE(kasan_zero_page)
- .skip PAGE_SIZE
-#endif
diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
index 88b36648..c7dfe1b 100644
--- a/arch/x86/kernel/irq.c
+++ b/arch/x86/kernel/irq.c
@@ -347,14 +347,22 @@
if (!desc)
continue;
+ /*
+ * Protect against concurrent action removal,
+ * affinity changes etc.
+ */
+ raw_spin_lock(&desc->lock);
data = irq_desc_get_irq_data(desc);
cpumask_copy(&affinity_new, data->affinity);
cpumask_clear_cpu(this_cpu, &affinity_new);
/* Do not count inactive or per-cpu irqs. */
- if (!irq_has_action(irq) || irqd_is_per_cpu(data))
+ if (!irq_has_action(irq) || irqd_is_per_cpu(data)) {
+ raw_spin_unlock(&desc->lock);
continue;
+ }
+ raw_spin_unlock(&desc->lock);
/*
* A single irq may be mapped to multiple
* cpu's vector_irq[] (for example IOAPIC cluster
@@ -385,6 +393,9 @@
* vector. If the vector is marked in the used vectors
* bitmap or an irq is assigned to it, we don't count
* it as available.
+ *
+ * As this is an inaccurate snapshot anyway, we can do
+ * this w/o holding vector_lock.
*/
for (vector = FIRST_EXTERNAL_VECTOR;
vector < first_system_vector; vector++) {
@@ -486,6 +497,11 @@
*/
mdelay(1);
+ /*
+ * We can walk the vector array of this cpu without holding
+ * vector_lock because the cpu is already marked !online, so
+ * nothing else will touch it.
+ */
for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) {
unsigned int irr;
@@ -497,9 +513,9 @@
irq = __this_cpu_read(vector_irq[vector]);
desc = irq_to_desc(irq);
+ raw_spin_lock(&desc->lock);
data = irq_desc_get_irq_data(desc);
chip = irq_data_get_irq_chip(data);
- raw_spin_lock(&desc->lock);
if (chip->irq_retrigger) {
chip->irq_retrigger(data);
__this_cpu_write(vector_irq[vector], VECTOR_RETRIGGERED);
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 8add66b..d3010aa 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -171,11 +171,6 @@
apic_ap_setup();
/*
- * Need to setup vector mappings before we enable interrupts.
- */
- setup_vector_irq(smp_processor_id());
-
- /*
* Save our processor parameters. Note: this information
* is needed for clock calibration.
*/
@@ -239,18 +234,13 @@
check_tsc_sync_target();
/*
- * Enable the espfix hack for this CPU
- */
-#ifdef CONFIG_X86_ESPFIX64
- init_espfix_ap();
-#endif
-
- /*
- * We need to hold vector_lock so there the set of online cpus
- * does not change while we are assigning vectors to cpus. Holding
- * this lock ensures we don't half assign or remove an irq from a cpu.
+ * Lock vector_lock and initialize the vectors on this cpu
+ * before setting the cpu online. We must set it online with
+ * vector_lock held to prevent a concurrent setup/teardown
+ * from seeing a half valid vector space.
*/
lock_vector_lock();
+ setup_vector_irq(smp_processor_id());
set_cpu_online(smp_processor_id(), true);
unlock_vector_lock();
cpu_set_state_online(smp_processor_id());
@@ -854,6 +844,13 @@
initial_code = (unsigned long)start_secondary;
stack_start = idle->thread.sp;
+ /*
+ * Enable the espfix hack for this CPU
+ */
+#ifdef CONFIG_X86_ESPFIX64
+ init_espfix_ap(cpu);
+#endif
+
/* So we see what's up */
announce_cpu(cpu, apicid);
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index 5054497..7437b41 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -598,10 +598,19 @@
if (!pit_expect_msb(0xff-i, &delta, &d2))
break;
+ delta -= tsc;
+
+ /*
+ * Extrapolate the error and fail fast if the error will
+ * never be below 500 ppm.
+ */
+ if (i == 1 &&
+ d1 + d2 >= (delta * MAX_QUICK_PIT_ITERATIONS) >> 11)
+ return 0;
+
/*
* Iterate until the error is less than 500 ppm
*/
- delta -= tsc;
if (d1+d2 >= delta >> 11)
continue;
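
The acceptance test d1 + d2 < delta >> 11 bounds the measurement error at 1/2048 of the elapsed TSC delta, roughly 488 ppm. Since delta grows about linearly with the iteration count, the bound reachable after all MAX_QUICK_PIT_ITERATIONS iterations can already be predicted at i == 1; if the error can never fit under it, the new check gives up immediately instead of burning the whole calibration budget. The criterion, restated as a sketch:

    /* Accept when the PIT read uncertainty is under ~500 ppm of the
     * measured TSC delta; 1/2048 ~= 488 ppm. */
    static bool tsc_delta_ok(u64 d1, u64 d2, u64 delta)
    {
            return d1 + d2 < (delta >> 11);
    }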
diff --git a/arch/x86/lib/usercopy.c b/arch/x86/lib/usercopy.c
index ddf9ecb..e342586 100644
--- a/arch/x86/lib/usercopy.c
+++ b/arch/x86/lib/usercopy.c
@@ -20,7 +20,7 @@
unsigned long ret;
if (__range_not_ok(from, n, TASK_SIZE))
- return 0;
+ return n;
/*
* Even though this function is typically called from NMI/IRQ context
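
copy_from_user_nmi() follows the copy_from_user() convention of returning the number of bytes it failed to copy, with 0 meaning complete success. Returning 0 for an invalid range therefore told callers the copy had fully succeeded; the fix returns n, i.e. nothing was copied. The caller-side view:

    /* ret is the count of bytes NOT copied; ret == n means total failure. */
    unsigned long ret = copy_from_user_nmi(dst, user_ptr, n);
    if (ret)
            truncate_sample(n - ret);  /* hypothetical partial-copy handler */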
diff --git a/arch/x86/mm/kasan_init_64.c b/arch/x86/mm/kasan_init_64.c
index 4860906..e1840f3 100644
--- a/arch/x86/mm/kasan_init_64.c
+++ b/arch/x86/mm/kasan_init_64.c
@@ -1,3 +1,4 @@
+#define pr_fmt(fmt) "kasan: " fmt
#include <linux/bootmem.h>
#include <linux/kasan.h>
#include <linux/kdebug.h>
@@ -11,7 +12,19 @@
extern pgd_t early_level4_pgt[PTRS_PER_PGD];
extern struct range pfn_mapped[E820_X_MAX];
-extern unsigned char kasan_zero_page[PAGE_SIZE];
+static pud_t kasan_zero_pud[PTRS_PER_PUD] __page_aligned_bss;
+static pmd_t kasan_zero_pmd[PTRS_PER_PMD] __page_aligned_bss;
+static pte_t kasan_zero_pte[PTRS_PER_PTE] __page_aligned_bss;
+
+/*
+ * This page is used as the early shadow. We don't use empty_zero_page
+ * at early stages because stack instrumentation could write some garbage
+ * to this page.
+ * Later we reuse it as the zero shadow for large ranges of memory
+ * that are allowed to be accessed but are not instrumented by kasan
+ * (vmalloc/vmemmap ...).
+ */
+static unsigned char kasan_zero_page[PAGE_SIZE] __page_aligned_bss;
static int __init map_range(struct range *range)
{
@@ -36,7 +49,7 @@
pgd_clear(pgd_offset_k(start));
}
-void __init kasan_map_early_shadow(pgd_t *pgd)
+static void __init kasan_map_early_shadow(pgd_t *pgd)
{
int i;
unsigned long start = KASAN_SHADOW_START;
@@ -73,7 +86,7 @@
while (IS_ALIGNED(addr, PMD_SIZE) && addr + PMD_SIZE <= end) {
WARN_ON(!pmd_none(*pmd));
set_pmd(pmd, __pmd(__pa_nodebug(kasan_zero_pte)
- | __PAGE_KERNEL_RO));
+ | _KERNPG_TABLE));
addr += PMD_SIZE;
pmd = pmd_offset(pud, addr);
}
@@ -99,7 +112,7 @@
while (IS_ALIGNED(addr, PUD_SIZE) && addr + PUD_SIZE <= end) {
WARN_ON(!pud_none(*pud));
set_pud(pud, __pud(__pa_nodebug(kasan_zero_pmd)
- | __PAGE_KERNEL_RO));
+ | _KERNPG_TABLE));
addr += PUD_SIZE;
pud = pud_offset(pgd, addr);
}
@@ -124,7 +137,7 @@
while (IS_ALIGNED(addr, PGDIR_SIZE) && addr + PGDIR_SIZE <= end) {
WARN_ON(!pgd_none(*pgd));
set_pgd(pgd, __pgd(__pa_nodebug(kasan_zero_pud)
- | __PAGE_KERNEL_RO));
+ | _KERNPG_TABLE));
addr += PGDIR_SIZE;
pgd = pgd_offset_k(addr);
}
@@ -166,6 +179,26 @@
};
#endif
+void __init kasan_early_init(void)
+{
+ int i;
+ pteval_t pte_val = __pa_nodebug(kasan_zero_page) | __PAGE_KERNEL;
+ pmdval_t pmd_val = __pa_nodebug(kasan_zero_pte) | _KERNPG_TABLE;
+ pudval_t pud_val = __pa_nodebug(kasan_zero_pmd) | _KERNPG_TABLE;
+
+ for (i = 0; i < PTRS_PER_PTE; i++)
+ kasan_zero_pte[i] = __pte(pte_val);
+
+ for (i = 0; i < PTRS_PER_PMD; i++)
+ kasan_zero_pmd[i] = __pmd(pmd_val);
+
+ for (i = 0; i < PTRS_PER_PUD; i++)
+ kasan_zero_pud[i] = __pud(pud_val);
+
+ kasan_map_early_shadow(early_level4_pgt);
+ kasan_map_early_shadow(init_level4_pgt);
+}
+
void __init kasan_init(void)
{
int i;
@@ -176,6 +209,7 @@
memcpy(early_level4_pgt, init_level4_pgt, sizeof(early_level4_pgt));
load_cr3(early_level4_pgt);
+ __flush_tlb_all();
clear_pgds(KASAN_SHADOW_START, KASAN_SHADOW_END);
@@ -202,5 +236,8 @@
memset(kasan_zero_page, 0, PAGE_SIZE);
load_cr3(init_level4_pgt);
+ __flush_tlb_all();
init_task.kasan_depth = 0;
+
+ pr_info("Kernel address sanitizer initialized\n");
}
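
With the zero-shadow tables moved into .bss and populated at runtime, the early shadow no longer needs assembly-built page tables, and the intermediate levels become writable (_KERNPG_TABLE rather than __PAGE_KERNEL_RO), which later population requires. The shadow geometry itself is unchanged and is anchored by the new CONFIG_KASAN_SHADOW_OFFSET value; the standard x86-64 mapping, for reference:

    /* One shadow byte covers 8 bytes of address space; the offset is
     * the Kconfig constant 0xdffffc0000000000 added above. */
    static inline void *kasan_mem_to_shadow(const void *addr)
    {
            return (void *)(((unsigned long)addr >> 3)  /* scale shift */
                            + KASAN_SHADOW_OFFSET);
    }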
diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c
index 569ee09..46b58ab 100644
--- a/drivers/acpi/acpi_lpss.c
+++ b/drivers/acpi/acpi_lpss.c
@@ -352,13 +352,16 @@
pdata->mmio_size = resource_size(rentry->res);
pdata->mmio_base = ioremap(rentry->res->start,
pdata->mmio_size);
- if (!pdata->mmio_base)
- goto err_out;
break;
}
acpi_dev_free_resource_list(&resource_list);
+ if (!pdata->mmio_base) {
+ ret = -ENOMEM;
+ goto err_out;
+ }
+
pdata->dev_desc = dev_desc;
if (dev_desc->setup)
diff --git a/drivers/acpi/nfit.c b/drivers/acpi/nfit.c
index 2161fa1..628a42c 100644
--- a/drivers/acpi/nfit.c
+++ b/drivers/acpi/nfit.c
@@ -18,6 +18,7 @@
#include <linux/list.h>
#include <linux/acpi.h>
#include <linux/sort.h>
+#include <linux/pmem.h>
#include <linux/io.h>
#include "nfit.h"
@@ -305,6 +306,23 @@
return true;
}
+static bool add_flush(struct acpi_nfit_desc *acpi_desc,
+ struct acpi_nfit_flush_address *flush)
+{
+ struct device *dev = acpi_desc->dev;
+ struct nfit_flush *nfit_flush = devm_kzalloc(dev, sizeof(*nfit_flush),
+ GFP_KERNEL);
+
+ if (!nfit_flush)
+ return false;
+ INIT_LIST_HEAD(&nfit_flush->list);
+ nfit_flush->flush = flush;
+ list_add_tail(&nfit_flush->list, &acpi_desc->flushes);
+ dev_dbg(dev, "%s: nfit_flush handle: %d hint_count: %d\n", __func__,
+ flush->device_handle, flush->hint_count);
+ return true;
+}
+
static void *add_table(struct acpi_nfit_desc *acpi_desc, void *table,
const void *end)
{
@@ -338,7 +356,8 @@
return err;
break;
case ACPI_NFIT_TYPE_FLUSH_ADDRESS:
- dev_dbg(dev, "%s: flush\n", __func__);
+ if (!add_flush(acpi_desc, table))
+ return err;
break;
case ACPI_NFIT_TYPE_SMBIOS:
dev_dbg(dev, "%s: smbios\n", __func__);
@@ -389,6 +408,7 @@
{
u16 dcr = __to_nfit_memdev(nfit_mem)->region_index;
struct nfit_memdev *nfit_memdev;
+ struct nfit_flush *nfit_flush;
struct nfit_dcr *nfit_dcr;
struct nfit_bdw *nfit_bdw;
struct nfit_idt *nfit_idt;
@@ -442,6 +462,14 @@
nfit_mem->idt_bdw = nfit_idt->idt;
break;
}
+
+ list_for_each_entry(nfit_flush, &acpi_desc->flushes, list) {
+ if (nfit_flush->flush->device_handle !=
+ nfit_memdev->memdev->device_handle)
+ continue;
+ nfit_mem->nfit_flush = nfit_flush;
+ break;
+ }
break;
}
@@ -978,6 +1006,24 @@
return mmio->base_offset + line_offset + table_offset + sub_line_offset;
}
+static void wmb_blk(struct nfit_blk *nfit_blk)
+{
+
+ if (nfit_blk->nvdimm_flush) {
+ /*
+ * The first wmb() is needed to 'sfence' all previous writes
+ * such that they are architecturally visible for the platform
+ * buffer flush. Note that we've already arranged for pmem
+ * writes to avoid the cache via arch_memcpy_to_pmem(). The
+ * final wmb() ensures ordering for the NVDIMM flush write.
+ */
+ wmb();
+ writeq(1, nfit_blk->nvdimm_flush);
+ wmb();
+ } else
+ wmb_pmem();
+}
+
static u64 read_blk_stat(struct nfit_blk *nfit_blk, unsigned int bw)
{
struct nfit_blk_mmio *mmio = &nfit_blk->mmio[DCR];
@@ -1012,7 +1058,10 @@
offset = to_interleave_offset(offset, mmio);
writeq(cmd, mmio->base + offset);
- /* FIXME: conditionally perform read-back if mandated by firmware */
+ wmb_blk(nfit_blk);
+
+ if (nfit_blk->dimm_flags & ND_BLK_DCR_LATCH)
+ readq(mmio->base + offset);
}
static int acpi_nfit_blk_single_io(struct nfit_blk *nfit_blk,
@@ -1026,7 +1075,6 @@
base_offset = nfit_blk->bdw_offset + dpa % L1_CACHE_BYTES
+ lane * mmio->size;
- /* TODO: non-temporal access, flush hints, cache management etc... */
write_blk_ctl(nfit_blk, lane, dpa, len, rw);
while (len) {
unsigned int c;
@@ -1045,13 +1093,19 @@
}
if (rw)
- memcpy(mmio->aperture + offset, iobuf + copied, c);
+ memcpy_to_pmem(mmio->aperture + offset,
+ iobuf + copied, c);
else
- memcpy(iobuf + copied, mmio->aperture + offset, c);
+ memcpy_from_pmem(iobuf + copied,
+ mmio->aperture + offset, c);
copied += c;
len -= c;
}
+
+ if (rw)
+ wmb_blk(nfit_blk);
+
rc = read_blk_stat(nfit_blk, lane) ? -EIO : 0;
return rc;
}
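
Switching the aperture copies to memcpy_to_pmem()/memcpy_from_pmem() and mapping the aperture write-combining means data bypasses the CPU cache, so what remains is making the writes durable on the DIMM. wmb_blk() handles that: when the NFIT publishes a flush-hint address for the DIMM it writes the hint (a doorbell that drains the write-pending queue), otherwise it falls back to wmb_pmem(). The resulting write-path ordering, sketched:

    /* Durability sequence for one BLK write, mirroring the code above. */
    memcpy_to_pmem(aperture + offset, buf, len); /* non-temporal copy   */
    wmb();                        /* fence writes before the hint write */
    writeq(1, nvdimm_flush);      /* flush-hint doorbell, if published  */
    wmb();                        /* order the doorbell itself          */

The ND_BLK_DCR_LATCH read-back after writing the control register serves the related purpose of forcing the command write to post before data moves through the window.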
@@ -1124,7 +1178,7 @@
}
static void __iomem *__nfit_spa_map(struct acpi_nfit_desc *acpi_desc,
- struct acpi_nfit_system_address *spa)
+ struct acpi_nfit_system_address *spa, enum spa_map_type type)
{
resource_size_t start = spa->address;
resource_size_t n = spa->length;
@@ -1152,8 +1206,15 @@
if (!res)
goto err_mem;
- /* TODO: cacheability based on the spa type */
- spa_map->iomem = ioremap_nocache(start, n);
+ if (type == SPA_MAP_APERTURE) {
+ /*
+ * TODO: memremap_pmem() support, but that requires cache
+ * flushing when the aperture is moved.
+ */
+ spa_map->iomem = ioremap_wc(start, n);
+ } else
+ spa_map->iomem = ioremap_nocache(start, n);
+
if (!spa_map->iomem)
goto err_map;
@@ -1171,6 +1232,7 @@
* nfit_spa_map - interleave-aware managed-mappings of acpi_nfit_system_address ranges
* @nvdimm_bus: NFIT-bus that provided the spa table entry
* @nfit_spa: spa table to map
+ * @type: aperture or control region
*
* In the case where block-data-window apertures and
* dimm-control-regions are interleaved they will end up sharing a
@@ -1180,12 +1242,12 @@
* unbound.
*/
static void __iomem *nfit_spa_map(struct acpi_nfit_desc *acpi_desc,
- struct acpi_nfit_system_address *spa)
+ struct acpi_nfit_system_address *spa, enum spa_map_type type)
{
void __iomem *iomem;
mutex_lock(&acpi_desc->spa_map_mutex);
- iomem = __nfit_spa_map(acpi_desc, spa);
+ iomem = __nfit_spa_map(acpi_desc, spa, type);
mutex_unlock(&acpi_desc->spa_map_mutex);
return iomem;
@@ -1206,12 +1268,35 @@
return 0;
}
+static int acpi_nfit_blk_get_flags(struct nvdimm_bus_descriptor *nd_desc,
+ struct nvdimm *nvdimm, struct nfit_blk *nfit_blk)
+{
+ struct nd_cmd_dimm_flags flags;
+ int rc;
+
+ memset(&flags, 0, sizeof(flags));
+ rc = nd_desc->ndctl(nd_desc, nvdimm, ND_CMD_DIMM_FLAGS, &flags,
+ sizeof(flags));
+
+ if (rc >= 0 && flags.status == 0)
+ nfit_blk->dimm_flags = flags.flags;
+ else if (rc == -ENOTTY) {
+ /* fall back to a conservative default */
+ nfit_blk->dimm_flags = ND_BLK_DCR_LATCH;
+ rc = 0;
+ } else
+ rc = -ENXIO;
+
+ return rc;
+}
+
static int acpi_nfit_blk_region_enable(struct nvdimm_bus *nvdimm_bus,
struct device *dev)
{
struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus);
struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
struct nd_blk_region *ndbr = to_nd_blk_region(dev);
+ struct nfit_flush *nfit_flush;
struct nfit_blk_mmio *mmio;
struct nfit_blk *nfit_blk;
struct nfit_mem *nfit_mem;
@@ -1223,8 +1308,8 @@
if (!nfit_mem || !nfit_mem->dcr || !nfit_mem->bdw) {
dev_dbg(dev, "%s: missing%s%s%s\n", __func__,
nfit_mem ? "" : " nfit_mem",
- nfit_mem->dcr ? "" : " dcr",
- nfit_mem->bdw ? "" : " bdw");
+ (nfit_mem && nfit_mem->dcr) ? "" : " dcr",
+ (nfit_mem && nfit_mem->bdw) ? "" : " bdw");
return -ENXIO;
}
@@ -1237,7 +1322,8 @@
/* map block aperture memory */
nfit_blk->bdw_offset = nfit_mem->bdw->offset;
mmio = &nfit_blk->mmio[BDW];
- mmio->base = nfit_spa_map(acpi_desc, nfit_mem->spa_bdw);
+ mmio->base = nfit_spa_map(acpi_desc, nfit_mem->spa_bdw,
+ SPA_MAP_APERTURE);
if (!mmio->base) {
dev_dbg(dev, "%s: %s failed to map bdw\n", __func__,
nvdimm_name(nvdimm));
@@ -1259,7 +1345,8 @@
nfit_blk->cmd_offset = nfit_mem->dcr->command_offset;
nfit_blk->stat_offset = nfit_mem->dcr->status_offset;
mmio = &nfit_blk->mmio[DCR];
- mmio->base = nfit_spa_map(acpi_desc, nfit_mem->spa_dcr);
+ mmio->base = nfit_spa_map(acpi_desc, nfit_mem->spa_dcr,
+ SPA_MAP_CONTROL);
if (!mmio->base) {
dev_dbg(dev, "%s: %s failed to map dcr\n", __func__,
nvdimm_name(nvdimm));
@@ -1277,6 +1364,24 @@
return rc;
}
+ rc = acpi_nfit_blk_get_flags(nd_desc, nvdimm, nfit_blk);
+ if (rc < 0) {
+ dev_dbg(dev, "%s: %s failed get DIMM flags\n",
+ __func__, nvdimm_name(nvdimm));
+ return rc;
+ }
+
+ nfit_flush = nfit_mem->nfit_flush;
+ if (nfit_flush && nfit_flush->flush->hint_count != 0) {
+ nfit_blk->nvdimm_flush = devm_ioremap_nocache(dev,
+ nfit_flush->flush->hint_address[0], 8);
+ if (!nfit_blk->nvdimm_flush)
+ return -ENOMEM;
+ }
+
+ if (!arch_has_pmem_api() && !nfit_blk->nvdimm_flush)
+ dev_warn(dev, "unable to guarantee persistence of writes\n");
+
if (mmio->line_size == 0)
return 0;
@@ -1459,6 +1564,7 @@
INIT_LIST_HEAD(&acpi_desc->dcrs);
INIT_LIST_HEAD(&acpi_desc->bdws);
INIT_LIST_HEAD(&acpi_desc->idts);
+ INIT_LIST_HEAD(&acpi_desc->flushes);
INIT_LIST_HEAD(&acpi_desc->memdevs);
INIT_LIST_HEAD(&acpi_desc->dimms);
mutex_init(&acpi_desc->spa_map_mutex);
diff --git a/drivers/acpi/nfit.h b/drivers/acpi/nfit.h
index 81f2e8c..79b6d83 100644
--- a/drivers/acpi/nfit.h
+++ b/drivers/acpi/nfit.h
@@ -40,6 +40,10 @@
NFIT_UUID_MAX,
};
+enum {
+ ND_BLK_DCR_LATCH = 2,
+};
+
struct nfit_spa {
struct acpi_nfit_system_address *spa;
struct list_head list;
@@ -60,6 +64,11 @@
struct list_head list;
};
+struct nfit_flush {
+ struct acpi_nfit_flush_address *flush;
+ struct list_head list;
+};
+
struct nfit_memdev {
struct acpi_nfit_memory_map *memdev;
struct list_head list;
@@ -77,6 +86,7 @@
struct acpi_nfit_system_address *spa_bdw;
struct acpi_nfit_interleave *idt_dcr;
struct acpi_nfit_interleave *idt_bdw;
+ struct nfit_flush *nfit_flush;
struct list_head list;
struct acpi_device *adev;
unsigned long dsm_mask;
@@ -88,6 +98,7 @@
struct mutex spa_map_mutex;
struct list_head spa_maps;
struct list_head memdevs;
+ struct list_head flushes;
struct list_head dimms;
struct list_head spas;
struct list_head dcrs;
@@ -109,7 +120,7 @@
struct nfit_blk_mmio {
union {
void __iomem *base;
- void *aperture;
+ void __pmem *aperture;
};
u64 size;
u64 base_offset;
@@ -123,6 +134,13 @@
u64 bdw_offset; /* post interleave offset */
u64 stat_offset;
u64 cmd_offset;
+ void __iomem *nvdimm_flush;
+ u32 dimm_flags;
+};
+
+enum spa_map_type {
+ SPA_MAP_CONTROL,
+ SPA_MAP_APERTURE,
};
struct nfit_spa_mapping {
diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
index c262e4a..3b8963f 100644
--- a/drivers/acpi/osl.c
+++ b/drivers/acpi/osl.c
@@ -175,10 +175,14 @@
if (!addr || !length)
return;
- acpi_reserve_region(addr, length, gas->space_id, 0, desc);
+ /* Resources are never freed */
+ if (gas->space_id == ACPI_ADR_SPACE_SYSTEM_IO)
+ request_region(addr, length, desc);
+ else if (gas->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
+ request_mem_region(addr, length, desc);
}
-static void __init acpi_reserve_resources(void)
+static int __init acpi_reserve_resources(void)
{
acpi_request_region(&acpi_gbl_FADT.xpm1a_event_block, acpi_gbl_FADT.pm1_event_length,
"ACPI PM1a_EVT_BLK");
@@ -207,7 +211,10 @@
if (!(acpi_gbl_FADT.gpe1_block_length & 0x1))
acpi_request_region(&acpi_gbl_FADT.xgpe1_block,
acpi_gbl_FADT.gpe1_block_length, "ACPI GPE1_BLK");
+
+ return 0;
}
+fs_initcall_sync(acpi_reserve_resources);
void acpi_os_printf(const char *fmt, ...)
{
@@ -1862,7 +1869,6 @@
acpi_status __init acpi_os_initialize1(void)
{
- acpi_reserve_resources();
kacpid_wq = alloc_workqueue("kacpid", 0, 1);
kacpi_notify_wq = alloc_workqueue("kacpi_notify", 0, 1);
kacpi_hotplug_wq = alloc_ordered_workqueue("kacpi_hotplug", 0);
diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c
index 10561ce..8244f01 100644
--- a/drivers/acpi/resource.c
+++ b/drivers/acpi/resource.c
@@ -26,7 +26,6 @@
#include <linux/device.h>
#include <linux/export.h>
#include <linux/ioport.h>
-#include <linux/list.h>
#include <linux/slab.h>
#ifdef CONFIG_X86
@@ -622,164 +621,3 @@
return (type & types) ? 0 : 1;
}
EXPORT_SYMBOL_GPL(acpi_dev_filter_resource_type);
-
-struct reserved_region {
- struct list_head node;
- u64 start;
- u64 end;
-};
-
-static LIST_HEAD(reserved_io_regions);
-static LIST_HEAD(reserved_mem_regions);
-
-static int request_range(u64 start, u64 end, u8 space_id, unsigned long flags,
- char *desc)
-{
- unsigned int length = end - start + 1;
- struct resource *res;
-
- res = space_id == ACPI_ADR_SPACE_SYSTEM_IO ?
- request_region(start, length, desc) :
- request_mem_region(start, length, desc);
- if (!res)
- return -EIO;
-
- res->flags &= ~flags;
- return 0;
-}
-
-static int add_region_before(u64 start, u64 end, u8 space_id,
- unsigned long flags, char *desc,
- struct list_head *head)
-{
- struct reserved_region *reg;
- int error;
-
- reg = kmalloc(sizeof(*reg), GFP_KERNEL);
- if (!reg)
- return -ENOMEM;
-
- error = request_range(start, end, space_id, flags, desc);
- if (error) {
- kfree(reg);
- return error;
- }
-
- reg->start = start;
- reg->end = end;
- list_add_tail(®->node, head);
- return 0;
-}
-
-/**
- * acpi_reserve_region - Reserve an I/O or memory region as a system resource.
- * @start: Starting address of the region.
- * @length: Length of the region.
- * @space_id: Identifier of address space to reserve the region from.
- * @flags: Resource flags to clear for the region after requesting it.
- * @desc: Region description (for messages).
- *
- * Reserve an I/O or memory region as a system resource to prevent others from
- * using it. If the new region overlaps with one of the regions (in the given
- * address space) already reserved by this routine, only the non-overlapping
- * parts of it will be reserved.
- *
- * Returned is either 0 (success) or a negative error code indicating a resource
- * reservation problem. It is the code of the first encountered error, but the
- * routine doesn't abort until it has attempted to request all of the parts of
- * the new region that don't overlap with other regions reserved previously.
- *
- * The resources requested by this routine are never released.
- */
-int acpi_reserve_region(u64 start, unsigned int length, u8 space_id,
- unsigned long flags, char *desc)
-{
- struct list_head *regions;
- struct reserved_region *reg;
- u64 end = start + length - 1;
- int ret = 0, error = 0;
-
- if (space_id == ACPI_ADR_SPACE_SYSTEM_IO)
- regions = &reserved_io_regions;
- else if (space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
- regions = &reserved_mem_regions;
- else
- return -EINVAL;
-
- if (list_empty(regions))
- return add_region_before(start, end, space_id, flags, desc, regions);
-
- list_for_each_entry(reg, regions, node)
- if (reg->start == end + 1) {
- /* The new region can be prepended to this one. */
- ret = request_range(start, end, space_id, flags, desc);
- if (!ret)
- reg->start = start;
-
- return ret;
- } else if (reg->start > end) {
- /* No overlap. Add the new region here and get out. */
- return add_region_before(start, end, space_id, flags,
- desc, ®->node);
- } else if (reg->end == start - 1) {
- goto combine;
- } else if (reg->end >= start) {
- goto overlap;
- }
-
- /* The new region goes after the last existing one. */
- return add_region_before(start, end, space_id, flags, desc, regions);
-
- overlap:
- /*
- * The new region overlaps an existing one.
- *
- * The head part of the new region immediately preceding the existing
- * overlapping one can be combined with it right away.
- */
- if (reg->start > start) {
- error = request_range(start, reg->start - 1, space_id, flags, desc);
- if (error)
- ret = error;
- else
- reg->start = start;
- }
-
- combine:
- /*
- * The new region is adjacent to an existing one. If it extends beyond
- * that region all the way to the next one, it is possible to combine
- * all three of them.
- */
- while (reg->end < end) {
- struct reserved_region *next = NULL;
- u64 a = reg->end + 1, b = end;
-
- if (!list_is_last(®->node, regions)) {
- next = list_next_entry(reg, node);
- if (next->start <= end)
- b = next->start - 1;
- }
- error = request_range(a, b, space_id, flags, desc);
- if (!error) {
- if (next && next->start == b + 1) {
- reg->end = next->end;
- list_del(&next->node);
- kfree(next);
- } else {
- reg->end = end;
- break;
- }
- } else if (next) {
- if (!ret)
- ret = error;
-
- reg = next;
- } else {
- break;
- }
- }
-
- return ret ? ret : error;
-}
-EXPORT_SYMBOL_GPL(acpi_reserve_region);
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index 2649a06..ec25635 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -1019,6 +1019,29 @@
return false;
}
+static bool __acpi_match_device_cls(const struct acpi_device_id *id,
+ struct acpi_hardware_id *hwid)
+{
+ int i, msk, byte_shift;
+ char buf[3];
+
+ if (!id->cls)
+ return false;
+
+ /* Apply class-code bitmask, before checking each class-code byte */
+ for (i = 1; i <= 3; i++) {
+ byte_shift = 8 * (3 - i);
+ msk = (id->cls_msk >> byte_shift) & 0xFF;
+ if (!msk)
+ continue;
+
+ sprintf(buf, "%02x", (id->cls >> byte_shift) & msk);
+ if (strncmp(buf, &hwid->id[(i - 1) * 2], 2))
+ return false;
+ }
+ return true;
+}
+
static const struct acpi_device_id *__acpi_match_device(
struct acpi_device *device,
const struct acpi_device_id *ids,
@@ -1036,9 +1059,12 @@
list_for_each_entry(hwid, &device->pnp.ids, list) {
/* First, check the ACPI/PNP IDs provided by the caller. */
- for (id = ids; id->id[0]; id++)
- if (!strcmp((char *) id->id, hwid->id))
+ for (id = ids; id->id[0] || id->cls; id++) {
+ if (id->id[0] && !strcmp((char *) id->id, hwid->id))
return id;
+ else if (id->cls && __acpi_match_device_cls(id, hwid))
+ return id;
+ }
/*
* Next, check ACPI_DT_NAMESPACE_HID and try to match the
@@ -2101,6 +2127,8 @@
if (info->valid & ACPI_VALID_UID)
pnp->unique_id = kstrdup(info->unique_id.string,
GFP_KERNEL);
+ if (info->valid & ACPI_VALID_CLS)
+ acpi_add_id(pnp, info->class_code.string);
kfree(info);
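
For a device with _CLS, acpi_add_id() stores the class code as a six-hex-digit string (for example "010601" for a SATA AHCI controller: base class 01, subclass 06, prog-if 01), and __acpi_match_device_cls() compares it byte-by-byte under cls_msk so a driver can ignore, say, the programming interface. Drivers consume this through the usual match helper; a hedged sketch with illustrative names:

    /* my_acpi_match may now mix HID entries and ACPI_DEVICE_CLASS()
     * entries in the same table. */
    const struct acpi_device_id *id = acpi_match_device(my_acpi_match,
                                                        &pdev->dev);
    if (!id)
            return -ENODEV;

The ahci_platform change further down is the first in-tree user of class-based matching.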
diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig
index 6d17a3b..15e40ee 100644
--- a/drivers/ata/Kconfig
+++ b/drivers/ata/Kconfig
@@ -48,7 +48,7 @@
config ATA_ACPI
bool "ATA ACPI Support"
- depends on ACPI && PCI
+ depends on ACPI
default y
help
This option adds support for ATA-related ACPI objects.
diff --git a/drivers/ata/ahci_platform.c b/drivers/ata/ahci_platform.c
index 614c78f..1befb11 100644
--- a/drivers/ata/ahci_platform.c
+++ b/drivers/ata/ahci_platform.c
@@ -20,6 +20,8 @@
#include <linux/platform_device.h>
#include <linux/libata.h>
#include <linux/ahci_platform.h>
+#include <linux/acpi.h>
+#include <linux/pci_ids.h>
#include "ahci.h"
#define DRV_NAME "ahci"
@@ -79,12 +81,19 @@
};
MODULE_DEVICE_TABLE(of, ahci_of_match);
+static const struct acpi_device_id ahci_acpi_match[] = {
+ { ACPI_DEVICE_CLASS(PCI_CLASS_STORAGE_SATA_AHCI, 0xffffff) },
+ {},
+};
+MODULE_DEVICE_TABLE(acpi, ahci_acpi_match);
+
static struct platform_driver ahci_driver = {
.probe = ahci_probe,
.remove = ata_platform_remove_one,
.driver = {
.name = DRV_NAME,
.of_match_table = ahci_of_match,
+ .acpi_match_table = ahci_acpi_match,
.pm = &ahci_pm_ops,
},
};
diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c
index 9c42883..894bda1 100644
--- a/drivers/base/firmware_class.c
+++ b/drivers/base/firmware_class.c
@@ -563,10 +563,8 @@
kfree(fw_priv);
}
-static int firmware_uevent(struct device *dev, struct kobj_uevent_env *env)
+static int do_firmware_uevent(struct firmware_priv *fw_priv, struct kobj_uevent_env *env)
{
- struct firmware_priv *fw_priv = to_firmware_priv(dev);
-
if (add_uevent_var(env, "FIRMWARE=%s", fw_priv->buf->fw_id))
return -ENOMEM;
if (add_uevent_var(env, "TIMEOUT=%i", loading_timeout))
@@ -577,6 +575,18 @@
return 0;
}
+static int firmware_uevent(struct device *dev, struct kobj_uevent_env *env)
+{
+ struct firmware_priv *fw_priv = to_firmware_priv(dev);
+ int err = 0;
+
+ mutex_lock(&fw_lock);
+ if (fw_priv->buf)
+ err = do_firmware_uevent(fw_priv, env);
+ mutex_unlock(&fw_lock);
+ return err;
+}
+
static struct class firmware_class = {
.name = "firmware",
.class_attrs = firmware_class_attrs,
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index cdd547b..0ee43c1 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -6,6 +6,7 @@
* This file is released under the GPLv2.
*/
+#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/platform_device.h>
@@ -19,6 +20,8 @@
#include <linux/suspend.h>
#include <linux/export.h>
+#define GENPD_RETRY_MAX_MS 250 /* Approximate */
+
#define GENPD_DEV_CALLBACK(genpd, type, callback, dev) \
({ \
type (*__routine)(struct device *__d); \
@@ -2131,6 +2134,7 @@
static void genpd_dev_pm_detach(struct device *dev, bool power_off)
{
struct generic_pm_domain *pd;
+ unsigned int i;
int ret = 0;
pd = pm_genpd_lookup_dev(dev);
@@ -2139,10 +2143,12 @@
dev_dbg(dev, "removing from PM domain %s\n", pd->name);
- while (1) {
+ for (i = 1; i < GENPD_RETRY_MAX_MS; i <<= 1) {
ret = pm_genpd_remove_device(pd, dev);
if (ret != -EAGAIN)
break;
+
+ mdelay(i);
cond_resched();
}
@@ -2183,6 +2189,7 @@
{
struct of_phandle_args pd_args;
struct generic_pm_domain *pd;
+ unsigned int i;
int ret;
if (!dev->of_node)
@@ -2218,10 +2225,12 @@
dev_dbg(dev, "adding to PM domain %s\n", pd->name);
- while (1) {
+ for (i = 1; i < GENPD_RETRY_MAX_MS; i <<= 1) {
ret = pm_genpd_add_device(pd, dev);
if (ret != -EAGAIN)
break;
+
+ mdelay(i);
cond_resched();
}
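
Replacing the unbounded while (1) with a doubling delay caps the retry phase at roughly GENPD_RETRY_MAX_MS while still backing off quickly, so a genpd callback that keeps returning -EAGAIN can no longer livelock attach or detach. The backoff shape in isolation:

    /* Retry with exponential backoff: 1, 2, 4, ... ms, bounded by MAX_MS. */
    for (i = 1; i < MAX_MS; i <<= 1) {
            ret = try_genpd_op();   /* hypothetical -EAGAIN-returning op */
            if (ret != -EAGAIN)
                    break;
            mdelay(i);
            cond_resched();
    }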
diff --git a/drivers/base/power/wakeirq.c b/drivers/base/power/wakeirq.c
index 7470004..eb6e674 100644
--- a/drivers/base/power/wakeirq.c
+++ b/drivers/base/power/wakeirq.c
@@ -45,14 +45,12 @@
return -EEXIST;
}
- dev->power.wakeirq = wirq;
- spin_unlock_irqrestore(&dev->power.lock, flags);
-
err = device_wakeup_attach_irq(dev, wirq);
- if (err)
- return err;
+ if (!err)
+ dev->power.wakeirq = wirq;
- return 0;
+ spin_unlock_irqrestore(&dev->power.lock, flags);
+ return err;
}
/**
@@ -105,10 +103,10 @@
return;
spin_lock_irqsave(&dev->power.lock, flags);
+ device_wakeup_detach_irq(dev);
dev->power.wakeirq = NULL;
spin_unlock_irqrestore(&dev->power.lock, flags);
- device_wakeup_detach_irq(dev);
if (wirq->dedicated_irq)
free_irq(wirq->irq, wirq);
kfree(wirq);
diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
index 40f7160..51f15bc 100644
--- a/drivers/base/power/wakeup.c
+++ b/drivers/base/power/wakeup.c
@@ -281,32 +281,25 @@
* Attach a device wakeirq to the wakeup source so the device
* wake IRQ can be configured automatically for suspend and
* resume.
+ *
+ * Call with the device's power.lock held.
*/
int device_wakeup_attach_irq(struct device *dev,
struct wake_irq *wakeirq)
{
struct wakeup_source *ws;
- int ret = 0;
- spin_lock_irq(&dev->power.lock);
ws = dev->power.wakeup;
if (!ws) {
dev_err(dev, "forgot to call device_init_wakeup?\n");
- ret = -EINVAL;
- goto unlock;
+ return -EINVAL;
}
- if (ws->wakeirq) {
- ret = -EEXIST;
- goto unlock;
- }
+ if (ws->wakeirq)
+ return -EEXIST;
ws->wakeirq = wakeirq;
-
-unlock:
- spin_unlock_irq(&dev->power.lock);
-
- return ret;
+ return 0;
}
/**
@@ -314,20 +307,16 @@
* @dev: Device to handle
*
* Removes a device wakeirq from the wakeup source.
+ *
+ * Call with the device's power.lock held.
*/
void device_wakeup_detach_irq(struct device *dev)
{
struct wakeup_source *ws;
- spin_lock_irq(&dev->power.lock);
ws = dev->power.wakeup;
- if (!ws)
- goto unlock;
-
- ws->wakeirq = NULL;
-
-unlock:
- spin_unlock_irq(&dev->power.lock);
+ if (ws)
+ ws->wakeirq = NULL;
}
/**
diff --git a/drivers/clk/at91/clk-h32mx.c b/drivers/clk/at91/clk-h32mx.c
index 152dcb3..61566bc 100644
--- a/drivers/clk/at91/clk-h32mx.c
+++ b/drivers/clk/at91/clk-h32mx.c
@@ -116,8 +116,10 @@
h32mxclk->pmc = pmc;
clk = clk_register(NULL, &h32mxclk->hw);
- if (!clk)
+ if (!clk) {
+ kfree(h32mxclk);
return;
+ }
of_clk_add_provider(np, of_clk_src_simple_get, clk);
}
diff --git a/drivers/clk/at91/clk-main.c b/drivers/clk/at91/clk-main.c
index c240045..27dfa96 100644
--- a/drivers/clk/at91/clk-main.c
+++ b/drivers/clk/at91/clk-main.c
@@ -171,8 +171,10 @@
irq_set_status_flags(osc->irq, IRQ_NOAUTOEN);
ret = request_irq(osc->irq, clk_main_osc_irq_handler,
IRQF_TRIGGER_HIGH, name, osc);
- if (ret)
+ if (ret) {
+ kfree(osc);
return ERR_PTR(ret);
+ }
if (bypass)
pmc_write(pmc, AT91_CKGR_MOR,
diff --git a/drivers/clk/at91/clk-master.c b/drivers/clk/at91/clk-master.c
index f98eafe..5b3ded5 100644
--- a/drivers/clk/at91/clk-master.c
+++ b/drivers/clk/at91/clk-master.c
@@ -165,12 +165,16 @@
irq_set_status_flags(master->irq, IRQ_NOAUTOEN);
ret = request_irq(master->irq, clk_master_irq_handler,
IRQF_TRIGGER_HIGH, "clk-master", master);
- if (ret)
+ if (ret) {
+ kfree(master);
return ERR_PTR(ret);
+ }
clk = clk_register(NULL, &master->hw);
- if (IS_ERR(clk))
+ if (IS_ERR(clk)) {
+ free_irq(master->irq, master);
kfree(master);
+ }
return clk;
}
diff --git a/drivers/clk/at91/clk-pll.c b/drivers/clk/at91/clk-pll.c
index cbbe403..18b60f4 100644
--- a/drivers/clk/at91/clk-pll.c
+++ b/drivers/clk/at91/clk-pll.c
@@ -346,12 +346,16 @@
irq_set_status_flags(pll->irq, IRQ_NOAUTOEN);
ret = request_irq(pll->irq, clk_pll_irq_handler, IRQF_TRIGGER_HIGH,
id ? "clk-pllb" : "clk-plla", pll);
- if (ret)
+ if (ret) {
+ kfree(pll);
return ERR_PTR(ret);
+ }
clk = clk_register(NULL, &pll->hw);
- if (IS_ERR(clk))
+ if (IS_ERR(clk)) {
+ free_irq(pll->irq, pll);
kfree(pll);
+ }
return clk;
}
diff --git a/drivers/clk/at91/clk-system.c b/drivers/clk/at91/clk-system.c
index a76d03f..58008b3 100644
--- a/drivers/clk/at91/clk-system.c
+++ b/drivers/clk/at91/clk-system.c
@@ -130,13 +130,17 @@
irq_set_status_flags(sys->irq, IRQ_NOAUTOEN);
ret = request_irq(sys->irq, clk_system_irq_handler,
IRQF_TRIGGER_HIGH, name, sys);
- if (ret)
+ if (ret) {
+ kfree(sys);
return ERR_PTR(ret);
+ }
}
clk = clk_register(NULL, &sys->hw);
- if (IS_ERR(clk))
+ if (IS_ERR(clk)) {
+ free_irq(sys->irq, sys);
kfree(sys);
+ }
return clk;
}
diff --git a/drivers/clk/at91/clk-utmi.c b/drivers/clk/at91/clk-utmi.c
index ae3263b..30dd697 100644
--- a/drivers/clk/at91/clk-utmi.c
+++ b/drivers/clk/at91/clk-utmi.c
@@ -118,12 +118,16 @@
irq_set_status_flags(utmi->irq, IRQ_NOAUTOEN);
ret = request_irq(utmi->irq, clk_utmi_irq_handler,
IRQF_TRIGGER_HIGH, "clk-utmi", utmi);
- if (ret)
+ if (ret) {
+ kfree(utmi);
return ERR_PTR(ret);
+ }
clk = clk_register(NULL, &utmi->hw);
- if (IS_ERR(clk))
+ if (IS_ERR(clk)) {
+ free_irq(utmi->irq, utmi);
kfree(utmi);
+ }
return clk;
}
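
The same two leaks recur across the at91 clock registration helpers above: a failed request_irq() must free the just-allocated object, and a failed clk_register() must release the IRQ before freeing it, since the handler could otherwise fire against freed memory. The unwind pattern each hunk installs:

    ret = request_irq(irq, handler, IRQF_TRIGGER_HIGH, name, obj);
    if (ret) {
            kfree(obj);                 /* nothing else to undo yet */
            return ERR_PTR(ret);
    }

    clk = clk_register(NULL, &obj->hw);
    if (IS_ERR(clk)) {
            free_irq(irq, obj);         /* undo request_irq first */
            kfree(obj);
    }
    return clk;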
diff --git a/drivers/clk/bcm/clk-iproc-asiu.c b/drivers/clk/bcm/clk-iproc-asiu.c
index e19c09c..f630e1b 100644
--- a/drivers/clk/bcm/clk-iproc-asiu.c
+++ b/drivers/clk/bcm/clk-iproc-asiu.c
@@ -222,10 +222,6 @@
struct iproc_asiu_clk *asiu_clk;
const char *clk_name;
- clk_name = kzalloc(IPROC_CLK_NAME_LEN, GFP_KERNEL);
- if (WARN_ON(!clk_name))
- goto err_clk_register;
-
ret = of_property_read_string_index(node, "clock-output-names",
i, &clk_name);
if (WARN_ON(ret))
@@ -259,7 +255,7 @@
err_clk_register:
for (i = 0; i < num_clks; i++)
- kfree(asiu->clks[i].name);
+ clk_unregister(asiu->clk_data.clks[i]);
iounmap(asiu->gate_base);
err_iomap_gate:
diff --git a/drivers/clk/bcm/clk-iproc-pll.c b/drivers/clk/bcm/clk-iproc-pll.c
index 46fb84b..2dda4e8 100644
--- a/drivers/clk/bcm/clk-iproc-pll.c
+++ b/drivers/clk/bcm/clk-iproc-pll.c
@@ -366,7 +366,7 @@
val = readl(pll->pll_base + ctrl->ndiv_int.offset);
ndiv_int = (val >> ctrl->ndiv_int.shift) &
bit_mask(ctrl->ndiv_int.width);
- ndiv = ndiv_int << ctrl->ndiv_int.shift;
+ ndiv = (u64)ndiv_int << ctrl->ndiv_int.shift;
if (ctrl->flags & IPROC_CLK_PLL_HAS_NDIV_FRAC) {
val = readl(pll->pll_base + ctrl->ndiv_frac.offset);
@@ -374,7 +374,8 @@
bit_mask(ctrl->ndiv_frac.width);
if (ndiv_frac != 0)
- ndiv = (ndiv_int << ctrl->ndiv_int.shift) | ndiv_frac;
+ ndiv = ((u64)ndiv_int << ctrl->ndiv_int.shift) |
+ ndiv_frac;
}
val = readl(pll->pll_base + ctrl->pdiv.offset);
@@ -655,10 +656,6 @@
memset(&init, 0, sizeof(init));
parent_name = node->name;
- clk_name = kzalloc(IPROC_CLK_NAME_LEN, GFP_KERNEL);
- if (WARN_ON(!clk_name))
- goto err_clk_register;
-
ret = of_property_read_string_index(node, "clock-output-names",
i, &clk_name);
if (WARN_ON(ret))
@@ -690,10 +687,8 @@
return;
err_clk_register:
- for (i = 0; i < num_clks; i++) {
- kfree(pll->clks[i].name);
+ for (i = 0; i < num_clks; i++)
clk_unregister(pll->clk_data.clks[i]);
- }
err_pll_register:
if (pll->asiu_base)
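
Without the cast, ndiv_int << ctrl->ndiv_int.shift is evaluated in 32 bits and the high bits are lost before the result widens into the u64 ndiv. The widening must happen before the shift:

    u32 ndiv_int = 0x1000;            /* example field value */
    u64 lost = ndiv_int << 20;        /* 0x1000 << 20 wraps in 32 bits */
    u64 kept = (u64)ndiv_int << 20;   /* cast first: full 0x100000000 */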
diff --git a/drivers/clk/clk-stm32f4.c b/drivers/clk/clk-stm32f4.c
index b9b12a7..3f6f7ad 100644
--- a/drivers/clk/clk-stm32f4.c
+++ b/drivers/clk/clk-stm32f4.c
@@ -268,7 +268,7 @@
memcpy(table, stm32f42xx_gate_map, sizeof(table));
/* only bits set in table can be used as indices */
- if (WARN_ON(secondary > 8 * sizeof(table) ||
+ if (WARN_ON(secondary >= BITS_PER_BYTE * sizeof(table) ||
0 == (table[BIT_ULL_WORD(secondary)] &
BIT_ULL_MASK(secondary))))
return -EINVAL;
diff --git a/drivers/clk/mediatek/clk-mt8173.c b/drivers/clk/mediatek/clk-mt8173.c
index 4b9e04c..8b6523d 100644
--- a/drivers/clk/mediatek/clk-mt8173.c
+++ b/drivers/clk/mediatek/clk-mt8173.c
@@ -700,6 +700,22 @@
MUX(CLK_PERI_UART3_SEL, "uart3_ck_sel", uart_ck_sel_parents, 0x40c, 3, 1),
};
+static struct clk_onecell_data *mt8173_top_clk_data __initdata;
+static struct clk_onecell_data *mt8173_pll_clk_data __initdata;
+
+static void __init mtk_clk_enable_critical(void)
+{
+ if (!mt8173_top_clk_data || !mt8173_pll_clk_data)
+ return;
+
+ clk_prepare_enable(mt8173_pll_clk_data->clks[CLK_APMIXED_ARMCA15PLL]);
+ clk_prepare_enable(mt8173_pll_clk_data->clks[CLK_APMIXED_ARMCA7PLL]);
+ clk_prepare_enable(mt8173_top_clk_data->clks[CLK_TOP_MEM_SEL]);
+ clk_prepare_enable(mt8173_top_clk_data->clks[CLK_TOP_DDRPHYCFG_SEL]);
+ clk_prepare_enable(mt8173_top_clk_data->clks[CLK_TOP_CCI400_SEL]);
+ clk_prepare_enable(mt8173_top_clk_data->clks[CLK_TOP_RTC_SEL]);
+}
+
static void __init mtk_topckgen_init(struct device_node *node)
{
struct clk_onecell_data *clk_data;
@@ -712,19 +728,19 @@
return;
}
- clk_data = mtk_alloc_clk_data(CLK_TOP_NR_CLK);
+ mt8173_top_clk_data = clk_data = mtk_alloc_clk_data(CLK_TOP_NR_CLK);
mtk_clk_register_factors(root_clk_alias, ARRAY_SIZE(root_clk_alias), clk_data);
mtk_clk_register_factors(top_divs, ARRAY_SIZE(top_divs), clk_data);
mtk_clk_register_composites(top_muxes, ARRAY_SIZE(top_muxes), base,
&mt8173_clk_lock, clk_data);
- clk_prepare_enable(clk_data->clks[CLK_TOP_CCI400_SEL]);
-
r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
if (r)
pr_err("%s(): could not register clock provider: %d\n",
__func__, r);
+
+ mtk_clk_enable_critical();
}
CLK_OF_DECLARE(mtk_topckgen, "mediatek,mt8173-topckgen", mtk_topckgen_init);
@@ -818,13 +834,13 @@
{
struct clk_onecell_data *clk_data;
- clk_data = mtk_alloc_clk_data(CLK_APMIXED_NR_CLK);
+ mt8173_pll_clk_data = clk_data = mtk_alloc_clk_data(CLK_APMIXED_NR_CLK);
if (!clk_data)
return;
mtk_clk_register_plls(node, plls, ARRAY_SIZE(plls), clk_data);
- clk_prepare_enable(clk_data->clks[CLK_APMIXED_ARMCA15PLL]);
+ mtk_clk_enable_critical();
}
CLK_OF_DECLARE(mtk_apmixedsys, "mediatek,mt8173-apmixedsys",
mtk_apmixedsys_init);
diff --git a/drivers/clk/qcom/clk-rcg2.c b/drivers/clk/qcom/clk-rcg2.c
index b95d17f..92936f0 100644
--- a/drivers/clk/qcom/clk-rcg2.c
+++ b/drivers/clk/qcom/clk-rcg2.c
@@ -530,19 +530,16 @@
struct clk_rcg2 *rcg = to_clk_rcg2(hw);
struct freq_tbl f = *rcg->freq_tbl;
const struct frac_entry *frac = frac_table_pixel;
- unsigned long request, src_rate;
+ unsigned long request;
int delta = 100000;
u32 mask = BIT(rcg->hid_width) - 1;
u32 hid_div;
- int index = qcom_find_src_index(hw, rcg->parent_map, f.src);
- struct clk *parent = clk_get_parent_by_index(hw->clk, index);
for (; frac->num; frac++) {
request = (rate * frac->den) / frac->num;
- src_rate = __clk_round_rate(parent, request);
- if ((src_rate < (request - delta)) ||
- (src_rate > (request + delta)))
+ if ((parent_rate < (request - delta)) ||
+ (parent_rate > (request + delta)))
continue;
regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG,
diff --git a/drivers/clk/st/clk-flexgen.c b/drivers/clk/st/clk-flexgen.c
index 657ca14..8dd8cce 100644
--- a/drivers/clk/st/clk-flexgen.c
+++ b/drivers/clk/st/clk-flexgen.c
@@ -190,7 +190,7 @@
init.name = name;
init.ops = &flexgen_ops;
- init.flags = CLK_IS_BASIC | flexgen_flags;
+ init.flags = CLK_IS_BASIC | CLK_GET_RATE_NOCACHE | flexgen_flags;
init.parent_names = parent_names;
init.num_parents = num_parents;
@@ -303,6 +303,8 @@
if (!rlock)
goto err;
+ spin_lock_init(rlock);
+
for (i = 0; i < clk_data->clk_num; i++) {
struct clk *clk;
const char *clk_name;
diff --git a/drivers/clk/st/clkgen-fsyn.c b/drivers/clk/st/clkgen-fsyn.c
index e94197f..d9eb2e1 100644
--- a/drivers/clk/st/clkgen-fsyn.c
+++ b/drivers/clk/st/clkgen-fsyn.c
@@ -340,7 +340,7 @@
CLKGEN_FIELD(0x30c, 0xf, 20),
CLKGEN_FIELD(0x310, 0xf, 20) },
.lockstatus_present = true,
- .lock_status = CLKGEN_FIELD(0x2A0, 0x1, 24),
+ .lock_status = CLKGEN_FIELD(0x2f0, 0x1, 24),
.powerup_polarity = 1,
.standby_polarity = 1,
.pll_ops = &st_quadfs_pll_c32_ops,
@@ -489,7 +489,7 @@
struct st_clk_quadfs_pll *pll = to_quadfs_pll(hw);
u32 npda = CLKGEN_READ(pll, npda);
- return !!npda;
+ return pll->data->powerup_polarity ? !npda : !!npda;
}
static int clk_fs660c32_vco_get_rate(unsigned long input, struct stm_fs *fs,
@@ -635,7 +635,7 @@
init.name = name;
init.ops = quadfs->pll_ops;
- init.flags = CLK_IS_BASIC;
+ init.flags = CLK_IS_BASIC | CLK_GET_RATE_NOCACHE;
init.parent_names = &parent_name;
init.num_parents = 1;
@@ -774,7 +774,7 @@
if (fs->lock)
spin_lock_irqsave(fs->lock, flags);
- CLKGEN_WRITE(fs, nsb[fs->chan], !fs->data->standby_polarity);
+ CLKGEN_WRITE(fs, nsb[fs->chan], fs->data->standby_polarity);
if (fs->lock)
spin_unlock_irqrestore(fs->lock, flags);
@@ -1082,10 +1082,6 @@
.compatible = "st,stih407-quadfs660-D",
.data = &st_fs660c32_D_407
},
- {
- .compatible = "st,stih407-quadfs660-D",
- .data = (void *)&st_fs660c32_D_407
- },
{}
};
diff --git a/drivers/clk/st/clkgen-mux.c b/drivers/clk/st/clkgen-mux.c
index 4fbe6e0..717c4a9 100644
--- a/drivers/clk/st/clkgen-mux.c
+++ b/drivers/clk/st/clkgen-mux.c
@@ -237,7 +237,7 @@
init.name = name;
init.ops = &clkgena_divmux_ops;
- init.flags = CLK_IS_BASIC;
+ init.flags = CLK_IS_BASIC | CLK_GET_RATE_NOCACHE;
init.parent_names = parent_names;
init.num_parents = num_parents;
@@ -513,7 +513,8 @@
0, &clk_name))
return;
- clk = clk_register_divider_table(NULL, clk_name, parent_name, 0,
+ clk = clk_register_divider_table(NULL, clk_name, parent_name,
+ CLK_GET_RATE_NOCACHE,
reg + data->offset, data->shift, 1,
0, data->table, NULL);
if (IS_ERR(clk))
@@ -582,7 +583,7 @@
};
static struct clkgen_mux_data stih407_a9_mux_data = {
.offset = 0x1a4,
- .shift = 1,
+ .shift = 0,
.width = 2,
};
@@ -786,7 +787,8 @@
&mux->hw, &clk_mux_ops,
&div->hw, &clk_divider_ops,
&gate->hw, &clk_gate_ops,
- data->clk_flags);
+ data->clk_flags |
+ CLK_GET_RATE_NOCACHE);
if (IS_ERR(clk)) {
kfree(gate);
kfree(div);
diff --git a/drivers/clk/st/clkgen-pll.c b/drivers/clk/st/clkgen-pll.c
index 1065322..72d1c27 100644
--- a/drivers/clk/st/clkgen-pll.c
+++ b/drivers/clk/st/clkgen-pll.c
@@ -406,7 +406,7 @@
init.name = clk_name;
init.ops = pll_data->ops;
- init.flags = CLK_IS_BASIC;
+ init.flags = CLK_IS_BASIC | CLK_GET_RATE_NOCACHE;
init.parent_names = &parent_name;
init.num_parents = 1;
diff --git a/drivers/clk/sunxi/clk-sunxi.c b/drivers/clk/sunxi/clk-sunxi.c
index 9a82f17..abf7b37 100644
--- a/drivers/clk/sunxi/clk-sunxi.c
+++ b/drivers/clk/sunxi/clk-sunxi.c
@@ -1391,6 +1391,7 @@
CLK_OF_DECLARE(sun6i_a31_clk_init, "allwinner,sun6i-a31", sun6i_init_clocks);
CLK_OF_DECLARE(sun6i_a31s_clk_init, "allwinner,sun6i-a31s", sun6i_init_clocks);
CLK_OF_DECLARE(sun8i_a23_clk_init, "allwinner,sun8i-a23", sun6i_init_clocks);
+CLK_OF_DECLARE(sun8i_a33_clk_init, "allwinner,sun8i-a33", sun6i_init_clocks);
static void __init sun9i_init_clocks(struct device_node *node)
{
diff --git a/drivers/clocksource/timer-imx-gpt.c b/drivers/clocksource/timer-imx-gpt.c
index 879c784..2d59038 100644
--- a/drivers/clocksource/timer-imx-gpt.c
+++ b/drivers/clocksource/timer-imx-gpt.c
@@ -529,6 +529,7 @@
CLOCKSOURCE_OF_DECLARE(imx1_timer, "fsl,imx1-gpt", imx1_timer_init_dt);
CLOCKSOURCE_OF_DECLARE(imx21_timer, "fsl,imx21-gpt", imx21_timer_init_dt);
+CLOCKSOURCE_OF_DECLARE(imx27_timer, "fsl,imx27-gpt", imx21_timer_init_dt);
CLOCKSOURCE_OF_DECLARE(imx31_timer, "fsl,imx31-gpt", imx31_timer_init_dt);
CLOCKSOURCE_OF_DECLARE(imx25_timer, "fsl,imx25-gpt", imx31_timer_init_dt);
CLOCKSOURCE_OF_DECLARE(imx50_timer, "fsl,imx50-gpt", imx31_timer_init_dt);
diff --git a/drivers/cpufreq/loongson2_cpufreq.c b/drivers/cpufreq/loongson2_cpufreq.c
index fc897ba..e362860 100644
--- a/drivers/cpufreq/loongson2_cpufreq.c
+++ b/drivers/cpufreq/loongson2_cpufreq.c
@@ -3,7 +3,7 @@
*
* The 2E revision of the loongson processor does not support this feature.
*
- * Copyright (C) 2006 - 2008 Lemote Inc. & Insititute of Computing Technology
+ * Copyright (C) 2006 - 2008 Lemote Inc. & Institute of Computing Technology
* Author: Yanhua, yanh@lemote.com
*
* This file is subject to the terms and conditions of the GNU General Public
diff --git a/drivers/firmware/qcom_scm-32.c b/drivers/firmware/qcom_scm-32.c
index 1bd6f9c..29e6850 100644
--- a/drivers/firmware/qcom_scm-32.c
+++ b/drivers/firmware/qcom_scm-32.c
@@ -24,7 +24,6 @@
#include <linux/err.h>
#include <linux/qcom_scm.h>
-#include <asm/outercache.h>
#include <asm/cacheflush.h>
#include "qcom_scm.h"
@@ -219,8 +218,7 @@
* Flush the command buffer so that the secure world sees
* the correct data.
*/
- __cpuc_flush_dcache_area((void *)cmd, cmd->len);
- outer_flush_range(cmd_addr, cmd_addr + cmd->len);
+ secure_flush_area(cmd, cmd->len);
ret = smc(cmd_addr);
if (ret < 0)
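
secure_flush_area() folds the previous two-step flush into one helper, keeping the virtual-address (inner cache) and physical-address (outer cache) bookkeeping in a single place. Roughly the shape of the helper (paraphrased from arch/arm's cacheflush.h, not quoted verbatim):

    static inline void secure_flush_area(const void *addr, size_t size)
    {
    	phys_addr_t phys = virt_to_phys(addr);

    	__cpuc_flush_dcache_area((void *)addr, size);	/* inner, by VA */
    	outer_flush_range(phys, phys + size);		/* outer, by PA */
    }
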
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index 975edb1..ae43b58 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -352,7 +352,7 @@
if (((int64_t)timeout_ns) < 0)
return MAX_SCHEDULE_TIMEOUT;
- timeout = ktime_sub_ns(ktime_get(), timeout_ns);
+ timeout = ktime_sub(ns_to_ktime(timeout_ns), ktime_get());
if (ktime_to_ns(timeout) < 0)
return 0;
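
The old expression computed now - deadline, which is negative (hence "already expired") for any deadline in the future; the fix computes deadline - now. A sketch of the corrected conversion, with the wrapper name being illustrative only:

    #include <linux/jiffies.h>
    #include <linux/ktime.h>
    #include <linux/sched.h>

    static long remaining_jiffies(int64_t timeout_ns)
    {
    	ktime_t left;

    	if (timeout_ns < 0)			/* negative: wait forever */
    		return MAX_SCHEDULE_TIMEOUT;

    	/* remaining time = absolute deadline minus current time */
    	left = ktime_sub(ns_to_ktime(timeout_ns), ktime_get());
    	if (ktime_to_ns(left) < 0)
    		return 0;			/* deadline already passed */

    	return nsecs_to_jiffies(ktime_to_ns(left));
    }
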
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
index 5cde635..6e77964 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
@@ -3403,19 +3403,25 @@
switch (entry->src_data) {
case 0: /* vblank */
- if (disp_int & interrupt_status_offsets[crtc].vblank) {
+ if (disp_int & interrupt_status_offsets[crtc].vblank)
dce_v10_0_crtc_vblank_int_ack(adev, crtc);
- if (amdgpu_irq_enabled(adev, source, irq_type)) {
- drm_handle_vblank(adev->ddev, crtc);
- }
- DRM_DEBUG("IH: D%d vblank\n", crtc + 1);
+ else
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ if (amdgpu_irq_enabled(adev, source, irq_type)) {
+ drm_handle_vblank(adev->ddev, crtc);
}
+ DRM_DEBUG("IH: D%d vblank\n", crtc + 1);
+
break;
case 1: /* vline */
- if (disp_int & interrupt_status_offsets[crtc].vline) {
+ if (disp_int & interrupt_status_offsets[crtc].vline)
dce_v10_0_crtc_vline_int_ack(adev, crtc);
- DRM_DEBUG("IH: D%d vline\n", crtc + 1);
- }
+ else
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ DRM_DEBUG("IH: D%d vline\n", crtc + 1);
+
break;
default:
DRM_DEBUG("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data);
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
index 95efd98..7f7abb0 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
@@ -3402,19 +3402,25 @@
switch (entry->src_data) {
case 0: /* vblank */
- if (disp_int & interrupt_status_offsets[crtc].vblank) {
+ if (disp_int & interrupt_status_offsets[crtc].vblank)
dce_v11_0_crtc_vblank_int_ack(adev, crtc);
- if (amdgpu_irq_enabled(adev, source, irq_type)) {
- drm_handle_vblank(adev->ddev, crtc);
- }
- DRM_DEBUG("IH: D%d vblank\n", crtc + 1);
+ else
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ if (amdgpu_irq_enabled(adev, source, irq_type)) {
+ drm_handle_vblank(adev->ddev, crtc);
}
+ DRM_DEBUG("IH: D%d vblank\n", crtc + 1);
+
break;
case 1: /* vline */
- if (disp_int & interrupt_status_offsets[crtc].vline) {
+ if (disp_int & interrupt_status_offsets[crtc].vline)
dce_v11_0_crtc_vline_int_ack(adev, crtc);
- DRM_DEBUG("IH: D%d vline\n", crtc + 1);
- }
+ else
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ DRM_DEBUG("IH: D%d vline\n", crtc + 1);
+
break;
default:
DRM_DEBUG("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data);
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
index aaca8d6..08387df 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
@@ -3237,19 +3237,25 @@
switch (entry->src_data) {
case 0: /* vblank */
- if (disp_int & interrupt_status_offsets[crtc].vblank) {
+ if (disp_int & interrupt_status_offsets[crtc].vblank)
WREG32(mmLB_VBLANK_STATUS + crtc_offsets[crtc], LB_VBLANK_STATUS__VBLANK_ACK_MASK);
- if (amdgpu_irq_enabled(adev, source, irq_type)) {
- drm_handle_vblank(adev->ddev, crtc);
- }
- DRM_DEBUG("IH: D%d vblank\n", crtc + 1);
+ else
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ if (amdgpu_irq_enabled(adev, source, irq_type)) {
+ drm_handle_vblank(adev->ddev, crtc);
}
+ DRM_DEBUG("IH: D%d vblank\n", crtc + 1);
+
break;
case 1: /* vline */
- if (disp_int & interrupt_status_offsets[crtc].vline) {
+ if (disp_int & interrupt_status_offsets[crtc].vline)
WREG32(mmLB_VLINE_STATUS + crtc_offsets[crtc], LB_VLINE_STATUS__VLINE_ACK_MASK);
- DRM_DEBUG("IH: D%d vline\n", crtc + 1);
- }
+ else
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ DRM_DEBUG("IH: D%d vline\n", crtc + 1);
+
break;
default:
DRM_DEBUG("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
index 8a1f999..9be0070 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
@@ -420,6 +420,12 @@
pqm_uninit(&p->pqm);
pdd = kfd_get_process_device_data(dev, p);
+
+ if (!pdd) {
+ mutex_unlock(&p->mutex);
+ return;
+ }
+
if (pdd->reset_wavefronts) {
dbgdev_wave_reset_wavefronts(pdd->dev, p);
pdd->reset_wavefronts = false;
@@ -431,8 +437,7 @@
* We don't call amd_iommu_unbind_pasid() here
* because the IOMMU called us.
*/
- if (pdd)
- pdd->bound = false;
+ pdd->bound = false;
mutex_unlock(&p->mutex);
}
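
kfd_get_process_device_data() returns NULL when the process never opened this device, and the old code checked pdd only at the very end, after pdd->reset_wavefronts had already been dereferenced: a classic check-after-use. The fix hoists a single early test (unlocking p->mutex on the way out), which in turn makes the late guard redundant:

    /* Before (simplified):
     *	if (pdd->reset_wavefronts) ...	// NULL deref if lookup failed
     *	...
     *	if (pdd)			// guard arrives too late
     *		pdd->bound = false;
     *
     * After: one NULL test right after the lookup; the remainder of
     * the function may dereference pdd freely.
     */
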
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index 8867818..d65cbe6 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -157,9 +157,7 @@
struct drm_i915_gem_object *obj;
int ret;
- obj = i915_gem_object_create_stolen(dev, size);
- if (obj == NULL)
- obj = i915_gem_alloc_object(dev, size);
+ obj = i915_gem_alloc_object(dev, size);
if (obj == NULL)
return ERR_PTR(-ENOMEM);
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 9daa288..dcc6a88 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -2546,6 +2546,8 @@
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj;
struct i915_address_space *vm;
+ struct i915_vma *vma;
+ bool flush;
i915_check_and_clear_faults(dev);
@@ -2555,17 +2557,24 @@
dev_priv->gtt.base.total,
true);
+ /* Cache flush objects bound into GGTT and rebind them. */
+ vm = &dev_priv->gtt.base;
list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
- struct i915_vma *vma = i915_gem_obj_to_vma(obj,
- &dev_priv->gtt.base);
- if (!vma)
- continue;
+ flush = false;
+ list_for_each_entry(vma, &obj->vma_list, vma_link) {
+ if (vma->vm != vm)
+ continue;
- i915_gem_clflush_object(obj, obj->pin_display);
- WARN_ON(i915_vma_bind(vma, obj->cache_level, PIN_UPDATE));
+ WARN_ON(i915_vma_bind(vma, obj->cache_level,
+ PIN_UPDATE));
+
+ flush = true;
+ }
+
+ if (flush)
+ i915_gem_clflush_object(obj, obj->pin_display);
}
-
if (INTEL_INFO(dev)->gen >= 8) {
if (IS_CHERRYVIEW(dev) || IS_BROXTON(dev))
chv_setup_private_ppat(dev_priv);
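
A GEM object can own several VMAs, at most one per address space; the old resume path looked up a single GGTT VMA and flushed the object before rebinding. The rewrite walks obj->vma_list, rebinds every VMA that belongs to the GGTT, and performs the (expensive) clflush once per object, and only when something was actually rebound:

    /* Shape of the new loop: rebind first, flush once afterwards.
     *	for each obj on bound_list:
     *		flush = false
     *		for each vma of obj:
     *			if vma->vm == ggtt: rebind vma; flush = true
     *		if flush: clflush obj
     */
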
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index 633bd1f..d61e74a 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -183,8 +183,18 @@
if (IS_GEN4(dev)) {
uint32_t ddc2 = I915_READ(DCC2);
- if (!(ddc2 & DCC2_MODIFIED_ENHANCED_DISABLE))
+ if (!(ddc2 & DCC2_MODIFIED_ENHANCED_DISABLE)) {
+ /* Since the swizzling may vary within an
+ * object, we have no idea what the swizzling
+ * is for any page in particular. Thus we
+ * cannot migrate tiled pages using the GPU,
+ * nor can we tell userspace what the exact
+ * swizzling is for any object.
+ */
dev_priv->quirks |= QUIRK_PIN_SWIZZLED_PAGES;
+ swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
+ swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
+ }
}
if (dcc == 0xffffffff) {
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 1b61f98..647b140 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -4854,6 +4854,9 @@
struct intel_plane *intel_plane;
int pipe = intel_crtc->pipe;
+ if (!intel_crtc->active)
+ return;
+
intel_crtc_wait_for_pending_flips(crtc);
intel_pre_disable_primary(crtc);
@@ -7887,7 +7890,7 @@
int pipe = pipe_config->cpu_transcoder;
enum dpio_channel port = vlv_pipe_to_channel(pipe);
intel_clock_t clock;
- u32 cmn_dw13, pll_dw0, pll_dw1, pll_dw2;
+ u32 cmn_dw13, pll_dw0, pll_dw1, pll_dw2, pll_dw3;
int refclk = 100000;
mutex_lock(&dev_priv->sb_lock);
@@ -7895,10 +7898,13 @@
pll_dw0 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW0(port));
pll_dw1 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW1(port));
pll_dw2 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW2(port));
+ pll_dw3 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port));
mutex_unlock(&dev_priv->sb_lock);
clock.m1 = (pll_dw1 & 0x7) == DPIO_CHV_M1_DIV_BY_2 ? 2 : 0;
- clock.m2 = ((pll_dw0 & 0xff) << 22) | (pll_dw2 & 0x3fffff);
+ clock.m2 = (pll_dw0 & 0xff) << 22;
+ if (pll_dw3 & DPIO_CHV_FRAC_DIV_EN)
+ clock.m2 |= pll_dw2 & 0x3fffff;
clock.n = (pll_dw1 >> DPIO_CHV_N_DIV_SHIFT) & 0xf;
clock.p1 = (cmn_dw13 >> DPIO_CHV_P1_DIV_SHIFT) & 0x7;
clock.p2 = (cmn_dw13 >> DPIO_CHV_P2_DIV_SHIFT) & 0x1f;
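
On Cherryview the M2 divider has an integer part in PLL_DW0 and a fractional part in PLL_DW2, but the fraction only takes effect when DPIO_CHV_FRAC_DIV_EN is set in PLL_DW3; OR-ing it in unconditionally corrupted the readback for PLLs running in integer mode. The corrected assembly, annotated:

    clock.m2 = (pll_dw0 & 0xff) << 22;		/* integer part, always */
    if (pll_dw3 & DPIO_CHV_FRAC_DIV_EN)
    	clock.m2 |= pll_dw2 & 0x3fffff;		/* fraction, when enabled */
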
diff --git a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
index f2daad8..7841970 100644
--- a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
+++ b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
@@ -285,7 +285,7 @@
if (wait) {
if (!wait_for_completion_timeout(&engine->compl,
- msecs_to_jiffies(1))) {
+ msecs_to_jiffies(100))) {
dev_err(dmm->dev, "timed out waiting for done\n");
ret = -ETIMEDOUT;
}
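
One millisecond left the DMM refill racing ordinary scheduling latency; 100 ms keeps -ETIMEDOUT meaningful as a genuine hardware fault rather than a busy-system artifact. Note the return convention the negated call relies on:

    /* returns 0 on timeout, otherwise the jiffies left over */
    unsigned long left = wait_for_completion_timeout(&done,
    						     msecs_to_jiffies(100));
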
diff --git a/drivers/gpu/drm/omapdrm/omap_drv.h b/drivers/gpu/drm/omapdrm/omap_drv.h
index ae2df41..12081e6 100644
--- a/drivers/gpu/drm/omapdrm/omap_drv.h
+++ b/drivers/gpu/drm/omapdrm/omap_drv.h
@@ -177,7 +177,7 @@
struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_object **bos);
struct drm_gem_object *omap_framebuffer_bo(struct drm_framebuffer *fb, int p);
int omap_framebuffer_pin(struct drm_framebuffer *fb);
-int omap_framebuffer_unpin(struct drm_framebuffer *fb);
+void omap_framebuffer_unpin(struct drm_framebuffer *fb);
void omap_framebuffer_update_scanout(struct drm_framebuffer *fb,
struct omap_drm_window *win, struct omap_overlay_info *info);
struct drm_connector *omap_framebuffer_get_next_connector(
@@ -211,7 +211,7 @@
enum dma_data_direction dir);
int omap_gem_get_paddr(struct drm_gem_object *obj,
dma_addr_t *paddr, bool remap);
-int omap_gem_put_paddr(struct drm_gem_object *obj);
+void omap_gem_put_paddr(struct drm_gem_object *obj);
int omap_gem_get_pages(struct drm_gem_object *obj, struct page ***pages,
bool remap);
int omap_gem_put_pages(struct drm_gem_object *obj);
@@ -236,7 +236,7 @@
/* PVR needs alignment to 8 pixels.. right now that is the most
* restrictive stride requirement..
*/
- return ALIGN(pitch, 8 * bytespp);
+ return roundup(pitch, 8 * bytespp);
}
/* map crtc to vblank mask */
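
ALIGN() rounds up by masking and is therefore only correct for power-of-two boundaries, whereas 8 * bytespp is 24 for 3-byte (24 bpp) formats; roundup() divides and multiplies, so it handles any modulus. A worked example at a 24-byte boundary:

    /*	ALIGN(100, 24)   == 104   -- masks with ~23; 104 is not a multiple of 24
     *	roundup(100, 24) == 120   -- ((100 + 23) / 24) * 24, correct
     */

The omap_fbdev hunk below is the complementary cleanup: PAGE_SIZE is a power of two, so ALIGN(x, PAGE_SIZE) was already correct and merely becomes the idiomatic PAGE_ALIGN(x).
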
diff --git a/drivers/gpu/drm/omapdrm/omap_fb.c b/drivers/gpu/drm/omapdrm/omap_fb.c
index 0b967e7..51b1219 100644
--- a/drivers/gpu/drm/omapdrm/omap_fb.c
+++ b/drivers/gpu/drm/omapdrm/omap_fb.c
@@ -287,10 +287,10 @@
}
/* unpin, no longer being scanned out: */
-int omap_framebuffer_unpin(struct drm_framebuffer *fb)
+void omap_framebuffer_unpin(struct drm_framebuffer *fb)
{
struct omap_framebuffer *omap_fb = to_omap_framebuffer(fb);
- int ret, i, n = drm_format_num_planes(fb->pixel_format);
+ int i, n = drm_format_num_planes(fb->pixel_format);
mutex_lock(&omap_fb->lock);
@@ -298,24 +298,16 @@
if (omap_fb->pin_count > 0) {
mutex_unlock(&omap_fb->lock);
- return 0;
+ return;
}
for (i = 0; i < n; i++) {
struct plane *plane = &omap_fb->planes[i];
- ret = omap_gem_put_paddr(plane->bo);
- if (ret)
- goto fail;
+ omap_gem_put_paddr(plane->bo);
plane->paddr = 0;
}
mutex_unlock(&omap_fb->lock);
-
- return 0;
-
-fail:
- mutex_unlock(&omap_fb->lock);
- return ret;
}
struct drm_gem_object *omap_framebuffer_bo(struct drm_framebuffer *fb, int p)
diff --git a/drivers/gpu/drm/omapdrm/omap_fbdev.c b/drivers/gpu/drm/omapdrm/omap_fbdev.c
index 23b5a84..720d16b 100644
--- a/drivers/gpu/drm/omapdrm/omap_fbdev.c
+++ b/drivers/gpu/drm/omapdrm/omap_fbdev.c
@@ -135,7 +135,7 @@
fbdev->ywrap_enabled = priv->has_dmm && ywrap_enabled;
if (fbdev->ywrap_enabled) {
/* need to align pitch to page size if using DMM scrolling */
- mode_cmd.pitches[0] = ALIGN(mode_cmd.pitches[0], PAGE_SIZE);
+ mode_cmd.pitches[0] = PAGE_ALIGN(mode_cmd.pitches[0]);
}
/* allocate backing bo */
diff --git a/drivers/gpu/drm/omapdrm/omap_gem.c b/drivers/gpu/drm/omapdrm/omap_gem.c
index 2ab7780..7ed08fdc 100644
--- a/drivers/gpu/drm/omapdrm/omap_gem.c
+++ b/drivers/gpu/drm/omapdrm/omap_gem.c
@@ -808,10 +808,10 @@
/* Release physical address, when DMA is no longer being performed.. this
* could potentially unpin and unmap buffers from TILER
*/
-int omap_gem_put_paddr(struct drm_gem_object *obj)
+void omap_gem_put_paddr(struct drm_gem_object *obj)
{
struct omap_gem_object *omap_obj = to_omap_bo(obj);
- int ret = 0;
+ int ret;
mutex_lock(&obj->dev->struct_mutex);
if (omap_obj->paddr_cnt > 0) {
@@ -821,7 +821,6 @@
if (ret) {
dev_err(obj->dev->dev,
"could not unpin pages: %d\n", ret);
- goto fail;
}
ret = tiler_release(omap_obj->block);
if (ret) {
@@ -832,9 +831,8 @@
omap_obj->block = NULL;
}
}
-fail:
+
mutex_unlock(&obj->dev->struct_mutex);
- return ret;
}
/* Get rotated scanout address (only valid if already pinned), at the
@@ -1378,11 +1376,7 @@
omap_obj = kzalloc(sizeof(*omap_obj), GFP_KERNEL);
if (!omap_obj)
- goto fail;
-
- spin_lock(&priv->list_lock);
- list_add(&omap_obj->mm_list, &priv->obj_list);
- spin_unlock(&priv->list_lock);
+ return NULL;
obj = &omap_obj->base;
@@ -1392,11 +1386,19 @@
*/
omap_obj->vaddr = dma_alloc_writecombine(dev->dev, size,
&omap_obj->paddr, GFP_KERNEL);
- if (omap_obj->vaddr)
- flags |= OMAP_BO_DMA;
+ if (!omap_obj->vaddr) {
+ kfree(omap_obj);
+ return NULL;
+ }
+
+ flags |= OMAP_BO_DMA;
}
+ spin_lock(&priv->list_lock);
+ list_add(&omap_obj->mm_list, &priv->obj_list);
+ spin_unlock(&priv->list_lock);
+
omap_obj->flags = flags;
if (flags & OMAP_BO_TILED) {
diff --git a/drivers/gpu/drm/omapdrm/omap_plane.c b/drivers/gpu/drm/omapdrm/omap_plane.c
index cfa8276..0989046 100644
--- a/drivers/gpu/drm/omapdrm/omap_plane.c
+++ b/drivers/gpu/drm/omapdrm/omap_plane.c
@@ -17,6 +17,7 @@
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
+#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_plane_helper.h>
@@ -153,9 +154,34 @@
dispc_ovl_enable(omap_plane->id, false);
}
+static int omap_plane_atomic_check(struct drm_plane *plane,
+ struct drm_plane_state *state)
+{
+ struct drm_crtc_state *crtc_state;
+
+ if (!state->crtc)
+ return 0;
+
+ crtc_state = drm_atomic_get_crtc_state(state->state, state->crtc);
+ if (IS_ERR(crtc_state))
+ return PTR_ERR(crtc_state);
+
+ if (state->crtc_x < 0 || state->crtc_y < 0)
+ return -EINVAL;
+
+ if (state->crtc_x + state->crtc_w > crtc_state->adjusted_mode.hdisplay)
+ return -EINVAL;
+
+ if (state->crtc_y + state->crtc_h > crtc_state->adjusted_mode.vdisplay)
+ return -EINVAL;
+
+ return 0;
+}
+
static const struct drm_plane_helper_funcs omap_plane_helper_funcs = {
.prepare_fb = omap_plane_prepare_fb,
.cleanup_fb = omap_plane_cleanup_fb,
+ .atomic_check = omap_plane_atomic_check,
.atomic_update = omap_plane_atomic_update,
.atomic_disable = omap_plane_atomic_disable,
};
diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
index 4ecf5ca..248953d 100644
--- a/drivers/gpu/drm/radeon/cik.c
+++ b/drivers/gpu/drm/radeon/cik.c
@@ -7964,23 +7964,27 @@
case 1: /* D1 vblank/vline */
switch (src_data) {
case 0: /* D1 vblank */
- if (rdev->irq.stat_regs.cik.disp_int & LB_D1_VBLANK_INTERRUPT) {
- if (rdev->irq.crtc_vblank_int[0]) {
- drm_handle_vblank(rdev->ddev, 0);
- rdev->pm.vblank_sync = true;
- wake_up(&rdev->irq.vblank_queue);
- }
- if (atomic_read(&rdev->irq.pflip[0]))
- radeon_crtc_handle_vblank(rdev, 0);
- rdev->irq.stat_regs.cik.disp_int &= ~LB_D1_VBLANK_INTERRUPT;
- DRM_DEBUG("IH: D1 vblank\n");
+ if (!(rdev->irq.stat_regs.cik.disp_int & LB_D1_VBLANK_INTERRUPT))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ if (rdev->irq.crtc_vblank_int[0]) {
+ drm_handle_vblank(rdev->ddev, 0);
+ rdev->pm.vblank_sync = true;
+ wake_up(&rdev->irq.vblank_queue);
}
+ if (atomic_read(&rdev->irq.pflip[0]))
+ radeon_crtc_handle_vblank(rdev, 0);
+ rdev->irq.stat_regs.cik.disp_int &= ~LB_D1_VBLANK_INTERRUPT;
+ DRM_DEBUG("IH: D1 vblank\n");
+
break;
case 1: /* D1 vline */
- if (rdev->irq.stat_regs.cik.disp_int & LB_D1_VLINE_INTERRUPT) {
- rdev->irq.stat_regs.cik.disp_int &= ~LB_D1_VLINE_INTERRUPT;
- DRM_DEBUG("IH: D1 vline\n");
- }
+ if (!(rdev->irq.stat_regs.cik.disp_int & LB_D1_VLINE_INTERRUPT))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.cik.disp_int &= ~LB_D1_VLINE_INTERRUPT;
+ DRM_DEBUG("IH: D1 vline\n");
+
break;
default:
DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
@@ -7990,23 +7994,27 @@
case 2: /* D2 vblank/vline */
switch (src_data) {
case 0: /* D2 vblank */
- if (rdev->irq.stat_regs.cik.disp_int_cont & LB_D2_VBLANK_INTERRUPT) {
- if (rdev->irq.crtc_vblank_int[1]) {
- drm_handle_vblank(rdev->ddev, 1);
- rdev->pm.vblank_sync = true;
- wake_up(&rdev->irq.vblank_queue);
- }
- if (atomic_read(&rdev->irq.pflip[1]))
- radeon_crtc_handle_vblank(rdev, 1);
- rdev->irq.stat_regs.cik.disp_int_cont &= ~LB_D2_VBLANK_INTERRUPT;
- DRM_DEBUG("IH: D2 vblank\n");
+ if (!(rdev->irq.stat_regs.cik.disp_int_cont & LB_D2_VBLANK_INTERRUPT))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ if (rdev->irq.crtc_vblank_int[1]) {
+ drm_handle_vblank(rdev->ddev, 1);
+ rdev->pm.vblank_sync = true;
+ wake_up(&rdev->irq.vblank_queue);
}
+ if (atomic_read(&rdev->irq.pflip[1]))
+ radeon_crtc_handle_vblank(rdev, 1);
+ rdev->irq.stat_regs.cik.disp_int_cont &= ~LB_D2_VBLANK_INTERRUPT;
+ DRM_DEBUG("IH: D2 vblank\n");
+
break;
case 1: /* D2 vline */
- if (rdev->irq.stat_regs.cik.disp_int_cont & LB_D2_VLINE_INTERRUPT) {
- rdev->irq.stat_regs.cik.disp_int_cont &= ~LB_D2_VLINE_INTERRUPT;
- DRM_DEBUG("IH: D2 vline\n");
- }
+ if (!(rdev->irq.stat_regs.cik.disp_int_cont & LB_D2_VLINE_INTERRUPT))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.cik.disp_int_cont &= ~LB_D2_VLINE_INTERRUPT;
+ DRM_DEBUG("IH: D2 vline\n");
+
break;
default:
DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
@@ -8016,23 +8024,27 @@
case 3: /* D3 vblank/vline */
switch (src_data) {
case 0: /* D3 vblank */
- if (rdev->irq.stat_regs.cik.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT) {
- if (rdev->irq.crtc_vblank_int[2]) {
- drm_handle_vblank(rdev->ddev, 2);
- rdev->pm.vblank_sync = true;
- wake_up(&rdev->irq.vblank_queue);
- }
- if (atomic_read(&rdev->irq.pflip[2]))
- radeon_crtc_handle_vblank(rdev, 2);
- rdev->irq.stat_regs.cik.disp_int_cont2 &= ~LB_D3_VBLANK_INTERRUPT;
- DRM_DEBUG("IH: D3 vblank\n");
+ if (!(rdev->irq.stat_regs.cik.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ if (rdev->irq.crtc_vblank_int[2]) {
+ drm_handle_vblank(rdev->ddev, 2);
+ rdev->pm.vblank_sync = true;
+ wake_up(&rdev->irq.vblank_queue);
}
+ if (atomic_read(&rdev->irq.pflip[2]))
+ radeon_crtc_handle_vblank(rdev, 2);
+ rdev->irq.stat_regs.cik.disp_int_cont2 &= ~LB_D3_VBLANK_INTERRUPT;
+ DRM_DEBUG("IH: D3 vblank\n");
+
break;
case 1: /* D3 vline */
- if (rdev->irq.stat_regs.cik.disp_int_cont2 & LB_D3_VLINE_INTERRUPT) {
- rdev->irq.stat_regs.cik.disp_int_cont2 &= ~LB_D3_VLINE_INTERRUPT;
- DRM_DEBUG("IH: D3 vline\n");
- }
+ if (!(rdev->irq.stat_regs.cik.disp_int_cont2 & LB_D3_VLINE_INTERRUPT))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.cik.disp_int_cont2 &= ~LB_D3_VLINE_INTERRUPT;
+ DRM_DEBUG("IH: D3 vline\n");
+
break;
default:
DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
@@ -8042,23 +8054,27 @@
case 4: /* D4 vblank/vline */
switch (src_data) {
case 0: /* D4 vblank */
- if (rdev->irq.stat_regs.cik.disp_int_cont3 & LB_D4_VBLANK_INTERRUPT) {
- if (rdev->irq.crtc_vblank_int[3]) {
- drm_handle_vblank(rdev->ddev, 3);
- rdev->pm.vblank_sync = true;
- wake_up(&rdev->irq.vblank_queue);
- }
- if (atomic_read(&rdev->irq.pflip[3]))
- radeon_crtc_handle_vblank(rdev, 3);
- rdev->irq.stat_regs.cik.disp_int_cont3 &= ~LB_D4_VBLANK_INTERRUPT;
- DRM_DEBUG("IH: D4 vblank\n");
+ if (!(rdev->irq.stat_regs.cik.disp_int_cont3 & LB_D4_VBLANK_INTERRUPT))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ if (rdev->irq.crtc_vblank_int[3]) {
+ drm_handle_vblank(rdev->ddev, 3);
+ rdev->pm.vblank_sync = true;
+ wake_up(&rdev->irq.vblank_queue);
}
+ if (atomic_read(&rdev->irq.pflip[3]))
+ radeon_crtc_handle_vblank(rdev, 3);
+ rdev->irq.stat_regs.cik.disp_int_cont3 &= ~LB_D4_VBLANK_INTERRUPT;
+ DRM_DEBUG("IH: D4 vblank\n");
+
break;
case 1: /* D4 vline */
- if (rdev->irq.stat_regs.cik.disp_int_cont3 & LB_D4_VLINE_INTERRUPT) {
- rdev->irq.stat_regs.cik.disp_int_cont3 &= ~LB_D4_VLINE_INTERRUPT;
- DRM_DEBUG("IH: D4 vline\n");
- }
+ if (!(rdev->irq.stat_regs.cik.disp_int_cont3 & LB_D4_VLINE_INTERRUPT))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.cik.disp_int_cont3 &= ~LB_D4_VLINE_INTERRUPT;
+ DRM_DEBUG("IH: D4 vline\n");
+
break;
default:
DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
@@ -8068,23 +8084,27 @@
case 5: /* D5 vblank/vline */
switch (src_data) {
case 0: /* D5 vblank */
- if (rdev->irq.stat_regs.cik.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT) {
- if (rdev->irq.crtc_vblank_int[4]) {
- drm_handle_vblank(rdev->ddev, 4);
- rdev->pm.vblank_sync = true;
- wake_up(&rdev->irq.vblank_queue);
- }
- if (atomic_read(&rdev->irq.pflip[4]))
- radeon_crtc_handle_vblank(rdev, 4);
- rdev->irq.stat_regs.cik.disp_int_cont4 &= ~LB_D5_VBLANK_INTERRUPT;
- DRM_DEBUG("IH: D5 vblank\n");
+ if (!(rdev->irq.stat_regs.cik.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ if (rdev->irq.crtc_vblank_int[4]) {
+ drm_handle_vblank(rdev->ddev, 4);
+ rdev->pm.vblank_sync = true;
+ wake_up(&rdev->irq.vblank_queue);
}
+ if (atomic_read(&rdev->irq.pflip[4]))
+ radeon_crtc_handle_vblank(rdev, 4);
+ rdev->irq.stat_regs.cik.disp_int_cont4 &= ~LB_D5_VBLANK_INTERRUPT;
+ DRM_DEBUG("IH: D5 vblank\n");
+
break;
case 1: /* D5 vline */
- if (rdev->irq.stat_regs.cik.disp_int_cont4 & LB_D5_VLINE_INTERRUPT) {
- rdev->irq.stat_regs.cik.disp_int_cont4 &= ~LB_D5_VLINE_INTERRUPT;
- DRM_DEBUG("IH: D5 vline\n");
- }
+ if (!(rdev->irq.stat_regs.cik.disp_int_cont4 & LB_D5_VLINE_INTERRUPT))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.cik.disp_int_cont4 &= ~LB_D5_VLINE_INTERRUPT;
+ DRM_DEBUG("IH: D5 vline\n");
+
break;
default:
DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
@@ -8094,23 +8114,27 @@
case 6: /* D6 vblank/vline */
switch (src_data) {
case 0: /* D6 vblank */
- if (rdev->irq.stat_regs.cik.disp_int_cont5 & LB_D6_VBLANK_INTERRUPT) {
- if (rdev->irq.crtc_vblank_int[5]) {
- drm_handle_vblank(rdev->ddev, 5);
- rdev->pm.vblank_sync = true;
- wake_up(&rdev->irq.vblank_queue);
- }
- if (atomic_read(&rdev->irq.pflip[5]))
- radeon_crtc_handle_vblank(rdev, 5);
- rdev->irq.stat_regs.cik.disp_int_cont5 &= ~LB_D6_VBLANK_INTERRUPT;
- DRM_DEBUG("IH: D6 vblank\n");
+ if (!(rdev->irq.stat_regs.cik.disp_int_cont5 & LB_D6_VBLANK_INTERRUPT))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ if (rdev->irq.crtc_vblank_int[5]) {
+ drm_handle_vblank(rdev->ddev, 5);
+ rdev->pm.vblank_sync = true;
+ wake_up(&rdev->irq.vblank_queue);
}
+ if (atomic_read(&rdev->irq.pflip[5]))
+ radeon_crtc_handle_vblank(rdev, 5);
+ rdev->irq.stat_regs.cik.disp_int_cont5 &= ~LB_D6_VBLANK_INTERRUPT;
+ DRM_DEBUG("IH: D6 vblank\n");
+
break;
case 1: /* D6 vline */
- if (rdev->irq.stat_regs.cik.disp_int_cont5 & LB_D6_VLINE_INTERRUPT) {
- rdev->irq.stat_regs.cik.disp_int_cont5 &= ~LB_D6_VLINE_INTERRUPT;
- DRM_DEBUG("IH: D6 vline\n");
- }
+ if (!(rdev->irq.stat_regs.cik.disp_int_cont5 & LB_D6_VLINE_INTERRUPT))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.cik.disp_int_cont5 &= ~LB_D6_VLINE_INTERRUPT;
+ DRM_DEBUG("IH: D6 vline\n");
+
break;
default:
DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
@@ -8130,88 +8154,112 @@
case 42: /* HPD hotplug */
switch (src_data) {
case 0:
- if (rdev->irq.stat_regs.cik.disp_int & DC_HPD1_INTERRUPT) {
- rdev->irq.stat_regs.cik.disp_int &= ~DC_HPD1_INTERRUPT;
- queue_hotplug = true;
- DRM_DEBUG("IH: HPD1\n");
- }
+ if (!(rdev->irq.stat_regs.cik.disp_int & DC_HPD1_INTERRUPT))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.cik.disp_int &= ~DC_HPD1_INTERRUPT;
+ queue_hotplug = true;
+ DRM_DEBUG("IH: HPD1\n");
+
break;
case 1:
- if (rdev->irq.stat_regs.cik.disp_int_cont & DC_HPD2_INTERRUPT) {
- rdev->irq.stat_regs.cik.disp_int_cont &= ~DC_HPD2_INTERRUPT;
- queue_hotplug = true;
- DRM_DEBUG("IH: HPD2\n");
- }
+ if (!(rdev->irq.stat_regs.cik.disp_int_cont & DC_HPD2_INTERRUPT))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.cik.disp_int_cont &= ~DC_HPD2_INTERRUPT;
+ queue_hotplug = true;
+ DRM_DEBUG("IH: HPD2\n");
+
break;
case 2:
- if (rdev->irq.stat_regs.cik.disp_int_cont2 & DC_HPD3_INTERRUPT) {
- rdev->irq.stat_regs.cik.disp_int_cont2 &= ~DC_HPD3_INTERRUPT;
- queue_hotplug = true;
- DRM_DEBUG("IH: HPD3\n");
- }
+ if (!(rdev->irq.stat_regs.cik.disp_int_cont2 & DC_HPD3_INTERRUPT))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.cik.disp_int_cont2 &= ~DC_HPD3_INTERRUPT;
+ queue_hotplug = true;
+ DRM_DEBUG("IH: HPD3\n");
+
break;
case 3:
- if (rdev->irq.stat_regs.cik.disp_int_cont3 & DC_HPD4_INTERRUPT) {
- rdev->irq.stat_regs.cik.disp_int_cont3 &= ~DC_HPD4_INTERRUPT;
- queue_hotplug = true;
- DRM_DEBUG("IH: HPD4\n");
- }
+ if (!(rdev->irq.stat_regs.cik.disp_int_cont3 & DC_HPD4_INTERRUPT))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.cik.disp_int_cont3 &= ~DC_HPD4_INTERRUPT;
+ queue_hotplug = true;
+ DRM_DEBUG("IH: HPD4\n");
+
break;
case 4:
- if (rdev->irq.stat_regs.cik.disp_int_cont4 & DC_HPD5_INTERRUPT) {
- rdev->irq.stat_regs.cik.disp_int_cont4 &= ~DC_HPD5_INTERRUPT;
- queue_hotplug = true;
- DRM_DEBUG("IH: HPD5\n");
- }
+ if (!(rdev->irq.stat_regs.cik.disp_int_cont4 & DC_HPD5_INTERRUPT))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.cik.disp_int_cont4 &= ~DC_HPD5_INTERRUPT;
+ queue_hotplug = true;
+ DRM_DEBUG("IH: HPD5\n");
+
break;
case 5:
- if (rdev->irq.stat_regs.cik.disp_int_cont5 & DC_HPD6_INTERRUPT) {
- rdev->irq.stat_regs.cik.disp_int_cont5 &= ~DC_HPD6_INTERRUPT;
- queue_hotplug = true;
- DRM_DEBUG("IH: HPD6\n");
- }
+ if (!(rdev->irq.stat_regs.cik.disp_int_cont5 & DC_HPD6_INTERRUPT))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.cik.disp_int_cont5 &= ~DC_HPD6_INTERRUPT;
+ queue_hotplug = true;
+ DRM_DEBUG("IH: HPD6\n");
+
break;
case 6:
- if (rdev->irq.stat_regs.cik.disp_int & DC_HPD1_RX_INTERRUPT) {
- rdev->irq.stat_regs.cik.disp_int &= ~DC_HPD1_RX_INTERRUPT;
- queue_dp = true;
- DRM_DEBUG("IH: HPD_RX 1\n");
- }
+ if (!(rdev->irq.stat_regs.cik.disp_int & DC_HPD1_RX_INTERRUPT))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.cik.disp_int &= ~DC_HPD1_RX_INTERRUPT;
+ queue_dp = true;
+ DRM_DEBUG("IH: HPD_RX 1\n");
+
break;
case 7:
- if (rdev->irq.stat_regs.cik.disp_int_cont & DC_HPD2_RX_INTERRUPT) {
- rdev->irq.stat_regs.cik.disp_int_cont &= ~DC_HPD2_RX_INTERRUPT;
- queue_dp = true;
- DRM_DEBUG("IH: HPD_RX 2\n");
- }
+ if (!(rdev->irq.stat_regs.cik.disp_int_cont & DC_HPD2_RX_INTERRUPT))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.cik.disp_int_cont &= ~DC_HPD2_RX_INTERRUPT;
+ queue_dp = true;
+ DRM_DEBUG("IH: HPD_RX 2\n");
+
break;
case 8:
- if (rdev->irq.stat_regs.cik.disp_int_cont2 & DC_HPD3_RX_INTERRUPT) {
- rdev->irq.stat_regs.cik.disp_int_cont2 &= ~DC_HPD3_RX_INTERRUPT;
- queue_dp = true;
- DRM_DEBUG("IH: HPD_RX 3\n");
- }
+ if (!(rdev->irq.stat_regs.cik.disp_int_cont2 & DC_HPD3_RX_INTERRUPT))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.cik.disp_int_cont2 &= ~DC_HPD3_RX_INTERRUPT;
+ queue_dp = true;
+ DRM_DEBUG("IH: HPD_RX 3\n");
+
break;
case 9:
- if (rdev->irq.stat_regs.cik.disp_int_cont3 & DC_HPD4_RX_INTERRUPT) {
- rdev->irq.stat_regs.cik.disp_int_cont3 &= ~DC_HPD4_RX_INTERRUPT;
- queue_dp = true;
- DRM_DEBUG("IH: HPD_RX 4\n");
- }
+ if (!(rdev->irq.stat_regs.cik.disp_int_cont3 & DC_HPD4_RX_INTERRUPT))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.cik.disp_int_cont3 &= ~DC_HPD4_RX_INTERRUPT;
+ queue_dp = true;
+ DRM_DEBUG("IH: HPD_RX 4\n");
+
break;
case 10:
- if (rdev->irq.stat_regs.cik.disp_int_cont4 & DC_HPD5_RX_INTERRUPT) {
- rdev->irq.stat_regs.cik.disp_int_cont4 &= ~DC_HPD5_RX_INTERRUPT;
- queue_dp = true;
- DRM_DEBUG("IH: HPD_RX 5\n");
- }
+ if (!(rdev->irq.stat_regs.cik.disp_int_cont4 & DC_HPD5_RX_INTERRUPT))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.cik.disp_int_cont4 &= ~DC_HPD5_RX_INTERRUPT;
+ queue_dp = true;
+ DRM_DEBUG("IH: HPD_RX 5\n");
+
break;
case 11:
- if (rdev->irq.stat_regs.cik.disp_int_cont5 & DC_HPD6_RX_INTERRUPT) {
- rdev->irq.stat_regs.cik.disp_int_cont5 &= ~DC_HPD6_RX_INTERRUPT;
- queue_dp = true;
- DRM_DEBUG("IH: HPD_RX 6\n");
- }
+ if (!(rdev->irq.stat_regs.cik.disp_int_cont5 & DC_HPD6_RX_INTERRUPT))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.cik.disp_int_cont5 &= ~DC_HPD6_RX_INTERRUPT;
+ queue_dp = true;
+ DRM_DEBUG("IH: HPD_RX 6\n");
+
break;
default:
DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index 3a6d483..0acde19 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -4924,7 +4924,7 @@
return IRQ_NONE;
rptr = rdev->ih.rptr;
- DRM_DEBUG("r600_irq_process start: rptr %d, wptr %d\n", rptr, wptr);
+ DRM_DEBUG("evergreen_irq_process start: rptr %d, wptr %d\n", rptr, wptr);
/* Order reading of wptr vs. reading of IH ring data */
rmb();
@@ -4942,23 +4942,27 @@
case 1: /* D1 vblank/vline */
switch (src_data) {
case 0: /* D1 vblank */
- if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VBLANK_INTERRUPT) {
- if (rdev->irq.crtc_vblank_int[0]) {
- drm_handle_vblank(rdev->ddev, 0);
- rdev->pm.vblank_sync = true;
- wake_up(&rdev->irq.vblank_queue);
- }
- if (atomic_read(&rdev->irq.pflip[0]))
- radeon_crtc_handle_vblank(rdev, 0);
- rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VBLANK_INTERRUPT;
- DRM_DEBUG("IH: D1 vblank\n");
+ if (!(rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VBLANK_INTERRUPT))
+ DRM_DEBUG("IH: D1 vblank - IH event w/o asserted irq bit?\n");
+
+ if (rdev->irq.crtc_vblank_int[0]) {
+ drm_handle_vblank(rdev->ddev, 0);
+ rdev->pm.vblank_sync = true;
+ wake_up(&rdev->irq.vblank_queue);
}
+ if (atomic_read(&rdev->irq.pflip[0]))
+ radeon_crtc_handle_vblank(rdev, 0);
+ rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VBLANK_INTERRUPT;
+ DRM_DEBUG("IH: D1 vblank\n");
+
break;
case 1: /* D1 vline */
- if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VLINE_INTERRUPT) {
- rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VLINE_INTERRUPT;
- DRM_DEBUG("IH: D1 vline\n");
- }
+ if (!(rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VLINE_INTERRUPT))
+ DRM_DEBUG("IH: D1 vline - IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VLINE_INTERRUPT;
+ DRM_DEBUG("IH: D1 vline\n");
+
break;
default:
DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
@@ -4968,23 +4972,27 @@
case 2: /* D2 vblank/vline */
switch (src_data) {
case 0: /* D2 vblank */
- if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VBLANK_INTERRUPT) {
- if (rdev->irq.crtc_vblank_int[1]) {
- drm_handle_vblank(rdev->ddev, 1);
- rdev->pm.vblank_sync = true;
- wake_up(&rdev->irq.vblank_queue);
- }
- if (atomic_read(&rdev->irq.pflip[1]))
- radeon_crtc_handle_vblank(rdev, 1);
- rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VBLANK_INTERRUPT;
- DRM_DEBUG("IH: D2 vblank\n");
+ if (!(rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VBLANK_INTERRUPT))
+ DRM_DEBUG("IH: D2 vblank - IH event w/o asserted irq bit?\n");
+
+ if (rdev->irq.crtc_vblank_int[1]) {
+ drm_handle_vblank(rdev->ddev, 1);
+ rdev->pm.vblank_sync = true;
+ wake_up(&rdev->irq.vblank_queue);
}
+ if (atomic_read(&rdev->irq.pflip[1]))
+ radeon_crtc_handle_vblank(rdev, 1);
+ rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VBLANK_INTERRUPT;
+ DRM_DEBUG("IH: D2 vblank\n");
+
break;
case 1: /* D2 vline */
- if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VLINE_INTERRUPT) {
- rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VLINE_INTERRUPT;
- DRM_DEBUG("IH: D2 vline\n");
- }
+ if (!(rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VLINE_INTERRUPT))
+ DRM_DEBUG("IH: D2 vline - IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VLINE_INTERRUPT;
+ DRM_DEBUG("IH: D2 vline\n");
+
break;
default:
DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
@@ -4994,23 +5002,27 @@
case 3: /* D3 vblank/vline */
switch (src_data) {
case 0: /* D3 vblank */
- if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT) {
- if (rdev->irq.crtc_vblank_int[2]) {
- drm_handle_vblank(rdev->ddev, 2);
- rdev->pm.vblank_sync = true;
- wake_up(&rdev->irq.vblank_queue);
- }
- if (atomic_read(&rdev->irq.pflip[2]))
- radeon_crtc_handle_vblank(rdev, 2);
- rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VBLANK_INTERRUPT;
- DRM_DEBUG("IH: D3 vblank\n");
+ if (!(rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT))
+ DRM_DEBUG("IH: D3 vblank - IH event w/o asserted irq bit?\n");
+
+ if (rdev->irq.crtc_vblank_int[2]) {
+ drm_handle_vblank(rdev->ddev, 2);
+ rdev->pm.vblank_sync = true;
+ wake_up(&rdev->irq.vblank_queue);
}
+ if (atomic_read(&rdev->irq.pflip[2]))
+ radeon_crtc_handle_vblank(rdev, 2);
+ rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VBLANK_INTERRUPT;
+ DRM_DEBUG("IH: D3 vblank\n");
+
break;
case 1: /* D3 vline */
- if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VLINE_INTERRUPT) {
- rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VLINE_INTERRUPT;
- DRM_DEBUG("IH: D3 vline\n");
- }
+ if (!(rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VLINE_INTERRUPT))
+ DRM_DEBUG("IH: D3 vline - IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VLINE_INTERRUPT;
+ DRM_DEBUG("IH: D3 vline\n");
+
break;
default:
DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
@@ -5020,23 +5032,27 @@
case 4: /* D4 vblank/vline */
switch (src_data) {
case 0: /* D4 vblank */
- if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VBLANK_INTERRUPT) {
- if (rdev->irq.crtc_vblank_int[3]) {
- drm_handle_vblank(rdev->ddev, 3);
- rdev->pm.vblank_sync = true;
- wake_up(&rdev->irq.vblank_queue);
- }
- if (atomic_read(&rdev->irq.pflip[3]))
- radeon_crtc_handle_vblank(rdev, 3);
- rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VBLANK_INTERRUPT;
- DRM_DEBUG("IH: D4 vblank\n");
+ if (!(rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VBLANK_INTERRUPT))
+ DRM_DEBUG("IH: D4 vblank - IH event w/o asserted irq bit?\n");
+
+ if (rdev->irq.crtc_vblank_int[3]) {
+ drm_handle_vblank(rdev->ddev, 3);
+ rdev->pm.vblank_sync = true;
+ wake_up(&rdev->irq.vblank_queue);
}
+ if (atomic_read(&rdev->irq.pflip[3]))
+ radeon_crtc_handle_vblank(rdev, 3);
+ rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VBLANK_INTERRUPT;
+ DRM_DEBUG("IH: D4 vblank\n");
+
break;
case 1: /* D4 vline */
- if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VLINE_INTERRUPT) {
- rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VLINE_INTERRUPT;
- DRM_DEBUG("IH: D4 vline\n");
- }
+ if (!(rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VLINE_INTERRUPT))
+ DRM_DEBUG("IH: D4 vline - IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VLINE_INTERRUPT;
+ DRM_DEBUG("IH: D4 vline\n");
+
break;
default:
DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
@@ -5046,23 +5062,27 @@
case 5: /* D5 vblank/vline */
switch (src_data) {
case 0: /* D5 vblank */
- if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT) {
- if (rdev->irq.crtc_vblank_int[4]) {
- drm_handle_vblank(rdev->ddev, 4);
- rdev->pm.vblank_sync = true;
- wake_up(&rdev->irq.vblank_queue);
- }
- if (atomic_read(&rdev->irq.pflip[4]))
- radeon_crtc_handle_vblank(rdev, 4);
- rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VBLANK_INTERRUPT;
- DRM_DEBUG("IH: D5 vblank\n");
+ if (!(rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT))
+ DRM_DEBUG("IH: D5 vblank - IH event w/o asserted irq bit?\n");
+
+ if (rdev->irq.crtc_vblank_int[4]) {
+ drm_handle_vblank(rdev->ddev, 4);
+ rdev->pm.vblank_sync = true;
+ wake_up(&rdev->irq.vblank_queue);
}
+ if (atomic_read(&rdev->irq.pflip[4]))
+ radeon_crtc_handle_vblank(rdev, 4);
+ rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VBLANK_INTERRUPT;
+ DRM_DEBUG("IH: D5 vblank\n");
+
break;
case 1: /* D5 vline */
- if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VLINE_INTERRUPT) {
- rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VLINE_INTERRUPT;
- DRM_DEBUG("IH: D5 vline\n");
- }
+ if (!(rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VLINE_INTERRUPT))
+ DRM_DEBUG("IH: D5 vline - IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VLINE_INTERRUPT;
+ DRM_DEBUG("IH: D5 vline\n");
+
break;
default:
DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
@@ -5072,23 +5092,27 @@
case 6: /* D6 vblank/vline */
switch (src_data) {
case 0: /* D6 vblank */
- if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VBLANK_INTERRUPT) {
- if (rdev->irq.crtc_vblank_int[5]) {
- drm_handle_vblank(rdev->ddev, 5);
- rdev->pm.vblank_sync = true;
- wake_up(&rdev->irq.vblank_queue);
- }
- if (atomic_read(&rdev->irq.pflip[5]))
- radeon_crtc_handle_vblank(rdev, 5);
- rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VBLANK_INTERRUPT;
- DRM_DEBUG("IH: D6 vblank\n");
+ if (!(rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VBLANK_INTERRUPT))
+ DRM_DEBUG("IH: D6 vblank - IH event w/o asserted irq bit?\n");
+
+ if (rdev->irq.crtc_vblank_int[5]) {
+ drm_handle_vblank(rdev->ddev, 5);
+ rdev->pm.vblank_sync = true;
+ wake_up(&rdev->irq.vblank_queue);
}
+ if (atomic_read(&rdev->irq.pflip[5]))
+ radeon_crtc_handle_vblank(rdev, 5);
+ rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VBLANK_INTERRUPT;
+ DRM_DEBUG("IH: D6 vblank\n");
+
break;
case 1: /* D6 vline */
- if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VLINE_INTERRUPT) {
- rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VLINE_INTERRUPT;
- DRM_DEBUG("IH: D6 vline\n");
- }
+ if (!(rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VLINE_INTERRUPT))
+ DRM_DEBUG("IH: D6 vline - IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VLINE_INTERRUPT;
+ DRM_DEBUG("IH: D6 vline\n");
+
break;
default:
DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
@@ -5108,88 +5132,100 @@
case 42: /* HPD hotplug */
switch (src_data) {
case 0:
- if (rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_INTERRUPT) {
- rdev->irq.stat_regs.evergreen.disp_int &= ~DC_HPD1_INTERRUPT;
- queue_hotplug = true;
- DRM_DEBUG("IH: HPD1\n");
- }
+ if (!(rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_INTERRUPT))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.evergreen.disp_int &= ~DC_HPD1_INTERRUPT;
+ queue_hotplug = true;
+ DRM_DEBUG("IH: HPD1\n");
break;
case 1:
- if (rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_INTERRUPT) {
- rdev->irq.stat_regs.evergreen.disp_int_cont &= ~DC_HPD2_INTERRUPT;
- queue_hotplug = true;
- DRM_DEBUG("IH: HPD2\n");
- }
+ if (!(rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_INTERRUPT))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.evergreen.disp_int_cont &= ~DC_HPD2_INTERRUPT;
+ queue_hotplug = true;
+ DRM_DEBUG("IH: HPD2\n");
break;
case 2:
- if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_INTERRUPT) {
- rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~DC_HPD3_INTERRUPT;
- queue_hotplug = true;
- DRM_DEBUG("IH: HPD3\n");
- }
+ if (!(rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_INTERRUPT))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~DC_HPD3_INTERRUPT;
+ queue_hotplug = true;
+ DRM_DEBUG("IH: HPD3\n");
break;
case 3:
- if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_INTERRUPT) {
- rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~DC_HPD4_INTERRUPT;
- queue_hotplug = true;
- DRM_DEBUG("IH: HPD4\n");
- }
+ if (!(rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_INTERRUPT))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~DC_HPD4_INTERRUPT;
+ queue_hotplug = true;
+ DRM_DEBUG("IH: HPD4\n");
break;
case 4:
- if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_INTERRUPT) {
- rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~DC_HPD5_INTERRUPT;
- queue_hotplug = true;
- DRM_DEBUG("IH: HPD5\n");
- }
+ if (!(rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_INTERRUPT))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~DC_HPD5_INTERRUPT;
+ queue_hotplug = true;
+ DRM_DEBUG("IH: HPD5\n");
break;
case 5:
- if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_INTERRUPT) {
- rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~DC_HPD6_INTERRUPT;
- queue_hotplug = true;
- DRM_DEBUG("IH: HPD6\n");
- }
+ if (!(rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_INTERRUPT))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~DC_HPD6_INTERRUPT;
+ queue_hotplug = true;
+ DRM_DEBUG("IH: HPD6\n");
break;
case 6:
- if (rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_RX_INTERRUPT) {
- rdev->irq.stat_regs.evergreen.disp_int &= ~DC_HPD1_RX_INTERRUPT;
- queue_dp = true;
- DRM_DEBUG("IH: HPD_RX 1\n");
- }
+ if (!(rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_RX_INTERRUPT))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.evergreen.disp_int &= ~DC_HPD1_RX_INTERRUPT;
+ queue_dp = true;
+ DRM_DEBUG("IH: HPD_RX 1\n");
break;
case 7:
- if (rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_RX_INTERRUPT) {
- rdev->irq.stat_regs.evergreen.disp_int_cont &= ~DC_HPD2_RX_INTERRUPT;
- queue_dp = true;
- DRM_DEBUG("IH: HPD_RX 2\n");
- }
+ if (!(rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_RX_INTERRUPT))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.evergreen.disp_int_cont &= ~DC_HPD2_RX_INTERRUPT;
+ queue_dp = true;
+ DRM_DEBUG("IH: HPD_RX 2\n");
break;
case 8:
- if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_RX_INTERRUPT) {
- rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~DC_HPD3_RX_INTERRUPT;
- queue_dp = true;
- DRM_DEBUG("IH: HPD_RX 3\n");
- }
+ if (!(rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_RX_INTERRUPT))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~DC_HPD3_RX_INTERRUPT;
+ queue_dp = true;
+ DRM_DEBUG("IH: HPD_RX 3\n");
break;
case 9:
- if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_RX_INTERRUPT) {
- rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~DC_HPD4_RX_INTERRUPT;
- queue_dp = true;
- DRM_DEBUG("IH: HPD_RX 4\n");
- }
+ if (!(rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_RX_INTERRUPT))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~DC_HPD4_RX_INTERRUPT;
+ queue_dp = true;
+ DRM_DEBUG("IH: HPD_RX 4\n");
break;
case 10:
- if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_RX_INTERRUPT) {
- rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~DC_HPD5_RX_INTERRUPT;
- queue_dp = true;
- DRM_DEBUG("IH: HPD_RX 5\n");
- }
+ if (!(rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_RX_INTERRUPT))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~DC_HPD5_RX_INTERRUPT;
+ queue_dp = true;
+ DRM_DEBUG("IH: HPD_RX 5\n");
break;
case 11:
- if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_RX_INTERRUPT) {
- rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~DC_HPD6_RX_INTERRUPT;
- queue_dp = true;
- DRM_DEBUG("IH: HPD_RX 6\n");
- }
+ if (!(rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_RX_INTERRUPT))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~DC_HPD6_RX_INTERRUPT;
+ queue_dp = true;
+ DRM_DEBUG("IH: HPD_RX 6\n");
break;
default:
DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
@@ -5199,46 +5235,52 @@
case 44: /* hdmi */
switch (src_data) {
case 0:
- if (rdev->irq.stat_regs.evergreen.afmt_status1 & AFMT_AZ_FORMAT_WTRIG) {
- rdev->irq.stat_regs.evergreen.afmt_status1 &= ~AFMT_AZ_FORMAT_WTRIG;
- queue_hdmi = true;
- DRM_DEBUG("IH: HDMI0\n");
- }
+ if (!(rdev->irq.stat_regs.evergreen.afmt_status1 & AFMT_AZ_FORMAT_WTRIG))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.evergreen.afmt_status1 &= ~AFMT_AZ_FORMAT_WTRIG;
+ queue_hdmi = true;
+ DRM_DEBUG("IH: HDMI0\n");
break;
case 1:
- if (rdev->irq.stat_regs.evergreen.afmt_status2 & AFMT_AZ_FORMAT_WTRIG) {
- rdev->irq.stat_regs.evergreen.afmt_status2 &= ~AFMT_AZ_FORMAT_WTRIG;
- queue_hdmi = true;
- DRM_DEBUG("IH: HDMI1\n");
- }
+ if (!(rdev->irq.stat_regs.evergreen.afmt_status2 & AFMT_AZ_FORMAT_WTRIG))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.evergreen.afmt_status2 &= ~AFMT_AZ_FORMAT_WTRIG;
+ queue_hdmi = true;
+ DRM_DEBUG("IH: HDMI1\n");
break;
case 2:
- if (rdev->irq.stat_regs.evergreen.afmt_status3 & AFMT_AZ_FORMAT_WTRIG) {
- rdev->irq.stat_regs.evergreen.afmt_status3 &= ~AFMT_AZ_FORMAT_WTRIG;
- queue_hdmi = true;
- DRM_DEBUG("IH: HDMI2\n");
- }
+ if (!(rdev->irq.stat_regs.evergreen.afmt_status3 & AFMT_AZ_FORMAT_WTRIG))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.evergreen.afmt_status3 &= ~AFMT_AZ_FORMAT_WTRIG;
+ queue_hdmi = true;
+ DRM_DEBUG("IH: HDMI2\n");
break;
case 3:
- if (rdev->irq.stat_regs.evergreen.afmt_status4 & AFMT_AZ_FORMAT_WTRIG) {
- rdev->irq.stat_regs.evergreen.afmt_status4 &= ~AFMT_AZ_FORMAT_WTRIG;
- queue_hdmi = true;
- DRM_DEBUG("IH: HDMI3\n");
- }
+ if (!(rdev->irq.stat_regs.evergreen.afmt_status4 & AFMT_AZ_FORMAT_WTRIG))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.evergreen.afmt_status4 &= ~AFMT_AZ_FORMAT_WTRIG;
+ queue_hdmi = true;
+ DRM_DEBUG("IH: HDMI3\n");
break;
case 4:
- if (rdev->irq.stat_regs.evergreen.afmt_status5 & AFMT_AZ_FORMAT_WTRIG) {
- rdev->irq.stat_regs.evergreen.afmt_status5 &= ~AFMT_AZ_FORMAT_WTRIG;
- queue_hdmi = true;
- DRM_DEBUG("IH: HDMI4\n");
- }
+ if (!(rdev->irq.stat_regs.evergreen.afmt_status5 & AFMT_AZ_FORMAT_WTRIG))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.evergreen.afmt_status5 &= ~AFMT_AZ_FORMAT_WTRIG;
+ queue_hdmi = true;
+ DRM_DEBUG("IH: HDMI4\n");
break;
case 5:
- if (rdev->irq.stat_regs.evergreen.afmt_status6 & AFMT_AZ_FORMAT_WTRIG) {
- rdev->irq.stat_regs.evergreen.afmt_status6 &= ~AFMT_AZ_FORMAT_WTRIG;
- queue_hdmi = true;
- DRM_DEBUG("IH: HDMI5\n");
- }
+ if (!(rdev->irq.stat_regs.evergreen.afmt_status6 & AFMT_AZ_FORMAT_WTRIG))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.evergreen.afmt_status6 &= ~AFMT_AZ_FORMAT_WTRIG;
+ queue_hdmi = true;
+ DRM_DEBUG("IH: HDMI5\n");
break;
default:
DRM_ERROR("Unhandled interrupt: %d %d\n", src_id, src_data);
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
index 8e5aeeb..158872e 100644
--- a/drivers/gpu/drm/radeon/ni.c
+++ b/drivers/gpu/drm/radeon/ni.c
@@ -2162,18 +2162,20 @@
DRM_ERROR("radeon: failed initializing UVD (%d).\n", r);
}
- ring = &rdev->ring[TN_RING_TYPE_VCE1_INDEX];
- if (ring->ring_size)
- r = radeon_ring_init(rdev, ring, ring->ring_size, 0, 0x0);
+ if (rdev->family == CHIP_ARUBA) {
+ ring = &rdev->ring[TN_RING_TYPE_VCE1_INDEX];
+ if (ring->ring_size)
+ r = radeon_ring_init(rdev, ring, ring->ring_size, 0, 0x0);
- ring = &rdev->ring[TN_RING_TYPE_VCE2_INDEX];
- if (ring->ring_size)
- r = radeon_ring_init(rdev, ring, ring->ring_size, 0, 0x0);
+ ring = &rdev->ring[TN_RING_TYPE_VCE2_INDEX];
+ if (ring->ring_size)
+ r = radeon_ring_init(rdev, ring, ring->ring_size, 0, 0x0);
- if (!r)
- r = vce_v1_0_init(rdev);
- else if (r != -ENOENT)
- DRM_ERROR("radeon: failed initializing VCE (%d).\n", r);
+ if (!r)
+ r = vce_v1_0_init(rdev);
+ if (r)
+ DRM_ERROR("radeon: failed initializing VCE (%d).\n", r);
+ }
r = radeon_ib_pool_init(rdev);
if (r) {
@@ -2396,7 +2398,8 @@
radeon_irq_kms_fini(rdev);
uvd_v1_0_fini(rdev);
radeon_uvd_fini(rdev);
- radeon_vce_fini(rdev);
+ if (rdev->family == CHIP_ARUBA)
+ radeon_vce_fini(rdev);
cayman_pcie_gart_fini(rdev);
r600_vram_scratch_fini(rdev);
radeon_gem_fini(rdev);
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 35dafd7..4ea5b10 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -4086,23 +4086,27 @@
case 1: /* D1 vblank/vline */
switch (src_data) {
case 0: /* D1 vblank */
- if (rdev->irq.stat_regs.r600.disp_int & LB_D1_VBLANK_INTERRUPT) {
- if (rdev->irq.crtc_vblank_int[0]) {
- drm_handle_vblank(rdev->ddev, 0);
- rdev->pm.vblank_sync = true;
- wake_up(&rdev->irq.vblank_queue);
- }
- if (atomic_read(&rdev->irq.pflip[0]))
- radeon_crtc_handle_vblank(rdev, 0);
- rdev->irq.stat_regs.r600.disp_int &= ~LB_D1_VBLANK_INTERRUPT;
- DRM_DEBUG("IH: D1 vblank\n");
+ if (!(rdev->irq.stat_regs.r600.disp_int & LB_D1_VBLANK_INTERRUPT))
+ DRM_DEBUG("IH: D1 vblank - IH event w/o asserted irq bit?\n");
+
+ if (rdev->irq.crtc_vblank_int[0]) {
+ drm_handle_vblank(rdev->ddev, 0);
+ rdev->pm.vblank_sync = true;
+ wake_up(&rdev->irq.vblank_queue);
}
+ if (atomic_read(&rdev->irq.pflip[0]))
+ radeon_crtc_handle_vblank(rdev, 0);
+ rdev->irq.stat_regs.r600.disp_int &= ~LB_D1_VBLANK_INTERRUPT;
+ DRM_DEBUG("IH: D1 vblank\n");
+
break;
case 1: /* D1 vline */
- if (rdev->irq.stat_regs.r600.disp_int & LB_D1_VLINE_INTERRUPT) {
- rdev->irq.stat_regs.r600.disp_int &= ~LB_D1_VLINE_INTERRUPT;
- DRM_DEBUG("IH: D1 vline\n");
- }
+ if (!(rdev->irq.stat_regs.r600.disp_int & LB_D1_VLINE_INTERRUPT))
+ DRM_DEBUG("IH: D1 vline - IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.r600.disp_int &= ~LB_D1_VLINE_INTERRUPT;
+ DRM_DEBUG("IH: D1 vline\n");
+
break;
default:
DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
@@ -4112,23 +4116,27 @@
case 5: /* D2 vblank/vline */
switch (src_data) {
case 0: /* D2 vblank */
- if (rdev->irq.stat_regs.r600.disp_int & LB_D2_VBLANK_INTERRUPT) {
- if (rdev->irq.crtc_vblank_int[1]) {
- drm_handle_vblank(rdev->ddev, 1);
- rdev->pm.vblank_sync = true;
- wake_up(&rdev->irq.vblank_queue);
- }
- if (atomic_read(&rdev->irq.pflip[1]))
- radeon_crtc_handle_vblank(rdev, 1);
- rdev->irq.stat_regs.r600.disp_int &= ~LB_D2_VBLANK_INTERRUPT;
- DRM_DEBUG("IH: D2 vblank\n");
+ if (!(rdev->irq.stat_regs.r600.disp_int & LB_D2_VBLANK_INTERRUPT))
+ DRM_DEBUG("IH: D2 vblank - IH event w/o asserted irq bit?\n");
+
+ if (rdev->irq.crtc_vblank_int[1]) {
+ drm_handle_vblank(rdev->ddev, 1);
+ rdev->pm.vblank_sync = true;
+ wake_up(&rdev->irq.vblank_queue);
}
+ if (atomic_read(&rdev->irq.pflip[1]))
+ radeon_crtc_handle_vblank(rdev, 1);
+ rdev->irq.stat_regs.r600.disp_int &= ~LB_D2_VBLANK_INTERRUPT;
+ DRM_DEBUG("IH: D2 vblank\n");
+
break;
case 1: /* D1 vline */
- if (rdev->irq.stat_regs.r600.disp_int & LB_D2_VLINE_INTERRUPT) {
- rdev->irq.stat_regs.r600.disp_int &= ~LB_D2_VLINE_INTERRUPT;
- DRM_DEBUG("IH: D2 vline\n");
- }
+ if (!(rdev->irq.stat_regs.r600.disp_int & LB_D2_VLINE_INTERRUPT))
+ DRM_DEBUG("IH: D2 vline - IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.r600.disp_int &= ~LB_D2_VLINE_INTERRUPT;
+ DRM_DEBUG("IH: D2 vline\n");
+
break;
default:
DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
@@ -4148,46 +4156,53 @@
case 19: /* HPD/DAC hotplug */
switch (src_data) {
case 0:
- if (rdev->irq.stat_regs.r600.disp_int & DC_HPD1_INTERRUPT) {
- rdev->irq.stat_regs.r600.disp_int &= ~DC_HPD1_INTERRUPT;
- queue_hotplug = true;
- DRM_DEBUG("IH: HPD1\n");
- }
+ if (!(rdev->irq.stat_regs.r600.disp_int & DC_HPD1_INTERRUPT))
+ DRM_DEBUG("IH: HPD1 - IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.r600.disp_int &= ~DC_HPD1_INTERRUPT;
+ queue_hotplug = true;
+ DRM_DEBUG("IH: HPD1\n");
break;
case 1:
- if (rdev->irq.stat_regs.r600.disp_int & DC_HPD2_INTERRUPT) {
- rdev->irq.stat_regs.r600.disp_int &= ~DC_HPD2_INTERRUPT;
- queue_hotplug = true;
- DRM_DEBUG("IH: HPD2\n");
- }
+ if (!(rdev->irq.stat_regs.r600.disp_int & DC_HPD2_INTERRUPT))
+ DRM_DEBUG("IH: HPD2 - IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.r600.disp_int &= ~DC_HPD2_INTERRUPT;
+ queue_hotplug = true;
+ DRM_DEBUG("IH: HPD2\n");
break;
case 4:
- if (rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD3_INTERRUPT) {
- rdev->irq.stat_regs.r600.disp_int_cont &= ~DC_HPD3_INTERRUPT;
- queue_hotplug = true;
- DRM_DEBUG("IH: HPD3\n");
- }
+ if (!(rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD3_INTERRUPT))
+ DRM_DEBUG("IH: HPD3 - IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.r600.disp_int_cont &= ~DC_HPD3_INTERRUPT;
+ queue_hotplug = true;
+ DRM_DEBUG("IH: HPD3\n");
break;
case 5:
- if (rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD4_INTERRUPT) {
- rdev->irq.stat_regs.r600.disp_int_cont &= ~DC_HPD4_INTERRUPT;
- queue_hotplug = true;
- DRM_DEBUG("IH: HPD4\n");
- }
+ if (!(rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD4_INTERRUPT))
+ DRM_DEBUG("IH: HPD4 - IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.r600.disp_int_cont &= ~DC_HPD4_INTERRUPT;
+ queue_hotplug = true;
+ DRM_DEBUG("IH: HPD4\n");
break;
case 10:
- if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD5_INTERRUPT) {
- rdev->irq.stat_regs.r600.disp_int_cont2 &= ~DC_HPD5_INTERRUPT;
- queue_hotplug = true;
- DRM_DEBUG("IH: HPD5\n");
- }
+ if (!(rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD5_INTERRUPT))
+ DRM_DEBUG("IH: HPD5 - IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.r600.disp_int_cont2 &= ~DC_HPD5_INTERRUPT;
+ queue_hotplug = true;
+ DRM_DEBUG("IH: HPD5\n");
break;
case 12:
- if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD6_INTERRUPT) {
- rdev->irq.stat_regs.r600.disp_int_cont2 &= ~DC_HPD6_INTERRUPT;
- queue_hotplug = true;
- DRM_DEBUG("IH: HPD6\n");
- }
+ if (!(rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD6_INTERRUPT))
+ DRM_DEBUG("IH: HPD6 - IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.r600.disp_int_cont2 &= ~DC_HPD6_INTERRUPT;
+ queue_hotplug = true;
+ DRM_DEBUG("IH: HPD6\n");
+
break;
default:
DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
@@ -4197,18 +4212,22 @@
case 21: /* hdmi */
switch (src_data) {
case 4:
- if (rdev->irq.stat_regs.r600.hdmi0_status & HDMI0_AZ_FORMAT_WTRIG) {
- rdev->irq.stat_regs.r600.hdmi0_status &= ~HDMI0_AZ_FORMAT_WTRIG;
- queue_hdmi = true;
- DRM_DEBUG("IH: HDMI0\n");
- }
+ if (!(rdev->irq.stat_regs.r600.hdmi0_status & HDMI0_AZ_FORMAT_WTRIG))
+ DRM_DEBUG("IH: HDMI0 - IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.r600.hdmi0_status &= ~HDMI0_AZ_FORMAT_WTRIG;
+ queue_hdmi = true;
+ DRM_DEBUG("IH: HDMI0\n");
+
break;
case 5:
- if (rdev->irq.stat_regs.r600.hdmi1_status & HDMI0_AZ_FORMAT_WTRIG) {
- rdev->irq.stat_regs.r600.hdmi1_status &= ~HDMI0_AZ_FORMAT_WTRIG;
- queue_hdmi = true;
- DRM_DEBUG("IH: HDMI1\n");
- }
+ if (!(rdev->irq.stat_regs.r600.hdmi1_status & HDMI0_AZ_FORMAT_WTRIG))
+ DRM_DEBUG("IH: HDMI1 - IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.r600.hdmi1_status &= ~HDMI0_AZ_FORMAT_WTRIG;
+ queue_hdmi = true;
+ DRM_DEBUG("IH: HDMI1\n");
+
break;
default:
DRM_ERROR("Unhandled interrupt: %d %d\n", src_id, src_data);
diff --git a/drivers/gpu/drm/radeon/r600_cp.c b/drivers/gpu/drm/radeon/r600_cp.c
index 09e3f39..98f9ada 100644
--- a/drivers/gpu/drm/radeon/r600_cp.c
+++ b/drivers/gpu/drm/radeon/r600_cp.c
@@ -2483,7 +2483,7 @@
struct drm_buf *buf;
u32 *buffer;
const u8 __user *data;
- int size, pass_size;
+ unsigned int size, pass_size;
u64 src_offset, dst_offset;
if (!radeon_check_offset(dev_priv, tex->offset)) {
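
size and pass_size derive from a user-controlled blit request; as signed ints, a negative value compares less-than any positive bound and can sail through a "size > max" check before being used as a length. Unsigned types turn such values into huge numbers that the same bound check rejects up front. Illustration (MAX_BLIT is a hypothetical limit):

    /*	int size = -4;
     *	if (size > MAX_BLIT) ...	// false: negative slips through
     *
     *	unsigned int size = -4;		// becomes 0xfffffffc
     *	if (size > MAX_BLIT) ...	// true: rejected immediately
     */
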
diff --git a/drivers/gpu/drm/radeon/radeon_cursor.c b/drivers/gpu/drm/radeon/radeon_cursor.c
index 45e5406..afaf346 100644
--- a/drivers/gpu/drm/radeon/radeon_cursor.c
+++ b/drivers/gpu/drm/radeon/radeon_cursor.c
@@ -91,15 +91,34 @@
struct radeon_device *rdev = crtc->dev->dev_private;
if (ASIC_IS_DCE4(rdev)) {
+ WREG32(EVERGREEN_CUR_SURFACE_ADDRESS_HIGH + radeon_crtc->crtc_offset,
+ upper_32_bits(radeon_crtc->cursor_addr));
+ WREG32(EVERGREEN_CUR_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
+ lower_32_bits(radeon_crtc->cursor_addr));
WREG32(RADEON_MM_INDEX, EVERGREEN_CUR_CONTROL + radeon_crtc->crtc_offset);
WREG32(RADEON_MM_DATA, EVERGREEN_CURSOR_EN |
EVERGREEN_CURSOR_MODE(EVERGREEN_CURSOR_24_8_PRE_MULT) |
EVERGREEN_CURSOR_URGENT_CONTROL(EVERGREEN_CURSOR_URGENT_1_2));
} else if (ASIC_IS_AVIVO(rdev)) {
+ if (rdev->family >= CHIP_RV770) {
+ if (radeon_crtc->crtc_id)
+ WREG32(R700_D2CUR_SURFACE_ADDRESS_HIGH,
+ upper_32_bits(radeon_crtc->cursor_addr));
+ else
+ WREG32(R700_D1CUR_SURFACE_ADDRESS_HIGH,
+ upper_32_bits(radeon_crtc->cursor_addr));
+ }
+
+ WREG32(AVIVO_D1CUR_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
+ lower_32_bits(radeon_crtc->cursor_addr));
WREG32(RADEON_MM_INDEX, AVIVO_D1CUR_CONTROL + radeon_crtc->crtc_offset);
WREG32(RADEON_MM_DATA, AVIVO_D1CURSOR_EN |
(AVIVO_D1CURSOR_MODE_24BPP << AVIVO_D1CURSOR_MODE_SHIFT));
} else {
+ /* offset is from DISP(2)_BASE_ADDRESS */
+ WREG32(RADEON_CUR_OFFSET + radeon_crtc->crtc_offset,
+ radeon_crtc->cursor_addr - radeon_crtc->legacy_display_base_addr);
+
switch (radeon_crtc->crtc_id) {
case 0:
WREG32(RADEON_MM_INDEX, RADEON_CRTC_GEN_CNTL);
@@ -205,8 +224,9 @@
| (x << 16)
| y));
/* offset is from DISP(2)_BASE_ADDRESS */
- WREG32(RADEON_CUR_OFFSET + radeon_crtc->crtc_offset, (radeon_crtc->legacy_cursor_offset +
- (yorigin * 256)));
+ WREG32(RADEON_CUR_OFFSET + radeon_crtc->crtc_offset,
+ radeon_crtc->cursor_addr - radeon_crtc->legacy_display_base_addr +
+ yorigin * 256);
}
radeon_crtc->cursor_x = x;
@@ -227,53 +247,6 @@
return ret;
}
-static int radeon_set_cursor(struct drm_crtc *crtc, struct drm_gem_object *obj)
-{
- struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
- struct radeon_device *rdev = crtc->dev->dev_private;
- struct radeon_bo *robj = gem_to_radeon_bo(obj);
- uint64_t gpu_addr;
- int ret;
-
- ret = radeon_bo_reserve(robj, false);
- if (unlikely(ret != 0))
- goto fail;
- /* Only 27 bit offset for legacy cursor */
- ret = radeon_bo_pin_restricted(robj, RADEON_GEM_DOMAIN_VRAM,
- ASIC_IS_AVIVO(rdev) ? 0 : 1 << 27,
- &gpu_addr);
- radeon_bo_unreserve(robj);
- if (ret)
- goto fail;
-
- if (ASIC_IS_DCE4(rdev)) {
- WREG32(EVERGREEN_CUR_SURFACE_ADDRESS_HIGH + radeon_crtc->crtc_offset,
- upper_32_bits(gpu_addr));
- WREG32(EVERGREEN_CUR_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
- gpu_addr & 0xffffffff);
- } else if (ASIC_IS_AVIVO(rdev)) {
- if (rdev->family >= CHIP_RV770) {
- if (radeon_crtc->crtc_id)
- WREG32(R700_D2CUR_SURFACE_ADDRESS_HIGH, upper_32_bits(gpu_addr));
- else
- WREG32(R700_D1CUR_SURFACE_ADDRESS_HIGH, upper_32_bits(gpu_addr));
- }
- WREG32(AVIVO_D1CUR_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
- gpu_addr & 0xffffffff);
- } else {
- radeon_crtc->legacy_cursor_offset = gpu_addr - radeon_crtc->legacy_display_base_addr;
- /* offset is from DISP(2)_BASE_ADDRESS */
- WREG32(RADEON_CUR_OFFSET + radeon_crtc->crtc_offset, radeon_crtc->legacy_cursor_offset);
- }
-
- return 0;
-
-fail:
- drm_gem_object_unreference_unlocked(obj);
-
- return ret;
-}
-
int radeon_crtc_cursor_set2(struct drm_crtc *crtc,
struct drm_file *file_priv,
uint32_t handle,
@@ -283,7 +256,9 @@
int32_t hot_y)
{
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+ struct radeon_device *rdev = crtc->dev->dev_private;
struct drm_gem_object *obj;
+ struct radeon_bo *robj;
int ret;
if (!handle) {
@@ -305,6 +280,23 @@
return -ENOENT;
}
+ robj = gem_to_radeon_bo(obj);
+ ret = radeon_bo_reserve(robj, false);
+ if (ret != 0) {
+ drm_gem_object_unreference_unlocked(obj);
+ return ret;
+ }
+ /* Only 27 bit offset for legacy cursor */
+ ret = radeon_bo_pin_restricted(robj, RADEON_GEM_DOMAIN_VRAM,
+ ASIC_IS_AVIVO(rdev) ? 0 : 1 << 27,
+ &radeon_crtc->cursor_addr);
+ radeon_bo_unreserve(robj);
+ if (ret) {
+ DRM_ERROR("Failed to pin new cursor BO (%d)\n", ret);
+ drm_gem_object_unreference_unlocked(obj);
+ return ret;
+ }
+
radeon_crtc->cursor_width = width;
radeon_crtc->cursor_height = height;
@@ -323,13 +315,7 @@
radeon_crtc->cursor_hot_y = hot_y;
}
- ret = radeon_set_cursor(crtc, obj);
-
- if (ret)
- DRM_ERROR("radeon_set_cursor returned %d, not changing cursor\n",
- ret);
- else
- radeon_show_cursor(crtc);
+ radeon_show_cursor(crtc);
radeon_lock_cursor(crtc, false);
@@ -341,8 +327,7 @@
radeon_bo_unpin(robj);
radeon_bo_unreserve(robj);
}
- if (radeon_crtc->cursor_bo != obj)
- drm_gem_object_unreference_unlocked(radeon_crtc->cursor_bo);
+ drm_gem_object_unreference_unlocked(radeon_crtc->cursor_bo);
}
radeon_crtc->cursor_bo = obj;
@@ -360,7 +345,6 @@
void radeon_cursor_reset(struct drm_crtc *crtc)
{
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
- int ret;
if (radeon_crtc->cursor_bo) {
radeon_lock_cursor(crtc, true);
@@ -368,12 +352,7 @@
radeon_cursor_move_locked(crtc, radeon_crtc->cursor_x,
radeon_crtc->cursor_y);
- ret = radeon_set_cursor(crtc, radeon_crtc->cursor_bo);
- if (ret)
- DRM_ERROR("radeon_set_cursor returned %d, not showing "
- "cursor\n", ret);
- else
- radeon_show_cursor(crtc);
+ radeon_show_cursor(crtc);
radeon_lock_cursor(crtc, false);
}
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 2593b11..d8319da 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -1080,6 +1080,22 @@
}
/**
+ * radeon_gart_size_auto - determine a sensible default GART size
+ * according to ASIC family.
+ *
+ * @family: ASIC family name
+ */
+static int radeon_gart_size_auto(enum radeon_family family)
+{
+ /* default to a larger gart size on newer asics */
+ if (family >= CHIP_TAHITI)
+ return 2048;
+ else if (family >= CHIP_RV770)
+ return 1024;
+ else
+ return 512;
+}
+
+/**
* radeon_check_arguments - validate module params
*
* @rdev: radeon_device pointer
@@ -1097,27 +1113,17 @@
}
if (radeon_gart_size == -1) {
- /* default to a larger gart size on newer asics */
- if (rdev->family >= CHIP_RV770)
- radeon_gart_size = 1024;
- else
- radeon_gart_size = 512;
+ radeon_gart_size = radeon_gart_size_auto(rdev->family);
}
/* gtt size must be power of two and greater or equal to 32M */
if (radeon_gart_size < 32) {
dev_warn(rdev->dev, "gart size (%d) too small\n",
radeon_gart_size);
- if (rdev->family >= CHIP_RV770)
- radeon_gart_size = 1024;
- else
- radeon_gart_size = 512;
+ radeon_gart_size = radeon_gart_size_auto(rdev->family);
} else if (!radeon_check_pot_argument(radeon_gart_size)) {
dev_warn(rdev->dev, "gart size (%d) must be a power of 2\n",
radeon_gart_size);
- if (rdev->family >= CHIP_RV770)
- radeon_gart_size = 1024;
- else
- radeon_gart_size = 512;
+ radeon_gart_size = radeon_gart_size_auto(rdev->family);
}
rdev->mc.gtt_size = (uint64_t)radeon_gart_size << 20;
@@ -1572,11 +1578,21 @@
drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
}
- /* unpin the front buffers */
+ /* unpin the front buffers and cursors */
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
struct radeon_framebuffer *rfb = to_radeon_framebuffer(crtc->primary->fb);
struct radeon_bo *robj;
+ if (radeon_crtc->cursor_bo) {
+ struct radeon_bo *robj = gem_to_radeon_bo(radeon_crtc->cursor_bo);
+ r = radeon_bo_reserve(robj, false);
+ if (r == 0) {
+ radeon_bo_unpin(robj);
+ radeon_bo_unreserve(robj);
+ }
+ }
+
if (rfb == NULL || rfb->obj == NULL) {
continue;
}
@@ -1639,6 +1655,7 @@
{
struct drm_connector *connector;
struct radeon_device *rdev = dev->dev_private;
+ struct drm_crtc *crtc;
int r;
if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
@@ -1678,6 +1695,27 @@
radeon_restore_bios_scratch_regs(rdev);
+ /* pin cursors */
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+
+ if (radeon_crtc->cursor_bo) {
+ struct radeon_bo *robj = gem_to_radeon_bo(radeon_crtc->cursor_bo);
+ r = radeon_bo_reserve(robj, false);
+ if (r == 0) {
+ /* Only 27 bit offset for legacy cursor */
+ r = radeon_bo_pin_restricted(robj,
+ RADEON_GEM_DOMAIN_VRAM,
+ ASIC_IS_AVIVO(rdev) ?
+ 0 : 1 << 27,
+ &radeon_crtc->cursor_addr);
+ if (r != 0)
+ DRM_ERROR("Failed to pin cursor BO (%d)\n", r);
+ radeon_bo_unreserve(robj);
+ }
+ }
+ }
+
/* init dig PHYs, disp eng pll */
if (rdev->is_atom_bios) {
radeon_atom_encoder_init(rdev);
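The radeon_device.c refactor collapses three copies of the GART-size fallback into radeon_gart_size_auto(). A minimal userspace sketch of the resulting validation flow; the enum values and the radeon_check_pot_argument() equivalent below are illustrative stand-ins, only the ordering of the CHIP_* thresholds matters:

	#include <stdio.h>
	#include <stdbool.h>

	/* illustrative stand-ins; only the relative order matters */
	enum radeon_family { CHIP_R600, CHIP_RV770, CHIP_TAHITI };

	static bool is_pot(int v)		/* power of two? */
	{
		return v > 0 && (v & (v - 1)) == 0;
	}

	static int radeon_gart_size_auto(enum radeon_family family)
	{
		if (family >= CHIP_TAHITI)
			return 2048;
		else if (family >= CHIP_RV770)
			return 1024;
		else
			return 512;
	}

	int main(void)
	{
		int radeon_gart_size = 100;	/* bogus module parameter */
		enum radeon_family family = CHIP_TAHITI;

		if (radeon_gart_size == -1)
			radeon_gart_size = radeon_gart_size_auto(family);
		/* gtt size must be a power of two and at least 32M */
		if (radeon_gart_size < 32 || !is_pot(radeon_gart_size))
			radeon_gart_size = radeon_gart_size_auto(family);

		printf("gtt_size = %llu bytes\n",
		       (unsigned long long)radeon_gart_size << 20);
		return 0;
	}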
diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c
index 634793e..aeb6767 100644
--- a/drivers/gpu/drm/radeon/radeon_fb.c
+++ b/drivers/gpu/drm/radeon/radeon_fb.c
@@ -257,6 +257,7 @@
}
info->par = rfbdev;
+ info->skip_vt_switch = true;
ret = radeon_framebuffer_init(rdev->ddev, &rfbdev->rfb, &mode_cmd, gobj);
if (ret) {
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index ac3c131..013ec71 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -428,7 +428,6 @@
int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp)
{
- struct radeon_device *rdev = dev->dev_private;
struct drm_radeon_gem_busy *args = data;
struct drm_gem_object *gobj;
struct radeon_bo *robj;
@@ -440,10 +439,16 @@
return -ENOENT;
}
robj = gem_to_radeon_bo(gobj);
- r = radeon_bo_wait(robj, &cur_placement, true);
+
+ r = reservation_object_test_signaled_rcu(robj->tbo.resv, true);
+ if (r == 0)
+ r = -EBUSY;
+ else
+ r = 0;
+
+ cur_placement = ACCESS_ONCE(robj->tbo.mem.mem_type);
args->domain = radeon_mem_type_to_domain(cur_placement);
drm_gem_object_unreference_unlocked(gobj);
- r = radeon_gem_handle_lockup(rdev, r);
return r;
}
@@ -471,6 +476,7 @@
r = ret;
/* Flush HDP cache via MMIO if necessary */
+ cur_placement = ACCESS_ONCE(robj->tbo.mem.mem_type);
if (rdev->asic->mmio_hdp_flush &&
radeon_mem_type_to_domain(cur_placement) == RADEON_GEM_DOMAIN_VRAM)
robj->rdev->asic->mmio_hdp_flush(rdev);
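The radeon_gem.c change turns the busy ioctl into a pure non-blocking query: test whether all fences on the BO have signaled and map the answer to 0 or -EBUSY, and since nothing waits anymore the GPU-lockup handling can be dropped. A hedged userspace analogue (bo_signaled() stands in for reservation_object_test_signaled_rcu()):

	#include <stdio.h>
	#include <stdbool.h>
	#include <errno.h>

	/* stand-in for reservation_object_test_signaled_rcu(resv, true):
	 * true when every fence attached to the BO has signaled */
	static bool bo_signaled(void)
	{
		return false;			/* pretend the GPU still owns it */
	}

	static int radeon_gem_busy(void)
	{
		/* non-blocking: report -EBUSY instead of waiting for idle */
		return bo_signaled() ? 0 : -EBUSY;
	}

	int main(void)
	{
		int r = radeon_gem_busy();

		printf("busy ioctl -> %d (%s)\n", r, r ? "busy" : "idle");
		return 0;
	}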
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index 6de5459..07909d8 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -343,7 +343,6 @@
int max_cursor_width;
int max_cursor_height;
uint32_t legacy_display_base_addr;
- uint32_t legacy_cursor_offset;
enum radeon_rmx_type rmx_type;
u8 h_border;
u8 v_border;
diff --git a/drivers/gpu/drm/radeon/radeon_vm.c b/drivers/gpu/drm/radeon/radeon_vm.c
index ec10533..48d97c0 100644
--- a/drivers/gpu/drm/radeon/radeon_vm.c
+++ b/drivers/gpu/drm/radeon/radeon_vm.c
@@ -493,38 +493,35 @@
}
if (bo_va->it.start || bo_va->it.last) {
- spin_lock(&vm->status_lock);
- if (list_empty(&bo_va->vm_status)) {
- /* add a clone of the bo_va to clear the old address */
- struct radeon_bo_va *tmp;
- spin_unlock(&vm->status_lock);
- tmp = kzalloc(sizeof(struct radeon_bo_va), GFP_KERNEL);
- if (!tmp) {
- mutex_unlock(&vm->mutex);
- r = -ENOMEM;
- goto error_unreserve;
- }
- tmp->it.start = bo_va->it.start;
- tmp->it.last = bo_va->it.last;
- tmp->vm = vm;
- tmp->bo = radeon_bo_ref(bo_va->bo);
- spin_lock(&vm->status_lock);
- list_add(&tmp->vm_status, &vm->freed);
+ /* add a clone of the bo_va to clear the old address */
+ struct radeon_bo_va *tmp;
+ tmp = kzalloc(sizeof(struct radeon_bo_va), GFP_KERNEL);
+ if (!tmp) {
+ mutex_unlock(&vm->mutex);
+ r = -ENOMEM;
+ goto error_unreserve;
}
- spin_unlock(&vm->status_lock);
+ tmp->it.start = bo_va->it.start;
+ tmp->it.last = bo_va->it.last;
+ tmp->vm = vm;
+ tmp->bo = radeon_bo_ref(bo_va->bo);
interval_tree_remove(&bo_va->it, &vm->va);
+ spin_lock(&vm->status_lock);
bo_va->it.start = 0;
bo_va->it.last = 0;
+ list_del_init(&bo_va->vm_status);
+ list_add(&tmp->vm_status, &vm->freed);
+ spin_unlock(&vm->status_lock);
}
if (soffset || eoffset) {
+ spin_lock(&vm->status_lock);
bo_va->it.start = soffset;
bo_va->it.last = eoffset - 1;
- interval_tree_insert(&bo_va->it, &vm->va);
- spin_lock(&vm->status_lock);
list_add(&bo_va->vm_status, &vm->cleared);
spin_unlock(&vm->status_lock);
+ interval_tree_insert(&bo_va->it, &vm->va);
}
bo_va->flags = flags;
@@ -1158,7 +1155,8 @@
list_for_each_entry(bo_va, &bo->va, bo_list) {
spin_lock(&bo_va->vm->status_lock);
- if (list_empty(&bo_va->vm_status))
+ if (list_empty(&bo_va->vm_status) &&
+ (bo_va->it.start || bo_va->it.last))
list_add(&bo_va->vm_status, &bo_va->vm->invalidated);
spin_unlock(&bo_va->vm->status_lock);
}
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index 26388b5..07037e3 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -6466,23 +6466,27 @@
case 1: /* D1 vblank/vline */
switch (src_data) {
case 0: /* D1 vblank */
- if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VBLANK_INTERRUPT) {
- if (rdev->irq.crtc_vblank_int[0]) {
- drm_handle_vblank(rdev->ddev, 0);
- rdev->pm.vblank_sync = true;
- wake_up(&rdev->irq.vblank_queue);
- }
- if (atomic_read(&rdev->irq.pflip[0]))
- radeon_crtc_handle_vblank(rdev, 0);
- rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VBLANK_INTERRUPT;
- DRM_DEBUG("IH: D1 vblank\n");
+ if (!(rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VBLANK_INTERRUPT))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ if (rdev->irq.crtc_vblank_int[0]) {
+ drm_handle_vblank(rdev->ddev, 0);
+ rdev->pm.vblank_sync = true;
+ wake_up(&rdev->irq.vblank_queue);
}
+ if (atomic_read(&rdev->irq.pflip[0]))
+ radeon_crtc_handle_vblank(rdev, 0);
+ rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VBLANK_INTERRUPT;
+ DRM_DEBUG("IH: D1 vblank\n");
+
break;
case 1: /* D1 vline */
- if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VLINE_INTERRUPT) {
- rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VLINE_INTERRUPT;
- DRM_DEBUG("IH: D1 vline\n");
- }
+ if (!(rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VLINE_INTERRUPT))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VLINE_INTERRUPT;
+ DRM_DEBUG("IH: D1 vline\n");
+
break;
default:
DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
@@ -6492,23 +6496,27 @@
case 2: /* D2 vblank/vline */
switch (src_data) {
case 0: /* D2 vblank */
- if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VBLANK_INTERRUPT) {
- if (rdev->irq.crtc_vblank_int[1]) {
- drm_handle_vblank(rdev->ddev, 1);
- rdev->pm.vblank_sync = true;
- wake_up(&rdev->irq.vblank_queue);
- }
- if (atomic_read(&rdev->irq.pflip[1]))
- radeon_crtc_handle_vblank(rdev, 1);
- rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VBLANK_INTERRUPT;
- DRM_DEBUG("IH: D2 vblank\n");
+ if (!(rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VBLANK_INTERRUPT))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ if (rdev->irq.crtc_vblank_int[1]) {
+ drm_handle_vblank(rdev->ddev, 1);
+ rdev->pm.vblank_sync = true;
+ wake_up(&rdev->irq.vblank_queue);
}
+ if (atomic_read(&rdev->irq.pflip[1]))
+ radeon_crtc_handle_vblank(rdev, 1);
+ rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VBLANK_INTERRUPT;
+ DRM_DEBUG("IH: D2 vblank\n");
+
break;
case 1: /* D2 vline */
- if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VLINE_INTERRUPT) {
- rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VLINE_INTERRUPT;
- DRM_DEBUG("IH: D2 vline\n");
- }
+ if (!(rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VLINE_INTERRUPT))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VLINE_INTERRUPT;
+ DRM_DEBUG("IH: D2 vline\n");
+
break;
default:
DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
@@ -6518,23 +6526,27 @@
case 3: /* D3 vblank/vline */
switch (src_data) {
case 0: /* D3 vblank */
- if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT) {
- if (rdev->irq.crtc_vblank_int[2]) {
- drm_handle_vblank(rdev->ddev, 2);
- rdev->pm.vblank_sync = true;
- wake_up(&rdev->irq.vblank_queue);
- }
- if (atomic_read(&rdev->irq.pflip[2]))
- radeon_crtc_handle_vblank(rdev, 2);
- rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VBLANK_INTERRUPT;
- DRM_DEBUG("IH: D3 vblank\n");
+ if (!(rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ if (rdev->irq.crtc_vblank_int[2]) {
+ drm_handle_vblank(rdev->ddev, 2);
+ rdev->pm.vblank_sync = true;
+ wake_up(&rdev->irq.vblank_queue);
}
+ if (atomic_read(&rdev->irq.pflip[2]))
+ radeon_crtc_handle_vblank(rdev, 2);
+ rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VBLANK_INTERRUPT;
+ DRM_DEBUG("IH: D3 vblank\n");
+
break;
case 1: /* D3 vline */
- if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VLINE_INTERRUPT) {
- rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VLINE_INTERRUPT;
- DRM_DEBUG("IH: D3 vline\n");
- }
+ if (!(rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VLINE_INTERRUPT))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VLINE_INTERRUPT;
+ DRM_DEBUG("IH: D3 vline\n");
+
break;
default:
DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
@@ -6544,23 +6556,27 @@
case 4: /* D4 vblank/vline */
switch (src_data) {
case 0: /* D4 vblank */
- if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VBLANK_INTERRUPT) {
- if (rdev->irq.crtc_vblank_int[3]) {
- drm_handle_vblank(rdev->ddev, 3);
- rdev->pm.vblank_sync = true;
- wake_up(&rdev->irq.vblank_queue);
- }
- if (atomic_read(&rdev->irq.pflip[3]))
- radeon_crtc_handle_vblank(rdev, 3);
- rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VBLANK_INTERRUPT;
- DRM_DEBUG("IH: D4 vblank\n");
+ if (!(rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VBLANK_INTERRUPT))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ if (rdev->irq.crtc_vblank_int[3]) {
+ drm_handle_vblank(rdev->ddev, 3);
+ rdev->pm.vblank_sync = true;
+ wake_up(&rdev->irq.vblank_queue);
}
+ if (atomic_read(&rdev->irq.pflip[3]))
+ radeon_crtc_handle_vblank(rdev, 3);
+ rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VBLANK_INTERRUPT;
+ DRM_DEBUG("IH: D4 vblank\n");
+
break;
case 1: /* D4 vline */
- if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VLINE_INTERRUPT) {
- rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VLINE_INTERRUPT;
- DRM_DEBUG("IH: D4 vline\n");
- }
+ if (!(rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VLINE_INTERRUPT))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VLINE_INTERRUPT;
+ DRM_DEBUG("IH: D4 vline\n");
+
break;
default:
DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
@@ -6570,23 +6586,27 @@
case 5: /* D5 vblank/vline */
switch (src_data) {
case 0: /* D5 vblank */
- if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT) {
- if (rdev->irq.crtc_vblank_int[4]) {
- drm_handle_vblank(rdev->ddev, 4);
- rdev->pm.vblank_sync = true;
- wake_up(&rdev->irq.vblank_queue);
- }
- if (atomic_read(&rdev->irq.pflip[4]))
- radeon_crtc_handle_vblank(rdev, 4);
- rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VBLANK_INTERRUPT;
- DRM_DEBUG("IH: D5 vblank\n");
+ if (!(rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ if (rdev->irq.crtc_vblank_int[4]) {
+ drm_handle_vblank(rdev->ddev, 4);
+ rdev->pm.vblank_sync = true;
+ wake_up(&rdev->irq.vblank_queue);
}
+ if (atomic_read(&rdev->irq.pflip[4]))
+ radeon_crtc_handle_vblank(rdev, 4);
+ rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VBLANK_INTERRUPT;
+ DRM_DEBUG("IH: D5 vblank\n");
+
break;
case 1: /* D5 vline */
- if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VLINE_INTERRUPT) {
- rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VLINE_INTERRUPT;
- DRM_DEBUG("IH: D5 vline\n");
- }
+ if (!(rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VLINE_INTERRUPT))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VLINE_INTERRUPT;
+ DRM_DEBUG("IH: D5 vline\n");
+
break;
default:
DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
@@ -6596,23 +6616,27 @@
case 6: /* D6 vblank/vline */
switch (src_data) {
case 0: /* D6 vblank */
- if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VBLANK_INTERRUPT) {
- if (rdev->irq.crtc_vblank_int[5]) {
- drm_handle_vblank(rdev->ddev, 5);
- rdev->pm.vblank_sync = true;
- wake_up(&rdev->irq.vblank_queue);
- }
- if (atomic_read(&rdev->irq.pflip[5]))
- radeon_crtc_handle_vblank(rdev, 5);
- rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VBLANK_INTERRUPT;
- DRM_DEBUG("IH: D6 vblank\n");
+ if (!(rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VBLANK_INTERRUPT))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ if (rdev->irq.crtc_vblank_int[5]) {
+ drm_handle_vblank(rdev->ddev, 5);
+ rdev->pm.vblank_sync = true;
+ wake_up(&rdev->irq.vblank_queue);
}
+ if (atomic_read(&rdev->irq.pflip[5]))
+ radeon_crtc_handle_vblank(rdev, 5);
+ rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VBLANK_INTERRUPT;
+ DRM_DEBUG("IH: D6 vblank\n");
+
break;
case 1: /* D6 vline */
- if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VLINE_INTERRUPT) {
- rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VLINE_INTERRUPT;
- DRM_DEBUG("IH: D6 vline\n");
- }
+ if (!(rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VLINE_INTERRUPT))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VLINE_INTERRUPT;
+ DRM_DEBUG("IH: D6 vline\n");
+
break;
default:
DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
@@ -6632,88 +6656,112 @@
case 42: /* HPD hotplug */
switch (src_data) {
case 0:
- if (rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_INTERRUPT) {
- rdev->irq.stat_regs.evergreen.disp_int &= ~DC_HPD1_INTERRUPT;
- queue_hotplug = true;
- DRM_DEBUG("IH: HPD1\n");
- }
+ if (!(rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_INTERRUPT))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.evergreen.disp_int &= ~DC_HPD1_INTERRUPT;
+ queue_hotplug = true;
+ DRM_DEBUG("IH: HPD1\n");
+
break;
case 1:
- if (rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_INTERRUPT) {
- rdev->irq.stat_regs.evergreen.disp_int_cont &= ~DC_HPD2_INTERRUPT;
- queue_hotplug = true;
- DRM_DEBUG("IH: HPD2\n");
- }
+ if (!(rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_INTERRUPT))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.evergreen.disp_int_cont &= ~DC_HPD2_INTERRUPT;
+ queue_hotplug = true;
+ DRM_DEBUG("IH: HPD2\n");
+
break;
case 2:
- if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_INTERRUPT) {
- rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~DC_HPD3_INTERRUPT;
- queue_hotplug = true;
- DRM_DEBUG("IH: HPD3\n");
- }
+ if (!(rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_INTERRUPT))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~DC_HPD3_INTERRUPT;
+ queue_hotplug = true;
+ DRM_DEBUG("IH: HPD3\n");
+
break;
case 3:
- if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_INTERRUPT) {
- rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~DC_HPD4_INTERRUPT;
- queue_hotplug = true;
- DRM_DEBUG("IH: HPD4\n");
- }
+ if (!(rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_INTERRUPT))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~DC_HPD4_INTERRUPT;
+ queue_hotplug = true;
+ DRM_DEBUG("IH: HPD4\n");
+
break;
case 4:
- if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_INTERRUPT) {
- rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~DC_HPD5_INTERRUPT;
- queue_hotplug = true;
- DRM_DEBUG("IH: HPD5\n");
- }
+ if (!(rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_INTERRUPT))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~DC_HPD5_INTERRUPT;
+ queue_hotplug = true;
+ DRM_DEBUG("IH: HPD5\n");
+
break;
case 5:
- if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_INTERRUPT) {
- rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~DC_HPD6_INTERRUPT;
- queue_hotplug = true;
- DRM_DEBUG("IH: HPD6\n");
- }
+ if (!(rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_INTERRUPT))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~DC_HPD6_INTERRUPT;
+ queue_hotplug = true;
+ DRM_DEBUG("IH: HPD6\n");
+
break;
case 6:
- if (rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_RX_INTERRUPT) {
- rdev->irq.stat_regs.evergreen.disp_int &= ~DC_HPD1_RX_INTERRUPT;
- queue_dp = true;
- DRM_DEBUG("IH: HPD_RX 1\n");
- }
+ if (!(rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_RX_INTERRUPT))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.evergreen.disp_int &= ~DC_HPD1_RX_INTERRUPT;
+ queue_dp = true;
+ DRM_DEBUG("IH: HPD_RX 1\n");
+
break;
case 7:
- if (rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_RX_INTERRUPT) {
- rdev->irq.stat_regs.evergreen.disp_int_cont &= ~DC_HPD2_RX_INTERRUPT;
- queue_dp = true;
- DRM_DEBUG("IH: HPD_RX 2\n");
- }
+ if (!(rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_RX_INTERRUPT))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.evergreen.disp_int_cont &= ~DC_HPD2_RX_INTERRUPT;
+ queue_dp = true;
+ DRM_DEBUG("IH: HPD_RX 2\n");
+
break;
case 8:
- if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_RX_INTERRUPT) {
- rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~DC_HPD3_RX_INTERRUPT;
- queue_dp = true;
- DRM_DEBUG("IH: HPD_RX 3\n");
- }
+ if (!(rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_RX_INTERRUPT))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~DC_HPD3_RX_INTERRUPT;
+ queue_dp = true;
+ DRM_DEBUG("IH: HPD_RX 3\n");
+
break;
case 9:
- if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_RX_INTERRUPT) {
- rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~DC_HPD4_RX_INTERRUPT;
- queue_dp = true;
- DRM_DEBUG("IH: HPD_RX 4\n");
- }
+ if (!(rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_RX_INTERRUPT))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~DC_HPD4_RX_INTERRUPT;
+ queue_dp = true;
+ DRM_DEBUG("IH: HPD_RX 4\n");
+
break;
case 10:
- if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_RX_INTERRUPT) {
- rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~DC_HPD5_RX_INTERRUPT;
- queue_dp = true;
- DRM_DEBUG("IH: HPD_RX 5\n");
- }
+ if (!(rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_RX_INTERRUPT))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~DC_HPD5_RX_INTERRUPT;
+ queue_dp = true;
+ DRM_DEBUG("IH: HPD_RX 5\n");
+
break;
case 11:
- if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_RX_INTERRUPT) {
- rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~DC_HPD6_RX_INTERRUPT;
- queue_dp = true;
- DRM_DEBUG("IH: HPD_RX 6\n");
- }
+ if (!(rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_RX_INTERRUPT))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~DC_HPD6_RX_INTERRUPT;
+ queue_dp = true;
+ DRM_DEBUG("IH: HPD_RX 6\n");
+
break;
default:
DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index 35ac237..577d58d 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -633,6 +633,7 @@
config I2C_MT65XX
tristate "MediaTek I2C adapter"
depends on ARCH_MEDIATEK || COMPILE_TEST
+ depends on HAS_DMA
help
This selects the MediaTek(R) Integrated Inter Circuit bus driver
for MT65xx and MT81xx.
diff --git a/drivers/i2c/busses/i2c-jz4780.c b/drivers/i2c/busses/i2c-jz4780.c
index 19b2d68..f325663 100644
--- a/drivers/i2c/busses/i2c-jz4780.c
+++ b/drivers/i2c/busses/i2c-jz4780.c
@@ -764,12 +764,15 @@
if (IS_ERR(i2c->clk))
return PTR_ERR(i2c->clk);
- clk_prepare_enable(i2c->clk);
+ ret = clk_prepare_enable(i2c->clk);
+ if (ret)
+ return ret;
- if (of_property_read_u32(pdev->dev.of_node, "clock-frequency",
- &clk_freq)) {
+ ret = of_property_read_u32(pdev->dev.of_node, "clock-frequency",
+ &clk_freq);
+ if (ret) {
dev_err(&pdev->dev, "clock-frequency not specified in DT");
- return clk_freq;
+ goto err;
}
i2c->speed = clk_freq / 1000;
@@ -790,10 +793,8 @@
i2c->irq = platform_get_irq(pdev, 0);
ret = devm_request_irq(&pdev->dev, i2c->irq, jz4780_i2c_irq, 0,
dev_name(&pdev->dev), i2c);
- if (ret) {
- ret = -ENODEV;
+ if (ret)
goto err;
- }
ret = i2c_add_adapter(&i2c->adap);
if (ret < 0) {
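The i2c-jz4780.c fix is the standard probe-unwinding pattern: check the return of clk_prepare_enable(), and route every later failure through a label that disables the clock again instead of returning an unrelated value such as clk_freq. A minimal sketch with stubbed clock and DT calls:

	#include <stdio.h>
	#include <errno.h>

	/* stubs standing in for the clk and OF APIs */
	static int clk_prepare_enable(void *clk)	{ (void)clk; return 0; }
	static void clk_disable_unprepare(void *clk)	{ (void)clk; }
	static int read_clock_frequency(unsigned int *hz)
	{
		(void)hz;
		return -EINVAL;			/* property missing in DT */
	}

	static int probe(void)
	{
		void *clk = (void *)1;
		unsigned int clk_freq;
		int ret;

		ret = clk_prepare_enable(clk);	/* may fail: check it */
		if (ret)
			return ret;

		ret = read_clock_frequency(&clk_freq);
		if (ret) {
			fprintf(stderr, "clock-frequency not specified in DT\n");
			goto err;		/* propagate ret, not clk_freq */
		}
		return 0;

	err:
		clk_disable_unprepare(clk);	/* undo what already succeeded */
		return ret;
	}

	int main(void)
	{
		printf("probe() = %d\n", probe());
		return 0;
	}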
diff --git a/drivers/i2c/busses/i2c-xgene-slimpro.c b/drivers/i2c/busses/i2c-xgene-slimpro.c
index dcca707..1c9cb65 100644
--- a/drivers/i2c/busses/i2c-xgene-slimpro.c
+++ b/drivers/i2c/busses/i2c-xgene-slimpro.c
@@ -419,6 +419,7 @@
rc = i2c_add_adapter(adapter);
if (rc) {
dev_err(&pdev->dev, "Adapter registeration failed\n");
+ mbox_free_channel(ctx->mbox_chan);
return rc;
}
diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c
index 069a41f..e6d4935 100644
--- a/drivers/i2c/i2c-core.c
+++ b/drivers/i2c/i2c-core.c
@@ -1012,6 +1012,8 @@
*/
void i2c_unregister_device(struct i2c_client *client)
{
+ if (client->dev.of_node)
+ of_node_clear_flag(client->dev.of_node, OF_POPULATED);
device_unregister(&client->dev);
}
EXPORT_SYMBOL_GPL(i2c_unregister_device);
@@ -1320,8 +1322,11 @@
dev_dbg(&adap->dev, "of_i2c: walking child nodes\n");
- for_each_available_child_of_node(adap->dev.of_node, node)
+ for_each_available_child_of_node(adap->dev.of_node, node) {
+ if (of_node_test_and_set_flag(node, OF_POPULATED))
+ continue;
of_i2c_register_device(adap, node);
+ }
}
static int of_dev_node_match(struct device *dev, void *data)
@@ -1853,6 +1858,11 @@
if (adap == NULL)
return NOTIFY_OK; /* not for us */
+ if (of_node_test_and_set_flag(rd->dn, OF_POPULATED)) {
+ put_device(&adap->dev);
+ return NOTIFY_OK;
+ }
+
client = of_i2c_register_device(adap, rd->dn);
put_device(&adap->dev);
@@ -1863,6 +1873,10 @@
}
break;
case OF_RECONFIG_CHANGE_REMOVE:
+ /* already depopulated? */
+ if (!of_node_check_flag(rd->dn, OF_POPULATED))
+ return NOTIFY_OK;
+
/* find our device by node */
client = of_find_i2c_device_by_node(rd->dn);
if (client == NULL)
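The i2c-core.c changes close a window where a device-tree node could be instantiated twice, once by the adapter's bulk child walk and once by the OF_RECONFIG notifier. The mechanism reduces to an atomic test-and-set of a per-node flag, cleared again on unregister; a hedged sketch of the test-and-set half:

	#include <stdio.h>
	#include <stdbool.h>
	#include <stdatomic.h>

	struct node {
		const char *name;
		atomic_flag populated;		/* stands in for OF_POPULATED */
	};

	/* true if this caller won the right to register the node */
	static bool try_populate(struct node *n)
	{
		return !atomic_flag_test_and_set(&n->populated);
	}

	int main(void)
	{
		struct node n = { "eeprom@50", ATOMIC_FLAG_INIT };

		/* bulk child walk at adapter registration time */
		if (try_populate(&n))
			printf("registered %s from adapter walk\n", n.name);

		/* later OF_RECONFIG notifier for the same node: must skip it */
		if (!try_populate(&n))
			printf("%s already populated, notifier skips it\n", n.name);
		return 0;
	}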
diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c
index 62641f2..5b5f403 100644
--- a/drivers/input/mouse/elan_i2c_core.c
+++ b/drivers/input/mouse/elan_i2c_core.c
@@ -771,7 +771,7 @@
*/
static void elan_report_contact(struct elan_tp_data *data,
int contact_num, bool contact_valid,
- bool hover_event, u8 *finger_data)
+ u8 *finger_data)
{
struct input_dev *input = data->input;
unsigned int pos_x, pos_y;
@@ -815,9 +815,7 @@
input_mt_report_slot_state(input, MT_TOOL_FINGER, true);
input_report_abs(input, ABS_MT_POSITION_X, pos_x);
input_report_abs(input, ABS_MT_POSITION_Y, data->max_y - pos_y);
- input_report_abs(input, ABS_MT_DISTANCE, hover_event);
- input_report_abs(input, ABS_MT_PRESSURE,
- hover_event ? 0 : scaled_pressure);
+ input_report_abs(input, ABS_MT_PRESSURE, scaled_pressure);
input_report_abs(input, ABS_TOOL_WIDTH, mk_x);
input_report_abs(input, ABS_MT_TOUCH_MAJOR, major);
input_report_abs(input, ABS_MT_TOUCH_MINOR, minor);
@@ -839,14 +837,14 @@
hover_event = hover_info & 0x40;
for (i = 0; i < ETP_MAX_FINGERS; i++) {
contact_valid = tp_info & (1U << (3 + i));
- elan_report_contact(data, i, contact_valid, hover_event,
- finger_data);
+ elan_report_contact(data, i, contact_valid, finger_data);
if (contact_valid)
finger_data += ETP_FINGER_DATA_LEN;
}
input_report_key(input, BTN_LEFT, tp_info & 0x01);
+ input_report_abs(input, ABS_DISTANCE, hover_event != 0);
input_mt_report_pointer_emulation(input, true);
input_sync(input);
}
@@ -922,6 +920,7 @@
input_abs_set_res(input, ABS_Y, data->y_res);
input_set_abs_params(input, ABS_PRESSURE, 0, ETP_MAX_PRESSURE, 0, 0);
input_set_abs_params(input, ABS_TOOL_WIDTH, 0, ETP_FINGER_WIDTH, 0, 0);
+ input_set_abs_params(input, ABS_DISTANCE, 0, 1, 0, 0);
/* And MT parameters */
input_set_abs_params(input, ABS_MT_POSITION_X, 0, data->max_x, 0, 0);
@@ -934,7 +933,6 @@
ETP_FINGER_WIDTH * max_width, 0, 0);
input_set_abs_params(input, ABS_MT_TOUCH_MINOR, 0,
ETP_FINGER_WIDTH * min_width, 0, 0);
- input_set_abs_params(input, ABS_MT_DISTANCE, 0, 1, 0, 0);
data->input = input;
diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
index 35c8d0c..3a32caf 100644
--- a/drivers/input/mouse/synaptics.c
+++ b/drivers/input/mouse/synaptics.c
@@ -1199,7 +1199,7 @@
ABS_MT_POSITION_Y);
/* Image sensors can report per-contact pressure */
input_set_abs_params(dev, ABS_MT_PRESSURE, 0, 255, 0, 0);
- input_mt_init_slots(dev, 3, INPUT_MT_POINTER | INPUT_MT_TRACK);
+ input_mt_init_slots(dev, 2, INPUT_MT_POINTER | INPUT_MT_TRACK);
/* Image sensors can signal 4 and 5 finger clicks */
__set_bit(BTN_TOOL_QUADTAP, dev->keybit);
diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c
index 8d7e1c8..4dd8826 100644
--- a/drivers/irqchip/irq-gic.c
+++ b/drivers/irqchip/irq-gic.c
@@ -1055,7 +1055,7 @@
processor = (struct acpi_madt_generic_interrupt *)header;
- if (BAD_MADT_ENTRY(processor, end))
+ if (BAD_MADT_GICC_ENTRY(processor, end))
return -EINVAL;
/*
diff --git a/drivers/irqchip/irq-mips-gic.c b/drivers/irqchip/irq-mips-gic.c
index 4400edd..b7d54d4 100644
--- a/drivers/irqchip/irq-mips-gic.c
+++ b/drivers/irqchip/irq-mips-gic.c
@@ -257,16 +257,6 @@
return MIPS_CPU_IRQ_BASE + cp0_fdc_irq;
}
- /*
- * Some cores claim the FDC is routable but it doesn't actually seem to
- * be connected.
- */
- switch (current_cpu_type()) {
- case CPU_INTERAPTIV:
- case CPU_PROAPTIV:
- return -1;
- }
-
return irq_create_mapping(gic_irq_domain,
GIC_LOCAL_TO_HWIRQ(GIC_LOCAL_INT_FDC));
}
diff --git a/drivers/memory/omap-gpmc.c b/drivers/memory/omap-gpmc.c
index 8911e51..3a27a84 100644
--- a/drivers/memory/omap-gpmc.c
+++ b/drivers/memory/omap-gpmc.c
@@ -2074,14 +2074,8 @@
ret = gpmc_probe_nand_child(pdev, child);
else if (of_node_cmp(child->name, "onenand") == 0)
ret = gpmc_probe_onenand_child(pdev, child);
- else if (of_node_cmp(child->name, "ethernet") == 0 ||
- of_node_cmp(child->name, "nor") == 0 ||
- of_node_cmp(child->name, "uart") == 0)
+ else
ret = gpmc_probe_generic_child(pdev, child);
-
- if (WARN(ret < 0, "%s: probing gpmc child %s failed\n",
- __func__, child->full_name))
- of_node_put(child);
}
return 0;
diff --git a/drivers/misc/cxl/api.c b/drivers/misc/cxl/api.c
index 0c77240..729e085 100644
--- a/drivers/misc/cxl/api.c
+++ b/drivers/misc/cxl/api.c
@@ -23,6 +23,7 @@
afu = cxl_pci_to_afu(dev);
+ get_device(&afu->dev);
ctx = cxl_context_alloc();
if (IS_ERR(ctx))
return ctx;
@@ -31,6 +32,7 @@
rc = cxl_context_init(ctx, afu, false, NULL);
if (rc) {
kfree(ctx);
+ put_device(&afu->dev);
return ERR_PTR(-ENOMEM);
}
cxl_assign_psn_space(ctx);
@@ -60,6 +62,8 @@
if (ctx->status != CLOSED)
return -EBUSY;
+ put_device(&ctx->afu->dev);
+
cxl_context_free(ctx);
return 0;
@@ -159,7 +163,6 @@
}
ctx->status = STARTED;
- get_device(&ctx->afu->dev);
out:
mutex_unlock(&ctx->status_mutex);
return rc;
@@ -175,12 +178,7 @@
/* Stop a context. Returns 0 on success, otherwise -Errno */
int cxl_stop_context(struct cxl_context *ctx)
{
- int rc;
-
- rc = __detach_context(ctx);
- if (!rc)
- put_device(&ctx->afu->dev);
- return rc;
+ return __detach_context(ctx);
}
EXPORT_SYMBOL_GPL(cxl_stop_context);
diff --git a/drivers/misc/cxl/context.c b/drivers/misc/cxl/context.c
index 2a4c80a..1287148 100644
--- a/drivers/misc/cxl/context.c
+++ b/drivers/misc/cxl/context.c
@@ -113,11 +113,11 @@
if (ctx->afu->current_mode == CXL_MODE_DEDICATED) {
area = ctx->afu->psn_phys;
- if (offset > ctx->afu->adapter->ps_size)
+ if (offset >= ctx->afu->adapter->ps_size)
return VM_FAULT_SIGBUS;
} else {
area = ctx->psn_phys;
- if (offset > ctx->psn_size)
+ if (offset >= ctx->psn_size)
return VM_FAULT_SIGBUS;
}
@@ -145,8 +145,16 @@
*/
int cxl_context_iomap(struct cxl_context *ctx, struct vm_area_struct *vma)
{
+ u64 start = vma->vm_pgoff << PAGE_SHIFT;
u64 len = vma->vm_end - vma->vm_start;
- len = min(len, ctx->psn_size);
+
+ if (ctx->afu->current_mode == CXL_MODE_DEDICATED) {
+ if (start + len > ctx->afu->adapter->ps_size)
+ return -EINVAL;
+ } else {
+ if (start + len > ctx->psn_size)
+ return -EINVAL;
+ }
if (ctx->afu->current_mode != CXL_MODE_DEDICATED) {
/* make sure there is a valid per process space for this AFU */
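Both cxl hunks enforce the same invariant from two sides: a fault at offset is only valid for offset < size (hence the switch to '>=' in the SIGBUS check), and an mmap of [start, start + len) is only valid when start + len <= size, rather than silently clamping len as before. A small sketch of the two checks with an example size (page-aligned u64s in the real driver; overflow of start + len would need care with untrusted inputs):

	#include <stdio.h>
	#include <stdint.h>
	#include <errno.h>

	#define PS_SIZE 65536u			/* example problem-space size */

	/* page fault: offset must lie strictly inside the region */
	static int fault_ok(uint64_t offset)
	{
		return offset >= PS_SIZE ? -EFAULT : 0;
	}

	/* mmap: the whole [start, start + len) range must fit */
	static int iomap_ok(uint64_t start, uint64_t len)
	{
		return start + len > PS_SIZE ? -EINVAL : 0;
	}

	int main(void)
	{
		printf("fault at size       -> %d\n", fault_ok(PS_SIZE));
		printf("map last page       -> %d\n", iomap_ok(PS_SIZE - 4096, 4096));
		printf("map one page beyond -> %d\n", iomap_ok(PS_SIZE - 4096, 8192));
		return 0;
	}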
diff --git a/drivers/misc/cxl/main.c b/drivers/misc/cxl/main.c
index 833348e..4a164ab 100644
--- a/drivers/misc/cxl/main.c
+++ b/drivers/misc/cxl/main.c
@@ -73,7 +73,7 @@
spin_lock(&adapter->afu_list_lock);
for (slice = 0; slice < adapter->slices; slice++) {
afu = adapter->afu[slice];
- if (!afu->enabled)
+ if (!afu || !afu->enabled)
continue;
rcu_read_lock();
idr_for_each_entry(&afu->contexts_idr, ctx, id)
diff --git a/drivers/misc/cxl/pci.c b/drivers/misc/cxl/pci.c
index c68ef58..32ad097 100644
--- a/drivers/misc/cxl/pci.c
+++ b/drivers/misc/cxl/pci.c
@@ -539,7 +539,7 @@
static void cxl_unmap_slice_regs(struct cxl_afu *afu)
{
- if (afu->p1n_mmio)
+ if (afu->p2n_mmio)
iounmap(afu->p2n_mmio);
if (afu->p1n_mmio)
iounmap(afu->p1n_mmio);
diff --git a/drivers/misc/cxl/vphb.c b/drivers/misc/cxl/vphb.c
index b1d1983a..2eba002 100644
--- a/drivers/misc/cxl/vphb.c
+++ b/drivers/misc/cxl/vphb.c
@@ -112,9 +112,10 @@
unsigned long addr;
phb = pci_bus_to_host(bus);
- afu = (struct cxl_afu *)phb->private_data;
if (phb == NULL)
return PCIBIOS_DEVICE_NOT_FOUND;
+ afu = (struct cxl_afu *)phb->private_data;
+
if (cxl_pcie_cfg_record(bus->number, devfn) > afu->crs_num)
return PCIBIOS_DEVICE_NOT_FOUND;
if (offset >= (unsigned long)phb->cfg_data)
diff --git a/drivers/misc/mei/bus.c b/drivers/misc/mei/bus.c
index 357b6ae..458aa5a 100644
--- a/drivers/misc/mei/bus.c
+++ b/drivers/misc/mei/bus.c
@@ -552,22 +552,6 @@
schedule_work(&device->event_work);
}
-void mei_cl_bus_remove_devices(struct mei_device *dev)
-{
- struct mei_cl *cl, *next;
-
- mutex_lock(&dev->device_lock);
- list_for_each_entry_safe(cl, next, &dev->device_list, device_link) {
- if (cl->device)
- mei_cl_remove_device(cl->device);
-
- list_del(&cl->device_link);
- mei_cl_unlink(cl);
- kfree(cl);
- }
- mutex_unlock(&dev->device_lock);
-}
-
int __init mei_cl_bus_init(void)
{
return bus_register(&mei_cl_bus_type);
diff --git a/drivers/misc/mei/init.c b/drivers/misc/mei/init.c
index 94514b2..00c3865 100644
--- a/drivers/misc/mei/init.c
+++ b/drivers/misc/mei/init.c
@@ -333,8 +333,6 @@
mei_nfc_host_exit(dev);
- mei_cl_bus_remove_devices(dev);
-
mutex_lock(&dev->device_lock);
mei_wd_stop(dev);
diff --git a/drivers/misc/mei/nfc.c b/drivers/misc/mei/nfc.c
index b983c4e..290ef30 100644
--- a/drivers/misc/mei/nfc.c
+++ b/drivers/misc/mei/nfc.c
@@ -402,11 +402,12 @@
cldev->priv_data = NULL;
- mutex_lock(&dev->device_lock);
/* Need to remove the device here
* since mei_nfc_free will unlink the clients
*/
mei_cl_remove_device(cldev);
+
+ mutex_lock(&dev->device_lock);
mei_nfc_free(ndev);
mutex_unlock(&dev->device_lock);
}
diff --git a/drivers/nvdimm/bus.c b/drivers/nvdimm/bus.c
index 8eb22c0..7e2c43f 100644
--- a/drivers/nvdimm/bus.c
+++ b/drivers/nvdimm/bus.c
@@ -535,8 +535,6 @@
__func__, dimm_name, cmd_name, i);
return -ENXIO;
}
- if (!access_ok(VERIFY_READ, p + in_len, in_size))
- return -EFAULT;
if (in_len < sizeof(in_env))
copy = min_t(u32, sizeof(in_env) - in_len, in_size);
else
@@ -557,8 +555,6 @@
__func__, dimm_name, cmd_name, i);
return -EFAULT;
}
- if (!access_ok(VERIFY_WRITE, p + in_len + out_len, out_size))
- return -EFAULT;
if (out_len < sizeof(out_env))
copy = min_t(u32, sizeof(out_env) - out_len, out_size);
else
@@ -570,9 +566,6 @@
}
buf_len = out_len + in_len;
- if (!access_ok(VERIFY_WRITE, p, sizeof(buf_len)))
- return -EFAULT;
-
if (buf_len > ND_IOCTL_MAX_BUFLEN) {
dev_dbg(dev, "%s:%s cmd: %s buf_len: %zu > %d\n", __func__,
dimm_name, cmd_name, buf_len,
@@ -706,8 +699,10 @@
nvdimm_major = rc;
nd_class = class_create(THIS_MODULE, "nd");
- if (IS_ERR(nd_class))
+ if (IS_ERR(nd_class)) {
+ rc = PTR_ERR(nd_class);
goto err_class;
+ }
return 0;
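The nvdimm fix is the classic ERR_PTR pitfall: class_create() encodes its error in the returned pointer, and jumping to the error label without converting it leaves rc holding the (successful) chrdev major. A userspace sketch of the convention, with ERR_PTR/IS_ERR/PTR_ERR re-implemented here purely for illustration:

	#include <stdio.h>
	#include <stdint.h>
	#include <errno.h>

	#define MAX_ERRNO 4095

	static void *ERR_PTR(long err)		{ return (void *)err; }
	static long PTR_ERR(const void *p)	{ return (long)p; }
	static int IS_ERR(const void *p)
	{
		return (uintptr_t)p >= (uintptr_t)-MAX_ERRNO;
	}

	static void *class_create_stub(void)
	{
		return ERR_PTR(-ENOMEM);	/* pretend allocation failed */
	}

	int main(void)
	{
		int rc = 0;
		void *nd_class = class_create_stub();

		if (IS_ERR(nd_class)) {
			rc = PTR_ERR(nd_class);	/* the line the patch adds */
			goto err_class;
		}
		return 0;

	err_class:
		fprintf(stderr, "init failed: %d\n", rc);
		return 1;
	}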
diff --git a/drivers/pnp/system.c b/drivers/pnp/system.c
index 515f338..49c1720 100644
--- a/drivers/pnp/system.c
+++ b/drivers/pnp/system.c
@@ -7,7 +7,6 @@
* Bjorn Helgaas <bjorn.helgaas@hp.com>
*/
-#include <linux/acpi.h>
#include <linux/pnp.h>
#include <linux/device.h>
#include <linux/init.h>
@@ -23,41 +22,25 @@
{"", 0}
};
-#ifdef CONFIG_ACPI
-static bool __reserve_range(u64 start, unsigned int length, bool io, char *desc)
-{
- u8 space_id = io ? ACPI_ADR_SPACE_SYSTEM_IO : ACPI_ADR_SPACE_SYSTEM_MEMORY;
- return !acpi_reserve_region(start, length, space_id, IORESOURCE_BUSY, desc);
-}
-#else
-static bool __reserve_range(u64 start, unsigned int length, bool io, char *desc)
-{
- struct resource *res;
-
- res = io ? request_region(start, length, desc) :
- request_mem_region(start, length, desc);
- if (res) {
- res->flags &= ~IORESOURCE_BUSY;
- return true;
- }
- return false;
-}
-#endif
-
static void reserve_range(struct pnp_dev *dev, struct resource *r, int port)
{
char *regionid;
const char *pnpid = dev_name(&dev->dev);
resource_size_t start = r->start, end = r->end;
- bool reserved;
+ struct resource *res;
regionid = kmalloc(16, GFP_KERNEL);
if (!regionid)
return;
snprintf(regionid, 16, "pnp %s", pnpid);
- reserved = __reserve_range(start, end - start + 1, !!port, regionid);
- if (!reserved)
+ if (port)
+ res = request_region(start, end - start + 1, regionid);
+ else
+ res = request_mem_region(start, end - start + 1, regionid);
+ if (res)
+ res->flags &= ~IORESOURCE_BUSY;
+ else
kfree(regionid);
/*
@@ -66,7 +49,7 @@
* have double reservations.
*/
dev_info(&dev->dev, "%pR %s reserved\n", r,
- reserved ? "has been" : "could not be");
+ res ? "has been" : "could not be");
}
static void reserve_resources_of_dev(struct pnp_dev *dev)
diff --git a/drivers/video/fbdev/stifb.c b/drivers/video/fbdev/stifb.c
index 86621fa..735355b 100644
--- a/drivers/video/fbdev/stifb.c
+++ b/drivers/video/fbdev/stifb.c
@@ -121,6 +121,7 @@
#define REG_3 0x0004a0
#define REG_4 0x000600
#define REG_6 0x000800
+#define REG_7 0x000804
#define REG_8 0x000820
#define REG_9 0x000a04
#define REG_10 0x018000
@@ -135,6 +136,8 @@
#define REG_21 0x200218
#define REG_22 0x0005a0
#define REG_23 0x0005c0
+#define REG_24 0x000808
+#define REG_25 0x000b00
#define REG_26 0x200118
#define REG_27 0x200308
#define REG_32 0x21003c
@@ -429,6 +432,9 @@
#define SET_LENXY_START_RECFILL(fb, lenxy) \
WRITE_WORD(lenxy, fb, REG_9)
+#define SETUP_COPYAREA(fb) \
+ WRITE_BYTE(0, fb, REG_16b1)
+
static void
HYPER_ENABLE_DISABLE_DISPLAY(struct stifb_info *fb, int enable)
{
@@ -1004,6 +1010,36 @@
return 0;
}
+static void
+stifb_copyarea(struct fb_info *info, const struct fb_copyarea *area)
+{
+ struct stifb_info *fb = container_of(info, struct stifb_info, info);
+
+ SETUP_COPYAREA(fb);
+
+ SETUP_HW(fb);
+ if (fb->info.var.bits_per_pixel == 32) {
+ WRITE_WORD(0xBBA0A000, fb, REG_10);
+
+ NGLE_REALLY_SET_IMAGE_PLANEMASK(fb, 0xffffffff);
+ } else {
+ WRITE_WORD(fb->id == S9000_ID_HCRX ? 0x13a02000 : 0x13a01000, fb, REG_10);
+
+ NGLE_REALLY_SET_IMAGE_PLANEMASK(fb, 0xff);
+ }
+
+ NGLE_QUICK_SET_IMAGE_BITMAP_OP(fb,
+ IBOvals(RopSrc, MaskAddrOffset(0),
+ BitmapExtent08, StaticReg(1),
+ DataDynamic, MaskOtc, BGx(0), FGx(0)));
+
+ WRITE_WORD(((area->sx << 16) | area->sy), fb, REG_24);
+ WRITE_WORD(((area->width << 16) | area->height), fb, REG_7);
+ WRITE_WORD(((area->dx << 16) | area->dy), fb, REG_25);
+
+ SETUP_FB(fb);
+}
+
static void __init
stifb_init_display(struct stifb_info *fb)
{
@@ -1069,7 +1105,7 @@
.fb_setcolreg = stifb_setcolreg,
.fb_blank = stifb_blank,
.fb_fillrect = cfb_fillrect,
- .fb_copyarea = cfb_copyarea,
+ .fb_copyarea = stifb_copyarea,
.fb_imageblit = cfb_imageblit,
};
@@ -1258,7 +1294,7 @@
info->fbops = &stifb_ops;
info->screen_base = ioremap_nocache(REGION_BASE(fb,1), fix->smem_len);
info->screen_size = fix->smem_len;
- info->flags = FBINFO_DEFAULT;
+ info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_COPYAREA;
info->pseudo_palette = &fb->pseudo_palette;
/* This has to be done !!! */
diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
index 510040b..b1dc518 100644
--- a/fs/9p/vfs_inode.c
+++ b/fs/9p/vfs_inode.c
@@ -540,8 +540,7 @@
unlock_new_inode(inode);
return inode;
error:
- unlock_new_inode(inode);
- iput(inode);
+ iget_failed(inode);
return ERR_PTR(retval);
}
diff --git a/fs/9p/vfs_inode_dotl.c b/fs/9p/vfs_inode_dotl.c
index 09e44337..e8aa57d 100644
--- a/fs/9p/vfs_inode_dotl.c
+++ b/fs/9p/vfs_inode_dotl.c
@@ -149,8 +149,7 @@
unlock_new_inode(inode);
return inode;
error:
- unlock_new_inode(inode);
- iput(inode);
+ iget_failed(inode);
return ERR_PTR(retval);
}
diff --git a/fs/btrfs/btrfs_inode.h b/fs/btrfs/btrfs_inode.h
index 0ef5cc1..81220b2 100644
--- a/fs/btrfs/btrfs_inode.h
+++ b/fs/btrfs/btrfs_inode.h
@@ -44,6 +44,8 @@
#define BTRFS_INODE_IN_DELALLOC_LIST 9
#define BTRFS_INODE_READDIO_NEED_LOCK 10
#define BTRFS_INODE_HAS_PROPS 11
+/* DIO is ready to submit */
+#define BTRFS_INODE_DIO_READY 12
/*
* The following 3 bits are meant only for the btree inode.
* When any of them is set, it means an error happened while writing an
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index 80a9aef..aac314e 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -1778,6 +1778,7 @@
spinlock_t unused_bgs_lock;
struct list_head unused_bgs;
struct mutex unused_bg_unpin_mutex;
+ struct mutex delete_unused_bgs_mutex;
/* For btrfs to record security options */
struct security_mnt_opts security_opts;
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 3f43bfea..a9aadb2 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -1751,6 +1751,7 @@
{
struct btrfs_root *root = arg;
int again;
+ struct btrfs_trans_handle *trans;
do {
again = 0;
@@ -1772,7 +1773,6 @@
}
btrfs_run_delayed_iputs(root);
- btrfs_delete_unused_bgs(root->fs_info);
again = btrfs_clean_one_deleted_snapshot(root);
mutex_unlock(&root->fs_info->cleaner_mutex);
@@ -1781,6 +1781,16 @@
* needn't do anything special here.
*/
btrfs_run_defrag_inodes(root->fs_info);
+
+ /*
+ * btrfs_delete_unused_bgs() acquires fs_info->delete_unused_bgs_mutex
+ * to avoid racing with relocation (btrfs_relocate_chunk); relocation
+ * acquires fs_info->cleaner_mutex (btrfs_relocate_block_group) only
+ * after taking fs_info->delete_unused_bgs_mutex. So we can't hold,
+ * nor need to hold, fs_info->cleaner_mutex when deleting
+ * unused block groups.
+ */
+ btrfs_delete_unused_bgs(root->fs_info);
sleep:
if (!try_to_freeze() && !again) {
set_current_state(TASK_INTERRUPTIBLE);
@@ -1789,6 +1799,34 @@
__set_current_state(TASK_RUNNING);
}
} while (!kthread_should_stop());
+
+ /*
+ * Transaction kthread is stopped before us and wakes us up.
+ * However we might have started a new transaction and COWed some
+ * tree blocks when deleting unused block groups for example. So
+ * make sure we commit the transaction we started to have a clean
+ * shutdown when evicting the btree inode - if it has dirty pages
+ * when we do the final iput() on it, eviction will trigger a
+ * writeback for it which will fail with null pointer dereferences
+ * since work queues and other resources were already released and
+ * destroyed by the time the iput/eviction/writeback is made.
+ */
+ trans = btrfs_attach_transaction(root);
+ if (IS_ERR(trans)) {
+ if (PTR_ERR(trans) != -ENOENT)
+ btrfs_err(root->fs_info,
+ "cleaner transaction attach returned %ld",
+ PTR_ERR(trans));
+ } else {
+ int ret;
+
+ ret = btrfs_commit_transaction(trans, root);
+ if (ret)
+ btrfs_err(root->fs_info,
+ "cleaner open transaction commit returned %d",
+ ret);
+ }
+
return 0;
}
@@ -2492,6 +2530,7 @@
spin_lock_init(&fs_info->unused_bgs_lock);
rwlock_init(&fs_info->tree_mod_log_lock);
mutex_init(&fs_info->unused_bg_unpin_mutex);
+ mutex_init(&fs_info->delete_unused_bgs_mutex);
mutex_init(&fs_info->reloc_mutex);
mutex_init(&fs_info->delalloc_root_mutex);
seqlock_init(&fs_info->profiles_lock);
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 38b76cc..1c2bd17 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -9889,6 +9889,8 @@
}
spin_unlock(&fs_info->unused_bgs_lock);
+ mutex_lock(&root->fs_info->delete_unused_bgs_mutex);
+
/* Don't want to race with allocators so take the groups_sem */
down_write(&space_info->groups_sem);
spin_lock(&block_group->lock);
@@ -9983,6 +9985,7 @@
end_trans:
btrfs_end_transaction(trans, root);
next:
+ mutex_unlock(&root->fs_info->delete_unused_bgs_mutex);
btrfs_put_block_group(block_group);
spin_lock(&fs_info->unused_bgs_lock);
}
diff --git a/fs/btrfs/inode-map.c b/fs/btrfs/inode-map.c
index f6a596d..d4a582a 100644
--- a/fs/btrfs/inode-map.c
+++ b/fs/btrfs/inode-map.c
@@ -246,6 +246,7 @@
{
struct btrfs_free_space_ctl *ctl = root->free_ino_ctl;
struct rb_root *rbroot = &root->free_ino_pinned->free_space_offset;
+ spinlock_t *rbroot_lock = &root->free_ino_pinned->tree_lock;
struct btrfs_free_space *info;
struct rb_node *n;
u64 count;
@@ -254,24 +255,30 @@
return;
while (1) {
+ bool add_to_ctl = true;
+
+ spin_lock(rbroot_lock);
n = rb_first(rbroot);
- if (!n)
+ if (!n) {
+ spin_unlock(rbroot_lock);
break;
+ }
info = rb_entry(n, struct btrfs_free_space, offset_index);
BUG_ON(info->bitmap); /* Logic error */
if (info->offset > root->ino_cache_progress)
- goto free;
+ add_to_ctl = false;
else if (info->offset + info->bytes > root->ino_cache_progress)
count = root->ino_cache_progress - info->offset + 1;
else
count = info->bytes;
- __btrfs_add_free_space(ctl, info->offset, count);
-free:
rb_erase(&info->offset_index, rbroot);
- kfree(info);
+ spin_unlock(rbroot_lock);
+ if (add_to_ctl)
+ __btrfs_add_free_space(ctl, info->offset, count);
+ kmem_cache_free(btrfs_free_space_cachep, info);
}
}
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 855935f..b33c0cf 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -4989,8 +4989,9 @@
/*
* Keep looping until we have no more ranges in the io tree.
* We can have ongoing bios started by readpages (called from readahead)
- * that didn't get their end io callbacks called yet or they are still
- * in progress ((extent_io.c:end_bio_extent_readpage()). This means some
+ * that have their endio callback (extent_io.c:end_bio_extent_readpage)
+ * still in progress (they unlocked the pages in the bio but did not
+ * yet unlock the ranges in the io tree). This means some
* ranges can still be locked and eviction started because before
* submitting those bios, which are executed by a separate task (work
* queue kthread), inode references (inode->i_count) were not taken
@@ -7546,6 +7547,7 @@
current->journal_info = outstanding_extents;
btrfs_free_reserved_data_space(inode, len);
+ set_bit(BTRFS_INODE_DIO_READY, &BTRFS_I(inode)->runtime_flags);
}
/*
@@ -7871,8 +7873,6 @@
struct bio *dio_bio;
int ret;
- if (err)
- goto out_done;
again:
ret = btrfs_dec_test_first_ordered_pending(inode, &ordered,
&ordered_offset,
@@ -7895,7 +7895,6 @@
ordered = NULL;
goto again;
}
-out_done:
dio_bio = dip->dio_bio;
kfree(dip);
@@ -8163,9 +8162,8 @@
static void btrfs_submit_direct(int rw, struct bio *dio_bio,
struct inode *inode, loff_t file_offset)
{
- struct btrfs_root *root = BTRFS_I(inode)->root;
- struct btrfs_dio_private *dip;
- struct bio *io_bio;
+ struct btrfs_dio_private *dip = NULL;
+ struct bio *io_bio = NULL;
struct btrfs_io_bio *btrfs_bio;
int skip_sum;
int write = rw & REQ_WRITE;
@@ -8182,7 +8180,7 @@
dip = kzalloc(sizeof(*dip), GFP_NOFS);
if (!dip) {
ret = -ENOMEM;
- goto free_io_bio;
+ goto free_ordered;
}
dip->private = dio_bio->bi_private;
@@ -8210,25 +8208,55 @@
if (btrfs_bio->end_io)
btrfs_bio->end_io(btrfs_bio, ret);
-free_io_bio:
- bio_put(io_bio);
free_ordered:
/*
- * If this is a write, we need to clean up the reserved space and kill
- * the ordered extent.
+ * If we arrived here it means we failed to submit the dip, failed
+ * to clone the dio_bio, or failed to allocate the dip. If we cloned
+ * the dio_bio and allocated the dip, we can just
+ * call bio_endio against our io_bio so that we get proper resource
+ * cleanup if we fail to submit the dip, otherwise, we must do the
+ * same as btrfs_endio_direct_[write|read] because we can't call these
+ * callbacks - they require an allocated dip and a clone of dio_bio.
*/
- if (write) {
- struct btrfs_ordered_extent *ordered;
- ordered = btrfs_lookup_ordered_extent(inode, file_offset);
- if (!test_bit(BTRFS_ORDERED_PREALLOC, &ordered->flags) &&
- !test_bit(BTRFS_ORDERED_NOCOW, &ordered->flags))
- btrfs_free_reserved_extent(root, ordered->start,
- ordered->disk_len, 1);
- btrfs_put_ordered_extent(ordered);
- btrfs_put_ordered_extent(ordered);
+ if (io_bio && dip) {
+ bio_endio(io_bio, ret);
+ /*
+ * The end io callbacks free our dip, do the final put on io_bio
+ * and all the cleanup and final put for dio_bio (through
+ * dio_end_io()).
+ */
+ dip = NULL;
+ io_bio = NULL;
+ } else {
+ if (write) {
+ struct btrfs_ordered_extent *ordered;
+
+ ordered = btrfs_lookup_ordered_extent(inode,
+ file_offset);
+ set_bit(BTRFS_ORDERED_IOERR, &ordered->flags);
+ /*
+ * Decrements our ref on the ordered extent and removes
+ * the ordered extent from the inode's ordered tree,
+ * doing all the proper resource cleanup such as for the
+ * reserved space and waking up any waiters for this
+ * ordered extent (through btrfs_remove_ordered_extent).
+ */
+ btrfs_finish_ordered_io(ordered);
+ } else {
+ unlock_extent(&BTRFS_I(inode)->io_tree, file_offset,
+ file_offset + dio_bio->bi_iter.bi_size - 1);
+ }
+ clear_bit(BIO_UPTODATE, &dio_bio->bi_flags);
+ /*
+ * Releases and cleans up our dio_bio, no need to bio_put()
+ * nor bio_endio()/bio_io_error() against dio_bio.
+ */
+ dio_end_io(dio_bio, ret);
}
- bio_endio(dio_bio, ret);
+ if (io_bio)
+ bio_put(io_bio);
+ kfree(dip);
}
static ssize_t check_direct_IO(struct btrfs_root *root, struct kiocb *iocb,
@@ -8330,9 +8358,18 @@
btrfs_submit_direct, flags);
if (iov_iter_rw(iter) == WRITE) {
current->journal_info = NULL;
- if (ret < 0 && ret != -EIOCBQUEUED)
- btrfs_delalloc_release_space(inode, count);
- else if (ret >= 0 && (size_t)ret < count)
+ if (ret < 0 && ret != -EIOCBQUEUED) {
+ /*
+ * If the error comes from the submission stage,
+ * btrfs_get_blocks_direct() has already freed the data
+ * space and metadata space will be handled by
+ * finish_ordered_fn, so don't free the space again here
+ * or bytes_may_use will be wrong.
+ */
+ if (!test_and_clear_bit(BTRFS_INODE_DIO_READY,
+ &BTRFS_I(inode)->runtime_flags))
+ btrfs_delalloc_release_space(inode, count);
+ } else if (ret >= 0 && (size_t)ret < count)
btrfs_delalloc_release_space(inode,
count - (size_t)ret);
}
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index c86b835..5d91776 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -87,7 +87,8 @@
static int btrfs_clone(struct inode *src, struct inode *inode,
- u64 off, u64 olen, u64 olen_aligned, u64 destoff);
+ u64 off, u64 olen, u64 olen_aligned, u64 destoff,
+ int no_time_update);
/* Mask out flags that are inappropriate for the given type of inode. */
static inline __u32 btrfs_mask_flags(umode_t mode, __u32 flags)
@@ -2765,14 +2766,11 @@
return ret;
}
-static struct page *extent_same_get_page(struct inode *inode, u64 off)
+static struct page *extent_same_get_page(struct inode *inode, pgoff_t index)
{
struct page *page;
- pgoff_t index;
struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
- index = off >> PAGE_CACHE_SHIFT;
-
page = grab_cache_page(inode->i_mapping, index);
if (!page)
return NULL;
@@ -2793,6 +2791,20 @@
return page;
}
+static int gather_extent_pages(struct inode *inode, struct page **pages,
+ int num_pages, u64 off)
+{
+ int i;
+ pgoff_t index = off >> PAGE_CACHE_SHIFT;
+
+ for (i = 0; i < num_pages; i++) {
+ pages[i] = extent_same_get_page(inode, index + i);
+ if (!pages[i])
+ return -ENOMEM;
+ }
+ return 0;
+}
+
static inline void lock_extent_range(struct inode *inode, u64 off, u64 len)
{
/* do any pending delalloc/csum calc on src, one way or
@@ -2818,52 +2830,120 @@
}
}
-static void btrfs_double_unlock(struct inode *inode1, u64 loff1,
- struct inode *inode2, u64 loff2, u64 len)
+static void btrfs_double_inode_unlock(struct inode *inode1, struct inode *inode2)
{
- unlock_extent(&BTRFS_I(inode1)->io_tree, loff1, loff1 + len - 1);
- unlock_extent(&BTRFS_I(inode2)->io_tree, loff2, loff2 + len - 1);
-
mutex_unlock(&inode1->i_mutex);
mutex_unlock(&inode2->i_mutex);
}
-static void btrfs_double_lock(struct inode *inode1, u64 loff1,
- struct inode *inode2, u64 loff2, u64 len)
+static void btrfs_double_inode_lock(struct inode *inode1, struct inode *inode2)
+{
+ if (inode1 < inode2)
+ swap(inode1, inode2);
+
+ mutex_lock_nested(&inode1->i_mutex, I_MUTEX_PARENT);
+ if (inode1 != inode2)
+ mutex_lock_nested(&inode2->i_mutex, I_MUTEX_CHILD);
+}
+
+static void btrfs_double_extent_unlock(struct inode *inode1, u64 loff1,
+ struct inode *inode2, u64 loff2, u64 len)
+{
+ unlock_extent(&BTRFS_I(inode1)->io_tree, loff1, loff1 + len - 1);
+ unlock_extent(&BTRFS_I(inode2)->io_tree, loff2, loff2 + len - 1);
+}
+
+static void btrfs_double_extent_lock(struct inode *inode1, u64 loff1,
+ struct inode *inode2, u64 loff2, u64 len)
{
if (inode1 < inode2) {
swap(inode1, inode2);
swap(loff1, loff2);
}
-
- mutex_lock_nested(&inode1->i_mutex, I_MUTEX_PARENT);
lock_extent_range(inode1, loff1, len);
- if (inode1 != inode2) {
- mutex_lock_nested(&inode2->i_mutex, I_MUTEX_CHILD);
+ if (inode1 != inode2)
lock_extent_range(inode2, loff2, len);
+}
+
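
btrfs_double_inode_lock() above avoids an ABBA deadlock by always acquiring the two mutexes in a fixed address order. A minimal pthread sketch of the same idiom (illustrative only; comparing unrelated pointers is a well-known practical liberty here, as in the kernel):

#include <pthread.h>
#include <stdio.h>

struct object { pthread_mutex_t lock; };

/* Always acquire in a fixed (address) order so two tasks locking the
 * same pair in opposite argument order cannot deadlock; a == b is
 * tolerated, like the same-inode case above. */
static void double_lock(struct object *a, struct object *b)
{
	if (a > b) {
		struct object *t = a; a = b; b = t;
	}
	pthread_mutex_lock(&a->lock);
	if (a != b)
		pthread_mutex_lock(&b->lock);
}

static void double_unlock(struct object *a, struct object *b)
{
	pthread_mutex_unlock(&a->lock);
	if (a != b)
		pthread_mutex_unlock(&b->lock);
}

int main(void)
{
	struct object x = { PTHREAD_MUTEX_INITIALIZER };
	struct object y = { PTHREAD_MUTEX_INITIALIZER };

	double_lock(&x, &y);
	double_unlock(&y, &x);	/* argument order is irrelevant */
	printf("ok\n");
	return 0;
}
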
+struct cmp_pages {
+ int num_pages;
+ struct page **src_pages;
+ struct page **dst_pages;
+};
+
+static void btrfs_cmp_data_free(struct cmp_pages *cmp)
+{
+ int i;
+ struct page *pg;
+
+ for (i = 0; i < cmp->num_pages; i++) {
+ pg = cmp->src_pages[i];
+ if (pg)
+ page_cache_release(pg);
+ pg = cmp->dst_pages[i];
+ if (pg)
+ page_cache_release(pg);
}
+ kfree(cmp->src_pages);
+ kfree(cmp->dst_pages);
+}
+
+static int btrfs_cmp_data_prepare(struct inode *src, u64 loff,
+ struct inode *dst, u64 dst_loff,
+ u64 len, struct cmp_pages *cmp)
+{
+ int ret;
+ int num_pages = PAGE_CACHE_ALIGN(len) >> PAGE_CACHE_SHIFT;
+ struct page **src_pgarr, **dst_pgarr;
+
+ /*
+ * We must gather up all the pages before we initiate our
+	 * extent locking. We use an array for the page pointers. The size
+	 * of the array is bounded by len, which is in turn bounded by
+ * BTRFS_MAX_DEDUPE_LEN.
+ */
+ src_pgarr = kzalloc(num_pages * sizeof(struct page *), GFP_NOFS);
+ dst_pgarr = kzalloc(num_pages * sizeof(struct page *), GFP_NOFS);
+ if (!src_pgarr || !dst_pgarr) {
+ kfree(src_pgarr);
+ kfree(dst_pgarr);
+ return -ENOMEM;
+ }
+ cmp->num_pages = num_pages;
+ cmp->src_pages = src_pgarr;
+ cmp->dst_pages = dst_pgarr;
+
+ ret = gather_extent_pages(src, cmp->src_pages, cmp->num_pages, loff);
+ if (ret)
+ goto out;
+
+ ret = gather_extent_pages(dst, cmp->dst_pages, cmp->num_pages, dst_loff);
+
+out:
+ if (ret)
+ btrfs_cmp_data_free(cmp);
+	return ret;
}
static int btrfs_cmp_data(struct inode *src, u64 loff, struct inode *dst,
- u64 dst_loff, u64 len)
+ u64 dst_loff, u64 len, struct cmp_pages *cmp)
{
int ret = 0;
+ int i;
struct page *src_page, *dst_page;
unsigned int cmp_len = PAGE_CACHE_SIZE;
void *addr, *dst_addr;
+ i = 0;
while (len) {
if (len < PAGE_CACHE_SIZE)
cmp_len = len;
- src_page = extent_same_get_page(src, loff);
- if (!src_page)
- return -EINVAL;
- dst_page = extent_same_get_page(dst, dst_loff);
- if (!dst_page) {
- page_cache_release(src_page);
- return -EINVAL;
- }
+ BUG_ON(i >= cmp->num_pages);
+
+ src_page = cmp->src_pages[i];
+ dst_page = cmp->dst_pages[i];
+
addr = kmap_atomic(src_page);
dst_addr = kmap_atomic(dst_page);
@@ -2875,15 +2955,12 @@
kunmap_atomic(addr);
kunmap_atomic(dst_addr);
- page_cache_release(src_page);
- page_cache_release(dst_page);
if (ret)
break;
- loff += cmp_len;
- dst_loff += cmp_len;
len -= cmp_len;
+ i++;
}
return ret;
@@ -2914,27 +2991,62 @@
{
int ret;
u64 len = olen;
+ struct cmp_pages cmp;
+ int same_inode = 0;
+ u64 same_lock_start = 0;
+ u64 same_lock_len = 0;
- /*
- * btrfs_clone() can't handle extents in the same file
- * yet. Once that works, we can drop this check and replace it
- * with a check for the same inode, but overlapping extents.
- */
if (src == dst)
- return -EINVAL;
+ same_inode = 1;
if (len == 0)
return 0;
- btrfs_double_lock(src, loff, dst, dst_loff, len);
+ if (same_inode) {
+ mutex_lock(&src->i_mutex);
- ret = extent_same_check_offsets(src, loff, &len, olen);
- if (ret)
- goto out_unlock;
+ ret = extent_same_check_offsets(src, loff, &len, olen);
+ if (ret)
+ goto out_unlock;
- ret = extent_same_check_offsets(dst, dst_loff, &len, olen);
- if (ret)
- goto out_unlock;
+ /*
+ * Single inode case wants the same checks, except we
+ * don't want our length pushed out past i_size as
+ * comparing that data range makes no sense.
+ *
+ * extent_same_check_offsets() will do this for an
+ * unaligned length at i_size, so catch it here and
+ * reject the request.
+ *
+ * This effectively means we require aligned extents
+ * for the single-inode case, whereas the other cases
+ * allow an unaligned length so long as it ends at
+ * i_size.
+ */
+ if (len != olen) {
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
+ /* Check for overlapping ranges */
+ if (dst_loff + len > loff && dst_loff < loff + len) {
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
+ same_lock_start = min_t(u64, loff, dst_loff);
+ same_lock_len = max_t(u64, loff, dst_loff) + len - same_lock_start;
+ } else {
+ btrfs_double_inode_lock(src, dst);
+
+ ret = extent_same_check_offsets(src, loff, &len, olen);
+ if (ret)
+ goto out_unlock;
+
+ ret = extent_same_check_offsets(dst, dst_loff, &len, olen);
+ if (ret)
+ goto out_unlock;
+ }
/* don't make the dst file partly checksummed */
if ((BTRFS_I(src)->flags & BTRFS_INODE_NODATASUM) !=
@@ -2943,12 +3055,32 @@
goto out_unlock;
}
- ret = btrfs_cmp_data(src, loff, dst, dst_loff, len);
- if (ret == 0)
- ret = btrfs_clone(src, dst, loff, olen, len, dst_loff);
+ ret = btrfs_cmp_data_prepare(src, loff, dst, dst_loff, olen, &cmp);
+ if (ret)
+ goto out_unlock;
+ if (same_inode)
+ lock_extent_range(src, same_lock_start, same_lock_len);
+ else
+ btrfs_double_extent_lock(src, loff, dst, dst_loff, len);
+
+ /* pass original length for comparison so we stay within i_size */
+ ret = btrfs_cmp_data(src, loff, dst, dst_loff, olen, &cmp);
+ if (ret == 0)
+ ret = btrfs_clone(src, dst, loff, olen, len, dst_loff, 1);
+
+ if (same_inode)
+ unlock_extent(&BTRFS_I(src)->io_tree, same_lock_start,
+ same_lock_start + same_lock_len - 1);
+ else
+ btrfs_double_extent_unlock(src, loff, dst, dst_loff, len);
+
+ btrfs_cmp_data_free(&cmp);
out_unlock:
- btrfs_double_unlock(src, loff, dst, dst_loff, len);
+ if (same_inode)
+ mutex_unlock(&src->i_mutex);
+ else
+ btrfs_double_inode_unlock(src, dst);
return ret;
}
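
The same-inode branch above rejects overlapping ranges with the standard half-open interval test. A standalone sketch of that test, assuming u64-style unsigned arithmetic (no overflow handling, as in the original):

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

/* Two half-open ranges [a, a+len) and [b, b+len) overlap iff each
 * starts before the other ends -- the same expression used above. */
static bool ranges_overlap(uint64_t a, uint64_t b, uint64_t len)
{
	return b + len > a && b < a + len;
}

int main(void)
{
	assert(ranges_overlap(0, 4096, 8192));		/* partial overlap */
	assert(!ranges_overlap(0, 4096, 4096));		/* exactly adjacent */
	assert(ranges_overlap(100, 100, 1));		/* identical ranges */
	return 0;
}
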
@@ -3100,13 +3232,15 @@
struct inode *inode,
u64 endoff,
const u64 destoff,
- const u64 olen)
+ const u64 olen,
+ int no_time_update)
{
struct btrfs_root *root = BTRFS_I(inode)->root;
int ret;
inode_inc_iversion(inode);
- inode->i_mtime = inode->i_ctime = CURRENT_TIME;
+ if (!no_time_update)
+ inode->i_mtime = inode->i_ctime = CURRENT_TIME;
/*
* We round up to the block size at eof when determining which
* extents to clone above, but shouldn't round up the file size.
@@ -3191,13 +3325,13 @@
* @inode: Inode to clone to
* @off: Offset within source to start clone from
* @olen: Original length, passed by user, of range to clone
- * @olen_aligned: Block-aligned value of olen, extent_same uses
- * identical values here
+ * @olen_aligned: Block-aligned value of olen
* @destoff: Offset within @inode to start clone
+ * @no_time_update: Whether to update mtime/ctime on the target inode
*/
static int btrfs_clone(struct inode *src, struct inode *inode,
const u64 off, const u64 olen, const u64 olen_aligned,
- const u64 destoff)
+ const u64 destoff, int no_time_update)
{
struct btrfs_root *root = BTRFS_I(inode)->root;
struct btrfs_path *path = NULL;
@@ -3521,7 +3655,8 @@
root->sectorsize);
ret = clone_finish_inode_update(trans, inode,
last_dest_end,
- destoff, olen);
+ destoff, olen,
+ no_time_update);
if (ret)
goto out;
if (new_key.offset + datal >= destoff + len)
@@ -3559,7 +3694,7 @@
clone_update_extent_map(inode, trans, NULL, last_dest_end,
destoff + len - last_dest_end);
ret = clone_finish_inode_update(trans, inode, destoff + len,
- destoff, olen);
+ destoff, olen, no_time_update);
}
out:
@@ -3696,7 +3831,7 @@
lock_extent_range(inode, destoff, len);
}
- ret = btrfs_clone(src, inode, off, olen, len, destoff);
+ ret = btrfs_clone(src, inode, off, olen, len, destoff, 0);
if (same_inode) {
u64 lock_start = min_t(u64, off, destoff);
diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c
index 89656d7..52170cf 100644
--- a/fs/btrfs/ordered-data.c
+++ b/fs/btrfs/ordered-data.c
@@ -552,6 +552,10 @@
trace_btrfs_ordered_extent_put(entry->inode, entry);
if (atomic_dec_and_test(&entry->refs)) {
+ ASSERT(list_empty(&entry->log_list));
+ ASSERT(list_empty(&entry->trans_list));
+ ASSERT(list_empty(&entry->root_extent_list));
+ ASSERT(RB_EMPTY_NODE(&entry->rb_node));
if (entry->inode)
btrfs_add_delayed_iput(entry->inode);
while (!list_empty(&entry->list)) {
@@ -579,6 +583,7 @@
spin_lock_irq(&tree->lock);
node = &entry->rb_node;
rb_erase(node, &tree->tree);
+ RB_CLEAR_NODE(node);
if (tree->last == node)
tree->last = NULL;
set_bit(BTRFS_ORDERED_COMPLETE, &entry->flags);
diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
index d5f1f03..e9ace09 100644
--- a/fs/btrfs/qgroup.c
+++ b/fs/btrfs/qgroup.c
@@ -1349,6 +1349,11 @@
struct btrfs_root *quota_root;
struct btrfs_qgroup *qgroup;
int ret = 0;
+	/* Sometimes we want to clear the limit on this qgroup.
+	 * To allow that, treat -1 as a special value which tells
+	 * the kernel to clear the limit on this qgroup.
+	 */
+ const u64 CLEAR_VALUE = -1;
mutex_lock(&fs_info->qgroup_ioctl_lock);
quota_root = fs_info->quota_root;
@@ -1364,14 +1369,42 @@
}
spin_lock(&fs_info->qgroup_lock);
- if (limit->flags & BTRFS_QGROUP_LIMIT_MAX_RFER)
- qgroup->max_rfer = limit->max_rfer;
- if (limit->flags & BTRFS_QGROUP_LIMIT_MAX_EXCL)
- qgroup->max_excl = limit->max_excl;
- if (limit->flags & BTRFS_QGROUP_LIMIT_RSV_RFER)
- qgroup->rsv_rfer = limit->rsv_rfer;
- if (limit->flags & BTRFS_QGROUP_LIMIT_RSV_EXCL)
- qgroup->rsv_excl = limit->rsv_excl;
+ if (limit->flags & BTRFS_QGROUP_LIMIT_MAX_RFER) {
+ if (limit->max_rfer == CLEAR_VALUE) {
+ qgroup->lim_flags &= ~BTRFS_QGROUP_LIMIT_MAX_RFER;
+ limit->flags &= ~BTRFS_QGROUP_LIMIT_MAX_RFER;
+ qgroup->max_rfer = 0;
+ } else {
+ qgroup->max_rfer = limit->max_rfer;
+ }
+ }
+ if (limit->flags & BTRFS_QGROUP_LIMIT_MAX_EXCL) {
+ if (limit->max_excl == CLEAR_VALUE) {
+ qgroup->lim_flags &= ~BTRFS_QGROUP_LIMIT_MAX_EXCL;
+ limit->flags &= ~BTRFS_QGROUP_LIMIT_MAX_EXCL;
+ qgroup->max_excl = 0;
+ } else {
+ qgroup->max_excl = limit->max_excl;
+ }
+ }
+ if (limit->flags & BTRFS_QGROUP_LIMIT_RSV_RFER) {
+ if (limit->rsv_rfer == CLEAR_VALUE) {
+ qgroup->lim_flags &= ~BTRFS_QGROUP_LIMIT_RSV_RFER;
+ limit->flags &= ~BTRFS_QGROUP_LIMIT_RSV_RFER;
+ qgroup->rsv_rfer = 0;
+ } else {
+ qgroup->rsv_rfer = limit->rsv_rfer;
+ }
+ }
+ if (limit->flags & BTRFS_QGROUP_LIMIT_RSV_EXCL) {
+ if (limit->rsv_excl == CLEAR_VALUE) {
+ qgroup->lim_flags &= ~BTRFS_QGROUP_LIMIT_RSV_EXCL;
+ limit->flags &= ~BTRFS_QGROUP_LIMIT_RSV_EXCL;
+ qgroup->rsv_excl = 0;
+ } else {
+ qgroup->rsv_excl = limit->rsv_excl;
+ }
+ }
qgroup->lim_flags |= limit->flags;
spin_unlock(&fs_info->qgroup_lock);
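
The CLEAR_VALUE handling above repeats one pattern four times: a sentinel of (u64)-1 clears both the stored limit and its enable flag. Reduced to a single field, the pattern looks roughly like this (hypothetical names, illustrative only):

#include <stdint.h>
#include <stdio.h>

#define LIMIT_MAX_RFER	(1u << 0)
#define CLEAR_VALUE	((uint64_t)-1)	/* "-1 means clear", as above */

struct qgroup { unsigned flags; uint64_t max_rfer; };

static void set_max_rfer(struct qgroup *qg, uint64_t value)
{
	if (value == CLEAR_VALUE) {	/* drop the limit entirely */
		qg->flags &= ~LIMIT_MAX_RFER;
		qg->max_rfer = 0;
	} else {
		qg->flags |= LIMIT_MAX_RFER;
		qg->max_rfer = value;
	}
}

int main(void)
{
	struct qgroup qg = { 0, 0 };

	set_max_rfer(&qg, 1 << 20);
	set_max_rfer(&qg, CLEAR_VALUE);
	printf("flags=%u max=%llu\n", qg.flags,
	       (unsigned long long)qg.max_rfer);	/* flags=0 max=0 */
	return 0;
}
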
diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
index 827951f..88cbb59 100644
--- a/fs/btrfs/relocation.c
+++ b/fs/btrfs/relocation.c
@@ -4049,7 +4049,7 @@
if (trans && progress && err == -ENOSPC) {
ret = btrfs_force_chunk_alloc(trans, rc->extent_root,
rc->block_group->flags);
- if (ret == 0) {
+ if (ret == 1) {
err = 0;
progress = 0;
goto restart;
diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
index 9f2feab..94db0fa 100644
--- a/fs/btrfs/scrub.c
+++ b/fs/btrfs/scrub.c
@@ -3571,7 +3571,6 @@
static noinline_for_stack int scrub_workers_get(struct btrfs_fs_info *fs_info,
int is_dev_replace)
{
- int ret = 0;
unsigned int flags = WQ_FREEZABLE | WQ_UNBOUND;
int max_active = fs_info->thread_pool_size;
@@ -3584,34 +3583,36 @@
fs_info->scrub_workers =
btrfs_alloc_workqueue("btrfs-scrub", flags,
max_active, 4);
- if (!fs_info->scrub_workers) {
- ret = -ENOMEM;
- goto out;
- }
+ if (!fs_info->scrub_workers)
+ goto fail_scrub_workers;
+
fs_info->scrub_wr_completion_workers =
btrfs_alloc_workqueue("btrfs-scrubwrc", flags,
max_active, 2);
- if (!fs_info->scrub_wr_completion_workers) {
- ret = -ENOMEM;
- goto out;
- }
+ if (!fs_info->scrub_wr_completion_workers)
+ goto fail_scrub_wr_completion_workers;
+
fs_info->scrub_nocow_workers =
btrfs_alloc_workqueue("btrfs-scrubnc", flags, 1, 0);
- if (!fs_info->scrub_nocow_workers) {
- ret = -ENOMEM;
- goto out;
- }
+ if (!fs_info->scrub_nocow_workers)
+ goto fail_scrub_nocow_workers;
fs_info->scrub_parity_workers =
btrfs_alloc_workqueue("btrfs-scrubparity", flags,
max_active, 2);
- if (!fs_info->scrub_parity_workers) {
- ret = -ENOMEM;
- goto out;
- }
+ if (!fs_info->scrub_parity_workers)
+ goto fail_scrub_parity_workers;
}
++fs_info->scrub_workers_refcnt;
-out:
- return ret;
+ return 0;
+
+fail_scrub_parity_workers:
+ btrfs_destroy_workqueue(fs_info->scrub_nocow_workers);
+fail_scrub_nocow_workers:
+ btrfs_destroy_workqueue(fs_info->scrub_wr_completion_workers);
+fail_scrub_wr_completion_workers:
+ btrfs_destroy_workqueue(fs_info->scrub_workers);
+fail_scrub_workers:
+ return -ENOMEM;
}
static noinline_for_stack void scrub_workers_put(struct btrfs_fs_info *fs_info)
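
The rewritten scrub_workers_get() above replaces a shared error path with the classic goto-unwind ladder, where each failure label releases only what was successfully allocated before it, in reverse order. A self-contained sketch of the idiom (toy allocations standing in for the workqueues):

#include <stdlib.h>

struct workers { void *a, *b, *c; };

static int workers_get(struct workers *w)
{
	w->a = malloc(64);
	if (!w->a)
		goto fail_a;
	w->b = malloc(64);
	if (!w->b)
		goto fail_b;
	w->c = malloc(64);
	if (!w->c)
		goto fail_c;
	return 0;

fail_c:
	free(w->b);
fail_b:
	free(w->a);
fail_a:
	return -1;	/* -ENOMEM in the kernel version */
}

int main(void)
{
	struct workers w;

	if (workers_get(&w))
		return 1;
	free(w.c); free(w.b); free(w.a);	/* the "put" side */
	return 0;
}
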
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
index 1ce80c1..9c45431 100644
--- a/fs/btrfs/tree-log.c
+++ b/fs/btrfs/tree-log.c
@@ -4117,6 +4117,187 @@
return 0;
}
+/*
+ * At the moment we always log all xattrs. This is to figure out at log replay
+ * time which xattrs must have their deletion replayed. If an xattr is missing
+ * from the log tree but exists in the fs/subvol tree, we delete it. This is
+ * because, if an xattr is deleted, the inode is fsynced and a power failure
+ * happens, causing the log to be replayed on the next mount, we want the
+ * xattr to no longer exist (the same behaviour as other filesystems with a
+ * journal: ext3/4, xfs, f2fs, etc).
+ */
+static int btrfs_log_all_xattrs(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root,
+ struct inode *inode,
+ struct btrfs_path *path,
+ struct btrfs_path *dst_path)
+{
+ int ret;
+ struct btrfs_key key;
+ const u64 ino = btrfs_ino(inode);
+ int ins_nr = 0;
+ int start_slot = 0;
+
+ key.objectid = ino;
+ key.type = BTRFS_XATTR_ITEM_KEY;
+ key.offset = 0;
+
+ ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
+ if (ret < 0)
+ return ret;
+
+ while (true) {
+ int slot = path->slots[0];
+ struct extent_buffer *leaf = path->nodes[0];
+ int nritems = btrfs_header_nritems(leaf);
+
+ if (slot >= nritems) {
+ if (ins_nr > 0) {
+ u64 last_extent = 0;
+
+ ret = copy_items(trans, inode, dst_path, path,
+ &last_extent, start_slot,
+ ins_nr, 1, 0);
+ /* can't be 1, extent items aren't processed */
+ ASSERT(ret <= 0);
+ if (ret < 0)
+ return ret;
+ ins_nr = 0;
+ }
+ ret = btrfs_next_leaf(root, path);
+ if (ret < 0)
+ return ret;
+ else if (ret > 0)
+ break;
+ continue;
+ }
+
+ btrfs_item_key_to_cpu(leaf, &key, slot);
+ if (key.objectid != ino || key.type != BTRFS_XATTR_ITEM_KEY)
+ break;
+
+ if (ins_nr == 0)
+ start_slot = slot;
+ ins_nr++;
+ path->slots[0]++;
+ cond_resched();
+ }
+ if (ins_nr > 0) {
+ u64 last_extent = 0;
+
+ ret = copy_items(trans, inode, dst_path, path,
+ &last_extent, start_slot,
+ ins_nr, 1, 0);
+ /* can't be 1, extent items aren't processed */
+ ASSERT(ret <= 0);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
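
btrfs_log_all_xattrs() above batches contiguous leaf slots via ins_nr/start_slot and flushes a whole run with one copy_items() call instead of copying item by item. The same accumulation pattern in miniature (illustrative, with a toy flush):

#include <stdio.h>

static void flush(int start, int n)
{
	printf("copy items [%d..%d]\n", start, start + n - 1);
}

/* Batch contiguous "interesting" indexes and flush each run at once. */
static void scan(const int *want, int nr)
{
	int ins_nr = 0, start_slot = 0;

	for (int i = 0; i < nr; i++) {
		if (!want[i]) {
			if (ins_nr)
				flush(start_slot, ins_nr);
			ins_nr = 0;
			continue;
		}
		if (ins_nr == 0)
			start_slot = i;
		ins_nr++;
	}
	if (ins_nr)	/* final partial run */
		flush(start_slot, ins_nr);
}

int main(void)
{
	const int want[] = { 1, 1, 0, 1, 0, 1, 1, 1 };

	scan(want, 8);	/* [0..1], [3..3], [5..7] */
	return 0;
}
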
+/*
+ * If the no holes feature is enabled we need to make sure any hole between the
+ * last extent and the i_size of our inode is explicitly marked in the log. This
+ * is to make sure that doing something like:
+ *
+ * 1) create file with 128Kb of data
+ * 2) truncate file to 64Kb
+ * 3) truncate file to 256Kb
+ * 4) fsync file
+ * 5) <crash/power failure>
+ * 6) mount fs and trigger log replay
+ *
+ * Will give us a file with a size of 256Kb, the first 64Kb of data match what
+ * the file had in its first 64Kb of data at step 1 and the last 192Kb of the
+ * file correspond to a hole. The presence of explicit holes in a log tree is
+ * what guarantees that log replay will remove/adjust file extent items in the
+ * fs/subvol tree.
+ *
+ * Here we do not need to care about holes between extents; that is already
+ * done by copy_items(). We also only need to do this in the full sync path,
+ * where we look up extents from the fs/subvol tree only. In the fast path we
+ * walk the list of modified extent maps instead, and if any of them represents
+ * a hole, we insert a corresponding extent representing a hole in the log tree.
+ */
+static int btrfs_log_trailing_hole(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root,
+ struct inode *inode,
+ struct btrfs_path *path)
+{
+ int ret;
+ struct btrfs_key key;
+ u64 hole_start;
+ u64 hole_size;
+ struct extent_buffer *leaf;
+ struct btrfs_root *log = root->log_root;
+ const u64 ino = btrfs_ino(inode);
+ const u64 i_size = i_size_read(inode);
+
+ if (!btrfs_fs_incompat(root->fs_info, NO_HOLES))
+ return 0;
+
+ key.objectid = ino;
+ key.type = BTRFS_EXTENT_DATA_KEY;
+ key.offset = (u64)-1;
+
+ ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
+ ASSERT(ret != 0);
+ if (ret < 0)
+ return ret;
+
+ ASSERT(path->slots[0] > 0);
+ path->slots[0]--;
+ leaf = path->nodes[0];
+ btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
+
+ if (key.objectid != ino || key.type != BTRFS_EXTENT_DATA_KEY) {
+ /* inode does not have any extents */
+ hole_start = 0;
+ hole_size = i_size;
+ } else {
+ struct btrfs_file_extent_item *extent;
+ u64 len;
+
+ /*
+ * If there's an extent beyond i_size, an explicit hole was
+ * already inserted by copy_items().
+ */
+ if (key.offset >= i_size)
+ return 0;
+
+ extent = btrfs_item_ptr(leaf, path->slots[0],
+ struct btrfs_file_extent_item);
+
+ if (btrfs_file_extent_type(leaf, extent) ==
+ BTRFS_FILE_EXTENT_INLINE) {
+ len = btrfs_file_extent_inline_len(leaf,
+ path->slots[0],
+ extent);
+ ASSERT(len == i_size);
+ return 0;
+ }
+
+ len = btrfs_file_extent_num_bytes(leaf, extent);
+ /* Last extent goes beyond i_size, no need to log a hole. */
+ if (key.offset + len > i_size)
+ return 0;
+ hole_start = key.offset + len;
+ hole_size = i_size - hole_start;
+ }
+ btrfs_release_path(path);
+
+ /* Last extent ends at i_size. */
+ if (hole_size == 0)
+ return 0;
+
+ hole_size = ALIGN(hole_size, root->sectorsize);
+ ret = btrfs_insert_file_extent(trans, log, ino, hole_start, 0, 0,
+ hole_size, 0, hole_size, 0, 0, 0);
+ return ret;
+}
+
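
The hole math in btrfs_log_trailing_hole() reduces to: the hole spans from the end of the last extent to i_size, rounded up to the sector size. A worked sketch of that computation, assuming a power-of-two sector size (illustrative only):

#include <stdint.h>
#include <stdio.h>

#define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((uint64_t)(a) - 1))

static uint64_t trailing_hole(uint64_t last_extent_end, uint64_t i_size,
			      uint64_t sectorsize)
{
	if (last_extent_end >= i_size)
		return 0;	/* no hole to log */
	return ALIGN_UP(i_size - last_extent_end, sectorsize);
}

int main(void)
{
	/* file truncated 64K -> 256K: a 192K trailing hole */
	printf("%llu\n", (unsigned long long)
	       trailing_hole(64 * 1024, 256 * 1024, 4096));
	return 0;
}
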
/* log a single inode in the tree log.
* At least one parent directory for this inode must exist in the tree
* or be logged already.
@@ -4155,6 +4336,7 @@
u64 ino = btrfs_ino(inode);
struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
u64 logged_isize = 0;
+ bool need_log_inode_item = true;
path = btrfs_alloc_path();
if (!path)
@@ -4263,11 +4445,6 @@
} else {
if (inode_only == LOG_INODE_ALL)
fast_search = true;
- ret = log_inode_item(trans, log, dst_path, inode);
- if (ret) {
- err = ret;
- goto out_unlock;
- }
goto log_extents;
}
@@ -4290,6 +4467,28 @@
if (min_key.type > max_key.type)
break;
+ if (min_key.type == BTRFS_INODE_ITEM_KEY)
+ need_log_inode_item = false;
+
+ /* Skip xattrs, we log them later with btrfs_log_all_xattrs() */
+ if (min_key.type == BTRFS_XATTR_ITEM_KEY) {
+ if (ins_nr == 0)
+ goto next_slot;
+ ret = copy_items(trans, inode, dst_path, path,
+ &last_extent, ins_start_slot,
+ ins_nr, inode_only, logged_isize);
+ if (ret < 0) {
+ err = ret;
+ goto out_unlock;
+ }
+ ins_nr = 0;
+ if (ret) {
+ btrfs_release_path(path);
+ continue;
+ }
+ goto next_slot;
+ }
+
src = path->nodes[0];
if (ins_nr && ins_start_slot + ins_nr == path->slots[0]) {
ins_nr++;
@@ -4357,9 +4556,26 @@
ins_nr = 0;
}
+ btrfs_release_path(path);
+ btrfs_release_path(dst_path);
+ err = btrfs_log_all_xattrs(trans, root, inode, path, dst_path);
+ if (err)
+ goto out_unlock;
+ if (max_key.type >= BTRFS_EXTENT_DATA_KEY && !fast_search) {
+ btrfs_release_path(path);
+ btrfs_release_path(dst_path);
+ err = btrfs_log_trailing_hole(trans, root, inode, path);
+ if (err)
+ goto out_unlock;
+ }
log_extents:
btrfs_release_path(path);
btrfs_release_path(dst_path);
+ if (need_log_inode_item) {
+ err = log_inode_item(trans, log, dst_path, inode);
+ if (err)
+ goto out_unlock;
+ }
if (fast_search) {
/*
* Some ordered extents started by fsync might have completed
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index 4b438b4..fbe7c10 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -2766,6 +2766,20 @@
root = root->fs_info->chunk_root;
extent_root = root->fs_info->extent_root;
+ /*
+ * Prevent races with automatic removal of unused block groups.
+ * After we relocate and before we remove the chunk with offset
+ * chunk_offset, automatic removal of the block group can kick in,
+ * resulting in a failure when calling btrfs_remove_chunk() below.
+ *
+ * Make sure to acquire this mutex before doing a tree search (dev
+ * or chunk trees) to find chunks. Otherwise the cleaner kthread might
+ * call btrfs_remove_chunk() (through btrfs_delete_unused_bgs()) after
+ * we release the path used to search the chunk/dev tree and before
+ * the current task acquires this mutex and calls us.
+ */
+ ASSERT(mutex_is_locked(&root->fs_info->delete_unused_bgs_mutex));
+
ret = btrfs_can_relocate(extent_root, chunk_offset);
if (ret)
return -ENOSPC;
@@ -2814,13 +2828,18 @@
key.type = BTRFS_CHUNK_ITEM_KEY;
while (1) {
+ mutex_lock(&root->fs_info->delete_unused_bgs_mutex);
ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
- if (ret < 0)
+ if (ret < 0) {
+ mutex_unlock(&root->fs_info->delete_unused_bgs_mutex);
goto error;
+ }
BUG_ON(ret == 0); /* Corruption */
ret = btrfs_previous_item(chunk_root, path, key.objectid,
key.type);
+ if (ret)
+ mutex_unlock(&root->fs_info->delete_unused_bgs_mutex);
if (ret < 0)
goto error;
if (ret > 0)
@@ -2843,6 +2862,7 @@
else
BUG_ON(ret);
}
+ mutex_unlock(&root->fs_info->delete_unused_bgs_mutex);
if (found_key.offset == 0)
break;
@@ -3299,9 +3319,12 @@
goto error;
}
+ mutex_lock(&fs_info->delete_unused_bgs_mutex);
ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
- if (ret < 0)
+ if (ret < 0) {
+ mutex_unlock(&fs_info->delete_unused_bgs_mutex);
goto error;
+ }
/*
* this shouldn't happen, it means the last relocate
@@ -3313,6 +3336,7 @@
ret = btrfs_previous_item(chunk_root, path, 0,
BTRFS_CHUNK_ITEM_KEY);
if (ret) {
+ mutex_unlock(&fs_info->delete_unused_bgs_mutex);
ret = 0;
break;
}
@@ -3321,8 +3345,10 @@
slot = path->slots[0];
btrfs_item_key_to_cpu(leaf, &found_key, slot);
- if (found_key.objectid != key.objectid)
+ if (found_key.objectid != key.objectid) {
+ mutex_unlock(&fs_info->delete_unused_bgs_mutex);
break;
+ }
chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
@@ -3335,10 +3361,13 @@
ret = should_balance_chunk(chunk_root, leaf, chunk,
found_key.offset);
btrfs_release_path(path);
- if (!ret)
+ if (!ret) {
+ mutex_unlock(&fs_info->delete_unused_bgs_mutex);
goto loop;
+ }
if (counting) {
+ mutex_unlock(&fs_info->delete_unused_bgs_mutex);
spin_lock(&fs_info->balance_lock);
bctl->stat.expected++;
spin_unlock(&fs_info->balance_lock);
@@ -3348,6 +3377,7 @@
ret = btrfs_relocate_chunk(chunk_root,
found_key.objectid,
found_key.offset);
+ mutex_unlock(&fs_info->delete_unused_bgs_mutex);
if (ret && ret != -ENOSPC)
goto error;
if (ret == -ENOSPC) {
@@ -4087,11 +4117,16 @@
key.type = BTRFS_DEV_EXTENT_KEY;
do {
+ mutex_lock(&root->fs_info->delete_unused_bgs_mutex);
ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
- if (ret < 0)
+ if (ret < 0) {
+ mutex_unlock(&root->fs_info->delete_unused_bgs_mutex);
goto done;
+ }
ret = btrfs_previous_item(root, path, 0, key.type);
+ if (ret)
+ mutex_unlock(&root->fs_info->delete_unused_bgs_mutex);
if (ret < 0)
goto done;
if (ret) {
@@ -4105,6 +4140,7 @@
btrfs_item_key_to_cpu(l, &key, path->slots[0]);
if (key.objectid != device->devid) {
+ mutex_unlock(&root->fs_info->delete_unused_bgs_mutex);
btrfs_release_path(path);
break;
}
@@ -4113,6 +4149,7 @@
length = btrfs_dev_extent_length(l, dev_extent);
if (key.offset + length <= new_size) {
+ mutex_unlock(&root->fs_info->delete_unused_bgs_mutex);
btrfs_release_path(path);
break;
}
@@ -4122,6 +4159,7 @@
btrfs_release_path(path);
ret = btrfs_relocate_chunk(root, chunk_objectid, chunk_offset);
+ mutex_unlock(&root->fs_info->delete_unused_bgs_mutex);
if (ret && ret != -ENOSPC)
goto done;
if (ret == -ENOSPC)
@@ -5715,7 +5753,6 @@
static void btrfs_end_bio(struct bio *bio, int err)
{
struct btrfs_bio *bbio = bio->bi_private;
- struct btrfs_device *dev = bbio->stripes[0].dev;
int is_orig_bio = 0;
if (err) {
@@ -5723,6 +5760,7 @@
if (err == -EIO || err == -EREMOTEIO) {
unsigned int stripe_index =
btrfs_io_bio(bio)->stripe_index;
+ struct btrfs_device *dev;
BUG_ON(stripe_index >= bbio->num_stripes);
dev = bbio->stripes[stripe_index].dev;
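
The delete_unused_bgs_mutex comment above describes a classic search-then-act race: the lookup and the removal must share one critical section, or a background cleaner can delete the entry in between. A toy userspace analog of that rule (hypothetical table, illustrative only):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
static int table[4] = { 1, 1, 1, 1 };	/* 1 = entry present */

/* Search and act under one lock, so a concurrent cleaner cannot
 * remove the entry between the lookup and the removal. */
static int remove_entry(int idx)
{
	int ret = -1;

	pthread_mutex_lock(&table_lock);
	if (table[idx]) {	/* search... */
		table[idx] = 0;	/* ...and act, still under the lock */
		ret = 0;
	}
	pthread_mutex_unlock(&table_lock);
	return ret;
}

int main(void)
{
	printf("%d %d\n", remove_entry(2), remove_entry(2));	/* 0 -1 */
	return 0;
}
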
diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c
index 6b8e2f0..48851f6 100644
--- a/fs/compat_ioctl.c
+++ b/fs/compat_ioctl.c
@@ -896,6 +896,7 @@
/* 'X' - originally XFS but some now in the VFS */
COMPATIBLE_IOCTL(FIFREEZE)
COMPATIBLE_IOCTL(FITHAW)
+COMPATIBLE_IOCTL(FITRIM)
COMPATIBLE_IOCTL(KDGETKEYCODE)
COMPATIBLE_IOCTL(KDSETKEYCODE)
COMPATIBLE_IOCTL(KDGKBTYPE)
diff --git a/fs/dcache.c b/fs/dcache.c
index 7a3f3e5..5c8ea15 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -642,7 +642,7 @@
/*
 	 * If we have a d_op->d_delete() operation, we should not
- * let the dentry count go to zero, so use "put__or_lock".
+ * let the dentry count go to zero, so use "put_or_lock".
*/
if (unlikely(dentry->d_flags & DCACHE_OP_DELETE))
return lockref_put_or_lock(&dentry->d_lockref);
@@ -697,7 +697,7 @@
*/
smp_rmb();
d_flags = ACCESS_ONCE(dentry->d_flags);
- d_flags &= DCACHE_REFERENCED | DCACHE_LRU_LIST;
+ d_flags &= DCACHE_REFERENCED | DCACHE_LRU_LIST | DCACHE_DISCONNECTED;
/* Nothing to do? Dropping the reference was all we needed? */
if (d_flags == (DCACHE_REFERENCED | DCACHE_LRU_LIST) && !d_unhashed(dentry))
@@ -776,6 +776,9 @@
if (unlikely(d_unhashed(dentry)))
goto kill_it;
+ if (unlikely(dentry->d_flags & DCACHE_DISCONNECTED))
+ goto kill_it;
+
if (unlikely(dentry->d_flags & DCACHE_OP_DELETE)) {
if (dentry->d_op->d_delete(dentry))
goto kill_it;
diff --git a/fs/ecryptfs/file.c b/fs/ecryptfs/file.c
index 72afcc6..feef8a9 100644
--- a/fs/ecryptfs/file.c
+++ b/fs/ecryptfs/file.c
@@ -325,7 +325,6 @@
return rc;
switch (cmd) {
- case FITRIM:
case FS_IOC32_GETFLAGS:
case FS_IOC32_SETFLAGS:
case FS_IOC32_GETVERSION:
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
index aadb728..2553aa8 100644
--- a/fs/ext4/extents.c
+++ b/fs/ext4/extents.c
@@ -504,7 +504,7 @@
struct buffer_head *bh;
int err;
- bh = sb_getblk(inode->i_sb, pblk);
+ bh = sb_getblk_gfp(inode->i_sb, pblk, __GFP_MOVABLE | GFP_NOFS);
if (unlikely(!bh))
return ERR_PTR(-ENOMEM);
@@ -1089,7 +1089,7 @@
err = -EIO;
goto cleanup;
}
- bh = sb_getblk(inode->i_sb, newblock);
+ bh = sb_getblk_gfp(inode->i_sb, newblock, __GFP_MOVABLE | GFP_NOFS);
if (unlikely(!bh)) {
err = -ENOMEM;
goto cleanup;
@@ -1283,7 +1283,7 @@
if (newblock == 0)
return err;
- bh = sb_getblk(inode->i_sb, newblock);
+ bh = sb_getblk_gfp(inode->i_sb, newblock, __GFP_MOVABLE | GFP_NOFS);
if (unlikely(!bh))
return -ENOMEM;
lock_buffer(bh);
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 41f8e55..cecf9aa 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -1323,7 +1323,7 @@
unsigned int offset,
unsigned int length)
{
- int to_release = 0;
+ int to_release = 0, contiguous_blks = 0;
struct buffer_head *head, *bh;
unsigned int curr_off = 0;
struct inode *inode = page->mapping->host;
@@ -1344,14 +1344,23 @@
if ((offset <= curr_off) && (buffer_delay(bh))) {
to_release++;
+ contiguous_blks++;
clear_buffer_delay(bh);
+ } else if (contiguous_blks) {
+ lblk = page->index <<
+ (PAGE_CACHE_SHIFT - inode->i_blkbits);
+ lblk += (curr_off >> inode->i_blkbits) -
+ contiguous_blks;
+ ext4_es_remove_extent(inode, lblk, contiguous_blks);
+ contiguous_blks = 0;
}
curr_off = next_off;
} while ((bh = bh->b_this_page) != head);
- if (to_release) {
+ if (contiguous_blks) {
lblk = page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
- ext4_es_remove_extent(inode, lblk, to_release);
+ lblk += (curr_off >> inode->i_blkbits) - contiguous_blks;
+ ext4_es_remove_extent(inode, lblk, contiguous_blks);
}
/* If we have released all the blocks belonging to a cluster, then we
@@ -4344,7 +4353,12 @@
int inode_size = EXT4_INODE_SIZE(sb);
oi.orig_ino = orig_ino;
- ino = (orig_ino & ~(inodes_per_block - 1)) + 1;
+ /*
+ * Calculate the first inode in the inode table block. Inode
+ * numbers are one-based. That is, the first inode in a block
+ * (assuming 4k blocks and 256 byte inodes) is (n*16 + 1).
+ */
+ ino = ((orig_ino - 1) & ~(inodes_per_block - 1)) + 1;
for (i = 0; i < inodes_per_block; i++, ino++, buf += inode_size) {
if (ino == orig_ino)
continue;
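
The fixed expression above first converts the one-based inode number to zero-based before masking down to the block boundary, then converts back. A small demonstration of why the "- 1" matters, assuming 16 inodes per block (illustrative only):

#include <stdio.h>

/* Inode numbers are one-based: go to zero-based, mask to the block
 * boundary, then back to one-based. */
static unsigned first_ino_in_block(unsigned orig_ino, unsigned per_block)
{
	return ((orig_ino - 1) & ~(per_block - 1)) + 1;
}

int main(void)
{
	printf("%u %u %u\n",
	       first_ino_in_block(1, 16),	/* 1 */
	       first_ino_in_block(16, 16),	/* 1 -- the old code gave 17 */
	       first_ino_in_block(17, 16));	/* 17 */
	return 0;
}
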
diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c
index cb84512..1346cfa 100644
--- a/fs/ext4/ioctl.c
+++ b/fs/ext4/ioctl.c
@@ -755,7 +755,6 @@
return err;
}
case EXT4_IOC_MOVE_EXT:
- case FITRIM:
case EXT4_IOC_RESIZE_FS:
case EXT4_IOC_PRECACHE_EXTENTS:
case EXT4_IOC_SET_ENCRYPTION_POLICY:
diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
index f6aedf8..34b610e 100644
--- a/fs/ext4/mballoc.c
+++ b/fs/ext4/mballoc.c
@@ -4816,18 +4816,12 @@
/*
 	 * blocks being freed are metadata. These blocks shouldn't
* be used until this transaction is committed
+ *
+ * We use __GFP_NOFAIL because ext4_free_blocks() is not allowed
+ * to fail.
*/
- retry:
- new_entry = kmem_cache_alloc(ext4_free_data_cachep, GFP_NOFS);
- if (!new_entry) {
- /*
- * We use a retry loop because
- * ext4_free_blocks() is not allowed to fail.
- */
- cond_resched();
- congestion_wait(BLK_RW_ASYNC, HZ/50);
- goto retry;
- }
+ new_entry = kmem_cache_alloc(ext4_free_data_cachep,
+ GFP_NOFS|__GFP_NOFAIL);
new_entry->efd_start_cluster = bit;
new_entry->efd_group = block_group;
new_entry->efd_count = count_clusters;
diff --git a/fs/ext4/migrate.c b/fs/ext4/migrate.c
index b52374e..6163ad2 100644
--- a/fs/ext4/migrate.c
+++ b/fs/ext4/migrate.c
@@ -620,6 +620,7 @@
struct ext4_inode_info *ei = EXT4_I(inode);
struct ext4_extent *ex;
unsigned int i, len;
+ ext4_lblk_t start, end;
ext4_fsblk_t blk;
handle_t *handle;
int ret;
@@ -633,6 +634,14 @@
EXT4_FEATURE_RO_COMPAT_BIGALLOC))
return -EOPNOTSUPP;
+ /*
+ * In order to get correct extent info, force all delayed allocation
+	 * blocks to be allocated; otherwise delayed allocation blocks may not
+	 * be reflected and would bypass the checks on the extent header.
+ */
+ if (test_opt(inode->i_sb, DELALLOC))
+ ext4_alloc_da_blocks(inode);
+
handle = ext4_journal_start(inode, EXT4_HT_MIGRATE, 1);
if (IS_ERR(handle))
return PTR_ERR(handle);
@@ -650,11 +659,13 @@
goto errout;
}
if (eh->eh_entries == 0)
- blk = len = 0;
+ blk = len = start = end = 0;
else {
len = le16_to_cpu(ex->ee_len);
blk = ext4_ext_pblock(ex);
- if (len > EXT4_NDIR_BLOCKS) {
+ start = le32_to_cpu(ex->ee_block);
+ end = start + len - 1;
+ if (end >= EXT4_NDIR_BLOCKS) {
ret = -EOPNOTSUPP;
goto errout;
}
@@ -662,7 +673,7 @@
ext4_clear_inode_flag(inode, EXT4_INODE_EXTENTS);
memset(ei->i_data, 0, sizeof(ei->i_data));
- for (i=0; i < len; i++)
+ for (i = start; i <= end; i++)
ei->i_data[i] = cpu_to_le32(blk++);
ext4_mark_inode_dirty(handle, inode);
errout:
diff --git a/fs/hpfs/alloc.c b/fs/hpfs/alloc.c
index f005046..d6a4b55 100644
--- a/fs/hpfs/alloc.c
+++ b/fs/hpfs/alloc.c
@@ -484,3 +484,98 @@
a->btree.first_free = cpu_to_le16(8);
return a;
}
+
+static unsigned find_run(__le32 *bmp, unsigned *idx)
+{
+ unsigned len;
+ while (tstbits(bmp, *idx, 1)) {
+ (*idx)++;
+ if (unlikely(*idx >= 0x4000))
+ return 0;
+ }
+ len = 1;
+ while (!tstbits(bmp, *idx + len, 1))
+ len++;
+ return len;
+}
+
+static int do_trim(struct super_block *s, secno start, unsigned len, secno limit_start, secno limit_end, unsigned minlen, unsigned *result)
+{
+ int err;
+ secno end;
+ if (fatal_signal_pending(current))
+ return -EINTR;
+ end = start + len;
+ if (start < limit_start)
+ start = limit_start;
+ if (end > limit_end)
+ end = limit_end;
+ if (start >= end)
+ return 0;
+ if (end - start < minlen)
+ return 0;
+ err = sb_issue_discard(s, start, end - start, GFP_NOFS, 0);
+ if (err)
+ return err;
+ *result += end - start;
+ return 0;
+}
+
+int hpfs_trim_fs(struct super_block *s, u64 start, u64 end, u64 minlen, unsigned *result)
+{
+ int err = 0;
+ struct hpfs_sb_info *sbi = hpfs_sb(s);
+ unsigned idx, len, start_bmp, end_bmp;
+ __le32 *bmp;
+ struct quad_buffer_head qbh;
+
+ *result = 0;
+ if (!end || end > sbi->sb_fs_size)
+ end = sbi->sb_fs_size;
+ if (start >= sbi->sb_fs_size)
+ return 0;
+ if (minlen > 0x4000)
+ return 0;
+ if (start < sbi->sb_dirband_start + sbi->sb_dirband_size && end > sbi->sb_dirband_start) {
+ hpfs_lock(s);
+ if (s->s_flags & MS_RDONLY) {
+ err = -EROFS;
+ goto unlock_1;
+ }
+ if (!(bmp = hpfs_map_dnode_bitmap(s, &qbh))) {
+ err = -EIO;
+ goto unlock_1;
+ }
+ idx = 0;
+ while ((len = find_run(bmp, &idx)) && !err) {
+ err = do_trim(s, sbi->sb_dirband_start + idx * 4, len * 4, start, end, minlen, result);
+ idx += len;
+ }
+ hpfs_brelse4(&qbh);
+unlock_1:
+ hpfs_unlock(s);
+ }
+ start_bmp = start >> 14;
+ end_bmp = (end + 0x3fff) >> 14;
+ while (start_bmp < end_bmp && !err) {
+ hpfs_lock(s);
+ if (s->s_flags & MS_RDONLY) {
+ err = -EROFS;
+ goto unlock_2;
+ }
+ if (!(bmp = hpfs_map_bitmap(s, start_bmp, &qbh, "trim"))) {
+ err = -EIO;
+ goto unlock_2;
+ }
+ idx = 0;
+ while ((len = find_run(bmp, &idx)) && !err) {
+ err = do_trim(s, (start_bmp << 14) + idx, len, start, end, minlen, result);
+ idx += len;
+ }
+ hpfs_brelse4(&qbh);
+unlock_2:
+ hpfs_unlock(s);
+ start_bmp++;
+ }
+ return err;
+}
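
find_run() above scans a bitmap for runs of free sectors: skip the in-use bits, then count the length of the free run that follows. A bounded standalone version, assuming a set bit marks a free sector as HPFS bitmaps do (illustrative only; the kernel version relies on a sentinel at the bitmap end instead of an explicit bound):

#include <stdio.h>

static unsigned find_free_run(const unsigned char *bmp, unsigned nbits,
			      unsigned *idx)
{
	unsigned len;

	/* skip in-use sectors (clear bits) */
	while (*idx < nbits && !((bmp[*idx / 8] >> (*idx % 8)) & 1))
		(*idx)++;
	if (*idx >= nbits)
		return 0;
	/* count the free (set) bits that follow */
	for (len = 0; *idx + len < nbits &&
	     ((bmp[(*idx + len) / 8] >> ((*idx + len) % 8)) & 1); len++)
		;
	return len;
}

int main(void)
{
	const unsigned char bmp[] = { 0xf0, 0x0f };	/* free: bits 4-11 */
	unsigned idx = 0, len;

	while ((len = find_free_run(bmp, 16, &idx))) {
		printf("free run at %u, len %u\n", idx, len); /* at 4, len 8 */
		idx += len;
	}
	return 0;
}
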
diff --git a/fs/hpfs/dir.c b/fs/hpfs/dir.c
index 2a8e074..dc540bf 100644
--- a/fs/hpfs/dir.c
+++ b/fs/hpfs/dir.c
@@ -327,4 +327,5 @@
.iterate = hpfs_readdir,
.release = hpfs_dir_release,
.fsync = hpfs_file_fsync,
+ .unlocked_ioctl = hpfs_ioctl,
};
diff --git a/fs/hpfs/file.c b/fs/hpfs/file.c
index 6d8cfe9..7ca28d6 100644
--- a/fs/hpfs/file.c
+++ b/fs/hpfs/file.c
@@ -203,6 +203,7 @@
.release = hpfs_file_release,
.fsync = hpfs_file_fsync,
.splice_read = generic_file_splice_read,
+ .unlocked_ioctl = hpfs_ioctl,
};
const struct inode_operations hpfs_file_iops =
diff --git a/fs/hpfs/hpfs_fn.h b/fs/hpfs/hpfs_fn.h
index bb04b58..c4867b5 100644
--- a/fs/hpfs/hpfs_fn.h
+++ b/fs/hpfs/hpfs_fn.h
@@ -18,6 +18,8 @@
#include <linux/pagemap.h>
#include <linux/buffer_head.h>
#include <linux/slab.h>
+#include <linux/sched.h>
+#include <linux/blkdev.h>
#include <asm/unaligned.h>
#include "hpfs.h"
@@ -200,6 +202,7 @@
struct dnode *hpfs_alloc_dnode(struct super_block *, secno, dnode_secno *, struct quad_buffer_head *);
struct fnode *hpfs_alloc_fnode(struct super_block *, secno, fnode_secno *, struct buffer_head **);
struct anode *hpfs_alloc_anode(struct super_block *, secno, anode_secno *, struct buffer_head **);
+int hpfs_trim_fs(struct super_block *, u64, u64, u64, unsigned *);
/* anode.c */
@@ -318,6 +321,7 @@
void hpfs_error(struct super_block *, const char *, ...);
int hpfs_stop_cycles(struct super_block *, int, int *, int *, char *);
unsigned hpfs_get_free_dnodes(struct super_block *);
+long hpfs_ioctl(struct file *file, unsigned cmd, unsigned long arg);
/*
* local time (HPFS) to GMT (Unix)
diff --git a/fs/hpfs/super.c b/fs/hpfs/super.c
index 7cd00d3..68a9bed 100644
--- a/fs/hpfs/super.c
+++ b/fs/hpfs/super.c
@@ -52,17 +52,20 @@
}
/* Filesystem error... */
-static char err_buf[1024];
-
void hpfs_error(struct super_block *s, const char *fmt, ...)
{
+ struct va_format vaf;
va_list args;
va_start(args, fmt);
- vsnprintf(err_buf, sizeof(err_buf), fmt, args);
+
+ vaf.fmt = fmt;
+ vaf.va = &args;
+
+ pr_err("filesystem error: %pV", &vaf);
+
va_end(args);
- pr_err("filesystem error: %s", err_buf);
if (!hpfs_sb(s)->sb_was_error) {
if (hpfs_sb(s)->sb_err == 2) {
pr_cont("; crashing the system because you wanted it\n");
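
The hpfs_error() rewrite above drops a file-scope err_buf that two concurrent callers could race on, handing the va_list straight to the printer (via %pV) instead. A userspace analog of the same change (illustrative only):

#include <stdarg.h>
#include <stdio.h>

/* Forward the va_list directly to the output stream instead of
 * formatting into a shared static buffer, which concurrent callers
 * could otherwise corrupt. */
static void fs_error(const char *fmt, ...)
{
	va_list args;

	va_start(args, fmt);
	fprintf(stderr, "filesystem error: ");
	vfprintf(stderr, fmt, args);	/* no intermediate buffer */
	fputc('\n', stderr);
	va_end(args);
}

int main(void)
{
	fs_error("bad sector %d on %s", 42, "dev0");
	return 0;
}
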
@@ -196,12 +199,39 @@
return 0;
}
+
+long hpfs_ioctl(struct file *file, unsigned cmd, unsigned long arg)
+{
+ switch (cmd) {
+ case FITRIM: {
+ struct fstrim_range range;
+ secno n_trimmed;
+ int r;
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+ if (copy_from_user(&range, (struct fstrim_range __user *)arg, sizeof(range)))
+ return -EFAULT;
+ r = hpfs_trim_fs(file_inode(file)->i_sb, range.start >> 9, (range.start + range.len) >> 9, (range.minlen + 511) >> 9, &n_trimmed);
+ if (r)
+ return r;
+ range.len = (u64)n_trimmed << 9;
+ if (copy_to_user((struct fstrim_range __user *)arg, &range, sizeof(range)))
+ return -EFAULT;
+ return 0;
+ }
+ default: {
+ return -ENOIOCTLCMD;
+ }
+ }
+}
+
+
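
hpfs_ioctl() above converts the FITRIM byte range into 512-byte sectors: start and end truncate, while minlen rounds up so the caller's minimum run length is never weakened. The arithmetic in isolation (hypothetical names, illustrative only):

#include <stdint.h>
#include <stdio.h>

struct trim_args { uint64_t start_sec, end_sec, min_secs; };

static void fstrim_to_sectors(uint64_t start, uint64_t len,
			      uint64_t minlen, struct trim_args *a)
{
	a->start_sec = start >> 9;		/* truncate */
	a->end_sec = (start + len) >> 9;	/* truncate */
	a->min_secs = (minlen + 511) >> 9;	/* round up */
}

int main(void)
{
	struct trim_args a;

	fstrim_to_sectors(0, 1 << 20, 4096, &a);
	printf("%llu %llu %llu\n",	/* 0 2048 8 */
	       (unsigned long long)a.start_sec,
	       (unsigned long long)a.end_sec,
	       (unsigned long long)a.min_secs);
	return 0;
}
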
static struct kmem_cache * hpfs_inode_cachep;
static struct inode *hpfs_alloc_inode(struct super_block *sb)
{
struct hpfs_inode_info *ei;
- ei = (struct hpfs_inode_info *)kmem_cache_alloc(hpfs_inode_cachep, GFP_NOFS);
+ ei = kmem_cache_alloc(hpfs_inode_cachep, GFP_NOFS);
if (!ei)
return NULL;
ei->vfs_inode.i_version = 1;
@@ -424,11 +454,14 @@
int o;
struct hpfs_sb_info *sbi = hpfs_sb(s);
char *new_opts = kstrdup(data, GFP_KERNEL);
-
+
+ if (!new_opts)
+ return -ENOMEM;
+
sync_filesystem(s);
*flags |= MS_NOATIME;
-
+
hpfs_lock(s);
uid = sbi->sb_uid; gid = sbi->sb_gid;
umask = 0777 & ~sbi->sb_mode;
diff --git a/fs/jfs/ioctl.c b/fs/jfs/ioctl.c
index 93a1232..8db8b7d 100644
--- a/fs/jfs/ioctl.c
+++ b/fs/jfs/ioctl.c
@@ -180,9 +180,6 @@
case JFS_IOC_SETFLAGS32:
cmd = JFS_IOC_SETFLAGS;
break;
- case FITRIM:
- cmd = FITRIM;
- break;
}
return jfs_ioctl(filp, cmd, arg);
}
diff --git a/fs/nilfs2/ioctl.c b/fs/nilfs2/ioctl.c
index 9a20e51..aba4381 100644
--- a/fs/nilfs2/ioctl.c
+++ b/fs/nilfs2/ioctl.c
@@ -1369,7 +1369,6 @@
case NILFS_IOCTL_SYNC:
case NILFS_IOCTL_RESIZE:
case NILFS_IOCTL_SET_ALLOC_RANGE:
- case FITRIM:
break;
default:
return -ENOIOCTLCMD;
diff --git a/fs/ocfs2/ioctl.c b/fs/ocfs2/ioctl.c
index 53e6c40..3cb097c 100644
--- a/fs/ocfs2/ioctl.c
+++ b/fs/ocfs2/ioctl.c
@@ -980,7 +980,6 @@
case OCFS2_IOC_GROUP_EXTEND:
case OCFS2_IOC_GROUP_ADD:
case OCFS2_IOC_GROUP_ADD64:
- case FITRIM:
break;
case OCFS2_IOC_REFLINK:
if (copy_from_user(&args, argp, sizeof(args)))
diff --git a/fs/overlayfs/inode.c b/fs/overlayfs/inode.c
index f140e3db..d9da5a4 100644
--- a/fs/overlayfs/inode.c
+++ b/fs/overlayfs/inode.c
@@ -343,6 +343,9 @@
struct path realpath;
enum ovl_path_type type;
+ if (d_is_dir(dentry))
+ return d_backing_inode(dentry);
+
type = ovl_path_real(dentry, &realpath);
if (ovl_open_need_copy_up(file_flags, type, realpath.dentry)) {
err = ovl_want_write(dentry);
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index c471dfc..d2445fa 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -58,6 +58,19 @@
acpi_fwnode_handle(adev) : NULL)
#define ACPI_HANDLE(dev) acpi_device_handle(ACPI_COMPANION(dev))
+/**
+ * ACPI_DEVICE_CLASS - macro used to describe an ACPI device with
+ * the PCI-defined class-code information
+ *
+ * @_cls : the class, subclass, prog-if triple for this device
+ * @_msk : the class mask for this device
+ *
+ * This macro is used to create a struct acpi_device_id that matches a
+ * specific PCI class. The .id and .driver_data fields are left at
+ * their default (zero) values.
+ */
+#define ACPI_DEVICE_CLASS(_cls, _msk) .cls = (_cls), .cls_msk = (_msk),
+
static inline bool has_acpi_companion(struct device *dev)
{
return is_acpi_node(dev->fwnode);
@@ -309,9 +322,6 @@
int acpi_resources_are_enforced(void);
-int acpi_reserve_region(u64 start, unsigned int length, u8 space_id,
- unsigned long flags, char *desc);
-
#ifdef CONFIG_HIBERNATION
void __init acpi_no_s4_hw_signature(void);
#endif
@@ -446,6 +456,7 @@
#define ACPI_COMPANION(dev) (NULL)
#define ACPI_COMPANION_SET(dev, adev) do { } while (0)
#define ACPI_HANDLE(dev) (NULL)
+#define ACPI_DEVICE_CLASS(_cls, _msk) .cls = (0), .cls_msk = (0),
struct fwnode_handle;
@@ -507,13 +518,6 @@
return 0;
}
-static inline int acpi_reserve_region(u64 start, unsigned int length,
- u8 space_id, unsigned long flags,
- char *desc)
-{
- return -ENXIO;
-}
-
struct acpi_table_header;
static inline int acpi_table_parse(char *id,
int (*handler)(struct acpi_table_header *))
diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h
index 73b4522..e6797de 100644
--- a/include/linux/buffer_head.h
+++ b/include/linux/buffer_head.h
@@ -317,6 +317,13 @@
return __getblk_gfp(sb->s_bdev, block, sb->s_blocksize, __GFP_MOVABLE);
}
+
+static inline struct buffer_head *
+sb_getblk_gfp(struct super_block *sb, sector_t block, gfp_t gfp)
+{
+ return __getblk_gfp(sb->s_bdev, block, sb->s_blocksize, gfp);
+}
+
static inline struct buffer_head *
sb_find_get_block(struct super_block *sb, sector_t block)
{
diff --git a/include/linux/ceph/messenger.h b/include/linux/ceph/messenger.h
index e154994..3775327 100644
--- a/include/linux/ceph/messenger.h
+++ b/include/linux/ceph/messenger.h
@@ -8,6 +8,7 @@
#include <linux/radix-tree.h>
#include <linux/uio.h>
#include <linux/workqueue.h>
+#include <net/net_namespace.h>
#include <linux/ceph/types.h>
#include <linux/ceph/buffer.h>
@@ -56,6 +57,7 @@
struct ceph_entity_addr my_enc_addr;
atomic_t stopping;
+ possible_net_t net;
bool nocrc;
bool tcp_nodelay;
@@ -267,6 +269,7 @@
u64 required_features,
bool nocrc,
bool tcp_nodelay);
+extern void ceph_messenger_fini(struct ceph_messenger *msgr);
extern void ceph_con_init(struct ceph_connection *con, void *private,
const struct ceph_connection_operations *ops,
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index 7f8ad95..e08a6ae 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -17,11 +17,11 @@
# define __release(x) __context__(x,-1)
# define __cond_lock(x,c) ((c) ? ({ __acquire(x); 1; }) : 0)
# define __percpu __attribute__((noderef, address_space(3)))
+# define __pmem __attribute__((noderef, address_space(5)))
#ifdef CONFIG_SPARSE_RCU_POINTER
# define __rcu __attribute__((noderef, address_space(4)))
#else
# define __rcu
-# define __pmem __attribute__((noderef, address_space(5)))
#endif
extern void __chk_user_ptr(const volatile void __user *);
extern void __chk_io_ptr(const volatile void __iomem *);
diff --git a/include/linux/irqdesc.h b/include/linux/irqdesc.h
index 624a668..fcea4e4 100644
--- a/include/linux/irqdesc.h
+++ b/include/linux/irqdesc.h
@@ -87,7 +87,12 @@
const char *name;
} ____cacheline_internodealigned_in_smp;
-#ifndef CONFIG_SPARSE_IRQ
+#ifdef CONFIG_SPARSE_IRQ
+extern void irq_lock_sparse(void);
+extern void irq_unlock_sparse(void);
+#else
+static inline void irq_lock_sparse(void) { }
+static inline void irq_unlock_sparse(void) { }
extern struct irq_desc irq_desc[NR_IRQS];
#endif
diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
index 8183d66..34f25b7 100644
--- a/include/linux/mod_devicetable.h
+++ b/include/linux/mod_devicetable.h
@@ -189,6 +189,8 @@
struct acpi_device_id {
__u8 id[ACPI_ID_LEN];
kernel_ulong_t driver_data;
+ __u32 cls;
+ __u32 cls_msk;
};
#define PNP_ID_LEN 8
diff --git a/include/linux/rtc/sirfsoc_rtciobrg.h b/include/linux/rtc/sirfsoc_rtciobrg.h
index 2c92e1c..aefd997 100644
--- a/include/linux/rtc/sirfsoc_rtciobrg.h
+++ b/include/linux/rtc/sirfsoc_rtciobrg.h
@@ -9,10 +9,14 @@
#ifndef _SIRFSOC_RTC_IOBRG_H_
#define _SIRFSOC_RTC_IOBRG_H_
+struct regmap_config;
+
extern void sirfsoc_rtc_iobrg_besyncing(void);
extern u32 sirfsoc_rtc_iobrg_readl(u32 addr);
extern void sirfsoc_rtc_iobrg_writel(u32 val, u32 addr);
+struct regmap *devm_regmap_init_iobg(struct device *dev,
+ const struct regmap_config *config);
#endif
diff --git a/include/linux/tick.h b/include/linux/tick.h
index 3741ba1..edbfc9a 100644
--- a/include/linux/tick.h
+++ b/include/linux/tick.h
@@ -67,10 +67,13 @@
static inline void tick_broadcast_control(enum tick_broadcast_mode mode) { }
#endif /* BROADCAST */
-#if defined(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST) && defined(CONFIG_TICK_ONESHOT)
+#ifdef CONFIG_GENERIC_CLOCKEVENTS
extern int tick_broadcast_oneshot_control(enum tick_broadcast_state state);
#else
-static inline int tick_broadcast_oneshot_control(enum tick_broadcast_state state) { return 0; }
+static inline int tick_broadcast_oneshot_control(enum tick_broadcast_state state)
+{
+ return 0;
+}
#endif
static inline void tick_broadcast_enable(void)
diff --git a/include/linux/timekeeping.h b/include/linux/timekeeping.h
index 3aa72e6..6e191e4 100644
--- a/include/linux/timekeeping.h
+++ b/include/linux/timekeeping.h
@@ -145,7 +145,6 @@
}
#endif
-#define do_posix_clock_monotonic_gettime(ts) ktime_get_ts(ts)
#define ktime_get_real_ts64(ts) getnstimeofday64(ts)
/*
diff --git a/kernel/auditsc.c b/kernel/auditsc.c
index 09c6564..e85bdfd 100644
--- a/kernel/auditsc.c
+++ b/kernel/auditsc.c
@@ -1021,8 +1021,7 @@
* for strings that are too long, we should not have created
* any.
*/
- if (unlikely((len == 0) || len > MAX_ARG_STRLEN - 1)) {
- WARN_ON(1);
+ if (WARN_ON_ONCE(len < 0 || len > MAX_ARG_STRLEN - 1)) {
send_sig(SIGKILL, current, 0);
return -1;
}
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 9c9c9fa..6a37454 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -21,6 +21,7 @@
#include <linux/suspend.h>
#include <linux/lockdep.h>
#include <linux/tick.h>
+#include <linux/irq.h>
#include <trace/events/power.h>
#include "smpboot.h"
@@ -392,13 +393,19 @@
smpboot_park_threads(cpu);
/*
+ * Prevent irq alloc/free while the dying cpu reorganizes the
+ * interrupt affinities.
+ */
+ irq_lock_sparse();
+
+ /*
* So now all preempt/rcu users must observe !cpu_active().
*/
-
err = __stop_machine(take_cpu_down, &tcd_param, cpumask_of(cpu));
if (err) {
/* CPU didn't die: tell everyone. Can't complain. */
cpu_notify_nofail(CPU_DOWN_FAILED | mod, hcpu);
+ irq_unlock_sparse();
goto out_release;
}
BUG_ON(cpu_online(cpu));
@@ -415,6 +422,9 @@
smp_mb(); /* Read from cpu_dead_idle before __cpu_die(). */
per_cpu(cpu_dead_idle, cpu) = false;
+ /* Interrupts are moved away from the dying cpu, reenable alloc/free */
+ irq_unlock_sparse();
+
hotplug_cpu__broadcast_tick_pull(cpu);
/* This actually kills the CPU. */
__cpu_die(cpu);
@@ -517,8 +527,18 @@
goto out_notify;
}
+ /*
+ * Some architectures have to walk the irq descriptors to
+	 * set up the vector space for the cpu which comes online.
+ * Prevent irq alloc/free across the bringup.
+ */
+ irq_lock_sparse();
+
/* Arch-specific enabling code. */
ret = __cpu_up(cpu, idle);
+
+ irq_unlock_sparse();
+
if (ret != 0)
goto out_notify;
BUG_ON(!cpu_online(cpu));
diff --git a/kernel/events/core.c b/kernel/events/core.c
index e965cfa..d3dae34 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -4358,14 +4358,6 @@
rcu_read_unlock();
}
-static void rb_free_rcu(struct rcu_head *rcu_head)
-{
- struct ring_buffer *rb;
-
- rb = container_of(rcu_head, struct ring_buffer, rcu_head);
- rb_free(rb);
-}
-
struct ring_buffer *ring_buffer_get(struct perf_event *event)
{
struct ring_buffer *rb;
diff --git a/kernel/events/internal.h b/kernel/events/internal.h
index 2deb24c..2bbad9c 100644
--- a/kernel/events/internal.h
+++ b/kernel/events/internal.h
@@ -11,6 +11,7 @@
struct ring_buffer {
atomic_t refcount;
struct rcu_head rcu_head;
+ struct irq_work irq_work;
#ifdef CONFIG_PERF_USE_VMALLOC
struct work_struct work;
int page_order; /* allocation order */
@@ -55,6 +56,15 @@
};
extern void rb_free(struct ring_buffer *rb);
+
+static inline void rb_free_rcu(struct rcu_head *rcu_head)
+{
+ struct ring_buffer *rb;
+
+ rb = container_of(rcu_head, struct ring_buffer, rcu_head);
+ rb_free(rb);
+}
+
extern struct ring_buffer *
rb_alloc(int nr_pages, long watermark, int cpu, int flags);
extern void perf_event_wakeup(struct perf_event *event);
diff --git a/kernel/events/ring_buffer.c b/kernel/events/ring_buffer.c
index 9647282..b2be01b 100644
--- a/kernel/events/ring_buffer.c
+++ b/kernel/events/ring_buffer.c
@@ -221,6 +221,8 @@
rcu_read_unlock();
}
+static void rb_irq_work(struct irq_work *work);
+
static void
ring_buffer_init(struct ring_buffer *rb, long watermark, int flags)
{
@@ -241,6 +243,16 @@
INIT_LIST_HEAD(&rb->event_list);
spin_lock_init(&rb->event_lock);
+ init_irq_work(&rb->irq_work, rb_irq_work);
+}
+
+static void ring_buffer_put_async(struct ring_buffer *rb)
+{
+ if (!atomic_dec_and_test(&rb->refcount))
+ return;
+
+ rb->rcu_head.next = (void *)rb;
+ irq_work_queue(&rb->irq_work);
}
/*
@@ -319,7 +331,7 @@
rb_free_aux(rb);
err:
- ring_buffer_put(rb);
+ ring_buffer_put_async(rb);
handle->event = NULL;
return NULL;
@@ -370,7 +382,7 @@
local_set(&rb->aux_nest, 0);
rb_free_aux(rb);
- ring_buffer_put(rb);
+ ring_buffer_put_async(rb);
}
/*
@@ -557,7 +569,18 @@
void rb_free_aux(struct ring_buffer *rb)
{
if (atomic_dec_and_test(&rb->aux_refcount))
+ irq_work_queue(&rb->irq_work);
+}
+
+static void rb_irq_work(struct irq_work *work)
+{
+ struct ring_buffer *rb = container_of(work, struct ring_buffer, irq_work);
+
+ if (!atomic_read(&rb->aux_refcount))
__rb_free_aux(rb);
+
+ if (rb->rcu_head.next == (void *)rb)
+ call_rcu(&rb->rcu_head, rb_free_rcu);
}
#ifndef CONFIG_PERF_USE_VMALLOC
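
ring_buffer_put_async() and rb_irq_work() above exploit the fact that rcu_head.next never legitimately points at its own object, so that value can double as a "free was requested" flag checked later from the deferred worker. A toy sketch of the self-pointer trick (hypothetical names, illustrative only):

#include <stdio.h>
#include <stdlib.h>

struct buf {
	struct buf *deferred_next;	/* stands in for rcu_head.next */
	int refs;
};

static void put_async(struct buf *b)
{
	if (--b->refs)
		return;
	b->deferred_next = b;	/* mark: the worker must free this */
	/* ...queue the deferred worker here... */
}

static void worker(struct buf *b)
{
	if (b->deferred_next == b) {	/* was a free requested? */
		puts("freeing");
		free(b);
	}
}

int main(void)
{
	struct buf *b = calloc(1, sizeof(*b));

	if (!b)
		return 1;
	b->refs = 1;
	put_async(b);
	worker(b);	/* prints "freeing" */
	return 0;
}
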
diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h
index 4834ee8..61008b8 100644
--- a/kernel/irq/internals.h
+++ b/kernel/irq/internals.h
@@ -76,12 +76,8 @@
#ifdef CONFIG_SPARSE_IRQ
static inline void irq_mark_irq(unsigned int irq) { }
-extern void irq_lock_sparse(void);
-extern void irq_unlock_sparse(void);
#else
extern void irq_mark_irq(unsigned int irq);
-static inline void irq_lock_sparse(void) { }
-static inline void irq_unlock_sparse(void) { }
#endif
extern void init_kstat_irqs(struct irq_desc *desc, int node, int nr);
diff --git a/kernel/module.c b/kernel/module.c
index 3e0e197..4d2b82e 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -3557,6 +3557,7 @@
mutex_lock(&module_mutex);
/* Unlink carefully: kallsyms could be walking list. */
list_del_rcu(&mod->list);
+ mod_tree_remove(mod);
wake_up_all(&module_wq);
/* Wait for RCU-sched synchronizing before releasing mod->list. */
synchronize_sched();
diff --git a/kernel/time/clockevents.c b/kernel/time/clockevents.c
index 08ccc3d..50eb107 100644
--- a/kernel/time/clockevents.c
+++ b/kernel/time/clockevents.c
@@ -120,19 +120,25 @@
/* The clockevent device is getting replaced. Shut it down. */
case CLOCK_EVT_STATE_SHUTDOWN:
- return dev->set_state_shutdown(dev);
+ if (dev->set_state_shutdown)
+ return dev->set_state_shutdown(dev);
+ return 0;
case CLOCK_EVT_STATE_PERIODIC:
/* Core internal bug */
if (!(dev->features & CLOCK_EVT_FEAT_PERIODIC))
return -ENOSYS;
- return dev->set_state_periodic(dev);
+ if (dev->set_state_periodic)
+ return dev->set_state_periodic(dev);
+ return 0;
case CLOCK_EVT_STATE_ONESHOT:
/* Core internal bug */
if (!(dev->features & CLOCK_EVT_FEAT_ONESHOT))
return -ENOSYS;
- return dev->set_state_oneshot(dev);
+ if (dev->set_state_oneshot)
+ return dev->set_state_oneshot(dev);
+ return 0;
case CLOCK_EVT_STATE_ONESHOT_STOPPED:
/* Core internal bug */
@@ -471,18 +477,6 @@
if (dev->features & CLOCK_EVT_FEAT_DUMMY)
return 0;
- /* New state-specific callbacks */
- if (!dev->set_state_shutdown)
- return -EINVAL;
-
- if ((dev->features & CLOCK_EVT_FEAT_PERIODIC) &&
- !dev->set_state_periodic)
- return -EINVAL;
-
- if ((dev->features & CLOCK_EVT_FEAT_ONESHOT) &&
- !dev->set_state_oneshot)
- return -EINVAL;
-
return 0;
}
diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
index d39f32c..52b9e19 100644
--- a/kernel/time/tick-broadcast.c
+++ b/kernel/time/tick-broadcast.c
@@ -159,7 +159,7 @@
{
struct clock_event_device *bc = tick_broadcast_device.evtdev;
unsigned long flags;
- int ret;
+ int ret = 0;
raw_spin_lock_irqsave(&tick_broadcast_lock, flags);
@@ -221,13 +221,14 @@
* If we kept the cpu in the broadcast mask,
* tell the caller to leave the per cpu device
* in shutdown state. The periodic interrupt
- * is delivered by the broadcast device.
+ * is delivered by the broadcast device, if
+ * the broadcast device exists and is not
+ * hrtimer based.
*/
- ret = cpumask_test_cpu(cpu, tick_broadcast_mask);
+ if (bc && !(bc->features & CLOCK_EVT_FEAT_HRTIMER))
+ ret = cpumask_test_cpu(cpu, tick_broadcast_mask);
break;
default:
- /* Nothing to do */
- ret = 0;
break;
}
}
@@ -265,8 +266,22 @@
* Check, if the current cpu is in the mask
*/
if (cpumask_test_cpu(cpu, mask)) {
+ struct clock_event_device *bc = tick_broadcast_device.evtdev;
+
cpumask_clear_cpu(cpu, mask);
- local = true;
+ /*
+ * We only run the local handler, if the broadcast
+ * device is not hrtimer based. Otherwise we run into
+ * a hrtimer recursion.
+ *
+ * local timer_interrupt()
+ * local_handler()
+ * expire_hrtimers()
+ * bc_handler()
+ * local_handler()
+ * expire_hrtimers()
+ */
+ local = !(bc->features & CLOCK_EVT_FEAT_HRTIMER);
}
if (!cpumask_empty(mask)) {
@@ -301,6 +316,13 @@
bool bc_local;
raw_spin_lock(&tick_broadcast_lock);
+
+ /* Handle spurious interrupts gracefully */
+ if (clockevent_state_shutdown(tick_broadcast_device.evtdev)) {
+ raw_spin_unlock(&tick_broadcast_lock);
+ return;
+ }
+
bc_local = tick_do_periodic_broadcast();
if (clockevent_state_oneshot(dev)) {
@@ -359,8 +381,16 @@
case TICK_BROADCAST_ON:
cpumask_set_cpu(cpu, tick_broadcast_on);
if (!cpumask_test_and_set_cpu(cpu, tick_broadcast_mask)) {
- if (tick_broadcast_device.mode ==
- TICKDEV_MODE_PERIODIC)
+ /*
+ * Only shutdown the cpu local device, if:
+ *
+ * - the broadcast device exists
+ * - the broadcast device is not a hrtimer based one
+ * - the broadcast device is in periodic mode to
+			 *   avoid a hiccup during the switch to oneshot mode
+ */
+ if (bc && !(bc->features & CLOCK_EVT_FEAT_HRTIMER) &&
+ tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC)
clockevents_shutdown(dev);
}
break;
@@ -379,14 +409,16 @@
break;
}
- if (cpumask_empty(tick_broadcast_mask)) {
- if (!bc_stopped)
- clockevents_shutdown(bc);
- } else if (bc_stopped) {
- if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC)
- tick_broadcast_start_periodic(bc);
- else
- tick_broadcast_setup_oneshot(bc);
+ if (bc) {
+ if (cpumask_empty(tick_broadcast_mask)) {
+ if (!bc_stopped)
+ clockevents_shutdown(bc);
+ } else if (bc_stopped) {
+ if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC)
+ tick_broadcast_start_periodic(bc);
+ else
+ tick_broadcast_setup_oneshot(bc);
+ }
}
raw_spin_unlock(&tick_broadcast_lock);
}
@@ -662,71 +694,82 @@
clockevents_switch_state(dev, CLOCK_EVT_STATE_SHUTDOWN);
}
-/**
- * tick_broadcast_oneshot_control - Enter/exit broadcast oneshot mode
- * @state: The target state (enter/exit)
- *
- * The system enters/leaves a state, where affected devices might stop
- * Returns 0 on success, -EBUSY if the cpu is used to broadcast wakeups.
- *
- * Called with interrupts disabled, so clockevents_lock is not
- * required here because the local clock event device cannot go away
- * under us.
- */
-int tick_broadcast_oneshot_control(enum tick_broadcast_state state)
+int __tick_broadcast_oneshot_control(enum tick_broadcast_state state)
{
struct clock_event_device *bc, *dev;
- struct tick_device *td;
int cpu, ret = 0;
ktime_t now;
/*
- * Periodic mode does not care about the enter/exit of power
- * states
+ * If there is no broadcast device, tell the caller not to go
+ * into deep idle.
*/
- if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC)
- return 0;
+ if (!tick_broadcast_device.evtdev)
+ return -EBUSY;
- /*
- * We are called with preemtion disabled from the depth of the
- * idle code, so we can't be moved away.
- */
- td = this_cpu_ptr(&tick_cpu_device);
- dev = td->evtdev;
-
- if (!(dev->features & CLOCK_EVT_FEAT_C3STOP))
- return 0;
+ dev = this_cpu_ptr(&tick_cpu_device)->evtdev;
raw_spin_lock(&tick_broadcast_lock);
bc = tick_broadcast_device.evtdev;
cpu = smp_processor_id();
if (state == TICK_BROADCAST_ENTER) {
+ /*
+ * If the current CPU owns the hrtimer broadcast
+ * mechanism, it cannot go deep idle and we do not add
+ * the CPU to the broadcast mask. We don't have to go
+ * through the EXIT path as the local timer is not
+ * shutdown.
+ */
+ ret = broadcast_needs_cpu(bc, cpu);
+ if (ret)
+ goto out;
+
+ /*
+ * If the broadcast device is in periodic mode, we
+ * return.
+ */
+ if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC) {
+ /* If it is a hrtimer based broadcast, return busy */
+ if (bc->features & CLOCK_EVT_FEAT_HRTIMER)
+ ret = -EBUSY;
+ goto out;
+ }
+
if (!cpumask_test_and_set_cpu(cpu, tick_broadcast_oneshot_mask)) {
WARN_ON_ONCE(cpumask_test_cpu(cpu, tick_broadcast_pending_mask));
+
+ /* Conditionally shut down the local timer. */
broadcast_shutdown_local(bc, dev);
+
/*
* We only reprogram the broadcast timer if we
* did not mark ourself in the force mask and
* if the cpu local event is earlier than the
* broadcast event. If the current CPU is in
* the force mask, then we are going to be
- * woken by the IPI right away.
+ * woken by the IPI right away; we return
+ * busy, so the CPU does not try to go deep
+ * idle.
*/
- if (!cpumask_test_cpu(cpu, tick_broadcast_force_mask) &&
- dev->next_event.tv64 < bc->next_event.tv64)
+ if (cpumask_test_cpu(cpu, tick_broadcast_force_mask)) {
+ ret = -EBUSY;
+ } else if (dev->next_event.tv64 < bc->next_event.tv64) {
tick_broadcast_set_event(bc, cpu, dev->next_event);
+ /*
+ * In case of hrtimer broadcasts the
+ * programming might have moved the
+ * timer to this cpu. If yes, remove
+ * us from the broadcast mask and
+ * return busy.
+ */
+ ret = broadcast_needs_cpu(bc, cpu);
+ if (ret) {
+ cpumask_clear_cpu(cpu,
+ tick_broadcast_oneshot_mask);
+ }
+ }
}
- /*
- * If the current CPU owns the hrtimer broadcast
- * mechanism, it cannot go deep idle and we remove the
- * CPU from the broadcast mask. We don't have to go
- * through the EXIT path as the local timer is not
- * shutdown.
- */
- ret = broadcast_needs_cpu(bc, cpu);
- if (ret)
- cpumask_clear_cpu(cpu, tick_broadcast_oneshot_mask);
} else {
if (cpumask_test_and_clear_cpu(cpu, tick_broadcast_oneshot_mask)) {
clockevents_switch_state(dev, CLOCK_EVT_STATE_ONESHOT);
@@ -938,6 +981,16 @@
return bc ? bc->features & CLOCK_EVT_FEAT_ONESHOT : false;
}
+#else
+int __tick_broadcast_oneshot_control(enum tick_broadcast_state state)
+{
+ struct clock_event_device *bc = tick_broadcast_device.evtdev;
+
+ if (!bc || (bc->features & CLOCK_EVT_FEAT_HRTIMER))
+ return -EBUSY;
+
+ return 0;
+}
#endif
void __init tick_broadcast_init(void)
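
The split above makes -EBUSY the contract between the broadcast core and the idle path: a nonzero return tells the CPU it must not enter a state in which its local timer stops. A minimal sketch of the caller side, assuming the usual tick_broadcast_enter()/tick_broadcast_exit() wrappers; the idle helpers are hypothetical stand-ins, not kernel APIs:

    /* Hedged sketch: enter_shallow_idle()/enter_deep_idle() are
     * illustrative placeholders only. */
    static void idle_enter_sketch(void)
    {
        if (tick_broadcast_enter()) {
            /* This CPU backs the (hrtimer) broadcast: stay shallow. */
            enter_shallow_idle();
            return;
        }
        enter_deep_idle();      /* local timer may stop here */
        tick_broadcast_exit();  /* restore the local timer */
    }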
diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c
index 76446cb..55e13ef 100644
--- a/kernel/time/tick-common.c
+++ b/kernel/time/tick-common.c
@@ -343,6 +343,27 @@
tick_install_broadcast_device(newdev);
}
+/**
+ * tick_broadcast_oneshot_control - Enter/exit broadcast oneshot mode
+ * @state: The target state (enter/exit)
+ *
+ * The system enters/leaves a state where affected devices might stop.
+ * Returns 0 on success, -EBUSY if the cpu is used to broadcast wakeups.
+ *
+ * Called with interrupts disabled, so clockevents_lock is not
+ * required here because the local clock event device cannot go away
+ * under us.
+ */
+int tick_broadcast_oneshot_control(enum tick_broadcast_state state)
+{
+ struct tick_device *td = this_cpu_ptr(&tick_cpu_device);
+
+ if (!(td->evtdev->features & CLOCK_EVT_FEAT_C3STOP))
+ return 0;
+
+ return __tick_broadcast_oneshot_control(state);
+}
+
#ifdef CONFIG_HOTPLUG_CPU
/*
* Transfer the do_timer job away from a dying cpu.
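
The new wrapper fast-paths hardware whose timer keeps ticking in deep idle: only devices flagged CLOCK_EVT_FEAT_C3STOP need broadcast handling at all. Illustrative only (the field values are invented), a clockevent device that loses its timer in deep C-states would advertise the flag like this:

    static struct clock_event_device example_evt = {
        .name     = "example-timer",
        .features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_C3STOP,
        .rating   = 300,
        /* .set_next_event and .set_state_* callbacks elided */
    };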
diff --git a/kernel/time/tick-sched.h b/kernel/time/tick-sched.h
index 42fdf49..a4a8d4e 100644
--- a/kernel/time/tick-sched.h
+++ b/kernel/time/tick-sched.h
@@ -71,4 +71,14 @@
static inline void tick_cancel_sched_timer(int cpu) { }
#endif
+#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
+extern int __tick_broadcast_oneshot_control(enum tick_broadcast_state state);
+#else
+static inline int
+__tick_broadcast_oneshot_control(enum tick_broadcast_state state)
+{
+ return -EBUSY;
+}
+#endif
+
#endif
diff --git a/lib/Kconfig.kasan b/lib/Kconfig.kasan
index 777eda7..39f24d6 100644
--- a/lib/Kconfig.kasan
+++ b/lib/Kconfig.kasan
@@ -18,10 +18,6 @@
For better error detection enable CONFIG_STACKTRACE,
and add slub_debug=U to boot cmdline.
-config KASAN_SHADOW_OFFSET
- hex
- default 0xdffffc0000000000 if X86_64
-
choice
prompt "Instrumentation type"
depends on KASAN
diff --git a/mm/memory.c b/mm/memory.c
index a84fbb7..388dcf9 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2670,6 +2670,10 @@
pte_unmap(page_table);
+ /* File mapping without ->vm_ops? */
+ if (vma->vm_flags & VM_SHARED)
+ return VM_FAULT_SIGBUS;
+
/* Check if we need to add a guard page to the stack */
if (check_stack_guard_page(vma, address) < 0)
return VM_FAULT_SIGSEGV;
@@ -3099,6 +3103,9 @@
- vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
pte_unmap(page_table);
+ /* The VMA was not fully populated on mmap() or is missing VM_DONTEXPAND */
+ if (!vma->vm_ops->fault)
+ return VM_FAULT_SIGBUS;
if (!(flags & FAULT_FLAG_WRITE))
return do_read_fault(mm, vma, address, pmd, pgoff, flags,
orig_pte);
@@ -3244,13 +3251,12 @@
barrier();
if (!pte_present(entry)) {
if (pte_none(entry)) {
- if (vma->vm_ops) {
- if (likely(vma->vm_ops->fault))
- return do_fault(mm, vma, address, pte,
- pmd, flags, entry);
- }
- return do_anonymous_page(mm, vma, address,
- pte, pmd, flags);
+ if (vma->vm_ops)
+ return do_fault(mm, vma, address, pte, pmd,
+ flags, entry);
+
+ return do_anonymous_page(mm, vma, address, pte, pmd,
+ flags);
}
return do_swap_page(mm, vma, address,
pte, pmd, flags, entry);
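
Taken together, the three hunks move the ->vm_ops/->fault sanity checks out of the dispatch and into the handlers, so malformed VMAs now fail with SIGBUS instead of silently being treated as anonymous memory. A toy model of the resulting decision tree, with booleans standing in for the real VMA state (not the literal kernel code):

    enum fault_result { FAULT_FILE, FAULT_ANON, FAULT_SIGBUS };

    /* Mirrors the control flow of the hunks above. */
    static enum fault_result dispatch(int has_vm_ops, int has_fault_op,
                                      int is_shared)
    {
        if (has_vm_ops)
            /* do_fault(): a missing ->fault is now SIGBUS */
            return has_fault_op ? FAULT_FILE : FAULT_SIGBUS;
        /* do_anonymous_page(): a shared mapping without vm_ops is bogus */
        return is_shared ? FAULT_SIGBUS : FAULT_ANON;
    }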
diff --git a/net/ceph/ceph_common.c b/net/ceph/ceph_common.c
index cb7db32..f30329f 100644
--- a/net/ceph/ceph_common.c
+++ b/net/ceph/ceph_common.c
@@ -9,6 +9,7 @@
#include <keys/ceph-type.h>
#include <linux/module.h>
#include <linux/mount.h>
+#include <linux/nsproxy.h>
#include <linux/parser.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
@@ -16,8 +17,6 @@
#include <linux/statfs.h>
#include <linux/string.h>
#include <linux/vmalloc.h>
-#include <linux/nsproxy.h>
-#include <net/net_namespace.h>
#include <linux/ceph/ceph_features.h>
@@ -131,6 +130,13 @@
int i;
int ret;
+ /*
+ * Don't bother comparing options if network namespaces don't
+ * match.
+ */
+ if (!net_eq(current->nsproxy->net_ns, read_pnet(&client->msgr.net)))
+ return -1;
+
ret = memcmp(opt1, opt2, ofs);
if (ret)
return ret;
@@ -335,9 +341,6 @@
int err = -ENOMEM;
substring_t argstr[MAX_OPT_ARGS];
- if (current->nsproxy->net_ns != &init_net)
- return ERR_PTR(-EINVAL);
-
opt = kzalloc(sizeof(*opt), GFP_KERNEL);
if (!opt)
return ERR_PTR(-ENOMEM);
@@ -608,6 +611,7 @@
fail_monc:
ceph_monc_stop(&client->monc);
fail:
+ ceph_messenger_fini(&client->msgr);
kfree(client);
return ERR_PTR(err);
}
@@ -621,8 +625,8 @@
/* unmount */
ceph_osdc_stop(&client->osdc);
-
ceph_monc_stop(&client->monc);
+ ceph_messenger_fini(&client->msgr);
ceph_debugfs_client_cleanup(client);
diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
index 1679f47..e3be1d2 100644
--- a/net/ceph/messenger.c
+++ b/net/ceph/messenger.c
@@ -6,6 +6,7 @@
#include <linux/inet.h>
#include <linux/kthread.h>
#include <linux/net.h>
+#include <linux/nsproxy.h>
#include <linux/slab.h>
#include <linux/socket.h>
#include <linux/string.h>
@@ -479,7 +480,7 @@
int ret;
BUG_ON(con->sock);
- ret = sock_create_kern(&init_net, con->peer_addr.in_addr.ss_family,
+ ret = sock_create_kern(read_pnet(&con->msgr->net), paddr->ss_family,
SOCK_STREAM, IPPROTO_TCP, &sock);
if (ret)
return ret;
@@ -1731,17 +1732,17 @@
static bool addr_is_blank(struct sockaddr_storage *ss)
{
+ struct in_addr *addr = &((struct sockaddr_in *)ss)->sin_addr;
+ struct in6_addr *addr6 = &((struct sockaddr_in6 *)ss)->sin6_addr;
+
switch (ss->ss_family) {
case AF_INET:
- return ((struct sockaddr_in *)ss)->sin_addr.s_addr == 0;
+ return addr->s_addr == htonl(INADDR_ANY);
case AF_INET6:
- return
- ((struct sockaddr_in6 *)ss)->sin6_addr.s6_addr32[0] == 0 &&
- ((struct sockaddr_in6 *)ss)->sin6_addr.s6_addr32[1] == 0 &&
- ((struct sockaddr_in6 *)ss)->sin6_addr.s6_addr32[2] == 0 &&
- ((struct sockaddr_in6 *)ss)->sin6_addr.s6_addr32[3] == 0;
+ return ipv6_addr_any(addr6);
+ default:
+ return true;
}
- return false;
}
static int addr_port(struct sockaddr_storage *ss)
@@ -2944,11 +2945,18 @@
msgr->tcp_nodelay = tcp_nodelay;
atomic_set(&msgr->stopping, 0);
+ write_pnet(&msgr->net, get_net(current->nsproxy->net_ns));
dout("%s %p\n", __func__, msgr);
}
EXPORT_SYMBOL(ceph_messenger_init);
+void ceph_messenger_fini(struct ceph_messenger *msgr)
+{
+ put_net(read_pnet(&msgr->net));
+}
+EXPORT_SYMBOL(ceph_messenger_fini);
+
static void clear_standby(struct ceph_connection *con)
{
/* come back from STANDBY? */
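
The messenger now pins the creating process's network namespace for its whole lifetime; ceph_messenger_fini() exists to drop that reference on every teardown path, including the new error-path call in the client-creation failure branch. The lifecycle in isolation, as a sketch (struct example_msgr is illustrative, not from the patch):

    #include <linux/nsproxy.h>
    #include <linux/sched.h>
    #include <net/net_namespace.h>

    struct example_msgr {
        possible_net_t net;
    };

    static void example_msgr_init(struct example_msgr *m)
    {
        /* pin the creator's namespace for the messenger's lifetime */
        write_pnet(&m->net, get_net(current->nsproxy->net_ns));
    }

    static bool example_msgr_same_ns(struct example_msgr *m)
    {
        /* clients must never be shared across namespaces */
        return net_eq(current->nsproxy->net_ns, read_pnet(&m->net));
    }

    static void example_msgr_fini(struct example_msgr *m)
    {
        put_net(read_pnet(&m->net));    /* balance the get_net() */
    }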
diff --git a/scripts/mod/devicetable-offsets.c b/scripts/mod/devicetable-offsets.c
index eff7de1..e70fcd1 100644
--- a/scripts/mod/devicetable-offsets.c
+++ b/scripts/mod/devicetable-offsets.c
@@ -63,6 +63,8 @@
DEVID(acpi_device_id);
DEVID_FIELD(acpi_device_id, id);
+ DEVID_FIELD(acpi_device_id, cls);
+ DEVID_FIELD(acpi_device_id, cls_msk);
DEVID(pnp_device_id);
DEVID_FIELD(pnp_device_id, id);
diff --git a/scripts/mod/file2alias.c b/scripts/mod/file2alias.c
index 84c86f3..5f20882 100644
--- a/scripts/mod/file2alias.c
+++ b/scripts/mod/file2alias.c
@@ -523,12 +523,40 @@
}
ADD_TO_DEVTABLE("serio", serio_device_id, do_serio_entry);
-/* looks like: "acpi:ACPI0003 or acpi:PNP0C0B" or "acpi:LNXVIDEO" */
+/* looks like: "acpi:ACPI0003" or "acpi:PNP0C0B" or "acpi:LNXVIDEO" or
+ * "acpi:bbsspp" (bb=base-class, ss=sub-class, pp=prog-if)
+ *
+ * NOTE: Each driver should use one of the following: _HID, _CIDs,
+ * or _CLS. Also, bb, ss, and pp can be substituted with ??
+ * as a don't-care byte.
+ */
static int do_acpi_entry(const char *filename,
void *symval, char *alias)
{
DEF_FIELD_ADDR(symval, acpi_device_id, id);
- sprintf(alias, "acpi*:%s:*", *id);
+ DEF_FIELD_ADDR(symval, acpi_device_id, cls);
+ DEF_FIELD_ADDR(symval, acpi_device_id, cls_msk);
+
+ if (id && strlen((const char *)*id))
+ sprintf(alias, "acpi*:%s:*", *id);
+ else if (cls) {
+ int i, byte_shift, cnt = 0;
+ unsigned int msk;
+
+ sprintf(&alias[cnt], "acpi*:");
+ cnt = 6;
+ for (i = 1; i <= 3; i++) {
+ byte_shift = 8 * (3-i);
+ msk = (*cls_msk >> byte_shift) & 0xFF;
+ if (msk)
+ sprintf(&alias[cnt], "%02x",
+ (*cls >> byte_shift) & 0xFF);
+ else
+ sprintf(&alias[cnt], "??");
+ cnt += 2;
+ }
+ sprintf(&alias[cnt], ":*");
+ }
return 1;
}
ADD_TO_DEVTABLE("acpi", acpi_device_id, do_acpi_entry);
diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c
index 91ee1b2..12d3db3 100644
--- a/scripts/mod/modpost.c
+++ b/scripts/mod/modpost.c
@@ -886,7 +886,8 @@
#define TEXT_SECTIONS ".text", ".text.unlikely", ".sched.text", \
".kprobes.text"
#define OTHER_TEXT_SECTIONS ".ref.text", ".head.text", ".spinlock.text", \
- ".fixup", ".entry.text", ".exception.text", ".text.*"
+ ".fixup", ".entry.text", ".exception.text", ".text.*", \
+ ".coldtext"
#define INIT_SECTIONS ".init.*"
#define MEM_INIT_SECTIONS ".meminit.*"
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
index 6231081..564079c 100644
--- a/security/selinux/hooks.c
+++ b/security/selinux/hooks.c
@@ -3283,7 +3283,8 @@
int rc = 0;
if (default_noexec &&
- (prot & PROT_EXEC) && (!file || (!shared && (prot & PROT_WRITE)))) {
+ (prot & PROT_EXEC) && (!file || IS_PRIVATE(file_inode(file)) ||
+ (!shared && (prot & PROT_WRITE)))) {
/*
* We are making executable an anonymous mapping or a
* private file mapping that will also be writable.
diff --git a/security/selinux/ss/ebitmap.c b/security/selinux/ss/ebitmap.c
index afe6a26..57644b1 100644
--- a/security/selinux/ss/ebitmap.c
+++ b/security/selinux/ss/ebitmap.c
@@ -153,6 +153,12 @@
if (offset == (u32)-1)
return 0;
+ /* don't waste ebitmap space if the netlabel bitmap is empty */
+ if (bitmap == 0) {
+ offset += EBITMAP_UNIT_SIZE;
+ continue;
+ }
+
if (e_iter == NULL ||
offset >= e_iter->startbit + EBITMAP_SIZE) {
e_prev = e_iter;
diff --git a/tools/include/linux/compiler.h b/tools/include/linux/compiler.h
index f0e7267..9098083 100644
--- a/tools/include/linux/compiler.h
+++ b/tools/include/linux/compiler.h
@@ -41,4 +41,62 @@
#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
+#include <linux/types.h>
+
+static __always_inline void __read_once_size(const volatile void *p, void *res, int size)
+{
+ switch (size) {
+ case 1: *(__u8 *)res = *(volatile __u8 *)p; break;
+ case 2: *(__u16 *)res = *(volatile __u16 *)p; break;
+ case 4: *(__u32 *)res = *(volatile __u32 *)p; break;
+ case 8: *(__u64 *)res = *(volatile __u64 *)p; break;
+ default:
+ barrier();
+ __builtin_memcpy((void *)res, (const void *)p, size);
+ barrier();
+ }
+}
+
+static __always_inline void __write_once_size(volatile void *p, void *res, int size)
+{
+ switch (size) {
+ case 1: *(volatile __u8 *)p = *(__u8 *)res; break;
+ case 2: *(volatile __u16 *)p = *(__u16 *)res; break;
+ case 4: *(volatile __u32 *)p = *(__u32 *)res; break;
+ case 8: *(volatile __u64 *)p = *(__u64 *)res; break;
+ default:
+ barrier();
+ __builtin_memcpy((void *)p, (const void *)res, size);
+ barrier();
+ }
+}
+
+/*
+ * Prevent the compiler from merging or refetching reads or writes. The
+ * compiler is also forbidden from reordering successive instances of
+ * READ_ONCE, WRITE_ONCE and ACCESS_ONCE (see below), but only when the
+ * compiler is aware of some particular ordering. One way to make the
+ * compiler aware of ordering is to put the two invocations of READ_ONCE,
+ * WRITE_ONCE or ACCESS_ONCE() in different C statements.
+ *
+ * In contrast to ACCESS_ONCE these two macros will also work on aggregate
+ * data types like structs or unions. If the size of the accessed data
+ * type exceeds the word size of the machine (e.g., 32 bits or 64 bits)
+ * READ_ONCE() and WRITE_ONCE() will fall back to memcpy and print a
+ * compile-time warning.
+ *
+ * Their two major use cases are: (1) Mediating communication between
+ * process-level code and irq/NMI handlers, all running on the same CPU,
+ * and (2) Ensuring that the compiler does not fold, spindle, or otherwise
+ * mutilate accesses that either do not require ordering or that interact
+ * with an explicit memory barrier or atomic instruction that provides the
+ * required ordering.
+ */
+
+#define READ_ONCE(x) \
+ ({ union { typeof(x) __val; char __c[1]; } __u; __read_once_size(&(x), __u.__c, sizeof(x)); __u.__val; })
+
+#define WRITE_ONCE(x, val) \
+ ({ union { typeof(x) __val; char __c[1]; } __u = { .__val = (val) }; __write_once_size(&(x), __u.__c, sizeof(x)); __u.__val; })
+
#endif /* _TOOLS_LINUX_COMPILER_H */
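
Typical use of the new macros, assuming only the definitions above: a flag shared with an interrupt or signal context. READ_ONCE() keeps the compiler from hoisting the load out of the loop; WRITE_ONCE() keeps it from tearing or deferring the store. Neither emits CPU memory barriers:

    #include <stdbool.h>

    static bool done;

    static void wait_for_done(void)
    {
        while (!READ_ONCE(done))
            ;   /* every iteration performs a fresh load */
    }

    static void signal_done(void)   /* e.g. from a signal handler */
    {
        WRITE_ONCE(done, true);
    }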
diff --git a/tools/include/linux/export.h b/tools/include/linux/export.h
deleted file mode 100644
index d07e586..0000000
--- a/tools/include/linux/export.h
+++ /dev/null
@@ -1,10 +0,0 @@
-#ifndef _TOOLS_LINUX_EXPORT_H_
-#define _TOOLS_LINUX_EXPORT_H_
-
-#define EXPORT_SYMBOL(sym)
-#define EXPORT_SYMBOL_GPL(sym)
-#define EXPORT_SYMBOL_GPL_FUTURE(sym)
-#define EXPORT_UNUSED_SYMBOL(sym)
-#define EXPORT_UNUSED_SYMBOL_GPL(sym)
-
-#endif
diff --git a/tools/include/linux/rbtree.h b/tools/include/linux/rbtree.h
new file mode 100644
index 0000000..1125822
--- /dev/null
+++ b/tools/include/linux/rbtree.h
@@ -0,0 +1,104 @@
+/*
+ Red Black Trees
+ (C) 1999 Andrea Arcangeli <andrea@suse.de>
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+
+ linux/include/linux/rbtree.h
+
+ To use rbtrees you'll have to implement your own insert and search cores.
+ This avoids the use of callbacks, which would dramatically hurt
+ performance. It's not the cleanest way, but it's what C (as opposed
+ to C++) requires to get both performance and genericity...
+
+ See Documentation/rbtree.txt for documentation and samples.
+*/
+
+#ifndef __TOOLS_LINUX_PERF_RBTREE_H
+#define __TOOLS_LINUX_PERF_RBTREE_H
+
+#include <linux/kernel.h>
+#include <linux/stddef.h>
+
+struct rb_node {
+ unsigned long __rb_parent_color;
+ struct rb_node *rb_right;
+ struct rb_node *rb_left;
+} __attribute__((aligned(sizeof(long))));
+ /* The alignment might seem pointless, but allegedly CRIS needs it */
+
+struct rb_root {
+ struct rb_node *rb_node;
+};
+
+
+#define rb_parent(r) ((struct rb_node *)((r)->__rb_parent_color & ~3))
+
+#define RB_ROOT (struct rb_root) { NULL, }
+#define rb_entry(ptr, type, member) container_of(ptr, type, member)
+
+#define RB_EMPTY_ROOT(root) ((root)->rb_node == NULL)
+
+/* 'empty' nodes are nodes that are known not to be inserted in an rbtree */
+#define RB_EMPTY_NODE(node) \
+ ((node)->__rb_parent_color == (unsigned long)(node))
+#define RB_CLEAR_NODE(node) \
+ ((node)->__rb_parent_color = (unsigned long)(node))
+
+
+extern void rb_insert_color(struct rb_node *, struct rb_root *);
+extern void rb_erase(struct rb_node *, struct rb_root *);
+
+
+/* Find logical next and previous nodes in a tree */
+extern struct rb_node *rb_next(const struct rb_node *);
+extern struct rb_node *rb_prev(const struct rb_node *);
+extern struct rb_node *rb_first(const struct rb_root *);
+extern struct rb_node *rb_last(const struct rb_root *);
+
+/* Postorder iteration - always visit the parent after its children */
+extern struct rb_node *rb_first_postorder(const struct rb_root *);
+extern struct rb_node *rb_next_postorder(const struct rb_node *);
+
+/* Fast replacement of a single node without remove/rebalance/add/rebalance */
+extern void rb_replace_node(struct rb_node *victim, struct rb_node *new,
+ struct rb_root *root);
+
+static inline void rb_link_node(struct rb_node *node, struct rb_node *parent,
+ struct rb_node **rb_link)
+{
+ node->__rb_parent_color = (unsigned long)parent;
+ node->rb_left = node->rb_right = NULL;
+
+ *rb_link = node;
+}
+
+#define rb_entry_safe(ptr, type, member) \
+ ({ typeof(ptr) ____ptr = (ptr); \
+ ____ptr ? rb_entry(____ptr, type, member) : NULL; \
+ })
+
+
+/*
+ * Handy for checking that we are not deleting an entry that is
+ * already in a list, found in block/{blk-throttle,cfq-iosched}.c,
+ * probably should be moved to lib/rbtree.c...
+ */
+static inline void rb_erase_init(struct rb_node *n, struct rb_root *root)
+{
+ rb_erase(n, root);
+ RB_CLEAR_NODE(n);
+}
+#endif /* __TOOLS_LINUX_PERF_RBTREE_H */
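
As the header comment says, callers supply their own insert and search cores. A minimal insert core for a hypothetical struct item keyed by 'key', following the rb_link_node() + rb_insert_color() pattern from the header:

    struct item {
        struct rb_node node;
        long key;
    };

    static void item_insert(struct rb_root *root, struct item *new)
    {
        struct rb_node **link = &root->rb_node, *parent = NULL;

        while (*link) {
            struct item *cur = rb_entry(*link, struct item, node);

            parent = *link;
            link = cur->key < new->key ? &parent->rb_right
                                       : &parent->rb_left;
        }
        rb_link_node(&new->node, parent, link); /* link as a leaf... */
        rb_insert_color(&new->node, root);      /* ...then rebalance */
    }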
diff --git a/tools/include/linux/rbtree_augmented.h b/tools/include/linux/rbtree_augmented.h
new file mode 100644
index 0000000..43be941
--- /dev/null
+++ b/tools/include/linux/rbtree_augmented.h
@@ -0,0 +1,245 @@
+/*
+ Red Black Trees
+ (C) 1999 Andrea Arcangeli <andrea@suse.de>
+ (C) 2002 David Woodhouse <dwmw2@infradead.org>
+ (C) 2012 Michel Lespinasse <walken@google.com>
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+
+ tools/include/linux/rbtree_augmented.h
+
+ Copied from:
+ linux/include/linux/rbtree_augmented.h
+*/
+
+#ifndef _TOOLS_LINUX_RBTREE_AUGMENTED_H
+#define _TOOLS_LINUX_RBTREE_AUGMENTED_H
+
+#include <linux/compiler.h>
+#include <linux/rbtree.h>
+
+/*
+ * Please note - only struct rb_augment_callbacks and the prototypes for
+ * rb_insert_augmented() and rb_erase_augmented() are intended to be public.
+ * The rest are implementation details you are not expected to depend on.
+ *
+ * See Documentation/rbtree.txt for documentation and samples.
+ */
+
+struct rb_augment_callbacks {
+ void (*propagate)(struct rb_node *node, struct rb_node *stop);
+ void (*copy)(struct rb_node *old, struct rb_node *new);
+ void (*rotate)(struct rb_node *old, struct rb_node *new);
+};
+
+extern void __rb_insert_augmented(struct rb_node *node, struct rb_root *root,
+ void (*augment_rotate)(struct rb_node *old, struct rb_node *new));
+/*
+ * Fixup the rbtree and update the augmented information when rebalancing.
+ *
+ * On insertion, the user must update the augmented information on the path
+ * leading to the inserted node, then call rb_link_node() as usual and
+ * rb_augment_inserted() instead of the usual rb_insert_color() call.
+ * If rb_augment_inserted() rebalances the rbtree, it will callback into
+ * a user provided function to update the augmented information on the
+ * affected subtrees.
+ */
+static inline void
+rb_insert_augmented(struct rb_node *node, struct rb_root *root,
+ const struct rb_augment_callbacks *augment)
+{
+ __rb_insert_augmented(node, root, augment->rotate);
+}
+
+#define RB_DECLARE_CALLBACKS(rbstatic, rbname, rbstruct, rbfield, \
+ rbtype, rbaugmented, rbcompute) \
+static inline void \
+rbname ## _propagate(struct rb_node *rb, struct rb_node *stop) \
+{ \
+ while (rb != stop) { \
+ rbstruct *node = rb_entry(rb, rbstruct, rbfield); \
+ rbtype augmented = rbcompute(node); \
+ if (node->rbaugmented == augmented) \
+ break; \
+ node->rbaugmented = augmented; \
+ rb = rb_parent(&node->rbfield); \
+ } \
+} \
+static inline void \
+rbname ## _copy(struct rb_node *rb_old, struct rb_node *rb_new) \
+{ \
+ rbstruct *old = rb_entry(rb_old, rbstruct, rbfield); \
+ rbstruct *new = rb_entry(rb_new, rbstruct, rbfield); \
+ new->rbaugmented = old->rbaugmented; \
+} \
+static void \
+rbname ## _rotate(struct rb_node *rb_old, struct rb_node *rb_new) \
+{ \
+ rbstruct *old = rb_entry(rb_old, rbstruct, rbfield); \
+ rbstruct *new = rb_entry(rb_new, rbstruct, rbfield); \
+ new->rbaugmented = old->rbaugmented; \
+ old->rbaugmented = rbcompute(old); \
+} \
+rbstatic const struct rb_augment_callbacks rbname = { \
+ rbname ## _propagate, rbname ## _copy, rbname ## _rotate \
+};
+
+
+#define RB_RED 0
+#define RB_BLACK 1
+
+#define __rb_parent(pc) ((struct rb_node *)(pc & ~3))
+
+#define __rb_color(pc) ((pc) & 1)
+#define __rb_is_black(pc) __rb_color(pc)
+#define __rb_is_red(pc) (!__rb_color(pc))
+#define rb_color(rb) __rb_color((rb)->__rb_parent_color)
+#define rb_is_red(rb) __rb_is_red((rb)->__rb_parent_color)
+#define rb_is_black(rb) __rb_is_black((rb)->__rb_parent_color)
+
+static inline void rb_set_parent(struct rb_node *rb, struct rb_node *p)
+{
+ rb->__rb_parent_color = rb_color(rb) | (unsigned long)p;
+}
+
+static inline void rb_set_parent_color(struct rb_node *rb,
+ struct rb_node *p, int color)
+{
+ rb->__rb_parent_color = (unsigned long)p | color;
+}
+
+static inline void
+__rb_change_child(struct rb_node *old, struct rb_node *new,
+ struct rb_node *parent, struct rb_root *root)
+{
+ if (parent) {
+ if (parent->rb_left == old)
+ parent->rb_left = new;
+ else
+ parent->rb_right = new;
+ } else
+ root->rb_node = new;
+}
+
+extern void __rb_erase_color(struct rb_node *parent, struct rb_root *root,
+ void (*augment_rotate)(struct rb_node *old, struct rb_node *new));
+
+static __always_inline struct rb_node *
+__rb_erase_augmented(struct rb_node *node, struct rb_root *root,
+ const struct rb_augment_callbacks *augment)
+{
+ struct rb_node *child = node->rb_right, *tmp = node->rb_left;
+ struct rb_node *parent, *rebalance;
+ unsigned long pc;
+
+ if (!tmp) {
+ /*
+ * Case 1: node to erase has no more than 1 child (easy!)
+ *
+ * Note that if there is one child it must be red due to 5)
+ * and node must be black due to 4). We adjust colors locally
+ * so as to bypass __rb_erase_color() later on.
+ */
+ pc = node->__rb_parent_color;
+ parent = __rb_parent(pc);
+ __rb_change_child(node, child, parent, root);
+ if (child) {
+ child->__rb_parent_color = pc;
+ rebalance = NULL;
+ } else
+ rebalance = __rb_is_black(pc) ? parent : NULL;
+ tmp = parent;
+ } else if (!child) {
+ /* Still case 1, but this time the child is node->rb_left */
+ tmp->__rb_parent_color = pc = node->__rb_parent_color;
+ parent = __rb_parent(pc);
+ __rb_change_child(node, tmp, parent, root);
+ rebalance = NULL;
+ tmp = parent;
+ } else {
+ struct rb_node *successor = child, *child2;
+ tmp = child->rb_left;
+ if (!tmp) {
+ /*
+ * Case 2: node's successor is its right child
+ *
+ * (n) (s)
+ * / \ / \
+ * (x) (s) -> (x) (c)
+ * \
+ * (c)
+ */
+ parent = successor;
+ child2 = successor->rb_right;
+ augment->copy(node, successor);
+ } else {
+ /*
+ * Case 3: node's successor is leftmost under
+ * node's right child subtree
+ *
+ * (n) (s)
+ * / \ / \
+ * (x) (y) -> (x) (y)
+ * / /
+ * (p) (p)
+ * / /
+ * (s) (c)
+ * \
+ * (c)
+ */
+ do {
+ parent = successor;
+ successor = tmp;
+ tmp = tmp->rb_left;
+ } while (tmp);
+ parent->rb_left = child2 = successor->rb_right;
+ successor->rb_right = child;
+ rb_set_parent(child, successor);
+ augment->copy(node, successor);
+ augment->propagate(parent, successor);
+ }
+
+ successor->rb_left = tmp = node->rb_left;
+ rb_set_parent(tmp, successor);
+
+ pc = node->__rb_parent_color;
+ tmp = __rb_parent(pc);
+ __rb_change_child(node, successor, tmp, root);
+ if (child2) {
+ successor->__rb_parent_color = pc;
+ rb_set_parent_color(child2, parent, RB_BLACK);
+ rebalance = NULL;
+ } else {
+ unsigned long pc2 = successor->__rb_parent_color;
+ successor->__rb_parent_color = pc;
+ rebalance = __rb_is_black(pc2) ? parent : NULL;
+ }
+ tmp = successor;
+ }
+
+ augment->propagate(tmp, NULL);
+ return rebalance;
+}
+
+static __always_inline void
+rb_erase_augmented(struct rb_node *node, struct rb_root *root,
+ const struct rb_augment_callbacks *augment)
+{
+ struct rb_node *rebalance = __rb_erase_augmented(node, root, augment);
+ if (rebalance)
+ __rb_erase_color(rebalance, root, augment->rotate);
+}
+
+#endif /* _TOOLS_LINUX_RBTREE_AUGMENTED_H */
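
A sketch of RB_DECLARE_CALLBACKS in use, caching the maximum 'value' over each subtree (the classic interval-tree shape; struct anode and its fields are illustrative, not from the patch):

    struct anode {
        struct rb_node rb;
        long value;
        long subtree_max;   /* max of 'value' over this subtree */
    };

    static long anode_compute_max(struct anode *node)
    {
        long m = node->value;
        struct anode *c;

        if (node->rb.rb_left) {
            c = rb_entry(node->rb.rb_left, struct anode, rb);
            if (c->subtree_max > m)
                m = c->subtree_max;
        }
        if (node->rb.rb_right) {
            c = rb_entry(node->rb.rb_right, struct anode, rb);
            if (c->subtree_max > m)
                m = c->subtree_max;
        }
        return m;
    }

    RB_DECLARE_CALLBACKS(static, anode_cb, struct anode, rb,
                         long, subtree_max, anode_compute_max)

After linking a new node (and updating subtree_max on the way down), insertion becomes rb_insert_augmented(&n->rb, root, &anode_cb) and removal rb_erase_augmented(&n->rb, root, &anode_cb).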
diff --git a/tools/lib/rbtree.c b/tools/lib/rbtree.c
new file mode 100644
index 0000000..17c2b59
--- /dev/null
+++ b/tools/lib/rbtree.c
@@ -0,0 +1,548 @@
+/*
+ Red Black Trees
+ (C) 1999 Andrea Arcangeli <andrea@suse.de>
+ (C) 2002 David Woodhouse <dwmw2@infradead.org>
+ (C) 2012 Michel Lespinasse <walken@google.com>
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+
+ linux/lib/rbtree.c
+*/
+
+#include <linux/rbtree_augmented.h>
+
+/*
+ * red-black trees properties: http://en.wikipedia.org/wiki/Rbtree
+ *
+ * 1) A node is either red or black
+ * 2) The root is black
+ * 3) All leaves (NULL) are black
+ * 4) Both children of every red node are black
+ * 5) Every simple path from root to leaves contains the same number
+ * of black nodes.
+ *
+ * 4 and 5 give the O(log n) guarantee, since 4 implies you cannot have two
+ * consecutive red nodes in a path and every red node is therefore followed by
+ * a black. So if B is the number of black nodes on every simple path (as per
+ * 5), then the longest possible path due to 4 is 2B.
+ *
+ * We shall indicate color with case, where black nodes are uppercase and red
+ * nodes will be lowercase. Unknown color nodes shall be drawn as red within
+ * parentheses and have some accompanying text comment.
+ */
+
+static inline void rb_set_black(struct rb_node *rb)
+{
+ rb->__rb_parent_color |= RB_BLACK;
+}
+
+static inline struct rb_node *rb_red_parent(struct rb_node *red)
+{
+ return (struct rb_node *)red->__rb_parent_color;
+}
+
+/*
+ * Helper function for rotations:
+ * - old's parent and color get assigned to new
+ * - old gets assigned new as a parent and 'color' as a color.
+ */
+static inline void
+__rb_rotate_set_parents(struct rb_node *old, struct rb_node *new,
+ struct rb_root *root, int color)
+{
+ struct rb_node *parent = rb_parent(old);
+ new->__rb_parent_color = old->__rb_parent_color;
+ rb_set_parent_color(old, new, color);
+ __rb_change_child(old, new, parent, root);
+}
+
+static __always_inline void
+__rb_insert(struct rb_node *node, struct rb_root *root,
+ void (*augment_rotate)(struct rb_node *old, struct rb_node *new))
+{
+ struct rb_node *parent = rb_red_parent(node), *gparent, *tmp;
+
+ while (true) {
+ /*
+ * Loop invariant: node is red
+ *
+ * If there is a black parent, we are done.
+ * Otherwise, take some corrective action as we don't
+ * want a red root or two consecutive red nodes.
+ */
+ if (!parent) {
+ rb_set_parent_color(node, NULL, RB_BLACK);
+ break;
+ } else if (rb_is_black(parent))
+ break;
+
+ gparent = rb_red_parent(parent);
+
+ tmp = gparent->rb_right;
+ if (parent != tmp) { /* parent == gparent->rb_left */
+ if (tmp && rb_is_red(tmp)) {
+ /*
+ * Case 1 - color flips
+ *
+ * G g
+ * / \ / \
+ * p u --> P U
+ * / /
+ * n n
+ *
+ * However, since g's parent might be red, and
+ * 4) does not allow this, we need to recurse
+ * at g.
+ */
+ rb_set_parent_color(tmp, gparent, RB_BLACK);
+ rb_set_parent_color(parent, gparent, RB_BLACK);
+ node = gparent;
+ parent = rb_parent(node);
+ rb_set_parent_color(node, parent, RB_RED);
+ continue;
+ }
+
+ tmp = parent->rb_right;
+ if (node == tmp) {
+ /*
+ * Case 2 - left rotate at parent
+ *
+ * G G
+ * / \ / \
+ * p U --> n U
+ * \ /
+ * n p
+ *
+ * This still leaves us in violation of 4), the
+ * continuation into Case 3 will fix that.
+ */
+ parent->rb_right = tmp = node->rb_left;
+ node->rb_left = parent;
+ if (tmp)
+ rb_set_parent_color(tmp, parent,
+ RB_BLACK);
+ rb_set_parent_color(parent, node, RB_RED);
+ augment_rotate(parent, node);
+ parent = node;
+ tmp = node->rb_right;
+ }
+
+ /*
+ * Case 3 - right rotate at gparent
+ *
+ * G P
+ * / \ / \
+ * p U --> n g
+ * / \
+ * n U
+ */
+ gparent->rb_left = tmp; /* == parent->rb_right */
+ parent->rb_right = gparent;
+ if (tmp)
+ rb_set_parent_color(tmp, gparent, RB_BLACK);
+ __rb_rotate_set_parents(gparent, parent, root, RB_RED);
+ augment_rotate(gparent, parent);
+ break;
+ } else {
+ tmp = gparent->rb_left;
+ if (tmp && rb_is_red(tmp)) {
+ /* Case 1 - color flips */
+ rb_set_parent_color(tmp, gparent, RB_BLACK);
+ rb_set_parent_color(parent, gparent, RB_BLACK);
+ node = gparent;
+ parent = rb_parent(node);
+ rb_set_parent_color(node, parent, RB_RED);
+ continue;
+ }
+
+ tmp = parent->rb_left;
+ if (node == tmp) {
+ /* Case 2 - right rotate at parent */
+ parent->rb_left = tmp = node->rb_right;
+ node->rb_right = parent;
+ if (tmp)
+ rb_set_parent_color(tmp, parent,
+ RB_BLACK);
+ rb_set_parent_color(parent, node, RB_RED);
+ augment_rotate(parent, node);
+ parent = node;
+ tmp = node->rb_left;
+ }
+
+ /* Case 3 - left rotate at gparent */
+ gparent->rb_right = tmp; /* == parent->rb_left */
+ parent->rb_left = gparent;
+ if (tmp)
+ rb_set_parent_color(tmp, gparent, RB_BLACK);
+ __rb_rotate_set_parents(gparent, parent, root, RB_RED);
+ augment_rotate(gparent, parent);
+ break;
+ }
+ }
+}
+
+/*
+ * Inline version for rb_erase() use - we want to be able to inline
+ * and eliminate the dummy_rotate callback there
+ */
+static __always_inline void
+____rb_erase_color(struct rb_node *parent, struct rb_root *root,
+ void (*augment_rotate)(struct rb_node *old, struct rb_node *new))
+{
+ struct rb_node *node = NULL, *sibling, *tmp1, *tmp2;
+
+ while (true) {
+ /*
+ * Loop invariants:
+ * - node is black (or NULL on first iteration)
+ * - node is not the root (parent is not NULL)
+ * - All leaf paths going through parent and node have a
+ * black node count that is 1 lower than other leaf paths.
+ */
+ sibling = parent->rb_right;
+ if (node != sibling) { /* node == parent->rb_left */
+ if (rb_is_red(sibling)) {
+ /*
+ * Case 1 - left rotate at parent
+ *
+ * P S
+ * / \ / \
+ * N s --> p Sr
+ * / \ / \
+ * Sl Sr N Sl
+ */
+ parent->rb_right = tmp1 = sibling->rb_left;
+ sibling->rb_left = parent;
+ rb_set_parent_color(tmp1, parent, RB_BLACK);
+ __rb_rotate_set_parents(parent, sibling, root,
+ RB_RED);
+ augment_rotate(parent, sibling);
+ sibling = tmp1;
+ }
+ tmp1 = sibling->rb_right;
+ if (!tmp1 || rb_is_black(tmp1)) {
+ tmp2 = sibling->rb_left;
+ if (!tmp2 || rb_is_black(tmp2)) {
+ /*
+ * Case 2 - sibling color flip
+ * (p could be either color here)
+ *
+ * (p) (p)
+ * / \ / \
+ * N S --> N s
+ * / \ / \
+ * Sl Sr Sl Sr
+ *
+ * This leaves us violating 5) which
+ * can be fixed by flipping p to black
+ * if it was red, or by recursing at p.
+ * p is red when coming from Case 1.
+ */
+ rb_set_parent_color(sibling, parent,
+ RB_RED);
+ if (rb_is_red(parent))
+ rb_set_black(parent);
+ else {
+ node = parent;
+ parent = rb_parent(node);
+ if (parent)
+ continue;
+ }
+ break;
+ }
+ /*
+ * Case 3 - right rotate at sibling
+ * (p could be either color here)
+ *
+ * (p) (p)
+ * / \ / \
+ * N S --> N Sl
+ * / \ \
+ * sl Sr s
+ * \
+ * Sr
+ */
+ sibling->rb_left = tmp1 = tmp2->rb_right;
+ tmp2->rb_right = sibling;
+ parent->rb_right = tmp2;
+ if (tmp1)
+ rb_set_parent_color(tmp1, sibling,
+ RB_BLACK);
+ augment_rotate(sibling, tmp2);
+ tmp1 = sibling;
+ sibling = tmp2;
+ }
+ /*
+ * Case 4 - left rotate at parent + color flips
+ * (p and sl could be either color here.
+ * After rotation, p becomes black, s acquires
+ * p's color, and sl keeps its color)
+ *
+ * (p) (s)
+ * / \ / \
+ * N S --> P Sr
+ * / \ / \
+ * (sl) sr N (sl)
+ */
+ parent->rb_right = tmp2 = sibling->rb_left;
+ sibling->rb_left = parent;
+ rb_set_parent_color(tmp1, sibling, RB_BLACK);
+ if (tmp2)
+ rb_set_parent(tmp2, parent);
+ __rb_rotate_set_parents(parent, sibling, root,
+ RB_BLACK);
+ augment_rotate(parent, sibling);
+ break;
+ } else {
+ sibling = parent->rb_left;
+ if (rb_is_red(sibling)) {
+ /* Case 1 - right rotate at parent */
+ parent->rb_left = tmp1 = sibling->rb_right;
+ sibling->rb_right = parent;
+ rb_set_parent_color(tmp1, parent, RB_BLACK);
+ __rb_rotate_set_parents(parent, sibling, root,
+ RB_RED);
+ augment_rotate(parent, sibling);
+ sibling = tmp1;
+ }
+ tmp1 = sibling->rb_left;
+ if (!tmp1 || rb_is_black(tmp1)) {
+ tmp2 = sibling->rb_right;
+ if (!tmp2 || rb_is_black(tmp2)) {
+ /* Case 2 - sibling color flip */
+ rb_set_parent_color(sibling, parent,
+ RB_RED);
+ if (rb_is_red(parent))
+ rb_set_black(parent);
+ else {
+ node = parent;
+ parent = rb_parent(node);
+ if (parent)
+ continue;
+ }
+ break;
+ }
+ /* Case 3 - right rotate at sibling */
+ sibling->rb_right = tmp1 = tmp2->rb_left;
+ tmp2->rb_left = sibling;
+ parent->rb_left = tmp2;
+ if (tmp1)
+ rb_set_parent_color(tmp1, sibling,
+ RB_BLACK);
+ augment_rotate(sibling, tmp2);
+ tmp1 = sibling;
+ sibling = tmp2;
+ }
+ /* Case 4 - left rotate at parent + color flips */
+ parent->rb_left = tmp2 = sibling->rb_right;
+ sibling->rb_right = parent;
+ rb_set_parent_color(tmp1, sibling, RB_BLACK);
+ if (tmp2)
+ rb_set_parent(tmp2, parent);
+ __rb_rotate_set_parents(parent, sibling, root,
+ RB_BLACK);
+ augment_rotate(parent, sibling);
+ break;
+ }
+ }
+}
+
+/* Non-inline version for rb_erase_augmented() use */
+void __rb_erase_color(struct rb_node *parent, struct rb_root *root,
+ void (*augment_rotate)(struct rb_node *old, struct rb_node *new))
+{
+ ____rb_erase_color(parent, root, augment_rotate);
+}
+
+/*
+ * Non-augmented rbtree manipulation functions.
+ *
+ * We use dummy augmented callbacks here, and have the compiler optimize them
+ * out of the rb_insert_color() and rb_erase() function definitions.
+ */
+
+static inline void dummy_propagate(struct rb_node *node, struct rb_node *stop) {}
+static inline void dummy_copy(struct rb_node *old, struct rb_node *new) {}
+static inline void dummy_rotate(struct rb_node *old, struct rb_node *new) {}
+
+static const struct rb_augment_callbacks dummy_callbacks = {
+ dummy_propagate, dummy_copy, dummy_rotate
+};
+
+void rb_insert_color(struct rb_node *node, struct rb_root *root)
+{
+ __rb_insert(node, root, dummy_rotate);
+}
+
+void rb_erase(struct rb_node *node, struct rb_root *root)
+{
+ struct rb_node *rebalance;
+ rebalance = __rb_erase_augmented(node, root, &dummy_callbacks);
+ if (rebalance)
+ ____rb_erase_color(rebalance, root, dummy_rotate);
+}
+
+/*
+ * Augmented rbtree manipulation functions.
+ *
+ * This instantiates the same __always_inline functions as in the non-augmented
+ * case, but this time with user-defined callbacks.
+ */
+
+void __rb_insert_augmented(struct rb_node *node, struct rb_root *root,
+ void (*augment_rotate)(struct rb_node *old, struct rb_node *new))
+{
+ __rb_insert(node, root, augment_rotate);
+}
+
+/*
+ * This function returns the first node (in sort order) of the tree.
+ */
+struct rb_node *rb_first(const struct rb_root *root)
+{
+ struct rb_node *n;
+
+ n = root->rb_node;
+ if (!n)
+ return NULL;
+ while (n->rb_left)
+ n = n->rb_left;
+ return n;
+}
+
+struct rb_node *rb_last(const struct rb_root *root)
+{
+ struct rb_node *n;
+
+ n = root->rb_node;
+ if (!n)
+ return NULL;
+ while (n->rb_right)
+ n = n->rb_right;
+ return n;
+}
+
+struct rb_node *rb_next(const struct rb_node *node)
+{
+ struct rb_node *parent;
+
+ if (RB_EMPTY_NODE(node))
+ return NULL;
+
+ /*
+ * If we have a right-hand child, go down and then left as far
+ * as we can.
+ */
+ if (node->rb_right) {
+ node = node->rb_right;
+ while (node->rb_left)
+ node = node->rb_left;
+ return (struct rb_node *)node;
+ }
+
+ /*
+ * No right-hand children. Everything down and left is smaller than us,
+ * so any 'next' node must be in the general direction of our parent.
+ * Go up the tree; any time the ancestor is a right-hand child of its
+ * parent, keep going up. First time it's a left-hand child of its
+ * parent, said parent is our 'next' node.
+ */
+ while ((parent = rb_parent(node)) && node == parent->rb_right)
+ node = parent;
+
+ return parent;
+}
+
+struct rb_node *rb_prev(const struct rb_node *node)
+{
+ struct rb_node *parent;
+
+ if (RB_EMPTY_NODE(node))
+ return NULL;
+
+ /*
+ * If we have a left-hand child, go down and then right as far
+ * as we can.
+ */
+ if (node->rb_left) {
+ node = node->rb_left;
+ while (node->rb_right)
+ node = node->rb_right;
+ return (struct rb_node *)node;
+ }
+
+ /*
+ * No left-hand children. Go up till we find an ancestor which
+ * is a right-hand child of its parent.
+ */
+ while ((parent = rb_parent(node)) && node == parent->rb_left)
+ node = parent;
+
+ return parent;
+}
+
+void rb_replace_node(struct rb_node *victim, struct rb_node *new,
+ struct rb_root *root)
+{
+ struct rb_node *parent = rb_parent(victim);
+
+ /* Set the surrounding nodes to point to the replacement */
+ __rb_change_child(victim, new, parent, root);
+ if (victim->rb_left)
+ rb_set_parent(victim->rb_left, new);
+ if (victim->rb_right)
+ rb_set_parent(victim->rb_right, new);
+
+ /* Copy the pointers/colour from the victim to the replacement */
+ *new = *victim;
+}
+
+static struct rb_node *rb_left_deepest_node(const struct rb_node *node)
+{
+ for (;;) {
+ if (node->rb_left)
+ node = node->rb_left;
+ else if (node->rb_right)
+ node = node->rb_right;
+ else
+ return (struct rb_node *)node;
+ }
+}
+
+struct rb_node *rb_next_postorder(const struct rb_node *node)
+{
+ const struct rb_node *parent;
+ if (!node)
+ return NULL;
+ parent = rb_parent(node);
+
+ /* If we're sitting on node, we've already seen our children */
+ if (parent && node == parent->rb_left && parent->rb_right) {
+ /* If we are the parent's left node, go to the parent's right
+ * node then all the way down to the left */
+ return rb_left_deepest_node(parent->rb_right);
+ } else
+ /* Otherwise we are the parent's right node, and the parent
+ * should be next */
+ return (struct rb_node *)parent;
+}
+
+struct rb_node *rb_first_postorder(const struct rb_root *root)
+{
+ if (!root->rb_node)
+ return NULL;
+
+ return rb_left_deepest_node(root->rb_node);
+}
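
With the insert core sketched after rbtree.h above, in-order traversal with rb_first()/rb_next() visits keys in ascending order (struct item is the same illustrative type):

    static void item_walk(struct rb_root *root)
    {
        struct rb_node *n;

        for (n = rb_first(root); n; n = rb_next(n)) {
            struct item *it = rb_entry(n, struct item, node);

            (void)it;   /* visit 'it'; keys arrive in ascending order */
        }
    }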
diff --git a/tools/perf/MANIFEST b/tools/perf/MANIFEST
index fe50a1b..09dc0aa 100644
--- a/tools/perf/MANIFEST
+++ b/tools/perf/MANIFEST
@@ -18,6 +18,7 @@
tools/arch/x86/include/asm/rmwcc.h
tools/lib/traceevent
tools/lib/api
+tools/lib/rbtree.c
tools/lib/symbol/kallsyms.c
tools/lib/symbol/kallsyms.h
tools/lib/util/find_next_bit.c
@@ -44,6 +45,8 @@
tools/include/linux/list.h
tools/include/linux/log2.h
tools/include/linux/poison.h
+tools/include/linux/rbtree.h
+tools/include/linux/rbtree_augmented.h
tools/include/linux/types.h
include/asm-generic/bitops/arch_hweight.h
include/asm-generic/bitops/const_hweight.h
@@ -51,12 +54,10 @@
include/asm-generic/bitops/__fls.h
include/asm-generic/bitops/fls.h
include/linux/perf_event.h
-include/linux/rbtree.h
include/linux/list.h
include/linux/hash.h
include/linux/stringify.h
lib/hweight.c
-lib/rbtree.c
include/linux/swab.h
arch/*/include/asm/unistd*.h
arch/*/include/uapi/asm/unistd*.h
@@ -65,7 +66,6 @@
arch/*/lib/memset*.S
include/linux/poison.h
include/linux/hw_breakpoint.h
-include/linux/rbtree_augmented.h
include/uapi/linux/perf_event.h
include/uapi/linux/const.h
include/uapi/linux/swab.h
diff --git a/tools/perf/util/Build b/tools/perf/util/Build
index 586a59d..601d114 100644
--- a/tools/perf/util/Build
+++ b/tools/perf/util/Build
@@ -139,7 +139,7 @@
$(call rule_mkdir)
$(call if_changed_dep,cc_o_c)
-$(OUTPUT)util/rbtree.o: ../../lib/rbtree.c FORCE
+$(OUTPUT)util/rbtree.o: ../lib/rbtree.c FORCE
$(call rule_mkdir)
$(call if_changed_dep,cc_o_c)
diff --git a/tools/perf/util/include/linux/rbtree.h b/tools/perf/util/include/linux/rbtree.h
deleted file mode 100644
index f06d89f..0000000
--- a/tools/perf/util/include/linux/rbtree.h
+++ /dev/null
@@ -1,16 +0,0 @@
-#ifndef __TOOLS_LINUX_PERF_RBTREE_H
-#define __TOOLS_LINUX_PERF_RBTREE_H
-#include <stdbool.h>
-#include "../../../../include/linux/rbtree.h"
-
-/*
- * Handy for checking that we are not deleting an entry that is
- * already in a list, found in block/{blk-throttle,cfq-iosched}.c,
- * probably should be moved to lib/rbtree.c...
- */
-static inline void rb_erase_init(struct rb_node *n, struct rb_root *root)
-{
- rb_erase(n, root);
- RB_CLEAR_NODE(n);
-}
-#endif /* __TOOLS_LINUX_PERF_RBTREE_H */
diff --git a/tools/perf/util/include/linux/rbtree_augmented.h b/tools/perf/util/include/linux/rbtree_augmented.h
deleted file mode 100644
index 9d6fcdf..0000000
--- a/tools/perf/util/include/linux/rbtree_augmented.h
+++ /dev/null
@@ -1,2 +0,0 @@
-#include <stdbool.h>
-#include "../../../../include/linux/rbtree_augmented.h"
diff --git a/tools/testing/nvdimm/Kbuild b/tools/testing/nvdimm/Kbuild
index 8e9b645..f56914c 100644
--- a/tools/testing/nvdimm/Kbuild
+++ b/tools/testing/nvdimm/Kbuild
@@ -1,3 +1,6 @@
+ldflags-y += --wrap=ioremap_wt
+ldflags-y += --wrap=ioremap_wc
+ldflags-y += --wrap=devm_ioremap_nocache
ldflags-y += --wrap=ioremap_cache
ldflags-y += --wrap=ioremap_nocache
ldflags-y += --wrap=iounmap
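
These ldflags rely on the linker's --wrap semantics: with --wrap=foo, undefined references to foo() resolve to __wrap_foo(), and references to __real_foo() resolve to the original foo(). In miniature (foo and the helper functions are hypothetical, not from the patch):

    extern void *__real_foo(unsigned long arg);
    extern int is_test_resource(unsigned long arg);
    extern void *test_lookup(unsigned long arg);

    void *__wrap_foo(unsigned long arg)
    {
        if (is_test_resource(arg))  /* hypothetical interception */
            return test_lookup(arg);
        return __real_foo(arg);     /* fall through to the original */
    }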
diff --git a/tools/testing/nvdimm/test/iomap.c b/tools/testing/nvdimm/test/iomap.c
index c85a6f6..64bfaa5 100644
--- a/tools/testing/nvdimm/test/iomap.c
+++ b/tools/testing/nvdimm/test/iomap.c
@@ -65,6 +65,21 @@
return fallback_fn(offset, size);
}
+void __iomem *__wrap_devm_ioremap_nocache(struct device *dev,
+ resource_size_t offset, unsigned long size)
+{
+ struct nfit_test_resource *nfit_res;
+
+ rcu_read_lock();
+ nfit_res = get_nfit_res(offset);
+ rcu_read_unlock();
+ if (nfit_res)
+ return (void __iomem *) nfit_res->buf + offset
+ - nfit_res->res->start;
+ return devm_ioremap_nocache(dev, offset, size);
+}
+EXPORT_SYMBOL(__wrap_devm_ioremap_nocache);
+
void __iomem *__wrap_ioremap_cache(resource_size_t offset, unsigned long size)
{
return __nfit_test_ioremap(offset, size, ioremap_cache);
@@ -77,6 +92,18 @@
}
EXPORT_SYMBOL(__wrap_ioremap_nocache);
+void __iomem *__wrap_ioremap_wt(resource_size_t offset, unsigned long size)
+{
+ return __nfit_test_ioremap(offset, size, ioremap_wt);
+}
+EXPORT_SYMBOL(__wrap_ioremap_wt);
+
+void __iomem *__wrap_ioremap_wc(resource_size_t offset, unsigned long size)
+{
+ return __nfit_test_ioremap(offset, size, ioremap_wc);
+}
+EXPORT_SYMBOL(__wrap_ioremap_wc);
+
void __wrap_iounmap(volatile void __iomem *addr)
{
struct nfit_test_resource *nfit_res;
diff --git a/tools/testing/nvdimm/test/nfit.c b/tools/testing/nvdimm/test/nfit.c
index 4b69b83..d0bdae4 100644
--- a/tools/testing/nvdimm/test/nfit.c
+++ b/tools/testing/nvdimm/test/nfit.c
@@ -128,6 +128,8 @@
int num_pm;
void **dimm;
dma_addr_t *dimm_dma;
+ void **flush;
+ dma_addr_t *flush_dma;
void **label;
dma_addr_t *label_dma;
void **spa_set;
@@ -155,7 +157,7 @@
int i, rc;
if (!nfit_mem || !test_bit(cmd, &nfit_mem->dsm_mask))
- return -ENXIO;
+ return -ENOTTY;
/* lookup label space for the given dimm */
for (i = 0; i < ARRAY_SIZE(handle); i++)
@@ -331,7 +333,8 @@
+ sizeof(struct acpi_nfit_system_address) * NUM_SPA
+ sizeof(struct acpi_nfit_memory_map) * NUM_MEM
+ sizeof(struct acpi_nfit_control_region) * NUM_DCR
- + sizeof(struct acpi_nfit_data_region) * NUM_BDW;
+ + sizeof(struct acpi_nfit_data_region) * NUM_BDW
+ + sizeof(struct acpi_nfit_flush_address) * NUM_DCR;
int i;
t->nfit_buf = test_alloc(t, nfit_size, &t->nfit_dma);
@@ -356,6 +359,10 @@
if (!t->label[i])
return -ENOMEM;
sprintf(t->label[i], "label%d", i);
+
+ t->flush[i] = test_alloc(t, 8, &t->flush_dma[i]);
+ if (!t->flush[i])
+ return -ENOMEM;
}
for (i = 0; i < NUM_DCR; i++) {
@@ -408,6 +415,7 @@
struct acpi_nfit_system_address *spa;
struct acpi_nfit_control_region *dcr;
struct acpi_nfit_data_region *bdw;
+ struct acpi_nfit_flush_address *flush;
unsigned int offset;
nfit_test_init_header(nfit_buf, size);
@@ -831,6 +839,39 @@
bdw->capacity = DIMM_SIZE;
bdw->start_address = 0;
+ offset = offset + sizeof(struct acpi_nfit_data_region) * 4;
+ /* flush0 (dimm0) */
+ flush = nfit_buf + offset;
+ flush->header.type = ACPI_NFIT_TYPE_FLUSH_ADDRESS;
+ flush->header.length = sizeof(struct acpi_nfit_flush_address);
+ flush->device_handle = handle[0];
+ flush->hint_count = 1;
+ flush->hint_address[0] = t->flush_dma[0];
+
+ /* flush1 (dimm1) */
+ flush = nfit_buf + offset + sizeof(struct acpi_nfit_flush_address) * 1;
+ flush->header.type = ACPI_NFIT_TYPE_FLUSH_ADDRESS;
+ flush->header.length = sizeof(struct acpi_nfit_flush_address);
+ flush->device_handle = handle[1];
+ flush->hint_count = 1;
+ flush->hint_address[0] = t->flush_dma[1];
+
+ /* flush2 (dimm2) */
+ flush = nfit_buf + offset + sizeof(struct acpi_nfit_flush_address) * 2;
+ flush->header.type = ACPI_NFIT_TYPE_FLUSH_ADDRESS;
+ flush->header.length = sizeof(struct acpi_nfit_flush_address);
+ flush->device_handle = handle[2];
+ flush->hint_count = 1;
+ flush->hint_address[0] = t->flush_dma[2];
+
+ /* flush3 (dimm3) */
+ flush = nfit_buf + offset + sizeof(struct acpi_nfit_flush_address) * 3;
+ flush->header.type = ACPI_NFIT_TYPE_FLUSH_ADDRESS;
+ flush->header.length = sizeof(struct acpi_nfit_flush_address);
+ flush->device_handle = handle[3];
+ flush->hint_count = 1;
+ flush->hint_address[0] = t->flush_dma[3];
+
acpi_desc = &t->acpi_desc;
set_bit(ND_CMD_GET_CONFIG_SIZE, &acpi_desc->dimm_dsm_force_en);
set_bit(ND_CMD_GET_CONFIG_DATA, &acpi_desc->dimm_dsm_force_en);
@@ -933,6 +974,10 @@
GFP_KERNEL);
nfit_test->dimm_dma = devm_kcalloc(dev, num, sizeof(dma_addr_t),
GFP_KERNEL);
+ nfit_test->flush = devm_kcalloc(dev, num, sizeof(void *),
+ GFP_KERNEL);
+ nfit_test->flush_dma = devm_kcalloc(dev, num, sizeof(dma_addr_t),
+ GFP_KERNEL);
nfit_test->label = devm_kcalloc(dev, num, sizeof(void *),
GFP_KERNEL);
nfit_test->label_dma = devm_kcalloc(dev, num,
@@ -943,7 +988,8 @@
sizeof(dma_addr_t), GFP_KERNEL);
if (nfit_test->dimm && nfit_test->dimm_dma && nfit_test->label
&& nfit_test->label_dma && nfit_test->dcr
- && nfit_test->dcr_dma)
+ && nfit_test->dcr_dma && nfit_test->flush
+ && nfit_test->flush_dma)
/* pass */;
else
return -ENOMEM;