Merge tag 'imx-drm-next-2015-01-09' of git://git.pengutronix.de/git/pza/linux into drm-next
imx-drm mode fixup support, imx-hdmi bridge conversion and imx-drm cleanup
- Implement mode_fixup for a DI vertical timing limitation
- Use generic DRM OF helpers in DRM core
- Convert imx-hdmi to dw_hdmi drm_bridge and add rockchip
driver
- Add DC use counter to fix multi-display support
- Simplify handling of DI clock flags
- A few small fixes and cleanup
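For reference, the new ipuv3-crtc mode_fixup hook follows the standard
drm_crtc_helper_funcs shape. A minimal sketch of the pattern (illustrative
only -- the real code calls ipu_di_adjust_videomode(); the one-line porch
adjustment below is an assumption, not the actual DI constraint):

    static bool my_crtc_mode_fixup(struct drm_crtc *crtc,
                                   const struct drm_display_mode *mode,
                                   struct drm_display_mode *adjusted_mode)
    {
            /* Hypothetical DI limit: vsync must start at least one
             * line after the active region ends. */
            if (adjusted_mode->vsync_start <= adjusted_mode->vdisplay)
                    adjusted_mode->vsync_start = adjusted_mode->vdisplay + 1;
            if (adjusted_mode->vsync_end <= adjusted_mode->vsync_start)
                    adjusted_mode->vsync_end = adjusted_mode->vsync_start + 1;
            return true;    /* mode is acceptable after adjustment */
    }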
* tag 'imx-drm-next-2015-01-09' of git://git.pengutronix.de/git/pza/linux: (26 commits)
imx-drm: core: move handling of DI clock flags to ipu_crtc_mode_set()
gpu: ipu-di: Switch to DIV_ROUND_CLOSEST for DI clock divider calc
gpu: ipu-v3: Use videomode in struct ipu_di_signal_cfg
imx-drm: encoder prepare/mode_set must use adjusted mode
imx-drm: ipuv3-crtc: Implement mode_fixup
drm_modes: add drm_display_mode_to_videomode
gpu: ipu-di: remove some non-functional code
gpu: ipu-di: Add ipu_di_adjust_videomode()
drm: rockchip: export functions needed by rockchip dw_hdmi bridge driver
drm: bridge/dw_hdmi: request interrupt only after initializing the mutexes
drm: bridge/dw_hdmi: add rockchip rk3288 support
dt-bindings: Add documentation for rockchip dw hdmi
drm: bridge/dw_hdmi: add function dw_hdmi_phy_enable_spare
drm: bridge/dw_hdmi: clear i2cmphy_stat0 reg in hdmi_phy_wait_i2c_done
drm: bridge/dw_hdmi: add mode_valid support
drm: bridge/dw_hdmi: add support for multi-byte register width access
dt-bindings: add document for dw_hdmi
drm: imx: imx-hdmi: move imx-hdmi to bridge/dw_hdmi
drm: imx: imx-hdmi: split phy configuration to platform driver
drm: imx: imx-hdmi: convert imx-hdmi to drm_bridge mode
...
diff --git a/Documentation/DocBook/drm.tmpl b/Documentation/DocBook/drm.tmpl
index 4b592ff..3b2571e 100644
--- a/Documentation/DocBook/drm.tmpl
+++ b/Documentation/DocBook/drm.tmpl
@@ -239,6 +239,14 @@
Driver supports dedicated render nodes.
</para></listitem>
</varlistentry>
+ <varlistentry>
+ <term>DRIVER_ATOMIC</term>
+ <listitem><para>
+ Driver supports atomic properties. In this case the driver
+ must implement appropriate obj->atomic_get_property() vfuncs
+ for any modeset objects with driver specific properties.
+ </para></listitem>
+ </varlistentry>
</variablelist>
</sect3>
<sect3>
@@ -1377,7 +1385,7 @@
<itemizedlist>
<listitem>
DRM_PLANE_TYPE_PRIMARY represents a "main" plane for a CRTC. Primary
- planes are the planes operated upon by by CRTC modesetting and flipping
+ planes are the planes operated upon by CRTC modesetting and flipping
operations described in <xref linkend="drm-kms-crtcops"/>.
</listitem>
<listitem>
@@ -2362,6 +2370,7 @@
</sect2>
<sect2>
<title>Modeset Helper Functions Reference</title>
+!Iinclude/drm/drm_crtc_helper.h
!Edrivers/gpu/drm/drm_crtc_helper.c
!Pdrivers/gpu/drm/drm_crtc_helper.c overview
</sect2>
@@ -2564,8 +2573,8 @@
<td valign="top" >Description/Restrictions</td>
</tr>
<tr>
- <td rowspan="25" valign="top" >DRM</td>
- <td rowspan="4" valign="top" >Generic</td>
+ <td rowspan="36" valign="top" >DRM</td>
+ <td rowspan="5" valign="top" >Connector</td>
<td valign="top" >“EDID”</td>
<td valign="top" >BLOB | IMMUTABLE</td>
<td valign="top" >0</td>
@@ -2594,7 +2603,14 @@
<td valign="top" >Contains tiling information for a connector.</td>
</tr>
<tr>
- <td rowspan="1" valign="top" >Plane</td>
+ <td valign="top" >“CRTC_ID”</td>
+ <td valign="top" >OBJECT</td>
+ <td valign="top" >DRM_MODE_OBJECT_CRTC</td>
+ <td valign="top" >Connector</td>
+ <td valign="top" >CRTC that connector is attached to (atomic)</td>
+ </tr>
+ <tr>
+ <td rowspan="11" valign="top" >Plane</td>
<td valign="top" >“type”</td>
<td valign="top" >ENUM | IMMUTABLE</td>
<td valign="top" >{ "Overlay", "Primary", "Cursor" }</td>
@@ -2602,6 +2618,76 @@
<td valign="top" >Plane type</td>
</tr>
<tr>
+ <td valign="top" >“SRC_X”</td>
+ <td valign="top" >RANGE</td>
+ <td valign="top" >Min=0, Max=UINT_MAX</td>
+ <td valign="top" >Plane</td>
+ <td valign="top" >Scanout source x coordinate in 16.16 fixed point (atomic)</td>
+ </tr>
+ <tr>
+ <td valign="top" >“SRC_Y”</td>
+ <td valign="top" >RANGE</td>
+ <td valign="top" >Min=0, Max=UINT_MAX</td>
+ <td valign="top" >Plane</td>
+ <td valign="top" >Scanout source y coordinate in 16.16 fixed point (atomic)</td>
+ </tr>
+ <tr>
+ <td valign="top" >“SRC_W”</td>
+ <td valign="top" >RANGE</td>
+ <td valign="top" >Min=0, Max=UINT_MAX</td>
+ <td valign="top" >Plane</td>
+ <td valign="top" >Scanout source width in 16.16 fixed point (atomic)</td>
+ </tr>
+ <tr>
+ <td valign="top" >“SRC_H”</td>
+ <td valign="top" >RANGE</td>
+ <td valign="top" >Min=0, Max=UINT_MAX</td>
+ <td valign="top" >Plane</td>
+ <td valign="top" >Scanout source height in 16.16 fixed point (atomic)</td>
+ </tr>
+ <tr>
+ <td valign="top" >“CRTC_X”</td>
+ <td valign="top" >SIGNED_RANGE</td>
+ <td valign="top" >Min=INT_MIN, Max=INT_MAX</td>
+ <td valign="top" >Plane</td>
+ <td valign="top" >Scanout CRTC (destination) x coordinate (atomic)</td>
+ </tr>
+ <tr>
+ <td valign="top" >“CRTC_Y”</td>
+ <td valign="top" >SIGNED_RANGE</td>
+ <td valign="top" >Min=INT_MIN, Max=INT_MAX</td>
+ <td valign="top" >Plane</td>
+ <td valign="top" >Scanout CRTC (destination) y coordinate (atomic)</td>
+ </tr>
+ <tr>
+ <td valign="top" >“CRTC_W”</td>
+ <td valign="top" >RANGE</td>
+ <td valign="top" >Min=0, Max=UINT_MAX</td>
+ <td valign="top" >Plane</td>
+ <td valign="top" >Scanout CRTC (destination) width (atomic)</td>
+ </tr>
+ <tr>
+ <td valign="top" >“CRTC_H”</td>
+ <td valign="top" >RANGE</td>
+ <td valign="top" >Min=0, Max=UINT_MAX</td>
+ <td valign="top" >Plane</td>
+ <td valign="top" >Scanout CRTC (destination) height (atomic)</td>
+ </tr>
+ <tr>
+ <td valign="top" >“FB_ID”</td>
+ <td valign="top" >OBJECT</td>
+ <td valign="top" >DRM_MODE_OBJECT_FB</td>
+ <td valign="top" >Plane</td>
+ <td valign="top" >Scanout framebuffer (atomic)</td>
+ </tr>
+ <tr>
+ <td valign="top" >“CRTC_ID”</td>
+ <td valign="top" >OBJECT</td>
+ <td valign="top" >DRM_MODE_OBJECT_CRTC</td>
+ <td valign="top" >Plane</td>
+ <td valign="top" >CRTC that plane is attached to (atomic)</td>
+ </tr>
+ <tr>
<td rowspan="2" valign="top" >DVI-I</td>
<td valign="top" >“subconnector”</td>
<td valign="top" >ENUM</td>
@@ -3951,6 +4037,11 @@
!Idrivers/gpu/drm/i915/intel_psr.c
</sect2>
<sect2>
+ <title>Frame Buffer Compression (FBC)</title>
+!Pdrivers/gpu/drm/i915/intel_fbc.c Frame Buffer Compression (FBC)
+!Idrivers/gpu/drm/i915/intel_fbc.c
+ </sect2>
+ <sect2>
<title>DPIO</title>
!Pdrivers/gpu/drm/i915/i915_reg.h DPIO
<table id="dpiox2">
@@ -4054,10 +4145,20 @@
!Idrivers/gpu/drm/i915/i915_cmd_parser.c
</sect2>
<sect2>
+ <title>Batchbuffer Pools</title>
+!Pdrivers/gpu/drm/i915/i915_gem_batch_pool.c batch pool
+!Idrivers/gpu/drm/i915/i915_gem_batch_pool.c
+ </sect2>
+ <sect2>
<title>Logical Rings, Logical Ring Contexts and Execlists</title>
!Pdrivers/gpu/drm/i915/intel_lrc.c Logical Rings, Logical Ring Contexts and Execlists
!Idrivers/gpu/drm/i915/intel_lrc.c
</sect2>
+ <sect2>
+ <title>Global GTT views</title>
+!Pdrivers/gpu/drm/i915/i915_gem_gtt.c Global GTT views
+!Idrivers/gpu/drm/i915/i915_gem_gtt.c
+ </sect2>
</sect1>
<sect1>
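(The DRIVER_ATOMIC entry added above requires an atomic_get_property vfunc
for modeset objects with driver-specific properties. A hedged sketch of what
such a hook looks like for a plane -- my_plane_state, to_my_plane_state() and
my_rotation_prop are hypothetical names, not part of the DRM core:)

    static int my_plane_atomic_get_property(struct drm_plane *plane,
                                            const struct drm_plane_state *state,
                                            struct drm_property *property,
                                            uint64_t *val)
    {
            /* to_my_plane_state() and my_rotation_prop are assumptions */
            const struct my_plane_state *s = to_my_plane_state(state);

            if (property == my_rotation_prop) {
                    *val = s->rotation;
                    return 0;
            }
            return -EINVAL;    /* unknown property */
    }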
diff --git a/Documentation/devicetree/bindings/input/gpio-keys.txt b/Documentation/devicetree/bindings/input/gpio-keys.txt
index a4a38fc..44b7057 100644
--- a/Documentation/devicetree/bindings/input/gpio-keys.txt
+++ b/Documentation/devicetree/bindings/input/gpio-keys.txt
@@ -10,12 +10,13 @@
Each button (key) is represented as a sub-node of "gpio-keys":
Subnode properties:
+ - gpios: OF device-tree gpio specification.
+ - interrupts: the interrupt line for that input.
- label: Descriptive name of the key.
- linux,code: Keycode to emit.
-Required mutual exclusive subnode-properties:
- - gpios: OF device-tree gpio specification.
- - interrupts: the interrupt line for that input
+Note that either the "interrupts" or the "gpios" property can be omitted,
+but not both at the same time. Specifying both properties is allowed.
Optional subnode-properties:
- linux,input-type: Specify event type this button/key generates.
@@ -23,6 +24,9 @@
- debounce-interval: Debouncing interval time in milliseconds.
If not specified defaults to 5.
- gpio-key,wakeup: Boolean, button can wake-up the system.
+ - linux,can-disable: Boolean, indicates that button is connected
+ to dedicated (not shared) interrupt which can be disabled to
+ suppress events from the button.
Example nodes:
diff --git a/Documentation/devicetree/bindings/input/stmpe-keypad.txt b/Documentation/devicetree/bindings/input/stmpe-keypad.txt
index 1b97222..12bb771 100644
--- a/Documentation/devicetree/bindings/input/stmpe-keypad.txt
+++ b/Documentation/devicetree/bindings/input/stmpe-keypad.txt
@@ -8,6 +8,8 @@
- debounce-interval : Debouncing interval time in milliseconds
- st,scan-count : Scanning cycles elapsed before key data is updated
- st,no-autorepeat : If specified device will not autorepeat
+ - keypad,num-rows : See ./matrix-keymap.txt
+ - keypad,num-columns : See ./matrix-keymap.txt
Example:
diff --git a/Documentation/devicetree/bindings/video/renesas,du.txt b/Documentation/devicetree/bindings/video/renesas,du.txt
index 5102830..c902323 100644
--- a/Documentation/devicetree/bindings/video/renesas,du.txt
+++ b/Documentation/devicetree/bindings/video/renesas,du.txt
@@ -26,6 +26,10 @@
per LVDS encoder. The functional clocks must be named "du.x" with "x"
being the channel numerical index. The LVDS clocks must be named
"lvds.x" with "x" being the LVDS encoder numerical index.
+ - In addition to the functional and encoder clocks, all DU versions also
+ support externally supplied pixel clocks. Those clocks are optional.
+ When supplied they must be named "dclkin.x" with "x" being the input
+ clock numerical index.
Required nodes:
diff --git a/Makefile b/Makefile
index b1c3254..eb4eca5 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
VERSION = 3
PATCHLEVEL = 19
SUBLEVEL = 0
-EXTRAVERSION = -rc1
+EXTRAVERSION = -rc3
NAME = Diseased Newt
# *DOCUMENTATION*
diff --git a/arch/arm/boot/dts/armada-370-db.dts b/arch/arm/boot/dts/armada-370-db.dts
index 1466580..70b1943 100644
--- a/arch/arm/boot/dts/armada-370-db.dts
+++ b/arch/arm/boot/dts/armada-370-db.dts
@@ -203,27 +203,3 @@
compatible = "linux,spdif-dir";
};
};
-
-&pinctrl {
- /*
- * These pins might be muxed as I2S by
- * the bootloader, but it conflicts
- * with the real I2S pins that are
- * muxed using i2s_pins. We must mux
- * those pins to a function other than
- * I2S.
- */
- pinctrl-0 = <&hog_pins1 &hog_pins2>;
- pinctrl-names = "default";
-
- hog_pins1: hog-pins1 {
- marvell,pins = "mpp6", "mpp8", "mpp10",
- "mpp12", "mpp13";
- marvell,function = "gpio";
- };
-
- hog_pins2: hog-pins2 {
- marvell,pins = "mpp5", "mpp7", "mpp9";
- marvell,function = "gpo";
- };
-};
diff --git a/arch/arm/configs/multi_v7_defconfig b/arch/arm/configs/multi_v7_defconfig
index 2328fe7..bc393b7e 100644
--- a/arch/arm/configs/multi_v7_defconfig
+++ b/arch/arm/configs/multi_v7_defconfig
@@ -338,6 +338,7 @@
CONFIG_USB_XHCI_HCD=y
CONFIG_USB_XHCI_MVEBU=y
CONFIG_USB_EHCI_HCD=y
+CONFIG_USB_EHCI_EXYNOS=y
CONFIG_USB_EHCI_TEGRA=y
CONFIG_USB_EHCI_HCD_STI=y
CONFIG_USB_EHCI_HCD_PLATFORM=y
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index f9c8639..715ae19 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -1046,6 +1046,15 @@
seq_printf(m, "model name\t: %s rev %d (%s)\n",
cpu_name, cpuid & 15, elf_platform);
+#if defined(CONFIG_SMP)
+ seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
+ per_cpu(cpu_data, i).loops_per_jiffy / (500000UL/HZ),
+ (per_cpu(cpu_data, i).loops_per_jiffy / (5000UL/HZ)) % 100);
+#else
+ seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
+ loops_per_jiffy / (500000/HZ),
+ (loops_per_jiffy / (5000/HZ)) % 100);
+#endif
/* dump out the processor features */
seq_puts(m, "Features\t: ");
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index 5e6052e..86ef244 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -387,6 +387,18 @@
void __init smp_cpus_done(unsigned int max_cpus)
{
+ int cpu;
+ unsigned long bogosum = 0;
+
+ for_each_online_cpu(cpu)
+ bogosum += per_cpu(cpu_data, cpu).loops_per_jiffy;
+
+ printk(KERN_INFO "SMP: Total of %d processors activated "
+ "(%lu.%02lu BogoMIPS).\n",
+ num_online_cpus(),
+ bogosum / (500000/HZ),
+ (bogosum / (5000/HZ)) % 100);
+
hyp_mode_check();
}
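The printed value is fixed-point arithmetic: BogoMIPS = loops_per_jiffy * HZ
/ 500000, so lpj/(500000/HZ) yields the integer part and (lpj/(5000/HZ)) % 100
the two decimal digits. A quick standalone check of the formula (HZ=100 is an
assumed tick rate):

    #include <stdio.h>

    #define HZ 100 /* assumed tick rate */

    int main(void)
    {
            unsigned long lpj = 4980736; /* example loops_per_jiffy */

            /* 4980736 * 100 / 500000 -> prints "996.14" */
            printf("BogoMIPS: %lu.%02lu\n",
                   lpj / (500000UL / HZ),
                   (lpj / (5000UL / HZ)) % 100);
            return 0;
    }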
diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig
index dd301be..5376d90 100644
--- a/arch/arm64/configs/defconfig
+++ b/arch/arm64/configs/defconfig
@@ -1,6 +1,7 @@
# CONFIG_LOCALVERSION_AUTO is not set
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
+CONFIG_FHANDLE=y
CONFIG_AUDIT=y
CONFIG_NO_HZ_IDLE=y
CONFIG_HIGH_RES_TIMERS=y
@@ -13,14 +14,12 @@
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=14
-CONFIG_RESOURCE_COUNTERS=y
CONFIG_MEMCG=y
CONFIG_MEMCG_SWAP=y
CONFIG_MEMCG_KMEM=y
CONFIG_CGROUP_HUGETLB=y
# CONFIG_UTS_NS is not set
# CONFIG_IPC_NS is not set
-# CONFIG_PID_NS is not set
# CONFIG_NET_NS is not set
CONFIG_SCHED_AUTOGROUP=y
CONFIG_BLK_DEV_INITRD=y
@@ -92,7 +91,6 @@
CONFIG_SERIAL_OF_PLATFORM=y
CONFIG_VIRTIO_CONSOLE=y
# CONFIG_HW_RANDOM is not set
-# CONFIG_HMC_DRV is not set
CONFIG_SPI=y
CONFIG_SPI_PL022=y
CONFIG_GPIO_PL061=y
@@ -133,6 +131,8 @@
CONFIG_EXT4_FS=y
CONFIG_FANOTIFY=y
CONFIG_FANOTIFY_ACCESS_PERMISSIONS=y
+CONFIG_QUOTA=y
+CONFIG_AUTOFS4_FS=y
CONFIG_FUSE_FS=y
CONFIG_CUSE=y
CONFIG_VFAT_FS=y
@@ -152,14 +152,15 @@
CONFIG_DEBUG_KERNEL=y
CONFIG_LOCKUP_DETECTOR=y
# CONFIG_SCHED_DEBUG is not set
+# CONFIG_DEBUG_PREEMPT is not set
# CONFIG_FTRACE is not set
+CONFIG_KEYS=y
CONFIG_SECURITY=y
CONFIG_CRYPTO_ANSI_CPRNG=y
CONFIG_ARM64_CRYPTO=y
CONFIG_CRYPTO_SHA1_ARM64_CE=y
CONFIG_CRYPTO_SHA2_ARM64_CE=y
CONFIG_CRYPTO_GHASH_ARM64_CE=y
-CONFIG_CRYPTO_AES_ARM64_CE=y
CONFIG_CRYPTO_AES_ARM64_CE_CCM=y
CONFIG_CRYPTO_AES_ARM64_CE_BLK=y
CONFIG_CRYPTO_AES_ARM64_NEON_BLK=y
diff --git a/arch/arm64/include/asm/dma-mapping.h b/arch/arm64/include/asm/dma-mapping.h
index d34189b..9ce3e68 100644
--- a/arch/arm64/include/asm/dma-mapping.h
+++ b/arch/arm64/include/asm/dma-mapping.h
@@ -52,13 +52,14 @@
dev->archdata.dma_ops = ops;
}
-static inline int set_arch_dma_coherent_ops(struct device *dev)
+static inline void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
+ struct iommu_ops *iommu, bool coherent)
{
- dev->archdata.dma_coherent = true;
- set_dma_ops(dev, &coherent_swiotlb_dma_ops);
- return 0;
+ dev->archdata.dma_coherent = coherent;
+ if (coherent)
+ set_dma_ops(dev, &coherent_swiotlb_dma_ops);
}
-#define set_arch_dma_coherent_ops set_arch_dma_coherent_ops
+#define arch_setup_dma_ops arch_setup_dma_ops
/* do not use this function in a driver */
static inline bool is_device_dma_coherent(struct device *dev)
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index df22314..210d632 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -298,7 +298,6 @@
#define pfn_pmd(pfn,prot) (__pmd(((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot)))
#define mk_pmd(page,prot) pfn_pmd(page_to_pfn(page),prot)
-#define pmd_page(pmd) pfn_to_page(__phys_to_pfn(pmd_val(pmd) & PHYS_MASK))
#define pud_write(pud) pte_write(pud_pte(pud))
#define pud_pfn(pud) (((pud_val(pud) & PUD_MASK) & PHYS_MASK) >> PAGE_SHIFT)
@@ -401,7 +400,7 @@
return (pmd_t *)pud_page_vaddr(*pud) + pmd_index(addr);
}
-#define pud_page(pud) pmd_page(pud_pmd(pud))
+#define pud_page(pud) pfn_to_page(__phys_to_pfn(pud_val(pud) & PHYS_MASK))
#endif /* CONFIG_ARM64_PGTABLE_LEVELS > 2 */
@@ -437,6 +436,8 @@
return (pud_t *)pgd_page_vaddr(*pgd) + pud_index(addr);
}
+#define pgd_page(pgd) pfn_to_page(__phys_to_pfn(pgd_val(pgd) & PHYS_MASK))
+
#endif /* CONFIG_ARM64_PGTABLE_LEVELS > 3 */
#define pgd_ERROR(pgd) __pgd_error(__FILE__, __LINE__, pgd_val(pgd))
diff --git a/arch/arm64/kernel/suspend.c b/arch/arm64/kernel/suspend.c
index 3771b72..2d6b606 100644
--- a/arch/arm64/kernel/suspend.c
+++ b/arch/arm64/kernel/suspend.c
@@ -5,6 +5,7 @@
#include <asm/debug-monitors.h>
#include <asm/pgtable.h>
#include <asm/memory.h>
+#include <asm/mmu_context.h>
#include <asm/smp_plat.h>
#include <asm/suspend.h>
#include <asm/tlbflush.h>
@@ -98,7 +99,18 @@
*/
ret = __cpu_suspend_enter(arg, fn);
if (ret == 0) {
- cpu_switch_mm(mm->pgd, mm);
+ /*
+ * We are resuming from reset with TTBR0_EL1 set to the
+ * idmap to enable the MMU; restore the active_mm mappings in
+ * TTBR0_EL1 unless the active_mm == &init_mm, in which case
+ * the thread entered __cpu_suspend with TTBR0_EL1 set to
+ * reserved TTBR0 page tables and should be restored as such.
+ */
+ if (mm == &init_mm)
+ cpu_set_reserved_ttbr0();
+ else
+ cpu_switch_mm(mm->pgd, mm);
+
flush_tlb_all();
/*
diff --git a/arch/ia64/include/asm/unistd.h b/arch/ia64/include/asm/unistd.h
index f3b51b5..95c39b9 100644
--- a/arch/ia64/include/asm/unistd.h
+++ b/arch/ia64/include/asm/unistd.h
@@ -11,7 +11,7 @@
-#define NR_syscalls 318 /* length of syscall table */
+#define NR_syscalls 319 /* length of syscall table */
/*
* The following defines stop scripts/checksyscalls.sh from complaining about
diff --git a/arch/ia64/include/uapi/asm/unistd.h b/arch/ia64/include/uapi/asm/unistd.h
index 4c2240c..4610795 100644
--- a/arch/ia64/include/uapi/asm/unistd.h
+++ b/arch/ia64/include/uapi/asm/unistd.h
@@ -331,5 +331,6 @@
#define __NR_getrandom 1339
#define __NR_memfd_create 1340
#define __NR_bpf 1341
+#define __NR_execveat 1342
#endif /* _UAPI_ASM_IA64_UNISTD_H */
diff --git a/arch/ia64/kernel/entry.S b/arch/ia64/kernel/entry.S
index f5e96df..fcf8b8c 100644
--- a/arch/ia64/kernel/entry.S
+++ b/arch/ia64/kernel/entry.S
@@ -1779,6 +1779,7 @@
data8 sys_getrandom
data8 sys_memfd_create // 1340
data8 sys_bpf
+ data8 sys_execveat
.org sys_call_table + 8*NR_syscalls // guard against failures to increase NR_syscalls
#endif /* __IA64_ASM_PARAVIRTUALIZED_NATIVE */
diff --git a/arch/nios2/kernel/cpuinfo.c b/arch/nios2/kernel/cpuinfo.c
index 51d5bb9..a223691d 100644
--- a/arch/nios2/kernel/cpuinfo.c
+++ b/arch/nios2/kernel/cpuinfo.c
@@ -72,6 +72,7 @@
cpuinfo.has_div = fcpu_has(cpu, "altr,has-div");
cpuinfo.has_mul = fcpu_has(cpu, "altr,has-mul");
cpuinfo.has_mulx = fcpu_has(cpu, "altr,has-mulx");
+ cpuinfo.mmu = fcpu_has(cpu, "altr,has-mmu");
if (IS_ENABLED(CONFIG_NIOS2_HW_DIV_SUPPORT) && !cpuinfo.has_div)
err_cpu("DIV");
diff --git a/arch/nios2/kernel/entry.S b/arch/nios2/kernel/entry.S
index 83bca17..0bdfd13 100644
--- a/arch/nios2/kernel/entry.S
+++ b/arch/nios2/kernel/entry.S
@@ -365,30 +365,14 @@
GET_THREAD_INFO r1
ldw r4, TI_PREEMPT_COUNT(r1)
bne r4, r0, restore_all
-
-need_resched:
ldw r4, TI_FLAGS(r1) /* ? Need resched set */
BTBZ r10, r4, TIF_NEED_RESCHED, restore_all
ldw r4, PT_ESTATUS(sp) /* ? Interrupts off */
andi r10, r4, ESTATUS_EPIE
beq r10, r0, restore_all
- movia r4, PREEMPT_ACTIVE
- stw r4, TI_PREEMPT_COUNT(r1)
- rdctl r10, status /* enable intrs again */
- ori r10, r10 ,STATUS_PIE
- wrctl status, r10
- PUSH r1
- call schedule
- POP r1
- mov r4, r0
- stw r4, TI_PREEMPT_COUNT(r1)
- rdctl r10, status /* disable intrs */
- andi r10, r10, %lo(~STATUS_PIE)
- wrctl status, r10
- br need_resched
-#else
- br restore_all
+ call preempt_schedule_irq
#endif
+ br restore_all
/***********************************************************************
* A few syscall wrappers
diff --git a/arch/parisc/include/asm/ldcw.h b/arch/parisc/include/asm/ldcw.h
index d2d11b7..8121aa6 100644
--- a/arch/parisc/include/asm/ldcw.h
+++ b/arch/parisc/include/asm/ldcw.h
@@ -33,11 +33,18 @@
#endif /*!CONFIG_PA20*/
-/* LDCW, the only atomic read-write operation PA-RISC has. *sigh*. */
+/* LDCW, the only atomic read-write operation PA-RISC has. *sigh*.
+ We don't explicitly expose that "*a" may be written as reload
+ fails to find a register in class R1_REGS when "a" needs to be
+ reloaded when generating 64-bit PIC code. Instead, we clobber
+ memory to indicate to the compiler that the assembly code reads
+ or writes to items other than those listed in the input and output
+ operands. This may pessimize the code somewhat but __ldcw is
+ usually used within code blocks surrounded by memory barriers. */
#define __ldcw(a) ({ \
unsigned __ret; \
- __asm__ __volatile__(__LDCW " 0(%2),%0" \
- : "=r" (__ret), "+m" (*(a)) : "r" (a)); \
+ __asm__ __volatile__(__LDCW " 0(%1),%0" \
+ : "=r" (__ret) : "r" (a) : "memory"); \
__ret; \
})
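For context, ldcw atomically loads the lock word and clears it, so the classic
PA-RISC lock-acquire pattern built on __ldcw looks roughly like this (a sketch
only; the real kernel locks additionally keep the word 16-byte aligned via
__ldcw_align and add explicit barriers):

    static inline void my_spin_lock(volatile unsigned int *lock_word)
    {
            while (__ldcw(lock_word) == 0)   /* 0 => lock already held */
                    while (*lock_word == 0)  /* spin read-only until free */
                            cpu_relax();
    }

    static inline void my_spin_unlock(volatile unsigned int *lock_word)
    {
            *lock_word = 1;                  /* non-zero => unlocked */
    }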
diff --git a/arch/powerpc/include/asm/kexec.h b/arch/powerpc/include/asm/kexec.h
index 19c36cb..a46f5f4 100644
--- a/arch/powerpc/include/asm/kexec.h
+++ b/arch/powerpc/include/asm/kexec.h
@@ -86,6 +86,11 @@
extern void reserve_crashkernel(void);
extern void machine_kexec_mask_interrupts(void);
+static inline bool kdump_in_progress(void)
+{
+ return crashing_cpu >= 0;
+}
+
#else /* !CONFIG_KEXEC */
static inline void crash_kexec_secondary(struct pt_regs *regs) { }
@@ -106,6 +111,11 @@
return 0;
}
+static inline bool kdump_in_progress(void)
+{
+ return false;
+}
+
#endif /* CONFIG_KEXEC */
#endif /* ! __ASSEMBLY__ */
#endif /* __KERNEL__ */
diff --git a/arch/powerpc/include/asm/systbl.h b/arch/powerpc/include/asm/systbl.h
index ce9577d..91062ee 100644
--- a/arch/powerpc/include/asm/systbl.h
+++ b/arch/powerpc/include/asm/systbl.h
@@ -366,3 +366,4 @@
SYSCALL_SPU(getrandom)
SYSCALL_SPU(memfd_create)
SYSCALL_SPU(bpf)
+COMPAT_SYS(execveat)
diff --git a/arch/powerpc/include/asm/unistd.h b/arch/powerpc/include/asm/unistd.h
index e0da021..36b79c3 100644
--- a/arch/powerpc/include/asm/unistd.h
+++ b/arch/powerpc/include/asm/unistd.h
@@ -12,7 +12,7 @@
#include <uapi/asm/unistd.h>
-#define __NR_syscalls 362
+#define __NR_syscalls 363
#define __NR__exit __NR_exit
#define NR_syscalls __NR_syscalls
diff --git a/arch/powerpc/include/uapi/asm/unistd.h b/arch/powerpc/include/uapi/asm/unistd.h
index f55351f..ef5b5b1 100644
--- a/arch/powerpc/include/uapi/asm/unistd.h
+++ b/arch/powerpc/include/uapi/asm/unistd.h
@@ -384,5 +384,6 @@
#define __NR_getrandom 359
#define __NR_memfd_create 360
#define __NR_bpf 361
+#define __NR_execveat 362
#endif /* _UAPI_ASM_POWERPC_UNISTD_H_ */
diff --git a/arch/powerpc/kernel/machine_kexec_64.c b/arch/powerpc/kernel/machine_kexec_64.c
index 879b3aa..f96d1ec 100644
--- a/arch/powerpc/kernel/machine_kexec_64.c
+++ b/arch/powerpc/kernel/machine_kexec_64.c
@@ -330,7 +330,7 @@
* using debugger IPI.
*/
- if (crashing_cpu == -1)
+ if (!kdump_in_progress())
kexec_prepare_cpus();
pr_debug("kexec: Starting switchover sequence.\n");
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index 8ec017c..8b2d2dc 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -700,6 +700,7 @@
smp_store_cpu_info(cpu);
set_dec(tb_ticks_per_jiffy);
preempt_disable();
+ cpu_callin_map[cpu] = 1;
if (smp_ops->setup_cpu)
smp_ops->setup_cpu(cpu);
@@ -738,14 +739,6 @@
notify_cpu_starting(cpu);
set_cpu_online(cpu, true);
- /*
- * CPU must be marked active and online before we signal back to the
- * master, because the scheduler needs to see the cpu_online and
- * cpu_active bits set.
- */
- smp_wmb();
- cpu_callin_map[cpu] = 1;
-
local_irq_enable();
cpu_startup_entry(CPUHP_ONLINE);
diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c
index 469751d..b5682fd 100644
--- a/arch/powerpc/platforms/pseries/lpar.c
+++ b/arch/powerpc/platforms/pseries/lpar.c
@@ -43,6 +43,7 @@
#include <asm/trace.h>
#include <asm/firmware.h>
#include <asm/plpar_wrappers.h>
+#include <asm/kexec.h>
#include <asm/fadump.h>
#include "pseries.h"
@@ -267,8 +268,13 @@
* out to the user, but at least this will stop us from
* continuing on further and creating an even more
* difficult to debug situation.
+ *
+ * There is a known problem when kdump'ing: if cpus are offline,
+ * the above call will fail. Rather than panicking again, keep
+ * going and hope the kdump kernel is also little endian, which
+ * it usually is.
*/
- if (rc)
+ if (rc && !kdump_in_progress())
panic("Could not enable big endian exceptions");
}
#endif
diff --git a/arch/um/Kconfig.common b/arch/um/Kconfig.common
index 87bc868..d195a87 100644
--- a/arch/um/Kconfig.common
+++ b/arch/um/Kconfig.common
@@ -3,6 +3,7 @@
default y
select HAVE_ARCH_AUDITSYSCALL
select HAVE_UID16
+ select HAVE_FUTEX_CMPXCHG if FUTEX
select GENERIC_IRQ_SHOW
select GENERIC_CPU_DEVICES
select GENERIC_IO
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 10fbed1..f83fc6c 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -4448,7 +4448,7 @@
* zap all shadow pages.
*/
if (unlikely(kvm_current_mmio_generation(kvm) == 0)) {
- printk_ratelimited(KERN_INFO "kvm: zapping shadow pages for mmio generation wraparound\n");
+ printk_ratelimited(KERN_DEBUG "kvm: zapping shadow pages for mmio generation wraparound\n");
kvm_mmu_invalidate_zap_all_pages(kvm);
}
}
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index feb852b..d4c58d8 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -5840,53 +5840,10 @@
memset(vmx_msr_bitmap_legacy, 0xff, PAGE_SIZE);
memset(vmx_msr_bitmap_longmode, 0xff, PAGE_SIZE);
- vmx_disable_intercept_for_msr(MSR_FS_BASE, false);
- vmx_disable_intercept_for_msr(MSR_GS_BASE, false);
- vmx_disable_intercept_for_msr(MSR_KERNEL_GS_BASE, true);
- vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_CS, false);
- vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_ESP, false);
- vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_EIP, false);
- vmx_disable_intercept_for_msr(MSR_IA32_BNDCFGS, true);
-
- memcpy(vmx_msr_bitmap_legacy_x2apic,
- vmx_msr_bitmap_legacy, PAGE_SIZE);
- memcpy(vmx_msr_bitmap_longmode_x2apic,
- vmx_msr_bitmap_longmode, PAGE_SIZE);
-
- if (enable_apicv) {
- for (msr = 0x800; msr <= 0x8ff; msr++)
- vmx_disable_intercept_msr_read_x2apic(msr);
-
- /* According SDM, in x2apic mode, the whole id reg is used.
- * But in KVM, it only use the highest eight bits. Need to
- * intercept it */
- vmx_enable_intercept_msr_read_x2apic(0x802);
- /* TMCCT */
- vmx_enable_intercept_msr_read_x2apic(0x839);
- /* TPR */
- vmx_disable_intercept_msr_write_x2apic(0x808);
- /* EOI */
- vmx_disable_intercept_msr_write_x2apic(0x80b);
- /* SELF-IPI */
- vmx_disable_intercept_msr_write_x2apic(0x83f);
- }
-
- if (enable_ept) {
- kvm_mmu_set_mask_ptes(0ull,
- (enable_ept_ad_bits) ? VMX_EPT_ACCESS_BIT : 0ull,
- (enable_ept_ad_bits) ? VMX_EPT_DIRTY_BIT : 0ull,
- 0ull, VMX_EPT_EXECUTABLE_MASK);
- ept_set_mmio_spte_mask();
- kvm_enable_tdp();
- } else
- kvm_disable_tdp();
-
- update_ple_window_actual_max();
-
if (setup_vmcs_config(&vmcs_config) < 0) {
r = -EIO;
goto out7;
- }
+ }
if (boot_cpu_has(X86_FEATURE_NX))
kvm_enable_efer_bits(EFER_NX);
@@ -5945,6 +5902,49 @@
if (nested)
nested_vmx_setup_ctls_msrs();
+ vmx_disable_intercept_for_msr(MSR_FS_BASE, false);
+ vmx_disable_intercept_for_msr(MSR_GS_BASE, false);
+ vmx_disable_intercept_for_msr(MSR_KERNEL_GS_BASE, true);
+ vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_CS, false);
+ vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_ESP, false);
+ vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_EIP, false);
+ vmx_disable_intercept_for_msr(MSR_IA32_BNDCFGS, true);
+
+ memcpy(vmx_msr_bitmap_legacy_x2apic,
+ vmx_msr_bitmap_legacy, PAGE_SIZE);
+ memcpy(vmx_msr_bitmap_longmode_x2apic,
+ vmx_msr_bitmap_longmode, PAGE_SIZE);
+
+ if (enable_apicv) {
+ for (msr = 0x800; msr <= 0x8ff; msr++)
+ vmx_disable_intercept_msr_read_x2apic(msr);
+
+ /* According SDM, in x2apic mode, the whole id reg is used.
+ * But in KVM, it only use the highest eight bits. Need to
+ * intercept it */
+ vmx_enable_intercept_msr_read_x2apic(0x802);
+ /* TMCCT */
+ vmx_enable_intercept_msr_read_x2apic(0x839);
+ /* TPR */
+ vmx_disable_intercept_msr_write_x2apic(0x808);
+ /* EOI */
+ vmx_disable_intercept_msr_write_x2apic(0x80b);
+ /* SELF-IPI */
+ vmx_disable_intercept_msr_write_x2apic(0x83f);
+ }
+
+ if (enable_ept) {
+ kvm_mmu_set_mask_ptes(0ull,
+ (enable_ept_ad_bits) ? VMX_EPT_ACCESS_BIT : 0ull,
+ (enable_ept_ad_bits) ? VMX_EPT_DIRTY_BIT : 0ull,
+ 0ull, VMX_EPT_EXECUTABLE_MASK);
+ ept_set_mmio_spte_mask();
+ kvm_enable_tdp();
+ } else
+ kvm_disable_tdp();
+
+ update_ple_window_actual_max();
+
return alloc_kvm_area();
out7:
diff --git a/arch/x86/um/sys_call_table_32.c b/arch/x86/um/sys_call_table_32.c
index 531d426..bd16d6c 100644
--- a/arch/x86/um/sys_call_table_32.c
+++ b/arch/x86/um/sys_call_table_32.c
@@ -34,7 +34,7 @@
extern asmlinkage void sys_ni_syscall(void);
-const sys_call_ptr_t sys_call_table[] __cacheline_aligned = {
+const sys_call_ptr_t sys_call_table[] ____cacheline_aligned = {
/*
* Smells like a compiler bug -- it doesn't work
* when the & below is removed.
diff --git a/arch/x86/um/sys_call_table_64.c b/arch/x86/um/sys_call_table_64.c
index 20c3649..5cdfa9d 100644
--- a/arch/x86/um/sys_call_table_64.c
+++ b/arch/x86/um/sys_call_table_64.c
@@ -47,7 +47,7 @@
extern void sys_ni_syscall(void);
-const sys_call_ptr_t sys_call_table[] __cacheline_aligned = {
+const sys_call_ptr_t sys_call_table[] ____cacheline_aligned = {
/*
* Smells like a compiler bug -- it doesn't work
* when the & below is removed.
diff --git a/crypto/af_alg.c b/crypto/af_alg.c
index 1fa7bc3..4665b79 100644
--- a/crypto/af_alg.c
+++ b/crypto/af_alg.c
@@ -455,6 +455,9 @@
{
struct af_alg_completion *completion = req->data;
+ if (err == -EINPROGRESS)
+ return;
+
completion->err = err;
complete(&completion->completion);
}
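This matches the async crypto convention: a callback invoked with -EINPROGRESS
merely reports that a backlogged request has been queued, so only the final
invocation should complete the waiter. The corresponding wait side, sketched
after the pattern of af_alg_wait_for_completion (error handling trimmed):

    static int my_wait_for_completion(int err, struct af_alg_completion *c)
    {
            switch (err) {
            case -EINPROGRESS:
            case -EBUSY:
                    /* request was queued; block until the real callback */
                    wait_for_completion(&c->completion);
                    reinit_completion(&c->completion);
                    err = c->err;
                    break;
            }
            return err;
    }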
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index 4995365..87b704e 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -985,8 +985,6 @@
state->flags = 0;
switch (cx->type) {
case ACPI_STATE_C1:
- if (cx->entry_method != ACPI_CSTATE_FFH)
- state->flags |= CPUIDLE_FLAG_TIME_INVALID;
state->enter = acpi_idle_enter_c1;
state->enter_dead = acpi_idle_play_dead;
diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c
index 1eaadff..c72e79d2c5 100644
--- a/drivers/acpi/video.c
+++ b/drivers/acpi/video.c
@@ -505,6 +505,23 @@
DMI_MATCH(DMI_PRODUCT_NAME, "HP ENVY 15 Notebook PC"),
},
},
+
+ {
+ .callback = video_disable_native_backlight,
+ .ident = "SAMSUNG 870Z5E/880Z5E/680Z5E",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "870Z5E/880Z5E/680Z5E"),
+ },
+ },
+ {
+ .callback = video_disable_native_backlight,
+ .ident = "SAMSUNG 370R4E/370R4V/370R5E/3570RE/370R5V",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "370R4E/370R4V/370R5E/3570RE/370R5V"),
+ },
+ },
{}
};
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index 6a103a3..0d8780c 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -2088,7 +2088,7 @@
* Returns a valid pointer to struct generic_pm_domain on success or ERR_PTR()
* on failure.
*/
-static struct generic_pm_domain *of_genpd_get_from_provider(
+struct generic_pm_domain *of_genpd_get_from_provider(
struct of_phandle_args *genpdspec)
{
struct generic_pm_domain *genpd = ERR_PTR(-ENOENT);
@@ -2108,6 +2108,7 @@
return genpd;
}
+EXPORT_SYMBOL_GPL(of_genpd_get_from_provider);
/**
* genpd_dev_pm_detach - Detach a device from its PM domain.
diff --git a/drivers/base/power/opp.c b/drivers/base/power/opp.c
index d24dd614a..106c693 100644
--- a/drivers/base/power/opp.c
+++ b/drivers/base/power/opp.c
@@ -108,6 +108,14 @@
/* Lock to allow exclusive modification to the device and opp lists */
static DEFINE_MUTEX(dev_opp_list_lock);
+#define opp_rcu_lockdep_assert() \
+do { \
+ rcu_lockdep_assert(rcu_read_lock_held() || \
+ lockdep_is_held(&dev_opp_list_lock), \
+ "Missing rcu_read_lock() or " \
+ "dev_opp_list_lock protection"); \
+} while (0)
+
/**
* find_device_opp() - find device_opp struct using device pointer
* @dev: device pointer used to lookup device OPPs
@@ -208,9 +216,7 @@
* This function returns the number of available opps if there are any,
* else returns 0 if none or the corresponding error value.
*
- * Locking: This function must be called under rcu_read_lock(). This function
- * internally references two RCU protected structures: device_opp and opp which
- * are safe as long as we are under a common RCU locked section.
+ * Locking: This function takes rcu_read_lock().
*/
int dev_pm_opp_get_opp_count(struct device *dev)
{
@@ -218,11 +224,14 @@
struct dev_pm_opp *temp_opp;
int count = 0;
+ rcu_read_lock();
+
dev_opp = find_device_opp(dev);
if (IS_ERR(dev_opp)) {
- int r = PTR_ERR(dev_opp);
- dev_err(dev, "%s: device OPP not found (%d)\n", __func__, r);
- return r;
+ count = PTR_ERR(dev_opp);
+ dev_err(dev, "%s: device OPP not found (%d)\n",
+ __func__, count);
+ goto out_unlock;
}
list_for_each_entry_rcu(temp_opp, &dev_opp->opp_list, node) {
@@ -230,6 +239,8 @@
count++;
}
+out_unlock:
+ rcu_read_unlock();
return count;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_opp_count);
@@ -267,6 +278,8 @@
struct device_opp *dev_opp;
struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
+ opp_rcu_lockdep_assert();
+
dev_opp = find_device_opp(dev);
if (IS_ERR(dev_opp)) {
int r = PTR_ERR(dev_opp);
@@ -313,6 +326,8 @@
struct device_opp *dev_opp;
struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
+ opp_rcu_lockdep_assert();
+
if (!dev || !freq) {
dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq);
return ERR_PTR(-EINVAL);
@@ -361,6 +376,8 @@
struct device_opp *dev_opp;
struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
+ opp_rcu_lockdep_assert();
+
if (!dev || !freq) {
dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq);
return ERR_PTR(-EINVAL);
@@ -783,9 +800,15 @@
/* Check for existing list for 'dev' */
dev_opp = find_device_opp(dev);
- if (WARN(IS_ERR(dev_opp), "%s: dev_opp: %ld\n", dev_name(dev),
- PTR_ERR(dev_opp)))
+ if (IS_ERR(dev_opp)) {
+ int error = PTR_ERR(dev_opp);
+ if (error != -ENODEV)
+ WARN(1, "%s: dev_opp: %d\n",
+ IS_ERR_OR_NULL(dev) ?
+ "Invalid device" : dev_name(dev),
+ error);
return;
+ }
/* Hold our list modification lock here */
mutex_lock(&dev_opp_list_lock);
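The new opp_rcu_lockdep_assert() encodes the calling convention for the find
helpers: hold rcu_read_lock() (or dev_opp_list_lock) across both the lookup
and any use of the returned OPP pointer. A hedged caller-side sketch (the
target frequency is illustrative):

    static unsigned long my_ceil_voltage(struct device *dev)
    {
            unsigned long freq = 1000000000; /* 1 GHz, illustrative */
            unsigned long volt = 0;
            struct dev_pm_opp *opp;

            rcu_read_lock();
            opp = dev_pm_opp_find_freq_ceil(dev, &freq);
            if (!IS_ERR(opp))
                    volt = dev_pm_opp_get_voltage(opp);
            rcu_read_unlock(); /* opp is invalid past this point */

            return volt;
    }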
diff --git a/drivers/char/agp/ali-agp.c b/drivers/char/agp/ali-agp.c
index 19db036..dcbbb4e 100644
--- a/drivers/char/agp/ali-agp.c
+++ b/drivers/char/agp/ali-agp.c
@@ -417,6 +417,6 @@
module_init(agp_ali_init);
module_exit(agp_ali_cleanup);
-MODULE_AUTHOR("Dave Jones <davej@redhat.com>");
+MODULE_AUTHOR("Dave Jones");
MODULE_LICENSE("GPL and additional rights");
diff --git a/drivers/char/agp/amd64-agp.c b/drivers/char/agp/amd64-agp.c
index 3b47ed0..0ef3500 100644
--- a/drivers/char/agp/amd64-agp.c
+++ b/drivers/char/agp/amd64-agp.c
@@ -813,6 +813,6 @@
module_init(agp_amd64_mod_init);
module_exit(agp_amd64_cleanup);
-MODULE_AUTHOR("Dave Jones <davej@redhat.com>, Andi Kleen");
+MODULE_AUTHOR("Dave Jones, Andi Kleen");
module_param(agp_try_unsupported, bool, 0);
MODULE_LICENSE("GPL");
diff --git a/drivers/char/agp/ati-agp.c b/drivers/char/agp/ati-agp.c
index 18a7a6b..75a9786 100644
--- a/drivers/char/agp/ati-agp.c
+++ b/drivers/char/agp/ati-agp.c
@@ -579,6 +579,6 @@
module_init(agp_ati_init);
module_exit(agp_ati_cleanup);
-MODULE_AUTHOR("Dave Jones <davej@redhat.com>");
+MODULE_AUTHOR("Dave Jones");
MODULE_LICENSE("GPL and additional rights");
diff --git a/drivers/char/agp/backend.c b/drivers/char/agp/backend.c
index 317c28c..38ffb28 100644
--- a/drivers/char/agp/backend.c
+++ b/drivers/char/agp/backend.c
@@ -356,7 +356,7 @@
__setup("agp=", agp_setup);
#endif
-MODULE_AUTHOR("Dave Jones <davej@redhat.com>");
+MODULE_AUTHOR("Dave Jones, Jeff Hartmann");
MODULE_DESCRIPTION("AGP GART driver");
MODULE_LICENSE("GPL and additional rights");
MODULE_ALIAS_MISCDEV(AGPGART_MINOR);
diff --git a/drivers/char/agp/intel-agp.c b/drivers/char/agp/intel-agp.c
index f9b9ca5..0a21dae 100644
--- a/drivers/char/agp/intel-agp.c
+++ b/drivers/char/agp/intel-agp.c
@@ -920,5 +920,5 @@
module_init(agp_intel_init);
module_exit(agp_intel_cleanup);
-MODULE_AUTHOR("Dave Jones <davej@redhat.com>");
+MODULE_AUTHOR("Dave Jones, Various @Intel");
MODULE_LICENSE("GPL and additional rights");
diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c
index f333482..92aa43f 100644
--- a/drivers/char/agp/intel-gtt.c
+++ b/drivers/char/agp/intel-gtt.c
@@ -1438,5 +1438,5 @@
}
EXPORT_SYMBOL(intel_gmch_remove);
-MODULE_AUTHOR("Dave Jones <davej@redhat.com>");
+MODULE_AUTHOR("Dave Jones, Various @Intel");
MODULE_LICENSE("GPL and additional rights");
diff --git a/drivers/char/agp/nvidia-agp.c b/drivers/char/agp/nvidia-agp.c
index a1861b7..6c8d39c 100644
--- a/drivers/char/agp/nvidia-agp.c
+++ b/drivers/char/agp/nvidia-agp.c
@@ -1,7 +1,7 @@
/*
* Nvidia AGPGART routines.
* Based upon a 2.4 agpgart diff by the folks from NVIDIA, and hacked up
- * to work in 2.5 by Dave Jones <davej@redhat.com>
+ * to work in 2.5 by Dave Jones.
*/
#include <linux/module.h>
diff --git a/drivers/char/agp/via-agp.c b/drivers/char/agp/via-agp.c
index 228f20c..a4961d3 100644
--- a/drivers/char/agp/via-agp.c
+++ b/drivers/char/agp/via-agp.c
@@ -595,4 +595,4 @@
module_exit(agp_via_cleanup);
MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Dave Jones <davej@redhat.com>");
+MODULE_AUTHOR("Dave Jones");
diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
index 5fa83f7..6b65fa4 100644
--- a/drivers/char/ipmi/ipmi_msghandler.c
+++ b/drivers/char/ipmi/ipmi_msghandler.c
@@ -199,18 +199,6 @@
int guid_set;
char name[16];
struct kref usecount;
-
- /* bmc device attributes */
- struct device_attribute device_id_attr;
- struct device_attribute provides_dev_sdrs_attr;
- struct device_attribute revision_attr;
- struct device_attribute firmware_rev_attr;
- struct device_attribute version_attr;
- struct device_attribute add_dev_support_attr;
- struct device_attribute manufacturer_id_attr;
- struct device_attribute product_id_attr;
- struct device_attribute guid_attr;
- struct device_attribute aux_firmware_rev_attr;
};
#define to_bmc_device(x) container_of((x), struct bmc_device, pdev.dev)
@@ -2252,7 +2240,7 @@
return snprintf(buf, 10, "%u\n", bmc->id.device_id);
}
-DEVICE_ATTR(device_id, S_IRUGO, device_id_show, NULL);
+static DEVICE_ATTR(device_id, S_IRUGO, device_id_show, NULL);
static ssize_t provides_device_sdrs_show(struct device *dev,
struct device_attribute *attr,
@@ -2263,7 +2251,8 @@
return snprintf(buf, 10, "%u\n",
(bmc->id.device_revision & 0x80) >> 7);
}
-DEVICE_ATTR(provides_device_sdrs, S_IRUGO, provides_device_sdrs_show, NULL);
+static DEVICE_ATTR(provides_device_sdrs, S_IRUGO, provides_device_sdrs_show,
+ NULL);
static ssize_t revision_show(struct device *dev, struct device_attribute *attr,
char *buf)
@@ -2273,7 +2262,7 @@
return snprintf(buf, 20, "%u\n",
bmc->id.device_revision & 0x0F);
}
-DEVICE_ATTR(revision, S_IRUGO, revision_show, NULL);
+static DEVICE_ATTR(revision, S_IRUGO, revision_show, NULL);
static ssize_t firmware_revision_show(struct device *dev,
struct device_attribute *attr,
@@ -2284,7 +2273,7 @@
return snprintf(buf, 20, "%u.%x\n", bmc->id.firmware_revision_1,
bmc->id.firmware_revision_2);
}
-DEVICE_ATTR(firmware_revision, S_IRUGO, firmware_revision_show, NULL);
+static DEVICE_ATTR(firmware_revision, S_IRUGO, firmware_revision_show, NULL);
static ssize_t ipmi_version_show(struct device *dev,
struct device_attribute *attr,
@@ -2296,7 +2285,7 @@
ipmi_version_major(&bmc->id),
ipmi_version_minor(&bmc->id));
}
-DEVICE_ATTR(ipmi_version, S_IRUGO, ipmi_version_show, NULL);
+static DEVICE_ATTR(ipmi_version, S_IRUGO, ipmi_version_show, NULL);
static ssize_t add_dev_support_show(struct device *dev,
struct device_attribute *attr,
@@ -2307,7 +2296,8 @@
return snprintf(buf, 10, "0x%02x\n",
bmc->id.additional_device_support);
}
-DEVICE_ATTR(additional_device_support, S_IRUGO, add_dev_support_show, NULL);
+static DEVICE_ATTR(additional_device_support, S_IRUGO, add_dev_support_show,
+ NULL);
static ssize_t manufacturer_id_show(struct device *dev,
struct device_attribute *attr,
@@ -2317,7 +2307,7 @@
return snprintf(buf, 20, "0x%6.6x\n", bmc->id.manufacturer_id);
}
-DEVICE_ATTR(manufacturer_id, S_IRUGO, manufacturer_id_show, NULL);
+static DEVICE_ATTR(manufacturer_id, S_IRUGO, manufacturer_id_show, NULL);
static ssize_t product_id_show(struct device *dev,
struct device_attribute *attr,
@@ -2327,7 +2317,7 @@
return snprintf(buf, 10, "0x%4.4x\n", bmc->id.product_id);
}
-DEVICE_ATTR(product_id, S_IRUGO, product_id_show, NULL);
+static DEVICE_ATTR(product_id, S_IRUGO, product_id_show, NULL);
static ssize_t aux_firmware_rev_show(struct device *dev,
struct device_attribute *attr,
@@ -2341,7 +2331,7 @@
bmc->id.aux_firmware_revision[1],
bmc->id.aux_firmware_revision[0]);
}
-DEVICE_ATTR(aux_firmware_revision, S_IRUGO, aux_firmware_rev_show, NULL);
+static DEVICE_ATTR(aux_firmware_revision, S_IRUGO, aux_firmware_rev_show, NULL);
static ssize_t guid_show(struct device *dev, struct device_attribute *attr,
char *buf)
@@ -2352,7 +2342,7 @@
(long long) bmc->guid[0],
(long long) bmc->guid[8]);
}
-DEVICE_ATTR(guid, S_IRUGO, guid_show, NULL);
+static DEVICE_ATTR(guid, S_IRUGO, guid_show, NULL);
static struct attribute *bmc_dev_attrs[] = {
&dev_attr_device_id.attr,
@@ -2392,10 +2382,10 @@
if (bmc->id.aux_firmware_revision_set)
device_remove_file(&bmc->pdev.dev,
- &bmc->aux_firmware_rev_attr);
+ &dev_attr_aux_firmware_revision);
if (bmc->guid_set)
device_remove_file(&bmc->pdev.dev,
- &bmc->guid_attr);
+ &dev_attr_guid);
platform_device_unregister(&bmc->pdev);
}
@@ -2422,16 +2412,14 @@
int err;
if (bmc->id.aux_firmware_revision_set) {
- bmc->aux_firmware_rev_attr.attr.name = "aux_firmware_revision";
err = device_create_file(&bmc->pdev.dev,
- &bmc->aux_firmware_rev_attr);
+ &dev_attr_aux_firmware_revision);
if (err)
goto out;
}
if (bmc->guid_set) {
- bmc->guid_attr.attr.name = "guid";
err = device_create_file(&bmc->pdev.dev,
- &bmc->guid_attr);
+ &dev_attr_guid);
if (err)
goto out_aux_firm;
}
@@ -2441,7 +2429,7 @@
out_aux_firm:
if (bmc->id.aux_firmware_revision_set)
device_remove_file(&bmc->pdev.dev,
- &bmc->aux_firmware_rev_attr);
+ &dev_attr_aux_firmware_revision);
out:
return err;
}
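The conversion above relies on the standard sysfs idiom: DEVICE_ATTR(name,
mode, show, store) emits a static struct device_attribute called
dev_attr_<name>, which is then passed to device_create_file() and
device_remove_file(), so no per-instance attribute storage is needed. A
minimal sketch of the same pattern (the attribute and value are made up):

    static ssize_t my_value_show(struct device *dev,
                                 struct device_attribute *attr, char *buf)
    {
            return snprintf(buf, 10, "%u\n", 42); /* illustrative value */
    }
    static DEVICE_ATTR(my_value, S_IRUGO, my_value_show, NULL);

    /* at registration: err = device_create_file(dev, &dev_attr_my_value); */
    /* at teardown:     device_remove_file(dev, &dev_attr_my_value);       */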
diff --git a/drivers/char/ipmi/ipmi_ssif.c b/drivers/char/ipmi/ipmi_ssif.c
index e178ac2..fd5a5e8 100644
--- a/drivers/char/ipmi/ipmi_ssif.c
+++ b/drivers/char/ipmi/ipmi_ssif.c
@@ -52,6 +52,7 @@
#include <linux/dmi.h>
#include <linux/kthread.h>
#include <linux/acpi.h>
+#include <linux/ctype.h>
#define PFX "ipmi_ssif: "
#define DEVICE_NAME "ipmi_ssif"
diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c
index 6a79fc4..095c177 100644
--- a/drivers/clocksource/arm_arch_timer.c
+++ b/drivers/clocksource/arm_arch_timer.c
@@ -462,7 +462,7 @@
/* Register the CP15 based counter if we have one */
if (type & ARCH_CP15_TIMER) {
- if (arch_timer_use_virtual)
+ if (IS_ENABLED(CONFIG_ARM64) || arch_timer_use_virtual)
arch_timer_read_counter = arch_counter_get_cntvct;
else
arch_timer_read_counter = arch_counter_get_cntpct;
diff --git a/drivers/cpufreq/cpufreq-dt.c b/drivers/cpufreq/cpufreq-dt.c
index f56147a..fde97d6 100644
--- a/drivers/cpufreq/cpufreq-dt.c
+++ b/drivers/cpufreq/cpufreq-dt.c
@@ -211,6 +211,17 @@
/* OPPs might be populated at runtime, don't check for error here */
of_init_opp_table(cpu_dev);
+ /*
+ * But we need the OPP table to function, so if it is not there, let's
+ * give the platform code a chance to provide it for us.
+ */
+ ret = dev_pm_opp_get_opp_count(cpu_dev);
+ if (ret <= 0) {
+ pr_debug("OPP table is not ready, deferring probe\n");
+ ret = -EPROBE_DEFER;
+ goto out_free_opp;
+ }
+
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv) {
ret = -ENOMEM;
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index a09a29c..46bed4f 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -2028,6 +2028,12 @@
/* Don't start any governor operations if we are entering suspend */
if (cpufreq_suspended)
return 0;
+ /*
+ * Governor might not be initiated here if ACPI _PPC changed
+ * notification happened, so check it.
+ */
+ if (!policy->governor)
+ return -EINVAL;
if (policy->governor->max_transition_latency &&
policy->cpuinfo.transition_latency >
diff --git a/drivers/cpuidle/governors/ladder.c b/drivers/cpuidle/governors/ladder.c
index 37263d9..401c010 100644
--- a/drivers/cpuidle/governors/ladder.c
+++ b/drivers/cpuidle/governors/ladder.c
@@ -79,12 +79,7 @@
last_state = &ldev->states[last_idx];
- if (!(drv->states[last_idx].flags & CPUIDLE_FLAG_TIME_INVALID)) {
- last_residency = cpuidle_get_last_residency(dev) - \
- drv->states[last_idx].exit_latency;
- }
- else
- last_residency = last_state->threshold.promotion_time + 1;
+ last_residency = cpuidle_get_last_residency(dev) - drv->states[last_idx].exit_latency;
/* consider promotion */
if (last_idx < drv->state_count - 1 &&
diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c
index 659d7b0..4058079 100644
--- a/drivers/cpuidle/governors/menu.c
+++ b/drivers/cpuidle/governors/menu.c
@@ -396,8 +396,8 @@
* power state and occurrence of the wakeup event.
*
* If the entered idle state didn't support residency measurements,
- * we are basically lost in the dark how much time passed.
- * As a compromise, assume we slept for the whole expected time.
+ * we still use the measured value if it is short, and if it is
+ * long, we truncate it to the whole expected time.
*
* Any measured amount of time will include the exit latency.
* Since we are interested in when the wakeup begun, not when it
@@ -405,23 +405,18 @@
* the measured amount of time is less than the exit latency,
* assume the state was never reached and the exit latency is 0.
*/
- if (unlikely(target->flags & CPUIDLE_FLAG_TIME_INVALID)) {
- /* Use timer value as is */
+
+ /* measured value */
+ measured_us = cpuidle_get_last_residency(dev);
+
+ /* Deduct exit latency */
+ if (measured_us > target->exit_latency)
+ measured_us -= target->exit_latency;
+
+ /* Make sure our coefficients do not exceed unity */
+ if (measured_us > data->next_timer_us)
measured_us = data->next_timer_us;
- } else {
- /* Use measured value */
- measured_us = cpuidle_get_last_residency(dev);
-
- /* Deduct exit latency */
- if (measured_us > target->exit_latency)
- measured_us -= target->exit_latency;
-
- /* Make sure our coefficients do not exceed unity */
- if (measured_us > data->next_timer_us)
- measured_us = data->next_timer_us;
- }
-
/* Update our correction ratio */
new_factor = data->correction_factor[data->bucket];
new_factor -= new_factor / DECAY;
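For context, the correction factor updated here is a decayed running average:
each update ages the stored factor by 1/DECAY and then folds in the newly
observed measured/expected ratio scaled by RESOLUTION. A sketch of the update
step (DECAY and RESOLUTION as defined in menu.c, assumed to be 8 and 1024):

    /* Decayed running average of the predicted-vs-actual sleep ratio. */
    static unsigned int update_correction(unsigned int old_factor,
                                          unsigned int measured_us,
                                          unsigned int next_timer_us)
    {
            unsigned int f = old_factor - old_factor / DECAY; /* age old data */

            /* fold in the new observation, scaled to RESOLUTION */
            f += RESOLUTION * measured_us / next_timer_us;
            return f;
    }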
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
index 4f7b275..4c0b1e4 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
@@ -121,13 +121,9 @@
if (IS_ERR(process))
return PTR_ERR(process);
- process->is_32bit_user_mode = is_32bit_user_mode;
-
dev_dbg(kfd_device, "process %d opened, compat mode (32 bit) - %d\n",
process->pasid, process->is_32bit_user_mode);
- kfd_init_apertures(process);
-
return 0;
}
@@ -196,6 +192,8 @@
if (args->queue_type == KFD_IOC_QUEUE_TYPE_COMPUTE ||
args->queue_type == KFD_IOC_QUEUE_TYPE_COMPUTE_AQL)
q_properties->type = KFD_QUEUE_TYPE_COMPUTE;
+ else if (args->queue_type == KFD_IOC_QUEUE_TYPE_SDMA)
+ q_properties->type = KFD_QUEUE_TYPE_SDMA;
else
return -ENOTSUPP;
@@ -262,8 +260,8 @@
p->pasid,
dev->id);
- err = pqm_create_queue(&p->pqm, dev, filep, &q_properties, 0,
- KFD_QUEUE_TYPE_COMPUTE, &queue_id);
+ err = pqm_create_queue(&p->pqm, dev, filep, &q_properties,
+ 0, q_properties.type, &queue_id);
if (err != 0)
goto err_create_queue;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
index 43884eb..994a9c1 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
@@ -26,12 +26,14 @@
#include <linux/slab.h>
#include "kfd_priv.h"
#include "kfd_device_queue_manager.h"
+#include "kfd_pm4_headers.h"
#define MQD_SIZE_ALIGNED 768
static const struct kfd_device_info kaveri_device_info = {
.max_pasid_bits = 16,
.ih_ring_entry_size = 4 * sizeof(uint32_t),
+ .num_of_watch_points = 4,
.mqd_size_aligned = MQD_SIZE_ALIGNED
};
@@ -66,6 +68,10 @@
{ 0x131D, &kaveri_device_info }, /* Kaveri */
};
+static int kfd_gtt_sa_init(struct kfd_dev *kfd, unsigned int buf_size,
+ unsigned int chunk_size);
+static void kfd_gtt_sa_fini(struct kfd_dev *kfd);
+
static const struct kfd_device_info *lookup_device_info(unsigned short did)
{
size_t i;
@@ -173,16 +179,39 @@
max_num_of_queues_per_process *
kfd->device_info->mqd_size_aligned;
- /* add another 512KB for all other allocations on gart */
+ /*
+ * calculate max size of runlist packet.
+ * There can be only 2 packets at once
+ */
+ size += (max_num_of_processes * sizeof(struct pm4_map_process) +
+ max_num_of_processes * max_num_of_queues_per_process *
+ sizeof(struct pm4_map_queues) + sizeof(struct pm4_runlist)) * 2;
+
+ /* Add size of HIQ & DIQ */
+ size += KFD_KERNEL_QUEUE_SIZE * 2;
+
+ /* add another 512KB for all other allocations on gart (HPD, fences) */
size += 512 * 1024;
- if (kfd2kgd->init_sa_manager(kfd->kgd, size)) {
+ if (kfd2kgd->init_gtt_mem_allocation(kfd->kgd, size, &kfd->gtt_mem,
+ &kfd->gtt_start_gpu_addr, &kfd->gtt_start_cpu_ptr)) {
dev_err(kfd_device,
- "Error initializing sa manager for device (%x:%x)\n",
- kfd->pdev->vendor, kfd->pdev->device);
+ "Could not allocate %d bytes for device (%x:%x)\n",
+ size, kfd->pdev->vendor, kfd->pdev->device);
goto out;
}
+ dev_info(kfd_device,
+ "Allocated %d bytes on gart for device(%x:%x)\n",
+ size, kfd->pdev->vendor, kfd->pdev->device);
+
+ /* Initialize GTT sa with 512 byte chunk size */
+ if (kfd_gtt_sa_init(kfd, size, 512) != 0) {
+ dev_err(kfd_device,
+ "Error initializing gtt sub-allocator\n");
+ goto kfd_gtt_sa_init_error;
+ }
+
kfd_doorbell_init(kfd);
if (kfd_topology_add_device(kfd) != 0) {
@@ -241,7 +270,9 @@
kfd_interrupt_error:
kfd_topology_remove_device(kfd);
kfd_topology_add_device_error:
- kfd2kgd->fini_sa_manager(kfd->kgd);
+ kfd_gtt_sa_fini(kfd);
+kfd_gtt_sa_init_error:
+ kfd2kgd->free_gtt_mem(kfd->kgd, kfd->gtt_mem);
dev_err(kfd_device,
"device (%x:%x) NOT added due to errors\n",
kfd->pdev->vendor, kfd->pdev->device);
@@ -256,6 +287,8 @@
amd_iommu_free_device(kfd->pdev);
kfd_interrupt_exit(kfd);
kfd_topology_remove_device(kfd);
+ kfd_gtt_sa_fini(kfd);
+ kfd2kgd->free_gtt_mem(kfd->kgd, kfd->gtt_mem);
}
kfree(kfd);
@@ -306,3 +339,185 @@
spin_unlock(&kfd->interrupt_lock);
}
}
+
+static int kfd_gtt_sa_init(struct kfd_dev *kfd, unsigned int buf_size,
+ unsigned int chunk_size)
+{
+ unsigned int num_of_bits;
+
+ BUG_ON(!kfd);
+ BUG_ON(!kfd->gtt_mem);
+ BUG_ON(buf_size < chunk_size);
+ BUG_ON(buf_size == 0);
+ BUG_ON(chunk_size == 0);
+
+ kfd->gtt_sa_chunk_size = chunk_size;
+ kfd->gtt_sa_num_of_chunks = buf_size / chunk_size;
+
+ num_of_bits = kfd->gtt_sa_num_of_chunks / BITS_PER_BYTE;
+ BUG_ON(num_of_bits == 0);
+
+ kfd->gtt_sa_bitmap = kzalloc(num_of_bits, GFP_KERNEL);
+
+ if (!kfd->gtt_sa_bitmap)
+ return -ENOMEM;
+
+ pr_debug("kfd: gtt_sa_num_of_chunks = %d, gtt_sa_bitmap = %p\n",
+ kfd->gtt_sa_num_of_chunks, kfd->gtt_sa_bitmap);
+
+ mutex_init(&kfd->gtt_sa_lock);
+
+ return 0;
+
+}
+
+static void kfd_gtt_sa_fini(struct kfd_dev *kfd)
+{
+ mutex_destroy(&kfd->gtt_sa_lock);
+ kfree(kfd->gtt_sa_bitmap);
+}
+
+static inline uint64_t kfd_gtt_sa_calc_gpu_addr(uint64_t start_addr,
+ unsigned int bit_num,
+ unsigned int chunk_size)
+{
+ return start_addr + bit_num * chunk_size;
+}
+
+static inline uint32_t *kfd_gtt_sa_calc_cpu_addr(void *start_addr,
+ unsigned int bit_num,
+ unsigned int chunk_size)
+{
+ return (uint32_t *) ((uint64_t) start_addr + bit_num * chunk_size);
+}
+
+int kfd_gtt_sa_allocate(struct kfd_dev *kfd, unsigned int size,
+ struct kfd_mem_obj **mem_obj)
+{
+ unsigned int found, start_search, cur_size;
+
+ BUG_ON(!kfd);
+
+ if (size == 0)
+ return -EINVAL;
+
+ if (size > kfd->gtt_sa_num_of_chunks * kfd->gtt_sa_chunk_size)
+ return -ENOMEM;
+
+ *mem_obj = kmalloc(sizeof(struct kfd_mem_obj), GFP_KERNEL);
+ if ((*mem_obj) == NULL)
+ return -ENOMEM;
+
+ pr_debug("kfd: allocated mem_obj = %p for size = %d\n", *mem_obj, size);
+
+ start_search = 0;
+
+ mutex_lock(&kfd->gtt_sa_lock);
+
+kfd_gtt_restart_search:
+ /* Find the first chunk that is free */
+ found = find_next_zero_bit(kfd->gtt_sa_bitmap,
+ kfd->gtt_sa_num_of_chunks,
+ start_search);
+
+ pr_debug("kfd: found = %d\n", found);
+
+ /* If there wasn't any free chunk, bail out */
+ if (found == kfd->gtt_sa_num_of_chunks)
+ goto kfd_gtt_no_free_chunk;
+
+ /* Update fields of mem_obj */
+ (*mem_obj)->range_start = found;
+ (*mem_obj)->range_end = found;
+ (*mem_obj)->gpu_addr = kfd_gtt_sa_calc_gpu_addr(
+ kfd->gtt_start_gpu_addr,
+ found,
+ kfd->gtt_sa_chunk_size);
+ (*mem_obj)->cpu_ptr = kfd_gtt_sa_calc_cpu_addr(
+ kfd->gtt_start_cpu_ptr,
+ found,
+ kfd->gtt_sa_chunk_size);
+
+ pr_debug("kfd: gpu_addr = %p, cpu_addr = %p\n",
+ (uint64_t *) (*mem_obj)->gpu_addr, (*mem_obj)->cpu_ptr);
+
+ /* If we need only one chunk, mark it as allocated and get out */
+ if (size <= kfd->gtt_sa_chunk_size) {
+ pr_debug("kfd: single bit\n");
+ set_bit(found, kfd->gtt_sa_bitmap);
+ goto kfd_gtt_out;
+ }
+
+ /* Otherwise, try to see if we have enough contiguous chunks */
+ cur_size = size - kfd->gtt_sa_chunk_size;
+ do {
+ (*mem_obj)->range_end =
+ find_next_zero_bit(kfd->gtt_sa_bitmap,
+ kfd->gtt_sa_num_of_chunks, ++found);
+ /*
+ * If the next free chunk is not contiguous then we need to
+ * restart our search from the last free chunk we found (which
+ * wasn't contiguous to the previous ones).
+ */
+ if ((*mem_obj)->range_end != found) {
+ start_search = found;
+ goto kfd_gtt_restart_search;
+ }
+
+ /*
+ * If we reached end of buffer, bail out with error
+ */
+ if (found == kfd->gtt_sa_num_of_chunks)
+ goto kfd_gtt_no_free_chunk;
+
+ /* Check if we don't need another chunk */
+ if (cur_size <= kfd->gtt_sa_chunk_size)
+ cur_size = 0;
+ else
+ cur_size -= kfd->gtt_sa_chunk_size;
+
+ } while (cur_size > 0);
+
+ pr_debug("kfd: range_start = %d, range_end = %d\n",
+ (*mem_obj)->range_start, (*mem_obj)->range_end);
+
+ /* Mark the chunks as allocated */
+ for (found = (*mem_obj)->range_start;
+ found <= (*mem_obj)->range_end;
+ found++)
+ set_bit(found, kfd->gtt_sa_bitmap);
+
+kfd_gtt_out:
+ mutex_unlock(&kfd->gtt_sa_lock);
+ return 0;
+
+kfd_gtt_no_free_chunk:
+ pr_debug("kfd: allocation failed with mem_obj = %p\n", mem_obj);
+ mutex_unlock(&kfd->gtt_sa_lock);
+ kfree(*mem_obj);
+ return -ENOMEM;
+}
+
+int kfd_gtt_sa_free(struct kfd_dev *kfd, struct kfd_mem_obj *mem_obj)
+{
+ unsigned int bit;
+
+ BUG_ON(!kfd);
+ BUG_ON(!mem_obj);
+
+ pr_debug("kfd: free mem_obj = %p, range_start = %d, range_end = %d\n",
+ mem_obj, mem_obj->range_start, mem_obj->range_end);
+
+ mutex_lock(&kfd->gtt_sa_lock);
+
+ /* Mark the chunks as free */
+ for (bit = mem_obj->range_start;
+ bit <= mem_obj->range_end;
+ bit++)
+ clear_bit(bit, kfd->gtt_sa_bitmap);
+
+ mutex_unlock(&kfd->gtt_sa_lock);
+
+ kfree(mem_obj);
+ return 0;
+}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
index 924e90c..6806e64 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
@@ -46,9 +46,24 @@
static int create_compute_queue_nocpsch(struct device_queue_manager *dqm,
struct queue *q,
struct qcm_process_device *qpd);
+
static int execute_queues_cpsch(struct device_queue_manager *dqm, bool lock);
static int destroy_queues_cpsch(struct device_queue_manager *dqm, bool lock);
+static int create_sdma_queue_nocpsch(struct device_queue_manager *dqm,
+ struct queue *q,
+ struct qcm_process_device *qpd);
+
+static void deallocate_sdma_queue(struct device_queue_manager *dqm,
+ unsigned int sdma_queue_id);
+
+static inline
+enum KFD_MQD_TYPE get_mqd_type_from_queue_type(enum kfd_queue_type type)
+{
+ if (type == KFD_QUEUE_TYPE_SDMA)
+ return KFD_MQD_TYPE_CIK_SDMA;
+ return KFD_MQD_TYPE_CIK_CP;
+}
static inline unsigned int get_pipes_num(struct device_queue_manager *dqm)
{
@@ -75,7 +90,6 @@
nybble = (pdd->lds_base >> 60) & 0x0E;
return nybble;
-
}
static inline unsigned int get_sh_mem_bases_32(struct kfd_process_device *pdd)
@@ -190,7 +204,10 @@
*allocated_vmid = qpd->vmid;
q->properties.vmid = qpd->vmid;
- retval = create_compute_queue_nocpsch(dqm, q, qpd);
+ if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE)
+ retval = create_compute_queue_nocpsch(dqm, q, qpd);
+ if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
+ retval = create_sdma_queue_nocpsch(dqm, q, qpd);
if (retval != 0) {
if (list_empty(&qpd->queues_list)) {
@@ -203,7 +220,8 @@
list_add(&q->list, &qpd->queues_list);
dqm->queue_count++;
-
+ if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
+ dqm->sdma_queue_count++;
mutex_unlock(&dqm->lock);
return 0;
}
@@ -280,8 +298,7 @@
struct queue *q)
{
int retval;
- struct mqd_manager *mqd;
-
+ struct mqd_manager *mqd, *mqd_sdma;
BUG_ON(!dqm || !q || !q->mqd || !qpd);
retval = 0;
@@ -295,6 +312,12 @@
goto out;
}
+ mqd_sdma = dqm->get_mqd_manager(dqm, KFD_MQD_TYPE_CIK_SDMA);
+ if (mqd_sdma == NULL) {
+ mutex_unlock(&dqm->lock);
+ return -ENOMEM;
+ }
+
retval = mqd->destroy_mqd(mqd, q->mqd,
KFD_PREEMPT_TYPE_WAVEFRONT,
QUEUE_PREEMPT_DEFAULT_TIMEOUT_MS,
@@ -303,7 +326,12 @@
if (retval != 0)
goto out;
- deallocate_hqd(dqm, q);
+ if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE)
+ deallocate_hqd(dqm, q);
+ else if (q->properties.type == KFD_QUEUE_TYPE_SDMA) {
+ dqm->sdma_queue_count--;
+ deallocate_sdma_queue(dqm, q->sdma_id);
+ }
mqd->uninit_mqd(mqd, q->mqd, q->mqd_mem_obj);
@@ -324,7 +352,7 @@
BUG_ON(!dqm || !q || !q->mqd);
mutex_lock(&dqm->lock);
- mqd = dqm->get_mqd_manager(dqm, KFD_MQD_TYPE_CIK_COMPUTE);
+ mqd = dqm->get_mqd_manager(dqm, q->properties.type);
if (mqd == NULL) {
mutex_unlock(&dqm->lock);
return -ENOMEM;
@@ -491,11 +519,8 @@
* because it contains no data when there are no active queues.
*/
- err = kfd2kgd->allocate_mem(dqm->dev->kgd,
- CIK_HPD_EOP_BYTES * pipes_num,
- PAGE_SIZE,
- KFD_MEMPOOL_SYSTEM_WRITECOMBINE,
- (struct kgd_mem **) &dqm->pipeline_mem);
+ err = kfd_gtt_sa_allocate(dqm->dev, CIK_HPD_EOP_BYTES * pipes_num,
+ &dqm->pipeline_mem);
if (err) {
pr_err("kfd: error allocate vidmem num pipes: %d\n",
@@ -510,8 +535,7 @@
mqd = dqm->get_mqd_manager(dqm, KFD_MQD_TYPE_CIK_COMPUTE);
if (mqd == NULL) {
- kfd2kgd->free_mem(dqm->dev->kgd,
- (struct kgd_mem *) dqm->pipeline_mem);
+ kfd_gtt_sa_free(dqm->dev, dqm->pipeline_mem);
return -ENOMEM;
}
@@ -527,7 +551,6 @@
return 0;
}
-
static int init_scheduler(struct device_queue_manager *dqm)
{
int retval;
@@ -557,6 +580,7 @@
mutex_init(&dqm->lock);
INIT_LIST_HEAD(&dqm->queues);
dqm->queue_count = dqm->next_pipe_to_allocate = 0;
+ dqm->sdma_queue_count = 0;
dqm->allocated_queues = kcalloc(get_pipes_num(dqm),
sizeof(unsigned int), GFP_KERNEL);
if (!dqm->allocated_queues) {
@@ -568,6 +592,7 @@
dqm->allocated_queues[i] = (1 << QUEUES_PER_PIPE) - 1;
dqm->vmid_bitmap = (1 << VMID_PER_DEVICE) - 1;
+ dqm->sdma_bitmap = (1 << CIK_SDMA_QUEUES) - 1;
init_scheduler(dqm);
return 0;
@@ -585,8 +610,7 @@
for (i = 0 ; i < KFD_MQD_TYPE_MAX ; i++)
kfree(dqm->mqds[i]);
mutex_destroy(&dqm->lock);
- kfd2kgd->free_mem(dqm->dev->kgd,
- (struct kgd_mem *) dqm->pipeline_mem);
+ kfd_gtt_sa_free(dqm->dev, dqm->pipeline_mem);
}
static int start_nocpsch(struct device_queue_manager *dqm)
@@ -599,6 +623,77 @@
return 0;
}
+static int allocate_sdma_queue(struct device_queue_manager *dqm,
+ unsigned int *sdma_queue_id)
+{
+ int bit;
+
+ if (dqm->sdma_bitmap == 0)
+ return -ENOMEM;
+
+ bit = find_first_bit((unsigned long *)&dqm->sdma_bitmap,
+ CIK_SDMA_QUEUES);
+
+ clear_bit(bit, (unsigned long *)&dqm->sdma_bitmap);
+ *sdma_queue_id = bit;
+
+ return 0;
+}
+
+static void deallocate_sdma_queue(struct device_queue_manager *dqm,
+ unsigned int sdma_queue_id)
+{
+	/* sdma_queue_id is unsigned, so only the upper bound needs checking */
+	if (sdma_queue_id >= CIK_SDMA_QUEUES)
+ return;
+ set_bit(sdma_queue_id, (unsigned long *)&dqm->sdma_bitmap);
+}
+
+static void init_sdma_vm(struct device_queue_manager *dqm, struct queue *q,
+ struct qcm_process_device *qpd)
+{
+ uint32_t value = SDMA_ATC;
+
+ if (q->process->is_32bit_user_mode)
+ value |= SDMA_VA_PTR32 | get_sh_mem_bases_32(qpd_to_pdd(qpd));
+ else
+ value |= SDMA_VA_SHARED_BASE(get_sh_mem_bases_nybble_64(
+ qpd_to_pdd(qpd)));
+ q->properties.sdma_vm_addr = value;
+}
+
+static int create_sdma_queue_nocpsch(struct device_queue_manager *dqm,
+ struct queue *q,
+ struct qcm_process_device *qpd)
+{
+ struct mqd_manager *mqd;
+ int retval;
+
+ mqd = dqm->get_mqd_manager(dqm, KFD_MQD_TYPE_CIK_SDMA);
+ if (!mqd)
+ return -ENOMEM;
+
+ retval = allocate_sdma_queue(dqm, &q->sdma_id);
+ if (retval != 0)
+ return retval;
+
+ q->properties.sdma_queue_id = q->sdma_id % CIK_SDMA_QUEUES_PER_ENGINE;
+ q->properties.sdma_engine_id = q->sdma_id / CIK_SDMA_ENGINE_NUM;
+
+ pr_debug("kfd: sdma id is: %d\n", q->sdma_id);
+ pr_debug(" sdma queue id: %d\n", q->properties.sdma_queue_id);
+ pr_debug(" sdma engine id: %d\n", q->properties.sdma_engine_id);
+
+ retval = mqd->init_mqd(mqd, &q->mqd, &q->mqd_mem_obj,
+ &q->gart_mqd_addr, &q->properties);
+ if (retval != 0) {
+ deallocate_sdma_queue(dqm, q->sdma_id);
+ return retval;
+ }
+
+ init_sdma_vm(dqm, q, qpd);
+ return 0;
+}
+
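
With the constants introduced below in kfd_device_queue_manager.h (CIK_SDMA_QUEUES = 4, CIK_SDMA_QUEUES_PER_ENGINE = 2, CIK_SDMA_ENGINE_NUM = 2), the modulo/division pair above spreads the four SDMA queue ids across the two engines. Both divisors happen to equal 2 on CIK, which is why dividing by CIK_SDMA_ENGINE_NUM yields the engine index here. A standalone check of the mapping:

#include <stdio.h>

int main(void)
{
        unsigned int id;

        for (id = 0; id < 4; id++)
                printf("sdma_id %u -> engine %u, queue %u\n",
                       id, id / 2, id % 2);
        /*
         * Prints:
         *   sdma_id 0 -> engine 0, queue 0
         *   sdma_id 1 -> engine 0, queue 1
         *   sdma_id 2 -> engine 1, queue 0
         *   sdma_id 3 -> engine 1, queue 1
         */
        return 0;
}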
/*
* Device Queue Manager implementation for cp scheduler
*/
@@ -640,6 +735,7 @@
mutex_init(&dqm->lock);
INIT_LIST_HEAD(&dqm->queues);
dqm->queue_count = dqm->processes_count = 0;
+ dqm->sdma_queue_count = 0;
dqm->active_runlist = false;
retval = init_pipelines(dqm, get_pipes_num(dqm), 0);
if (retval != 0)
@@ -672,18 +768,14 @@
pr_debug("kfd: allocating fence memory\n");
/* allocate fence memory on the gart */
- retval = kfd2kgd->allocate_mem(dqm->dev->kgd,
- sizeof(*dqm->fence_addr),
- 32,
- KFD_MEMPOOL_SYSTEM_WRITECOMBINE,
- (struct kgd_mem **) &dqm->fence_mem);
+ retval = kfd_gtt_sa_allocate(dqm->dev, sizeof(*dqm->fence_addr),
+ &dqm->fence_mem);
if (retval != 0)
goto fail_allocate_vidmem;
dqm->fence_addr = dqm->fence_mem->cpu_ptr;
dqm->fence_gpu_addr = dqm->fence_mem->gpu_addr;
-
list_for_each_entry(node, &dqm->queues, list)
if (node->qpd->pqm->process && dqm->dev)
kfd_bind_process_to_device(dqm->dev,
@@ -712,8 +804,7 @@
pdd = qpd_to_pdd(node->qpd);
pdd->bound = false;
}
- kfd2kgd->free_mem(dqm->dev->kgd,
- (struct kgd_mem *) dqm->fence_mem);
+ kfd_gtt_sa_free(dqm->dev, dqm->fence_mem);
pm_uninit(&dqm->packets);
return 0;
@@ -754,6 +845,14 @@
mutex_unlock(&dqm->lock);
}
+static void select_sdma_engine_id(struct queue *q)
+{
+ static int sdma_id;
+
+ q->sdma_id = sdma_id;
+	sdma_id = (sdma_id + 1) % CIK_SDMA_ENGINE_NUM;
+}
+
static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
struct qcm_process_device *qpd, int *allocate_vmid)
{
@@ -769,7 +868,12 @@
mutex_lock(&dqm->lock);
- mqd = dqm->get_mqd_manager(dqm, KFD_MQD_TYPE_CIK_CP);
+ if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
+ select_sdma_engine_id(q);
+
+ mqd = dqm->get_mqd_manager(dqm,
+ get_mqd_type_from_queue_type(q->properties.type));
+
if (mqd == NULL) {
mutex_unlock(&dqm->lock);
return -ENOMEM;
@@ -786,6 +890,9 @@
retval = execute_queues_cpsch(dqm, false);
}
+ if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
+ dqm->sdma_queue_count++;
+
out:
mutex_unlock(&dqm->lock);
return retval;
@@ -809,6 +916,14 @@
return 0;
}
+static int destroy_sdma_queues(struct device_queue_manager *dqm,
+ unsigned int sdma_engine)
+{
+ return pm_send_unmap_queue(&dqm->packets, KFD_QUEUE_TYPE_SDMA,
+ KFD_PREEMPT_TYPE_FILTER_ALL_QUEUES, 0, false,
+ sdma_engine);
+}
+
static int destroy_queues_cpsch(struct device_queue_manager *dqm, bool lock)
{
int retval;
@@ -821,6 +936,15 @@
mutex_lock(&dqm->lock);
if (dqm->active_runlist == false)
goto out;
+
+ pr_debug("kfd: Before destroying queues, sdma queue count is : %u\n",
+ dqm->sdma_queue_count);
+
+ if (dqm->sdma_queue_count > 0) {
+ destroy_sdma_queues(dqm, 0);
+ destroy_sdma_queues(dqm, 1);
+ }
+
retval = pm_send_unmap_queue(&dqm->packets, KFD_QUEUE_TYPE_COMPUTE,
KFD_PREEMPT_TYPE_FILTER_ALL_QUEUES, 0, false, 0);
if (retval != 0)
@@ -892,13 +1016,16 @@
/* remove queue from list to prevent rescheduling after preemption */
mutex_lock(&dqm->lock);
-
- mqd = dqm->get_mqd_manager(dqm, KFD_MQD_TYPE_CIK_CP);
+ mqd = dqm->get_mqd_manager(dqm,
+ get_mqd_type_from_queue_type(q->properties.type));
if (!mqd) {
retval = -ENOMEM;
goto failed;
}
+ if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
+ dqm->sdma_queue_count--;
+
list_del(&q->list);
dqm->queue_count--;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
index c3f189e8..554c06e 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
@@ -36,6 +36,9 @@
#define KFD_VMID_START_OFFSET (8)
#define VMID_PER_DEVICE CIK_VMID_NUM
#define KFD_DQM_FIRST_PIPE (0)
+#define CIK_SDMA_QUEUES (4)
+#define CIK_SDMA_QUEUES_PER_ENGINE (2)
+#define CIK_SDMA_ENGINE_NUM (2)
struct device_process_node {
struct qcm_process_device *qpd;
@@ -130,8 +133,10 @@
struct list_head queues;
unsigned int processes_count;
unsigned int queue_count;
+ unsigned int sdma_queue_count;
unsigned int next_pipe_to_allocate;
unsigned int *allocated_queues;
+ unsigned int sdma_bitmap;
unsigned int vmid_bitmap;
uint64_t pipelines_addr;
struct kfd_mem_obj *pipeline_mem;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c b/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c
index b5791a5..1a9b355 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c
@@ -137,10 +137,6 @@
if (dev == NULL)
return -EINVAL;
- /* Find if pdd exists for combination of process and gpu id */
- if (!kfd_get_process_device_data(dev, process, 0))
- return -EINVAL;
-
/* Calculate physical address of doorbell */
address = kfd_get_process_doorbells(dev, process);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c b/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c
index 66df4da..35b9875 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c
@@ -299,14 +299,15 @@
struct kfd_dev *dev;
struct kfd_process_device *pdd;
- mutex_lock(&process->mutex);
-
/*Iterating over all devices*/
while ((dev = kfd_topology_enum_kfd_devices(id)) != NULL &&
id < NUM_OF_SUPPORTED_GPUS) {
- pdd = kfd_get_process_device_data(dev, process, 1);
-
+ pdd = kfd_create_process_device_data(dev, process);
+ if (pdd == NULL) {
+ pr_err("Failed to create process device data\n");
+ return -1;
+ }
/*
* For 64 bit process aperture will be statically reserved in
* the x86_64 non canonical process address space
@@ -348,8 +349,6 @@
id++;
}
- mutex_unlock(&process->mutex);
-
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
index 9350714..0fd8bb7 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
@@ -72,11 +72,7 @@
if (prop.doorbell_ptr == NULL)
goto err_get_kernel_doorbell;
- retval = kfd2kgd->allocate_mem(dev->kgd,
- queue_size,
- PAGE_SIZE,
- KFD_MEMPOOL_SYSTEM_WRITECOMBINE,
- (struct kgd_mem **) &kq->pq);
+ retval = kfd_gtt_sa_allocate(dev, queue_size, &kq->pq);
if (retval != 0)
goto err_pq_allocate_vidmem;
@@ -84,11 +80,8 @@
kq->pq_kernel_addr = kq->pq->cpu_ptr;
kq->pq_gpu_addr = kq->pq->gpu_addr;
- retval = kfd2kgd->allocate_mem(dev->kgd,
- sizeof(*kq->rptr_kernel),
- 32,
- KFD_MEMPOOL_SYSTEM_WRITECOMBINE,
- (struct kgd_mem **) &kq->rptr_mem);
+ retval = kfd_gtt_sa_allocate(dev, sizeof(*kq->rptr_kernel),
+ &kq->rptr_mem);
if (retval != 0)
goto err_rptr_allocate_vidmem;
@@ -96,11 +89,8 @@
kq->rptr_kernel = kq->rptr_mem->cpu_ptr;
kq->rptr_gpu_addr = kq->rptr_mem->gpu_addr;
- retval = kfd2kgd->allocate_mem(dev->kgd,
- sizeof(*kq->wptr_kernel),
- 32,
- KFD_MEMPOOL_SYSTEM_WRITECOMBINE,
- (struct kgd_mem **) &kq->wptr_mem);
+ retval = kfd_gtt_sa_allocate(dev, sizeof(*kq->wptr_kernel),
+ &kq->wptr_mem);
if (retval != 0)
goto err_wptr_allocate_vidmem;
@@ -145,11 +135,8 @@
} else {
/* allocate fence for DIQ */
- retval = kfd2kgd->allocate_mem(dev->kgd,
- sizeof(uint32_t),
- 32,
- KFD_MEMPOOL_SYSTEM_WRITECOMBINE,
- (struct kgd_mem **) &kq->fence_mem_obj);
+ retval = kfd_gtt_sa_allocate(dev, sizeof(uint32_t),
+ &kq->fence_mem_obj);
if (retval != 0)
goto err_alloc_fence;
@@ -165,11 +152,11 @@
err_init_mqd:
uninit_queue(kq->queue);
err_init_queue:
- kfd2kgd->free_mem(dev->kgd, (struct kgd_mem *) kq->wptr_mem);
+ kfd_gtt_sa_free(dev, kq->wptr_mem);
err_wptr_allocate_vidmem:
- kfd2kgd->free_mem(dev->kgd, (struct kgd_mem *) kq->rptr_mem);
+ kfd_gtt_sa_free(dev, kq->rptr_mem);
err_rptr_allocate_vidmem:
- kfd2kgd->free_mem(dev->kgd, (struct kgd_mem *) kq->pq);
+ kfd_gtt_sa_free(dev, kq->pq);
err_pq_allocate_vidmem:
pr_err("kfd: error init pq\n");
kfd_release_kernel_doorbell(dev, prop.doorbell_ptr);
@@ -190,10 +177,12 @@
QUEUE_PREEMPT_DEFAULT_TIMEOUT_MS,
kq->queue->pipe,
kq->queue->queue);
+ else if (kq->queue->properties.type == KFD_QUEUE_TYPE_DIQ)
+ kfd_gtt_sa_free(kq->dev, kq->fence_mem_obj);
- kfd2kgd->free_mem(kq->dev->kgd, (struct kgd_mem *) kq->rptr_mem);
- kfd2kgd->free_mem(kq->dev->kgd, (struct kgd_mem *) kq->wptr_mem);
- kfd2kgd->free_mem(kq->dev->kgd, (struct kgd_mem *) kq->pq);
+ kfd_gtt_sa_free(kq->dev, kq->rptr_mem);
+ kfd_gtt_sa_free(kq->dev, kq->wptr_mem);
+ kfd_gtt_sa_free(kq->dev, kq->pq);
kfd_release_kernel_doorbell(kq->dev,
kq->queue->properties.doorbell_ptr);
uninit_queue(kq->queue);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c
index adc3147..678c33f 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c
@@ -26,6 +26,7 @@
#include "kfd_priv.h"
#include "kfd_mqd_manager.h"
#include "cik_regs.h"
+#include "../../radeon/cikd.h"
#include "../../radeon/cik_reg.h"
inline void busy_wait(unsigned long ms)
@@ -51,11 +52,8 @@
pr_debug("kfd: In func %s\n", __func__);
- retval = kfd2kgd->allocate_mem(mm->dev->kgd,
- sizeof(struct cik_mqd),
- 256,
- KFD_MEMPOOL_SYSTEM_WRITECOMBINE,
- (struct kgd_mem **) mqd_mem_obj);
+ retval = kfd_gtt_sa_allocate(mm->dev, sizeof(struct cik_mqd),
+ mqd_mem_obj);
if (retval != 0)
return -ENOMEM;
@@ -111,18 +109,60 @@
return retval;
}
+static int init_mqd_sdma(struct mqd_manager *mm, void **mqd,
+ struct kfd_mem_obj **mqd_mem_obj, uint64_t *gart_addr,
+ struct queue_properties *q)
+{
+ int retval;
+ struct cik_sdma_rlc_registers *m;
+
+ BUG_ON(!mm || !mqd || !mqd_mem_obj);
+
+ retval = kfd_gtt_sa_allocate(mm->dev,
+ sizeof(struct cik_sdma_rlc_registers),
+ mqd_mem_obj);
+
+ if (retval != 0)
+ return -ENOMEM;
+
+ m = (struct cik_sdma_rlc_registers *) (*mqd_mem_obj)->cpu_ptr;
+
+ memset(m, 0, sizeof(struct cik_sdma_rlc_registers));
+
+ *mqd = m;
+ if (gart_addr != NULL)
+ *gart_addr = (*mqd_mem_obj)->gpu_addr;
+
+ retval = mm->update_mqd(mm, m, q);
+
+ return retval;
+}
+
static void uninit_mqd(struct mqd_manager *mm, void *mqd,
struct kfd_mem_obj *mqd_mem_obj)
{
BUG_ON(!mm || !mqd);
- kfd2kgd->free_mem(mm->dev->kgd, (struct kgd_mem *) mqd_mem_obj);
+ kfd_gtt_sa_free(mm->dev, mqd_mem_obj);
+}
+
+static void uninit_mqd_sdma(struct mqd_manager *mm, void *mqd,
+ struct kfd_mem_obj *mqd_mem_obj)
+{
+ BUG_ON(!mm || !mqd);
+ kfd_gtt_sa_free(mm->dev, mqd_mem_obj);
}
static int load_mqd(struct mqd_manager *mm, void *mqd, uint32_t pipe_id,
uint32_t queue_id, uint32_t __user *wptr)
{
return kfd2kgd->hqd_load(mm->dev->kgd, mqd, pipe_id, queue_id, wptr);
+}
+static int load_mqd_sdma(struct mqd_manager *mm, void *mqd,
+ uint32_t pipe_id, uint32_t queue_id,
+ uint32_t __user *wptr)
+{
+ return kfd2kgd->hqd_sdma_load(mm->dev->kgd, mqd);
}
static int update_mqd(struct mqd_manager *mm, void *mqd,
@@ -170,6 +210,41 @@
return 0;
}
+static int update_mqd_sdma(struct mqd_manager *mm, void *mqd,
+ struct queue_properties *q)
+{
+ struct cik_sdma_rlc_registers *m;
+
+ BUG_ON(!mm || !mqd || !q);
+
+ m = get_sdma_mqd(mqd);
+ m->sdma_rlc_rb_cntl =
+ SDMA_RB_SIZE((ffs(q->queue_size / sizeof(unsigned int)))) |
+ SDMA_RB_VMID(q->vmid) |
+ SDMA_RPTR_WRITEBACK_ENABLE |
+ SDMA_RPTR_WRITEBACK_TIMER(6);
+
+ m->sdma_rlc_rb_base = lower_32_bits(q->queue_address >> 8);
+ m->sdma_rlc_rb_base_hi = upper_32_bits(q->queue_address >> 8);
+ m->sdma_rlc_rb_rptr_addr_lo = lower_32_bits((uint64_t)q->read_ptr);
+ m->sdma_rlc_rb_rptr_addr_hi = upper_32_bits((uint64_t)q->read_ptr);
+ m->sdma_rlc_doorbell = SDMA_OFFSET(q->doorbell_off) | SDMA_DB_ENABLE;
+ m->sdma_rlc_virtual_addr = q->sdma_vm_addr;
+
+ m->sdma_engine_id = q->sdma_engine_id;
+ m->sdma_queue_id = q->sdma_queue_id;
+
+ q->is_active = false;
+ if (q->queue_size > 0 &&
+ q->queue_address != 0 &&
+ q->queue_percent > 0) {
+ m->sdma_rlc_rb_cntl |= SDMA_RB_ENABLE;
+ q->is_active = true;
+ }
+
+ return 0;
+}
+
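
The SDMA_RB_SIZE encoding in update_mqd_sdma() above relies on queue_size being a power of two: ffs() returns the 1-based index of the lowest set bit, so the field ends up holding log2(ring size in dwords) + 1. A standalone sanity check of that arithmetic for a 4 KiB ring (the real SDMA_* macros come from the cik register headers included above):

#include <strings.h>
#include <assert.h>

int main(void)
{
        unsigned int queue_size = 4096; /* bytes, assumed power of two */

        assert((queue_size & (queue_size - 1)) == 0);

        /* 4096 / 4 = 1024 = 1 << 10, and ffs(1 << 10) = 11 */
        assert(ffs((int)(queue_size / sizeof(unsigned int))) == 11);
        return 0;
}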
static int destroy_mqd(struct mqd_manager *mm, void *mqd,
enum kfd_preempt_type type,
unsigned int timeout, uint32_t pipe_id,
@@ -179,6 +254,18 @@
pipe_id, queue_id);
}
+/*
+ * the preempt type is ignored here because there is only one way
+ * to preempt an SDMA queue
+ */
+static int destroy_mqd_sdma(struct mqd_manager *mm, void *mqd,
+ enum kfd_preempt_type type,
+ unsigned int timeout, uint32_t pipe_id,
+ uint32_t queue_id)
+{
+ return kfd2kgd->hqd_sdma_destroy(mm->dev->kgd, mqd, timeout);
+}
+
static bool is_occupied(struct mqd_manager *mm, void *mqd,
uint64_t queue_address, uint32_t pipe_id,
uint32_t queue_id)
@@ -189,6 +276,13 @@
}
+static bool is_occupied_sdma(struct mqd_manager *mm, void *mqd,
+ uint64_t queue_address, uint32_t pipe_id,
+ uint32_t queue_id)
+{
+ return kfd2kgd->hqd_sdma_is_occupied(mm->dev->kgd, mqd);
+}
+
/*
* HIQ MQD Implementation, concrete implementation for HIQ MQD implementation.
* The HIQ queue in Kaveri is using the same MQD structure as all the user mode
@@ -207,11 +301,8 @@
pr_debug("kfd: In func %s\n", __func__);
- retval = kfd2kgd->allocate_mem(mm->dev->kgd,
- sizeof(struct cik_mqd),
- 256,
- KFD_MEMPOOL_SYSTEM_WRITECOMBINE,
- (struct kgd_mem **) mqd_mem_obj);
+ retval = kfd_gtt_sa_allocate(mm->dev, sizeof(struct cik_mqd),
+ mqd_mem_obj);
if (retval != 0)
return -ENOMEM;
@@ -301,6 +392,21 @@
return 0;
}
+/*
+ * SDMA MQD Implementation
+ */
+
+struct cik_sdma_rlc_registers *get_sdma_mqd(void *mqd)
+{
+ struct cik_sdma_rlc_registers *m;
+
+ BUG_ON(!mqd);
+
+ m = (struct cik_sdma_rlc_registers *)mqd;
+
+ return m;
+}
+
struct mqd_manager *mqd_manager_init(enum KFD_MQD_TYPE type,
struct kfd_dev *dev)
{
@@ -335,6 +441,14 @@
mqd->destroy_mqd = destroy_mqd;
mqd->is_occupied = is_occupied;
break;
+ case KFD_MQD_TYPE_CIK_SDMA:
+ mqd->init_mqd = init_mqd_sdma;
+ mqd->uninit_mqd = uninit_mqd_sdma;
+ mqd->load_mqd = load_mqd_sdma;
+ mqd->update_mqd = update_mqd_sdma;
+ mqd->destroy_mqd = destroy_mqd_sdma;
+ mqd->is_occupied = is_occupied_sdma;
+ break;
default:
kfree(mqd);
return NULL;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
index 5ce9233..3cda952 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
@@ -97,11 +97,8 @@
pm_calc_rlib_size(pm, rl_buffer_size, is_over_subscription);
- retval = kfd2kgd->allocate_mem(pm->dqm->dev->kgd,
- *rl_buffer_size,
- PAGE_SIZE,
- KFD_MEMPOOL_SYSTEM_WRITECOMBINE,
- (struct kgd_mem **) &pm->ib_buffer_obj);
+ retval = kfd_gtt_sa_allocate(pm->dqm->dev, *rl_buffer_size,
+ &pm->ib_buffer_obj);
if (retval != 0) {
pr_err("kfd: failed to allocate runlist IB\n");
@@ -557,8 +554,7 @@
mutex_lock(&pm->lock);
if (pm->allocated) {
- kfd2kgd->free_mem(pm->dqm->dev->kgd,
- (struct kgd_mem *) pm->ib_buffer_obj);
+ kfd_gtt_sa_free(pm->dqm->dev, pm->ib_buffer_obj);
pm->allocated = false;
}
mutex_unlock(&pm->lock);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
index f9fb81e3..a79c217 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
@@ -107,9 +107,17 @@
struct kfd_device_info {
unsigned int max_pasid_bits;
size_t ih_ring_entry_size;
+ uint8_t num_of_watch_points;
uint16_t mqd_size_aligned;
};
+struct kfd_mem_obj {
+ uint32_t range_start;
+ uint32_t range_end;
+ uint64_t gpu_addr;
+ uint32_t *cpu_ptr;
+};
+
struct kfd_dev {
struct kgd_dev *kgd;
@@ -135,6 +143,14 @@
struct kgd2kfd_shared_resources shared_resources;
+ void *gtt_mem;
+ uint64_t gtt_start_gpu_addr;
+ void *gtt_start_cpu_ptr;
+ void *gtt_sa_bitmap;
+ struct mutex gtt_sa_lock;
+ unsigned int gtt_sa_chunk_size;
+ unsigned int gtt_sa_num_of_chunks;
+
void *interrupt_ring;
size_t interrupt_ring_size;
atomic_t interrupt_ring_rptr;
@@ -162,12 +178,6 @@
extern const struct kfd2kgd_calls *kfd2kgd;
-struct kfd_mem_obj {
- void *bo;
- uint64_t gpu_addr;
- uint32_t *cpu_ptr;
-};
-
enum kfd_mempool {
KFD_MEMPOOL_SYSTEM_CACHEABLE = 1,
KFD_MEMPOOL_SYSTEM_WRITECOMBINE = 2,
@@ -285,6 +295,10 @@
bool is_active;
/* Not relevant for user mode queues in cp scheduling */
unsigned int vmid;
+	/* Relevant only for SDMA queues */
+ uint32_t sdma_engine_id;
+ uint32_t sdma_queue_id;
+ uint32_t sdma_vm_addr;
};
/**
@@ -327,6 +341,8 @@
uint32_t pipe;
uint32_t queue;
+ unsigned int sdma_id;
+
struct kfd_process *process;
struct kfd_dev *device;
};
@@ -472,8 +488,9 @@
struct kfd_process *p);
void kfd_unbind_process_from_device(struct kfd_dev *dev, unsigned int pasid);
struct kfd_process_device *kfd_get_process_device_data(struct kfd_dev *dev,
- struct kfd_process *p,
- int create_pdd);
+ struct kfd_process *p);
+struct kfd_process_device *kfd_create_process_device_data(struct kfd_dev *dev,
+ struct kfd_process *p);
/* Process device data iterator */
struct kfd_process_device *kfd_get_first_process_device_data(struct kfd_process *p);
@@ -501,6 +518,13 @@
struct kfd_process *process,
unsigned int queue_id);
+/* GTT Sub-Allocator */
+
+int kfd_gtt_sa_allocate(struct kfd_dev *kfd, unsigned int size,
+ struct kfd_mem_obj **mem_obj);
+
+int kfd_gtt_sa_free(struct kfd_dev *kfd, struct kfd_mem_obj *mem_obj);
+
extern struct device *kfd_device;
/* Topology */
@@ -528,6 +552,8 @@
/* Queue Context Management */
inline uint32_t lower_32(uint64_t x);
inline uint32_t upper_32(uint64_t x);
+struct cik_sdma_rlc_registers *get_sdma_mqd(void *mqd);
+inline uint32_t get_sdma_base_addr(struct cik_sdma_rlc_registers *m);
int init_queue(struct queue **q, struct queue_properties properties);
void uninit_queue(struct queue *q);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
index b85eb0b..a369c14 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
@@ -26,6 +26,8 @@
#include <linux/slab.h>
#include <linux/amd-iommu.h>
#include <linux/notifier.h>
+#include <linux/compat.h>
+
struct mm_struct;
#include "kfd_priv.h"
@@ -285,8 +287,15 @@
if (err != 0)
goto err_process_pqm_init;
+	/* init process apertures */
+ process->is_32bit_user_mode = is_compat_task();
+ if (kfd_init_apertures(process) != 0)
+		goto err_init_apertures;
+
return process;
+err_init_apertures:
+ pqm_uninit(&process->pqm);
err_process_pqm_init:
hash_del_rcu(&process->kfd_processes);
synchronize_rcu();
@@ -302,24 +311,29 @@
}
struct kfd_process_device *kfd_get_process_device_data(struct kfd_dev *dev,
- struct kfd_process *p,
- int create_pdd)
+ struct kfd_process *p)
{
struct kfd_process_device *pdd = NULL;
list_for_each_entry(pdd, &p->per_device_data, per_device_list)
if (pdd->dev == dev)
- return pdd;
+ break;
- if (create_pdd) {
- pdd = kzalloc(sizeof(*pdd), GFP_KERNEL);
- if (pdd != NULL) {
- pdd->dev = dev;
- INIT_LIST_HEAD(&pdd->qpd.queues_list);
- INIT_LIST_HEAD(&pdd->qpd.priv_queue_list);
- pdd->qpd.dqm = dev->dqm;
- list_add(&pdd->per_device_list, &p->per_device_data);
- }
+ return pdd;
+}
+
+struct kfd_process_device *kfd_create_process_device_data(struct kfd_dev *dev,
+ struct kfd_process *p)
+{
+ struct kfd_process_device *pdd = NULL;
+
+ pdd = kzalloc(sizeof(*pdd), GFP_KERNEL);
+ if (pdd != NULL) {
+ pdd->dev = dev;
+ INIT_LIST_HEAD(&pdd->qpd.queues_list);
+ INIT_LIST_HEAD(&pdd->qpd.priv_queue_list);
+ pdd->qpd.dqm = dev->dqm;
+ list_add(&pdd->per_device_list, &p->per_device_data);
}
return pdd;
@@ -335,11 +349,14 @@
struct kfd_process_device *kfd_bind_process_to_device(struct kfd_dev *dev,
struct kfd_process *p)
{
- struct kfd_process_device *pdd = kfd_get_process_device_data(dev, p, 1);
+ struct kfd_process_device *pdd;
int err;
- if (pdd == NULL)
+ pdd = kfd_get_process_device_data(dev, p);
+ if (!pdd) {
+ pr_err("Process device data doesn't exist\n");
return ERR_PTR(-ENOMEM);
+ }
if (pdd->bound)
return pdd;
@@ -375,7 +392,7 @@
pqm_uninit(&p->pqm);
- pdd = kfd_get_process_device_data(dev, p, 0);
+ pdd = kfd_get_process_device_data(dev, p);
/*
* Just mark pdd as unbound, because we still need it to call
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
index 4752678..948b1ca 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
@@ -128,7 +128,6 @@
/* let DQM handle it*/
q_properties->vmid = 0;
q_properties->queue_id = qid;
- q_properties->type = KFD_QUEUE_TYPE_COMPUTE;
retval = init_queue(q, *q_properties);
if (retval != 0)
@@ -167,8 +166,11 @@
q = NULL;
kq = NULL;
- pdd = kfd_get_process_device_data(dev, pqm->process, 1);
- BUG_ON(!pdd);
+ pdd = kfd_get_process_device_data(dev, pqm->process);
+ if (!pdd) {
+ pr_err("Process device data doesn't exist\n");
+ return -1;
+ }
retval = find_available_queue_slot(pqm, qid);
if (retval != 0)
@@ -186,6 +188,7 @@
}
switch (type) {
+ case KFD_QUEUE_TYPE_SDMA:
case KFD_QUEUE_TYPE_COMPUTE:
/* check if there is over subscription */
if ((sched_policy == KFD_SCHED_POLICY_HWS_NO_OVERSUBSCRIPTION) &&
@@ -273,8 +276,11 @@
dev = pqn->q->device;
BUG_ON(!dev);
- pdd = kfd_get_process_device_data(dev, pqm->process, 1);
- BUG_ON(!pdd);
+ pdd = kfd_get_process_device_data(dev, pqm->process);
+ if (!pdd) {
+ pr_err("Process device data doesn't exist\n");
+ return -1;
+ }
if (pqn->kq) {
/* destroy kernel queue (DIQ) */
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
index 5733e28..4886dde 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
@@ -27,6 +27,7 @@
#include <linux/acpi.h>
#include <linux/hash.h>
#include <linux/cpufreq.h>
+#include <linux/log2.h>
#include "kfd_priv.h"
#include "kfd_crat.h"
@@ -630,10 +631,10 @@
static ssize_t node_show(struct kobject *kobj, struct attribute *attr,
char *buffer)
{
- ssize_t ret;
struct kfd_topology_device *dev;
char public_name[KFD_TOPOLOGY_PUBLIC_NAME_SIZE];
uint32_t i;
+ uint32_t log_max_watch_addr;
/* Making sure that the buffer is an empty string */
buffer[0] = 0;
@@ -641,8 +642,10 @@
if (strcmp(attr->name, "gpu_id") == 0) {
dev = container_of(attr, struct kfd_topology_device,
attr_gpuid);
- ret = sysfs_show_32bit_val(buffer, dev->gpu_id);
- } else if (strcmp(attr->name, "name") == 0) {
+ return sysfs_show_32bit_val(buffer, dev->gpu_id);
+ }
+
+ if (strcmp(attr->name, "name") == 0) {
dev = container_of(attr, struct kfd_topology_device,
attr_name);
for (i = 0; i < KFD_TOPOLOGY_PUBLIC_NAME_SIZE; i++) {
@@ -652,76 +655,90 @@
break;
}
public_name[KFD_TOPOLOGY_PUBLIC_NAME_SIZE-1] = 0x0;
- ret = sysfs_show_str_val(buffer, public_name);
- } else {
- dev = container_of(attr, struct kfd_topology_device,
- attr_props);
- sysfs_show_32bit_prop(buffer, "cpu_cores_count",
- dev->node_props.cpu_cores_count);
- sysfs_show_32bit_prop(buffer, "simd_count",
- dev->node_props.simd_count);
-
- if (dev->mem_bank_count < dev->node_props.mem_banks_count) {
- pr_warn("kfd: mem_banks_count truncated from %d to %d\n",
- dev->node_props.mem_banks_count,
- dev->mem_bank_count);
- sysfs_show_32bit_prop(buffer, "mem_banks_count",
- dev->mem_bank_count);
- } else {
- sysfs_show_32bit_prop(buffer, "mem_banks_count",
- dev->node_props.mem_banks_count);
- }
-
- sysfs_show_32bit_prop(buffer, "caches_count",
- dev->node_props.caches_count);
- sysfs_show_32bit_prop(buffer, "io_links_count",
- dev->node_props.io_links_count);
- sysfs_show_32bit_prop(buffer, "cpu_core_id_base",
- dev->node_props.cpu_core_id_base);
- sysfs_show_32bit_prop(buffer, "simd_id_base",
- dev->node_props.simd_id_base);
- sysfs_show_32bit_prop(buffer, "capability",
- dev->node_props.capability);
- sysfs_show_32bit_prop(buffer, "max_waves_per_simd",
- dev->node_props.max_waves_per_simd);
- sysfs_show_32bit_prop(buffer, "lds_size_in_kb",
- dev->node_props.lds_size_in_kb);
- sysfs_show_32bit_prop(buffer, "gds_size_in_kb",
- dev->node_props.gds_size_in_kb);
- sysfs_show_32bit_prop(buffer, "wave_front_size",
- dev->node_props.wave_front_size);
- sysfs_show_32bit_prop(buffer, "array_count",
- dev->node_props.array_count);
- sysfs_show_32bit_prop(buffer, "simd_arrays_per_engine",
- dev->node_props.simd_arrays_per_engine);
- sysfs_show_32bit_prop(buffer, "cu_per_simd_array",
- dev->node_props.cu_per_simd_array);
- sysfs_show_32bit_prop(buffer, "simd_per_cu",
- dev->node_props.simd_per_cu);
- sysfs_show_32bit_prop(buffer, "max_slots_scratch_cu",
- dev->node_props.max_slots_scratch_cu);
- sysfs_show_32bit_prop(buffer, "engine_id",
- dev->node_props.engine_id);
- sysfs_show_32bit_prop(buffer, "vendor_id",
- dev->node_props.vendor_id);
- sysfs_show_32bit_prop(buffer, "device_id",
- dev->node_props.device_id);
- sysfs_show_32bit_prop(buffer, "location_id",
- dev->node_props.location_id);
-
- if (dev->gpu) {
- sysfs_show_32bit_prop(buffer, "max_engine_clk_fcompute",
- kfd2kgd->get_max_engine_clock_in_mhz(
- dev->gpu->kgd));
- sysfs_show_64bit_prop(buffer, "local_mem_size",
- kfd2kgd->get_vmem_size(dev->gpu->kgd));
- }
-
- ret = sysfs_show_32bit_prop(buffer, "max_engine_clk_ccompute",
- cpufreq_quick_get_max(0)/1000);
+ return sysfs_show_str_val(buffer, public_name);
}
- return ret;
+ dev = container_of(attr, struct kfd_topology_device,
+ attr_props);
+ sysfs_show_32bit_prop(buffer, "cpu_cores_count",
+ dev->node_props.cpu_cores_count);
+ sysfs_show_32bit_prop(buffer, "simd_count",
+ dev->node_props.simd_count);
+
+ if (dev->mem_bank_count < dev->node_props.mem_banks_count) {
+ pr_warn("kfd: mem_banks_count truncated from %d to %d\n",
+ dev->node_props.mem_banks_count,
+ dev->mem_bank_count);
+ sysfs_show_32bit_prop(buffer, "mem_banks_count",
+ dev->mem_bank_count);
+ } else {
+ sysfs_show_32bit_prop(buffer, "mem_banks_count",
+ dev->node_props.mem_banks_count);
+ }
+
+ sysfs_show_32bit_prop(buffer, "caches_count",
+ dev->node_props.caches_count);
+ sysfs_show_32bit_prop(buffer, "io_links_count",
+ dev->node_props.io_links_count);
+ sysfs_show_32bit_prop(buffer, "cpu_core_id_base",
+ dev->node_props.cpu_core_id_base);
+ sysfs_show_32bit_prop(buffer, "simd_id_base",
+ dev->node_props.simd_id_base);
+ sysfs_show_32bit_prop(buffer, "capability",
+ dev->node_props.capability);
+ sysfs_show_32bit_prop(buffer, "max_waves_per_simd",
+ dev->node_props.max_waves_per_simd);
+ sysfs_show_32bit_prop(buffer, "lds_size_in_kb",
+ dev->node_props.lds_size_in_kb);
+ sysfs_show_32bit_prop(buffer, "gds_size_in_kb",
+ dev->node_props.gds_size_in_kb);
+ sysfs_show_32bit_prop(buffer, "wave_front_size",
+ dev->node_props.wave_front_size);
+ sysfs_show_32bit_prop(buffer, "array_count",
+ dev->node_props.array_count);
+ sysfs_show_32bit_prop(buffer, "simd_arrays_per_engine",
+ dev->node_props.simd_arrays_per_engine);
+ sysfs_show_32bit_prop(buffer, "cu_per_simd_array",
+ dev->node_props.cu_per_simd_array);
+ sysfs_show_32bit_prop(buffer, "simd_per_cu",
+ dev->node_props.simd_per_cu);
+ sysfs_show_32bit_prop(buffer, "max_slots_scratch_cu",
+ dev->node_props.max_slots_scratch_cu);
+ sysfs_show_32bit_prop(buffer, "vendor_id",
+ dev->node_props.vendor_id);
+ sysfs_show_32bit_prop(buffer, "device_id",
+ dev->node_props.device_id);
+ sysfs_show_32bit_prop(buffer, "location_id",
+ dev->node_props.location_id);
+
+ if (dev->gpu) {
+ log_max_watch_addr =
+ __ilog2_u32(dev->gpu->device_info->num_of_watch_points);
+
+ if (log_max_watch_addr) {
+ dev->node_props.capability |=
+ HSA_CAP_WATCH_POINTS_SUPPORTED;
+
+ dev->node_props.capability |=
+ ((log_max_watch_addr <<
+ HSA_CAP_WATCH_POINTS_TOTALBITS_SHIFT) &
+ HSA_CAP_WATCH_POINTS_TOTALBITS_MASK);
+ }
+
+ sysfs_show_32bit_prop(buffer, "max_engine_clk_fcompute",
+ kfd2kgd->get_max_engine_clock_in_mhz(
+ dev->gpu->kgd));
+ sysfs_show_64bit_prop(buffer, "local_mem_size",
+ kfd2kgd->get_vmem_size(dev->gpu->kgd));
+
+ sysfs_show_32bit_prop(buffer, "fw_version",
+ kfd2kgd->get_fw_version(
+ dev->gpu->kgd,
+ KGD_ENGINE_MEC1));
+ }
+
+ return sysfs_show_32bit_prop(buffer, "max_engine_clk_ccompute",
+ cpufreq_quick_get_max(0)/1000);
}
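
The watch-point block added above folds __ilog2_u32(num_of_watch_points) into the capability word. A standalone model of that packing with illustrative shift/mask values (the real HSA_CAP_WATCH_POINTS_* constants are defined in the topology headers); note that a single watch point gives log2 = 0 and therefore reports no watch-point support, matching the if (log_max_watch_addr) test above:

#include <stdint.h>
#include <assert.h>

#define WATCH_POINTS_SUPPORTED  (1u << 0)       /* illustrative */
#define WATCH_POINTS_BITS_SHIFT 1               /* illustrative */
#define WATCH_POINTS_BITS_MASK  (0xfu << 1)     /* illustrative */

static uint32_t pack_watch_points(uint32_t num_of_watch_points)
{
        uint32_t cap = 0, log = 0, n = num_of_watch_points;

        while (n >>= 1)         /* open-coded __ilog2_u32() */
                log++;

        if (log) {
                cap |= WATCH_POINTS_SUPPORTED;
                cap |= (log << WATCH_POINTS_BITS_SHIFT) &
                        WATCH_POINTS_BITS_MASK;
        }
        return cap;
}

int main(void)
{
        /* e.g. 4 watch points -> log2 = 2 packed into the TOTALBITS field */
        assert(pack_watch_points(4) == (WATCH_POINTS_SUPPORTED | (2u << 1)));
        assert(pack_watch_points(1) == 0);      /* one point: no support bit */
        return 0;
}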
static const struct sysfs_ops node_ops = {
diff --git a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
index 9c729dd..cd3878f 100644
--- a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
+++ b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
@@ -45,6 +45,17 @@
KGD_POOL_FRAMEBUFFER = 3,
};
+enum kgd_engine_type {
+ KGD_ENGINE_PFP = 1,
+ KGD_ENGINE_ME,
+ KGD_ENGINE_CE,
+ KGD_ENGINE_MEC1,
+ KGD_ENGINE_MEC2,
+ KGD_ENGINE_RLC,
+ KGD_ENGINE_SDMA,
+ KGD_ENGINE_MAX
+};
+
struct kgd2kfd_shared_resources {
/* Bit n == 1 means VMID n is available for KFD. */
unsigned int compute_vmid_bitmap;
@@ -99,17 +110,10 @@
/**
* struct kfd2kgd_calls
*
- * @init_sa_manager: Initialize an instance of the sa manager, used by
- * amdkfd for all system memory allocations that are mapped to the GART
- * address space
+ * @init_gtt_mem_allocation: Allocates a buffer in the GART aperture.
+ * The buffer can be used for MQDs, HPDs, kernel queues, fences and runlists
*
- * @fini_sa_manager: Releases all memory allocations for amdkfd that are
- * handled by kgd sa manager
- *
- * @allocate_mem: Allocate a buffer from amdkfd's sa manager. The buffer can
- * be used for mqds, hpds, kernel queue, fence and runlists
- *
- * @free_mem: Frees a buffer that was allocated by amdkfd's sa manager
+ * @free_gtt_mem: Frees a buffer that was allocated in the GART aperture
*
* @get_vmem_size: Retrieves (physical) size of VRAM
*
@@ -133,22 +137,30 @@
* @hqd_load: Loads the mqd structure to a H/W hqd slot. Used only for no cp
* scheduling mode.
*
+ * @hqd_sdma_load: Loads the SDMA mqd structure to a H/W SDMA hqd slot.
+ * Used only for no HWS mode.
+ *
* @hqd_is_occupies: Checks if a hqd slot is occupied.
*
* @hqd_destroy: Destructs and preempts the queue assigned to that hqd slot.
*
+ * @hqd_sdma_is_occupied: Checks if an SDMA hqd slot is occupied.
+ *
+ * @hqd_sdma_destroy: Destructs and preempts the SDMA queue assigned to that
+ * SDMA hqd slot.
+ *
+ * @get_fw_version: Returns FW versions from the header
+ *
* This structure contains function pointers to services that the kgd driver
* provides to amdkfd driver.
*
*/
struct kfd2kgd_calls {
- /* Memory management. */
- int (*init_sa_manager)(struct kgd_dev *kgd, unsigned int size);
- void (*fini_sa_manager)(struct kgd_dev *kgd);
- int (*allocate_mem)(struct kgd_dev *kgd, size_t size, size_t alignment,
- enum kgd_memory_pool pool, struct kgd_mem **mem);
+ int (*init_gtt_mem_allocation)(struct kgd_dev *kgd, size_t size,
+ void **mem_obj, uint64_t *gpu_addr,
+ void **cpu_ptr);
- void (*free_mem)(struct kgd_dev *kgd, struct kgd_mem *mem);
+ void (*free_gtt_mem)(struct kgd_dev *kgd, void *mem_obj);
uint64_t (*get_vmem_size)(struct kgd_dev *kgd);
uint64_t (*get_gpu_clock_counter)(struct kgd_dev *kgd);
@@ -170,16 +182,26 @@
int (*hqd_load)(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
uint32_t queue_id, uint32_t __user *wptr);
+ int (*hqd_sdma_load)(struct kgd_dev *kgd, void *mqd);
+
bool (*hqd_is_occupies)(struct kgd_dev *kgd, uint64_t queue_address,
uint32_t pipe_id, uint32_t queue_id);
int (*hqd_destroy)(struct kgd_dev *kgd, uint32_t reset_type,
unsigned int timeout, uint32_t pipe_id,
uint32_t queue_id);
+
+ bool (*hqd_sdma_is_occupied)(struct kgd_dev *kgd, void *mqd);
+
+ int (*hqd_sdma_destroy)(struct kgd_dev *kgd, void *mqd,
+ unsigned int timeout);
+
+ uint16_t (*get_fw_version)(struct kgd_dev *kgd,
+ enum kgd_engine_type type);
};
bool kgd2kfd_init(unsigned interface_version,
- const struct kfd2kgd_calls *f2g,
- const struct kgd2kfd_calls **g2f);
+ const struct kfd2kgd_calls *f2g,
+ const struct kgd2kfd_calls **g2f);
-#endif /* KGD_KFD_INTERFACE_H_INCLUDED */
+#endif /* KGD_KFD_INTERFACE_H_INCLUDED */
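
Tying the reworked interface back to the sub-allocator earlier in this series: amdkfd now asks kgd for one large GART buffer at device-init time and carves it up itself instead of calling into a kgd-side sa manager per allocation. A hedged, freestanding sketch of that flow with the kfd/kgd types stubbed out (only the init_gtt_mem_allocation()/free_gtt_mem() signatures come from the header above; everything else is illustrative):

#include <stdint.h>
#include <stddef.h>

struct kgd_dev;         /* opaque, owned by the kgd (radeon) side */

struct kfd2kgd_calls_model {
        int (*init_gtt_mem_allocation)(struct kgd_dev *kgd, size_t size,
                                       void **mem_obj, uint64_t *gpu_addr,
                                       void **cpu_ptr);
        void (*free_gtt_mem)(struct kgd_dev *kgd, void *mem_obj);
};

struct kfd_dev_model {
        struct kgd_dev *kgd;
        void *gtt_mem;
        uint64_t gtt_start_gpu_addr;
        void *gtt_start_cpu_ptr;
};

/* One big buffer up front; kfd_gtt_sa_allocate() then hands out chunks
 * of it without further kgd round trips. */
static int example_gtt_init(struct kfd_dev_model *kfd,
                            const struct kfd2kgd_calls_model *f2g,
                            size_t total_size)
{
        return f2g->init_gtt_mem_allocation(kfd->kgd, total_size,
                                            &kfd->gtt_mem,
                                            &kfd->gtt_start_gpu_addr,
                                            &kfd->gtt_start_cpu_ptr);
}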
diff --git a/drivers/gpu/drm/armada/armada_crtc.c b/drivers/gpu/drm/armada/armada_crtc.c
index e3a7a50..42d2ffa 100644
--- a/drivers/gpu/drm/armada/armada_crtc.c
+++ b/drivers/gpu/drm/armada/armada_crtc.c
@@ -653,10 +653,6 @@
return 0;
}
-static void armada_drm_crtc_load_lut(struct drm_crtc *crtc)
-{
-}
-
/* The mode_config.mutex will be held for this call */
static void armada_drm_crtc_disable(struct drm_crtc *crtc)
{
@@ -678,7 +674,6 @@
.mode_fixup = armada_drm_crtc_mode_fixup,
.mode_set = armada_drm_crtc_mode_set,
.mode_set_base = armada_drm_crtc_mode_set_base,
- .load_lut = armada_drm_crtc_load_lut,
.disable = armada_drm_crtc_disable,
};
diff --git a/drivers/gpu/drm/bochs/bochs_kms.c b/drivers/gpu/drm/bochs/bochs_kms.c
index 85f0f8cf..26bcd03 100644
--- a/drivers/gpu/drm/bochs/bochs_kms.c
+++ b/drivers/gpu/drm/bochs/bochs_kms.c
@@ -18,10 +18,6 @@
/* ---------------------------------------------------------------------- */
-static void bochs_crtc_load_lut(struct drm_crtc *crtc)
-{
-}
-
static void bochs_crtc_dpms(struct drm_crtc *crtc, int mode)
{
switch (mode) {
@@ -144,7 +140,6 @@
.mode_set_base = bochs_crtc_mode_set_base,
.prepare = bochs_crtc_prepare,
.commit = bochs_crtc_commit,
- .load_lut = bochs_crtc_load_lut,
};
static void bochs_crtc_init(struct drm_device *dev)
diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c
index ff5f034..1e38dfc 100644
--- a/drivers/gpu/drm/drm_atomic.c
+++ b/drivers/gpu/drm/drm_atomic.c
@@ -56,6 +56,11 @@
if (!state)
return NULL;
+ /* TODO legacy paths should maybe do a better job about
+ * setting this appropriately?
+ */
+ state->allow_modeset = true;
+
state->num_connector = ACCESS_ONCE(dev->mode_config.num_connector);
state->crtcs = kcalloc(dev->mode_config.num_crtc,
@@ -217,6 +222,70 @@
EXPORT_SYMBOL(drm_atomic_get_crtc_state);
/**
+ * drm_atomic_crtc_set_property - set property on CRTC
+ * @crtc: the drm CRTC to set a property on
+ * @state: the state object to update with the new property value
+ * @property: the property to set
+ * @val: the new property value
+ *
+ * Use this instead of calling crtc->atomic_set_property directly.
+ * This function handles generic/core properties and calls out to
+ * driver's ->atomic_set_property() for driver properties. To ensure
+ * consistent behavior you must call this function rather than the
+ * driver hook directly.
+ *
+ * RETURNS:
+ * Zero on success, error code on failure
+ */
+int drm_atomic_crtc_set_property(struct drm_crtc *crtc,
+ struct drm_crtc_state *state, struct drm_property *property,
+ uint64_t val)
+{
+ if (crtc->funcs->atomic_set_property)
+ return crtc->funcs->atomic_set_property(crtc, state, property, val);
+ return -EINVAL;
+}
+EXPORT_SYMBOL(drm_atomic_crtc_set_property);
+
+/*
+ * This function handles generic/core properties and calls out to
+ * driver's ->atomic_get_property() for driver properties. To ensure
+ * consistent behavior you must call this function rather than the
+ * driver hook directly.
+ */
+int drm_atomic_crtc_get_property(struct drm_crtc *crtc,
+ const struct drm_crtc_state *state,
+ struct drm_property *property, uint64_t *val)
+{
+ if (crtc->funcs->atomic_get_property)
+ return crtc->funcs->atomic_get_property(crtc, state, property, val);
+ return -EINVAL;
+}
+
+/**
+ * drm_atomic_crtc_check - check crtc state
+ * @crtc: crtc to check
+ * @state: crtc state to check
+ *
+ * Provides core sanity checks for crtc state.
+ *
+ * RETURNS:
+ * Zero on success, error code on failure
+ */
+static int drm_atomic_crtc_check(struct drm_crtc *crtc,
+ struct drm_crtc_state *state)
+{
+ /* NOTE: we explicitly don't enforce constraints such as primary
+ * layer covering entire screen, since that is something we want
+ * to allow (on hw that supports it). For hw that does not, it
+ * should be checked in driver's crtc->atomic_check() vfunc.
+ *
+ * TODO: Add generic modeset state checks once we support those.
+ */
+ return 0;
+}
+
+/**
* drm_atomic_get_plane_state - get plane state
* @state: global atomic state object
* @plane: plane to get state object for
@@ -272,6 +341,183 @@
EXPORT_SYMBOL(drm_atomic_get_plane_state);
/**
+ * drm_atomic_plane_set_property - set property on plane
+ * @plane: the drm plane to set a property on
+ * @state: the state object to update with the new property value
+ * @property: the property to set
+ * @val: the new property value
+ *
+ * Use this instead of calling plane->atomic_set_property directly.
+ * This function handles generic/core properties and calls out to
+ * driver's ->atomic_set_property() for driver properties. To ensure
+ * consistent behavior you must call this function rather than the
+ * driver hook directly.
+ *
+ * RETURNS:
+ * Zero on success, error code on failure
+ */
+int drm_atomic_plane_set_property(struct drm_plane *plane,
+ struct drm_plane_state *state, struct drm_property *property,
+ uint64_t val)
+{
+ struct drm_device *dev = plane->dev;
+ struct drm_mode_config *config = &dev->mode_config;
+
+ if (property == config->prop_fb_id) {
+ struct drm_framebuffer *fb = drm_framebuffer_lookup(dev, val);
+ drm_atomic_set_fb_for_plane(state, fb);
+ if (fb)
+ drm_framebuffer_unreference(fb);
+ } else if (property == config->prop_crtc_id) {
+ struct drm_crtc *crtc = drm_crtc_find(dev, val);
+ return drm_atomic_set_crtc_for_plane(state, crtc);
+ } else if (property == config->prop_crtc_x) {
+ state->crtc_x = U642I64(val);
+ } else if (property == config->prop_crtc_y) {
+ state->crtc_y = U642I64(val);
+ } else if (property == config->prop_crtc_w) {
+ state->crtc_w = val;
+ } else if (property == config->prop_crtc_h) {
+ state->crtc_h = val;
+ } else if (property == config->prop_src_x) {
+ state->src_x = val;
+ } else if (property == config->prop_src_y) {
+ state->src_y = val;
+ } else if (property == config->prop_src_w) {
+ state->src_w = val;
+ } else if (property == config->prop_src_h) {
+ state->src_h = val;
+ } else if (plane->funcs->atomic_set_property) {
+ return plane->funcs->atomic_set_property(plane, state,
+ property, val);
+ } else {
+ return -EINVAL;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_atomic_plane_set_property);
+
+/*
+ * This function handles generic/core properties and calls out to
+ * driver's ->atomic_get_property() for driver properties. To ensure
+ * consistent behavior you must call this function rather than the
+ * driver hook directly.
+ */
+static int
+drm_atomic_plane_get_property(struct drm_plane *plane,
+ const struct drm_plane_state *state,
+ struct drm_property *property, uint64_t *val)
+{
+ struct drm_device *dev = plane->dev;
+ struct drm_mode_config *config = &dev->mode_config;
+
+ if (property == config->prop_fb_id) {
+ *val = (state->fb) ? state->fb->base.id : 0;
+ } else if (property == config->prop_crtc_id) {
+ *val = (state->crtc) ? state->crtc->base.id : 0;
+ } else if (property == config->prop_crtc_x) {
+ *val = I642U64(state->crtc_x);
+ } else if (property == config->prop_crtc_y) {
+ *val = I642U64(state->crtc_y);
+ } else if (property == config->prop_crtc_w) {
+ *val = state->crtc_w;
+ } else if (property == config->prop_crtc_h) {
+ *val = state->crtc_h;
+ } else if (property == config->prop_src_x) {
+ *val = state->src_x;
+ } else if (property == config->prop_src_y) {
+ *val = state->src_y;
+ } else if (property == config->prop_src_w) {
+ *val = state->src_w;
+ } else if (property == config->prop_src_h) {
+ *val = state->src_h;
+ } else if (plane->funcs->atomic_get_property) {
+ return plane->funcs->atomic_get_property(plane, state, property, val);
+ } else {
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * drm_atomic_plane_check - check plane state
+ * @plane: plane to check
+ * @state: plane state to check
+ *
+ * Provides core sanity checks for plane state.
+ *
+ * RETURNS:
+ * Zero on success, error code on failure
+ */
+static int drm_atomic_plane_check(struct drm_plane *plane,
+ struct drm_plane_state *state)
+{
+ unsigned int fb_width, fb_height;
+ unsigned int i;
+
+ /* either *both* CRTC and FB must be set, or neither */
+ if (WARN_ON(state->crtc && !state->fb)) {
+ DRM_DEBUG_KMS("CRTC set but no FB\n");
+ return -EINVAL;
+ } else if (WARN_ON(state->fb && !state->crtc)) {
+ DRM_DEBUG_KMS("FB set but no CRTC\n");
+ return -EINVAL;
+ }
+
+ /* if disabled, we don't care about the rest of the state: */
+ if (!state->crtc)
+ return 0;
+
+ /* Check whether this plane is usable on this CRTC */
+ if (!(plane->possible_crtcs & drm_crtc_mask(state->crtc))) {
+ DRM_DEBUG_KMS("Invalid crtc for plane\n");
+ return -EINVAL;
+ }
+
+ /* Check whether this plane supports the fb pixel format. */
+ for (i = 0; i < plane->format_count; i++)
+ if (state->fb->pixel_format == plane->format_types[i])
+ break;
+ if (i == plane->format_count) {
+ DRM_DEBUG_KMS("Invalid pixel format %s\n",
+ drm_get_format_name(state->fb->pixel_format));
+ return -EINVAL;
+ }
+
+ /* Give drivers some help against integer overflows */
+ if (state->crtc_w > INT_MAX ||
+ state->crtc_x > INT_MAX - (int32_t) state->crtc_w ||
+ state->crtc_h > INT_MAX ||
+ state->crtc_y > INT_MAX - (int32_t) state->crtc_h) {
+ DRM_DEBUG_KMS("Invalid CRTC coordinates %ux%u+%d+%d\n",
+ state->crtc_w, state->crtc_h,
+ state->crtc_x, state->crtc_y);
+ return -ERANGE;
+ }
+
+ fb_width = state->fb->width << 16;
+ fb_height = state->fb->height << 16;
+
+ /* Make sure source coordinates are inside the fb. */
+ if (state->src_w > fb_width ||
+ state->src_x > fb_width - state->src_w ||
+ state->src_h > fb_height ||
+ state->src_y > fb_height - state->src_h) {
+ DRM_DEBUG_KMS("Invalid source coordinates "
+ "%u.%06ux%u.%06u+%u.%06u+%u.%06u\n",
+ state->src_w >> 16, ((state->src_w & 0xffff) * 15625) >> 10,
+ state->src_h >> 16, ((state->src_h & 0xffff) * 15625) >> 10,
+ state->src_x >> 16, ((state->src_x & 0xffff) * 15625) >> 10,
+ state->src_y >> 16, ((state->src_y & 0xffff) * 15625) >> 10);
+ return -ENOSPC;
+ }
+
+ return 0;
+}
+
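
The source coordinates checked above are 16.16 fixed point, and the debug format prints the fractional part in millionths: 10^6 / 2^16 reduces to 15625 / 1024, hence the (frac * 15625) >> 10 conversion. A quick standalone check:

#include <stdint.h>
#include <assert.h>

int main(void)
{
        uint32_t src_x = (5 << 16) | 0x8000;    /* 5.5 in 16.16 */
        uint32_t whole = src_x >> 16;
        uint32_t micro = ((src_x & 0xffff) * 15625) >> 10;

        assert(whole == 5);
        assert(micro == 500000);        /* printed as .500000 */
        return 0;
}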
+/**
* drm_atomic_get_connector_state - get connector state
* @state: global atomic state object
* @connector: connector to get state object for
@@ -343,9 +589,113 @@
EXPORT_SYMBOL(drm_atomic_get_connector_state);
/**
+ * drm_atomic_connector_set_property - set property on connector.
+ * @connector: the drm connector to set a property on
+ * @state: the state object to update with the new property value
+ * @property: the property to set
+ * @val: the new property value
+ *
+ * Use this instead of calling connector->atomic_set_property directly.
+ * This function handles generic/core properties and calls out to
+ * driver's ->atomic_set_property() for driver properties. To ensure
+ * consistent behavior you must call this function rather than the
+ * driver hook directly.
+ *
+ * RETURNS:
+ * Zero on success, error code on failure
+ */
+int drm_atomic_connector_set_property(struct drm_connector *connector,
+ struct drm_connector_state *state, struct drm_property *property,
+ uint64_t val)
+{
+ struct drm_device *dev = connector->dev;
+ struct drm_mode_config *config = &dev->mode_config;
+
+ if (property == config->prop_crtc_id) {
+ struct drm_crtc *crtc = drm_crtc_find(dev, val);
+ return drm_atomic_set_crtc_for_connector(state, crtc);
+ } else if (property == config->dpms_property) {
+ /* setting DPMS property requires special handling, which
+ * is done in legacy setprop path for us. Disallow (for
+ * now?) atomic writes to DPMS property:
+ */
+ return -EINVAL;
+ } else if (connector->funcs->atomic_set_property) {
+ return connector->funcs->atomic_set_property(connector,
+ state, property, val);
+ } else {
+ return -EINVAL;
+ }
+}
+EXPORT_SYMBOL(drm_atomic_connector_set_property);
+
+/*
+ * This function handles generic/core properties and calls out to
+ * driver's ->atomic_get_property() for driver properties. To ensure
+ * consistent behavior you must call this function rather than the
+ * driver hook directly.
+ */
+static int
+drm_atomic_connector_get_property(struct drm_connector *connector,
+ const struct drm_connector_state *state,
+ struct drm_property *property, uint64_t *val)
+{
+ struct drm_device *dev = connector->dev;
+ struct drm_mode_config *config = &dev->mode_config;
+
+ if (property == config->prop_crtc_id) {
+ *val = (state->crtc) ? state->crtc->base.id : 0;
+ } else if (property == config->dpms_property) {
+ *val = connector->dpms;
+ } else if (connector->funcs->atomic_get_property) {
+ return connector->funcs->atomic_get_property(connector,
+ state, property, val);
+ } else {
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int drm_atomic_get_property(struct drm_mode_object *obj,
+ struct drm_property *property, uint64_t *val)
+{
+ struct drm_device *dev = property->dev;
+ int ret;
+
+ switch (obj->type) {
+ case DRM_MODE_OBJECT_CONNECTOR: {
+ struct drm_connector *connector = obj_to_connector(obj);
+ WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
+ ret = drm_atomic_connector_get_property(connector,
+ connector->state, property, val);
+ break;
+ }
+ case DRM_MODE_OBJECT_CRTC: {
+ struct drm_crtc *crtc = obj_to_crtc(obj);
+ WARN_ON(!drm_modeset_is_locked(&crtc->mutex));
+ ret = drm_atomic_crtc_get_property(crtc,
+ crtc->state, property, val);
+ break;
+ }
+ case DRM_MODE_OBJECT_PLANE: {
+ struct drm_plane *plane = obj_to_plane(obj);
+ WARN_ON(!drm_modeset_is_locked(&plane->mutex));
+ ret = drm_atomic_plane_get_property(plane,
+ plane->state, property, val);
+ break;
+ }
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+/**
* drm_atomic_set_crtc_for_plane - set crtc for plane
- * @state: the incoming atomic state
- * @plane: the plane whose incoming state to update
+ * @plane_state: the incoming plane state to update
* @crtc: crtc to use for the plane
*
* Changing the assigned crtc for a plane requires us to grab the lock and state
@@ -358,16 +708,12 @@
* sequence must be restarted. All other errors are fatal.
*/
int
-drm_atomic_set_crtc_for_plane(struct drm_atomic_state *state,
- struct drm_plane *plane, struct drm_crtc *crtc)
+drm_atomic_set_crtc_for_plane(struct drm_plane_state *plane_state,
+ struct drm_crtc *crtc)
{
- struct drm_plane_state *plane_state =
- drm_atomic_get_plane_state(state, plane);
+ struct drm_plane *plane = plane_state->plane;
struct drm_crtc_state *crtc_state;
- if (WARN_ON(IS_ERR(plane_state)))
- return PTR_ERR(plane_state);
-
if (plane_state->crtc) {
crtc_state = drm_atomic_get_crtc_state(plane_state->state,
plane_state->crtc);
@@ -583,14 +929,62 @@
*/
int drm_atomic_check_only(struct drm_atomic_state *state)
{
- struct drm_mode_config *config = &state->dev->mode_config;
+ struct drm_device *dev = state->dev;
+ struct drm_mode_config *config = &dev->mode_config;
+ int nplanes = config->num_total_plane;
+ int ncrtcs = config->num_crtc;
+ int i, ret = 0;
DRM_DEBUG_KMS("checking %p\n", state);
+ for (i = 0; i < nplanes; i++) {
+ struct drm_plane *plane = state->planes[i];
+
+ if (!plane)
+ continue;
+
+ ret = drm_atomic_plane_check(plane, state->plane_states[i]);
+ if (ret) {
+ DRM_DEBUG_KMS("[PLANE:%d] atomic core check failed\n",
+ plane->base.id);
+ return ret;
+ }
+ }
+
+ for (i = 0; i < ncrtcs; i++) {
+ struct drm_crtc *crtc = state->crtcs[i];
+
+ if (!crtc)
+ continue;
+
+ ret = drm_atomic_crtc_check(crtc, state->crtc_states[i]);
+ if (ret) {
+ DRM_DEBUG_KMS("[CRTC:%d] atomic core check failed\n",
+ crtc->base.id);
+ return ret;
+ }
+ }
+
if (config->funcs->atomic_check)
- return config->funcs->atomic_check(state->dev, state);
- else
- return 0;
+ ret = config->funcs->atomic_check(state->dev, state);
+
+ if (!state->allow_modeset) {
+ for (i = 0; i < ncrtcs; i++) {
+ struct drm_crtc *crtc = state->crtcs[i];
+ struct drm_crtc_state *crtc_state = state->crtc_states[i];
+
+ if (!crtc)
+ continue;
+
+ if (crtc_state->mode_changed) {
+ DRM_DEBUG_KMS("[CRTC:%d] requires full modeset\n",
+ crtc->base.id);
+ return -EINVAL;
+ }
+ }
+ }
+
+ return ret;
}
EXPORT_SYMBOL(drm_atomic_check_only);
@@ -655,3 +1049,313 @@
return config->funcs->atomic_commit(state->dev, state, true);
}
EXPORT_SYMBOL(drm_atomic_async_commit);
+
+/*
+ * The big monster ioctl
+ */
+
+static struct drm_pending_vblank_event *create_vblank_event(
+ struct drm_device *dev, struct drm_file *file_priv, uint64_t user_data)
+{
+ struct drm_pending_vblank_event *e = NULL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->event_lock, flags);
+ if (file_priv->event_space < sizeof e->event) {
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+ goto out;
+ }
+ file_priv->event_space -= sizeof e->event;
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+
+ e = kzalloc(sizeof *e, GFP_KERNEL);
+ if (e == NULL) {
+ spin_lock_irqsave(&dev->event_lock, flags);
+ file_priv->event_space += sizeof e->event;
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+ goto out;
+ }
+
+ e->event.base.type = DRM_EVENT_FLIP_COMPLETE;
+ e->event.base.length = sizeof e->event;
+ e->event.user_data = user_data;
+ e->base.event = &e->event.base;
+ e->base.file_priv = file_priv;
+ e->base.destroy = (void (*) (struct drm_pending_event *)) kfree;
+
+out:
+ return e;
+}
+
+static void destroy_vblank_event(struct drm_device *dev,
+ struct drm_file *file_priv, struct drm_pending_vblank_event *e)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->event_lock, flags);
+ file_priv->event_space += sizeof e->event;
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+ kfree(e);
+}
+
+static int atomic_set_prop(struct drm_atomic_state *state,
+ struct drm_mode_object *obj, struct drm_property *prop,
+ uint64_t prop_value)
+{
+ struct drm_mode_object *ref;
+ int ret;
+
+ if (!drm_property_change_valid_get(prop, prop_value, &ref))
+ return -EINVAL;
+
+ switch (obj->type) {
+ case DRM_MODE_OBJECT_CONNECTOR: {
+ struct drm_connector *connector = obj_to_connector(obj);
+ struct drm_connector_state *connector_state;
+
+ connector_state = drm_atomic_get_connector_state(state, connector);
+ if (IS_ERR(connector_state)) {
+ ret = PTR_ERR(connector_state);
+ break;
+ }
+
+ ret = drm_atomic_connector_set_property(connector,
+ connector_state, prop, prop_value);
+ break;
+ }
+ case DRM_MODE_OBJECT_CRTC: {
+ struct drm_crtc *crtc = obj_to_crtc(obj);
+ struct drm_crtc_state *crtc_state;
+
+ crtc_state = drm_atomic_get_crtc_state(state, crtc);
+ if (IS_ERR(crtc_state)) {
+ ret = PTR_ERR(crtc_state);
+ break;
+ }
+
+ ret = drm_atomic_crtc_set_property(crtc,
+ crtc_state, prop, prop_value);
+ break;
+ }
+ case DRM_MODE_OBJECT_PLANE: {
+ struct drm_plane *plane = obj_to_plane(obj);
+ struct drm_plane_state *plane_state;
+
+ plane_state = drm_atomic_get_plane_state(state, plane);
+ if (IS_ERR(plane_state)) {
+ ret = PTR_ERR(plane_state);
+ break;
+ }
+
+ ret = drm_atomic_plane_set_property(plane,
+ plane_state, prop, prop_value);
+ break;
+ }
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ drm_property_change_valid_put(prop, ref);
+ return ret;
+}
+
+int drm_mode_atomic_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file_priv)
+{
+ struct drm_mode_atomic *arg = data;
+ uint32_t __user *objs_ptr = (uint32_t __user *)(unsigned long)(arg->objs_ptr);
+ uint32_t __user *count_props_ptr = (uint32_t __user *)(unsigned long)(arg->count_props_ptr);
+ uint32_t __user *props_ptr = (uint32_t __user *)(unsigned long)(arg->props_ptr);
+ uint64_t __user *prop_values_ptr = (uint64_t __user *)(unsigned long)(arg->prop_values_ptr);
+ unsigned int copied_objs, copied_props;
+ struct drm_atomic_state *state;
+ struct drm_modeset_acquire_ctx ctx;
+ struct drm_plane *plane;
+ unsigned plane_mask = 0;
+ int ret = 0;
+ unsigned int i, j;
+
+ /* disallow for drivers not supporting atomic: */
+ if (!drm_core_check_feature(dev, DRIVER_ATOMIC))
+ return -EINVAL;
+
+ /* disallow for userspace that has not enabled atomic cap (even
+ * though this may be a bit overkill, since legacy userspace
+ * wouldn't know how to call this ioctl)
+ */
+ if (!file_priv->atomic)
+ return -EINVAL;
+
+ if (arg->flags & ~DRM_MODE_ATOMIC_FLAGS)
+ return -EINVAL;
+
+ if (arg->reserved)
+ return -EINVAL;
+
+ if ((arg->flags & DRM_MODE_PAGE_FLIP_ASYNC) &&
+ !dev->mode_config.async_page_flip)
+ return -EINVAL;
+
+ /* can't test and expect an event at the same time. */
+ if ((arg->flags & DRM_MODE_ATOMIC_TEST_ONLY) &&
+ (arg->flags & DRM_MODE_PAGE_FLIP_EVENT))
+ return -EINVAL;
+
+ drm_modeset_acquire_init(&ctx, 0);
+
+ state = drm_atomic_state_alloc(dev);
+ if (!state)
+ return -ENOMEM;
+
+ state->acquire_ctx = &ctx;
+ state->allow_modeset = !!(arg->flags & DRM_MODE_ATOMIC_ALLOW_MODESET);
+
+retry:
+ copied_objs = 0;
+ copied_props = 0;
+
+ for (i = 0; i < arg->count_objs; i++) {
+ uint32_t obj_id, count_props;
+ struct drm_mode_object *obj;
+
+ if (get_user(obj_id, objs_ptr + copied_objs)) {
+ ret = -EFAULT;
+ goto fail;
+ }
+
+ obj = drm_mode_object_find(dev, obj_id, DRM_MODE_OBJECT_ANY);
+ if (!obj || !obj->properties) {
+ ret = -ENOENT;
+ goto fail;
+ }
+
+ if (obj->type == DRM_MODE_OBJECT_PLANE) {
+ plane = obj_to_plane(obj);
+ plane_mask |= (1 << drm_plane_index(plane));
+ plane->old_fb = plane->fb;
+ }
+
+ if (get_user(count_props, count_props_ptr + copied_objs)) {
+ ret = -EFAULT;
+ goto fail;
+ }
+
+ copied_objs++;
+
+ for (j = 0; j < count_props; j++) {
+ uint32_t prop_id;
+ uint64_t prop_value;
+ struct drm_property *prop;
+
+ if (get_user(prop_id, props_ptr + copied_props)) {
+ ret = -EFAULT;
+ goto fail;
+ }
+
+ prop = drm_property_find(dev, prop_id);
+ if (!prop) {
+ ret = -ENOENT;
+ goto fail;
+ }
+
+ if (get_user(prop_value, prop_values_ptr + copied_props)) {
+ ret = -EFAULT;
+ goto fail;
+ }
+
+ ret = atomic_set_prop(state, obj, prop, prop_value);
+ if (ret)
+ goto fail;
+
+ copied_props++;
+ }
+ }
+
+ if (arg->flags & DRM_MODE_PAGE_FLIP_EVENT) {
+ int ncrtcs = dev->mode_config.num_crtc;
+
+ for (i = 0; i < ncrtcs; i++) {
+ struct drm_crtc_state *crtc_state = state->crtc_states[i];
+ struct drm_pending_vblank_event *e;
+
+ if (!crtc_state)
+ continue;
+
+ e = create_vblank_event(dev, file_priv, arg->user_data);
+ if (!e) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ crtc_state->event = e;
+ }
+ }
+
+ if (arg->flags & DRM_MODE_ATOMIC_TEST_ONLY) {
+ ret = drm_atomic_check_only(state);
+ /* _check_only() does not free state, unlike _commit() */
+ drm_atomic_state_free(state);
+ } else if (arg->flags & DRM_MODE_ATOMIC_NONBLOCK) {
+ ret = drm_atomic_async_commit(state);
+ } else {
+ ret = drm_atomic_commit(state);
+ }
+
+ /* if succeeded, fixup legacy plane crtc/fb ptrs before dropping
+ * locks (ie. while it is still safe to deref plane->state). We
+ * need to do this here because the driver entry points cannot
+ * distinguish between legacy and atomic ioctls.
+ */
+ drm_for_each_plane_mask(plane, dev, plane_mask) {
+ if (ret == 0) {
+ struct drm_framebuffer *new_fb = plane->state->fb;
+ if (new_fb)
+ drm_framebuffer_reference(new_fb);
+ plane->fb = new_fb;
+ plane->crtc = plane->state->crtc;
+ } else {
+ plane->old_fb = NULL;
+ }
+ if (plane->old_fb) {
+ drm_framebuffer_unreference(plane->old_fb);
+ plane->old_fb = NULL;
+ }
+ }
+
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
+
+ return ret;
+
+fail:
+ if (ret == -EDEADLK)
+ goto backoff;
+
+ if (arg->flags & DRM_MODE_PAGE_FLIP_EVENT) {
+ int ncrtcs = dev->mode_config.num_crtc;
+
+ for (i = 0; i < ncrtcs; i++) {
+ struct drm_crtc_state *crtc_state = state->crtc_states[i];
+
+ if (!crtc_state)
+ continue;
+
+ destroy_vblank_event(dev, file_priv, crtc_state->event);
+ crtc_state->event = NULL;
+ }
+ }
+
+ drm_atomic_state_free(state);
+
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
+
+ return ret;
+
+backoff:
+ drm_atomic_state_clear(state);
+ drm_modeset_backoff(&ctx);
+
+ goto retry;
+}
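For a concrete picture of the array layout this ioctl consumes, a minimal userspace sketch, assuming hypothetical plane/property/framebuffer IDs and the libdrm drmIoctl() wrapper (one object, two properties, flattened across the four arrays):

	uint32_t objs[] = { plane_id };			/* hypothetical object ID */
	uint32_t counts[] = { 2 };			/* properties for this object */
	uint32_t props[] = { prop_fb_id, prop_crtc_id };	/* hypothetical property IDs */
	uint64_t values[] = { fb_id, crtc_id };

	struct drm_mode_atomic arg = {
		.flags = DRM_MODE_ATOMIC_TEST_ONLY,	/* validate without committing */
		.count_objs = 1,
		.objs_ptr = (uintptr_t)objs,
		.count_props_ptr = (uintptr_t)counts,
		.props_ptr = (uintptr_t)props,
		.prop_values_ptr = (uintptr_t)values,
	};

	drmIoctl(fd, DRM_IOCTL_MODE_ATOMIC, &arg);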
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
index 4a78a77..541ba83 100644
--- a/drivers/gpu/drm/drm_atomic_helper.c
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -61,7 +61,7 @@
struct drm_crtc_state *crtc_state;
if (plane->state->crtc) {
- crtc_state = state->crtc_states[drm_crtc_index(plane->crtc)];
+ crtc_state = state->crtc_states[drm_crtc_index(plane->state->crtc)];
if (WARN_ON(!crtc_state))
return;
@@ -330,7 +330,29 @@
return 0;
}
-static int
+/**
+ * drm_atomic_helper_check_modeset - validate state object for modeset changes
+ * @dev: DRM device
+ * @state: the driver state object
+ *
+ * Check the state object to see if the requested state is physically possible.
+ * This does all the crtc and connector related computations for an atomic
+ * update. It computes and updates crtc_state->mode_changed, adds any additional
+ * connectors needed for full modesets and calls down into ->mode_fixup
+ * functions of the driver backend.
+ *
+ * IMPORTANT:
+ *
+ * Drivers which update ->mode_changed (e.g. in their ->atomic_check hooks if a
+ * plane update can't be done without a full modeset) _must_ call this function
+ * again after that change. It is permitted to call this function multiple
+ * times for the same update, e.g. when the ->atomic_check functions depend upon
+ * the adjusted dotclock for fifo space allocation and watermark computation.
+ *
+ * RETURNS
+ * Zero for success or -errno
+ */
+int
drm_atomic_helper_check_modeset(struct drm_device *dev,
struct drm_atomic_state *state)
{
@@ -406,23 +428,23 @@
return mode_fixup(state);
}
+EXPORT_SYMBOL(drm_atomic_helper_check_modeset);
/**
- * drm_atomic_helper_check - validate state object
+ * drm_atomic_helper_check_planes - validate state object for plane changes
* @dev: DRM device
* @state: the driver state object
*
* Check the state object to see if the requested state is physically possible.
- * Only crtcs and planes have check callbacks, so for any additional (global)
- * checking that a driver needs it can simply wrap that around this function.
- * Drivers without such needs can directly use this as their ->atomic_check()
- * callback.
+ * This does all the plane update related checks by calling into the
+ * ->atomic_check hooks provided by the driver.
*
* RETURNS
* Zero for success or -errno
*/
-int drm_atomic_helper_check(struct drm_device *dev,
- struct drm_atomic_state *state)
+int
+drm_atomic_helper_check_planes(struct drm_device *dev,
+ struct drm_atomic_state *state)
{
int nplanes = dev->mode_config.num_total_plane;
int ncrtcs = dev->mode_config.num_crtc;
@@ -445,7 +467,7 @@
ret = funcs->atomic_check(plane, plane_state);
if (ret) {
- DRM_DEBUG_KMS("[PLANE:%d] atomic check failed\n",
+ DRM_DEBUG_KMS("[PLANE:%d] atomic driver check failed\n",
plane->base.id);
return ret;
}
@@ -465,16 +487,49 @@
ret = funcs->atomic_check(crtc, state->crtc_states[i]);
if (ret) {
- DRM_DEBUG_KMS("[CRTC:%d] atomic check failed\n",
+ DRM_DEBUG_KMS("[CRTC:%d] atomic driver check failed\n",
crtc->base.id);
return ret;
}
}
+ return ret;
+}
+EXPORT_SYMBOL(drm_atomic_helper_check_planes);
+
+/**
+ * drm_atomic_helper_check - validate state object
+ * @dev: DRM device
+ * @state: the driver state object
+ *
+ * Check the state object to see if the requested state is physically possible.
+ * Only crtcs and planes have check callbacks, so for any additional (global)
+ * checking that a driver needs it can simply wrap that around this function.
+ * Drivers without such needs can directly use this as their ->atomic_check()
+ * callback.
+ *
+ * This just wraps the two parts of the state checking for planes and modeset
+ * state in the default order: First it calls drm_atomic_helper_check_modeset()
+ * and then drm_atomic_helper_check_planes(). The assumption is that the
+ * ->atomic_check functions depend upon an updated adjusted_mode.clock to
+ * e.g. properly compute watermarks.
+ *
+ * RETURNS
+ * Zero for success or -errno
+ */
+int drm_atomic_helper_check(struct drm_device *dev,
+ struct drm_atomic_state *state)
+{
+ int ret;
+
ret = drm_atomic_helper_check_modeset(dev, state);
if (ret)
return ret;
+ ret = drm_atomic_helper_check_planes(dev, state);
+ if (ret)
+ return ret;
+
return ret;
}
EXPORT_SYMBOL(drm_atomic_helper_check);
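A minimal sketch of splitting the two phases in a driver that has its own checks in between (the foo_ prefix is hypothetical):

	static int foo_atomic_check(struct drm_device *dev,
				    struct drm_atomic_state *state)
	{
		int ret;

		ret = drm_atomic_helper_check_modeset(dev, state);
		if (ret)
			return ret;

		/* driver-specific checks here; if they set ->mode_changed,
		 * drm_atomic_helper_check_modeset() must be called again */

		return drm_atomic_helper_check_planes(dev, state);
	}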
@@ -1222,7 +1277,7 @@
goto fail;
}
- ret = drm_atomic_set_crtc_for_plane(state, plane, crtc);
+ ret = drm_atomic_set_crtc_for_plane(plane_state, crtc);
if (ret != 0)
goto fail;
drm_atomic_set_fb_for_plane(plane_state, fb);
@@ -1301,7 +1356,7 @@
goto fail;
}
- ret = drm_atomic_set_crtc_for_plane(state, plane, NULL);
+ ret = drm_atomic_set_crtc_for_plane(plane_state, NULL);
if (ret != 0)
goto fail;
drm_atomic_set_fb_for_plane(plane_state, NULL);
@@ -1464,7 +1519,7 @@
crtc_state->enable = false;
- ret = drm_atomic_set_crtc_for_plane(state, crtc->primary, NULL);
+ ret = drm_atomic_set_crtc_for_plane(primary_state, NULL);
if (ret != 0)
goto fail;
@@ -1479,7 +1534,7 @@
crtc_state->enable = true;
drm_mode_copy(&crtc_state->mode, set->mode);
- ret = drm_atomic_set_crtc_for_plane(state, crtc->primary, crtc);
+ ret = drm_atomic_set_crtc_for_plane(primary_state, crtc);
if (ret != 0)
goto fail;
drm_atomic_set_fb_for_plane(primary_state, set->fb);
@@ -1558,8 +1613,8 @@
goto fail;
}
- ret = crtc->funcs->atomic_set_property(crtc, crtc_state,
- property, val);
+ ret = drm_atomic_crtc_set_property(crtc, crtc_state,
+ property, val);
if (ret)
goto fail;
@@ -1617,8 +1672,8 @@
goto fail;
}
- ret = plane->funcs->atomic_set_property(plane, plane_state,
- property, val);
+ ret = drm_atomic_plane_set_property(plane, plane_state,
+ property, val);
if (ret)
goto fail;
@@ -1676,8 +1731,8 @@
goto fail;
}
- ret = connector->funcs->atomic_set_property(connector, connector_state,
- property, val);
+ ret = drm_atomic_connector_set_property(connector, connector_state,
+ property, val);
if (ret)
goto fail;
@@ -1751,7 +1806,7 @@
goto fail;
}
- ret = drm_atomic_set_crtc_for_plane(state, plane, crtc);
+ ret = drm_atomic_set_crtc_for_plane(plane_state, crtc);
if (ret != 0)
goto fail;
drm_atomic_set_fb_for_plane(plane_state, fb);
@@ -1814,6 +1869,9 @@
{
kfree(crtc->state);
crtc->state = kzalloc(sizeof(*crtc->state), GFP_KERNEL);
+
+ if (crtc->state)
+ crtc->state->crtc = crtc;
}
EXPORT_SYMBOL(drm_atomic_helper_crtc_reset);
@@ -1873,6 +1931,9 @@
kfree(plane->state);
plane->state = kzalloc(sizeof(*plane->state), GFP_KERNEL);
+
+ if (plane->state)
+ plane->state->plane = plane;
}
EXPORT_SYMBOL(drm_atomic_helper_plane_reset);
@@ -1930,6 +1991,9 @@
{
kfree(connector->state);
connector->state = kzalloc(sizeof(*connector->state), GFP_KERNEL);
+
+ if (connector->state)
+ connector->state->connector = connector;
}
EXPORT_SYMBOL(drm_atomic_helper_connector_reset);
diff --git a/drivers/gpu/drm/drm_cache.c b/drivers/gpu/drm/drm_cache.c
index a6b6906..9a62d7a 100644
--- a/drivers/gpu/drm/drm_cache.c
+++ b/drivers/gpu/drm/drm_cache.c
@@ -32,6 +32,7 @@
#include <drm/drmP.h>
#if defined(CONFIG_X86)
+#include <asm/smp.h>
/*
* clflushopt is an unordered instruction which needs fencing with mfence or
@@ -64,12 +65,6 @@
drm_clflush_page(*pages++);
mb();
}
-
-static void
-drm_clflush_ipi_handler(void *null)
-{
- wbinvd();
-}
#endif
void
@@ -82,7 +77,7 @@
return;
}
- if (on_each_cpu(drm_clflush_ipi_handler, NULL, 1) != 0)
+ if (wbinvd_on_all_cpus())
printk(KERN_ERR "Timed out waiting for cache flush.\n");
#elif defined(__powerpc__)
@@ -121,7 +116,7 @@
return;
}
- if (on_each_cpu(drm_clflush_ipi_handler, NULL, 1) != 0)
+ if (wbinvd_on_all_cpus())
printk(KERN_ERR "Timed out waiting for cache flush.\n");
#else
printk(KERN_ERR "Architecture has no drm_cache.c support\n");
@@ -144,7 +139,7 @@
return;
}
- if (on_each_cpu(drm_clflush_ipi_handler, NULL, 1) != 0)
+ if (wbinvd_on_all_cpus())
printk(KERN_ERR "Timed out waiting for cache flush.\n");
#else
printk(KERN_ERR "Architecture has no drm_cache.c support\n");
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index 5213da4..caec5c3 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -38,6 +38,7 @@
#include <drm/drm_edid.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_modeset_lock.h>
+#include <drm/drm_atomic.h>
#include "drm_crtc_internal.h"
#include "drm_internal.h"
@@ -61,8 +62,8 @@
/*
* Global properties
*/
-static const struct drm_prop_enum_list drm_dpms_enum_list[] =
-{ { DRM_MODE_DPMS_ON, "On" },
+static const struct drm_prop_enum_list drm_dpms_enum_list[] = {
+ { DRM_MODE_DPMS_ON, "On" },
{ DRM_MODE_DPMS_STANDBY, "Standby" },
{ DRM_MODE_DPMS_SUSPEND, "Suspend" },
{ DRM_MODE_DPMS_OFF, "Off" }
@@ -70,8 +71,7 @@
DRM_ENUM_NAME_FN(drm_get_dpms_name, drm_dpms_enum_list)
-static const struct drm_prop_enum_list drm_plane_type_enum_list[] =
-{
+static const struct drm_prop_enum_list drm_plane_type_enum_list[] = {
{ DRM_PLANE_TYPE_OVERLAY, "Overlay" },
{ DRM_PLANE_TYPE_PRIMARY, "Primary" },
{ DRM_PLANE_TYPE_CURSOR, "Cursor" },
@@ -80,8 +80,7 @@
/*
* Optional properties
*/
-static const struct drm_prop_enum_list drm_scaling_mode_enum_list[] =
-{
+static const struct drm_prop_enum_list drm_scaling_mode_enum_list[] = {
{ DRM_MODE_SCALE_NONE, "None" },
{ DRM_MODE_SCALE_FULLSCREEN, "Full" },
{ DRM_MODE_SCALE_CENTER, "Center" },
@@ -97,8 +96,7 @@
/*
* Non-global properties, but "required" for certain connectors.
*/
-static const struct drm_prop_enum_list drm_dvi_i_select_enum_list[] =
-{
+static const struct drm_prop_enum_list drm_dvi_i_select_enum_list[] = {
{ DRM_MODE_SUBCONNECTOR_Automatic, "Automatic" }, /* DVI-I and TV-out */
{ DRM_MODE_SUBCONNECTOR_DVID, "DVI-D" }, /* DVI-I */
{ DRM_MODE_SUBCONNECTOR_DVIA, "DVI-A" }, /* DVI-I */
@@ -106,8 +104,7 @@
DRM_ENUM_NAME_FN(drm_get_dvi_i_select_name, drm_dvi_i_select_enum_list)
-static const struct drm_prop_enum_list drm_dvi_i_subconnector_enum_list[] =
-{
+static const struct drm_prop_enum_list drm_dvi_i_subconnector_enum_list[] = {
{ DRM_MODE_SUBCONNECTOR_Unknown, "Unknown" }, /* DVI-I and TV-out */
{ DRM_MODE_SUBCONNECTOR_DVID, "DVI-D" }, /* DVI-I */
{ DRM_MODE_SUBCONNECTOR_DVIA, "DVI-A" }, /* DVI-I */
@@ -116,8 +113,7 @@
DRM_ENUM_NAME_FN(drm_get_dvi_i_subconnector_name,
drm_dvi_i_subconnector_enum_list)
-static const struct drm_prop_enum_list drm_tv_select_enum_list[] =
-{
+static const struct drm_prop_enum_list drm_tv_select_enum_list[] = {
{ DRM_MODE_SUBCONNECTOR_Automatic, "Automatic" }, /* DVI-I and TV-out */
{ DRM_MODE_SUBCONNECTOR_Composite, "Composite" }, /* TV-out */
{ DRM_MODE_SUBCONNECTOR_SVIDEO, "SVIDEO" }, /* TV-out */
@@ -127,8 +123,7 @@
DRM_ENUM_NAME_FN(drm_get_tv_select_name, drm_tv_select_enum_list)
-static const struct drm_prop_enum_list drm_tv_subconnector_enum_list[] =
-{
+static const struct drm_prop_enum_list drm_tv_subconnector_enum_list[] = {
{ DRM_MODE_SUBCONNECTOR_Unknown, "Unknown" }, /* DVI-I and TV-out */
{ DRM_MODE_SUBCONNECTOR_Composite, "Composite" }, /* TV-out */
{ DRM_MODE_SUBCONNECTOR_SVIDEO, "SVIDEO" }, /* TV-out */
@@ -154,8 +149,8 @@
/*
* Connector and encoder types.
*/
-static struct drm_conn_prop_enum_list drm_connector_enum_list[] =
-{ { DRM_MODE_CONNECTOR_Unknown, "Unknown" },
+static struct drm_conn_prop_enum_list drm_connector_enum_list[] = {
+ { DRM_MODE_CONNECTOR_Unknown, "Unknown" },
{ DRM_MODE_CONNECTOR_VGA, "VGA" },
{ DRM_MODE_CONNECTOR_DVII, "DVI-I" },
{ DRM_MODE_CONNECTOR_DVID, "DVI-D" },
@@ -174,8 +169,8 @@
{ DRM_MODE_CONNECTOR_DSI, "DSI" },
};
-static const struct drm_prop_enum_list drm_encoder_enum_list[] =
-{ { DRM_MODE_ENCODER_NONE, "None" },
+static const struct drm_prop_enum_list drm_encoder_enum_list[] = {
+ { DRM_MODE_ENCODER_NONE, "None" },
{ DRM_MODE_ENCODER_DAC, "DAC" },
{ DRM_MODE_ENCODER_TMDS, "TMDS" },
{ DRM_MODE_ENCODER_LVDS, "LVDS" },
@@ -185,8 +180,7 @@
{ DRM_MODE_ENCODER_DPMST, "DP MST" },
};
-static const struct drm_prop_enum_list drm_subpixel_enum_list[] =
-{
+static const struct drm_prop_enum_list drm_subpixel_enum_list[] = {
{ SubPixelUnknown, "Unknown" },
{ SubPixelHorizontalRGB, "Horizontal RGB" },
{ SubPixelHorizontalBGR, "Horizontal BGR" },
@@ -837,6 +831,7 @@
const struct drm_connector_funcs *funcs,
int connector_type)
{
+ struct drm_mode_config *config = &dev->mode_config;
int ret;
struct ida *connector_ida =
&drm_connector_enum_list[connector_type].ida;
@@ -875,16 +870,20 @@
/* We should add connectors at the end to avoid upsetting the connector
* index too much. */
- list_add_tail(&connector->head, &dev->mode_config.connector_list);
- dev->mode_config.num_connector++;
+ list_add_tail(&connector->head, &config->connector_list);
+ config->num_connector++;
if (connector_type != DRM_MODE_CONNECTOR_VIRTUAL)
drm_object_attach_property(&connector->base,
- dev->mode_config.edid_property,
+ config->edid_property,
0);
drm_object_attach_property(&connector->base,
- dev->mode_config.dpms_property, 0);
+ config->dpms_property, 0);
+
+ if (drm_core_check_feature(dev, DRIVER_ATOMIC)) {
+ drm_object_attach_property(&connector->base, config->prop_crtc_id, 0);
+ }
connector->debugfs_entry = NULL;
@@ -1142,6 +1141,7 @@
void drm_encoder_cleanup(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
+
drm_modeset_lock_all(dev);
drm_mode_object_put(dev, &encoder->base);
kfree(encoder->name);
@@ -1174,6 +1174,7 @@
const uint32_t *formats, uint32_t format_count,
enum drm_plane_type type)
{
+ struct drm_mode_config *config = &dev->mode_config;
int ret;
ret = drm_mode_object_get(dev, &plane->base, DRM_MODE_OBJECT_PLANE);
@@ -1185,8 +1186,8 @@
plane->base.properties = &plane->properties;
plane->dev = dev;
plane->funcs = funcs;
- plane->format_types = kmalloc(sizeof(uint32_t) * format_count,
- GFP_KERNEL);
+ plane->format_types = kmalloc_array(format_count, sizeof(uint32_t),
+ GFP_KERNEL);
if (!plane->format_types) {
DRM_DEBUG_KMS("out of memory when allocating plane\n");
drm_mode_object_put(dev, &plane->base);
@@ -1198,15 +1199,28 @@
plane->possible_crtcs = possible_crtcs;
plane->type = type;
- list_add_tail(&plane->head, &dev->mode_config.plane_list);
- dev->mode_config.num_total_plane++;
+ list_add_tail(&plane->head, &config->plane_list);
+ config->num_total_plane++;
if (plane->type == DRM_PLANE_TYPE_OVERLAY)
- dev->mode_config.num_overlay_plane++;
+ config->num_overlay_plane++;
drm_object_attach_property(&plane->base,
- dev->mode_config.plane_type_property,
+ config->plane_type_property,
plane->type);
+ if (drm_core_check_feature(dev, DRIVER_ATOMIC)) {
+ drm_object_attach_property(&plane->base, config->prop_fb_id, 0);
+ drm_object_attach_property(&plane->base, config->prop_crtc_id, 0);
+ drm_object_attach_property(&plane->base, config->prop_crtc_x, 0);
+ drm_object_attach_property(&plane->base, config->prop_crtc_y, 0);
+ drm_object_attach_property(&plane->base, config->prop_crtc_w, 0);
+ drm_object_attach_property(&plane->base, config->prop_crtc_h, 0);
+ drm_object_attach_property(&plane->base, config->prop_src_x, 0);
+ drm_object_attach_property(&plane->base, config->prop_src_y, 0);
+ drm_object_attach_property(&plane->base, config->prop_src_w, 0);
+ drm_object_attach_property(&plane->base, config->prop_src_h, 0);
+ }
+
return 0;
}
EXPORT_SYMBOL(drm_universal_plane_init);
@@ -1328,50 +1342,109 @@
}
EXPORT_SYMBOL(drm_plane_force_disable);
-static int drm_mode_create_standard_connector_properties(struct drm_device *dev)
+static int drm_mode_create_standard_properties(struct drm_device *dev)
{
- struct drm_property *edid;
- struct drm_property *dpms;
- struct drm_property *dev_path;
+ struct drm_property *prop;
/*
* Standard properties (apply to all connectors)
*/
- edid = drm_property_create(dev, DRM_MODE_PROP_BLOB |
+ prop = drm_property_create(dev, DRM_MODE_PROP_BLOB |
DRM_MODE_PROP_IMMUTABLE,
"EDID", 0);
- dev->mode_config.edid_property = edid;
+ if (!prop)
+ return -ENOMEM;
+ dev->mode_config.edid_property = prop;
- dpms = drm_property_create_enum(dev, 0,
+ prop = drm_property_create_enum(dev, 0,
"DPMS", drm_dpms_enum_list,
ARRAY_SIZE(drm_dpms_enum_list));
- dev->mode_config.dpms_property = dpms;
+ if (!prop)
+ return -ENOMEM;
+ dev->mode_config.dpms_property = prop;
- dev_path = drm_property_create(dev,
- DRM_MODE_PROP_BLOB |
- DRM_MODE_PROP_IMMUTABLE,
- "PATH", 0);
- dev->mode_config.path_property = dev_path;
+ prop = drm_property_create(dev,
+ DRM_MODE_PROP_BLOB |
+ DRM_MODE_PROP_IMMUTABLE,
+ "PATH", 0);
+ if (!prop)
+ return -ENOMEM;
+ dev->mode_config.path_property = prop;
- dev->mode_config.tile_property = drm_property_create(dev,
- DRM_MODE_PROP_BLOB |
- DRM_MODE_PROP_IMMUTABLE,
- "TILE", 0);
+ prop = drm_property_create(dev,
+ DRM_MODE_PROP_BLOB |
+ DRM_MODE_PROP_IMMUTABLE,
+ "TILE", 0);
+ if (!prop)
+ return -ENOMEM;
+ dev->mode_config.tile_property = prop;
- return 0;
-}
-
-static int drm_mode_create_standard_plane_properties(struct drm_device *dev)
-{
- struct drm_property *type;
-
- /*
- * Standard properties (apply to all planes)
- */
- type = drm_property_create_enum(dev, DRM_MODE_PROP_IMMUTABLE,
+ prop = drm_property_create_enum(dev, DRM_MODE_PROP_IMMUTABLE,
"type", drm_plane_type_enum_list,
ARRAY_SIZE(drm_plane_type_enum_list));
- dev->mode_config.plane_type_property = type;
+ if (!prop)
+ return -ENOMEM;
+ dev->mode_config.plane_type_property = prop;
+
+ prop = drm_property_create_range(dev, DRM_MODE_PROP_ATOMIC,
+ "SRC_X", 0, UINT_MAX);
+ if (!prop)
+ return -ENOMEM;
+ dev->mode_config.prop_src_x = prop;
+
+ prop = drm_property_create_range(dev, DRM_MODE_PROP_ATOMIC,
+ "SRC_Y", 0, UINT_MAX);
+ if (!prop)
+ return -ENOMEM;
+ dev->mode_config.prop_src_y = prop;
+
+ prop = drm_property_create_range(dev, DRM_MODE_PROP_ATOMIC,
+ "SRC_W", 0, UINT_MAX);
+ if (!prop)
+ return -ENOMEM;
+ dev->mode_config.prop_src_w = prop;
+
+ prop = drm_property_create_range(dev, DRM_MODE_PROP_ATOMIC,
+ "SRC_H", 0, UINT_MAX);
+ if (!prop)
+ return -ENOMEM;
+ dev->mode_config.prop_src_h = prop;
+
+ prop = drm_property_create_signed_range(dev, DRM_MODE_PROP_ATOMIC,
+ "CRTC_X", INT_MIN, INT_MAX);
+ if (!prop)
+ return -ENOMEM;
+ dev->mode_config.prop_crtc_x = prop;
+
+ prop = drm_property_create_signed_range(dev, DRM_MODE_PROP_ATOMIC,
+ "CRTC_Y", INT_MIN, INT_MAX);
+ if (!prop)
+ return -ENOMEM;
+ dev->mode_config.prop_crtc_y = prop;
+
+ prop = drm_property_create_range(dev, DRM_MODE_PROP_ATOMIC,
+ "CRTC_W", 0, INT_MAX);
+ if (!prop)
+ return -ENOMEM;
+ dev->mode_config.prop_crtc_w = prop;
+
+ prop = drm_property_create_range(dev, DRM_MODE_PROP_ATOMIC,
+ "CRTC_H", 0, INT_MAX);
+ if (!prop)
+ return -ENOMEM;
+ dev->mode_config.prop_crtc_h = prop;
+
+ prop = drm_property_create_object(dev, DRM_MODE_PROP_ATOMIC,
+ "FB_ID", DRM_MODE_OBJECT_FB);
+ if (!prop)
+ return -ENOMEM;
+ dev->mode_config.prop_fb_id = prop;
+
+ prop = drm_property_create_object(dev, DRM_MODE_PROP_ATOMIC,
+ "CRTC_ID", DRM_MODE_OBJECT_CRTC);
+ if (!prop)
+ return -ENOMEM;
+ dev->mode_config.prop_crtc_id = prop;
return 0;
}
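The differing ranges reflect the coordinate spaces involved: the SRC_* ranges are sized for the 16.16 fixed-point source coordinates used by the legacy setplane ioctl, while the CRTC_* properties take integer CRTC coordinates. A plane scanning out a full 1920x1080 buffer would therefore use values along the lines of:

	src_w = 1920 << 16;	src_h = 1080 << 16;	/* 16.16 fixed point */
	crtc_w = 1920;		crtc_h = 1080;		/* integer pixels */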
@@ -1599,7 +1672,7 @@
total_objects += dev->mode_config.num_encoder;
total_objects += dev->mode_config.num_bridge;
- group->id_list = kzalloc(total_objects * sizeof(uint32_t), GFP_KERNEL);
+ group->id_list = kcalloc(total_objects, sizeof(uint32_t), GFP_KERNEL);
if (!group->id_list)
return -ENOMEM;
@@ -1629,7 +1702,8 @@
struct drm_bridge *bridge;
int ret;
- if ((ret = drm_mode_group_init(dev, group)))
+ ret = drm_mode_group_init(dev, group);
+ if (ret)
return ret;
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
@@ -1996,6 +2070,44 @@
return connector->encoder;
}
+/* helper for getconnector and getproperties ioctls */
+static int get_properties(struct drm_mode_object *obj, bool atomic,
+ uint32_t __user *prop_ptr, uint64_t __user *prop_values,
+ uint32_t *arg_count_props)
+{
+ int props_count;
+ int i, ret, copied;
+
+ props_count = obj->properties->count;
+ if (!atomic)
+ props_count -= obj->properties->atomic_count;
+
+ if ((*arg_count_props >= props_count) && props_count) {
+ for (i = 0, copied = 0; copied < props_count; i++) {
+ struct drm_property *prop = obj->properties->properties[i];
+ uint64_t val;
+
+ if ((prop->flags & DRM_MODE_PROP_ATOMIC) && !atomic)
+ continue;
+
+ ret = drm_object_property_get_value(obj, prop, &val);
+ if (ret)
+ return ret;
+
+ if (put_user(prop->base.id, prop_ptr + copied))
+ return -EFAULT;
+
+ if (put_user(val, prop_values + copied))
+ return -EFAULT;
+
+ copied++;
+ }
+ }
+ *arg_count_props = props_count;
+
+ return 0;
+}
+
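Both callers preserve the existing two-pass convention; roughly, from the userspace side (hypothetical plane_id, error handling elided):

	struct drm_mode_obj_get_properties arg = {
		.obj_id = plane_id,
		.obj_type = DRM_MODE_OBJECT_PLANE,
	};

	drmIoctl(fd, DRM_IOCTL_MODE_OBJ_GETPROPERTIES, &arg);	/* 1st pass: sizes */
	/* allocate arg.count_props entries, point props_ptr and
	 * prop_values_ptr at them, then repeat the ioctl to fill them */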
/**
* drm_mode_getconnector - get connector configuration
* @dev: drm device for the ioctl
@@ -2017,15 +2129,12 @@
struct drm_encoder *encoder;
struct drm_display_mode *mode;
int mode_count = 0;
- int props_count = 0;
int encoders_count = 0;
int ret = 0;
int copied = 0;
int i;
struct drm_mode_modeinfo u_mode;
struct drm_mode_modeinfo __user *mode_ptr;
- uint32_t __user *prop_ptr;
- uint64_t __user *prop_values;
uint32_t __user *encoder_ptr;
if (!drm_core_check_feature(dev, DRIVER_MODESET))
@@ -2036,6 +2145,7 @@
DRM_DEBUG_KMS("[CONNECTOR:%d:?]\n", out_resp->connector_id);
mutex_lock(&dev->mode_config.mutex);
+ drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
connector = drm_connector_find(dev, out_resp->connector_id);
if (!connector) {
@@ -2043,13 +2153,9 @@
goto out;
}
- props_count = connector->properties.count;
-
- for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
- if (connector->encoder_ids[i] != 0) {
+ for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++)
+ if (connector->encoder_ids[i] != 0)
encoders_count++;
- }
- }
if (out_resp->count_modes == 0) {
connector->funcs->fill_modes(connector,
@@ -2069,14 +2175,11 @@
out_resp->mm_height = connector->display_info.height_mm;
out_resp->subpixel = connector->display_info.subpixel_order;
out_resp->connection = connector->status;
- drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
-
encoder = drm_connector_get_encoder(connector);
if (encoder)
out_resp->encoder_id = encoder->base.id;
else
out_resp->encoder_id = 0;
- drm_modeset_unlock(&dev->mode_config.connection_mutex);
/*
* This ioctl is called twice, once to determine how much space is
@@ -2100,26 +2203,12 @@
}
out_resp->count_modes = mode_count;
- if ((out_resp->count_props >= props_count) && props_count) {
- copied = 0;
- prop_ptr = (uint32_t __user *)(unsigned long)(out_resp->props_ptr);
- prop_values = (uint64_t __user *)(unsigned long)(out_resp->prop_values_ptr);
- for (i = 0; i < connector->properties.count; i++) {
- if (put_user(connector->properties.ids[i],
- prop_ptr + copied)) {
- ret = -EFAULT;
- goto out;
- }
-
- if (put_user(connector->properties.values[i],
- prop_values + copied)) {
- ret = -EFAULT;
- goto out;
- }
- copied++;
- }
- }
- out_resp->count_props = props_count;
+ ret = get_properties(&connector->base, file_priv->atomic,
+ (uint32_t __user *)(unsigned long)(out_resp->props_ptr),
+ (uint64_t __user *)(unsigned long)(out_resp->prop_values_ptr),
+ &out_resp->count_props);
+ if (ret)
+ goto out;
if ((out_resp->count_encoders >= encoders_count) && encoders_count) {
copied = 0;
@@ -2138,6 +2227,7 @@
out_resp->count_encoders = encoders_count;
out:
+ drm_modeset_unlock(&dev->mode_config.connection_mutex);
mutex_unlock(&dev->mode_config.mutex);
return ret;
@@ -2529,7 +2619,7 @@
*
* This is a little helper to wrap internal calls to the ->set_config driver
* interface. The only thing it adds is correct refcounting dance.
- *
+ *
* Returns:
* Zero on success, negative errno on failure.
*/
@@ -2569,6 +2659,27 @@
EXPORT_SYMBOL(drm_mode_set_config_internal);
/**
+ * drm_crtc_get_hv_timing - Fetches hdisplay/vdisplay for given mode
+ * @mode: mode to query
+ * @hdisplay: hdisplay value to fill in
+ * @vdisplay: vdisplay value to fill in
+ *
+ * The vdisplay value will be doubled if the specified mode is a stereo mode of
+ * the appropriate layout.
+ */
+void drm_crtc_get_hv_timing(const struct drm_display_mode *mode,
+ int *hdisplay, int *vdisplay)
+{
+ struct drm_display_mode adjusted;
+
+ drm_mode_copy(&adjusted, mode);
+ drm_mode_set_crtcinfo(&adjusted, CRTC_STEREO_DOUBLE_ONLY);
+ *hdisplay = adjusted.crtc_hdisplay;
+ *vdisplay = adjusted.crtc_vdisplay;
+}
+EXPORT_SYMBOL(drm_crtc_get_hv_timing);
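A short sketch of the intended use, mirroring the drm_crtc_check_viewport() conversion below:

	int hdisplay, vdisplay;

	drm_crtc_get_hv_timing(mode, &hdisplay, &vdisplay);
	/* hdisplay/vdisplay now include stereo doubling where applicable
	 * and can be compared against the framebuffer viewport */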
+
+/**
* drm_crtc_check_viewport - Checks that a framebuffer is big enough for the
* CRTC viewport
* @crtc: CRTC that framebuffer will be displayed on
@@ -2585,16 +2696,7 @@
{
int hdisplay, vdisplay;
- hdisplay = mode->hdisplay;
- vdisplay = mode->vdisplay;
-
- if (drm_mode_is_stereo(mode)) {
- struct drm_display_mode adjusted = *mode;
-
- drm_mode_set_crtcinfo(&adjusted, CRTC_STEREO_DOUBLE);
- hdisplay = adjusted.crtc_hdisplay;
- vdisplay = adjusted.crtc_vdisplay;
- }
+ drm_crtc_get_hv_timing(mode, &hdisplay, &vdisplay);
if (crtc->invert_dimensions)
swap(hdisplay, vdisplay);
@@ -2690,6 +2792,12 @@
goto out;
}
+ mode->status = drm_mode_validate_basic(mode);
+ if (mode->status != MODE_OK) {
+ ret = -EINVAL;
+ goto out;
+ }
+
drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V);
ret = drm_crtc_check_viewport(crtc, crtc_req->x, crtc_req->y,
@@ -2721,9 +2829,9 @@
goto out;
}
- connector_set = kmalloc(crtc_req->count_connectors *
- sizeof(struct drm_connector *),
- GFP_KERNEL);
+ connector_set = kmalloc_array(crtc_req->count_connectors,
+ sizeof(struct drm_connector *),
+ GFP_KERNEL);
if (!connector_set) {
ret = -ENOMEM;
goto out;
@@ -2968,6 +3076,7 @@
void *data, struct drm_file *file_priv)
{
struct drm_mode_cursor2 *req = data;
+
return drm_mode_cursor_common(dev, req, file_priv);
}
@@ -3415,7 +3524,7 @@
ret = -EINVAL;
goto out_err1;
}
- clips = kzalloc(num_clips * sizeof(*clips), GFP_KERNEL);
+ clips = kcalloc(num_clips, sizeof(*clips), GFP_KERNEL);
if (!clips) {
ret = -ENOMEM;
goto out_err1;
@@ -3516,7 +3625,8 @@
property->dev = dev;
if (num_values) {
- property->values = kzalloc(sizeof(uint64_t)*num_values, GFP_KERNEL);
+ property->values = kcalloc(num_values, sizeof(uint64_t),
+ GFP_KERNEL);
if (!property->values)
goto fail;
}
@@ -3822,9 +3932,11 @@
return;
}
- obj->properties->ids[count] = property->base.id;
+ obj->properties->properties[count] = property;
obj->properties->values[count] = init_val;
obj->properties->count++;
+ if (property->flags & DRM_MODE_PROP_ATOMIC)
+ obj->properties->atomic_count++;
}
EXPORT_SYMBOL(drm_object_attach_property);
@@ -3847,7 +3959,7 @@
int i;
for (i = 0; i < obj->properties->count; i++) {
- if (obj->properties->ids[i] == property->base.id) {
+ if (obj->properties->properties[i] == property) {
obj->properties->values[i] = val;
return 0;
}
@@ -3876,8 +3988,16 @@
{
int i;
+ /* read-only properties bypass atomic mechanism and still store
+ * their value in obj->properties->values[], mostly to avoid
+ * having to deal with EDID and similar props in atomic paths:
+ */
+ if (drm_core_check_feature(property->dev, DRIVER_ATOMIC) &&
+ !(property->flags & DRM_MODE_PROP_IMMUTABLE))
+ return drm_atomic_get_property(obj, property, val);
+
for (i = 0; i < obj->properties->count; i++) {
- if (obj->properties->ids[i] == property->base.id) {
+ if (obj->properties->properties[i] == property) {
*val = obj->properties->values[i];
return 0;
}
@@ -4057,7 +4177,7 @@
if (out_resp->length == blob->length) {
blob_ptr = (void __user *)(unsigned long)out_resp->data;
- if (copy_to_user(blob_ptr, blob->data, blob->length)){
+ if (copy_to_user(blob_ptr, blob->data, blob->length)) {
ret = -EFAULT;
goto done;
}
@@ -4193,25 +4313,38 @@
}
EXPORT_SYMBOL(drm_mode_connector_update_edid_property);
-static bool drm_property_change_is_valid(struct drm_property *property,
- uint64_t value)
+/* Some properties could refer to dynamic refcnt'd objects, or things that
+ * need special locking to handle lifetime issues (ie. to ensure the prop
+ * value doesn't become invalid part way through the property update due to
+ * race). The value returned by reference via 'ref' should be passed back
+ * to drm_property_change_valid_put() after the property is set (and the
+ * object to which the property is attached has a chance to take its own
+ * reference).
+ */
+bool drm_property_change_valid_get(struct drm_property *property,
+ uint64_t value, struct drm_mode_object **ref)
{
+ int i;
+
if (property->flags & DRM_MODE_PROP_IMMUTABLE)
return false;
+ *ref = NULL;
+
if (drm_property_type_is(property, DRM_MODE_PROP_RANGE)) {
if (value < property->values[0] || value > property->values[1])
return false;
return true;
} else if (drm_property_type_is(property, DRM_MODE_PROP_SIGNED_RANGE)) {
int64_t svalue = U642I64(value);
+
if (svalue < U642I64(property->values[0]) ||
svalue > U642I64(property->values[1]))
return false;
return true;
} else if (drm_property_type_is(property, DRM_MODE_PROP_BITMASK)) {
- int i;
uint64_t valid_mask = 0;
+
for (i = 0; i < property->num_values; i++)
valid_mask |= (1ULL << property->values[i]);
return !(value & ~valid_mask);
@@ -4219,25 +4352,40 @@
/* Only the driver knows */
return true;
} else if (drm_property_type_is(property, DRM_MODE_PROP_OBJECT)) {
- struct drm_mode_object *obj;
/* a zero value for an object property translates to null: */
if (value == 0)
return true;
- /*
- * NOTE: use _object_find() directly to bypass restriction on
- * looking up refcnt'd objects (ie. fb's). For a refcnt'd
- * object this could race against object finalization, so it
- * simply tells us that the object *was* valid. Which is good
- * enough.
- */
- obj = _object_find(property->dev, value, property->values[0]);
- return obj != NULL;
- } else {
- int i;
- for (i = 0; i < property->num_values; i++)
- if (property->values[i] == value)
+
+ /* handle refcnt'd objects specially: */
+ if (property->values[0] == DRM_MODE_OBJECT_FB) {
+ struct drm_framebuffer *fb;
+ fb = drm_framebuffer_lookup(property->dev, value);
+ if (fb) {
+ *ref = &fb->base;
return true;
- return false;
+ } else {
+ return false;
+ }
+ } else {
+ return _object_find(property->dev, value, property->values[0]) != NULL;
+ }
+ }
+
+ for (i = 0; i < property->num_values; i++)
+ if (property->values[i] == value)
+ return true;
+ return false;
+}
+
+void drm_property_change_valid_put(struct drm_property *property,
+ struct drm_mode_object *ref)
+{
+ if (!ref)
+ return;
+
+ if (drm_property_type_is(property, DRM_MODE_PROP_OBJECT)) {
+ if (property->values[0] == DRM_MODE_OBJECT_FB)
+ drm_framebuffer_unreference(obj_to_fb(ref));
}
}
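Condensed from the drm_mode_obj_set_property_ioctl() update below, the intended pairing is:

	struct drm_mode_object *ref;

	if (!drm_property_change_valid_get(property, value, &ref))
		return -EINVAL;

	/* ... apply the value; the object the property is attached to may
	 * take its own reference to a looked-up framebuffer here ... */

	drm_property_change_valid_put(property, ref);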
@@ -4356,11 +4504,6 @@
struct drm_mode_obj_get_properties *arg = data;
struct drm_mode_object *obj;
int ret = 0;
- int i;
- int copied = 0;
- int props_count = 0;
- uint32_t __user *props_ptr;
- uint64_t __user *prop_values_ptr;
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
@@ -4377,30 +4520,11 @@
goto out;
}
- props_count = obj->properties->count;
+ ret = get_properties(obj, file_priv->atomic,
+ (uint32_t __user *)(unsigned long)(arg->props_ptr),
+ (uint64_t __user *)(unsigned long)(arg->prop_values_ptr),
+ &arg->count_props);
- /* This ioctl is called twice, once to determine how much space is
- * needed, and the 2nd time to fill it. */
- if ((arg->count_props >= props_count) && props_count) {
- copied = 0;
- props_ptr = (uint32_t __user *)(unsigned long)(arg->props_ptr);
- prop_values_ptr = (uint64_t __user *)(unsigned long)
- (arg->prop_values_ptr);
- for (i = 0; i < props_count; i++) {
- if (put_user(obj->properties->ids[i],
- props_ptr + copied)) {
- ret = -EFAULT;
- goto out;
- }
- if (put_user(obj->properties->values[i],
- prop_values_ptr + copied)) {
- ret = -EFAULT;
- goto out;
- }
- copied++;
- }
- }
- arg->count_props = props_count;
out:
drm_modeset_unlock_all(dev);
return ret;
@@ -4429,8 +4553,8 @@
struct drm_mode_object *arg_obj;
struct drm_mode_object *prop_obj;
struct drm_property *property;
- int ret = -EINVAL;
- int i;
+ int i, ret = -EINVAL;
+ struct drm_mode_object *ref;
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
@@ -4446,7 +4570,7 @@
goto out;
for (i = 0; i < arg_obj->properties->count; i++)
- if (arg_obj->properties->ids[i] == arg->prop_id)
+ if (arg_obj->properties->properties[i]->base.id == arg->prop_id)
break;
if (i == arg_obj->properties->count)
@@ -4460,7 +4584,7 @@
}
property = obj_to_property(prop_obj);
- if (!drm_property_change_is_valid(property, arg->value))
+ if (!drm_property_change_valid_get(property, arg->value, &ref))
goto out;
switch (arg_obj->type) {
@@ -4477,6 +4601,8 @@
break;
}
+ drm_property_change_valid_put(property, ref);
+
out:
drm_modeset_unlock_all(dev);
return ret;
@@ -4526,7 +4652,8 @@
{
crtc->gamma_size = gamma_size;
- crtc->gamma_store = kzalloc(gamma_size * sizeof(uint16_t) * 3, GFP_KERNEL);
+ crtc->gamma_store = kcalloc(gamma_size, sizeof(uint16_t) * 3,
+ GFP_KERNEL);
if (!crtc->gamma_store) {
crtc->gamma_size = 0;
return -ENOMEM;
@@ -4741,23 +4868,23 @@
if (page_flip->flags & DRM_MODE_PAGE_FLIP_EVENT) {
ret = -ENOMEM;
spin_lock_irqsave(&dev->event_lock, flags);
- if (file_priv->event_space < sizeof e->event) {
+ if (file_priv->event_space < sizeof(e->event)) {
spin_unlock_irqrestore(&dev->event_lock, flags);
goto out;
}
- file_priv->event_space -= sizeof e->event;
+ file_priv->event_space -= sizeof(e->event);
spin_unlock_irqrestore(&dev->event_lock, flags);
- e = kzalloc(sizeof *e, GFP_KERNEL);
+ e = kzalloc(sizeof(*e), GFP_KERNEL);
if (e == NULL) {
spin_lock_irqsave(&dev->event_lock, flags);
- file_priv->event_space += sizeof e->event;
+ file_priv->event_space += sizeof(e->event);
spin_unlock_irqrestore(&dev->event_lock, flags);
goto out;
}
e->event.base.type = DRM_EVENT_FLIP_COMPLETE;
- e->event.base.length = sizeof e->event;
+ e->event.base.length = sizeof(e->event);
e->event.user_data = page_flip->user_data;
e->base.event = &e->event.base;
e->base.file_priv = file_priv;
@@ -4770,7 +4897,7 @@
if (ret) {
if (page_flip->flags & DRM_MODE_PAGE_FLIP_EVENT) {
spin_lock_irqsave(&dev->event_lock, flags);
- file_priv->event_space += sizeof e->event;
+ file_priv->event_space += sizeof(e->event);
spin_unlock_irqrestore(&dev->event_lock, flags);
kfree(e);
}
@@ -5220,8 +5347,7 @@
idr_init(&dev->mode_config.tile_idr);
drm_modeset_lock_all(dev);
- drm_mode_create_standard_connector_properties(dev);
- drm_mode_create_standard_plane_properties(dev);
+ drm_mode_create_standard_properties(dev);
drm_modeset_unlock_all(dev);
/* Just to be sure */
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index d552708..b1979e7 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -946,6 +946,7 @@
crtc_state = kzalloc(sizeof(*crtc_state), GFP_KERNEL);
if (!crtc_state)
return -ENOMEM;
+ crtc_state->crtc = crtc;
crtc_state->enable = true;
crtc_state->planes_changed = true;
@@ -1005,6 +1006,7 @@
plane_state = kzalloc(sizeof(*plane_state), GFP_KERNEL);
if (!plane_state)
return -ENOMEM;
+ plane_state->plane = plane;
plane_state->crtc = crtc;
drm_atomic_set_fb_for_plane(plane_state, crtc->primary->fb);
diff --git a/drivers/gpu/drm/drm_crtc_internal.h b/drivers/gpu/drm/drm_crtc_internal.h
index a2945ee..247dc8b 100644
--- a/drivers/gpu/drm/drm_crtc_internal.h
+++ b/drivers/gpu/drm/drm_crtc_internal.h
@@ -36,3 +36,9 @@
void drm_mode_object_put(struct drm_device *dev,
struct drm_mode_object *object);
+/* drm_atomic.c */
+int drm_atomic_get_property(struct drm_mode_object *obj,
+ struct drm_property *property, uint64_t *val);
+int drm_mode_atomic_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file_priv);
+
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index 4f41377..d512134 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -40,15 +40,19 @@
unsigned int drm_debug = 0; /* 1 to enable debug output */
EXPORT_SYMBOL(drm_debug);
+bool drm_atomic = 0;
+
MODULE_AUTHOR(CORE_AUTHOR);
MODULE_DESCRIPTION(CORE_DESC);
MODULE_LICENSE("GPL and additional rights");
MODULE_PARM_DESC(debug, "Enable debug output");
+MODULE_PARM_DESC(atomic, "Enable experimental atomic KMS API");
MODULE_PARM_DESC(vblankoffdelay, "Delay until vblank irq auto-disable [msecs] (0: never disable, <0: disable immediately)");
MODULE_PARM_DESC(timestamp_precision_usec, "Max. error on timestamps [usecs]");
MODULE_PARM_DESC(timestamp_monotonic, "Use monotonic timestamps");
module_param_named(debug, drm_debug, int, 0600);
+module_param_named_unsafe(atomic, drm_atomic, bool, 0600);
static DEFINE_SPINLOCK(drm_minor_lock);
static struct idr drm_minors_idr;
diff --git a/drivers/gpu/drm/drm_info.c b/drivers/gpu/drm/drm_info.c
index 51efebd..f1b32f9 100644
--- a/drivers/gpu/drm/drm_info.c
+++ b/drivers/gpu/drm/drm_info.c
@@ -153,30 +153,6 @@
}
/**
- * Called when "/proc/dri/.../vblank" is read.
- */
-int drm_vblank_info(struct seq_file *m, void *data)
-{
- struct drm_info_node *node = (struct drm_info_node *) m->private;
- struct drm_device *dev = node->minor->dev;
- int crtc;
-
- mutex_lock(&dev->struct_mutex);
- for (crtc = 0; crtc < dev->num_crtcs; crtc++) {
- seq_printf(m, "CRTC %d enable: %d\n",
- crtc, atomic_read(&dev->vblank[crtc].refcount));
- seq_printf(m, "CRTC %d counter: %d\n",
- crtc, drm_vblank_count(dev, crtc));
- seq_printf(m, "CRTC %d last wait: %d\n",
- crtc, dev->vblank[crtc].last_wait);
- seq_printf(m, "CRTC %d in modeset: %d\n",
- crtc, dev->vblank[crtc].inmodeset);
- }
- mutex_unlock(&dev->struct_mutex);
- return 0;
-}
-
-/**
* Called when "/proc/dri/.../clients" is read.
*
*/
diff --git a/drivers/gpu/drm/drm_internal.h b/drivers/gpu/drm/drm_internal.h
index 7cc0a35..12a61d7 100644
--- a/drivers/gpu/drm/drm_internal.h
+++ b/drivers/gpu/drm/drm_internal.h
@@ -55,7 +55,6 @@
int drm_name_info(struct seq_file *m, void *data);
int drm_vm_info(struct seq_file *m, void *data);
int drm_bufs_info(struct seq_file *m, void *data);
-int drm_vblank_info(struct seq_file *m, void *data);
int drm_clients_info(struct seq_file *m, void* data);
int drm_gem_name_info(struct seq_file *m, void *data);
diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
index 00587a1..3785d66 100644
--- a/drivers/gpu/drm/drm_ioctl.c
+++ b/drivers/gpu/drm/drm_ioctl.c
@@ -32,6 +32,7 @@
#include <drm/drm_core.h>
#include "drm_legacy.h"
#include "drm_internal.h"
+#include "drm_crtc_internal.h"
#include <linux/pci.h>
#include <linux/export.h>
@@ -345,6 +346,17 @@
return -EINVAL;
file_priv->universal_planes = req->value;
break;
+ case DRM_CLIENT_CAP_ATOMIC:
+ /* for now, hide behind experimental drm.atomic moduleparam */
+ if (!drm_atomic)
+ return -EINVAL;
+ if (!drm_core_check_feature(dev, DRIVER_ATOMIC))
+ return -EINVAL;
+ if (req->value > 1)
+ return -EINVAL;
+ file_priv->atomic = req->value;
+ file_priv->universal_planes = req->value;
+ break;
default:
return -EINVAL;
}
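With the module parameter set (drm.atomic=1) on a DRIVER_ATOMIC driver, a client opts in through libdrm before the atomic ioctl becomes usable; note that enabling the atomic cap also flips on universal planes:

	drmSetClientCap(fd, DRM_CLIENT_CAP_ATOMIC, 1);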
@@ -620,6 +632,7 @@
DRM_IOCTL_DEF(DRM_IOCTL_MODE_OBJ_GETPROPERTIES, drm_mode_obj_get_properties_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_OBJ_SETPROPERTY, drm_mode_obj_set_property_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_CURSOR2, drm_mode_cursor2_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_ATOMIC, drm_mode_atomic_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
};
#define DRM_CORE_IOCTL_COUNT ARRAY_SIZE( drm_ioctls )
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index f5a5f18..75647e7 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -778,7 +778,7 @@
/**
* drm_get_last_vbltimestamp - retrieve raw timestamp for the most recent
- * vblank interval
+ * vblank interval
* @dev: DRM device
* @crtc: which CRTC's vblank timestamp to retrieve
* @tvblank: Pointer to target struct timeval which should receive the timestamp
@@ -830,6 +830,8 @@
* vblank events since the system was booted, including lost events due to
* modesetting activity.
*
+ * This is the legacy version of drm_crtc_vblank_count().
+ *
* Returns:
* The software vblank counter.
*/
@@ -844,6 +846,25 @@
EXPORT_SYMBOL(drm_vblank_count);
/**
+ * drm_crtc_vblank_count - retrieve "cooked" vblank counter value
+ * @crtc: which counter to retrieve
+ *
+ * Fetches the "cooked" vblank count value that represents the number of
+ * vblank events since the system was booted, including lost events due to
+ * modesetting activity.
+ *
+ * This is the native KMS version of drm_vblank_count().
+ *
+ * Returns:
+ * The software vblank counter.
+ */
+u32 drm_crtc_vblank_count(struct drm_crtc *crtc)
+{
+ return drm_vblank_count(crtc->dev, drm_crtc_index(crtc));
+}
+EXPORT_SYMBOL(drm_crtc_vblank_count);
+
+/**
* drm_vblank_count_and_time - retrieve "cooked" vblank counter value
* and the system timestamp corresponding to that vblank counter value.
*
@@ -904,12 +925,15 @@
*
* Updates sequence # and timestamp on event, and sends it to userspace.
* Caller must hold event lock.
+ *
+ * This is the legacy version of drm_crtc_send_vblank_event().
*/
void drm_send_vblank_event(struct drm_device *dev, int crtc,
struct drm_pending_vblank_event *e)
{
struct timeval now;
unsigned int seq;
+
if (crtc >= 0) {
seq = drm_vblank_count_and_time(dev, crtc, &now);
} else {
@@ -923,6 +947,23 @@
EXPORT_SYMBOL(drm_send_vblank_event);
/**
+ * drm_crtc_send_vblank_event - helper to send vblank event after pageflip
+ * @crtc: the source CRTC of the vblank event
+ * @e: the event to send
+ *
+ * Updates sequence # and timestamp on event, and sends it to userspace.
+ * Caller must hold event lock.
+ *
+ * This is the native KMS version of drm_send_vblank_event().
+ */
+void drm_crtc_send_vblank_event(struct drm_crtc *crtc,
+ struct drm_pending_vblank_event *e)
+{
+ drm_send_vblank_event(crtc->dev, drm_crtc_index(crtc), e);
+}
+EXPORT_SYMBOL(drm_crtc_send_vblank_event);
+
+/**
* drm_vblank_enable - enable the vblank interrupt on a CRTC
* @dev: DRM device
* @crtc: CRTC in question
@@ -1382,7 +1423,7 @@
unsigned int seq;
int ret;
- e = kzalloc(sizeof *e, GFP_KERNEL);
+ e = kzalloc(sizeof(*e), GFP_KERNEL);
if (e == NULL) {
ret = -ENOMEM;
goto err_put;
@@ -1391,7 +1432,7 @@
e->pipe = pipe;
e->base.pid = current->pid;
e->event.base.type = DRM_EVENT_VBLANK;
- e->event.base.length = sizeof e->event;
+ e->event.base.length = sizeof(e->event);
e->event.user_data = vblwait->request.signal;
e->base.event = &e->event.base;
e->base.file_priv = file_priv;
@@ -1411,12 +1452,12 @@
goto err_unlock;
}
- if (file_priv->event_space < sizeof e->event) {
+ if (file_priv->event_space < sizeof(e->event)) {
ret = -EBUSY;
goto err_unlock;
}
- file_priv->event_space -= sizeof e->event;
+ file_priv->event_space -= sizeof(e->event);
seq = drm_vblank_count_and_time(dev, pipe, &now);
if ((vblwait->request.type & _DRM_VBLANK_NEXTONMISS) &&
@@ -1594,6 +1635,8 @@
*
* Drivers should call this routine in their vblank interrupt handlers to
* update the vblank counter and send any signals that may be pending.
+ *
+ * This is the legacy version of drm_crtc_handle_vblank().
*/
bool drm_handle_vblank(struct drm_device *dev, int crtc)
{
@@ -1670,3 +1713,21 @@
return true;
}
EXPORT_SYMBOL(drm_handle_vblank);
+
+/**
+ * drm_crtc_handle_vblank - handle a vblank event
+ * @crtc: where this event occurred
+ *
+ * Drivers should call this routine in their vblank interrupt handlers to
+ * update the vblank counter and send any signals that may be pending.
+ *
+ * This is the native KMS version of drm_handle_vblank().
+ *
+ * Returns:
+ * True if the event was successfully handled, false on failure.
+ */
+bool drm_crtc_handle_vblank(struct drm_crtc *crtc)
+{
+ return drm_handle_vblank(crtc->dev, drm_crtc_index(crtc));
+}
+EXPORT_SYMBOL(drm_crtc_handle_vblank);
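A sketch of the native variants in use, assuming a hypothetical foo driver whose interrupt handler also completes page flips (the event must be sent with dev->event_lock held):

	static irqreturn_t foo_crtc_irq(int irq, void *data)
	{
		struct foo_crtc *fc = data;
		struct drm_crtc *crtc = &fc->base;

		drm_crtc_handle_vblank(crtc);

		spin_lock(&crtc->dev->event_lock);
		if (fc->event) {
			drm_crtc_send_vblank_event(crtc, fc->event);
			fc->event = NULL;
		}
		spin_unlock(&crtc->dev->event_lock);

		return IRQ_HANDLED;
	}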
diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
index 9731aeb..20d977a 100644
--- a/drivers/gpu/drm/drm_modes.c
+++ b/drivers/gpu/drm/drm_modes.c
@@ -779,6 +779,8 @@
* - The CRTC_STEREO_DOUBLE flag can be used to compute the timings for
* buffers containing two eyes (only adjust the timings when needed, eg. for
* "frame packing" or "side by side full").
+ * - The CRTC_NO_DBLSCAN and CRTC_NO_VSCAN flags request that adjustment *not*
+ * be performed for doublescan and vscan > 1 modes respectively.
*/
void drm_mode_set_crtcinfo(struct drm_display_mode *p, int adjust_flags)
{
@@ -805,18 +807,22 @@
}
}
- if (p->flags & DRM_MODE_FLAG_DBLSCAN) {
- p->crtc_vdisplay *= 2;
- p->crtc_vsync_start *= 2;
- p->crtc_vsync_end *= 2;
- p->crtc_vtotal *= 2;
+ if (!(adjust_flags & CRTC_NO_DBLSCAN)) {
+ if (p->flags & DRM_MODE_FLAG_DBLSCAN) {
+ p->crtc_vdisplay *= 2;
+ p->crtc_vsync_start *= 2;
+ p->crtc_vsync_end *= 2;
+ p->crtc_vtotal *= 2;
+ }
}
- if (p->vscan > 1) {
- p->crtc_vdisplay *= p->vscan;
- p->crtc_vsync_start *= p->vscan;
- p->crtc_vsync_end *= p->vscan;
- p->crtc_vtotal *= p->vscan;
+ if (!(adjust_flags & CRTC_NO_VSCAN)) {
+ if (p->vscan > 1) {
+ p->crtc_vdisplay *= p->vscan;
+ p->crtc_vsync_start *= p->vscan;
+ p->crtc_vsync_end *= p->vscan;
+ p->crtc_vtotal *= p->vscan;
+ }
}
if (adjust_flags & CRTC_STEREO_DOUBLE) {
@@ -946,9 +952,40 @@
EXPORT_SYMBOL(drm_mode_equal_no_clocks_no_stereo);
/**
+ * drm_mode_validate_basic - make sure the mode is somewhat sane
+ * @mode: mode to check
+ *
+ * Check that the mode timings are at least somewhat reasonable.
+ * Any hardware specific limits are left for each driver to check.
+ *
+ * Returns:
+ * The mode status
+ */
+enum drm_mode_status
+drm_mode_validate_basic(const struct drm_display_mode *mode)
+{
+ if (mode->clock == 0)
+ return MODE_CLOCK_LOW;
+
+ if (mode->hdisplay == 0 ||
+ mode->hsync_start < mode->hdisplay ||
+ mode->hsync_end < mode->hsync_start ||
+ mode->htotal < mode->hsync_end)
+ return MODE_H_ILLEGAL;
+
+ if (mode->vdisplay == 0 ||
+ mode->vsync_start < mode->vdisplay ||
+ mode->vsync_end < mode->vsync_start ||
+ mode->vtotal < mode->vsync_end)
+ return MODE_V_ILLEGAL;
+
+ return MODE_OK;
+}
+EXPORT_SYMBOL(drm_mode_validate_basic);
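For instance, a mode whose horizontal sync starts before the active region ends is rejected:

	struct drm_display_mode bad = {
		.clock = 148500,
		.hdisplay = 1920, .hsync_start = 1000,	/* < hdisplay: bogus */
		.hsync_end = 2052, .htotal = 2200,
		.vdisplay = 1080, .vsync_start = 1084,
		.vsync_end = 1089, .vtotal = 1125,
	};

	WARN_ON(drm_mode_validate_basic(&bad) != MODE_H_ILLEGAL);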
+
+/**
* drm_mode_validate_size - make sure modes adhere to size constraints
- * @dev: DRM device
- * @mode_list: list of modes to check
+ * @mode: mode to check
* @maxX: maximum width
* @maxY: maximum height
*
@@ -956,20 +993,21 @@
* limitations of the DRM device/connector. If a mode is too big its status
* member is updated with the appropriate validation failure code. The list
* itself is not changed.
+ *
+ * Returns:
+ * The mode status
*/
-void drm_mode_validate_size(struct drm_device *dev,
- struct list_head *mode_list,
- int maxX, int maxY)
+enum drm_mode_status
+drm_mode_validate_size(const struct drm_display_mode *mode,
+ int maxX, int maxY)
{
- struct drm_display_mode *mode;
+ if (maxX > 0 && mode->hdisplay > maxX)
+ return MODE_VIRTUAL_X;
- list_for_each_entry(mode, mode_list, head) {
- if (maxX > 0 && mode->hdisplay > maxX)
- mode->status = MODE_VIRTUAL_X;
+ if (maxY > 0 && mode->vdisplay > maxY)
+ return MODE_VIRTUAL_Y;
- if (maxY > 0 && mode->vdisplay > maxY)
- mode->status = MODE_VIRTUAL_Y;
- }
+ return MODE_OK;
}
EXPORT_SYMBOL(drm_mode_validate_size);
diff --git a/drivers/gpu/drm/drm_plane_helper.c b/drivers/gpu/drm/drm_plane_helper.c
index 18a1ac6..f24c4cf 100644
--- a/drivers/gpu/drm/drm_plane_helper.c
+++ b/drivers/gpu/drm/drm_plane_helper.c
@@ -142,6 +142,17 @@
{
int hscale, vscale;
+ if (!fb) {
+ *visible = false;
+ return 0;
+ }
+
+ /* crtc should only be NULL when disabling (i.e., !fb) */
+ if (WARN_ON(!crtc)) {
+ *visible = false;
+ return 0;
+ }
+
if (!crtc->enabled && !can_update_disabled) {
DRM_DEBUG_KMS("Cannot update plane of a disabled CRTC.\n");
return -EINVAL;
@@ -155,11 +166,6 @@
return -ERANGE;
}
- if (!fb) {
- *visible = false;
- return 0;
- }
-
*visible = drm_rect_clip_scaled(src, dest, clip, hscale, vscale);
if (!*visible)
/*
@@ -517,6 +523,7 @@
plane_state = kzalloc(sizeof(*plane_state), GFP_KERNEL);
if (!plane_state)
return -ENOMEM;
+ plane_state->plane = plane;
plane_state->crtc = crtc;
drm_atomic_set_fb_for_plane(plane_state, fb);
@@ -563,6 +570,7 @@
plane_state = kzalloc(sizeof(*plane_state), GFP_KERNEL);
if (!plane_state)
return -ENOMEM;
+ plane_state->plane = plane;
plane_state->crtc = NULL;
drm_atomic_set_fb_for_plane(plane_state, NULL);
diff --git a/drivers/gpu/drm/drm_probe_helper.c b/drivers/gpu/drm/drm_probe_helper.c
index 7483a47..2fbdcca 100644
--- a/drivers/gpu/drm/drm_probe_helper.c
+++ b/drivers/gpu/drm/drm_probe_helper.c
@@ -58,28 +58,23 @@
static bool drm_kms_helper_poll = true;
module_param_named(poll, drm_kms_helper_poll, bool, 0600);
-static void drm_mode_validate_flag(struct drm_connector *connector,
- int flags)
+static enum drm_mode_status
+drm_mode_validate_flag(const struct drm_display_mode *mode,
+ int flags)
{
- struct drm_display_mode *mode;
+ if ((mode->flags & DRM_MODE_FLAG_INTERLACE) &&
+ !(flags & DRM_MODE_FLAG_INTERLACE))
+ return MODE_NO_INTERLACE;
- if (flags == (DRM_MODE_FLAG_DBLSCAN | DRM_MODE_FLAG_INTERLACE |
- DRM_MODE_FLAG_3D_MASK))
- return;
+ if ((mode->flags & DRM_MODE_FLAG_DBLSCAN) &&
+ !(flags & DRM_MODE_FLAG_DBLSCAN))
+ return MODE_NO_DBLESCAN;
- list_for_each_entry(mode, &connector->modes, head) {
- if ((mode->flags & DRM_MODE_FLAG_INTERLACE) &&
- !(flags & DRM_MODE_FLAG_INTERLACE))
- mode->status = MODE_NO_INTERLACE;
- if ((mode->flags & DRM_MODE_FLAG_DBLSCAN) &&
- !(flags & DRM_MODE_FLAG_DBLSCAN))
- mode->status = MODE_NO_DBLESCAN;
- if ((mode->flags & DRM_MODE_FLAG_3D_MASK) &&
- !(flags & DRM_MODE_FLAG_3D_MASK))
- mode->status = MODE_NO_STEREO;
- }
+ if ((mode->flags & DRM_MODE_FLAG_3D_MASK) &&
+ !(flags & DRM_MODE_FLAG_3D_MASK))
+ return MODE_NO_STEREO;
- return;
+ return MODE_OK;
}
static int drm_helper_probe_add_cmdline_mode(struct drm_connector *connector)
@@ -164,18 +159,22 @@
drm_mode_connector_list_update(connector, merge_type_bits);
- if (maxX && maxY)
- drm_mode_validate_size(dev, &connector->modes, maxX, maxY);
-
if (connector->interlace_allowed)
mode_flags |= DRM_MODE_FLAG_INTERLACE;
if (connector->doublescan_allowed)
mode_flags |= DRM_MODE_FLAG_DBLSCAN;
if (connector->stereo_allowed)
mode_flags |= DRM_MODE_FLAG_3D_MASK;
- drm_mode_validate_flag(connector, mode_flags);
list_for_each_entry(mode, &connector->modes, head) {
+ mode->status = drm_mode_validate_basic(mode);
+
+ if (mode->status == MODE_OK)
+ mode->status = drm_mode_validate_size(mode, maxX, maxY);
+
+ if (mode->status == MODE_OK)
+ mode->status = drm_mode_validate_flag(mode, mode_flags);
+
if (mode->status == MODE_OK && connector_funcs->mode_valid)
mode->status = connector_funcs->mode_valid(connector,
mode);
diff --git a/drivers/gpu/drm/i2c/adv7511.c b/drivers/gpu/drm/i2c/adv7511.c
index faf1c0c..fa140e0 100644
--- a/drivers/gpu/drm/i2c/adv7511.c
+++ b/drivers/gpu/drm/i2c/adv7511.c
@@ -644,9 +644,6 @@
if (mode->clock > 165000)
return MODE_CLOCK_HIGH;
- if (mode->flags & DRM_MODE_FLAG_INTERLACE)
- return MODE_NO_INTERLACE;
-
return MODE_OK;
}
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index e4083e4..1849ffa 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -19,6 +19,7 @@
# GEM code
i915-y += i915_cmd_parser.o \
+ i915_gem_batch_pool.o \
i915_gem_context.o \
i915_gem_render_state.o \
i915_gem_debug.o \
@@ -47,6 +48,7 @@
i915-y += intel_audio.o \
intel_bios.o \
intel_display.o \
+ intel_fbc.o \
intel_fifo_underrun.o \
intel_frontbuffer.o \
intel_modes.o \
diff --git a/drivers/gpu/drm/i915/i915_cmd_parser.c b/drivers/gpu/drm/i915/i915_cmd_parser.c
index 22c992a..806e812 100644
--- a/drivers/gpu/drm/i915/i915_cmd_parser.c
+++ b/drivers/gpu/drm/i915/i915_cmd_parser.c
@@ -152,6 +152,7 @@
CMD( MI_PREDICATE, SMI, F, 1, S ),
CMD( MI_TOPOLOGY_FILTER, SMI, F, 1, S ),
CMD( MI_DISPLAY_FLIP, SMI, !F, 0xFF, R ),
+ CMD( MI_SET_APPID, SMI, F, 1, S ),
CMD( MI_SET_CONTEXT, SMI, !F, 0xFF, R ),
CMD( MI_URB_CLEAR, SMI, !F, 0xFF, S ),
CMD( MI_STORE_DWORD_IMM, SMI, !F, 0x3F, B,
@@ -210,6 +211,7 @@
CMD( MI_SET_PREDICATE, SMI, F, 1, S ),
CMD( MI_RS_CONTROL, SMI, F, 1, S ),
CMD( MI_URB_ATOMIC_ALLOC, SMI, F, 1, S ),
+ CMD( MI_SET_APPID, SMI, F, 1, S ),
CMD( MI_RS_CONTEXT, SMI, F, 1, S ),
CMD( MI_LOAD_SCAN_LINES_INCL, SMI, !F, 0x3F, M ),
CMD( MI_LOAD_SCAN_LINES_EXCL, SMI, !F, 0x3F, R ),
@@ -229,6 +231,7 @@
static const struct drm_i915_cmd_descriptor video_cmds[] = {
CMD( MI_ARB_ON_OFF, SMI, F, 1, R ),
+ CMD( MI_SET_APPID, SMI, F, 1, S ),
CMD( MI_STORE_DWORD_IMM, SMI, !F, 0xFF, B,
.bits = {{
.offset = 0,
@@ -272,6 +275,7 @@
static const struct drm_i915_cmd_descriptor vecs_cmds[] = {
CMD( MI_ARB_ON_OFF, SMI, F, 1, R ),
+ CMD( MI_SET_APPID, SMI, F, 1, S ),
CMD( MI_STORE_DWORD_IMM, SMI, !F, 0xFF, B,
.bits = {{
.offset = 0,
@@ -401,6 +405,7 @@
#define REG64(addr) (addr), (addr + sizeof(u32))
static const u32 gen7_render_regs[] = {
+ REG64(GPGPU_THREADS_DISPATCHED),
REG64(HS_INVOCATION_COUNT),
REG64(DS_INVOCATION_COUNT),
REG64(IA_VERTICES_COUNT),
@@ -481,13 +486,17 @@
u32 client = (cmd_header & INSTR_CLIENT_MASK) >> INSTR_CLIENT_SHIFT;
u32 subclient =
(cmd_header & INSTR_SUBCLIENT_MASK) >> INSTR_SUBCLIENT_SHIFT;
+ u32 op = (cmd_header & INSTR_26_TO_24_MASK) >> INSTR_26_TO_24_SHIFT;
if (client == INSTR_MI_CLIENT)
return 0x3F;
else if (client == INSTR_RC_CLIENT) {
- if (subclient == INSTR_MEDIA_SUBCLIENT)
- return 0xFFF;
- else
+ if (subclient == INSTR_MEDIA_SUBCLIENT) {
+ if (op == 6)
+ return 0xFFFF;
+ else
+ return 0xFFF;
+ } else
return 0xFF;
}
@@ -716,13 +725,13 @@
BUG_ON(!validate_cmds_sorted(ring, cmd_tables, cmd_table_count));
BUG_ON(!validate_regs_sorted(ring));
- if (hash_empty(ring->cmd_hash)) {
- ret = init_hash_table(ring, cmd_tables, cmd_table_count);
- if (ret) {
- DRM_ERROR("CMD: cmd_parser_init failed!\n");
- fini_hash_table(ring);
- return ret;
- }
+ WARN_ON(!hash_empty(ring->cmd_hash));
+
+ ret = init_hash_table(ring, cmd_tables, cmd_table_count);
+ if (ret) {
+ DRM_ERROR("CMD: cmd_parser_init failed!\n");
+ fini_hash_table(ring);
+ return ret;
}
ring->needs_cmd_parser = true;
@@ -840,6 +849,69 @@
return (u32*)addr;
}
+/* Returns a vmap'd pointer to dest_obj, which the caller must unmap */
+static u32 *copy_batch(struct drm_i915_gem_object *dest_obj,
+ struct drm_i915_gem_object *src_obj,
+ u32 batch_start_offset,
+ u32 batch_len)
+{
+ int ret = 0;
+ int needs_clflush = 0;
+ u32 *src_base, *dest_base = NULL;
+ u32 *src_addr, *dest_addr;
+ u32 offset = batch_start_offset / sizeof(*dest_addr);
+ u32 end = batch_start_offset + batch_len;
+
+ if (end > dest_obj->base.size || end > src_obj->base.size)
+ return ERR_PTR(-E2BIG);
+
+ ret = i915_gem_obj_prepare_shmem_read(src_obj, &needs_clflush);
+ if (ret) {
+ DRM_DEBUG_DRIVER("CMD: failed to prep read\n");
+ return ERR_PTR(ret);
+ }
+
+ src_base = vmap_batch(src_obj);
+ if (!src_base) {
+ DRM_DEBUG_DRIVER("CMD: Failed to vmap batch\n");
+ ret = -ENOMEM;
+ goto unpin_src;
+ }
+
+ src_addr = src_base + offset;
+
+ if (needs_clflush)
+ drm_clflush_virt_range((char *)src_addr, batch_len);
+
+ ret = i915_gem_object_set_to_cpu_domain(dest_obj, true);
+ if (ret) {
+ DRM_DEBUG_DRIVER("CMD: Failed to set batch CPU domain\n");
+ goto unmap_src;
+ }
+
+ dest_base = vmap_batch(dest_obj);
+ if (!dest_base) {
+ DRM_DEBUG_DRIVER("CMD: Failed to vmap shadow batch\n");
+ ret = -ENOMEM;
+ goto unmap_src;
+ }
+
+ dest_addr = dest_base + offset;
+
+ if (batch_start_offset != 0)
+ memset((u8 *)dest_base, 0, batch_start_offset);
+
+ memcpy(dest_addr, src_addr, batch_len);
+ memset((u8 *)dest_addr + batch_len, 0, dest_obj->base.size - end);
+
+unmap_src:
+ vunmap(src_base);
+unpin_src:
+ i915_gem_object_unpin_pages(src_obj);
+
+ return ret ? ERR_PTR(ret) : dest_base;
+}
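[Editor's note: for illustration, the layout copy_batch() produces can be reproduced with plain memcpy/memset on ordinary buffers. A minimal userspace sketch, assuming the same start/length bounds checks have already passed:]

	#include <stddef.h>
	#include <string.h>

	/* Zero both pads so stale bytes from a recycled shadow object can
	 * never be executed; only [start, start + len) carries commands.
	 * (An all-zero dword is MI_NOOP on these GPUs.) */
	static void shadow_copy(char *dst, size_t dst_size,
				const char *src, size_t start, size_t len)
	{
		memset(dst, 0, start);
		memcpy(dst + start, src + start, len);
		memset(dst + start + len, 0, dst_size - (start + len));
	}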
+
/**
* i915_needs_cmd_parser() - should a given ring use software command parsing?
* @ring: the ring in question
@@ -956,7 +1028,9 @@
* i915_parse_cmds() - parse a submitted batch buffer for privilege violations
* @ring: the ring on which the batch is to execute
* @batch_obj: the batch buffer in question
+ * @shadow_batch_obj: copy of the batch buffer in question
* @batch_start_offset: byte offset in the batch at which execution starts
+ * @batch_len: length of the commands in batch_obj
* @is_master: is the submitting process the drm master?
*
* Parses the specified batch buffer looking for privilege violations as
@@ -967,33 +1041,38 @@
*/
int i915_parse_cmds(struct intel_engine_cs *ring,
struct drm_i915_gem_object *batch_obj,
+ struct drm_i915_gem_object *shadow_batch_obj,
u32 batch_start_offset,
+ u32 batch_len,
bool is_master)
{
int ret = 0;
u32 *cmd, *batch_base, *batch_end;
struct drm_i915_cmd_descriptor default_desc = { 0 };
- int needs_clflush = 0;
bool oacontrol_set = false; /* OACONTROL tracking. See check_cmd() */
- ret = i915_gem_obj_prepare_shmem_read(batch_obj, &needs_clflush);
+ ret = i915_gem_obj_ggtt_pin(shadow_batch_obj, 4096, 0);
if (ret) {
- DRM_DEBUG_DRIVER("CMD: failed to prep read\n");
- return ret;
+ DRM_DEBUG_DRIVER("CMD: Failed to pin shadow batch\n");
+ return -1;
}
- batch_base = vmap_batch(batch_obj);
- if (!batch_base) {
- DRM_DEBUG_DRIVER("CMD: Failed to vmap batch\n");
- i915_gem_object_unpin_pages(batch_obj);
- return -ENOMEM;
+ batch_base = copy_batch(shadow_batch_obj, batch_obj,
+ batch_start_offset, batch_len);
+ if (IS_ERR(batch_base)) {
+ DRM_DEBUG_DRIVER("CMD: Failed to copy batch\n");
+ i915_gem_object_ggtt_unpin(shadow_batch_obj);
+ return PTR_ERR(batch_base);
}
- if (needs_clflush)
- drm_clflush_virt_range((char *)batch_base, batch_obj->base.size);
-
cmd = batch_base + (batch_start_offset / sizeof(*cmd));
- batch_end = cmd + (batch_obj->base.size / sizeof(*batch_end));
+
+ /*
+ * We use the batch length as size because the shadow object is as
+ * large or larger and copy_batch() will write MI_NOPs to the extra
+ * space. Parsing should be faster in some cases this way.
+ */
+ batch_end = cmd + (batch_len / sizeof(*batch_end));
while (cmd < batch_end) {
const struct drm_i915_cmd_descriptor *desc;
@@ -1053,8 +1132,7 @@
}
vunmap(batch_base);
-
- i915_gem_object_unpin_pages(batch_obj);
+ i915_gem_object_ggtt_unpin(shadow_batch_obj);
return ret;
}
@@ -1076,6 +1154,7 @@
* hardware parsing enabled (so does not allow new use cases).
* 2. Allow access to the MI_PREDICATE_SRC0 and
* MI_PREDICATE_SRC1 registers.
+ * 3. Allow access to the GPGPU_THREADS_DISPATCHED register.
*/
- return 2;
+ return 3;
}
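[Editor's note: userspace can discover the bumped version through the GETPARAM ioctl. A hedged sketch, assuming libdrm's i915_drm.h is available:]

	#include <sys/ioctl.h>
	#include <i915_drm.h>

	static int cmd_parser_version(int drm_fd)
	{
		int value = 0;
		struct drm_i915_getparam gp = {
			.param = I915_PARAM_CMD_PARSER_VERSION,
			.value = &value,
		};

		if (ioctl(drm_fd, DRM_IOCTL_I915_GETPARAM, &gp))
			return -1;
		return value;	/* reports 3 after this change */
	}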
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 779a275..e515aad 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -96,9 +96,7 @@
static const char *get_pin_flag(struct drm_i915_gem_object *obj)
{
- if (obj->user_pin_count > 0)
- return "P";
- else if (i915_gem_obj_is_pinned(obj))
+ if (i915_gem_obj_is_pinned(obj))
return "p";
else
return " ";
@@ -133,9 +131,9 @@
obj->base.size / 1024,
obj->base.read_domains,
obj->base.write_domain,
- obj->last_read_seqno,
- obj->last_write_seqno,
- obj->last_fenced_seqno,
+ i915_gem_request_get_seqno(obj->last_read_req),
+ i915_gem_request_get_seqno(obj->last_write_req),
+ i915_gem_request_get_seqno(obj->last_fenced_req),
i915_cache_level_str(to_i915(obj->base.dev), obj->cache_level),
obj->dirty ? " dirty" : "",
obj->madv == I915_MADV_DONTNEED ? " purgeable" : "");
@@ -154,8 +152,9 @@
seq_puts(m, " (pp");
else
seq_puts(m, " (g");
- seq_printf(m, "gtt offset: %08lx, size: %08lx)",
- vma->node.start, vma->node.size);
+ seq_printf(m, "gtt offset: %08lx, size: %08lx, type: %u)",
+ vma->node.start, vma->node.size,
+ vma->ggtt_view.type);
}
if (obj->stolen)
seq_printf(m, " (stolen: %08lx)", obj->stolen->start);
@@ -168,8 +167,9 @@
*t = '\0';
seq_printf(m, " (%s mappable)", s);
}
- if (obj->ring != NULL)
- seq_printf(m, " (%s)", obj->ring->name);
+ if (obj->last_read_req != NULL)
+ seq_printf(m, " (%s)",
+ i915_gem_request_get_ring(obj->last_read_req)->name);
if (obj->frontbuffer_bits)
seq_printf(m, " (frontbuffer: 0x%03x)", obj->frontbuffer_bits);
}
@@ -336,7 +336,7 @@
if (ppgtt->file_priv != stats->file_priv)
continue;
- if (obj->ring) /* XXX per-vma statistic */
+ if (obj->active) /* XXX per-vma statistic */
stats->active += obj->base.size;
else
stats->inactive += obj->base.size;
@@ -346,7 +346,7 @@
} else {
if (i915_gem_obj_ggtt_bound(obj)) {
stats->global += obj->base.size;
- if (obj->ring)
+ if (obj->active)
stats->active += obj->base.size;
else
stats->inactive += obj->base.size;
@@ -360,6 +360,33 @@
return 0;
}
+#define print_file_stats(m, name, stats) \
+ seq_printf(m, "%s: %u objects, %zu bytes (%zu active, %zu inactive, %zu global, %zu shared, %zu unbound)\n", \
+ name, \
+ stats.count, \
+ stats.total, \
+ stats.active, \
+ stats.inactive, \
+ stats.global, \
+ stats.shared, \
+ stats.unbound)
+
+static void print_batch_pool_stats(struct seq_file *m,
+ struct drm_i915_private *dev_priv)
+{
+ struct drm_i915_gem_object *obj;
+ struct file_stats stats;
+
+ memset(&stats, 0, sizeof(stats));
+
+ list_for_each_entry(obj,
+ &dev_priv->mm.batch_pool.cache_list,
+ batch_pool_list)
+ per_file_stats(0, obj, &stats);
+
+ print_file_stats(m, "batch pool", stats);
+}
+
#define count_vmas(list, member) do { \
list_for_each_entry(vma, list, member) { \
size += i915_gem_obj_ggtt_size(vma->obj); \
@@ -442,6 +469,9 @@
dev_priv->gtt.mappable_end - dev_priv->gtt.base.start);
seq_putc(m, '\n');
+ print_batch_pool_stats(m, dev_priv);
+
+ seq_putc(m, '\n');
list_for_each_entry_reverse(file, &dev->filelist, lhead) {
struct file_stats stats;
struct task_struct *task;
@@ -459,15 +489,7 @@
*/
rcu_read_lock();
task = pid_task(file->pid, PIDTYPE_PID);
- seq_printf(m, "%s: %u objects, %zu bytes (%zu active, %zu inactive, %zu global, %zu shared, %zu unbound)\n",
- task ? task->comm : "<unknown>",
- stats.count,
- stats.total,
- stats.active,
- stats.inactive,
- stats.global,
- stats.shared,
- stats.unbound);
+ print_file_stats(m, task ? task->comm : "<unknown>", stats);
rcu_read_unlock();
}
@@ -543,14 +565,16 @@
seq_printf(m, "Flip pending (waiting for vsync) on pipe %c (plane %c)\n",
pipe, plane);
}
- if (work->flip_queued_ring) {
+ if (work->flip_queued_req) {
+ struct intel_engine_cs *ring =
+ i915_gem_request_get_ring(work->flip_queued_req);
+
seq_printf(m, "Flip queued on %s at seqno %u, next seqno %u [current breadcrumb %u], completed? %d\n",
- work->flip_queued_ring->name,
- work->flip_queued_seqno,
+ ring->name,
+ i915_gem_request_get_seqno(work->flip_queued_req),
dev_priv->next_seqno,
- work->flip_queued_ring->get_seqno(work->flip_queued_ring, true),
- i915_seqno_passed(work->flip_queued_ring->get_seqno(work->flip_queued_ring, true),
- work->flip_queued_seqno));
+ ring->get_seqno(ring, true),
+ i915_gem_request_completed(work->flip_queued_req, true));
} else
seq_printf(m, "Flip not associated with any ring\n");
seq_printf(m, "Flip queued on frame %d, (was ready on frame %d), now %d\n",
@@ -582,6 +606,36 @@
return 0;
}
+static int i915_gem_batch_pool_info(struct seq_file *m, void *data)
+{
+ struct drm_info_node *node = m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_gem_object *obj;
+ int count = 0;
+ int ret;
+
+ ret = mutex_lock_interruptible(&dev->struct_mutex);
+ if (ret)
+ return ret;
+
+ seq_puts(m, "cache:\n");
+ list_for_each_entry(obj,
+ &dev_priv->mm.batch_pool.cache_list,
+ batch_pool_list) {
+ seq_puts(m, " ");
+ describe_obj(m, obj);
+ seq_putc(m, '\n');
+ count++;
+ }
+
+ seq_printf(m, "total: %d\n", count);
+
+ mutex_unlock(&dev->struct_mutex);
+
+ return 0;
+}
+
static int i915_gem_request_info(struct seq_file *m, void *data)
{
struct drm_info_node *node = m->private;
@@ -2155,6 +2209,8 @@
struct drm_device *dev = node->minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 psrperf = 0;
+ u32 stat[3];
+ enum pipe pipe;
bool enabled = false;
intel_runtime_pm_get(dev_priv);
@@ -2169,14 +2225,36 @@
seq_printf(m, "Re-enable work scheduled: %s\n",
yesno(work_busy(&dev_priv->psr.work.work)));
- enabled = HAS_PSR(dev) &&
- I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE;
- seq_printf(m, "HW Enabled & Active bit: %s\n", yesno(enabled));
+ if (HAS_PSR(dev)) {
+ if (HAS_DDI(dev))
+ enabled = I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE;
+ else {
+ for_each_pipe(dev_priv, pipe) {
+ stat[pipe] = I915_READ(VLV_PSRSTAT(pipe)) &
+ VLV_EDP_PSR_CURR_STATE_MASK;
+ if ((stat[pipe] == VLV_EDP_PSR_ACTIVE_NORFB_UP) ||
+ (stat[pipe] == VLV_EDP_PSR_ACTIVE_SF_UPDATE))
+ enabled = true;
+ }
+ }
+ }
+ seq_printf(m, "HW Enabled & Active bit: %s", yesno(enabled));
- if (HAS_PSR(dev))
+ if (!HAS_DDI(dev))
+ for_each_pipe(dev_priv, pipe) {
+ if ((stat[pipe] == VLV_EDP_PSR_ACTIVE_NORFB_UP) ||
+ (stat[pipe] == VLV_EDP_PSR_ACTIVE_SF_UPDATE))
+ seq_printf(m, " pipe %c", pipe_name(pipe));
+ }
+ seq_puts(m, "\n");
+
+ /* CHV PSR has no performance counter of any kind */

+ if (HAS_PSR(dev) && HAS_DDI(dev)) {
psrperf = I915_READ(EDP_PSR_PERF_CNT(dev)) &
EDP_PSR_PERF_CNT_MASK;
- seq_printf(m, "Performance_Counter: %u\n", psrperf);
+
+ seq_printf(m, "Performance_Counter: %u\n", psrperf);
+ }
mutex_unlock(&dev_priv->psr.lock);
intel_runtime_pm_put(dev_priv);
@@ -2322,7 +2400,7 @@
case POWER_DOMAIN_INIT:
return "INIT";
default:
- WARN_ON(1);
+ MISSING_CASE(domain);
return "?";
}
}
@@ -2718,6 +2796,9 @@
enum pipe pipe;
int plane;
+ if (INTEL_INFO(dev)->gen < 9)
+ return 0;
+
drm_modeset_lock_all(dev);
ddb = &dev_priv->wm.skl_hw.ddb;
@@ -2830,7 +2911,7 @@
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe];
char buf[PIPE_CRC_BUFFER_LEN];
- int head, tail, n_entries, n;
+ int n_entries;
ssize_t bytes_read;
/*
@@ -2862,36 +2943,39 @@
}
/* We now have one or more entries to read */
- head = pipe_crc->head;
- tail = pipe_crc->tail;
- n_entries = min((size_t)CIRC_CNT(head, tail, INTEL_PIPE_CRC_ENTRIES_NR),
- count / PIPE_CRC_LINE_LEN);
- spin_unlock_irq(&pipe_crc->lock);
+ n_entries = count / PIPE_CRC_LINE_LEN;
bytes_read = 0;
- n = 0;
- do {
- struct intel_pipe_crc_entry *entry = &pipe_crc->entries[tail];
+ while (n_entries > 0) {
+ struct intel_pipe_crc_entry *entry =
+ &pipe_crc->entries[pipe_crc->tail];
int ret;
+ if (CIRC_CNT(pipe_crc->head, pipe_crc->tail,
+ INTEL_PIPE_CRC_ENTRIES_NR) < 1)
+ break;
+
+ BUILD_BUG_ON_NOT_POWER_OF_2(INTEL_PIPE_CRC_ENTRIES_NR);
+ pipe_crc->tail = (pipe_crc->tail + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1);
+
bytes_read += snprintf(buf, PIPE_CRC_BUFFER_LEN,
"%8u %8x %8x %8x %8x %8x\n",
entry->frame, entry->crc[0],
entry->crc[1], entry->crc[2],
entry->crc[3], entry->crc[4]);
- ret = copy_to_user(user_buf + n * PIPE_CRC_LINE_LEN,
- buf, PIPE_CRC_LINE_LEN);
+ spin_unlock_irq(&pipe_crc->lock);
+
+ ret = copy_to_user(user_buf, buf, PIPE_CRC_LINE_LEN);
if (ret == PIPE_CRC_LINE_LEN)
return -EFAULT;
- BUILD_BUG_ON_NOT_POWER_OF_2(INTEL_PIPE_CRC_ENTRIES_NR);
- tail = (tail + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1);
- n++;
- } while (--n_entries);
+ user_buf += PIPE_CRC_LINE_LEN;
+ n_entries--;
- spin_lock_irq(&pipe_crc->lock);
- pipe_crc->tail = tail;
+ spin_lock_irq(&pipe_crc->lock);
+ }
+
spin_unlock_irq(&pipe_crc->lock);
return bytes_read;
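[Editor's note: the rework leans on standard power-of-two circular buffer arithmetic: CIRC_CNT(head, tail, size) is (head - tail) & (size - 1), and tail advances by masking. A small worked example under those definitions:]

	#include <assert.h>

	#define DEMO_ENTRIES_NR 128	/* power of two, as the BUILD_BUG_ON enforces */

	static void circ_demo(void)
	{
		unsigned int head = 5, tail = 120;

		/* 5 - 120 wraps modulo 128: thirteen entries are filled */
		assert(((head - tail) & (DEMO_ENTRIES_NR - 1)) == 13);

		tail = (tail + 1) & (DEMO_ENTRIES_NR - 1);	/* consume one */
		assert(((head - tail) & (DEMO_ENTRIES_NR - 1)) == 12);
	}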
@@ -3072,6 +3156,12 @@
*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_C_VLV;
need_stable_symbols = true;
break;
+ case INTEL_PIPE_CRC_SOURCE_DP_D:
+ if (!IS_CHERRYVIEW(dev))
+ return -EINVAL;
+ *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_D_VLV;
+ need_stable_symbols = true;
+ break;
case INTEL_PIPE_CRC_SOURCE_NONE:
*val = 0;
break;
@@ -3092,11 +3182,19 @@
uint32_t tmp = I915_READ(PORT_DFT2_G4X);
tmp |= DC_BALANCE_RESET_VLV;
- if (pipe == PIPE_A)
+ switch (pipe) {
+ case PIPE_A:
tmp |= PIPE_A_SCRAMBLE_RESET;
- else
+ break;
+ case PIPE_B:
tmp |= PIPE_B_SCRAMBLE_RESET;
-
+ break;
+ case PIPE_C:
+ tmp |= PIPE_C_SCRAMBLE_RESET;
+ break;
+ default:
+ return -EINVAL;
+ }
I915_WRITE(PORT_DFT2_G4X, tmp);
}
@@ -3185,10 +3283,19 @@
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t tmp = I915_READ(PORT_DFT2_G4X);
- if (pipe == PIPE_A)
+ switch (pipe) {
+ case PIPE_A:
tmp &= ~PIPE_A_SCRAMBLE_RESET;
- else
+ break;
+ case PIPE_B:
tmp &= ~PIPE_B_SCRAMBLE_RESET;
+ break;
+ case PIPE_C:
+ tmp &= ~PIPE_C_SCRAMBLE_RESET;
+ break;
+ default:
+ return;
+ }
if (!(tmp & PIPE_SCRAMBLE_RESET_MASK))
tmp &= ~DC_BALANCE_RESET_VLV;
I915_WRITE(PORT_DFT2_G4X, tmp);
@@ -3359,13 +3466,15 @@
/* none -> real source transition */
if (source) {
+ struct intel_pipe_crc_entry *entries;
+
DRM_DEBUG_DRIVER("collecting CRCs for pipe %c, %s\n",
pipe_name(pipe), pipe_crc_source_name(source));
- pipe_crc->entries = kzalloc(sizeof(*pipe_crc->entries) *
- INTEL_PIPE_CRC_ENTRIES_NR,
- GFP_KERNEL);
- if (!pipe_crc->entries)
+ entries = kcalloc(INTEL_PIPE_CRC_ENTRIES_NR,
+ sizeof(pipe_crc->entries[0]),
+ GFP_KERNEL);
+ if (!entries)
return -ENOMEM;
/*
@@ -3377,6 +3486,8 @@
hsw_disable_ips(crtc);
spin_lock_irq(&pipe_crc->lock);
+ kfree(pipe_crc->entries);
+ pipe_crc->entries = entries;
pipe_crc->head = 0;
pipe_crc->tail = 0;
spin_unlock_irq(&pipe_crc->lock);
@@ -3404,6 +3515,8 @@
spin_lock_irq(&pipe_crc->lock);
entries = pipe_crc->entries;
pipe_crc->entries = NULL;
+ pipe_crc->head = 0;
+ pipe_crc->tail = 0;
spin_unlock_irq(&pipe_crc->lock);
kfree(entries);
@@ -4296,6 +4409,7 @@
{"i915_gem_hws_blt", i915_hws_info, 0, (void *)BCS},
{"i915_gem_hws_bsd", i915_hws_info, 0, (void *)VCS},
{"i915_gem_hws_vebox", i915_hws_info, 0, (void *)VECS},
+ {"i915_gem_batch_pool", i915_gem_batch_pool_info, 0},
{"i915_frequency_info", i915_frequency_info, 0},
{"i915_drpc_info", i915_drpc_info, 0},
{"i915_emon_status", i915_emon_status, 0},
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index ecee3bc..52730ed 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -928,6 +928,7 @@
mutex_lock(&dev->struct_mutex);
i915_gem_cleanup_ringbuffer(dev);
+ i915_gem_batch_pool_fini(&dev_priv->mm.batch_pool);
i915_gem_context_fini(dev);
mutex_unlock(&dev->struct_mutex);
i915_gem_cleanup_stolen(dev);
@@ -1004,6 +1005,13 @@
kfree(file_priv);
}
+static int
+i915_gem_reject_pin_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ return -ENODEV;
+}
+
const struct drm_ioctl_desc i915_ioctls[] = {
DRM_IOCTL_DEF_DRV(I915_INIT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF_DRV(I915_FLUSH, drm_noop, DRM_AUTH),
@@ -1025,8 +1033,8 @@
DRM_IOCTL_DEF_DRV(I915_GEM_INIT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH|DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER2, i915_gem_execbuffer2, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
- DRM_IOCTL_DEF_DRV(I915_GEM_PIN, i915_gem_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
- DRM_IOCTL_DEF_DRV(I915_GEM_UNPIN, i915_gem_unpin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I915_GEM_PIN, i915_gem_reject_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I915_GEM_UNPIN, i915_gem_reject_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_SET_CACHING, i915_gem_set_caching_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_GET_CACHING, i915_gem_get_caching_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index f990ab4c..0763fa0 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -811,6 +811,8 @@
if (!i915.reset)
return 0;
+ intel_reset_gt_powersave(dev);
+
mutex_lock(&dev->struct_mutex);
i915_gem_reset(dev);
@@ -839,6 +841,8 @@
return ret;
}
+ intel_overlay_reset(dev_priv);
+
/* Ok, now get things going again... */
/*
@@ -880,7 +884,7 @@
* of re-init after reset.
*/
if (INTEL_INFO(dev)->gen > 5)
- intel_reset_gt_powersave(dev);
+ intel_enable_gt_powersave(dev);
} else {
mutex_unlock(&dev->struct_mutex);
}
@@ -1297,7 +1301,9 @@
err = vlv_allow_gt_wake(dev_priv, false);
if (err)
goto err2;
- vlv_save_gunit_s0ix_state(dev_priv);
+
+ if (!IS_CHERRYVIEW(dev_priv->dev))
+ vlv_save_gunit_s0ix_state(dev_priv);
err = vlv_force_gfx_clock(dev_priv, false);
if (err)
@@ -1328,7 +1334,8 @@
*/
ret = vlv_force_gfx_clock(dev_priv, true);
- vlv_restore_gunit_s0ix_state(dev_priv);
+ if (!IS_CHERRYVIEW(dev_priv->dev))
+ vlv_restore_gunit_s0ix_state(dev_priv);
err = vlv_allow_gt_wake(dev_priv, true);
if (!ret)
@@ -1584,7 +1591,7 @@
.gem_prime_import = i915_gem_prime_import,
.dumb_create = i915_gem_dumb_create,
- .dumb_map_offset = i915_gem_dumb_map_offset,
+ .dumb_map_offset = i915_gem_mmap_gtt,
.dumb_destroy = drm_gem_dumb_destroy,
.ioctls = i915_ioctls,
.fops = &i915_driver_fops,
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 63bcda5..fd7a493 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -55,10 +55,51 @@
#define DRIVER_NAME "i915"
#define DRIVER_DESC "Intel Graphics"
-#define DRIVER_DATE "20141121"
+#define DRIVER_DATE "20141219"
#undef WARN_ON
-#define WARN_ON(x) WARN(x, "WARN_ON(" #x ")")
+/* Many gcc versions seem not to see through this and fall over :( */
+#if 0
+#define WARN_ON(x) ({ \
+ bool __i915_warn_cond = (x); \
+ if (__builtin_constant_p(__i915_warn_cond)) \
+ BUILD_BUG_ON(__i915_warn_cond); \
+ WARN(__i915_warn_cond, "WARN_ON(" #x ")"); })
+#else
+#define WARN_ON(x) WARN((x), "WARN_ON(" #x ")")
+#endif
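[Editor's note: the disabled variant is kept around because, when a compiler does see through it, a provably-constant condition turns into a build failure instead of a runtime warning. A hedged illustration of the two cases; the call site is hypothetical:]

	static inline void warn_on_demo(struct intel_engine_cs *ring)
	{
		/* compile-time constant: would become a BUILD_BUG_ON */
		WARN_ON(sizeof(u32) != 4);
		/* genuinely runtime-only: stays an ordinary WARN */
		WARN_ON(ring->id >= I915_NUM_RINGS);
	}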
+
+#define MISSING_CASE(x) WARN(1, "Missing switch case (%lu) in %s\n", \
+ (long) (x), __func__);
+
+/* Use I915_STATE_WARN(x) and I915_STATE_WARN_ON() (rather than WARN() and
+ * WARN_ON()) for hw state sanity checks to check for unexpected conditions
+ * which may not necessarily be a user visible problem. This will either
+ * WARN() or DRM_ERROR() depending on the verbose_checks moduleparam, to
+ * enable distros and users to tailor their preferred amount of i915 abrt
+ * spam.
+ */
+#define I915_STATE_WARN(condition, format...) ({ \
+ int __ret_warn_on = !!(condition); \
+ if (unlikely(__ret_warn_on)) { \
+ if (i915.verbose_state_checks) \
+ __WARN_printf(format); \
+ else \
+ DRM_ERROR(format); \
+ } \
+ unlikely(__ret_warn_on); \
+})
+
+#define I915_STATE_WARN_ON(condition) ({ \
+ int __ret_warn_on = !!(condition); \
+ if (unlikely(__ret_warn_on)) { \
+ if (i915.verbose_state_checks) \
+ __WARN_printf("WARN_ON(" #condition ")\n"); \
+ else \
+ DRM_ERROR("WARN_ON(" #condition ")\n"); \
+ } \
+ unlikely(__ret_warn_on); \
+})
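[Editor's note: a hedged usage sketch of the new macro; the actual conversions of the hw state checker land in follow-up patches, so this call site is illustrative only:]

	static void example_assert_pipe(bool cur_state, bool expected)
	{
		I915_STATE_WARN(cur_state != expected,
				"pipe state mismatch (expected %i, found %i)\n",
				expected, cur_state);
	}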
enum pipe {
INVALID_PIPE = -1,
@@ -1130,6 +1171,11 @@
int which_slice;
};
+struct i915_gem_batch_pool {
+ struct drm_device *dev;
+ struct list_head cache_list;
+};
+
struct i915_gem_mm {
/** Memory allocator for GTT stolen memory */
struct drm_mm stolen;
@@ -1143,6 +1189,13 @@
*/
struct list_head unbound_list;
+ /*
+ * A pool of objects to use as shadow copies of client batch buffers
+ * when the command parser is enabled. Prevents the client from
+ * modifying the batch contents after software parsing.
+ */
+ struct i915_gem_batch_pool batch_pool;
+
/** Usable portion of the GTT for GEM */
unsigned long stolen_base; /* limited to low memory (32-bit) */
@@ -1307,6 +1360,13 @@
SEAMLESS_DRRS_SUPPORT = 2
};
+enum psr_lines_to_wait {
+ PSR_0_LINES_TO_WAIT = 0,
+ PSR_1_LINE_TO_WAIT,
+ PSR_4_LINES_TO_WAIT,
+ PSR_8_LINES_TO_WAIT
+};
+
struct intel_vbt_data {
struct drm_display_mode *lfp_lvds_vbt_mode; /* if any */
struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */
@@ -1336,10 +1396,20 @@
struct edp_power_seq edp_pps;
struct {
+ bool full_link;
+ bool require_aux_wakeup;
+ int idle_frames;
+ enum psr_lines_to_wait lines_to_wait;
+ int tp1_wakeup_time;
+ int tp2_tp3_wakeup_time;
+ } psr;
+
+ struct {
u16 pwm_freq_hz;
bool present;
bool active_low_pwm;
u8 min_brightness; /* min_brightness/255 of max */
+ u8 controller; /* brightness controller number */
} backlight;
/* MIPI DSI */
@@ -1772,6 +1842,8 @@
void (*stop_ring)(struct intel_engine_cs *ring);
} gt;
+ uint32_t request_uniq;
+
/*
* NOTE: This is the dri1/ums dungeon, don't add stuff here. Your patch
* will be rejected. Instead look for a better place.
@@ -1855,6 +1927,8 @@
/** Used in execbuf to temporarily hold a ref */
struct list_head obj_exec_link;
+ struct list_head batch_pool_list;
+
/**
* This is set if the object is on the active lists (has pending
* rendering and so a non-zero seqno), and is not set if it is on
@@ -1926,13 +2000,11 @@
void *dma_buf_vmapping;
int vmapping_count;
- struct intel_engine_cs *ring;
-
/** Breadcrumb of last rendering to the buffer. */
- uint32_t last_read_seqno;
- uint32_t last_write_seqno;
+ struct drm_i915_gem_request *last_read_req;
+ struct drm_i915_gem_request *last_write_req;
/** Breadcrumb of last fenced GPU access to the buffer. */
- uint32_t last_fenced_seqno;
+ struct drm_i915_gem_request *last_fenced_req;
/** Current tiling stride for the object, if it's tiled. */
uint32_t stride;
@@ -1943,10 +2015,6 @@
/** Record of address bit 17 of each page at last unbind. */
unsigned long *bit_17;
- /** User space pin count and filp owning the pin */
- unsigned long user_pin_count;
- struct drm_file *pin_filp;
-
union {
/** for phy allocated objects */
struct drm_dma_handle *phys_handle;
@@ -1975,11 +2043,14 @@
* The request queue allows us to note sequence numbers that have been emitted
* and may be associated with active buffers to be retired.
*
- * By keeping this list, we can avoid having to do questionable
- * sequence-number comparisons on buffer last_rendering_seqnos, and associate
- * an emission time with seqnos for tracking how far ahead of the GPU we are.
+ * By keeping this list, we can avoid having to do questionable sequence
+ * number comparisons on buffer last_read|write_seqno. It also allows an
+ * emission time to be associated with the request for tracking how far ahead
+ * of the GPU the submission is.
*/
struct drm_i915_gem_request {
+ struct kref ref;
+
/** On which ring this request was generated */
struct intel_engine_cs *ring;
@@ -2007,8 +2078,55 @@
struct drm_i915_file_private *file_priv;
/** file_priv list entry for this request */
struct list_head client_list;
+
+ uint32_t uniq;
};
+void i915_gem_request_free(struct kref *req_ref);
+
+static inline uint32_t
+i915_gem_request_get_seqno(struct drm_i915_gem_request *req)
+{
+ return req ? req->seqno : 0;
+}
+
+static inline struct intel_engine_cs *
+i915_gem_request_get_ring(struct drm_i915_gem_request *req)
+{
+ return req ? req->ring : NULL;
+}
+
+static inline void
+i915_gem_request_reference(struct drm_i915_gem_request *req)
+{
+ kref_get(&req->ref);
+}
+
+static inline void
+i915_gem_request_unreference(struct drm_i915_gem_request *req)
+{
+ WARN_ON(!mutex_is_locked(&req->ring->dev->struct_mutex));
+ kref_put(&req->ref, i915_gem_request_free);
+}
+
+static inline void i915_gem_request_assign(struct drm_i915_gem_request **pdst,
+ struct drm_i915_gem_request *src)
+{
+ if (src)
+ i915_gem_request_reference(src);
+
+ if (*pdst)
+ i915_gem_request_unreference(*pdst);
+
+ *pdst = src;
+}
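[Editor's note: a minimal sketch of the intended tracking pattern, assuming a caller that holds struct_mutex. Note the ordering inside the helper: referencing src before unreferencing *pdst makes assigning a request to itself a safe no-op rather than a use-after-free:]

	static void example_track(struct drm_i915_gem_object *obj,
				  struct drm_i915_gem_request *req)
	{
		/* start tracking: takes a reference on req */
		i915_gem_request_assign(&obj->last_read_req, req);

		/* stop tracking: drops the reference again */
		i915_gem_request_assign(&obj->last_read_req, NULL);
	}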
+
+/*
+ * XXX: i915_gem_request_completed should be here but currently needs the
+ * definition of i915_seqno_passed() which is below. It will be moved in
+ * a later patch when the call to i915_seqno_passed() is obsoleted...
+ */
+
struct drm_i915_file_private {
struct drm_i915_private *dev_priv;
struct drm_file *file;
@@ -2242,7 +2360,8 @@
#define HAS_DDI(dev) (INTEL_INFO(dev)->has_ddi)
#define HAS_FPGA_DBG_UNCLAIMED(dev) (INTEL_INFO(dev)->has_fpga_dbg)
-#define HAS_PSR(dev) (IS_HASWELL(dev) || IS_BROADWELL(dev))
+#define HAS_PSR(dev) (IS_HASWELL(dev) || IS_BROADWELL(dev) || \
+ IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
#define HAS_RUNTIME_PM(dev) (IS_GEN6(dev) || IS_HASWELL(dev) || \
IS_BROADWELL(dev) || IS_VALLEYVIEW(dev))
#define HAS_RC6(dev) (INTEL_INFO(dev)->gen >= 6)
@@ -2312,6 +2431,7 @@
bool disable_vtd_wa;
int use_mmio_flip;
bool mmio_debug;
+ bool verbose_state_checks;
};
extern struct i915_params i915 __read_mostly;
@@ -2412,10 +2532,6 @@
struct drm_file *file_priv);
int i915_gem_execbuffer2(struct drm_device *dev, void *data,
struct drm_file *file_priv);
-int i915_gem_pin_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-int i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
int i915_gem_busy_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
@@ -2460,10 +2576,23 @@
#define PIN_GLOBAL 0x4
#define PIN_OFFSET_BIAS 0x8
#define PIN_OFFSET_MASK (~4095)
+int __must_check i915_gem_object_pin_view(struct drm_i915_gem_object *obj,
+ struct i915_address_space *vm,
+ uint32_t alignment,
+ uint64_t flags,
+ const struct i915_ggtt_view *view);
+static inline
int __must_check i915_gem_object_pin(struct drm_i915_gem_object *obj,
struct i915_address_space *vm,
uint32_t alignment,
- uint64_t flags);
+ uint64_t flags)
+{
+ return i915_gem_object_pin_view(obj, vm, alignment, flags,
+ &i915_ggtt_view_normal);
+}
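[Editor's note: the wrapper keeps every existing call site on the implicit normal view. Pinning through an alternate view would look like the following hedged sketch; "my_view" is purely hypothetical, since only the normal view exists at this point:]

	static int example_pin_view(struct drm_i915_gem_object *obj,
				    struct i915_address_space *vm,
				    const struct i915_ggtt_view *my_view)
	{
		return i915_gem_object_pin_view(obj, vm, 4096,
						PIN_GLOBAL, my_view);
	}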
+
+int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
+ u32 flags);
int __must_check i915_vma_unbind(struct i915_vma *vma);
int i915_gem_object_put_pages(struct drm_i915_gem_object *obj);
void i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv);
@@ -2501,9 +2630,8 @@
int i915_gem_dumb_create(struct drm_file *file_priv,
struct drm_device *dev,
struct drm_mode_create_dumb *args);
-int i915_gem_dumb_map_offset(struct drm_file *file_priv,
- struct drm_device *dev, uint32_t handle,
- uint64_t *offset);
+int i915_gem_mmap_gtt(struct drm_file *file_priv, struct drm_device *dev,
+ uint32_t handle, uint64_t *offset);
/**
* Returns true if seq1 is later than seq2.
*/
@@ -2513,6 +2641,18 @@
return (int32_t)(seq1 - seq2) >= 0;
}
+static inline bool i915_gem_request_completed(struct drm_i915_gem_request *req,
+ bool lazy_coherency)
+{
+ u32 seqno;
+
+ BUG_ON(req == NULL);
+
+ seqno = req->ring->get_seqno(req->ring, lazy_coherency);
+
+ return i915_seqno_passed(seqno, req->seqno);
+}
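[Editor's note: i915_gem_request_completed() inherits the wraparound-safe semantics of i915_seqno_passed() above. A short worked example of why the signed subtraction stays correct across the u32 wrap:]

	#include <assert.h>
	#include <stdint.h>

	static void seqno_wrap_demo(void)
	{
		uint32_t seq1 = 5;		/* issued just after the wrap  */
		uint32_t seq2 = 0xfffffffbu;	/* issued just before the wrap */

		/* seq1 - seq2 == 10, positive as int32_t: seq1 is "later" */
		assert((int32_t)(seq1 - seq2) >= 0);
		assert((int32_t)(seq2 - seq1) < 0);
	}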
+
int __must_check i915_gem_get_seqno(struct drm_device *dev, u32 *seqno);
int __must_check i915_gem_set_seqno(struct drm_device *dev, u32 seqno);
int __must_check i915_gem_object_get_fence(struct drm_i915_gem_object *obj);
@@ -2528,7 +2668,7 @@
void i915_gem_retire_requests_ring(struct intel_engine_cs *ring);
int __must_check i915_gem_check_wedge(struct i915_gpu_error *error,
bool interruptible);
-int __must_check i915_gem_check_olr(struct intel_engine_cs *ring, u32 seqno);
+int __must_check i915_gem_check_olr(struct drm_i915_gem_request *req);
static inline bool i915_reset_in_progress(struct i915_gpu_error *error)
{
@@ -2571,17 +2711,15 @@
int __must_check i915_gem_suspend(struct drm_device *dev);
int __i915_add_request(struct intel_engine_cs *ring,
struct drm_file *file,
- struct drm_i915_gem_object *batch_obj,
- u32 *seqno);
-#define i915_add_request(ring, seqno) \
- __i915_add_request(ring, NULL, NULL, seqno)
-int __i915_wait_seqno(struct intel_engine_cs *ring, u32 seqno,
+ struct drm_i915_gem_object *batch_obj);
+#define i915_add_request(ring) \
+ __i915_add_request(ring, NULL, NULL)
+int __i915_wait_request(struct drm_i915_gem_request *req,
unsigned reset_counter,
bool interruptible,
s64 *timeout,
struct drm_i915_file_private *file_priv);
-int __must_check i915_wait_seqno(struct intel_engine_cs *ring,
- uint32_t seqno);
+int __must_check i915_wait_request(struct drm_i915_gem_request *req);
int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
int __must_check
i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj,
@@ -2615,18 +2753,51 @@
void i915_gem_restore_fences(struct drm_device *dev);
+unsigned long i915_gem_obj_offset_view(struct drm_i915_gem_object *o,
+ struct i915_address_space *vm,
+ enum i915_ggtt_view_type view);
+static inline
unsigned long i915_gem_obj_offset(struct drm_i915_gem_object *o,
- struct i915_address_space *vm);
+ struct i915_address_space *vm)
+{
+ return i915_gem_obj_offset_view(o, vm, I915_GGTT_VIEW_NORMAL);
+}
bool i915_gem_obj_bound_any(struct drm_i915_gem_object *o);
+bool i915_gem_obj_bound_view(struct drm_i915_gem_object *o,
+ struct i915_address_space *vm,
+ enum i915_ggtt_view_type view);
+static inline
bool i915_gem_obj_bound(struct drm_i915_gem_object *o,
- struct i915_address_space *vm);
+ struct i915_address_space *vm)
+{
+ return i915_gem_obj_bound_view(o, vm, I915_GGTT_VIEW_NORMAL);
+}
+
unsigned long i915_gem_obj_size(struct drm_i915_gem_object *o,
struct i915_address_space *vm);
+struct i915_vma *i915_gem_obj_to_vma_view(struct drm_i915_gem_object *obj,
+ struct i915_address_space *vm,
+ const struct i915_ggtt_view *view);
+static inline
struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
- struct i915_address_space *vm);
+ struct i915_address_space *vm)
+{
+ return i915_gem_obj_to_vma_view(obj, vm, &i915_ggtt_view_normal);
+}
+
+struct i915_vma *
+i915_gem_obj_lookup_or_create_vma_view(struct drm_i915_gem_object *obj,
+ struct i915_address_space *vm,
+ const struct i915_ggtt_view *view);
+
+static inline
struct i915_vma *
i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj,
- struct i915_address_space *vm);
+ struct i915_address_space *vm)
+{
+ return i915_gem_obj_lookup_or_create_vma_view(obj, vm,
+ &i915_ggtt_view_normal);
+}
struct i915_vma *i915_gem_obj_to_ggtt(struct drm_i915_gem_object *obj);
static inline bool i915_gem_obj_is_pinned(struct drm_i915_gem_object *obj) {
@@ -2808,6 +2979,13 @@
void i915_get_extra_instdone(struct drm_device *dev, uint32_t *instdone);
const char *i915_cache_level_str(struct drm_i915_private *i915, int type);
+/* i915_gem_batch_pool.c */
+void i915_gem_batch_pool_init(struct drm_device *dev,
+ struct i915_gem_batch_pool *pool);
+void i915_gem_batch_pool_fini(struct i915_gem_batch_pool *pool);
+struct drm_i915_gem_object*
+i915_gem_batch_pool_get(struct i915_gem_batch_pool *pool, size_t size);
+
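[Editor's note: a hedged sketch of the intended call pattern for the new pool; the real consumer is the execbuffer path in a companion patch, and error handling is simplified here:]

	static struct drm_i915_gem_object *
	example_get_shadow(struct drm_i915_private *dev_priv, size_t batch_len)
	{
		struct drm_i915_gem_object *shadow;

		shadow = i915_gem_batch_pool_get(&dev_priv->mm.batch_pool,
						 batch_len);
		/* Objects are recycled from cache_list once their previous
		 * GPU work retires; there is no explicit "put" call. */
		return shadow;
	}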
/* i915_cmd_parser.c */
int i915_cmd_parser_get_version(void);
int i915_cmd_parser_init_ring(struct intel_engine_cs *ring);
@@ -2815,7 +2993,9 @@
bool i915_needs_cmd_parser(struct intel_engine_cs *ring);
int i915_parse_cmds(struct intel_engine_cs *ring,
struct drm_i915_gem_object *batch_obj,
+ struct drm_i915_gem_object *shadow_batch_obj,
u32 batch_start_offset,
+ u32 batch_len,
bool is_master);
/* i915_suspend.c */
@@ -2895,9 +3075,6 @@
bool force_restore);
extern void i915_redisable_vga(struct drm_device *dev);
extern void i915_redisable_vga_power_on(struct drm_device *dev);
-extern bool intel_fbc_enabled(struct drm_device *dev);
-extern void bdw_fbc_sw_flush(struct drm_device *dev, u32 value);
-extern void intel_disable_fbc(struct drm_device *dev);
extern bool ironlake_set_drps(struct drm_device *dev, u8 val);
extern void intel_init_pch_refclk(struct drm_device *dev);
extern void gen6_set_rps(struct drm_device *dev, u8 val);
@@ -3073,4 +3250,11 @@
}
}
+static inline void i915_trace_irq_get(struct intel_engine_cs *ring,
+ struct drm_i915_gem_request *req)
+{
+ if (ring->trace_irq_req == NULL && ring->irq_get(ring))
+ i915_gem_request_assign(&ring->trace_irq_req, req);
+}
+
#endif
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 4a9faea6..3044fb3 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -401,7 +401,6 @@
i915_gem_create(struct drm_file *file,
struct drm_device *dev,
uint64_t size,
- bool dumb,
uint32_t *handle_p)
{
struct drm_i915_gem_object *obj;
@@ -417,7 +416,6 @@
if (obj == NULL)
return -ENOMEM;
- obj->base.dumb = dumb;
ret = drm_gem_handle_create(file, &obj->base, &handle);
/* drop reference from allocate - handle holds it now */
drm_gem_object_unreference_unlocked(&obj->base);
@@ -437,7 +435,7 @@
args->pitch = ALIGN(args->width * DIV_ROUND_UP(args->bpp, 8), 64);
args->size = args->pitch * args->height;
return i915_gem_create(file, dev,
- args->size, true, &args->handle);
+ args->size, &args->handle);
}
/**
@@ -450,7 +448,7 @@
struct drm_i915_gem_create *args = data;
return i915_gem_create(file, dev,
- args->size, false, &args->handle);
+ args->size, &args->handle);
}
static inline int
@@ -1153,19 +1151,18 @@
}
/*
- * Compare seqno against outstanding lazy request. Emit a request if they are
- * equal.
+ * Compare arbitrary request against outstanding lazy request. Emit on match.
*/
int
-i915_gem_check_olr(struct intel_engine_cs *ring, u32 seqno)
+i915_gem_check_olr(struct drm_i915_gem_request *req)
{
int ret;
- BUG_ON(!mutex_is_locked(&ring->dev->struct_mutex));
+ WARN_ON(!mutex_is_locked(&req->ring->dev->struct_mutex));
ret = 0;
- if (seqno == ring->outstanding_lazy_seqno)
- ret = i915_add_request(ring, NULL);
+ if (req == req->ring->outstanding_lazy_request)
+ ret = i915_add_request(req->ring);
return ret;
}
@@ -1190,10 +1187,9 @@
}
/**
- * __i915_wait_seqno - wait until execution of seqno has finished
- * @ring: the ring expected to report seqno
- * @seqno: duh!
- * @reset_counter: reset sequence associated with the given seqno
+ * __i915_wait_request - wait until execution of request has finished
+ * @req: duh!
+ * @reset_counter: reset sequence associated with the given request
* @interruptible: do an interruptible wait (normally yes)
* @timeout: in - how long to wait (NULL forever); out - how much time remaining
*
@@ -1204,15 +1200,16 @@
* reset_counter _must_ be read before, and an appropriate smp_rmb must be
* inserted.
*
- * Returns 0 if the seqno was found within the alloted time. Else returns the
+ * Returns 0 if the request was found within the allotted time. Else returns the
* errno with remaining time filled in timeout argument.
*/
-int __i915_wait_seqno(struct intel_engine_cs *ring, u32 seqno,
+int __i915_wait_request(struct drm_i915_gem_request *req,
unsigned reset_counter,
bool interruptible,
s64 *timeout,
struct drm_i915_file_private *file_priv)
{
+ struct intel_engine_cs *ring = i915_gem_request_get_ring(req);
struct drm_device *dev = ring->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
const bool irq_test_in_progress =
@@ -1224,7 +1221,7 @@
WARN(!intel_irqs_enabled(dev_priv), "IRQs disabled");
- if (i915_seqno_passed(ring->get_seqno(ring, true), seqno))
+ if (i915_gem_request_completed(req, true))
return 0;
timeout_expire = timeout ?
@@ -1242,7 +1239,7 @@
return -ENODEV;
/* Record current time in case interrupted by signal, or wedged */
- trace_i915_gem_request_wait_begin(ring, seqno);
+ trace_i915_gem_request_wait_begin(req);
before = ktime_get_raw_ns();
for (;;) {
struct timer_list timer;
@@ -1261,7 +1258,7 @@
break;
}
- if (i915_seqno_passed(ring->get_seqno(ring, false), seqno)) {
+ if (i915_gem_request_completed(req, false)) {
ret = 0;
break;
}
@@ -1293,7 +1290,7 @@
}
}
now = ktime_get_raw_ns();
- trace_i915_gem_request_wait_end(ring, seqno);
+ trace_i915_gem_request_wait_end(req);
if (!irq_test_in_progress)
ring->irq_put(ring);
@@ -1320,32 +1317,40 @@
}
/**
- * Waits for a sequence number to be signaled, and cleans up the
+ * Waits for a request to be signaled, and cleans up the
* request and object lists appropriately for that event.
*/
int
-i915_wait_seqno(struct intel_engine_cs *ring, uint32_t seqno)
+i915_wait_request(struct drm_i915_gem_request *req)
{
- struct drm_device *dev = ring->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- bool interruptible = dev_priv->mm.interruptible;
+ struct drm_device *dev;
+ struct drm_i915_private *dev_priv;
+ bool interruptible;
unsigned reset_counter;
int ret;
+ BUG_ON(req == NULL);
+
+ dev = req->ring->dev;
+ dev_priv = dev->dev_private;
+ interruptible = dev_priv->mm.interruptible;
+
BUG_ON(!mutex_is_locked(&dev->struct_mutex));
- BUG_ON(seqno == 0);
ret = i915_gem_check_wedge(&dev_priv->gpu_error, interruptible);
if (ret)
return ret;
- ret = i915_gem_check_olr(ring, seqno);
+ ret = i915_gem_check_olr(req);
if (ret)
return ret;
reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
- return __i915_wait_seqno(ring, seqno, reset_counter, interruptible,
- NULL, NULL);
+ i915_gem_request_reference(req);
+ ret = __i915_wait_request(req, reset_counter,
+ interruptible, NULL, NULL);
+ i915_gem_request_unreference(req);
+ return ret;
}
static int
@@ -1357,11 +1362,11 @@
/* Manually manage the write flush as we may have not yet
* retired the buffer.
*
- * Note that the last_write_seqno is always the earlier of
- * the two (read/write) seqno, so if we haved successfully waited,
+ * Note that the last_write_req is always the earlier of
+ * the two (read/write) requests, so if we have successfully waited,
* we know we have passed the last write.
*/
- obj->last_write_seqno = 0;
+ i915_gem_request_assign(&obj->last_write_req, NULL);
return 0;
}
@@ -1374,15 +1379,14 @@
i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
bool readonly)
{
- struct intel_engine_cs *ring = obj->ring;
- u32 seqno;
+ struct drm_i915_gem_request *req;
int ret;
- seqno = readonly ? obj->last_write_seqno : obj->last_read_seqno;
- if (seqno == 0)
+ req = readonly ? obj->last_write_req : obj->last_read_req;
+ if (!req)
return 0;
- ret = i915_wait_seqno(ring, seqno);
+ ret = i915_wait_request(req);
if (ret)
return ret;
@@ -1397,33 +1401,33 @@
struct drm_i915_file_private *file_priv,
bool readonly)
{
+ struct drm_i915_gem_request *req;
struct drm_device *dev = obj->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_engine_cs *ring = obj->ring;
unsigned reset_counter;
- u32 seqno;
int ret;
BUG_ON(!mutex_is_locked(&dev->struct_mutex));
BUG_ON(!dev_priv->mm.interruptible);
- seqno = readonly ? obj->last_write_seqno : obj->last_read_seqno;
- if (seqno == 0)
+ req = readonly ? obj->last_write_req : obj->last_read_req;
+ if (!req)
return 0;
ret = i915_gem_check_wedge(&dev_priv->gpu_error, true);
if (ret)
return ret;
- ret = i915_gem_check_olr(ring, seqno);
+ ret = i915_gem_check_olr(req);
if (ret)
return ret;
reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
+ i915_gem_request_reference(req);
mutex_unlock(&dev->struct_mutex);
- ret = __i915_wait_seqno(ring, seqno, reset_counter, true, NULL,
- file_priv);
+ ret = __i915_wait_request(req, reset_counter, true, NULL, file_priv);
mutex_lock(&dev->struct_mutex);
+ i915_gem_request_unreference(req);
if (ret)
return ret;
@@ -1840,10 +1844,10 @@
drm_gem_free_mmap_offset(&obj->base);
}
-static int
+int
i915_gem_mmap_gtt(struct drm_file *file,
struct drm_device *dev,
- uint32_t handle, bool dumb,
+ uint32_t handle,
uint64_t *offset)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1860,13 +1864,6 @@
goto unlock;
}
- /*
- * We don't allow dumb mmaps on objects created using another
- * interface.
- */
- WARN_ONCE(dumb && !(obj->base.dumb || obj->base.import_attach),
- "Illegal dumb map of accelerated buffer.\n");
-
if (obj->base.size > dev_priv->gtt.mappable_end) {
ret = -E2BIG;
goto out;
@@ -1891,15 +1888,6 @@
return ret;
}
-int
-i915_gem_dumb_map_offset(struct drm_file *file,
- struct drm_device *dev,
- uint32_t handle,
- uint64_t *offset)
-{
- return i915_gem_mmap_gtt(file, dev, handle, true, offset);
-}
-
/**
* i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
* @dev: DRM device
@@ -1921,7 +1909,7 @@
{
struct drm_i915_gem_mmap_gtt *args = data;
- return i915_gem_mmap_gtt(file, dev, args->handle, false, &args->offset);
+ return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset);
}
static inline int
@@ -2268,14 +2256,18 @@
i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
struct intel_engine_cs *ring)
{
- u32 seqno = intel_ring_get_seqno(ring);
+ struct drm_i915_gem_request *req;
+ struct intel_engine_cs *old_ring;
BUG_ON(ring == NULL);
- if (obj->ring != ring && obj->last_write_seqno) {
- /* Keep the seqno relative to the current ring */
- obj->last_write_seqno = seqno;
+
+ req = intel_ring_get_request(ring);
+ old_ring = i915_gem_request_get_ring(obj->last_read_req);
+
+ if (old_ring != ring && obj->last_write_req) {
+ /* Keep the request relative to the current ring */
+ i915_gem_request_assign(&obj->last_write_req, req);
}
- obj->ring = ring;
/* Add a reference if we're newly entering the active list. */
if (!obj->active) {
@@ -2285,7 +2277,7 @@
list_move_tail(&obj->ring_list, &ring->active_list);
- obj->last_read_seqno = seqno;
+ i915_gem_request_assign(&obj->last_read_req, req);
}
void i915_vma_move_to_active(struct i915_vma *vma,
@@ -2298,29 +2290,25 @@
static void
i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
{
- struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
- struct i915_address_space *vm;
struct i915_vma *vma;
BUG_ON(obj->base.write_domain & ~I915_GEM_GPU_DOMAINS);
BUG_ON(!obj->active);
- list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
- vma = i915_gem_obj_to_vma(obj, vm);
- if (vma && !list_empty(&vma->mm_list))
- list_move_tail(&vma->mm_list, &vm->inactive_list);
+ list_for_each_entry(vma, &obj->vma_list, vma_link) {
+ if (!list_empty(&vma->mm_list))
+ list_move_tail(&vma->mm_list, &vma->vm->inactive_list);
}
intel_fb_obj_flush(obj, true);
list_del_init(&obj->ring_list);
- obj->ring = NULL;
- obj->last_read_seqno = 0;
- obj->last_write_seqno = 0;
+ i915_gem_request_assign(&obj->last_read_req, NULL);
+ i915_gem_request_assign(&obj->last_write_req, NULL);
obj->base.write_domain = 0;
- obj->last_fenced_seqno = 0;
+ i915_gem_request_assign(&obj->last_fenced_req, NULL);
obj->active = 0;
drm_gem_object_unreference(&obj->base);
@@ -2331,13 +2319,10 @@
static void
i915_gem_object_retire(struct drm_i915_gem_object *obj)
{
- struct intel_engine_cs *ring = obj->ring;
-
- if (ring == NULL)
+ if (obj->last_read_req == NULL)
return;
- if (i915_seqno_passed(ring->get_seqno(ring, true),
- obj->last_read_seqno))
+ if (i915_gem_request_completed(obj->last_read_req, true))
i915_gem_object_move_to_inactive(obj);
}
@@ -2413,8 +2398,7 @@
int __i915_add_request(struct intel_engine_cs *ring,
struct drm_file *file,
- struct drm_i915_gem_object *obj,
- u32 *out_seqno)
+ struct drm_i915_gem_object *obj)
{
struct drm_i915_private *dev_priv = ring->dev->dev_private;
struct drm_i915_gem_request *request;
@@ -2422,7 +2406,7 @@
u32 request_ring_position, request_start;
int ret;
- request = ring->preallocated_lazy_request;
+ request = ring->outstanding_lazy_request;
if (WARN_ON(request == NULL))
return -ENOMEM;
@@ -2467,8 +2451,6 @@
return ret;
}
- request->seqno = intel_ring_get_seqno(ring);
- request->ring = ring;
request->head = request_start;
request->tail = request_ring_position;
@@ -2503,9 +2485,8 @@
spin_unlock(&file_priv->mm.lock);
}
- trace_i915_gem_request_add(ring, request->seqno);
- ring->outstanding_lazy_seqno = 0;
- ring->preallocated_lazy_request = NULL;
+ trace_i915_gem_request_add(request);
+ ring->outstanding_lazy_request = NULL;
i915_queue_hangcheck(ring->dev);
@@ -2515,8 +2496,6 @@
round_jiffies_up_relative(HZ));
intel_mark_busy(dev_priv->dev);
- if (out_seqno)
- *out_seqno = request->seqno;
return 0;
}
@@ -2580,33 +2559,39 @@
static void i915_gem_free_request(struct drm_i915_gem_request *request)
{
- struct intel_context *ctx = request->ctx;
-
list_del(&request->list);
i915_gem_request_remove_from_client(request);
+ i915_gem_request_unreference(request);
+}
+
+void i915_gem_request_free(struct kref *req_ref)
+{
+ struct drm_i915_gem_request *req = container_of(req_ref,
+ typeof(*req), ref);
+ struct intel_context *ctx = req->ctx;
+
if (ctx) {
if (i915.enable_execlists) {
- struct intel_engine_cs *ring = request->ring;
+ struct intel_engine_cs *ring = req->ring;
if (ctx != ring->default_context)
intel_lr_context_unpin(ring, ctx);
}
+
i915_gem_context_unreference(ctx);
}
- kfree(request);
+
+ kfree(req);
}
struct drm_i915_gem_request *
i915_gem_find_active_request(struct intel_engine_cs *ring)
{
struct drm_i915_gem_request *request;
- u32 completed_seqno;
-
- completed_seqno = ring->get_seqno(ring, false);
list_for_each_entry(request, &ring->request_list, list) {
- if (i915_seqno_passed(completed_seqno, request->seqno))
+ if (i915_gem_request_completed(request, false))
continue;
return request;
@@ -2681,10 +2666,8 @@
i915_gem_free_request(request);
}
- /* These may not have been flush before the reset, do so now */
- kfree(ring->preallocated_lazy_request);
- ring->preallocated_lazy_request = NULL;
- ring->outstanding_lazy_seqno = 0;
+ /* This may not have been flushed before the reset, so clean it now */
+ i915_gem_request_assign(&ring->outstanding_lazy_request, NULL);
}
void i915_gem_restore_fences(struct drm_device *dev)
@@ -2736,15 +2719,11 @@
void
i915_gem_retire_requests_ring(struct intel_engine_cs *ring)
{
- uint32_t seqno;
-
if (list_empty(&ring->request_list))
return;
WARN_ON(i915_verify_lists(ring->dev));
- seqno = ring->get_seqno(ring, true);
-
/* Move any buffers on the active list that are no longer referenced
* by the ringbuffer to the flushing/inactive lists as appropriate,
* before we free the context associated with the requests.
@@ -2756,7 +2735,7 @@
struct drm_i915_gem_object,
ring_list);
- if (!i915_seqno_passed(seqno, obj->last_read_seqno))
+ if (!i915_gem_request_completed(obj->last_read_req, true))
break;
i915_gem_object_move_to_inactive(obj);
@@ -2771,10 +2750,10 @@
struct drm_i915_gem_request,
list);
- if (!i915_seqno_passed(seqno, request->seqno))
+ if (!i915_gem_request_completed(request, true))
break;
- trace_i915_gem_request_retire(ring, request->seqno);
+ trace_i915_gem_request_retire(request);
/* This is one of the few common intersection points
* between legacy ringbuffer submission and execlists:
@@ -2797,10 +2776,10 @@
i915_gem_free_request(request);
}
- if (unlikely(ring->trace_irq_seqno &&
- i915_seqno_passed(seqno, ring->trace_irq_seqno))) {
+ if (unlikely(ring->trace_irq_req &&
+ i915_gem_request_completed(ring->trace_irq_req, true))) {
ring->irq_put(ring);
- ring->trace_irq_seqno = 0;
+ i915_gem_request_assign(&ring->trace_irq_req, NULL);
}
WARN_ON(i915_verify_lists(ring->dev));
@@ -2872,14 +2851,17 @@
static int
i915_gem_object_flush_active(struct drm_i915_gem_object *obj)
{
+ struct intel_engine_cs *ring;
int ret;
if (obj->active) {
- ret = i915_gem_check_olr(obj->ring, obj->last_read_seqno);
+ ring = i915_gem_request_get_ring(obj->last_read_req);
+
+ ret = i915_gem_check_olr(obj->last_read_req);
if (ret)
return ret;
- i915_gem_retire_requests_ring(obj->ring);
+ i915_gem_retire_requests_ring(ring);
}
return 0;
@@ -2913,9 +2895,8 @@
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_wait *args = data;
struct drm_i915_gem_object *obj;
- struct intel_engine_cs *ring = NULL;
+ struct drm_i915_gem_request *req;
unsigned reset_counter;
- u32 seqno = 0;
int ret = 0;
if (args->flags != 0)
@@ -2936,13 +2917,10 @@
if (ret)
goto out;
- if (obj->active) {
- seqno = obj->last_read_seqno;
- ring = obj->ring;
- }
+ if (!obj->active || !obj->last_read_req)
+ goto out;
- if (seqno == 0)
- goto out;
+ req = obj->last_read_req;
/* Do this after OLR check to make sure we make forward progress polling
* on this IOCTL with a timeout <=0 (like busy ioctl)
@@ -2954,10 +2932,15 @@
drm_gem_object_unreference(&obj->base);
reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
+ i915_gem_request_reference(req);
mutex_unlock(&dev->struct_mutex);
- return __i915_wait_seqno(ring, seqno, reset_counter, true,
- &args->timeout_ns, file->driver_priv);
+ ret = __i915_wait_request(req, reset_counter, true, &args->timeout_ns,
+ file->driver_priv);
+ mutex_lock(&dev->struct_mutex);
+ i915_gem_request_unreference(req);
+ mutex_unlock(&dev->struct_mutex);
+ return ret;
out:
drm_gem_object_unreference(&obj->base);
@@ -2981,10 +2964,12 @@
i915_gem_object_sync(struct drm_i915_gem_object *obj,
struct intel_engine_cs *to)
{
- struct intel_engine_cs *from = obj->ring;
+ struct intel_engine_cs *from;
u32 seqno;
int ret, idx;
+ from = i915_gem_request_get_ring(obj->last_read_req);
+
if (from == NULL || to == from)
return 0;
@@ -2993,24 +2978,25 @@
idx = intel_ring_sync_index(from, to);
- seqno = obj->last_read_seqno;
+ seqno = i915_gem_request_get_seqno(obj->last_read_req);
/* Optimization: Avoid semaphore sync when we are sure we already
* waited for an object with higher seqno */
if (seqno <= from->semaphore.sync_seqno[idx])
return 0;
- ret = i915_gem_check_olr(obj->ring, seqno);
+ ret = i915_gem_check_olr(obj->last_read_req);
if (ret)
return ret;
- trace_i915_gem_ring_sync_to(from, to, seqno);
+ trace_i915_gem_ring_sync_to(from, to, obj->last_read_req);
ret = to->semaphore.sync_to(to, from, seqno);
if (!ret)
- /* We use last_read_seqno because sync_to()
+ /* We use last_read_req because sync_to()
* might have just caused seqno wrap under
* the radar.
*/
- from->semaphore.sync_seqno[idx] = obj->last_read_seqno;
+ from->semaphore.sync_seqno[idx] =
+ i915_gem_request_get_seqno(obj->last_read_req);
return ret;
}
@@ -3066,10 +3052,8 @@
* cause memory corruption through use-after-free.
*/
- /* Throw away the active reference before moving to the unbound list */
- i915_gem_object_retire(obj);
-
- if (i915_is_ggtt(vma->vm)) {
+ if (i915_is_ggtt(vma->vm) &&
+ vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL) {
i915_gem_object_finish_gtt(obj);
/* release the fence reg _after_ flushing */
@@ -3083,8 +3067,15 @@
vma->unbind_vma(vma);
list_del_init(&vma->mm_list);
- if (i915_is_ggtt(vma->vm))
- obj->map_and_fenceable = false;
+ if (i915_is_ggtt(vma->vm)) {
+ if (vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL) {
+ obj->map_and_fenceable = false;
+ } else if (vma->ggtt_view.pages) {
+ sg_free_table(vma->ggtt_view.pages);
+ kfree(vma->ggtt_view.pages);
+ vma->ggtt_view.pages = NULL;
+ }
+ }
drm_mm_remove_node(&vma->node);
i915_gem_vma_destroy(vma);
@@ -3092,6 +3083,10 @@
/* Since the unbound list is global, only move to that list if
* no more VMAs exist. */
if (list_empty(&obj->vma_list)) {
+ /* Throw away the active reference before
+ * moving to the unbound list. */
+ i915_gem_object_retire(obj);
+
i915_gem_gtt_finish_object(obj);
list_move_tail(&obj->global_list, &dev_priv->mm.unbound_list);
}
@@ -3275,17 +3270,12 @@
"bogus fence setup with stride: 0x%x, tiling mode: %i\n",
obj->stride, obj->tiling_mode);
- switch (INTEL_INFO(dev)->gen) {
- case 9:
- case 8:
- case 7:
- case 6:
- case 5:
- case 4: i965_write_fence_reg(dev, reg, obj); break;
- case 3: i915_write_fence_reg(dev, reg, obj); break;
- case 2: i830_write_fence_reg(dev, reg, obj); break;
- default: BUG();
- }
+ if (IS_GEN2(dev))
+ i830_write_fence_reg(dev, reg, obj);
+ else if (IS_GEN3(dev))
+ i915_write_fence_reg(dev, reg, obj);
+ else if (INTEL_INFO(dev)->gen >= 4)
+ i965_write_fence_reg(dev, reg, obj);
/* And similarly be paranoid that no direct access to this region
* is reordered to before the fence is installed.
@@ -3324,12 +3314,12 @@
static int
i915_gem_object_wait_fence(struct drm_i915_gem_object *obj)
{
- if (obj->last_fenced_seqno) {
- int ret = i915_wait_seqno(obj->ring, obj->last_fenced_seqno);
+ if (obj->last_fenced_req) {
+ int ret = i915_wait_request(obj->last_fenced_req);
if (ret)
return ret;
- obj->last_fenced_seqno = 0;
+ i915_gem_request_assign(&obj->last_fenced_req, NULL);
}
return 0;
@@ -3502,7 +3492,8 @@
i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
struct i915_address_space *vm,
unsigned alignment,
- uint64_t flags)
+ uint64_t flags,
+ const struct i915_ggtt_view *view)
{
struct drm_device *dev = obj->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -3552,7 +3543,7 @@
i915_gem_object_pin_pages(obj);
- vma = i915_gem_obj_lookup_or_create_vma(obj, vm);
+ vma = i915_gem_obj_lookup_or_create_vma_view(obj, vm, view);
if (IS_ERR(vma))
goto err_unpin;
@@ -3582,15 +3573,19 @@
if (ret)
goto err_remove_node;
+ trace_i915_vma_bind(vma, flags);
+ ret = i915_vma_bind(vma, obj->cache_level,
+ flags & PIN_GLOBAL ? GLOBAL_BIND : 0);
+ if (ret)
+ goto err_finish_gtt;
+
list_move_tail(&obj->global_list, &dev_priv->mm.bound_list);
list_add_tail(&vma->mm_list, &vm->inactive_list);
- trace_i915_vma_bind(vma, flags);
- vma->bind_vma(vma, obj->cache_level,
- flags & PIN_GLOBAL ? GLOBAL_BIND : 0);
-
return vma;
+err_finish_gtt:
+ i915_gem_gtt_finish_object(obj);
err_remove_node:
drm_mm_remove_node(&vma->node);
err_free_vma:
@@ -3793,9 +3788,12 @@
}
list_for_each_entry(vma, &obj->vma_list, vma_link)
- if (drm_mm_node_allocated(&vma->node))
- vma->bind_vma(vma, cache_level,
- vma->bound & GLOBAL_BIND);
+ if (drm_mm_node_allocated(&vma->node)) {
+ ret = i915_vma_bind(vma, cache_level,
+ vma->bound & GLOBAL_BIND);
+ if (ret)
+ return ret;
+ }
}
list_for_each_entry(vma, &obj->vma_list, vma_link)
@@ -3914,18 +3912,14 @@
if (!vma)
return false;
- /* There are 3 sources that pin objects:
+ /* There are 2 sources that pin objects:
* 1. The display engine (scanouts, sprites, cursors);
* 2. Reservations for execbuffer;
- * 3. The user.
*
* We can ignore reservations as we hold the struct_mutex and
- * are only called outside of the reservation path. The user
- * can only increment pin_count once, and so if after
- * subtracting the potential reference by the user, any pin_count
- * remains, it must be due to another use by the display engine.
+ * are only called outside of the reservation path.
*/
- return vma->pin_count - !!obj->user_pin_count;
+ return vma->pin_count;
}
/*
@@ -3942,7 +3936,7 @@
bool was_pin_display;
int ret;
- if (pipelined != obj->ring) {
+ if (pipelined != i915_gem_request_get_ring(obj->last_read_req)) {
ret = i915_gem_object_sync(obj, pipelined);
if (ret)
return ret;
@@ -4094,10 +4088,8 @@
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_file_private *file_priv = file->driver_priv;
unsigned long recent_enough = jiffies - msecs_to_jiffies(20);
- struct drm_i915_gem_request *request;
- struct intel_engine_cs *ring = NULL;
+ struct drm_i915_gem_request *request, *target = NULL;
unsigned reset_counter;
- u32 seqno = 0;
int ret;
ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
@@ -4113,19 +4105,24 @@
if (time_after_eq(request->emitted_jiffies, recent_enough))
break;
- ring = request->ring;
- seqno = request->seqno;
+ target = request;
}
reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
+ if (target)
+ i915_gem_request_reference(target);
spin_unlock(&file_priv->mm.lock);
- if (seqno == 0)
+ if (target == NULL)
return 0;
- ret = __i915_wait_seqno(ring, seqno, reset_counter, true, NULL, NULL);
+ ret = __i915_wait_request(target, reset_counter, true, NULL, NULL);
if (ret == 0)
queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0);
+ mutex_lock(&dev->struct_mutex);
+ i915_gem_request_unreference(target);
+ mutex_unlock(&dev->struct_mutex);
+
return ret;
}
@@ -4149,10 +4146,11 @@
}
int
-i915_gem_object_pin(struct drm_i915_gem_object *obj,
- struct i915_address_space *vm,
- uint32_t alignment,
- uint64_t flags)
+i915_gem_object_pin_view(struct drm_i915_gem_object *obj,
+ struct i915_address_space *vm,
+ uint32_t alignment,
+ uint64_t flags,
+ const struct i915_ggtt_view *view)
{
struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
struct i915_vma *vma;
@@ -4168,7 +4166,7 @@
if (WARN_ON((flags & (PIN_MAPPABLE | PIN_GLOBAL)) == PIN_MAPPABLE))
return -EINVAL;
- vma = i915_gem_obj_to_vma(obj, vm);
+ vma = i915_gem_obj_to_vma_view(obj, vm, view);
if (vma) {
if (WARN_ON(vma->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT))
return -EBUSY;
@@ -4178,7 +4176,8 @@
"bo is already pinned with incorrect alignment:"
" offset=%lx, req.alignment=%x, req.map_and_fenceable=%d,"
" obj->map_and_fenceable=%d\n",
- i915_gem_obj_offset(obj, vm), alignment,
+ i915_gem_obj_offset_view(obj, vm, view->type),
+ alignment,
!!(flags & PIN_MAPPABLE),
obj->map_and_fenceable);
ret = i915_vma_unbind(vma);
@@ -4191,13 +4190,17 @@
bound = vma ? vma->bound : 0;
if (vma == NULL || !drm_mm_node_allocated(&vma->node)) {
- vma = i915_gem_object_bind_to_vm(obj, vm, alignment, flags);
+ vma = i915_gem_object_bind_to_vm(obj, vm, alignment,
+ flags, view);
if (IS_ERR(vma))
return PTR_ERR(vma);
}
- if (flags & PIN_GLOBAL && !(vma->bound & GLOBAL_BIND))
- vma->bind_vma(vma, obj->cache_level, GLOBAL_BIND);
+ if (flags & PIN_GLOBAL && !(vma->bound & GLOBAL_BIND)) {
+ ret = i915_vma_bind(vma, obj->cache_level, GLOBAL_BIND);
+ if (ret)
+ return ret;
+ }
if ((bound ^ vma->bound) & GLOBAL_BIND) {
bool mappable, fenceable;
@@ -4269,102 +4272,6 @@
}
int
-i915_gem_pin_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file)
-{
- struct drm_i915_gem_pin *args = data;
- struct drm_i915_gem_object *obj;
- int ret;
-
- if (drm_core_check_feature(dev, DRIVER_MODESET))
- return -ENODEV;
-
- ret = i915_mutex_lock_interruptible(dev);
- if (ret)
- return ret;
-
- obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
- if (&obj->base == NULL) {
- ret = -ENOENT;
- goto unlock;
- }
-
- if (obj->madv != I915_MADV_WILLNEED) {
- DRM_DEBUG("Attempting to pin a purgeable buffer\n");
- ret = -EFAULT;
- goto out;
- }
-
- if (obj->pin_filp != NULL && obj->pin_filp != file) {
- DRM_DEBUG("Already pinned in i915_gem_pin_ioctl(): %d\n",
- args->handle);
- ret = -EINVAL;
- goto out;
- }
-
- if (obj->user_pin_count == ULONG_MAX) {
- ret = -EBUSY;
- goto out;
- }
-
- if (obj->user_pin_count == 0) {
- ret = i915_gem_obj_ggtt_pin(obj, args->alignment, PIN_MAPPABLE);
- if (ret)
- goto out;
- }
-
- obj->user_pin_count++;
- obj->pin_filp = file;
-
- args->offset = i915_gem_obj_ggtt_offset(obj);
-out:
- drm_gem_object_unreference(&obj->base);
-unlock:
- mutex_unlock(&dev->struct_mutex);
- return ret;
-}
-
-int
-i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file)
-{
- struct drm_i915_gem_pin *args = data;
- struct drm_i915_gem_object *obj;
- int ret;
-
- if (drm_core_check_feature(dev, DRIVER_MODESET))
- return -ENODEV;
-
- ret = i915_mutex_lock_interruptible(dev);
- if (ret)
- return ret;
-
- obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
- if (&obj->base == NULL) {
- ret = -ENOENT;
- goto unlock;
- }
-
- if (obj->pin_filp != file) {
- DRM_DEBUG("Not pinned by caller in i915_gem_pin_ioctl(): %d\n",
- args->handle);
- ret = -EINVAL;
- goto out;
- }
- obj->user_pin_count--;
- if (obj->user_pin_count == 0) {
- obj->pin_filp = NULL;
- i915_gem_object_ggtt_unpin(obj);
- }
-
-out:
- drm_gem_object_unreference(&obj->base);
-unlock:
- mutex_unlock(&dev->struct_mutex);
- return ret;
-}
-
-int
i915_gem_busy_ioctl(struct drm_device *dev, void *data,
struct drm_file *file)
{
@@ -4390,9 +4297,11 @@
ret = i915_gem_object_flush_active(obj);
args->busy = obj->active;
- if (obj->ring) {
+ if (obj->last_read_req) {
+ struct intel_engine_cs *ring;
BUILD_BUG_ON(I915_NUM_RINGS > 16);
- args->busy |= intel_ring_flag(obj->ring) << 16;
+ ring = i915_gem_request_get_ring(obj->last_read_req);
+ args->busy |= intel_ring_flag(ring) << 16;
}
drm_gem_object_unreference(&obj->base);
@@ -4472,6 +4381,7 @@
INIT_LIST_HEAD(&obj->ring_list);
INIT_LIST_HEAD(&obj->obj_exec_link);
INIT_LIST_HEAD(&obj->vma_list);
+ INIT_LIST_HEAD(&obj->batch_pool_list);
obj->ops = ops;
@@ -4627,12 +4537,13 @@
intel_runtime_pm_put(dev_priv);
}
-struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
- struct i915_address_space *vm)
+struct i915_vma *i915_gem_obj_to_vma_view(struct drm_i915_gem_object *obj,
+ struct i915_address_space *vm,
+ const struct i915_ggtt_view *view)
{
struct i915_vma *vma;
list_for_each_entry(vma, &obj->vma_list, vma_link)
- if (vma->vm == vm)
+ if (vma->vm == vm && vma->ggtt_view.type == view->type)
return vma;
return NULL;
@@ -4692,6 +4603,11 @@
cancel_delayed_work_sync(&dev_priv->mm.retire_work);
flush_delayed_work(&dev_priv->mm.idle_work);
+ /* Assert that we successfully flushed all the work and
+ * reset the GPU back to its idle, low power state.
+ */
+ WARN_ON(dev_priv->mm.busy);
+
return 0;
err:
@@ -4803,14 +4719,6 @@
struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
- /*
- * At least 830 can leave some of the unused rings
- * "active" (ie. head != tail) after resume which
- * will prevent c3 entry. Makes sure all unused rings
- * are totally idle.
- */
- init_unused_rings(dev);
-
ret = intel_init_render_ring_buffer(dev);
if (ret)
return ret;
@@ -4863,6 +4771,7 @@
i915_gem_init_hw(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_engine_cs *ring;
int ret, i;
if (INTEL_INFO(dev)->gen < 6 && !intel_enable_gtt())
@@ -4889,9 +4798,19 @@
i915_gem_init_swizzling(dev);
- ret = dev_priv->gt.init_rings(dev);
- if (ret)
- return ret;
+ /*
+ * At least 830 can leave some of the unused rings
+ * "active" (ie. head != tail) after resume which
+ * will prevent c3 entry. Makes sure all unused rings
+ * are totally idle.
+ */
+ init_unused_rings(dev);
+
+ for_each_ring(ring, dev_priv, i) {
+ ret = ring->init_hw(ring);
+ if (ret)
+ return ret;
+ }
for (i = 0; i < NUM_L3_SLICES(dev); i++)
i915_gem_l3_remap(&dev_priv->ring[RCS], i);
@@ -4951,18 +4870,18 @@
}
ret = i915_gem_init_userptr(dev);
- if (ret) {
- mutex_unlock(&dev->struct_mutex);
- return ret;
- }
+ if (ret)
+ goto out_unlock;
i915_gem_init_global_gtt(dev);
ret = i915_gem_context_init(dev);
- if (ret) {
- mutex_unlock(&dev->struct_mutex);
- return ret;
- }
+ if (ret)
+ goto out_unlock;
+
+ ret = dev_priv->gt.init_rings(dev);
+ if (ret)
+ goto out_unlock;
ret = i915_gem_init_hw(dev);
if (ret == -EIO) {
@@ -4974,6 +4893,8 @@
atomic_set_mask(I915_WEDGED, &dev_priv->gpu_error.reset_counter);
ret = 0;
}
+
+out_unlock:
mutex_unlock(&dev->struct_mutex);
return ret;
@@ -5074,6 +4995,8 @@
dev_priv->mm.oom_notifier.notifier_call = i915_gem_shrinker_oom;
register_oom_notifier(&dev_priv->mm.oom_notifier);
+ i915_gem_batch_pool_init(dev, &dev_priv->mm.batch_pool);
+
mutex_init(&dev_priv->fb_tracking.lock);
}
@@ -5234,8 +5157,9 @@
}
/* All the new VM stuff */
-unsigned long i915_gem_obj_offset(struct drm_i915_gem_object *o,
- struct i915_address_space *vm)
+unsigned long i915_gem_obj_offset_view(struct drm_i915_gem_object *o,
+ struct i915_address_space *vm,
+ enum i915_ggtt_view_type view)
{
struct drm_i915_private *dev_priv = o->base.dev->dev_private;
struct i915_vma *vma;
@@ -5243,7 +5167,7 @@
WARN_ON(vm == &dev_priv->mm.aliasing_ppgtt->base);
list_for_each_entry(vma, &o->vma_list, vma_link) {
- if (vma->vm == vm)
+ if (vma->vm == vm && vma->ggtt_view.type == view)
return vma->node.start;
}
@@ -5252,13 +5176,16 @@
return -1;
}
-bool i915_gem_obj_bound(struct drm_i915_gem_object *o,
- struct i915_address_space *vm)
+bool i915_gem_obj_bound_view(struct drm_i915_gem_object *o,
+ struct i915_address_space *vm,
+ enum i915_ggtt_view_type view)
{
struct i915_vma *vma;
list_for_each_entry(vma, &o->vma_list, vma_link)
- if (vma->vm == vm && drm_mm_node_allocated(&vma->node))
+ if (vma->vm == vm &&
+ vma->ggtt_view.type == view &&
+ drm_mm_node_allocated(&vma->node))
return true;
return false;
@@ -5390,11 +5317,13 @@
struct i915_vma *i915_gem_obj_to_ggtt(struct drm_i915_gem_object *obj)
{
+ struct i915_address_space *ggtt = i915_obj_to_ggtt(obj);
struct i915_vma *vma;
- vma = list_first_entry(&obj->vma_list, typeof(*vma), vma_link);
- if (vma->vm != i915_obj_to_ggtt(obj))
- return NULL;
+ list_for_each_entry(vma, &obj->vma_list, vma_link)
+ if (vma->vm == ggtt &&
+ vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL)
+ return vma;
- return vma;
+ return NULL;
}
diff --git a/drivers/gpu/drm/i915/i915_gem_batch_pool.c b/drivers/gpu/drm/i915/i915_gem_batch_pool.c
new file mode 100644
index 0000000..c690170
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_gem_batch_pool.c
@@ -0,0 +1,137 @@
+/*
+ * Copyright © 2014 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#include "i915_drv.h"
+
+/**
+ * DOC: batch pool
+ *
+ * In order to submit batch buffers as 'secure', the software command parser
+ * must ensure that a batch buffer cannot be modified after parsing. It does
+ * this by copying the user-provided batch buffer contents to a kernel-owned
+ * buffer from which the hardware will actually execute, and by carefully
+ * managing the address space bindings for such buffers.
+ *
+ * The batch pool framework provides a mechanism for the driver to manage a
+ * set of scratch buffers to use for this purpose. The framework can be
+ * extended to support other use cases should they arise.
+ */
+
+/**
+ * i915_gem_batch_pool_init() - initialize a batch buffer pool
+ * @dev: the drm device
+ * @pool: the batch buffer pool
+ */
+void i915_gem_batch_pool_init(struct drm_device *dev,
+ struct i915_gem_batch_pool *pool)
+{
+ pool->dev = dev;
+ INIT_LIST_HEAD(&pool->cache_list);
+}
+
+/**
+ * i915_gem_batch_pool_fini() - clean up a batch buffer pool
+ * @pool: the pool to clean up
+ *
+ * Note: Callers must hold the struct_mutex.
+ */
+void i915_gem_batch_pool_fini(struct i915_gem_batch_pool *pool)
+{
+ WARN_ON(!mutex_is_locked(&pool->dev->struct_mutex));
+
+ while (!list_empty(&pool->cache_list)) {
+ struct drm_i915_gem_object *obj =
+ list_first_entry(&pool->cache_list,
+ struct drm_i915_gem_object,
+ batch_pool_list);
+
+ WARN_ON(obj->active);
+
+ list_del_init(&obj->batch_pool_list);
+ drm_gem_object_unreference(&obj->base);
+ }
+}
+
+/**
+ * i915_gem_batch_pool_get() - select a buffer from the pool
+ * @pool: the batch buffer pool
+ * @size: the minimum desired size of the returned buffer
+ *
+ * Finds or allocates a batch buffer in the pool with at least the requested
+ * size. The caller is responsible for any domain, active/inactive, or
+ * purgeability management for the returned buffer.
+ *
+ * Note: Callers must hold the struct_mutex
+ *
+ * Return: the selected batch buffer object
+ */
+struct drm_i915_gem_object *
+i915_gem_batch_pool_get(struct i915_gem_batch_pool *pool,
+ size_t size)
+{
+ struct drm_i915_gem_object *obj = NULL;
+ struct drm_i915_gem_object *tmp, *next;
+
+ WARN_ON(!mutex_is_locked(&pool->dev->struct_mutex));
+
+ list_for_each_entry_safe(tmp, next,
+ &pool->cache_list, batch_pool_list) {
+
+ if (tmp->active)
+ continue;
+
+ /* While we're looping, do some clean up */
+ if (tmp->madv == __I915_MADV_PURGED) {
+ list_del(&tmp->batch_pool_list);
+ drm_gem_object_unreference(&tmp->base);
+ continue;
+ }
+
+ /*
+ * Select a buffer that is at least as big as needed
+ * but not 'too much' bigger. A better way to do this
+ * might be to bucket the pool objects based on size.
+ */
+ if (tmp->base.size >= size &&
+ tmp->base.size <= (2 * size)) {
+ obj = tmp;
+ break;
+ }
+ }
+
+ if (!obj) {
+ obj = i915_gem_alloc_object(pool->dev, size);
+ if (!obj)
+ return ERR_PTR(-ENOMEM);
+
+ list_add_tail(&obj->batch_pool_list, &pool->cache_list);
+ } else {
+ /* Keep list in LRU order */
+ list_move_tail(&obj->batch_pool_list, &pool->cache_list);
+ }
+
+ obj->madv = I915_MADV_WILLNEED;
+
+ return obj;
+}
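
For orientation, a minimal usage sketch of the new pool API (the `shadow`/`user_batch` variables and the copy/submit step are assumed for illustration; only the pool calls mirror this patch):

	/* struct_mutex must be held, per the kerneldoc above */
	shadow = i915_gem_batch_pool_get(&dev_priv->mm.batch_pool,
					 user_batch->base.size);
	if (IS_ERR(shadow))
		return PTR_ERR(shadow);

	/* ... copy and validate user_batch into shadow, then submit ... */

	/*
	 * No explicit "put": once the shadow buffer goes inactive and is
	 * marked purgeable again, a later i915_gem_batch_pool_get() may
	 * recycle it, and i915_gem_batch_pool_fini() frees whatever is
	 * left in the cache.
	 */
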
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index d17ff43..9b23fb1 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -408,14 +408,25 @@
BUG_ON(!dev_priv->ring[RCS].default_context);
- if (i915.enable_execlists)
- return 0;
+ if (i915.enable_execlists) {
+ for_each_ring(ring, dev_priv, i) {
+ if (ring->init_context) {
+ ret = ring->init_context(ring,
+ ring->default_context);
+ if (ret) {
+ DRM_ERROR("ring init context: %d\n",
+ ret);
+ return ret;
+ }
+ }
+ }
- for_each_ring(ring, dev_priv, i) {
- ret = i915_switch_context(ring, ring->default_context);
- if (ret)
- return ret;
- }
+ } else
+ for_each_ring(ring, dev_priv, i) {
+ ret = i915_switch_context(ring, ring->default_context);
+ if (ret)
+ return ret;
+ }
return 0;
}
@@ -473,7 +484,12 @@
u32 hw_flags)
{
u32 flags = hw_flags | MI_MM_SPACE_GTT;
- int ret;
+ const int num_rings =
+ /* Use an extended w/a on ivb+ if signalling from other rings */
+ i915_semaphore_is_enabled(ring->dev) ?
+ hweight32(INTEL_INFO(ring->dev)->ring_mask) - 1 :
+ 0;
+ int len, i, ret;
/* w/a: If Flush TLB Invalidation Mode is enabled, driver must do a TLB
* invalidation prior to MI_SET_CONTEXT. On GEN6 we don't set the value
@@ -490,15 +506,31 @@
if (!IS_HASWELL(ring->dev) && INTEL_INFO(ring->dev)->gen < 8)
flags |= (MI_SAVE_EXT_STATE_EN | MI_RESTORE_EXT_STATE_EN);
- ret = intel_ring_begin(ring, 6);
+
+ len = 4;
+ if (INTEL_INFO(ring->dev)->gen >= 7)
+ len += 2 + (num_rings ? 4*num_rings + 2 : 0);
+
+ ret = intel_ring_begin(ring, len);
if (ret)
return ret;
/* WaProgramMiArbOnOffAroundMiSetContext:ivb,vlv,hsw,bdw,chv */
- if (INTEL_INFO(ring->dev)->gen >= 7)
+ if (INTEL_INFO(ring->dev)->gen >= 7) {
intel_ring_emit(ring, MI_ARB_ON_OFF | MI_ARB_DISABLE);
- else
- intel_ring_emit(ring, MI_NOOP);
+ if (num_rings) {
+ struct intel_engine_cs *signaller;
+
+ intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(num_rings));
+ for_each_ring(signaller, to_i915(ring->dev), i) {
+ if (signaller == ring)
+ continue;
+
+ intel_ring_emit(ring, RING_PSMI_CTL(signaller->mmio_base));
+ intel_ring_emit(ring, _MASKED_BIT_ENABLE(GEN6_PSMI_SLEEP_MSG_DISABLE));
+ }
+ }
+ }
intel_ring_emit(ring, MI_NOOP);
intel_ring_emit(ring, MI_SET_CONTEXT);
@@ -510,10 +542,21 @@
*/
intel_ring_emit(ring, MI_NOOP);
- if (INTEL_INFO(ring->dev)->gen >= 7)
+ if (INTEL_INFO(ring->dev)->gen >= 7) {
+ if (num_rings) {
+ struct intel_engine_cs *signaller;
+
+ intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(num_rings));
+ for_each_ring(signaller, to_i915(ring->dev), i) {
+ if (signaller == ring)
+ continue;
+
+ intel_ring_emit(ring, RING_PSMI_CTL(signaller->mmio_base));
+ intel_ring_emit(ring, _MASKED_BIT_DISABLE(GEN6_PSMI_SLEEP_MSG_DISABLE));
+ }
+ }
intel_ring_emit(ring, MI_ARB_ON_OFF | MI_ARB_ENABLE);
- else
- intel_ring_emit(ring, MI_NOOP);
+ }
intel_ring_advance(ring);
@@ -579,9 +622,14 @@
goto unpin_out;
vma = i915_gem_obj_to_ggtt(to->legacy_hw_ctx.rcs_state);
- if (!(vma->bound & GLOBAL_BIND))
- vma->bind_vma(vma, to->legacy_hw_ctx.rcs_state->cache_level,
- GLOBAL_BIND);
+ if (!(vma->bound & GLOBAL_BIND)) {
+ ret = i915_vma_bind(vma,
+ to->legacy_hw_ctx.rcs_state->cache_level,
+ GLOBAL_BIND);
+ /* This shouldn't ever fail. */
+ if (WARN_ONCE(ret, "GGTT context bind failed!"))
+ goto unpin_out;
+ }
if (!to->legacy_hw_ctx.initialized || i915_gem_context_is_default(to))
hw_flags |= MI_RESTORE_INHIBIT;
@@ -619,7 +667,8 @@
* swapped, but there is no way to do that yet.
*/
from->legacy_hw_ctx.rcs_state->dirty = 1;
- BUG_ON(from->legacy_hw_ctx.rcs_state->ring != ring);
+ BUG_ON(i915_gem_request_get_ring(
+ from->legacy_hw_ctx.rcs_state->last_read_req) != ring);
/* obj is kept alive until the next request by its active ref */
i915_gem_object_ggtt_unpin(from->legacy_hw_ctx.rcs_state);
@@ -639,10 +688,6 @@
if (ret)
DRM_ERROR("ring init context: %d\n", ret);
}
-
- ret = i915_gem_render_state_init(ring);
- if (ret)
- DRM_ERROR("init render state: %d\n", ret);
}
return 0;
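
The length passed to intel_ring_begin() above deserves a worked example (illustrative numbers):

	/*
	 * On an IVB part with three rings and semaphores enabled:
	 *
	 *	num_rings = hweight32(ring_mask) - 1 = 2
	 *	len       = 4 + 2 + (4 * num_rings + 2)
	 *	          = 4 + 2 + 10 = 16 dwords
	 *
	 * i.e. the base 4-dword MI_SET_CONTEXT sequence, one MI_ARB_ON_OFF
	 * on each side of it (+2), and on each side an MI_LOAD_REGISTER_IMM
	 * header plus a RING_PSMI_CTL register/value pair per other ring
	 * (2 + 4 * num_rings in total across both sides).
	 */
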
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index f06027b..1d6e092 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -37,6 +37,7 @@
#define __EXEC_OBJECT_HAS_FENCE (1<<30)
#define __EXEC_OBJECT_NEEDS_MAP (1<<29)
#define __EXEC_OBJECT_NEEDS_BIAS (1<<28)
+#define __EXEC_OBJECT_PURGEABLE (1<<27)
#define BATCH_OFFSET_BIAS (256*1024)
@@ -121,9 +122,6 @@
goto err;
}
- WARN_ONCE(obj->base.dumb,
- "GPU use of dumb buffer is illegal.\n");
-
drm_gem_object_reference(&obj->base);
list_add_tail(&obj->obj_exec_link, &objects);
}
@@ -226,7 +224,12 @@
if (entry->flags & __EXEC_OBJECT_HAS_PIN)
vma->pin_count--;
- entry->flags &= ~(__EXEC_OBJECT_HAS_FENCE | __EXEC_OBJECT_HAS_PIN);
+ if (entry->flags & __EXEC_OBJECT_PURGEABLE)
+ obj->madv = I915_MADV_DONTNEED;
+
+ entry->flags &= ~(__EXEC_OBJECT_HAS_FENCE |
+ __EXEC_OBJECT_HAS_PIN |
+ __EXEC_OBJECT_PURGEABLE);
}
static void eb_destroy(struct eb_vmas *eb)
@@ -360,9 +363,12 @@
* through the ppgtt for non_secure batchbuffers. */
if (unlikely(IS_GEN6(dev) &&
reloc->write_domain == I915_GEM_DOMAIN_INSTRUCTION &&
- !(target_vma->bound & GLOBAL_BIND)))
- target_vma->bind_vma(target_vma, target_i915_obj->cache_level,
- GLOBAL_BIND);
+ !(target_vma->bound & GLOBAL_BIND))) {
+ ret = i915_vma_bind(target_vma, target_i915_obj->cache_level,
+ GLOBAL_BIND);
+ if (WARN_ONCE(ret, "Unexpected failure to bind target VMA!"))
+ return ret;
+ }
/* Validate that the target is in a valid r/w GPU domain */
if (unlikely(reloc->write_domain & (reloc->write_domain - 1))) {
@@ -946,7 +952,7 @@
i915_gem_execbuffer_move_to_active(struct list_head *vmas,
struct intel_engine_cs *ring)
{
- u32 seqno = intel_ring_get_seqno(ring);
+ struct drm_i915_gem_request *req = intel_ring_get_request(ring);
struct i915_vma *vma;
list_for_each_entry(vma, vmas, exec_list) {
@@ -963,7 +969,7 @@
i915_vma_move_to_active(vma, ring);
if (obj->base.write_domain) {
obj->dirty = 1;
- obj->last_write_seqno = seqno;
+ i915_gem_request_assign(&obj->last_write_req, req);
intel_fb_obj_invalidate(obj, ring);
@@ -971,7 +977,7 @@
obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;
}
if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) {
- obj->last_fenced_seqno = seqno;
+ i915_gem_request_assign(&obj->last_fenced_req, req);
if (entry->flags & __EXEC_OBJECT_HAS_FENCE) {
struct drm_i915_private *dev_priv = to_i915(ring->dev);
list_move_tail(&dev_priv->fence_regs[obj->fence_reg].lru_list,
@@ -993,7 +999,7 @@
ring->gpu_caches_dirty = true;
/* Add a breadcrumb for the completion of the batch buffer */
- (void)__i915_add_request(ring, file, obj, NULL);
+ (void)__i915_add_request(ring, file, obj);
}
static int
@@ -1063,6 +1069,65 @@
return 0;
}
+static struct drm_i915_gem_object*
+i915_gem_execbuffer_parse(struct intel_engine_cs *ring,
+ struct drm_i915_gem_exec_object2 *shadow_exec_entry,
+ struct eb_vmas *eb,
+ struct drm_i915_gem_object *batch_obj,
+ u32 batch_start_offset,
+ u32 batch_len,
+ bool is_master,
+ u32 *flags)
+{
+ struct drm_i915_private *dev_priv = to_i915(batch_obj->base.dev);
+ struct drm_i915_gem_object *shadow_batch_obj;
+ int ret;
+
+ shadow_batch_obj = i915_gem_batch_pool_get(&dev_priv->mm.batch_pool,
+ batch_obj->base.size);
+ if (IS_ERR(shadow_batch_obj))
+ return shadow_batch_obj;
+
+ ret = i915_parse_cmds(ring,
+ batch_obj,
+ shadow_batch_obj,
+ batch_start_offset,
+ batch_len,
+ is_master);
+ if (ret) {
+ if (ret == -EACCES)
+ return batch_obj;
+ } else {
+ struct i915_vma *vma;
+
+ memset(shadow_exec_entry, 0, sizeof(*shadow_exec_entry));
+
+ vma = i915_gem_obj_to_ggtt(shadow_batch_obj);
+ vma->exec_entry = shadow_exec_entry;
+ vma->exec_entry->flags = __EXEC_OBJECT_PURGEABLE;
+ drm_gem_object_reference(&shadow_batch_obj->base);
+ list_add_tail(&vma->exec_list, &eb->vmas);
+
+ shadow_batch_obj->base.pending_read_domains =
+ batch_obj->base.pending_read_domains;
+
+ /*
+ * Set the DISPATCH_SECURE bit to remove the NON_SECURE
+ * bit from MI_BATCH_BUFFER_START commands issued in the
+ * dispatch_execbuffer implementations. We specifically
+ * don't want that set when the command parser is
+ * enabled.
+ *
+ * FIXME: with aliasing ppgtt, buffers that should only
+ * be in ggtt still end up in the aliasing ppgtt. remove
+ * this check when that is fixed.
+ */
+ if (USES_FULL_PPGTT(dev))
+ *flags |= I915_DISPATCH_SECURE;
+ }
+
+ return ret ? ERR_PTR(ret) : shadow_batch_obj;
+}
int
i915_gem_ringbuffer_submission(struct drm_device *dev, struct drm_file *file,
@@ -1211,7 +1276,7 @@
return ret;
}
- trace_i915_gem_ring_dispatch(ring, intel_ring_get_seqno(ring), flags);
+ trace_i915_gem_ring_dispatch(intel_ring_get_request(ring), flags);
i915_gem_execbuffer_move_to_active(vmas, ring);
i915_gem_execbuffer_retire_commands(dev, file, ring, batch_obj);
@@ -1280,6 +1345,7 @@
struct drm_i915_private *dev_priv = dev->dev_private;
struct eb_vmas *eb;
struct drm_i915_gem_object *batch_obj;
+ struct drm_i915_gem_exec_object2 shadow_exec_entry;
struct intel_engine_cs *ring;
struct intel_context *ctx;
struct i915_address_space *vm;
@@ -1396,28 +1462,24 @@
ret = -EINVAL;
goto err;
}
- batch_obj->base.pending_read_domains |= I915_GEM_DOMAIN_COMMAND;
if (i915_needs_cmd_parser(ring)) {
- ret = i915_parse_cmds(ring,
- batch_obj,
- args->batch_start_offset,
- file->is_master);
- if (ret) {
- if (ret != -EACCES)
- goto err;
- } else {
- /*
- * XXX: Actually do this when enabling batch copy...
- *
- * Set the DISPATCH_SECURE bit to remove the NON_SECURE bit
- * from MI_BATCH_BUFFER_START commands issued in the
- * dispatch_execbuffer implementations. We specifically don't
- * want that set when the command parser is enabled.
- */
+ batch_obj = i915_gem_execbuffer_parse(ring,
+ &shadow_exec_entry,
+ eb,
+ batch_obj,
+ args->batch_start_offset,
+ args->batch_len,
+ file->is_master,
+ &flags);
+ if (IS_ERR(batch_obj)) {
+ ret = PTR_ERR(batch_obj);
+ goto err;
}
}
+ batch_obj->base.pending_read_domains |= I915_GEM_DOMAIN_COMMAND;
+
/* snb/ivb/vlv conflate the "batch in ppgtt" bit with the "non-secure
* batch" bit. Hence we need to pin secure batches into the global gtt.
* hsw should have this fixed, but bdw mucks it up again. */
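
Pulled together, the parser integration above behaves as follows (a condensed summary of i915_gem_execbuffer_parse(), not additional code):

	/*
	 * shadow = i915_gem_batch_pool_get(&dev_priv->mm.batch_pool, size);
	 * ret = i915_parse_cmds(ring, batch, shadow, start, len, is_master);
	 *
	 * ret == -EACCES -> fall back to the original batch object
	 * ret == 0       -> queue the shadow copy instead: it joins eb->vmas
	 *                   flagged __EXEC_OBJECT_PURGEABLE (so unreserve
	 *                   marks it I915_MADV_DONTNEED for the pool), and
	 *                   I915_DISPATCH_SECURE is set under full PPGTT
	 * other ret      -> propagated as ERR_PTR(ret)
	 */
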
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 171f6ea..746f77f 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -30,6 +30,68 @@
#include "i915_trace.h"
#include "intel_drv.h"
+/**
+ * DOC: Global GTT views
+ *
+ * Background and previous state
+ *
+ * Historically objects could exist (be bound) in global GTT space only as
+ * singular instances with a view representing all of the object's backing pages
+ * in a linear fashion. This view will be called a normal view.
+ *
+ * To support multiple views of the same object, where the number of mapped
+ * pages is not equal to that of the backing store, or where the layout of
+ * the pages is not linear, the concept of a GGTT view was added.
+ *
+ * One example of an alternative view is a stereo display driven by a single
+ * image. In this case we would have a framebuffer looking like this
+ * (2x2 pages):
+ *
+ * 12
+ * 34
+ *
+ * The above would represent a normal GGTT view as normally mapped for GPU or
+ * CPU rendering. In contrast, fed to the display engine would be an alternative
+ * view which could look something like this:
+ *
+ * 1212
+ * 3434
+ *
+ * In this example both the size and layout of pages in the alternative view are
+ * different from the normal view.
+ *
+ * Implementation and usage
+ *
+ * GGTT views are implemented using VMAs and are distinguished via enum
+ * i915_ggtt_view_type and struct i915_ggtt_view.
+ *
+ * A new flavour of core GEM functions which work with GGTT bound objects was
+ * added with the _view suffix. They take the struct i915_ggtt_view parameter
+ * encapsulating all metadata required to implement a view.
+ *
+ * As a helper for callers which are only interested in the normal view,
+ * a globally const i915_ggtt_view_normal singleton instance exists. All old
+ * core GEM API functions, the ones not taking the view parameter, operate on
+ * or with the normal GGTT view.
+ *
+ * Code wanting to add or use a new GGTT view needs to:
+ *
+ * 1. Add a new enum with a suitable name.
+ * 2. Extend the metadata in the i915_ggtt_view structure if required.
+ * 3. Add support to i915_get_vma_pages().
+ *
+ * New views are required to build a scatter-gather table from within the
+ * i915_get_vma_pages function. This table is stored in vma.ggtt_view and
+ * exists for the lifetime of the VMA.
+ *
+ * The core API is designed to have copy semantics, which means that the
+ * passed-in struct i915_ggtt_view does not need to be persistent (left
+ * around after calling the core API functions).
+ *
+ */
+
+const struct i915_ggtt_view i915_ggtt_view_normal;
+
static void bdw_setup_private_ppat(struct drm_i915_private *dev_priv);
static void chv_setup_private_ppat(struct drm_i915_private *dev_priv);
@@ -40,8 +102,6 @@
has_aliasing_ppgtt = INTEL_INFO(dev)->gen >= 6;
has_full_ppgtt = INTEL_INFO(dev)->gen >= 7;
- if (IS_GEN8(dev))
- has_full_ppgtt = false; /* XXX why? */
/*
* We don't allow disabling PPGTT for gen9+ as it's a requirement for
@@ -72,7 +132,10 @@
return 0;
}
- return has_aliasing_ppgtt ? 1 : 0;
+ if (INTEL_INFO(dev)->gen >= 8 && i915.enable_execlists)
+ return 2;
+ else
+ return has_aliasing_ppgtt ? 1 : 0;
}
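
The return value keeps the established enable_ppgtt encoding:

	/*
	 * sanitize_enable_ppgtt() return values, matching the
	 * i915.enable_ppgtt module parameter encoding:
	 *   0 = PPGTT disabled, 1 = aliasing PPGTT, 2 = full PPGTT.
	 * The hunk above opts gen8+ into full PPGTT whenever execlists
	 * are enabled.
	 */
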
@@ -132,7 +195,7 @@
pte |= GEN6_PTE_UNCACHED;
break;
default:
- WARN_ON(1);
+ MISSING_CASE(level);
}
return pte;
@@ -156,7 +219,7 @@
pte |= GEN6_PTE_UNCACHED;
break;
default:
- WARN_ON(1);
+ MISSING_CASE(level);
}
return pte;
@@ -1102,10 +1165,8 @@
if (INTEL_INFO(dev)->gen < 8)
return gen6_ppgtt_init(ppgtt);
- else if (IS_GEN8(dev) || IS_GEN9(dev))
- return gen8_ppgtt_init(ppgtt, dev_priv->gtt.base.total);
else
- BUG();
+ return gen8_ppgtt_init(ppgtt, dev_priv->gtt.base.total);
}
int i915_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt)
{
@@ -1146,7 +1207,7 @@
else if (INTEL_INFO(dev)->gen >= 8)
gen8_ppgtt_enable(dev);
else
- WARN_ON(1);
+ MISSING_CASE(INTEL_INFO(dev)->gen);
if (ppgtt) {
for_each_ring(ring, dev_priv, i) {
@@ -1341,9 +1402,12 @@
/* The bind_vma code tries to be smart about tracking mappings.
* Unfortunately above, we've just wiped out the mappings
* without telling our object about it. So we need to fake it.
+ *
+ * Bind is not expected to fail since this is only called on
+ * resume and the assumption is that all requirements already exist.
*/
vma->bound &= ~GLOBAL_BIND;
- vma->bind_vma(vma, obj->cache_level, GLOBAL_BIND);
+ WARN_ON(i915_vma_bind(vma, obj->cache_level, GLOBAL_BIND));
}
@@ -1538,7 +1602,7 @@
AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;
BUG_ON(!i915_is_ggtt(vma->vm));
- intel_gtt_insert_sg_entries(vma->obj->pages, entry, flags);
+ intel_gtt_insert_sg_entries(vma->ggtt_view.pages, entry, flags);
vma->bound = GLOBAL_BIND;
}
@@ -1588,7 +1652,7 @@
if (!dev_priv->mm.aliasing_ppgtt || flags & GLOBAL_BIND) {
if (!(vma->bound & GLOBAL_BIND) ||
(cache_level != obj->cache_level)) {
- vma->vm->insert_entries(vma->vm, obj->pages,
+ vma->vm->insert_entries(vma->vm, vma->ggtt_view.pages,
vma->node.start,
cache_level, flags);
vma->bound |= GLOBAL_BIND;
@@ -1600,7 +1664,7 @@
(cache_level != obj->cache_level))) {
struct i915_hw_ppgtt *appgtt = dev_priv->mm.aliasing_ppgtt;
appgtt->base.insert_entries(&appgtt->base,
- vma->obj->pages,
+ vma->ggtt_view.pages,
vma->node.start,
cache_level, flags);
vma->bound |= LOCAL_BIND;
@@ -2165,7 +2229,8 @@
}
static struct i915_vma *__i915_gem_vma_create(struct drm_i915_gem_object *obj,
- struct i915_address_space *vm)
+ struct i915_address_space *vm,
+ const struct i915_ggtt_view *view)
{
struct i915_vma *vma = kzalloc(sizeof(*vma), GFP_KERNEL);
if (vma == NULL)
@@ -2176,12 +2241,9 @@
INIT_LIST_HEAD(&vma->exec_list);
vma->vm = vm;
vma->obj = obj;
+ vma->ggtt_view = *view;
- switch (INTEL_INFO(vm->dev)->gen) {
- case 9:
- case 8:
- case 7:
- case 6:
+ if (INTEL_INFO(vm->dev)->gen >= 6) {
if (i915_is_ggtt(vm)) {
vma->unbind_vma = ggtt_unbind_vma;
vma->bind_vma = ggtt_bind_vma;
@@ -2189,39 +2251,73 @@
vma->unbind_vma = ppgtt_unbind_vma;
vma->bind_vma = ppgtt_bind_vma;
}
- break;
- case 5:
- case 4:
- case 3:
- case 2:
+ } else {
BUG_ON(!i915_is_ggtt(vm));
vma->unbind_vma = i915_ggtt_unbind_vma;
vma->bind_vma = i915_ggtt_bind_vma;
- break;
- default:
- BUG();
}
- /* Keep GGTT vmas first to make debug easier */
- if (i915_is_ggtt(vm))
- list_add(&vma->vma_link, &obj->vma_list);
- else {
- list_add_tail(&vma->vma_link, &obj->vma_list);
+ list_add_tail(&vma->vma_link, &obj->vma_list);
+ if (!i915_is_ggtt(vm))
i915_ppgtt_get(i915_vm_to_ppgtt(vm));
- }
return vma;
}
struct i915_vma *
-i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj,
- struct i915_address_space *vm)
+i915_gem_obj_lookup_or_create_vma_view(struct drm_i915_gem_object *obj,
+ struct i915_address_space *vm,
+ const struct i915_ggtt_view *view)
{
struct i915_vma *vma;
- vma = i915_gem_obj_to_vma(obj, vm);
+ vma = i915_gem_obj_to_vma_view(obj, vm, view);
if (!vma)
- vma = __i915_gem_vma_create(obj, vm);
+ vma = __i915_gem_vma_create(obj, vm, view);
return vma;
}
+
+static inline
+int i915_get_vma_pages(struct i915_vma *vma)
+{
+ if (vma->ggtt_view.pages)
+ return 0;
+
+ if (vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL)
+ vma->ggtt_view.pages = vma->obj->pages;
+ else
+ WARN_ONCE(1, "GGTT view %u not implemented!\n",
+ vma->ggtt_view.type);
+
+ if (!vma->ggtt_view.pages) {
+ DRM_ERROR("Failed to get pages for VMA view type %u!\n",
+ vma->ggtt_view.type);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * i915_vma_bind - Sets up PTEs for a VMA in its corresponding address space.
+ * @vma: VMA to map
+ * @cache_level: mapping cache level
+ * @flags: flags like global or local mapping
+ *
+ * DMA addresses are taken from the scatter-gather table of this object (or of
+ * this VMA in case of non-default GGTT views) and PTE entries set up.
+ * Note that DMA addresses are also the only part of the SG table we care about.
+ */
+int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
+ u32 flags)
+{
+ int ret = i915_get_vma_pages(vma);
+
+ if (ret)
+ return ret;
+
+ vma->bind_vma(vma, cache_level, flags);
+
+ return 0;
+}
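
A hedged sketch of the view-aware helpers in use (the wrapper function below is hypothetical; only the called helpers come from this patch):

	static int bind_normal_view(struct drm_i915_gem_object *obj,
				    struct i915_address_space *ggtt_vm)
	{
		struct i915_vma *vma;

		vma = i915_gem_obj_lookup_or_create_vma_view(obj, ggtt_vm,
							     &i915_ggtt_view_normal);
		if (IS_ERR(vma))
			return PTR_ERR(vma);

		/*
		 * i915_vma_bind() resolves vma->ggtt_view.pages via
		 * i915_get_vma_pages() -- for the normal view simply
		 * obj->pages -- and then writes the PTEs.
		 */
		return i915_vma_bind(vma, obj->cache_level, GLOBAL_BIND);
	}

A real alternative view would pass its own populated struct i915_ggtt_view in place of i915_ggtt_view_normal.
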
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h
index beaf4bc..e377c7d 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.h
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.h
@@ -109,7 +109,20 @@
#define GEN8_PPAT_ELLC_OVERRIDE (0<<2)
#define GEN8_PPAT(i, x) ((uint64_t) (x) << ((i) * 8))
+enum i915_ggtt_view_type {
+ I915_GGTT_VIEW_NORMAL = 0,
+};
+
+struct i915_ggtt_view {
+ enum i915_ggtt_view_type type;
+
+ struct sg_table *pages;
+};
+
+extern const struct i915_ggtt_view i915_ggtt_view_normal;
+
enum i915_cache_level;
+
/**
* A VMA represents a GEM BO that is bound into an address space. Therefore, a
* VMA's presence cannot be guaranteed before binding, or after unbinding the
@@ -129,6 +142,15 @@
#define PTE_READ_ONLY (1<<2)
unsigned int bound : 4;
+ /**
+ * Support different GGTT views into the same object.
+ * This means there can be multiple VMA mappings per object and per VM.
+ * i915_ggtt_view_type is used to distinguish between those entries.
+ * The default of zero (I915_GGTT_VIEW_NORMAL) is also
+ * assumed in GEM functions which take no ggtt view parameter.
+ */
+ struct i915_ggtt_view ggtt_view;
+
/** This object's place on the active/inactive lists */
struct list_head mm_list;
@@ -146,11 +168,10 @@
/**
* How many users have pinned this object in GTT space. The following
- * users can each hold at most one reference: pwrite/pread, pin_ioctl
- * (via user_pin_count), execbuffer (objects are not allowed multiple
- * times for the same batchbuffer), and the framebuffer code. When
- * switching/pageflipping, the framebuffer code has at most two buffers
- * pinned per crtc.
+ * users can each hold at most one reference: pwrite/pread, execbuffer
+ * (objects are not allowed multiple times for the same batchbuffer),
+ * and the framebuffer code. When switching/pageflipping, the
+ * framebuffer code has at most two buffers pinned per crtc.
*
* In the worst case this is 1 + 1 + 1 + 2*2 = 7. That would fit into 3
* bits with absolutely no headroom. So use 4 bits. */
@@ -182,7 +203,7 @@
* List of objects currently involved in rendering.
*
* Includes buffers having the contents of their GPU caches
- * flushed, not necessarily primitives. last_rendering_seqno
+ * flushed, not necessarily primitives. last_read_req
* represents when the rendering involved will be completed.
*
* A reference is held on the buffer while on this list.
@@ -193,7 +214,7 @@
* LRU list of objects which are not in the ringbuffer and
* are ready to unbind, but are still in the GTT.
*
- * last_rendering_seqno is 0 while an object is in this list.
+ * last_read_req is NULL while an object is in this list.
*
* A reference is not held on the buffer while on this list,
* as merely being GTT-bound shouldn't prevent its being
diff --git a/drivers/gpu/drm/i915/i915_gem_render_state.c b/drivers/gpu/drm/i915/i915_gem_render_state.c
index 98dcd94..521548a 100644
--- a/drivers/gpu/drm/i915/i915_gem_render_state.c
+++ b/drivers/gpu/drm/i915/i915_gem_render_state.c
@@ -173,7 +173,7 @@
i915_vma_move_to_active(i915_gem_obj_to_ggtt(so.obj), ring);
- ret = __i915_add_request(ring, NULL, so.obj, NULL);
+ ret = __i915_add_request(ring, NULL, so.obj);
/* __i915_add_request moves object to inactive if it fails */
out:
i915_gem_render_state_fini(&so);
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index 4727a4e..7a24bd1 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -399,7 +399,7 @@
}
obj->fence_dirty =
- obj->last_fenced_seqno ||
+ obj->last_fenced_req ||
obj->fence_reg != I915_FENCE_REG_NONE;
obj->tiling_mode = args->tiling_mode;
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index cdaee6c..be5c990 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -670,8 +670,8 @@
err->size = obj->base.size;
err->name = obj->base.name;
- err->rseqno = obj->last_read_seqno;
- err->wseqno = obj->last_write_seqno;
+ err->rseqno = i915_gem_request_get_seqno(obj->last_read_req);
+ err->wseqno = i915_gem_request_get_seqno(obj->last_write_req);
err->gtt_offset = vma->node.start;
err->read_domains = obj->base.read_domains;
err->write_domain = obj->base.write_domain;
@@ -679,13 +679,12 @@
err->pinned = 0;
if (i915_gem_obj_is_pinned(obj))
err->pinned = 1;
- if (obj->user_pin_count > 0)
- err->pinned = -1;
err->tiling = obj->tiling_mode;
err->dirty = obj->dirty;
err->purgeable = obj->madv != I915_MADV_WILLNEED;
err->userptr = obj->userptr.mm != NULL;
- err->ring = obj->ring ? obj->ring->id : -1;
+ err->ring = obj->last_read_req ?
+ i915_gem_request_get_ring(obj->last_read_req)->id : -1;
err->cache_level = obj->cache_level;
}
@@ -719,10 +718,8 @@
break;
list_for_each_entry(vma, &obj->vma_list, vma_link)
- if (vma->vm == vm && vma->pin_count > 0) {
+ if (vma->vm == vm && vma->pin_count > 0)
capture_bo(err++, vma);
- break;
- }
}
return err - first;
@@ -767,32 +764,21 @@
struct drm_i915_private *dev_priv = dev->dev_private;
int i;
- /* Fences */
- switch (INTEL_INFO(dev)->gen) {
- case 9:
- case 8:
- case 7:
- case 6:
- for (i = 0; i < dev_priv->num_fence_regs; i++)
- error->fence[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8));
- break;
- case 5:
- case 4:
- for (i = 0; i < 16; i++)
- error->fence[i] = I915_READ64(FENCE_REG_965_0 + (i * 8));
- break;
- case 3:
- if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
- for (i = 0; i < 8; i++)
- error->fence[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4));
- case 2:
+ if (IS_GEN3(dev) || IS_GEN2(dev)) {
for (i = 0; i < 8; i++)
error->fence[i] = I915_READ(FENCE_REG_830_0 + (i * 4));
- break;
-
- default:
- BUG();
- }
+ if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
+ for (i = 0; i < 8; i++)
+ error->fence[i+8] = I915_READ(FENCE_REG_945_8 +
+ (i * 4));
+ } else if (IS_GEN5(dev) || IS_GEN4(dev))
+ for (i = 0; i < 16; i++)
+ error->fence[i] = I915_READ64(FENCE_REG_965_0 +
+ (i * 8));
+ else if (INTEL_INFO(dev)->gen >= 6)
+ for (i = 0; i < dev_priv->num_fence_regs; i++)
+ error->fence[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 +
+ (i * 8));
}
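
For reference, the restructured capture reads the fence registers as follows (summarised from the code above):

	/*
	 * gen2/gen3: 8 regs at FENCE_REG_830_0, plus 8 more at
	 *            FENCE_REG_945_8 on 945G/945GM/G33
	 * gen4/gen5: 16 64-bit regs at FENCE_REG_965_0
	 * gen6+:     dev_priv->num_fence_regs 64-bit regs at
	 *            FENCE_REG_SANDYBRIDGE_0
	 */
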
@@ -926,9 +912,13 @@
ering->vm_info.gfx_mode = I915_READ(RING_MODE_GEN7(ring));
- switch (INTEL_INFO(dev)->gen) {
- case 9:
- case 8:
+ if (IS_GEN6(dev))
+ ering->vm_info.pp_dir_base =
+ I915_READ(RING_PP_DIR_BASE_READ(ring));
+ else if (IS_GEN7(dev))
+ ering->vm_info.pp_dir_base =
+ I915_READ(RING_PP_DIR_BASE(ring));
+ else if (INTEL_INFO(dev)->gen >= 8)
for (i = 0; i < 4; i++) {
ering->vm_info.pdp[i] =
I915_READ(GEN8_RING_PDP_UDW(ring, i));
@@ -936,16 +926,6 @@
ering->vm_info.pdp[i] |=
I915_READ(GEN8_RING_PDP_LDW(ring, i));
}
- break;
- case 7:
- ering->vm_info.pp_dir_base =
- I915_READ(RING_PP_DIR_BASE(ring));
- break;
- case 6:
- ering->vm_info.pp_dir_base =
- I915_READ(RING_PP_DIR_BASE_READ(ring));
- break;
- }
}
}
@@ -1097,10 +1077,8 @@
list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
list_for_each_entry(vma, &obj->vma_list, vma_link)
- if (vma->vm == vm && vma->pin_count > 0) {
+ if (vma->vm == vm && vma->pin_count > 0)
i++;
- break;
- }
}
error->pinned_bo_count[ndx] = i - error->active_bo_count[ndx];
@@ -1378,26 +1356,15 @@
struct drm_i915_private *dev_priv = dev->dev_private;
memset(instdone, 0, sizeof(*instdone) * I915_NUM_INSTDONE_REG);
- switch (INTEL_INFO(dev)->gen) {
- case 2:
- case 3:
+ if (IS_GEN2(dev) || IS_GEN3(dev))
instdone[0] = I915_READ(INSTDONE);
- break;
- case 4:
- case 5:
- case 6:
+ else if (IS_GEN4(dev) || IS_GEN5(dev) || IS_GEN6(dev)) {
instdone[0] = I915_READ(INSTDONE_I965);
instdone[1] = I915_READ(INSTDONE1);
- break;
- default:
- WARN_ONCE(1, "Unsupported platform\n");
- case 7:
- case 8:
- case 9:
+ } else if (INTEL_INFO(dev)->gen >= 7) {
instdone[0] = I915_READ(GEN7_INSTDONE_1);
instdone[1] = I915_READ(GEN7_SC_INSTDONE);
instdone[2] = I915_READ(GEN7_SAMPLER_INSTDONE);
instdone[3] = I915_READ(GEN7_ROW_INSTDONE);
- break;
}
}
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 981834b..5d83773 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -183,6 +183,8 @@
{
assert_spin_locked(&dev_priv->irq_lock);
+ WARN_ON(enabled_irq_mask & ~interrupt_mask);
+
if (WARN_ON(!intel_irqs_enabled(dev_priv)))
return;
@@ -229,6 +231,8 @@
{
uint32_t new_val;
+ WARN_ON(enabled_irq_mask & ~interrupt_mask);
+
assert_spin_locked(&dev_priv->irq_lock);
new_val = dev_priv->pm_irq_mask;
@@ -281,10 +285,14 @@
struct drm_i915_private *dev_priv = dev->dev_private;
spin_lock_irq(&dev_priv->irq_lock);
+
WARN_ON(dev_priv->rps.pm_iir);
WARN_ON(I915_READ(gen6_pm_iir(dev_priv)) & dev_priv->pm_rps_events);
dev_priv->rps.interrupts_enabled = true;
+ I915_WRITE(gen6_pm_ier(dev_priv), I915_READ(gen6_pm_ier(dev_priv)) |
+ dev_priv->pm_rps_events);
gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
+
spin_unlock_irq(&dev_priv->irq_lock);
}
@@ -328,6 +336,8 @@
sdeimr &= ~interrupt_mask;
sdeimr |= (~enabled_irq_mask & interrupt_mask);
+ WARN_ON(enabled_irq_mask & ~interrupt_mask);
+
assert_spin_locked(&dev_priv->irq_lock);
if (WARN_ON(!intel_irqs_enabled(dev_priv)))
@@ -1013,7 +1023,7 @@
if (!intel_ring_initialized(ring))
return;
- trace_i915_gem_request_complete(ring);
+ trace_i915_gem_request_notify(ring);
wake_up_all(&ring->irq_queue);
}
@@ -1379,14 +1389,14 @@
if (rcs & GT_RENDER_USER_INTERRUPT)
notify_ring(dev, ring);
if (rcs & GT_CONTEXT_SWITCH_INTERRUPT)
- intel_execlists_handle_ctx_events(ring);
+ intel_lrc_irq_handler(ring);
bcs = tmp >> GEN8_BCS_IRQ_SHIFT;
ring = &dev_priv->ring[BCS];
if (bcs & GT_RENDER_USER_INTERRUPT)
notify_ring(dev, ring);
if (bcs & GT_CONTEXT_SWITCH_INTERRUPT)
- intel_execlists_handle_ctx_events(ring);
+ intel_lrc_irq_handler(ring);
} else
DRM_ERROR("The master control interrupt lied (GT0)!\n");
}
@@ -1402,14 +1412,14 @@
if (vcs & GT_RENDER_USER_INTERRUPT)
notify_ring(dev, ring);
if (vcs & GT_CONTEXT_SWITCH_INTERRUPT)
- intel_execlists_handle_ctx_events(ring);
+ intel_lrc_irq_handler(ring);
vcs = tmp >> GEN8_VCS2_IRQ_SHIFT;
ring = &dev_priv->ring[VCS2];
if (vcs & GT_RENDER_USER_INTERRUPT)
notify_ring(dev, ring);
if (vcs & GT_CONTEXT_SWITCH_INTERRUPT)
- intel_execlists_handle_ctx_events(ring);
+ intel_lrc_irq_handler(ring);
} else
DRM_ERROR("The master control interrupt lied (GT1)!\n");
}
@@ -1436,7 +1446,7 @@
if (vcs & GT_RENDER_USER_INTERRUPT)
notify_ring(dev, ring);
if (vcs & GT_CONTEXT_SWITCH_INTERRUPT)
- intel_execlists_handle_ctx_events(ring);
+ intel_lrc_irq_handler(ring);
} else
DRM_ERROR("The master control interrupt lied (GT3)!\n");
}
@@ -2749,18 +2759,18 @@
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
-static u32
-ring_last_seqno(struct intel_engine_cs *ring)
+static struct drm_i915_gem_request *
+ring_last_request(struct intel_engine_cs *ring)
{
return list_entry(ring->request_list.prev,
- struct drm_i915_gem_request, list)->seqno;
+ struct drm_i915_gem_request, list);
}
static bool
-ring_idle(struct intel_engine_cs *ring, u32 seqno)
+ring_idle(struct intel_engine_cs *ring)
{
return (list_empty(&ring->request_list) ||
- i915_seqno_passed(seqno, ring_last_seqno(ring)));
+ i915_gem_request_completed(ring_last_request(ring), false));
}
static bool
@@ -2980,7 +2990,7 @@
acthd = intel_ring_get_active_head(ring);
if (ring->hangcheck.seqno == seqno) {
- if (ring_idle(ring, seqno)) {
+ if (ring_idle(ring)) {
ring->hangcheck.action = HANGCHECK_IDLE;
if (waitqueue_active(&ring->irq_queue)) {
@@ -3307,8 +3317,10 @@
GEN5_IRQ_INIT(GT, dev_priv->gt_irq_mask, gt_irqs);
if (INTEL_INFO(dev)->gen >= 6) {
- pm_irqs |= dev_priv->pm_rps_events;
-
+ /*
+ * RPS interrupts will get enabled/disabled on demand when RPS
+ * itself is enabled/disabled.
+ */
if (HAS_VEBOX(dev))
pm_irqs |= PM_VEBOX_USER_INTERRUPT;
@@ -3520,7 +3532,11 @@
dev_priv->pm_irq_mask = 0xffffffff;
GEN8_IRQ_INIT_NDX(GT, 0, ~gt_interrupts[0], gt_interrupts[0]);
GEN8_IRQ_INIT_NDX(GT, 1, ~gt_interrupts[1], gt_interrupts[1]);
- GEN8_IRQ_INIT_NDX(GT, 2, dev_priv->pm_irq_mask, dev_priv->pm_rps_events);
+ /*
+ * RPS interrupts will get enabled/disabled on demand when RPS itself
+ * is enabled/disabled.
+ */
+ GEN8_IRQ_INIT_NDX(GT, 2, dev_priv->pm_irq_mask, 0);
GEN8_IRQ_INIT_NDX(GT, 3, ~gt_interrupts[3], gt_interrupts[3]);
}
@@ -3609,7 +3625,7 @@
vlv_display_irq_reset(dev_priv);
- dev_priv->irq_mask = 0;
+ dev_priv->irq_mask = ~0;
}
static void valleyview_irq_uninstall(struct drm_device *dev)
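
The IRQ changes above defer RPS interrupt enabling: at postinstall time the RPS bits are left out of the PM enable register, and only when RPS itself is brought up are they switched on. A condensed sketch of the enable path:

	spin_lock_irq(&dev_priv->irq_lock);

	WARN_ON(dev_priv->rps.pm_iir);	/* nothing should be pending yet */
	dev_priv->rps.interrupts_enabled = true;

	/* first enable the events in the IER... */
	I915_WRITE(gen6_pm_ier(dev_priv),
		   I915_READ(gen6_pm_ier(dev_priv)) | dev_priv->pm_rps_events);
	/* ...then unmask them in the IMR */
	gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);

	spin_unlock_irq(&dev_priv->irq_lock);
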
diff --git a/drivers/gpu/drm/i915/i915_params.c b/drivers/gpu/drm/i915/i915_params.c
index c91cb20..07252d8 100644
--- a/drivers/gpu/drm/i915/i915_params.c
+++ b/drivers/gpu/drm/i915/i915_params.c
@@ -35,7 +35,7 @@
.vbt_sdvo_panel_type = -1,
.enable_rc6 = -1,
.enable_fbc = -1,
- .enable_execlists = 0,
+ .enable_execlists = -1,
.enable_hangcheck = true,
.enable_ppgtt = -1,
.enable_psr = 0,
@@ -51,6 +51,7 @@
.disable_vtd_wa = 0,
.use_mmio_flip = 0,
.mmio_debug = 0,
+ .verbose_state_checks = 1,
};
module_param_named(modeset, i915.modeset, int, 0400);
@@ -122,7 +123,7 @@
module_param_named(enable_execlists, i915.enable_execlists, int, 0400);
MODULE_PARM_DESC(enable_execlists,
"Override execlists usage. "
- "(-1=auto, 0=disabled [default], 1=enabled)");
+ "(-1=auto [default], 0=disabled, 1=enabled)");
module_param_named(enable_psr, i915.enable_psr, int, 0600);
MODULE_PARM_DESC(enable_psr, "Enable PSR (default: false)");
@@ -173,3 +174,7 @@
MODULE_PARM_DESC(mmio_debug,
"Enable the MMIO debug code (default: false). This may negatively "
"affect performance.");
+
+module_param_named(verbose_state_checks, i915.verbose_state_checks, bool, 0600);
+MODULE_PARM_DESC(verbose_state_checks,
+ "Enable verbose logs (ie. WARN_ON()) in case of unexpected hw state conditions.");
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index eefdc23..40ca873 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -31,6 +31,8 @@
#define _PORT(port, a, b) ((a) + (port)*((b)-(a)))
#define _PIPE3(pipe, a, b, c) ((pipe) == PIPE_A ? (a) : \
(pipe) == PIPE_B ? (b) : (c))
+#define _PORT3(port, a, b, c) ((port) == PORT_A ? (a) : \
+ (port) == PORT_B ? (b) : (c))
#define _MASKED_FIELD(mask, value) ({ \
if (__builtin_constant_p(mask)) \
@@ -217,6 +219,8 @@
#define INSTR_SUBCLIENT_SHIFT 27
#define INSTR_SUBCLIENT_MASK 0x18000000
#define INSTR_MEDIA_SUBCLIENT 0x2
+#define INSTR_26_TO_24_MASK 0x7000000
+#define INSTR_26_TO_24_SHIFT 24
/*
* Memory interface instructions used by the kernel
@@ -246,6 +250,7 @@
#define MI_BATCH_BUFFER_END MI_INSTR(0x0a, 0)
#define MI_SUSPEND_FLUSH MI_INSTR(0x0b, 0)
#define MI_SUSPEND_FLUSH_EN (1<<0)
+#define MI_SET_APPID MI_INSTR(0x0e, 0)
#define MI_OVERLAY_FLIP MI_INSTR(0x11, 0)
#define MI_OVERLAY_CONTINUE (0x0<<21)
#define MI_OVERLAY_ON (0x1<<21)
@@ -303,8 +308,9 @@
#define MI_SEMAPHORE_POLL (1<<15)
#define MI_SEMAPHORE_SAD_GTE_SDD (1<<12)
#define MI_STORE_DWORD_IMM MI_INSTR(0x20, 1)
-#define MI_STORE_DWORD_IMM_GEN8 MI_INSTR(0x20, 2)
-#define MI_MEM_VIRTUAL (1 << 22) /* 965+ only */
+#define MI_STORE_DWORD_IMM_GEN4 MI_INSTR(0x20, 2)
+#define MI_MEM_VIRTUAL (1 << 22) /* 945,g33,965 */
+#define MI_USE_GGTT (1 << 22) /* g4x+ */
#define MI_STORE_DWORD_INDEX MI_INSTR(0x21, 1)
#define MI_STORE_DWORD_INDEX_SHIFT 2
/* Official intel docs are somewhat sloppy concerning MI_LOAD_REGISTER_IMM:
@@ -395,6 +401,7 @@
#define PIPE_CONTROL_STORE_DATA_INDEX (1<<21)
#define PIPE_CONTROL_CS_STALL (1<<20)
#define PIPE_CONTROL_TLB_INVALIDATE (1<<18)
+#define PIPE_CONTROL_MEDIA_STATE_CLEAR (1<<16)
#define PIPE_CONTROL_QW_WRITE (1<<14)
#define PIPE_CONTROL_POST_SYNC_OP_MASK (3<<14)
#define PIPE_CONTROL_DEPTH_STALL (1<<13)
@@ -469,17 +476,18 @@
*/
#define BCS_SWCTRL 0x22200
-#define HS_INVOCATION_COUNT 0x2300
-#define DS_INVOCATION_COUNT 0x2308
-#define IA_VERTICES_COUNT 0x2310
-#define IA_PRIMITIVES_COUNT 0x2318
-#define VS_INVOCATION_COUNT 0x2320
-#define GS_INVOCATION_COUNT 0x2328
-#define GS_PRIMITIVES_COUNT 0x2330
-#define CL_INVOCATION_COUNT 0x2338
-#define CL_PRIMITIVES_COUNT 0x2340
-#define PS_INVOCATION_COUNT 0x2348
-#define PS_DEPTH_COUNT 0x2350
+#define GPGPU_THREADS_DISPATCHED 0x2290
+#define HS_INVOCATION_COUNT 0x2300
+#define DS_INVOCATION_COUNT 0x2308
+#define IA_VERTICES_COUNT 0x2310
+#define IA_PRIMITIVES_COUNT 0x2318
+#define VS_INVOCATION_COUNT 0x2320
+#define GS_INVOCATION_COUNT 0x2328
+#define GS_PRIMITIVES_COUNT 0x2330
+#define CL_INVOCATION_COUNT 0x2338
+#define CL_PRIMITIVES_COUNT 0x2340
+#define PS_INVOCATION_COUNT 0x2348
+#define PS_DEPTH_COUNT 0x2350
/* There are the 4 64-bit counter registers, one for each stream output */
#define GEN7_SO_NUM_PRIMS_WRITTEN(n) (0x5200 + (n) * 8)
@@ -1128,6 +1136,7 @@
#define GEN6_VERSYNC (RING_SYNC_1(VEBOX_RING_BASE))
#define GEN6_VEVSYNC (RING_SYNC_2(VEBOX_RING_BASE))
#define GEN6_NOSYNC 0
+#define RING_PSMI_CTL(base) ((base)+0x50)
#define RING_MAX_IDLE(base) ((base)+0x54)
#define RING_HWS_PGA(base) ((base)+0x80)
#define RING_HWS_PGA_GEN6(base) ((base)+0x2080)
@@ -1458,6 +1467,7 @@
#define GEN6_BLITTER_FBC_NOTIFY (1<<3)
#define GEN6_RC_SLEEP_PSMI_CONTROL 0x2050
+#define GEN6_PSMI_SLEEP_MSG_DISABLE (1 << 0)
#define GEN8_RC_SEMA_IDLE_MSG_DISABLE (1 << 12)
#define GEN8_FF_DOP_CLOCK_GATE_DISABLE (1<<10)
@@ -1506,7 +1516,7 @@
#define I915_ISP_INTERRUPT (1<<22)
#define I915_LPE_PIPE_B_INTERRUPT (1<<21)
#define I915_LPE_PIPE_A_INTERRUPT (1<<20)
-#define I915_MIPIB_INTERRUPT (1<<19)
+#define I915_MIPIC_INTERRUPT (1<<19)
#define I915_MIPIA_INTERRUPT (1<<18)
#define I915_PIPE_CONTROL_NOTIFY_INTERRUPT (1<<18)
#define I915_DISPLAY_PORT_INTERRUPT (1<<17)
@@ -2536,6 +2546,42 @@
#define PIPESRC(trans) _TRANSCODER2(trans, _PIPEASRC)
#define PIPE_MULT(trans) _TRANSCODER2(trans, _PIPE_MULT_A)
+/* VLV eDP PSR registers */
+#define _PSRCTLA (VLV_DISPLAY_BASE + 0x60090)
+#define _PSRCTLB (VLV_DISPLAY_BASE + 0x61090)
+#define VLV_EDP_PSR_ENABLE (1<<0)
+#define VLV_EDP_PSR_RESET (1<<1)
+#define VLV_EDP_PSR_MODE_MASK (7<<2)
+#define VLV_EDP_PSR_MODE_HW_TIMER (1<<3)
+#define VLV_EDP_PSR_MODE_SW_TIMER (1<<2)
+#define VLV_EDP_PSR_SINGLE_FRAME_UPDATE (1<<7)
+#define VLV_EDP_PSR_ACTIVE_ENTRY (1<<8)
+#define VLV_EDP_PSR_SRC_TRANSMITTER_STATE (1<<9)
+#define VLV_EDP_PSR_DBL_FRAME (1<<10)
+#define VLV_EDP_PSR_FRAME_COUNT_MASK (0xff<<16)
+#define VLV_EDP_PSR_IDLE_FRAME_SHIFT 16
+#define VLV_PSRCTL(pipe) _PIPE(pipe, _PSRCTLA, _PSRCTLB)
+
+#define _VSCSDPA (VLV_DISPLAY_BASE + 0x600a0)
+#define _VSCSDPB (VLV_DISPLAY_BASE + 0x610a0)
+#define VLV_EDP_PSR_SDP_FREQ_MASK (3<<30)
+#define VLV_EDP_PSR_SDP_FREQ_ONCE (1<<31)
+#define VLV_EDP_PSR_SDP_FREQ_EVFRAME (1<<30)
+#define VLV_VSCSDP(pipe) _PIPE(pipe, _VSCSDPA, _VSCSDPB)
+
+#define _PSRSTATA (VLV_DISPLAY_BASE + 0x60094)
+#define _PSRSTATB (VLV_DISPLAY_BASE + 0x61094)
+#define VLV_EDP_PSR_LAST_STATE_MASK (7<<3)
+#define VLV_EDP_PSR_CURR_STATE_MASK 7
+#define VLV_EDP_PSR_DISABLED (0<<0)
+#define VLV_EDP_PSR_INACTIVE (1<<0)
+#define VLV_EDP_PSR_IN_TRANS_TO_ACTIVE (2<<0)
+#define VLV_EDP_PSR_ACTIVE_NORFB_UP (3<<0)
+#define VLV_EDP_PSR_ACTIVE_SF_UPDATE (4<<0)
+#define VLV_EDP_PSR_EXIT (5<<0)
+#define VLV_EDP_PSR_IN_TRANS (1<<7)
+#define VLV_PSRSTAT(pipe) _PIPE(pipe, _PSRSTATA, _PSRSTATB)
+
/* HSW+ eDP PSR registers */
#define EDP_PSR_BASE(dev) (IS_HASWELL(dev) ? 0x64800 : 0x6f800)
#define EDP_PSR_CTL(dev) (EDP_PSR_BASE(dev) + 0)
@@ -2759,7 +2805,8 @@
#define DC_BALANCE_RESET (1 << 25)
#define PORT_DFT2_G4X (dev_priv->info.display_mmio_offset + 0x61154)
#define DC_BALANCE_RESET_VLV (1 << 31)
-#define PIPE_SCRAMBLE_RESET_MASK (0x3 << 0)
+#define PIPE_SCRAMBLE_RESET_MASK ((1 << 14) | (0x3 << 0))
+#define PIPE_C_SCRAMBLE_RESET (1 << 14) /* chv */
#define PIPE_B_SCRAMBLE_RESET (1 << 1)
#define PIPE_A_SCRAMBLE_RESET (1 << 0)
@@ -6003,6 +6050,10 @@
#define GEN8_PMINTR_REDIRECT_TO_NON_DISP (1<<31)
#define VLV_PWRDWNUPCTL 0xA294
+#define VLV_CHICKEN_3 (VLV_DISPLAY_BASE + 0x7040C)
+#define PIXEL_OVERLAP_CNT_MASK (3 << 30)
+#define PIXEL_OVERLAP_CNT_SHIFT 30
+
#define GEN6_PMISR 0x44020
#define GEN6_PMIMR 0x44024 /* rps_lock */
#define GEN6_PMIIR 0x44028
@@ -6628,29 +6679,31 @@
#define PIPE_CSC_POSTOFF_ME(pipe) _PIPE(pipe, _PIPE_A_CSC_POSTOFF_ME, _PIPE_B_CSC_POSTOFF_ME)
#define PIPE_CSC_POSTOFF_LO(pipe) _PIPE(pipe, _PIPE_A_CSC_POSTOFF_LO, _PIPE_B_CSC_POSTOFF_LO)
-/* VLV MIPI registers */
+/* MIPI DSI registers */
+
+#define _MIPI_PORT(port, a, c) _PORT3(port, a, 0, c) /* ports A and C only */
#define _MIPIA_PORT_CTRL (VLV_DISPLAY_BASE + 0x61190)
-#define _MIPIB_PORT_CTRL (VLV_DISPLAY_BASE + 0x61700)
-#define MIPI_PORT_CTRL(tc) _TRANSCODER(tc, _MIPIA_PORT_CTRL, \
- _MIPIB_PORT_CTRL)
-#define DPI_ENABLE (1 << 31) /* A + B */
+#define _MIPIC_PORT_CTRL (VLV_DISPLAY_BASE + 0x61700)
+#define MIPI_PORT_CTRL(port) _MIPI_PORT(port, _MIPIA_PORT_CTRL, _MIPIC_PORT_CTRL)
+#define DPI_ENABLE (1 << 31) /* A + C */
#define MIPIA_MIPI4DPHY_DELAY_COUNT_SHIFT 27
#define MIPIA_MIPI4DPHY_DELAY_COUNT_MASK (0xf << 27)
+#define DUAL_LINK_MODE_SHIFT 26
#define DUAL_LINK_MODE_MASK (1 << 26)
#define DUAL_LINK_MODE_FRONT_BACK (0 << 26)
#define DUAL_LINK_MODE_PIXEL_ALTERNATIVE (1 << 26)
-#define DITHERING_ENABLE (1 << 25) /* A + B */
+#define DITHERING_ENABLE (1 << 25) /* A + C */
#define FLOPPED_HSTX (1 << 23)
#define DE_INVERT (1 << 19) /* XXX */
#define MIPIA_FLISDSI_DELAY_COUNT_SHIFT 18
#define MIPIA_FLISDSI_DELAY_COUNT_MASK (0xf << 18)
#define AFE_LATCHOUT (1 << 17)
#define LP_OUTPUT_HOLD (1 << 16)
-#define MIPIB_FLISDSI_DELAY_COUNT_HIGH_SHIFT 15
-#define MIPIB_FLISDSI_DELAY_COUNT_HIGH_MASK (1 << 15)
-#define MIPIB_MIPI4DPHY_DELAY_COUNT_SHIFT 11
-#define MIPIB_MIPI4DPHY_DELAY_COUNT_MASK (0xf << 11)
+#define MIPIC_FLISDSI_DELAY_COUNT_HIGH_SHIFT 15
+#define MIPIC_FLISDSI_DELAY_COUNT_HIGH_MASK (1 << 15)
+#define MIPIC_MIPI4DPHY_DELAY_COUNT_SHIFT 11
+#define MIPIC_MIPI4DPHY_DELAY_COUNT_MASK (0xf << 11)
#define CSB_SHIFT 9
#define CSB_MASK (3 << 9)
#define CSB_20MHZ (0 << 9)
@@ -6659,10 +6712,10 @@
#define BANDGAP_MASK (1 << 8)
#define BANDGAP_PNW_CIRCUIT (0 << 8)
#define BANDGAP_LNC_CIRCUIT (1 << 8)
-#define MIPIB_FLISDSI_DELAY_COUNT_LOW_SHIFT 5
-#define MIPIB_FLISDSI_DELAY_COUNT_LOW_MASK (7 << 5)
-#define TEARING_EFFECT_DELAY (1 << 4) /* A + B */
-#define TEARING_EFFECT_SHIFT 2 /* A + B */
+#define MIPIC_FLISDSI_DELAY_COUNT_LOW_SHIFT 5
+#define MIPIC_FLISDSI_DELAY_COUNT_LOW_MASK (7 << 5)
+#define TEARING_EFFECT_DELAY (1 << 4) /* A + C */
+#define TEARING_EFFECT_SHIFT 2 /* A + C */
#define TEARING_EFFECT_MASK (3 << 2)
#define TEARING_EFFECT_OFF (0 << 2)
#define TEARING_EFFECT_DSI (1 << 2)
@@ -6674,9 +6727,9 @@
#define LANE_CONFIGURATION_DUAL_LINK_B (2 << 0)
#define _MIPIA_TEARING_CTRL (VLV_DISPLAY_BASE + 0x61194)
-#define _MIPIB_TEARING_CTRL (VLV_DISPLAY_BASE + 0x61704)
-#define MIPI_TEARING_CTRL(tc) _TRANSCODER(tc, \
- _MIPIA_TEARING_CTRL, _MIPIB_TEARING_CTRL)
+#define _MIPIC_TEARING_CTRL (VLV_DISPLAY_BASE + 0x61704)
+#define MIPI_TEARING_CTRL(port) _MIPI_PORT(port, \
+ _MIPIA_TEARING_CTRL, _MIPIC_TEARING_CTRL)
#define TEARING_EFFECT_DELAY_SHIFT 0
#define TEARING_EFFECT_DELAY_MASK (0xffff << 0)
@@ -6686,9 +6739,9 @@
/* MIPI DSI Controller and D-PHY registers */
#define _MIPIA_DEVICE_READY (dev_priv->mipi_mmio_base + 0xb000)
-#define _MIPIB_DEVICE_READY (dev_priv->mipi_mmio_base + 0xb800)
-#define MIPI_DEVICE_READY(tc) _TRANSCODER(tc, _MIPIA_DEVICE_READY, \
- _MIPIB_DEVICE_READY)
+#define _MIPIC_DEVICE_READY (dev_priv->mipi_mmio_base + 0xb800)
+#define MIPI_DEVICE_READY(port) _MIPI_PORT(port, _MIPIA_DEVICE_READY, \
+ _MIPIC_DEVICE_READY)
#define BUS_POSSESSION (1 << 3) /* set to give bus to receiver */
#define ULPS_STATE_MASK (3 << 1)
#define ULPS_STATE_ENTER (2 << 1)
@@ -6697,13 +6750,13 @@
#define DEVICE_READY (1 << 0)
#define _MIPIA_INTR_STAT (dev_priv->mipi_mmio_base + 0xb004)
-#define _MIPIB_INTR_STAT (dev_priv->mipi_mmio_base + 0xb804)
-#define MIPI_INTR_STAT(tc) _TRANSCODER(tc, _MIPIA_INTR_STAT, \
- _MIPIB_INTR_STAT)
+#define _MIPIC_INTR_STAT (dev_priv->mipi_mmio_base + 0xb804)
+#define MIPI_INTR_STAT(port) _MIPI_PORT(port, _MIPIA_INTR_STAT, \
+ _MIPIC_INTR_STAT)
#define _MIPIA_INTR_EN (dev_priv->mipi_mmio_base + 0xb008)
-#define _MIPIB_INTR_EN (dev_priv->mipi_mmio_base + 0xb808)
-#define MIPI_INTR_EN(tc) _TRANSCODER(tc, _MIPIA_INTR_EN, \
- _MIPIB_INTR_EN)
+#define _MIPIC_INTR_EN (dev_priv->mipi_mmio_base + 0xb808)
+#define MIPI_INTR_EN(port) _MIPI_PORT(port, _MIPIA_INTR_EN, \
+ _MIPIC_INTR_EN)
#define TEARING_EFFECT (1 << 31)
#define SPL_PKT_SENT_INTERRUPT (1 << 30)
#define GEN_READ_DATA_AVAIL (1 << 29)
@@ -6738,9 +6791,9 @@
#define RXSOT_ERROR (1 << 0)
#define _MIPIA_DSI_FUNC_PRG (dev_priv->mipi_mmio_base + 0xb00c)
-#define _MIPIB_DSI_FUNC_PRG (dev_priv->mipi_mmio_base + 0xb80c)
-#define MIPI_DSI_FUNC_PRG(tc) _TRANSCODER(tc, _MIPIA_DSI_FUNC_PRG, \
- _MIPIB_DSI_FUNC_PRG)
+#define _MIPIC_DSI_FUNC_PRG (dev_priv->mipi_mmio_base + 0xb80c)
+#define MIPI_DSI_FUNC_PRG(port) _MIPI_PORT(port, _MIPIA_DSI_FUNC_PRG, \
+ _MIPIC_DSI_FUNC_PRG)
#define CMD_MODE_DATA_WIDTH_MASK (7 << 13)
#define CMD_MODE_NOT_SUPPORTED (0 << 13)
#define CMD_MODE_DATA_WIDTH_16_BIT (1 << 13)
@@ -6762,93 +6815,93 @@
#define DATA_LANES_PRG_REG_MASK (7 << 0)
#define _MIPIA_HS_TX_TIMEOUT (dev_priv->mipi_mmio_base + 0xb010)
-#define _MIPIB_HS_TX_TIMEOUT (dev_priv->mipi_mmio_base + 0xb810)
-#define MIPI_HS_TX_TIMEOUT(tc) _TRANSCODER(tc, _MIPIA_HS_TX_TIMEOUT, \
- _MIPIB_HS_TX_TIMEOUT)
+#define _MIPIC_HS_TX_TIMEOUT (dev_priv->mipi_mmio_base + 0xb810)
+#define MIPI_HS_TX_TIMEOUT(port) _MIPI_PORT(port, _MIPIA_HS_TX_TIMEOUT, \
+ _MIPIC_HS_TX_TIMEOUT)
#define HIGH_SPEED_TX_TIMEOUT_COUNTER_MASK 0xffffff
#define _MIPIA_LP_RX_TIMEOUT (dev_priv->mipi_mmio_base + 0xb014)
-#define _MIPIB_LP_RX_TIMEOUT (dev_priv->mipi_mmio_base + 0xb814)
-#define MIPI_LP_RX_TIMEOUT(tc) _TRANSCODER(tc, _MIPIA_LP_RX_TIMEOUT, \
- _MIPIB_LP_RX_TIMEOUT)
+#define _MIPIC_LP_RX_TIMEOUT (dev_priv->mipi_mmio_base + 0xb814)
+#define MIPI_LP_RX_TIMEOUT(port) _MIPI_PORT(port, _MIPIA_LP_RX_TIMEOUT, \
+ _MIPIC_LP_RX_TIMEOUT)
#define LOW_POWER_RX_TIMEOUT_COUNTER_MASK 0xffffff
#define _MIPIA_TURN_AROUND_TIMEOUT (dev_priv->mipi_mmio_base + 0xb018)
-#define _MIPIB_TURN_AROUND_TIMEOUT (dev_priv->mipi_mmio_base + 0xb818)
-#define MIPI_TURN_AROUND_TIMEOUT(tc) _TRANSCODER(tc, \
- _MIPIA_TURN_AROUND_TIMEOUT, _MIPIB_TURN_AROUND_TIMEOUT)
+#define _MIPIC_TURN_AROUND_TIMEOUT (dev_priv->mipi_mmio_base + 0xb818)
+#define MIPI_TURN_AROUND_TIMEOUT(port) _MIPI_PORT(port, \
+ _MIPIA_TURN_AROUND_TIMEOUT, _MIPIC_TURN_AROUND_TIMEOUT)
#define TURN_AROUND_TIMEOUT_MASK 0x3f
#define _MIPIA_DEVICE_RESET_TIMER (dev_priv->mipi_mmio_base + 0xb01c)
-#define _MIPIB_DEVICE_RESET_TIMER (dev_priv->mipi_mmio_base + 0xb81c)
-#define MIPI_DEVICE_RESET_TIMER(tc) _TRANSCODER(tc, \
- _MIPIA_DEVICE_RESET_TIMER, _MIPIB_DEVICE_RESET_TIMER)
+#define _MIPIC_DEVICE_RESET_TIMER (dev_priv->mipi_mmio_base + 0xb81c)
+#define MIPI_DEVICE_RESET_TIMER(port) _MIPI_PORT(port, \
+ _MIPIA_DEVICE_RESET_TIMER, _MIPIC_DEVICE_RESET_TIMER)
#define DEVICE_RESET_TIMER_MASK 0xffff
#define _MIPIA_DPI_RESOLUTION (dev_priv->mipi_mmio_base + 0xb020)
-#define _MIPIB_DPI_RESOLUTION (dev_priv->mipi_mmio_base + 0xb820)
-#define MIPI_DPI_RESOLUTION(tc) _TRANSCODER(tc, _MIPIA_DPI_RESOLUTION, \
- _MIPIB_DPI_RESOLUTION)
+#define _MIPIC_DPI_RESOLUTION (dev_priv->mipi_mmio_base + 0xb820)
+#define MIPI_DPI_RESOLUTION(port) _MIPI_PORT(port, _MIPIA_DPI_RESOLUTION, \
+ _MIPIC_DPI_RESOLUTION)
#define VERTICAL_ADDRESS_SHIFT 16
#define VERTICAL_ADDRESS_MASK (0xffff << 16)
#define HORIZONTAL_ADDRESS_SHIFT 0
#define HORIZONTAL_ADDRESS_MASK 0xffff
#define _MIPIA_DBI_FIFO_THROTTLE (dev_priv->mipi_mmio_base + 0xb024)
-#define _MIPIB_DBI_FIFO_THROTTLE (dev_priv->mipi_mmio_base + 0xb824)
-#define MIPI_DBI_FIFO_THROTTLE(tc) _TRANSCODER(tc, \
- _MIPIA_DBI_FIFO_THROTTLE, _MIPIB_DBI_FIFO_THROTTLE)
+#define _MIPIC_DBI_FIFO_THROTTLE (dev_priv->mipi_mmio_base + 0xb824)
+#define MIPI_DBI_FIFO_THROTTLE(port) _MIPI_PORT(port, \
+ _MIPIA_DBI_FIFO_THROTTLE, _MIPIC_DBI_FIFO_THROTTLE)
#define DBI_FIFO_EMPTY_HALF (0 << 0)
#define DBI_FIFO_EMPTY_QUARTER (1 << 0)
#define DBI_FIFO_EMPTY_7_LOCATIONS (2 << 0)
/* regs below are bits 15:0 */
#define _MIPIA_HSYNC_PADDING_COUNT (dev_priv->mipi_mmio_base + 0xb028)
-#define _MIPIB_HSYNC_PADDING_COUNT (dev_priv->mipi_mmio_base + 0xb828)
-#define MIPI_HSYNC_PADDING_COUNT(tc) _TRANSCODER(tc, \
- _MIPIA_HSYNC_PADDING_COUNT, _MIPIB_HSYNC_PADDING_COUNT)
+#define _MIPIC_HSYNC_PADDING_COUNT (dev_priv->mipi_mmio_base + 0xb828)
+#define MIPI_HSYNC_PADDING_COUNT(port) _MIPI_PORT(port, \
+ _MIPIA_HSYNC_PADDING_COUNT, _MIPIC_HSYNC_PADDING_COUNT)
#define _MIPIA_HBP_COUNT (dev_priv->mipi_mmio_base + 0xb02c)
-#define _MIPIB_HBP_COUNT (dev_priv->mipi_mmio_base + 0xb82c)
-#define MIPI_HBP_COUNT(tc) _TRANSCODER(tc, _MIPIA_HBP_COUNT, \
- _MIPIB_HBP_COUNT)
+#define _MIPIC_HBP_COUNT (dev_priv->mipi_mmio_base + 0xb82c)
+#define MIPI_HBP_COUNT(port) _MIPI_PORT(port, _MIPIA_HBP_COUNT, \
+ _MIPIC_HBP_COUNT)
#define _MIPIA_HFP_COUNT (dev_priv->mipi_mmio_base + 0xb030)
-#define _MIPIB_HFP_COUNT (dev_priv->mipi_mmio_base + 0xb830)
-#define MIPI_HFP_COUNT(tc) _TRANSCODER(tc, _MIPIA_HFP_COUNT, \
- _MIPIB_HFP_COUNT)
+#define _MIPIC_HFP_COUNT (dev_priv->mipi_mmio_base + 0xb830)
+#define MIPI_HFP_COUNT(port) _MIPI_PORT(port, _MIPIA_HFP_COUNT, \
+ _MIPIC_HFP_COUNT)
#define _MIPIA_HACTIVE_AREA_COUNT (dev_priv->mipi_mmio_base + 0xb034)
-#define _MIPIB_HACTIVE_AREA_COUNT (dev_priv->mipi_mmio_base + 0xb834)
-#define MIPI_HACTIVE_AREA_COUNT(tc) _TRANSCODER(tc, \
- _MIPIA_HACTIVE_AREA_COUNT, _MIPIB_HACTIVE_AREA_COUNT)
+#define _MIPIC_HACTIVE_AREA_COUNT (dev_priv->mipi_mmio_base + 0xb834)
+#define MIPI_HACTIVE_AREA_COUNT(port) _MIPI_PORT(port, \
+ _MIPIA_HACTIVE_AREA_COUNT, _MIPIC_HACTIVE_AREA_COUNT)
#define _MIPIA_VSYNC_PADDING_COUNT (dev_priv->mipi_mmio_base + 0xb038)
-#define _MIPIB_VSYNC_PADDING_COUNT (dev_priv->mipi_mmio_base + 0xb838)
-#define MIPI_VSYNC_PADDING_COUNT(tc) _TRANSCODER(tc, \
- _MIPIA_VSYNC_PADDING_COUNT, _MIPIB_VSYNC_PADDING_COUNT)
+#define _MIPIC_VSYNC_PADDING_COUNT (dev_priv->mipi_mmio_base + 0xb838)
+#define MIPI_VSYNC_PADDING_COUNT(port) _MIPI_PORT(port, \
+ _MIPIA_VSYNC_PADDING_COUNT, _MIPIC_VSYNC_PADDING_COUNT)
#define _MIPIA_VBP_COUNT (dev_priv->mipi_mmio_base + 0xb03c)
-#define _MIPIB_VBP_COUNT (dev_priv->mipi_mmio_base + 0xb83c)
-#define MIPI_VBP_COUNT(tc) _TRANSCODER(tc, _MIPIA_VBP_COUNT, \
- _MIPIB_VBP_COUNT)
+#define _MIPIC_VBP_COUNT (dev_priv->mipi_mmio_base + 0xb83c)
+#define MIPI_VBP_COUNT(port) _MIPI_PORT(port, _MIPIA_VBP_COUNT, \
+ _MIPIC_VBP_COUNT)
#define _MIPIA_VFP_COUNT (dev_priv->mipi_mmio_base + 0xb040)
-#define _MIPIB_VFP_COUNT (dev_priv->mipi_mmio_base + 0xb840)
-#define MIPI_VFP_COUNT(tc) _TRANSCODER(tc, _MIPIA_VFP_COUNT, \
- _MIPIB_VFP_COUNT)
+#define _MIPIC_VFP_COUNT (dev_priv->mipi_mmio_base + 0xb840)
+#define MIPI_VFP_COUNT(port) _MIPI_PORT(port, _MIPIA_VFP_COUNT, \
+ _MIPIC_VFP_COUNT)
#define _MIPIA_HIGH_LOW_SWITCH_COUNT (dev_priv->mipi_mmio_base + 0xb044)
-#define _MIPIB_HIGH_LOW_SWITCH_COUNT (dev_priv->mipi_mmio_base + 0xb844)
-#define MIPI_HIGH_LOW_SWITCH_COUNT(tc) _TRANSCODER(tc, \
- _MIPIA_HIGH_LOW_SWITCH_COUNT, _MIPIB_HIGH_LOW_SWITCH_COUNT)
+#define _MIPIC_HIGH_LOW_SWITCH_COUNT (dev_priv->mipi_mmio_base + 0xb844)
+#define MIPI_HIGH_LOW_SWITCH_COUNT(port) _MIPI_PORT(port, \
+ _MIPIA_HIGH_LOW_SWITCH_COUNT, _MIPIC_HIGH_LOW_SWITCH_COUNT)
/* regs above are bits 15:0 */
#define _MIPIA_DPI_CONTROL (dev_priv->mipi_mmio_base + 0xb048)
-#define _MIPIB_DPI_CONTROL (dev_priv->mipi_mmio_base + 0xb848)
-#define MIPI_DPI_CONTROL(tc) _TRANSCODER(tc, _MIPIA_DPI_CONTROL, \
- _MIPIB_DPI_CONTROL)
+#define _MIPIC_DPI_CONTROL (dev_priv->mipi_mmio_base + 0xb848)
+#define MIPI_DPI_CONTROL(port) _MIPI_PORT(port, _MIPIA_DPI_CONTROL, \
+ _MIPIC_DPI_CONTROL)
#define DPI_LP_MODE (1 << 6)
#define BACKLIGHT_OFF (1 << 5)
#define BACKLIGHT_ON (1 << 4)
@@ -6858,30 +6911,30 @@
#define SHUTDOWN (1 << 0)
#define _MIPIA_DPI_DATA (dev_priv->mipi_mmio_base + 0xb04c)
-#define _MIPIB_DPI_DATA (dev_priv->mipi_mmio_base + 0xb84c)
-#define MIPI_DPI_DATA(tc) _TRANSCODER(tc, _MIPIA_DPI_DATA, \
- _MIPIB_DPI_DATA)
+#define _MIPIC_DPI_DATA (dev_priv->mipi_mmio_base + 0xb84c)
+#define MIPI_DPI_DATA(port) _MIPI_PORT(port, _MIPIA_DPI_DATA, \
+ _MIPIC_DPI_DATA)
#define COMMAND_BYTE_SHIFT 0
#define COMMAND_BYTE_MASK (0x3f << 0)
#define _MIPIA_INIT_COUNT (dev_priv->mipi_mmio_base + 0xb050)
-#define _MIPIB_INIT_COUNT (dev_priv->mipi_mmio_base + 0xb850)
-#define MIPI_INIT_COUNT(tc) _TRANSCODER(tc, _MIPIA_INIT_COUNT, \
- _MIPIB_INIT_COUNT)
+#define _MIPIC_INIT_COUNT (dev_priv->mipi_mmio_base + 0xb850)
+#define MIPI_INIT_COUNT(port) _MIPI_PORT(port, _MIPIA_INIT_COUNT, \
+ _MIPIC_INIT_COUNT)
#define MASTER_INIT_TIMER_SHIFT 0
#define MASTER_INIT_TIMER_MASK (0xffff << 0)
#define _MIPIA_MAX_RETURN_PKT_SIZE (dev_priv->mipi_mmio_base + 0xb054)
-#define _MIPIB_MAX_RETURN_PKT_SIZE (dev_priv->mipi_mmio_base + 0xb854)
-#define MIPI_MAX_RETURN_PKT_SIZE(tc) _TRANSCODER(tc, \
- _MIPIA_MAX_RETURN_PKT_SIZE, _MIPIB_MAX_RETURN_PKT_SIZE)
+#define _MIPIC_MAX_RETURN_PKT_SIZE (dev_priv->mipi_mmio_base + 0xb854)
+#define MIPI_MAX_RETURN_PKT_SIZE(port) _MIPI_PORT(port, \
+ _MIPIA_MAX_RETURN_PKT_SIZE, _MIPIC_MAX_RETURN_PKT_SIZE)
#define MAX_RETURN_PKT_SIZE_SHIFT 0
#define MAX_RETURN_PKT_SIZE_MASK (0x3ff << 0)
#define _MIPIA_VIDEO_MODE_FORMAT (dev_priv->mipi_mmio_base + 0xb058)
-#define _MIPIB_VIDEO_MODE_FORMAT (dev_priv->mipi_mmio_base + 0xb858)
-#define MIPI_VIDEO_MODE_FORMAT(tc) _TRANSCODER(tc, \
- _MIPIA_VIDEO_MODE_FORMAT, _MIPIB_VIDEO_MODE_FORMAT)
+#define _MIPIC_VIDEO_MODE_FORMAT (dev_priv->mipi_mmio_base + 0xb858)
+#define MIPI_VIDEO_MODE_FORMAT(port) _MIPI_PORT(port, \
+ _MIPIA_VIDEO_MODE_FORMAT, _MIPIC_VIDEO_MODE_FORMAT)
#define RANDOM_DPI_DISPLAY_RESOLUTION (1 << 4)
#define DISABLE_VIDEO_BTA (1 << 3)
#define IP_TG_CONFIG (1 << 2)
@@ -6890,9 +6943,9 @@
#define VIDEO_MODE_BURST (3 << 0)
#define _MIPIA_EOT_DISABLE (dev_priv->mipi_mmio_base + 0xb05c)
-#define _MIPIB_EOT_DISABLE (dev_priv->mipi_mmio_base + 0xb85c)
-#define MIPI_EOT_DISABLE(tc) _TRANSCODER(tc, _MIPIA_EOT_DISABLE, \
- _MIPIB_EOT_DISABLE)
+#define _MIPIC_EOT_DISABLE (dev_priv->mipi_mmio_base + 0xb85c)
+#define MIPI_EOT_DISABLE(port) _MIPI_PORT(port, _MIPIA_EOT_DISABLE, \
+ _MIPIC_EOT_DISABLE)
#define LP_RX_TIMEOUT_ERROR_RECOVERY_DISABLE (1 << 7)
#define HS_RX_TIMEOUT_ERROR_RECOVERY_DISABLE (1 << 6)
#define LOW_CONTENTION_RECOVERY_DISABLE (1 << 5)
@@ -6903,32 +6956,32 @@
#define EOT_DISABLE (1 << 0)
#define _MIPIA_LP_BYTECLK (dev_priv->mipi_mmio_base + 0xb060)
-#define _MIPIB_LP_BYTECLK (dev_priv->mipi_mmio_base + 0xb860)
-#define MIPI_LP_BYTECLK(tc) _TRANSCODER(tc, _MIPIA_LP_BYTECLK, \
- _MIPIB_LP_BYTECLK)
+#define _MIPIC_LP_BYTECLK (dev_priv->mipi_mmio_base + 0xb860)
+#define MIPI_LP_BYTECLK(port) _MIPI_PORT(port, _MIPIA_LP_BYTECLK, \
+ _MIPIC_LP_BYTECLK)
#define LP_BYTECLK_SHIFT 0
#define LP_BYTECLK_MASK (0xffff << 0)
/* bits 31:0 */
#define _MIPIA_LP_GEN_DATA (dev_priv->mipi_mmio_base + 0xb064)
-#define _MIPIB_LP_GEN_DATA (dev_priv->mipi_mmio_base + 0xb864)
-#define MIPI_LP_GEN_DATA(tc) _TRANSCODER(tc, _MIPIA_LP_GEN_DATA, \
- _MIPIB_LP_GEN_DATA)
+#define _MIPIC_LP_GEN_DATA (dev_priv->mipi_mmio_base + 0xb864)
+#define MIPI_LP_GEN_DATA(port) _MIPI_PORT(port, _MIPIA_LP_GEN_DATA, \
+ _MIPIC_LP_GEN_DATA)
/* bits 31:0 */
#define _MIPIA_HS_GEN_DATA (dev_priv->mipi_mmio_base + 0xb068)
-#define _MIPIB_HS_GEN_DATA (dev_priv->mipi_mmio_base + 0xb868)
-#define MIPI_HS_GEN_DATA(tc) _TRANSCODER(tc, _MIPIA_HS_GEN_DATA, \
- _MIPIB_HS_GEN_DATA)
+#define _MIPIC_HS_GEN_DATA (dev_priv->mipi_mmio_base + 0xb868)
+#define MIPI_HS_GEN_DATA(port) _MIPI_PORT(port, _MIPIA_HS_GEN_DATA, \
+ _MIPIC_HS_GEN_DATA)
#define _MIPIA_LP_GEN_CTRL (dev_priv->mipi_mmio_base + 0xb06c)
-#define _MIPIB_LP_GEN_CTRL (dev_priv->mipi_mmio_base + 0xb86c)
-#define MIPI_LP_GEN_CTRL(tc) _TRANSCODER(tc, _MIPIA_LP_GEN_CTRL, \
- _MIPIB_LP_GEN_CTRL)
+#define _MIPIC_LP_GEN_CTRL (dev_priv->mipi_mmio_base + 0xb86c)
+#define MIPI_LP_GEN_CTRL(port) _MIPI_PORT(port, _MIPIA_LP_GEN_CTRL, \
+ _MIPIC_LP_GEN_CTRL)
#define _MIPIA_HS_GEN_CTRL (dev_priv->mipi_mmio_base + 0xb070)
-#define _MIPIB_HS_GEN_CTRL (dev_priv->mipi_mmio_base + 0xb870)
-#define MIPI_HS_GEN_CTRL(tc) _TRANSCODER(tc, _MIPIA_HS_GEN_CTRL, \
- _MIPIB_HS_GEN_CTRL)
+#define _MIPIC_HS_GEN_CTRL (dev_priv->mipi_mmio_base + 0xb870)
+#define MIPI_HS_GEN_CTRL(port) _MIPI_PORT(port, _MIPIA_HS_GEN_CTRL, \
+ _MIPIC_HS_GEN_CTRL)
#define LONG_PACKET_WORD_COUNT_SHIFT 8
#define LONG_PACKET_WORD_COUNT_MASK (0xffff << 8)
#define SHORT_PACKET_PARAM_SHIFT 8
@@ -6940,9 +6993,9 @@
/* data type values, see include/video/mipi_display.h */
#define _MIPIA_GEN_FIFO_STAT (dev_priv->mipi_mmio_base + 0xb074)
-#define _MIPIB_GEN_FIFO_STAT (dev_priv->mipi_mmio_base + 0xb874)
-#define MIPI_GEN_FIFO_STAT(tc) _TRANSCODER(tc, _MIPIA_GEN_FIFO_STAT, \
- _MIPIB_GEN_FIFO_STAT)
+#define _MIPIC_GEN_FIFO_STAT (dev_priv->mipi_mmio_base + 0xb874)
+#define MIPI_GEN_FIFO_STAT(port) _MIPI_PORT(port, _MIPIA_GEN_FIFO_STAT, \
+ _MIPIC_GEN_FIFO_STAT)
#define DPI_FIFO_EMPTY (1 << 28)
#define DBI_FIFO_EMPTY (1 << 27)
#define LP_CTRL_FIFO_EMPTY (1 << 26)
@@ -6959,17 +7012,17 @@
#define HS_DATA_FIFO_FULL (1 << 0)
#define _MIPIA_HS_LS_DBI_ENABLE (dev_priv->mipi_mmio_base + 0xb078)
-#define _MIPIB_HS_LS_DBI_ENABLE (dev_priv->mipi_mmio_base + 0xb878)
-#define MIPI_HS_LP_DBI_ENABLE(tc) _TRANSCODER(tc, \
- _MIPIA_HS_LS_DBI_ENABLE, _MIPIB_HS_LS_DBI_ENABLE)
+#define _MIPIC_HS_LS_DBI_ENABLE (dev_priv->mipi_mmio_base + 0xb878)
+#define MIPI_HS_LP_DBI_ENABLE(port) _MIPI_PORT(port, \
+ _MIPIA_HS_LS_DBI_ENABLE, _MIPIC_HS_LS_DBI_ENABLE)
#define DBI_HS_LP_MODE_MASK (1 << 0)
#define DBI_LP_MODE (1 << 0)
#define DBI_HS_MODE (0 << 0)
#define _MIPIA_DPHY_PARAM (dev_priv->mipi_mmio_base + 0xb080)
-#define _MIPIB_DPHY_PARAM (dev_priv->mipi_mmio_base + 0xb880)
-#define MIPI_DPHY_PARAM(tc) _TRANSCODER(tc, _MIPIA_DPHY_PARAM, \
- _MIPIB_DPHY_PARAM)
+#define _MIPIC_DPHY_PARAM (dev_priv->mipi_mmio_base + 0xb880)
+#define MIPI_DPHY_PARAM(port) _MIPI_PORT(port, _MIPIA_DPHY_PARAM, \
+ _MIPIC_DPHY_PARAM)
#define EXIT_ZERO_COUNT_SHIFT 24
#define EXIT_ZERO_COUNT_MASK (0x3f << 24)
#define TRAIL_COUNT_SHIFT 16
@@ -6981,36 +7034,36 @@
/* bits 31:0 */
#define _MIPIA_DBI_BW_CTRL (dev_priv->mipi_mmio_base + 0xb084)
-#define _MIPIB_DBI_BW_CTRL (dev_priv->mipi_mmio_base + 0xb884)
-#define MIPI_DBI_BW_CTRL(tc) _TRANSCODER(tc, _MIPIA_DBI_BW_CTRL, \
- _MIPIB_DBI_BW_CTRL)
+#define _MIPIC_DBI_BW_CTRL (dev_priv->mipi_mmio_base + 0xb884)
+#define MIPI_DBI_BW_CTRL(port) _MIPI_PORT(port, _MIPIA_DBI_BW_CTRL, \
+ _MIPIC_DBI_BW_CTRL)
#define _MIPIA_CLK_LANE_SWITCH_TIME_CNT (dev_priv->mipi_mmio_base \
+ 0xb088)
-#define _MIPIB_CLK_LANE_SWITCH_TIME_CNT (dev_priv->mipi_mmio_base \
+#define _MIPIC_CLK_LANE_SWITCH_TIME_CNT (dev_priv->mipi_mmio_base \
+ 0xb888)
-#define MIPI_CLK_LANE_SWITCH_TIME_CNT(tc) _TRANSCODER(tc, \
- _MIPIA_CLK_LANE_SWITCH_TIME_CNT, _MIPIB_CLK_LANE_SWITCH_TIME_CNT)
+#define MIPI_CLK_LANE_SWITCH_TIME_CNT(port) _MIPI_PORT(port, \
+ _MIPIA_CLK_LANE_SWITCH_TIME_CNT, _MIPIC_CLK_LANE_SWITCH_TIME_CNT)
#define LP_HS_SSW_CNT_SHIFT 16
#define LP_HS_SSW_CNT_MASK (0xffff << 16)
#define HS_LP_PWR_SW_CNT_SHIFT 0
#define HS_LP_PWR_SW_CNT_MASK (0xffff << 0)
#define _MIPIA_STOP_STATE_STALL (dev_priv->mipi_mmio_base + 0xb08c)
-#define _MIPIB_STOP_STATE_STALL (dev_priv->mipi_mmio_base + 0xb88c)
-#define MIPI_STOP_STATE_STALL(tc) _TRANSCODER(tc, \
- _MIPIA_STOP_STATE_STALL, _MIPIB_STOP_STATE_STALL)
+#define _MIPIC_STOP_STATE_STALL (dev_priv->mipi_mmio_base + 0xb88c)
+#define MIPI_STOP_STATE_STALL(port) _MIPI_PORT(port, \
+ _MIPIA_STOP_STATE_STALL, _MIPIC_STOP_STATE_STALL)
#define STOP_STATE_STALL_COUNTER_SHIFT 0
#define STOP_STATE_STALL_COUNTER_MASK (0xff << 0)
#define _MIPIA_INTR_STAT_REG_1 (dev_priv->mipi_mmio_base + 0xb090)
-#define _MIPIB_INTR_STAT_REG_1 (dev_priv->mipi_mmio_base + 0xb890)
-#define MIPI_INTR_STAT_REG_1(tc) _TRANSCODER(tc, \
- _MIPIA_INTR_STAT_REG_1, _MIPIB_INTR_STAT_REG_1)
+#define _MIPIC_INTR_STAT_REG_1 (dev_priv->mipi_mmio_base + 0xb890)
+#define MIPI_INTR_STAT_REG_1(port) _MIPI_PORT(port, \
+ _MIPIA_INTR_STAT_REG_1, _MIPIC_INTR_STAT_REG_1)
#define _MIPIA_INTR_EN_REG_1 (dev_priv->mipi_mmio_base + 0xb094)
-#define _MIPIB_INTR_EN_REG_1 (dev_priv->mipi_mmio_base + 0xb894)
-#define MIPI_INTR_EN_REG_1(tc) _TRANSCODER(tc, _MIPIA_INTR_EN_REG_1, \
- _MIPIB_INTR_EN_REG_1)
+#define _MIPIC_INTR_EN_REG_1 (dev_priv->mipi_mmio_base + 0xb894)
+#define MIPI_INTR_EN_REG_1(port) _MIPI_PORT(port, _MIPIA_INTR_EN_REG_1, \
+ _MIPIC_INTR_EN_REG_1)
#define RX_CONTENTION_DETECTED (1 << 0)
/* XXX: only pipe A ?!? */
@@ -7029,9 +7082,9 @@
/* MIPI adapter registers */
#define _MIPIA_CTRL (dev_priv->mipi_mmio_base + 0xb104)
-#define _MIPIB_CTRL (dev_priv->mipi_mmio_base + 0xb904)
-#define MIPI_CTRL(tc) _TRANSCODER(tc, _MIPIA_CTRL, \
- _MIPIB_CTRL)
+#define _MIPIC_CTRL (dev_priv->mipi_mmio_base + 0xb904)
+#define MIPI_CTRL(port) _MIPI_PORT(port, _MIPIA_CTRL, \
+ _MIPIC_CTRL)
#define ESCAPE_CLOCK_DIVIDER_SHIFT 5 /* A only */
#define ESCAPE_CLOCK_DIVIDER_MASK (3 << 5)
#define ESCAPE_CLOCK_DIVIDER_1 (0 << 5)
@@ -7044,24 +7097,24 @@
#define RGB_FLIP_TO_BGR (1 << 2)
#define _MIPIA_DATA_ADDRESS (dev_priv->mipi_mmio_base + 0xb108)
-#define _MIPIB_DATA_ADDRESS (dev_priv->mipi_mmio_base + 0xb908)
-#define MIPI_DATA_ADDRESS(tc) _TRANSCODER(tc, _MIPIA_DATA_ADDRESS, \
- _MIPIB_DATA_ADDRESS)
+#define _MIPIC_DATA_ADDRESS (dev_priv->mipi_mmio_base + 0xb908)
+#define MIPI_DATA_ADDRESS(port) _MIPI_PORT(port, _MIPIA_DATA_ADDRESS, \
+ _MIPIC_DATA_ADDRESS)
#define DATA_MEM_ADDRESS_SHIFT 5
#define DATA_MEM_ADDRESS_MASK (0x7ffffff << 5)
#define DATA_VALID (1 << 0)
#define _MIPIA_DATA_LENGTH (dev_priv->mipi_mmio_base + 0xb10c)
-#define _MIPIB_DATA_LENGTH (dev_priv->mipi_mmio_base + 0xb90c)
-#define MIPI_DATA_LENGTH(tc) _TRANSCODER(tc, _MIPIA_DATA_LENGTH, \
- _MIPIB_DATA_LENGTH)
+#define _MIPIC_DATA_LENGTH (dev_priv->mipi_mmio_base + 0xb90c)
+#define MIPI_DATA_LENGTH(port) _MIPI_PORT(port, _MIPIA_DATA_LENGTH, \
+ _MIPIC_DATA_LENGTH)
#define DATA_LENGTH_SHIFT 0
#define DATA_LENGTH_MASK (0xfffff << 0)
#define _MIPIA_COMMAND_ADDRESS (dev_priv->mipi_mmio_base + 0xb110)
-#define _MIPIB_COMMAND_ADDRESS (dev_priv->mipi_mmio_base + 0xb910)
-#define MIPI_COMMAND_ADDRESS(tc) _TRANSCODER(tc, \
- _MIPIA_COMMAND_ADDRESS, _MIPIB_COMMAND_ADDRESS)
+#define _MIPIC_COMMAND_ADDRESS (dev_priv->mipi_mmio_base + 0xb910)
+#define MIPI_COMMAND_ADDRESS(port) _MIPI_PORT(port, \
+ _MIPIA_COMMAND_ADDRESS, _MIPIC_COMMAND_ADDRESS)
#define COMMAND_MEM_ADDRESS_SHIFT 5
#define COMMAND_MEM_ADDRESS_MASK (0x7ffffff << 5)
#define AUTO_PWG_ENABLE (1 << 2)
@@ -7069,22 +7122,22 @@
#define COMMAND_VALID (1 << 0)
#define _MIPIA_COMMAND_LENGTH (dev_priv->mipi_mmio_base + 0xb114)
-#define _MIPIB_COMMAND_LENGTH (dev_priv->mipi_mmio_base + 0xb914)
-#define MIPI_COMMAND_LENGTH(tc) _TRANSCODER(tc, _MIPIA_COMMAND_LENGTH, \
- _MIPIB_COMMAND_LENGTH)
+#define _MIPIC_COMMAND_LENGTH (dev_priv->mipi_mmio_base + 0xb914)
+#define MIPI_COMMAND_LENGTH(port) _MIPI_PORT(port, _MIPIA_COMMAND_LENGTH, \
+ _MIPIC_COMMAND_LENGTH)
#define COMMAND_LENGTH_SHIFT(n) (8 * (n)) /* n: 0...3 */
#define COMMAND_LENGTH_MASK(n) (0xff << (8 * (n)))
#define _MIPIA_READ_DATA_RETURN0 (dev_priv->mipi_mmio_base + 0xb118)
-#define _MIPIB_READ_DATA_RETURN0 (dev_priv->mipi_mmio_base + 0xb918)
-#define MIPI_READ_DATA_RETURN(tc, n) \
- (_TRANSCODER(tc, _MIPIA_READ_DATA_RETURN0, _MIPIB_READ_DATA_RETURN0) \
+#define _MIPIC_READ_DATA_RETURN0 (dev_priv->mipi_mmio_base + 0xb918)
+#define MIPI_READ_DATA_RETURN(port, n) \
+ (_MIPI_PORT(port, _MIPIA_READ_DATA_RETURN0, _MIPIC_READ_DATA_RETURN0) \
+ 4 * (n)) /* n: 0...7 */
#define _MIPIA_READ_DATA_VALID (dev_priv->mipi_mmio_base + 0xb138)
-#define _MIPIB_READ_DATA_VALID (dev_priv->mipi_mmio_base + 0xb938)
-#define MIPI_READ_DATA_VALID(tc) _TRANSCODER(tc, \
- _MIPIA_READ_DATA_VALID, _MIPIB_READ_DATA_VALID)
+#define _MIPIC_READ_DATA_VALID (dev_priv->mipi_mmio_base + 0xb938)
+#define MIPI_READ_DATA_VALID(port) _MIPI_PORT(port, \
+ _MIPIA_READ_DATA_VALID, _MIPIC_READ_DATA_VALID)
#define READ_DATA_VALID(n) (1 << (n))
/* For UMS only (deprecated): */
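[Note: the hunks above rename the second MIPI register instance from "B" to "C" and switch the selector macro from _TRANSCODER(tc, ...) to _MIPI_PORT(port, ...), since the VLV MIPI DSI blocks are indexed by port (A/C) rather than by transcoder. A minimal sketch of the two-entry selector pattern, assuming the usual port-A/port-C pairing; the real _MIPI_PORT() is defined elsewhere in i915_reg.h and may differ in form:

	/* Illustrative only: pick the port-A or port-C register instance. */
	#define _MIPI_PORT(port, a, c)	(((port) == PORT_A) ? (a) : (c))
]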
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index 2636882..9f19ed3 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -264,7 +264,7 @@
}
/* only restore FBC info on platforms that support FBC */
- intel_disable_fbc(dev);
+ intel_fbc_disable(dev);
/* restore FBC interval */
if (HAS_FBC(dev) && INTEL_INFO(dev)->gen <= 4 && !IS_G4X(dev))
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h
index 751d4ad..6058a01 100644
--- a/drivers/gpu/drm/i915/i915_trace.h
+++ b/drivers/gpu/drm/i915/i915_trace.h
@@ -328,8 +328,8 @@
TRACE_EVENT(i915_gem_ring_sync_to,
TP_PROTO(struct intel_engine_cs *from,
struct intel_engine_cs *to,
- u32 seqno),
- TP_ARGS(from, to, seqno),
+ struct drm_i915_gem_request *req),
+ TP_ARGS(from, to, req),
TP_STRUCT__entry(
__field(u32, dev)
@@ -342,7 +342,7 @@
__entry->dev = from->dev->primary->index;
__entry->sync_from = from->id;
__entry->sync_to = to->id;
- __entry->seqno = seqno;
+ __entry->seqno = i915_gem_request_get_seqno(req);
),
TP_printk("dev=%u, sync-from=%u, sync-to=%u, seqno=%u",
@@ -352,8 +352,8 @@
);
TRACE_EVENT(i915_gem_ring_dispatch,
- TP_PROTO(struct intel_engine_cs *ring, u32 seqno, u32 flags),
- TP_ARGS(ring, seqno, flags),
+ TP_PROTO(struct drm_i915_gem_request *req, u32 flags),
+ TP_ARGS(req, flags),
TP_STRUCT__entry(
__field(u32, dev)
@@ -363,11 +363,13 @@
),
TP_fast_assign(
+ struct intel_engine_cs *ring =
+ i915_gem_request_get_ring(req);
__entry->dev = ring->dev->primary->index;
__entry->ring = ring->id;
- __entry->seqno = seqno;
+ __entry->seqno = i915_gem_request_get_seqno(req);
__entry->flags = flags;
- i915_trace_irq_get(ring, seqno);
+ i915_trace_irq_get(ring, req);
),
TP_printk("dev=%u, ring=%u, seqno=%u, flags=%x",
@@ -398,31 +400,36 @@
);
DECLARE_EVENT_CLASS(i915_gem_request,
- TP_PROTO(struct intel_engine_cs *ring, u32 seqno),
- TP_ARGS(ring, seqno),
+ TP_PROTO(struct drm_i915_gem_request *req),
+ TP_ARGS(req),
TP_STRUCT__entry(
__field(u32, dev)
__field(u32, ring)
+ __field(u32, uniq)
__field(u32, seqno)
),
TP_fast_assign(
+ struct intel_engine_cs *ring =
+ i915_gem_request_get_ring(req);
__entry->dev = ring->dev->primary->index;
__entry->ring = ring->id;
- __entry->seqno = seqno;
+ __entry->uniq = req ? req->uniq : 0;
+ __entry->seqno = i915_gem_request_get_seqno(req);
),
- TP_printk("dev=%u, ring=%u, seqno=%u",
- __entry->dev, __entry->ring, __entry->seqno)
+ TP_printk("dev=%u, ring=%u, uniq=%u, seqno=%u",
+ __entry->dev, __entry->ring, __entry->uniq,
+ __entry->seqno)
);
DEFINE_EVENT(i915_gem_request, i915_gem_request_add,
- TP_PROTO(struct intel_engine_cs *ring, u32 seqno),
- TP_ARGS(ring, seqno)
+ TP_PROTO(struct drm_i915_gem_request *req),
+ TP_ARGS(req)
);
-TRACE_EVENT(i915_gem_request_complete,
+TRACE_EVENT(i915_gem_request_notify,
TP_PROTO(struct intel_engine_cs *ring),
TP_ARGS(ring),
@@ -443,17 +450,23 @@
);
DEFINE_EVENT(i915_gem_request, i915_gem_request_retire,
- TP_PROTO(struct intel_engine_cs *ring, u32 seqno),
- TP_ARGS(ring, seqno)
+ TP_PROTO(struct drm_i915_gem_request *req),
+ TP_ARGS(req)
+);
+
+DEFINE_EVENT(i915_gem_request, i915_gem_request_complete,
+ TP_PROTO(struct drm_i915_gem_request *req),
+ TP_ARGS(req)
);
TRACE_EVENT(i915_gem_request_wait_begin,
- TP_PROTO(struct intel_engine_cs *ring, u32 seqno),
- TP_ARGS(ring, seqno),
+ TP_PROTO(struct drm_i915_gem_request *req),
+ TP_ARGS(req),
TP_STRUCT__entry(
__field(u32, dev)
__field(u32, ring)
+ __field(u32, uniq)
__field(u32, seqno)
__field(bool, blocking)
),
@@ -465,20 +478,24 @@
* less desirable.
*/
TP_fast_assign(
+ struct intel_engine_cs *ring =
+ i915_gem_request_get_ring(req);
__entry->dev = ring->dev->primary->index;
__entry->ring = ring->id;
- __entry->seqno = seqno;
- __entry->blocking = mutex_is_locked(&ring->dev->struct_mutex);
+ __entry->uniq = req ? req->uniq : 0;
+ __entry->seqno = i915_gem_request_get_seqno(req);
+ __entry->blocking =
+ mutex_is_locked(&ring->dev->struct_mutex);
),
- TP_printk("dev=%u, ring=%u, seqno=%u, blocking=%s",
- __entry->dev, __entry->ring, __entry->seqno,
- __entry->blocking ? "yes (NB)" : "no")
+ TP_printk("dev=%u, ring=%u, uniq=%u, seqno=%u, blocking=%s",
+ __entry->dev, __entry->ring, __entry->uniq,
+ __entry->seqno, __entry->blocking ? "yes (NB)" : "no")
);
DEFINE_EVENT(i915_gem_request, i915_gem_request_wait_end,
- TP_PROTO(struct intel_engine_cs *ring, u32 seqno),
- TP_ARGS(ring, seqno)
+ TP_PROTO(struct drm_i915_gem_request *req),
+ TP_ARGS(req)
);
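[Note: the i915_gem_request tracepoints above now take a struct drm_i915_gem_request pointer instead of a raw (ring, seqno) pair, and the TP_fast_assign bodies go through accessor helpers. Those helpers are presumably NULL-tolerant one-liners along these lines — a sketch, not the verbatim definitions from i915_drv.h:

	static inline u32
	i915_gem_request_get_seqno(struct drm_i915_gem_request *req)
	{
		/* Tolerate a NULL request so tracepoints can log "no request". */
		return req ? req->seqno : 0;
	}

	static inline struct intel_engine_cs *
	i915_gem_request_get_ring(struct drm_i915_gem_request *req)
	{
		return req ? req->ring : NULL;
	}
]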
DECLARE_EVENT_CLASS(i915_ring,
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index a4bd90f..65b1fbc 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -314,6 +314,7 @@
{
const struct bdb_lfp_backlight_data *backlight_data;
const struct bdb_lfp_backlight_data_entry *entry;
+ const struct bdb_lfp_backlight_control_data *bl_ctrl_data;
backlight_data = find_section(bdb, BDB_LVDS_BACKLIGHT);
if (!backlight_data)
@@ -326,6 +327,7 @@
}
entry = &backlight_data->data[panel_type];
+ bl_ctrl_data = &backlight_data->blc_ctl[panel_type];
dev_priv->vbt.backlight.present = entry->type == BDB_BACKLIGHT_TYPE_PWM;
if (!dev_priv->vbt.backlight.present) {
@@ -337,12 +339,30 @@
dev_priv->vbt.backlight.pwm_freq_hz = entry->pwm_freq_hz;
dev_priv->vbt.backlight.active_low_pwm = entry->active_low_pwm;
dev_priv->vbt.backlight.min_brightness = entry->min_brightness;
+
+ dev_priv->vbt.backlight.controller = 0;
+ if (bdb->version >= 191) {
+ dev_priv->vbt.backlight.present =
+ bl_ctrl_data->pin == BLC_CONTROL_PIN_DDI;
+ if (!dev_priv->vbt.backlight.present) {
+ DRM_DEBUG_KMS("BL control pin is not DDI (pin %u)\n",
+ bl_ctrl_data->pin);
+ return;
+ }
+ if (bl_ctrl_data->controller == 1)
+ dev_priv->vbt.backlight.controller =
+ bl_ctrl_data->controller;
+ }
+
DRM_DEBUG_KMS("VBT backlight PWM modulation frequency %u Hz, "
"active %s, min brightness %u, level %u\n",
dev_priv->vbt.backlight.pwm_freq_hz,
dev_priv->vbt.backlight.active_low_pwm ? "low" : "high",
dev_priv->vbt.backlight.min_brightness,
backlight_data->level[panel_type]);
+
+ DRM_DEBUG_KMS("VBT BL controller %u\n",
+ dev_priv->vbt.backlight.controller);
}
/* Try to find sdvo panel data */
@@ -664,6 +684,50 @@
}
}
+static void
+parse_psr(struct drm_i915_private *dev_priv, struct bdb_header *bdb)
+{
+ struct bdb_psr *psr;
+ struct psr_table *psr_table;
+
+ psr = find_section(bdb, BDB_PSR);
+ if (!psr) {
+ DRM_DEBUG_KMS("No PSR BDB found.\n");
+ return;
+ }
+
+ psr_table = &psr->psr_table[panel_type];
+
+ dev_priv->vbt.psr.full_link = psr_table->full_link;
+ dev_priv->vbt.psr.require_aux_wakeup = psr_table->require_aux_to_wakeup;
+
+ /* Allowed VBT values range from 0 to 15 */
+ dev_priv->vbt.psr.idle_frames = psr_table->idle_frames < 0 ? 0 :
+ psr_table->idle_frames > 15 ? 15 : psr_table->idle_frames;
+
+ switch (psr_table->lines_to_wait) {
+ case 0:
+ dev_priv->vbt.psr.lines_to_wait = PSR_0_LINES_TO_WAIT;
+ break;
+ case 1:
+ dev_priv->vbt.psr.lines_to_wait = PSR_1_LINE_TO_WAIT;
+ break;
+ case 2:
+ dev_priv->vbt.psr.lines_to_wait = PSR_4_LINES_TO_WAIT;
+ break;
+ case 3:
+ dev_priv->vbt.psr.lines_to_wait = PSR_8_LINES_TO_WAIT;
+ break;
+ default:
+ DRM_DEBUG_KMS("VBT has unknown PSR lines to wait %u\n",
+ psr_table->lines_to_wait);
+ break;
+ }
+
+ dev_priv->vbt.psr.tp1_wakeup_time = psr_table->tp1_wakeup_time;
+ dev_priv->vbt.psr.tp2_tp3_wakeup_time = psr_table->tp2_tp3_wakeup_time;
+}
+
static u8 *goto_next_sequence(u8 *data, int *size)
{
u16 len;
@@ -1241,6 +1305,7 @@
parse_device_mapping(dev_priv, bdb);
parse_driver_features(dev_priv, bdb);
parse_edp(dev_priv, bdb);
+ parse_psr(dev_priv, bdb);
parse_mipi(dev_priv, bdb);
parse_ddi_ports(dev_priv, bdb);
diff --git a/drivers/gpu/drm/i915/intel_bios.h b/drivers/gpu/drm/i915/intel_bios.h
index 7603765..9a7202e 100644
--- a/drivers/gpu/drm/i915/intel_bios.h
+++ b/drivers/gpu/drm/i915/intel_bios.h
@@ -80,7 +80,7 @@
#define BDB_EXT_MMIO_REGS 6
#define BDB_SWF_IO 7
#define BDB_SWF_MMIO 8
-#define BDB_DOT_CLOCK_TABLE 9
+#define BDB_PSR 9
#define BDB_MODE_REMOVAL_TABLE 10
#define BDB_CHILD_DEVICE_TABLE 11
#define BDB_DRIVER_FEATURES 12
@@ -402,10 +402,21 @@
u8 obsolete3;
} __packed;
+#define BLC_CONTROL_PIN_PMIC 0
+#define BLC_CONTROL_PIN_LPSS_PWM 1
+#define BLC_CONTROL_PIN_DDI 2
+#define BLC_CONTROL_PIN_CABC 3
+
+struct bdb_lfp_backlight_control_data {
+ u8 controller:4;
+ u8 pin:4;
+} __packed;
+
struct bdb_lfp_backlight_data {
u8 entry_size;
struct bdb_lfp_backlight_data_entry data[16];
u8 level[16];
+ struct bdb_lfp_backlight_control_data blc_ctl[16];
} __packed;
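[Note: the new blc_ctl[] array packs the backlight controller index and control pin into a single byte per panel. A hedged decode sketch, assuming the bitfield declaration order above puts the controller in the low nibble on little-endian layouts; helper names are illustrative only:

	/* Illustrative decode of one packed blc_ctl byte. */
	static inline u8 blc_ctl_controller(u8 raw) { return raw & 0xf; }
	static inline u8 blc_ctl_pin(u8 raw)        { return raw >> 4; }
]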
struct aimdb_header {
@@ -556,6 +567,26 @@
u16 edp_t3_optimization;
} __packed;
+struct psr_table {
+ /* Feature bits */
+ u8 full_link:1;
+ u8 require_aux_to_wakeup:1;
+ u8 feature_bits_rsvd:6;
+
+ /* Wait times */
+ u8 idle_frames:4;
+ u8 lines_to_wait:3;
+ u8 wait_times_rsvd:1;
+
+ /* TP wake-up time, in multiples of 100 */
+ u16 tp1_wakeup_time;
+ u16 tp2_tp3_wakeup_time;
+} __packed;
+
+struct bdb_psr {
+ struct psr_table psr_table[16];
+} __packed;
+
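[Note: the TP1/TP2/TP3 wakeup fields are stored as raw multiples of 100, per the comment in struct psr_table above. A sketch of converting them for later hardware programming, under the assumption that the unit is 100 microseconds — the VBT spec, not this patch, fixes the unit:

	/* Sketch: raw VBT wakeup value -> microseconds, assuming 100 us units. */
	static inline u32 vbt_psr_wakeup_us(u16 raw)
	{
		return (u32)raw * 100;
	}
]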
void intel_setup_bios(struct drm_device *dev);
int intel_parse_bios(struct drm_device *dev);
@@ -798,7 +829,8 @@
#define DUAL_LINK_PIXEL_ALT 2
u16 dual_link:2;
u16 lane_cnt:2;
- u16 rsvd3:12;
+ u16 pixel_overlap:3;
+ u16 rsvd3:9;
u16 rsvd4;
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index e6b45cd..1c92ad4 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -128,15 +128,15 @@
};
static const struct ddi_buf_trans skl_ddi_translations_dp[] = {
- { 0x00000018, 0x000000a0 },
- { 0x00004014, 0x00000098 },
+ { 0x00000018, 0x000000a2 },
+ { 0x00004014, 0x0000009B },
{ 0x00006012, 0x00000088 },
- { 0x00008010, 0x00000080 },
- { 0x00000018, 0x00000098 },
+ { 0x00008010, 0x00000087 },
+ { 0x00000018, 0x0000009B },
{ 0x00004014, 0x00000088 },
- { 0x00006012, 0x00000080 },
+ { 0x00006012, 0x00000087 },
{ 0x00000018, 0x00000088 },
- { 0x00004014, 0x00000080 },
+ { 0x00004014, 0x00000087 },
};
static const struct ddi_buf_trans skl_ddi_translations_hdmi[] = {
@@ -834,7 +834,12 @@
void intel_ddi_clock_get(struct intel_encoder *encoder,
struct intel_crtc_config *pipe_config)
{
- hsw_ddi_clock_get(encoder, pipe_config);
+ struct drm_device *dev = encoder->base.dev;
+
+ if (INTEL_INFO(dev)->gen <= 8)
+ hsw_ddi_clock_get(encoder, pipe_config);
+ else
+ skl_ddi_clock_get(encoder, pipe_config);
}
static void
@@ -2029,7 +2034,6 @@
enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
struct intel_hdmi *intel_hdmi;
u32 temp, flags = 0;
- struct drm_device *dev = dev_priv->dev;
temp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
if (temp & TRANS_DDI_PHSYNC)
@@ -2106,10 +2110,7 @@
dev_priv->vbt.edp_bpp = pipe_config->pipe_bpp;
}
- if (INTEL_INFO(dev)->gen <= 8)
- hsw_ddi_clock_get(encoder, pipe_config);
- else
- skl_ddi_clock_get(encoder, pipe_config);
+ intel_ddi_clock_get(encoder, pipe_config);
}
static void intel_ddi_destroy(struct drm_encoder *encoder)
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index fb3e3d4..d01db1b 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -1024,7 +1024,7 @@
reg = DPLL(pipe);
val = I915_READ(reg);
cur_state = !!(val & DPLL_VCO_ENABLE);
- WARN(cur_state != state,
+ I915_STATE_WARN(cur_state != state,
"PLL state assertion failure (expected %s, current %s)\n",
state_string(state), state_string(cur_state));
}
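[Note: the file-wide WARN -> I915_STATE_WARN conversion that starts here routes the modeset state checker's assertions through a dedicated macro. Presumably I915_STATE_WARN is a WARN variant that can be demoted to a plain error message via a module parameter; a sketch under that assumption — the real definition lives in i915_drv.h:

	#define I915_STATE_WARN(condition, format...) ({		\
		int __ret_warn_on = !!(condition);			\
		if (unlikely(__ret_warn_on)) {				\
			if (i915.verbose_state_checks)			\
				WARN(1, format);			\
			else						\
				DRM_ERROR(format);			\
		}							\
		unlikely(__ret_warn_on);				\
	})

	/* And, presumably, the WARN_ON-style variant used below: */
	#define I915_STATE_WARN_ON(condition) \
		I915_STATE_WARN((condition), "WARN_ON(" #condition ")\n")
]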
@@ -1040,7 +1040,7 @@
mutex_unlock(&dev_priv->dpio_lock);
cur_state = val & DSI_PLL_VCO_EN;
- WARN(cur_state != state,
+ I915_STATE_WARN(cur_state != state,
"DSI PLL state assertion failure (expected %s, current %s)\n",
state_string(state), state_string(cur_state));
}
@@ -1071,7 +1071,7 @@
return;
cur_state = pll->get_hw_state(dev_priv, pll, &hw_state);
- WARN(cur_state != state,
+ I915_STATE_WARN(cur_state != state,
"%s assertion failure (expected %s, current %s)\n",
pll->name, state_string(state), state_string(cur_state));
}
@@ -1095,7 +1095,7 @@
val = I915_READ(reg);
cur_state = !!(val & FDI_TX_ENABLE);
}
- WARN(cur_state != state,
+ I915_STATE_WARN(cur_state != state,
"FDI TX state assertion failure (expected %s, current %s)\n",
state_string(state), state_string(cur_state));
}
@@ -1112,7 +1112,7 @@
reg = FDI_RX_CTL(pipe);
val = I915_READ(reg);
cur_state = !!(val & FDI_RX_ENABLE);
- WARN(cur_state != state,
+ I915_STATE_WARN(cur_state != state,
"FDI RX state assertion failure (expected %s, current %s)\n",
state_string(state), state_string(cur_state));
}
@@ -1135,7 +1135,7 @@
reg = FDI_TX_CTL(pipe);
val = I915_READ(reg);
- WARN(!(val & FDI_TX_PLL_ENABLE), "FDI TX PLL assertion failure, should be active but is disabled\n");
+ I915_STATE_WARN(!(val & FDI_TX_PLL_ENABLE), "FDI TX PLL assertion failure, should be active but is disabled\n");
}
void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
@@ -1148,7 +1148,7 @@
reg = FDI_RX_CTL(pipe);
val = I915_READ(reg);
cur_state = !!(val & FDI_RX_PLL_ENABLE);
- WARN(cur_state != state,
+ I915_STATE_WARN(cur_state != state,
"FDI RX PLL assertion failure (expected %s, current %s)\n",
state_string(state), state_string(cur_state));
}
@@ -1190,7 +1190,7 @@
((val & PANEL_UNLOCK_MASK) == PANEL_UNLOCK_REGS))
locked = false;
- WARN(panel_pipe == pipe && locked,
+ I915_STATE_WARN(panel_pipe == pipe && locked,
"panel assertion failure, pipe %c regs locked\n",
pipe_name(pipe));
}
@@ -1206,7 +1206,7 @@
else
cur_state = I915_READ(CURCNTR(pipe)) & CURSOR_MODE;
- WARN(cur_state != state,
+ I915_STATE_WARN(cur_state != state,
"cursor on pipe %c assertion failure (expected %s, current %s)\n",
pipe_name(pipe), state_string(state), state_string(cur_state));
}
@@ -1236,7 +1236,7 @@
cur_state = !!(val & PIPECONF_ENABLE);
}
- WARN(cur_state != state,
+ I915_STATE_WARN(cur_state != state,
"pipe %c assertion failure (expected %s, current %s)\n",
pipe_name(pipe), state_string(state), state_string(cur_state));
}
@@ -1251,7 +1251,7 @@
reg = DSPCNTR(plane);
val = I915_READ(reg);
cur_state = !!(val & DISPLAY_PLANE_ENABLE);
- WARN(cur_state != state,
+ I915_STATE_WARN(cur_state != state,
"plane %c assertion failure (expected %s, current %s)\n",
plane_name(plane), state_string(state), state_string(cur_state));
}
@@ -1271,7 +1271,7 @@
if (INTEL_INFO(dev)->gen >= 4) {
reg = DSPCNTR(pipe);
val = I915_READ(reg);
- WARN(val & DISPLAY_PLANE_ENABLE,
+ I915_STATE_WARN(val & DISPLAY_PLANE_ENABLE,
"plane %c assertion failure, should be disabled but not\n",
plane_name(pipe));
return;
@@ -1283,7 +1283,7 @@
val = I915_READ(reg);
cur_pipe = (val & DISPPLANE_SEL_PIPE_MASK) >>
DISPPLANE_SEL_PIPE_SHIFT;
- WARN((val & DISPLAY_PLANE_ENABLE) && pipe == cur_pipe,
+ I915_STATE_WARN((val & DISPLAY_PLANE_ENABLE) && pipe == cur_pipe,
"plane %c assertion failure, should be off on pipe %c but is still active\n",
plane_name(i), pipe_name(pipe));
}
@@ -1299,7 +1299,7 @@
if (INTEL_INFO(dev)->gen >= 9) {
for_each_sprite(pipe, sprite) {
val = I915_READ(PLANE_CTL(pipe, sprite));
- WARN(val & PLANE_CTL_ENABLE,
+ I915_STATE_WARN(val & PLANE_CTL_ENABLE,
"plane %d assertion failure, should be off on pipe %c but is still active\n",
sprite, pipe_name(pipe));
}
@@ -1307,20 +1307,20 @@
for_each_sprite(pipe, sprite) {
reg = SPCNTR(pipe, sprite);
val = I915_READ(reg);
- WARN(val & SP_ENABLE,
+ I915_STATE_WARN(val & SP_ENABLE,
"sprite %c assertion failure, should be off on pipe %c but is still active\n",
sprite_name(pipe, sprite), pipe_name(pipe));
}
} else if (INTEL_INFO(dev)->gen >= 7) {
reg = SPRCTL(pipe);
val = I915_READ(reg);
- WARN(val & SPRITE_ENABLE,
+ I915_STATE_WARN(val & SPRITE_ENABLE,
"sprite %c assertion failure, should be off on pipe %c but is still active\n",
plane_name(pipe), pipe_name(pipe));
} else if (INTEL_INFO(dev)->gen >= 5) {
reg = DVSCNTR(pipe);
val = I915_READ(reg);
- WARN(val & DVS_ENABLE,
+ I915_STATE_WARN(val & DVS_ENABLE,
"sprite %c assertion failure, should be off on pipe %c but is still active\n",
plane_name(pipe), pipe_name(pipe));
}
@@ -1328,7 +1328,7 @@
static void assert_vblank_disabled(struct drm_crtc *crtc)
{
- if (WARN_ON(drm_crtc_vblank_get(crtc) == 0))
+ if (I915_STATE_WARN_ON(drm_crtc_vblank_get(crtc) == 0))
drm_crtc_vblank_put(crtc);
}
@@ -1337,12 +1337,12 @@
u32 val;
bool enabled;
- WARN_ON(!(HAS_PCH_IBX(dev_priv->dev) || HAS_PCH_CPT(dev_priv->dev)));
+ I915_STATE_WARN_ON(!(HAS_PCH_IBX(dev_priv->dev) || HAS_PCH_CPT(dev_priv->dev)));
val = I915_READ(PCH_DREF_CONTROL);
enabled = !!(val & (DREF_SSC_SOURCE_MASK | DREF_NONSPREAD_SOURCE_MASK |
DREF_SUPERSPREAD_SOURCE_MASK));
- WARN(!enabled, "PCH refclk assertion failure, should be active but is disabled\n");
+ I915_STATE_WARN(!enabled, "PCH refclk assertion failure, should be active but is disabled\n");
}
static void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv,
@@ -1355,7 +1355,7 @@
reg = PCH_TRANSCONF(pipe);
val = I915_READ(reg);
enabled = !!(val & TRANS_ENABLE);
- WARN(enabled,
+ I915_STATE_WARN(enabled,
"transcoder assertion failed, should be off on pipe %c but is still active\n",
pipe_name(pipe));
}
@@ -1435,11 +1435,11 @@
enum pipe pipe, int reg, u32 port_sel)
{
u32 val = I915_READ(reg);
- WARN(dp_pipe_enabled(dev_priv, pipe, port_sel, val),
+ I915_STATE_WARN(dp_pipe_enabled(dev_priv, pipe, port_sel, val),
"PCH DP (0x%08x) enabled on transcoder %c, should be disabled\n",
reg, pipe_name(pipe));
- WARN(HAS_PCH_IBX(dev_priv->dev) && (val & DP_PORT_EN) == 0
+ I915_STATE_WARN(HAS_PCH_IBX(dev_priv->dev) && (val & DP_PORT_EN) == 0
&& (val & DP_PIPEB_SELECT),
"IBX PCH dp port still using transcoder B\n");
}
@@ -1448,11 +1448,11 @@
enum pipe pipe, int reg)
{
u32 val = I915_READ(reg);
- WARN(hdmi_pipe_enabled(dev_priv, pipe, val),
+ I915_STATE_WARN(hdmi_pipe_enabled(dev_priv, pipe, val),
"PCH HDMI (0x%08x) enabled on transcoder %c, should be disabled\n",
reg, pipe_name(pipe));
- WARN(HAS_PCH_IBX(dev_priv->dev) && (val & SDVO_ENABLE) == 0
+ I915_STATE_WARN(HAS_PCH_IBX(dev_priv->dev) && (val & SDVO_ENABLE) == 0
&& (val & SDVO_PIPE_B_SELECT),
"IBX PCH hdmi port still using transcoder B\n");
}
@@ -1469,13 +1469,13 @@
reg = PCH_ADPA;
val = I915_READ(reg);
- WARN(adpa_pipe_enabled(dev_priv, pipe, val),
+ I915_STATE_WARN(adpa_pipe_enabled(dev_priv, pipe, val),
"PCH VGA enabled on transcoder %c, should be disabled\n",
pipe_name(pipe));
reg = PCH_LVDS;
val = I915_READ(reg);
- WARN(lvds_pipe_enabled(dev_priv, pipe, val),
+ I915_STATE_WARN(lvds_pipe_enabled(dev_priv, pipe, val),
"PCH LVDS enabled on transcoder %c, should be disabled\n",
pipe_name(pipe));
@@ -2954,71 +2954,6 @@
crtc->config.pipe_src_h = adjusted_mode->crtc_vdisplay;
}
-static int
-intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
- struct drm_framebuffer *fb)
-{
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- enum pipe pipe = intel_crtc->pipe;
- struct drm_framebuffer *old_fb = crtc->primary->fb;
- struct drm_i915_gem_object *old_obj = intel_fb_obj(old_fb);
- int ret;
-
- if (intel_crtc_has_pending_flip(crtc)) {
- DRM_ERROR("pipe is still busy with an old pageflip\n");
- return -EBUSY;
- }
-
- /* no fb bound */
- if (!fb) {
- DRM_ERROR("No FB bound\n");
- return 0;
- }
-
- if (intel_crtc->plane > INTEL_INFO(dev)->num_pipes) {
- DRM_ERROR("no plane for crtc: plane %c, num_pipes %d\n",
- plane_name(intel_crtc->plane),
- INTEL_INFO(dev)->num_pipes);
- return -EINVAL;
- }
-
- mutex_lock(&dev->struct_mutex);
- ret = intel_pin_and_fence_fb_obj(crtc->primary, fb, NULL);
- if (ret == 0)
- i915_gem_track_fb(old_obj, intel_fb_obj(fb),
- INTEL_FRONTBUFFER_PRIMARY(pipe));
- mutex_unlock(&dev->struct_mutex);
- if (ret != 0) {
- DRM_ERROR("pin & fence failed\n");
- return ret;
- }
-
- dev_priv->display.update_primary_plane(crtc, fb, x, y);
-
- if (intel_crtc->active)
- intel_frontbuffer_flip(dev, INTEL_FRONTBUFFER_PRIMARY(pipe));
-
- crtc->primary->fb = fb;
- crtc->x = x;
- crtc->y = y;
-
- if (old_fb) {
- if (intel_crtc->active && old_fb != fb)
- intel_wait_for_vblank(dev, intel_crtc->pipe);
- mutex_lock(&dev->struct_mutex);
- intel_unpin_fb_obj(old_obj);
- mutex_unlock(&dev->struct_mutex);
- }
-
- mutex_lock(&dev->struct_mutex);
- intel_update_fbc(dev);
- mutex_unlock(&dev->struct_mutex);
-
- return 0;
-}
-
static void intel_fdi_normal_train(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
@@ -4125,7 +4060,7 @@
drm_for_each_legacy_plane(plane, &dev->mode_config.plane_list) {
intel_plane = to_intel_plane(plane);
if (intel_plane->pipe == pipe)
- intel_plane_disable(&intel_plane->base);
+ plane->funcs->disable_plane(plane);
}
}
@@ -4266,7 +4201,7 @@
hsw_enable_ips(intel_crtc);
mutex_lock(&dev->struct_mutex);
- intel_update_fbc(dev);
+ intel_fbc_update(dev);
mutex_unlock(&dev->struct_mutex);
/*
@@ -4288,7 +4223,7 @@
intel_crtc_wait_for_pending_flips(crtc);
if (dev_priv->fbc.plane == plane)
- intel_disable_fbc(dev);
+ intel_fbc_disable(dev);
hsw_disable_ips(intel_crtc);
@@ -4591,7 +4526,7 @@
intel_update_watermarks(crtc);
mutex_lock(&dev->struct_mutex);
- intel_update_fbc(dev);
+ intel_fbc_update(dev);
mutex_unlock(&dev->struct_mutex);
}
@@ -4646,7 +4581,7 @@
intel_update_watermarks(crtc);
mutex_lock(&dev->struct_mutex);
- intel_update_fbc(dev);
+ intel_fbc_update(dev);
mutex_unlock(&dev->struct_mutex);
if (intel_crtc_to_shared_dpll(intel_crtc))
@@ -4909,7 +4844,7 @@
cmd = 0;
break;
default:
- WARN_ON(1);
+ MISSING_CASE(cdclk);
return;
}
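[Note: MISSING_CASE() replaces bare WARN_ON(1) in default branches so the log captures which value fell through the switch. A sketch of the helper, assuming it is a thin WARN wrapper; the real macro is in i915_drv.h:

	#define MISSING_CASE(x) WARN(1, "Missing switch case (%lu) in %s\n", \
				     (long)(x), __func__)
]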
@@ -5251,7 +5186,7 @@
intel_update_watermarks(crtc);
mutex_lock(&dev->struct_mutex);
- intel_update_fbc(dev);
+ intel_fbc_update(dev);
mutex_unlock(&dev->struct_mutex);
}
@@ -5309,8 +5244,6 @@
struct drm_device *dev = crtc->dev;
struct drm_connector *connector;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *old_obj = intel_fb_obj(crtc->primary->fb);
- enum pipe pipe = to_intel_crtc(crtc)->pipe;
/* crtc should still be enabled when we disable it. */
WARN_ON(!crtc->enabled);
@@ -5318,14 +5251,7 @@
dev_priv->display.crtc_disable(crtc);
dev_priv->display.off(crtc);
- if (crtc->primary->fb) {
- mutex_lock(&dev->struct_mutex);
- intel_unpin_fb_obj(old_obj);
- i915_gem_track_fb(old_obj, NULL,
- INTEL_FRONTBUFFER_PRIMARY(pipe));
- mutex_unlock(&dev->struct_mutex);
- crtc->primary->fb = NULL;
- }
+ crtc->primary->funcs->disable_plane(crtc->primary);
/* Update computed state. */
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
@@ -5382,25 +5308,25 @@
if (connector->mst_port)
return;
- WARN(connector->base.dpms == DRM_MODE_DPMS_OFF,
+ I915_STATE_WARN(connector->base.dpms == DRM_MODE_DPMS_OFF,
"wrong connector dpms state\n");
- WARN(connector->base.encoder != &encoder->base,
+ I915_STATE_WARN(connector->base.encoder != &encoder->base,
"active connector not linked to encoder\n");
if (encoder) {
- WARN(!encoder->connectors_active,
+ I915_STATE_WARN(!encoder->connectors_active,
"encoder->connectors_active not set\n");
encoder_enabled = encoder->get_hw_state(encoder, &pipe);
- WARN(!encoder_enabled, "encoder not enabled\n");
- if (WARN_ON(!encoder->base.crtc))
+ I915_STATE_WARN(!encoder_enabled, "encoder not enabled\n");
+ if (I915_STATE_WARN_ON(!encoder->base.crtc))
return;
crtc = encoder->base.crtc;
- WARN(!crtc->enabled, "crtc not enabled\n");
- WARN(!to_intel_crtc(crtc)->active, "crtc not active\n");
- WARN(pipe != to_intel_crtc(crtc)->pipe,
+ I915_STATE_WARN(!crtc->enabled, "crtc not enabled\n");
+ I915_STATE_WARN(!to_intel_crtc(crtc)->active, "crtc not active\n");
+ I915_STATE_WARN(pipe != to_intel_crtc(crtc)->pipe,
"encoder active on the wrong pipe\n");
}
}
@@ -7810,24 +7736,24 @@
struct intel_crtc *crtc;
for_each_intel_crtc(dev, crtc)
- WARN(crtc->active, "CRTC for pipe %c enabled\n",
+ I915_STATE_WARN(crtc->active, "CRTC for pipe %c enabled\n",
pipe_name(crtc->pipe));
- WARN(I915_READ(HSW_PWR_WELL_DRIVER), "Power well on\n");
- WARN(I915_READ(SPLL_CTL) & SPLL_PLL_ENABLE, "SPLL enabled\n");
- WARN(I915_READ(WRPLL_CTL1) & WRPLL_PLL_ENABLE, "WRPLL1 enabled\n");
- WARN(I915_READ(WRPLL_CTL2) & WRPLL_PLL_ENABLE, "WRPLL2 enabled\n");
- WARN(I915_READ(PCH_PP_STATUS) & PP_ON, "Panel power on\n");
- WARN(I915_READ(BLC_PWM_CPU_CTL2) & BLM_PWM_ENABLE,
+ I915_STATE_WARN(I915_READ(HSW_PWR_WELL_DRIVER), "Power well on\n");
+ I915_STATE_WARN(I915_READ(SPLL_CTL) & SPLL_PLL_ENABLE, "SPLL enabled\n");
+ I915_STATE_WARN(I915_READ(WRPLL_CTL1) & WRPLL_PLL_ENABLE, "WRPLL1 enabled\n");
+ I915_STATE_WARN(I915_READ(WRPLL_CTL2) & WRPLL_PLL_ENABLE, "WRPLL2 enabled\n");
+ I915_STATE_WARN(I915_READ(PCH_PP_STATUS) & PP_ON, "Panel power on\n");
+ I915_STATE_WARN(I915_READ(BLC_PWM_CPU_CTL2) & BLM_PWM_ENABLE,
"CPU PWM1 enabled\n");
if (IS_HASWELL(dev))
- WARN(I915_READ(HSW_BLC_PWM2_CTL) & BLM_PWM_ENABLE,
+ I915_STATE_WARN(I915_READ(HSW_BLC_PWM2_CTL) & BLM_PWM_ENABLE,
"CPU PWM2 enabled\n");
- WARN(I915_READ(BLC_PWM_PCH_CTL1) & BLM_PCH_PWM_ENABLE,
+ I915_STATE_WARN(I915_READ(BLC_PWM_PCH_CTL1) & BLM_PCH_PWM_ENABLE,
"PCH PWM1 enabled\n");
- WARN(I915_READ(UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
+ I915_STATE_WARN(I915_READ(UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
"Utility pin enabled\n");
- WARN(I915_READ(PCH_GTC_CTL) & PCH_GTC_ENABLE, "PCH GTC enabled\n");
+ I915_STATE_WARN(I915_READ(PCH_GTC_CTL) & PCH_GTC_ENABLE, "PCH GTC enabled\n");
/*
* In theory we can still leave IRQs enabled, as long as only the HPD
@@ -7835,7 +7761,7 @@
* gen-specific and since we only disable LCPLL after we fully disable
* the interrupts, the check below should be enough.
*/
- WARN(intel_irqs_enabled(dev_priv), "IRQs enabled\n");
+ I915_STATE_WARN(intel_irqs_enabled(dev_priv), "IRQs enabled\n");
}
static uint32_t hsw_read_dcomp(struct drm_i915_private *dev_priv)
@@ -8055,12 +7981,21 @@
enum port port,
struct intel_crtc_config *pipe_config)
{
- u32 temp;
+ u32 temp, dpll_ctl1;
temp = I915_READ(DPLL_CTRL2) & DPLL_CTRL2_DDI_CLK_SEL_MASK(port);
pipe_config->ddi_pll_sel = temp >> (port * 3 + 1);
switch (pipe_config->ddi_pll_sel) {
+ case SKL_DPLL0:
+ /*
+ * On SKL the eDP DPLL (DPLL0 as we don't use SSC) is not part
+ * of the shared DPLL framework and thus needs to be read out
+ * separately
+ */
+ dpll_ctl1 = I915_READ(DPLL_CTRL1);
+ pipe_config->dpll_hw_state.ctrl1 = dpll_ctl1 & 0x3f;
+ break;
case SKL_DPLL1:
pipe_config->shared_dpll = DPLL_ID_SKL_DPLL1;
break;
@@ -8286,7 +8221,7 @@
cntl |= CURSOR_MODE_256_ARGB_AX;
break;
default:
- WARN_ON(1);
+ MISSING_CASE(intel_crtc->cursor_width);
return;
}
cntl |= pipe << 28; /* Connect to correct pipe */
@@ -8405,109 +8340,6 @@
return true;
}
-static int intel_crtc_cursor_set_obj(struct drm_crtc *crtc,
- struct drm_i915_gem_object *obj,
- uint32_t width, uint32_t height)
-{
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- enum pipe pipe = intel_crtc->pipe;
- unsigned old_width;
- uint32_t addr;
- int ret;
-
- /* if we want to turn off the cursor ignore width and height */
- if (!obj) {
- DRM_DEBUG_KMS("cursor off\n");
- addr = 0;
- mutex_lock(&dev->struct_mutex);
- goto finish;
- }
-
- /* we only need to pin inside GTT if cursor is non-phy */
- mutex_lock(&dev->struct_mutex);
- if (!INTEL_INFO(dev)->cursor_needs_physical) {
- unsigned alignment;
-
- /*
- * Global gtt pte registers are special registers which actually
- * forward writes to a chunk of system memory. Which means that
- * there is no risk that the register values disappear as soon
- * as we call intel_runtime_pm_put(), so it is correct to wrap
- * only the pin/unpin/fence and not more.
- */
- intel_runtime_pm_get(dev_priv);
-
- /* Note that the w/a also requires 2 PTE of padding following
- * the bo. We currently fill all unused PTE with the shadow
- * page and so we should always have valid PTE following the
- * cursor preventing the VT-d warning.
- */
- alignment = 0;
- if (need_vtd_wa(dev))
- alignment = 64*1024;
-
- ret = i915_gem_object_pin_to_display_plane(obj, alignment, NULL);
- if (ret) {
- DRM_DEBUG_KMS("failed to move cursor bo into the GTT\n");
- intel_runtime_pm_put(dev_priv);
- goto fail_locked;
- }
-
- ret = i915_gem_object_put_fence(obj);
- if (ret) {
- DRM_DEBUG_KMS("failed to release fence for cursor");
- intel_runtime_pm_put(dev_priv);
- goto fail_unpin;
- }
-
- addr = i915_gem_obj_ggtt_offset(obj);
-
- intel_runtime_pm_put(dev_priv);
- } else {
- int align = IS_I830(dev) ? 16 * 1024 : 256;
- ret = i915_gem_object_attach_phys(obj, align);
- if (ret) {
- DRM_DEBUG_KMS("failed to attach phys object\n");
- goto fail_locked;
- }
- addr = obj->phys_handle->busaddr;
- }
-
- finish:
- if (intel_crtc->cursor_bo) {
- if (!INTEL_INFO(dev)->cursor_needs_physical)
- i915_gem_object_unpin_from_display_plane(intel_crtc->cursor_bo);
- }
-
- i915_gem_track_fb(intel_crtc->cursor_bo, obj,
- INTEL_FRONTBUFFER_CURSOR(pipe));
- mutex_unlock(&dev->struct_mutex);
-
- old_width = intel_crtc->cursor_width;
-
- intel_crtc->cursor_addr = addr;
- intel_crtc->cursor_bo = obj;
- intel_crtc->cursor_width = width;
- intel_crtc->cursor_height = height;
-
- if (intel_crtc->active) {
- if (old_width != width)
- intel_update_watermarks(crtc);
- intel_crtc_update_cursor(crtc, intel_crtc->cursor_bo != NULL);
-
- intel_frontbuffer_flip(dev, INTEL_FRONTBUFFER_CURSOR(pipe));
- }
-
- return 0;
-fail_unpin:
- i915_gem_object_unpin_from_display_plane(obj);
-fail_locked:
- mutex_unlock(&dev->struct_mutex);
- return ret;
-}
-
static void intel_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
u16 *blue, uint32_t start, uint32_t size)
{
@@ -9115,7 +8947,10 @@
drm_gem_object_unreference(&work->pending_flip_obj->base);
drm_gem_object_unreference(&work->old_fb_obj->base);
- intel_update_fbc(dev);
+ intel_fbc_update(dev);
+
+ if (work->flip_queued_req)
+ i915_gem_request_assign(&work->flip_queued_req, NULL);
mutex_unlock(&dev->struct_mutex);
intel_frontbuffer_flip_complete(dev, INTEL_FRONTBUFFER_PRIMARY(pipe));
@@ -9511,25 +9346,53 @@
else if (i915.enable_execlists)
return true;
else
- return ring != obj->ring;
+ return ring != i915_gem_request_get_ring(obj->last_read_req);
}
-static void intel_do_mmio_flip(struct intel_crtc *intel_crtc)
+static void skl_do_mmio_flip(struct intel_crtc *intel_crtc)
+{
+ struct drm_device *dev = intel_crtc->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_framebuffer *fb = intel_crtc->base.primary->fb;
+ struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
+ struct drm_i915_gem_object *obj = intel_fb->obj;
+ const enum pipe pipe = intel_crtc->pipe;
+ u32 ctl, stride;
+
+ ctl = I915_READ(PLANE_CTL(pipe, 0));
+ ctl &= ~PLANE_CTL_TILED_MASK;
+ if (obj->tiling_mode == I915_TILING_X)
+ ctl |= PLANE_CTL_TILED_X;
+
+ /*
+ * The stride is expressed either as a number of 64-byte chunks for
+ * linear buffers or as a number of tiles for tiled buffers.
+ */
+ stride = fb->pitches[0] >> 6;
+ if (obj->tiling_mode == I915_TILING_X)
+ stride = fb->pitches[0] >> 9; /* X tiles are 512 bytes wide */
+
+ /*
+ * Both PLANE_CTL and PLANE_STRIDE take effect not on vblank but on the
+ * PLANE_SURF write, so the whole update is guaranteed to be atomic.
+ */
+ I915_WRITE(PLANE_CTL(pipe, 0), ctl);
+ I915_WRITE(PLANE_STRIDE(pipe, 0), stride);
+
+ I915_WRITE(PLANE_SURF(pipe, 0), intel_crtc->unpin_work->gtt_offset);
+ POSTING_READ(PLANE_SURF(pipe, 0));
+}
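[Note: the stride units above differ by tiling — 64-byte chunks for linear surfaces, 512-byte-wide X tiles for tiled ones. A worked example with an assumed 4096-byte pitch:

	/* Illustrative only: SKL PLANE_STRIDE encoding for a 4096-byte pitch.
	 * linear:  4096 >> 6 = 64 (units of 64 bytes)
	 * X-tiled: 4096 >> 9 = 8  (units of 512-byte-wide tiles)
	 */
	static u32 skl_plane_stride(u32 pitch_bytes, bool x_tiled)
	{
		return x_tiled ? pitch_bytes >> 9 : pitch_bytes >> 6;
	}
]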
+
+static void ilk_do_mmio_flip(struct intel_crtc *intel_crtc)
{
struct drm_device *dev = intel_crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_framebuffer *intel_fb =
to_intel_framebuffer(intel_crtc->base.primary->fb);
struct drm_i915_gem_object *obj = intel_fb->obj;
- bool atomic_update;
- u32 start_vbl_count;
u32 dspcntr;
u32 reg;
- intel_mark_page_flip_active(intel_crtc);
-
- atomic_update = intel_pipe_update_start(intel_crtc, &start_vbl_count);
-
reg = DSPCNTR(intel_crtc->plane);
dspcntr = I915_READ(reg);
@@ -9544,26 +9407,50 @@
intel_crtc->unpin_work->gtt_offset);
POSTING_READ(DSPSURF(intel_crtc->plane));
+}
+
+/*
+ * XXX: This is the temporary way to update the plane registers until we get
+ * around to using the usual plane update functions for MMIO flips
+ */
+static void intel_do_mmio_flip(struct intel_crtc *intel_crtc)
+{
+ struct drm_device *dev = intel_crtc->base.dev;
+ bool atomic_update;
+ u32 start_vbl_count;
+
+ intel_mark_page_flip_active(intel_crtc);
+
+ atomic_update = intel_pipe_update_start(intel_crtc, &start_vbl_count);
+
+ if (INTEL_INFO(dev)->gen >= 9)
+ skl_do_mmio_flip(intel_crtc);
+ else
+ /* use_mmio_flip() restricts MMIO flips to ilk+ */
+ ilk_do_mmio_flip(intel_crtc);
+
if (atomic_update)
intel_pipe_update_end(intel_crtc, start_vbl_count);
}
static void intel_mmio_flip_work_func(struct work_struct *work)
{
- struct intel_crtc *intel_crtc =
+ struct intel_crtc *crtc =
container_of(work, struct intel_crtc, mmio_flip.work);
- struct intel_engine_cs *ring;
- uint32_t seqno;
+ struct intel_mmio_flip *mmio_flip;
- seqno = intel_crtc->mmio_flip.seqno;
- ring = intel_crtc->mmio_flip.ring;
+ mmio_flip = &crtc->mmio_flip;
+ if (mmio_flip->req)
+ WARN_ON(__i915_wait_request(mmio_flip->req,
+ crtc->reset_counter,
+ false, NULL, NULL) != 0);
- if (seqno)
- WARN_ON(__i915_wait_seqno(ring, seqno,
- intel_crtc->reset_counter,
- false, NULL, NULL) != 0);
-
- intel_do_mmio_flip(intel_crtc);
+ intel_do_mmio_flip(crtc);
+ if (mmio_flip->req) {
+ mutex_lock(&crtc->base.dev->struct_mutex);
+ i915_gem_request_assign(&mmio_flip->req, NULL);
+ mutex_unlock(&crtc->base.dev->struct_mutex);
+ }
}
static int intel_queue_mmio_flip(struct drm_device *dev,
@@ -9575,8 +9462,8 @@
{
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- intel_crtc->mmio_flip.seqno = obj->last_write_seqno;
- intel_crtc->mmio_flip.ring = obj->ring;
+ i915_gem_request_assign(&intel_crtc->mmio_flip.req,
+ obj->last_write_req);
schedule_work(&intel_crtc->mmio_flip.work);
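[Note: i915_gem_request_assign() is used throughout this series to swap request pointers while keeping refcounts balanced; assigning NULL drops the old reference. Presumably it follows the standard get/put assign pattern — a sketch, not the verbatim helper:

	static inline void
	i915_gem_request_assign(struct drm_i915_gem_request **pdst,
				struct drm_i915_gem_request *src)
	{
		if (src)
			i915_gem_request_reference(src);
		if (*pdst)
			i915_gem_request_unreference(*pdst);
		*pdst = src;
	}
]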
@@ -9671,9 +9558,8 @@
return false;
if (work->flip_ready_vblank == 0) {
- if (work->flip_queued_ring &&
- !i915_seqno_passed(work->flip_queued_ring->get_seqno(work->flip_queued_ring, true),
- work->flip_queued_seqno))
+ if (work->flip_queued_req &&
+ !i915_gem_request_completed(work->flip_queued_req, true))
return false;
work->flip_ready_vblank = drm_vblank_count(dev, intel_crtc->pipe);
@@ -9726,6 +9612,8 @@
struct drm_framebuffer *old_fb = crtc->primary->fb;
struct drm_i915_gem_object *obj = intel_fb_obj(fb);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct drm_plane *primary = crtc->primary;
+ struct intel_plane *intel_plane = to_intel_plane(primary);
enum pipe pipe = intel_crtc->pipe;
struct intel_unpin_work *work;
struct intel_engine_cs *ring;
@@ -9818,7 +9706,7 @@
} else if (IS_IVYBRIDGE(dev)) {
ring = &dev_priv->ring[BCS];
} else if (INTEL_INFO(dev)->gen >= 7) {
- ring = obj->ring;
+ ring = i915_gem_request_get_ring(obj->last_read_req);
if (ring == NULL || ring->id != RCS)
ring = &dev_priv->ring[BCS];
} else {
@@ -9838,16 +9726,16 @@
if (ret)
goto cleanup_unpin;
- work->flip_queued_seqno = obj->last_write_seqno;
- work->flip_queued_ring = obj->ring;
+ i915_gem_request_assign(&work->flip_queued_req,
+ obj->last_write_req);
} else {
ret = dev_priv->display.queue_flip(dev, crtc, fb, obj, ring,
page_flip_flags);
if (ret)
goto cleanup_unpin;
- work->flip_queued_seqno = intel_ring_get_seqno(ring);
- work->flip_queued_ring = ring;
+ i915_gem_request_assign(&work->flip_queued_req,
+ intel_ring_get_request(ring));
}
work->flip_queued_vblank = drm_vblank_count(dev, intel_crtc->pipe);
@@ -9856,7 +9744,7 @@
i915_gem_track_fb(work->old_fb_obj, obj,
INTEL_FRONTBUFFER_PRIMARY(pipe));
- intel_disable_fbc(dev);
+ intel_fbc_disable(dev);
intel_frontbuffer_flip_prepare(dev, INTEL_FRONTBUFFER_PRIMARY(pipe));
mutex_unlock(&dev->struct_mutex);
@@ -9884,8 +9772,15 @@
if (ret == -EIO) {
out_hang:
- intel_crtc_wait_for_pending_flips(crtc);
- ret = intel_pipe_set_base(crtc, crtc->x, crtc->y, fb);
+ ret = primary->funcs->update_plane(primary, crtc, fb,
+ intel_plane->crtc_x,
+ intel_plane->crtc_y,
+ intel_plane->crtc_h,
+ intel_plane->crtc_w,
+ intel_plane->src_x,
+ intel_plane->src_y,
+ intel_plane->src_h,
+ intel_plane->src_w);
if (ret == 0 && event) {
spin_lock_irq(&dev->event_lock);
drm_send_vblank_event(dev, pipe, event);
@@ -10254,9 +10149,9 @@
* computation to clearly distinguish it from the adjusted mode, which
* can be changed by the connectors in the below retry loop.
*/
- drm_mode_set_crtcinfo(&pipe_config->requested_mode, CRTC_STEREO_DOUBLE);
- pipe_config->pipe_src_w = pipe_config->requested_mode.crtc_hdisplay;
- pipe_config->pipe_src_h = pipe_config->requested_mode.crtc_vdisplay;
+ drm_crtc_get_hv_timing(&pipe_config->requested_mode,
+ &pipe_config->pipe_src_w,
+ &pipe_config->pipe_src_h);
encoder_retry:
/* Ensure the port clock defaults are reset when retrying. */
@@ -10742,7 +10637,7 @@
* ->get_hw_state callbacks. */
intel_connector_check_state(connector);
- WARN(&connector->new_encoder->base != connector->base.encoder,
+ I915_STATE_WARN(&connector->new_encoder->base != connector->base.encoder,
"connector's staged encoder doesn't match current encoder\n");
}
}
@@ -10762,9 +10657,9 @@
encoder->base.base.id,
encoder->base.name);
- WARN(&encoder->new_crtc->base != encoder->base.crtc,
+ I915_STATE_WARN(&encoder->new_crtc->base != encoder->base.crtc,
"encoder's stage crtc doesn't match current crtc\n");
- WARN(encoder->connectors_active && !encoder->base.crtc,
+ I915_STATE_WARN(encoder->connectors_active && !encoder->base.crtc,
"encoder's active_connectors set, but no crtc\n");
list_for_each_entry(connector, &dev->mode_config.connector_list,
@@ -10783,19 +10678,19 @@
if (!enabled && encoder->base.encoder_type == DRM_MODE_ENCODER_DPMST)
continue;
- WARN(!!encoder->base.crtc != enabled,
+ I915_STATE_WARN(!!encoder->base.crtc != enabled,
"encoder's enabled state mismatch "
"(expected %i, found %i)\n",
!!encoder->base.crtc, enabled);
- WARN(active && !encoder->base.crtc,
+ I915_STATE_WARN(active && !encoder->base.crtc,
"active encoder with no crtc\n");
- WARN(encoder->connectors_active != active,
+ I915_STATE_WARN(encoder->connectors_active != active,
"encoder's computed active state doesn't match tracked active state "
"(expected %i, found %i)\n", active, encoder->connectors_active);
active = encoder->get_hw_state(encoder, &pipe);
- WARN(active != encoder->connectors_active,
+ I915_STATE_WARN(active != encoder->connectors_active,
"encoder's hw state doesn't match sw tracking "
"(expected %i, found %i)\n",
encoder->connectors_active, active);
@@ -10804,7 +10699,7 @@
continue;
tracked_pipe = to_intel_crtc(encoder->base.crtc)->pipe;
- WARN(active && pipe != tracked_pipe,
+ I915_STATE_WARN(active && pipe != tracked_pipe,
"active encoder's pipe doesn't match"
"(expected %i, found %i)\n",
tracked_pipe, pipe);
@@ -10829,7 +10724,7 @@
DRM_DEBUG_KMS("[CRTC:%d]\n",
crtc->base.base.id);
- WARN(crtc->active && !crtc->base.enabled,
+ I915_STATE_WARN(crtc->active && !crtc->base.enabled,
"active crtc, but not enabled in sw tracking\n");
for_each_intel_encoder(dev, encoder) {
@@ -10840,10 +10735,10 @@
active = true;
}
- WARN(active != crtc->active,
+ I915_STATE_WARN(active != crtc->active,
"crtc's computed active state doesn't match tracked active state "
"(expected %i, found %i)\n", active, crtc->active);
- WARN(enabled != crtc->base.enabled,
+ I915_STATE_WARN(enabled != crtc->base.enabled,
"crtc's computed enabled state doesn't match tracked enabled state "
"(expected %i, found %i)\n", enabled, crtc->base.enabled);
@@ -10863,13 +10758,13 @@
encoder->get_config(encoder, &pipe_config);
}
- WARN(crtc->active != active,
+ I915_STATE_WARN(crtc->active != active,
"crtc active state doesn't match with hw state "
"(expected %i, found %i)\n", crtc->active, active);
if (active &&
!intel_pipe_config_compare(dev, &crtc->config, &pipe_config)) {
- WARN(1, "pipe state doesn't match!\n");
+ I915_STATE_WARN(1, "pipe state doesn't match!\n");
intel_dump_pipe_config(crtc, &pipe_config,
"[hw state]");
intel_dump_pipe_config(crtc, &crtc->config,
@@ -10897,14 +10792,14 @@
active = pll->get_hw_state(dev_priv, pll, &dpll_hw_state);
- WARN(pll->active > hweight32(pll->config.crtc_mask),
+ I915_STATE_WARN(pll->active > hweight32(pll->config.crtc_mask),
"more active pll users than references: %i vs %i\n",
pll->active, hweight32(pll->config.crtc_mask));
- WARN(pll->active && !pll->on,
+ I915_STATE_WARN(pll->active && !pll->on,
"pll in active use but not on in sw tracking\n");
- WARN(pll->on && !pll->active,
+ I915_STATE_WARN(pll->on && !pll->active,
"pll in on but not on in use in sw tracking\n");
- WARN(pll->on != active,
+ I915_STATE_WARN(pll->on != active,
"pll on state mismatch (expected %i, found %i)\n",
pll->on, active);
@@ -10914,14 +10809,14 @@
if (crtc->active && intel_crtc_to_shared_dpll(crtc) == pll)
active_crtcs++;
}
- WARN(pll->active != active_crtcs,
+ I915_STATE_WARN(pll->active != active_crtcs,
"pll active crtcs mismatch (expected %i, found %i)\n",
pll->active, active_crtcs);
- WARN(hweight32(pll->config.crtc_mask) != enabled_crtcs,
+ I915_STATE_WARN(hweight32(pll->config.crtc_mask) != enabled_crtcs,
"pll enabled crtcs mismatch (expected %i, found %i)\n",
hweight32(pll->config.crtc_mask), enabled_crtcs);
- WARN(pll->on && memcmp(&pll->config.hw_state, &dpll_hw_state,
+ I915_STATE_WARN(pll->on && memcmp(&pll->config.hw_state, &dpll_hw_state,
sizeof(dpll_hw_state)),
"pll hw state mismatch\n");
}
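The shared DPLL checks above cross-validate three views of the same state: the pll->active reference count, the crtc_mask bitmap, and the hardware readout. A small illustration of the hweight32() relationship (assumed values, not part of the patch):

    /* hweight32() counts set bits: a DPLL shared by pipes A and C
     * has crtc_mask 0b101, i.e. two enabled crtcs. */
    u32 crtc_mask = (1 << PIPE_A) | (1 << PIPE_C);
    int enabled_crtcs = hweight32(crtc_mask); /* == 2 */
    /* check_shared_dpll_state() warns whenever pll->active exceeds this */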
@@ -11114,26 +11009,15 @@
* on the DPLL.
*/
for_each_intel_crtc_masked(dev, modeset_pipes, intel_crtc) {
- struct drm_framebuffer *old_fb = crtc->primary->fb;
- struct drm_i915_gem_object *old_obj = intel_fb_obj(old_fb);
- struct drm_i915_gem_object *obj = intel_fb_obj(fb);
+ struct drm_plane *primary = intel_crtc->base.primary;
+ int vdisplay, hdisplay;
- mutex_lock(&dev->struct_mutex);
- ret = intel_pin_and_fence_fb_obj(crtc->primary, fb, NULL);
- if (ret != 0) {
- DRM_ERROR("pin & fence failed\n");
- mutex_unlock(&dev->struct_mutex);
- goto done;
- }
- if (old_fb)
- intel_unpin_fb_obj(old_obj);
- i915_gem_track_fb(old_obj, obj,
- INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe));
- mutex_unlock(&dev->struct_mutex);
-
- crtc->primary->fb = fb;
- crtc->x = x;
- crtc->y = y;
+ drm_crtc_get_hv_timing(mode, &hdisplay, &vdisplay);
+ ret = primary->funcs->update_plane(primary, &intel_crtc->base,
+ fb, 0, 0,
+ hdisplay, vdisplay,
+ x << 16, y << 16,
+ hdisplay << 16, vdisplay << 16);
}
/* Now enable the clocks, plane, pipe, and connectors that we set up. */
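Note that update_plane() takes source coordinates in 16.16 fixed point, which is why the integer x/y offsets and the hdisplay/vdisplay sizes are shifted left by 16 above. A short illustration of the convention (assumed values):

    /* drm plane src_* arguments are 16.16 fixed point: the integer
     * part lives in the high 16 bits, the fraction in the low 16. */
    uint32_t src_x = 100 << 16;      /* integer pixel offset 100 */
    uint32_t src_w = hdisplay << 16; /* full-width source rectangle */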
@@ -11601,11 +11485,14 @@
disable_pipes);
} else if (config->fb_changed) {
struct intel_crtc *intel_crtc = to_intel_crtc(set->crtc);
+ struct drm_plane *primary = set->crtc->primary;
+ int vdisplay, hdisplay;
- intel_crtc_wait_for_pending_flips(set->crtc);
-
- ret = intel_pipe_set_base(set->crtc,
- set->x, set->y, set->fb);
+ drm_crtc_get_hv_timing(set->mode, &hdisplay, &vdisplay);
+ ret = primary->funcs->update_plane(primary, set->crtc, set->fb,
+ 0, 0, hdisplay, vdisplay,
+ set->x << 16, set->y << 16,
+ hdisplay << 16, vdisplay << 16);
/*
* We need to make sure the primary plane is re-enabled if it
@@ -11762,95 +11649,115 @@
BUG_ON(dev_priv->num_shared_dpll > I915_NUM_PLLS);
}
-static int
-intel_primary_plane_disable(struct drm_plane *plane)
+/**
+ * intel_prepare_plane_fb - Prepare fb for usage on plane
+ * @plane: drm plane to prepare for
+ * @fb: framebuffer to prepare for presentation
+ *
+ * Prepares a framebuffer for usage on a display plane. Generally this
+ * involves pinning the underlying object and updating the frontbuffer tracking
+ * bits. Some older platforms need special physical address handling for
+ * cursor planes.
+ *
+ * Returns 0 on success, negative error code on failure.
+ */
+int
+intel_prepare_plane_fb(struct drm_plane *plane,
+ struct drm_framebuffer *fb)
{
struct drm_device *dev = plane->dev;
- struct intel_crtc *intel_crtc;
+ struct intel_plane *intel_plane = to_intel_plane(plane);
+ enum pipe pipe = intel_plane->pipe;
+ struct drm_i915_gem_object *obj = intel_fb_obj(fb);
+ struct drm_i915_gem_object *old_obj = intel_fb_obj(plane->fb);
+ unsigned frontbuffer_bits = 0;
+ int ret = 0;
- if (!plane->fb)
+ if (WARN_ON(fb == plane->fb || !obj))
return 0;
- BUG_ON(!plane->crtc);
+ switch (plane->type) {
+ case DRM_PLANE_TYPE_PRIMARY:
+ frontbuffer_bits = INTEL_FRONTBUFFER_PRIMARY(pipe);
+ break;
+ case DRM_PLANE_TYPE_CURSOR:
+ frontbuffer_bits = INTEL_FRONTBUFFER_CURSOR(pipe);
+ break;
+ case DRM_PLANE_TYPE_OVERLAY:
+ frontbuffer_bits = INTEL_FRONTBUFFER_SPRITE(pipe);
+ break;
+ }
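The switch above picks the frontbuffer tracking bit for this plane type on the given pipe. A hedged sketch of how such bits are consumed (the exact bit layout lives in i915_drv.h and is an assumption here):

    /* Illustration: frontbuffer bits are per pipe and per plane
     * type, so flush/flip notifications can be filtered per plane */
    unsigned bits = INTEL_FRONTBUFFER_PRIMARY(pipe);
    intel_frontbuffer_flip(dev, bits); /* as in the commit paths below */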
- intel_crtc = to_intel_crtc(plane->crtc);
-
- /*
- * Even though we checked plane->fb above, it's still possible that
- * the primary plane has been implicitly disabled because the crtc
- * coordinates given weren't visible, or because we detected
- * that it was 100% covered by a sprite plane. Or, the CRTC may be
- * off and we've set a fb, but haven't actually turned on the CRTC yet.
- * In either case, we need to unpin the FB and let the fb pointer get
- * updated, but otherwise we don't need to touch the hardware.
- */
- if (!intel_crtc->primary_enabled)
- goto disable_unpin;
-
- intel_crtc_wait_for_pending_flips(plane->crtc);
- intel_disable_primary_hw_plane(plane, plane->crtc);
-
-disable_unpin:
mutex_lock(&dev->struct_mutex);
- i915_gem_track_fb(intel_fb_obj(plane->fb), NULL,
- INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe));
- intel_unpin_fb_obj(intel_fb_obj(plane->fb));
- mutex_unlock(&dev->struct_mutex);
- plane->fb = NULL;
- return 0;
+ if (plane->type == DRM_PLANE_TYPE_CURSOR &&
+ INTEL_INFO(dev)->cursor_needs_physical) {
+ int align = IS_I830(dev) ? 16 * 1024 : 256;
+ ret = i915_gem_object_attach_phys(obj, align);
+ if (ret)
+ DRM_DEBUG_KMS("failed to attach phys object\n");
+ } else {
+ ret = intel_pin_and_fence_fb_obj(plane, fb, NULL);
+ }
+
+ if (ret == 0)
+ i915_gem_track_fb(old_obj, obj, frontbuffer_bits);
+
+ mutex_unlock(&dev->struct_mutex);
+
+ return ret;
+}
+
+/**
+ * intel_cleanup_plane_fb - Cleans up an fb after plane use
+ * @plane: drm plane to clean up for
+ * @fb: old framebuffer that was on plane
+ *
+ * Cleans up a framebuffer that has just been removed from a plane.
+ */
+void
+intel_cleanup_plane_fb(struct drm_plane *plane,
+ struct drm_framebuffer *fb)
+{
+ struct drm_device *dev = plane->dev;
+ struct drm_i915_gem_object *obj = intel_fb_obj(fb);
+
+ if (WARN_ON(!obj))
+ return;
+
+ if (plane->type != DRM_PLANE_TYPE_CURSOR ||
+ !INTEL_INFO(dev)->cursor_needs_physical) {
+ mutex_lock(&dev->struct_mutex);
+ intel_unpin_fb_obj(obj);
+ mutex_unlock(&dev->struct_mutex);
+ }
}
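Taken together, the two helpers above form a pin/unpin pair around a plane update. A minimal sketch of the intended calling sequence (caller-tracked old_fb, mirroring intel_update_plane() later in this patch):

    if (fb != old_fb && fb) {
            ret = intel_prepare_plane_fb(plane, fb); /* pin + track */
            if (ret)
                    return ret;
    }
    /* ... program the hardware ... */
    if (fb != old_fb && old_fb)
            intel_cleanup_plane_fb(plane, old_fb);   /* unpin old fb */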
static int
intel_check_primary_plane(struct drm_plane *plane,
struct intel_plane_state *state)
{
- struct drm_crtc *crtc = state->crtc;
- struct drm_framebuffer *fb = state->fb;
+ struct drm_crtc *crtc = state->base.crtc;
+ struct drm_framebuffer *fb = state->base.fb;
struct drm_rect *dest = &state->dst;
struct drm_rect *src = &state->src;
const struct drm_rect *clip = &state->clip;
-
- return drm_plane_helper_check_update(plane, crtc, fb,
- src, dest, clip,
- DRM_PLANE_HELPER_NO_SCALING,
- DRM_PLANE_HELPER_NO_SCALING,
- false, true, &state->visible);
-}
-
-static int
-intel_prepare_primary_plane(struct drm_plane *plane,
- struct intel_plane_state *state)
-{
- struct drm_crtc *crtc = state->crtc;
- struct drm_framebuffer *fb = state->fb;
- struct drm_device *dev = crtc->dev;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- enum pipe pipe = intel_crtc->pipe;
- struct drm_i915_gem_object *obj = intel_fb_obj(fb);
- struct drm_i915_gem_object *old_obj = intel_fb_obj(plane->fb);
int ret;
- intel_crtc_wait_for_pending_flips(crtc);
+ ret = drm_plane_helper_check_update(plane, crtc, fb,
+ src, dest, clip,
+ DRM_PLANE_HELPER_NO_SCALING,
+ DRM_PLANE_HELPER_NO_SCALING,
+ false, true, &state->visible);
+ if (ret)
+ return ret;
+ intel_crtc_wait_for_pending_flips(crtc);
if (intel_crtc_has_pending_flip(crtc)) {
DRM_ERROR("pipe is still busy with an old pageflip\n");
return -EBUSY;
}
- if (old_obj != obj) {
- mutex_lock(&dev->struct_mutex);
- ret = intel_pin_and_fence_fb_obj(plane, fb, NULL);
- if (ret == 0)
- i915_gem_track_fb(old_obj, obj,
- INTEL_FRONTBUFFER_PRIMARY(pipe));
- mutex_unlock(&dev->struct_mutex);
- if (ret != 0) {
- DRM_DEBUG_KMS("pin & fence failed\n");
- return ret;
- }
- }
-
return 0;
}
@@ -11858,19 +11765,28 @@
intel_commit_primary_plane(struct drm_plane *plane,
struct intel_plane_state *state)
{
- struct drm_crtc *crtc = state->crtc;
- struct drm_framebuffer *fb = state->fb;
- struct drm_device *dev = crtc->dev;
+ struct drm_crtc *crtc = state->base.crtc;
+ struct drm_framebuffer *fb = state->base.fb;
+ struct drm_device *dev = plane->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- enum pipe pipe = intel_crtc->pipe;
- struct drm_framebuffer *old_fb = plane->fb;
struct drm_i915_gem_object *obj = intel_fb_obj(fb);
- struct drm_i915_gem_object *old_obj = intel_fb_obj(plane->fb);
struct intel_plane *intel_plane = to_intel_plane(plane);
struct drm_rect *src = &state->src;
+ enum pipe pipe = intel_plane->pipe;
- crtc->primary->fb = fb;
+ if (!fb) {
+ /*
+ * 'prepare' is never called when plane is being disabled, so
+ * we need to handle frontbuffer tracking here
+ */
+ mutex_lock(&dev->struct_mutex);
+ i915_gem_track_fb(intel_fb_obj(plane->fb), NULL,
+ INTEL_FRONTBUFFER_PRIMARY(pipe));
+ mutex_unlock(&dev->struct_mutex);
+ }
+
+ plane->fb = fb;
crtc->x = src->x1 >> 16;
crtc->y = src->y1 >> 16;
@@ -11899,7 +11815,7 @@
INTEL_INFO(dev)->gen <= 4 && !IS_G4X(dev) &&
dev_priv->fbc.plane == intel_crtc->plane &&
intel_plane->rotation != BIT(DRM_ROTATE_0)) {
- intel_disable_fbc(dev);
+ intel_fbc_disable(dev);
}
if (state->visible) {
@@ -11934,33 +11850,28 @@
intel_frontbuffer_flip(dev, INTEL_FRONTBUFFER_PRIMARY(pipe));
mutex_lock(&dev->struct_mutex);
- intel_update_fbc(dev);
- mutex_unlock(&dev->struct_mutex);
- }
-
- if (old_fb && old_fb != fb) {
- if (intel_crtc->active)
- intel_wait_for_vblank(dev, intel_crtc->pipe);
-
- mutex_lock(&dev->struct_mutex);
- intel_unpin_fb_obj(old_obj);
+ intel_fbc_update(dev);
mutex_unlock(&dev->struct_mutex);
}
}
-static int
-intel_primary_plane_setplane(struct drm_plane *plane, struct drm_crtc *crtc,
- struct drm_framebuffer *fb, int crtc_x, int crtc_y,
- unsigned int crtc_w, unsigned int crtc_h,
- uint32_t src_x, uint32_t src_y,
- uint32_t src_w, uint32_t src_h)
+int
+intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
+ struct drm_framebuffer *fb, int crtc_x, int crtc_y,
+ unsigned int crtc_w, unsigned int crtc_h,
+ uint32_t src_x, uint32_t src_y,
+ uint32_t src_w, uint32_t src_h)
{
+ struct drm_device *dev = plane->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_framebuffer *old_fb = plane->fb;
struct intel_plane_state state;
+ struct intel_plane *intel_plane = to_intel_plane(plane);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int ret;
- state.crtc = crtc;
- state.fb = fb;
+ state.base.crtc = crtc ? crtc : plane->crtc;
+ state.base.fb = fb;
/* sample coordinates in 16.16 fixed point */
state.src.x1 = src_x;
@@ -11982,19 +11893,50 @@
state.orig_src = state.src;
state.orig_dst = state.dst;
- ret = intel_check_primary_plane(plane, &state);
+ ret = intel_plane->check_plane(plane, &state);
if (ret)
return ret;
- ret = intel_prepare_primary_plane(plane, &state);
- if (ret)
- return ret;
+ if (fb != old_fb && fb) {
+ ret = intel_prepare_plane_fb(plane, fb);
+ if (ret)
+ return ret;
+ }
- intel_commit_primary_plane(plane, &state);
+ intel_runtime_pm_get(dev_priv);
+ intel_plane->commit_plane(plane, &state);
+ intel_runtime_pm_put(dev_priv);
+
+ if (fb != old_fb && old_fb) {
+ if (intel_crtc->active)
+ intel_wait_for_vblank(dev, intel_crtc->pipe);
+ intel_cleanup_plane_fb(plane, old_fb);
+ }
+
+ plane->fb = fb;
return 0;
}
+/**
+ * intel_disable_plane - disable a plane
+ * @plane: plane to disable
+ *
+ * General disable handler for all plane types.
+ */
+int
+intel_disable_plane(struct drm_plane *plane)
+{
+ if (!plane->fb)
+ return 0;
+
+ if (WARN_ON(!plane->crtc))
+ return -EINVAL;
+
+ return plane->funcs->update_plane(plane, plane->crtc, NULL,
+ 0, 0, 0, 0, 0, 0, 0, 0);
+}
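Since disabling is funneled through update_plane() with a NULL framebuffer, the commit hooks must tolerate fb == NULL; intel_commit_primary_plane() above handles exactly that case for frontbuffer tracking. A usage sketch (illustration only):

    /* the drm core's .disable_plane entry point now reduces to an
     * update_plane() call with no fb and zeroed coordinates */
    ret = plane->funcs->disable_plane(plane);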
+
/* Common destruction function for both primary and cursor planes */
static void intel_plane_destroy(struct drm_plane *plane)
{
@@ -12004,8 +11946,8 @@
}
static const struct drm_plane_funcs intel_primary_plane_funcs = {
- .update_plane = intel_primary_plane_setplane,
- .disable_plane = intel_primary_plane_disable,
+ .update_plane = intel_update_plane,
+ .disable_plane = intel_disable_plane,
.destroy = intel_plane_destroy,
.set_property = intel_plane_set_property
};
@@ -12026,6 +11968,8 @@
primary->pipe = pipe;
primary->plane = pipe;
primary->rotation = BIT(DRM_ROTATE_0);
+ primary->check_plane = intel_check_primary_plane;
+ primary->commit_plane = intel_commit_primary_plane;
if (HAS_FBC(dev) && INTEL_INFO(dev)->gen < 4)
primary->plane = !pipe;
@@ -12058,23 +12002,12 @@
}
static int
-intel_cursor_plane_disable(struct drm_plane *plane)
-{
- if (!plane->fb)
- return 0;
-
- BUG_ON(!plane->crtc);
-
- return intel_crtc_cursor_set_obj(plane->crtc, NULL, 0, 0);
-}
-
-static int
intel_check_cursor_plane(struct drm_plane *plane,
struct intel_plane_state *state)
{
- struct drm_crtc *crtc = state->crtc;
+ struct drm_crtc *crtc = state->base.crtc;
struct drm_device *dev = crtc->dev;
- struct drm_framebuffer *fb = state->fb;
+ struct drm_framebuffer *fb = state->base.fb;
struct drm_rect *dest = &state->dst;
struct drm_rect *src = &state->src;
const struct drm_rect *clip = &state->clip;
@@ -12124,18 +12057,21 @@
return ret;
}
-static int
+static void
intel_commit_cursor_plane(struct drm_plane *plane,
struct intel_plane_state *state)
{
- struct drm_crtc *crtc = state->crtc;
- struct drm_framebuffer *fb = state->fb;
+ struct drm_crtc *crtc = state->base.crtc;
+ struct drm_device *dev = crtc->dev;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_plane *intel_plane = to_intel_plane(plane);
- struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
- struct drm_i915_gem_object *obj = intel_fb->obj;
- int crtc_w, crtc_h;
+ struct drm_i915_gem_object *obj = intel_fb_obj(state->base.fb);
+ struct drm_i915_gem_object *old_obj = intel_fb_obj(plane->fb);
+ enum pipe pipe = intel_crtc->pipe;
+ unsigned old_width;
+ uint32_t addr;
+ plane->fb = state->base.fb;
crtc->cursor_x = state->orig_dst.x1;
crtc->cursor_y = state->orig_dst.y1;
@@ -12149,64 +12085,47 @@
intel_plane->src_h = drm_rect_height(&state->orig_src);
intel_plane->obj = obj;
- if (fb != crtc->cursor->fb) {
- crtc_w = drm_rect_width(&state->orig_dst);
- crtc_h = drm_rect_height(&state->orig_dst);
- return intel_crtc_cursor_set_obj(crtc, obj, crtc_w, crtc_h);
- } else {
+ if (intel_crtc->cursor_bo == obj)
+ goto update;
+
+ /*
+ * 'prepare' is only called when fb != NULL; we still need to update
+ * frontbuffer tracking for the 'disable' case here.
+ */
+ if (!obj) {
+ mutex_lock(&dev->struct_mutex);
+ i915_gem_track_fb(old_obj, NULL,
+ INTEL_FRONTBUFFER_CURSOR(pipe));
+ mutex_unlock(&dev->struct_mutex);
+ }
+
+ if (!obj)
+ addr = 0;
+ else if (!INTEL_INFO(dev)->cursor_needs_physical)
+ addr = i915_gem_obj_ggtt_offset(obj);
+ else
+ addr = obj->phys_handle->busaddr;
+
+ intel_crtc->cursor_addr = addr;
+ intel_crtc->cursor_bo = obj;
+update:
+ old_width = intel_crtc->cursor_width;
+
+ intel_crtc->cursor_width = drm_rect_width(&state->orig_dst);
+ intel_crtc->cursor_height = drm_rect_height(&state->orig_dst);
+
+ if (intel_crtc->active) {
+ if (old_width != intel_crtc->cursor_width)
+ intel_update_watermarks(crtc);
intel_crtc_update_cursor(crtc, state->visible);
- intel_frontbuffer_flip(crtc->dev,
- INTEL_FRONTBUFFER_CURSOR(intel_crtc->pipe));
-
- return 0;
+ intel_frontbuffer_flip(dev, INTEL_FRONTBUFFER_CURSOR(pipe));
}
}
-static int
-intel_cursor_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
- struct drm_framebuffer *fb, int crtc_x, int crtc_y,
- unsigned int crtc_w, unsigned int crtc_h,
- uint32_t src_x, uint32_t src_y,
- uint32_t src_w, uint32_t src_h)
-{
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct intel_plane_state state;
- int ret;
-
- state.crtc = crtc;
- state.fb = fb;
-
- /* sample coordinates in 16.16 fixed point */
- state.src.x1 = src_x;
- state.src.x2 = src_x + src_w;
- state.src.y1 = src_y;
- state.src.y2 = src_y + src_h;
-
- /* integer pixels */
- state.dst.x1 = crtc_x;
- state.dst.x2 = crtc_x + crtc_w;
- state.dst.y1 = crtc_y;
- state.dst.y2 = crtc_y + crtc_h;
-
- state.clip.x1 = 0;
- state.clip.y1 = 0;
- state.clip.x2 = intel_crtc->active ? intel_crtc->config.pipe_src_w : 0;
- state.clip.y2 = intel_crtc->active ? intel_crtc->config.pipe_src_h : 0;
-
- state.orig_src = state.src;
- state.orig_dst = state.dst;
-
- ret = intel_check_cursor_plane(plane, &state);
- if (ret)
- return ret;
-
- return intel_commit_cursor_plane(plane, &state);
-}
-
static const struct drm_plane_funcs intel_cursor_plane_funcs = {
- .update_plane = intel_cursor_plane_update,
- .disable_plane = intel_cursor_plane_disable,
+ .update_plane = intel_update_plane,
+ .disable_plane = intel_disable_plane,
.destroy = intel_plane_destroy,
.set_property = intel_plane_set_property,
};
@@ -12225,6 +12144,8 @@
cursor->pipe = pipe;
cursor->plane = pipe;
cursor->rotation = BIT(DRM_ROTATE_0);
+ cursor->check_plane = intel_check_cursor_plane;
+ cursor->commit_plane = intel_commit_cursor_plane;
drm_universal_plane_init(dev, &cursor->base, 0,
&intel_cursor_plane_funcs,
@@ -12383,28 +12304,6 @@
return true;
}
-const char *intel_output_name(int output)
-{
- static const char *names[] = {
- [INTEL_OUTPUT_UNUSED] = "Unused",
- [INTEL_OUTPUT_ANALOG] = "Analog",
- [INTEL_OUTPUT_DVO] = "DVO",
- [INTEL_OUTPUT_SDVO] = "SDVO",
- [INTEL_OUTPUT_LVDS] = "LVDS",
- [INTEL_OUTPUT_TVOUT] = "TV",
- [INTEL_OUTPUT_HDMI] = "HDMI",
- [INTEL_OUTPUT_DISPLAYPORT] = "DisplayPort",
- [INTEL_OUTPUT_EDP] = "eDP",
- [INTEL_OUTPUT_DSI] = "DSI",
- [INTEL_OUTPUT_UNKNOWN] = "Unknown",
- };
-
- if (output < 0 || output >= ARRAY_SIZE(names) || !names[output])
- return "Invalid";
-
- return names[output];
-}
-
static bool intel_crt_present(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -13153,7 +13052,7 @@
intel_setup_outputs(dev);
/* Just in case the BIOS is doing something questionable. */
- intel_disable_fbc(dev);
+ intel_fbc_disable(dev);
drm_modeset_lock_all(dev);
intel_modeset_setup_hw_state(dev, false);
@@ -13670,7 +13569,7 @@
intel_unregister_dsm_handler();
- intel_disable_fbc(dev);
+ intel_fbc_disable(dev);
ironlake_teardown_rc6(dev);
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 5cecc20..88d81a8 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -1558,7 +1558,7 @@
vdd = edp_panel_vdd_on(intel_dp);
pps_unlock(intel_dp);
- WARN(!vdd, "eDP port %c VDD already requested on\n",
+ I915_STATE_WARN(!vdd, "eDP port %c VDD already requested on\n",
port_name(dp_to_dig_port(intel_dp)->port));
}
@@ -1642,7 +1642,7 @@
if (!is_edp(intel_dp))
return;
- WARN(!intel_dp->want_panel_vdd, "eDP port %c VDD not forced on",
+ I915_STATE_WARN(!intel_dp->want_panel_vdd, "eDP port %c VDD not forced on",
port_name(dp_to_dig_port(intel_dp)->port));
intel_dp->want_panel_vdd = false;
@@ -2105,6 +2105,9 @@
if (crtc->config.has_audio)
intel_audio_codec_disable(encoder);
+ if (HAS_PSR(dev) && !HAS_DDI(dev))
+ intel_psr_disable(intel_dp);
+
/* Make sure the panel is off before trying to change the mode. But also
* ensure that we have vdd while we switch off the panel. */
intel_edp_panel_vdd_on(intel_dp);
@@ -2329,6 +2332,7 @@
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
intel_edp_backlight_on(intel_dp);
+ intel_psr_enable(intel_dp);
}
static void g4x_pre_enable_dp(struct intel_encoder *encoder)
@@ -4306,7 +4310,6 @@
drm_dp_aux_unregister(&intel_dp->aux);
intel_dp_mst_encoder_cleanup(intel_dig_port);
- drm_encoder_cleanup(encoder);
if (is_edp(intel_dp)) {
cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
/*
@@ -4322,6 +4325,7 @@
intel_dp->edp_notifier.notifier_call = NULL;
}
}
+ drm_encoder_cleanup(encoder);
kfree(intel_dig_port);
}
@@ -4763,14 +4767,9 @@
}
/*
- * FIXME: This needs proper synchronization with psr state. But really
- * hard to tell without seeing the user of this function of this code.
- * Check locking and ordering once that lands.
+ * FIXME: This needs proper synchronization with psr state for some
+ * platforms that cannot have PSR and DRRS enabled at the same time.
*/
- if (INTEL_INFO(dev)->gen < 8 && intel_psr_is_enabled(dev)) {
- DRM_DEBUG_KMS("DRRS is disabled as PSR is enabled\n");
- return;
- }
encoder = intel_attached_encoder(&intel_connector->base);
intel_dp = enc_to_intel_dp(&encoder->base);
@@ -5086,7 +5085,7 @@
intel_dp_aux_init(intel_dp, intel_connector);
/* init MST on ports that can support it */
- if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
+ if (IS_HASWELL(dev) || IS_BROADWELL(dev) || INTEL_INFO(dev)->gen >= 9) {
if (port == PORT_B || port == PORT_C || port == PORT_D) {
intel_dp_mst_encoder_init(intel_dig_port,
intel_connector->base.base.id);
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 25fdbb1..588b618 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -244,8 +244,7 @@
} intel_clock_t;
struct intel_plane_state {
- struct drm_crtc *crtc;
- struct drm_framebuffer *fb;
+ struct drm_plane_state base;
struct drm_rect src;
struct drm_rect dst;
struct drm_rect clip;
@@ -406,8 +405,7 @@
};
struct intel_mmio_flip {
- u32 seqno;
- struct intel_engine_cs *ring;
+ struct drm_i915_gem_request *req;
struct work_struct work;
};
@@ -510,6 +508,10 @@
uint32_t src_w, uint32_t src_h);
void (*disable_plane)(struct drm_plane *plane,
struct drm_crtc *crtc);
+ int (*check_plane)(struct drm_plane *plane,
+ struct intel_plane_state *state);
+ void (*commit_plane)(struct drm_plane *plane,
+ struct intel_plane_state *state);
int (*update_colorkey)(struct drm_plane *plane,
struct drm_intel_sprite_colorkey *key);
void (*get_colorkey)(struct drm_plane *plane,
@@ -708,8 +710,7 @@
#define INTEL_FLIP_COMPLETE 2
u32 flip_count;
u32 gtt_offset;
- struct intel_engine_cs *flip_queued_ring;
- u32 flip_queued_seqno;
+ struct drm_i915_gem_request *flip_queued_req;
int flip_queued_vblank;
int flip_ready_vblank;
bool enable_stall_check;
@@ -874,7 +875,6 @@
void intel_audio_codec_disable(struct intel_encoder *encoder);
/* intel_display.c */
-const char *intel_output_name(int output);
bool intel_has_pending_fb_unpin(struct drm_device *dev);
int intel_pch_rawclk(struct drm_device *dev);
void intel_mark_busy(struct drm_device *dev);
@@ -925,6 +925,10 @@
void intel_finish_page_flip(struct drm_device *dev, int pipe);
void intel_finish_page_flip_plane(struct drm_device *dev, int plane);
void intel_check_page_flip(struct drm_device *dev, int pipe);
+int intel_prepare_plane_fb(struct drm_plane *plane,
+ struct drm_framebuffer *fb);
+void intel_cleanup_plane_fb(struct drm_plane *plane,
+ struct drm_framebuffer *fb);
/* shared dpll functions */
struct intel_shared_dpll *intel_crtc_to_shared_dpll(struct intel_crtc *crtc);
@@ -1010,6 +1014,12 @@
void vlv_power_sequencer_reset(struct drm_i915_private *dev_priv);
uint32_t intel_dp_pack_aux(const uint8_t *src, int src_bytes);
void intel_dp_unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes);
+int intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
+ struct drm_framebuffer *fb, int crtc_x, int crtc_y,
+ unsigned int crtc_w, unsigned int crtc_h,
+ uint32_t src_x, uint32_t src_y,
+ uint32_t src_w, uint32_t src_h);
+int intel_disable_plane(struct drm_plane *plane);
/* intel_dp_mst.c */
int intel_dp_mst_encoder_init(struct intel_digital_port *intel_dig_port, int conn_id);
@@ -1053,6 +1063,13 @@
}
#endif
+/* intel_fbc.c */
+bool intel_fbc_enabled(struct drm_device *dev);
+void intel_fbc_update(struct drm_device *dev);
+void intel_fbc_init(struct drm_i915_private *dev_priv);
+void intel_fbc_disable(struct drm_device *dev);
+void bdw_fbc_sw_flush(struct drm_device *dev, u32 value);
+
/* intel_hdmi.c */
void intel_hdmi_init(struct drm_device *dev, int hdmi_reg, enum port port);
void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
@@ -1083,6 +1100,7 @@
struct drm_file *file_priv);
int intel_overlay_attrs(struct drm_device *dev, void *data,
struct drm_file *file_priv);
+void intel_overlay_reset(struct drm_i915_private *dev_priv);
/* intel_panel.c */
@@ -1115,7 +1133,6 @@
/* intel_psr.c */
-bool intel_psr_is_enabled(struct drm_device *dev);
void intel_psr_enable(struct intel_dp *intel_dp);
void intel_psr_disable(struct intel_dp *intel_dp);
void intel_psr_invalidate(struct drm_device *dev,
@@ -1159,8 +1176,6 @@
bool enabled, bool scaled);
void intel_init_pm(struct drm_device *dev);
void intel_pm_setup(struct drm_device *dev);
-bool intel_fbc_enabled(struct drm_device *dev);
-void intel_update_fbc(struct drm_device *dev);
void intel_gpu_ips_init(struct drm_i915_private *dev_priv);
void intel_gpu_ips_teardown(void);
void intel_init_gt_powersave(struct drm_device *dev);
@@ -1191,7 +1206,6 @@
struct drm_property *prop,
uint64_t val);
int intel_plane_restore(struct drm_plane *plane);
-void intel_plane_disable(struct drm_plane *plane);
int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int intel_sprite_get_colorkey(struct drm_device *dev, void *data,
diff --git a/drivers/gpu/drm/i915/intel_dsi.c b/drivers/gpu/drm/i915/intel_dsi.c
index 0b18407..42b6d6f 100644
--- a/drivers/gpu/drm/i915/intel_dsi.c
+++ b/drivers/gpu/drm/i915/intel_dsi.c
@@ -102,11 +102,62 @@
return true;
}
+static void intel_dsi_port_enable(struct intel_encoder *encoder)
+{
+ struct drm_device *dev = encoder->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ enum port port;
+ u32 temp;
+
+ if (intel_dsi->dual_link == DSI_DUAL_LINK_FRONT_BACK) {
+ temp = I915_READ(VLV_CHICKEN_3);
+ temp &= ~PIXEL_OVERLAP_CNT_MASK;
+ temp |= intel_dsi->pixel_overlap <<
+ PIXEL_OVERLAP_CNT_SHIFT;
+ I915_WRITE(VLV_CHICKEN_3, temp);
+ }
+
+ for_each_dsi_port(port, intel_dsi->ports) {
+ temp = I915_READ(MIPI_PORT_CTRL(port));
+ temp &= ~LANE_CONFIGURATION_MASK;
+ temp &= ~DUAL_LINK_MODE_MASK;
+
+ if (intel_dsi->ports == ((1 << PORT_A) | (1 << PORT_C))) {
+ temp |= (intel_dsi->dual_link - 1)
+ << DUAL_LINK_MODE_SHIFT;
+ temp |= intel_crtc->pipe ?
+ LANE_CONFIGURATION_DUAL_LINK_B :
+ LANE_CONFIGURATION_DUAL_LINK_A;
+ }
+ /* assert ip_tg_enable signal */
+ I915_WRITE(MIPI_PORT_CTRL(port), temp | DPI_ENABLE);
+ POSTING_READ(MIPI_PORT_CTRL(port));
+ }
+}
+
+static void intel_dsi_port_disable(struct intel_encoder *encoder)
+{
+ struct drm_device *dev = encoder->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ enum port port;
+ u32 temp;
+
+ for_each_dsi_port(port, intel_dsi->ports) {
+ /* de-assert ip_tg_enable signal */
+ temp = I915_READ(MIPI_PORT_CTRL(port));
+ I915_WRITE(MIPI_PORT_CTRL(port), temp & ~DPI_ENABLE);
+ POSTING_READ(MIPI_PORT_CTRL(port));
+ }
+}
+
static void intel_dsi_device_ready(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
- struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
- int pipe = intel_crtc->pipe;
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ enum port port;
u32 val;
DRM_DEBUG_KMS("\n");
@@ -120,18 +171,26 @@
/* bandgap reset is needed every time we power gate */
band_gap_reset(dev_priv);
- I915_WRITE(MIPI_DEVICE_READY(pipe), ULPS_STATE_ENTER);
- usleep_range(2500, 3000);
+ for_each_dsi_port(port, intel_dsi->ports) {
- val = I915_READ(MIPI_PORT_CTRL(pipe));
- I915_WRITE(MIPI_PORT_CTRL(pipe), val | LP_OUTPUT_HOLD);
- usleep_range(1000, 1500);
+ I915_WRITE(MIPI_DEVICE_READY(port), ULPS_STATE_ENTER);
+ usleep_range(2500, 3000);
- I915_WRITE(MIPI_DEVICE_READY(pipe), ULPS_STATE_EXIT);
- usleep_range(2500, 3000);
+ val = I915_READ(MIPI_PORT_CTRL(port));
- I915_WRITE(MIPI_DEVICE_READY(pipe), DEVICE_READY);
- usleep_range(2500, 3000);
+ /* Enable MIPI PHY transparent latch.
+ * The bit lives in the MIPI Port A register but is common
+ * to both Port A and Port C; Port C has no equivalent bit.
+ */
+ I915_WRITE(MIPI_PORT_CTRL(PORT_A), val | LP_OUTPUT_HOLD);
+ usleep_range(1000, 1500);
+
+ I915_WRITE(MIPI_DEVICE_READY(port), ULPS_STATE_EXIT);
+ usleep_range(2500, 3000);
+
+ I915_WRITE(MIPI_DEVICE_READY(port), DEVICE_READY);
+ usleep_range(2500, 3000);
+ }
}
static void intel_dsi_enable(struct intel_encoder *encoder)
@@ -140,13 +199,12 @@
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
- int pipe = intel_crtc->pipe;
- u32 temp;
+ enum port port = intel_dsi_pipe_to_port(intel_crtc->pipe);
DRM_DEBUG_KMS("\n");
if (is_cmd_mode(intel_dsi))
- I915_WRITE(MIPI_MAX_RETURN_PKT_SIZE(pipe), 8 * 4);
+ I915_WRITE(MIPI_MAX_RETURN_PKT_SIZE(port), 8 * 4);
else {
msleep(20); /* XXX */
dpi_send_cmd(intel_dsi, TURN_ON, DPI_LP_MODE_EN);
@@ -157,11 +215,7 @@
wait_for_dsi_fifo_empty(intel_dsi);
- /* assert ip_tg_enable signal */
- temp = I915_READ(MIPI_PORT_CTRL(pipe)) & ~LANE_CONFIGURATION_MASK;
- temp = temp | intel_dsi->port_bits;
- I915_WRITE(MIPI_PORT_CTRL(pipe), temp | DPI_ENABLE);
- POSTING_READ(MIPI_PORT_CTRL(pipe));
+ intel_dsi_port_enable(encoder);
}
}
@@ -235,9 +289,8 @@
{
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
- int pipe = intel_crtc->pipe;
+ enum port port;
u32 temp;
DRM_DEBUG_KMS("\n");
@@ -245,31 +298,28 @@
if (is_vid_mode(intel_dsi)) {
wait_for_dsi_fifo_empty(intel_dsi);
- /* de-assert ip_tg_enable signal */
- temp = I915_READ(MIPI_PORT_CTRL(pipe));
- I915_WRITE(MIPI_PORT_CTRL(pipe), temp & ~DPI_ENABLE);
- POSTING_READ(MIPI_PORT_CTRL(pipe));
-
+ intel_dsi_port_disable(encoder);
msleep(2);
}
- /* Panel commands can be sent when clock is in LP11 */
- I915_WRITE(MIPI_DEVICE_READY(pipe), 0x0);
+ for_each_dsi_port(port, intel_dsi->ports) {
+ /* Panel commands can be sent when clock is in LP11 */
+ I915_WRITE(MIPI_DEVICE_READY(port), 0x0);
- temp = I915_READ(MIPI_CTRL(pipe));
- temp &= ~ESCAPE_CLOCK_DIVIDER_MASK;
- I915_WRITE(MIPI_CTRL(pipe), temp |
- intel_dsi->escape_clk_div <<
- ESCAPE_CLOCK_DIVIDER_SHIFT);
+ temp = I915_READ(MIPI_CTRL(port));
+ temp &= ~ESCAPE_CLOCK_DIVIDER_MASK;
+ I915_WRITE(MIPI_CTRL(port), temp |
+ intel_dsi->escape_clk_div <<
+ ESCAPE_CLOCK_DIVIDER_SHIFT);
- I915_WRITE(MIPI_EOT_DISABLE(pipe), CLOCKSTOP);
+ I915_WRITE(MIPI_EOT_DISABLE(port), CLOCKSTOP);
- temp = I915_READ(MIPI_DSI_FUNC_PRG(pipe));
- temp &= ~VID_MODE_FORMAT_MASK;
- I915_WRITE(MIPI_DSI_FUNC_PRG(pipe), temp);
+ temp = I915_READ(MIPI_DSI_FUNC_PRG(port));
+ temp &= ~VID_MODE_FORMAT_MASK;
+ I915_WRITE(MIPI_DSI_FUNC_PRG(port), temp);
- I915_WRITE(MIPI_DEVICE_READY(pipe), 0x1);
-
+ I915_WRITE(MIPI_DEVICE_READY(port), 0x1);
+ }
/* if disable packets are sent before the shutdown packet, a "send
* turn on packet" error is observed during the next enable sequence */
if (intel_dsi->dev.dev_ops->disable)
@@ -281,31 +331,42 @@
static void intel_dsi_clear_device_ready(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
- struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
- int pipe = intel_crtc->pipe;
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ enum port port;
u32 val;
DRM_DEBUG_KMS("\n");
+ for_each_dsi_port(port, intel_dsi->ports) {
- I915_WRITE(MIPI_DEVICE_READY(pipe), DEVICE_READY | ULPS_STATE_ENTER);
- usleep_range(2000, 2500);
+ I915_WRITE(MIPI_DEVICE_READY(port), DEVICE_READY |
+ ULPS_STATE_ENTER);
+ usleep_range(2000, 2500);
- I915_WRITE(MIPI_DEVICE_READY(pipe), DEVICE_READY | ULPS_STATE_EXIT);
- usleep_range(2000, 2500);
+ I915_WRITE(MIPI_DEVICE_READY(port), DEVICE_READY |
+ ULPS_STATE_EXIT);
+ usleep_range(2000, 2500);
- I915_WRITE(MIPI_DEVICE_READY(pipe), DEVICE_READY | ULPS_STATE_ENTER);
- usleep_range(2000, 2500);
+ I915_WRITE(MIPI_DEVICE_READY(port), DEVICE_READY |
+ ULPS_STATE_ENTER);
+ usleep_range(2000, 2500);
- if (wait_for(((I915_READ(MIPI_PORT_CTRL(pipe)) & AFE_LATCHOUT)
- == 0x00000), 30))
- DRM_ERROR("DSI LP not going Low\n");
+ /* Wait until the clock lanes are in the LP-00 state; only
+ * MIPI Port A can be checked, Port C has no similar bit.
+ */
+ if (wait_for(((I915_READ(MIPI_PORT_CTRL(PORT_A)) & AFE_LATCHOUT)
+ == 0x00000), 30))
+ DRM_ERROR("DSI LP not going Low\n");
- val = I915_READ(MIPI_PORT_CTRL(pipe));
- I915_WRITE(MIPI_PORT_CTRL(pipe), val & ~LP_OUTPUT_HOLD);
- usleep_range(1000, 1500);
+ val = I915_READ(MIPI_PORT_CTRL(port));
+ /* Disable MIPI PHY transparent latch; the bit in the Port A
+ * register is common to both MIPI Port A and MIPI Port C.
+ */
+ I915_WRITE(MIPI_PORT_CTRL(PORT_A), val & ~LP_OUTPUT_HOLD);
+ usleep_range(1000, 1500);
- I915_WRITE(MIPI_DEVICE_READY(pipe), 0x00);
- usleep_range(2000, 2500);
+ I915_WRITE(MIPI_DEVICE_READY(port), 0x00);
+ usleep_range(2000, 2500);
+ }
vlv_disable_dsi_pll(encoder);
}
@@ -337,9 +398,11 @@
enum pipe *pipe)
{
struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ struct drm_device *dev = encoder->base.dev;
enum intel_display_power_domain power_domain;
- u32 port, func;
- enum pipe p;
+ u32 dpi_enabled, func;
+ enum port port;
DRM_DEBUG_KMS("\n");
@@ -348,13 +411,23 @@
return false;
/* XXX: this only works for one DSI output */
- for (p = PIPE_A; p <= PIPE_B; p++) {
- port = I915_READ(MIPI_PORT_CTRL(p));
- func = I915_READ(MIPI_DSI_FUNC_PRG(p));
+ for_each_dsi_port(port, intel_dsi->ports) {
+ func = I915_READ(MIPI_DSI_FUNC_PRG(port));
+ dpi_enabled = I915_READ(MIPI_PORT_CTRL(port)) &
+ DPI_ENABLE;
- if ((port & DPI_ENABLE) || (func & CMD_MODE_DATA_WIDTH_MASK)) {
- if (I915_READ(MIPI_DEVICE_READY(p)) & DEVICE_READY) {
- *pipe = p;
+ /* Due to some hardware limitations on BYT, MIPI Port C DPI
+ * Enable bit does not get set. To check whether DSI Port C
+ * was enabled in BIOS, check the Pipe B enable bit
+ */
+ if (IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev) &&
+ (port == PORT_C))
+ dpi_enabled = I915_READ(PIPECONF(PIPE_B)) &
+ PIPECONF_ENABLE;
+
+ if (dpi_enabled || (func & CMD_MODE_DATA_WIDTH_MASK)) {
+ if (I915_READ(MIPI_DEVICE_READY(port)) & DEVICE_READY) {
+ *pipe = port == PORT_A ? PIPE_A : PIPE_B;
return true;
}
}
@@ -437,7 +510,7 @@
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
- int pipe = intel_crtc->pipe;
+ enum port port;
unsigned int bpp = intel_crtc->config.pipe_bpp;
unsigned int lane_count = intel_dsi->lane_count;
@@ -448,6 +521,15 @@
hsync = mode->hsync_end - mode->hsync_start;
hbp = mode->htotal - mode->hsync_end;
+ if (intel_dsi->dual_link) {
+ hactive /= 2;
+ if (intel_dsi->dual_link == DSI_DUAL_LINK_FRONT_BACK)
+ hactive += intel_dsi->pixel_overlap;
+ hfp /= 2;
+ hsync /= 2;
+ hbp /= 2;
+ }
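In dual-link mode each MIPI port drives half of the horizontal timing, and front-back mode widens each half by the pixel overlap. A worked example with assumed numbers:

    /* 1920-wide mode, front-back dual link, pixel_overlap == 4:
     * each port is programmed with hactive = 1920 / 2 + 4 = 964,
     * while hfp, hsync and hbp are simply halved. */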
+
vfp = mode->vsync_start - mode->vdisplay;
vsync = mode->vsync_end - mode->vsync_start;
vbp = mode->vtotal - mode->vsync_end;
@@ -460,18 +542,20 @@
intel_dsi->burst_mode_ratio);
hbp = txbyteclkhs(hbp, bpp, lane_count, intel_dsi->burst_mode_ratio);
- I915_WRITE(MIPI_HACTIVE_AREA_COUNT(pipe), hactive);
- I915_WRITE(MIPI_HFP_COUNT(pipe), hfp);
+ for_each_dsi_port(port, intel_dsi->ports) {
+ I915_WRITE(MIPI_HACTIVE_AREA_COUNT(port), hactive);
+ I915_WRITE(MIPI_HFP_COUNT(port), hfp);
- /* meaningful for video mode non-burst sync pulse mode only, can be zero
- * for non-burst sync events and burst modes */
- I915_WRITE(MIPI_HSYNC_PADDING_COUNT(pipe), hsync);
- I915_WRITE(MIPI_HBP_COUNT(pipe), hbp);
+ /* meaningful for video mode non-burst sync pulse mode only,
+ * can be zero for non-burst sync events and burst modes */
+ I915_WRITE(MIPI_HSYNC_PADDING_COUNT(port), hsync);
+ I915_WRITE(MIPI_HBP_COUNT(port), hbp);
- /* vertical values are in terms of lines */
- I915_WRITE(MIPI_VFP_COUNT(pipe), vfp);
- I915_WRITE(MIPI_VSYNC_PADDING_COUNT(pipe), vsync);
- I915_WRITE(MIPI_VBP_COUNT(pipe), vbp);
+ /* vertical values are in terms of lines */
+ I915_WRITE(MIPI_VFP_COUNT(port), vfp);
+ I915_WRITE(MIPI_VSYNC_PADDING_COUNT(port), vsync);
+ I915_WRITE(MIPI_VBP_COUNT(port), vbp);
+ }
}
static void intel_dsi_prepare(struct intel_encoder *intel_encoder)
@@ -483,32 +567,43 @@
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
struct drm_display_mode *adjusted_mode =
&intel_crtc->config.adjusted_mode;
- int pipe = intel_crtc->pipe;
+ enum port port;
unsigned int bpp = intel_crtc->config.pipe_bpp;
u32 val, tmp;
+ u16 mode_hdisplay;
- DRM_DEBUG_KMS("pipe %c\n", pipe_name(pipe));
+ DRM_DEBUG_KMS("pipe %c\n", pipe_name(intel_crtc->pipe));
- /* escape clock divider, 20MHz, shared for A and C. device ready must be
- * off when doing this! txclkesc? */
- tmp = I915_READ(MIPI_CTRL(0));
- tmp &= ~ESCAPE_CLOCK_DIVIDER_MASK;
- I915_WRITE(MIPI_CTRL(0), tmp | ESCAPE_CLOCK_DIVIDER_1);
+ mode_hdisplay = adjusted_mode->hdisplay;
- /* read request priority is per pipe */
- tmp = I915_READ(MIPI_CTRL(pipe));
- tmp &= ~READ_REQUEST_PRIORITY_MASK;
- I915_WRITE(MIPI_CTRL(pipe), tmp | READ_REQUEST_PRIORITY_HIGH);
+ if (intel_dsi->dual_link) {
+ mode_hdisplay /= 2;
+ if (intel_dsi->dual_link == DSI_DUAL_LINK_FRONT_BACK)
+ mode_hdisplay += intel_dsi->pixel_overlap;
+ }
- /* XXX: why here, why like this? handling in irq handler?! */
- I915_WRITE(MIPI_INTR_STAT(pipe), 0xffffffff);
- I915_WRITE(MIPI_INTR_EN(pipe), 0xffffffff);
+ for_each_dsi_port(port, intel_dsi->ports) {
+ /* escape clock divider, 20MHz, shared for A and C.
+ * device ready must be off when doing this! txclkesc? */
+ tmp = I915_READ(MIPI_CTRL(PORT_A));
+ tmp &= ~ESCAPE_CLOCK_DIVIDER_MASK;
+ I915_WRITE(MIPI_CTRL(PORT_A), tmp | ESCAPE_CLOCK_DIVIDER_1);
- I915_WRITE(MIPI_DPHY_PARAM(pipe), intel_dsi->dphy_reg);
+ /* read request priority is per pipe */
+ tmp = I915_READ(MIPI_CTRL(port));
+ tmp &= ~READ_REQUEST_PRIORITY_MASK;
+ I915_WRITE(MIPI_CTRL(port), tmp | READ_REQUEST_PRIORITY_HIGH);
- I915_WRITE(MIPI_DPI_RESOLUTION(pipe),
- adjusted_mode->vdisplay << VERTICAL_ADDRESS_SHIFT |
- adjusted_mode->hdisplay << HORIZONTAL_ADDRESS_SHIFT);
+ /* XXX: why here, why like this? handling in irq handler?! */
+ I915_WRITE(MIPI_INTR_STAT(port), 0xffffffff);
+ I915_WRITE(MIPI_INTR_EN(port), 0xffffffff);
+
+ I915_WRITE(MIPI_DPHY_PARAM(port), intel_dsi->dphy_reg);
+
+ I915_WRITE(MIPI_DPI_RESOLUTION(port),
+ adjusted_mode->vdisplay << VERTICAL_ADDRESS_SHIFT |
+ mode_hdisplay << HORIZONTAL_ADDRESS_SHIFT);
+ }
set_dsi_timings(encoder, adjusted_mode);
@@ -522,95 +617,102 @@
/* XXX: cross-check bpp vs. pixel format? */
val |= intel_dsi->pixel_format;
}
- I915_WRITE(MIPI_DSI_FUNC_PRG(pipe), val);
- /* timeouts for recovery. one frame IIUC. if counter expires, EOT and
- * stop state. */
-
- /*
- * In burst mode, value greater than one DPI line Time in byte clock
- * (txbyteclkhs) To timeout this timer 1+ of the above said value is
- * recommended.
- *
- * In non-burst mode, Value greater than one DPI frame time in byte
- * clock(txbyteclkhs) To timeout this timer 1+ of the above said value
- * is recommended.
- *
- * In DBI only mode, value greater than one DBI frame time in byte
- * clock(txbyteclkhs) To timeout this timer 1+ of the above said value
- * is recommended.
- */
-
- if (is_vid_mode(intel_dsi) &&
- intel_dsi->video_mode_format == VIDEO_MODE_BURST) {
- I915_WRITE(MIPI_HS_TX_TIMEOUT(pipe),
- txbyteclkhs(adjusted_mode->htotal, bpp,
- intel_dsi->lane_count,
- intel_dsi->burst_mode_ratio) + 1);
- } else {
- I915_WRITE(MIPI_HS_TX_TIMEOUT(pipe),
- txbyteclkhs(adjusted_mode->vtotal *
- adjusted_mode->htotal,
- bpp, intel_dsi->lane_count,
- intel_dsi->burst_mode_ratio) + 1);
- }
- I915_WRITE(MIPI_LP_RX_TIMEOUT(pipe), intel_dsi->lp_rx_timeout);
- I915_WRITE(MIPI_TURN_AROUND_TIMEOUT(pipe), intel_dsi->turn_arnd_val);
- I915_WRITE(MIPI_DEVICE_RESET_TIMER(pipe), intel_dsi->rst_timer_val);
-
- /* dphy stuff */
-
- /* in terms of low power clock */
- I915_WRITE(MIPI_INIT_COUNT(pipe), txclkesc(intel_dsi->escape_clk_div, 100));
-
- val = 0;
+ tmp = 0;
if (intel_dsi->eotp_pkt == 0)
- val |= EOT_DISABLE;
-
+ tmp |= EOT_DISABLE;
if (intel_dsi->clock_stop)
- val |= CLOCKSTOP;
+ tmp |= CLOCKSTOP;
- /* recovery disables */
- I915_WRITE(MIPI_EOT_DISABLE(pipe), val);
+ for_each_dsi_port(port, intel_dsi->ports) {
+ I915_WRITE(MIPI_DSI_FUNC_PRG(port), val);
- /* in terms of low power clock */
- I915_WRITE(MIPI_INIT_COUNT(pipe), intel_dsi->init_count);
+ /* timeouts for recovery. one frame IIUC. if counter expires,
+ * EOT and stop state. */
- /* in terms of txbyteclkhs. actual high to low switch +
- * MIPI_STOP_STATE_STALL * MIPI_LP_BYTECLK.
- *
- * XXX: write MIPI_STOP_STATE_STALL?
- */
- I915_WRITE(MIPI_HIGH_LOW_SWITCH_COUNT(pipe),
- intel_dsi->hs_to_lp_count);
+ /*
+ * In burst mode, the timeout must exceed one DPI line time in
+ * byte clocks (txbyteclkhs); programming that value + 1 is
+ * recommended.
+ *
+ * In non-burst mode, it must exceed one DPI frame time in byte
+ * clocks (txbyteclkhs), again programmed as value + 1.
+ *
+ * In DBI-only mode, it must exceed one DBI frame time in byte
+ * clocks (txbyteclkhs), again programmed as value + 1.
+ */
- /* XXX: low power clock equivalence in terms of byte clock. the number
- * of byte clocks occupied in one low power clock. based on txbyteclkhs
- * and txclkesc. txclkesc time / txbyteclk time * (105 +
- * MIPI_STOP_STATE_STALL) / 105.???
- */
- I915_WRITE(MIPI_LP_BYTECLK(pipe), intel_dsi->lp_byte_clk);
+ if (is_vid_mode(intel_dsi) &&
+ intel_dsi->video_mode_format == VIDEO_MODE_BURST) {
+ I915_WRITE(MIPI_HS_TX_TIMEOUT(port),
+ txbyteclkhs(adjusted_mode->htotal, bpp,
+ intel_dsi->lane_count,
+ intel_dsi->burst_mode_ratio) + 1);
+ } else {
+ I915_WRITE(MIPI_HS_TX_TIMEOUT(port),
+ txbyteclkhs(adjusted_mode->vtotal *
+ adjusted_mode->htotal,
+ bpp, intel_dsi->lane_count,
+ intel_dsi->burst_mode_ratio) + 1);
+ }
+ I915_WRITE(MIPI_LP_RX_TIMEOUT(port), intel_dsi->lp_rx_timeout);
+ I915_WRITE(MIPI_TURN_AROUND_TIMEOUT(port),
+ intel_dsi->turn_arnd_val);
+ I915_WRITE(MIPI_DEVICE_RESET_TIMER(port),
+ intel_dsi->rst_timer_val);
- /* the bw essential for transmitting 16 long packets containing 252
- * bytes meant for dcs write memory command is programmed in this
- * register in terms of byte clocks. based on dsi transfer rate and the
- * number of lanes configured the time taken to transmit 16 long packets
- * in a dsi stream varies. */
- I915_WRITE(MIPI_DBI_BW_CTRL(pipe), intel_dsi->bw_timer);
+ /* dphy stuff */
- I915_WRITE(MIPI_CLK_LANE_SWITCH_TIME_CNT(pipe),
- intel_dsi->clk_lp_to_hs_count << LP_HS_SSW_CNT_SHIFT |
- intel_dsi->clk_hs_to_lp_count << HS_LP_PWR_SW_CNT_SHIFT);
+ /* in terms of low power clock */
+ I915_WRITE(MIPI_INIT_COUNT(port),
+ txclkesc(intel_dsi->escape_clk_div, 100));
- if (is_vid_mode(intel_dsi))
- /* Some panels might have resolution which is not a multiple of
- * 64 like 1366 x 768. Enable RANDOM resolution support for such
- * panels by default */
- I915_WRITE(MIPI_VIDEO_MODE_FORMAT(pipe),
- intel_dsi->video_frmt_cfg_bits |
- intel_dsi->video_mode_format |
- IP_TG_CONFIG |
- RANDOM_DPI_DISPLAY_RESOLUTION);
+
+ /* recovery disables */
+ I915_WRITE(MIPI_EOT_DISABLE(port), tmp);
+
+ /* in terms of low power clock */
+ I915_WRITE(MIPI_INIT_COUNT(port), intel_dsi->init_count);
+
+ /* in terms of txbyteclkhs. actual high to low switch +
+ * MIPI_STOP_STATE_STALL * MIPI_LP_BYTECLK.
+ *
+ * XXX: write MIPI_STOP_STATE_STALL?
+ */
+ I915_WRITE(MIPI_HIGH_LOW_SWITCH_COUNT(port),
+ intel_dsi->hs_to_lp_count);
+
+ /* XXX: low power clock equivalence in terms of byte clock.
+ * the number of byte clocks occupied in one low power clock.
+ * based on txbyteclkhs and txclkesc.
+ * txclkesc time / txbyteclk time *
+ * (105 + MIPI_STOP_STATE_STALL) / 105.???
+ */
+ I915_WRITE(MIPI_LP_BYTECLK(port), intel_dsi->lp_byte_clk);
+
+ /* the bandwidth needed to transmit 16 long packets of 252
+ * bytes each (for the dcs write memory command) is programmed
+ * in this register in terms of byte clocks. the time taken to
+ * transmit those 16 long packets in a dsi stream varies with
+ * the dsi transfer rate and the number of configured lanes. */
+ I915_WRITE(MIPI_DBI_BW_CTRL(port), intel_dsi->bw_timer);
+
+ I915_WRITE(MIPI_CLK_LANE_SWITCH_TIME_CNT(port),
+ intel_dsi->clk_lp_to_hs_count << LP_HS_SSW_CNT_SHIFT |
+ intel_dsi->clk_hs_to_lp_count << HS_LP_PWR_SW_CNT_SHIFT);
+
+ if (is_vid_mode(intel_dsi))
+ /* Some panels might have resolution which is not a
+ * multiple of 64 like 1366 x 768. Enable RANDOM
+ * resolution support for such panels by default */
+ I915_WRITE(MIPI_VIDEO_MODE_FORMAT(port),
+ intel_dsi->video_frmt_cfg_bits |
+ intel_dsi->video_mode_format |
+ IP_TG_CONFIG |
+ RANDOM_DPI_DISPLAY_RESOLUTION);
+ }
}
static void intel_dsi_pre_pll_enable(struct intel_encoder *encoder)
@@ -748,6 +850,15 @@
intel_connector->get_hw_state = intel_connector_get_hw_state;
intel_connector->unregister = intel_connector_unregister;
+ /* Pipe A maps to MIPI DSI port A, pipe B maps to MIPI DSI port C */
+ if (dev_priv->vbt.dsi.port == DVO_PORT_MIPIA) {
+ intel_encoder->crtc_mask = (1 << PIPE_A);
+ intel_dsi->ports = (1 << PORT_A);
+ } else if (dev_priv->vbt.dsi.port == DVO_PORT_MIPIC) {
+ intel_encoder->crtc_mask = (1 << PIPE_B);
+ intel_dsi->ports = (1 << PORT_C);
+ }
+
for (i = 0; i < ARRAY_SIZE(intel_dsi_devices); i++) {
dsi = &intel_dsi_devices[i];
intel_dsi->dev = *dsi;
@@ -762,8 +873,6 @@
}
intel_encoder->type = INTEL_OUTPUT_DSI;
- intel_encoder->crtc_mask = (1 << 0); /* XXX */
-
intel_encoder->cloneable = 0;
drm_connector_init(dev, connector, &intel_dsi_connector_funcs,
DRM_MODE_CONNECTOR_DSI);
diff --git a/drivers/gpu/drm/i915/intel_dsi.h b/drivers/gpu/drm/i915/intel_dsi.h
index 657eb5c..8fe2064 100644
--- a/drivers/gpu/drm/i915/intel_dsi.h
+++ b/drivers/gpu/drm/i915/intel_dsi.h
@@ -28,6 +28,11 @@
#include <drm/drm_crtc.h>
#include "intel_drv.h"
+/* Dual Link support */
+#define DSI_DUAL_LINK_NONE 0
+#define DSI_DUAL_LINK_FRONT_BACK 1
+#define DSI_DUAL_LINK_PIXEL_ALT 2
+
struct intel_dsi_device {
unsigned int panel_id;
const char *name;
@@ -78,6 +83,9 @@
struct intel_connector *attached_connector;
+ /* bit mask of ports being driven */
+ u16 ports;
+
/* if true, use HS mode, otherwise LP */
bool hs;
@@ -101,6 +109,8 @@
u8 clock_stop;
u8 escape_clk_div;
+ u8 dual_link;
+ u8 pixel_overlap;
u32 port_bits;
u32 bw_timer;
u32 dphy_reg;
@@ -127,6 +137,22 @@
u16 panel_pwr_cycle_delay;
};
+/* XXX: Transitional before dual port configuration */
+static inline enum port intel_dsi_pipe_to_port(enum pipe pipe)
+{
+ if (pipe == PIPE_A)
+ return PORT_A;
+ else if (pipe == PIPE_B)
+ return PORT_C;
+
+ WARN(1, "DSI on pipe %c, assuming port C\n", pipe_name(pipe));
+ return PORT_C;
+}
+
+#define for_each_dsi_port(__port, __ports_mask) \
+ for ((__port) = PORT_A; (__port) < I915_MAX_PORTS; (__port)++) \
+ if ((__ports_mask) & (1 << (__port)))
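The iterator expands to a for loop over all ports, filtered by the bitmask, so dual-link configurations visit both ports in a single pass. A minimal usage sketch (pattern taken from this patch):

    enum port port;

    for_each_dsi_port(port, intel_dsi->ports)
            I915_WRITE(MIPI_DEVICE_READY(port), DEVICE_READY);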
+
static inline struct intel_dsi *enc_to_intel_dsi(struct drm_encoder *encoder)
{
return container_of(encoder, struct intel_dsi, base.base);
diff --git a/drivers/gpu/drm/i915/intel_dsi_cmd.c b/drivers/gpu/drm/i915/intel_dsi_cmd.c
index f4767fd..562811c 100644
--- a/drivers/gpu/drm/i915/intel_dsi_cmd.c
+++ b/drivers/gpu/drm/i915/intel_dsi_cmd.c
@@ -48,21 +48,19 @@
* For memory writes, these should probably be used for performance.
*/
-static void print_stat(struct intel_dsi *intel_dsi)
+static void print_stat(struct intel_dsi *intel_dsi, enum port port)
{
struct drm_encoder *encoder = &intel_dsi->base.base;
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
- enum pipe pipe = intel_crtc->pipe;
u32 val;
- val = I915_READ(MIPI_INTR_STAT(pipe));
+ val = I915_READ(MIPI_INTR_STAT(port));
#define STAT_BIT(val, bit) (val) & (bit) ? " " #bit : ""
- DRM_DEBUG_KMS("MIPI_INTR_STAT(%d) = %08x"
+ DRM_DEBUG_KMS("MIPI_INTR_STAT(%c) = %08x"
"%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s"
- "\n", pipe, val,
+ "\n", port_name(port), val,
STAT_BIT(val, TEARING_EFFECT),
STAT_BIT(val, SPL_PKT_SENT_INTERRUPT),
STAT_BIT(val, GEN_READ_DATA_AVAIL),
@@ -104,34 +102,31 @@
};
/* enable or disable command mode hs transmissions */
-void dsi_hs_mode_enable(struct intel_dsi *intel_dsi, bool enable)
+void dsi_hs_mode_enable(struct intel_dsi *intel_dsi, bool enable,
+ enum port port)
{
struct drm_encoder *encoder = &intel_dsi->base.base;
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
- enum pipe pipe = intel_crtc->pipe;
u32 temp;
u32 mask = DBI_FIFO_EMPTY;
- if (wait_for((I915_READ(MIPI_GEN_FIFO_STAT(pipe)) & mask) == mask, 50))
+ if (wait_for((I915_READ(MIPI_GEN_FIFO_STAT(port)) & mask) == mask, 50))
DRM_ERROR("Timeout waiting for DBI FIFO empty\n");
- temp = I915_READ(MIPI_HS_LP_DBI_ENABLE(pipe));
+ temp = I915_READ(MIPI_HS_LP_DBI_ENABLE(port));
temp &= DBI_HS_LP_MODE_MASK;
- I915_WRITE(MIPI_HS_LP_DBI_ENABLE(pipe), enable ? DBI_HS_MODE : DBI_LP_MODE);
+ I915_WRITE(MIPI_HS_LP_DBI_ENABLE(port), enable ? DBI_HS_MODE : DBI_LP_MODE);
intel_dsi->hs = enable;
}
static int dsi_vc_send_short(struct intel_dsi *intel_dsi, int channel,
- u8 data_type, u16 data)
+ u8 data_type, u16 data, enum port port)
{
struct drm_encoder *encoder = &intel_dsi->base.base;
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
- enum pipe pipe = intel_crtc->pipe;
u32 ctrl_reg;
u32 ctrl;
u32 mask;
@@ -140,16 +135,16 @@
channel, data_type, data);
if (intel_dsi->hs) {
- ctrl_reg = MIPI_HS_GEN_CTRL(pipe);
+ ctrl_reg = MIPI_HS_GEN_CTRL(port);
mask = HS_CTRL_FIFO_FULL;
} else {
- ctrl_reg = MIPI_LP_GEN_CTRL(pipe);
+ ctrl_reg = MIPI_LP_GEN_CTRL(port);
mask = LP_CTRL_FIFO_FULL;
}
- if (wait_for((I915_READ(MIPI_GEN_FIFO_STAT(pipe)) & mask) == 0, 50)) {
+ if (wait_for((I915_READ(MIPI_GEN_FIFO_STAT(port)) & mask) == 0, 50)) {
DRM_ERROR("Timeout waiting for HS/LP CTRL FIFO !full\n");
- print_stat(intel_dsi);
+ print_stat(intel_dsi, port);
}
/*
@@ -167,13 +162,11 @@
}
static int dsi_vc_send_long(struct intel_dsi *intel_dsi, int channel,
- u8 data_type, const u8 *data, int len)
+ u8 data_type, const u8 *data, int len, enum port port)
{
struct drm_encoder *encoder = &intel_dsi->base.base;
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
- enum pipe pipe = intel_crtc->pipe;
u32 data_reg;
int i, j, n;
u32 mask;
@@ -182,14 +175,14 @@
channel, data_type, len);
if (intel_dsi->hs) {
- data_reg = MIPI_HS_GEN_DATA(pipe);
+ data_reg = MIPI_HS_GEN_DATA(port);
mask = HS_DATA_FIFO_FULL;
} else {
- data_reg = MIPI_LP_GEN_DATA(pipe);
+ data_reg = MIPI_LP_GEN_DATA(port);
mask = LP_DATA_FIFO_FULL;
}
- if (wait_for((I915_READ(MIPI_GEN_FIFO_STAT(pipe)) & mask) == 0, 50))
+ if (wait_for((I915_READ(MIPI_GEN_FIFO_STAT(port)) & mask) == 0, 50))
DRM_ERROR("Timeout waiting for HS/LP DATA FIFO !full\n");
for (i = 0; i < len; i += n) {
@@ -204,12 +197,12 @@
* dwords, then wait for not set, then continue. */
}
- return dsi_vc_send_short(intel_dsi, channel, data_type, len);
+ return dsi_vc_send_short(intel_dsi, channel, data_type, len, port);
}
static int dsi_vc_write_common(struct intel_dsi *intel_dsi,
int channel, const u8 *data, int len,
- enum dsi_type type)
+ enum dsi_type type, enum port port)
{
int ret;
@@ -217,50 +210,54 @@
BUG_ON(type == DSI_GENERIC);
ret = dsi_vc_send_short(intel_dsi, channel,
MIPI_DSI_GENERIC_SHORT_WRITE_0_PARAM,
- 0);
+ 0, port);
} else if (len == 1) {
ret = dsi_vc_send_short(intel_dsi, channel,
type == DSI_GENERIC ?
MIPI_DSI_GENERIC_SHORT_WRITE_1_PARAM :
- MIPI_DSI_DCS_SHORT_WRITE, data[0]);
+ MIPI_DSI_DCS_SHORT_WRITE, data[0],
+ port);
} else if (len == 2) {
ret = dsi_vc_send_short(intel_dsi, channel,
type == DSI_GENERIC ?
MIPI_DSI_GENERIC_SHORT_WRITE_2_PARAM :
MIPI_DSI_DCS_SHORT_WRITE_PARAM,
- (data[1] << 8) | data[0]);
+ (data[1] << 8) | data[0], port);
} else {
ret = dsi_vc_send_long(intel_dsi, channel,
- type == DSI_GENERIC ?
- MIPI_DSI_GENERIC_LONG_WRITE :
- MIPI_DSI_DCS_LONG_WRITE, data, len);
+ type == DSI_GENERIC ?
+ MIPI_DSI_GENERIC_LONG_WRITE :
+ MIPI_DSI_DCS_LONG_WRITE, data, len,
+ port);
}
return ret;
}
int dsi_vc_dcs_write(struct intel_dsi *intel_dsi, int channel,
- const u8 *data, int len)
+ const u8 *data, int len, enum port port)
{
- return dsi_vc_write_common(intel_dsi, channel, data, len, DSI_DCS);
+ return dsi_vc_write_common(intel_dsi, channel, data, len, DSI_DCS,
+ port);
}
int dsi_vc_generic_write(struct intel_dsi *intel_dsi, int channel,
- const u8 *data, int len)
+ const u8 *data, int len, enum port port)
{
- return dsi_vc_write_common(intel_dsi, channel, data, len, DSI_GENERIC);
+ return dsi_vc_write_common(intel_dsi, channel, data, len, DSI_GENERIC,
+ port);
}
static int dsi_vc_dcs_send_read_request(struct intel_dsi *intel_dsi,
- int channel, u8 dcs_cmd)
+ int channel, u8 dcs_cmd, enum port port)
{
return dsi_vc_send_short(intel_dsi, channel, MIPI_DSI_DCS_READ,
- dcs_cmd);
+ dcs_cmd, port);
}
static int dsi_vc_generic_send_read_request(struct intel_dsi *intel_dsi,
int channel, u8 *reqdata,
- int reqlen)
+ int reqlen, enum port port)
{
u16 data;
u8 data_type;
@@ -282,24 +279,22 @@
BUG();
}
- return dsi_vc_send_short(intel_dsi, channel, data_type, data);
+ return dsi_vc_send_short(intel_dsi, channel, data_type, data, port);
}
static int dsi_read_data_return(struct intel_dsi *intel_dsi,
- u8 *buf, int buflen)
+ u8 *buf, int buflen, enum port port)
{
struct drm_encoder *encoder = &intel_dsi->base.base;
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
- enum pipe pipe = intel_crtc->pipe;
int i, len = 0;
u32 data_reg, val;
if (intel_dsi->hs) {
- data_reg = MIPI_HS_GEN_DATA(pipe);
+ data_reg = MIPI_HS_GEN_DATA(port);
} else {
- data_reg = MIPI_LP_GEN_DATA(pipe);
+ data_reg = MIPI_LP_GEN_DATA(port);
}
while (len < buflen) {
@@ -312,13 +307,11 @@
}
int dsi_vc_dcs_read(struct intel_dsi *intel_dsi, int channel, u8 dcs_cmd,
- u8 *buf, int buflen)
+ u8 *buf, int buflen, enum port port)
{
struct drm_encoder *encoder = &intel_dsi->base.base;
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
- enum pipe pipe = intel_crtc->pipe;
u32 mask;
int ret;
@@ -327,17 +320,17 @@
* longer than MIPI_MAX_RETURN_PKT_SIZE
*/
- I915_WRITE(MIPI_INTR_STAT(pipe), GEN_READ_DATA_AVAIL);
+ I915_WRITE(MIPI_INTR_STAT(port), GEN_READ_DATA_AVAIL);
- ret = dsi_vc_dcs_send_read_request(intel_dsi, channel, dcs_cmd);
+ ret = dsi_vc_dcs_send_read_request(intel_dsi, channel, dcs_cmd, port);
if (ret)
return ret;
mask = GEN_READ_DATA_AVAIL;
- if (wait_for((I915_READ(MIPI_INTR_STAT(pipe)) & mask) == mask, 50))
+ if (wait_for((I915_READ(MIPI_INTR_STAT(port)) & mask) == mask, 50))
DRM_ERROR("Timeout waiting for read data.\n");
- ret = dsi_read_data_return(intel_dsi, buf, buflen);
+ ret = dsi_read_data_return(intel_dsi, buf, buflen, port);
if (ret < 0)
return ret;
@@ -348,13 +341,11 @@
}
int dsi_vc_generic_read(struct intel_dsi *intel_dsi, int channel,
- u8 *reqdata, int reqlen, u8 *buf, int buflen)
+ u8 *reqdata, int reqlen, u8 *buf, int buflen, enum port port)
{
struct drm_encoder *encoder = &intel_dsi->base.base;
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
- enum pipe pipe = intel_crtc->pipe;
u32 mask;
int ret;
@@ -363,18 +354,18 @@
* longer than MIPI_MAX_RETURN_PKT_SIZE
*/
- I915_WRITE(MIPI_INTR_STAT(pipe), GEN_READ_DATA_AVAIL);
+ I915_WRITE(MIPI_INTR_STAT(port), GEN_READ_DATA_AVAIL);
ret = dsi_vc_generic_send_read_request(intel_dsi, channel, reqdata,
- reqlen);
+ reqlen, port);
if (ret)
return ret;
mask = GEN_READ_DATA_AVAIL;
- if (wait_for((I915_READ(MIPI_INTR_STAT(pipe)) & mask) == mask, 50))
+ if (wait_for((I915_READ(MIPI_INTR_STAT(port)) & mask) == mask, 50))
DRM_ERROR("Timeout waiting for read data.\n");
- ret = dsi_read_data_return(intel_dsi, buf, buflen);
+ ret = dsi_read_data_return(intel_dsi, buf, buflen, port);
if (ret < 0)
return ret;
@@ -394,8 +385,7 @@
struct drm_encoder *encoder = &intel_dsi->base.base;
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
- enum pipe pipe = intel_crtc->pipe;
+ enum port port;
u32 mask;
/* XXX: pipe, hs */
@@ -404,18 +394,23 @@
else
cmd |= DPI_LP_MODE;
- /* clear bit */
- I915_WRITE(MIPI_INTR_STAT(pipe), SPL_PKT_SENT_INTERRUPT);
+ for_each_dsi_port(port, intel_dsi->ports) {
+ /* clear bit */
+ I915_WRITE(MIPI_INTR_STAT(port), SPL_PKT_SENT_INTERRUPT);
- /* XXX: old code skips write if control unchanged */
- if (cmd == I915_READ(MIPI_DPI_CONTROL(pipe)))
- DRM_ERROR("Same special packet %02x twice in a row.\n", cmd);
+ /* XXX: old code skips write if control unchanged */
+ if (cmd == I915_READ(MIPI_DPI_CONTROL(port)))
+ DRM_ERROR("Same special packet %02x twice in a row.\n",
+ cmd);
- I915_WRITE(MIPI_DPI_CONTROL(pipe), cmd);
+ I915_WRITE(MIPI_DPI_CONTROL(port), cmd);
- mask = SPL_PKT_SENT_INTERRUPT;
- if (wait_for((I915_READ(MIPI_INTR_STAT(pipe)) & mask) == mask, 100))
- DRM_ERROR("Video mode command 0x%08x send failed.\n", cmd);
+ mask = SPL_PKT_SENT_INTERRUPT;
+ if (wait_for((I915_READ(MIPI_INTR_STAT(port)) & mask) == mask,
+ 100))
+ DRM_ERROR("Video mode command 0x%08x send failed.\n",
+ cmd);
+ }
return 0;
}
@@ -426,12 +421,12 @@
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
- enum pipe pipe = intel_crtc->pipe;
+ enum port port = intel_dsi_pipe_to_port(intel_crtc->pipe);
u32 mask;
mask = LP_CTRL_FIFO_EMPTY | HS_CTRL_FIFO_EMPTY |
LP_DATA_FIFO_EMPTY | HS_DATA_FIFO_EMPTY;
- if (wait_for((I915_READ(MIPI_GEN_FIFO_STAT(pipe)) & mask) == mask, 100))
+ if (wait_for((I915_READ(MIPI_GEN_FIFO_STAT(port)) & mask) == mask, 100))
DRM_ERROR("DPI FIFOs are not empty\n");
}
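
The conversion above replaces pipe-relative MIPI register addressing with port-relative addressing, threading an explicit enum port through every DSI command helper. A minimal sketch of the resulting per-port DCS read flow, consolidating the steps from dsi_vc_dcs_read() above; the register macros are the real i915 defines, but the wrapper itself is illustrative only and assumes the file's static helpers are in scope:

/* Illustrative consolidation of the per-port read sequence; not part
 * of the patch itself. */
static int dcs_read_sketch(struct intel_dsi *intel_dsi, int channel,
			   u8 dcs_cmd, u8 *buf, int buflen, enum port port)
{
	struct drm_device *dev = intel_dsi->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	/* Ack any stale "read data available" status for this port. */
	I915_WRITE(MIPI_INTR_STAT(port), GEN_READ_DATA_AVAIL);

	/* Send the short read request packet on the chosen port. */
	ret = dsi_vc_dcs_send_read_request(intel_dsi, channel, dcs_cmd, port);
	if (ret)
		return ret;

	/* Poll the same port's status register for returned data. */
	if (wait_for((I915_READ(MIPI_INTR_STAT(port)) &
		      GEN_READ_DATA_AVAIL) == GEN_READ_DATA_AVAIL, 50))
		DRM_ERROR("Timeout waiting for read data.\n");

	/* Drain the port's HS or LP generic data FIFO. */
	return dsi_read_data_return(intel_dsi, buf, buflen, port);
}
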
diff --git a/drivers/gpu/drm/i915/intel_dsi_cmd.h b/drivers/gpu/drm/i915/intel_dsi_cmd.h
index 46aa1ac..326a5ac 100644
--- a/drivers/gpu/drm/i915/intel_dsi_cmd.h
+++ b/drivers/gpu/drm/i915/intel_dsi_cmd.h
@@ -36,77 +36,81 @@
#define DPI_LP_MODE_EN false
#define DPI_HS_MODE_EN true
-void dsi_hs_mode_enable(struct intel_dsi *intel_dsi, bool enable);
+void dsi_hs_mode_enable(struct intel_dsi *intel_dsi, bool enable,
+ enum port port);
int dsi_vc_dcs_write(struct intel_dsi *intel_dsi, int channel,
- const u8 *data, int len);
+ const u8 *data, int len, enum port port);
int dsi_vc_generic_write(struct intel_dsi *intel_dsi, int channel,
- const u8 *data, int len);
+ const u8 *data, int len, enum port port);
int dsi_vc_dcs_read(struct intel_dsi *intel_dsi, int channel, u8 dcs_cmd,
- u8 *buf, int buflen);
+ u8 *buf, int buflen, enum port port);
int dsi_vc_generic_read(struct intel_dsi *intel_dsi, int channel,
- u8 *reqdata, int reqlen, u8 *buf, int buflen);
+ u8 *reqdata, int reqlen, u8 *buf, int buflen, enum port port);
int dpi_send_cmd(struct intel_dsi *intel_dsi, u32 cmd, bool hs);
void wait_for_dsi_fifo_empty(struct intel_dsi *intel_dsi);
/* XXX: questionable write helpers */
static inline int dsi_vc_dcs_write_0(struct intel_dsi *intel_dsi,
- int channel, u8 dcs_cmd)
+ int channel, u8 dcs_cmd, enum port port)
{
- return dsi_vc_dcs_write(intel_dsi, channel, &dcs_cmd, 1);
+ return dsi_vc_dcs_write(intel_dsi, channel, &dcs_cmd, 1, port);
}
static inline int dsi_vc_dcs_write_1(struct intel_dsi *intel_dsi,
- int channel, u8 dcs_cmd, u8 param)
+ int channel, u8 dcs_cmd, u8 param, enum port port)
{
u8 buf[2] = { dcs_cmd, param };
- return dsi_vc_dcs_write(intel_dsi, channel, buf, 2);
+ return dsi_vc_dcs_write(intel_dsi, channel, buf, 2, port);
}
static inline int dsi_vc_generic_write_0(struct intel_dsi *intel_dsi,
- int channel)
+ int channel, enum port port)
{
- return dsi_vc_generic_write(intel_dsi, channel, NULL, 0);
+ return dsi_vc_generic_write(intel_dsi, channel, NULL, 0, port);
}
static inline int dsi_vc_generic_write_1(struct intel_dsi *intel_dsi,
- int channel, u8 param)
+ int channel, u8 param, enum port port)
{
- return dsi_vc_generic_write(intel_dsi, channel, &param, 1);
+ return dsi_vc_generic_write(intel_dsi, channel, &param, 1, port);
}
static inline int dsi_vc_generic_write_2(struct intel_dsi *intel_dsi,
- int channel, u8 param1, u8 param2)
+ int channel, u8 param1, u8 param2, enum port port)
{
u8 buf[2] = { param1, param2 };
- return dsi_vc_generic_write(intel_dsi, channel, buf, 2);
+ return dsi_vc_generic_write(intel_dsi, channel, buf, 2, port);
}
/* XXX: questionable read helpers */
static inline int dsi_vc_generic_read_0(struct intel_dsi *intel_dsi,
- int channel, u8 *buf, int buflen)
+ int channel, u8 *buf, int buflen, enum port port)
{
- return dsi_vc_generic_read(intel_dsi, channel, NULL, 0, buf, buflen);
+ return dsi_vc_generic_read(intel_dsi, channel, NULL, 0, buf, buflen,
+ port);
}
static inline int dsi_vc_generic_read_1(struct intel_dsi *intel_dsi,
int channel, u8 param, u8 *buf,
- int buflen)
+ int buflen, enum port port)
{
- return dsi_vc_generic_read(intel_dsi, channel, &param, 1, buf, buflen);
+ return dsi_vc_generic_read(intel_dsi, channel, &param, 1, buf, buflen,
+ port);
}
static inline int dsi_vc_generic_read_2(struct intel_dsi *intel_dsi,
int channel, u8 param1, u8 param2,
- u8 *buf, int buflen)
+ u8 *buf, int buflen, enum port port)
{
u8 req[2] = { param1, param2 };
- return dsi_vc_generic_read(intel_dsi, channel, req, 2, buf, buflen);
+ return dsi_vc_generic_read(intel_dsi, channel, req, 2, buf, buflen,
+ port);
}
diff --git a/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c b/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c
index f6bdd44..5493aef 100644
--- a/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c
+++ b/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c
@@ -94,16 +94,31 @@
{ GPIO_NC_11_PCONF0, GPIO_NC_11_PAD, 0}
};
+static inline enum port intel_dsi_seq_port_to_port(u8 port)
+{
+ return port ? PORT_C : PORT_A;
+}
+
static u8 *mipi_exec_send_packet(struct intel_dsi *intel_dsi, u8 *data)
{
- u8 type, byte, mode, vc, port;
+ u8 type, byte, mode, vc, seq_port;
u16 len;
+ enum port port;
byte = *data++;
mode = (byte >> MIPI_TRANSFER_MODE_SHIFT) & 0x1;
vc = (byte >> MIPI_VIRTUAL_CHANNEL_SHIFT) & 0x3;
- port = (byte >> MIPI_PORT_SHIFT) & 0x3;
+ seq_port = (byte >> MIPI_PORT_SHIFT) & 0x3;
+ /* For DSI single link on Port A & C, the seq_port value parsed
+ * from Sequence Block #53 of the VBT has been set to 0. Reads and
+ * writes of packets for single link on Port A and Port C will
+ * instead be based on the DVO port from VBT block 2.
+ */
+ if (intel_dsi->ports == (1 << PORT_C))
+ port = PORT_C;
+ else
+ port = intel_dsi_seq_port_to_port(seq_port);
/* LP or HS mode */
intel_dsi->hs = mode;
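
The new intel_dsi_seq_port_to_port() helper decodes the two-bit port field packed into each sequence element's header byte. A short worked example of the decode, using an illustrative header value:

/* Worked example of the header-byte decode above; 0x85 is a made-up
 * value, not taken from any real VBT:
 *
 *	mode     = (0x85 >> MIPI_TRANSFER_MODE_SHIFT) & 0x1;
 *	vc       = (0x85 >> MIPI_VIRTUAL_CHANNEL_SHIFT) & 0x3;
 *	seq_port = (0x85 >> MIPI_PORT_SHIFT) & 0x3;
 *
 * seq_port 0 maps to PORT_A and any non-zero value to PORT_C, except
 * that a panel wired to Port C alone (intel_dsi->ports == 1 << PORT_C)
 * always uses PORT_C regardless of what the sequence block says.
 */
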
@@ -115,13 +130,13 @@
switch (type) {
case MIPI_DSI_GENERIC_SHORT_WRITE_0_PARAM:
- dsi_vc_generic_write_0(intel_dsi, vc);
+ dsi_vc_generic_write_0(intel_dsi, vc, port);
break;
case MIPI_DSI_GENERIC_SHORT_WRITE_1_PARAM:
- dsi_vc_generic_write_1(intel_dsi, vc, *data);
+ dsi_vc_generic_write_1(intel_dsi, vc, *data, port);
break;
case MIPI_DSI_GENERIC_SHORT_WRITE_2_PARAM:
- dsi_vc_generic_write_2(intel_dsi, vc, *data, *(data + 1));
+ dsi_vc_generic_write_2(intel_dsi, vc, *data, *(data + 1), port);
break;
case MIPI_DSI_GENERIC_READ_REQUEST_0_PARAM:
case MIPI_DSI_GENERIC_READ_REQUEST_1_PARAM:
@@ -129,19 +144,19 @@
DRM_DEBUG_DRIVER("Generic Read not yet implemented or used\n");
break;
case MIPI_DSI_GENERIC_LONG_WRITE:
- dsi_vc_generic_write(intel_dsi, vc, data, len);
+ dsi_vc_generic_write(intel_dsi, vc, data, len, port);
break;
case MIPI_DSI_DCS_SHORT_WRITE:
- dsi_vc_dcs_write_0(intel_dsi, vc, *data);
+ dsi_vc_dcs_write_0(intel_dsi, vc, *data, port);
break;
case MIPI_DSI_DCS_SHORT_WRITE_PARAM:
- dsi_vc_dcs_write_1(intel_dsi, vc, *data, *(data + 1));
+ dsi_vc_dcs_write_1(intel_dsi, vc, *data, *(data + 1), port);
break;
case MIPI_DSI_DCS_READ:
DRM_DEBUG_DRIVER("DCS Read not yet implemented or used\n");
break;
case MIPI_DSI_DCS_LONG_WRITE:
- dsi_vc_dcs_write(intel_dsi, vc, data, len);
+ dsi_vc_dcs_write(intel_dsi, vc, data, len, port);
break;
}
@@ -280,6 +295,11 @@
intel_dsi->clock_stop = mipi_config->enable_clk_stop ? 1 : 0;
intel_dsi->lane_count = mipi_config->lane_cnt + 1;
intel_dsi->pixel_format = mipi_config->videomode_color_format << 7;
+ intel_dsi->dual_link = mipi_config->dual_link;
+ intel_dsi->pixel_overlap = mipi_config->pixel_overlap;
+
+ if (intel_dsi->dual_link)
+ intel_dsi->ports = ((1 << PORT_A) | (1 << PORT_C));
if (intel_dsi->pixel_format == VID_MODE_FORMAT_RGB666)
bits_per_pixel = 18;
@@ -299,6 +319,20 @@
pclk = mode->clock;
+ /* In dual link mode each port needs half of pixel clock */
+ if (intel_dsi->dual_link) {
+ pclk = pclk / 2;
+
+ /* We can enable pixel_overlap if needed by the panel. In that
+ * case we need to increase the pixel clock for the extra pixels.
+ */
+ if (intel_dsi->dual_link == DSI_DUAL_LINK_FRONT_BACK) {
+ pclk += DIV_ROUND_UP(mode->vtotal *
+ intel_dsi->pixel_overlap *
+ 60, 1000);
+ }
+ }
+
/* Burst Mode Ratio
* Target ddr frequency from VBT / non burst ddr freq
* multiply by 100 to preserve remainder
@@ -493,6 +527,12 @@
DRM_DEBUG_KMS("Clockstop %s\n", intel_dsi->clock_stop ?
"disabled" : "enabled");
DRM_DEBUG_KMS("Mode %s\n", intel_dsi->operation_mode ? "command" : "video");
+ if (intel_dsi->dual_link == DSI_DUAL_LINK_FRONT_BACK)
+ DRM_DEBUG_KMS("Dual link: DSI_DUAL_LINK_FRONT_BACK\n");
+ else if (intel_dsi->dual_link == DSI_DUAL_LINK_PIXEL_ALT)
+ DRM_DEBUG_KMS("Dual link: DSI_DUAL_LINK_PIXEL_ALT\n");
+ else
+ DRM_DEBUG_KMS("Dual link: NONE\n");
DRM_DEBUG_KMS("Pixel Format %d\n", intel_dsi->pixel_format);
DRM_DEBUG_KMS("TLPX %d\n", intel_dsi->escape_clk_div);
DRM_DEBUG_KMS("LP RX Timeout 0x%x\n", intel_dsi->lp_rx_timeout);
diff --git a/drivers/gpu/drm/i915/intel_dsi_pll.c b/drivers/gpu/drm/i915/intel_dsi_pll.c
index fa7a6ca..3622d0b 100644
--- a/drivers/gpu/drm/i915/intel_dsi_pll.c
+++ b/drivers/gpu/drm/i915/intel_dsi_pll.c
@@ -241,7 +241,11 @@
return;
}
- dsi_mnp.dsi_pll_ctrl |= DSI_PLL_CLK_GATE_DSI0_DSIPLL;
+ if (intel_dsi->ports & (1 << PORT_A))
+ dsi_mnp.dsi_pll_ctrl |= DSI_PLL_CLK_GATE_DSI0_DSIPLL;
+
+ if (intel_dsi->ports & (1 << PORT_C))
+ dsi_mnp.dsi_pll_ctrl |= DSI_PLL_CLK_GATE_DSI1_DSIPLL;
DRM_DEBUG_KMS("dsi pll div %08x, ctrl %08x\n",
dsi_mnp.dsi_pll_div, dsi_mnp.dsi_pll_ctrl);
@@ -269,12 +273,14 @@
tmp |= DSI_PLL_VCO_EN;
vlv_cck_write(dev_priv, CCK_REG_DSI_PLL_CONTROL, tmp);
- mutex_unlock(&dev_priv->dpio_lock);
+ if (wait_for(vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL) &
+ DSI_PLL_LOCK, 20)) {
- if (wait_for(I915_READ(PIPECONF(PIPE_A)) & PIPECONF_DSI_PLL_LOCKED, 20)) {
+ mutex_unlock(&dev_priv->dpio_lock);
DRM_ERROR("DSI PLL lock failed\n");
return;
}
+ mutex_unlock(&dev_priv->dpio_lock);
DRM_DEBUG_KMS("DSI PLL locked\n");
}
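
The PLL fix above corrects two problems at once: the lock bit lives in the CCK register block, not in PIPECONF, and it has to be polled while dpio_lock is still held. A minimal sketch of the corrected enable tail, assuming the surrounding function context (tmp and dev_priv already in scope, lock taken earlier):

	/* Sketch of the corrected ordering: read the lock status via
	 * vlv_cck_read() before dropping dpio_lock, and release the
	 * lock on both the error and success paths. */
	tmp |= DSI_PLL_VCO_EN;
	vlv_cck_write(dev_priv, CCK_REG_DSI_PLL_CONTROL, tmp);

	if (wait_for(vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL) &
		     DSI_PLL_LOCK, 20)) {
		mutex_unlock(&dev_priv->dpio_lock);
		DRM_ERROR("DSI PLL lock failed\n");
		return;
	}
	mutex_unlock(&dev_priv->dpio_lock);
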
diff --git a/drivers/gpu/drm/i915/intel_fbc.c b/drivers/gpu/drm/i915/intel_fbc.c
new file mode 100644
index 0000000..4daceae
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_fbc.c
@@ -0,0 +1,701 @@
+/*
+ * Copyright © 2014 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/**
+ * DOC: Frame Buffer Compression (FBC)
+ *
+ * FBC tries to save memory bandwidth (and so power consumption) by
+ * compressing the amount of memory used by the display. It is totally
+ * transparent to user space and completely handled in the kernel.
+ *
+ * The benefits of FBC are mostly visible with solid backgrounds and
+ * variation-less patterns. It comes from keeping the memory footprint small
+ * and having fewer memory pages opened and accessed for refreshing the display.
+ *
+ * i915 is responsible for reserving stolen memory for FBC and for
+ * programming its offset into the proper registers. The hardware takes
+ * care of all the compression/decompression, but there are many known
+ * cases where we have to forcibly disable it to allow proper screen
+ * updates.
+ */
+
+#include "intel_drv.h"
+#include "i915_drv.h"
+
+static void i8xx_fbc_disable(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 fbc_ctl;
+
+ dev_priv->fbc.enabled = false;
+
+ /* Disable compression */
+ fbc_ctl = I915_READ(FBC_CONTROL);
+ if ((fbc_ctl & FBC_CTL_EN) == 0)
+ return;
+
+ fbc_ctl &= ~FBC_CTL_EN;
+ I915_WRITE(FBC_CONTROL, fbc_ctl);
+
+ /* Wait for compressing bit to clear */
+ if (wait_for((I915_READ(FBC_STATUS) & FBC_STAT_COMPRESSING) == 0, 10)) {
+ DRM_DEBUG_KMS("FBC idle timed out\n");
+ return;
+ }
+
+ DRM_DEBUG_KMS("disabled FBC\n");
+}
+
+static void i8xx_fbc_enable(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_framebuffer *fb = crtc->primary->fb;
+ struct drm_i915_gem_object *obj = intel_fb_obj(fb);
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int cfb_pitch;
+ int i;
+ u32 fbc_ctl;
+
+ dev_priv->fbc.enabled = true;
+
+ cfb_pitch = dev_priv->fbc.size / FBC_LL_SIZE;
+ if (fb->pitches[0] < cfb_pitch)
+ cfb_pitch = fb->pitches[0];
+
+ /* FBC_CTL wants 32B or 64B units */
+ if (IS_GEN2(dev))
+ cfb_pitch = (cfb_pitch / 32) - 1;
+ else
+ cfb_pitch = (cfb_pitch / 64) - 1;
+
+ /* Clear old tags */
+ for (i = 0; i < (FBC_LL_SIZE / 32) + 1; i++)
+ I915_WRITE(FBC_TAG + (i * 4), 0);
+
+ if (IS_GEN4(dev)) {
+ u32 fbc_ctl2;
+
+ /* Set it up... */
+ fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | FBC_CTL_CPU_FENCE;
+ fbc_ctl2 |= FBC_CTL_PLANE(intel_crtc->plane);
+ I915_WRITE(FBC_CONTROL2, fbc_ctl2);
+ I915_WRITE(FBC_FENCE_OFF, crtc->y);
+ }
+
+ /* enable it... */
+ fbc_ctl = I915_READ(FBC_CONTROL);
+ fbc_ctl &= 0x3fff << FBC_CTL_INTERVAL_SHIFT;
+ fbc_ctl |= FBC_CTL_EN | FBC_CTL_PERIODIC;
+ if (IS_I945GM(dev))
+ fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */
+ fbc_ctl |= (cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT;
+ fbc_ctl |= obj->fence_reg;
+ I915_WRITE(FBC_CONTROL, fbc_ctl);
+
+ DRM_DEBUG_KMS("enabled FBC, pitch %d, yoff %d, plane %c\n",
+ cfb_pitch, crtc->y, plane_name(intel_crtc->plane));
+}
+
+static bool i8xx_fbc_enabled(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ return I915_READ(FBC_CONTROL) & FBC_CTL_EN;
+}
+
+static void g4x_fbc_enable(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_framebuffer *fb = crtc->primary->fb;
+ struct drm_i915_gem_object *obj = intel_fb_obj(fb);
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ u32 dpfc_ctl;
+
+ dev_priv->fbc.enabled = true;
+
+ dpfc_ctl = DPFC_CTL_PLANE(intel_crtc->plane) | DPFC_SR_EN;
+ if (drm_format_plane_cpp(fb->pixel_format, 0) == 2)
+ dpfc_ctl |= DPFC_CTL_LIMIT_2X;
+ else
+ dpfc_ctl |= DPFC_CTL_LIMIT_1X;
+ dpfc_ctl |= DPFC_CTL_FENCE_EN | obj->fence_reg;
+
+ I915_WRITE(DPFC_FENCE_YOFF, crtc->y);
+
+ /* enable it... */
+ I915_WRITE(DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
+
+ DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(intel_crtc->plane));
+}
+
+static void g4x_fbc_disable(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 dpfc_ctl;
+
+ dev_priv->fbc.enabled = false;
+
+ /* Disable compression */
+ dpfc_ctl = I915_READ(DPFC_CONTROL);
+ if (dpfc_ctl & DPFC_CTL_EN) {
+ dpfc_ctl &= ~DPFC_CTL_EN;
+ I915_WRITE(DPFC_CONTROL, dpfc_ctl);
+
+ DRM_DEBUG_KMS("disabled FBC\n");
+ }
+}
+
+static bool g4x_fbc_enabled(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ return I915_READ(DPFC_CONTROL) & DPFC_CTL_EN;
+}
+
+static void snb_fbc_blit_update(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 blt_ecoskpd;
+
+ /* Make sure blitter notifies FBC of writes */
+
+ /* The blitter is part of the Media power well on VLV. No impact
+ * of this param on other platforms for now */
+ gen6_gt_force_wake_get(dev_priv, FORCEWAKE_MEDIA);
+
+ blt_ecoskpd = I915_READ(GEN6_BLITTER_ECOSKPD);
+ blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY <<
+ GEN6_BLITTER_LOCK_SHIFT;
+ I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
+ blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY;
+ I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
+ blt_ecoskpd &= ~(GEN6_BLITTER_FBC_NOTIFY <<
+ GEN6_BLITTER_LOCK_SHIFT);
+ I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
+ POSTING_READ(GEN6_BLITTER_ECOSKPD);
+
+ gen6_gt_force_wake_put(dev_priv, FORCEWAKE_MEDIA);
+}
+
+static void ilk_fbc_enable(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_framebuffer *fb = crtc->primary->fb;
+ struct drm_i915_gem_object *obj = intel_fb_obj(fb);
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ u32 dpfc_ctl;
+
+ dev_priv->fbc.enabled = true;
+
+ dpfc_ctl = DPFC_CTL_PLANE(intel_crtc->plane);
+ if (drm_format_plane_cpp(fb->pixel_format, 0) == 2)
+ dev_priv->fbc.threshold++;
+
+ switch (dev_priv->fbc.threshold) {
+ case 4:
+ case 3:
+ dpfc_ctl |= DPFC_CTL_LIMIT_4X;
+ break;
+ case 2:
+ dpfc_ctl |= DPFC_CTL_LIMIT_2X;
+ break;
+ case 1:
+ dpfc_ctl |= DPFC_CTL_LIMIT_1X;
+ break;
+ }
+ dpfc_ctl |= DPFC_CTL_FENCE_EN;
+ if (IS_GEN5(dev))
+ dpfc_ctl |= obj->fence_reg;
+
+ I915_WRITE(ILK_DPFC_FENCE_YOFF, crtc->y);
+ I915_WRITE(ILK_FBC_RT_BASE, i915_gem_obj_ggtt_offset(obj) | ILK_FBC_RT_VALID);
+ /* enable it... */
+ I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
+
+ if (IS_GEN6(dev)) {
+ I915_WRITE(SNB_DPFC_CTL_SA,
+ SNB_CPU_FENCE_ENABLE | obj->fence_reg);
+ I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->y);
+ snb_fbc_blit_update(dev);
+ }
+
+ DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(intel_crtc->plane));
+}
+
+static void ilk_fbc_disable(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 dpfc_ctl;
+
+ dev_priv->fbc.enabled = false;
+
+ /* Disable compression */
+ dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
+ if (dpfc_ctl & DPFC_CTL_EN) {
+ dpfc_ctl &= ~DPFC_CTL_EN;
+ I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl);
+
+ DRM_DEBUG_KMS("disabled FBC\n");
+ }
+}
+
+static bool ilk_fbc_enabled(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ return I915_READ(ILK_DPFC_CONTROL) & DPFC_CTL_EN;
+}
+
+static void gen7_fbc_enable(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_framebuffer *fb = crtc->primary->fb;
+ struct drm_i915_gem_object *obj = intel_fb_obj(fb);
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ u32 dpfc_ctl;
+
+ dev_priv->fbc.enabled = true;
+
+ dpfc_ctl = IVB_DPFC_CTL_PLANE(intel_crtc->plane);
+ if (drm_format_plane_cpp(fb->pixel_format, 0) == 2)
+ dev_priv->fbc.threshold++;
+
+ switch (dev_priv->fbc.threshold) {
+ case 4:
+ case 3:
+ dpfc_ctl |= DPFC_CTL_LIMIT_4X;
+ break;
+ case 2:
+ dpfc_ctl |= DPFC_CTL_LIMIT_2X;
+ break;
+ case 1:
+ dpfc_ctl |= DPFC_CTL_LIMIT_1X;
+ break;
+ }
+
+ dpfc_ctl |= IVB_DPFC_CTL_FENCE_EN;
+
+ if (dev_priv->fbc.false_color)
+ dpfc_ctl |= FBC_CTL_FALSE_COLOR;
+
+ I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
+
+ if (IS_IVYBRIDGE(dev)) {
+ /* WaFbcAsynchFlipDisableFbcQueue:ivb */
+ I915_WRITE(ILK_DISPLAY_CHICKEN1,
+ I915_READ(ILK_DISPLAY_CHICKEN1) |
+ ILK_FBCQ_DIS);
+ } else {
+ /* WaFbcAsynchFlipDisableFbcQueue:hsw,bdw */
+ I915_WRITE(CHICKEN_PIPESL_1(intel_crtc->pipe),
+ I915_READ(CHICKEN_PIPESL_1(intel_crtc->pipe)) |
+ HSW_FBCQ_DIS);
+ }
+
+ I915_WRITE(SNB_DPFC_CTL_SA,
+ SNB_CPU_FENCE_ENABLE | obj->fence_reg);
+ I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->y);
+
+ snb_fbc_blit_update(dev);
+
+ DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(intel_crtc->plane));
+}
+
+/**
+ * intel_fbc_enabled - Is FBC enabled?
+ * @dev: the drm_device
+ *
+ * This function is used to verify the current state of FBC.
+ * FIXME: This should be tracked in the plane config eventually
+ * instead of queried at runtime for most callers.
+ */
+bool intel_fbc_enabled(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ return dev_priv->fbc.enabled;
+}
+
+void bdw_fbc_sw_flush(struct drm_device *dev, u32 value)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (!IS_GEN8(dev))
+ return;
+
+ if (!intel_fbc_enabled(dev))
+ return;
+
+ I915_WRITE(MSG_FBC_REND_STATE, value);
+}
+
+static void intel_fbc_work_fn(struct work_struct *__work)
+{
+ struct intel_fbc_work *work =
+ container_of(to_delayed_work(__work),
+ struct intel_fbc_work, work);
+ struct drm_device *dev = work->crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ mutex_lock(&dev->struct_mutex);
+ if (work == dev_priv->fbc.fbc_work) {
+ /* Double check that we haven't switched fb without cancelling
+ * the prior work.
+ */
+ if (work->crtc->primary->fb == work->fb) {
+ dev_priv->display.enable_fbc(work->crtc);
+
+ dev_priv->fbc.plane = to_intel_crtc(work->crtc)->plane;
+ dev_priv->fbc.fb_id = work->crtc->primary->fb->base.id;
+ dev_priv->fbc.y = work->crtc->y;
+ }
+
+ dev_priv->fbc.fbc_work = NULL;
+ }
+ mutex_unlock(&dev->struct_mutex);
+
+ kfree(work);
+}
+
+static void intel_fbc_cancel_work(struct drm_i915_private *dev_priv)
+{
+ if (dev_priv->fbc.fbc_work == NULL)
+ return;
+
+ DRM_DEBUG_KMS("cancelling pending FBC enable\n");
+
+ /* Synchronisation is provided by struct_mutex and checking of
+ * dev_priv->fbc.fbc_work, so we can perform the cancellation
+ * entirely asynchronously.
+ */
+ if (cancel_delayed_work(&dev_priv->fbc.fbc_work->work))
+ /* tasklet was killed before being run, clean up */
+ kfree(dev_priv->fbc.fbc_work);
+
+ /* Mark the work as no longer wanted so that if it does
+ * wake up (because the work was already running and waiting
+ * for our mutex), it will discover that it is no longer
+ * necessary to run.
+ */
+ dev_priv->fbc.fbc_work = NULL;
+}
+
+static void intel_fbc_enable(struct drm_crtc *crtc)
+{
+ struct intel_fbc_work *work;
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (!dev_priv->display.enable_fbc)
+ return;
+
+ intel_fbc_cancel_work(dev_priv);
+
+ work = kzalloc(sizeof(*work), GFP_KERNEL);
+ if (work == NULL) {
+ DRM_ERROR("Failed to allocate FBC work structure\n");
+ dev_priv->display.enable_fbc(crtc);
+ return;
+ }
+
+ work->crtc = crtc;
+ work->fb = crtc->primary->fb;
+ INIT_DELAYED_WORK(&work->work, intel_fbc_work_fn);
+
+ dev_priv->fbc.fbc_work = work;
+
+ /* Delay the actual enabling to let pageflipping cease and the
+ * display settle before starting the compression. Note that
+ * this delay also serves a second purpose: it allows for a
+ * vblank to pass after disabling the FBC before we attempt
+ * to modify the control registers.
+ *
+ * A more complicated solution would involve tracking vblanks
+ * following the termination of the page-flipping sequence
+ * and indeed performing the enable as a co-routine and not
+ * waiting synchronously upon the vblank.
+ *
+ * WaFbcWaitForVBlankBeforeEnable:ilk,snb
+ */
+ schedule_delayed_work(&work->work, msecs_to_jiffies(50));
+}
+
+/**
+ * intel_fbc_disable - disable FBC
+ * @dev: the drm_device
+ *
+ * This function disables FBC.
+ */
+void intel_fbc_disable(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ intel_fbc_cancel_work(dev_priv);
+
+ if (!dev_priv->display.disable_fbc)
+ return;
+
+ dev_priv->display.disable_fbc(dev);
+ dev_priv->fbc.plane = -1;
+}
+
+static bool set_no_fbc_reason(struct drm_i915_private *dev_priv,
+ enum no_fbc_reason reason)
+{
+ if (dev_priv->fbc.no_fbc_reason == reason)
+ return false;
+
+ dev_priv->fbc.no_fbc_reason = reason;
+ return true;
+}
+
+/**
+ * intel_fbc_update - enable/disable FBC as needed
+ * @dev: the drm_device
+ *
+ * Set up the framebuffer compression hardware at mode set time. We
+ * enable it if possible:
+ * - plane A only (on pre-965)
+ * - no pixel multiply/line duplication
+ * - no alpha buffer discard
+ * - no dual wide
+ * - framebuffer <= max_hdisplay in width, max_vdisplay in height
+ *
+ * We can't assume that any compression will take place (worst case),
+ * so the compressed buffer has to be the same size as the uncompressed
+ * one. It also must reside (along with the line length buffer) in
+ * stolen memory.
+ *
+ * We need to enable/disable FBC on a global basis.
+ */
+void intel_fbc_update(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_crtc *crtc = NULL, *tmp_crtc;
+ struct intel_crtc *intel_crtc;
+ struct drm_framebuffer *fb;
+ struct drm_i915_gem_object *obj;
+ const struct drm_display_mode *adjusted_mode;
+ unsigned int max_width, max_height;
+
+ if (!HAS_FBC(dev)) {
+ set_no_fbc_reason(dev_priv, FBC_UNSUPPORTED);
+ return;
+ }
+
+ if (!i915.powersave) {
+ if (set_no_fbc_reason(dev_priv, FBC_MODULE_PARAM))
+ DRM_DEBUG_KMS("fbc disabled per module param\n");
+ return;
+ }
+
+ /*
+ * If FBC is already on, we just have to verify that we can
+ * keep it that way...
+ * Need to disable if:
+ * - more than one pipe is active
+ * - changing FBC params (stride, fence, mode)
+ * - new fb is too large to fit in compressed buffer
+ * - going to an unsupported config (interlace, pixel multiply, etc.)
+ */
+ for_each_crtc(dev, tmp_crtc) {
+ if (intel_crtc_active(tmp_crtc) &&
+ to_intel_crtc(tmp_crtc)->primary_enabled) {
+ if (crtc) {
+ if (set_no_fbc_reason(dev_priv, FBC_MULTIPLE_PIPES))
+ DRM_DEBUG_KMS("more than one pipe active, disabling compression\n");
+ goto out_disable;
+ }
+ crtc = tmp_crtc;
+ }
+ }
+
+ if (!crtc || crtc->primary->fb == NULL) {
+ if (set_no_fbc_reason(dev_priv, FBC_NO_OUTPUT))
+ DRM_DEBUG_KMS("no output, disabling\n");
+ goto out_disable;
+ }
+
+ intel_crtc = to_intel_crtc(crtc);
+ fb = crtc->primary->fb;
+ obj = intel_fb_obj(fb);
+ adjusted_mode = &intel_crtc->config.adjusted_mode;
+
+ if (i915.enable_fbc < 0) {
+ if (set_no_fbc_reason(dev_priv, FBC_CHIP_DEFAULT))
+ DRM_DEBUG_KMS("disabled per chip default\n");
+ goto out_disable;
+ }
+ if (!i915.enable_fbc) {
+ if (set_no_fbc_reason(dev_priv, FBC_MODULE_PARAM))
+ DRM_DEBUG_KMS("fbc disabled per module param\n");
+ goto out_disable;
+ }
+ if ((adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) ||
+ (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)) {
+ if (set_no_fbc_reason(dev_priv, FBC_UNSUPPORTED_MODE))
+ DRM_DEBUG_KMS("mode incompatible with compression, "
+ "disabling\n");
+ goto out_disable;
+ }
+
+ if (INTEL_INFO(dev)->gen >= 8 || IS_HASWELL(dev)) {
+ max_width = 4096;
+ max_height = 4096;
+ } else if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
+ max_width = 4096;
+ max_height = 2048;
+ } else {
+ max_width = 2048;
+ max_height = 1536;
+ }
+ if (intel_crtc->config.pipe_src_w > max_width ||
+ intel_crtc->config.pipe_src_h > max_height) {
+ if (set_no_fbc_reason(dev_priv, FBC_MODE_TOO_LARGE))
+ DRM_DEBUG_KMS("mode too large for compression, disabling\n");
+ goto out_disable;
+ }
+ if ((INTEL_INFO(dev)->gen < 4 || HAS_DDI(dev)) &&
+ intel_crtc->plane != PLANE_A) {
+ if (set_no_fbc_reason(dev_priv, FBC_BAD_PLANE))
+ DRM_DEBUG_KMS("plane not A, disabling compression\n");
+ goto out_disable;
+ }
+
+ /* The use of a CPU fence is mandatory in order to detect writes
+ * by the CPU to the scanout and trigger updates to the FBC.
+ */
+ if (obj->tiling_mode != I915_TILING_X ||
+ obj->fence_reg == I915_FENCE_REG_NONE) {
+ if (set_no_fbc_reason(dev_priv, FBC_NOT_TILED))
+ DRM_DEBUG_KMS("framebuffer not tiled or fenced, disabling compression\n");
+ goto out_disable;
+ }
+ if (INTEL_INFO(dev)->gen <= 4 && !IS_G4X(dev) &&
+ to_intel_plane(crtc->primary)->rotation != BIT(DRM_ROTATE_0)) {
+ if (set_no_fbc_reason(dev_priv, FBC_UNSUPPORTED_MODE))
+ DRM_DEBUG_KMS("Rotation unsupported, disabling\n");
+ goto out_disable;
+ }
+
+ /* If the kernel debugger is active, always disable compression */
+ if (in_dbg_master())
+ goto out_disable;
+
+ if (i915_gem_stolen_setup_compression(dev, obj->base.size,
+ drm_format_plane_cpp(fb->pixel_format, 0))) {
+ if (set_no_fbc_reason(dev_priv, FBC_STOLEN_TOO_SMALL))
+ DRM_DEBUG_KMS("framebuffer too large, disabling compression\n");
+ goto out_disable;
+ }
+
+ /* If the scanout has not changed, don't modify the FBC settings.
+ * Note that we make the fundamental assumption that the fb->obj
+ * cannot be unpinned (and have its GTT offset and fence revoked)
+ * without first being decoupled from the scanout and FBC disabled.
+ */
+ if (dev_priv->fbc.plane == intel_crtc->plane &&
+ dev_priv->fbc.fb_id == fb->base.id &&
+ dev_priv->fbc.y == crtc->y)
+ return;
+
+ if (intel_fbc_enabled(dev)) {
+ /* We update FBC along two paths, after changing fb/crtc
+ * configuration (modeswitching) and after page-flipping
+ * finishes. For the latter, we know that not only did
+ * we disable the FBC at the start of the page-flip
+ * sequence, but also more than one vblank has passed.
+ *
+ * For the former case of modeswitching, it is possible
+ * to switch between two valid FBC configurations
+ * instantaneously so we do need to disable the FBC
+ * before we can modify its control registers. We also
+ * have to wait for the next vblank for that to take
+ * effect. However, since we delay enabling FBC we can
+ * assume that a vblank has passed since disabling and
+ * that we can safely alter the registers in the deferred
+ * callback.
+ *
+ * In the scenario that we go from a valid to invalid
+ * and then back to valid FBC configuration we have
+ * no strict enforcement that a vblank occurred since
+ * disabling the FBC. However, along all current pipe
+ * disabling paths we do need to wait for a vblank at
+ * some point. And we wait before enabling FBC anyway.
+ */
+ DRM_DEBUG_KMS("disabling active FBC for update\n");
+ intel_fbc_disable(dev);
+ }
+
+ intel_fbc_enable(crtc);
+ dev_priv->fbc.no_fbc_reason = FBC_OK;
+ return;
+
+out_disable:
+ /* Multiple disables should be harmless */
+ if (intel_fbc_enabled(dev)) {
+ DRM_DEBUG_KMS("unsupported config, disabling FBC\n");
+ intel_fbc_disable(dev);
+ }
+ i915_gem_stolen_cleanup_compression(dev);
+}
+
+/**
+ * intel_fbc_init - Initialize FBC
+ * @dev_priv: the i915 device
+ *
+ * This function might be called during the PM init process.
+ */
+void intel_fbc_init(struct drm_i915_private *dev_priv)
+{
+ if (!HAS_FBC(dev_priv)) {
+ dev_priv->fbc.enabled = false;
+ return;
+ }
+
+ if (INTEL_INFO(dev_priv)->gen >= 7) {
+ dev_priv->display.fbc_enabled = ilk_fbc_enabled;
+ dev_priv->display.enable_fbc = gen7_fbc_enable;
+ dev_priv->display.disable_fbc = ilk_fbc_disable;
+ } else if (INTEL_INFO(dev_priv)->gen >= 5) {
+ dev_priv->display.fbc_enabled = ilk_fbc_enabled;
+ dev_priv->display.enable_fbc = ilk_fbc_enable;
+ dev_priv->display.disable_fbc = ilk_fbc_disable;
+ } else if (IS_GM45(dev_priv)) {
+ dev_priv->display.fbc_enabled = g4x_fbc_enabled;
+ dev_priv->display.enable_fbc = g4x_fbc_enable;
+ dev_priv->display.disable_fbc = g4x_fbc_disable;
+ } else {
+ dev_priv->display.fbc_enabled = i8xx_fbc_enabled;
+ dev_priv->display.enable_fbc = i8xx_fbc_enable;
+ dev_priv->display.disable_fbc = i8xx_fbc_disable;
+
+ /* This value was pulled out of someone's hat */
+ I915_WRITE(FBC_CONTROL, 500 << FBC_CTL_INTERVAL_SHIFT);
+ }
+
+ dev_priv->fbc.enabled = dev_priv->display.fbc_enabled(dev_priv->dev);
+}
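
Callers never touch the generation-specific hooks directly; intel_fbc_init() selects them once and everything else goes through intel_fbc_update() and intel_fbc_disable(). A hedged sketch of the expected call pattern from display code (the call site is simplified for illustration, not a verbatim excerpt from the driver):

/* Sketch of how display code is expected to drive the new FBC API. */
static void modeset_commit_sketch(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* Re-evaluate FBC against the new scanout configuration; this
	 * either schedules a delayed enable or disables compression. */
	intel_fbc_update(dev);

	if (!intel_fbc_enabled(dev))
		DRM_DEBUG_KMS("FBC not active, reason %d\n",
			      dev_priv->fbc.no_fbc_reason);
}
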
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index e588376..7670a0f 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -212,8 +212,7 @@
* @enable_execlists: value of i915.enable_execlists module parameter.
*
* Only certain platforms support Execlists (the prerequisites being
- * support for Logical Ring Contexts and Aliasing PPGTT or better),
- * and only when enabled via module parameter.
+ * support for Logical Ring Contexts and Aliasing PPGTT or better).
*
* Return: 1 if Execlists is supported and has to be enabled.
*/
@@ -474,13 +473,13 @@
}
/**
- * intel_execlists_handle_ctx_events() - handle Context Switch interrupts
+ * intel_lrc_irq_handler() - handle Context Switch interrupts
* @ring: Engine Command Streamer to handle.
*
* Check the unread Context Status Buffers and manage the submission of new
* contexts to the ELSP accordingly.
*/
-void intel_execlists_handle_ctx_events(struct intel_engine_cs *ring)
+void intel_lrc_irq_handler(struct intel_engine_cs *ring)
{
struct drm_i915_private *dev_priv = ring->dev->dev_private;
u32 status_pointer;
@@ -876,40 +875,48 @@
}
}
-static int logical_ring_alloc_seqno(struct intel_engine_cs *ring,
- struct intel_context *ctx)
+static int logical_ring_alloc_request(struct intel_engine_cs *ring,
+ struct intel_context *ctx)
{
+ struct drm_i915_gem_request *request;
+ struct drm_i915_private *dev_private = ring->dev->dev_private;
int ret;
- if (ring->outstanding_lazy_seqno)
+ if (ring->outstanding_lazy_request)
return 0;
- if (ring->preallocated_lazy_request == NULL) {
- struct drm_i915_gem_request *request;
+ request = kzalloc(sizeof(*request), GFP_KERNEL);
+ if (request == NULL)
+ return -ENOMEM;
- request = kmalloc(sizeof(*request), GFP_KERNEL);
- if (request == NULL)
- return -ENOMEM;
-
- if (ctx != ring->default_context) {
- ret = intel_lr_context_pin(ring, ctx);
- if (ret) {
- kfree(request);
- return ret;
- }
+ if (ctx != ring->default_context) {
+ ret = intel_lr_context_pin(ring, ctx);
+ if (ret) {
+ kfree(request);
+ return ret;
}
-
- /* Hold a reference to the context this request belongs to
- * (we will need it when the time comes to emit/retire the
- * request).
- */
- request->ctx = ctx;
- i915_gem_context_reference(request->ctx);
-
- ring->preallocated_lazy_request = request;
}
- return i915_gem_get_seqno(ring->dev, &ring->outstanding_lazy_seqno);
+ kref_init(&request->ref);
+ request->ring = ring;
+ request->uniq = dev_private->request_uniq++;
+
+ ret = i915_gem_get_seqno(ring->dev, &request->seqno);
+ if (ret) {
+ intel_lr_context_unpin(ring, ctx);
+ kfree(request);
+ return ret;
+ }
+
+ /* Hold a reference to the context this request belongs to
+ * (we will need it when the time comes to emit/retire the
+ * request).
+ */
+ request->ctx = ctx;
+ i915_gem_context_reference(request->ctx);
+
+ ring->outstanding_lazy_request = request;
+ return 0;
}
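
logical_ring_alloc_request() now builds a fully refcounted request up front, with kref_init(), a real seqno, and a context reference, replacing the old lazy-seqno scheme. The ownership rule that follows is sketched below; it mirrors what i915_gem_request_assign() is expected to do, with the reference helpers named as used elsewhere in this series:

/* Sketch of the refcounting contract: whoever stores a request pointer
 * holds a reference, and swapping pointers fixes up both refcounts. */
static inline void
request_assign_sketch(struct drm_i915_gem_request **dst,
		      struct drm_i915_gem_request *src)
{
	if (src)
		i915_gem_request_reference(src);	/* kref_get */
	if (*dst)
		i915_gem_request_unreference(*dst);	/* kref_put */
	*dst = src;
}
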
static int logical_ring_wait_request(struct intel_ringbuffer *ringbuf,
@@ -917,39 +924,38 @@
{
struct intel_engine_cs *ring = ringbuf->ring;
struct drm_i915_gem_request *request;
- u32 seqno = 0;
int ret;
- if (ringbuf->last_retired_head != -1) {
- ringbuf->head = ringbuf->last_retired_head;
- ringbuf->last_retired_head = -1;
-
- ringbuf->space = intel_ring_space(ringbuf);
- if (ringbuf->space >= bytes)
- return 0;
- }
+ if (intel_ring_space(ringbuf) >= bytes)
+ return 0;
list_for_each_entry(request, &ring->request_list, list) {
+ /*
+ * The request queue is per-engine, so it can contain requests
+ * from multiple ringbuffers. Here, we must ignore any that
+ * aren't from the ringbuffer we're considering.
+ */
+ struct intel_context *ctx = request->ctx;
+ if (ctx->engine[ring->id].ringbuf != ringbuf)
+ continue;
+
+ /* Would completion of this request free enough space? */
if (__intel_ring_space(request->tail, ringbuf->tail,
ringbuf->size) >= bytes) {
- seqno = request->seqno;
break;
}
}
- if (seqno == 0)
+ if (&request->list == &ring->request_list)
return -ENOSPC;
- ret = i915_wait_seqno(ring, seqno);
+ ret = i915_wait_request(request);
if (ret)
return ret;
i915_gem_retire_requests_ring(ring);
- ringbuf->head = ringbuf->last_retired_head;
- ringbuf->last_retired_head = -1;
- ringbuf->space = intel_ring_space(ringbuf);
- return 0;
+ return intel_ring_space(ringbuf) >= bytes ? 0 : -ENOSPC;
}
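
The rewritten wait walks the engine's request list, skips requests belonging to other ringbuffers, and waits on the first request whose retirement would free enough space. A short worked note on the space test (the numbers are illustrative):

/* With ringbuf->size = 32768, ringbuf->tail = 30000 and a queued
 * request whose recorded tail is 1000,
 *
 *	__intel_ring_space(1000, 30000, 32768)
 *
 * reports roughly the wraparound distance from 30000 up to 1000
 * (about 3768 bytes, less the small gap the ring always reserves
 * between head and tail), i.e. the space that retiring up to that
 * request would make available.
 */
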
static int logical_ring_wait_for_space(struct intel_ringbuffer *ringbuf,
@@ -975,13 +981,10 @@
* case by choosing an insanely large timeout. */
end = jiffies + 60 * HZ;
+ ret = 0;
do {
- ringbuf->head = I915_READ_HEAD(ring);
- ringbuf->space = intel_ring_space(ringbuf);
- if (ringbuf->space >= bytes) {
- ret = 0;
+ if (intel_ring_space(ringbuf) >= bytes)
break;
- }
msleep(1);
@@ -1022,7 +1025,7 @@
iowrite32(MI_NOOP, virt++);
ringbuf->tail = 0;
- ringbuf->space = intel_ring_space(ringbuf);
+ intel_ring_update_space(ringbuf);
return 0;
}
@@ -1076,7 +1079,7 @@
return ret;
/* Preallocate the olr before touching the ring */
- ret = logical_ring_alloc_seqno(ring, ringbuf->FIXME_lrc_ctx);
+ ret = logical_ring_alloc_request(ring, ringbuf->FIXME_lrc_ctx);
if (ret)
return ret;
@@ -1093,7 +1096,7 @@
struct drm_i915_private *dev_priv = dev->dev_private;
struct i915_workarounds *w = &dev_priv->workarounds;
- if (WARN_ON(w->count == 0))
+ if (WARN_ON_ONCE(w->count == 0))
return 0;
ring->gpu_caches_dirty = true;
@@ -1159,10 +1162,6 @@
*/
I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE));
- ret = intel_init_pipe_control(ring);
- if (ret)
- return ret;
-
I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));
return init_workarounds_ring(ring);
@@ -1321,7 +1320,7 @@
if (ret)
return ret;
- cmd = MI_STORE_DWORD_IMM_GEN8;
+ cmd = MI_STORE_DWORD_IMM_GEN4;
cmd |= MI_GLOBAL_GTT;
intel_logical_ring_emit(ringbuf, cmd);
@@ -1329,7 +1328,8 @@
(ring->status_page.gfx_addr +
(I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT)));
intel_logical_ring_emit(ringbuf, 0);
- intel_logical_ring_emit(ringbuf, ring->outstanding_lazy_seqno);
+ intel_logical_ring_emit(ringbuf,
+ i915_gem_request_get_seqno(ring->outstanding_lazy_request));
intel_logical_ring_emit(ringbuf, MI_USER_INTERRUPT);
intel_logical_ring_emit(ringbuf, MI_NOOP);
intel_logical_ring_advance_and_submit(ringbuf);
@@ -1337,6 +1337,18 @@
return 0;
}
+static int gen8_init_rcs_context(struct intel_engine_cs *ring,
+ struct intel_context *ctx)
+{
+ int ret;
+
+ ret = intel_logical_ring_workarounds_emit(ring, ctx);
+ if (ret)
+ return ret;
+
+ return intel_lr_context_render_state_init(ring, ctx);
+}
+
/**
* intel_logical_ring_cleanup() - deallocate the Engine Command Streamer
*
@@ -1354,8 +1366,7 @@
intel_logical_ring_stop(ring);
WARN_ON((I915_READ_MODE(ring) & MODE_IDLE) == 0);
- ring->preallocated_lazy_request = NULL;
- ring->outstanding_lazy_seqno = 0;
+ i915_gem_request_assign(&ring->outstanding_lazy_request, NULL);
if (ring->cleanup)
ring->cleanup(ring);
@@ -1389,12 +1400,6 @@
if (ret)
return ret;
- if (ring->init) {
- ret = ring->init(ring);
- if (ret)
- return ret;
- }
-
ret = intel_lr_context_deferred_create(ring->default_context, ring);
return ret;
@@ -1404,6 +1409,7 @@
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_engine_cs *ring = &dev_priv->ring[RCS];
+ int ret;
ring->name = "render ring";
ring->id = RCS;
@@ -1415,8 +1421,8 @@
if (HAS_L3_DPF(dev))
ring->irq_keep_mask |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
- ring->init = gen8_init_render_ring;
- ring->init_context = intel_logical_ring_workarounds_emit;
+ ring->init_hw = gen8_init_render_ring;
+ ring->init_context = gen8_init_rcs_context;
ring->cleanup = intel_fini_pipe_control;
ring->get_seqno = gen8_get_seqno;
ring->set_seqno = gen8_set_seqno;
@@ -1426,7 +1432,12 @@
ring->irq_put = gen8_logical_ring_put_irq;
ring->emit_bb_start = gen8_emit_bb_start;
- return logical_ring_init(dev, ring);
+ ring->dev = dev;
+ ret = logical_ring_init(dev, ring);
+ if (ret)
+ return ret;
+
+ return intel_init_pipe_control(ring);
}
static int logical_bsd_ring_init(struct drm_device *dev)
@@ -1442,7 +1453,7 @@
ring->irq_keep_mask =
GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT;
- ring->init = gen8_init_common_ring;
+ ring->init_hw = gen8_init_common_ring;
ring->get_seqno = gen8_get_seqno;
ring->set_seqno = gen8_set_seqno;
ring->emit_request = gen8_emit_request;
@@ -1467,7 +1478,7 @@
ring->irq_keep_mask =
GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS2_IRQ_SHIFT;
- ring->init = gen8_init_common_ring;
+ ring->init_hw = gen8_init_common_ring;
ring->get_seqno = gen8_get_seqno;
ring->set_seqno = gen8_set_seqno;
ring->emit_request = gen8_emit_request;
@@ -1492,7 +1503,7 @@
ring->irq_keep_mask =
GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT;
- ring->init = gen8_init_common_ring;
+ ring->init_hw = gen8_init_common_ring;
ring->get_seqno = gen8_get_seqno;
ring->set_seqno = gen8_set_seqno;
ring->emit_request = gen8_emit_request;
@@ -1517,7 +1528,7 @@
ring->irq_keep_mask =
GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT;
- ring->init = gen8_init_common_ring;
+ ring->init_hw = gen8_init_common_ring;
ring->get_seqno = gen8_get_seqno;
ring->set_seqno = gen8_set_seqno;
ring->emit_request = gen8_emit_request;
@@ -1616,7 +1627,7 @@
i915_vma_move_to_active(i915_gem_obj_to_ggtt(so.obj), ring);
- ret = __i915_add_request(ring, file, so.obj, NULL);
+ ret = __i915_add_request(ring, file, so.obj);
/* intel_logical_ring_add_request moves object to inactive if it
* fails */
out:
@@ -1835,8 +1846,7 @@
int ret;
WARN_ON(ctx->legacy_hw_ctx.rcs_state != NULL);
- if (ctx->engine[ring->id].state)
- return 0;
+ WARN_ON(ctx->engine[ring->id].state);
context_size = round_up(get_lr_context_size(ring), 4096);
@@ -1872,8 +1882,8 @@
ringbuf->effective_size = ringbuf->size;
ringbuf->head = 0;
ringbuf->tail = 0;
- ringbuf->space = ringbuf->size;
ringbuf->last_retired_head = -1;
+ intel_ring_update_space(ringbuf);
if (ringbuf->obj == NULL) {
ret = intel_alloc_ringbuffer_obj(dev, ringbuf);
@@ -1907,21 +1917,17 @@
if (ctx == ring->default_context)
lrc_setup_hardware_status_page(ring, ctx_obj);
-
- if (ring->id == RCS && !ctx->rcs_initialized) {
+ else if (ring->id == RCS && !ctx->rcs_initialized) {
if (ring->init_context) {
ret = ring->init_context(ring, ctx);
- if (ret)
+ if (ret) {
DRM_ERROR("ring init context: %d\n", ret);
+ ctx->engine[ring->id].ringbuf = NULL;
+ ctx->engine[ring->id].state = NULL;
+ goto error;
+ }
}
- ret = intel_lr_context_render_state_init(ring, ctx);
- if (ret) {
- DRM_ERROR("Init render state failed: %d\n", ret);
- ctx->engine[ring->id].ringbuf = NULL;
- ctx->engine[ring->id].state = NULL;
- goto error;
- }
ctx->rcs_initialized = true;
}
diff --git a/drivers/gpu/drm/i915/intel_lrc.h b/drivers/gpu/drm/i915/intel_lrc.h
index 14b216b..960fcbd 100644
--- a/drivers/gpu/drm/i915/intel_lrc.h
+++ b/drivers/gpu/drm/i915/intel_lrc.h
@@ -112,7 +112,7 @@
int elsp_submitted;
};
-void intel_execlists_handle_ctx_events(struct intel_engine_cs *ring);
+void intel_lrc_irq_handler(struct intel_engine_cs *ring);
void intel_execlists_retire_requests(struct intel_engine_cs *ring);
#endif /* _INTEL_LRC_H_ */
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index dc2f4f26..973c9de 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -182,7 +182,7 @@
u32 flip_addr;
struct drm_i915_gem_object *reg_bo;
/* flip handling */
- uint32_t last_flip_req;
+ struct drm_i915_gem_request *last_flip_req;
void (*flip_tail)(struct intel_overlay *);
};
@@ -217,17 +217,19 @@
int ret;
BUG_ON(overlay->last_flip_req);
- ret = i915_add_request(ring, &overlay->last_flip_req);
+ i915_gem_request_assign(&overlay->last_flip_req,
+ ring->outstanding_lazy_request);
+ ret = i915_add_request(ring);
if (ret)
return ret;
overlay->flip_tail = tail;
- ret = i915_wait_seqno(ring, overlay->last_flip_req);
+ ret = i915_wait_request(overlay->last_flip_req);
if (ret)
return ret;
i915_gem_retire_requests(dev);
- overlay->last_flip_req = 0;
+ i915_gem_request_assign(&overlay->last_flip_req, NULL);
return 0;
}
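
With last_flip_req now a request pointer rather than a raw seqno, submission and completion both go through the request API, and dropping the tracking reference is just an assign of NULL. A condensed sketch of the wait path above (a restatement for clarity, not new driver code):

/* Take over the engine's outstanding lazy request, submit it, wait on
 * the request object, then release our reference via the assign
 * helper. */
static int overlay_do_wait_sketch(struct intel_overlay *overlay,
				  struct intel_engine_cs *ring)
{
	int ret;

	i915_gem_request_assign(&overlay->last_flip_req,
				ring->outstanding_lazy_request);
	ret = i915_add_request(ring);
	if (ret)
		return ret;

	ret = i915_wait_request(overlay->last_flip_req);
	if (ret)
		return ret;

	/* Assigning NULL drops the reference on the request. */
	i915_gem_request_assign(&overlay->last_flip_req, NULL);
	return 0;
}
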
@@ -286,7 +288,10 @@
intel_ring_emit(ring, flip_addr);
intel_ring_advance(ring);
- return i915_add_request(ring, &overlay->last_flip_req);
+ WARN_ON(overlay->last_flip_req);
+ i915_gem_request_assign(&overlay->last_flip_req,
+ ring->outstanding_lazy_request);
+ return i915_add_request(ring);
}
static void intel_overlay_release_old_vid_tail(struct intel_overlay *overlay)
@@ -361,23 +366,20 @@
* We have to be careful not to repeat work forever and make forward progress. */
static int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay)
{
- struct drm_device *dev = overlay->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_engine_cs *ring = &dev_priv->ring[RCS];
int ret;
- if (overlay->last_flip_req == 0)
+ if (overlay->last_flip_req == NULL)
return 0;
- ret = i915_wait_seqno(ring, overlay->last_flip_req);
+ ret = i915_wait_request(overlay->last_flip_req);
if (ret)
return ret;
- i915_gem_retire_requests(dev);
+ i915_gem_retire_requests(overlay->dev);
if (overlay->flip_tail)
overlay->flip_tail(overlay);
- overlay->last_flip_req = 0;
+ i915_gem_request_assign(&overlay->last_flip_req, NULL);
return 0;
}
@@ -392,6 +394,8 @@
struct intel_engine_cs *ring = &dev_priv->ring[RCS];
int ret;
+ WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+
/* Only wait if there is actually an old frame to release to
* guarantee forward progress.
*/
@@ -422,6 +426,22 @@
return 0;
}
+void intel_overlay_reset(struct drm_i915_private *dev_priv)
+{
+ struct intel_overlay *overlay = dev_priv->overlay;
+
+ if (!overlay)
+ return;
+
+ intel_overlay_release_old_vid(overlay);
+
+ overlay->last_flip_req = NULL;
+ overlay->old_xscale = 0;
+ overlay->old_yscale = 0;
+ overlay->crtc = NULL;
+ overlay->active = false;
+}
+
struct put_image_params {
int format;
short dst_x;
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 1f4b56e..a3ebaa8 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -52,17 +52,6 @@
#define INTEL_RC6p_ENABLE (1<<1)
#define INTEL_RC6pp_ENABLE (1<<2)
-/* FBC, or Frame Buffer Compression, is a technique employed to compress the
- * framebuffer contents in-memory, aiming at reducing the required bandwidth
- * during in-memory transfers and, therefore, reduce the power packet.
- *
- * The benefits of FBC are mostly visible with solid backgrounds and
- * variation-less patterns.
- *
- * FBC-related functionality can be enabled by the means of the
- * i915.i915_enable_fbc parameter
- */
-
static void gen9_init_clock_gating(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -87,613 +76,6 @@
_MASKED_BIT_ENABLE(GEN8_4x4_STC_OPTIMIZATION_DISABLE));
}
-static void i8xx_disable_fbc(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- u32 fbc_ctl;
-
- dev_priv->fbc.enabled = false;
-
- /* Disable compression */
- fbc_ctl = I915_READ(FBC_CONTROL);
- if ((fbc_ctl & FBC_CTL_EN) == 0)
- return;
-
- fbc_ctl &= ~FBC_CTL_EN;
- I915_WRITE(FBC_CONTROL, fbc_ctl);
-
- /* Wait for compressing bit to clear */
- if (wait_for((I915_READ(FBC_STATUS) & FBC_STAT_COMPRESSING) == 0, 10)) {
- DRM_DEBUG_KMS("FBC idle timed out\n");
- return;
- }
-
- DRM_DEBUG_KMS("disabled FBC\n");
-}
-
-static void i8xx_enable_fbc(struct drm_crtc *crtc)
-{
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_framebuffer *fb = crtc->primary->fb;
- struct drm_i915_gem_object *obj = intel_fb_obj(fb);
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- int cfb_pitch;
- int i;
- u32 fbc_ctl;
-
- dev_priv->fbc.enabled = true;
-
- cfb_pitch = dev_priv->fbc.size / FBC_LL_SIZE;
- if (fb->pitches[0] < cfb_pitch)
- cfb_pitch = fb->pitches[0];
-
- /* FBC_CTL wants 32B or 64B units */
- if (IS_GEN2(dev))
- cfb_pitch = (cfb_pitch / 32) - 1;
- else
- cfb_pitch = (cfb_pitch / 64) - 1;
-
- /* Clear old tags */
- for (i = 0; i < (FBC_LL_SIZE / 32) + 1; i++)
- I915_WRITE(FBC_TAG + (i * 4), 0);
-
- if (IS_GEN4(dev)) {
- u32 fbc_ctl2;
-
- /* Set it up... */
- fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | FBC_CTL_CPU_FENCE;
- fbc_ctl2 |= FBC_CTL_PLANE(intel_crtc->plane);
- I915_WRITE(FBC_CONTROL2, fbc_ctl2);
- I915_WRITE(FBC_FENCE_OFF, crtc->y);
- }
-
- /* enable it... */
- fbc_ctl = I915_READ(FBC_CONTROL);
- fbc_ctl &= 0x3fff << FBC_CTL_INTERVAL_SHIFT;
- fbc_ctl |= FBC_CTL_EN | FBC_CTL_PERIODIC;
- if (IS_I945GM(dev))
- fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */
- fbc_ctl |= (cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT;
- fbc_ctl |= obj->fence_reg;
- I915_WRITE(FBC_CONTROL, fbc_ctl);
-
- DRM_DEBUG_KMS("enabled FBC, pitch %d, yoff %d, plane %c\n",
- cfb_pitch, crtc->y, plane_name(intel_crtc->plane));
-}
-
-static bool i8xx_fbc_enabled(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- return I915_READ(FBC_CONTROL) & FBC_CTL_EN;
-}
-
-static void g4x_enable_fbc(struct drm_crtc *crtc)
-{
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_framebuffer *fb = crtc->primary->fb;
- struct drm_i915_gem_object *obj = intel_fb_obj(fb);
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- u32 dpfc_ctl;
-
- dev_priv->fbc.enabled = true;
-
- dpfc_ctl = DPFC_CTL_PLANE(intel_crtc->plane) | DPFC_SR_EN;
- if (drm_format_plane_cpp(fb->pixel_format, 0) == 2)
- dpfc_ctl |= DPFC_CTL_LIMIT_2X;
- else
- dpfc_ctl |= DPFC_CTL_LIMIT_1X;
- dpfc_ctl |= DPFC_CTL_FENCE_EN | obj->fence_reg;
-
- I915_WRITE(DPFC_FENCE_YOFF, crtc->y);
-
- /* enable it... */
- I915_WRITE(DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
-
- DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(intel_crtc->plane));
-}
-
-static void g4x_disable_fbc(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- u32 dpfc_ctl;
-
- dev_priv->fbc.enabled = false;
-
- /* Disable compression */
- dpfc_ctl = I915_READ(DPFC_CONTROL);
- if (dpfc_ctl & DPFC_CTL_EN) {
- dpfc_ctl &= ~DPFC_CTL_EN;
- I915_WRITE(DPFC_CONTROL, dpfc_ctl);
-
- DRM_DEBUG_KMS("disabled FBC\n");
- }
-}
-
-static bool g4x_fbc_enabled(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- return I915_READ(DPFC_CONTROL) & DPFC_CTL_EN;
-}
-
-static void sandybridge_blit_fbc_update(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- u32 blt_ecoskpd;
-
- /* Make sure blitter notifies FBC of writes */
-
- /* Blitter is part of Media powerwell on VLV. No impact of
- * his param in other platforms for now */
- gen6_gt_force_wake_get(dev_priv, FORCEWAKE_MEDIA);
-
- blt_ecoskpd = I915_READ(GEN6_BLITTER_ECOSKPD);
- blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY <<
- GEN6_BLITTER_LOCK_SHIFT;
- I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
- blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY;
- I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
- blt_ecoskpd &= ~(GEN6_BLITTER_FBC_NOTIFY <<
- GEN6_BLITTER_LOCK_SHIFT);
- I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
- POSTING_READ(GEN6_BLITTER_ECOSKPD);
-
- gen6_gt_force_wake_put(dev_priv, FORCEWAKE_MEDIA);
-}
-
-static void ironlake_enable_fbc(struct drm_crtc *crtc)
-{
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_framebuffer *fb = crtc->primary->fb;
- struct drm_i915_gem_object *obj = intel_fb_obj(fb);
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- u32 dpfc_ctl;
-
- dev_priv->fbc.enabled = true;
-
- dpfc_ctl = DPFC_CTL_PLANE(intel_crtc->plane);
- if (drm_format_plane_cpp(fb->pixel_format, 0) == 2)
- dev_priv->fbc.threshold++;
-
- switch (dev_priv->fbc.threshold) {
- case 4:
- case 3:
- dpfc_ctl |= DPFC_CTL_LIMIT_4X;
- break;
- case 2:
- dpfc_ctl |= DPFC_CTL_LIMIT_2X;
- break;
- case 1:
- dpfc_ctl |= DPFC_CTL_LIMIT_1X;
- break;
- }
- dpfc_ctl |= DPFC_CTL_FENCE_EN;
- if (IS_GEN5(dev))
- dpfc_ctl |= obj->fence_reg;
-
- I915_WRITE(ILK_DPFC_FENCE_YOFF, crtc->y);
- I915_WRITE(ILK_FBC_RT_BASE, i915_gem_obj_ggtt_offset(obj) | ILK_FBC_RT_VALID);
- /* enable it... */
- I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
-
- if (IS_GEN6(dev)) {
- I915_WRITE(SNB_DPFC_CTL_SA,
- SNB_CPU_FENCE_ENABLE | obj->fence_reg);
- I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->y);
- sandybridge_blit_fbc_update(dev);
- }
-
- DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(intel_crtc->plane));
-}
-
-static void ironlake_disable_fbc(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- u32 dpfc_ctl;
-
- dev_priv->fbc.enabled = false;
-
- /* Disable compression */
- dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
- if (dpfc_ctl & DPFC_CTL_EN) {
- dpfc_ctl &= ~DPFC_CTL_EN;
- I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl);
-
- DRM_DEBUG_KMS("disabled FBC\n");
- }
-}
-
-static bool ironlake_fbc_enabled(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- return I915_READ(ILK_DPFC_CONTROL) & DPFC_CTL_EN;
-}
-
-static void gen7_enable_fbc(struct drm_crtc *crtc)
-{
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_framebuffer *fb = crtc->primary->fb;
- struct drm_i915_gem_object *obj = intel_fb_obj(fb);
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- u32 dpfc_ctl;
-
- dev_priv->fbc.enabled = true;
-
- dpfc_ctl = IVB_DPFC_CTL_PLANE(intel_crtc->plane);
- if (drm_format_plane_cpp(fb->pixel_format, 0) == 2)
- dev_priv->fbc.threshold++;
-
- switch (dev_priv->fbc.threshold) {
- case 4:
- case 3:
- dpfc_ctl |= DPFC_CTL_LIMIT_4X;
- break;
- case 2:
- dpfc_ctl |= DPFC_CTL_LIMIT_2X;
- break;
- case 1:
- dpfc_ctl |= DPFC_CTL_LIMIT_1X;
- break;
- }
-
- dpfc_ctl |= IVB_DPFC_CTL_FENCE_EN;
-
- if (dev_priv->fbc.false_color)
- dpfc_ctl |= FBC_CTL_FALSE_COLOR;
-
- I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
-
- if (IS_IVYBRIDGE(dev)) {
- /* WaFbcAsynchFlipDisableFbcQueue:ivb */
- I915_WRITE(ILK_DISPLAY_CHICKEN1,
- I915_READ(ILK_DISPLAY_CHICKEN1) |
- ILK_FBCQ_DIS);
- } else {
- /* WaFbcAsynchFlipDisableFbcQueue:hsw,bdw */
- I915_WRITE(CHICKEN_PIPESL_1(intel_crtc->pipe),
- I915_READ(CHICKEN_PIPESL_1(intel_crtc->pipe)) |
- HSW_FBCQ_DIS);
- }
-
- I915_WRITE(SNB_DPFC_CTL_SA,
- SNB_CPU_FENCE_ENABLE | obj->fence_reg);
- I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->y);
-
- sandybridge_blit_fbc_update(dev);
-
- DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(intel_crtc->plane));
-}
-
-bool intel_fbc_enabled(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- return dev_priv->fbc.enabled;
-}
-
-void bdw_fbc_sw_flush(struct drm_device *dev, u32 value)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- if (!IS_GEN8(dev))
- return;
-
- if (!intel_fbc_enabled(dev))
- return;
-
- I915_WRITE(MSG_FBC_REND_STATE, value);
-}
-
-static void intel_fbc_work_fn(struct work_struct *__work)
-{
- struct intel_fbc_work *work =
- container_of(to_delayed_work(__work),
- struct intel_fbc_work, work);
- struct drm_device *dev = work->crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- mutex_lock(&dev->struct_mutex);
- if (work == dev_priv->fbc.fbc_work) {
- /* Double check that we haven't switched fb without cancelling
- * the prior work.
- */
- if (work->crtc->primary->fb == work->fb) {
- dev_priv->display.enable_fbc(work->crtc);
-
- dev_priv->fbc.plane = to_intel_crtc(work->crtc)->plane;
- dev_priv->fbc.fb_id = work->crtc->primary->fb->base.id;
- dev_priv->fbc.y = work->crtc->y;
- }
-
- dev_priv->fbc.fbc_work = NULL;
- }
- mutex_unlock(&dev->struct_mutex);
-
- kfree(work);
-}
-
-static void intel_cancel_fbc_work(struct drm_i915_private *dev_priv)
-{
- if (dev_priv->fbc.fbc_work == NULL)
- return;
-
- DRM_DEBUG_KMS("cancelling pending FBC enable\n");
-
- /* Synchronisation is provided by struct_mutex and checking of
- * dev_priv->fbc.fbc_work, so we can perform the cancellation
- * entirely asynchronously.
- */
- if (cancel_delayed_work(&dev_priv->fbc.fbc_work->work))
- /* tasklet was killed before being run, clean up */
- kfree(dev_priv->fbc.fbc_work);
-
- /* Mark the work as no longer wanted so that if it does
- * wake-up (because the work was already running and waiting
- * for our mutex), it will discover that is no longer
- * necessary to run.
- */
- dev_priv->fbc.fbc_work = NULL;
-}
-
-static void intel_enable_fbc(struct drm_crtc *crtc)
-{
- struct intel_fbc_work *work;
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- if (!dev_priv->display.enable_fbc)
- return;
-
- intel_cancel_fbc_work(dev_priv);
-
- work = kzalloc(sizeof(*work), GFP_KERNEL);
- if (work == NULL) {
- DRM_ERROR("Failed to allocate FBC work structure\n");
- dev_priv->display.enable_fbc(crtc);
- return;
- }
-
- work->crtc = crtc;
- work->fb = crtc->primary->fb;
- INIT_DELAYED_WORK(&work->work, intel_fbc_work_fn);
-
- dev_priv->fbc.fbc_work = work;
-
- /* Delay the actual enabling to let pageflipping cease and the
- * display to settle before starting the compression. Note that
- * this delay also serves a second purpose: it allows for a
- * vblank to pass after disabling the FBC before we attempt
- * to modify the control registers.
- *
- * A more complicated solution would involve tracking vblanks
- * following the termination of the page-flipping sequence
- * and indeed performing the enable as a co-routine and not
- * waiting synchronously upon the vblank.
- *
- * WaFbcWaitForVBlankBeforeEnable:ilk,snb
- */
- schedule_delayed_work(&work->work, msecs_to_jiffies(50));
-}
-
-void intel_disable_fbc(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- intel_cancel_fbc_work(dev_priv);
-
- if (!dev_priv->display.disable_fbc)
- return;
-
- dev_priv->display.disable_fbc(dev);
- dev_priv->fbc.plane = -1;
-}
-
-static bool set_no_fbc_reason(struct drm_i915_private *dev_priv,
- enum no_fbc_reason reason)
-{
- if (dev_priv->fbc.no_fbc_reason == reason)
- return false;
-
- dev_priv->fbc.no_fbc_reason = reason;
- return true;
-}
-
-/**
- * intel_update_fbc - enable/disable FBC as needed
- * @dev: the drm_device
- *
- * Set up the framebuffer compression hardware at mode set time. We
- * enable it if possible:
- * - plane A only (on pre-965)
- * - no pixel multiply/line duplication
- * - no alpha buffer discard
- * - no dual wide
- * - framebuffer <= max_hdisplay in width, max_vdisplay in height
- *
- * We can't assume that any compression will take place (worst case),
- * so the compressed buffer has to be the same size as the uncompressed
- * one. It also must reside (along with the line length buffer) in
- * stolen memory.
- *
- * We need to enable/disable FBC on a global basis.
- */
-void intel_update_fbc(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_crtc *crtc = NULL, *tmp_crtc;
- struct intel_crtc *intel_crtc;
- struct drm_framebuffer *fb;
- struct drm_i915_gem_object *obj;
- const struct drm_display_mode *adjusted_mode;
- unsigned int max_width, max_height;
-
- if (!HAS_FBC(dev)) {
- set_no_fbc_reason(dev_priv, FBC_UNSUPPORTED);
- return;
- }
-
- if (!i915.powersave) {
- if (set_no_fbc_reason(dev_priv, FBC_MODULE_PARAM))
- DRM_DEBUG_KMS("fbc disabled per module param\n");
- return;
- }
-
- /*
- * If FBC is already on, we just have to verify that we can
- * keep it that way...
- * Need to disable if:
- * - more than one pipe is active
- * - changing FBC params (stride, fence, mode)
- * - new fb is too large to fit in compressed buffer
- * - going to an unsupported config (interlace, pixel multiply, etc.)
- */
- for_each_crtc(dev, tmp_crtc) {
- if (intel_crtc_active(tmp_crtc) &&
- to_intel_crtc(tmp_crtc)->primary_enabled) {
- if (crtc) {
- if (set_no_fbc_reason(dev_priv, FBC_MULTIPLE_PIPES))
- DRM_DEBUG_KMS("more than one pipe active, disabling compression\n");
- goto out_disable;
- }
- crtc = tmp_crtc;
- }
- }
-
- if (!crtc || crtc->primary->fb == NULL) {
- if (set_no_fbc_reason(dev_priv, FBC_NO_OUTPUT))
- DRM_DEBUG_KMS("no output, disabling\n");
- goto out_disable;
- }
-
- intel_crtc = to_intel_crtc(crtc);
- fb = crtc->primary->fb;
- obj = intel_fb_obj(fb);
- adjusted_mode = &intel_crtc->config.adjusted_mode;
-
- if (i915.enable_fbc < 0) {
- if (set_no_fbc_reason(dev_priv, FBC_CHIP_DEFAULT))
- DRM_DEBUG_KMS("disabled per chip default\n");
- goto out_disable;
- }
- if (!i915.enable_fbc) {
- if (set_no_fbc_reason(dev_priv, FBC_MODULE_PARAM))
- DRM_DEBUG_KMS("fbc disabled per module param\n");
- goto out_disable;
- }
- if ((adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) ||
- (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)) {
- if (set_no_fbc_reason(dev_priv, FBC_UNSUPPORTED_MODE))
- DRM_DEBUG_KMS("mode incompatible with compression, "
- "disabling\n");
- goto out_disable;
- }
-
- if (INTEL_INFO(dev)->gen >= 8 || IS_HASWELL(dev)) {
- max_width = 4096;
- max_height = 4096;
- } else if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
- max_width = 4096;
- max_height = 2048;
- } else {
- max_width = 2048;
- max_height = 1536;
- }
- if (intel_crtc->config.pipe_src_w > max_width ||
- intel_crtc->config.pipe_src_h > max_height) {
- if (set_no_fbc_reason(dev_priv, FBC_MODE_TOO_LARGE))
- DRM_DEBUG_KMS("mode too large for compression, disabling\n");
- goto out_disable;
- }
- if ((INTEL_INFO(dev)->gen < 4 || HAS_DDI(dev)) &&
- intel_crtc->plane != PLANE_A) {
- if (set_no_fbc_reason(dev_priv, FBC_BAD_PLANE))
- DRM_DEBUG_KMS("plane not A, disabling compression\n");
- goto out_disable;
- }
-
- /* The use of a CPU fence is mandatory in order to detect writes
- * by the CPU to the scanout and trigger updates to the FBC.
- */
- if (obj->tiling_mode != I915_TILING_X ||
- obj->fence_reg == I915_FENCE_REG_NONE) {
- if (set_no_fbc_reason(dev_priv, FBC_NOT_TILED))
- DRM_DEBUG_KMS("framebuffer not tiled or fenced, disabling compression\n");
- goto out_disable;
- }
- if (INTEL_INFO(dev)->gen <= 4 && !IS_G4X(dev) &&
- to_intel_plane(crtc->primary)->rotation != BIT(DRM_ROTATE_0)) {
- if (set_no_fbc_reason(dev_priv, FBC_UNSUPPORTED_MODE))
- DRM_DEBUG_KMS("Rotation unsupported, disabling\n");
- goto out_disable;
- }
-
- /* If the kernel debugger is active, always disable compression */
- if (in_dbg_master())
- goto out_disable;
-
- if (i915_gem_stolen_setup_compression(dev, obj->base.size,
- drm_format_plane_cpp(fb->pixel_format, 0))) {
- if (set_no_fbc_reason(dev_priv, FBC_STOLEN_TOO_SMALL))
- DRM_DEBUG_KMS("framebuffer too large, disabling compression\n");
- goto out_disable;
- }
-
- /* If the scanout has not changed, don't modify the FBC settings.
- * Note that we make the fundamental assumption that the fb->obj
- * cannot be unpinned (and have its GTT offset and fence revoked)
- * without first being decoupled from the scanout and FBC disabled.
- */
- if (dev_priv->fbc.plane == intel_crtc->plane &&
- dev_priv->fbc.fb_id == fb->base.id &&
- dev_priv->fbc.y == crtc->y)
- return;
-
- if (intel_fbc_enabled(dev)) {
- /* We update FBC along two paths, after changing fb/crtc
- * configuration (modeswitching) and after page-flipping
- * finishes. For the latter, we know that not only did
- * we disable the FBC at the start of the page-flip
- * sequence, but also more than one vblank has passed.
- *
- * For the former case of modeswitching, it is possible
- * to switch between two FBC valid configurations
- * instantaneously so we do need to disable the FBC
- * before we can modify its control registers. We also
- * have to wait for the next vblank for that to take
- * effect. However, since we delay enabling FBC we can
- * assume that a vblank has passed since disabling and
- * that we can safely alter the registers in the deferred
- * callback.
- *
- * In the scenario that we go from a valid to invalid
- * and then back to valid FBC configuration we have
- * no strict enforcement that a vblank occurred since
- * disabling the FBC. However, along all current pipe
- * disabling paths we do need to wait for a vblank at
- * some point. And we wait before enabling FBC anyway.
- */
- DRM_DEBUG_KMS("disabling active FBC for update\n");
- intel_disable_fbc(dev);
- }
-
- intel_enable_fbc(crtc);
- dev_priv->fbc.no_fbc_reason = FBC_OK;
- return;
-
-out_disable:
- /* Multiple disables should be harmless */
- if (intel_fbc_enabled(dev)) {
- DRM_DEBUG_KMS("unsupported config, disabling FBC\n");
- intel_disable_fbc(dev);
- }
- i915_gem_stolen_cleanup_compression(dev);
-}
static void i915_pineview_get_mem_freq(struct drm_device *dev)
{
@@ -3286,7 +2668,8 @@
list_for_each_entry(plane, &dev->mode_config.plane_list, head) {
struct intel_plane *intel_plane = to_intel_plane(plane);
- if (intel_plane->pipe == pipe)
+ if (intel_plane->pipe == pipe &&
+ plane->type == DRM_PLANE_TYPE_OVERLAY)
p->plane[i++] = intel_plane->wm;
}
}
@@ -3621,9 +3004,8 @@
skl_ddb_entry_size(&cur_ddb->pipe[pipe])) {
skl_wm_flush_pipe(dev_priv, pipe, 2);
intel_wait_for_vblank(dev, pipe);
+ reallocated[pipe] = true;
}
-
- reallocated[pipe] = true;
}
/*
@@ -5307,7 +4689,8 @@
I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
I915_WRITE(GEN6_RC_SLEEP, 0);
- I915_WRITE(GEN6_RC6_THRESHOLD, 50000); /* 50/125ms per EI */
+ /* TO threshold set to 1750 us ( 0x557 * 1.28 us) */
+ I915_WRITE(GEN6_RC6_THRESHOLD, 0x557);
/* allows RC6 residency counter to work */
I915_WRITE(VLV_COUNTER_CONTROL,
@@ -5321,7 +4704,7 @@
/* 3: Enable RC6 */
if ((intel_enable_rc6(dev) & INTEL_RC6_ENABLE) &&
(pcbr >> VLV_PCBR_ADDR_SHIFT))
- rc6_mode = GEN6_RC_CTL_EI_MODE(1);
+ rc6_mode = GEN7_RC_CTL_TO_MODE;
I915_WRITE(GEN6_RC_CONTROL, rc6_mode);
@@ -5681,146 +5064,27 @@
return ((m * x) / 127) - b;
}
-static u16 pvid_to_extvid(struct drm_i915_private *dev_priv, u8 pxvid)
+static int _pxvid_to_vd(u8 pxvid)
+{
+ if (pxvid == 0)
+ return 0;
+
+ if (pxvid >= 8 && pxvid < 31)
+ pxvid = 31;
+
+ return (pxvid + 2) * 125;
+}
+
+static u32 pvid_to_extvid(struct drm_i915_private *dev_priv, u8 pxvid)
{
struct drm_device *dev = dev_priv->dev;
- static const struct v_table {
- u16 vd; /* in .1 mil */
- u16 vm; /* in .1 mil */
- } v_table[] = {
- { 0, 0, },
- { 375, 0, },
- { 500, 0, },
- { 625, 0, },
- { 750, 0, },
- { 875, 0, },
- { 1000, 0, },
- { 1125, 0, },
- { 4125, 3000, },
- { 4125, 3000, },
- { 4125, 3000, },
- { 4125, 3000, },
- { 4125, 3000, },
- { 4125, 3000, },
- { 4125, 3000, },
- { 4125, 3000, },
- { 4125, 3000, },
- { 4125, 3000, },
- { 4125, 3000, },
- { 4125, 3000, },
- { 4125, 3000, },
- { 4125, 3000, },
- { 4125, 3000, },
- { 4125, 3000, },
- { 4125, 3000, },
- { 4125, 3000, },
- { 4125, 3000, },
- { 4125, 3000, },
- { 4125, 3000, },
- { 4125, 3000, },
- { 4125, 3000, },
- { 4125, 3000, },
- { 4250, 3125, },
- { 4375, 3250, },
- { 4500, 3375, },
- { 4625, 3500, },
- { 4750, 3625, },
- { 4875, 3750, },
- { 5000, 3875, },
- { 5125, 4000, },
- { 5250, 4125, },
- { 5375, 4250, },
- { 5500, 4375, },
- { 5625, 4500, },
- { 5750, 4625, },
- { 5875, 4750, },
- { 6000, 4875, },
- { 6125, 5000, },
- { 6250, 5125, },
- { 6375, 5250, },
- { 6500, 5375, },
- { 6625, 5500, },
- { 6750, 5625, },
- { 6875, 5750, },
- { 7000, 5875, },
- { 7125, 6000, },
- { 7250, 6125, },
- { 7375, 6250, },
- { 7500, 6375, },
- { 7625, 6500, },
- { 7750, 6625, },
- { 7875, 6750, },
- { 8000, 6875, },
- { 8125, 7000, },
- { 8250, 7125, },
- { 8375, 7250, },
- { 8500, 7375, },
- { 8625, 7500, },
- { 8750, 7625, },
- { 8875, 7750, },
- { 9000, 7875, },
- { 9125, 8000, },
- { 9250, 8125, },
- { 9375, 8250, },
- { 9500, 8375, },
- { 9625, 8500, },
- { 9750, 8625, },
- { 9875, 8750, },
- { 10000, 8875, },
- { 10125, 9000, },
- { 10250, 9125, },
- { 10375, 9250, },
- { 10500, 9375, },
- { 10625, 9500, },
- { 10750, 9625, },
- { 10875, 9750, },
- { 11000, 9875, },
- { 11125, 10000, },
- { 11250, 10125, },
- { 11375, 10250, },
- { 11500, 10375, },
- { 11625, 10500, },
- { 11750, 10625, },
- { 11875, 10750, },
- { 12000, 10875, },
- { 12125, 11000, },
- { 12250, 11125, },
- { 12375, 11250, },
- { 12500, 11375, },
- { 12625, 11500, },
- { 12750, 11625, },
- { 12875, 11750, },
- { 13000, 11875, },
- { 13125, 12000, },
- { 13250, 12125, },
- { 13375, 12250, },
- { 13500, 12375, },
- { 13625, 12500, },
- { 13750, 12625, },
- { 13875, 12750, },
- { 14000, 12875, },
- { 14125, 13000, },
- { 14250, 13125, },
- { 14375, 13250, },
- { 14500, 13375, },
- { 14625, 13500, },
- { 14750, 13625, },
- { 14875, 13750, },
- { 15000, 13875, },
- { 15125, 14000, },
- { 15250, 14125, },
- { 15375, 14250, },
- { 15500, 14375, },
- { 15625, 14500, },
- { 15750, 14625, },
- { 15875, 14750, },
- { 16000, 14875, },
- { 16125, 15000, },
- };
+ const int vd = _pxvid_to_vd(pxvid);
+ const int vm = vd - 1125;
+
if (INTEL_INFO(dev)->is_mobile)
- return v_table[pxvid].vm;
- else
- return v_table[pxvid].vd;
+ return vm > 0 ? vm : 0;
+
+ return vd;
}
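The hunk above replaces the 128-entry v_table lookup with a closed-form computation. A minimal standalone check (illustrative only, not part of the patch) that the closed form reproduces representative rows of the old table; the mobile vm value then follows as vd - 1125, clamped at 0:

#include <assert.h>

static int pxvid_to_vd(unsigned char pxvid)
{
	if (pxvid == 0)
		return 0;
	if (pxvid >= 8 && pxvid < 31)
		pxvid = 31;		/* the flat { 4125, 3000 } run in the table */
	return (pxvid + 2) * 125;	/* in .1 mil, as in v_table */
}

int main(void)
{
	assert(pxvid_to_vd(0) == 0);		/* row { 0, 0 } */
	assert(pxvid_to_vd(1) == 375);		/* row { 375, 0 } */
	assert(pxvid_to_vd(7) == 1125);		/* row { 1125, 0 } */
	assert(pxvid_to_vd(8) == 4125);		/* start of the plateau */
	assert(pxvid_to_vd(31) == 4125);	/* end of the plateau */
	assert(pxvid_to_vd(32) == 4250);	/* row { 4250, 3125 } */
	assert(pxvid_to_vd(127) == 16125);	/* last row { 16125, 15000 } */
	return 0;
}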
static void __i915_update_gfx_val(struct drm_i915_private *dev_priv)
@@ -6191,6 +5455,20 @@
valleyview_cleanup_gt_powersave(dev);
}
+static void gen6_suspend_rps(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ flush_delayed_work(&dev_priv->rps.delayed_resume_work);
+
+ /*
+ * TODO: disable RPS interrupts on GEN9+ too once RPS support
+ * is added for it.
+ */
+ if (INTEL_INFO(dev)->gen < 9)
+ gen6_disable_rps_interrupts(dev);
+}
+
/**
* intel_suspend_gt_powersave - suspend PM work and helper threads
* @dev: drm device
@@ -6206,14 +5484,7 @@
if (INTEL_INFO(dev)->gen < 6)
return;
- flush_delayed_work(&dev_priv->rps.delayed_resume_work);
-
- /*
- * TODO: disable RPS interrupts on GEN9+ too once RPS support
- * is added for it.
- */
- if (INTEL_INFO(dev)->gen < 9)
- gen6_disable_rps_interrupts(dev);
+ gen6_suspend_rps(dev);
/* Force GPU to min freq during suspend */
gen6_rps_idle(dev_priv);
@@ -6316,8 +5587,11 @@
{
struct drm_i915_private *dev_priv = dev->dev_private;
+ if (INTEL_INFO(dev)->gen < 6)
+ return;
+
+ gen6_suspend_rps(dev);
dev_priv->rps.enabled = false;
- intel_enable_gt_powersave(dev);
}
static void ibx_init_clock_gating(struct drm_device *dev)
@@ -7041,43 +6315,12 @@
lpt_suspend_hw(dev);
}
-static void intel_init_fbc(struct drm_i915_private *dev_priv)
-{
- if (!HAS_FBC(dev_priv)) {
- dev_priv->fbc.enabled = false;
- return;
- }
-
- if (INTEL_INFO(dev_priv)->gen >= 7) {
- dev_priv->display.fbc_enabled = ironlake_fbc_enabled;
- dev_priv->display.enable_fbc = gen7_enable_fbc;
- dev_priv->display.disable_fbc = ironlake_disable_fbc;
- } else if (INTEL_INFO(dev_priv)->gen >= 5) {
- dev_priv->display.fbc_enabled = ironlake_fbc_enabled;
- dev_priv->display.enable_fbc = ironlake_enable_fbc;
- dev_priv->display.disable_fbc = ironlake_disable_fbc;
- } else if (IS_GM45(dev_priv)) {
- dev_priv->display.fbc_enabled = g4x_fbc_enabled;
- dev_priv->display.enable_fbc = g4x_enable_fbc;
- dev_priv->display.disable_fbc = g4x_disable_fbc;
- } else {
- dev_priv->display.fbc_enabled = i8xx_fbc_enabled;
- dev_priv->display.enable_fbc = i8xx_enable_fbc;
- dev_priv->display.disable_fbc = i8xx_disable_fbc;
-
- /* This value was pulled out of someone's hat */
- I915_WRITE(FBC_CONTROL, 500 << FBC_CTL_INTERVAL_SHIFT);
- }
-
- dev_priv->fbc.enabled = dev_priv->display.fbc_enabled(dev_priv->dev);
-}
-
/* Set up chip specific power management-related functions */
void intel_init_pm(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- intel_init_fbc(dev_priv);
+ intel_fbc_init(dev_priv);
/* For cxsr */
if (IS_PINEVIEW(dev))
diff --git a/drivers/gpu/drm/i915/intel_psr.c b/drivers/gpu/drm/i915/intel_psr.c
index 716b8a9..dd0e6e0 100644
--- a/drivers/gpu/drm/i915/intel_psr.c
+++ b/drivers/gpu/drm/i915/intel_psr.c
@@ -61,14 +61,15 @@
return intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED;
}
-bool intel_psr_is_enabled(struct drm_device *dev)
+static bool vlv_is_psr_active_on_pipe(struct drm_device *dev, int pipe)
{
struct drm_i915_private *dev_priv = dev->dev_private;
+ uint32_t val;
- if (!HAS_PSR(dev))
- return false;
-
- return I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE;
+ val = I915_READ(VLV_PSRSTAT(pipe)) &
+ VLV_EDP_PSR_CURR_STATE_MASK;
+ return (val == VLV_EDP_PSR_ACTIVE_NORFB_UP) ||
+ (val == VLV_EDP_PSR_ACTIVE_SF_UPDATE);
}
static void intel_psr_write_vsc(struct intel_dp *intel_dp,
@@ -100,7 +101,23 @@
POSTING_READ(ctl_reg);
}
-static void intel_psr_setup_vsc(struct intel_dp *intel_dp)
+static void vlv_psr_setup_vsc(struct intel_dp *intel_dp)
+{
+ struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+ struct drm_device *dev = intel_dig_port->base.base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
+ enum pipe pipe = to_intel_crtc(crtc)->pipe;
+ uint32_t val;
+
+ /* VLV auto-generates the VSC packet as per the eDP 1.3 spec, Table 3.10 */
+ val = I915_READ(VLV_VSCSDP(pipe));
+ val &= ~VLV_EDP_PSR_SDP_FREQ_MASK;
+ val |= VLV_EDP_PSR_SDP_FREQ_EVFRAME;
+ I915_WRITE(VLV_VSCSDP(pipe), val);
+}
+
+static void hsw_psr_setup_vsc(struct intel_dp *intel_dp)
{
struct edp_vsc_psr psr_vsc;
@@ -113,14 +130,20 @@
intel_psr_write_vsc(intel_dp, &psr_vsc);
}
-static void intel_psr_enable_sink(struct intel_dp *intel_dp)
+static void vlv_psr_enable_sink(struct intel_dp *intel_dp)
+{
+ drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG,
+ DP_PSR_ENABLE);
+}
+
+static void hsw_psr_enable_sink(struct intel_dp *intel_dp)
{
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = dig_port->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t aux_clock_divider;
int precharge = 0x3;
- bool only_standby = false;
+ bool only_standby = dev_priv->vbt.psr.full_link;
static const uint8_t aux_msg[] = {
[0] = DP_AUX_NATIVE_WRITE << 4,
[1] = DP_SET_POWER >> 8,
@@ -157,13 +180,50 @@
(aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT));
}
-static void intel_psr_enable_source(struct intel_dp *intel_dp)
+static void vlv_psr_enable_source(struct intel_dp *intel_dp)
+{
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ struct drm_device *dev = dig_port->base.base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_crtc *crtc = dig_port->base.base.crtc;
+ enum pipe pipe = to_intel_crtc(crtc)->pipe;
+
+ /* Transition from PSR_state 0 to PSR_state 1, i.e. PSR Inactive */
+ I915_WRITE(VLV_PSRCTL(pipe),
+ VLV_EDP_PSR_MODE_SW_TIMER |
+ VLV_EDP_PSR_SRC_TRANSMITTER_STATE |
+ VLV_EDP_PSR_ENABLE);
+}
+
+static void vlv_psr_activate(struct intel_dp *intel_dp)
+{
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ struct drm_device *dev = dig_port->base.base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_crtc *crtc = dig_port->base.base.crtc;
+ enum pipe pipe = to_intel_crtc(crtc)->pipe;
+
+ /* Let's do the transition from PSR_state 1 to PSR_state 2,
+ * that is, the transition to active - static frame transmission.
+ * The hardware is then responsible for the transition to PSR_state 3,
+ * that is, PSR active - no Remote Frame Buffer (RFB) update.
+ */
+ I915_WRITE(VLV_PSRCTL(pipe), I915_READ(VLV_PSRCTL(pipe)) |
+ VLV_EDP_PSR_ACTIVE_ENTRY);
+}
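The PSR_state numbers used in these comments describe a small hardware state machine. As a reading aid, here is the same sequence as a hypothetical enum (the names are invented for illustration and do not appear in the driver):

enum vlv_psr_state {			/* hypothetical names */
	PSR_STATE_0_DISABLED,		/* PSR off */
	PSR_STATE_1_INACTIVE,		/* enabled but idle, as after enable_source */
	PSR_STATE_2_TRANSITION,		/* entering active, static frame transmission */
	PSR_STATE_3_ACTIVE_NORFB,	/* active, no Remote Frame Buffer update */
	PSR_STATE_4_ACTIVE_SF_UPDATE,	/* active with single frame update */
	PSR_STATE_5_EXIT,		/* exiting, HW returns to state 1 */
};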
+
+static void hsw_psr_enable_source(struct intel_dp *intel_dp)
{
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = dig_port->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t max_sleep_time = 0x1f;
- uint32_t idle_frames = 1;
+ /* It was recently identified that, depending on the panel, the idle
+ * frame count calculated by the HW can be off by 1. So let's use the
+ * value from VBT + 1, and at minimum 2, to be on the safe side.
+ */
+ uint32_t idle_frames = dev_priv->vbt.psr.idle_frames ?
+ dev_priv->vbt.psr.idle_frames + 1 : 2;
uint32_t val = 0x0;
const uint32_t link_entry_time = EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES;
bool only_standby = false;
@@ -176,7 +236,6 @@
val |= EDP_PSR_TP2_TP3_TIME_0us;
val |= EDP_PSR_TP1_TIME_0us;
val |= EDP_PSR_SKIP_AUX_EXIT;
- val |= IS_BROADWELL(dev) ? BDW_PSR_SINGLE_FRAME : 0;
} else
val |= EDP_PSR_LINK_DISABLE;
@@ -231,7 +290,7 @@
return true;
}
-static void intel_psr_do_enable(struct intel_dp *intel_dp)
+static void intel_psr_activate(struct intel_dp *intel_dp)
{
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = intel_dig_port->base.base.dev;
@@ -242,7 +301,14 @@
lockdep_assert_held(&dev_priv->psr.lock);
/* Enable/Re-enable PSR on the host */
- intel_psr_enable_source(intel_dp);
+ if (HAS_DDI(dev))
+ /* On HSW+, once we enable PSR on the source it activates
+ * as soon as the configured idle_frame count is reached. So
+ * we actually enable it here, at activation time.
+ */
+ hsw_psr_enable_source(intel_dp);
+ else
+ vlv_psr_activate(intel_dp);
dev_priv->psr.active = true;
}
@@ -280,20 +346,83 @@
dev_priv->psr.busy_frontbuffer_bits = 0;
- intel_psr_setup_vsc(intel_dp);
+ if (HAS_DDI(dev)) {
+ hsw_psr_setup_vsc(intel_dp);
- /* Avoid continuous PSR exit by masking memup and hpd */
- I915_WRITE(EDP_PSR_DEBUG_CTL(dev), EDP_PSR_DEBUG_MASK_MEMUP |
- EDP_PSR_DEBUG_MASK_HPD | EDP_PSR_DEBUG_MASK_LPSP);
+ /* Avoid continuous PSR exit by masking memup and hpd */
+ I915_WRITE(EDP_PSR_DEBUG_CTL(dev), EDP_PSR_DEBUG_MASK_MEMUP |
+ EDP_PSR_DEBUG_MASK_HPD | EDP_PSR_DEBUG_MASK_LPSP);
- /* Enable PSR on the panel */
- intel_psr_enable_sink(intel_dp);
+ /* Enable PSR on the panel */
+ hsw_psr_enable_sink(intel_dp);
+ } else {
+ vlv_psr_setup_vsc(intel_dp);
+
+ /* Enable PSR on the panel */
+ vlv_psr_enable_sink(intel_dp);
+
+ /* On HSW+, enable_source also means entering the PSR entry/active
+ * state as soon as the idle_frame count is reached, which would be
+ * too soon here. On VLV, however, enable_source just enables PSR
+ * and leaves it in the inactive state. So we can safely do this
+ * prior to the active transition, i.e. here.
+ */
+ vlv_psr_enable_source(intel_dp);
+ }
dev_priv->psr.enabled = intel_dp;
unlock:
mutex_unlock(&dev_priv->psr.lock);
}
+static void vlv_psr_disable(struct intel_dp *intel_dp)
+{
+ struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+ struct drm_device *dev = intel_dig_port->base.base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc =
+ to_intel_crtc(intel_dig_port->base.base.crtc);
+ uint32_t val;
+
+ if (dev_priv->psr.active) {
+ /* Put VLV PSR back to PSR_state 0, i.e. PSR disabled. */
+ if (wait_for((I915_READ(VLV_PSRSTAT(intel_crtc->pipe)) &
+ VLV_EDP_PSR_IN_TRANS) == 0, 1))
+ WARN(1, "PSR transition took longer than expected\n");
+
+ val = I915_READ(VLV_PSRCTL(intel_crtc->pipe));
+ val &= ~VLV_EDP_PSR_ACTIVE_ENTRY;
+ val &= ~VLV_EDP_PSR_ENABLE;
+ val &= ~VLV_EDP_PSR_MODE_MASK;
+ I915_WRITE(VLV_PSRCTL(intel_crtc->pipe), val);
+
+ dev_priv->psr.active = false;
+ } else {
+ WARN_ON(vlv_is_psr_active_on_pipe(dev, intel_crtc->pipe));
+ }
+}
+
+static void hsw_psr_disable(struct intel_dp *intel_dp)
+{
+ struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+ struct drm_device *dev = intel_dig_port->base.base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (dev_priv->psr.active) {
+ I915_WRITE(EDP_PSR_CTL(dev),
+ I915_READ(EDP_PSR_CTL(dev)) & ~EDP_PSR_ENABLE);
+
+ /* Wait till PSR is idle */
+ if (_wait_for((I915_READ(EDP_PSR_STATUS_CTL(dev)) &
+ EDP_PSR_STATUS_STATE_MASK) == 0, 2000, 10))
+ DRM_ERROR("Timed out waiting for PSR Idle State\n");
+
+ dev_priv->psr.active = false;
+ } else {
+ WARN_ON(I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE);
+ }
+}
+
/**
* intel_psr_disable - Disable PSR
* @intel_dp: Intel DP
@@ -312,19 +441,10 @@
return;
}
- if (dev_priv->psr.active) {
- I915_WRITE(EDP_PSR_CTL(dev),
- I915_READ(EDP_PSR_CTL(dev)) & ~EDP_PSR_ENABLE);
-
- /* Wait till PSR is idle */
- if (_wait_for((I915_READ(EDP_PSR_STATUS_CTL(dev)) &
- EDP_PSR_STATUS_STATE_MASK) == 0, 2000, 10))
- DRM_ERROR("Timed out waiting for PSR Idle State\n");
-
- dev_priv->psr.active = false;
- } else {
- WARN_ON(I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE);
- }
+ if (HAS_DDI(dev))
+ hsw_psr_disable(intel_dp);
+ else
+ vlv_psr_disable(intel_dp);
dev_priv->psr.enabled = NULL;
mutex_unlock(&dev_priv->psr.lock);
@@ -337,18 +457,27 @@
struct drm_i915_private *dev_priv =
container_of(work, typeof(*dev_priv), psr.work.work);
struct intel_dp *intel_dp = dev_priv->psr.enabled;
+ struct drm_crtc *crtc = dp_to_dig_port(intel_dp)->base.base.crtc;
+ enum pipe pipe = to_intel_crtc(crtc)->pipe;
/* We have to make sure PSR is ready for re-enable
* otherwise it stays disabled until the next full enable/disable cycle.
* PSR might take some time to get fully disabled
* and be ready for re-enable.
*/
- if (wait_for((I915_READ(EDP_PSR_STATUS_CTL(dev_priv->dev)) &
- EDP_PSR_STATUS_STATE_MASK) == 0, 50)) {
- DRM_ERROR("Timed out waiting for PSR Idle for re-enable\n");
- return;
+ if (HAS_DDI(dev_priv->dev)) {
+ if (wait_for((I915_READ(EDP_PSR_STATUS_CTL(dev_priv->dev)) &
+ EDP_PSR_STATUS_STATE_MASK) == 0, 50)) {
+ DRM_ERROR("Timed out waiting for PSR Idle for re-enable\n");
+ return;
+ }
+ } else {
+ if (wait_for((I915_READ(VLV_PSRSTAT(pipe)) &
+ VLV_EDP_PSR_IN_TRANS) == 0, 1)) {
+ DRM_ERROR("Timed out waiting for PSR Idle for re-enable\n");
+ return;
+ }
}
-
mutex_lock(&dev_priv->psr.lock);
intel_dp = dev_priv->psr.enabled;
@@ -363,7 +492,7 @@
if (dev_priv->psr.busy_frontbuffer_bits)
goto unlock;
- intel_psr_do_enable(intel_dp);
+ intel_psr_activate(intel_dp);
unlock:
mutex_unlock(&dev_priv->psr.lock);
}
@@ -371,17 +500,47 @@
static void intel_psr_exit(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_dp *intel_dp = dev_priv->psr.enabled;
+ struct drm_crtc *crtc = dp_to_dig_port(intel_dp)->base.base.crtc;
+ enum pipe pipe = to_intel_crtc(crtc)->pipe;
+ u32 val;
- if (dev_priv->psr.active) {
- u32 val = I915_READ(EDP_PSR_CTL(dev));
+ if (!dev_priv->psr.active)
+ return;
+
+ if (HAS_DDI(dev)) {
+ val = I915_READ(EDP_PSR_CTL(dev));
WARN_ON(!(val & EDP_PSR_ENABLE));
I915_WRITE(EDP_PSR_CTL(dev), val & ~EDP_PSR_ENABLE);
dev_priv->psr.active = false;
+ } else {
+ val = I915_READ(VLV_PSRCTL(pipe));
+
+ /* Here we transition directly from PSR_state 3 to PSR_state 5,
+ * since PSR_state 4 (active with single frame update) can be
+ * skipped. From PSR_state 5 (PSR exit) the hardware is then
+ * responsible for transitioning back to PSR_state 1 (PSR
+ * inactive) - the same state as after vlv_psr_enable_source.
+ */
+ val &= ~VLV_EDP_PSR_ACTIVE_ENTRY;
+ I915_WRITE(VLV_PSRCTL(pipe), val);
+
+ /* Send AUX wake up - the spec says that after transitioning to
+ * PSR active we have to send an AUX wake up by writing 01h to
+ * DPCD 600h of the sink device.
+ * XXX: This might slow down the transition, but without this
+ * HW doesn't complete the transition to PSR_state 1 and we
+ * never get the screen updated.
+ */
+ drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
+ DP_SET_POWER_D0);
}
+ dev_priv->psr.active = false;
}
/**
@@ -459,6 +618,17 @@
(frontbuffer_bits & INTEL_FRONTBUFFER_SPRITE(pipe)))
intel_psr_exit(dev);
+ /*
+ * On Valleyview and Cherryview we don't use hardware tracking, so
+ * sprite plane updates and cursor moves don't result in a PSR
+ * invalidation. This means we need to fake it in software for all
+ * flushes, not just when we've seen a preceding invalidation
+ * through frontbuffer rendering. */
+ if (!HAS_DDI(dev) &&
+ ((frontbuffer_bits & INTEL_FRONTBUFFER_SPRITE(pipe)) ||
+ (frontbuffer_bits & INTEL_FRONTBUFFER_CURSOR(pipe))))
+ intel_psr_exit(dev);
+
if (!dev_priv->psr.active && !dev_priv->psr.busy_frontbuffer_bits)
schedule_delayed_work(&dev_priv->psr.work,
msecs_to_jiffies(100));
diff --git a/drivers/gpu/drm/i915/intel_renderstate_gen6.c b/drivers/gpu/drm/i915/intel_renderstate_gen6.c
index 56c1429..11c8e7b 100644
--- a/drivers/gpu/drm/i915/intel_renderstate_gen6.c
+++ b/drivers/gpu/drm/i915/intel_renderstate_gen6.c
@@ -1,3 +1,28 @@
+/*
+ * Copyright © 2014 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Generated by: intel-gpu-tools-1.8-220-g01153e7
+ */
+
#include "intel_renderstate.h"
static const u32 gen6_null_state_relocs[] = {
diff --git a/drivers/gpu/drm/i915/intel_renderstate_gen7.c b/drivers/gpu/drm/i915/intel_renderstate_gen7.c
index 419e35a..6551806 100644
--- a/drivers/gpu/drm/i915/intel_renderstate_gen7.c
+++ b/drivers/gpu/drm/i915/intel_renderstate_gen7.c
@@ -1,3 +1,28 @@
+/*
+ * Copyright © 2014 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Generated by: intel-gpu-tools-1.8-220-g01153e7
+ */
+
#include "intel_renderstate.h"
static const u32 gen7_null_state_relocs[] = {
diff --git a/drivers/gpu/drm/i915/intel_renderstate_gen8.c b/drivers/gpu/drm/i915/intel_renderstate_gen8.c
index 78011d7..95288a3 100644
--- a/drivers/gpu/drm/i915/intel_renderstate_gen8.c
+++ b/drivers/gpu/drm/i915/intel_renderstate_gen8.c
@@ -1,3 +1,28 @@
+/*
+ * Copyright © 2014 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Generated by: intel-gpu-tools-1.8-220-g01153e7
+ */
+
#include "intel_renderstate.h"
static const u32 gen8_null_state_relocs[] = {
diff --git a/drivers/gpu/drm/i915/intel_renderstate_gen9.c b/drivers/gpu/drm/i915/intel_renderstate_gen9.c
index 8750753..16a7ec2 100644
--- a/drivers/gpu/drm/i915/intel_renderstate_gen9.c
+++ b/drivers/gpu/drm/i915/intel_renderstate_gen9.c
@@ -1,3 +1,28 @@
+/*
+ * Copyright © 2014 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Generated by: intel-gpu-tools-1.8-220-g01153e7
+ */
+
#include "intel_renderstate.h"
static const u32 gen9_null_state_relocs[] = {
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 9f445e9..12a36f0 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -52,16 +52,27 @@
int __intel_ring_space(int head, int tail, int size)
{
- int space = head - (tail + I915_RING_FREE_SPACE);
- if (space < 0)
+ int space = head - tail;
+ if (space <= 0)
space += size;
- return space;
+ return space - I915_RING_FREE_SPACE;
+}
+
+void intel_ring_update_space(struct intel_ringbuffer *ringbuf)
+{
+ if (ringbuf->last_retired_head != -1) {
+ ringbuf->head = ringbuf->last_retired_head;
+ ringbuf->last_retired_head = -1;
+ }
+
+ ringbuf->space = __intel_ring_space(ringbuf->head & HEAD_ADDR,
+ ringbuf->tail, ringbuf->size);
}
int intel_ring_space(struct intel_ringbuffer *ringbuf)
{
- return __intel_ring_space(ringbuf->head & HEAD_ADDR,
- ringbuf->tail, ringbuf->size);
+ intel_ring_update_space(ringbuf);
+ return ringbuf->space;
}
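For reference, the reworked space computation handles the wrap-around case first and subtracts the reserved headroom last. A self-contained sketch of the arithmetic, assuming a hypothetical 256-byte ring that reserves 64 bytes (the actual I915_RING_FREE_SPACE value is defined elsewhere in the driver):

static int ring_space(int head, int tail, int size, int reserve)
{
	int space = head - tail;

	if (space <= 0)		/* tail is at or past head: wrapped */
		space += size;
	return space - reserve;
}

/* ring_space(0, 0, 256, 64) == 192: an empty ring reports size - reserve */
/* ring_space(128, 64, 256, 64) == 0: 64 bytes queued leaves no headroom */
/* ring_space(64, 128, 256, 64) == 128: the wrapped case */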
bool intel_ring_stopped(struct intel_engine_cs *ring)
@@ -362,12 +373,15 @@
flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
+ flags |= PIPE_CONTROL_MEDIA_STATE_CLEAR;
/*
* TLB invalidate requires a post-sync write.
*/
flags |= PIPE_CONTROL_QW_WRITE;
flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;
+ flags |= PIPE_CONTROL_STALL_AT_SCOREBOARD;
+
/* Workaround: we must issue a pipe_control with CS-stall bit
* set before a pipe_control command that has the state cache
* invalidate bit set. */
@@ -589,10 +603,10 @@
goto out;
}
+ ringbuf->last_retired_head = -1;
ringbuf->head = I915_READ_HEAD(ring);
ringbuf->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
- ringbuf->space = intel_ring_space(ringbuf);
- ringbuf->last_retired_head = -1;
+ intel_ring_update_space(ringbuf);
memset(&ring->hangcheck, 0, sizeof(ring->hangcheck));
@@ -624,8 +638,7 @@
{
int ret;
- if (ring->scratch.obj)
- return 0;
+ WARN_ON(ring->scratch.obj);
ring->scratch.obj = i915_gem_alloc_object(ring->dev, 4096);
if (ring->scratch.obj == NULL) {
@@ -669,7 +682,7 @@
struct drm_i915_private *dev_priv = dev->dev_private;
struct i915_workarounds *w = &dev_priv->workarounds;
- if (WARN_ON(w->count == 0))
+ if (WARN_ON_ONCE(w->count == 0))
return 0;
ring->gpu_caches_dirty = true;
@@ -700,6 +713,22 @@
return 0;
}
+static int intel_rcs_ctx_init(struct intel_engine_cs *ring,
+ struct intel_context *ctx)
+{
+ int ret;
+
+ ret = intel_ring_workarounds_emit(ring, ctx);
+ if (ret != 0)
+ return ret;
+
+ ret = i915_gem_render_state_init(ring);
+ if (ret)
+ DRM_ERROR("init render state: %d\n", ret);
+
+ return ret;
+}
+
static int wa_add(struct drm_i915_private *dev_priv,
const u32 addr, const u32 mask, const u32 val)
{
@@ -759,9 +788,12 @@
* workaround for a possible hang in the unlikely event a TLB
* invalidation occurs during a PSD flush.
*/
+ /* WaForceEnableNonCoherent:bdw */
+ /* WaHdcDisableFetchWhenMasked:bdw */
/* WaDisableFenceDestinationToSLM:bdw (GT3 pre-production) */
WA_SET_BIT_MASKED(HDC_CHICKEN0,
HDC_FORCE_NON_COHERENT |
+ HDC_DONOT_FETCH_MEM_WHEN_MASKED |
(IS_BDW_GT3(dev) ? HDC_FENCE_DEST_SLM_DISABLE : 0));
/* Wa4x4STCOptimizationDisable:bdw */
@@ -858,12 +890,6 @@
_MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_EXPLICIT) |
_MASKED_BIT_ENABLE(GFX_REPLAY_MODE));
- if (INTEL_INFO(dev)->gen >= 5) {
- ret = intel_init_pipe_control(ring);
- if (ret)
- return ret;
- }
-
if (IS_GEN6(dev)) {
/* From the Sandybridge PRM, volume 1 part 3, page 24:
* "If this bit is set, STCunit will have LRA as replacement
@@ -915,17 +941,20 @@
return ret;
for_each_ring(waiter, dev_priv, i) {
+ u32 seqno;
u64 gtt_offset = signaller->semaphore.signal_ggtt[i];
if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID)
continue;
+ seqno = i915_gem_request_get_seqno(
+ signaller->outstanding_lazy_request);
intel_ring_emit(signaller, GFX_OP_PIPE_CONTROL(6));
intel_ring_emit(signaller, PIPE_CONTROL_GLOBAL_GTT_IVB |
PIPE_CONTROL_QW_WRITE |
PIPE_CONTROL_FLUSH_ENABLE);
intel_ring_emit(signaller, lower_32_bits(gtt_offset));
intel_ring_emit(signaller, upper_32_bits(gtt_offset));
- intel_ring_emit(signaller, signaller->outstanding_lazy_seqno);
+ intel_ring_emit(signaller, seqno);
intel_ring_emit(signaller, 0);
intel_ring_emit(signaller, MI_SEMAPHORE_SIGNAL |
MI_SEMAPHORE_TARGET(waiter->id));
@@ -953,16 +982,19 @@
return ret;
for_each_ring(waiter, dev_priv, i) {
+ u32 seqno;
u64 gtt_offset = signaller->semaphore.signal_ggtt[i];
if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID)
continue;
+ seqno = i915_gem_request_get_seqno(
+ signaller->outstanding_lazy_request);
intel_ring_emit(signaller, (MI_FLUSH_DW + 1) |
MI_FLUSH_DW_OP_STOREDW);
intel_ring_emit(signaller, lower_32_bits(gtt_offset) |
MI_FLUSH_DW_USE_GTT);
intel_ring_emit(signaller, upper_32_bits(gtt_offset));
- intel_ring_emit(signaller, signaller->outstanding_lazy_seqno);
+ intel_ring_emit(signaller, seqno);
intel_ring_emit(signaller, MI_SEMAPHORE_SIGNAL |
MI_SEMAPHORE_TARGET(waiter->id));
intel_ring_emit(signaller, 0);
@@ -991,9 +1023,11 @@
for_each_ring(useless, dev_priv, i) {
u32 mbox_reg = signaller->semaphore.mbox.signal[i];
if (mbox_reg != GEN6_NOSYNC) {
+ u32 seqno = i915_gem_request_get_seqno(
+ signaller->outstanding_lazy_request);
intel_ring_emit(signaller, MI_LOAD_REGISTER_IMM(1));
intel_ring_emit(signaller, mbox_reg);
- intel_ring_emit(signaller, signaller->outstanding_lazy_seqno);
+ intel_ring_emit(signaller, seqno);
}
}
@@ -1028,7 +1062,8 @@
intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
- intel_ring_emit(ring, ring->outstanding_lazy_seqno);
+ intel_ring_emit(ring,
+ i915_gem_request_get_seqno(ring->outstanding_lazy_request));
intel_ring_emit(ring, MI_USER_INTERRUPT);
__intel_ring_advance(ring);
@@ -1146,7 +1181,8 @@
PIPE_CONTROL_WRITE_FLUSH |
PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE);
intel_ring_emit(ring, ring->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
- intel_ring_emit(ring, ring->outstanding_lazy_seqno);
+ intel_ring_emit(ring,
+ i915_gem_request_get_seqno(ring->outstanding_lazy_request));
intel_ring_emit(ring, 0);
PIPE_CONTROL_FLUSH(ring, scratch_addr);
scratch_addr += 2 * CACHELINE_BYTES; /* write to separate cachelines */
@@ -1165,7 +1201,8 @@
PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
PIPE_CONTROL_NOTIFY);
intel_ring_emit(ring, ring->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
- intel_ring_emit(ring, ring->outstanding_lazy_seqno);
+ intel_ring_emit(ring,
+ i915_gem_request_get_seqno(ring->outstanding_lazy_request));
intel_ring_emit(ring, 0);
__intel_ring_advance(ring);
@@ -1405,7 +1442,8 @@
intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
- intel_ring_emit(ring, ring->outstanding_lazy_seqno);
+ intel_ring_emit(ring,
+ i915_gem_request_get_seqno(ring->outstanding_lazy_request));
intel_ring_emit(ring, MI_USER_INTERRUPT);
__intel_ring_advance(ring);
@@ -1786,15 +1824,15 @@
static int intel_init_ring_buffer(struct drm_device *dev,
struct intel_engine_cs *ring)
{
- struct intel_ringbuffer *ringbuf = ring->buffer;
+ struct intel_ringbuffer *ringbuf;
int ret;
- if (ringbuf == NULL) {
- ringbuf = kzalloc(sizeof(*ringbuf), GFP_KERNEL);
- if (!ringbuf)
- return -ENOMEM;
- ring->buffer = ringbuf;
- }
+ WARN_ON(ring->buffer);
+
+ ringbuf = kzalloc(sizeof(*ringbuf), GFP_KERNEL);
+ if (!ringbuf)
+ return -ENOMEM;
+ ring->buffer = ringbuf;
ring->dev = dev;
INIT_LIST_HEAD(&ring->active_list);
@@ -1817,21 +1855,21 @@
goto error;
}
- if (ringbuf->obj == NULL) {
- ret = intel_alloc_ringbuffer_obj(dev, ringbuf);
- if (ret) {
- DRM_ERROR("Failed to allocate ringbuffer %s: %d\n",
- ring->name, ret);
- goto error;
- }
+ WARN_ON(ringbuf->obj);
- ret = intel_pin_and_map_ringbuffer_obj(dev, ringbuf);
- if (ret) {
- DRM_ERROR("Failed to pin and map ringbuffer %s: %d\n",
- ring->name, ret);
- intel_destroy_ringbuffer_obj(ringbuf);
- goto error;
- }
+ ret = intel_alloc_ringbuffer_obj(dev, ringbuf);
+ if (ret) {
+ DRM_ERROR("Failed to allocate ringbuffer %s: %d\n",
+ ring->name, ret);
+ goto error;
+ }
+
+ ret = intel_pin_and_map_ringbuffer_obj(dev, ringbuf);
+ if (ret) {
+ DRM_ERROR("Failed to pin and map ringbuffer %s: %d\n",
+ ring->name, ret);
+ intel_destroy_ringbuffer_obj(ringbuf);
+ goto error;
}
/* Workaround an erratum on the i830 which causes a hang if
@@ -1846,10 +1884,6 @@
if (ret)
goto error;
- ret = ring->init(ring);
- if (ret)
- goto error;
-
return 0;
error:
@@ -1874,8 +1908,7 @@
intel_unpin_ringbuffer_obj(ringbuf);
intel_destroy_ringbuffer_obj(ringbuf);
- ring->preallocated_lazy_request = NULL;
- ring->outstanding_lazy_seqno = 0;
+ i915_gem_request_assign(&ring->outstanding_lazy_request, NULL);
if (ring->cleanup)
ring->cleanup(ring);
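The i915_gem_request_assign() call above replaces the old seqno/preallocated-request pair with reference-counted request tracking. Presumably the helper is a reference-swapping assignment along these lines (a sketch under that assumption, not the driver's actual definition):

static inline void
i915_gem_request_assign(struct drm_i915_gem_request **pdst,
			struct drm_i915_gem_request *src)
{
	if (src)
		i915_gem_request_reference(src);	/* take the new ref first */
	if (*pdst)
		i915_gem_request_unreference(*pdst);	/* then drop the old one */
	*pdst = src;
}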
@@ -1892,38 +1925,27 @@
{
struct intel_ringbuffer *ringbuf = ring->buffer;
struct drm_i915_gem_request *request;
- u32 seqno = 0;
int ret;
- if (ringbuf->last_retired_head != -1) {
- ringbuf->head = ringbuf->last_retired_head;
- ringbuf->last_retired_head = -1;
-
- ringbuf->space = intel_ring_space(ringbuf);
- if (ringbuf->space >= n)
- return 0;
- }
+ if (intel_ring_space(ringbuf) >= n)
+ return 0;
list_for_each_entry(request, &ring->request_list, list) {
if (__intel_ring_space(request->tail, ringbuf->tail,
ringbuf->size) >= n) {
- seqno = request->seqno;
break;
}
}
- if (seqno == 0)
+ if (&request->list == &ring->request_list)
return -ENOSPC;
- ret = i915_wait_seqno(ring, seqno);
+ ret = i915_wait_request(request);
if (ret)
return ret;
i915_gem_retire_requests_ring(ring);
- ringbuf->head = ringbuf->last_retired_head;
- ringbuf->last_retired_head = -1;
- ringbuf->space = intel_ring_space(ringbuf);
return 0;
}
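The -ENOSPC test above relies on the standard list_for_each_entry() termination idiom: when the loop runs to completion without hitting break, the cursor's list member points back at the list head itself. A minimal sketch of the pattern (request_frees_enough() is a hypothetical stand-in for the space check):

	list_for_each_entry(request, &ring->request_list, list) {
		if (request_frees_enough(request, n))	/* hypothetical */
			break;
	}
	if (&request->list == &ring->request_list)	/* walked the whole list */
		return -ENOSPC;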
@@ -1949,14 +1971,14 @@
* case by choosing an insanely large timeout. */
end = jiffies + 60 * HZ;
+ ret = 0;
trace_i915_ring_wait_begin(ring);
do {
- ringbuf->head = I915_READ_HEAD(ring);
- ringbuf->space = intel_ring_space(ringbuf);
- if (ringbuf->space >= n) {
- ret = 0;
+ if (intel_ring_space(ringbuf) >= n)
break;
- }
+ ringbuf->head = I915_READ_HEAD(ring);
+ if (intel_ring_space(ringbuf) >= n)
+ break;
msleep(1);
@@ -1997,19 +2019,19 @@
iowrite32(MI_NOOP, virt++);
ringbuf->tail = 0;
- ringbuf->space = intel_ring_space(ringbuf);
+ intel_ring_update_space(ringbuf);
return 0;
}
int intel_ring_idle(struct intel_engine_cs *ring)
{
- u32 seqno;
+ struct drm_i915_gem_request *req;
int ret;
/* We need to add any requests required to flush the objects and ring */
- if (ring->outstanding_lazy_seqno) {
- ret = i915_add_request(ring, NULL);
+ if (ring->outstanding_lazy_request) {
+ ret = i915_add_request(ring);
if (ret)
return ret;
}
@@ -2018,30 +2040,39 @@
if (list_empty(&ring->request_list))
return 0;
- seqno = list_entry(ring->request_list.prev,
+ req = list_entry(ring->request_list.prev,
struct drm_i915_gem_request,
- list)->seqno;
+ list);
- return i915_wait_seqno(ring, seqno);
+ return i915_wait_request(req);
}
static int
-intel_ring_alloc_seqno(struct intel_engine_cs *ring)
+intel_ring_alloc_request(struct intel_engine_cs *ring)
{
- if (ring->outstanding_lazy_seqno)
+ int ret;
+ struct drm_i915_gem_request *request;
+ struct drm_i915_private *dev_private = ring->dev->dev_private;
+
+ if (ring->outstanding_lazy_request)
return 0;
- if (ring->preallocated_lazy_request == NULL) {
- struct drm_i915_gem_request *request;
+ request = kzalloc(sizeof(*request), GFP_KERNEL);
+ if (request == NULL)
+ return -ENOMEM;
- request = kmalloc(sizeof(*request), GFP_KERNEL);
- if (request == NULL)
- return -ENOMEM;
+ kref_init(&request->ref);
+ request->ring = ring;
+ request->uniq = dev_private->request_uniq++;
- ring->preallocated_lazy_request = request;
+ ret = i915_gem_get_seqno(ring->dev, &request->seqno);
+ if (ret) {
+ kfree(request);
+ return ret;
}
- return i915_gem_get_seqno(ring->dev, &ring->outstanding_lazy_seqno);
+ ring->outstanding_lazy_request = request;
+ return 0;
}
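Each request is now kref-initialized at allocation, so its lifetime is managed by the usual kref pattern rather than by the ring. A generic sketch of that lifecycle, assuming a release callback named i915_gem_request_free() (the name is an assumption here):

static void i915_gem_request_free(struct kref *req_ref)	/* assumed name */
{
	struct drm_i915_gem_request *req =
		container_of(req_ref, typeof(*req), ref);

	kfree(req);
}

/* Taking and dropping references elsewhere: */
kref_get(&request->ref);
kref_put(&request->ref, i915_gem_request_free);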
static int __intel_ring_prepare(struct intel_engine_cs *ring,
@@ -2081,7 +2112,7 @@
return ret;
/* Preallocate the olr before touching the ring */
- ret = intel_ring_alloc_seqno(ring);
+ ret = intel_ring_alloc_request(ring);
if (ret)
return ret;
@@ -2116,7 +2147,7 @@
struct drm_device *dev = ring->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- BUG_ON(ring->outstanding_lazy_seqno);
+ BUG_ON(ring->outstanding_lazy_request);
if (INTEL_INFO(dev)->gen == 6 || INTEL_INFO(dev)->gen == 7) {
I915_WRITE(RING_SYNC_0(ring->mmio_base), 0);
@@ -2338,7 +2369,7 @@
}
}
- ring->init_context = intel_ring_workarounds_emit;
+ ring->init_context = intel_rcs_ctx_init;
ring->add_request = gen6_add_request;
ring->flush = gen8_render_ring_flush;
ring->irq_get = gen8_ring_get_irq;
@@ -2423,7 +2454,7 @@
ring->dispatch_execbuffer = i830_dispatch_execbuffer;
else
ring->dispatch_execbuffer = i915_dispatch_execbuffer;
- ring->init = init_render_ring;
+ ring->init_hw = init_render_ring;
ring->cleanup = render_ring_cleanup;
/* Workaround batchbuffer to combat CS tlb bug. */
@@ -2445,7 +2476,17 @@
ring->scratch.gtt_offset = i915_gem_obj_ggtt_offset(obj);
}
- return intel_init_ring_buffer(dev, ring);
+ ret = intel_init_ring_buffer(dev, ring);
+ if (ret)
+ return ret;
+
+ if (INTEL_INFO(dev)->gen >= 5) {
+ ret = intel_init_pipe_control(ring);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
}
int intel_init_bsd_ring_buffer(struct drm_device *dev)
@@ -2516,7 +2557,7 @@
}
ring->dispatch_execbuffer = i965_dispatch_execbuffer;
}
- ring->init = init_ring_common;
+ ring->init_hw = init_ring_common;
return intel_init_ring_buffer(dev, ring);
}
@@ -2555,7 +2596,7 @@
ring->semaphore.signal = gen8_xcs_signal;
GEN8_RING_SEMAPHORE_INIT;
}
- ring->init = init_ring_common;
+ ring->init_hw = init_ring_common;
return intel_init_ring_buffer(dev, ring);
}
@@ -2612,7 +2653,7 @@
ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
}
}
- ring->init = init_ring_common;
+ ring->init_hw = init_ring_common;
return intel_init_ring_buffer(dev, ring);
}
@@ -2663,7 +2704,7 @@
ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
}
}
- ring->init = init_ring_common;
+ ring->init_hw = init_ring_common;
return intel_init_ring_buffer(dev, ring);
}
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index fe426cf..6dbb6f4 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -142,11 +142,11 @@
unsigned irq_refcount; /* protected by dev_priv->irq_lock */
u32 irq_enable_mask; /* bitmask to enable ring interrupt */
- u32 trace_irq_seqno;
+ struct drm_i915_gem_request *trace_irq_req;
bool __must_check (*irq_get)(struct intel_engine_cs *ring);
void (*irq_put)(struct intel_engine_cs *ring);
- int (*init)(struct intel_engine_cs *ring);
+ int (*init_hw)(struct intel_engine_cs *ring);
int (*init_context)(struct intel_engine_cs *ring,
struct intel_context *ctx);
@@ -251,7 +251,7 @@
* ringbuffer.
*
* Includes buffers having the contents of their GPU caches
- * flushed, not necessarily primitives. last_rendering_seqno
+ * flushed, not necessarily primitives. last_read_req
* represents when the rendering involved will be completed.
*
* A reference is held on the buffer while on this list.
@@ -267,8 +267,7 @@
/**
* Do we have some not yet emitted requests outstanding?
*/
- struct drm_i915_gem_request *preallocated_lazy_request;
- u32 outstanding_lazy_seqno;
+ struct drm_i915_gem_request *outstanding_lazy_request;
bool gpu_caches_dirty;
bool fbc_dirty;
@@ -408,6 +407,7 @@
ringbuf->tail &= ringbuf->size - 1;
}
int __intel_ring_space(int head, int tail, int size);
+void intel_ring_update_space(struct intel_ringbuffer *ringbuf);
int intel_ring_space(struct intel_ringbuffer *ringbuf);
bool intel_ring_stopped(struct intel_engine_cs *ring);
void __intel_ring_advance(struct intel_engine_cs *ring);
@@ -436,16 +436,11 @@
return ringbuf->tail;
}
-static inline u32 intel_ring_get_seqno(struct intel_engine_cs *ring)
+static inline struct drm_i915_gem_request *
+intel_ring_get_request(struct intel_engine_cs *ring)
{
- BUG_ON(ring->outstanding_lazy_seqno == 0);
- return ring->outstanding_lazy_seqno;
-}
-
-static inline void i915_trace_irq_get(struct intel_engine_cs *ring, u32 seqno)
-{
- if (ring->trace_irq_seqno == 0 && ring->irq_get(ring))
- ring->trace_irq_seqno = seqno;
+ BUG_ON(ring->outstanding_lazy_request == NULL);
+ return ring->outstanding_lazy_request;
}
#endif /* _INTEL_RINGBUFFER_H_ */
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c
index f5a78d5..6aa3a81 100644
--- a/drivers/gpu/drm/i915/intel_runtime_pm.c
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.c
@@ -118,7 +118,7 @@
}
/**
- * intel_display_power_is_enabled - unlocked check for a power domain
+ * intel_display_power_is_enabled - check for a power domain
* @dev_priv: i915 device instance
* @domain: power domain to check
*
@@ -633,7 +633,7 @@
return;
mismatch:
- WARN(1, "state mismatch for '%s' (always_on %d hw state %d use-count %d disable_power_well %d\n",
+ I915_STATE_WARN(1, "state mismatch for '%s' (always_on %d hw state %d use-count %d disable_power_well %d\n",
power_well->name, power_well->always_on, enabled,
power_well->count, i915.disable_power_well);
}
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
index 7d9c340..c18e57d 100644
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -1004,7 +1004,7 @@
hsw_enable_ips(intel_crtc);
mutex_lock(&dev->struct_mutex);
- intel_update_fbc(dev);
+ intel_fbc_update(dev);
mutex_unlock(&dev->struct_mutex);
}
@@ -1017,7 +1017,7 @@
mutex_lock(&dev->struct_mutex);
if (dev_priv->fbc.plane == intel_crtc->plane)
- intel_disable_fbc(dev);
+ intel_fbc_disable(dev);
mutex_unlock(&dev->struct_mutex);
/*
@@ -1096,9 +1096,9 @@
intel_check_sprite_plane(struct drm_plane *plane,
struct intel_plane_state *state)
{
- struct intel_crtc *intel_crtc = to_intel_crtc(state->crtc);
+ struct intel_crtc *intel_crtc = to_intel_crtc(state->base.crtc);
struct intel_plane *intel_plane = to_intel_plane(plane);
- struct drm_framebuffer *fb = state->fb;
+ struct drm_framebuffer *fb = state->base.fb;
struct drm_i915_gem_object *obj = intel_fb_obj(fb);
int crtc_x, crtc_y;
unsigned int crtc_w, crtc_h;
@@ -1109,7 +1109,12 @@
const struct drm_rect *clip = &state->clip;
int hscale, vscale;
int max_scale, min_scale;
- int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
+ int pixel_size;
+
+ if (!fb) {
+ state->visible = false;
+ return 0;
+ }
/* Don't modify another pipe's plane */
if (intel_plane->pipe != intel_crtc->pipe) {
@@ -1232,6 +1237,7 @@
if (src_w < 3 || src_h < 3)
state->visible = false;
+ pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
width_bytes = ((src_x * pixel_size) & 63) +
src_w * pixel_size;
@@ -1257,53 +1263,17 @@
return 0;
}
-static int
-intel_prepare_sprite_plane(struct drm_plane *plane,
- struct intel_plane_state *state)
-{
- struct drm_device *dev = plane->dev;
- struct drm_crtc *crtc = state->crtc;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct intel_plane *intel_plane = to_intel_plane(plane);
- enum pipe pipe = intel_crtc->pipe;
- struct drm_framebuffer *fb = state->fb;
- struct drm_i915_gem_object *obj = intel_fb_obj(fb);
- struct drm_i915_gem_object *old_obj = intel_plane->obj;
- int ret;
-
- if (old_obj != obj) {
- mutex_lock(&dev->struct_mutex);
-
- /* Note that this will apply the VT-d workaround for scanouts,
- * which is more restrictive than required for sprites. (The
- * primary plane requires 256KiB alignment with 64 PTE padding,
- * the sprite planes only require 128KiB alignment and 32 PTE
- * padding.
- */
- ret = intel_pin_and_fence_fb_obj(plane, fb, NULL);
- if (ret == 0)
- i915_gem_track_fb(old_obj, obj,
- INTEL_FRONTBUFFER_SPRITE(pipe));
- mutex_unlock(&dev->struct_mutex);
- if (ret)
- return ret;
- }
-
- return 0;
-}
-
static void
intel_commit_sprite_plane(struct drm_plane *plane,
struct intel_plane_state *state)
{
struct drm_device *dev = plane->dev;
- struct drm_crtc *crtc = state->crtc;
+ struct drm_crtc *crtc = state->base.crtc;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_plane *intel_plane = to_intel_plane(plane);
enum pipe pipe = intel_crtc->pipe;
- struct drm_framebuffer *fb = state->fb;
+ struct drm_framebuffer *fb = state->base.fb;
struct drm_i915_gem_object *obj = intel_fb_obj(fb);
- struct drm_i915_gem_object *old_obj = intel_plane->obj;
int crtc_x, crtc_y;
unsigned int crtc_w, crtc_h;
uint32_t src_x, src_y, src_w, src_h;
@@ -1312,6 +1282,17 @@
bool primary_enabled;
/*
+ * 'prepare' is never called when the plane is being disabled, so we
+ * need to handle frontbuffer tracking here
+ */
+ if (!fb) {
+ mutex_lock(&dev->struct_mutex);
+ i915_gem_track_fb(intel_fb_obj(plane->fb), NULL,
+ INTEL_FRONTBUFFER_SPRITE(pipe));
+ mutex_unlock(&dev->struct_mutex);
+ }
+
+ /*
* If the sprite is completely covering the primary plane,
* we can disable the primary and save power.
*/
@@ -1361,112 +1342,6 @@
if (!primary_was_enabled && primary_enabled)
intel_post_enable_primary(crtc);
}
-
- /* Unpin old obj after new one is active to avoid ugliness */
- if (old_obj && old_obj != obj) {
-
- /*
- * It's fairly common to simply update the position of
- * an existing object. In that case, we don't need to
- * wait for vblank to avoid ugliness, we only need to
- * do the pin & ref bookkeeping.
- */
- if (intel_crtc->active)
- intel_wait_for_vblank(dev, intel_crtc->pipe);
-
- mutex_lock(&dev->struct_mutex);
- intel_unpin_fb_obj(old_obj);
- mutex_unlock(&dev->struct_mutex);
- }
-}
-
-static int
-intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
- struct drm_framebuffer *fb, int crtc_x, int crtc_y,
- unsigned int crtc_w, unsigned int crtc_h,
- uint32_t src_x, uint32_t src_y,
- uint32_t src_w, uint32_t src_h)
-{
- struct intel_plane_state state;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- int ret;
-
- state.crtc = crtc;
- state.fb = fb;
-
- /* sample coordinates in 16.16 fixed point */
- state.src.x1 = src_x;
- state.src.x2 = src_x + src_w;
- state.src.y1 = src_y;
- state.src.y2 = src_y + src_h;
-
- /* integer pixels */
- state.dst.x1 = crtc_x;
- state.dst.x2 = crtc_x + crtc_w;
- state.dst.y1 = crtc_y;
- state.dst.y2 = crtc_y + crtc_h;
-
- state.clip.x1 = 0;
- state.clip.y1 = 0;
- state.clip.x2 = intel_crtc->active ? intel_crtc->config.pipe_src_w : 0;
- state.clip.y2 = intel_crtc->active ? intel_crtc->config.pipe_src_h : 0;
- state.orig_src = state.src;
- state.orig_dst = state.dst;
-
- ret = intel_check_sprite_plane(plane, &state);
- if (ret)
- return ret;
-
- ret = intel_prepare_sprite_plane(plane, &state);
- if (ret)
- return ret;
-
- intel_commit_sprite_plane(plane, &state);
- return 0;
-}
-
-static int
-intel_disable_plane(struct drm_plane *plane)
-{
- struct drm_device *dev = plane->dev;
- struct intel_plane *intel_plane = to_intel_plane(plane);
- struct intel_crtc *intel_crtc;
- enum pipe pipe;
-
- if (!plane->fb)
- return 0;
-
- if (WARN_ON(!plane->crtc))
- return -EINVAL;
-
- intel_crtc = to_intel_crtc(plane->crtc);
- pipe = intel_crtc->pipe;
-
- if (intel_crtc->active) {
- bool primary_was_enabled = intel_crtc->primary_enabled;
-
- intel_crtc->primary_enabled = true;
-
- intel_plane->disable_plane(plane, plane->crtc);
-
- if (!primary_was_enabled && intel_crtc->primary_enabled)
- intel_post_enable_primary(plane->crtc);
- }
-
- if (intel_plane->obj) {
- if (intel_crtc->active)
- intel_wait_for_vblank(dev, intel_plane->pipe);
-
- mutex_lock(&dev->struct_mutex);
- intel_unpin_fb_obj(intel_plane->obj);
- i915_gem_track_fb(intel_plane->obj, NULL,
- INTEL_FRONTBUFFER_SPRITE(pipe));
- mutex_unlock(&dev->struct_mutex);
-
- intel_plane->obj = NULL;
- }
-
- return 0;
}
static void intel_destroy_plane(struct drm_plane *plane)
@@ -1576,14 +1451,6 @@
intel_plane->src_w, intel_plane->src_h);
}
-void intel_plane_disable(struct drm_plane *plane)
-{
- if (!plane->crtc || !plane->fb)
- return;
-
- intel_disable_plane(plane);
-}
-
static const struct drm_plane_funcs intel_plane_funcs = {
.update_plane = intel_update_plane,
.disable_plane = intel_disable_plane,
@@ -1720,6 +1587,8 @@
intel_plane->pipe = pipe;
intel_plane->plane = plane;
intel_plane->rotation = BIT(DRM_ROTATE_0);
+ intel_plane->check_plane = intel_check_sprite_plane;
+ intel_plane->commit_plane = intel_commit_sprite_plane;
possible_crtcs = (1 << pipe);
ret = drm_universal_plane_init(dev, &intel_plane->base, possible_crtcs,
&intel_plane_funcs,
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index 46de8d7..e9561de 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -647,9 +647,9 @@
#define FORCEWAKE_CHV_RENDER_RANGE_OFFSET(reg) \
(REG_RANGE((reg), 0x2000, 0x4000) || \
- REG_RANGE((reg), 0x5000, 0x8000) || \
+ REG_RANGE((reg), 0x5200, 0x8000) || \
REG_RANGE((reg), 0x8300, 0x8500) || \
- REG_RANGE((reg), 0xB000, 0xC000) || \
+ REG_RANGE((reg), 0xB000, 0xB480) || \
REG_RANGE((reg), 0xE000, 0xE800))
#define FORCEWAKE_CHV_MEDIA_RANGE_OFFSET(reg) \
@@ -658,17 +658,14 @@
REG_RANGE((reg), 0x12000, 0x14000) || \
REG_RANGE((reg), 0x1A000, 0x1C000) || \
REG_RANGE((reg), 0x1E800, 0x1EA00) || \
- REG_RANGE((reg), 0x30000, 0x40000))
+ REG_RANGE((reg), 0x30000, 0x38000))
#define FORCEWAKE_CHV_COMMON_RANGE_OFFSET(reg) \
(REG_RANGE((reg), 0x4000, 0x5000) || \
REG_RANGE((reg), 0x8000, 0x8300) || \
REG_RANGE((reg), 0x8500, 0x8600) || \
REG_RANGE((reg), 0x9000, 0xB000) || \
- REG_RANGE((reg), 0xC000, 0xC800) || \
- REG_RANGE((reg), 0xF000, 0x10000) || \
- REG_RANGE((reg), 0x14000, 0x14400) || \
- REG_RANGE((reg), 0x22000, 0x24000))
+ REG_RANGE((reg), 0xF000, 0x10000))
#define FORCEWAKE_GEN9_UNCORE_RANGE_OFFSET(reg) \
REG_RANGE((reg), 0xB00, 0x2000)
@@ -1202,7 +1199,7 @@
switch (INTEL_INFO(dev)->gen) {
default:
- WARN_ON(1);
+ MISSING_CASE(INTEL_INFO(dev)->gen);
return;
case 9:
ASSIGN_WRITE_MMIO_VFUNCS(gen9);
@@ -1300,7 +1297,7 @@
reg->val = I915_READ8(reg->offset);
break;
default:
- WARN_ON(1);
+ MISSING_CASE(entry->size);
ret = -EINVAL;
goto out;
}
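For context on the range tables being trimmed above: to the best of my knowledge REG_RANGE() in intel_uncore.c is a half-open interval test, and each FORCEWAKE_*_RANGE_OFFSET macro simply ORs several such tests together. A sketch, using the CHV render ranges as they stand after this patch:

#include <linux/types.h>

#define REG_RANGE(reg, start, end) ((reg) >= (start) && (reg) < (end))

static bool chv_reg_needs_render_forcewake(u32 reg)
{
	return REG_RANGE(reg, 0x2000, 0x4000) ||
	       REG_RANGE(reg, 0x5200, 0x8000) ||
	       REG_RANGE(reg, 0x8300, 0x8500) ||
	       REG_RANGE(reg, 0xB000, 0xB480) ||
	       REG_RANGE(reg, 0xE000, 0xE800);
}

Narrowing a range (0x5000 -> 0x5200, 0xC000 -> 0xB480) therefore just shrinks the set of registers for which the render well is forced awake.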
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
index aa87304..94a5bee 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
@@ -386,9 +386,7 @@
msm_gem_put_iova(gpu->memptrs_bo, gpu->base.id);
drm_gem_object_unreference(gpu->memptrs_bo);
}
- if (gpu->pm4)
- release_firmware(gpu->pm4);
- if (gpu->pfp)
- release_firmware(gpu->pfp);
+ release_firmware(gpu->pm4);
+ release_firmware(gpu->pfp);
msm_gpu_cleanup(&gpu->base);
}
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_connector.c b/drivers/gpu/drm/msm/hdmi/hdmi_connector.c
index fbebb04..b4e70e0 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_connector.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_connector.c
@@ -141,6 +141,15 @@
uint32_t hpd_ctrl;
int i, ret;
+ for (i = 0; i < config->hpd_reg_cnt; i++) {
+ ret = regulator_enable(hdmi->hpd_regs[i]);
+ if (ret) {
+ dev_err(dev->dev, "failed to enable hpd regulator: %s (%d)\n",
+ config->hpd_reg_names[i], ret);
+ goto fail;
+ }
+ }
+
ret = gpio_config(hdmi, true);
if (ret) {
dev_err(dev->dev, "failed to configure GPIOs: %d\n", ret);
@@ -164,15 +173,6 @@
}
}
- for (i = 0; i < config->hpd_reg_cnt; i++) {
- ret = regulator_enable(hdmi->hpd_regs[i]);
- if (ret) {
- dev_err(dev->dev, "failed to enable hpd regulator: %s (%d)\n",
- config->hpd_reg_names[i], ret);
- goto fail;
- }
- }
-
hdmi_set_mode(hdmi, false);
phy->funcs->reset(phy);
hdmi_set_mode(hdmi, true);
@@ -200,7 +200,7 @@
return ret;
}
-static int hdp_disable(struct hdmi_connector *hdmi_connector)
+static void hdp_disable(struct hdmi_connector *hdmi_connector)
{
struct hdmi *hdmi = hdmi_connector->hdmi;
const struct hdmi_platform_config *config = hdmi->config;
@@ -212,28 +212,19 @@
hdmi_set_mode(hdmi, false);
- for (i = 0; i < config->hpd_reg_cnt; i++) {
- ret = regulator_disable(hdmi->hpd_regs[i]);
- if (ret) {
- dev_err(dev->dev, "failed to disable hpd regulator: %s (%d)\n",
- config->hpd_reg_names[i], ret);
- goto fail;
- }
- }
-
for (i = 0; i < config->hpd_clk_cnt; i++)
clk_disable_unprepare(hdmi->hpd_clks[i]);
ret = gpio_config(hdmi, false);
- if (ret) {
- dev_err(dev->dev, "failed to unconfigure GPIOs: %d\n", ret);
- goto fail;
+ if (ret)
+ dev_warn(dev->dev, "failed to unconfigure GPIOs: %d\n", ret);
+
+ for (i = 0; i < config->hpd_reg_cnt; i++) {
+ ret = regulator_disable(hdmi->hpd_regs[i]);
+ if (ret)
+ dev_warn(dev->dev, "failed to disable hpd regulator: %s (%d)\n",
+ config->hpd_reg_names[i], ret);
}
-
- return 0;
-
-fail:
- return ret;
}
static void
@@ -260,11 +251,11 @@
(hpd_int_status & HDMI_HPD_INT_STATUS_INT)) {
bool detected = !!(hpd_int_status & HDMI_HPD_INT_STATUS_CABLE_DETECTED);
- DBG("status=%04x, ctrl=%04x", hpd_int_status, hpd_int_ctrl);
-
- /* ack the irq: */
+ /* ack & disable (temporarily) HPD events: */
hdmi_write(hdmi, REG_HDMI_HPD_INT_CTRL,
- hpd_int_ctrl | HDMI_HPD_INT_CTRL_INT_ACK);
+ HDMI_HPD_INT_CTRL_INT_ACK);
+
+ DBG("status=%04x, ctrl=%04x", hpd_int_status, hpd_int_ctrl);
/* detect disconnect if we are connected or vice versa: */
hpd_int_ctrl = HDMI_HPD_INT_CTRL_INT_EN;
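The reorder above makes the power-up sequence regulators -> GPIOs -> clocks, and hdp_disable() now tears down strictly in reverse while demoting failures to warnings so teardown always runs to completion. The shape of the pattern, with hypothetical helper names standing in for the driver's real calls:

#include <linux/printk.h>
#include <linux/types.h>

struct hdmi;	/* opaque; stands in for the driver context */

int my_enable_regulators(struct hdmi *h);
void my_disable_regulators(struct hdmi *h);
int my_config_gpios(struct hdmi *h, bool on);
void my_enable_clocks(struct hdmi *h);
void my_disable_clocks(struct hdmi *h);

static int my_hpd_enable(struct hdmi *h)
{
	int ret;

	ret = my_enable_regulators(h);		/* power first */
	if (ret)
		return ret;

	ret = my_config_gpios(h, true);
	if (ret)
		goto err_regs;

	my_enable_clocks(h);
	return 0;

err_regs:
	my_disable_regulators(h);
	return ret;
}

static void my_hpd_disable(struct hdmi *h)
{
	my_disable_clocks(h);			/* strict reverse order */

	if (my_config_gpios(h, false))
		pr_warn("gpio teardown failed, continuing\n");

	my_disable_regulators(h);		/* power last */
}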
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c
index a7672e1..20ae503 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c
@@ -323,25 +323,12 @@
drm_crtc_vblank_put(crtc);
}
-static void mdp4_crtc_load_lut(struct drm_crtc *crtc)
-{
-}
-
static int mdp4_crtc_atomic_check(struct drm_crtc *crtc,
struct drm_crtc_state *state)
{
struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
- struct drm_device *dev = crtc->dev;
-
DBG("%s: check", mdp4_crtc->name);
-
- if (mdp4_crtc->event) {
- dev_err(dev->dev, "already pending flip!\n");
- return -EBUSY;
- }
-
// TODO anything else to check?
-
return 0;
}
@@ -357,7 +344,7 @@
struct drm_device *dev = crtc->dev;
unsigned long flags;
- DBG("%s: flush", mdp4_crtc->name);
+ DBG("%s: event: %p", mdp4_crtc->name, crtc->state->event);
WARN_ON(mdp4_crtc->event);
@@ -524,7 +511,6 @@
.mode_set_base = drm_helper_crtc_mode_set_base,
.prepare = mdp4_crtc_prepare,
.commit = mdp4_crtc_commit,
- .load_lut = mdp4_crtc_load_lut,
.atomic_check = mdp4_crtc_atomic_check,
.atomic_begin = mdp4_crtc_atomic_begin,
.atomic_flush = mdp4_crtc_atomic_flush,
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
index 0e9a2e3..6b25f9f 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
@@ -275,10 +275,6 @@
mdp5_disable(get_kms(crtc));
}
-static void mdp5_crtc_load_lut(struct drm_crtc *crtc)
-{
-}
-
struct plane_state {
struct drm_plane *plane;
struct mdp5_plane_state *state;
@@ -303,11 +299,6 @@
DBG("%s: check", mdp5_crtc->name);
- if (mdp5_crtc->event) {
- dev_err(dev->dev, "already pending flip!\n");
- return -EBUSY;
- }
-
/* request a free CTL, if none is already allocated for this CRTC */
if (state->enable && !mdp5_crtc->ctl) {
mdp5_crtc->ctl = mdp5_ctlm_request(mdp5_kms->ctlm, crtc);
@@ -364,7 +355,7 @@
struct drm_device *dev = crtc->dev;
unsigned long flags;
- DBG("%s: flush", mdp5_crtc->name);
+ DBG("%s: event: %p", mdp5_crtc->name, crtc->state->event);
WARN_ON(mdp5_crtc->event);
@@ -407,7 +398,6 @@
.mode_set_base = drm_helper_crtc_mode_set_base,
.prepare = mdp5_crtc_prepare,
.commit = mdp5_crtc_commit,
- .load_lut = mdp5_crtc_load_lut,
.atomic_check = mdp5_crtc_atomic_check,
.atomic_begin = mdp5_crtc_atomic_begin,
.atomic_flush = mdp5_crtc_atomic_flush,
@@ -460,10 +450,7 @@
/* now that we know what irq's we want: */
mdp5_crtc->err.irqmask = intf2err(intf);
mdp5_crtc->vblank.irqmask = intf2vblank(intf);
-
- /* when called from modeset_init(), skip the rest until later: */
- if (!mdp5_kms)
- return;
+ mdp_irq_update(&mdp5_kms->base);
spin_lock_irqsave(&mdp5_kms->resource_lock, flags);
intf_sel = mdp5_read(mdp5_kms, REG_MDP5_DISP_INTF_SEL);
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
index a11f1b8..9f01a4f 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
@@ -216,17 +216,7 @@
goto fail;
}
- /* NOTE: the vsync and error irq's are actually associated with
- * the INTF/encoder.. the easiest way to deal with this (ie. what
- * we do now) is assume a fixed relationship between crtc's and
- * encoders. I'm not sure if there is ever a need to more freely
- * assign crtcs to encoders, but if there is then we need to take
- * care of error and vblank irq's that the crtc has registered,
- * and also update user-requested vblank_mask.
- */
- encoder->possible_crtcs = BIT(0);
- mdp5_crtc_set_intf(priv->crtcs[0], 3, INTF_HDMI);
-
+ encoder->possible_crtcs = (1 << priv->num_crtcs) - 1;
priv->encoders[priv->num_encoders++] = encoder;
/* Construct bridge/connector for HDMI: */
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
index 26e5fde..fc76f63 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
@@ -113,6 +113,7 @@
} else {
mdp5_state->zpos = 1 + drm_plane_index(plane);
}
+ mdp5_state->base.plane = plane;
plane->state = &mdp5_state->base;
}
diff --git a/drivers/gpu/drm/msm/mdp/mdp_kms.c b/drivers/gpu/drm/msm/mdp/mdp_kms.c
index 03455b6..2a73172 100644
--- a/drivers/gpu/drm/msm/mdp/mdp_kms.c
+++ b/drivers/gpu/drm/msm/mdp/mdp_kms.c
@@ -42,7 +42,10 @@
mdp_kms->funcs->set_irqmask(mdp_kms, irqmask);
}
-static void update_irq_unlocked(struct mdp_kms *mdp_kms)
+/* if an mdp_irq's irqmask has changed, such as when mdp5 crtc<->encoder
+ * link changes, this must be called to figure out the new global irqmask
+ */
+void mdp_irq_update(struct mdp_kms *mdp_kms)
{
unsigned long flags;
spin_lock_irqsave(&list_lock, flags);
@@ -122,7 +125,7 @@
spin_unlock_irqrestore(&list_lock, flags);
if (needs_update)
- update_irq_unlocked(mdp_kms);
+ mdp_irq_update(mdp_kms);
}
void mdp_irq_unregister(struct mdp_kms *mdp_kms, struct mdp_irq *irq)
@@ -141,5 +144,5 @@
spin_unlock_irqrestore(&list_lock, flags);
if (needs_update)
- update_irq_unlocked(mdp_kms);
+ mdp_irq_update(mdp_kms);
}
diff --git a/drivers/gpu/drm/msm/mdp/mdp_kms.h b/drivers/gpu/drm/msm/mdp/mdp_kms.h
index 99557b5..b268ce9 100644
--- a/drivers/gpu/drm/msm/mdp/mdp_kms.h
+++ b/drivers/gpu/drm/msm/mdp/mdp_kms.h
@@ -75,7 +75,7 @@
void mdp_irq_wait(struct mdp_kms *mdp_kms, uint32_t irqmask);
void mdp_irq_register(struct mdp_kms *mdp_kms, struct mdp_irq *irq);
void mdp_irq_unregister(struct mdp_kms *mdp_kms, struct mdp_irq *irq);
-
+void mdp_irq_update(struct mdp_kms *mdp_kms);
/*
* pixel format helpers:
diff --git a/drivers/gpu/drm/msm/msm_atomic.c b/drivers/gpu/drm/msm/msm_atomic.c
index f0de412..2c39654 100644
--- a/drivers/gpu/drm/msm/msm_atomic.c
+++ b/drivers/gpu/drm/msm/msm_atomic.c
@@ -23,10 +23,41 @@
struct drm_atomic_state *state;
uint32_t fence;
struct msm_fence_cb fence_cb;
+ uint32_t crtc_mask;
};
static void fence_cb(struct msm_fence_cb *cb);
+/* block until specified crtcs are no longer pending update, and
+ * atomically mark them as pending update
+ */
+static int start_atomic(struct msm_drm_private *priv, uint32_t crtc_mask)
+{
+ int ret;
+
+ spin_lock(&priv->pending_crtcs_event.lock);
+ ret = wait_event_interruptible_locked(priv->pending_crtcs_event,
+ !(priv->pending_crtcs & crtc_mask));
+ if (ret == 0) {
+ DBG("start: %08x", crtc_mask);
+ priv->pending_crtcs |= crtc_mask;
+ }
+ spin_unlock(&priv->pending_crtcs_event.lock);
+
+ return ret;
+}
+
+/* clear specified crtcs (no longer pending update)
+ */
+static void end_atomic(struct msm_drm_private *priv, uint32_t crtc_mask)
+{
+ spin_lock(&priv->pending_crtcs_event.lock);
+ DBG("end: %08x", crtc_mask);
+ priv->pending_crtcs &= ~crtc_mask;
+ wake_up_all_locked(&priv->pending_crtcs_event);
+ spin_unlock(&priv->pending_crtcs_event.lock);
+}
+
static struct msm_commit *new_commit(struct drm_atomic_state *state)
{
struct msm_commit *c = kzalloc(sizeof(*c), GFP_KERNEL);
@@ -58,12 +89,27 @@
drm_atomic_helper_commit_post_planes(dev, state);
+ /* NOTE: _wait_for_vblanks() only waits for vblank on
+ * enabled CRTCs. So we end up faulting when disabling
+ * due to (potentially) unref'ing the outgoing fb's
+ * before the vblank when the disable has latched.
+ *
+ * But if it did wait on disabled (or newly disabled)
+ * CRTCs, that would be racy (ie. we could have missed
+ * the irq). We need some way to poll for pipe shut
+ * down. Or just live with occasionally hitting the
+ * timeout in the CRTC disable path (which really should
+ * not be a critical path).
+ */
+
drm_atomic_helper_wait_for_vblanks(dev, state);
drm_atomic_helper_cleanup_planes(dev, state);
drm_atomic_state_free(state);
+ end_atomic(dev->dev_private, c->crtc_mask);
+
kfree(c);
}
@@ -81,6 +127,26 @@
}
+int msm_atomic_check(struct drm_device *dev,
+ struct drm_atomic_state *state)
+{
+ int ret;
+
+ /*
+ * msm ->atomic_check can update ->mode_changed for pixel format
+ * changes, hence must be run before we check the modeset changes.
+ */
+ ret = drm_atomic_helper_check_planes(dev, state);
+ if (ret)
+ return ret;
+
+ ret = drm_atomic_helper_check_modeset(dev, state);
+ if (ret)
+ return ret;
+
+ return ret;
+}
+
/**
* msm_atomic_commit - commit validated state object
* @dev: DRM device
@@ -97,8 +163,9 @@
int msm_atomic_commit(struct drm_device *dev,
struct drm_atomic_state *state, bool async)
{
- struct msm_commit *c;
int nplanes = dev->mode_config.num_total_plane;
+ int ncrtcs = dev->mode_config.num_crtc;
+ struct msm_commit *c;
int i, ret;
ret = drm_atomic_helper_prepare_planes(dev, state);
@@ -106,6 +173,18 @@
return ret;
c = new_commit(state);
+ if (!c)
+ return -ENOMEM;
+
+ /*
+ * Figure out what crtcs we have:
+ */
+ for (i = 0; i < ncrtcs; i++) {
+ struct drm_crtc *crtc = state->crtcs[i];
+ if (!crtc)
+ continue;
+ c->crtc_mask |= (1 << drm_crtc_index(crtc));
+ }
/*
* Figure out what fence to wait for:
@@ -122,6 +201,14 @@
}
/*
+ * Wait for pending updates on any of the same crtc's and then
+ * mark our set of crtc's as busy:
+ */
+ ret = start_atomic(dev->dev_private, c->crtc_mask);
+ if (ret)
+ return ret;
+
+ /*
* This is the point of no return - everything below never fails except
* when the hw goes bonghits. Which means we can commit the new state on
* the software side now.
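start_atomic()/end_atomic() above serialize async commits per CRTC using nothing heavier than a bitmask protected by the waitqueue's own lock. A self-contained sketch of the same pattern; the names mirror the msm_drm_private fields added further down, but this is a standalone illustration, not the driver code:

#include <linux/types.h>
#include <linux/wait.h>

static uint32_t pending_crtcs;
static DECLARE_WAIT_QUEUE_HEAD(pending_crtcs_event);

static int claim_crtcs(uint32_t crtc_mask)
{
	int ret;

	spin_lock(&pending_crtcs_event.lock);
	/* sleep until none of our CRTCs has a commit in flight... */
	ret = wait_event_interruptible_locked(pending_crtcs_event,
			!(pending_crtcs & crtc_mask));
	if (ret == 0)
		pending_crtcs |= crtc_mask;	/* ...then claim them */
	spin_unlock(&pending_crtcs_event.lock);

	return ret;
}

static void release_crtcs(uint32_t crtc_mask)
{
	spin_lock(&pending_crtcs_event.lock);
	pending_crtcs &= ~crtc_mask;
	wake_up_all_locked(&pending_crtcs_event);	/* waiters re-check */
	spin_unlock(&pending_crtcs_event.lock);
}

Because the condition is re-evaluated under the waitqueue lock, check-and-claim is atomic: two commits touching disjoint CRTC sets proceed in parallel, while overlapping ones queue up.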
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index c795217..f1ebedd 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -29,7 +29,7 @@
static const struct drm_mode_config_funcs mode_config_funcs = {
.fb_create = msm_framebuffer_create,
.output_poll_changed = msm_fb_output_poll_changed,
- .atomic_check = drm_atomic_helper_check,
+ .atomic_check = msm_atomic_check,
.atomic_commit = msm_atomic_commit,
};
@@ -193,6 +193,7 @@
priv->wq = alloc_ordered_workqueue("msm", 0);
init_waitqueue_head(&priv->fence_event);
+ init_waitqueue_head(&priv->pending_crtcs_event);
INIT_LIST_HEAD(&priv->inactive_list);
INIT_LIST_HEAD(&priv->fence_cbs);
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index 1363038..22e5391 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -96,6 +96,10 @@
/* callbacks deferred until bo is inactive: */
struct list_head fence_cbs;
+ /* crtcs pending async atomic updates: */
+ uint32_t pending_crtcs;
+ wait_queue_head_t pending_crtcs_event;
+
/* registered MMUs: */
unsigned int num_mmus;
struct msm_mmu *mmus[NUM_DOMAINS];
@@ -144,6 +148,8 @@
(_cb)->func = _func; \
} while (0)
+int msm_atomic_check(struct drm_device *dev,
+ struct drm_atomic_state *state);
int msm_atomic_commit(struct drm_device *dev,
struct drm_atomic_state *state, bool async);
diff --git a/drivers/gpu/drm/msm/msm_fbdev.c b/drivers/gpu/drm/msm/msm_fbdev.c
index 94d55e5..1f3af13 100644
--- a/drivers/gpu/drm/msm/msm_fbdev.c
+++ b/drivers/gpu/drm/msm/msm_fbdev.c
@@ -190,8 +190,7 @@
fail:
if (ret) {
- if (fbi)
- framebuffer_release(fbi);
+ framebuffer_release(fbi);
if (fb) {
drm_framebuffer_unregister_private(fb);
drm_framebuffer_remove(fb);
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index 4a6f0e4..49dea4f 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -535,8 +535,7 @@
drm_free_large(msm_obj->pages);
} else {
- if (msm_obj->vaddr)
- vunmap(msm_obj->vaddr);
+ vunmap(msm_obj->vaddr);
put_pages(obj);
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index 5d93902..f804243 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -876,7 +876,6 @@
if (ret)
return ret;
- bo->gem.dumb = true;
ret = drm_gem_handle_create(file_priv, &bo->gem, &args->handle);
drm_gem_object_unreference_unlocked(&bo->gem);
return ret;
@@ -892,14 +891,6 @@
gem = drm_gem_object_lookup(dev, file_priv, handle);
if (gem) {
struct nouveau_bo *bo = nouveau_gem_object(gem);
-
- /*
- * We don't allow dumb mmaps on objects created using another
- * interface.
- */
- WARN_ONCE(!(gem->dumb || gem->import_attach),
- "Illegal dumb map of accelerated buffer.\n");
-
*poffset = drm_vma_node_offset_addr(&bo->bo.vma_node);
drm_gem_object_unreference_unlocked(gem);
return 0;
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index 28d51a2..42c34ba 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -444,9 +444,6 @@
list_for_each_entry(nvbo, list, entry) {
struct drm_nouveau_gem_pushbuf_bo *b = &pbbo[nvbo->pbbo_index];
- WARN_ONCE(nvbo->gem.dumb,
- "GPU use of dumb buffer is illegal.\n");
-
ret = nouveau_gem_set_domain(&nvbo->gem, b->read_domains,
b->write_domains,
b->valid_domains);
diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c
index 753a6de..3d1cfcb 100644
--- a/drivers/gpu/drm/nouveau/nouveau_ttm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c
@@ -28,6 +28,7 @@
#include "nouveau_ttm.h"
#include "nouveau_gem.h"
+#include "drm_legacy.h"
static int
nouveau_vram_manager_init(struct ttm_mem_type_manager *man, unsigned long psize)
{
@@ -281,7 +282,7 @@
struct nouveau_drm *drm = nouveau_drm(file_priv->minor->dev);
if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET))
- return -EINVAL;
+ return drm_legacy_mmap(filp, vma);
return ttm_bo_mmap(filp, vma, &drm->ttm.bdev);
}
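The mmap change above stops rejecting low offsets and instead routes them to the legacy map code; only offsets at or above DRM_FILE_PAGE_OFFSET (a per-driver constant) are TTM objects. Schematically, with the caveat that this is a distillation rather than the nouveau function itself:

#include <linux/fs.h>
#include <linux/mm.h>
#include <drm/ttm/ttm_bo_api.h>
#include "drm_legacy.h"

static int my_mmap(struct file *filp, struct vm_area_struct *vma,
		   struct ttm_bo_device *bdev,
		   unsigned long file_page_offset)
{
	/* legacy maps live below the driver's file page offset */
	if (vma->vm_pgoff < file_page_offset)
		return drm_legacy_mmap(filp, vma);

	/* everything above it is a TTM buffer object */
	return ttm_bo_mmap(filp, vma, bdev);
}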
diff --git a/drivers/gpu/drm/radeon/cik_reg.h b/drivers/gpu/drm/radeon/cik_reg.h
index 79c45e8..bbb8f2e 100644
--- a/drivers/gpu/drm/radeon/cik_reg.h
+++ b/drivers/gpu/drm/radeon/cik_reg.h
@@ -147,10 +147,42 @@
#define CIK_LB_DESKTOP_HEIGHT 0x6b0c
+#define KFD_CIK_SDMA_QUEUE_OFFSET 0x200
+
#define CP_HQD_IQ_RPTR 0xC970u
#define AQL_ENABLE (1U << 0)
-
-#define IDLE (1 << 2)
+#define SDMA0_RLC0_RB_CNTL 0xD400u
+#define SDMA_RB_VMID(x) (x << 24)
+#define SDMA0_RLC0_RB_BASE 0xD404u
+#define SDMA0_RLC0_RB_BASE_HI 0xD408u
+#define SDMA0_RLC0_RB_RPTR 0xD40Cu
+#define SDMA0_RLC0_RB_WPTR 0xD410u
+#define SDMA0_RLC0_RB_WPTR_POLL_CNTL 0xD414u
+#define SDMA0_RLC0_RB_WPTR_POLL_ADDR_HI 0xD418u
+#define SDMA0_RLC0_RB_WPTR_POLL_ADDR_LO 0xD41Cu
+#define SDMA0_RLC0_RB_RPTR_ADDR_HI 0xD420u
+#define SDMA0_RLC0_RB_RPTR_ADDR_LO 0xD424u
+#define SDMA0_RLC0_IB_CNTL 0xD428u
+#define SDMA0_RLC0_IB_RPTR 0xD42Cu
+#define SDMA0_RLC0_IB_OFFSET 0xD430u
+#define SDMA0_RLC0_IB_BASE_LO 0xD434u
+#define SDMA0_RLC0_IB_BASE_HI 0xD438u
+#define SDMA0_RLC0_IB_SIZE 0xD43Cu
+#define SDMA0_RLC0_SKIP_CNTL 0xD440u
+#define SDMA0_RLC0_CONTEXT_STATUS 0xD444u
+#define SDMA_RLC_IDLE (1 << 2)
+#define SDMA0_RLC0_DOORBELL 0xD448u
+#define SDMA_OFFSET(x) (x << 0)
+#define SDMA_DB_ENABLE (1 << 28)
+#define SDMA0_RLC0_VIRTUAL_ADDR 0xD49Cu
+#define SDMA_ATC (1 << 0)
+#define SDMA_VA_PTR32 (1 << 4)
+#define SDMA_VA_SHARED_BASE(x) (x << 8)
+#define SDMA0_RLC0_APE1_CNTL 0xD4A0u
+#define SDMA0_RLC0_DOORBELL_LOG 0xD4A4u
+#define SDMA0_RLC0_WATERMARK 0xD4A8u
+#define SDMA0_CNTL 0xD010
+#define SDMA1_CNTL 0xD810
struct cik_mqd {
uint32_t header;
@@ -283,4 +315,137 @@
uint32_t queue_doorbell_id15;
};
+struct cik_sdma_rlc_registers {
+ uint32_t sdma_rlc_rb_cntl;
+ uint32_t sdma_rlc_rb_base;
+ uint32_t sdma_rlc_rb_base_hi;
+ uint32_t sdma_rlc_rb_rptr;
+ uint32_t sdma_rlc_rb_wptr;
+ uint32_t sdma_rlc_rb_wptr_poll_cntl;
+ uint32_t sdma_rlc_rb_wptr_poll_addr_hi;
+ uint32_t sdma_rlc_rb_wptr_poll_addr_lo;
+ uint32_t sdma_rlc_rb_rptr_addr_hi;
+ uint32_t sdma_rlc_rb_rptr_addr_lo;
+ uint32_t sdma_rlc_ib_cntl;
+ uint32_t sdma_rlc_ib_rptr;
+ uint32_t sdma_rlc_ib_offset;
+ uint32_t sdma_rlc_ib_base_lo;
+ uint32_t sdma_rlc_ib_base_hi;
+ uint32_t sdma_rlc_ib_size;
+ uint32_t sdma_rlc_skip_cntl;
+ uint32_t sdma_rlc_context_status;
+ uint32_t sdma_rlc_doorbell;
+ uint32_t sdma_rlc_virtual_addr;
+ uint32_t sdma_rlc_ape1_cntl;
+ uint32_t sdma_rlc_doorbell_log;
+ uint32_t reserved_22;
+ uint32_t reserved_23;
+ uint32_t reserved_24;
+ uint32_t reserved_25;
+ uint32_t reserved_26;
+ uint32_t reserved_27;
+ uint32_t reserved_28;
+ uint32_t reserved_29;
+ uint32_t reserved_30;
+ uint32_t reserved_31;
+ uint32_t reserved_32;
+ uint32_t reserved_33;
+ uint32_t reserved_34;
+ uint32_t reserved_35;
+ uint32_t reserved_36;
+ uint32_t reserved_37;
+ uint32_t reserved_38;
+ uint32_t reserved_39;
+ uint32_t reserved_40;
+ uint32_t reserved_41;
+ uint32_t reserved_42;
+ uint32_t reserved_43;
+ uint32_t reserved_44;
+ uint32_t reserved_45;
+ uint32_t reserved_46;
+ uint32_t reserved_47;
+ uint32_t reserved_48;
+ uint32_t reserved_49;
+ uint32_t reserved_50;
+ uint32_t reserved_51;
+ uint32_t reserved_52;
+ uint32_t reserved_53;
+ uint32_t reserved_54;
+ uint32_t reserved_55;
+ uint32_t reserved_56;
+ uint32_t reserved_57;
+ uint32_t reserved_58;
+ uint32_t reserved_59;
+ uint32_t reserved_60;
+ uint32_t reserved_61;
+ uint32_t reserved_62;
+ uint32_t reserved_63;
+ uint32_t reserved_64;
+ uint32_t reserved_65;
+ uint32_t reserved_66;
+ uint32_t reserved_67;
+ uint32_t reserved_68;
+ uint32_t reserved_69;
+ uint32_t reserved_70;
+ uint32_t reserved_71;
+ uint32_t reserved_72;
+ uint32_t reserved_73;
+ uint32_t reserved_74;
+ uint32_t reserved_75;
+ uint32_t reserved_76;
+ uint32_t reserved_77;
+ uint32_t reserved_78;
+ uint32_t reserved_79;
+ uint32_t reserved_80;
+ uint32_t reserved_81;
+ uint32_t reserved_82;
+ uint32_t reserved_83;
+ uint32_t reserved_84;
+ uint32_t reserved_85;
+ uint32_t reserved_86;
+ uint32_t reserved_87;
+ uint32_t reserved_88;
+ uint32_t reserved_89;
+ uint32_t reserved_90;
+ uint32_t reserved_91;
+ uint32_t reserved_92;
+ uint32_t reserved_93;
+ uint32_t reserved_94;
+ uint32_t reserved_95;
+ uint32_t reserved_96;
+ uint32_t reserved_97;
+ uint32_t reserved_98;
+ uint32_t reserved_99;
+ uint32_t reserved_100;
+ uint32_t reserved_101;
+ uint32_t reserved_102;
+ uint32_t reserved_103;
+ uint32_t reserved_104;
+ uint32_t reserved_105;
+ uint32_t reserved_106;
+ uint32_t reserved_107;
+ uint32_t reserved_108;
+ uint32_t reserved_109;
+ uint32_t reserved_110;
+ uint32_t reserved_111;
+ uint32_t reserved_112;
+ uint32_t reserved_113;
+ uint32_t reserved_114;
+ uint32_t reserved_115;
+ uint32_t reserved_116;
+ uint32_t reserved_117;
+ uint32_t reserved_118;
+ uint32_t reserved_119;
+ uint32_t reserved_120;
+ uint32_t reserved_121;
+ uint32_t reserved_122;
+ uint32_t reserved_123;
+ uint32_t reserved_124;
+ uint32_t reserved_125;
+ uint32_t reserved_126;
+ uint32_t reserved_127;
+ uint32_t sdma_engine_id;
+ uint32_t sdma_queue_id;
+};
+
#endif
diff --git a/drivers/gpu/drm/radeon/cik_sdma.c b/drivers/gpu/drm/radeon/cik_sdma.c
index dde5c7e..1f4ded1 100644
--- a/drivers/gpu/drm/radeon/cik_sdma.c
+++ b/drivers/gpu/drm/radeon/cik_sdma.c
@@ -283,6 +283,33 @@
}
/**
+ * cik_sdma_ctx_switch_enable - enable/disable sdma engine preemption
+ *
+ * @rdev: radeon_device pointer
+ * @enable: enable/disable preemption.
+ *
+ * Enable or disable automatic context switching on the async DMA engines (CIK).
+ */
+void cik_sdma_ctx_switch_enable(struct radeon_device *rdev, bool enable)
+{
+ uint32_t reg_offset, value;
+ int i;
+
+ for (i = 0; i < 2; i++) {
+ if (i == 0)
+ reg_offset = SDMA0_REGISTER_OFFSET;
+ else
+ reg_offset = SDMA1_REGISTER_OFFSET;
+ value = RREG32(SDMA0_CNTL + reg_offset);
+ if (enable)
+ value |= AUTO_CTXSW_ENABLE;
+ else
+ value &= ~AUTO_CTXSW_ENABLE;
+ WREG32(SDMA0_CNTL + reg_offset, value);
+ }
+}
+
+/**
* cik_sdma_enable - stop the async dma engines
*
* @rdev: radeon_device pointer
@@ -312,6 +339,8 @@
me_cntl |= SDMA_HALT;
WREG32(SDMA0_ME_CNTL + reg_offset, me_cntl);
}
+
+ cik_sdma_ctx_switch_enable(rdev, enable);
}
/**
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index fe48f22..a46f737 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -394,10 +394,9 @@
return r;
}
-static int radeon_mode_mmap(struct drm_file *filp,
- struct drm_device *dev,
- uint32_t handle, bool dumb,
- uint64_t *offset_p)
+int radeon_mode_dumb_mmap(struct drm_file *filp,
+ struct drm_device *dev,
+ uint32_t handle, uint64_t *offset_p)
{
struct drm_gem_object *gobj;
struct radeon_bo *robj;
@@ -406,14 +405,6 @@
if (gobj == NULL) {
return -ENOENT;
}
-
- /*
- * We don't allow dumb mmaps on objects created using another
- * interface.
- */
- WARN_ONCE(dumb && !(gobj->dumb || gobj->import_attach),
- "Illegal dumb map of GPU buffer.\n");
-
robj = gem_to_radeon_bo(gobj);
if (radeon_ttm_tt_has_userptr(robj->tbo.ttm)) {
drm_gem_object_unreference_unlocked(gobj);
@@ -424,20 +415,12 @@
return 0;
}
-int radeon_mode_dumb_mmap(struct drm_file *filp,
- struct drm_device *dev,
- uint32_t handle, uint64_t *offset_p)
-{
- return radeon_mode_mmap(filp, dev, handle, true, offset_p);
-}
-
int radeon_gem_mmap_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp)
{
struct drm_radeon_gem_mmap *args = data;
- return radeon_mode_mmap(filp, dev, args->handle, false,
- &args->addr_ptr);
+ return radeon_mode_dumb_mmap(filp, dev, args->handle, &args->addr_ptr);
}
int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
@@ -763,7 +746,6 @@
return -ENOMEM;
r = drm_gem_handle_create(file_priv, gobj, &handle);
- gobj->dumb = true;
/* drop reference from allocate - handle holds it now */
drm_gem_object_unreference_unlocked(gobj);
if (r) {
diff --git a/drivers/gpu/drm/radeon/radeon_kfd.c b/drivers/gpu/drm/radeon/radeon_kfd.c
index 065d020..cae11ee 100644
--- a/drivers/gpu/drm/radeon/radeon_kfd.c
+++ b/drivers/gpu/drm/radeon/radeon_kfd.c
@@ -28,27 +28,29 @@
#include "cikd.h"
#include "cik_reg.h"
#include "radeon_kfd.h"
+#include "radeon_ucode.h"
+#include <linux/firmware.h>
#define CIK_PIPE_PER_MEC (4)
struct kgd_mem {
- struct radeon_sa_bo *sa_bo;
+ struct radeon_bo *bo;
uint64_t gpu_addr;
- void *ptr;
+ void *cpu_ptr;
};
-static int init_sa_manager(struct kgd_dev *kgd, unsigned int size);
-static void fini_sa_manager(struct kgd_dev *kgd);
-static int allocate_mem(struct kgd_dev *kgd, size_t size, size_t alignment,
- enum kgd_memory_pool pool, struct kgd_mem **mem);
+static int alloc_gtt_mem(struct kgd_dev *kgd, size_t size,
+ void **mem_obj, uint64_t *gpu_addr,
+ void **cpu_ptr);
-static void free_mem(struct kgd_dev *kgd, struct kgd_mem *mem);
+static void free_gtt_mem(struct kgd_dev *kgd, void *mem_obj);
static uint64_t get_vmem_size(struct kgd_dev *kgd);
static uint64_t get_gpu_clock_counter(struct kgd_dev *kgd);
static uint32_t get_max_engine_clock_in_mhz(struct kgd_dev *kgd);
+static uint16_t get_fw_version(struct kgd_dev *kgd, enum kgd_engine_type type);
/*
* Register access functions
@@ -68,19 +70,20 @@
static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
uint32_t queue_id, uint32_t __user *wptr);
-
+static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd);
static bool kgd_hqd_is_occupies(struct kgd_dev *kgd, uint64_t queue_address,
uint32_t pipe_id, uint32_t queue_id);
static int kgd_hqd_destroy(struct kgd_dev *kgd, uint32_t reset_type,
unsigned int timeout, uint32_t pipe_id,
uint32_t queue_id);
+static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd);
+static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
+ unsigned int timeout);
static const struct kfd2kgd_calls kfd2kgd = {
- .init_sa_manager = init_sa_manager,
- .fini_sa_manager = fini_sa_manager,
- .allocate_mem = allocate_mem,
- .free_mem = free_mem,
+ .init_gtt_mem_allocation = alloc_gtt_mem,
+ .free_gtt_mem = free_gtt_mem,
.get_vmem_size = get_vmem_size,
.get_gpu_clock_counter = get_gpu_clock_counter,
.get_max_engine_clock_in_mhz = get_max_engine_clock_in_mhz,
@@ -89,8 +92,12 @@
.init_memory = kgd_init_memory,
.init_pipeline = kgd_init_pipeline,
.hqd_load = kgd_hqd_load,
+ .hqd_sdma_load = kgd_hqd_sdma_load,
.hqd_is_occupies = kgd_hqd_is_occupies,
+ .hqd_sdma_is_occupied = kgd_hqd_sdma_is_occupied,
.hqd_destroy = kgd_hqd_destroy,
+ .hqd_sdma_destroy = kgd_hqd_sdma_destroy,
+ .get_fw_version = get_fw_version
};
static const struct kgd2kfd_calls *kgd2kfd;
@@ -178,87 +185,78 @@
return r;
}
-static u32 pool_to_domain(enum kgd_memory_pool p)
-{
- switch (p) {
- case KGD_POOL_FRAMEBUFFER: return RADEON_GEM_DOMAIN_VRAM;
- default: return RADEON_GEM_DOMAIN_GTT;
- }
-}
-
-static int init_sa_manager(struct kgd_dev *kgd, unsigned int size)
+static int alloc_gtt_mem(struct kgd_dev *kgd, size_t size,
+ void **mem_obj, uint64_t *gpu_addr,
+ void **cpu_ptr)
{
struct radeon_device *rdev = (struct radeon_device *)kgd;
+ struct kgd_mem **mem = (struct kgd_mem **) mem_obj;
int r;
BUG_ON(kgd == NULL);
-
- r = radeon_sa_bo_manager_init(rdev, &rdev->kfd_bo,
- size,
- RADEON_GPU_PAGE_SIZE,
- RADEON_GEM_DOMAIN_GTT,
- RADEON_GEM_GTT_WC);
-
- if (r)
- return r;
-
- r = radeon_sa_bo_manager_start(rdev, &rdev->kfd_bo);
- if (r)
- radeon_sa_bo_manager_fini(rdev, &rdev->kfd_bo);
-
- return r;
-}
-
-static void fini_sa_manager(struct kgd_dev *kgd)
-{
- struct radeon_device *rdev = (struct radeon_device *)kgd;
-
- BUG_ON(kgd == NULL);
-
- radeon_sa_bo_manager_suspend(rdev, &rdev->kfd_bo);
- radeon_sa_bo_manager_fini(rdev, &rdev->kfd_bo);
-}
-
-static int allocate_mem(struct kgd_dev *kgd, size_t size, size_t alignment,
- enum kgd_memory_pool pool, struct kgd_mem **mem)
-{
- struct radeon_device *rdev = (struct radeon_device *)kgd;
- u32 domain;
- int r;
-
- BUG_ON(kgd == NULL);
-
- domain = pool_to_domain(pool);
- if (domain != RADEON_GEM_DOMAIN_GTT) {
- dev_err(rdev->dev,
- "Only allowed to allocate gart memory for kfd\n");
- return -EINVAL;
- }
+ BUG_ON(gpu_addr == NULL);
+ BUG_ON(cpu_ptr == NULL);
*mem = kmalloc(sizeof(struct kgd_mem), GFP_KERNEL);
if ((*mem) == NULL)
return -ENOMEM;
- r = radeon_sa_bo_new(rdev, &rdev->kfd_bo, &(*mem)->sa_bo, size,
- alignment);
+ r = radeon_bo_create(rdev, size, PAGE_SIZE, true, RADEON_GEM_DOMAIN_GTT,
+ RADEON_GEM_GTT_WC, NULL, NULL, &(*mem)->bo);
if (r) {
- dev_err(rdev->dev, "failed to get memory for kfd (%d)\n", r);
+ dev_err(rdev->dev,
+ "failed to allocate BO for amdkfd (%d)\n", r);
return r;
}
- (*mem)->ptr = radeon_sa_bo_cpu_addr((*mem)->sa_bo);
- (*mem)->gpu_addr = radeon_sa_bo_gpu_addr((*mem)->sa_bo);
+ /* map the buffer */
+ r = radeon_bo_reserve((*mem)->bo, true);
+ if (r) {
+ dev_err(rdev->dev, "(%d) failed to reserve bo for amdkfd\n", r);
+ goto allocate_mem_reserve_bo_failed;
+ }
+
+ r = radeon_bo_pin((*mem)->bo, RADEON_GEM_DOMAIN_GTT,
+ &(*mem)->gpu_addr);
+ if (r) {
+ dev_err(rdev->dev, "(%d) failed to pin bo for amdkfd\n", r);
+ goto allocate_mem_pin_bo_failed;
+ }
+ *gpu_addr = (*mem)->gpu_addr;
+
+ r = radeon_bo_kmap((*mem)->bo, &(*mem)->cpu_ptr);
+ if (r) {
+ dev_err(rdev->dev,
+ "(%d) failed to map bo to kernel for amdkfd\n", r);
+ goto allocate_mem_kmap_bo_failed;
+ }
+ *cpu_ptr = (*mem)->cpu_ptr;
+
+ radeon_bo_unreserve((*mem)->bo);
return 0;
+
+allocate_mem_kmap_bo_failed:
+ radeon_bo_unpin((*mem)->bo);
+allocate_mem_pin_bo_failed:
+ radeon_bo_unreserve((*mem)->bo);
+allocate_mem_reserve_bo_failed:
+ radeon_bo_unref(&(*mem)->bo);
+
+ return r;
}
-static void free_mem(struct kgd_dev *kgd, struct kgd_mem *mem)
+static void free_gtt_mem(struct kgd_dev *kgd, void *mem_obj)
{
- struct radeon_device *rdev = (struct radeon_device *)kgd;
+ struct kgd_mem *mem = (struct kgd_mem *) mem_obj;
- BUG_ON(kgd == NULL);
+ BUG_ON(mem == NULL);
- radeon_sa_bo_free(rdev, &mem->sa_bo, NULL);
+ radeon_bo_reserve(mem->bo, true);
+ radeon_bo_kunmap(mem->bo);
+ radeon_bo_unpin(mem->bo);
+ radeon_bo_unreserve(mem->bo);
+ radeon_bo_unref(&(mem->bo));
kfree(mem);
}
@@ -431,11 +429,28 @@
return 0;
}
+static inline uint32_t get_sdma_base_addr(struct cik_sdma_rlc_registers *m)
+{
+ uint32_t retval;
+
+ retval = m->sdma_engine_id * SDMA1_REGISTER_OFFSET +
+ m->sdma_queue_id * KFD_CIK_SDMA_QUEUE_OFFSET;
+
+ pr_debug("kfd: sdma base address: 0x%x\n", retval);
+
+ return retval;
+}
+
static inline struct cik_mqd *get_mqd(void *mqd)
{
return (struct cik_mqd *)mqd;
}
+static inline struct cik_sdma_rlc_registers *get_sdma_mqd(void *mqd)
+{
+ return (struct cik_sdma_rlc_registers *)mqd;
+}
+
static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
uint32_t queue_id, uint32_t __user *wptr)
{
@@ -513,6 +528,45 @@
return 0;
}
+static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd)
+{
+ struct cik_sdma_rlc_registers *m;
+ uint32_t sdma_base_addr;
+
+ m = get_sdma_mqd(mqd);
+ sdma_base_addr = get_sdma_base_addr(m);
+
+ write_register(kgd,
+ sdma_base_addr + SDMA0_RLC0_VIRTUAL_ADDR,
+ m->sdma_rlc_virtual_addr);
+
+ write_register(kgd,
+ sdma_base_addr + SDMA0_RLC0_RB_BASE,
+ m->sdma_rlc_rb_base);
+
+ write_register(kgd,
+ sdma_base_addr + SDMA0_RLC0_RB_BASE_HI,
+ m->sdma_rlc_rb_base_hi);
+
+ write_register(kgd,
+ sdma_base_addr + SDMA0_RLC0_RB_RPTR_ADDR_LO,
+ m->sdma_rlc_rb_rptr_addr_lo);
+
+ write_register(kgd,
+ sdma_base_addr + SDMA0_RLC0_RB_RPTR_ADDR_HI,
+ m->sdma_rlc_rb_rptr_addr_hi);
+
+ write_register(kgd,
+ sdma_base_addr + SDMA0_RLC0_DOORBELL,
+ m->sdma_rlc_doorbell);
+
+ write_register(kgd,
+ sdma_base_addr + SDMA0_RLC0_RB_CNTL,
+ m->sdma_rlc_rb_cntl);
+
+ return 0;
+}
+
static bool kgd_hqd_is_occupies(struct kgd_dev *kgd, uint64_t queue_address,
uint32_t pipe_id, uint32_t queue_id)
{
@@ -534,6 +588,24 @@
return retval;
}
+static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd)
+{
+ struct cik_sdma_rlc_registers *m;
+ uint32_t sdma_base_addr;
+ uint32_t sdma_rlc_rb_cntl;
+
+ m = get_sdma_mqd(mqd);
+ sdma_base_addr = get_sdma_base_addr(m);
+
+ sdma_rlc_rb_cntl = read_register(kgd,
+ sdma_base_addr + SDMA0_RLC0_RB_CNTL);
+
+ if (sdma_rlc_rb_cntl & SDMA_RB_ENABLE)
+ return true;
+
+ return false;
+}
+
static int kgd_hqd_destroy(struct kgd_dev *kgd, uint32_t reset_type,
unsigned int timeout, uint32_t pipe_id,
uint32_t queue_id)
@@ -561,3 +633,85 @@
release_queue(kgd);
return 0;
}
+
+static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
+ unsigned int timeout)
+{
+ struct cik_sdma_rlc_registers *m;
+ uint32_t sdma_base_addr;
+ uint32_t temp;
+
+ m = get_sdma_mqd(mqd);
+ sdma_base_addr = get_sdma_base_addr(m);
+
+ temp = read_register(kgd, sdma_base_addr + SDMA0_RLC0_RB_CNTL);
+ temp = temp & ~SDMA_RB_ENABLE;
+ write_register(kgd, sdma_base_addr + SDMA0_RLC0_RB_CNTL, temp);
+
+ while (true) {
+ temp = read_register(kgd, sdma_base_addr +
+ SDMA0_RLC0_CONTEXT_STATUS);
+ if (temp & SDMA_RLC_IDLE)
+ break;
+ if (timeout == 0)
+ return -ETIME;
+ msleep(20);
+ timeout -= min(timeout, 20u); /* avoid unsigned wrap */
+ }
+
+ write_register(kgd, sdma_base_addr + SDMA0_RLC0_DOORBELL, 0);
+ write_register(kgd, sdma_base_addr + SDMA0_RLC0_RB_RPTR, 0);
+ write_register(kgd, sdma_base_addr + SDMA0_RLC0_RB_WPTR, 0);
+ write_register(kgd, sdma_base_addr + SDMA0_RLC0_RB_BASE, 0);
+
+ return 0;
+}
+
+static uint16_t get_fw_version(struct kgd_dev *kgd, enum kgd_engine_type type)
+{
+ struct radeon_device *rdev = (struct radeon_device *) kgd;
+ const union radeon_firmware_header *hdr;
+
+ BUG_ON(kgd == NULL || rdev->mec_fw == NULL);
+
+ switch (type) {
+ case KGD_ENGINE_PFP:
+ hdr = (const union radeon_firmware_header *) rdev->pfp_fw->data;
+ break;
+
+ case KGD_ENGINE_ME:
+ hdr = (const union radeon_firmware_header *) rdev->me_fw->data;
+ break;
+
+ case KGD_ENGINE_CE:
+ hdr = (const union radeon_firmware_header *) rdev->ce_fw->data;
+ break;
+
+ case KGD_ENGINE_MEC1:
+ hdr = (const union radeon_firmware_header *) rdev->mec_fw->data;
+ break;
+
+ case KGD_ENGINE_MEC2:
+ hdr = (const union radeon_firmware_header *)
+ rdev->mec2_fw->data;
+ break;
+
+ case KGD_ENGINE_RLC:
+ hdr = (const union radeon_firmware_header *) rdev->rlc_fw->data;
+ break;
+
+ case KGD_ENGINE_SDMA:
+ hdr = (const union radeon_firmware_header *)
+ rdev->sdma_fw->data;
+ break;
+
+ default:
+ return 0;
+ }
+
+ if (hdr == NULL)
+ return 0;
+
+ /* Only 12 bits in use */
+ return hdr->common.ucode_version;
+}
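alloc_gtt_mem() above trades the sub-allocator for a dedicated GTT buffer object that is created, reserved, pinned and kernel-mapped in one pass, with each failure label undoing exactly the steps that succeeded. The skeleton of that sequence, simplified from the function above (same radeon calls, reduced plumbing):

#include "radeon.h"	/* radeon_bo_* helpers, as used above */

static int my_alloc_pinned_gtt_bo(struct radeon_device *rdev, size_t size,
				  struct radeon_bo **bo,
				  u64 *gpu_addr, void **cpu_ptr)
{
	int r;

	r = radeon_bo_create(rdev, size, PAGE_SIZE, true,
			     RADEON_GEM_DOMAIN_GTT, RADEON_GEM_GTT_WC,
			     NULL, NULL, bo);
	if (r)
		return r;

	r = radeon_bo_reserve(*bo, true);
	if (r)
		goto err_unref;

	r = radeon_bo_pin(*bo, RADEON_GEM_DOMAIN_GTT, gpu_addr);
	if (r)
		goto err_unreserve;

	r = radeon_bo_kmap(*bo, cpu_ptr);
	if (r)
		goto err_unpin;

	radeon_bo_unreserve(*bo);
	return 0;

err_unpin:
	radeon_bo_unpin(*bo);
err_unreserve:
	radeon_bo_unreserve(*bo);
err_unref:
	radeon_bo_unref(bo);
	return r;
}

free_gtt_mem() is the same ladder run forwards: reserve, kunmap, unpin, unreserve, unref.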
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index 7d68223..86fc564 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -529,9 +529,6 @@
u32 current_domain =
radeon_mem_type_to_domain(bo->tbo.mem.mem_type);
- WARN_ONCE(bo->gem_base.dumb,
- "GPU use of dumb buffer is illegal.\n");
-
/* Check if this buffer will be moved and don't move it
* if we have moved too many buffers for this IB already.
*
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
index 23cc910..25c7a99 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
@@ -74,39 +74,77 @@
if (ret < 0)
return ret;
+ ret = clk_prepare_enable(rcrtc->extclock);
+ if (ret < 0)
+ goto error_clock;
+
ret = rcar_du_group_get(rcrtc->group);
if (ret < 0)
- clk_disable_unprepare(rcrtc->clock);
+ goto error_group;
+ return 0;
+
+error_group:
+ clk_disable_unprepare(rcrtc->extclock);
+error_clock:
+ clk_disable_unprepare(rcrtc->clock);
return ret;
}
static void rcar_du_crtc_put(struct rcar_du_crtc *rcrtc)
{
rcar_du_group_put(rcrtc->group);
+
+ clk_disable_unprepare(rcrtc->extclock);
clk_disable_unprepare(rcrtc->clock);
}
static void rcar_du_crtc_set_display_timing(struct rcar_du_crtc *rcrtc)
{
const struct drm_display_mode *mode = &rcrtc->crtc.mode;
+ unsigned long mode_clock = mode->clock * 1000;
unsigned long clk;
u32 value;
+ u32 escr;
u32 div;
- /* Dot clock */
+ /* Compute the clock divisor and select the internal or external dot
+ * clock based on the requested frequency.
+ */
clk = clk_get_rate(rcrtc->clock);
- div = DIV_ROUND_CLOSEST(clk, mode->clock * 1000);
+ div = DIV_ROUND_CLOSEST(clk, mode_clock);
div = clamp(div, 1U, 64U) - 1;
+ escr = div | ESCR_DCLKSEL_CLKS;
+
+ if (rcrtc->extclock) {
+ unsigned long extclk;
+ unsigned long extrate;
+ unsigned long rate;
+ u32 extdiv;
+
+ extclk = clk_get_rate(rcrtc->extclock);
+ extdiv = DIV_ROUND_CLOSEST(extclk, mode_clock);
+ extdiv = clamp(extdiv, 1U, 64U) - 1;
+
+ rate = clk / (div + 1);
+ extrate = extclk / (extdiv + 1);
+
+ if (abs((long)extrate - (long)mode_clock) <
+ abs((long)rate - (long)mode_clock)) {
+ dev_dbg(rcrtc->group->dev->dev,
+ "crtc%u: using external clock\n", rcrtc->index);
+ escr = extdiv | ESCR_DCLKSEL_DCLKIN;
+ }
+ }
rcar_du_group_write(rcrtc->group, rcrtc->index % 2 ? ESCR2 : ESCR,
- ESCR_DCLKSEL_CLKS | div);
+ escr);
rcar_du_group_write(rcrtc->group, rcrtc->index % 2 ? OTAR2 : OTAR, 0);
/* Signal polarities */
value = ((mode->flags & DRM_MODE_FLAG_PVSYNC) ? 0 : DSMR_VSL)
| ((mode->flags & DRM_MODE_FLAG_PHSYNC) ? 0 : DSMR_HSL)
- | DSMR_DIPM_DE;
+ | DSMR_DIPM_DE | DSMR_CSPM;
rcar_du_crtc_write(rcrtc, DSMR, value);
/* Display timings */
@@ -117,12 +155,15 @@
mode->hsync_start - 1);
rcar_du_crtc_write(rcrtc, HCR, mode->htotal - 1);
- rcar_du_crtc_write(rcrtc, VDSR, mode->vtotal - mode->vsync_end - 2);
- rcar_du_crtc_write(rcrtc, VDER, mode->vtotal - mode->vsync_end +
- mode->vdisplay - 2);
- rcar_du_crtc_write(rcrtc, VSPR, mode->vtotal - mode->vsync_end +
- mode->vsync_start - 1);
- rcar_du_crtc_write(rcrtc, VCR, mode->vtotal - 1);
+ rcar_du_crtc_write(rcrtc, VDSR, mode->crtc_vtotal -
+ mode->crtc_vsync_end - 2);
+ rcar_du_crtc_write(rcrtc, VDER, mode->crtc_vtotal -
+ mode->crtc_vsync_end +
+ mode->crtc_vdisplay - 2);
+ rcar_du_crtc_write(rcrtc, VSPR, mode->crtc_vtotal -
+ mode->crtc_vsync_end +
+ mode->crtc_vsync_start - 1);
+ rcar_du_crtc_write(rcrtc, VCR, mode->crtc_vtotal - 1);
rcar_du_crtc_write(rcrtc, DESR, mode->htotal - mode->hsync_start);
rcar_du_crtc_write(rcrtc, DEWR, mode->hdisplay);
@@ -139,9 +180,10 @@
*/
rcrtc->outputs |= BIT(output);
- /* Store RGB routing to DPAD0 for R8A7790. */
- if (rcar_du_has(rcdu, RCAR_DU_FEATURE_DEFR8) &&
- output == RCAR_DU_OUTPUT_DPAD0)
+ /* Store RGB routing to DPAD0; the hardware will be configured when
+ * starting the CRTC.
+ */
+ if (output == RCAR_DU_OUTPUT_DPAD0)
rcdu->dpad0_source = rcrtc->index;
}
@@ -217,6 +259,7 @@
static void rcar_du_crtc_start(struct rcar_du_crtc *rcrtc)
{
struct drm_crtc *crtc = &rcrtc->crtc;
+ bool interlaced;
unsigned int i;
if (rcrtc->started)
@@ -252,7 +295,10 @@
* sync mode (with the HSYNC and VSYNC signals configured as outputs and
* actively driven).
*/
- rcar_du_crtc_clr_set(rcrtc, DSYSR, DSYSR_TVM_MASK, DSYSR_TVM_MASTER);
+ interlaced = rcrtc->crtc.mode.flags & DRM_MODE_FLAG_INTERLACE;
+ rcar_du_crtc_clr_set(rcrtc, DSYSR, DSYSR_TVM_MASK | DSYSR_SCM_MASK,
+ (interlaced ? DSYSR_SCM_INT_VIDEO : 0) |
+ DSYSR_TVM_MASTER);
rcar_du_group_start_stop(rcrtc->group, true);
@@ -308,6 +354,9 @@
{
struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);
+ if (mode != DRM_MODE_DPMS_ON)
+ mode = DRM_MODE_DPMS_OFF;
+
if (rcrtc->dpms == mode)
return;
@@ -486,7 +535,7 @@
status = rcar_du_crtc_read(rcrtc, DSSR);
rcar_du_crtc_write(rcrtc, DSRCR, status & DSRCR_MASK);
- if (status & DSSR_VBK) {
+ if (status & DSSR_FRM) {
drm_handle_vblank(rcrtc->crtc.dev, rcrtc->index);
rcar_du_crtc_finish_page_flip(rcrtc);
ret = IRQ_HANDLED;
@@ -542,12 +591,13 @@
struct rcar_du_crtc *rcrtc = &rcdu->crtcs[index];
struct drm_crtc *crtc = &rcrtc->crtc;
unsigned int irqflags;
- char clk_name[5];
+ struct clk *clk;
+ char clk_name[9];
char *name;
int irq;
int ret;
- /* Get the CRTC clock. */
+ /* Get the CRTC clock and the optional external clock. */
if (rcar_du_has(rcdu, RCAR_DU_FEATURE_CRTC_IRQ_CLOCK)) {
sprintf(clk_name, "du.%u", index);
name = clk_name;
@@ -561,6 +611,15 @@
return PTR_ERR(rcrtc->clock);
}
+ sprintf(clk_name, "dclkin.%u", index);
+ clk = devm_clk_get(rcdu->dev, clk_name);
+ if (!IS_ERR(clk)) {
+ rcrtc->extclock = clk;
+ } else if (PTR_ERR(clk) == -EPROBE_DEFER) {
+ dev_info(rcdu->dev, "can't get external clock %u\n", index);
+ return -EPROBE_DEFER;
+ }
+
rcrtc->group = rgrp;
rcrtc->mmio_offset = mmio_offsets[index];
rcrtc->index = index;
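The divider logic above computes a candidate divisor for both the internal and the external clock and picks whichever parent lands closer to the requested dot clock. The per-parent arithmetic in isolation (hypothetical helper, same DIV_ROUND_CLOSEST/clamp scheme as the code above):

#include <linux/kernel.h>	/* DIV_ROUND_CLOSEST, clamp */
#include <linux/types.h>

/* Rate achievable from 'parent' with a 1..64 divider; the ESCR divider
 * field (divider - 1) is returned through *escr_div. */
static unsigned long best_rate(unsigned long parent, unsigned long target,
			       u32 *escr_div)
{
	u32 div = DIV_ROUND_CLOSEST(parent, target);

	div = clamp(div, 1U, 64U);
	*escr_div = div - 1;

	return parent / div;
}

The CRTC code then compares abs(rate - target) for the two parents and programs ESCR_DCLKSEL_DCLKIN only when the external path wins.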
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_crtc.h b/drivers/gpu/drm/rcar-du/rcar_du_crtc.h
index 984e608..d2f89f7 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_crtc.h
+++ b/drivers/gpu/drm/rcar-du/rcar_du_crtc.h
@@ -26,6 +26,7 @@
struct drm_crtc crtc;
struct clk *clock;
+ struct clk *extclock;
unsigned int mmio_offset;
unsigned int index;
bool started;
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_drv.c b/drivers/gpu/drm/rcar-du/rcar_du_drv.c
index 7bfa09c..e0d74f8 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_drv.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_drv.c
@@ -56,7 +56,8 @@
};
static const struct rcar_du_device_info rcar_du_r8a7790_info = {
- .features = RCAR_DU_FEATURE_CRTC_IRQ_CLOCK | RCAR_DU_FEATURE_DEFR8,
+ .features = RCAR_DU_FEATURE_CRTC_IRQ_CLOCK
+ | RCAR_DU_FEATURE_EXT_CTRL_REGS,
.quirks = RCAR_DU_QUIRK_ALIGN_128B | RCAR_DU_QUIRK_LVDS_LANES,
.num_crtcs = 3,
.routes = {
@@ -83,7 +84,8 @@
};
static const struct rcar_du_device_info rcar_du_r8a7791_info = {
- .features = RCAR_DU_FEATURE_CRTC_IRQ_CLOCK | RCAR_DU_FEATURE_DEFR8,
+ .features = RCAR_DU_FEATURE_CRTC_IRQ_CLOCK
+ | RCAR_DU_FEATURE_EXT_CTRL_REGS,
.num_crtcs = 2,
.routes = {
/* R8A7791 has one RGB output, one LVDS output and one
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_drv.h b/drivers/gpu/drm/rcar-du/rcar_du_drv.h
index 0a72466..c5b9ea6 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_drv.h
+++ b/drivers/gpu/drm/rcar-du/rcar_du_drv.h
@@ -27,7 +27,7 @@
struct rcar_du_lvdsenc;
#define RCAR_DU_FEATURE_CRTC_IRQ_CLOCK (1 << 0) /* Per-CRTC IRQ and clock */
-#define RCAR_DU_FEATURE_DEFR8 (1 << 1) /* Has DEFR8 register */
+#define RCAR_DU_FEATURE_EXT_CTRL_REGS (1 << 1) /* Has extended control registers */
#define RCAR_DU_QUIRK_ALIGN_128B (1 << 0) /* Align pitches to 128 bytes */
#define RCAR_DU_QUIRK_LVDS_LANES (1 << 1) /* LVDS lanes 1 and 3 inverted */
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_encoder.c b/drivers/gpu/drm/rcar-du/rcar_du_encoder.c
index 34a122a..279167f 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_encoder.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_encoder.c
@@ -46,6 +46,9 @@
{
struct rcar_du_encoder *renc = to_rcar_encoder(encoder);
+ if (mode != DRM_MODE_DPMS_ON)
+ mode = DRM_MODE_DPMS_OFF;
+
if (renc->lvds)
rcar_du_lvdsenc_dpms(renc->lvds, encoder->crtc, mode);
}
@@ -190,35 +193,42 @@
}
if (type == RCAR_DU_ENCODER_HDMI) {
- if (renc->lvds) {
- dev_err(rcdu->dev,
- "Chaining LVDS and HDMI encoders not supported\n");
- return -EINVAL;
- }
-
ret = rcar_du_hdmienc_init(rcdu, renc, enc_node);
if (ret < 0)
- return ret;
+ goto done;
} else {
ret = drm_encoder_init(rcdu->ddev, encoder, &encoder_funcs,
encoder_type);
if (ret < 0)
- return ret;
+ goto done;
drm_encoder_helper_add(encoder, &encoder_helper_funcs);
}
switch (encoder_type) {
case DRM_MODE_ENCODER_LVDS:
- return rcar_du_lvds_connector_init(rcdu, renc, con_node);
+ ret = rcar_du_lvds_connector_init(rcdu, renc, con_node);
+ break;
case DRM_MODE_ENCODER_DAC:
- return rcar_du_vga_connector_init(rcdu, renc);
+ ret = rcar_du_vga_connector_init(rcdu, renc);
+ break;
case DRM_MODE_ENCODER_TMDS:
- return rcar_du_hdmi_connector_init(rcdu, renc);
+ ret = rcar_du_hdmi_connector_init(rcdu, renc);
+ break;
default:
- return -EINVAL;
+ ret = -EINVAL;
+ break;
}
+
+done:
+ if (ret < 0) {
+ if (encoder->name)
+ encoder->funcs->destroy(encoder);
+ devm_kfree(rcdu->dev, renc);
+ }
+
+ return ret;
}
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_group.c b/drivers/gpu/drm/rcar-du/rcar_du_group.c
index 4e7614b..1bdc0ee 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_group.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_group.c
@@ -48,9 +48,6 @@
{
u32 defr8 = DEFR8_CODE | DEFR8_DEFE8;
- if (!rcar_du_has(rgrp->dev, RCAR_DU_FEATURE_DEFR8))
- return;
-
/* The DEFR8 register for the first group also controls RGB output
* routing to DPAD0
*/
@@ -69,7 +66,20 @@
rcar_du_group_write(rgrp, DEFR4, DEFR4_CODE);
rcar_du_group_write(rgrp, DEFR5, DEFR5_CODE | DEFR5_DEFE5);
- rcar_du_group_setup_defr8(rgrp);
+ if (rcar_du_has(rgrp->dev, RCAR_DU_FEATURE_EXT_CTRL_REGS)) {
+ rcar_du_group_setup_defr8(rgrp);
+
+ /* Configure input dot clock routing. We currently hardcode the
+ * configuration to route DOTCLKINn to DUn.
+ */
+ rcar_du_group_write(rgrp, DIDSR, DIDSR_CODE |
+ DIDSR_LCDS_DCLKIN(2) |
+ DIDSR_LCDS_DCLKIN(1) |
+ DIDSR_LCDS_DCLKIN(0) |
+ DIDSR_PDCS_CLK(2, 0) |
+ DIDSR_PDCS_CLK(1, 0) |
+ DIDSR_PDCS_CLK(0, 0));
+ }
/* Use DS1PR and DS2PR to configure planes priorities and connects the
* superposition 0 to DU0 pins. DU1 pins will be configured dynamically.
@@ -149,6 +159,9 @@
{
int ret;
+ if (!rcar_du_has(rcdu, RCAR_DU_FEATURE_EXT_CTRL_REGS))
+ return 0;
+
/* RGB output routing to DPAD0 is configured in the DEFR8 register of
* the first group. As this function can be called with the DU0 and DU1
* CRTCs disabled, we need to enable the first group clock before
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_hdmicon.c b/drivers/gpu/drm/rcar-du/rcar_du_hdmicon.c
index 4d7d4dd..ca94b02 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_hdmicon.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_hdmicon.c
@@ -95,6 +95,8 @@
connector = &rcon->connector;
connector->display_info.width_mm = 0;
connector->display_info.height_mm = 0;
+ connector->interlace_allowed = true;
+ connector->polled = DRM_CONNECTOR_POLL_HPD;
ret = drm_connector_init(rcdu->ddev, connector, &connector_funcs,
DRM_MODE_CONNECTOR_HDMIA);
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_hdmienc.c b/drivers/gpu/drm/rcar-du/rcar_du_hdmienc.c
index 359bc99..221f0a1 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_hdmienc.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_hdmienc.c
@@ -21,6 +21,7 @@
#include "rcar_du_drv.h"
#include "rcar_du_encoder.h"
#include "rcar_du_hdmienc.h"
+#include "rcar_du_lvdsenc.h"
struct rcar_du_hdmienc {
struct rcar_du_encoder *renc;
@@ -36,12 +37,21 @@
struct rcar_du_hdmienc *hdmienc = to_rcar_hdmienc(encoder);
struct drm_encoder_slave_funcs *sfuncs = to_slave_funcs(encoder);
+ if (mode != DRM_MODE_DPMS_ON)
+ mode = DRM_MODE_DPMS_OFF;
+
if (hdmienc->dpms == mode)
return;
+ if (mode == DRM_MODE_DPMS_ON && hdmienc->renc->lvds)
+ rcar_du_lvdsenc_dpms(hdmienc->renc->lvds, encoder->crtc, mode);
+
if (sfuncs->dpms)
sfuncs->dpms(encoder, mode);
+ if (mode != DRM_MODE_DPMS_ON && hdmienc->renc->lvds)
+ rcar_du_lvdsenc_dpms(hdmienc->renc->lvds, encoder->crtc, mode);
+
hdmienc->dpms = mode;
}
@@ -49,8 +59,16 @@
const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
+ struct rcar_du_hdmienc *hdmienc = to_rcar_hdmienc(encoder);
struct drm_encoder_slave_funcs *sfuncs = to_slave_funcs(encoder);
+ /* The internal LVDS encoder has a clock frequency operating range of
+ * 30MHz to 150MHz. Clamp the clock accordingly.
+ */
+ if (hdmienc->renc->lvds)
+ adjusted_mode->clock = clamp(adjusted_mode->clock,
+ 30000, 150000);
+
if (sfuncs->mode_fixup == NULL)
return true;
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_kms.c b/drivers/gpu/drm/rcar-du/rcar_du_kms.c
index 0c5ee61..cc9136e 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_kms.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_kms.c
@@ -346,8 +346,14 @@
/* Process the output pipeline. */
ret = rcar_du_encoders_init_one(rcdu, output, &ep);
if (ret < 0) {
- of_node_put(ep_node);
- return ret;
+ if (ret == -EPROBE_DEFER) {
+ of_node_put(ep_node);
+ return ret;
+ }
+
+ dev_info(rcdu->dev,
+ "encoder initialization failed, skipping\n");
+ continue;
}
num_encoders += ret;
@@ -413,6 +419,11 @@
if (ret < 0)
return ret;
+ if (ret == 0) {
+ dev_err(rcdu->dev, "error: no encoder could be initialized\n");
+ return -EINVAL;
+ }
+
num_encoders = ret;
/* Set the possible CRTCs and possible clones. There's always at least
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_plane.c b/drivers/gpu/drm/rcar-du/rcar_du_plane.c
index 72a7cb4..50f2f2b 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_plane.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_plane.c
@@ -104,14 +104,22 @@
{
struct rcar_du_group *rgrp = plane->group;
unsigned int index = plane->hwindex;
+ bool interlaced;
u32 mwr;
- /* Memory pitch (expressed in pixels) */
+ interlaced = plane->crtc->mode.flags & DRM_MODE_FLAG_INTERLACE;
+
+ /* Memory pitch (expressed in pixels). Must be doubled for interlaced
+ * operation with 32bpp formats.
+ */
if (plane->format->planes == 2)
mwr = plane->pitch;
else
mwr = plane->pitch * 8 / plane->format->bpp;
+ if (interlaced && plane->format->bpp == 32)
+ mwr *= 2;
+
rcar_du_plane_write(rgrp, index, PnMWR, mwr);
/* The Y position is expressed in raster line units and must be doubled
@@ -119,17 +127,23 @@
* doubling the Y position is found in the R8A7779 datasheet, but the
* rule seems to apply there as well.
*
+ * Despite not being documented, doubling seems not to be needed when
+ * operating in interlaced mode.
+ *
* Similarly, for the second plane, NV12 and NV21 formats seem to
- * require a halved Y position value.
+ * require a halved Y position value, in both progressive and interlaced
+ * modes.
*/
rcar_du_plane_write(rgrp, index, PnSPXR, plane->src_x);
rcar_du_plane_write(rgrp, index, PnSPYR, plane->src_y *
- (plane->format->bpp == 32 ? 2 : 1));
+ (!interlaced && plane->format->bpp == 32 ? 2 : 1));
rcar_du_plane_write(rgrp, index, PnDSA0R, plane->dma[0]);
if (plane->format->planes == 2) {
index = (index + 1) % 8;
+ rcar_du_plane_write(rgrp, index, PnMWR, plane->pitch);
+
rcar_du_plane_write(rgrp, index, PnSPXR, plane->src_x);
rcar_du_plane_write(rgrp, index, PnSPYR, plane->src_y *
(plane->format->bpp == 16 ? 2 : 1) / 2);
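To make the pitch rules above concrete: PnMWR takes pixels for packed formats, bytes for the two-plane ones, and must be doubled for interlaced 32bpp scanout so that each field steps over the other field's lines. As a pure function (hypothetical distillation, not driver code):

#include <linux/types.h>

static u32 compute_mwr(u32 pitch_bytes, u32 bpp, bool two_planes,
		       bool interlaced)
{
	/* two-plane formats program the pitch in bytes,
	 * packed formats in pixels */
	u32 mwr = two_planes ? pitch_bytes : pitch_bytes * 8 / bpp;

	if (interlaced && bpp == 32)
		mwr *= 2;	/* one field = every other line */

	return mwr;
}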
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_regs.h b/drivers/gpu/drm/rcar-du/rcar_du_regs.h
index 73f7347..70fcbc4 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_regs.h
+++ b/drivers/gpu/drm/rcar-du/rcar_du_regs.h
@@ -34,6 +34,7 @@
#define DSYSR_SCM_INT_NONE (0 << 4)
#define DSYSR_SCM_INT_SYNC (2 << 4)
#define DSYSR_SCM_INT_VIDEO (3 << 4)
+#define DSYSR_SCM_MASK (3 << 4)
#define DSMR 0x00004
#define DSMR_VSPM (1 << 28)
@@ -256,8 +257,8 @@
#define DIDSR_LCDS_LVDS0(n) (2 << (8 + (n) * 2))
#define DIDSR_LCDS_LVDS1(n) (3 << (8 + (n) * 2))
#define DIDSR_LCDS_MASK(n) (3 << (8 + (n) * 2))
-#define DIDSR_PCDS_CLK(n, clk) (clk << ((n) * 2))
-#define DIDSR_PCDS_MASK(n) (3 << ((n) * 2))
+#define DIDSR_PDCS_CLK(n, clk) (clk << ((n) * 2))
+#define DIDSR_PDCS_MASK(n) (3 << ((n) * 2))
/* -----------------------------------------------------------------------------
* Display Timing Generation Registers
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_vgacon.c b/drivers/gpu/drm/rcar-du/rcar_du_vgacon.c
index 752747a..9d48799 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_vgacon.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_vgacon.c
@@ -64,6 +64,7 @@
connector = &rcon->connector;
connector->display_info.width_mm = 0;
connector->display_info.height_mm = 0;
+ connector->interlace_allowed = true;
ret = drm_connector_init(rcdu->ddev, connector, &connector_funcs,
DRM_MODE_CONNECTOR_VGA);
diff --git a/drivers/gpu/drm/sti/sti_drm_crtc.c b/drivers/gpu/drm/sti/sti_drm_crtc.c
index 4c651c2..e6f6ef7 100644
--- a/drivers/gpu/drm/sti/sti_drm_crtc.c
+++ b/drivers/gpu/drm/sti/sti_drm_crtc.c
@@ -190,11 +190,6 @@
return ret;
}
-static void sti_drm_crtc_load_lut(struct drm_crtc *crtc)
-{
- /* do nothing */
-}
-
static void sti_drm_crtc_disable(struct drm_crtc *crtc)
{
struct sti_mixer *mixer = to_sti_mixer(crtc);
@@ -249,7 +244,6 @@
.mode_fixup = sti_drm_crtc_mode_fixup,
.mode_set = sti_drm_crtc_mode_set,
.mode_set_base = sti_drm_crtc_mode_set_base,
- .load_lut = sti_drm_crtc_load_lut,
.disable = sti_drm_crtc_disable,
};
diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c
index 3367960..ae26cc0 100644
--- a/drivers/gpu/drm/tegra/dc.c
+++ b/drivers/gpu/drm/tegra/dc.c
@@ -168,7 +168,7 @@
const struct tegra_dc_window *window)
{
unsigned h_offset, v_offset, h_size, v_size, h_dda, v_dda, bpp;
- unsigned long value;
+ unsigned long value, flags;
bool yuv, planar;
/*
@@ -181,6 +181,8 @@
else
bpp = planar ? 1 : 2;
+ spin_lock_irqsave(&dc->lock, flags);
+
value = WINDOW_A_SELECT << index;
tegra_dc_writel(dc, value, DC_CMD_DISPLAY_WINDOW_HEADER);
@@ -273,6 +275,7 @@
case TEGRA_BO_TILING_MODE_BLOCK:
DRM_ERROR("hardware doesn't support block linear mode\n");
+ spin_unlock_irqrestore(&dc->lock, flags);
return -EINVAL;
}
@@ -331,6 +334,8 @@
tegra_dc_window_commit(dc, index);
+ spin_unlock_irqrestore(&dc->lock, flags);
+
return 0;
}
@@ -338,11 +343,14 @@
{
struct tegra_dc *dc = to_tegra_dc(plane->crtc);
struct tegra_plane *p = to_tegra_plane(plane);
+ unsigned long flags;
u32 value;
if (!plane->crtc)
return 0;
+ spin_lock_irqsave(&dc->lock, flags);
+
value = WINDOW_A_SELECT << p->index;
tegra_dc_writel(dc, value, DC_CMD_DISPLAY_WINDOW_HEADER);
@@ -352,6 +360,8 @@
tegra_dc_window_commit(dc, p->index);
+ spin_unlock_irqrestore(&dc->lock, flags);
+
return 0;
}
@@ -699,14 +709,16 @@
struct tegra_bo *bo = tegra_fb_get_plane(fb, 0);
unsigned int h_offset = 0, v_offset = 0;
struct tegra_bo_tiling tiling;
+ unsigned long value, flags;
unsigned int format, swap;
- unsigned long value;
int err;
err = tegra_fb_get_tiling(fb, &tiling);
if (err < 0)
return err;
+ spin_lock_irqsave(&dc->lock, flags);
+
tegra_dc_writel(dc, WINDOW_A_SELECT, DC_CMD_DISPLAY_WINDOW_HEADER);
value = fb->offsets[0] + y * fb->pitches[0] +
@@ -752,6 +764,7 @@
case TEGRA_BO_TILING_MODE_BLOCK:
DRM_ERROR("hardware doesn't support block linear mode\n");
+ spin_unlock_irqrestore(&dc->lock, flags);
return -EINVAL;
}
@@ -778,6 +791,8 @@
tegra_dc_writel(dc, value << 8, DC_CMD_STATE_CONTROL);
tegra_dc_writel(dc, value, DC_CMD_STATE_CONTROL);
+ spin_unlock_irqrestore(&dc->lock, flags);
+
return 0;
}
@@ -814,23 +829,32 @@
unsigned long flags, base;
struct tegra_bo *bo;
- if (!dc->event)
+ spin_lock_irqsave(&drm->event_lock, flags);
+
+ if (!dc->event) {
+ spin_unlock_irqrestore(&drm->event_lock, flags);
return;
+ }
bo = tegra_fb_get_plane(crtc->primary->fb, 0);
+ spin_lock(&dc->lock); /* irqs already off; don't clobber flags */
+
/* check if new start address has been latched */
+ tegra_dc_writel(dc, WINDOW_A_SELECT, DC_CMD_DISPLAY_WINDOW_HEADER);
tegra_dc_writel(dc, READ_MUX, DC_CMD_STATE_ACCESS);
base = tegra_dc_readl(dc, DC_WINBUF_START_ADDR);
tegra_dc_writel(dc, 0, DC_CMD_STATE_ACCESS);
+ spin_unlock(&dc->lock);
+
if (base == bo->paddr + crtc->primary->fb->offsets[0]) {
- spin_lock_irqsave(&drm->event_lock, flags);
- drm_send_vblank_event(drm, dc->pipe, dc->event);
- drm_vblank_put(drm, dc->pipe);
+ drm_crtc_send_vblank_event(crtc, dc->event);
+ drm_crtc_vblank_put(crtc);
dc->event = NULL;
- spin_unlock_irqrestore(&drm->event_lock, flags);
}
+
+ spin_unlock_irqrestore(&drm->event_lock, flags);
}
void tegra_dc_cancel_page_flip(struct drm_crtc *crtc, struct drm_file *file)
@@ -843,7 +867,7 @@
if (dc->event && dc->event->base.file_priv == file) {
dc->event->base.destroy(&dc->event->base);
- drm_vblank_put(drm, dc->pipe);
+ drm_crtc_vblank_put(crtc);
dc->event = NULL;
}
@@ -853,16 +877,16 @@
static int tegra_dc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
struct drm_pending_vblank_event *event, uint32_t page_flip_flags)
{
+ unsigned int pipe = drm_crtc_index(crtc);
struct tegra_dc *dc = to_tegra_dc(crtc);
- struct drm_device *drm = crtc->dev;
if (dc->event)
return -EBUSY;
if (event) {
- event->pipe = dc->pipe;
+ event->pipe = pipe;
dc->event = event;
- drm_vblank_get(drm, dc->pipe);
+ drm_crtc_vblank_get(crtc);
}
tegra_dc_set_base(dc, 0, 0, fb);
@@ -1095,10 +1119,6 @@
tegra_dc_commit(dc);
}
-static void tegra_crtc_load_lut(struct drm_crtc *crtc)
-{
-}
-
static const struct drm_crtc_helper_funcs tegra_crtc_helper_funcs = {
.disable = tegra_crtc_disable,
.mode_fixup = tegra_crtc_mode_fixup,
@@ -1106,7 +1126,6 @@
.mode_set_base = tegra_crtc_mode_set_base,
.prepare = tegra_crtc_prepare,
.commit = tegra_crtc_commit,
- .load_lut = tegra_crtc_load_lut,
};
static irqreturn_t tegra_dc_irq(int irq, void *data)
@@ -1127,7 +1146,7 @@
/*
dev_dbg(dc->dev, "%s(): vertical blank\n", __func__);
*/
- drm_handle_vblank(dc->base.dev, dc->pipe);
+ drm_crtc_handle_vblank(&dc->base);
tegra_dc_finish_page_flip(dc);
}
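The dc.c hunks above all follow one locking discipline: take dc->lock with interrupts disabled before selecting a window and programming its registers, and drop the lock on every exit path, including the early -EINVAL return. A minimal sketch of that pattern, with hypothetical register offsets and fields rather than the actual tegra code:

#include <linux/io.h>
#include <linux/spinlock.h>

/* Hypothetical device; stands in for struct tegra_dc. */
struct my_dc {
        spinlock_t lock;
        void __iomem *regs;
};

static int my_dc_program_window(struct my_dc *dc, u32 value)
{
        unsigned long flags;
        int err = 0;

        spin_lock_irqsave(&dc->lock, flags); /* serialize window selection */

        writel(value, dc->regs + 0x700); /* hypothetical register offset */
        if (value == 0) /* error paths must unlock too */
                err = -EINVAL;

        spin_unlock_irqrestore(&dc->lock, flags);
        return err;
}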
diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c
index e549afe..d4f8275 100644
--- a/drivers/gpu/drm/tegra/drm.c
+++ b/drivers/gpu/drm/tegra/drm.c
@@ -694,24 +694,28 @@
.llseek = noop_llseek,
};
-static struct drm_crtc *tegra_crtc_from_pipe(struct drm_device *drm, int pipe)
+static struct drm_crtc *tegra_crtc_from_pipe(struct drm_device *drm,
+ unsigned int pipe)
{
struct drm_crtc *crtc;
list_for_each_entry(crtc, &drm->mode_config.crtc_list, head) {
- struct tegra_dc *dc = to_tegra_dc(crtc);
-
- if (dc->pipe == pipe)
+ if (pipe == drm_crtc_index(crtc))
return crtc;
}
return NULL;
}
-static u32 tegra_drm_get_vblank_counter(struct drm_device *dev, int crtc)
+static u32 tegra_drm_get_vblank_counter(struct drm_device *drm, int pipe)
{
+ struct drm_crtc *crtc = tegra_crtc_from_pipe(drm, pipe);
+
+ if (!crtc)
+ return 0;
+
/* TODO: implement real hardware counter using syncpoints */
- return drm_vblank_count(dev, crtc);
+ return drm_crtc_vblank_count(crtc);
}
static int tegra_drm_enable_vblank(struct drm_device *drm, int pipe)
diff --git a/drivers/gpu/drm/tegra/gem.c b/drivers/gpu/drm/tegra/gem.c
index da32086..8777b7f 100644
--- a/drivers/gpu/drm/tegra/gem.c
+++ b/drivers/gpu/drm/tegra/gem.c
@@ -216,32 +216,58 @@
}
}
-static int tegra_bo_get_pages(struct drm_device *drm, struct tegra_bo *bo,
- size_t size)
+static int tegra_bo_get_pages(struct drm_device *drm, struct tegra_bo *bo)
{
+ struct scatterlist *s;
+ struct sg_table *sgt;
+ unsigned int i;
+
bo->pages = drm_gem_get_pages(&bo->gem);
if (IS_ERR(bo->pages))
return PTR_ERR(bo->pages);
- bo->num_pages = size >> PAGE_SHIFT;
+ bo->num_pages = bo->gem.size >> PAGE_SHIFT;
- bo->sgt = drm_prime_pages_to_sg(bo->pages, bo->num_pages);
- if (IS_ERR(bo->sgt)) {
- drm_gem_put_pages(&bo->gem, bo->pages, false, false);
- return PTR_ERR(bo->sgt);
+ sgt = drm_prime_pages_to_sg(bo->pages, bo->num_pages);
+ if (IS_ERR(sgt))
+ goto put_pages;
+
+ /*
+ * Fake up the SG table so that dma_map_sg() can be used to flush the
+ * pages associated with it. Note that this relies on the fact that
+ * the DMA API doesn't hook into IOMMU on Tegra, therefore mapping is
+ * only cache maintenance.
+ *
+ * TODO: Replace this with drm_clflush_sg() once it can be implemented
+ * without relying on symbols that are not exported.
+ */
+ for_each_sg(sgt->sgl, s, sgt->nents, i)
+ sg_dma_address(s) = sg_phys(s);
+
+ if (dma_map_sg(drm->dev, sgt->sgl, sgt->nents, DMA_TO_DEVICE) == 0) {
+ sgt = ERR_PTR(-ENOMEM);
+ goto release_sgt;
}
+ bo->sgt = sgt;
+
return 0;
+
+release_sgt:
+ sg_free_table(sgt);
+ kfree(sgt);
+put_pages:
+ drm_gem_put_pages(&bo->gem, bo->pages, false, false);
+ return PTR_ERR(sgt);
}
-static int tegra_bo_alloc(struct drm_device *drm, struct tegra_bo *bo,
- size_t size)
+static int tegra_bo_alloc(struct drm_device *drm, struct tegra_bo *bo)
{
struct tegra_drm *tegra = drm->dev_private;
int err;
if (tegra->domain) {
- err = tegra_bo_get_pages(drm, bo, size);
+ err = tegra_bo_get_pages(drm, bo);
if (err < 0)
return err;
@@ -251,6 +277,8 @@
return err;
}
} else {
+ size_t size = bo->gem.size;
+
bo->vaddr = dma_alloc_writecombine(drm->dev, size, &bo->paddr,
GFP_KERNEL | __GFP_NOWARN);
if (!bo->vaddr) {
@@ -274,7 +302,7 @@
if (IS_ERR(bo))
return bo;
- err = tegra_bo_alloc(drm, bo, size);
+ err = tegra_bo_alloc(drm, bo);
if (err < 0)
goto release;
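The gem.c comment explains why the SG table is faked up: with no IOMMU behind the DMA API on this Tegra configuration, dma_map_sg() amounts to cache maintenance only. A minimal sketch of that flush-by-mapping idea, as a hypothetical helper outside the driver:

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

/*
 * Flush CPU caches for the pages behind an sg_table by mapping it for
 * device access. Assumes the DMA API performs no IOMMU translation
 * here, so the "mapping" is pure cache maintenance.
 */
static int flush_sgt(struct device *dev, struct sg_table *sgt)
{
        struct scatterlist *s;
        unsigned int i;

        /* point the DMA addresses at the physical pages first */
        for_each_sg(sgt->sgl, s, sgt->nents, i)
                sg_dma_address(s) = sg_phys(s);

        if (dma_map_sg(dev, sgt->sgl, sgt->nents, DMA_TO_DEVICE) == 0)
                return -ENOMEM;

        return 0;
}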
diff --git a/drivers/input/evdev.c b/drivers/input/evdev.c
index 8afa28e..18d4b2c 100644
--- a/drivers/input/evdev.c
+++ b/drivers/input/evdev.c
@@ -28,6 +28,13 @@
#include <linux/cdev.h>
#include "input-compat.h"
+enum evdev_clock_type {
+ EV_CLK_REAL = 0,
+ EV_CLK_MONO,
+ EV_CLK_BOOT,
+ EV_CLK_MAX
+};
+
struct evdev {
int open;
struct input_handle handle;
@@ -49,12 +56,32 @@
struct fasync_struct *fasync;
struct evdev *evdev;
struct list_head node;
- int clkid;
+ int clk_type;
bool revoked;
unsigned int bufsize;
struct input_event buffer[];
};
+static int evdev_set_clk_type(struct evdev_client *client, unsigned int clkid)
+{
+ switch (clkid) {
+
+ case CLOCK_REALTIME:
+ client->clk_type = EV_CLK_REAL;
+ break;
+ case CLOCK_MONOTONIC:
+ client->clk_type = EV_CLK_MONO;
+ break;
+ case CLOCK_BOOTTIME:
+ client->clk_type = EV_CLK_BOOT;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
/* flush queued events of type @type, caller must hold client->buffer_lock */
static void __evdev_flush_queue(struct evdev_client *client, unsigned int type)
{
@@ -108,8 +135,11 @@
struct input_event ev;
ktime_t time;
- time = (client->clkid == CLOCK_MONOTONIC) ?
- ktime_get() : ktime_get_real();
+ time = client->clk_type == EV_CLK_REAL ?
+ ktime_get_real() :
+ client->clk_type == EV_CLK_MONO ?
+ ktime_get() :
+ ktime_get_boottime();
ev.time = ktime_to_timeval(time);
ev.type = EV_SYN;
@@ -159,7 +189,7 @@
static void evdev_pass_values(struct evdev_client *client,
const struct input_value *vals, unsigned int count,
- ktime_t mono, ktime_t real)
+ ktime_t *ev_time)
{
struct evdev *evdev = client->evdev;
const struct input_value *v;
@@ -169,8 +199,7 @@
if (client->revoked)
return;
- event.time = ktime_to_timeval(client->clkid == CLOCK_MONOTONIC ?
- mono : real);
+ event.time = ktime_to_timeval(ev_time[client->clk_type]);
/* Interrupts are disabled, just acquire the lock. */
spin_lock(&client->buffer_lock);
@@ -198,21 +227,22 @@
{
struct evdev *evdev = handle->private;
struct evdev_client *client;
- ktime_t time_mono, time_real;
+ ktime_t ev_time[EV_CLK_MAX];
- time_mono = ktime_get();
- time_real = ktime_mono_to_real(time_mono);
+ ev_time[EV_CLK_MONO] = ktime_get();
+ ev_time[EV_CLK_REAL] = ktime_mono_to_real(ev_time[EV_CLK_MONO]);
+ ev_time[EV_CLK_BOOT] = ktime_mono_to_any(ev_time[EV_CLK_MONO],
+ TK_OFFS_BOOT);
rcu_read_lock();
client = rcu_dereference(evdev->grab);
if (client)
- evdev_pass_values(client, vals, count, time_mono, time_real);
+ evdev_pass_values(client, vals, count, ev_time);
else
list_for_each_entry_rcu(client, &evdev->client_list, node)
- evdev_pass_values(client, vals, count,
- time_mono, time_real);
+ evdev_pass_values(client, vals, count, ev_time);
rcu_read_unlock();
}
@@ -877,10 +907,8 @@
case EVIOCSCLOCKID:
if (copy_from_user(&i, p, sizeof(unsigned int)))
return -EFAULT;
- if (i != CLOCK_MONOTONIC && i != CLOCK_REALTIME)
- return -EINVAL;
- client->clkid = i;
- return 0;
+
+ return evdev_set_clk_type(client, i);
case EVIOCGKEYCODE:
return evdev_handle_get_keycode(dev, p);
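The evdev rework replaces the mono/real timestamp pair with one array indexed by the client's clock type, so delivering an event is a table lookup instead of a conditional. The shape of that pattern, as a minimal sketch with hypothetical names:

#include <linux/ktime.h>
#include <linux/timekeeping.h>

enum clk { CLK_REAL, CLK_MONO, CLK_BOOT, CLK_MAX };

/* Capture all supported clock bases once per event batch... */
static void capture_times(ktime_t t[CLK_MAX])
{
        t[CLK_MONO] = ktime_get();
        t[CLK_REAL] = ktime_mono_to_real(t[CLK_MONO]);
        t[CLK_BOOT] = ktime_mono_to_any(t[CLK_MONO], TK_OFFS_BOOT);
}

/* ...then each client just indexes by its configured clock type. */
static ktime_t client_time(const ktime_t t[CLK_MAX], enum clk type)
{
        return t[type];
}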
diff --git a/drivers/input/input.c b/drivers/input/input.c
index 04217c2..213e3a1 100644
--- a/drivers/input/input.c
+++ b/drivers/input/input.c
@@ -1974,18 +1974,22 @@
events = mt_slots + 1; /* count SYN_MT_REPORT and SYN_REPORT */
- for (i = 0; i < ABS_CNT; i++) {
- if (test_bit(i, dev->absbit)) {
- if (input_is_mt_axis(i))
- events += mt_slots;
- else
- events++;
+ if (test_bit(EV_ABS, dev->evbit)) {
+ for (i = 0; i < ABS_CNT; i++) {
+ if (test_bit(i, dev->absbit)) {
+ if (input_is_mt_axis(i))
+ events += mt_slots;
+ else
+ events++;
+ }
}
}
- for (i = 0; i < REL_CNT; i++)
- if (test_bit(i, dev->relbit))
- events++;
+ if (test_bit(EV_REL, dev->evbit)) {
+ for (i = 0; i < REL_CNT; i++)
+ if (test_bit(i, dev->relbit))
+ events++;
+ }
/* Make room for KEY and MSC events */
events += 7;
diff --git a/drivers/input/keyboard/Kconfig b/drivers/input/keyboard/Kconfig
index 96ee26c..a5d9b3f 100644
--- a/drivers/input/keyboard/Kconfig
+++ b/drivers/input/keyboard/Kconfig
@@ -559,6 +559,7 @@
config KEYBOARD_STMPE
tristate "STMPE keypad support"
depends on MFD_STMPE
+ depends on OF
select INPUT_MATRIXKMAP
help
Say Y here if you want to use the keypad controller on STMPE I/O
diff --git a/drivers/input/keyboard/gpio_keys.c b/drivers/input/keyboard/gpio_keys.c
index d4dd78a..883d6aed 100644
--- a/drivers/input/keyboard/gpio_keys.c
+++ b/drivers/input/keyboard/gpio_keys.c
@@ -35,9 +35,13 @@
struct gpio_button_data {
const struct gpio_keys_button *button;
struct input_dev *input;
- struct timer_list timer;
- struct work_struct work;
- unsigned int timer_debounce; /* in msecs */
+
+ struct timer_list release_timer;
+ unsigned int release_delay; /* in msecs, for IRQ-only buttons */
+
+ struct delayed_work work;
+ unsigned int software_debounce; /* in msecs, for GPIO-driven buttons */
+
unsigned int irq;
spinlock_t lock;
bool disabled;
@@ -116,11 +120,14 @@
{
if (!bdata->disabled) {
/*
- * Disable IRQ and possible debouncing timer.
+ * Disable IRQ and associated timer/work structure.
*/
disable_irq(bdata->irq);
- if (bdata->timer_debounce)
- del_timer_sync(&bdata->timer);
+
+ if (gpio_is_valid(bdata->button->gpio))
+ cancel_delayed_work_sync(&bdata->work);
+ else
+ del_timer_sync(&bdata->release_timer);
bdata->disabled = true;
}
@@ -343,7 +350,7 @@
static void gpio_keys_gpio_work_func(struct work_struct *work)
{
struct gpio_button_data *bdata =
- container_of(work, struct gpio_button_data, work);
+ container_of(work, struct gpio_button_data, work.work);
gpio_keys_gpio_report_event(bdata);
@@ -351,13 +358,6 @@
pm_relax(bdata->input->dev.parent);
}
-static void gpio_keys_gpio_timer(unsigned long _data)
-{
- struct gpio_button_data *bdata = (struct gpio_button_data *)_data;
-
- schedule_work(&bdata->work);
-}
-
static irqreturn_t gpio_keys_gpio_isr(int irq, void *dev_id)
{
struct gpio_button_data *bdata = dev_id;
@@ -366,11 +366,10 @@
if (bdata->button->wakeup)
pm_stay_awake(bdata->input->dev.parent);
- if (bdata->timer_debounce)
- mod_timer(&bdata->timer,
- jiffies + msecs_to_jiffies(bdata->timer_debounce));
- else
- schedule_work(&bdata->work);
+
+ mod_delayed_work(system_wq,
+ &bdata->work,
+ msecs_to_jiffies(bdata->software_debounce));
return IRQ_HANDLED;
}
@@ -408,7 +407,7 @@
input_event(input, EV_KEY, button->code, 1);
input_sync(input);
- if (!bdata->timer_debounce) {
+ if (!bdata->release_delay) {
input_event(input, EV_KEY, button->code, 0);
input_sync(input);
goto out;
@@ -417,9 +416,9 @@
bdata->key_pressed = true;
}
- if (bdata->timer_debounce)
- mod_timer(&bdata->timer,
- jiffies + msecs_to_jiffies(bdata->timer_debounce));
+ if (bdata->release_delay)
+ mod_timer(&bdata->release_timer,
+ jiffies + msecs_to_jiffies(bdata->release_delay));
out:
spin_unlock_irqrestore(&bdata->lock, flags);
return IRQ_HANDLED;
@@ -429,10 +428,10 @@
{
struct gpio_button_data *bdata = data;
- if (bdata->timer_debounce)
- del_timer_sync(&bdata->timer);
-
- cancel_work_sync(&bdata->work);
+ if (gpio_is_valid(bdata->button->gpio))
+ cancel_delayed_work_sync(&bdata->work);
+ else
+ del_timer_sync(&bdata->release_timer);
}
static int gpio_keys_setup_key(struct platform_device *pdev,
@@ -466,23 +465,25 @@
button->debounce_interval * 1000);
/* use timer if gpiolib doesn't provide debounce */
if (error < 0)
- bdata->timer_debounce =
+ bdata->software_debounce =
button->debounce_interval;
}
- irq = gpio_to_irq(button->gpio);
- if (irq < 0) {
- error = irq;
- dev_err(dev,
- "Unable to get irq number for GPIO %d, error %d\n",
- button->gpio, error);
- return error;
+ if (button->irq) {
+ bdata->irq = button->irq;
+ } else {
+ irq = gpio_to_irq(button->gpio);
+ if (irq < 0) {
+ error = irq;
+ dev_err(dev,
+ "Unable to get irq number for GPIO %d, error %d\n",
+ button->gpio, error);
+ return error;
+ }
+ bdata->irq = irq;
}
- bdata->irq = irq;
- INIT_WORK(&bdata->work, gpio_keys_gpio_work_func);
- setup_timer(&bdata->timer,
- gpio_keys_gpio_timer, (unsigned long)bdata);
+ INIT_DELAYED_WORK(&bdata->work, gpio_keys_gpio_work_func);
isr = gpio_keys_gpio_isr;
irqflags = IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING;
@@ -499,8 +500,8 @@
return -EINVAL;
}
- bdata->timer_debounce = button->debounce_interval;
- setup_timer(&bdata->timer,
+ bdata->release_delay = button->debounce_interval;
+ setup_timer(&bdata->release_timer,
gpio_keys_irq_timer, (unsigned long)bdata);
isr = gpio_keys_irq_isr;
@@ -510,7 +511,7 @@
input_set_capability(input, button->type ?: EV_KEY, button->code);
/*
- * Install custom action to cancel debounce timer and
+ * Install custom action to cancel release timer and
* workqueue item.
*/
error = devm_add_action(&pdev->dev, gpio_keys_quiesce_key, bdata);
@@ -618,33 +619,30 @@
i = 0;
for_each_child_of_node(node, pp) {
- int gpio = -1;
enum of_gpio_flags flags;
button = &pdata->buttons[i++];
- if (!of_find_property(pp, "gpios", NULL)) {
- button->irq = irq_of_parse_and_map(pp, 0);
- if (button->irq == 0) {
- i--;
- pdata->nbuttons--;
- dev_warn(dev, "Found button without gpios or irqs\n");
- continue;
- }
- } else {
- gpio = of_get_gpio_flags(pp, 0, &flags);
- if (gpio < 0) {
- error = gpio;
+ button->gpio = of_get_gpio_flags(pp, 0, &flags);
+ if (button->gpio < 0) {
+ error = button->gpio;
+ if (error != -ENOENT) {
if (error != -EPROBE_DEFER)
dev_err(dev,
"Failed to get gpio flags, error: %d\n",
error);
return ERR_PTR(error);
}
+ } else {
+ button->active_low = flags & OF_GPIO_ACTIVE_LOW;
}
- button->gpio = gpio;
- button->active_low = flags & OF_GPIO_ACTIVE_LOW;
+ button->irq = irq_of_parse_and_map(pp, 0);
+
+ if (!gpio_is_valid(button->gpio) && !button->irq) {
+ dev_err(dev, "Found button without gpios or irqs\n");
+ return ERR_PTR(-EINVAL);
+ }
if (of_property_read_u32(pp, "linux,code", &button->code)) {
dev_err(dev, "Button without keycode: 0x%x\n",
@@ -659,6 +657,8 @@
button->wakeup = !!of_get_property(pp, "gpio-key,wakeup", NULL);
+ button->can_disable = !!of_get_property(pp, "linux,can-disable", NULL);
+
if (of_property_read_u32(pp, "debounce-interval",
&button->debounce_interval))
button->debounce_interval = 5;
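The gpio_keys rework collapses the timer-plus-work pair for GPIO-driven buttons into a single delayed_work: the ISR (re)arms the work with the software debounce interval as the delay, and a zero delay degenerates into an immediate schedule. A minimal sketch of that debounce shape, with hypothetical driver data:

#include <linux/interrupt.h>
#include <linux/jiffies.h>
#include <linux/workqueue.h>

struct button {
        struct delayed_work work;
        unsigned int debounce_ms;
};

static void button_work(struct work_struct *work)
{
        struct button *b = container_of(work, struct button, work.work);

        /* read the GPIO and report the debounced key state here */
        (void)b;
}

static irqreturn_t button_isr(int irq, void *dev_id)
{
        struct button *b = dev_id;

        /* re-arming on each edge restarts the debounce window */
        mod_delayed_work(system_wq, &b->work,
                         msecs_to_jiffies(b->debounce_ms));
        return IRQ_HANDLED;
}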
diff --git a/drivers/input/keyboard/hil_kbd.c b/drivers/input/keyboard/hil_kbd.c
index 610a8af..5b152f2 100644
--- a/drivers/input/keyboard/hil_kbd.c
+++ b/drivers/input/keyboard/hil_kbd.c
@@ -473,7 +473,7 @@
if (error)
goto bail1;
- init_completion(&dev->cmd_done);
+ reinit_completion(&dev->cmd_done);
serio_write(serio, 0);
serio_write(serio, 0);
serio_write(serio, HIL_PKT_CMD >> 8);
@@ -482,7 +482,7 @@
if (error)
goto bail1;
- init_completion(&dev->cmd_done);
+ reinit_completion(&dev->cmd_done);
serio_write(serio, 0);
serio_write(serio, 0);
serio_write(serio, HIL_PKT_CMD >> 8);
@@ -491,7 +491,7 @@
if (error)
goto bail1;
- init_completion(&dev->cmd_done);
+ reinit_completion(&dev->cmd_done);
serio_write(serio, 0);
serio_write(serio, 0);
serio_write(serio, HIL_PKT_CMD >> 8);
diff --git a/drivers/input/keyboard/stmpe-keypad.c b/drivers/input/keyboard/stmpe-keypad.c
index ef5e67f..fe6e3f2 100644
--- a/drivers/input/keyboard/stmpe-keypad.c
+++ b/drivers/input/keyboard/stmpe-keypad.c
@@ -45,13 +45,14 @@
#define STMPE_KEYPAD_MAX_ROWS 8
#define STMPE_KEYPAD_MAX_COLS 8
#define STMPE_KEYPAD_ROW_SHIFT 3
-#define STMPE_KEYPAD_KEYMAP_SIZE \
+#define STMPE_KEYPAD_KEYMAP_MAX_SIZE \
(STMPE_KEYPAD_MAX_ROWS * STMPE_KEYPAD_MAX_COLS)
/**
* struct stmpe_keypad_variant - model-specific attributes
* @auto_increment: whether the KPC_DATA_BYTE register address
* auto-increments on multiple reads
+ * @set_pullup: whether the pins need to have their pull-ups set
* @num_data: number of data bytes
* @num_normal_data: number of normal keys' data bytes
* @max_cols: maximum number of columns supported
@@ -61,6 +62,7 @@
*/
struct stmpe_keypad_variant {
bool auto_increment;
+ bool set_pullup;
int num_data;
int num_normal_data;
int max_cols;
@@ -81,6 +83,7 @@
},
[STMPE2401] = {
.auto_increment = false,
+ .set_pullup = true,
.num_data = 3,
.num_normal_data = 2,
.max_cols = 8,
@@ -90,6 +93,7 @@
},
[STMPE2403] = {
.auto_increment = true,
+ .set_pullup = true,
.num_data = 5,
.num_normal_data = 3,
.max_cols = 8,
@@ -99,16 +103,30 @@
},
};
+/**
+ * struct stmpe_keypad - STMPE keypad state container
+ * @stmpe: pointer to parent STMPE device
+ * @input: spawned input device
+ * @variant: STMPE variant
+ * @debounce_ms: debounce interval, in ms. Maximum is
+ * %STMPE_KEYPAD_MAX_DEBOUNCE.
+ * @scan_count: number of key scanning cycles to confirm key data.
+ * Maximum is %STMPE_KEYPAD_MAX_SCAN_COUNT.
+ * @no_autorepeat: disable key autorepeat
+ * @rows: bitmask for the rows
+ * @cols: bitmask for the columns
+ * @keymap: the keymap
+ */
struct stmpe_keypad {
struct stmpe *stmpe;
struct input_dev *input;
const struct stmpe_keypad_variant *variant;
- const struct stmpe_keypad_platform_data *plat;
-
+ unsigned int debounce_ms;
+ unsigned int scan_count;
+ bool no_autorepeat;
unsigned int rows;
unsigned int cols;
-
- unsigned short keymap[STMPE_KEYPAD_KEYMAP_SIZE];
+ unsigned short keymap[STMPE_KEYPAD_KEYMAP_MAX_SIZE];
};
static int stmpe_keypad_read_data(struct stmpe_keypad *keypad, u8 *data)
@@ -171,7 +189,10 @@
unsigned int col_gpios = variant->col_gpios;
unsigned int row_gpios = variant->row_gpios;
struct stmpe *stmpe = keypad->stmpe;
+ u8 pureg = stmpe->regs[STMPE_IDX_GPPUR_LSB];
unsigned int pins = 0;
+ unsigned int pu_pins = 0;
+ int ret;
int i;
/*
@@ -188,8 +209,10 @@
for (i = 0; i < variant->max_cols; i++) {
int num = __ffs(col_gpios);
- if (keypad->cols & (1 << i))
+ if (keypad->cols & (1 << i)) {
pins |= 1 << num;
+ pu_pins |= 1 << num;
+ }
col_gpios &= ~(1 << num);
}
@@ -203,20 +226,43 @@
row_gpios &= ~(1 << num);
}
- return stmpe_set_altfunc(stmpe, pins, STMPE_BLOCK_KEYPAD);
+ ret = stmpe_set_altfunc(stmpe, pins, STMPE_BLOCK_KEYPAD);
+ if (ret)
+ return ret;
+
+ /*
+ * On STMPE24xx, set pin bias to pull-up on all keypad input
+ * pins (columns); these happen to be at most 8 pins, placed at
+ * GPIO0-7, so only the LSB of the pull-up register ever needs
+ * to be written.
+ */
+ if (variant->set_pullup) {
+ u8 val;
+
+ ret = stmpe_reg_read(stmpe, pureg);
+ if (ret)
+ return ret;
+
+ /* Do not touch unused pins, may be used for GPIO */
+ val = ret & ~pu_pins;
+ val |= pu_pins;
+
+ ret = stmpe_reg_write(stmpe, pureg, val);
+ }
+
+ return 0;
}
static int stmpe_keypad_chip_init(struct stmpe_keypad *keypad)
{
- const struct stmpe_keypad_platform_data *plat = keypad->plat;
const struct stmpe_keypad_variant *variant = keypad->variant;
struct stmpe *stmpe = keypad->stmpe;
int ret;
- if (plat->debounce_ms > STMPE_KEYPAD_MAX_DEBOUNCE)
+ if (keypad->debounce_ms > STMPE_KEYPAD_MAX_DEBOUNCE)
return -EINVAL;
- if (plat->scan_count > STMPE_KEYPAD_MAX_SCAN_COUNT)
+ if (keypad->scan_count > STMPE_KEYPAD_MAX_SCAN_COUNT)
return -EINVAL;
ret = stmpe_enable(stmpe, STMPE_BLOCK_KEYPAD);
@@ -245,7 +291,7 @@
ret = stmpe_set_bits(stmpe, STMPE_KPC_CTRL_MSB,
STMPE_KPC_CTRL_MSB_SCAN_COUNT,
- plat->scan_count << 4);
+ keypad->scan_count << 4);
if (ret < 0)
return ret;
@@ -253,17 +299,18 @@
STMPE_KPC_CTRL_LSB_SCAN |
STMPE_KPC_CTRL_LSB_DEBOUNCE,
STMPE_KPC_CTRL_LSB_SCAN |
- (plat->debounce_ms << 1));
+ (keypad->debounce_ms << 1));
}
-static void stmpe_keypad_fill_used_pins(struct stmpe_keypad *keypad)
+static void stmpe_keypad_fill_used_pins(struct stmpe_keypad *keypad,
+ u32 used_rows, u32 used_cols)
{
int row, col;
- for (row = 0; row < STMPE_KEYPAD_MAX_ROWS; row++) {
- for (col = 0; col < STMPE_KEYPAD_MAX_COLS; col++) {
+ for (row = 0; row < used_rows; row++) {
+ for (col = 0; col < used_cols; col++) {
int code = MATRIX_SCAN_CODE(row, col,
- STMPE_KEYPAD_ROW_SHIFT);
+ STMPE_KEYPAD_ROW_SHIFT);
if (keypad->keymap[code] != KEY_RESERVED) {
keypad->rows |= 1 << row;
keypad->cols |= 1 << col;
@@ -272,51 +319,17 @@
}
}
-#ifdef CONFIG_OF
-static const struct stmpe_keypad_platform_data *
-stmpe_keypad_of_probe(struct device *dev)
-{
- struct device_node *np = dev->of_node;
- struct stmpe_keypad_platform_data *plat;
-
- if (!np)
- return ERR_PTR(-ENODEV);
-
- plat = devm_kzalloc(dev, sizeof(*plat), GFP_KERNEL);
- if (!plat)
- return ERR_PTR(-ENOMEM);
-
- of_property_read_u32(np, "debounce-interval", &plat->debounce_ms);
- of_property_read_u32(np, "st,scan-count", &plat->scan_count);
-
- plat->no_autorepeat = of_property_read_bool(np, "st,no-autorepeat");
-
- return plat;
-}
-#else
-static inline const struct stmpe_keypad_platform_data *
-stmpe_keypad_of_probe(struct device *dev)
-{
- return ERR_PTR(-EINVAL);
-}
-#endif
-
static int stmpe_keypad_probe(struct platform_device *pdev)
{
struct stmpe *stmpe = dev_get_drvdata(pdev->dev.parent);
- const struct stmpe_keypad_platform_data *plat;
+ struct device_node *np = pdev->dev.of_node;
struct stmpe_keypad *keypad;
struct input_dev *input;
+ u32 rows;
+ u32 cols;
int error;
int irq;
- plat = stmpe->pdata->keypad;
- if (!plat) {
- plat = stmpe_keypad_of_probe(&pdev->dev);
- if (IS_ERR(plat))
- return PTR_ERR(plat);
- }
-
irq = platform_get_irq(pdev, 0);
if (irq < 0)
return irq;
@@ -326,6 +339,13 @@
if (!keypad)
return -ENOMEM;
+ keypad->stmpe = stmpe;
+ keypad->variant = &stmpe_keypad_variants[stmpe->partnum];
+
+ of_property_read_u32(np, "debounce-interval", &keypad->debounce_ms);
+ of_property_read_u32(np, "st,scan-count", &keypad->scan_count);
+ keypad->no_autorepeat = of_property_read_bool(np, "st,no-autorepeat");
+
input = devm_input_allocate_device(&pdev->dev);
if (!input)
return -ENOMEM;
@@ -334,23 +354,22 @@
input->id.bustype = BUS_I2C;
input->dev.parent = &pdev->dev;
- error = matrix_keypad_build_keymap(plat->keymap_data, NULL,
- STMPE_KEYPAD_MAX_ROWS,
- STMPE_KEYPAD_MAX_COLS,
+ error = matrix_keypad_parse_of_params(&pdev->dev, &rows, &cols);
+ if (error)
+ return error;
+
+ error = matrix_keypad_build_keymap(NULL, NULL, rows, cols,
keypad->keymap, input);
if (error)
return error;
input_set_capability(input, EV_MSC, MSC_SCAN);
- if (!plat->no_autorepeat)
+ if (!keypad->no_autorepeat)
__set_bit(EV_REP, input->evbit);
- stmpe_keypad_fill_used_pins(keypad);
+ stmpe_keypad_fill_used_pins(keypad, rows, cols);
- keypad->stmpe = stmpe;
- keypad->plat = plat;
keypad->input = input;
- keypad->variant = &stmpe_keypad_variants[stmpe->partnum];
error = stmpe_keypad_chip_init(keypad);
if (error < 0)
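The pull-up setup above is a read-modify-write on a register shared with the GPIO driver: read the current pull-up bits, keep the bits for pins the keypad does not own, and set the bits for the keypad columns. The same idiom as a sketch, with read_reg()/write_reg() standing in for stmpe_reg_read()/stmpe_reg_write():

#include <linux/types.h>

int read_reg(u8 reg); /* hypothetical: returns register value or -errno */
int write_reg(u8 reg, u8 val); /* hypothetical */

static int set_pullups(u8 reg, u8 mask)
{
        int ret = read_reg(reg);

        if (ret < 0)
                return ret;

        /* "ret | mask" would do; spelled out to mirror the hunk above */
        return write_reg(reg, ((u8)ret & ~mask) | mask);
}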
diff --git a/drivers/input/mouse/alps.c b/drivers/input/mouse/alps.c
index d125a01..d88d73d 100644
--- a/drivers/input/mouse/alps.c
+++ b/drivers/input/mouse/alps.c
@@ -881,6 +881,34 @@
unsigned char *pkt,
unsigned char pkt_id)
{
+ /*
+ * packet-fmt b7 b6 b5 b4 b3 b2 b1 b0
+ * Byte0 TWO & MULTI L 1 R M 1 Y0-2 Y0-1 Y0-0
+ * Byte0 NEW L 1 X1-5 1 1 Y0-2 Y0-1 Y0-0
+ * Byte1 Y0-10 Y0-9 Y0-8 Y0-7 Y0-6 Y0-5 Y0-4 Y0-3
+ * Byte2 X0-11 1 X0-10 X0-9 X0-8 X0-7 X0-6 X0-5
+ * Byte3 X1-11 1 X0-4 X0-3 1 X0-2 X0-1 X0-0
+ * Byte4 TWO X1-10 TWO X1-9 X1-8 X1-7 X1-6 X1-5 X1-4
+ * Byte4 MULTI X1-10 TWO X1-9 X1-8 X1-7 X1-6 Y1-5 1
+ * Byte4 NEW X1-10 TWO X1-9 X1-8 X1-7 X1-6 0 0
+ * Byte5 TWO & NEW Y1-10 0 Y1-9 Y1-8 Y1-7 Y1-6 Y1-5 Y1-4
+ * Byte5 MULTI Y1-10 0 Y1-9 Y1-8 Y1-7 Y1-6 F-1 F-0
+ * L: Left button
+ * R / M: Non-clickpads: Right / Middle button
+ * Clickpads: When > 2 fingers are down, and some fingers
+ * are in the button area, then the 2 coordinates reported
+ * are for fingers outside the button area and these report
+ * extra fingers being present in the right / left button
+ * area. Note these fingers are not added to the F field!
+ * so if a TWO packet is received and R = 1 then there are
+ * 3 fingers down, etc.
+ * TWO: 1: Two touches present, byte 0/4/5 are in TWO fmt
+ * 0: If byte 4 bit 0 is 1, then byte 0/4/5 are in MULTI fmt
+ * otherwise byte 0 bit 4 must be set and byte 0/4/5 are
+ * in NEW fmt
+ * F: Number of fingers - 3, 0 means 3 fingers, 1 means 4 ...
+ */
+
mt[0].x = ((pkt[2] & 0x80) << 4);
mt[0].x |= ((pkt[2] & 0x3F) << 5);
mt[0].x |= ((pkt[3] & 0x30) >> 1);
@@ -919,18 +947,21 @@
static int alps_get_mt_count(struct input_mt_pos *mt)
{
- int i;
+ int i, fingers = 0;
- for (i = 0; i < MAX_TOUCHES && mt[i].x != 0 && mt[i].y != 0; i++)
- /* empty */;
+ for (i = 0; i < MAX_TOUCHES; i++) {
+ if (mt[i].x != 0 || mt[i].y != 0)
+ fingers++;
+ }
- return i;
+ return fingers;
}
static int alps_decode_packet_v7(struct alps_fields *f,
unsigned char *p,
struct psmouse *psmouse)
{
+ struct alps_data *priv = psmouse->private;
unsigned char pkt_id;
pkt_id = alps_get_packet_id_v7(p);
@@ -938,19 +969,52 @@
return 0;
if (pkt_id == V7_PACKET_ID_UNKNOWN)
return -1;
+ /*
+ * NEW packets are sent to indicate a discontinuity in the finger
+ * coordinate reporting. Specifically a finger may have moved from
+ * slot 0 to 1 or vice versa. INPUT_MT_TRACK takes care of this for
+ * us.
+ *
+ * NEW packets have 3 problems:
+ * 1) They do not contain middle / right button info (on non-clickpads);
+ * this can be worked around by preserving the old button state
+ * 2) They do not contain an accurate fingercount, and they are
+ * typically sent when the number of fingers changes. We cannot use
+ * the old finger count as that may mismatch with the number of
+ * touch coordinates available in the NEW packet
+ * 3) Their x data for the second touch is inaccurate, leading to
+ * a possible jump of the x coordinate by 16 units when the first
+ * non-NEW packet comes in
+ * Since problems 2 & 3 cannot be worked around, just ignore them.
+ */
+ if (pkt_id == V7_PACKET_ID_NEW)
+ return 1;
alps_get_finger_coordinate_v7(f->mt, p, pkt_id);
- if (pkt_id == V7_PACKET_ID_TWO || pkt_id == V7_PACKET_ID_MULTI) {
- f->left = (p[0] & 0x80) >> 7;
+ if (pkt_id == V7_PACKET_ID_TWO)
+ f->fingers = alps_get_mt_count(f->mt);
+ else /* pkt_id == V7_PACKET_ID_MULTI */
+ f->fingers = 3 + (p[5] & 0x03);
+
+ f->left = (p[0] & 0x80) >> 7;
+ if (priv->flags & ALPS_BUTTONPAD) {
+ if (p[0] & 0x20)
+ f->fingers++;
+ if (p[0] & 0x10)
+ f->fingers++;
+ } else {
f->right = (p[0] & 0x20) >> 5;
f->middle = (p[0] & 0x10) >> 4;
}
- if (pkt_id == V7_PACKET_ID_TWO)
- f->fingers = alps_get_mt_count(f->mt);
- else if (pkt_id == V7_PACKET_ID_MULTI)
- f->fingers = 3 + (p[5] & 0x03);
+ /* Sometimes a single touch is reported in mt[1] rather than mt[0] */
+ if (f->fingers == 1 && f->mt[0].x == 0 && f->mt[0].y == 0) {
+ f->mt[0].x = f->mt[1].x;
+ f->mt[0].y = f->mt[1].y;
+ f->mt[1].x = 0;
+ f->mt[1].y = 0;
+ }
return 0;
}
diff --git a/drivers/input/mouse/trackpoint.c b/drivers/input/mouse/trackpoint.c
index 30c8b69..354d47e 100644
--- a/drivers/input/mouse/trackpoint.c
+++ b/drivers/input/mouse/trackpoint.c
@@ -227,6 +227,7 @@
TRACKPOINT_INT_ATTR(upthresh, TP_UP_THRESH, TP_DEF_UP_THRESH);
TRACKPOINT_INT_ATTR(ztime, TP_Z_TIME, TP_DEF_Z_TIME);
TRACKPOINT_INT_ATTR(jenks, TP_JENKS_CURV, TP_DEF_JENKS_CURV);
+TRACKPOINT_INT_ATTR(drift_time, TP_DRIFT_TIME, TP_DEF_DRIFT_TIME);
TRACKPOINT_BIT_ATTR(press_to_select, TP_TOGGLE_PTSON, TP_MASK_PTSON, 0,
TP_DEF_PTSON);
@@ -246,6 +247,7 @@
&psmouse_attr_upthresh.dattr.attr,
&psmouse_attr_ztime.dattr.attr,
&psmouse_attr_jenks.dattr.attr,
+ &psmouse_attr_drift_time.dattr.attr,
&psmouse_attr_press_to_select.dattr.attr,
&psmouse_attr_skipback.dattr.attr,
&psmouse_attr_ext_dev.dattr.attr,
@@ -312,6 +314,7 @@
TRACKPOINT_UPDATE(in_power_on_state, psmouse, tp, upthresh);
TRACKPOINT_UPDATE(in_power_on_state, psmouse, tp, ztime);
TRACKPOINT_UPDATE(in_power_on_state, psmouse, tp, jenks);
+ TRACKPOINT_UPDATE(in_power_on_state, psmouse, tp, drift_time);
/* toggles */
TRACKPOINT_UPDATE(in_power_on_state, psmouse, tp, press_to_select);
@@ -332,6 +335,7 @@
TRACKPOINT_SET_POWER_ON_DEFAULT(tp, upthresh);
TRACKPOINT_SET_POWER_ON_DEFAULT(tp, ztime);
TRACKPOINT_SET_POWER_ON_DEFAULT(tp, jenks);
+ TRACKPOINT_SET_POWER_ON_DEFAULT(tp, drift_time);
TRACKPOINT_SET_POWER_ON_DEFAULT(tp, inertia);
/* toggles */
diff --git a/drivers/input/mouse/trackpoint.h b/drivers/input/mouse/trackpoint.h
index ecd0547..5617ed3 100644
--- a/drivers/input/mouse/trackpoint.h
+++ b/drivers/input/mouse/trackpoint.h
@@ -70,6 +70,9 @@
#define TP_UP_THRESH 0x5A /* Used to generate a 'click' on Z-axis */
#define TP_Z_TIME 0x5E /* How sharp of a press */
#define TP_JENKS_CURV 0x5D /* Minimum curvature for double click */
+#define TP_DRIFT_TIME 0x5F /* How long a 'hands off' condition */
+ /* must last (x*107ms) for drift */
+ /* correction to occur */
/*
* Toggling Flag bits
@@ -120,6 +123,7 @@
#define TP_DEF_UP_THRESH 0xFF
#define TP_DEF_Z_TIME 0x26
#define TP_DEF_JENKS_CURV 0x87
+#define TP_DEF_DRIFT_TIME 0x05
/* Toggles */
#define TP_DEF_MB 0x00
@@ -137,6 +141,7 @@
unsigned char draghys, mindrag;
unsigned char thresh, upthresh;
unsigned char ztime, jenks;
+ unsigned char drift_time;
/* toggles */
unsigned char press_to_select;
diff --git a/drivers/input/touchscreen/atmel_mxt_ts.c b/drivers/input/touchscreen/atmel_mxt_ts.c
index bb07020..95ee92a 100644
--- a/drivers/input/touchscreen/atmel_mxt_ts.c
+++ b/drivers/input/touchscreen/atmel_mxt_ts.c
@@ -99,13 +99,9 @@
#define MXT_T6_STATUS_COMSERR (1 << 2)
/* MXT_GEN_POWER_T7 field */
-struct t7_config {
- u8 idle;
- u8 active;
-} __packed;
-
-#define MXT_POWER_CFG_RUN 0
-#define MXT_POWER_CFG_DEEPSLEEP 1
+#define MXT_POWER_IDLEACQINT 0
+#define MXT_POWER_ACTVACQINT 1
+#define MXT_POWER_ACTV2IDLETO 2
/* MXT_GEN_ACQUIRE_T8 field */
#define MXT_ACQUIRE_CHRGTIME 0
@@ -117,6 +113,7 @@
#define MXT_ACQUIRE_ATCHCALSTHR 7
/* MXT_TOUCH_MULTI_T9 field */
+#define MXT_TOUCH_CTRL 0
#define MXT_T9_ORIENT 9
#define MXT_T9_RANGE 18
@@ -256,7 +253,6 @@
bool update_input;
u8 last_message_count;
u8 num_touchids;
- struct t7_config t7_cfg;
/* Cached parameters from object table */
u16 T5_address;
@@ -672,6 +668,20 @@
data->t6_status = status;
}
+static int mxt_write_object(struct mxt_data *data,
+ u8 type, u8 offset, u8 val)
+{
+ struct mxt_object *object;
+ u16 reg;
+
+ object = mxt_get_object(data, type);
+ if (!object || offset >= mxt_obj_size(object))
+ return -EINVAL;
+
+ reg = object->start_address;
+ return mxt_write_reg(data->client, reg + offset, val);
+}
+
static void mxt_input_button(struct mxt_data *data, u8 *message)
{
struct input_dev *input = data->input_dev;
@@ -1742,60 +1752,6 @@
return error;
}
-static int mxt_set_t7_power_cfg(struct mxt_data *data, u8 sleep)
-{
- struct device *dev = &data->client->dev;
- int error;
- struct t7_config *new_config;
- struct t7_config deepsleep = { .active = 0, .idle = 0 };
-
- if (sleep == MXT_POWER_CFG_DEEPSLEEP)
- new_config = &deepsleep;
- else
- new_config = &data->t7_cfg;
-
- error = __mxt_write_reg(data->client, data->T7_address,
- sizeof(data->t7_cfg), new_config);
- if (error)
- return error;
-
- dev_dbg(dev, "Set T7 ACTV:%d IDLE:%d\n",
- new_config->active, new_config->idle);
-
- return 0;
-}
-
-static int mxt_init_t7_power_cfg(struct mxt_data *data)
-{
- struct device *dev = &data->client->dev;
- int error;
- bool retry = false;
-
-recheck:
- error = __mxt_read_reg(data->client, data->T7_address,
- sizeof(data->t7_cfg), &data->t7_cfg);
- if (error)
- return error;
-
- if (data->t7_cfg.active == 0 || data->t7_cfg.idle == 0) {
- if (!retry) {
- dev_dbg(dev, "T7 cfg zero, resetting\n");
- mxt_soft_reset(data);
- retry = true;
- goto recheck;
- } else {
- dev_dbg(dev, "T7 cfg zero after reset, overriding\n");
- data->t7_cfg.active = 20;
- data->t7_cfg.idle = 100;
- return mxt_set_t7_power_cfg(data, MXT_POWER_CFG_RUN);
- }
- }
-
- dev_dbg(dev, "Initialized power cfg: ACTV %d, IDLE %d\n",
- data->t7_cfg.active, data->t7_cfg.idle);
- return 0;
-}
-
static int mxt_configure_objects(struct mxt_data *data,
const struct firmware *cfg)
{
@@ -1809,12 +1765,6 @@
dev_warn(dev, "Error %d updating config\n", error);
}
- error = mxt_init_t7_power_cfg(data);
- if (error) {
- dev_err(dev, "Failed to initialize power cfg\n");
- return error;
- }
-
error = mxt_initialize_t9_input_device(data);
if (error)
return error;
@@ -2093,15 +2043,16 @@
static void mxt_start(struct mxt_data *data)
{
- mxt_set_t7_power_cfg(data, MXT_POWER_CFG_RUN);
-
- /* Recalibrate since chip has been in deep sleep */
- mxt_t6_command(data, MXT_COMMAND_CALIBRATE, 1, false);
+ /* Touch enable */
+ mxt_write_object(data,
+ MXT_TOUCH_MULTI_T9, MXT_TOUCH_CTRL, 0x83);
}
static void mxt_stop(struct mxt_data *data)
{
- mxt_set_t7_power_cfg(data, MXT_POWER_CFG_DEEPSLEEP);
+ /* Touch disable */
+ mxt_write_object(data,
+ MXT_TOUCH_MULTI_T9, MXT_TOUCH_CTRL, 0);
}
static int mxt_input_open(struct input_dev *dev)
@@ -2266,6 +2217,8 @@
struct mxt_data *data = i2c_get_clientdata(client);
struct input_dev *input_dev = data->input_dev;
+ mxt_soft_reset(data);
+
mutex_lock(&input_dev->mutex);
if (input_dev->users)
diff --git a/drivers/input/touchscreen/edt-ft5x06.c b/drivers/input/touchscreen/edt-ft5x06.c
index 3793fcc..d4c24fb 100644
--- a/drivers/input/touchscreen/edt-ft5x06.c
+++ b/drivers/input/touchscreen/edt-ft5x06.c
@@ -850,9 +850,11 @@
}
#define EDT_ATTR_CHECKSET(name, reg) \
+do { \
if (pdata->name >= edt_ft5x06_attr_##name.limit_low && \
pdata->name <= edt_ft5x06_attr_##name.limit_high) \
- edt_ft5x06_register_write(tsdata, reg, pdata->name)
+ edt_ft5x06_register_write(tsdata, reg, pdata->name); \
+} while (0)
#define EDT_GET_PROP(name, reg) { \
u32 val; \
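The EDT_ATTR_CHECKSET change is the standard do { ... } while (0) macro wrapper: a macro that expands to a bare if (or to several statements) misbehaves when used as the body of an if/else, because the caller's else binds to the macro's if. A generic illustration in plain C, unrelated to the driver:

/* Unsafe: expands to a bare if, so a following "else" binds to it. */
#define SET_IF_VALID_BAD(x, v) \
        if ((v) >= 0 && (v) <= 255) (x) = (v)

/* Safe: behaves as one statement and requires the trailing ";". */
#define SET_IF_VALID(x, v)                      \
do {                                            \
        if ((v) >= 0 && (v) <= 255)             \
                (x) = (v);                      \
} while (0)

void example(int *x, int v, int fallback)
{
        if (v != fallback)
                SET_IF_VALID(*x, v); /* safe inside if/else */
        else
                *x = fallback;
}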
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index 8735543..4934789 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -1127,6 +1127,24 @@
schedule_zero(tc, virt_block, data_dest, cell, bio);
}
+static void set_pool_mode(struct pool *pool, enum pool_mode new_mode);
+
+static void check_for_space(struct pool *pool)
+{
+ int r;
+ dm_block_t nr_free;
+
+ if (get_pool_mode(pool) != PM_OUT_OF_DATA_SPACE)
+ return;
+
+ r = dm_pool_get_free_block_count(pool->pmd, &nr_free);
+ if (r)
+ return;
+
+ if (nr_free)
+ set_pool_mode(pool, PM_WRITE);
+}
+
/*
* A non-zero return indicates read_only or fail_io mode.
* Many callers don't care about the return value.
@@ -1141,6 +1159,8 @@
r = dm_pool_commit_metadata(pool->pmd);
if (r)
metadata_operation_failed(pool, "dm_pool_commit_metadata", r);
+ else
+ check_for_space(pool);
return r;
}
@@ -1159,8 +1179,6 @@
}
}
-static void set_pool_mode(struct pool *pool, enum pool_mode new_mode);
-
static int alloc_data_block(struct thin_c *tc, dm_block_t *result)
{
int r;
@@ -2155,7 +2173,7 @@
pool->process_cell = process_cell_read_only;
pool->process_discard_cell = process_discard_cell;
pool->process_prepared_mapping = process_prepared_mapping;
- pool->process_prepared_discard = process_prepared_discard_passdown;
+ pool->process_prepared_discard = process_prepared_discard;
if (!pool->pf.error_if_no_space && no_space_timeout)
queue_delayed_work(pool->wq, &pool->no_space_timeout, no_space_timeout);
@@ -3814,6 +3832,8 @@
r = -EINVAL;
goto bad;
}
+ atomic_set(&tc->refcount, 1);
+ init_completion(&tc->can_destroy);
list_add_tail_rcu(&tc->list, &tc->pool->active_thins);
spin_unlock_irqrestore(&tc->pool->lock, flags);
/*
@@ -3826,9 +3846,6 @@
dm_put(pool_md);
- atomic_set(&tc->refcount, 1);
- init_completion(&tc->can_destroy);
-
return 0;
bad:
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 4c06585..b98cd9d 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -899,7 +899,7 @@
static void clone_endio(struct bio *bio, int error)
{
- int r = 0;
+ int r = error;
struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone);
struct dm_io *io = tio->io;
struct mapped_device *md = tio->io->md;
diff --git a/drivers/mfd/stmpe.c b/drivers/mfd/stmpe.c
index e2f9df1..2d7fae9 100644
--- a/drivers/mfd/stmpe.c
+++ b/drivers/mfd/stmpe.c
@@ -519,6 +519,7 @@
[STMPE_IDX_GPDR_LSB] = STMPE1601_REG_GPIO_SET_DIR_LSB,
[STMPE_IDX_GPRER_LSB] = STMPE1601_REG_GPIO_RE_LSB,
[STMPE_IDX_GPFER_LSB] = STMPE1601_REG_GPIO_FE_LSB,
+ [STMPE_IDX_GPPUR_LSB] = STMPE1601_REG_GPIO_PU_LSB,
[STMPE_IDX_GPAFR_U_MSB] = STMPE1601_REG_GPIO_AF_U_MSB,
[STMPE_IDX_IEGPIOR_LSB] = STMPE1601_REG_INT_EN_GPIO_MASK_LSB,
[STMPE_IDX_ISGPIOR_MSB] = STMPE1601_REG_INT_STA_GPIO_MSB,
@@ -667,6 +668,7 @@
[STMPE_IDX_GPDR_LSB] = STMPE1801_REG_GPIO_SET_DIR_LOW,
[STMPE_IDX_GPRER_LSB] = STMPE1801_REG_GPIO_RE_LOW,
[STMPE_IDX_GPFER_LSB] = STMPE1801_REG_GPIO_FE_LOW,
+ [STMPE_IDX_GPPUR_LSB] = STMPE1801_REG_GPIO_PULL_UP_LOW,
[STMPE_IDX_IEGPIOR_LSB] = STMPE1801_REG_INT_EN_GPIO_MASK_LOW,
[STMPE_IDX_ISGPIOR_LSB] = STMPE1801_REG_INT_STA_GPIO_LOW,
};
@@ -750,6 +752,8 @@
[STMPE_IDX_GPDR_LSB] = STMPE24XX_REG_GPDR_LSB,
[STMPE_IDX_GPRER_LSB] = STMPE24XX_REG_GPRER_LSB,
[STMPE_IDX_GPFER_LSB] = STMPE24XX_REG_GPFER_LSB,
+ [STMPE_IDX_GPPUR_LSB] = STMPE24XX_REG_GPPUR_LSB,
+ [STMPE_IDX_GPPDR_LSB] = STMPE24XX_REG_GPPDR_LSB,
[STMPE_IDX_GPAFR_U_MSB] = STMPE24XX_REG_GPAFR_U_MSB,
[STMPE_IDX_IEGPIOR_LSB] = STMPE24XX_REG_IEGPIOR_LSB,
[STMPE_IDX_ISGPIOR_MSB] = STMPE24XX_REG_ISGPIOR_MSB,
diff --git a/drivers/mfd/stmpe.h b/drivers/mfd/stmpe.h
index bee0abf..84adb46 100644
--- a/drivers/mfd/stmpe.h
+++ b/drivers/mfd/stmpe.h
@@ -188,6 +188,7 @@
#define STMPE1601_REG_GPIO_ED_MSB 0x8A
#define STMPE1601_REG_GPIO_RE_LSB 0x8D
#define STMPE1601_REG_GPIO_FE_LSB 0x8F
+#define STMPE1601_REG_GPIO_PU_LSB 0x91
#define STMPE1601_REG_GPIO_AF_U_MSB 0x92
#define STMPE1601_SYS_CTRL_ENABLE_GPIO (1 << 3)
@@ -276,6 +277,8 @@
#define STMPE24XX_REG_GPEDR_MSB 0x8C
#define STMPE24XX_REG_GPRER_LSB 0x91
#define STMPE24XX_REG_GPFER_LSB 0x94
+#define STMPE24XX_REG_GPPUR_LSB 0x97
+#define STMPE24XX_REG_GPPDR_LSB 0x9a
#define STMPE24XX_REG_GPAFR_U_MSB 0x9B
#define STMPE24XX_SYS_CTRL_ENABLE_GPIO (1 << 3)
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index 02ad792..7466ce0 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -886,7 +886,7 @@
unsigned idx, bus_width = 0;
int err = 0;
- if (!mmc_can_ext_csd(card) &&
+ if (!mmc_can_ext_csd(card) ||
!(host->caps & (MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA)))
return 0;
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 184c434..0dceba1 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -1648,7 +1648,7 @@
/* slave is not a slave or master is not master of this slave */
if (!(slave_dev->flags & IFF_SLAVE) ||
!netdev_has_upper_dev(slave_dev, bond_dev)) {
- netdev_err(bond_dev, "cannot release %s\n",
+ netdev_dbg(bond_dev, "cannot release %s\n",
slave_dev->name);
return -EINVAL;
}
diff --git a/drivers/net/caif/caif_virtio.c b/drivers/net/caif/caif_virtio.c
index a5fefb9..b306210 100644
--- a/drivers/net/caif/caif_virtio.c
+++ b/drivers/net/caif/caif_virtio.c
@@ -257,7 +257,6 @@
struct vringh_kiov *riov = &cfv->ctx.riov;
unsigned int skb_len;
-again:
do {
skb = NULL;
@@ -322,7 +321,6 @@
napi_schedule_prep(napi)) {
vringh_notify_disable_kern(cfv->vr_rx);
__napi_schedule(napi);
- goto again;
}
break;
diff --git a/drivers/net/ethernet/8390/ne2k-pci.c b/drivers/net/ethernet/8390/ne2k-pci.c
index 89c8d9f..57e9791 100644
--- a/drivers/net/ethernet/8390/ne2k-pci.c
+++ b/drivers/net/ethernet/8390/ne2k-pci.c
@@ -246,13 +246,13 @@
if (!ioaddr || ((pci_resource_flags (pdev, 0) & IORESOURCE_IO) == 0)) {
dev_err(&pdev->dev, "no I/O resource at PCI BAR #0\n");
- return -ENODEV;
+ goto err_out;
}
if (request_region (ioaddr, NE_IO_EXTENT, DRV_NAME) == NULL) {
dev_err(&pdev->dev, "I/O resource 0x%x @ 0x%lx busy\n",
NE_IO_EXTENT, ioaddr);
- return -EBUSY;
+ goto err_out;
}
reg0 = inb(ioaddr);
@@ -392,6 +392,8 @@
free_netdev (dev);
err_out_free_res:
release_region (ioaddr, NE_IO_EXTENT);
+err_out:
+ pci_disable_device(pdev);
return -ENODEV;
}
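The ne2k-pci fix routes the two early failures through a common exit so that pci_enable_device() is always balanced by pci_disable_device(). The classic goto-unwind shape, as a minimal sketch:

#include <linux/pci.h>

static int probe_sketch(struct pci_dev *pdev)
{
        int err = pci_enable_device(pdev);

        if (err)
                return err;

        err = pci_request_regions(pdev, "sketch");
        if (err)
                goto err_disable; /* undo the enable on any later failure */

        /* further setup; failures past this point would also release
         * the regions before jumping to err_disable */
        return 0;

err_disable:
        pci_disable_device(pdev);
        return err;
}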
diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig
index df76050..eadcb05 100644
--- a/drivers/net/ethernet/Kconfig
+++ b/drivers/net/ethernet/Kconfig
@@ -156,18 +156,6 @@
source "drivers/net/ethernet/renesas/Kconfig"
source "drivers/net/ethernet/rdc/Kconfig"
source "drivers/net/ethernet/rocker/Kconfig"
-
-config S6GMAC
- tristate "S6105 GMAC ethernet support"
- depends on XTENSA_VARIANT_S6000
- select PHYLIB
- ---help---
- This driver supports the on chip ethernet device on the
- S6105 xtensa processor.
-
- To compile this driver as a module, choose M here. The module
- will be called s6gmac.
-
source "drivers/net/ethernet/samsung/Kconfig"
source "drivers/net/ethernet/seeq/Kconfig"
source "drivers/net/ethernet/silan/Kconfig"
diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile
index bf56f8b..1367afc 100644
--- a/drivers/net/ethernet/Makefile
+++ b/drivers/net/ethernet/Makefile
@@ -66,7 +66,6 @@
obj-$(CONFIG_SH_ETH) += renesas/
obj-$(CONFIG_NET_VENDOR_RDC) += rdc/
obj-$(CONFIG_NET_VENDOR_ROCKER) += rocker/
-obj-$(CONFIG_S6GMAC) += s6gmac.o
obj-$(CONFIG_NET_VENDOR_SAMSUNG) += samsung/
obj-$(CONFIG_NET_VENDOR_SEEQ) += seeq/
obj-$(CONFIG_NET_VENDOR_SILAN) += silan/
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 9f5e387..72eef9f 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -12553,9 +12553,11 @@
return 0;
}
-static bool bnx2x_gso_check(struct sk_buff *skb, struct net_device *dev)
+static netdev_features_t bnx2x_features_check(struct sk_buff *skb,
+ struct net_device *dev,
+ netdev_features_t features)
{
- return vxlan_gso_check(skb);
+ return vxlan_features_check(skb, features);
}
static const struct net_device_ops bnx2x_netdev_ops = {
@@ -12589,7 +12591,7 @@
#endif
.ndo_get_phys_port_id = bnx2x_get_phys_port_id,
.ndo_set_vf_link_state = bnx2x_set_vf_link_state,
- .ndo_gso_check = bnx2x_gso_check,
+ .ndo_features_check = bnx2x_features_check,
};
static int bnx2x_set_coherency_mask(struct bnx2x *bp)
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index bb48a61..553dcd8 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -17800,23 +17800,6 @@
goto err_out_apeunmap;
}
- /*
- * Reset chip in case UNDI or EFI driver did not shut down.
- * The DMA self test will enable WDMAC and we'll see (spurious)
- * pending DMA on the PCI bus at that point.
- */
- if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
- (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
- tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
- tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
- }
-
- err = tg3_test_dma(tp);
- if (err) {
- dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
- goto err_out_apeunmap;
- }
-
intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
@@ -17861,6 +17844,23 @@
sndmbx += 0xc;
}
+ /*
+ * Reset chip in case UNDI or EFI driver did not shut down.
+ * The DMA self test will enable WDMAC and we'll see (spurious)
+ * pending DMA on the PCI bus at that point.
+ */
+ if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
+ (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
+ tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
+ tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
+ }
+
+ err = tg3_test_dma(tp);
+ if (err) {
+ dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
+ goto err_out_apeunmap;
+ }
+
tg3_init_coal(tp);
pci_set_drvdata(pdev, dev);
diff --git a/drivers/net/ethernet/brocade/bna/bnad_debugfs.c b/drivers/net/ethernet/brocade/bna/bnad_debugfs.c
index 7d6aa8c..619083a 100644
--- a/drivers/net/ethernet/brocade/bna/bnad_debugfs.c
+++ b/drivers/net/ethernet/brocade/bna/bnad_debugfs.c
@@ -172,7 +172,7 @@
/* Retrieve flash partition info */
fcomp.comp_status = 0;
- init_completion(&fcomp.comp);
+ reinit_completion(&fcomp.comp);
spin_lock_irqsave(&bnad->bna_lock, flags);
ret = bfa_nw_flash_get_attr(&bnad->bna.flash, &drvinfo->flash_attr,
bnad_cb_completion, &fcomp);
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h b/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h
index d00a751..6049f70 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h
@@ -96,6 +96,9 @@
s16 xact_addr_filt; /* index of our MAC address filter */
u16 rss_size; /* size of VI's RSS table slice */
u8 pidx; /* index into adapter port[] */
+ s8 mdio_addr;
+ u8 port_type; /* firmware port type */
+ u8 mod_type; /* firmware module type */
u8 port_id; /* physical port ID */
u8 nqsets; /* # of "Queue Sets" */
u8 first_qset; /* index of first "Queue Set" */
@@ -522,6 +525,7 @@
* is "contracted" to provide for the common code.
*/
void t4vf_os_link_changed(struct adapter *, int, int);
+void t4vf_os_portmod_changed(struct adapter *, int);
/*
* SGE function prototype declarations.
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
index aa74ec3..2215d43 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
@@ -44,6 +44,7 @@
#include <linux/etherdevice.h>
#include <linux/debugfs.h>
#include <linux/ethtool.h>
+#include <linux/mdio.h>
#include "t4vf_common.h"
#include "t4vf_defs.h"
@@ -210,6 +211,38 @@
}
/*
+ * The port module type has changed on the indicated "port" (Virtual
+ * Interface).
+ */
+void t4vf_os_portmod_changed(struct adapter *adapter, int pidx)
+{
+ static const char * const mod_str[] = {
+ NULL, "LR", "SR", "ER", "passive DA", "active DA", "LRM"
+ };
+ const struct net_device *dev = adapter->port[pidx];
+ const struct port_info *pi = netdev_priv(dev);
+
+ if (pi->mod_type == FW_PORT_MOD_TYPE_NONE)
+ dev_info(adapter->pdev_dev, "%s: port module unplugged\n",
+ dev->name);
+ else if (pi->mod_type < ARRAY_SIZE(mod_str))
+ dev_info(adapter->pdev_dev, "%s: %s port module inserted\n",
+ dev->name, mod_str[pi->mod_type]);
+ else if (pi->mod_type == FW_PORT_MOD_TYPE_NOTSUPPORTED)
+ dev_info(adapter->pdev_dev, "%s: unsupported optical port "
+ "module inserted\n", dev->name);
+ else if (pi->mod_type == FW_PORT_MOD_TYPE_UNKNOWN)
+ dev_info(adapter->pdev_dev, "%s: unknown port module inserted,"
+ "forcing TWINAX\n", dev->name);
+ else if (pi->mod_type == FW_PORT_MOD_TYPE_ERROR)
+ dev_info(adapter->pdev_dev, "%s: transceiver module error\n",
+ dev->name);
+ else
+ dev_info(adapter->pdev_dev, "%s: unknown module type %d "
+ "inserted\n", dev->name, pi->mod_type);
+}
+
+/*
* Net device operations.
* ======================
*/
@@ -1193,24 +1226,103 @@
* state of the port to which we're linked.
*/
-/*
- * Return current port link settings.
- */
-static int cxgb4vf_get_settings(struct net_device *dev,
- struct ethtool_cmd *cmd)
+static unsigned int t4vf_from_fw_linkcaps(enum fw_port_type type,
+ unsigned int caps)
{
- const struct port_info *pi = netdev_priv(dev);
+ unsigned int v = 0;
- cmd->supported = pi->link_cfg.supported;
- cmd->advertising = pi->link_cfg.advertising;
+ if (type == FW_PORT_TYPE_BT_SGMII || type == FW_PORT_TYPE_BT_XFI ||
+ type == FW_PORT_TYPE_BT_XAUI) {
+ v |= SUPPORTED_TP;
+ if (caps & FW_PORT_CAP_SPEED_100M)
+ v |= SUPPORTED_100baseT_Full;
+ if (caps & FW_PORT_CAP_SPEED_1G)
+ v |= SUPPORTED_1000baseT_Full;
+ if (caps & FW_PORT_CAP_SPEED_10G)
+ v |= SUPPORTED_10000baseT_Full;
+ } else if (type == FW_PORT_TYPE_KX4 || type == FW_PORT_TYPE_KX) {
+ v |= SUPPORTED_Backplane;
+ if (caps & FW_PORT_CAP_SPEED_1G)
+ v |= SUPPORTED_1000baseKX_Full;
+ if (caps & FW_PORT_CAP_SPEED_10G)
+ v |= SUPPORTED_10000baseKX4_Full;
+ } else if (type == FW_PORT_TYPE_KR)
+ v |= SUPPORTED_Backplane | SUPPORTED_10000baseKR_Full;
+ else if (type == FW_PORT_TYPE_BP_AP)
+ v |= SUPPORTED_Backplane | SUPPORTED_10000baseR_FEC |
+ SUPPORTED_10000baseKR_Full | SUPPORTED_1000baseKX_Full;
+ else if (type == FW_PORT_TYPE_BP4_AP)
+ v |= SUPPORTED_Backplane | SUPPORTED_10000baseR_FEC |
+ SUPPORTED_10000baseKR_Full | SUPPORTED_1000baseKX_Full |
+ SUPPORTED_10000baseKX4_Full;
+ else if (type == FW_PORT_TYPE_FIBER_XFI ||
+ type == FW_PORT_TYPE_FIBER_XAUI ||
+ type == FW_PORT_TYPE_SFP ||
+ type == FW_PORT_TYPE_QSFP_10G ||
+ type == FW_PORT_TYPE_QSA) {
+ v |= SUPPORTED_FIBRE;
+ if (caps & FW_PORT_CAP_SPEED_1G)
+ v |= SUPPORTED_1000baseT_Full;
+ if (caps & FW_PORT_CAP_SPEED_10G)
+ v |= SUPPORTED_10000baseT_Full;
+ } else if (type == FW_PORT_TYPE_BP40_BA ||
+ type == FW_PORT_TYPE_QSFP) {
+ v |= SUPPORTED_40000baseSR4_Full;
+ v |= SUPPORTED_FIBRE;
+ }
+
+ if (caps & FW_PORT_CAP_ANEG)
+ v |= SUPPORTED_Autoneg;
+ return v;
+}
+
+static int cxgb4vf_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ const struct port_info *p = netdev_priv(dev);
+
+ if (p->port_type == FW_PORT_TYPE_BT_SGMII ||
+ p->port_type == FW_PORT_TYPE_BT_XFI ||
+ p->port_type == FW_PORT_TYPE_BT_XAUI)
+ cmd->port = PORT_TP;
+ else if (p->port_type == FW_PORT_TYPE_FIBER_XFI ||
+ p->port_type == FW_PORT_TYPE_FIBER_XAUI)
+ cmd->port = PORT_FIBRE;
+ else if (p->port_type == FW_PORT_TYPE_SFP ||
+ p->port_type == FW_PORT_TYPE_QSFP_10G ||
+ p->port_type == FW_PORT_TYPE_QSA ||
+ p->port_type == FW_PORT_TYPE_QSFP) {
+ if (p->mod_type == FW_PORT_MOD_TYPE_LR ||
+ p->mod_type == FW_PORT_MOD_TYPE_SR ||
+ p->mod_type == FW_PORT_MOD_TYPE_ER ||
+ p->mod_type == FW_PORT_MOD_TYPE_LRM)
+ cmd->port = PORT_FIBRE;
+ else if (p->mod_type == FW_PORT_MOD_TYPE_TWINAX_PASSIVE ||
+ p->mod_type == FW_PORT_MOD_TYPE_TWINAX_ACTIVE)
+ cmd->port = PORT_DA;
+ else
+ cmd->port = PORT_OTHER;
+ } else
+ cmd->port = PORT_OTHER;
+
+ if (p->mdio_addr >= 0) {
+ cmd->phy_address = p->mdio_addr;
+ cmd->transceiver = XCVR_EXTERNAL;
+ cmd->mdio_support = p->port_type == FW_PORT_TYPE_BT_SGMII ?
+ MDIO_SUPPORTS_C22 : MDIO_SUPPORTS_C45;
+ } else {
+ cmd->phy_address = 0; /* not really, but no better option */
+ cmd->transceiver = XCVR_INTERNAL;
+ cmd->mdio_support = 0;
+ }
+
+ cmd->supported = t4vf_from_fw_linkcaps(p->port_type,
+ p->link_cfg.supported);
+ cmd->advertising = t4vf_from_fw_linkcaps(p->port_type,
+ p->link_cfg.advertising);
ethtool_cmd_speed_set(cmd,
- netif_carrier_ok(dev) ? pi->link_cfg.speed : -1);
+ netif_carrier_ok(dev) ? p->link_cfg.speed : 0);
cmd->duplex = DUPLEX_FULL;
-
- cmd->port = (cmd->supported & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE;
- cmd->phy_address = pi->port_id;
- cmd->transceiver = XCVR_EXTERNAL;
- cmd->autoneg = pi->link_cfg.autoneg;
+ cmd->autoneg = p->link_cfg.autoneg;
cmd->maxtxpkt = 0;
cmd->maxrxpkt = 0;
return 0;
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h
index 8d3237f..b9debb4 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h
@@ -230,7 +230,7 @@
static inline bool is_10g_port(const struct link_config *lc)
{
- return (lc->supported & SUPPORTED_10000baseT_Full) != 0;
+ return (lc->supported & FW_PORT_CAP_SPEED_10G) != 0;
}
static inline bool is_x_10g_port(const struct link_config *lc)
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
index 02e8833..21dc9a2 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
@@ -245,6 +245,10 @@
return a & 0x3f;
}
+#define ADVERT_MASK (FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G |\
+ FW_PORT_CAP_SPEED_10G | FW_PORT_CAP_SPEED_40G | \
+ FW_PORT_CAP_SPEED_100G | FW_PORT_CAP_ANEG)
+
/**
* init_link_config - initialize a link's SW state
* @lc: structure holding the link state
@@ -259,8 +263,8 @@
lc->requested_speed = 0;
lc->speed = 0;
lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX;
- if (lc->supported & SUPPORTED_Autoneg) {
- lc->advertising = lc->supported;
+ if (lc->supported & FW_PORT_CAP_ANEG) {
+ lc->advertising = lc->supported & ADVERT_MASK;
lc->autoneg = AUTONEG_ENABLE;
lc->requested_fc |= PAUSE_AUTONEG;
} else {
@@ -280,7 +284,6 @@
struct fw_vi_cmd vi_cmd, vi_rpl;
struct fw_port_cmd port_cmd, port_rpl;
int v;
- u32 word;
/*
* Execute a VI Read command to get our Virtual Interface information
@@ -319,19 +322,11 @@
if (v)
return v;
- v = 0;
- word = be16_to_cpu(port_rpl.u.info.pcap);
- if (word & FW_PORT_CAP_SPEED_100M)
- v |= SUPPORTED_100baseT_Full;
- if (word & FW_PORT_CAP_SPEED_1G)
- v |= SUPPORTED_1000baseT_Full;
- if (word & FW_PORT_CAP_SPEED_10G)
- v |= SUPPORTED_10000baseT_Full;
- if (word & FW_PORT_CAP_SPEED_40G)
- v |= SUPPORTED_40000baseSR4_Full;
- if (word & FW_PORT_CAP_ANEG)
- v |= SUPPORTED_Autoneg;
- init_link_config(&pi->link_cfg, v);
+ v = be32_to_cpu(port_rpl.u.info.lstatus_to_modtype);
+ pi->port_type = FW_PORT_CMD_PTYPE_G(v);
+ pi->mod_type = FW_PORT_MOD_TYPE_NA;
+
+ init_link_config(&pi->link_cfg, be16_to_cpu(port_rpl.u.info.pcap));
return 0;
}
@@ -1491,7 +1486,7 @@
*/
const struct fw_port_cmd *port_cmd =
(const struct fw_port_cmd *)rpl;
- u32 word;
+ u32 stat, mod;
int action, port_id, link_ok, speed, fc, pidx;
/*
@@ -1509,21 +1504,21 @@
port_id = FW_PORT_CMD_PORTID_G(
be32_to_cpu(port_cmd->op_to_portid));
- word = be32_to_cpu(port_cmd->u.info.lstatus_to_modtype);
- link_ok = (word & FW_PORT_CMD_LSTATUS_F) != 0;
+ stat = be32_to_cpu(port_cmd->u.info.lstatus_to_modtype);
+ link_ok = (stat & FW_PORT_CMD_LSTATUS_F) != 0;
speed = 0;
fc = 0;
- if (word & FW_PORT_CMD_RXPAUSE_F)
+ if (stat & FW_PORT_CMD_RXPAUSE_F)
fc |= PAUSE_RX;
- if (word & FW_PORT_CMD_TXPAUSE_F)
+ if (stat & FW_PORT_CMD_TXPAUSE_F)
fc |= PAUSE_TX;
- if (word & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_100M))
+ if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_100M))
speed = 100;
- else if (word & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_1G))
+ else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_1G))
speed = 1000;
- else if (word & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_10G))
+ else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_10G))
speed = 10000;
- else if (word & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_40G))
+ else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_40G))
speed = 40000;
/*
@@ -1540,12 +1535,21 @@
continue;
lc = &pi->link_cfg;
+
+ mod = FW_PORT_CMD_MODTYPE_G(stat);
+ if (mod != pi->mod_type) {
+ pi->mod_type = mod;
+ t4vf_os_portmod_changed(adapter, pidx);
+ }
+
if (link_ok != lc->link_ok || speed != lc->speed ||
fc != lc->fc) {
/* something changed */
lc->link_ok = link_ok;
lc->speed = speed;
lc->fc = fc;
+ lc->supported =
+ be16_to_cpu(port_cmd->u.info.pcap);
t4vf_os_link_changed(adapter, pidx, link_ok);
}
}
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
index 868d0f6..705f334 100644
--- a/drivers/net/ethernet/cisco/enic/enic_main.c
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
@@ -1060,10 +1060,14 @@
PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3);
}
- if ((netdev->features & NETIF_F_RXCSUM) && !csum_not_calc) {
- skb->csum = htons(checksum);
- skb->ip_summed = CHECKSUM_COMPLETE;
- }
+ /* Hardware does not provide the whole packet checksum. It only
+ * provides a pseudo checksum. The hw validates the packet
+ * checksum but does not give us the checksum value, so use
+ * CHECKSUM_UNNECESSARY.
+ */
+ if ((netdev->features & NETIF_F_RXCSUM) && tcp_udp_csum_ok &&
+ ipv4_csum_ok)
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
if (vlan_stripped)
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tci);
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 1960731..41a0a54 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -4459,9 +4459,11 @@
adapter->vxlan_port_count--;
}
-static bool be_gso_check(struct sk_buff *skb, struct net_device *dev)
+static netdev_features_t be_features_check(struct sk_buff *skb,
+ struct net_device *dev,
+ netdev_features_t features)
{
- return vxlan_gso_check(skb);
+ return vxlan_features_check(skb, features);
}
#endif
@@ -4492,7 +4494,7 @@
#ifdef CONFIG_BE2NET_VXLAN
.ndo_add_vxlan_port = be_add_vxlan_port,
.ndo_del_vxlan_port = be_del_vxlan_port,
- .ndo_gso_check = be_gso_check,
+ .ndo_features_check = be_features_check,
#endif
};
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index 190cbd9..d0d6dc1 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -2365,9 +2365,11 @@
queue_work(priv->mdev->workqueue, &priv->vxlan_del_task);
}
-static bool mlx4_en_gso_check(struct sk_buff *skb, struct net_device *dev)
+static netdev_features_t mlx4_en_features_check(struct sk_buff *skb,
+ struct net_device *dev,
+ netdev_features_t features)
{
- return vxlan_gso_check(skb);
+ return vxlan_features_check(skb, features);
}
#endif
@@ -2400,7 +2402,7 @@
#ifdef CONFIG_MLX4_EN_VXLAN
.ndo_add_vxlan_port = mlx4_en_add_vxlan_port,
.ndo_del_vxlan_port = mlx4_en_del_vxlan_port,
- .ndo_gso_check = mlx4_en_gso_check,
+ .ndo_features_check = mlx4_en_features_check,
#endif
};
@@ -2434,7 +2436,7 @@
#ifdef CONFIG_MLX4_EN_VXLAN
.ndo_add_vxlan_port = mlx4_en_add_vxlan_port,
.ndo_del_vxlan_port = mlx4_en_del_vxlan_port,
- .ndo_gso_check = mlx4_en_gso_check,
+ .ndo_features_check = mlx4_en_features_check,
#endif
};
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index a308d41..e3357bf 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -962,7 +962,17 @@
tx_desc->ctrl.owner_opcode = op_own;
if (send_doorbell) {
wmb();
- iowrite32(ring->doorbell_qpn,
+ /* Since there is no iowrite*_native() that writes the
+ * value as is, without byteswapping, use the variant
+ * that does no byteswapping in the relevant arch
+ * endianness.
+ */
+#if defined(__LITTLE_ENDIAN)
+ iowrite32(
+#else
+ iowrite32be(
+#endif
+ ring->doorbell_qpn,
ring->bf.uar->map + MLX4_SEND_DOORBELL);
} else {
ring->xmit_more++;
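
The pattern above could be factored into a helper. A sketch of a
hypothetical iowrite32_native() (the kernel does not provide one, hence
the open-coded #if in this patch):

#include <linux/io.h>
#include <asm/byteorder.h>

/* Hypothetical helper: write a 32-bit value to MMIO space exactly as it
 * sits in memory, i.e. with no byteswapping on either endianness.
 */
static inline void iowrite32_native(u32 val, void __iomem *addr)
{
#if defined(__LITTLE_ENDIAN)
	iowrite32(val, addr);	/* iowrite32 stores little-endian: no swap on LE */
#else
	iowrite32be(val, addr);	/* iowrite32be stores big-endian: no swap on BE */
#endif
}
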
diff --git a/drivers/net/ethernet/micrel/ksz884x.c b/drivers/net/ethernet/micrel/ksz884x.c
index f1ebed6c..2fa6ae0 100644
--- a/drivers/net/ethernet/micrel/ksz884x.c
+++ b/drivers/net/ethernet/micrel/ksz884x.c
@@ -2303,12 +2303,6 @@
/* Spanning Tree */
-static inline void port_cfg_dis_learn(struct ksz_hw *hw, int p, int set)
-{
- port_cfg(hw, p,
- KS8842_PORT_CTRL_2_OFFSET, PORT_LEARN_DISABLE, set);
-}
-
static inline void port_cfg_rx(struct ksz_hw *hw, int p, int set)
{
port_cfg(hw, p,
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
index 1aa25b1..9929b97 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
@@ -505,9 +505,11 @@
adapter->flags |= QLCNIC_DEL_VXLAN_PORT;
}
-static bool qlcnic_gso_check(struct sk_buff *skb, struct net_device *dev)
+static netdev_features_t qlcnic_features_check(struct sk_buff *skb,
+ struct net_device *dev,
+ netdev_features_t features)
{
- return vxlan_gso_check(skb);
+ return vxlan_features_check(skb, features);
}
#endif
@@ -532,7 +534,7 @@
#ifdef CONFIG_QLCNIC_VXLAN
.ndo_add_vxlan_port = qlcnic_add_vxlan_port,
.ndo_del_vxlan_port = qlcnic_del_vxlan_port,
- .ndo_gso_check = qlcnic_gso_check,
+ .ndo_features_check = qlcnic_features_check,
#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = qlcnic_poll_controller,
diff --git a/drivers/net/ethernet/realtek/8139too.c b/drivers/net/ethernet/realtek/8139too.c
index 6d0b9df..78bb4ce 100644
--- a/drivers/net/ethernet/realtek/8139too.c
+++ b/drivers/net/ethernet/realtek/8139too.c
@@ -787,10 +787,10 @@
if (rc)
goto err_out;
+ disable_dev_on_err = 1;
rc = pci_request_regions (pdev, DRV_NAME);
if (rc)
goto err_out;
- disable_dev_on_err = 1;
pci_set_master (pdev);
@@ -1110,6 +1110,7 @@
return 0;
err_out:
+ netif_napi_del(&tp->napi);
__rtl8139_cleanup_dev (dev);
pci_disable_device (pdev);
return i;
@@ -1124,6 +1125,7 @@
assert (dev != NULL);
cancel_delayed_work_sync(&tp->thread);
+ netif_napi_del(&tp->napi);
unregister_netdev (dev);
diff --git a/drivers/net/ethernet/s6gmac.c b/drivers/net/ethernet/s6gmac.c
deleted file mode 100644
index f537cbe..0000000
--- a/drivers/net/ethernet/s6gmac.c
+++ /dev/null
@@ -1,1058 +0,0 @@
-/*
- * Ethernet driver for S6105 on chip network device
- * (c)2008 emlix GmbH http://www.emlix.com
- * Authors: Oskar Schirmer <oskar@scara.com>
- * Daniel Gloeckner <dg@emlix.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/interrupt.h>
-#include <linux/types.h>
-#include <linux/delay.h>
-#include <linux/spinlock.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/if.h>
-#include <linux/stddef.h>
-#include <linux/mii.h>
-#include <linux/phy.h>
-#include <linux/platform_device.h>
-#include <variant/hardware.h>
-#include <variant/dmac.h>
-
-#define DRV_NAME "s6gmac"
-#define DRV_PRMT DRV_NAME ": "
-
-
-/* register declarations */
-
-#define S6_GMAC_MACCONF1 0x000
-#define S6_GMAC_MACCONF1_TXENA 0
-#define S6_GMAC_MACCONF1_SYNCTX 1
-#define S6_GMAC_MACCONF1_RXENA 2
-#define S6_GMAC_MACCONF1_SYNCRX 3
-#define S6_GMAC_MACCONF1_TXFLOWCTRL 4
-#define S6_GMAC_MACCONF1_RXFLOWCTRL 5
-#define S6_GMAC_MACCONF1_LOOPBACK 8
-#define S6_GMAC_MACCONF1_RESTXFUNC 16
-#define S6_GMAC_MACCONF1_RESRXFUNC 17
-#define S6_GMAC_MACCONF1_RESTXMACCTRL 18
-#define S6_GMAC_MACCONF1_RESRXMACCTRL 19
-#define S6_GMAC_MACCONF1_SIMULRES 30
-#define S6_GMAC_MACCONF1_SOFTRES 31
-#define S6_GMAC_MACCONF2 0x004
-#define S6_GMAC_MACCONF2_FULL 0
-#define S6_GMAC_MACCONF2_CRCENA 1
-#define S6_GMAC_MACCONF2_PADCRCENA 2
-#define S6_GMAC_MACCONF2_LENGTHFCHK 4
-#define S6_GMAC_MACCONF2_HUGEFRAMENA 5
-#define S6_GMAC_MACCONF2_IFMODE 8
-#define S6_GMAC_MACCONF2_IFMODE_NIBBLE 1
-#define S6_GMAC_MACCONF2_IFMODE_BYTE 2
-#define S6_GMAC_MACCONF2_IFMODE_MASK 3
-#define S6_GMAC_MACCONF2_PREAMBLELEN 12
-#define S6_GMAC_MACCONF2_PREAMBLELEN_MASK 0x0F
-#define S6_GMAC_MACIPGIFG 0x008
-#define S6_GMAC_MACIPGIFG_B2BINTERPGAP 0
-#define S6_GMAC_MACIPGIFG_B2BINTERPGAP_MASK 0x7F
-#define S6_GMAC_MACIPGIFG_MINIFGENFORCE 8
-#define S6_GMAC_MACIPGIFG_B2BINTERPGAP2 16
-#define S6_GMAC_MACIPGIFG_B2BINTERPGAP1 24
-#define S6_GMAC_MACHALFDUPLEX 0x00C
-#define S6_GMAC_MACHALFDUPLEX_COLLISWIN 0
-#define S6_GMAC_MACHALFDUPLEX_COLLISWIN_MASK 0x3F
-#define S6_GMAC_MACHALFDUPLEX_RETXMAX 12
-#define S6_GMAC_MACHALFDUPLEX_RETXMAX_MASK 0x0F
-#define S6_GMAC_MACHALFDUPLEX_EXCESSDEF 16
-#define S6_GMAC_MACHALFDUPLEX_NOBACKOFF 17
-#define S6_GMAC_MACHALFDUPLEX_BPNOBCKOF 18
-#define S6_GMAC_MACHALFDUPLEX_ALTBEBENA 19
-#define S6_GMAC_MACHALFDUPLEX_ALTBEBTRN 20
-#define S6_GMAC_MACHALFDUPLEX_ALTBEBTR_MASK 0x0F
-#define S6_GMAC_MACMAXFRAMELEN 0x010
-#define S6_GMAC_MACMIICONF 0x020
-#define S6_GMAC_MACMIICONF_CSEL 0
-#define S6_GMAC_MACMIICONF_CSEL_DIV10 0
-#define S6_GMAC_MACMIICONF_CSEL_DIV12 1
-#define S6_GMAC_MACMIICONF_CSEL_DIV14 2
-#define S6_GMAC_MACMIICONF_CSEL_DIV18 3
-#define S6_GMAC_MACMIICONF_CSEL_DIV24 4
-#define S6_GMAC_MACMIICONF_CSEL_DIV34 5
-#define S6_GMAC_MACMIICONF_CSEL_DIV68 6
-#define S6_GMAC_MACMIICONF_CSEL_DIV168 7
-#define S6_GMAC_MACMIICONF_CSEL_MASK 7
-#define S6_GMAC_MACMIICONF_PREAMBLESUPR 4
-#define S6_GMAC_MACMIICONF_SCANAUTOINCR 5
-#define S6_GMAC_MACMIICMD 0x024
-#define S6_GMAC_MACMIICMD_READ 0
-#define S6_GMAC_MACMIICMD_SCAN 1
-#define S6_GMAC_MACMIIADDR 0x028
-#define S6_GMAC_MACMIIADDR_REG 0
-#define S6_GMAC_MACMIIADDR_REG_MASK 0x1F
-#define S6_GMAC_MACMIIADDR_PHY 8
-#define S6_GMAC_MACMIIADDR_PHY_MASK 0x1F
-#define S6_GMAC_MACMIICTRL 0x02C
-#define S6_GMAC_MACMIISTAT 0x030
-#define S6_GMAC_MACMIIINDI 0x034
-#define S6_GMAC_MACMIIINDI_BUSY 0
-#define S6_GMAC_MACMIIINDI_SCAN 1
-#define S6_GMAC_MACMIIINDI_INVAL 2
-#define S6_GMAC_MACINTERFSTAT 0x03C
-#define S6_GMAC_MACINTERFSTAT_LINKFAIL 3
-#define S6_GMAC_MACINTERFSTAT_EXCESSDEF 9
-#define S6_GMAC_MACSTATADDR1 0x040
-#define S6_GMAC_MACSTATADDR2 0x044
-
-#define S6_GMAC_FIFOCONF0 0x048
-#define S6_GMAC_FIFOCONF0_HSTRSTWT 0
-#define S6_GMAC_FIFOCONF0_HSTRSTSR 1
-#define S6_GMAC_FIFOCONF0_HSTRSTFR 2
-#define S6_GMAC_FIFOCONF0_HSTRSTST 3
-#define S6_GMAC_FIFOCONF0_HSTRSTFT 4
-#define S6_GMAC_FIFOCONF0_WTMENREQ 8
-#define S6_GMAC_FIFOCONF0_SRFENREQ 9
-#define S6_GMAC_FIFOCONF0_FRFENREQ 10
-#define S6_GMAC_FIFOCONF0_STFENREQ 11
-#define S6_GMAC_FIFOCONF0_FTFENREQ 12
-#define S6_GMAC_FIFOCONF0_WTMENRPLY 16
-#define S6_GMAC_FIFOCONF0_SRFENRPLY 17
-#define S6_GMAC_FIFOCONF0_FRFENRPLY 18
-#define S6_GMAC_FIFOCONF0_STFENRPLY 19
-#define S6_GMAC_FIFOCONF0_FTFENRPLY 20
-#define S6_GMAC_FIFOCONF1 0x04C
-#define S6_GMAC_FIFOCONF2 0x050
-#define S6_GMAC_FIFOCONF2_CFGLWM 0
-#define S6_GMAC_FIFOCONF2_CFGHWM 16
-#define S6_GMAC_FIFOCONF3 0x054
-#define S6_GMAC_FIFOCONF3_CFGFTTH 0
-#define S6_GMAC_FIFOCONF3_CFGHWMFT 16
-#define S6_GMAC_FIFOCONF4 0x058
-#define S6_GMAC_FIFOCONF_RSV_PREVDROP 0
-#define S6_GMAC_FIFOCONF_RSV_RUNT 1
-#define S6_GMAC_FIFOCONF_RSV_FALSECAR 2
-#define S6_GMAC_FIFOCONF_RSV_CODEERR 3
-#define S6_GMAC_FIFOCONF_RSV_CRCERR 4
-#define S6_GMAC_FIFOCONF_RSV_LENGTHERR 5
-#define S6_GMAC_FIFOCONF_RSV_LENRANGE 6
-#define S6_GMAC_FIFOCONF_RSV_OK 7
-#define S6_GMAC_FIFOCONF_RSV_MULTICAST 8
-#define S6_GMAC_FIFOCONF_RSV_BROADCAST 9
-#define S6_GMAC_FIFOCONF_RSV_DRIBBLE 10
-#define S6_GMAC_FIFOCONF_RSV_CTRLFRAME 11
-#define S6_GMAC_FIFOCONF_RSV_PAUSECTRL 12
-#define S6_GMAC_FIFOCONF_RSV_UNOPCODE 13
-#define S6_GMAC_FIFOCONF_RSV_VLANTAG 14
-#define S6_GMAC_FIFOCONF_RSV_LONGEVENT 15
-#define S6_GMAC_FIFOCONF_RSV_TRUNCATED 16
-#define S6_GMAC_FIFOCONF_RSV_MASK 0x3FFFF
-#define S6_GMAC_FIFOCONF5 0x05C
-#define S6_GMAC_FIFOCONF5_DROPLT64 18
-#define S6_GMAC_FIFOCONF5_CFGBYTM 19
-#define S6_GMAC_FIFOCONF5_RXDROPSIZE 20
-#define S6_GMAC_FIFOCONF5_RXDROPSIZE_MASK 0xF
-
-#define S6_GMAC_STAT_REGS 0x080
-#define S6_GMAC_STAT_SIZE_MIN 12
-#define S6_GMAC_STATTR64 0x080
-#define S6_GMAC_STATTR64_SIZE 18
-#define S6_GMAC_STATTR127 0x084
-#define S6_GMAC_STATTR127_SIZE 18
-#define S6_GMAC_STATTR255 0x088
-#define S6_GMAC_STATTR255_SIZE 18
-#define S6_GMAC_STATTR511 0x08C
-#define S6_GMAC_STATTR511_SIZE 18
-#define S6_GMAC_STATTR1K 0x090
-#define S6_GMAC_STATTR1K_SIZE 18
-#define S6_GMAC_STATTRMAX 0x094
-#define S6_GMAC_STATTRMAX_SIZE 18
-#define S6_GMAC_STATTRMGV 0x098
-#define S6_GMAC_STATTRMGV_SIZE 18
-#define S6_GMAC_STATRBYT 0x09C
-#define S6_GMAC_STATRBYT_SIZE 24
-#define S6_GMAC_STATRPKT 0x0A0
-#define S6_GMAC_STATRPKT_SIZE 18
-#define S6_GMAC_STATRFCS 0x0A4
-#define S6_GMAC_STATRFCS_SIZE 12
-#define S6_GMAC_STATRMCA 0x0A8
-#define S6_GMAC_STATRMCA_SIZE 18
-#define S6_GMAC_STATRBCA 0x0AC
-#define S6_GMAC_STATRBCA_SIZE 22
-#define S6_GMAC_STATRXCF 0x0B0
-#define S6_GMAC_STATRXCF_SIZE 18
-#define S6_GMAC_STATRXPF 0x0B4
-#define S6_GMAC_STATRXPF_SIZE 12
-#define S6_GMAC_STATRXUO 0x0B8
-#define S6_GMAC_STATRXUO_SIZE 12
-#define S6_GMAC_STATRALN 0x0BC
-#define S6_GMAC_STATRALN_SIZE 12
-#define S6_GMAC_STATRFLR 0x0C0
-#define S6_GMAC_STATRFLR_SIZE 16
-#define S6_GMAC_STATRCDE 0x0C4
-#define S6_GMAC_STATRCDE_SIZE 12
-#define S6_GMAC_STATRCSE 0x0C8
-#define S6_GMAC_STATRCSE_SIZE 12
-#define S6_GMAC_STATRUND 0x0CC
-#define S6_GMAC_STATRUND_SIZE 12
-#define S6_GMAC_STATROVR 0x0D0
-#define S6_GMAC_STATROVR_SIZE 12
-#define S6_GMAC_STATRFRG 0x0D4
-#define S6_GMAC_STATRFRG_SIZE 12
-#define S6_GMAC_STATRJBR 0x0D8
-#define S6_GMAC_STATRJBR_SIZE 12
-#define S6_GMAC_STATRDRP 0x0DC
-#define S6_GMAC_STATRDRP_SIZE 12
-#define S6_GMAC_STATTBYT 0x0E0
-#define S6_GMAC_STATTBYT_SIZE 24
-#define S6_GMAC_STATTPKT 0x0E4
-#define S6_GMAC_STATTPKT_SIZE 18
-#define S6_GMAC_STATTMCA 0x0E8
-#define S6_GMAC_STATTMCA_SIZE 18
-#define S6_GMAC_STATTBCA 0x0EC
-#define S6_GMAC_STATTBCA_SIZE 18
-#define S6_GMAC_STATTXPF 0x0F0
-#define S6_GMAC_STATTXPF_SIZE 12
-#define S6_GMAC_STATTDFR 0x0F4
-#define S6_GMAC_STATTDFR_SIZE 12
-#define S6_GMAC_STATTEDF 0x0F8
-#define S6_GMAC_STATTEDF_SIZE 12
-#define S6_GMAC_STATTSCL 0x0FC
-#define S6_GMAC_STATTSCL_SIZE 12
-#define S6_GMAC_STATTMCL 0x100
-#define S6_GMAC_STATTMCL_SIZE 12
-#define S6_GMAC_STATTLCL 0x104
-#define S6_GMAC_STATTLCL_SIZE 12
-#define S6_GMAC_STATTXCL 0x108
-#define S6_GMAC_STATTXCL_SIZE 12
-#define S6_GMAC_STATTNCL 0x10C
-#define S6_GMAC_STATTNCL_SIZE 13
-#define S6_GMAC_STATTPFH 0x110
-#define S6_GMAC_STATTPFH_SIZE 12
-#define S6_GMAC_STATTDRP 0x114
-#define S6_GMAC_STATTDRP_SIZE 12
-#define S6_GMAC_STATTJBR 0x118
-#define S6_GMAC_STATTJBR_SIZE 12
-#define S6_GMAC_STATTFCS 0x11C
-#define S6_GMAC_STATTFCS_SIZE 12
-#define S6_GMAC_STATTXCF 0x120
-#define S6_GMAC_STATTXCF_SIZE 12
-#define S6_GMAC_STATTOVR 0x124
-#define S6_GMAC_STATTOVR_SIZE 12
-#define S6_GMAC_STATTUND 0x128
-#define S6_GMAC_STATTUND_SIZE 12
-#define S6_GMAC_STATTFRG 0x12C
-#define S6_GMAC_STATTFRG_SIZE 12
-#define S6_GMAC_STATCARRY(n) (0x130 + 4*(n))
-#define S6_GMAC_STATCARRYMSK(n) (0x138 + 4*(n))
-#define S6_GMAC_STATCARRY1_RDRP 0
-#define S6_GMAC_STATCARRY1_RJBR 1
-#define S6_GMAC_STATCARRY1_RFRG 2
-#define S6_GMAC_STATCARRY1_ROVR 3
-#define S6_GMAC_STATCARRY1_RUND 4
-#define S6_GMAC_STATCARRY1_RCSE 5
-#define S6_GMAC_STATCARRY1_RCDE 6
-#define S6_GMAC_STATCARRY1_RFLR 7
-#define S6_GMAC_STATCARRY1_RALN 8
-#define S6_GMAC_STATCARRY1_RXUO 9
-#define S6_GMAC_STATCARRY1_RXPF 10
-#define S6_GMAC_STATCARRY1_RXCF 11
-#define S6_GMAC_STATCARRY1_RBCA 12
-#define S6_GMAC_STATCARRY1_RMCA 13
-#define S6_GMAC_STATCARRY1_RFCS 14
-#define S6_GMAC_STATCARRY1_RPKT 15
-#define S6_GMAC_STATCARRY1_RBYT 16
-#define S6_GMAC_STATCARRY1_TRMGV 25
-#define S6_GMAC_STATCARRY1_TRMAX 26
-#define S6_GMAC_STATCARRY1_TR1K 27
-#define S6_GMAC_STATCARRY1_TR511 28
-#define S6_GMAC_STATCARRY1_TR255 29
-#define S6_GMAC_STATCARRY1_TR127 30
-#define S6_GMAC_STATCARRY1_TR64 31
-#define S6_GMAC_STATCARRY2_TDRP 0
-#define S6_GMAC_STATCARRY2_TPFH 1
-#define S6_GMAC_STATCARRY2_TNCL 2
-#define S6_GMAC_STATCARRY2_TXCL 3
-#define S6_GMAC_STATCARRY2_TLCL 4
-#define S6_GMAC_STATCARRY2_TMCL 5
-#define S6_GMAC_STATCARRY2_TSCL 6
-#define S6_GMAC_STATCARRY2_TEDF 7
-#define S6_GMAC_STATCARRY2_TDFR 8
-#define S6_GMAC_STATCARRY2_TXPF 9
-#define S6_GMAC_STATCARRY2_TBCA 10
-#define S6_GMAC_STATCARRY2_TMCA 11
-#define S6_GMAC_STATCARRY2_TPKT 12
-#define S6_GMAC_STATCARRY2_TBYT 13
-#define S6_GMAC_STATCARRY2_TFRG 14
-#define S6_GMAC_STATCARRY2_TUND 15
-#define S6_GMAC_STATCARRY2_TOVR 16
-#define S6_GMAC_STATCARRY2_TXCF 17
-#define S6_GMAC_STATCARRY2_TFCS 18
-#define S6_GMAC_STATCARRY2_TJBR 19
-
-#define S6_GMAC_HOST_PBLKCTRL 0x140
-#define S6_GMAC_HOST_PBLKCTRL_TXENA 0
-#define S6_GMAC_HOST_PBLKCTRL_RXENA 1
-#define S6_GMAC_HOST_PBLKCTRL_TXSRES 2
-#define S6_GMAC_HOST_PBLKCTRL_RXSRES 3
-#define S6_GMAC_HOST_PBLKCTRL_TXBSIZ 8
-#define S6_GMAC_HOST_PBLKCTRL_RXBSIZ 12
-#define S6_GMAC_HOST_PBLKCTRL_SIZ_16 4
-#define S6_GMAC_HOST_PBLKCTRL_SIZ_32 5
-#define S6_GMAC_HOST_PBLKCTRL_SIZ_64 6
-#define S6_GMAC_HOST_PBLKCTRL_SIZ_128 7
-#define S6_GMAC_HOST_PBLKCTRL_SIZ_MASK 0xF
-#define S6_GMAC_HOST_PBLKCTRL_STATENA 16
-#define S6_GMAC_HOST_PBLKCTRL_STATAUTOZ 17
-#define S6_GMAC_HOST_PBLKCTRL_STATCLEAR 18
-#define S6_GMAC_HOST_PBLKCTRL_RGMII 19
-#define S6_GMAC_HOST_INTMASK 0x144
-#define S6_GMAC_HOST_INTSTAT 0x148
-#define S6_GMAC_HOST_INT_TXBURSTOVER 3
-#define S6_GMAC_HOST_INT_TXPREWOVER 4
-#define S6_GMAC_HOST_INT_RXBURSTUNDER 5
-#define S6_GMAC_HOST_INT_RXPOSTRFULL 6
-#define S6_GMAC_HOST_INT_RXPOSTRUNDER 7
-#define S6_GMAC_HOST_RXFIFOHWM 0x14C
-#define S6_GMAC_HOST_CTRLFRAMXP 0x150
-#define S6_GMAC_HOST_DSTADDRLO(n) (0x160 + 8*(n))
-#define S6_GMAC_HOST_DSTADDRHI(n) (0x164 + 8*(n))
-#define S6_GMAC_HOST_DSTMASKLO(n) (0x180 + 8*(n))
-#define S6_GMAC_HOST_DSTMASKHI(n) (0x184 + 8*(n))
-
-#define S6_GMAC_BURST_PREWR 0x1B0
-#define S6_GMAC_BURST_PREWR_LEN 0
-#define S6_GMAC_BURST_PREWR_LEN_MASK ((1 << 20) - 1)
-#define S6_GMAC_BURST_PREWR_CFE 20
-#define S6_GMAC_BURST_PREWR_PPE 21
-#define S6_GMAC_BURST_PREWR_FCS 22
-#define S6_GMAC_BURST_PREWR_PAD 23
-#define S6_GMAC_BURST_POSTRD 0x1D0
-#define S6_GMAC_BURST_POSTRD_LEN 0
-#define S6_GMAC_BURST_POSTRD_LEN_MASK ((1 << 20) - 1)
-#define S6_GMAC_BURST_POSTRD_DROP 20
-
-
-/* data handling */
-
-#define S6_NUM_TX_SKB 8 /* must be larger than TX fifo size */
-#define S6_NUM_RX_SKB 16
-#define S6_MAX_FRLEN 1536
-
-struct s6gmac {
- u32 reg;
- u32 tx_dma;
- u32 rx_dma;
- u32 io;
- u8 tx_chan;
- u8 rx_chan;
- spinlock_t lock;
- u8 tx_skb_i, tx_skb_o;
- u8 rx_skb_i, rx_skb_o;
- struct sk_buff *tx_skb[S6_NUM_TX_SKB];
- struct sk_buff *rx_skb[S6_NUM_RX_SKB];
- unsigned long carry[sizeof(struct net_device_stats) / sizeof(long)];
- unsigned long stats[sizeof(struct net_device_stats) / sizeof(long)];
- struct phy_device *phydev;
- struct {
- struct mii_bus *bus;
- int irq[PHY_MAX_ADDR];
- } mii;
- struct {
- unsigned int mbit;
- u8 giga;
- u8 isup;
- u8 full;
- } link;
-};
-
-static void s6gmac_rx_fillfifo(struct net_device *dev)
-{
- struct s6gmac *pd = netdev_priv(dev);
- struct sk_buff *skb;
- while ((((u8)(pd->rx_skb_i - pd->rx_skb_o)) < S6_NUM_RX_SKB) &&
- (!s6dmac_fifo_full(pd->rx_dma, pd->rx_chan)) &&
- (skb = netdev_alloc_skb(dev, S6_MAX_FRLEN + 2))) {
- pd->rx_skb[(pd->rx_skb_i++) % S6_NUM_RX_SKB] = skb;
- s6dmac_put_fifo_cache(pd->rx_dma, pd->rx_chan,
- pd->io, (u32)skb->data, S6_MAX_FRLEN);
- }
-}
-
-static void s6gmac_rx_interrupt(struct net_device *dev)
-{
- struct s6gmac *pd = netdev_priv(dev);
- u32 pfx;
- struct sk_buff *skb;
- while (((u8)(pd->rx_skb_i - pd->rx_skb_o)) >
- s6dmac_pending_count(pd->rx_dma, pd->rx_chan)) {
- skb = pd->rx_skb[(pd->rx_skb_o++) % S6_NUM_RX_SKB];
- pfx = readl(pd->reg + S6_GMAC_BURST_POSTRD);
- if (pfx & (1 << S6_GMAC_BURST_POSTRD_DROP)) {
- dev_kfree_skb_irq(skb);
- } else {
- skb_put(skb, (pfx >> S6_GMAC_BURST_POSTRD_LEN)
- & S6_GMAC_BURST_POSTRD_LEN_MASK);
- skb->protocol = eth_type_trans(skb, dev);
- skb->ip_summed = CHECKSUM_UNNECESSARY;
- netif_rx(skb);
- }
- }
-}
-
-static void s6gmac_tx_interrupt(struct net_device *dev)
-{
- struct s6gmac *pd = netdev_priv(dev);
- while (((u8)(pd->tx_skb_i - pd->tx_skb_o)) >
- s6dmac_pending_count(pd->tx_dma, pd->tx_chan)) {
- dev_kfree_skb_irq(pd->tx_skb[(pd->tx_skb_o++) % S6_NUM_TX_SKB]);
- }
- if (!s6dmac_fifo_full(pd->tx_dma, pd->tx_chan))
- netif_wake_queue(dev);
-}
-
-struct s6gmac_statinf {
- unsigned reg_size : 4; /* 0: unused */
- unsigned reg_off : 6;
- unsigned net_index : 6;
-};
-
-#define S6_STATS_B (8 * sizeof(u32))
-#define S6_STATS_C(b, r, f) [b] = { \
- BUILD_BUG_ON_ZERO(r##_SIZE < S6_GMAC_STAT_SIZE_MIN) + \
- BUILD_BUG_ON_ZERO((r##_SIZE - (S6_GMAC_STAT_SIZE_MIN - 1)) \
- >= (1<<4)) + \
- r##_SIZE - (S6_GMAC_STAT_SIZE_MIN - 1), \
- BUILD_BUG_ON_ZERO(((unsigned)((r - S6_GMAC_STAT_REGS) / sizeof(u32))) \
- >= ((1<<6)-1)) + \
- (r - S6_GMAC_STAT_REGS) / sizeof(u32), \
- BUILD_BUG_ON_ZERO((offsetof(struct net_device_stats, f)) \
- % sizeof(unsigned long)) + \
- BUILD_BUG_ON_ZERO((((unsigned)(offsetof(struct net_device_stats, f)) \
- / sizeof(unsigned long)) >= (1<<6))) + \
- BUILD_BUG_ON_ZERO((sizeof(((struct net_device_stats *)0)->f) \
- != sizeof(unsigned long))) + \
- (offsetof(struct net_device_stats, f)) / sizeof(unsigned long)},
-
-static const struct s6gmac_statinf statinf[2][S6_STATS_B] = { {
- S6_STATS_C(S6_GMAC_STATCARRY1_RBYT, S6_GMAC_STATRBYT, rx_bytes)
- S6_STATS_C(S6_GMAC_STATCARRY1_RPKT, S6_GMAC_STATRPKT, rx_packets)
- S6_STATS_C(S6_GMAC_STATCARRY1_RFCS, S6_GMAC_STATRFCS, rx_crc_errors)
- S6_STATS_C(S6_GMAC_STATCARRY1_RMCA, S6_GMAC_STATRMCA, multicast)
- S6_STATS_C(S6_GMAC_STATCARRY1_RALN, S6_GMAC_STATRALN, rx_frame_errors)
- S6_STATS_C(S6_GMAC_STATCARRY1_RFLR, S6_GMAC_STATRFLR, rx_length_errors)
- S6_STATS_C(S6_GMAC_STATCARRY1_RCDE, S6_GMAC_STATRCDE, rx_missed_errors)
- S6_STATS_C(S6_GMAC_STATCARRY1_RUND, S6_GMAC_STATRUND, rx_length_errors)
- S6_STATS_C(S6_GMAC_STATCARRY1_ROVR, S6_GMAC_STATROVR, rx_length_errors)
- S6_STATS_C(S6_GMAC_STATCARRY1_RFRG, S6_GMAC_STATRFRG, rx_crc_errors)
- S6_STATS_C(S6_GMAC_STATCARRY1_RJBR, S6_GMAC_STATRJBR, rx_crc_errors)
- S6_STATS_C(S6_GMAC_STATCARRY1_RDRP, S6_GMAC_STATRDRP, rx_dropped)
-}, {
- S6_STATS_C(S6_GMAC_STATCARRY2_TBYT, S6_GMAC_STATTBYT, tx_bytes)
- S6_STATS_C(S6_GMAC_STATCARRY2_TPKT, S6_GMAC_STATTPKT, tx_packets)
- S6_STATS_C(S6_GMAC_STATCARRY2_TEDF, S6_GMAC_STATTEDF, tx_aborted_errors)
- S6_STATS_C(S6_GMAC_STATCARRY2_TXCL, S6_GMAC_STATTXCL, tx_aborted_errors)
- S6_STATS_C(S6_GMAC_STATCARRY2_TNCL, S6_GMAC_STATTNCL, collisions)
- S6_STATS_C(S6_GMAC_STATCARRY2_TDRP, S6_GMAC_STATTDRP, tx_dropped)
- S6_STATS_C(S6_GMAC_STATCARRY2_TJBR, S6_GMAC_STATTJBR, tx_errors)
- S6_STATS_C(S6_GMAC_STATCARRY2_TFCS, S6_GMAC_STATTFCS, tx_errors)
- S6_STATS_C(S6_GMAC_STATCARRY2_TOVR, S6_GMAC_STATTOVR, tx_errors)
- S6_STATS_C(S6_GMAC_STATCARRY2_TUND, S6_GMAC_STATTUND, tx_errors)
- S6_STATS_C(S6_GMAC_STATCARRY2_TFRG, S6_GMAC_STATTFRG, tx_errors)
-} };
-
-static void s6gmac_stats_collect(struct s6gmac *pd,
- const struct s6gmac_statinf *inf)
-{
- int b;
- for (b = 0; b < S6_STATS_B; b++) {
- if (inf[b].reg_size) {
- pd->stats[inf[b].net_index] +=
- readl(pd->reg + S6_GMAC_STAT_REGS
- + sizeof(u32) * inf[b].reg_off);
- }
- }
-}
-
-static void s6gmac_stats_carry(struct s6gmac *pd,
- const struct s6gmac_statinf *inf, u32 mask)
-{
- int b;
- while (mask) {
- b = fls(mask) - 1;
- mask &= ~(1 << b);
- pd->carry[inf[b].net_index] += (1 << inf[b].reg_size);
- }
-}
-
-static inline u32 s6gmac_stats_pending(struct s6gmac *pd, int carry)
-{
- int r = readl(pd->reg + S6_GMAC_STATCARRY(carry)) &
- ~readl(pd->reg + S6_GMAC_STATCARRYMSK(carry));
- return r;
-}
-
-static inline void s6gmac_stats_interrupt(struct s6gmac *pd, int carry)
-{
- u32 mask;
- mask = s6gmac_stats_pending(pd, carry);
- if (mask) {
- writel(mask, pd->reg + S6_GMAC_STATCARRY(carry));
- s6gmac_stats_carry(pd, &statinf[carry][0], mask);
- }
-}
-
-static irqreturn_t s6gmac_interrupt(int irq, void *dev_id)
-{
- struct net_device *dev = (struct net_device *)dev_id;
- struct s6gmac *pd = netdev_priv(dev);
- if (!dev)
- return IRQ_NONE;
- spin_lock(&pd->lock);
- if (s6dmac_termcnt_irq(pd->rx_dma, pd->rx_chan))
- s6gmac_rx_interrupt(dev);
- s6gmac_rx_fillfifo(dev);
- if (s6dmac_termcnt_irq(pd->tx_dma, pd->tx_chan))
- s6gmac_tx_interrupt(dev);
- s6gmac_stats_interrupt(pd, 0);
- s6gmac_stats_interrupt(pd, 1);
- spin_unlock(&pd->lock);
- return IRQ_HANDLED;
-}
-
-static inline void s6gmac_set_dstaddr(struct s6gmac *pd, int n,
- u32 addrlo, u32 addrhi, u32 masklo, u32 maskhi)
-{
- writel(addrlo, pd->reg + S6_GMAC_HOST_DSTADDRLO(n));
- writel(addrhi, pd->reg + S6_GMAC_HOST_DSTADDRHI(n));
- writel(masklo, pd->reg + S6_GMAC_HOST_DSTMASKLO(n));
- writel(maskhi, pd->reg + S6_GMAC_HOST_DSTMASKHI(n));
-}
-
-static inline void s6gmac_stop_device(struct net_device *dev)
-{
- struct s6gmac *pd = netdev_priv(dev);
- writel(0, pd->reg + S6_GMAC_MACCONF1);
-}
-
-static inline void s6gmac_init_device(struct net_device *dev)
-{
- struct s6gmac *pd = netdev_priv(dev);
- int is_rgmii = !!(pd->phydev->supported
- & (SUPPORTED_1000baseT_Full | SUPPORTED_1000baseT_Half));
-#if 0
- writel(1 << S6_GMAC_MACCONF1_SYNCTX |
- 1 << S6_GMAC_MACCONF1_SYNCRX |
- 1 << S6_GMAC_MACCONF1_TXFLOWCTRL |
- 1 << S6_GMAC_MACCONF1_RXFLOWCTRL |
- 1 << S6_GMAC_MACCONF1_RESTXFUNC |
- 1 << S6_GMAC_MACCONF1_RESRXFUNC |
- 1 << S6_GMAC_MACCONF1_RESTXMACCTRL |
- 1 << S6_GMAC_MACCONF1_RESRXMACCTRL,
- pd->reg + S6_GMAC_MACCONF1);
-#endif
- writel(1 << S6_GMAC_MACCONF1_SOFTRES, pd->reg + S6_GMAC_MACCONF1);
- udelay(1000);
- writel(1 << S6_GMAC_MACCONF1_TXENA | 1 << S6_GMAC_MACCONF1_RXENA,
- pd->reg + S6_GMAC_MACCONF1);
- writel(1 << S6_GMAC_HOST_PBLKCTRL_TXSRES |
- 1 << S6_GMAC_HOST_PBLKCTRL_RXSRES,
- pd->reg + S6_GMAC_HOST_PBLKCTRL);
- writel(S6_GMAC_HOST_PBLKCTRL_SIZ_128 << S6_GMAC_HOST_PBLKCTRL_TXBSIZ |
- S6_GMAC_HOST_PBLKCTRL_SIZ_128 << S6_GMAC_HOST_PBLKCTRL_RXBSIZ |
- 1 << S6_GMAC_HOST_PBLKCTRL_STATENA |
- 1 << S6_GMAC_HOST_PBLKCTRL_STATCLEAR |
- is_rgmii << S6_GMAC_HOST_PBLKCTRL_RGMII,
- pd->reg + S6_GMAC_HOST_PBLKCTRL);
- writel(1 << S6_GMAC_MACCONF1_TXENA |
- 1 << S6_GMAC_MACCONF1_RXENA |
- (dev->flags & IFF_LOOPBACK ? 1 : 0)
- << S6_GMAC_MACCONF1_LOOPBACK,
- pd->reg + S6_GMAC_MACCONF1);
- writel(dev->mtu && (dev->mtu < (S6_MAX_FRLEN - ETH_HLEN-ETH_FCS_LEN)) ?
- dev->mtu+ETH_HLEN+ETH_FCS_LEN : S6_MAX_FRLEN,
- pd->reg + S6_GMAC_MACMAXFRAMELEN);
- writel((pd->link.full ? 1 : 0) << S6_GMAC_MACCONF2_FULL |
- 1 << S6_GMAC_MACCONF2_PADCRCENA |
- 1 << S6_GMAC_MACCONF2_LENGTHFCHK |
- (pd->link.giga ?
- S6_GMAC_MACCONF2_IFMODE_BYTE :
- S6_GMAC_MACCONF2_IFMODE_NIBBLE)
- << S6_GMAC_MACCONF2_IFMODE |
- 7 << S6_GMAC_MACCONF2_PREAMBLELEN,
- pd->reg + S6_GMAC_MACCONF2);
- writel(0, pd->reg + S6_GMAC_MACSTATADDR1);
- writel(0, pd->reg + S6_GMAC_MACSTATADDR2);
- writel(1 << S6_GMAC_FIFOCONF0_WTMENREQ |
- 1 << S6_GMAC_FIFOCONF0_SRFENREQ |
- 1 << S6_GMAC_FIFOCONF0_FRFENREQ |
- 1 << S6_GMAC_FIFOCONF0_STFENREQ |
- 1 << S6_GMAC_FIFOCONF0_FTFENREQ,
- pd->reg + S6_GMAC_FIFOCONF0);
- writel(128 << S6_GMAC_FIFOCONF3_CFGFTTH |
- 128 << S6_GMAC_FIFOCONF3_CFGHWMFT,
- pd->reg + S6_GMAC_FIFOCONF3);
- writel((S6_GMAC_FIFOCONF_RSV_MASK & ~(
- 1 << S6_GMAC_FIFOCONF_RSV_RUNT |
- 1 << S6_GMAC_FIFOCONF_RSV_CRCERR |
- 1 << S6_GMAC_FIFOCONF_RSV_OK |
- 1 << S6_GMAC_FIFOCONF_RSV_DRIBBLE |
- 1 << S6_GMAC_FIFOCONF_RSV_CTRLFRAME |
- 1 << S6_GMAC_FIFOCONF_RSV_PAUSECTRL |
- 1 << S6_GMAC_FIFOCONF_RSV_UNOPCODE |
- 1 << S6_GMAC_FIFOCONF_RSV_TRUNCATED)) |
- 1 << S6_GMAC_FIFOCONF5_DROPLT64 |
- pd->link.giga << S6_GMAC_FIFOCONF5_CFGBYTM |
- 1 << S6_GMAC_FIFOCONF5_RXDROPSIZE,
- pd->reg + S6_GMAC_FIFOCONF5);
- writel(1 << S6_GMAC_FIFOCONF_RSV_RUNT |
- 1 << S6_GMAC_FIFOCONF_RSV_CRCERR |
- 1 << S6_GMAC_FIFOCONF_RSV_DRIBBLE |
- 1 << S6_GMAC_FIFOCONF_RSV_CTRLFRAME |
- 1 << S6_GMAC_FIFOCONF_RSV_PAUSECTRL |
- 1 << S6_GMAC_FIFOCONF_RSV_UNOPCODE |
- 1 << S6_GMAC_FIFOCONF_RSV_TRUNCATED,
- pd->reg + S6_GMAC_FIFOCONF4);
- s6gmac_set_dstaddr(pd, 0,
- 0xFFFFFFFF, 0x0000FFFF, 0xFFFFFFFF, 0x0000FFFF);
- s6gmac_set_dstaddr(pd, 1,
- dev->dev_addr[5] |
- dev->dev_addr[4] << 8 |
- dev->dev_addr[3] << 16 |
- dev->dev_addr[2] << 24,
- dev->dev_addr[1] |
- dev->dev_addr[0] << 8,
- 0xFFFFFFFF, 0x0000FFFF);
- s6gmac_set_dstaddr(pd, 2,
- 0x00000000, 0x00000100, 0x00000000, 0x00000100);
- s6gmac_set_dstaddr(pd, 3,
- 0x00000000, 0x00000000, 0x00000000, 0x00000000);
- writel(1 << S6_GMAC_HOST_PBLKCTRL_TXENA |
- 1 << S6_GMAC_HOST_PBLKCTRL_RXENA |
- S6_GMAC_HOST_PBLKCTRL_SIZ_128 << S6_GMAC_HOST_PBLKCTRL_TXBSIZ |
- S6_GMAC_HOST_PBLKCTRL_SIZ_128 << S6_GMAC_HOST_PBLKCTRL_RXBSIZ |
- 1 << S6_GMAC_HOST_PBLKCTRL_STATENA |
- 1 << S6_GMAC_HOST_PBLKCTRL_STATCLEAR |
- is_rgmii << S6_GMAC_HOST_PBLKCTRL_RGMII,
- pd->reg + S6_GMAC_HOST_PBLKCTRL);
-}
-
-static void s6mii_enable(struct s6gmac *pd)
-{
- writel(readl(pd->reg + S6_GMAC_MACCONF1) &
- ~(1 << S6_GMAC_MACCONF1_SOFTRES),
- pd->reg + S6_GMAC_MACCONF1);
- writel((readl(pd->reg + S6_GMAC_MACMIICONF)
- & ~(S6_GMAC_MACMIICONF_CSEL_MASK << S6_GMAC_MACMIICONF_CSEL))
- | (S6_GMAC_MACMIICONF_CSEL_DIV168 << S6_GMAC_MACMIICONF_CSEL),
- pd->reg + S6_GMAC_MACMIICONF);
-}
-
-static int s6mii_busy(struct s6gmac *pd, int tmo)
-{
- while (readl(pd->reg + S6_GMAC_MACMIIINDI)) {
- if (--tmo == 0)
- return -ETIME;
- udelay(64);
- }
- return 0;
-}
-
-static int s6mii_read(struct mii_bus *bus, int phy_addr, int regnum)
-{
- struct s6gmac *pd = bus->priv;
- s6mii_enable(pd);
- if (s6mii_busy(pd, 256))
- return -ETIME;
- writel(phy_addr << S6_GMAC_MACMIIADDR_PHY |
- regnum << S6_GMAC_MACMIIADDR_REG,
- pd->reg + S6_GMAC_MACMIIADDR);
- writel(1 << S6_GMAC_MACMIICMD_READ, pd->reg + S6_GMAC_MACMIICMD);
- writel(0, pd->reg + S6_GMAC_MACMIICMD);
- if (s6mii_busy(pd, 256))
- return -ETIME;
- return (u16)readl(pd->reg + S6_GMAC_MACMIISTAT);
-}
-
-static int s6mii_write(struct mii_bus *bus, int phy_addr, int regnum, u16 value)
-{
- struct s6gmac *pd = bus->priv;
- s6mii_enable(pd);
- if (s6mii_busy(pd, 256))
- return -ETIME;
- writel(phy_addr << S6_GMAC_MACMIIADDR_PHY |
- regnum << S6_GMAC_MACMIIADDR_REG,
- pd->reg + S6_GMAC_MACMIIADDR);
- writel(value, pd->reg + S6_GMAC_MACMIICTRL);
- if (s6mii_busy(pd, 256))
- return -ETIME;
- return 0;
-}
-
-static int s6mii_reset(struct mii_bus *bus)
-{
- struct s6gmac *pd = bus->priv;
- s6mii_enable(pd);
- if (s6mii_busy(pd, PHY_INIT_TIMEOUT))
- return -ETIME;
- return 0;
-}
-
-static void s6gmac_set_rgmii_txclock(struct s6gmac *pd)
-{
- u32 pllsel = readl(S6_REG_GREG1 + S6_GREG1_PLLSEL);
- pllsel &= ~(S6_GREG1_PLLSEL_GMAC_MASK << S6_GREG1_PLLSEL_GMAC);
- switch (pd->link.mbit) {
- case 10:
- pllsel |= S6_GREG1_PLLSEL_GMAC_2500KHZ << S6_GREG1_PLLSEL_GMAC;
- break;
- case 100:
- pllsel |= S6_GREG1_PLLSEL_GMAC_25MHZ << S6_GREG1_PLLSEL_GMAC;
- break;
- case 1000:
- pllsel |= S6_GREG1_PLLSEL_GMAC_125MHZ << S6_GREG1_PLLSEL_GMAC;
- break;
- default:
- return;
- }
- writel(pllsel, S6_REG_GREG1 + S6_GREG1_PLLSEL);
-}
-
-static inline void s6gmac_linkisup(struct net_device *dev, int isup)
-{
- struct s6gmac *pd = netdev_priv(dev);
- struct phy_device *phydev = pd->phydev;
-
- pd->link.full = phydev->duplex;
- pd->link.giga = (phydev->speed == 1000);
- if (pd->link.mbit != phydev->speed) {
- pd->link.mbit = phydev->speed;
- s6gmac_set_rgmii_txclock(pd);
- }
- pd->link.isup = isup;
- if (isup)
- netif_carrier_on(dev);
- phy_print_status(phydev);
-}
-
-static void s6gmac_adjust_link(struct net_device *dev)
-{
- struct s6gmac *pd = netdev_priv(dev);
- struct phy_device *phydev = pd->phydev;
- if (pd->link.isup &&
- (!phydev->link ||
- (pd->link.mbit != phydev->speed) ||
- (pd->link.full != phydev->duplex))) {
- pd->link.isup = 0;
- netif_tx_disable(dev);
- if (!phydev->link) {
- netif_carrier_off(dev);
- phy_print_status(phydev);
- }
- }
- if (!pd->link.isup && phydev->link) {
- if (pd->link.full != phydev->duplex) {
- u32 maccfg = readl(pd->reg + S6_GMAC_MACCONF2);
- if (phydev->duplex)
- maccfg |= 1 << S6_GMAC_MACCONF2_FULL;
- else
- maccfg &= ~(1 << S6_GMAC_MACCONF2_FULL);
- writel(maccfg, pd->reg + S6_GMAC_MACCONF2);
- }
-
- if (pd->link.giga != (phydev->speed == 1000)) {
- u32 fifocfg = readl(pd->reg + S6_GMAC_FIFOCONF5);
- u32 maccfg = readl(pd->reg + S6_GMAC_MACCONF2);
- maccfg &= ~(S6_GMAC_MACCONF2_IFMODE_MASK
- << S6_GMAC_MACCONF2_IFMODE);
- if (phydev->speed == 1000) {
- fifocfg |= 1 << S6_GMAC_FIFOCONF5_CFGBYTM;
- maccfg |= S6_GMAC_MACCONF2_IFMODE_BYTE
- << S6_GMAC_MACCONF2_IFMODE;
- } else {
- fifocfg &= ~(1 << S6_GMAC_FIFOCONF5_CFGBYTM);
- maccfg |= S6_GMAC_MACCONF2_IFMODE_NIBBLE
- << S6_GMAC_MACCONF2_IFMODE;
- }
- writel(fifocfg, pd->reg + S6_GMAC_FIFOCONF5);
- writel(maccfg, pd->reg + S6_GMAC_MACCONF2);
- }
-
- if (!s6dmac_fifo_full(pd->tx_dma, pd->tx_chan))
- netif_wake_queue(dev);
- s6gmac_linkisup(dev, 1);
- }
-}
-
-static inline int s6gmac_phy_start(struct net_device *dev)
-{
- struct s6gmac *pd = netdev_priv(dev);
- int i = 0;
- struct phy_device *p = NULL;
- while ((i < PHY_MAX_ADDR) && (!(p = pd->mii.bus->phy_map[i])))
- i++;
- p = phy_connect(dev, dev_name(&p->dev), &s6gmac_adjust_link,
- PHY_INTERFACE_MODE_RGMII);
- if (IS_ERR(p)) {
- printk(KERN_ERR "%s: Could not attach to PHY\n", dev->name);
- return PTR_ERR(p);
- }
- p->supported &= PHY_GBIT_FEATURES;
- p->advertising = p->supported;
- pd->phydev = p;
- return 0;
-}
-
-static inline void s6gmac_init_stats(struct net_device *dev)
-{
- struct s6gmac *pd = netdev_priv(dev);
- u32 mask;
- mask = 1 << S6_GMAC_STATCARRY1_RDRP |
- 1 << S6_GMAC_STATCARRY1_RJBR |
- 1 << S6_GMAC_STATCARRY1_RFRG |
- 1 << S6_GMAC_STATCARRY1_ROVR |
- 1 << S6_GMAC_STATCARRY1_RUND |
- 1 << S6_GMAC_STATCARRY1_RCDE |
- 1 << S6_GMAC_STATCARRY1_RFLR |
- 1 << S6_GMAC_STATCARRY1_RALN |
- 1 << S6_GMAC_STATCARRY1_RMCA |
- 1 << S6_GMAC_STATCARRY1_RFCS |
- 1 << S6_GMAC_STATCARRY1_RPKT |
- 1 << S6_GMAC_STATCARRY1_RBYT;
- writel(mask, pd->reg + S6_GMAC_STATCARRY(0));
- writel(~mask, pd->reg + S6_GMAC_STATCARRYMSK(0));
- mask = 1 << S6_GMAC_STATCARRY2_TDRP |
- 1 << S6_GMAC_STATCARRY2_TNCL |
- 1 << S6_GMAC_STATCARRY2_TXCL |
- 1 << S6_GMAC_STATCARRY2_TEDF |
- 1 << S6_GMAC_STATCARRY2_TPKT |
- 1 << S6_GMAC_STATCARRY2_TBYT |
- 1 << S6_GMAC_STATCARRY2_TFRG |
- 1 << S6_GMAC_STATCARRY2_TUND |
- 1 << S6_GMAC_STATCARRY2_TOVR |
- 1 << S6_GMAC_STATCARRY2_TFCS |
- 1 << S6_GMAC_STATCARRY2_TJBR;
- writel(mask, pd->reg + S6_GMAC_STATCARRY(1));
- writel(~mask, pd->reg + S6_GMAC_STATCARRYMSK(1));
-}
-
-static inline void s6gmac_init_dmac(struct net_device *dev)
-{
- struct s6gmac *pd = netdev_priv(dev);
- s6dmac_disable_chan(pd->tx_dma, pd->tx_chan);
- s6dmac_disable_chan(pd->rx_dma, pd->rx_chan);
- s6dmac_disable_error_irqs(pd->tx_dma, 1 << S6_HIFDMA_GMACTX);
- s6dmac_disable_error_irqs(pd->rx_dma, 1 << S6_HIFDMA_GMACRX);
-}
-
-static int s6gmac_tx(struct sk_buff *skb, struct net_device *dev)
-{
- struct s6gmac *pd = netdev_priv(dev);
- unsigned long flags;
-
- spin_lock_irqsave(&pd->lock, flags);
- writel(skb->len << S6_GMAC_BURST_PREWR_LEN |
- 0 << S6_GMAC_BURST_PREWR_CFE |
- 1 << S6_GMAC_BURST_PREWR_PPE |
- 1 << S6_GMAC_BURST_PREWR_FCS |
- ((skb->len < ETH_ZLEN) ? 1 : 0) << S6_GMAC_BURST_PREWR_PAD,
- pd->reg + S6_GMAC_BURST_PREWR);
- s6dmac_put_fifo_cache(pd->tx_dma, pd->tx_chan,
- (u32)skb->data, pd->io, skb->len);
- if (s6dmac_fifo_full(pd->tx_dma, pd->tx_chan))
- netif_stop_queue(dev);
- if (((u8)(pd->tx_skb_i - pd->tx_skb_o)) >= S6_NUM_TX_SKB) {
- printk(KERN_ERR "GMAC BUG: skb tx ring overflow [%x, %x]\n",
- pd->tx_skb_o, pd->tx_skb_i);
- BUG();
- }
- pd->tx_skb[(pd->tx_skb_i++) % S6_NUM_TX_SKB] = skb;
- spin_unlock_irqrestore(&pd->lock, flags);
- return 0;
-}
-
-static void s6gmac_tx_timeout(struct net_device *dev)
-{
- struct s6gmac *pd = netdev_priv(dev);
- unsigned long flags;
- spin_lock_irqsave(&pd->lock, flags);
- s6gmac_tx_interrupt(dev);
- spin_unlock_irqrestore(&pd->lock, flags);
-}
-
-static int s6gmac_open(struct net_device *dev)
-{
- struct s6gmac *pd = netdev_priv(dev);
- unsigned long flags;
- phy_read_status(pd->phydev);
- spin_lock_irqsave(&pd->lock, flags);
- pd->link.mbit = 0;
- s6gmac_linkisup(dev, pd->phydev->link);
- s6gmac_init_device(dev);
- s6gmac_init_stats(dev);
- s6gmac_init_dmac(dev);
- s6gmac_rx_fillfifo(dev);
- s6dmac_enable_chan(pd->rx_dma, pd->rx_chan,
- 2, 1, 0, 1, 0, 0, 0, 7, -1, 2, 0, 1);
- s6dmac_enable_chan(pd->tx_dma, pd->tx_chan,
- 2, 0, 1, 0, 0, 0, 0, 7, -1, 2, 0, 1);
- writel(0 << S6_GMAC_HOST_INT_TXBURSTOVER |
- 0 << S6_GMAC_HOST_INT_TXPREWOVER |
- 0 << S6_GMAC_HOST_INT_RXBURSTUNDER |
- 0 << S6_GMAC_HOST_INT_RXPOSTRFULL |
- 0 << S6_GMAC_HOST_INT_RXPOSTRUNDER,
- pd->reg + S6_GMAC_HOST_INTMASK);
- spin_unlock_irqrestore(&pd->lock, flags);
- phy_start(pd->phydev);
- netif_start_queue(dev);
- return 0;
-}
-
-static int s6gmac_stop(struct net_device *dev)
-{
- struct s6gmac *pd = netdev_priv(dev);
- unsigned long flags;
- netif_stop_queue(dev);
- phy_stop(pd->phydev);
- spin_lock_irqsave(&pd->lock, flags);
- s6gmac_init_dmac(dev);
- s6gmac_stop_device(dev);
- while (pd->tx_skb_i != pd->tx_skb_o)
- dev_kfree_skb(pd->tx_skb[(pd->tx_skb_o++) % S6_NUM_TX_SKB]);
- while (pd->rx_skb_i != pd->rx_skb_o)
- dev_kfree_skb(pd->rx_skb[(pd->rx_skb_o++) % S6_NUM_RX_SKB]);
- spin_unlock_irqrestore(&pd->lock, flags);
- return 0;
-}
-
-static struct net_device_stats *s6gmac_stats(struct net_device *dev)
-{
- struct s6gmac *pd = netdev_priv(dev);
- struct net_device_stats *st = (struct net_device_stats *)&pd->stats;
- int i;
- do {
- unsigned long flags;
- spin_lock_irqsave(&pd->lock, flags);
- for (i = 0; i < ARRAY_SIZE(pd->stats); i++)
- pd->stats[i] =
- pd->carry[i] << (S6_GMAC_STAT_SIZE_MIN - 1);
- s6gmac_stats_collect(pd, &statinf[0][0]);
- s6gmac_stats_collect(pd, &statinf[1][0]);
- i = s6gmac_stats_pending(pd, 0) |
- s6gmac_stats_pending(pd, 1);
- spin_unlock_irqrestore(&pd->lock, flags);
- } while (i);
- st->rx_errors = st->rx_crc_errors +
- st->rx_frame_errors +
- st->rx_length_errors +
- st->rx_missed_errors;
- st->tx_errors += st->tx_aborted_errors;
- return st;
-}
-
-static int s6gmac_probe(struct platform_device *pdev)
-{
- struct net_device *dev;
- struct s6gmac *pd;
- int res;
- unsigned long i;
- struct mii_bus *mb;
-
- dev = alloc_etherdev(sizeof(*pd));
- if (!dev)
- return -ENOMEM;
-
- dev->open = s6gmac_open;
- dev->stop = s6gmac_stop;
- dev->hard_start_xmit = s6gmac_tx;
- dev->tx_timeout = s6gmac_tx_timeout;
- dev->watchdog_timeo = HZ;
- dev->get_stats = s6gmac_stats;
- dev->irq = platform_get_irq(pdev, 0);
- pd = netdev_priv(dev);
- memset(pd, 0, sizeof(*pd));
- spin_lock_init(&pd->lock);
- pd->reg = platform_get_resource(pdev, IORESOURCE_MEM, 0)->start;
- i = platform_get_resource(pdev, IORESOURCE_DMA, 0)->start;
- pd->tx_dma = DMA_MASK_DMAC(i);
- pd->tx_chan = DMA_INDEX_CHNL(i);
- i = platform_get_resource(pdev, IORESOURCE_DMA, 1)->start;
- pd->rx_dma = DMA_MASK_DMAC(i);
- pd->rx_chan = DMA_INDEX_CHNL(i);
- pd->io = platform_get_resource(pdev, IORESOURCE_IO, 0)->start;
- res = request_irq(dev->irq, s6gmac_interrupt, 0, dev->name, dev);
- if (res) {
- printk(KERN_ERR DRV_PRMT "irq request failed: %d\n", dev->irq);
- goto errirq;
- }
- res = register_netdev(dev);
- if (res) {
- printk(KERN_ERR DRV_PRMT "error registering device %s\n",
- dev->name);
- goto errdev;
- }
- mb = mdiobus_alloc();
- if (!mb) {
- printk(KERN_ERR DRV_PRMT "error allocating mii bus\n");
- res = -ENOMEM;
- goto errmii;
- }
- mb->name = "s6gmac_mii";
- mb->read = s6mii_read;
- mb->write = s6mii_write;
- mb->reset = s6mii_reset;
- mb->priv = pd;
- snprintf(mb->id, MII_BUS_ID_SIZE, "%s-%x", pdev->name, pdev->id);
- mb->phy_mask = ~(1 << 0);
- mb->irq = &pd->mii.irq[0];
- for (i = 0; i < PHY_MAX_ADDR; i++) {
- int n = platform_get_irq(pdev, i + 1);
- if (n < 0)
- n = PHY_POLL;
- pd->mii.irq[i] = n;
- }
- mdiobus_register(mb);
- pd->mii.bus = mb;
- res = s6gmac_phy_start(dev);
- if (res)
- return res;
- platform_set_drvdata(pdev, dev);
- return 0;
-errmii:
- unregister_netdev(dev);
-errdev:
- free_irq(dev->irq, dev);
-errirq:
- free_netdev(dev);
- return res;
-}
-
-static int s6gmac_remove(struct platform_device *pdev)
-{
- struct net_device *dev = platform_get_drvdata(pdev);
- if (dev) {
- struct s6gmac *pd = netdev_priv(dev);
- mdiobus_unregister(pd->mii.bus);
- unregister_netdev(dev);
- free_irq(dev->irq, dev);
- free_netdev(dev);
- }
- return 0;
-}
-
-static struct platform_driver s6gmac_driver = {
- .probe = s6gmac_probe,
- .remove = s6gmac_remove,
- .driver = {
- .name = "s6gmac",
- },
-};
-
-module_platform_driver(s6gmac_driver);
-
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("S6105 on chip Ethernet driver");
-MODULE_AUTHOR("Oskar Schirmer <oskar@scara.com>");
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 118a427..8c6b7c1 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -1671,7 +1671,7 @@
* 0 on success and an appropriate (-)ve integer as defined in errno.h
* file on failure.
*/
-static int stmmac_hw_setup(struct net_device *dev)
+static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
{
struct stmmac_priv *priv = netdev_priv(dev);
int ret;
@@ -1708,9 +1708,11 @@
stmmac_mmc_setup(priv);
- ret = stmmac_init_ptp(priv);
- if (ret && ret != -EOPNOTSUPP)
- pr_warn("%s: failed PTP initialisation\n", __func__);
+ if (init_ptp) {
+ ret = stmmac_init_ptp(priv);
+ if (ret && ret != -EOPNOTSUPP)
+ pr_warn("%s: failed PTP initialisation\n", __func__);
+ }
#ifdef CONFIG_DEBUG_FS
ret = stmmac_init_fs(dev);
@@ -1787,7 +1789,7 @@
goto init_error;
}
- ret = stmmac_hw_setup(dev);
+ ret = stmmac_hw_setup(dev, true);
if (ret < 0) {
pr_err("%s: Hw setup failed\n", __func__);
goto init_error;
@@ -3036,7 +3038,7 @@
netif_device_attach(ndev);
init_dma_desc_rings(ndev, GFP_ATOMIC);
- stmmac_hw_setup(ndev);
+ stmmac_hw_setup(ndev, false);
stmmac_init_tx_coalesce(priv);
napi_enable(&priv->napi);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index 4032b17..3039de2 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -430,7 +430,6 @@
.remove = stmmac_pltfr_remove,
.driver = {
.name = STMMAC_RESOURCE_NAME,
- .owner = THIS_MODULE,
.pm = &stmmac_pltfr_pm_ops,
.of_match_table = of_match_ptr(stmmac_dt_ids),
},
diff --git a/drivers/net/ethernet/sun/sunvnet.c b/drivers/net/ethernet/sun/sunvnet.c
index 45c408e..d2835bf 100644
--- a/drivers/net/ethernet/sun/sunvnet.c
+++ b/drivers/net/ethernet/sun/sunvnet.c
@@ -1201,6 +1201,7 @@
segs = skb_gso_segment(skb, dev->features & ~NETIF_F_TSO);
if (IS_ERR(segs)) {
dev->stats.tx_dropped++;
+ dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet.h b/drivers/net/ethernet/xilinx/xilinx_axienet.h
index 44b8d2b..4c9b4fa 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet.h
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet.h
@@ -388,7 +388,6 @@
* @dma_err_tasklet: Tasklet structure to process Axi DMA errors
* @tx_irq: Axidma TX IRQ number
* @rx_irq: Axidma RX IRQ number
- * @temac_type: axienet type to identify between soft and hard temac
* @phy_type: Phy type to identify between MII/GMII/RGMII/SGMII/1000 Base-X
* @options: AxiEthernet option word
* @last_link: Phy link state in which the PHY was negotiated earlier
@@ -431,7 +430,6 @@
int tx_irq;
int rx_irq;
- u32 temac_type;
u32 phy_type;
u32 options; /* Current options word */
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
index 4ea2d4e..c18a0c6 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
@@ -1555,10 +1555,6 @@
if ((be32_to_cpup(p)) >= 0x4000)
lp->jumbo_support = 1;
}
- p = (__be32 *) of_get_property(op->dev.of_node, "xlnx,temac-type",
- NULL);
- if (p)
- lp->temac_type = be32_to_cpup(p);
p = (__be32 *) of_get_property(op->dev.of_node, "xlnx,phy-type", NULL);
if (p)
lp->phy_type = be32_to_cpup(p);
diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
index 2f48f79..384ca4f 100644
--- a/drivers/net/hyperv/hyperv_net.h
+++ b/drivers/net/hyperv/hyperv_net.h
@@ -590,6 +590,7 @@
#define NETVSC_RECEIVE_BUFFER_ID 0xcafe
+#define NETVSC_SEND_BUFFER_ID 0
#define NETVSC_PACKET_SIZE 4096
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index dd867e6..9f49c01 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -161,8 +161,8 @@
/* Deal with the send buffer we may have setup.
* If we got a send section size, it means we received a
- * SendsendBufferComplete msg (ie sent
- * NvspMessage1TypeSendReceiveBuffer msg) therefore, we need
+ * NVSP_MSG1_TYPE_SEND_SEND_BUF_COMPLETE msg (i.e. we sent a
+ * NVSP_MSG1_TYPE_SEND_SEND_BUF msg); therefore, we need
* to send a revoke msg here
*/
if (net_device->send_section_size) {
@@ -172,7 +172,8 @@
revoke_packet->hdr.msg_type =
NVSP_MSG1_TYPE_REVOKE_SEND_BUF;
- revoke_packet->msg.v1_msg.revoke_recv_buf.id = 0;
+ revoke_packet->msg.v1_msg.revoke_send_buf.id =
+ NETVSC_SEND_BUFFER_ID;
ret = vmbus_sendpacket(net_device->dev->channel,
revoke_packet,
@@ -204,7 +205,7 @@
net_device->send_buf_gpadl_handle = 0;
}
if (net_device->send_buf) {
- /* Free up the receive buffer */
+ /* Free up the send buffer */
vfree(net_device->send_buf);
net_device->send_buf = NULL;
}
@@ -339,9 +340,9 @@
init_packet = &net_device->channel_init_pkt;
memset(init_packet, 0, sizeof(struct nvsp_message));
init_packet->hdr.msg_type = NVSP_MSG1_TYPE_SEND_SEND_BUF;
- init_packet->msg.v1_msg.send_recv_buf.gpadl_handle =
+ init_packet->msg.v1_msg.send_send_buf.gpadl_handle =
net_device->send_buf_gpadl_handle;
- init_packet->msg.v1_msg.send_recv_buf.id = 0;
+ init_packet->msg.v1_msg.send_send_buf.id = NETVSC_SEND_BUFFER_ID;
/* Send the gpadl notification request */
ret = vmbus_sendpacket(device->channel, init_packet,
@@ -364,7 +365,7 @@
netdev_err(ndev, "Unable to complete send buffer "
"initialization with NetVsp - status %d\n",
init_packet->msg.v1_msg.
- send_recv_buf_complete.status);
+ send_send_buf_complete.status);
ret = -EINVAL;
goto cleanup;
}
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index c530de1..3ad8ca7 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -88,6 +88,7 @@
static const struct kszphy_type ksz8021_type = {
.led_mode_reg = MII_KSZPHY_CTRL_2,
+ .has_broadcast_disable = true,
.has_rmii_ref_clk_sel = true,
};
@@ -258,19 +259,6 @@
return 0;
}
-static int ksz8021_config_init(struct phy_device *phydev)
-{
- int rc;
-
- rc = kszphy_config_init(phydev);
- if (rc)
- return rc;
-
- rc = kszphy_broadcast_disable(phydev);
-
- return rc < 0 ? rc : 0;
-}
-
static int ksz9021_load_values_from_of(struct phy_device *phydev,
struct device_node *of_node, u16 reg,
char *field1, char *field2,
@@ -584,7 +572,7 @@
.flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
.driver_data = &ksz8021_type,
.probe = kszphy_probe,
- .config_init = ksz8021_config_init,
+ .config_init = kszphy_config_init,
.config_aneg = genphy_config_aneg,
.read_status = genphy_read_status,
.ack_interrupt = kszphy_ack_interrupt,
@@ -601,7 +589,7 @@
.flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
.driver_data = &ksz8021_type,
.probe = kszphy_probe,
- .config_init = ksz8021_config_init,
+ .config_init = kszphy_config_init,
.config_aneg = genphy_config_aneg,
.read_status = genphy_read_status,
.ack_interrupt = kszphy_ack_interrupt,
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index b8bd719..5ca9771 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -760,7 +760,6 @@
container_of(napi, struct receive_queue, napi);
unsigned int r, received = 0;
-again:
received += virtnet_receive(rq, budget - received);
/* Out of packets? */
@@ -771,7 +770,6 @@
napi_schedule_prep(napi)) {
virtqueue_disable_cb(rq->vq);
__napi_schedule(napi);
- goto again;
}
}
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index 49d9f22..7fbd89f 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -1579,8 +1579,10 @@
bool udp_sum = !udp_get_no_check6_tx(vs->sock->sk);
skb = udp_tunnel_handle_offloads(skb, udp_sum);
- if (IS_ERR(skb))
- return -EINVAL;
+ if (IS_ERR(skb)) {
+ err = -EINVAL;
+ goto err;
+ }
skb_scrub_packet(skb, xnet);
@@ -1590,12 +1592,16 @@
/* Need space for new headers (invalidates iph ptr) */
err = skb_cow_head(skb, min_headroom);
- if (unlikely(err))
- return err;
+ if (unlikely(err)) {
+ kfree_skb(skb);
+ goto err;
+ }
skb = vlan_hwaccel_push_inside(skb);
- if (WARN_ON(!skb))
- return -ENOMEM;
+ if (WARN_ON(!skb)) {
+ err = -ENOMEM;
+ goto err;
+ }
vxh = (struct vxlanhdr *) __skb_push(skb, sizeof(*vxh));
vxh->vx_flags = htonl(VXLAN_FLAGS);
@@ -1606,6 +1612,9 @@
udp_tunnel6_xmit_skb(vs->sock, dst, skb, dev, saddr, daddr, prio,
ttl, src_port, dst_port);
return 0;
+err:
+ dst_release(dst);
+ return err;
}
#endif
@@ -1621,7 +1630,7 @@
skb = udp_tunnel_handle_offloads(skb, udp_sum);
if (IS_ERR(skb))
- return -EINVAL;
+ return PTR_ERR(skb);
min_headroom = LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len
+ VXLAN_HLEN + sizeof(struct iphdr)
@@ -1629,8 +1638,10 @@
/* Need space for new headers (invalidates iph ptr) */
err = skb_cow_head(skb, min_headroom);
- if (unlikely(err))
+ if (unlikely(err)) {
+ kfree_skb(skb);
return err;
+ }
skb = vlan_hwaccel_push_inside(skb);
if (WARN_ON(!skb))
@@ -1776,9 +1787,12 @@
tos, ttl, df, src_port, dst_port,
htonl(vni << 8),
!net_eq(vxlan->net, dev_net(vxlan->dev)));
-
- if (err < 0)
+ if (err < 0) {
+ /* skb is already freed. */
+ skb = NULL;
goto rt_tx_error;
+ }
+
iptunnel_xmit_stats(err, &dev->stats, dev->tstats);
#if IS_ENABLED(CONFIG_IPV6)
} else {
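
The "skb = NULL" assignment above encodes an ownership rule worth
making explicit: once a transmit helper fails after consuming the skb,
the helper frees it, and the caller must not free it again. A minimal
sketch of that convention, with a hypothetical tx_helper():

#include <linux/errno.h>
#include <linux/skbuff.h>

/* Hypothetical helper: consumes the skb and frees it on failure */
static int tx_helper(struct sk_buff *skb)
{
	kfree_skb(skb);		/* stub: pretend the transmit failed */
	return -EINVAL;
}

static int caller_tx(struct sk_buff *skb)
{
	int err = tx_helper(skb);

	if (err < 0) {
		/* the helper already freed the skb; drop our reference
		 * so the shared error path cannot free it twice
		 */
		skb = NULL;
		goto tx_error;
	}
	return 0;

tx_error:
	kfree_skb(skb);		/* kfree_skb(NULL) is a no-op */
	return err;
}
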
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c
index 3c06e93..9880dae 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c
@@ -1070,7 +1070,7 @@
*/
if ((sdio_get_host_pm_caps(sdiodev->func[1]) & MMC_PM_KEEP_POWER) &&
((sdio_get_host_pm_caps(sdiodev->func[1]) & MMC_PM_WAKE_SDIO_IRQ) ||
- (sdiodev->pdata->oob_irq_supported)))
+ (sdiodev->pdata && sdiodev->pdata->oob_irq_supported)))
bus_if->wowl_supported = true;
#endif
@@ -1167,7 +1167,7 @@
struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
brcmf_dbg(SDIO, "Enter\n");
- if (sdiodev->pdata->oob_irq_supported)
+ if (sdiodev->pdata && sdiodev->pdata->oob_irq_supported)
disable_irq_wake(sdiodev->pdata->oob_irq_nr);
brcmf_sdio_wd_timer(sdiodev->bus, BRCMF_WD_POLL_MS);
atomic_set(&sdiodev->suspend, false);
diff --git a/drivers/net/wireless/ipw2x00/Kconfig b/drivers/net/wireless/ipw2x00/Kconfig
index 91c0cb3..21de4fe 100644
--- a/drivers/net/wireless/ipw2x00/Kconfig
+++ b/drivers/net/wireless/ipw2x00/Kconfig
@@ -65,7 +65,8 @@
config IPW2200
tristate "Intel PRO/Wireless 2200BG and 2915ABG Network Connection"
- depends on PCI && CFG80211 && CFG80211_WEXT
+ depends on PCI && CFG80211
+ select CFG80211_WEXT
select WIRELESS_EXT
select WEXT_SPY
select WEXT_PRIV
diff --git a/drivers/net/wireless/iwlwifi/iwl-drv.c b/drivers/net/wireless/iwlwifi/iwl-drv.c
index 38de151..850b85a 100644
--- a/drivers/net/wireless/iwlwifi/iwl-drv.c
+++ b/drivers/net/wireless/iwlwifi/iwl-drv.c
@@ -1323,10 +1323,10 @@
try_again:
/* try next, if any */
- kfree(pieces);
release_firmware(ucode_raw);
if (iwl_request_firmware(drv, false))
goto out_unbind;
+ kfree(pieces);
return;
out_free_fw:
diff --git a/drivers/net/wireless/iwlwifi/iwl-fh.h b/drivers/net/wireless/iwlwifi/iwl-fh.h
index 9564ae1..1f7f15e 100644
--- a/drivers/net/wireless/iwlwifi/iwl-fh.h
+++ b/drivers/net/wireless/iwlwifi/iwl-fh.h
@@ -310,6 +310,7 @@
#define FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE (0x01000000)
#define FH_MEM_TFDIB_REG1_ADDR_BITSHIFT 28
+#define FH_MEM_TB_MAX_LENGTH (0x00020000)
/* TFDB Area - TFDs buffer table */
#define FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK (0xFFFFFFFF)
diff --git a/drivers/net/wireless/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
index 31a5b3f..e880f9d 100644
--- a/drivers/net/wireless/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
@@ -1004,8 +1004,13 @@
{
lockdep_assert_held(&mvm->mutex);
- /* disallow low power states when the FW is down */
- iwl_mvm_ref(mvm, IWL_MVM_REF_UCODE_DOWN);
+ /*
+ * Disallow low power states when the FW is down by taking
+ * the UCODE_DOWN ref. In case of an ongoing hw restart the
+ * ref is already taken, so don't take it again.
+ */
+ if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
+ iwl_mvm_ref(mvm, IWL_MVM_REF_UCODE_DOWN);
/* async_handlers_wk is now blocked */
@@ -1023,6 +1028,12 @@
/* the fw is stopped, the aux sta is dead: clean up driver state */
iwl_mvm_del_aux_sta(mvm);
+ /*
+ * Clear IN_HW_RESTART flag when stopping the hw (as restart_complete()
+ * won't be called in this case).
+ */
+ clear_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status);
+
mvm->ucode_loaded = false;
}
diff --git a/drivers/net/wireless/iwlwifi/pcie/drv.c b/drivers/net/wireless/iwlwifi/pcie/drv.c
index 3ee8e38..2f0c4b1 100644
--- a/drivers/net/wireless/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/iwlwifi/pcie/drv.c
@@ -367,7 +367,11 @@
/* 3165 Series */
{IWL_PCI_DEVICE(0x3165, 0x4010, iwl3165_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x3165, 0x4012, iwl3165_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x3165, 0x4110, iwl3165_2ac_cfg)},
{IWL_PCI_DEVICE(0x3165, 0x4210, iwl3165_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x3165, 0x4410, iwl3165_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x3165, 0x4510, iwl3165_2ac_cfg)},
/* 7265 Series */
{IWL_PCI_DEVICE(0x095A, 0x5010, iwl7265_2ac_cfg)},
diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c
index 5d79a1f..523fe0c 100644
--- a/drivers/net/wireless/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/iwlwifi/pcie/trans.c
@@ -614,7 +614,7 @@
{
u8 *v_addr;
dma_addr_t p_addr;
- u32 offset, chunk_sz = section->len;
+ u32 offset, chunk_sz = min_t(u32, FH_MEM_TB_MAX_LENGTH, section->len);
int ret = 0;
IWL_DEBUG_FW(trans, "[%d] uCode section being loaded...\n",
@@ -1012,16 +1012,21 @@
/* Stop the device, and put it in low power state */
iwl_pcie_apm_stop(trans);
- /* Upon stop, the APM issues an interrupt if HW RF kill is set.
- * Clean again the interrupt here
+ /* stop and reset the on-board processor */
+ iwl_write32(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
+ udelay(20);
+
+ /*
+ * Upon stop, the APM issues an interrupt if HW RF kill is set.
+ * This is a bug in certain versions of the hardware.
+ * Certain devices also keep sending the HW RF kill interrupt
+ * all the time, unless it is ACKed even when the interrupt
+ * should be masked. Re-ACK all the interrupts here.
*/
spin_lock(&trans_pcie->irq_lock);
iwl_disable_interrupts(trans);
spin_unlock(&trans_pcie->irq_lock);
- /* stop and reset the on-board processor */
- iwl_write32(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
- udelay(20);
/* clear all status bits */
clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
diff --git a/drivers/powercap/intel_rapl.c b/drivers/powercap/intel_rapl.c
index c71443c..97b5e4e 100644
--- a/drivers/powercap/intel_rapl.c
+++ b/drivers/powercap/intel_rapl.c
@@ -1041,6 +1041,7 @@
RAPL_CPU(0x45, rapl_defaults_core),/* Haswell ULT */
RAPL_CPU(0x4C, rapl_defaults_atom),/* Braswell */
RAPL_CPU(0x4A, rapl_defaults_atom),/* Tangier */
+ RAPL_CPU(0x56, rapl_defaults_core),/* Future Xeon */
RAPL_CPU(0x5A, rapl_defaults_atom),/* Annidale */
{}
};
diff --git a/drivers/regulator/s2mps11.c b/drivers/regulator/s2mps11.c
index c1444c3..2809ae0 100644
--- a/drivers/regulator/s2mps11.c
+++ b/drivers/regulator/s2mps11.c
@@ -570,7 +570,7 @@
.enable_mask = S2MPS14_ENABLE_MASK \
}
-#define regulator_desc_s2mps14_buck(num, min, step) { \
+#define regulator_desc_s2mps14_buck(num, min, step, min_sel) { \
.name = "BUCK"#num, \
.id = S2MPS14_BUCK##num, \
.ops = &s2mps14_reg_ops, \
@@ -579,7 +579,7 @@
.min_uV = min, \
.uV_step = step, \
.n_voltages = S2MPS14_BUCK_N_VOLTAGES, \
- .linear_min_sel = S2MPS14_BUCK1235_START_SEL, \
+ .linear_min_sel = min_sel, \
.ramp_delay = S2MPS14_BUCK_RAMP_DELAY, \
.vsel_reg = S2MPS14_REG_B1CTRL2 + (num - 1) * 2, \
.vsel_mask = S2MPS14_BUCK_VSEL_MASK, \
@@ -613,11 +613,16 @@
regulator_desc_s2mps14_ldo(23, MIN_800_MV, STEP_25_MV),
regulator_desc_s2mps14_ldo(24, MIN_1800_MV, STEP_25_MV),
regulator_desc_s2mps14_ldo(25, MIN_1800_MV, STEP_25_MV),
- regulator_desc_s2mps14_buck(1, MIN_600_MV, STEP_6_25_MV),
- regulator_desc_s2mps14_buck(2, MIN_600_MV, STEP_6_25_MV),
- regulator_desc_s2mps14_buck(3, MIN_600_MV, STEP_6_25_MV),
- regulator_desc_s2mps14_buck(4, MIN_1400_MV, STEP_12_5_MV),
- regulator_desc_s2mps14_buck(5, MIN_600_MV, STEP_6_25_MV),
+ regulator_desc_s2mps14_buck(1, MIN_600_MV, STEP_6_25_MV,
+ S2MPS14_BUCK1235_START_SEL),
+ regulator_desc_s2mps14_buck(2, MIN_600_MV, STEP_6_25_MV,
+ S2MPS14_BUCK1235_START_SEL),
+ regulator_desc_s2mps14_buck(3, MIN_600_MV, STEP_6_25_MV,
+ S2MPS14_BUCK1235_START_SEL),
+ regulator_desc_s2mps14_buck(4, MIN_1400_MV, STEP_12_5_MV,
+ S2MPS14_BUCK4_START_SEL),
+ regulator_desc_s2mps14_buck(5, MIN_600_MV, STEP_6_25_MV,
+ S2MPS14_BUCK1235_START_SEL),
};
static int s2mps14_pmic_enable_ext_control(struct s2mps11_info *s2mps11,
diff --git a/drivers/scsi/fnic/fnic.h b/drivers/scsi/fnic/fnic.h
index 3b73b96..26270c3 100644
--- a/drivers/scsi/fnic/fnic.h
+++ b/drivers/scsi/fnic/fnic.h
@@ -39,7 +39,7 @@
#define DRV_NAME "fnic"
#define DRV_DESCRIPTION "Cisco FCoE HBA Driver"
-#define DRV_VERSION "1.6.0.16"
+#define DRV_VERSION "1.6.0.17"
#define PFX DRV_NAME ": "
#define DFX DRV_NAME "%d: "
diff --git a/drivers/scsi/fnic/fnic_scsi.c b/drivers/scsi/fnic/fnic_scsi.c
index 2097de4..155b286 100644
--- a/drivers/scsi/fnic/fnic_scsi.c
+++ b/drivers/scsi/fnic/fnic_scsi.c
@@ -1892,6 +1892,21 @@
goto fnic_abort_cmd_end;
}
+ /* IO out of order */
+
+ if (!(CMD_FLAGS(sc) & (FNIC_IO_ABORTED | FNIC_IO_DONE))) {
+ spin_unlock_irqrestore(io_lock, flags);
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
+ "Issuing Host reset due to out of order IO\n");
+
+ if (fnic_host_reset(sc) == FAILED) {
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
+ "fnic_host_reset failed.\n");
+ }
+ ret = FAILED;
+ goto fnic_abort_cmd_end;
+ }
+
CMD_STATE(sc) = FNIC_IOREQ_ABTS_COMPLETE;
/*
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index e42fff6..8afb016 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -1041,7 +1041,7 @@
}
/* signal not to enter either branch of the if () below */
timeleft = 0;
- rtn = NEEDS_RETRY;
+ rtn = FAILED;
} else {
timeleft = wait_for_completion_timeout(&done, timeout);
rtn = SUCCESS;
@@ -1081,7 +1081,7 @@
rtn = FAILED;
break;
}
- } else if (!rtn) {
+ } else if (rtn != FAILED) {
scsi_abort_eh_cmnd(scmd);
rtn = FAILED;
}
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index fedab3c..3995169 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -2623,8 +2623,9 @@
sd_config_discard(sdkp, SD_LBP_WS16);
} else { /* LBP VPD page tells us what to use */
-
- if (sdkp->lbpws)
+ if (sdkp->lbpu && sdkp->max_unmap_blocks && !sdkp->lbprz)
+ sd_config_discard(sdkp, SD_LBP_UNMAP);
+ else if (sdkp->lbpws)
sd_config_discard(sdkp, SD_LBP_WS16);
else if (sdkp->lbpws10)
sd_config_discard(sdkp, SD_LBP_WS10);
diff --git a/drivers/spi/spi-img-spfi.c b/drivers/spi/spi-img-spfi.c
index b410499..aad6683 100644
--- a/drivers/spi/spi-img-spfi.c
+++ b/drivers/spi/spi-img-spfi.c
@@ -341,7 +341,7 @@
default:
rxconf.src_addr = spfi->phys + SPFI_RX_8BIT_VALID_DATA;
rxconf.src_addr_width = 1;
- rxconf.src_maxburst = 1;
+ rxconf.src_maxburst = 4;
}
dmaengine_slave_config(spfi->rx_ch, &rxconf);
@@ -368,7 +368,7 @@
default:
txconf.dst_addr = spfi->phys + SPFI_TX_8BIT_VALID_DATA;
txconf.dst_addr_width = 1;
- txconf.dst_maxburst = 1;
+ txconf.dst_maxburst = 4;
break;
}
dmaengine_slave_config(spfi->tx_ch, &txconf);
@@ -390,14 +390,14 @@
dma_async_issue_pending(spfi->rx_ch);
}
+ spfi_start(spfi);
+
if (xfer->tx_buf) {
spfi->tx_dma_busy = true;
dmaengine_submit(txdesc);
dma_async_issue_pending(spfi->tx_ch);
}
- spfi_start(spfi);
-
return 1;
stop_dma:
diff --git a/drivers/spi/spi-sh-msiof.c b/drivers/spi/spi-sh-msiof.c
index 239be7c..96a5fc0 100644
--- a/drivers/spi/spi-sh-msiof.c
+++ b/drivers/spi/spi-sh-msiof.c
@@ -480,6 +480,8 @@
struct device_node *np = spi->master->dev.of_node;
struct sh_msiof_spi_priv *p = spi_master_get_devdata(spi->master);
+ pm_runtime_get_sync(&p->pdev->dev);
+
if (!np) {
/*
* Use spi->controller_data for CS (same strategy as spi_gpio),
@@ -498,6 +500,9 @@
if (spi->cs_gpio >= 0)
gpio_set_value(spi->cs_gpio, !(spi->mode & SPI_CS_HIGH));
+
+ pm_runtime_put_sync(&p->pdev->dev);
+
return 0;
}
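The pattern above guarantees the controller is powered while setup() touches the chip-select configuration. A minimal sketch of the bracketing, with a hypothetical device pointer rather than the msiof-specific plumbing:

    #include <linux/pm_runtime.h>

    static int example_setup(struct device *dev)
    {
            pm_runtime_get_sync(dev);  /* resume device (or bump usage count) */
            /* ... program chip-select polarity / registers here ... */
            pm_runtime_put_sync(dev);  /* allow it to suspend again */
            return 0;
    }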
diff --git a/drivers/staging/lustre/lustre/llite/namei.c b/drivers/staging/lustre/lustre/llite/namei.c
index 1bf891b..4f361b7 100644
--- a/drivers/staging/lustre/lustre/llite/namei.c
+++ b/drivers/staging/lustre/lustre/llite/namei.c
@@ -264,7 +264,7 @@
if ((bits & (MDS_INODELOCK_LOOKUP | MDS_INODELOCK_PERM)) &&
inode->i_sb->s_root != NULL &&
- is_root_inode(inode))
+ !is_root_inode(inode))
ll_invalidate_aliases(inode);
iput(inode);
diff --git a/drivers/thermal/cpu_cooling.c b/drivers/thermal/cpu_cooling.c
index ad09e51..f65f0d1 100644
--- a/drivers/thermal/cpu_cooling.c
+++ b/drivers/thermal/cpu_cooling.c
@@ -4,6 +4,8 @@
* Copyright (C) 2012 Samsung Electronics Co., Ltd(http://www.samsung.com)
* Copyright (C) 2012 Amit Daniel <amit.kachhap@linaro.org>
*
+ * Copyright (C) 2014 Viresh Kumar <viresh.kumar@linaro.org>
+ *
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -28,6 +30,20 @@
#include <linux/cpu.h>
#include <linux/cpu_cooling.h>
+/*
+ * Cooling state <-> CPUFreq frequency
+ *
+ * Cooling states are translated to frequencies throughout this driver and this
+ * is the relation between them.
+ *
+ * Highest cooling state corresponds to lowest possible frequency.
+ *
+ * i.e.
+ * level 0 --> 1st Max Freq
+ * level 1 --> 2nd Max Freq
+ * ...
+ */
+
/**
* struct cpufreq_cooling_device - data for cooling device with cpufreq
* @id: unique integer value corresponding to each cpufreq_cooling_device
@@ -38,25 +54,27 @@
* cooling devices.
* @cpufreq_val: integer value representing the absolute value of the clipped
* frequency.
+ * @max_level: maximum cooling level. One less than total number of valid
+ * cpufreq frequencies.
* @allowed_cpus: all the cpus involved for this cpufreq_cooling_device.
+ * @node: list_head to link all cpufreq_cooling_device together.
*
- * This structure is required for keeping information of each
- * cpufreq_cooling_device registered. In order to prevent corruption of this a
- * mutex lock cooling_cpufreq_lock is used.
+ * This structure is required for keeping information of each registered
+ * cpufreq_cooling_device.
*/
struct cpufreq_cooling_device {
int id;
struct thermal_cooling_device *cool_dev;
unsigned int cpufreq_state;
unsigned int cpufreq_val;
+ unsigned int max_level;
+ unsigned int *freq_table; /* In descending order */
struct cpumask allowed_cpus;
struct list_head node;
};
static DEFINE_IDR(cpufreq_idr);
static DEFINE_MUTEX(cooling_cpufreq_lock);
-static unsigned int cpufreq_dev_count;
-
static LIST_HEAD(cpufreq_dev_list);
/**
@@ -98,120 +116,30 @@
/* The code below defines the functions used for cpufreq as a cooling device */
/**
- * is_cpufreq_valid - function to check frequency transitioning capability.
- * @cpu: cpu for which check is needed.
+ * get_level: Find the level for a particular frequency
+ * @cpufreq_dev: cpufreq_dev for which the property is required
+ * @freq: Frequency
*
- * This function will check the current state of the system if
- * it is capable of changing the frequency for a given @cpu.
- *
- * Return: 0 if the system is not currently capable of changing
- * the frequency of given cpu. !0 in case the frequency is changeable.
+ * Return: level on success, THERMAL_CSTATE_INVALID on error.
*/
-static int is_cpufreq_valid(int cpu)
+static unsigned long get_level(struct cpufreq_cooling_device *cpufreq_dev,
+ unsigned int freq)
{
- struct cpufreq_policy policy;
+ unsigned long level;
- return !cpufreq_get_policy(&policy, cpu);
-}
+ for (level = 0; level <= cpufreq_dev->max_level; level++) {
+ if (freq == cpufreq_dev->freq_table[level])
+ return level;
-enum cpufreq_cooling_property {
- GET_LEVEL,
- GET_FREQ,
- GET_MAXL,
-};
-
-/**
- * get_property - fetch a property of interest for a give cpu.
- * @cpu: cpu for which the property is required
- * @input: query parameter
- * @output: query return
- * @property: type of query (frequency, level, max level)
- *
- * This is the common function to
- * 1. get maximum cpu cooling states
- * 2. translate frequency to cooling state
- * 3. translate cooling state to frequency
- * Note that the code may be not in good shape
- * but it is written in this way in order to:
- * a) reduce duplicate code as most of the code can be shared.
- * b) make sure the logic is consistent when translating between
- * cooling states and frequencies.
- *
- * Return: 0 on success, -EINVAL when invalid parameters are passed.
- */
-static int get_property(unsigned int cpu, unsigned long input,
- unsigned int *output,
- enum cpufreq_cooling_property property)
-{
- int i;
- unsigned long max_level = 0, level = 0;
- unsigned int freq = CPUFREQ_ENTRY_INVALID;
- int descend = -1;
- struct cpufreq_frequency_table *pos, *table =
- cpufreq_frequency_get_table(cpu);
-
- if (!output)
- return -EINVAL;
-
- if (!table)
- return -EINVAL;
-
- cpufreq_for_each_valid_entry(pos, table) {
- /* ignore duplicate entry */
- if (freq == pos->frequency)
- continue;
-
- /* get the frequency order */
- if (freq != CPUFREQ_ENTRY_INVALID && descend == -1)
- descend = freq > pos->frequency;
-
- freq = pos->frequency;
- max_level++;
+ if (freq > cpufreq_dev->freq_table[level])
+ break;
}
- /* No valid cpu frequency entry */
- if (max_level == 0)
- return -EINVAL;
-
- /* max_level is an index, not a counter */
- max_level--;
-
- /* get max level */
- if (property == GET_MAXL) {
- *output = (unsigned int)max_level;
- return 0;
- }
-
- if (property == GET_FREQ)
- level = descend ? input : (max_level - input);
-
- i = 0;
- cpufreq_for_each_valid_entry(pos, table) {
- /* ignore duplicate entry */
- if (freq == pos->frequency)
- continue;
-
- /* now we have a valid frequency entry */
- freq = pos->frequency;
-
- if (property == GET_LEVEL && (unsigned int)input == freq) {
- /* get level by frequency */
- *output = descend ? i : (max_level - i);
- return 0;
- }
- if (property == GET_FREQ && level == i) {
- /* get frequency by level */
- *output = freq;
- return 0;
- }
- i++;
- }
-
- return -EINVAL;
+ return THERMAL_CSTATE_INVALID;
}
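get_level() depends on freq_table being sorted in descending order: once the probe frequency exceeds the current entry it can never match a later (smaller) one, hence the early break. A worked example with hypothetical values:

    /* freq_table = { 1600000, 1200000, 800000 } kHz, max_level = 2:
     *   get_level(dev, 1200000) -> 1
     *   get_level(dev, 1000000) -> THERMAL_CSTATE_INVALID
     *     (breaks at level 2, since 1000000 > 800000 cannot match later)
     */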
/**
- * cpufreq_cooling_get_level - for a give cpu, return the cooling level.
+ * cpufreq_cooling_get_level - for a given cpu, return the cooling level.
* @cpu: cpu for which the level is required
* @freq: the frequency of interest
*
@@ -223,79 +151,23 @@
*/
unsigned long cpufreq_cooling_get_level(unsigned int cpu, unsigned int freq)
{
- unsigned int val;
+ struct cpufreq_cooling_device *cpufreq_dev;
- if (get_property(cpu, (unsigned long)freq, &val, GET_LEVEL))
- return THERMAL_CSTATE_INVALID;
+ mutex_lock(&cooling_cpufreq_lock);
+ list_for_each_entry(cpufreq_dev, &cpufreq_dev_list, node) {
+ if (cpumask_test_cpu(cpu, &cpufreq_dev->allowed_cpus)) {
+ mutex_unlock(&cooling_cpufreq_lock);
+ return get_level(cpufreq_dev, freq);
+ }
+ }
+ mutex_unlock(&cooling_cpufreq_lock);
- return (unsigned long)val;
+ pr_err("%s: cpu:%d not part of any cooling device\n", __func__, cpu);
+ return THERMAL_CSTATE_INVALID;
}
EXPORT_SYMBOL_GPL(cpufreq_cooling_get_level);
/**
- * get_cpu_frequency - get the absolute value of frequency from level.
- * @cpu: cpu for which frequency is fetched.
- * @level: cooling level
- *
- * This function matches cooling level with frequency. Based on a cooling level
- * of frequency, equals cooling state of cpu cooling device, it will return
- * the corresponding frequency.
- * e.g level=0 --> 1st MAX FREQ, level=1 ---> 2nd MAX FREQ, .... etc
- *
- * Return: 0 on error, the corresponding frequency otherwise.
- */
-static unsigned int get_cpu_frequency(unsigned int cpu, unsigned long level)
-{
- int ret = 0;
- unsigned int freq;
-
- ret = get_property(cpu, level, &freq, GET_FREQ);
- if (ret)
- return 0;
-
- return freq;
-}
-
-/**
- * cpufreq_apply_cooling - function to apply frequency clipping.
- * @cpufreq_device: cpufreq_cooling_device pointer containing frequency
- * clipping data.
- * @cooling_state: value of the cooling state.
- *
- * Function used to make sure the cpufreq layer is aware of current thermal
- * limits. The limits are applied by updating the cpufreq policy.
- *
- * Return: 0 on success, an error code otherwise (-EINVAL in case wrong
- * cooling state).
- */
-static int cpufreq_apply_cooling(struct cpufreq_cooling_device *cpufreq_device,
- unsigned long cooling_state)
-{
- unsigned int cpuid, clip_freq;
- struct cpumask *mask = &cpufreq_device->allowed_cpus;
- unsigned int cpu = cpumask_any(mask);
-
-
- /* Check if the old cooling action is same as new cooling action */
- if (cpufreq_device->cpufreq_state == cooling_state)
- return 0;
-
- clip_freq = get_cpu_frequency(cpu, cooling_state);
- if (!clip_freq)
- return -EINVAL;
-
- cpufreq_device->cpufreq_state = cooling_state;
- cpufreq_device->cpufreq_val = clip_freq;
-
- for_each_cpu(cpuid, mask) {
- if (is_cpufreq_valid(cpuid))
- cpufreq_update_policy(cpuid);
- }
-
- return 0;
-}
-
-/**
* cpufreq_thermal_notifier - notifier callback for cpufreq policy change.
* @nb: struct notifier_block * with callback info.
* @event: value showing cpufreq event for which this function invoked.
@@ -323,11 +195,6 @@
&cpufreq_dev->allowed_cpus))
continue;
- if (!cpufreq_dev->cpufreq_val)
- cpufreq_dev->cpufreq_val = get_cpu_frequency(
- cpumask_any(&cpufreq_dev->allowed_cpus),
- cpufreq_dev->cpufreq_state);
-
max_freq = cpufreq_dev->cpufreq_val;
if (policy->max != max_freq)
@@ -354,19 +221,9 @@
unsigned long *state)
{
struct cpufreq_cooling_device *cpufreq_device = cdev->devdata;
- struct cpumask *mask = &cpufreq_device->allowed_cpus;
- unsigned int cpu;
- unsigned int count = 0;
- int ret;
- cpu = cpumask_any(mask);
-
- ret = get_property(cpu, 0, &count, GET_MAXL);
-
- if (count > 0)
- *state = count;
-
- return ret;
+ *state = cpufreq_device->max_level;
+ return 0;
}
/**
@@ -403,8 +260,24 @@
unsigned long state)
{
struct cpufreq_cooling_device *cpufreq_device = cdev->devdata;
+ unsigned int cpu = cpumask_any(&cpufreq_device->allowed_cpus);
+ unsigned int clip_freq;
- return cpufreq_apply_cooling(cpufreq_device, state);
+ /* Requested state must not exceed max_level */
+ if (WARN_ON(state > cpufreq_device->max_level))
+ return -EINVAL;
+
+ /* Check if the old cooling action is same as new cooling action */
+ if (cpufreq_device->cpufreq_state == state)
+ return 0;
+
+ clip_freq = cpufreq_device->freq_table[state];
+ cpufreq_device->cpufreq_state = state;
+ cpufreq_device->cpufreq_val = clip_freq;
+
+ cpufreq_update_policy(cpu);
+
+ return 0;
}
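set_cur_state() only records the clipped frequency and kicks cpufreq_update_policy(); the actual clamping is left to the CPUFREQ_POLICY_NOTIFIER registered below. A sketch of the assumed notifier side, matching the hunk above that reads cpufreq_dev->cpufreq_val (cpufreq_verify_within_limits() is the standard cpufreq API for this):

    if (policy->max != cpufreq_dev->cpufreq_val)
            cpufreq_verify_within_limits(policy, 0,
                                         cpufreq_dev->cpufreq_val);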
/* Bind cpufreq callbacks to thermal cooling device ops */
@@ -419,10 +292,25 @@
.notifier_call = cpufreq_thermal_notifier,
};
+static unsigned int find_next_max(struct cpufreq_frequency_table *table,
+ unsigned int prev_max)
+{
+ struct cpufreq_frequency_table *pos;
+ unsigned int max = 0;
+
+ cpufreq_for_each_valid_entry(pos, table) {
+ if (pos->frequency > max && pos->frequency < prev_max)
+ max = pos->frequency;
+ }
+
+ return max;
+}
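Calling find_next_max() repeatedly walks the table in strictly descending order; starting from (unsigned int)-1 (i.e. UINT_MAX) yields the absolute maximum first. Each call is O(n), so the whole fill is O(n^2), which is harmless for the handful of entries a cpufreq table holds. Usage sketch mirroring the fill loop later in this patch:

    unsigned int freq = -1;         /* UINT_MAX: no upper bound yet */
    for (i = 0; i <= max_level; i++) {
            freq = find_next_max(table, freq);
            freq_table[i] = freq;   /* 0 means duplicates inflated max_level */
    }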
+
/**
* __cpufreq_cooling_register - helper function to create cpufreq cooling device
* @np: a valid struct device_node to the cooling device device tree node
* @clip_cpus: cpumask of cpus where the frequency constraints will happen.
+ * Normally this should be the same as cpufreq policy->related_cpus.
*
* This interface function registers the cpufreq cooling device with the name
* "thermal-cpufreq-%x". This api can support multiple instances of cpufreq
@@ -437,37 +325,42 @@
const struct cpumask *clip_cpus)
{
struct thermal_cooling_device *cool_dev;
- struct cpufreq_cooling_device *cpufreq_dev = NULL;
- unsigned int min = 0, max = 0;
+ struct cpufreq_cooling_device *cpufreq_dev;
char dev_name[THERMAL_NAME_LENGTH];
- int ret = 0, i;
- struct cpufreq_policy policy;
+ struct cpufreq_frequency_table *pos, *table;
+ unsigned int freq, i;
+ int ret;
- /* Verify that all the clip cpus have same freq_min, freq_max limit */
- for_each_cpu(i, clip_cpus) {
- /* continue if cpufreq policy not found and not return error */
- if (!cpufreq_get_policy(&policy, i))
- continue;
- if (min == 0 && max == 0) {
- min = policy.cpuinfo.min_freq;
- max = policy.cpuinfo.max_freq;
- } else {
- if (min != policy.cpuinfo.min_freq ||
- max != policy.cpuinfo.max_freq)
- return ERR_PTR(-EINVAL);
- }
+ table = cpufreq_frequency_get_table(cpumask_first(clip_cpus));
+ if (!table) {
+ pr_debug("%s: CPUFreq table not found\n", __func__);
+ return ERR_PTR(-EPROBE_DEFER);
}
- cpufreq_dev = kzalloc(sizeof(struct cpufreq_cooling_device),
- GFP_KERNEL);
+
+ cpufreq_dev = kzalloc(sizeof(*cpufreq_dev), GFP_KERNEL);
if (!cpufreq_dev)
return ERR_PTR(-ENOMEM);
+ /* Count the number of valid frequency entries */
+ cpufreq_for_each_valid_entry(pos, table)
+ cpufreq_dev->max_level++;
+
+ cpufreq_dev->freq_table = kmalloc(sizeof(*cpufreq_dev->freq_table) *
+ cpufreq_dev->max_level, GFP_KERNEL);
+ if (!cpufreq_dev->freq_table) {
+ cool_dev = ERR_PTR(-ENOMEM);
+ goto free_cdev;
+ }
+
+ /* max_level is an index, not a counter */
+ cpufreq_dev->max_level--;
+
cpumask_copy(&cpufreq_dev->allowed_cpus, clip_cpus);
ret = get_idr(&cpufreq_idr, &cpufreq_dev->id);
if (ret) {
- kfree(cpufreq_dev);
- return ERR_PTR(-EINVAL);
+ cool_dev = ERR_PTR(ret);
+ goto free_table;
}
snprintf(dev_name, sizeof(dev_name), "thermal-cpufreq-%d",
@@ -475,25 +368,44 @@
cool_dev = thermal_of_cooling_device_register(np, dev_name, cpufreq_dev,
&cpufreq_cooling_ops);
- if (IS_ERR(cool_dev)) {
- release_idr(&cpufreq_idr, cpufreq_dev->id);
- kfree(cpufreq_dev);
- return cool_dev;
+ if (IS_ERR(cool_dev))
+ goto remove_idr;
+
+ /* Fill freq-table in descending order of frequencies */
+ for (i = 0, freq = -1; i <= cpufreq_dev->max_level; i++) {
+ freq = find_next_max(table, freq);
+ cpufreq_dev->freq_table[i] = freq;
+
+ /* Warn for duplicate entries */
+ if (!freq)
+ pr_warn("%s: table has duplicate entries\n", __func__);
+ else
+ pr_debug("%s: freq:%u KHz\n", __func__, freq);
}
+
+ cpufreq_dev->cpufreq_val = cpufreq_dev->freq_table[0];
cpufreq_dev->cool_dev = cool_dev;
- cpufreq_dev->cpufreq_state = 0;
+
mutex_lock(&cooling_cpufreq_lock);
/* Register the notifier for first cpufreq cooling device */
- if (cpufreq_dev_count == 0)
+ if (list_empty(&cpufreq_dev_list))
cpufreq_register_notifier(&thermal_cpufreq_notifier_block,
CPUFREQ_POLICY_NOTIFIER);
- cpufreq_dev_count++;
list_add(&cpufreq_dev->node, &cpufreq_dev_list);
mutex_unlock(&cooling_cpufreq_lock);
return cool_dev;
+
+remove_idr:
+ release_idr(&cpufreq_idr, cpufreq_dev->id);
+free_table:
+ kfree(cpufreq_dev->freq_table);
+free_cdev:
+ kfree(cpufreq_dev);
+
+ return cool_dev;
}
/**
@@ -554,16 +466,16 @@
cpufreq_dev = cdev->devdata;
mutex_lock(&cooling_cpufreq_lock);
list_del(&cpufreq_dev->node);
- cpufreq_dev_count--;
/* Unregister the notifier for the last cpufreq cooling device */
- if (cpufreq_dev_count == 0)
+ if (list_empty(&cpufreq_dev_list))
cpufreq_unregister_notifier(&thermal_cpufreq_notifier_block,
CPUFREQ_POLICY_NOTIFIER);
mutex_unlock(&cooling_cpufreq_lock);
thermal_cooling_device_unregister(cpufreq_dev->cool_dev);
release_idr(&cpufreq_idr, cpufreq_dev->id);
+ kfree(cpufreq_dev->freq_table);
kfree(cpufreq_dev);
}
EXPORT_SYMBOL_GPL(cpufreq_cooling_unregister);
diff --git a/drivers/thermal/db8500_cpufreq_cooling.c b/drivers/thermal/db8500_cpufreq_cooling.c
index 000d53e..607b62c 100644
--- a/drivers/thermal/db8500_cpufreq_cooling.c
+++ b/drivers/thermal/db8500_cpufreq_cooling.c
@@ -18,7 +18,6 @@
*/
#include <linux/cpu_cooling.h>
-#include <linux/cpufreq.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/of.h>
@@ -28,18 +27,17 @@
static int db8500_cpufreq_cooling_probe(struct platform_device *pdev)
{
struct thermal_cooling_device *cdev;
- struct cpumask mask_val;
- /* make sure cpufreq driver has been initialized */
- if (!cpufreq_frequency_get_table(0))
- return -EPROBE_DEFER;
-
- cpumask_set_cpu(0, &mask_val);
- cdev = cpufreq_cooling_register(&mask_val);
-
+ cdev = cpufreq_cooling_register(cpu_present_mask);
if (IS_ERR(cdev)) {
- dev_err(&pdev->dev, "Failed to register cooling device\n");
- return PTR_ERR(cdev);
+ int ret = PTR_ERR(cdev);
+
+ if (ret != -EPROBE_DEFER)
+ dev_err(&pdev->dev,
+ "Failed to register cooling device %d\n",
+ ret);
+
+ return ret;
}
platform_set_drvdata(pdev, cdev);
diff --git a/drivers/thermal/imx_thermal.c b/drivers/thermal/imx_thermal.c
index 88b32f9..c1188ac 100644
--- a/drivers/thermal/imx_thermal.c
+++ b/drivers/thermal/imx_thermal.c
@@ -9,7 +9,6 @@
#include <linux/clk.h>
#include <linux/cpu_cooling.h>
-#include <linux/cpufreq.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/init.h>
@@ -454,15 +453,10 @@
const struct of_device_id *of_id =
of_match_device(of_imx_thermal_match, &pdev->dev);
struct imx_thermal_data *data;
- struct cpumask clip_cpus;
struct regmap *map;
int measure_freq;
int ret;
- if (!cpufreq_get_current_driver()) {
- dev_dbg(&pdev->dev, "no cpufreq driver!");
- return -EPROBE_DEFER;
- }
data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
if (!data)
return -ENOMEM;
@@ -516,12 +510,13 @@
regmap_write(map, MISC0 + REG_SET, MISC0_REFTOP_SELBIASOFF);
regmap_write(map, TEMPSENSE0 + REG_SET, TEMPSENSE0_POWER_DOWN);
- cpumask_set_cpu(0, &clip_cpus);
- data->cdev = cpufreq_cooling_register(&clip_cpus);
+ data->cdev = cpufreq_cooling_register(cpu_present_mask);
if (IS_ERR(data->cdev)) {
ret = PTR_ERR(data->cdev);
- dev_err(&pdev->dev,
- "failed to register cpufreq cooling device: %d\n", ret);
+ if (ret != -EPROBE_DEFER)
+ dev_err(&pdev->dev,
+ "failed to register cpufreq cooling device: %d\n",
+ ret);
return ret;
}
diff --git a/drivers/thermal/int340x_thermal/Makefile b/drivers/thermal/int340x_thermal/Makefile
index ffe40bf..d441369 100644
--- a/drivers/thermal/int340x_thermal/Makefile
+++ b/drivers/thermal/int340x_thermal/Makefile
@@ -1,4 +1,5 @@
obj-$(CONFIG_INT340X_THERMAL) += int3400_thermal.o
obj-$(CONFIG_INT340X_THERMAL) += int3402_thermal.o
obj-$(CONFIG_INT340X_THERMAL) += int3403_thermal.o
+obj-$(CONFIG_INT340X_THERMAL) += processor_thermal_device.o
obj-$(CONFIG_ACPI_THERMAL_REL) += acpi_thermal_rel.o
diff --git a/drivers/thermal/int340x_thermal/acpi_thermal_rel.c b/drivers/thermal/int340x_thermal/acpi_thermal_rel.c
index e4e61b3..231cabc 100644
--- a/drivers/thermal/int340x_thermal/acpi_thermal_rel.c
+++ b/drivers/thermal/int340x_thermal/acpi_thermal_rel.c
@@ -82,7 +82,7 @@
struct acpi_buffer trt_format = { sizeof("RRNNNNNN"), "RRNNNNNN" };
if (!acpi_has_method(handle, "_TRT"))
- return 0;
+ return -ENODEV;
status = acpi_evaluate_object(handle, "_TRT", NULL, &buffer);
if (ACPI_FAILURE(status))
@@ -167,7 +167,7 @@
sizeof("RRNNNNNNNNNNN"), "RRNNNNNNNNNNN" };
if (!acpi_has_method(handle, "_ART"))
- return 0;
+ return -ENODEV;
status = acpi_evaluate_object(handle, "_ART", NULL, &buffer);
if (ACPI_FAILURE(status))
@@ -321,8 +321,8 @@
unsigned long length = 0;
int count = 0;
char __user *arg = (void __user *)__arg;
- struct trt *trts;
- struct art *arts;
+ struct trt *trts = NULL;
+ struct art *arts = NULL;
switch (cmd) {
case ACPI_THERMAL_GET_TRT_COUNT:
diff --git a/drivers/thermal/int340x_thermal/int3400_thermal.c b/drivers/thermal/int340x_thermal/int3400_thermal.c
index dcb306e..65a98a9 100644
--- a/drivers/thermal/int340x_thermal/int3400_thermal.c
+++ b/drivers/thermal/int340x_thermal/int3400_thermal.c
@@ -335,7 +335,6 @@
.remove = int3400_thermal_remove,
.driver = {
.name = "int3400 thermal",
- .owner = THIS_MODULE,
.acpi_match_table = ACPI_PTR(int3400_thermal_match),
},
};
diff --git a/drivers/thermal/int340x_thermal/int3402_thermal.c b/drivers/thermal/int340x_thermal/int3402_thermal.c
index a5d08c1..c5cbc3a 100644
--- a/drivers/thermal/int340x_thermal/int3402_thermal.c
+++ b/drivers/thermal/int340x_thermal/int3402_thermal.c
@@ -231,7 +231,6 @@
.remove = int3402_thermal_remove,
.driver = {
.name = "int3402 thermal",
- .owner = THIS_MODULE,
.acpi_match_table = int3402_thermal_match,
},
};
diff --git a/drivers/thermal/int340x_thermal/int3403_thermal.c b/drivers/thermal/int340x_thermal/int3403_thermal.c
index 1bfa6a6..0faf500 100644
--- a/drivers/thermal/int340x_thermal/int3403_thermal.c
+++ b/drivers/thermal/int340x_thermal/int3403_thermal.c
@@ -301,6 +301,8 @@
{
struct int3403_sensor *obj = priv->priv;
+ acpi_remove_notify_handler(priv->adev->handle,
+ ACPI_DEVICE_NOTIFY, int3403_notify);
thermal_zone_device_unregister(obj->tzone);
return 0;
}
@@ -369,6 +371,7 @@
p = buf.pointer;
if (!p || (p->type != ACPI_TYPE_PACKAGE)) {
printk(KERN_WARNING "Invalid PPSS data\n");
+ kfree(buf.pointer);
return -EFAULT;
}
@@ -381,6 +384,7 @@
priv->priv = obj;
+ kfree(buf.pointer);
/* TODO: add ACPI notification support */
return result;
diff --git a/drivers/thermal/int340x_thermal/processor_thermal_device.c b/drivers/thermal/int340x_thermal/processor_thermal_device.c
new file mode 100644
index 0000000..31bb553
--- /dev/null
+++ b/drivers/thermal/int340x_thermal/processor_thermal_device.c
@@ -0,0 +1,309 @@
+/*
+ * processor_thermal_device.c
+ * Copyright (c) 2014, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/acpi.h>
+
+/* Broadwell-U/HSB thermal reporting device */
+#define PCI_DEVICE_ID_PROC_BDW_THERMAL 0x1603
+#define PCI_DEVICE_ID_PROC_HSB_THERMAL 0x0A03
+
+/* Braswell thermal reporting device */
+#define PCI_DEVICE_ID_PROC_BSW_THERMAL 0x22DC
+
+struct power_config {
+ u32 index;
+ u32 min_uw;
+ u32 max_uw;
+ u32 tmin_us;
+ u32 tmax_us;
+ u32 step_uw;
+};
+
+struct proc_thermal_device {
+ struct device *dev;
+ struct acpi_device *adev;
+ struct power_config power_limits[2];
+};
+
+enum proc_thermal_emum_mode_type {
+ PROC_THERMAL_NONE,
+ PROC_THERMAL_PCI,
+ PROC_THERMAL_PLATFORM_DEV
+};
+
+/*
+ * We can have only one type of enumeration, PCI or Platform,
+ * not both, so we don't need instance-specific data.
+ */
+static enum proc_thermal_emum_mode_type proc_thermal_emum_mode =
+ PROC_THERMAL_NONE;
+
+#define POWER_LIMIT_SHOW(index, suffix) \
+static ssize_t power_limit_##index##_##suffix##_show(struct device *dev, \
+ struct device_attribute *attr, \
+ char *buf) \
+{ \
+ struct pci_dev *pci_dev; \
+ struct platform_device *pdev; \
+ struct proc_thermal_device *proc_dev; \
+\
+ if (proc_thermal_emum_mode == PROC_THERMAL_PLATFORM_DEV) { \
+ pdev = to_platform_device(dev); \
+ proc_dev = platform_get_drvdata(pdev); \
+ } else { \
+ pci_dev = to_pci_dev(dev); \
+ proc_dev = pci_get_drvdata(pci_dev); \
+ } \
+ return sprintf(buf, "%lu\n",\
+ (unsigned long)proc_dev->power_limits[index].suffix * 1000); \
+}
+
+POWER_LIMIT_SHOW(0, min_uw)
+POWER_LIMIT_SHOW(0, max_uw)
+POWER_LIMIT_SHOW(0, step_uw)
+POWER_LIMIT_SHOW(0, tmin_us)
+POWER_LIMIT_SHOW(0, tmax_us)
+
+POWER_LIMIT_SHOW(1, min_uw)
+POWER_LIMIT_SHOW(1, max_uw)
+POWER_LIMIT_SHOW(1, step_uw)
+POWER_LIMIT_SHOW(1, tmin_us)
+POWER_LIMIT_SHOW(1, tmax_us)
+
+static DEVICE_ATTR_RO(power_limit_0_min_uw);
+static DEVICE_ATTR_RO(power_limit_0_max_uw);
+static DEVICE_ATTR_RO(power_limit_0_step_uw);
+static DEVICE_ATTR_RO(power_limit_0_tmin_us);
+static DEVICE_ATTR_RO(power_limit_0_tmax_us);
+
+static DEVICE_ATTR_RO(power_limit_1_min_uw);
+static DEVICE_ATTR_RO(power_limit_1_max_uw);
+static DEVICE_ATTR_RO(power_limit_1_step_uw);
+static DEVICE_ATTR_RO(power_limit_1_tmin_us);
+static DEVICE_ATTR_RO(power_limit_1_tmax_us);
+
+static struct attribute *power_limit_attrs[] = {
+ &dev_attr_power_limit_0_min_uw.attr,
+ &dev_attr_power_limit_1_min_uw.attr,
+ &dev_attr_power_limit_0_max_uw.attr,
+ &dev_attr_power_limit_1_max_uw.attr,
+ &dev_attr_power_limit_0_step_uw.attr,
+ &dev_attr_power_limit_1_step_uw.attr,
+ &dev_attr_power_limit_0_tmin_us.attr,
+ &dev_attr_power_limit_1_tmin_us.attr,
+ &dev_attr_power_limit_0_tmax_us.attr,
+ &dev_attr_power_limit_1_tmax_us.attr,
+ NULL
+};
+
+static struct attribute_group power_limit_attribute_group = {
+ .attrs = power_limit_attrs,
+ .name = "power_limits"
+};
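Since the attribute group carries .name = "power_limits", sysfs_create_group() puts the ten read-only files in a subdirectory of that name under the device. Expected layout for a PCI-enumerated device (the bus address is illustrative):

    /sys/bus/pci/devices/0000:00:04.0/power_limits/power_limit_0_min_uw
    /sys/bus/pci/devices/0000:00:04.0/power_limits/power_limit_0_max_uw
    ...
    /sys/bus/pci/devices/0000:00:04.0/power_limits/power_limit_1_tmax_us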
+
+static int proc_thermal_add(struct device *dev,
+ struct proc_thermal_device **priv)
+{
+ struct proc_thermal_device *proc_priv;
+ struct acpi_device *adev;
+ acpi_status status;
+ struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };
+ union acpi_object *elements, *ppcc;
+ union acpi_object *p;
+ int i;
+ int ret;
+
+ adev = ACPI_COMPANION(dev);
+
+ status = acpi_evaluate_object(adev->handle, "PPCC", NULL, &buf);
+ if (ACPI_FAILURE(status))
+ return -ENODEV;
+
+ p = buf.pointer;
+ if (!p || (p->type != ACPI_TYPE_PACKAGE)) {
+ dev_err(dev, "Invalid PPCC data\n");
+ ret = -EFAULT;
+ goto free_buffer;
+ }
+ if (!p->package.count) {
+ dev_err(dev, "Invalid PPCC package size\n");
+ ret = -EFAULT;
+ goto free_buffer;
+ }
+
+ proc_priv = devm_kzalloc(dev, sizeof(*proc_priv), GFP_KERNEL);
+ if (!proc_priv) {
+ ret = -ENOMEM;
+ goto free_buffer;
+ }
+
+ proc_priv->dev = dev;
+ proc_priv->adev = adev;
+
+ for (i = 0; i < min((int)p->package.count - 1, 2); ++i) {
+ elements = &(p->package.elements[i+1]);
+ if (elements->type != ACPI_TYPE_PACKAGE ||
+ elements->package.count != 6) {
+ ret = -EFAULT;
+ goto free_buffer;
+ }
+ ppcc = elements->package.elements;
+ proc_priv->power_limits[i].index = ppcc[0].integer.value;
+ proc_priv->power_limits[i].min_uw = ppcc[1].integer.value;
+ proc_priv->power_limits[i].max_uw = ppcc[2].integer.value;
+ proc_priv->power_limits[i].tmin_us = ppcc[3].integer.value;
+ proc_priv->power_limits[i].tmax_us = ppcc[4].integer.value;
+ proc_priv->power_limits[i].step_uw = ppcc[5].integer.value;
+ }
+
+ *priv = proc_priv;
+
+ ret = sysfs_create_group(&dev->kobj,
+ &power_limit_attribute_group);
+
+free_buffer:
+ kfree(buf.pointer);
+
+ return ret;
+}
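The parser above implies a PPCC shape of one header element followed by up to two 6-integer packages; reconstructed from the indexing (element 0 is skipped, fields in code order), an ASL-style sketch with made-up values:

    Name (PPCC, Package () {
        2,                              // element 0: skipped by the parser
        // { index, min_uw, max_uw, tmin_us, tmax_us, step_uw }
        Package () { 0, 15000000, 25000000, 28000000, 32000000, 125000 },
        Package () { 1, 25000000, 35000000, 10000, 10000, 125000 },
    })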
+
+void proc_thermal_remove(struct proc_thermal_device *proc_priv)
+{
+ sysfs_remove_group(&proc_priv->dev->kobj,
+ &power_limit_attribute_group);
+}
+
+static int int3401_add(struct platform_device *pdev)
+{
+ struct proc_thermal_device *proc_priv;
+ int ret;
+
+ if (proc_thermal_emum_mode == PROC_THERMAL_PCI) {
+ dev_err(&pdev->dev, "error: enumerated as PCI dev\n");
+ return -ENODEV;
+ }
+
+ ret = proc_thermal_add(&pdev->dev, &proc_priv);
+ if (ret)
+ return ret;
+
+ platform_set_drvdata(pdev, proc_priv);
+ proc_thermal_emum_mode = PROC_THERMAL_PLATFORM_DEV;
+
+ return 0;
+}
+
+static int int3401_remove(struct platform_device *pdev)
+{
+ proc_thermal_remove(platform_get_drvdata(pdev));
+
+ return 0;
+}
+
+static int proc_thermal_pci_probe(struct pci_dev *pdev,
+ const struct pci_device_id *unused)
+{
+ struct proc_thermal_device *proc_priv;
+ int ret;
+
+ if (proc_thermal_emum_mode == PROC_THERMAL_PLATFORM_DEV) {
+ dev_err(&pdev->dev, "error: enumerated as platform dev\n");
+ return -ENODEV;
+ }
+
+ ret = pci_enable_device(pdev);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "error: could not enable device\n");
+ return ret;
+ }
+
+ ret = proc_thermal_add(&pdev->dev, &proc_priv);
+ if (ret) {
+ pci_disable_device(pdev);
+ return ret;
+ }
+
+ pci_set_drvdata(pdev, proc_priv);
+ proc_thermal_emum_mode = PROC_THERMAL_PCI;
+
+ return 0;
+}
+
+static void proc_thermal_pci_remove(struct pci_dev *pdev)
+{
+ proc_thermal_remove(pci_get_drvdata(pdev));
+ pci_disable_device(pdev);
+}
+
+static const struct pci_device_id proc_thermal_pci_ids[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_PROC_BDW_THERMAL)},
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_PROC_HSB_THERMAL)},
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_PROC_BSW_THERMAL)},
+ { 0, },
+};
+
+MODULE_DEVICE_TABLE(pci, proc_thermal_pci_ids);
+
+static struct pci_driver proc_thermal_pci_driver = {
+ .name = "proc_thermal",
+ .probe = proc_thermal_pci_probe,
+ .remove = proc_thermal_pci_remove,
+ .id_table = proc_thermal_pci_ids,
+};
+
+static const struct acpi_device_id int3401_device_ids[] = {
+ {"INT3401", 0},
+ {"", 0},
+};
+MODULE_DEVICE_TABLE(acpi, int3401_device_ids);
+
+static struct platform_driver int3401_driver = {
+ .probe = int3401_add,
+ .remove = int3401_remove,
+ .driver = {
+ .name = "int3401 thermal",
+ .acpi_match_table = int3401_device_ids,
+ },
+};
+
+static int __init proc_thermal_init(void)
+{
+ int ret;
+
+ ret = platform_driver_register(&int3401_driver);
+ if (ret)
+ return ret;
+
+ ret = pci_register_driver(&proc_thermal_pci_driver);
+
+ return ret;
+}
+
+static void __exit proc_thermal_exit(void)
+{
+ platform_driver_unregister(&int3401_driver);
+ pci_unregister_driver(&proc_thermal_pci_driver);
+}
+
+module_init(proc_thermal_init);
+module_exit(proc_thermal_exit);
+
+MODULE_AUTHOR("Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>");
+MODULE_DESCRIPTION("Processor Thermal Reporting Device Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/thermal/intel_powerclamp.c b/drivers/thermal/intel_powerclamp.c
index e98b424..6ceebd6 100644
--- a/drivers/thermal/intel_powerclamp.c
+++ b/drivers/thermal/intel_powerclamp.c
@@ -688,6 +688,7 @@
{ X86_VENDOR_INTEL, 6, 0x45},
{ X86_VENDOR_INTEL, 6, 0x46},
{ X86_VENDOR_INTEL, 6, 0x4c},
+ { X86_VENDOR_INTEL, 6, 0x56},
{}
};
MODULE_DEVICE_TABLE(x86cpu, intel_powerclamp_ids);
diff --git a/drivers/thermal/rockchip_thermal.c b/drivers/thermal/rockchip_thermal.c
index 1bcddfc..9c6ce54 100644
--- a/drivers/thermal/rockchip_thermal.c
+++ b/drivers/thermal/rockchip_thermal.c
@@ -677,7 +677,6 @@
static struct platform_driver rockchip_thermal_driver = {
.driver = {
.name = "rockchip-thermal",
- .owner = THIS_MODULE,
.pm = &rockchip_thermal_pm_ops,
.of_match_table = of_rockchip_thermal_match,
},
diff --git a/drivers/thermal/samsung/Kconfig b/drivers/thermal/samsung/Kconfig
index f760389..c43306e 100644
--- a/drivers/thermal/samsung/Kconfig
+++ b/drivers/thermal/samsung/Kconfig
@@ -1,6 +1,6 @@
config EXYNOS_THERMAL
tristate "Exynos thermal management unit driver"
- depends on ARCH_HAS_BANDGAP && OF
+ depends on OF
help
If you say yes here you get support for the TMU (Thermal Management
Unit) driver for SAMSUNG EXYNOS series of SoCs. This driver initialises
diff --git a/drivers/thermal/samsung/exynos_thermal_common.c b/drivers/thermal/samsung/exynos_thermal_common.c
index b6be572..6dc3815 100644
--- a/drivers/thermal/samsung/exynos_thermal_common.c
+++ b/drivers/thermal/samsung/exynos_thermal_common.c
@@ -347,7 +347,6 @@
int exynos_register_thermal(struct thermal_sensor_conf *sensor_conf)
{
int ret;
- struct cpumask mask_val;
struct exynos_thermal_zone *th_zone;
if (!sensor_conf || !sensor_conf->read_temperature) {
@@ -367,13 +366,14 @@
* sensor
*/
if (sensor_conf->cooling_data.freq_clip_count > 0) {
- cpumask_set_cpu(0, &mask_val);
th_zone->cool_dev[th_zone->cool_dev_size] =
- cpufreq_cooling_register(&mask_val);
+ cpufreq_cooling_register(cpu_present_mask);
if (IS_ERR(th_zone->cool_dev[th_zone->cool_dev_size])) {
- dev_err(sensor_conf->dev,
- "Failed to register cpufreq cooling device\n");
- ret = -EINVAL;
+ ret = PTR_ERR(th_zone->cool_dev[th_zone->cool_dev_size]);
+ if (ret != -EPROBE_DEFER)
+ dev_err(sensor_conf->dev,
+ "Failed to register cpufreq cooling device: %d\n",
+ ret);
goto err_unregister;
}
th_zone->cool_dev_size++;
diff --git a/drivers/thermal/samsung/exynos_tmu.c b/drivers/thermal/samsung/exynos_tmu.c
index d44d91d..d2f1e62 100644
--- a/drivers/thermal/samsung/exynos_tmu.c
+++ b/drivers/thermal/samsung/exynos_tmu.c
@@ -927,7 +927,10 @@
/* Register the sensor with thermal management interface */
ret = exynos_register_thermal(sensor_conf);
if (ret) {
- dev_err(&pdev->dev, "Failed to register thermal interface\n");
+ if (ret != -EPROBE_DEFER)
+ dev_err(&pdev->dev,
+ "Failed to register thermal interface: %d\n",
+ ret);
goto err_clk;
}
data->reg_conf = sensor_conf;
diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c
index 84fdf07..87e0b07 100644
--- a/drivers/thermal/thermal_core.c
+++ b/drivers/thermal/thermal_core.c
@@ -930,7 +930,7 @@
struct thermal_zone_device *pos1;
struct thermal_cooling_device *pos2;
unsigned long max_state;
- int result;
+ int result, ret;
if (trip >= tz->trips || (trip < 0 && trip != THERMAL_TRIPS_NONE))
return -EINVAL;
@@ -947,7 +947,9 @@
if (tz != pos1 || cdev != pos2)
return -EINVAL;
- cdev->ops->get_max_state(cdev, &max_state);
+ ret = cdev->ops->get_max_state(cdev, &max_state);
+ if (ret)
+ return ret;
/* lower default 0, upper default max_state */
lower = lower == THERMAL_NO_LIMIT ? 0 : lower;
diff --git a/drivers/thermal/ti-soc-thermal/ti-thermal-common.c b/drivers/thermal/ti-soc-thermal/ti-thermal-common.c
index 5fd0386..3fb054a 100644
--- a/drivers/thermal/ti-soc-thermal/ti-thermal-common.c
+++ b/drivers/thermal/ti-soc-thermal/ti-thermal-common.c
@@ -28,7 +28,6 @@
#include <linux/kernel.h>
#include <linux/workqueue.h>
#include <linux/thermal.h>
-#include <linux/cpufreq.h>
#include <linux/cpumask.h>
#include <linux/cpu_cooling.h>
#include <linux/of.h>
@@ -407,17 +406,17 @@
if (!data)
return -EINVAL;
- if (!cpufreq_get_current_driver()) {
- dev_dbg(bgp->dev, "no cpufreq driver yet\n");
- return -EPROBE_DEFER;
- }
-
/* Register cooling device */
data->cool_dev = cpufreq_cooling_register(cpu_present_mask);
if (IS_ERR(data->cool_dev)) {
- dev_err(bgp->dev,
- "Failed to register cpufreq cooling device\n");
- return PTR_ERR(data->cool_dev);
+ int ret = PTR_ERR(data->cool_dev);
+
+ if (ret != -EPROBE_DEFER)
+ dev_err(bgp->dev,
+ "Failed to register cpu cooling device %d\n",
+ ret);
+
+ return ret;
}
ti_bandgap_set_sensor_data(bgp, id, data);
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index ed71b53..cb807d0 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -713,9 +713,13 @@
r = -EFAULT;
break;
}
- if ((a.avail_user_addr & (sizeof *vq->avail->ring - 1)) ||
- (a.used_user_addr & (sizeof *vq->used->ring - 1)) ||
- (a.log_guest_addr & (sizeof *vq->used->ring - 1))) {
+
+ /* Make sure it's safe to cast pointers to vring types. */
+ BUILD_BUG_ON(__alignof__ *vq->avail > VRING_AVAIL_ALIGN_SIZE);
+ BUILD_BUG_ON(__alignof__ *vq->used > VRING_USED_ALIGN_SIZE);
+ if ((a.avail_user_addr & (VRING_AVAIL_ALIGN_SIZE - 1)) ||
+ (a.used_user_addr & (VRING_USED_ALIGN_SIZE - 1)) ||
+ (a.log_guest_addr & (sizeof(u64) - 1))) {
r = -EINVAL;
break;
}
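addr & (align - 1) is the standard alignment test for a power-of-two align, and the BUILD_BUG_ON()s promote the "vring structs are no stricter than the ABI alignment" assumption to a compile-time failure, keeping the later user-pointer casts safe. The idiom in isolation:

    BUILD_BUG_ON(__alignof__(struct vring_used) > VRING_USED_ALIGN_SIZE);
    if (addr & (VRING_USED_ALIGN_SIZE - 1))  /* align must be a power of 2 */
            return -EINVAL;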
diff --git a/drivers/video/fbdev/core/fb_defio.c b/drivers/video/fbdev/core/fb_defio.c
index 900aa4e..d6cab1f 100644
--- a/drivers/video/fbdev/core/fb_defio.c
+++ b/drivers/video/fbdev/core/fb_defio.c
@@ -83,9 +83,10 @@
cancel_delayed_work_sync(&info->deferred_work);
/* Run it immediately */
- err = schedule_delayed_work(&info->deferred_work, 0);
+ schedule_delayed_work(&info->deferred_work, 0);
mutex_unlock(&inode->i_mutex);
- return err;
+
+ return 0;
}
EXPORT_SYMBOL_GPL(fb_deferred_io_fsync);
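schedule_delayed_work() returns bool -- %true if it queued the work, %false if the work was already pending -- not an errno, so the old code could hand a bogus 0/1 "error" back to userspace from fsync(). Returning 0 unconditionally is the fix; the only meaningful use of the return value looks like:

    if (!schedule_delayed_work(&info->deferred_work, 0))
            ;       /* already pending: nothing to do, not an error */
    return 0;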
diff --git a/drivers/video/fbdev/omap2/dss/hdmi_pll.c b/drivers/video/fbdev/omap2/dss/hdmi_pll.c
index 87accdb..ac83ef5 100644
--- a/drivers/video/fbdev/omap2/dss/hdmi_pll.c
+++ b/drivers/video/fbdev/omap2/dss/hdmi_pll.c
@@ -132,7 +132,6 @@
.mX_max = 127,
.fint_min = 500000,
.fint_max = 2500000,
- .clkdco_max = 1800000000,
.clkdco_min = 500000000,
.clkdco_low = 1000000000,
@@ -156,7 +155,6 @@
.mX_max = 127,
.fint_min = 620000,
.fint_max = 2500000,
- .clkdco_max = 1800000000,
.clkdco_min = 750000000,
.clkdco_low = 1500000000,
diff --git a/drivers/video/fbdev/omap2/dss/pll.c b/drivers/video/fbdev/omap2/dss/pll.c
index 50bc62c5..335ffac 100644
--- a/drivers/video/fbdev/omap2/dss/pll.c
+++ b/drivers/video/fbdev/omap2/dss/pll.c
@@ -97,7 +97,8 @@
return 0;
err_enable:
- regulator_disable(pll->regulator);
+ if (pll->regulator)
+ regulator_disable(pll->regulator);
err_reg:
clk_disable_unprepare(pll->clkin);
return r;
diff --git a/drivers/video/fbdev/omap2/dss/sdi.c b/drivers/video/fbdev/omap2/dss/sdi.c
index d51a9830..5c2ccab 100644
--- a/drivers/video/fbdev/omap2/dss/sdi.c
+++ b/drivers/video/fbdev/omap2/dss/sdi.c
@@ -342,6 +342,8 @@
out->output_type = OMAP_DISPLAY_TYPE_SDI;
out->name = "sdi.0";
out->dispc_channel = OMAP_DSS_CHANNEL_LCD;
+ /* We have SDI only on OMAP3, where it's on port 1 */
+ out->port_num = 1;
out->ops.sdi = &sdi_ops;
out->owner = THIS_MODULE;
diff --git a/drivers/video/logo/logo.c b/drivers/video/logo/logo.c
index 940cd19..10fbfd8 100644
--- a/drivers/video/logo/logo.c
+++ b/drivers/video/logo/logo.c
@@ -21,6 +21,21 @@
module_param(nologo, bool, 0);
MODULE_PARM_DESC(nologo, "Disables startup logo");
+/*
+ * Logos are located in initdata and will be freed in kernel_init.
+ * Use a late_initcall to mark the logos as freed and prevent any further use.
+ */
+
+static bool logos_freed;
+
+static int __init fb_logo_late_init(void)
+{
+ logos_freed = true;
+ return 0;
+}
+
+late_initcall(fb_logo_late_init);
+
/* logos are marked __initdata. Use __init_refok to tell
* modpost that it is intended that this function uses data
* marked __initdata.
@@ -29,7 +44,7 @@
{
const struct linux_logo *logo = NULL;
- if (nologo)
+ if (nologo || logos_freed)
return NULL;
if (depth >= 1) {
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
index 6e13911..22b289a 100644
--- a/fs/cifs/cifsglob.h
+++ b/fs/cifs/cifsglob.h
@@ -661,16 +661,16 @@
server->ops->set_credits(server, val);
}
-static inline __u64
+static inline __le64
get_next_mid64(struct TCP_Server_Info *server)
{
- return server->ops->get_next_mid(server);
+ return cpu_to_le64(server->ops->get_next_mid(server));
}
static inline __le16
get_next_mid(struct TCP_Server_Info *server)
{
- __u16 mid = get_next_mid64(server);
+ __u16 mid = server->ops->get_next_mid(server);
/*
* The value in the SMB header should be little endian for easy
* on-the-wire decoding.
diff --git a/fs/cifs/netmisc.c b/fs/cifs/netmisc.c
index b333ff6..abae6dd 100644
--- a/fs/cifs/netmisc.c
+++ b/fs/cifs/netmisc.c
@@ -926,6 +926,7 @@
/* Subtract the NTFS time offset, then convert to 1s intervals. */
s64 t = le64_to_cpu(ntutc) - NTFS_TIME_OFFSET;
+ u64 abs_t;
/*
* Unfortunately can not use normal 64 bit division on 32 bit arch, but
@@ -933,13 +934,14 @@
* to special case them
*/
if (t < 0) {
- t = -t;
- ts.tv_nsec = (long)(do_div(t, 10000000) * 100);
+ abs_t = -t;
+ ts.tv_nsec = (long)(do_div(abs_t, 10000000) * 100);
ts.tv_nsec = -ts.tv_nsec;
- ts.tv_sec = -t;
+ ts.tv_sec = -abs_t;
} else {
- ts.tv_nsec = (long)do_div(t, 10000000) * 100;
- ts.tv_sec = t;
+ abs_t = t;
+ ts.tv_nsec = (long)do_div(abs_t, 10000000) * 100;
+ ts.tv_sec = abs_t;
}
return ts;
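do_div(n, base) is only defined for an unsigned 64-bit n: it divides n in place and returns the 32-bit remainder, so feeding it a signed (and possibly negated) s64 was the bug. Condensed sketch of the corrected pattern:

    u64 abs_t = (t < 0) ? -t : t;
    u32 rem = do_div(abs_t, 10000000);  /* abs_t = |t| / 1e7, rem in 100ns units */
    ts.tv_sec  = (t < 0) ? -(s64)abs_t : abs_t;
    ts.tv_nsec = (t < 0) ? -(long)(rem * 100) : (long)(rem * 100);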
diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c
index 8eaf20a..c295338 100644
--- a/fs/cifs/readdir.c
+++ b/fs/cifs/readdir.c
@@ -69,7 +69,8 @@
* Attempt to preload the dcache with the results from the FIND_FIRST/NEXT
*
* Find the dentry that matches "name". If there isn't one, create one. If it's
- * a negative dentry or the uniqueid changed, then drop it and recreate it.
+ * a negative dentry or the uniqueid or file type (mode) changed,
+ * then drop it and recreate it.
*/
static void
cifs_prime_dcache(struct dentry *parent, struct qstr *name,
@@ -97,8 +98,11 @@
if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM))
fattr->cf_uniqueid = CIFS_I(inode)->uniqueid;
- /* update inode in place if i_ino didn't change */
- if (CIFS_I(inode)->uniqueid == fattr->cf_uniqueid) {
+ /* update inode in place
+ * if neither i_ino nor i_mode changed */
+ if (CIFS_I(inode)->uniqueid == fattr->cf_uniqueid &&
+ (inode->i_mode & S_IFMT) ==
+ (fattr->cf_mode & S_IFMT)) {
cifs_fattr_to_inode(inode, fattr);
goto out;
}
diff --git a/fs/cifs/smb2misc.c b/fs/cifs/smb2misc.c
index f1cefc9..689f035 100644
--- a/fs/cifs/smb2misc.c
+++ b/fs/cifs/smb2misc.c
@@ -32,12 +32,14 @@
static int
check_smb2_hdr(struct smb2_hdr *hdr, __u64 mid)
{
+ __u64 wire_mid = le64_to_cpu(hdr->MessageId);
+
/*
* Make sure that this really is an SMB, that it is a response,
* and that the message ids match.
*/
if ((*(__le32 *)hdr->ProtocolId == SMB2_PROTO_NUMBER) &&
- (mid == hdr->MessageId)) {
+ (mid == wire_mid)) {
if (hdr->Flags & SMB2_FLAGS_SERVER_TO_REDIR)
return 0;
else {
@@ -51,11 +53,11 @@
if (*(__le32 *)hdr->ProtocolId != SMB2_PROTO_NUMBER)
cifs_dbg(VFS, "Bad protocol string signature header %x\n",
*(unsigned int *) hdr->ProtocolId);
- if (mid != hdr->MessageId)
+ if (mid != wire_mid)
cifs_dbg(VFS, "Mids do not match: %llu and %llu\n",
- mid, hdr->MessageId);
+ mid, wire_mid);
}
- cifs_dbg(VFS, "Bad SMB detected. The Mid=%llu\n", hdr->MessageId);
+ cifs_dbg(VFS, "Bad SMB detected. The Mid=%llu\n", wire_mid);
return 1;
}
@@ -95,7 +97,7 @@
{
struct smb2_hdr *hdr = (struct smb2_hdr *)buf;
struct smb2_pdu *pdu = (struct smb2_pdu *)hdr;
- __u64 mid = hdr->MessageId;
+ __u64 mid = le64_to_cpu(hdr->MessageId);
__u32 len = get_rfc1002_length(buf);
__u32 clc_len; /* calculated length */
int command;
diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
index 93fd058..96b5d40 100644
--- a/fs/cifs/smb2ops.c
+++ b/fs/cifs/smb2ops.c
@@ -176,10 +176,11 @@
{
struct mid_q_entry *mid;
struct smb2_hdr *hdr = (struct smb2_hdr *)buf;
+ __u64 wire_mid = le64_to_cpu(hdr->MessageId);
spin_lock(&GlobalMid_Lock);
list_for_each_entry(mid, &server->pending_mid_q, qhead) {
- if ((mid->mid == hdr->MessageId) &&
+ if ((mid->mid == wire_mid) &&
(mid->mid_state == MID_REQUEST_SUBMITTED) &&
(mid->command == hdr->Command)) {
spin_unlock(&GlobalMid_Lock);
diff --git a/fs/cifs/smb2pdu.h b/fs/cifs/smb2pdu.h
index ce85847..70867d5 100644
--- a/fs/cifs/smb2pdu.h
+++ b/fs/cifs/smb2pdu.h
@@ -110,7 +110,7 @@
__le16 CreditRequest; /* CreditResponse */
__le32 Flags;
__le32 NextCommand;
- __u64 MessageId; /* opaque - so can stay little endian */
+ __le64 MessageId;
__le32 ProcessId;
__u32 TreeId; /* opaque - so do not make little endian */
__u64 SessionId; /* opaque - so do not make little endian */
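Declaring MessageId as __le64 lets sparse flag any access that skips byte-order conversion; as the smb2misc/smb2ops/smb2transport hunks show, every consumer now uses le64_to_cpu() and every producer cpu_to_le64(). The two directions in isolation:

    __u64 wire_mid = le64_to_cpu(hdr->MessageId);  /* wire -> CPU, when matching */
    hdr->MessageId = cpu_to_le64(mid);             /* CPU -> wire, when building */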
diff --git a/fs/cifs/smb2transport.c b/fs/cifs/smb2transport.c
index 5111e72..d4c5b6f 100644
--- a/fs/cifs/smb2transport.c
+++ b/fs/cifs/smb2transport.c
@@ -490,7 +490,7 @@
return temp;
else {
memset(temp, 0, sizeof(struct mid_q_entry));
- temp->mid = smb_buffer->MessageId; /* always LE */
+ temp->mid = le64_to_cpu(smb_buffer->MessageId);
temp->pid = current->pid;
temp->command = smb_buffer->Command; /* Always LE */
temp->when_alloc = jiffies;
diff --git a/fs/isofs/rock.c b/fs/isofs/rock.c
index bb63254..735d752 100644
--- a/fs/isofs/rock.c
+++ b/fs/isofs/rock.c
@@ -362,6 +362,9 @@
rs.cont_size = isonum_733(rr->u.CE.size);
break;
case SIG('E', 'R'):
+ /* Invalid length of ER tag id? */
+ if (rr->u.ER.len_id + offsetof(struct rock_ridge, u.ER.data) > rr->len)
+ goto out;
ISOFS_SB(inode->i_sb)->s_rock = 1;
printk(KERN_DEBUG "ISO 9660 Extensions: ");
{
diff --git a/fs/udf/dir.c b/fs/udf/dir.c
index a012c51..05e90ed 100644
--- a/fs/udf/dir.c
+++ b/fs/udf/dir.c
@@ -57,6 +57,7 @@
sector_t offset;
int i, num, ret = 0;
struct extent_position epos = { NULL, 0, {0, 0} };
+ struct super_block *sb = dir->i_sb;
if (ctx->pos == 0) {
if (!dir_emit_dot(file, ctx))
@@ -76,16 +77,16 @@
if (nf_pos == 0)
nf_pos = udf_ext0_offset(dir);
- fibh.soffset = fibh.eoffset = nf_pos & (dir->i_sb->s_blocksize - 1);
+ fibh.soffset = fibh.eoffset = nf_pos & (sb->s_blocksize - 1);
if (iinfo->i_alloc_type != ICBTAG_FLAG_AD_IN_ICB) {
- if (inode_bmap(dir, nf_pos >> dir->i_sb->s_blocksize_bits,
+ if (inode_bmap(dir, nf_pos >> sb->s_blocksize_bits,
&epos, &eloc, &elen, &offset)
!= (EXT_RECORDED_ALLOCATED >> 30)) {
ret = -ENOENT;
goto out;
}
- block = udf_get_lb_pblock(dir->i_sb, &eloc, offset);
- if ((++offset << dir->i_sb->s_blocksize_bits) < elen) {
+ block = udf_get_lb_pblock(sb, &eloc, offset);
+ if ((++offset << sb->s_blocksize_bits) < elen) {
if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
epos.offset -= sizeof(struct short_ad);
else if (iinfo->i_alloc_type ==
@@ -95,18 +96,18 @@
offset = 0;
}
- if (!(fibh.sbh = fibh.ebh = udf_tread(dir->i_sb, block))) {
+ if (!(fibh.sbh = fibh.ebh = udf_tread(sb, block))) {
ret = -EIO;
goto out;
}
- if (!(offset & ((16 >> (dir->i_sb->s_blocksize_bits - 9)) - 1))) {
- i = 16 >> (dir->i_sb->s_blocksize_bits - 9);
- if (i + offset > (elen >> dir->i_sb->s_blocksize_bits))
- i = (elen >> dir->i_sb->s_blocksize_bits) - offset;
+ if (!(offset & ((16 >> (sb->s_blocksize_bits - 9)) - 1))) {
+ i = 16 >> (sb->s_blocksize_bits - 9);
+ if (i + offset > (elen >> sb->s_blocksize_bits))
+ i = (elen >> sb->s_blocksize_bits) - offset;
for (num = 0; i > 0; i--) {
- block = udf_get_lb_pblock(dir->i_sb, &eloc, offset + i);
- tmp = udf_tgetblk(dir->i_sb, block);
+ block = udf_get_lb_pblock(sb, &eloc, offset + i);
+ tmp = udf_tgetblk(sb, block);
if (tmp && !buffer_uptodate(tmp) && !buffer_locked(tmp))
bha[num++] = tmp;
else
@@ -152,12 +153,12 @@
}
if ((cfi.fileCharacteristics & FID_FILE_CHAR_DELETED) != 0) {
- if (!UDF_QUERY_FLAG(dir->i_sb, UDF_FLAG_UNDELETE))
+ if (!UDF_QUERY_FLAG(sb, UDF_FLAG_UNDELETE))
continue;
}
if ((cfi.fileCharacteristics & FID_FILE_CHAR_HIDDEN) != 0) {
- if (!UDF_QUERY_FLAG(dir->i_sb, UDF_FLAG_UNHIDE))
+ if (!UDF_QUERY_FLAG(sb, UDF_FLAG_UNHIDE))
continue;
}
@@ -167,12 +168,12 @@
continue;
}
- flen = udf_get_filename(dir->i_sb, nameptr, fname, lfi);
+ flen = udf_get_filename(sb, nameptr, lfi, fname, UDF_NAME_LEN);
if (!flen)
continue;
tloc = lelb_to_cpu(cfi.icb.extLocation);
- iblock = udf_get_lb_pblock(dir->i_sb, &tloc, 0);
+ iblock = udf_get_lb_pblock(sb, &tloc, 0);
if (!dir_emit(ctx, fname, flen, iblock, DT_UNKNOWN))
goto out;
} /* end while */
diff --git a/fs/udf/inode.c b/fs/udf/inode.c
index c9b4df5..5bc71d9 100644
--- a/fs/udf/inode.c
+++ b/fs/udf/inode.c
@@ -1489,6 +1489,20 @@
}
inode->i_generation = iinfo->i_unique;
+ /* Sanity checks for files in ICB so that we don't get confused later */
+ if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) {
+ /*
+ * For file in ICB data is stored in allocation descriptor
+ * so sizes should match
+ */
+ if (iinfo->i_lenAlloc != inode->i_size)
+ goto out;
+ /* File in ICB has to fit in there... */
+ if (inode->i_size > inode->i_sb->s_blocksize -
+ udf_file_entry_alloc_offset(inode))
+ goto out;
+ }
+
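For ICBTAG_FLAG_AD_IN_ICB files the data is embedded in the file entry itself, right after the allocation descriptors, so the largest legal in-ICB file is one block minus that header; a crafted image violating either check could otherwise send later reads past the buffer. The bound, spelled out:

    max_in_icb = inode->i_sb->s_blocksize - udf_file_entry_alloc_offset(inode);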
switch (fe->icbTag.fileType) {
case ICBTAG_FILE_TYPE_DIRECTORY:
inode->i_op = &udf_dir_inode_operations;
diff --git a/fs/udf/namei.c b/fs/udf/namei.c
index c12e260..33b246b 100644
--- a/fs/udf/namei.c
+++ b/fs/udf/namei.c
@@ -159,18 +159,19 @@
struct udf_inode_info *dinfo = UDF_I(dir);
int isdotdot = child->len == 2 &&
child->name[0] == '.' && child->name[1] == '.';
+ struct super_block *sb = dir->i_sb;
size = udf_ext0_offset(dir) + dir->i_size;
f_pos = udf_ext0_offset(dir);
fibh->sbh = fibh->ebh = NULL;
- fibh->soffset = fibh->eoffset = f_pos & (dir->i_sb->s_blocksize - 1);
+ fibh->soffset = fibh->eoffset = f_pos & (sb->s_blocksize - 1);
if (dinfo->i_alloc_type != ICBTAG_FLAG_AD_IN_ICB) {
- if (inode_bmap(dir, f_pos >> dir->i_sb->s_blocksize_bits, &epos,
+ if (inode_bmap(dir, f_pos >> sb->s_blocksize_bits, &epos,
&eloc, &elen, &offset) != (EXT_RECORDED_ALLOCATED >> 30))
goto out_err;
- block = udf_get_lb_pblock(dir->i_sb, &eloc, offset);
- if ((++offset << dir->i_sb->s_blocksize_bits) < elen) {
+ block = udf_get_lb_pblock(sb, &eloc, offset);
+ if ((++offset << sb->s_blocksize_bits) < elen) {
if (dinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
epos.offset -= sizeof(struct short_ad);
else if (dinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG)
@@ -178,7 +179,7 @@
} else
offset = 0;
- fibh->sbh = fibh->ebh = udf_tread(dir->i_sb, block);
+ fibh->sbh = fibh->ebh = udf_tread(sb, block);
if (!fibh->sbh)
goto out_err;
}
@@ -217,12 +218,12 @@
}
if ((cfi->fileCharacteristics & FID_FILE_CHAR_DELETED) != 0) {
- if (!UDF_QUERY_FLAG(dir->i_sb, UDF_FLAG_UNDELETE))
+ if (!UDF_QUERY_FLAG(sb, UDF_FLAG_UNDELETE))
continue;
}
if ((cfi->fileCharacteristics & FID_FILE_CHAR_HIDDEN) != 0) {
- if (!UDF_QUERY_FLAG(dir->i_sb, UDF_FLAG_UNHIDE))
+ if (!UDF_QUERY_FLAG(sb, UDF_FLAG_UNHIDE))
continue;
}
@@ -233,7 +234,7 @@
if (!lfi)
continue;
- flen = udf_get_filename(dir->i_sb, nameptr, fname, lfi);
+ flen = udf_get_filename(sb, nameptr, lfi, fname, UDF_NAME_LEN);
if (flen && udf_match(flen, fname, child->len, child->name))
goto out_ok;
}
diff --git a/fs/udf/symlink.c b/fs/udf/symlink.c
index 6fb7945..ac10ca9 100644
--- a/fs/udf/symlink.c
+++ b/fs/udf/symlink.c
@@ -30,49 +30,73 @@
#include <linux/buffer_head.h>
#include "udf_i.h"
-static void udf_pc_to_char(struct super_block *sb, unsigned char *from,
- int fromlen, unsigned char *to)
+static int udf_pc_to_char(struct super_block *sb, unsigned char *from,
+ int fromlen, unsigned char *to, int tolen)
{
struct pathComponent *pc;
int elen = 0;
+ int comp_len;
unsigned char *p = to;
+ /* Reserve one byte for terminating \0 */
+ tolen--;
while (elen < fromlen) {
pc = (struct pathComponent *)(from + elen);
+ elen += sizeof(struct pathComponent);
switch (pc->componentType) {
case 1:
/*
* Symlink points to some place which should be agreed
* upon between originator and receiver of the media. Ignore.
*/
- if (pc->lengthComponentIdent > 0)
+ if (pc->lengthComponentIdent > 0) {
+ elen += pc->lengthComponentIdent;
break;
+ }
/* Fall through */
case 2:
+ if (tolen == 0)
+ return -ENAMETOOLONG;
p = to;
*p++ = '/';
+ tolen--;
break;
case 3:
+ if (tolen < 3)
+ return -ENAMETOOLONG;
memcpy(p, "../", 3);
p += 3;
+ tolen -= 3;
break;
case 4:
+ if (tolen < 2)
+ return -ENAMETOOLONG;
memcpy(p, "./", 2);
p += 2;
+ tolen -= 2;
/* that would be . - just ignore */
break;
case 5:
- p += udf_get_filename(sb, pc->componentIdent, p,
- pc->lengthComponentIdent);
+ elen += pc->lengthComponentIdent;
+ if (elen > fromlen)
+ return -EIO;
+ comp_len = udf_get_filename(sb, pc->componentIdent,
+ pc->lengthComponentIdent,
+ p, tolen);
+ p += comp_len;
+ tolen -= comp_len;
+ if (tolen == 0)
+ return -ENAMETOOLONG;
*p++ = '/';
+ tolen--;
break;
}
- elen += sizeof(struct pathComponent) + pc->lengthComponentIdent;
}
if (p > to + 1)
p[-1] = '\0';
else
p[0] = '\0';
+ return 0;
}
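With the tolen accounting every copy is now bounded and an undersized destination yields -ENAMETOOLONG rather than a buffer overrun. A worked example of the translation (hypothetical on-disk component sequence):

    /* type 2 ("/"), type 5 "usr", type 5 "lib":
     *     builds "/usr/lib/", then the trailing '/' becomes '\0' -> "/usr/lib"
     * type 3 emits "../", type 4 emits "./" (i.e. effectively ignored). */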
static int udf_symlink_filler(struct file *file, struct page *page)
@@ -80,11 +104,17 @@
struct inode *inode = page->mapping->host;
struct buffer_head *bh = NULL;
unsigned char *symlink;
- int err = -EIO;
+ int err;
unsigned char *p = kmap(page);
struct udf_inode_info *iinfo;
uint32_t pos;
+ /* We don't support symlinks longer than one block */
+ if (inode->i_size > inode->i_sb->s_blocksize) {
+ err = -ENAMETOOLONG;
+ goto out_unmap;
+ }
+
iinfo = UDF_I(inode);
pos = udf_block_map(inode, 0);
@@ -94,14 +124,18 @@
} else {
bh = sb_bread(inode->i_sb, pos);
- if (!bh)
- goto out;
+ if (!bh) {
+ err = -EIO;
+ goto out_unlock_inode;
+ }
symlink = bh->b_data;
}
- udf_pc_to_char(inode->i_sb, symlink, inode->i_size, p);
+ err = udf_pc_to_char(inode->i_sb, symlink, inode->i_size, p, PAGE_SIZE);
brelse(bh);
+ if (err)
+ goto out_unlock_inode;
up_read(&iinfo->i_data_sem);
SetPageUptodate(page);
@@ -109,9 +143,10 @@
unlock_page(page);
return 0;
-out:
+out_unlock_inode:
up_read(&iinfo->i_data_sem);
SetPageError(page);
+out_unmap:
kunmap(page);
unlock_page(page);
return err;
diff --git a/fs/udf/udfdecl.h b/fs/udf/udfdecl.h
index 1cc3c99..47bb3f5 100644
--- a/fs/udf/udfdecl.h
+++ b/fs/udf/udfdecl.h
@@ -211,7 +211,8 @@
}
/* unicode.c */
-extern int udf_get_filename(struct super_block *, uint8_t *, uint8_t *, int);
+extern int udf_get_filename(struct super_block *, uint8_t *, int, uint8_t *,
+ int);
extern int udf_put_filename(struct super_block *, const uint8_t *, uint8_t *,
int);
extern int udf_build_ustr(struct ustr *, dstring *, int);
diff --git a/fs/udf/unicode.c b/fs/udf/unicode.c
index afd470e..b84fee3 100644
--- a/fs/udf/unicode.c
+++ b/fs/udf/unicode.c
@@ -28,7 +28,8 @@
#include "udf_sb.h"
-static int udf_translate_to_linux(uint8_t *, uint8_t *, int, uint8_t *, int);
+static int udf_translate_to_linux(uint8_t *, int, uint8_t *, int, uint8_t *,
+ int);
static int udf_char_to_ustr(struct ustr *dest, const uint8_t *src, int strlen)
{
@@ -333,8 +334,8 @@
return u_len + 1;
}
-int udf_get_filename(struct super_block *sb, uint8_t *sname, uint8_t *dname,
- int flen)
+int udf_get_filename(struct super_block *sb, uint8_t *sname, int slen,
+ uint8_t *dname, int dlen)
{
struct ustr *filename, *unifilename;
int len = 0;
@@ -347,7 +348,7 @@
if (!unifilename)
goto out1;
- if (udf_build_ustr_exact(unifilename, sname, flen))
+ if (udf_build_ustr_exact(unifilename, sname, slen))
goto out2;
if (UDF_QUERY_FLAG(sb, UDF_FLAG_UTF8)) {
@@ -366,7 +367,8 @@
} else
goto out2;
- len = udf_translate_to_linux(dname, filename->u_name, filename->u_len,
+ len = udf_translate_to_linux(dname, dlen,
+ filename->u_name, filename->u_len,
unifilename->u_name, unifilename->u_len);
out2:
kfree(unifilename);
@@ -403,10 +405,12 @@
#define EXT_MARK '.'
#define CRC_MARK '#'
#define EXT_SIZE 5
+/* Number of chars we need to store generated CRC to make filename unique */
+#define CRC_LEN 5
-static int udf_translate_to_linux(uint8_t *newName, uint8_t *udfName,
- int udfLen, uint8_t *fidName,
- int fidNameLen)
+static int udf_translate_to_linux(uint8_t *newName, int newLen,
+ uint8_t *udfName, int udfLen,
+ uint8_t *fidName, int fidNameLen)
{
int index, newIndex = 0, needsCRC = 0;
int extIndex = 0, newExtIndex = 0, hasExt = 0;
@@ -439,7 +443,7 @@
newExtIndex = newIndex;
}
}
- if (newIndex < 256)
+ if (newIndex < newLen)
newName[newIndex++] = curr;
else
needsCRC = 1;
@@ -467,13 +471,13 @@
}
ext[localExtIndex++] = curr;
}
- maxFilenameLen = 250 - localExtIndex;
+ maxFilenameLen = newLen - CRC_LEN - localExtIndex;
if (newIndex > maxFilenameLen)
newIndex = maxFilenameLen;
else
newIndex = newExtIndex;
- } else if (newIndex > 250)
- newIndex = 250;
+ } else if (newIndex > newLen - CRC_LEN)
+ newIndex = newLen - CRC_LEN;
newName[newIndex++] = CRC_MARK;
valueCRC = crc_itu_t(0, fidName, fidNameLen);
newName[newIndex++] = hex_asc_upper_hi(valueCRC >> 8);
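CRC_LEN budgets for the "#XXXX" uniquifier -- CRC_MARK plus the four hex digits of the 16-bit crc_itu_t value -- appended when a name had to be truncated or mangled. An illustrative budget, assuming a 256-byte destination: a name with no extension is cut at 256 - 5 = 251 characters, and the five CRC characters bring it back to exactly 256.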
diff --git a/include/drm/drmP.h b/include/drm/drmP.h
index 8ba35c6..e928625 100644
--- a/include/drm/drmP.h
+++ b/include/drm/drmP.h
@@ -143,6 +143,7 @@
#define DRIVER_MODESET 0x2000
#define DRIVER_PRIME 0x4000
#define DRIVER_RENDER 0x8000
+#define DRIVER_ATOMIC 0x10000
/***********************************************************************/
/** \name Macros to make printk easier */
@@ -283,6 +284,8 @@
* in the plane list
*/
unsigned universal_planes:1;
+ /* true if client understands atomic properties */
+ unsigned atomic:1;
struct pid *pid;
kuid_t uid;
@@ -744,8 +747,6 @@
/** \name Context support */
/*@{ */
- bool irq_enabled; /**< True if irq handler is enabled */
- int irq;
__volatile__ long context_flag; /**< Context swapping flag */
int last_context; /**< Last current context */
@@ -753,6 +754,8 @@
/** \name VBLANK IRQ support */
/*@{ */
+ bool irq_enabled;
+ int irq;
/*
* At load time, disabling the vblank interrupt won't be allowed since
@@ -901,11 +904,15 @@
extern int drm_wait_vblank(struct drm_device *dev, void *data,
struct drm_file *filp);
extern u32 drm_vblank_count(struct drm_device *dev, int crtc);
+extern u32 drm_crtc_vblank_count(struct drm_crtc *crtc);
extern u32 drm_vblank_count_and_time(struct drm_device *dev, int crtc,
struct timeval *vblanktime);
extern void drm_send_vblank_event(struct drm_device *dev, int crtc,
struct drm_pending_vblank_event *e);
+extern void drm_crtc_send_vblank_event(struct drm_crtc *crtc,
+ struct drm_pending_vblank_event *e);
extern bool drm_handle_vblank(struct drm_device *dev, int crtc);
+extern bool drm_crtc_handle_vblank(struct drm_crtc *crtc);
extern int drm_vblank_get(struct drm_device *dev, int crtc);
extern void drm_vblank_put(struct drm_device *dev, int crtc);
extern int drm_crtc_vblank_get(struct drm_crtc *crtc);
@@ -950,6 +957,7 @@
extern void drm_put_dev(struct drm_device *dev);
extern void drm_unplug_dev(struct drm_device *dev);
extern unsigned int drm_debug;
+extern bool drm_atomic;
/* Debugfs support */
#if defined(CONFIG_DEBUG_FS)
diff --git a/include/drm/drm_atomic.h b/include/drm/drm_atomic.h
index ad22295..51168a8 100644
--- a/include/drm/drm_atomic.h
+++ b/include/drm/drm_atomic.h
@@ -38,16 +38,25 @@
struct drm_crtc_state * __must_check
drm_atomic_get_crtc_state(struct drm_atomic_state *state,
struct drm_crtc *crtc);
+int drm_atomic_crtc_set_property(struct drm_crtc *crtc,
+ struct drm_crtc_state *state, struct drm_property *property,
+ uint64_t val);
struct drm_plane_state * __must_check
drm_atomic_get_plane_state(struct drm_atomic_state *state,
struct drm_plane *plane);
+int drm_atomic_plane_set_property(struct drm_plane *plane,
+ struct drm_plane_state *state, struct drm_property *property,
+ uint64_t val);
struct drm_connector_state * __must_check
drm_atomic_get_connector_state(struct drm_atomic_state *state,
struct drm_connector *connector);
+int drm_atomic_connector_set_property(struct drm_connector *connector,
+ struct drm_connector_state *state, struct drm_property *property,
+ uint64_t val);
int __must_check
-drm_atomic_set_crtc_for_plane(struct drm_atomic_state *state,
- struct drm_plane *plane, struct drm_crtc *crtc);
+drm_atomic_set_crtc_for_plane(struct drm_plane_state *plane_state,
+ struct drm_crtc *crtc);
void drm_atomic_set_fb_for_plane(struct drm_plane_state *plane_state,
struct drm_framebuffer *fb);
int __must_check
diff --git a/include/drm/drm_atomic_helper.h b/include/drm/drm_atomic_helper.h
index f956b41..2095917 100644
--- a/include/drm/drm_atomic_helper.h
+++ b/include/drm/drm_atomic_helper.h
@@ -30,6 +30,10 @@
#include <drm/drm_crtc.h>
+int drm_atomic_helper_check_modeset(struct drm_device *dev,
+ struct drm_atomic_state *state);
+int drm_atomic_helper_check_planes(struct drm_device *dev,
+ struct drm_atomic_state *state);
int drm_atomic_helper_check(struct drm_device *dev,
struct drm_atomic_state *state);
int drm_atomic_helper_commit(struct drm_device *dev,
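
[Editor's note] Splitting the check step in two lets a driver run its own device-wide validation between the modeset pass and the plane pass. A sketch under that assumption (foo_check_global_limits() is a hypothetical driver hook):

    static int foo_atomic_check(struct drm_device *dev,
                                struct drm_atomic_state *state)
    {
            int ret;

            /* validate CRTC/connector routing and mode changes first */
            ret = drm_atomic_helper_check_modeset(dev, state);
            if (ret)
                    return ret;

            /* hypothetical driver-wide constraint, e.g. shared bandwidth */
            ret = foo_check_global_limits(state);
            if (ret)
                    return ret;

            /* then validate per-plane state against the new routing */
            return drm_atomic_helper_check_planes(dev, state);
    }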
diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h
index b863298..f444263 100644
--- a/include/drm/drm_crtc.h
+++ b/include/drm/drm_crtc.h
@@ -63,8 +63,16 @@
#define DRM_OBJECT_MAX_PROPERTY 24
struct drm_object_properties {
- int count;
- uint32_t ids[DRM_OBJECT_MAX_PROPERTY];
+ int count, atomic_count;
+ /* NOTE: if we ever start dynamically destroying properties (i.e.
+ * not at drm_mode_config_cleanup() time), then we'd have to do
+ * a better job of detaching properties from mode objects to avoid
+ * dangling property pointers:
+ */
+ struct drm_property *properties[DRM_OBJECT_MAX_PROPERTY];
+ /* do not read/write values directly, but use drm_object_property_get_value()
+ * and drm_object_property_set_value():
+ */
uint64_t values[DRM_OBJECT_MAX_PROPERTY];
};
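
[Editor's note] Since values[] is now kept in lockstep with the property pointers, the note above matters in practice; a short sketch of reading a value back through the accessor rather than by array index (assumes an already-attached property):

    static void foo_dump_crtc_prop(struct drm_crtc *crtc,
                                   struct drm_property *prop)
    {
            uint64_t val;

            /* look the value up by property, not by array position */
            if (drm_object_property_get_value(&crtc->base, prop, &val) == 0)
                    DRM_DEBUG("%s = %llu\n", prop->name,
                              (unsigned long long)val);
    }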
@@ -237,7 +245,9 @@
/**
* struct drm_crtc_state - mutable CRTC state
+ * @crtc: backpointer to the CRTC
* @enable: whether the CRTC should be enabled, gates all other state
+ * @active: whether the CRTC is actively displaying (used for DPMS)
* @mode_changed: for use by helpers and drivers when computing state updates
* @plane_mask: bitmask of (1 << drm_plane_index(plane)) of attached planes
* @last_vblank_count: for helpers and drivers to capture the vblank of the
@@ -248,9 +258,18 @@
* @event: optional pointer to a DRM event to signal upon completion of the
* state update
* @state: backpointer to global drm_atomic_state
+ *
+ * Note that the distinction between @enable and @active is rather subtle:
+ * flipping @active while @enable is set, without changing anything else, must
+ * never fail in the ->atomic_check callback, because userspace assumes that a
+ * DPMS On will always succeed. In other words: @enable controls resource
+ * assignment, @active controls the actual hardware state.
*/
struct drm_crtc_state {
+ struct drm_crtc *crtc;
+
bool enable;
+ bool active;
/* computed state bits used by helpers and drivers */
bool planes_changed : 1;
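
[Editor's note] To make the @enable/@active rule above concrete, a hedged sketch of a CRTC atomic_check that obeys it (foo_crtc_resources_available() is an assumed driver helper):

    static int foo_crtc_atomic_check(struct drm_crtc *crtc,
                                     struct drm_crtc_state *state)
    {
            /* resource-dependent checks key off enable, never active */
            if (state->enable && !foo_crtc_resources_available(crtc, state))
                    return -EINVAL;

            /*
             * No checks against state->active: toggling it with enable
             * unchanged (i.e. DPMS on/off) must always succeed.
             */
            return 0;
    }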
@@ -292,6 +311,9 @@
* @atomic_duplicate_state: duplicate the atomic state for this CRTC
* @atomic_destroy_state: destroy an atomic state for this CRTC
* @atomic_set_property: set a property on an atomic state for this CRTC
+ * (do not call directly, use drm_atomic_crtc_set_property())
+ * @atomic_get_property: get a property on an atomic state for this CRTC
+ * (do not call directly, use drm_atomic_crtc_get_property())
*
* The drm_crtc_funcs structure is the central CRTC management structure
* in the DRM. Each CRTC controls one or more connectors (note that the name
@@ -351,6 +373,10 @@
struct drm_crtc_state *state,
struct drm_property *property,
uint64_t val);
+ int (*atomic_get_property)(struct drm_crtc *crtc,
+ const struct drm_crtc_state *state,
+ struct drm_property *property,
+ uint64_t *val);
};
/**
@@ -449,11 +475,14 @@
/**
* struct drm_connector_state - mutable connector state
+ * @connector: backpointer to the connector
* @crtc: CRTC to connect connector to, NULL if disabled
* @best_encoder: can be used by helpers and drivers to select the encoder
* @state: backpointer to global drm_atomic_state
*/
struct drm_connector_state {
+ struct drm_connector *connector;
+
struct drm_crtc *crtc; /* do not write directly, use drm_atomic_set_crtc_for_connector() */
struct drm_encoder *best_encoder;
@@ -463,7 +492,7 @@
/**
* struct drm_connector_funcs - control connectors on a given device
- * @dpms: set power state (see drm_crtc_funcs above)
+ * @dpms: set power state
* @save: save connector state
* @restore: restore connector state
* @reset: reset connector after state has been invalidated (e.g. resume)
@@ -475,6 +504,9 @@
* @atomic_duplicate_state: duplicate the atomic state for this connector
* @atomic_destroy_state: destroy an atomic state for this connector
* @atomic_set_property: set a property on an atomic state for this connector
+ * (do not call directly, use drm_atomic_connector_set_property())
+ * @atomic_get_property: get a property on an atomic state for this connector
+ * (do not call directly, use drm_atomic_connector_get_property())
*
* Each CRTC may have one or more connectors attached to it. The functions
* below allow the core DRM code to control connectors, enumerate available modes,
@@ -508,6 +540,10 @@
struct drm_connector_state *state,
struct drm_property *property,
uint64_t val);
+ int (*atomic_get_property)(struct drm_connector *connector,
+ const struct drm_connector_state *state,
+ struct drm_property *property,
+ uint64_t *val);
};
/**
@@ -693,6 +729,7 @@
/**
* struct drm_plane_state - mutable plane state
+ * @plane: backpointer to the plane
* @crtc: currently bound CRTC, NULL if disabled
* @fb: currently bound framebuffer
* @fence: optional fence to wait for before scanning out @fb
@@ -709,6 +746,8 @@
* @state: backpointer to global drm_atomic_state
*/
struct drm_plane_state {
+ struct drm_plane *plane;
+
struct drm_crtc *crtc; /* do not write directly, use drm_atomic_set_crtc_for_plane() */
struct drm_framebuffer *fb; /* do not write directly, use drm_atomic_set_fb_for_plane() */
struct fence *fence;
@@ -735,6 +774,9 @@
* @atomic_duplicate_state: duplicate the atomic state for this plane
* @atomic_destroy_state: destroy an atomic state for this plane
* @atomic_set_property: set a property on an atomic state for this plane
+ * (do not call directly, use drm_atomic_plane_set_property())
+ * @atomic_get_property: get a property on an atomic state for this plane
+ * (do not call directly, use drm_atomic_plane_get_property())
*/
struct drm_plane_funcs {
int (*update_plane)(struct drm_plane *plane,
@@ -758,6 +800,10 @@
struct drm_plane_state *state,
struct drm_property *property,
uint64_t val);
+ int (*atomic_get_property)(struct drm_plane *plane,
+ const struct drm_plane_state *state,
+ struct drm_property *property,
+ uint64_t *val);
};
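
[Editor's note] A sketch of the new vfunc for a plane carrying one driver-specific property (the zpos property, the subclassed state and the foo_* names are all assumptions):

    struct foo_plane_state {
            struct drm_plane_state base;
            unsigned int zpos;              /* hypothetical driver property */
    };

    static struct drm_property *foo_zpos_property; /* created at driver init */

    static int foo_plane_atomic_get_property(struct drm_plane *plane,
                                             const struct drm_plane_state *state,
                                             struct drm_property *property,
                                             uint64_t *val)
    {
            const struct foo_plane_state *fstate =
                    container_of(state, const struct foo_plane_state, base);

            if (property == foo_zpos_property) {
                    *val = fstate->zpos;
                    return 0;
            }
            return -EINVAL;                 /* unknown property */
    }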
enum drm_plane_type {
@@ -856,7 +902,7 @@
/**
 * struct drm_atomic_state - the global state object for atomic updates
* @dev: parent DRM device
- * @flags: state flags like async update
+ * @allow_modeset: allow full modeset
* @planes: pointer to array of plane pointers
* @plane_states: pointer to array of plane states pointers
* @crtcs: pointer to array of CRTC pointers
@@ -868,7 +914,7 @@
*/
struct drm_atomic_state {
struct drm_device *dev;
- uint32_t flags;
+ bool allow_modeset : 1;
struct drm_plane **planes;
struct drm_plane_state **plane_states;
struct drm_crtc **crtcs;
@@ -1053,6 +1099,16 @@
struct drm_property *tile_property;
struct drm_property *plane_type_property;
struct drm_property *rotation_property;
+ struct drm_property *prop_src_x;
+ struct drm_property *prop_src_y;
+ struct drm_property *prop_src_w;
+ struct drm_property *prop_src_h;
+ struct drm_property *prop_crtc_x;
+ struct drm_property *prop_crtc_y;
+ struct drm_property *prop_crtc_w;
+ struct drm_property *prop_crtc_h;
+ struct drm_property *prop_fb_id;
+ struct drm_property *prop_crtc_id;
/* DVI-I properties */
struct drm_property *dvi_i_subconnector_property;
@@ -1191,6 +1247,8 @@
extern void drm_plane_cleanup(struct drm_plane *plane);
extern unsigned int drm_plane_index(struct drm_plane *plane);
extern void drm_plane_force_disable(struct drm_plane *plane);
+extern void drm_crtc_get_hv_timing(const struct drm_display_mode *mode,
+ int *hdisplay, int *vdisplay);
extern int drm_crtc_check_viewport(const struct drm_crtc *crtc,
int x, int y,
const struct drm_display_mode *mode,
@@ -1290,6 +1348,10 @@
extern int drm_mode_create_aspect_ratio_property(struct drm_device *dev);
extern int drm_mode_create_dirty_info_property(struct drm_device *dev);
extern int drm_mode_create_suggested_offset_properties(struct drm_device *dev);
+extern bool drm_property_change_valid_get(struct drm_property *property,
+ uint64_t value, struct drm_mode_object **ref);
+extern void drm_property_change_valid_put(struct drm_property *property,
+ struct drm_mode_object *ref);
extern int drm_mode_connector_attach_encoder(struct drm_connector *connector,
struct drm_encoder *encoder);
@@ -1381,6 +1443,8 @@
extern int drm_mode_plane_set_obj_prop(struct drm_plane *plane,
struct drm_property *property,
uint64_t value);
+extern int drm_mode_atomic_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file_priv);
extern void drm_fb_get_bpp_depth(uint32_t format, unsigned int *depth,
int *bpp);
diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h
index 7adbb65..e76828d 100644
--- a/include/drm/drm_crtc_helper.h
+++ b/include/drm/drm_crtc_helper.h
@@ -39,15 +39,28 @@
#include <linux/fb.h>
+#include <drm/drm_crtc.h>
+
enum mode_set_atomic {
LEAVE_ATOMIC_MODE_SET,
ENTER_ATOMIC_MODE_SET,
};
/**
- * drm_crtc_helper_funcs - helper operations for CRTCs
- * @mode_fixup: try to fixup proposed mode for this connector
+ * struct drm_crtc_helper_funcs - helper operations for CRTCs
+ * @dpms: set power state
+ * @prepare: prepare the CRTC, called before @mode_set
+ * @commit: commit changes to CRTC, called after @mode_set
+ * @mode_fixup: try to fixup proposed mode for this CRTC
* @mode_set: set this mode
+ * @mode_set_nofb: set mode only (no scanout buffer attached)
+ * @mode_set_base: update the scanout buffer
+ * @mode_set_base_atomic: non-blocking mode set (used for kgdb support)
+ * @load_lut: load color palette
+ * @disable: disable CRTC when no longer in use
+ * @atomic_check: check for validity of an atomic state
+ * @atomic_begin: begin atomic update
+ * @atomic_flush: flush atomic update
*
* The helper operations are called by the mid-layer CRTC helper.
*/
@@ -91,9 +104,17 @@
};
/**
- * drm_encoder_helper_funcs - helper operations for encoders
+ * struct drm_encoder_helper_funcs - helper operations for encoders
+ * @dpms: set power state
+ * @save: save encoder state
+ * @restore: restore encoder state
 * @mode_fixup: try to fixup proposed mode for this encoder
+ * @prepare: part of the disable sequence, called before the CRTC modeset
+ * @commit: called after the CRTC modeset
* @mode_set: set this mode
+ * @get_crtc: return CRTC that the encoder is currently attached to
+ * @detect: connection status detection
+ * @disable: disable encoder when not in use (overrides DPMS off)
*
* The helper operations are called by the mid-layer CRTC helper.
*/
@@ -119,9 +140,10 @@
};
/**
- * drm_connector_helper_funcs - helper operations for connectors
+ * struct drm_connector_helper_funcs - helper operations for connectors
* @get_modes: get mode list for this connector
- * @mode_valid (optional): is this mode valid on the given connector?
+ * @mode_valid: is this mode valid on the given connector? (optional)
+ * @best_encoder: return the preferred encoder for this connector
*
* The helper operations are called by the mid-layer CRTC helper.
*/
diff --git a/include/drm/drm_gem.h b/include/drm/drm_gem.h
index 780511a..1e6ae14 100644
--- a/include/drm/drm_gem.h
+++ b/include/drm/drm_gem.h
@@ -119,13 +119,6 @@
* simply leave it as NULL.
*/
struct dma_buf_attachment *import_attach;
-
- /**
- * dumb - created as dumb buffer
- * Whether the gem object was created using the dumb buffer interface
- * as such it may not be used for GPU rendering.
- */
- bool dumb;
};
void drm_gem_object_release(struct drm_gem_object *obj);
diff --git a/include/drm/drm_modes.h b/include/drm/drm_modes.h
index 321ae64..d92f6dd 100644
--- a/include/drm/drm_modes.h
+++ b/include/drm/drm_modes.h
@@ -90,6 +90,9 @@
#define CRTC_INTERLACE_HALVE_V (1 << 0) /* halve V values for interlacing */
#define CRTC_STEREO_DOUBLE (1 << 1) /* adjust timings for stereo modes */
+#define CRTC_NO_DBLSCAN (1 << 2) /* don't adjust doublescan */
+#define CRTC_NO_VSCAN (1 << 3) /* don't adjust vscan */
+#define CRTC_STEREO_DOUBLE_ONLY (CRTC_NO_DBLSCAN | CRTC_NO_VSCAN)
#define DRM_MODE_FLAG_3D_MAX DRM_MODE_FLAG_3D_SIDE_BY_SIDE_HALF
@@ -219,9 +222,9 @@
const struct drm_display_mode *mode2);
/* for use by the crtc helper probe functions */
-void drm_mode_validate_size(struct drm_device *dev,
- struct list_head *mode_list,
- int maxX, int maxY);
+enum drm_mode_status drm_mode_validate_basic(const struct drm_display_mode *mode);
+enum drm_mode_status drm_mode_validate_size(const struct drm_display_mode *mode,
+ int maxX, int maxY);
void drm_mode_prune_invalid(struct drm_device *dev,
struct list_head *mode_list, bool verbose);
void drm_mode_sort(struct list_head *mode_list);
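
[Editor's note] With validation now per-mode and status-returning, the caller side becomes a filter pass; a sketch of the probe-style pattern these prototypes imply (foo_prune_modes() is an illustrative wrapper, not code from this hunk):

    static void foo_prune_modes(struct drm_device *dev,
                                struct drm_connector *connector,
                                int maxX, int maxY)
    {
            struct drm_display_mode *mode;

            list_for_each_entry(mode, &connector->modes, head) {
                    mode->status = drm_mode_validate_basic(mode);
                    if (mode->status == MODE_OK)
                            mode->status = drm_mode_validate_size(mode,
                                                                  maxX, maxY);
            }
            drm_mode_prune_invalid(dev, &connector->modes, true);
    }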
diff --git a/include/dt-bindings/thermal/thermal.h b/include/dt-bindings/thermal/thermal.h
index 59822a9..b5e6b00 100644
--- a/include/dt-bindings/thermal/thermal.h
+++ b/include/dt-bindings/thermal/thermal.h
@@ -11,7 +11,7 @@
#define _DT_BINDINGS_THERMAL_THERMAL_H
/* On cooling devices upper and lower limits */
-#define THERMAL_NO_LIMIT (-1UL)
+#define THERMAL_NO_LIMIT (~0)
#endif
diff --git a/include/linux/audit.h b/include/linux/audit.h
index 0c04917..af84234 100644
--- a/include/linux/audit.h
+++ b/include/linux/audit.h
@@ -47,6 +47,7 @@
struct audit_krule {
int vers_ops;
+ u32 pflags;
u32 flags;
u32 listnr;
u32 action;
@@ -64,6 +65,9 @@
u64 prio;
};
+/* Flag to indicate legacy AUDIT_LOGINUID unset usage */
+#define AUDIT_LOGINUID_LEGACY 0x1
+
struct audit_field {
u32 type;
union {
diff --git a/include/linux/cpu_cooling.h b/include/linux/cpu_cooling.h
index c303d38..bd95527 100644
--- a/include/linux/cpu_cooling.h
+++ b/include/linux/cpu_cooling.h
@@ -50,7 +50,7 @@
of_cpufreq_cooling_register(struct device_node *np,
const struct cpumask *clip_cpus)
{
- return NULL;
+ return ERR_PTR(-ENOSYS);
}
#endif
@@ -65,13 +65,13 @@
static inline struct thermal_cooling_device *
cpufreq_cooling_register(const struct cpumask *clip_cpus)
{
- return NULL;
+ return ERR_PTR(-ENOSYS);
}
static inline struct thermal_cooling_device *
of_cpufreq_cooling_register(struct device_node *np,
const struct cpumask *clip_cpus)
{
- return NULL;
+ return ERR_PTR(-ENOSYS);
}
static inline
void cpufreq_cooling_unregister(struct thermal_cooling_device *cdev)
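
[Editor's note] Because the stubs now return ERR_PTR(-ENOSYS) rather than NULL, callers should test with IS_ERR() so the compiled-out case is handled uniformly; a minimal sketch (foo_register_cooling() is an assumed caller):

    #include <linux/cpu_cooling.h>
    #include <linux/err.h>

    static struct thermal_cooling_device *foo_register_cooling(void)
    {
            struct thermal_cooling_device *cdev;

            cdev = cpufreq_cooling_register(cpu_present_mask);
            if (IS_ERR(cdev)) {
                    /* -ENOSYS simply means cooling support is compiled out */
                    pr_warn("cpufreq cooling unavailable: %ld\n",
                            PTR_ERR(cdev));
                    return NULL;
            }
            return cdev;
    }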
diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h
index a07e087..ab70f3b 100644
--- a/include/linux/cpuidle.h
+++ b/include/linux/cpuidle.h
@@ -53,7 +53,6 @@
};
/* Idle State Flags */
-#define CPUIDLE_FLAG_TIME_INVALID (0x01) /* is residency time measurable? */
#define CPUIDLE_FLAG_COUPLED (0x02) /* state applies to multiple cpus */
#define CPUIDLE_FLAG_TIMER_STOP (0x04) /* timer is stopped on this state */
@@ -89,8 +88,6 @@
/**
* cpuidle_get_last_residency - retrieves the last state's residency time
* @dev: the target CPU
- *
- * NOTE: this value is invalid if CPUIDLE_FLAG_TIME_INVALID is set
*/
static inline int cpuidle_get_last_residency(struct cpuidle_device *dev)
{
diff --git a/include/linux/mfd/stmpe.h b/include/linux/mfd/stmpe.h
index 575a86c..f742b67 100644
--- a/include/linux/mfd/stmpe.h
+++ b/include/linux/mfd/stmpe.h
@@ -50,6 +50,8 @@
STMPE_IDX_GPEDR_MSB,
STMPE_IDX_GPRER_LSB,
STMPE_IDX_GPFER_LSB,
+ STMPE_IDX_GPPUR_LSB,
+ STMPE_IDX_GPPDR_LSB,
STMPE_IDX_GPAFR_U_MSB,
STMPE_IDX_IEGPIOR_LSB,
STMPE_IDX_ISGPIOR_LSB,
@@ -113,24 +115,6 @@
extern int stmpe_enable(struct stmpe *stmpe, unsigned int blocks);
extern int stmpe_disable(struct stmpe *stmpe, unsigned int blocks);
-struct matrix_keymap_data;
-
-/**
- * struct stmpe_keypad_platform_data - STMPE keypad platform data
- * @keymap_data: key map table and size
- * @debounce_ms: debounce interval, in ms. Maximum is
- * %STMPE_KEYPAD_MAX_DEBOUNCE.
- * @scan_count: number of key scanning cycles to confirm key data.
- * Maximum is %STMPE_KEYPAD_MAX_SCAN_COUNT.
- * @no_autorepeat: disable key autorepeat
- */
-struct stmpe_keypad_platform_data {
- const struct matrix_keymap_data *keymap_data;
- unsigned int debounce_ms;
- unsigned int scan_count;
- bool no_autorepeat;
-};
-
#define STMPE_GPIO_NOREQ_811_TOUCH (0xf0)
/**
@@ -199,7 +183,6 @@
* @irq_gpio: gpio number over which irq will be requested (significant only if
* irq_over_gpio is true)
* @gpio: GPIO-specific platform data
- * @keypad: keypad-specific platform data
* @ts: touchscreen-specific platform data
*/
struct stmpe_platform_data {
@@ -212,7 +195,6 @@
int autosleep_timeout;
struct stmpe_gpio_platform_data *gpio;
- struct stmpe_keypad_platform_data *keypad;
struct stmpe_ts_platform_data *ts;
};
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index c31f74d..679e6e90 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -1012,12 +1012,15 @@
* Callback to use for xmit over the accelerated station. This
* is used in place of ndo_start_xmit on accelerated net
* devices.
- * bool (*ndo_gso_check) (struct sk_buff *skb,
- * struct net_device *dev);
+ * netdev_features_t (*ndo_features_check) (struct sk_buff *skb,
+ * struct net_device *dev,
+ * netdev_features_t features);
* Called by core transmit path to determine if device is capable of
- * performing GSO on a packet. The device returns true if it is
- * able to GSO the packet, false otherwise. If the return value is
- * false the stack will do software GSO.
+ * performing offload operations on a given packet. This is to give
+ * the device an opportunity to implement any restrictions that cannot
+ * be otherwise expressed by feature flags. The check is called with
+ * the set of features that the stack has calculated and it returns
+ * those the driver believes to be appropriate.
*
* int (*ndo_switch_parent_id_get)(struct net_device *dev,
* struct netdev_phys_item_id *psid);
@@ -1178,8 +1181,9 @@
struct net_device *dev,
void *priv);
int (*ndo_get_lock_subclass)(struct net_device *dev);
- bool (*ndo_gso_check) (struct sk_buff *skb,
- struct net_device *dev);
+ netdev_features_t (*ndo_features_check) (struct sk_buff *skb,
+ struct net_device *dev,
+ netdev_features_t features);
#ifdef CONFIG_NET_SWITCHDEV
int (*ndo_switch_parent_id_get)(struct net_device *dev,
struct netdev_phys_item_id *psid);
@@ -3611,8 +3615,6 @@
netdev_features_t features)
{
return skb_is_gso(skb) && (!skb_gso_ok(skb, features) ||
- (dev->netdev_ops->ndo_gso_check &&
- !dev->netdev_ops->ndo_gso_check(skb, dev)) ||
unlikely((skb->ip_summed != CHECKSUM_PARTIAL) &&
(skb->ip_summed != CHECKSUM_UNNECESSARY)));
}
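
[Editor's note] A hedged sketch of a driver adopting the replacement hook, reusing the vxlan_features_check() helper this series adds in net/vxlan.h (the foo_* names are assumptions):

    #include <linux/netdevice.h>
    #include <net/vxlan.h>

    static netdev_features_t foo_features_check(struct sk_buff *skb,
                                                struct net_device *dev,
                                                netdev_features_t features)
    {
            /* drop checksum/GSO features for tunnels we can't offload */
            return vxlan_features_check(skb, features);
    }

    static const struct net_device_ops foo_netdev_ops = {
            /* ...other ndo callbacks elided... */
            .ndo_features_check = foo_features_check,
    };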
diff --git a/include/linux/netlink.h b/include/linux/netlink.h
index 9e572da..02fc86d 100644
--- a/include/linux/netlink.h
+++ b/include/linux/netlink.h
@@ -46,8 +46,8 @@
unsigned int flags;
void (*input)(struct sk_buff *skb);
struct mutex *cb_mutex;
- int (*bind)(int group);
- void (*unbind)(int group);
+ int (*bind)(struct net *net, int group);
+ void (*unbind)(struct net *net, int group);
bool (*compare)(struct net *net, struct sock *sk);
};
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index 7ea069c..4b3736f 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -251,7 +251,7 @@
#define FGP_NOWAIT 0x00000020
struct page *pagecache_get_page(struct address_space *mapping, pgoff_t offset,
- int fgp_flags, gfp_t cache_gfp_mask, gfp_t radix_gfp_mask);
+ int fgp_flags, gfp_t cache_gfp_mask);
/**
* find_get_page - find and get a page reference
@@ -266,13 +266,13 @@
static inline struct page *find_get_page(struct address_space *mapping,
pgoff_t offset)
{
- return pagecache_get_page(mapping, offset, 0, 0, 0);
+ return pagecache_get_page(mapping, offset, 0, 0);
}
static inline struct page *find_get_page_flags(struct address_space *mapping,
pgoff_t offset, int fgp_flags)
{
- return pagecache_get_page(mapping, offset, fgp_flags, 0, 0);
+ return pagecache_get_page(mapping, offset, fgp_flags, 0);
}
/**
@@ -292,7 +292,7 @@
static inline struct page *find_lock_page(struct address_space *mapping,
pgoff_t offset)
{
- return pagecache_get_page(mapping, offset, FGP_LOCK, 0, 0);
+ return pagecache_get_page(mapping, offset, FGP_LOCK, 0);
}
/**
@@ -319,7 +319,7 @@
{
return pagecache_get_page(mapping, offset,
FGP_LOCK|FGP_ACCESSED|FGP_CREAT,
- gfp_mask, gfp_mask & GFP_RECLAIM_MASK);
+ gfp_mask);
}
/**
@@ -340,8 +340,7 @@
{
return pagecache_get_page(mapping, index,
FGP_LOCK|FGP_CREAT|FGP_NOFS|FGP_NOWAIT,
- mapping_gfp_mask(mapping),
- GFP_NOFS);
+ mapping_gfp_mask(mapping));
}
struct page *find_get_entry(struct address_space *mapping, pgoff_t offset);
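
[Editor's note] After the merge there is a single gfp mask; the helper masks it with GFP_RECLAIM_MASK itself for the radix-tree insertion (see the mm/filemap.c hunk below). A sketch of a direct call equivalent to find_or_create_page():

    static struct page *foo_get_locked_page(struct address_space *mapping,
                                            pgoff_t index)
    {
            /* one mask now covers page allocation and tree insertion;
             * the page comes back locked with an elevated refcount */
            return pagecache_get_page(mapping, index,
                                      FGP_LOCK | FGP_ACCESSED | FGP_CREAT,
                                      mapping_gfp_mask(mapping));
    }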
diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h
index 6cd20d5..a9edab2 100644
--- a/include/linux/pm_domain.h
+++ b/include/linux/pm_domain.h
@@ -271,6 +271,8 @@
int __of_genpd_add_provider(struct device_node *np, genpd_xlate_t xlate,
void *data);
void of_genpd_del_provider(struct device_node *np);
+struct generic_pm_domain *of_genpd_get_from_provider(
+ struct of_phandle_args *genpdspec);
struct generic_pm_domain *__of_genpd_xlate_simple(
struct of_phandle_args *genpdspec,
@@ -288,6 +290,12 @@
}
static inline void of_genpd_del_provider(struct device_node *np) {}
+static inline struct generic_pm_domain *of_genpd_get_from_provider(
+ struct of_phandle_args *genpdspec)
+{
+ return NULL;
+}
+
#define __of_genpd_xlate_simple NULL
#define __of_genpd_xlate_onecell NULL
diff --git a/include/linux/thermal.h b/include/linux/thermal.h
index c611a02..fc52e30 100644
--- a/include/linux/thermal.h
+++ b/include/linux/thermal.h
@@ -38,7 +38,7 @@
#define THERMAL_CSTATE_INVALID -1UL
/* No upper/lower limit requirement */
-#define THERMAL_NO_LIMIT THERMAL_CSTATE_INVALID
+#define THERMAL_NO_LIMIT ((u32)~0)
/* Unit conversion macros */
#define KELVIN_TO_CELSIUS(t) (long)(((long)t-2732 >= 0) ? \
diff --git a/include/net/genetlink.h b/include/net/genetlink.h
index af10c2c..8412508 100644
--- a/include/net/genetlink.h
+++ b/include/net/genetlink.h
@@ -31,6 +31,9 @@
* do additional, common, filtering and return an error
* @post_doit: called after an operation's doit callback, it may
* undo operations done by pre_doit, for example release locks
+ * @mcast_bind: called when a socket is bound to the given multicast group
+ * (which is given as the offset into the family's groups array)
+ * @mcast_unbind: called when a socket is unbound from the given multicast
+ * group
* @attrbuf: buffer to store parsed attributes
* @family_list: family list
* @mcgrps: multicast groups used by this family (private)
@@ -53,6 +56,8 @@
void (*post_doit)(const struct genl_ops *ops,
struct sk_buff *skb,
struct genl_info *info);
+ int (*mcast_bind)(struct net *net, int group);
+ void (*mcast_unbind)(struct net *net, int group);
struct nlattr ** attrbuf; /* private */
const struct genl_ops * ops; /* private */
const struct genl_multicast_group *mcgrps; /* private */
@@ -395,11 +400,11 @@
}
static inline int genl_has_listeners(struct genl_family *family,
- struct sock *sk, unsigned int group)
+ struct net *net, unsigned int group)
{
if (WARN_ON_ONCE(group >= family->n_mcgrps))
return -EINVAL;
group = family->mcgrp_offset + group;
- return netlink_has_listeners(sk, group);
+ return netlink_has_listeners(net->genl_sock, group);
}
#endif /* __NET_GENERIC_NETLINK_H */
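
[Editor's note] A sketch of a generic netlink family using the new per-net multicast hooks (the family name and the namespace policy are placeholders):

    #include <net/genetlink.h>
    #include <net/net_namespace.h>

    static int foo_mcast_bind(struct net *net, int group)
    {
            /* e.g. restrict the group to the initial namespace */
            if (net != &init_net)
                    return -EPERM;
            return 0;
    }

    static void foo_mcast_unbind(struct net *net, int group)
    {
            /* undo any per-net state taken in foo_mcast_bind() */
    }

    static struct genl_family foo_genl_family = {
            .name         = "foo",
            .version      = 1,
            .mcast_bind   = foo_mcast_bind,
            .mcast_unbind = foo_mcast_unbind,
    };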
diff --git a/include/net/neighbour.h b/include/net/neighbour.h
index eb070b3..76f7084 100644
--- a/include/net/neighbour.h
+++ b/include/net/neighbour.h
@@ -190,7 +190,6 @@
struct neigh_table {
- struct neigh_table *next;
int family;
int entry_size;
int key_len;
diff --git a/include/net/vxlan.h b/include/net/vxlan.h
index 57cccd0..903461a 100644
--- a/include/net/vxlan.h
+++ b/include/net/vxlan.h
@@ -1,6 +1,9 @@
#ifndef __NET_VXLAN_H
#define __NET_VXLAN_H 1
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/if_vlan.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/udp.h>
@@ -51,16 +54,33 @@
__be32 src, __be32 dst, __u8 tos, __u8 ttl, __be16 df,
__be16 src_port, __be16 dst_port, __be32 vni, bool xnet);
-static inline bool vxlan_gso_check(struct sk_buff *skb)
+static inline netdev_features_t vxlan_features_check(struct sk_buff *skb,
+ netdev_features_t features)
{
- if ((skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL) &&
+ u8 l4_hdr = 0;
+
+ if (!skb->encapsulation)
+ return features;
+
+ switch (vlan_get_protocol(skb)) {
+ case htons(ETH_P_IP):
+ l4_hdr = ip_hdr(skb)->protocol;
+ break;
+ case htons(ETH_P_IPV6):
+ l4_hdr = ipv6_hdr(skb)->nexthdr;
+ break;
+ default:
+ return features;
+ }
+
+ if ((l4_hdr == IPPROTO_UDP) &&
(skb->inner_protocol_type != ENCAP_TYPE_ETHER ||
skb->inner_protocol != htons(ETH_P_TEB) ||
(skb_inner_mac_header(skb) - skb_transport_header(skb) !=
sizeof(struct udphdr) + sizeof(struct vxlanhdr))))
- return false;
+ return features & ~(NETIF_F_ALL_CSUM | NETIF_F_GSO_MASK);
- return true;
+ return features;
}
/* IP header + UDP + VXLAN + Ethernet header */
diff --git a/include/sound/pcm.h b/include/sound/pcm.h
index 1e7f74a..b429b73 100644
--- a/include/sound/pcm.h
+++ b/include/sound/pcm.h
@@ -857,7 +857,7 @@
}
/**
- * params_channels - Get the sample rate from the hw params
+ * params_rate - Get the sample rate from the hw params
* @p: hw params
*/
static inline unsigned int params_rate(const struct snd_pcm_hw_params *p)
@@ -866,7 +866,7 @@
}
/**
- * params_channels - Get the period size (in frames) from the hw params
+ * params_period_size - Get the period size (in frames) from the hw params
* @p: hw params
*/
static inline unsigned int params_period_size(const struct snd_pcm_hw_params *p)
@@ -875,7 +875,7 @@
}
/**
- * params_channels - Get the number of periods from the hw params
+ * params_periods - Get the number of periods from the hw params
* @p: hw params
*/
static inline unsigned int params_periods(const struct snd_pcm_hw_params *p)
@@ -884,7 +884,7 @@
}
/**
- * params_channels - Get the buffer size (in frames) from the hw params
+ * params_buffer_size - Get the buffer size (in frames) from the hw params
* @p: hw params
*/
static inline unsigned int params_buffer_size(const struct snd_pcm_hw_params *p)
@@ -893,7 +893,7 @@
}
/**
- * params_channels - Get the buffer size (in bytes) from the hw params
+ * params_buffer_bytes - Get the buffer size (in bytes) from the hw params
* @p: hw params
*/
static inline unsigned int params_buffer_bytes(const struct snd_pcm_hw_params *p)
diff --git a/include/uapi/drm/drm.h b/include/uapi/drm/drm.h
index b0b8556..01b2d6d 100644
--- a/include/uapi/drm/drm.h
+++ b/include/uapi/drm/drm.h
@@ -654,6 +654,13 @@
*/
#define DRM_CLIENT_CAP_UNIVERSAL_PLANES 2
+/**
+ * DRM_CLIENT_CAP_ATOMIC
+ *
+ * If set to 1, the DRM core will expose atomic properties to userspace.
+ */
+#define DRM_CLIENT_CAP_ATOMIC 3
+
/** DRM_IOCTL_SET_CLIENT_CAP ioctl argument type */
struct drm_set_client_cap {
__u64 capability;
@@ -777,6 +784,7 @@
#define DRM_IOCTL_MODE_OBJ_GETPROPERTIES DRM_IOWR(0xB9, struct drm_mode_obj_get_properties)
#define DRM_IOCTL_MODE_OBJ_SETPROPERTY DRM_IOWR(0xBA, struct drm_mode_obj_set_property)
#define DRM_IOCTL_MODE_CURSOR2 DRM_IOWR(0xBB, struct drm_mode_cursor2)
+#define DRM_IOCTL_MODE_ATOMIC DRM_IOWR(0xBC, struct drm_mode_atomic)
/**
* Device specific ioctls should only be in their respective headers
diff --git a/include/uapi/drm/drm_mode.h b/include/uapi/drm/drm_mode.h
index 86574b0..ca788e0 100644
--- a/include/uapi/drm/drm_mode.h
+++ b/include/uapi/drm/drm_mode.h
@@ -272,6 +272,13 @@
#define DRM_MODE_PROP_OBJECT DRM_MODE_PROP_TYPE(1)
#define DRM_MODE_PROP_SIGNED_RANGE DRM_MODE_PROP_TYPE(2)
+/* The PROP_ATOMIC flag is used to hide properties from userspace that
+ * is not aware of atomic properties. This is mostly to work around
+ * older userspace (DDX drivers) that read/write each prop they find,
+ * without being aware that this could be triggering a lengthy modeset.
+ */
+#define DRM_MODE_PROP_ATOMIC 0x80000000
+
struct drm_mode_property_enum {
__u64 value;
char name[DRM_PROP_NAME_LEN];
@@ -338,7 +345,7 @@
/*
* In case of planar formats, this ioctl allows up to 4
- * buffer objects with offets and pitches per plane.
+ * buffer objects with offsets and pitches per plane.
* The pitch and offset order is dictated by the fourcc,
* e.g. NV12 (http://fourcc.org/yuv.php#NV12) is described as:
*
@@ -346,9 +353,9 @@
* followed by an interleaved U/V plane containing
* 8 bit 2x2 subsampled colour difference samples.
*
- * So it would consist of Y as offset[0] and UV as
- * offeset[1]. Note that offset[0] will generally
- * be 0.
+ * So it would consist of Y as offsets[0] and UV as
+ * offsets[1]. Note that offsets[0] will generally
+ * be 0 (but this is not required).
*/
__u32 handles[4];
__u32 pitches[4]; /* pitch for each plane */
@@ -519,4 +526,27 @@
uint32_t handle;
};
+/* page-flip flags are valid, plus: */
+#define DRM_MODE_ATOMIC_TEST_ONLY 0x0100
+#define DRM_MODE_ATOMIC_NONBLOCK 0x0200
+#define DRM_MODE_ATOMIC_ALLOW_MODESET 0x0400
+
+#define DRM_MODE_ATOMIC_FLAGS (\
+ DRM_MODE_PAGE_FLIP_EVENT |\
+ DRM_MODE_PAGE_FLIP_ASYNC |\
+ DRM_MODE_ATOMIC_TEST_ONLY |\
+ DRM_MODE_ATOMIC_NONBLOCK |\
+ DRM_MODE_ATOMIC_ALLOW_MODESET)
+
+struct drm_mode_atomic {
+ __u32 flags;
+ __u32 count_objs;
+ __u64 objs_ptr;
+ __u64 count_props_ptr;
+ __u64 props_ptr;
+ __u64 prop_values_ptr;
+ __u64 reserved;
+ __u64 user_data;
+};
+
#endif
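
[Editor's note] To make the request layout concrete, a hedged userspace sketch that test-commits a single FB_ID property on one plane (fd, plane_id, fb_prop_id and fb_id are placeholders a real client discovers via the GETPLANERESOURCES/GETPROPERTIES ioctls; headers assume libdrm's include path):

    #include <stdint.h>
    #include <xf86drm.h>    /* drmIoctl() */
    #include <drm_mode.h>   /* struct drm_mode_atomic */

    static int foo_test_single_prop(int fd, uint32_t plane_id,
                                    uint32_t fb_prop_id, uint64_t fb_id)
    {
            uint32_t objs[]        = { plane_id };
            uint32_t count_props[] = { 1 };
            uint32_t props[]       = { fb_prop_id };
            uint64_t values[]      = { fb_id };
            struct drm_mode_atomic req = {
                    .flags           = DRM_MODE_ATOMIC_TEST_ONLY,
                    .count_objs      = 1,
                    .objs_ptr        = (uintptr_t)objs,
                    .count_props_ptr = (uintptr_t)count_props,
                    .props_ptr       = (uintptr_t)props,
                    .prop_values_ptr = (uintptr_t)values,
            };

            /* TEST_ONLY checks the request without touching the hardware */
            return drmIoctl(fd, DRM_IOCTL_MODE_ATOMIC, &req);
    }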
diff --git a/include/uapi/linux/in6.h b/include/uapi/linux/in6.h
index 74a2a17..79b12b0 100644
--- a/include/uapi/linux/in6.h
+++ b/include/uapi/linux/in6.h
@@ -149,7 +149,7 @@
/*
* IPV6 socket options
*/
-
+#if __UAPI_DEF_IPV6_OPTIONS
#define IPV6_ADDRFORM 1
#define IPV6_2292PKTINFO 2
#define IPV6_2292HOPOPTS 3
@@ -196,6 +196,7 @@
#define IPV6_IPSEC_POLICY 34
#define IPV6_XFRM_POLICY 35
+#endif
/*
* Multicast:
diff --git a/include/uapi/linux/libc-compat.h b/include/uapi/linux/libc-compat.h
index c140620..e28807a 100644
--- a/include/uapi/linux/libc-compat.h
+++ b/include/uapi/linux/libc-compat.h
@@ -69,6 +69,7 @@
#define __UAPI_DEF_SOCKADDR_IN6 0
#define __UAPI_DEF_IPV6_MREQ 0
#define __UAPI_DEF_IPPROTO_V6 0
+#define __UAPI_DEF_IPV6_OPTIONS 0
#else
@@ -82,6 +83,7 @@
#define __UAPI_DEF_SOCKADDR_IN6 1
#define __UAPI_DEF_IPV6_MREQ 1
#define __UAPI_DEF_IPPROTO_V6 1
+#define __UAPI_DEF_IPV6_OPTIONS 1
#endif /* _NETINET_IN_H */
@@ -103,6 +105,7 @@
#define __UAPI_DEF_SOCKADDR_IN6 1
#define __UAPI_DEF_IPV6_MREQ 1
#define __UAPI_DEF_IPPROTO_V6 1
+#define __UAPI_DEF_IPV6_OPTIONS 1
/* Definitions for xattr.h */
#define __UAPI_DEF_XATTR 1
diff --git a/include/uapi/linux/virtio_ring.h b/include/uapi/linux/virtio_ring.h
index 61c818a..a3318f3 100644
--- a/include/uapi/linux/virtio_ring.h
+++ b/include/uapi/linux/virtio_ring.h
@@ -101,6 +101,13 @@
struct vring_used *used;
};
+/* Alignment requirements for vring elements.
+ * When using pre-virtio 1.0 layout, these fall out naturally.
+ */
+#define VRING_AVAIL_ALIGN_SIZE 2
+#define VRING_USED_ALIGN_SIZE 4
+#define VRING_DESC_ALIGN_SIZE 16
+
/* The standard layout for the ring is a continuous chunk of memory which looks
* like this. We assume num is a power of 2.
*
diff --git a/kernel/audit.c b/kernel/audit.c
index f8f203e..72ab759 100644
--- a/kernel/audit.c
+++ b/kernel/audit.c
@@ -429,7 +429,7 @@
* This function doesn't consume an skb as might be expected since it has to
* copy it anyways.
*/
-static void kauditd_send_multicast_skb(struct sk_buff *skb)
+static void kauditd_send_multicast_skb(struct sk_buff *skb, gfp_t gfp_mask)
{
struct sk_buff *copy;
struct audit_net *aunet = net_generic(&init_net, audit_net_id);
@@ -448,11 +448,11 @@
* no reason for new multicast clients to continue with this
* non-compliance.
*/
- copy = skb_copy(skb, GFP_KERNEL);
+ copy = skb_copy(skb, gfp_mask);
if (!copy)
return;
- nlmsg_multicast(sock, copy, 0, AUDIT_NLGRP_READLOG, GFP_KERNEL);
+ nlmsg_multicast(sock, copy, 0, AUDIT_NLGRP_READLOG, gfp_mask);
}
/*
@@ -1100,7 +1100,7 @@
}
/* Run custom bind function on netlink socket group connect or bind requests. */
-static int audit_bind(int group)
+static int audit_bind(struct net *net, int group)
{
if (!capable(CAP_AUDIT_READ))
return -EPERM;
@@ -1940,7 +1940,7 @@
struct nlmsghdr *nlh = nlmsg_hdr(ab->skb);
nlh->nlmsg_len = ab->skb->len;
- kauditd_send_multicast_skb(ab->skb);
+ kauditd_send_multicast_skb(ab->skb, ab->gfp_mask);
/*
* The original kaudit unicast socket sends up messages with
diff --git a/kernel/auditfilter.c b/kernel/auditfilter.c
index 3598e13..4f68a32 100644
--- a/kernel/auditfilter.c
+++ b/kernel/auditfilter.c
@@ -442,19 +442,7 @@
if ((f->type == AUDIT_LOGINUID) && (f->val == AUDIT_UID_UNSET)) {
f->type = AUDIT_LOGINUID_SET;
f->val = 0;
- }
-
- if ((f->type == AUDIT_PID) || (f->type == AUDIT_PPID)) {
- struct pid *pid;
- rcu_read_lock();
- pid = find_vpid(f->val);
- if (!pid) {
- rcu_read_unlock();
- err = -ESRCH;
- goto exit_free;
- }
- f->val = pid_nr(pid);
- rcu_read_unlock();
+ entry->rule.pflags |= AUDIT_LOGINUID_LEGACY;
}
err = audit_field_valid(entry, f);
@@ -630,6 +618,13 @@
data->buflen += data->values[i] =
audit_pack_string(&bufp, krule->filterkey);
break;
+ case AUDIT_LOGINUID_SET:
+ if (krule->pflags & AUDIT_LOGINUID_LEGACY && !f->val) {
+ data->fields[i] = AUDIT_LOGINUID;
+ data->values[i] = AUDIT_UID_UNSET;
+ break;
+ }
+ /* fallthrough if set */
default:
data->values[i] = f->val;
}
@@ -646,6 +641,7 @@
int i;
if (a->flags != b->flags ||
+ a->pflags != b->pflags ||
a->listnr != b->listnr ||
a->action != b->action ||
a->field_count != b->field_count)
@@ -764,6 +760,7 @@
new = &entry->rule;
new->vers_ops = old->vers_ops;
new->flags = old->flags;
+ new->pflags = old->pflags;
new->listnr = old->listnr;
new->action = old->action;
for (i = 0; i < AUDIT_BITMASK_SIZE; i++)
diff --git a/kernel/auditsc.c b/kernel/auditsc.c
index c75522a..072566d 100644
--- a/kernel/auditsc.c
+++ b/kernel/auditsc.c
@@ -72,6 +72,8 @@
#include <linux/fs_struct.h>
#include <linux/compat.h>
#include <linux/ctype.h>
+#include <linux/string.h>
+#include <uapi/linux/limits.h>
#include "audit.h"
@@ -1861,8 +1863,7 @@
}
list_for_each_entry_reverse(n, &context->names_list, list) {
- /* does the name pointer match? */
- if (!n->name || n->name->name != name->name)
+ if (!n->name || strcmp(n->name->name, name->name))
continue;
/* match the correct record type */
@@ -1877,12 +1878,48 @@
}
out_alloc:
- /* unable to find the name from a previous getname(). Allocate a new
- * anonymous entry.
- */
- n = audit_alloc_name(context, AUDIT_TYPE_NORMAL);
+ /* unable to find an entry with both a matching name and type */
+ n = audit_alloc_name(context, AUDIT_TYPE_UNKNOWN);
if (!n)
return;
+ /* unfortunately, while we may have a path name to record with the
+ * inode, we can't always rely on the string lasting until the end of
+ * the syscall, so we need to create our own copy. It may fail due to
+ * memory allocation issues, but we do our best. */
+ if (name) {
+ /* we can't use getname_kernel() due to size limits */
+ size_t len = strlen(name->name) + 1;
+ struct filename *new = __getname();
+
+ if (unlikely(!new))
+ goto out;
+
+ if (len <= (PATH_MAX - sizeof(*new))) {
+ new->name = (char *)(new) + sizeof(*new);
+ new->separate = false;
+ } else if (len <= PATH_MAX) {
+ /* this looks odd, but is due to final_putname() */
+ struct filename *new2;
+
+ new2 = kmalloc(sizeof(*new2), GFP_KERNEL);
+ if (unlikely(!new2)) {
+ __putname(new);
+ goto out;
+ }
+ new2->name = (char *)new;
+ new2->separate = true;
+ new = new2;
+ } else {
+ /* we should never get here, but let's be safe */
+ __putname(new);
+ goto out;
+ }
+ strlcpy((char *)new->name, name->name, len);
+ new->uptr = NULL;
+ new->aname = n;
+ n->name = new;
+ n->name_put = true;
+ }
out:
if (parent) {
n->name_len = n->name ? parent_len(n->name->name) : AUDIT_NAME_FULL;
diff --git a/mm/filemap.c b/mm/filemap.c
index bd8543c..673e458 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -1046,8 +1046,7 @@
* @mapping: the address_space to search
* @offset: the page index
* @fgp_flags: PCG flags
- * @cache_gfp_mask: gfp mask to use for the page cache data page allocation
- * @radix_gfp_mask: gfp mask to use for radix tree node allocation
+ * @gfp_mask: gfp mask to use for the page cache data page allocation
*
* Looks up the page cache slot at @mapping & @offset.
*
@@ -1056,11 +1055,9 @@
* FGP_ACCESSED: the page will be marked accessed
* FGP_LOCK: Page is return locked
* FGP_CREAT: If page is not present then a new page is allocated using
- * @cache_gfp_mask and added to the page cache and the VM's LRU
- * list. If radix tree nodes are allocated during page cache
- * insertion then @radix_gfp_mask is used. The page is returned
- * locked and with an increased refcount. Otherwise, %NULL is
- * returned.
+ * @gfp_mask and added to the page cache and the VM's LRU
+ * list. The page is returned locked and with an increased
+ * refcount. Otherwise, %NULL is returned.
*
* If FGP_LOCK or FGP_CREAT are specified then the function may sleep even
* if the GFP flags specified for FGP_CREAT are atomic.
@@ -1068,7 +1065,7 @@
* If there is a page cache page, it is returned with an increased refcount.
*/
struct page *pagecache_get_page(struct address_space *mapping, pgoff_t offset,
- int fgp_flags, gfp_t cache_gfp_mask, gfp_t radix_gfp_mask)
+ int fgp_flags, gfp_t gfp_mask)
{
struct page *page;
@@ -1105,13 +1102,11 @@
if (!page && (fgp_flags & FGP_CREAT)) {
int err;
if ((fgp_flags & FGP_WRITE) && mapping_cap_account_dirty(mapping))
- cache_gfp_mask |= __GFP_WRITE;
- if (fgp_flags & FGP_NOFS) {
- cache_gfp_mask &= ~__GFP_FS;
- radix_gfp_mask &= ~__GFP_FS;
- }
+ gfp_mask |= __GFP_WRITE;
+ if (fgp_flags & FGP_NOFS)
+ gfp_mask &= ~__GFP_FS;
- page = __page_cache_alloc(cache_gfp_mask);
+ page = __page_cache_alloc(gfp_mask);
if (!page)
return NULL;
@@ -1122,7 +1117,8 @@
if (fgp_flags & FGP_ACCESSED)
__SetPageReferenced(page);
- err = add_to_page_cache_lru(page, mapping, offset, radix_gfp_mask);
+ err = add_to_page_cache_lru(page, mapping, offset,
+ gfp_mask & GFP_RECLAIM_MASK);
if (unlikely(err)) {
page_cache_release(page);
page = NULL;
@@ -2443,8 +2439,7 @@
fgp_flags |= FGP_NOFS;
page = pagecache_get_page(mapping, index, fgp_flags,
- mapping_gfp_mask(mapping),
- GFP_KERNEL);
+ mapping_gfp_mask(mapping));
if (page)
wait_for_stable_page(page);
diff --git a/mm/memory.c b/mm/memory.c
index 649e7d44..ca920d1 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2378,12 +2378,12 @@
details.last_index = ULONG_MAX;
- i_mmap_lock_read(mapping);
+ i_mmap_lock_write(mapping);
if (unlikely(!RB_EMPTY_ROOT(&mapping->i_mmap)))
unmap_mapping_range_tree(&mapping->i_mmap, &details);
if (unlikely(!list_empty(&mapping->i_mmap_nonlinear)))
unmap_mapping_range_list(&mapping->i_mmap_nonlinear, &details);
- i_mmap_unlock_read(mapping);
+ i_mmap_unlock_write(mapping);
}
EXPORT_SYMBOL(unmap_mapping_range);
diff --git a/net/batman-adv/fragmentation.c b/net/batman-adv/fragmentation.c
index fc1835c..00f9e14 100644
--- a/net/batman-adv/fragmentation.c
+++ b/net/batman-adv/fragmentation.c
@@ -251,7 +251,7 @@
kfree(entry);
/* Make room for the rest of the fragments. */
- if (pskb_expand_head(skb_out, 0, size - skb->len, GFP_ATOMIC) < 0) {
+ if (pskb_expand_head(skb_out, 0, size - skb_out->len, GFP_ATOMIC) < 0) {
kfree_skb(skb_out);
skb_out = NULL;
goto free;
@@ -434,7 +434,7 @@
* fragments larger than BATADV_FRAG_MAX_FRAG_SIZE
*/
mtu = min_t(unsigned, mtu, BATADV_FRAG_MAX_FRAG_SIZE);
- max_fragment_size = (mtu - header_size - ETH_HLEN);
+ max_fragment_size = mtu - header_size;
max_packet_size = max_fragment_size * BATADV_FRAG_MAX_FRAGMENTS;
/* Don't even try to fragment, if we need more than 16 fragments */
diff --git a/net/batman-adv/gateway_client.c b/net/batman-adv/gateway_client.c
index 90cff58..e0bcf9e 100644
--- a/net/batman-adv/gateway_client.c
+++ b/net/batman-adv/gateway_client.c
@@ -810,7 +810,7 @@
goto out;
gw_node = batadv_gw_node_get(bat_priv, orig_dst_node);
- if (!gw_node->bandwidth_down == 0)
+ if (!gw_node)
goto out;
switch (atomic_read(&bat_priv->gw_mode)) {
diff --git a/net/bluetooth/6lowpan.c b/net/bluetooth/6lowpan.c
index 76617be..c989253 100644
--- a/net/bluetooth/6lowpan.c
+++ b/net/bluetooth/6lowpan.c
@@ -390,7 +390,6 @@
drop:
dev->stats.rx_dropped++;
- kfree_skb(skb);
return NET_RX_DROP;
}
diff --git a/net/bluetooth/bnep/core.c b/net/bluetooth/bnep/core.c
index 85bcc21..ce82722d 100644
--- a/net/bluetooth/bnep/core.c
+++ b/net/bluetooth/bnep/core.c
@@ -533,6 +533,9 @@
BT_DBG("");
+ if (!l2cap_is_socket(sock))
+ return -EBADFD;
+
baswap((void *) dst, &l2cap_pi(sock->sk)->chan->dst);
baswap((void *) src, &l2cap_pi(sock->sk)->chan->src);
diff --git a/net/bluetooth/cmtp/core.c b/net/bluetooth/cmtp/core.c
index 67fe5e8..278a194 100644
--- a/net/bluetooth/cmtp/core.c
+++ b/net/bluetooth/cmtp/core.c
@@ -334,6 +334,9 @@
BT_DBG("");
+ if (!l2cap_is_socket(sock))
+ return -EBADFD;
+
session = kzalloc(sizeof(struct cmtp_session), GFP_KERNEL);
if (!session)
return -ENOMEM;
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index 39a5c8a..3f2e8b8 100644
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
@@ -242,7 +242,8 @@
if (rp->status)
return;
- if (test_bit(HCI_SETUP, &hdev->dev_flags))
+ if (test_bit(HCI_SETUP, &hdev->dev_flags) ||
+ test_bit(HCI_CONFIG, &hdev->dev_flags))
memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH);
}
@@ -509,7 +510,8 @@
if (rp->status)
return;
- if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
+ if (test_bit(HCI_SETUP, &hdev->dev_flags) ||
+ test_bit(HCI_CONFIG, &hdev->dev_flags)) {
hdev->hci_ver = rp->hci_ver;
hdev->hci_rev = __le16_to_cpu(rp->hci_rev);
hdev->lmp_ver = rp->lmp_ver;
@@ -528,7 +530,8 @@
if (rp->status)
return;
- if (test_bit(HCI_SETUP, &hdev->dev_flags))
+ if (test_bit(HCI_SETUP, &hdev->dev_flags) ||
+ test_bit(HCI_CONFIG, &hdev->dev_flags))
memcpy(hdev->commands, rp->commands, sizeof(hdev->commands));
}
@@ -2194,7 +2197,12 @@
return;
}
- if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags) &&
+ /* Require HCI_CONNECTABLE or a whitelist entry to accept the
+ * connection. These features are only touched through mgmt so
+ * only do the checks if HCI_MGMT is set.
+ */
+ if (test_bit(HCI_MGMT, &hdev->dev_flags) &&
+ !test_bit(HCI_CONNECTABLE, &hdev->dev_flags) &&
!hci_bdaddr_list_lookup(&hdev->whitelist, &ev->bdaddr,
BDADDR_BREDR)) {
hci_reject_conn(hdev, &ev->bdaddr);
diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c
index cc25d0b..07348e1 100644
--- a/net/bluetooth/hidp/core.c
+++ b/net/bluetooth/hidp/core.c
@@ -1314,13 +1314,14 @@
{
struct hidp_session *session;
struct l2cap_conn *conn;
- struct l2cap_chan *chan = l2cap_pi(ctrl_sock->sk)->chan;
+ struct l2cap_chan *chan;
int ret;
ret = hidp_verify_sockets(ctrl_sock, intr_sock);
if (ret)
return ret;
+ chan = l2cap_pi(ctrl_sock->sk)->chan;
conn = NULL;
l2cap_chan_lock(chan);
if (chan->conn)
diff --git a/net/core/dev.c b/net/core/dev.c
index f411c28..683d493 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1694,6 +1694,7 @@
skb_scrub_packet(skb, true);
skb->protocol = eth_type_trans(skb, dev);
+ skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
return 0;
}
@@ -2522,7 +2523,7 @@
/* If MPLS offload request, verify we are testing hardware MPLS features
* instead of standard features for the netdev.
*/
-#ifdef CONFIG_NET_MPLS_GSO
+#if IS_ENABLED(CONFIG_NET_MPLS_GSO)
static netdev_features_t net_mpls_features(struct sk_buff *skb,
netdev_features_t features,
__be16 type)
@@ -2562,7 +2563,7 @@
netdev_features_t netif_skb_features(struct sk_buff *skb)
{
- const struct net_device *dev = skb->dev;
+ struct net_device *dev = skb->dev;
netdev_features_t features = dev->features;
u16 gso_segs = skb_shinfo(skb)->gso_segs;
__be16 protocol = skb->protocol;
@@ -2570,11 +2571,21 @@
if (gso_segs > dev->gso_max_segs || gso_segs < dev->gso_min_segs)
features &= ~NETIF_F_GSO_MASK;
- if (protocol == htons(ETH_P_8021Q) || protocol == htons(ETH_P_8021AD)) {
- struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
- protocol = veh->h_vlan_encapsulated_proto;
- } else if (!vlan_tx_tag_present(skb)) {
- return harmonize_features(skb, features);
+ /* If encapsulation offload request, verify we are testing
+ * hardware encapsulation features instead of standard
+ * features for the netdev
+ */
+ if (skb->encapsulation)
+ features &= dev->hw_enc_features;
+
+ if (!vlan_tx_tag_present(skb)) {
+ if (unlikely(protocol == htons(ETH_P_8021Q) ||
+ protocol == htons(ETH_P_8021AD))) {
+ struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
+ protocol = veh->h_vlan_encapsulated_proto;
+ } else {
+ goto finalize;
+ }
}
features = netdev_intersect_features(features,
@@ -2591,6 +2602,11 @@
NETIF_F_HW_VLAN_CTAG_TX |
NETIF_F_HW_VLAN_STAG_TX);
+finalize:
+ if (dev->netdev_ops->ndo_features_check)
+ features &= dev->netdev_ops->ndo_features_check(skb, dev,
+ features);
+
return harmonize_features(skb, features);
}
EXPORT_SYMBOL(netif_skb_features);
@@ -2661,19 +2677,12 @@
if (unlikely(!skb))
goto out_null;
- /* If encapsulation offload request, verify we are testing
- * hardware encapsulation features instead of standard
- * features for the netdev
- */
- if (skb->encapsulation)
- features &= dev->hw_enc_features;
-
if (netif_needs_gso(dev, skb, features)) {
struct sk_buff *segs;
segs = skb_gso_segment(skb, features);
if (IS_ERR(segs)) {
- segs = NULL;
+ goto out_kfree_skb;
} else if (segs) {
consume_skb(skb);
skb = segs;
@@ -4557,6 +4566,68 @@
}
EXPORT_SYMBOL(netif_napi_del);
+static int napi_poll(struct napi_struct *n, struct list_head *repoll)
+{
+ void *have;
+ int work, weight;
+
+ list_del_init(&n->poll_list);
+
+ have = netpoll_poll_lock(n);
+
+ weight = n->weight;
+
+ /* This NAPI_STATE_SCHED test is for avoiding a race
+ * with netpoll's poll_napi(). Only the entity which
+ * obtains the lock and sees NAPI_STATE_SCHED set will
+ * actually make the ->poll() call. Therefore we avoid
+ * accidentally calling ->poll() when NAPI is not scheduled.
+ */
+ work = 0;
+ if (test_bit(NAPI_STATE_SCHED, &n->state)) {
+ work = n->poll(n, weight);
+ trace_napi_poll(n);
+ }
+
+ WARN_ON_ONCE(work > weight);
+
+ if (likely(work < weight))
+ goto out_unlock;
+
+ /* Drivers must not modify the NAPI state if they
+ * consume the entire weight. In such cases this code
+ * still "owns" the NAPI instance and therefore can
+ * move the instance around on the list at-will.
+ */
+ if (unlikely(napi_disable_pending(n))) {
+ napi_complete(n);
+ goto out_unlock;
+ }
+
+ if (n->gro_list) {
+ /* flush too old packets
+ * If HZ < 1000, flush all packets.
+ */
+ napi_gro_flush(n, HZ >= 1000);
+ }
+
+ /* Some drivers may have called napi_schedule
+ * prior to exhausting their budget.
+ */
+ if (unlikely(!list_empty(&n->poll_list))) {
+ pr_warn_once("%s: Budget exhausted after napi rescheduled\n",
+ n->dev ? n->dev->name : "backlog");
+ goto out_unlock;
+ }
+
+ list_add_tail(&n->poll_list, repoll);
+
+out_unlock:
+ netpoll_poll_unlock(have);
+
+ return work;
+}
+
static void net_rx_action(struct softirq_action *h)
{
struct softnet_data *sd = this_cpu_ptr(&softnet_data);
@@ -4564,74 +4635,34 @@
int budget = netdev_budget;
LIST_HEAD(list);
LIST_HEAD(repoll);
- void *have;
local_irq_disable();
list_splice_init(&sd->poll_list, &list);
local_irq_enable();
- while (!list_empty(&list)) {
+ for (;;) {
struct napi_struct *n;
- int work, weight;
+
+ if (list_empty(&list)) {
+ if (!sd_has_rps_ipi_waiting(sd) && list_empty(&repoll))
+ return;
+ break;
+ }
+
+ n = list_first_entry(&list, struct napi_struct, poll_list);
+ budget -= napi_poll(n, &repoll);
/* If softirq window is exhausted then punt.
* Allow this to run for 2 jiffies since which will allow
* an average latency of 1.5/HZ.
*/
- if (unlikely(budget <= 0 || time_after_eq(jiffies, time_limit)))
- goto softnet_break;
-
-
- n = list_first_entry(&list, struct napi_struct, poll_list);
- list_del_init(&n->poll_list);
-
- have = netpoll_poll_lock(n);
-
- weight = n->weight;
-
- /* This NAPI_STATE_SCHED test is for avoiding a race
- * with netpoll's poll_napi(). Only the entity which
- * obtains the lock and sees NAPI_STATE_SCHED set will
- * actually make the ->poll() call. Therefore we avoid
- * accidentally calling ->poll() when NAPI is not scheduled.
- */
- work = 0;
- if (test_bit(NAPI_STATE_SCHED, &n->state)) {
- work = n->poll(n, weight);
- trace_napi_poll(n);
+ if (unlikely(budget <= 0 ||
+ time_after_eq(jiffies, time_limit))) {
+ sd->time_squeeze++;
+ break;
}
-
- WARN_ON_ONCE(work > weight);
-
- budget -= work;
-
- /* Drivers must not modify the NAPI state if they
- * consume the entire weight. In such cases this code
- * still "owns" the NAPI instance and therefore can
- * move the instance around on the list at-will.
- */
- if (unlikely(work == weight)) {
- if (unlikely(napi_disable_pending(n))) {
- napi_complete(n);
- } else {
- if (n->gro_list) {
- /* flush too old packets
- * If HZ < 1000, flush all packets.
- */
- napi_gro_flush(n, HZ >= 1000);
- }
- list_add_tail(&n->poll_list, &repoll);
- }
- }
-
- netpoll_poll_unlock(have);
}
- if (!sd_has_rps_ipi_waiting(sd) &&
- list_empty(&list) &&
- list_empty(&repoll))
- return;
-out:
local_irq_disable();
list_splice_tail_init(&sd->poll_list, &list);
@@ -4641,12 +4672,6 @@
__raise_softirq_irqoff(NET_RX_SOFTIRQ);
net_rps_action_and_irq_enable(sd);
-
- return;
-
-softnet_break:
- sd->time_squeeze++;
- goto out;
}
struct netdev_adjacent {
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index ae13ef6..395c15b 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -4148,6 +4148,7 @@
skb->ignore_df = 0;
skb_dst_drop(skb);
skb->mark = 0;
+ skb_init_secmark(skb);
secpath_reset(skb);
nf_reset(skb);
nf_reset_trace(skb);
diff --git a/net/ipv4/geneve.c b/net/ipv4/geneve.c
index 95e47c9..394a200 100644
--- a/net/ipv4/geneve.c
+++ b/net/ipv4/geneve.c
@@ -122,14 +122,18 @@
int err;
skb = udp_tunnel_handle_offloads(skb, !gs->sock->sk->sk_no_check_tx);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
min_headroom = LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len
+ GENEVE_BASE_HLEN + opt_len + sizeof(struct iphdr)
+ (vlan_tx_tag_present(skb) ? VLAN_HLEN : 0);
err = skb_cow_head(skb, min_headroom);
- if (unlikely(err))
+ if (unlikely(err)) {
+ kfree_skb(skb);
return err;
+ }
skb = vlan_hwaccel_push_inside(skb);
if (unlikely(!skb))
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 5ff8780..9c0b54e 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -1387,6 +1387,28 @@
return 0;
}
+static void tcp_v6_fill_cb(struct sk_buff *skb, const struct ipv6hdr *hdr,
+ const struct tcphdr *th)
+{
+ /* This is tricky: we move IP6CB at its correct location into
+ * TCP_SKB_CB(). It must be done after xfrm6_policy_check(), because
+ * _decode_session6() uses IP6CB().
+ * barrier() makes sure compiler won't play aliasing games.
+ */
+ memmove(&TCP_SKB_CB(skb)->header.h6, IP6CB(skb),
+ sizeof(struct inet6_skb_parm));
+ barrier();
+
+ TCP_SKB_CB(skb)->seq = ntohl(th->seq);
+ TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
+ skb->len - th->doff*4);
+ TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
+ TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th);
+ TCP_SKB_CB(skb)->tcp_tw_isn = 0;
+ TCP_SKB_CB(skb)->ip_dsfield = ipv6_get_dsfield(hdr);
+ TCP_SKB_CB(skb)->sacked = 0;
+}
+
static int tcp_v6_rcv(struct sk_buff *skb)
{
const struct tcphdr *th;
@@ -1418,24 +1440,9 @@
th = tcp_hdr(skb);
hdr = ipv6_hdr(skb);
- /* This is tricky : We move IPCB at its correct location into TCP_SKB_CB()
- * barrier() makes sure compiler wont play fool^Waliasing games.
- */
- memmove(&TCP_SKB_CB(skb)->header.h6, IP6CB(skb),
- sizeof(struct inet6_skb_parm));
- barrier();
-
- TCP_SKB_CB(skb)->seq = ntohl(th->seq);
- TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
- skb->len - th->doff*4);
- TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
- TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th);
- TCP_SKB_CB(skb)->tcp_tw_isn = 0;
- TCP_SKB_CB(skb)->ip_dsfield = ipv6_get_dsfield(hdr);
- TCP_SKB_CB(skb)->sacked = 0;
sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest,
- tcp_v6_iif(skb));
+ inet6_iif(skb));
if (!sk)
goto no_tcp_socket;
@@ -1451,6 +1458,8 @@
if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
goto discard_and_relse;
+ tcp_v6_fill_cb(skb, hdr, th);
+
#ifdef CONFIG_TCP_MD5SIG
if (tcp_v6_inbound_md5_hash(sk, skb))
goto discard_and_relse;
@@ -1482,6 +1491,8 @@
if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
goto discard_it;
+ tcp_v6_fill_cb(skb, hdr, th);
+
if (skb->len < (th->doff<<2) || tcp_checksum_complete(skb)) {
csum_error:
TCP_INC_STATS_BH(net, TCP_MIB_CSUMERRORS);
@@ -1505,6 +1516,8 @@
goto discard_it;
}
+ tcp_v6_fill_cb(skb, hdr, th);
+
if (skb->len < (th->doff<<2)) {
inet_twsk_put(inet_twsk(sk));
goto bad_packet;
diff --git a/net/mpls/mpls_gso.c b/net/mpls/mpls_gso.c
index ca27837..349295d 100644
--- a/net/mpls/mpls_gso.c
+++ b/net/mpls/mpls_gso.c
@@ -31,10 +31,7 @@
SKB_GSO_TCPV6 |
SKB_GSO_UDP |
SKB_GSO_DODGY |
- SKB_GSO_TCP_ECN |
- SKB_GSO_GRE |
- SKB_GSO_GRE_CSUM |
- SKB_GSO_IPIP)))
+ SKB_GSO_TCP_ECN)))
goto out;
/* Setup inner SKB. */
diff --git a/net/netfilter/nfnetlink.c b/net/netfilter/nfnetlink.c
index 13c2e17..cde4a67 100644
--- a/net/netfilter/nfnetlink.c
+++ b/net/netfilter/nfnetlink.c
@@ -463,7 +463,7 @@
}
#ifdef CONFIG_MODULES
-static int nfnetlink_bind(int group)
+static int nfnetlink_bind(struct net *net, int group)
{
const struct nfnetlink_subsystem *ss;
int type;
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 074cf3e..84ea76c 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -1091,8 +1091,10 @@
mutex_unlock(&nl_sk_hash_lock);
netlink_table_grab();
- if (nlk_sk(sk)->subscriptions)
+ if (nlk_sk(sk)->subscriptions) {
__sk_del_bind_node(sk);
+ netlink_update_listeners(sk);
+ }
netlink_table_ungrab();
}
@@ -1139,8 +1141,8 @@
struct module *module = NULL;
struct mutex *cb_mutex;
struct netlink_sock *nlk;
- int (*bind)(int group);
- void (*unbind)(int group);
+ int (*bind)(struct net *net, int group);
+ void (*unbind)(struct net *net, int group);
int err = 0;
sock->state = SS_UNCONNECTED;
@@ -1226,8 +1228,8 @@
module_put(nlk->module);
- netlink_table_grab();
if (netlink_is_kernel(sk)) {
+ netlink_table_grab();
BUG_ON(nl_table[sk->sk_protocol].registered == 0);
if (--nl_table[sk->sk_protocol].registered == 0) {
struct listeners *old;
@@ -1241,11 +1243,16 @@
nl_table[sk->sk_protocol].flags = 0;
nl_table[sk->sk_protocol].registered = 0;
}
- } else if (nlk->subscriptions) {
- netlink_update_listeners(sk);
+ netlink_table_ungrab();
}
- netlink_table_ungrab();
+ if (nlk->netlink_unbind) {
+ int i;
+
+ for (i = 0; i < nlk->ngroups; i++)
+ if (test_bit(i, nlk->groups))
+ nlk->netlink_unbind(sock_net(sk), i + 1);
+ }
kfree(nlk->groups);
nlk->groups = NULL;
@@ -1410,9 +1417,10 @@
return err;
}
-static void netlink_unbind(int group, long unsigned int groups,
- struct netlink_sock *nlk)
+static void netlink_undo_bind(int group, long unsigned int groups,
+ struct sock *sk)
{
+ struct netlink_sock *nlk = nlk_sk(sk);
int undo;
if (!nlk->netlink_unbind)
@@ -1420,7 +1428,7 @@
for (undo = 0; undo < group; undo++)
if (test_bit(undo, &groups))
- nlk->netlink_unbind(undo);
+ nlk->netlink_unbind(sock_net(sk), undo);
}
static int netlink_bind(struct socket *sock, struct sockaddr *addr,
@@ -1458,10 +1466,10 @@
for (group = 0; group < nlk->ngroups; group++) {
if (!test_bit(group, &groups))
continue;
- err = nlk->netlink_bind(group);
+ err = nlk->netlink_bind(net, group);
if (!err)
continue;
- netlink_unbind(group, groups, nlk);
+ netlink_undo_bind(group, groups, sk);
return err;
}
}
@@ -1471,7 +1479,7 @@
netlink_insert(sk, net, nladdr->nl_pid) :
netlink_autobind(sock);
if (err) {
- netlink_unbind(nlk->ngroups, groups, nlk);
+ netlink_undo_bind(nlk->ngroups, groups, sk);
return err;
}
}
@@ -2122,7 +2130,7 @@
if (!val || val - 1 >= nlk->ngroups)
return -EINVAL;
if (optname == NETLINK_ADD_MEMBERSHIP && nlk->netlink_bind) {
- err = nlk->netlink_bind(val);
+ err = nlk->netlink_bind(sock_net(sk), val);
if (err)
return err;
}
@@ -2131,7 +2139,7 @@
optname == NETLINK_ADD_MEMBERSHIP);
netlink_table_ungrab();
if (optname == NETLINK_DROP_MEMBERSHIP && nlk->netlink_unbind)
- nlk->netlink_unbind(val);
+ nlk->netlink_unbind(sock_net(sk), val);
err = 0;
break;
diff --git a/net/netlink/af_netlink.h b/net/netlink/af_netlink.h
index b20a173..f123a88 100644
--- a/net/netlink/af_netlink.h
+++ b/net/netlink/af_netlink.h
@@ -39,8 +39,8 @@
struct mutex *cb_mutex;
struct mutex cb_def_mutex;
void (*netlink_rcv)(struct sk_buff *skb);
- int (*netlink_bind)(int group);
- void (*netlink_unbind)(int group);
+ int (*netlink_bind)(struct net *net, int group);
+ void (*netlink_unbind)(struct net *net, int group);
struct module *module;
#ifdef CONFIG_NETLINK_MMAP
struct mutex pg_vec_lock;
@@ -65,8 +65,8 @@
unsigned int groups;
struct mutex *cb_mutex;
struct module *module;
- int (*bind)(int group);
- void (*unbind)(int group);
+ int (*bind)(struct net *net, int group);
+ void (*unbind)(struct net *net, int group);
bool (*compare)(struct net *net, struct sock *sock);
int registered;
};
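The two hunks above change the netlink bind/unbind callback contract to carry the socket's network namespace. As a hedged sketch (not part of this series; my_bind, my_unbind and the demo subsystem are invented names), a kernel user of netlink_kernel_create() would now wire the callbacks up like this:

/* Hedged sketch: a hypothetical subsystem adopting the new per-netns
 * bind/unbind signature. All "my_" names are illustrative only. */
#include <linux/errno.h>
#include <linux/netlink.h>
#include <net/net_namespace.h>
#include <net/sock.h>

static int my_bind(struct net *net, int group)
{
	/* Refuse joins from non-initial namespaces, mirroring the
	 * netnsok check that genl_bind() performs below. */
	if (net != &init_net)
		return -ENOENT;
	return 0;
}

static void my_unbind(struct net *net, int group)
{
	/* per-namespace teardown for the multicast group */
}

static struct sock *my_create(struct net *net)
{
	struct netlink_kernel_cfg cfg = {
		.groups = 4,
		.bind   = my_bind,	/* int (*)(struct net *, int) */
		.unbind = my_unbind,	/* void (*)(struct net *, int) */
	};

	return netlink_kernel_create(net, NETLINK_USERSOCK, &cfg);
}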
diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c
index 76393f2..2e11061e 100644
--- a/net/netlink/genetlink.c
+++ b/net/netlink/genetlink.c
@@ -983,11 +983,67 @@
{ .name = "notify", },
};
+static int genl_bind(struct net *net, int group)
+{
+ int i, err = 0;
+
+ down_read(&cb_lock);
+ for (i = 0; i < GENL_FAM_TAB_SIZE; i++) {
+ struct genl_family *f;
+
+ list_for_each_entry(f, genl_family_chain(i), family_list) {
+ if (group >= f->mcgrp_offset &&
+ group < f->mcgrp_offset + f->n_mcgrps) {
+ int fam_grp = group - f->mcgrp_offset;
+
+ if (!f->netnsok && net != &init_net)
+ err = -ENOENT;
+ else if (f->mcast_bind)
+ err = f->mcast_bind(net, fam_grp);
+ else
+ err = 0;
+ break;
+ }
+ }
+ }
+ up_read(&cb_lock);
+
+ return err;
+}
+
+static void genl_unbind(struct net *net, int group)
+{
+ int i;
+ bool found = false;
+
+ down_read(&cb_lock);
+ for (i = 0; i < GENL_FAM_TAB_SIZE; i++) {
+ struct genl_family *f;
+
+ list_for_each_entry(f, genl_family_chain(i), family_list) {
+ if (group >= f->mcgrp_offset &&
+ group < f->mcgrp_offset + f->n_mcgrps) {
+ int fam_grp = group - f->mcgrp_offset;
+
+ if (f->mcast_unbind)
+ f->mcast_unbind(net, fam_grp);
+ found = true;
+ break;
+ }
+ }
+ }
+ up_read(&cb_lock);
+
+ WARN_ON(!found);
+}
+
static int __net_init genl_pernet_init(struct net *net)
{
struct netlink_kernel_cfg cfg = {
.input = genl_rcv,
.flags = NL_CFG_F_NONROOT_RECV,
+ .bind = genl_bind,
+ .unbind = genl_unbind,
};
/* we'll bump the group number right afterwards */
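genl_bind()/genl_unbind() fan the per-group events out to the owning family through mcast_bind/mcast_unbind hooks (added to struct genl_family elsewhere in this series, not shown here). A hedged sketch of a family opting in; the "demo" names are illustrative, not from this patch:

/* Hedged sketch: a generic netlink family receiving the new
 * per-group bind notifications. */
#include <net/genetlink.h>

static int demo_mcast_bind(struct net *net, int group)
{
	/* group is family-relative: 0 .. n_mcgrps - 1 */
	return 0;
}

static void demo_mcast_unbind(struct net *net, int group)
{
}

static struct genl_family demo_family = {
	.id           = GENL_ID_GENERATE,
	.name         = "demo",
	.version      = 1,
	.netnsok      = true,
	.mcast_bind   = demo_mcast_bind,
	.mcast_unbind = demo_mcast_unbind,
};

Registration is unchanged; the core derives the absolute group number from mcgrp_offset, which is why genl_bind() above translates back to fam_grp before calling the hook.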
diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c
index 764fdc3..770064c 100644
--- a/net/openvswitch/actions.c
+++ b/net/openvswitch/actions.c
@@ -147,7 +147,8 @@
hdr = eth_hdr(skb);
hdr->h_proto = mpls->mpls_ethertype;
- skb_set_inner_protocol(skb, skb->protocol);
+ if (!skb->inner_protocol)
+ skb_set_inner_protocol(skb, skb->protocol);
skb->protocol = mpls->mpls_ethertype;
invalidate_flow_key(key);
diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c
index 332b5a0..4e9a5f0 100644
--- a/net/openvswitch/datapath.c
+++ b/net/openvswitch/datapath.c
@@ -83,8 +83,7 @@
unsigned int group)
{
return info->nlhdr->nlmsg_flags & NLM_F_ECHO ||
- genl_has_listeners(family, genl_info_net(info)->genl_sock,
- group);
+ genl_has_listeners(family, genl_info_net(info), group);
}
static void ovs_notify(struct genl_family *family,
diff --git a/net/openvswitch/flow_netlink.c b/net/openvswitch/flow_netlink.c
index 9645a21..d1eecf7 100644
--- a/net/openvswitch/flow_netlink.c
+++ b/net/openvswitch/flow_netlink.c
@@ -1753,7 +1753,6 @@
__be16 eth_type, __be16 vlan_tci, bool log)
{
const struct nlattr *a;
- bool out_tnl_port = false;
int rem, err;
if (depth >= SAMPLE_ACTION_DEPTH)
@@ -1796,8 +1795,6 @@
case OVS_ACTION_ATTR_OUTPUT:
if (nla_get_u32(a) >= DP_MAX_PORTS)
return -EINVAL;
- out_tnl_port = false;
-
break;
case OVS_ACTION_ATTR_HASH: {
@@ -1832,12 +1829,6 @@
case OVS_ACTION_ATTR_PUSH_MPLS: {
const struct ovs_action_push_mpls *mpls = nla_data(a);
- /* Networking stack do not allow simultaneous Tunnel
- * and MPLS GSO.
- */
- if (out_tnl_port)
- return -EINVAL;
-
if (!eth_p_mpls(mpls->mpls_ethertype))
return -EINVAL;
/* Prohibit push MPLS other than to a white list
@@ -1873,11 +1864,9 @@
case OVS_ACTION_ATTR_SET:
err = validate_set(a, key, sfa,
- &out_tnl_port, eth_type, log);
+ &skip_copy, eth_type, log);
if (err)
return err;
-
- skip_copy = out_tnl_port;
break;
case OVS_ACTION_ATTR_SAMPLE:
diff --git a/net/openvswitch/vport-geneve.c b/net/openvswitch/vport-geneve.c
index 347fa23..484864d 100644
--- a/net/openvswitch/vport-geneve.c
+++ b/net/openvswitch/vport-geneve.c
@@ -219,7 +219,10 @@
false);
if (err < 0)
ip_rt_put(rt);
+ return err;
+
error:
+ kfree_skb(skb);
return err;
}
diff --git a/net/openvswitch/vport-gre.c b/net/openvswitch/vport-gre.c
index 6b69df5..d4168c4 100644
--- a/net/openvswitch/vport-gre.c
+++ b/net/openvswitch/vport-gre.c
@@ -73,7 +73,7 @@
skb = gre_handle_offloads(skb, !!(tun_key->tun_flags & TUNNEL_CSUM));
if (IS_ERR(skb))
- return NULL;
+ return skb;
tpi.flags = filter_tnl_flags(tun_key->tun_flags);
tpi.proto = htons(ETH_P_TEB);
@@ -144,7 +144,7 @@
if (unlikely(!OVS_CB(skb)->egress_tun_info)) {
err = -EINVAL;
- goto error;
+ goto err_free_skb;
}
tun_key = &OVS_CB(skb)->egress_tun_info->tunnel;
@@ -157,8 +157,10 @@
fl.flowi4_proto = IPPROTO_GRE;
rt = ip_route_output_key(net, &fl);
- if (IS_ERR(rt))
- return PTR_ERR(rt);
+ if (IS_ERR(rt)) {
+ err = PTR_ERR(rt);
+ goto err_free_skb;
+ }
tunnel_hlen = ip_gre_calc_hlen(tun_key->tun_flags);
@@ -183,8 +185,9 @@
/* Push Tunnel header. */
skb = __build_header(skb, tunnel_hlen);
- if (unlikely(!skb)) {
- err = 0;
+ if (IS_ERR(skb)) {
+ err = PTR_ERR(skb);
+ skb = NULL;
goto err_free_rt;
}
@@ -198,7 +201,8 @@
tun_key->ipv4_tos, tun_key->ipv4_ttl, df, false);
err_free_rt:
ip_rt_put(rt);
-error:
+err_free_skb:
+ kfree_skb(skb);
return err;
}
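The __build_header() fix above matters because gre_handle_offloads() returns an ERR_PTR-encoded skb on failure, not NULL, so the old NULL test silently reported success (err = 0). A minimal user-space model of the convention, with simplified stand-ins for the <linux/err.h> macros:

/* User-space model of the kernel's ERR_PTR convention. The macros
 * are simplified stand-ins, not the real <linux/err.h>. */
#include <stdio.h>
#include <errno.h>

#define MAX_ERRNO	4095
#define ERR_PTR(err)	((void *)(long)(err))
#define PTR_ERR(ptr)	((long)(ptr))
#define IS_ERR(ptr)	((unsigned long)(ptr) >= (unsigned long)-MAX_ERRNO)

static void *build_header(int fail)
{
	static int payload = 42;

	if (fail)
		return ERR_PTR(-ENOMEM);	/* errno encoded in the pointer */
	return &payload;
}

int main(void)
{
	void *p = build_header(1);

	if (IS_ERR(p))
		printf("propagate err = %ld\n", PTR_ERR(p));	/* -12 */
	else
		printf("ok: %d\n", *(int *)p);
	return 0;
}

The vport-gre hunk applies exactly this pattern: IS_ERR(skb) replaces the NULL test, PTR_ERR(skb) supplies the errno, and skb is cleared before the goto so the shared err_free_skb path's kfree_skb(NULL) is a harmless no-op.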
diff --git a/net/openvswitch/vport-vxlan.c b/net/openvswitch/vport-vxlan.c
index 38f95a5..d7c46b3 100644
--- a/net/openvswitch/vport-vxlan.c
+++ b/net/openvswitch/vport-vxlan.c
@@ -187,7 +187,9 @@
false);
if (err < 0)
ip_rt_put(rt);
+ return err;
error:
+ kfree_skb(skb);
return err;
}
diff --git a/net/openvswitch/vport.c b/net/openvswitch/vport.c
index 9584526..53f3ebb 100644
--- a/net/openvswitch/vport.c
+++ b/net/openvswitch/vport.c
@@ -519,10 +519,9 @@
u64_stats_update_end(&stats->syncp);
} else if (sent < 0) {
ovs_vport_record_error(vport, VPORT_E_TX_ERROR);
- kfree_skb(skb);
- } else
+ } else {
ovs_vport_record_error(vport, VPORT_E_TX_DROPPED);
-
+ }
return sent;
}
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index e52a447..6880f34 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -785,6 +785,7 @@
struct tpacket3_hdr *last_pkt;
struct tpacket_hdr_v1 *h1 = &pbd1->hdr.bh1;
+ struct sock *sk = &po->sk;
if (po->stats.stats3.tp_drops)
status |= TP_STATUS_LOSING;
@@ -809,6 +810,8 @@
/* Flush the block */
prb_flush_block(pkc1, pbd1, status);
+ sk->sk_data_ready(sk);
+
pkc1->kactive_blk_num = GET_NEXT_PRB_BLK_NUM(pkc1);
}
@@ -2052,12 +2055,12 @@
smp_wmb();
#endif
- if (po->tp_version <= TPACKET_V2)
+ if (po->tp_version <= TPACKET_V2) {
__packet_set_status(po, h.raw, status);
- else
+ sk->sk_data_ready(sk);
+ } else {
prb_clear_blk_fill_status(&po->rx_ring);
-
- sk->sk_data_ready(sk);
+ }
drop_n_restore:
if (skb_head != skb->data && skb_shared(skb)) {
diff --git a/net/wireless/Kconfig b/net/wireless/Kconfig
index 22ba971..29c8675 100644
--- a/net/wireless/Kconfig
+++ b/net/wireless/Kconfig
@@ -175,7 +175,7 @@
Most distributions have a CRDA package. So if unsure, say N.
config CFG80211_WEXT
- bool
+ bool "cfg80211 wireless extensions compatibility"
depends on CFG80211
select WEXT_CORE
help
diff --git a/sound/pci/hda/hda_controller.c b/sound/pci/hda/hda_controller.c
index 8276a74..0cfc9c8 100644
--- a/sound/pci/hda/hda_controller.c
+++ b/sound/pci/hda/hda_controller.c
@@ -1922,10 +1922,18 @@
EXPORT_SYMBOL_GPL(azx_mixer_create);
+static bool is_input_stream(struct azx *chip, unsigned char index)
+{
+ return (index >= chip->capture_index_offset &&
+ index < chip->capture_index_offset + chip->capture_streams);
+}
+
/* initialize SD streams */
int azx_init_stream(struct azx *chip)
{
int i;
+ int in_stream_tag = 0;
+ int out_stream_tag = 0;
/* initialize each stream (aka device)
* assign the starting bdl address to each stream (device)
@@ -1938,9 +1946,21 @@
azx_dev->sd_addr = chip->remap_addr + (0x20 * i + 0x80);
/* int mask: SDI0=0x01, SDI1=0x02, ... SDO3=0x80 */
azx_dev->sd_int_sta_mask = 1 << i;
- /* stream tag: must be non-zero and unique */
azx_dev->index = i;
- azx_dev->stream_tag = i + 1;
+
+ /* Stream tags must be unique within each stream direction
+ * group; valid values are 1..15. Use separate per-direction
+ * tag pools when the AZX_DCAPS_SEPARATE_STREAM_TAG flag is
+ * set.
+ */
+ if (chip->driver_caps & AZX_DCAPS_SEPARATE_STREAM_TAG)
+ azx_dev->stream_tag =
+ is_input_stream(chip, i) ?
+ ++in_stream_tag :
+ ++out_stream_tag;
+ else
+ azx_dev->stream_tag = i + 1;
}
return 0;
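With AZX_DCAPS_SEPARATE_STREAM_TAG set, capture and playback streams now draw tags from independent 1..15 pools instead of the single shared i + 1 sequence. A small user-space simulation of the assignment, assuming a chip with 4 capture streams at index offset 0 followed by 4 playback streams (illustrative values, not from the patch):

/* User-space simulation of the tag assignment above. */
#include <stdbool.h>
#include <stdio.h>

#define CAP_OFF		0
#define CAP_NUM		4
#define NUM_STREAMS	8

static bool is_input_stream(unsigned char index)
{
	return index >= CAP_OFF && index < CAP_OFF + CAP_NUM;
}

int main(void)
{
	int in_tag = 0, out_tag = 0;

	for (int i = 0; i < NUM_STREAMS; i++) {
		int separate = is_input_stream(i) ? ++in_tag : ++out_tag;
		int shared = i + 1;	/* legacy scheme */

		printf("stream %d (%s): shared tag %2d, separate tag %d\n",
		       i, is_input_stream(i) ? "capture" : "playback",
		       shared, separate);
	}
	return 0;
}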
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index 2bf0b56..d426a0b 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -299,6 +299,9 @@
AZX_DCAPS_PM_RUNTIME | AZX_DCAPS_I915_POWERWELL |\
AZX_DCAPS_SNOOP_TYPE(SCH))
+#define AZX_DCAPS_INTEL_SKYLAKE \
+ (AZX_DCAPS_INTEL_PCH | AZX_DCAPS_SEPARATE_STREAM_TAG)
+
/* quirks for ATI SB / AMD Hudson */
#define AZX_DCAPS_PRESET_ATI_SB \
(AZX_DCAPS_NO_TCSEL | AZX_DCAPS_SYNC_WRITE | AZX_DCAPS_POSFIX_LPIB |\
@@ -2027,7 +2030,7 @@
.driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH },
/* Sunrise Point-LP */
{ PCI_DEVICE(0x8086, 0x9d70),
- .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH },
+ .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_SKYLAKE },
/* Haswell */
{ PCI_DEVICE(0x8086, 0x0a0c),
.driver_data = AZX_DRIVER_HDMI | AZX_DCAPS_INTEL_HASWELL },
diff --git a/sound/pci/hda/hda_priv.h b/sound/pci/hda/hda_priv.h
index aa484fd..166e3e8 100644
--- a/sound/pci/hda/hda_priv.h
+++ b/sound/pci/hda/hda_priv.h
@@ -171,6 +171,7 @@
#define AZX_DCAPS_I915_POWERWELL (1 << 27) /* HSW i915 powerwell support */
#define AZX_DCAPS_CORBRP_SELF_CLEAR (1 << 28) /* CORBRP clears itself after reset */
#define AZX_DCAPS_NO_MSI64 (1 << 29) /* Stick to 32-bit MSIs */
+#define AZX_DCAPS_SEPARATE_STREAM_TAG (1 << 30) /* capture and playback use separate stream tag */
enum {
AZX_SNOOP_TYPE_NONE ,
diff --git a/sound/soc/codecs/rt5677.c b/sound/soc/codecs/rt5677.c
index 81fe146..c0fbe18 100644
--- a/sound/soc/codecs/rt5677.c
+++ b/sound/soc/codecs/rt5677.c
@@ -784,8 +784,8 @@
static int rt5677_dsp_vad_get(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
- struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
- struct rt5677_priv *rt5677 = snd_soc_codec_get_drvdata(codec);
+ struct snd_soc_component *component = snd_kcontrol_chip(kcontrol);
+ struct rt5677_priv *rt5677 = snd_soc_component_get_drvdata(component);
ucontrol->value.integer.value[0] = rt5677->dsp_vad_en;
@@ -795,8 +795,9 @@
static int rt5677_dsp_vad_put(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
- struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
- struct rt5677_priv *rt5677 = snd_soc_codec_get_drvdata(codec);
+ struct snd_soc_component *component = snd_kcontrol_chip(kcontrol);
+ struct rt5677_priv *rt5677 = snd_soc_component_get_drvdata(component);
+ struct snd_soc_codec *codec = snd_soc_component_to_codec(component);
rt5677->dsp_vad_en = !!ucontrol->value.integer.value[0];
diff --git a/sound/soc/dwc/designware_i2s.c b/sound/soc/dwc/designware_i2s.c
index b93168d..8d18bbd 100644
--- a/sound/soc/dwc/designware_i2s.c
+++ b/sound/soc/dwc/designware_i2s.c
@@ -209,16 +209,9 @@
switch (config->chan_nr) {
case EIGHT_CHANNEL_SUPPORT:
- ch_reg = 3;
- break;
case SIX_CHANNEL_SUPPORT:
- ch_reg = 2;
- break;
case FOUR_CHANNEL_SUPPORT:
- ch_reg = 1;
- break;
case TWO_CHANNEL_SUPPORT:
- ch_reg = 0;
break;
default:
dev_err(dev->dev, "channel not supported\n");
@@ -227,18 +220,22 @@
i2s_disable_channels(dev, substream->stream);
- if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
- i2s_write_reg(dev->i2s_base, TCR(ch_reg), xfer_resolution);
- i2s_write_reg(dev->i2s_base, TFCR(ch_reg), 0x02);
- irq = i2s_read_reg(dev->i2s_base, IMR(ch_reg));
- i2s_write_reg(dev->i2s_base, IMR(ch_reg), irq & ~0x30);
- i2s_write_reg(dev->i2s_base, TER(ch_reg), 1);
- } else {
- i2s_write_reg(dev->i2s_base, RCR(ch_reg), xfer_resolution);
- i2s_write_reg(dev->i2s_base, RFCR(ch_reg), 0x07);
- irq = i2s_read_reg(dev->i2s_base, IMR(ch_reg));
- i2s_write_reg(dev->i2s_base, IMR(ch_reg), irq & ~0x03);
- i2s_write_reg(dev->i2s_base, RER(ch_reg), 1);
+ for (ch_reg = 0; ch_reg < (config->chan_nr / 2); ch_reg++) {
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+ i2s_write_reg(dev->i2s_base, TCR(ch_reg),
+ xfer_resolution);
+ i2s_write_reg(dev->i2s_base, TFCR(ch_reg), 0x02);
+ irq = i2s_read_reg(dev->i2s_base, IMR(ch_reg));
+ i2s_write_reg(dev->i2s_base, IMR(ch_reg), irq & ~0x30);
+ i2s_write_reg(dev->i2s_base, TER(ch_reg), 1);
+ } else {
+ i2s_write_reg(dev->i2s_base, RCR(ch_reg),
+ xfer_resolution);
+ i2s_write_reg(dev->i2s_base, RFCR(ch_reg), 0x07);
+ irq = i2s_read_reg(dev->i2s_base, IMR(ch_reg));
+ i2s_write_reg(dev->i2s_base, IMR(ch_reg), irq & ~0x03);
+ i2s_write_reg(dev->i2s_base, RER(ch_reg), 1);
+ }
}
i2s_write_reg(dev->i2s_base, CCR, ccr);
@@ -263,6 +260,19 @@
snd_soc_dai_set_dma_data(dai, substream, NULL);
}
+static int dw_i2s_prepare(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ struct dw_i2s_dev *dev = snd_soc_dai_get_drvdata(dai);
+
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+ i2s_write_reg(dev->i2s_base, TXFFR, 1);
+ else
+ i2s_write_reg(dev->i2s_base, RXFFR, 1);
+
+ return 0;
+}
+
static int dw_i2s_trigger(struct snd_pcm_substream *substream,
int cmd, struct snd_soc_dai *dai)
{
@@ -294,6 +304,7 @@
.startup = dw_i2s_startup,
.shutdown = dw_i2s_shutdown,
.hw_params = dw_i2s_hw_params,
+ .prepare = dw_i2s_prepare,
.trigger = dw_i2s_trigger,
};
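The rewritten hw_params path programs every active channel-register pair rather than only the highest one: each TCR/RCR bank carries two channels, so chan_nr/2 banks must be configured. A trivial user-space sketch of which registers an 8-channel playback stream now touches:

/* Sketch: register banks hit by the new loop for chan_nr = 8. */
#include <stdio.h>

int main(void)
{
	int chan_nr = 8;

	for (int ch = 0; ch < chan_nr / 2; ch++)
		printf("program TCR(%d), TFCR(%d), IMR(%d), TER(%d)\n",
		       ch, ch, ch, ch);
	return 0;
}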
diff --git a/sound/soc/intel/Kconfig b/sound/soc/intel/Kconfig
index e989ecf..f86de12 100644
--- a/sound/soc/intel/Kconfig
+++ b/sound/soc/intel/Kconfig
@@ -89,7 +89,7 @@
config SND_SOC_INTEL_BYTCR_RT5640_MACH
tristate "ASoC Audio DSP Support for MID BYT Platform"
- depends on X86
+ depends on X86 && I2C
select SND_SOC_RT5640
select SND_SST_MFLD_PLATFORM
select SND_SST_IPC_ACPI
@@ -101,7 +101,7 @@
config SND_SOC_INTEL_CHT_BSW_RT5672_MACH
tristate "ASoC Audio driver for Intel Cherrytrail & Braswell with RT5672 codec"
- depends on X86_INTEL_LPSS
+ depends on X86_INTEL_LPSS && I2C
select SND_SOC_RT5670
select SND_SST_MFLD_PLATFORM
select SND_SST_IPC_ACPI
diff --git a/sound/soc/intel/bytcr_dpcm_rt5640.c b/sound/soc/intel/bytcr_dpcm_rt5640.c
index f5d0fc1..eef0c56 100644
--- a/sound/soc/intel/bytcr_dpcm_rt5640.c
+++ b/sound/soc/intel/bytcr_dpcm_rt5640.c
@@ -227,4 +227,4 @@
MODULE_DESCRIPTION("ASoC Intel(R) Baytrail CR Machine driver");
MODULE_AUTHOR("Subhransu S. Prusty <subhransu.s.prusty@intel.com>");
MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("platform:bytrt5640-audio");
+MODULE_ALIAS("platform:bytt100_rt5640");
diff --git a/sound/soc/intel/sst-firmware.c b/sound/soc/intel/sst-firmware.c
index 4a5bde9..ef2e8b5 100644
--- a/sound/soc/intel/sst-firmware.c
+++ b/sound/soc/intel/sst-firmware.c
@@ -763,8 +763,12 @@
/* does block span more than 1 section */
if (ba->offset >= block->offset && ba->offset < block_end) {
+ /* add block */
+ list_move(&block->list, &dsp->used_block_list);
+ list_add(&block->module_list, block_list);
/* align ba to block boundary */
- ba->offset = block->offset;
+ ba->size -= block_end - ba->offset;
+ ba->offset = block_end;
err = block_alloc_contiguous(dsp, ba, block_list);
if (err < 0)
diff --git a/sound/soc/intel/sst/sst_acpi.c b/sound/soc/intel/sst/sst_acpi.c
index 3abc29e..2ac72eb 100644
--- a/sound/soc/intel/sst/sst_acpi.c
+++ b/sound/soc/intel/sst/sst_acpi.c
@@ -343,7 +343,7 @@
}
static struct sst_machines sst_acpi_bytcr[] = {
- {"10EC5640", "T100", "bytt100_rt5640", NULL, "fw_sst_0f28.bin",
+ {"10EC5640", "T100", "bytt100_rt5640", NULL, "intel/fw_sst_0f28.bin",
&byt_rvp_platform_data },
{},
};
diff --git a/sound/soc/rockchip/rockchip_i2s.c b/sound/soc/rockchip/rockchip_i2s.c
index 26ec511..13d8507 100644
--- a/sound/soc/rockchip/rockchip_i2s.c
+++ b/sound/soc/rockchip/rockchip_i2s.c
@@ -454,11 +454,11 @@
i2s->playback_dma_data.addr = res->start + I2S_TXDR;
i2s->playback_dma_data.addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
- i2s->playback_dma_data.maxburst = 16;
+ i2s->playback_dma_data.maxburst = 4;
i2s->capture_dma_data.addr = res->start + I2S_RXDR;
i2s->capture_dma_data.addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
- i2s->capture_dma_data.maxburst = 16;
+ i2s->capture_dma_data.maxburst = 4;
i2s->dev = &pdev->dev;
dev_set_drvdata(&pdev->dev, i2s);
diff --git a/sound/soc/rockchip/rockchip_i2s.h b/sound/soc/rockchip/rockchip_i2s.h
index 89a5d8b..93f456f 100644
--- a/sound/soc/rockchip/rockchip_i2s.h
+++ b/sound/soc/rockchip/rockchip_i2s.h
@@ -127,7 +127,7 @@
#define I2S_DMACR_TDE_DISABLE (0 << I2S_DMACR_TDE_SHIFT)
#define I2S_DMACR_TDE_ENABLE (1 << I2S_DMACR_TDE_SHIFT)
#define I2S_DMACR_TDL_SHIFT 0
-#define I2S_DMACR_TDL(x) ((x - 1) << I2S_DMACR_TDL_SHIFT)
+#define I2S_DMACR_TDL(x) ((x) << I2S_DMACR_TDL_SHIFT)
#define I2S_DMACR_TDL_MASK (0x1f << I2S_DMACR_TDL_SHIFT)
/*
diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c
index 985052b..2c62620 100644
--- a/sound/soc/soc-core.c
+++ b/sound/soc/soc-core.c
@@ -3230,7 +3230,7 @@
const char *propname)
{
struct device_node *np = card->dev->of_node;
- int num_routes, old_routes;
+ int num_routes;
struct snd_soc_dapm_route *routes;
int i, ret;
@@ -3248,9 +3248,7 @@
return -EINVAL;
}
- old_routes = card->num_dapm_routes;
- routes = devm_kzalloc(card->dev,
- (old_routes + num_routes) * sizeof(*routes),
+ routes = devm_kzalloc(card->dev, num_routes * sizeof(*routes),
GFP_KERNEL);
if (!routes) {
dev_err(card->dev,
@@ -3258,11 +3256,9 @@
return -EINVAL;
}
- memcpy(routes, card->dapm_routes, old_routes * sizeof(*routes));
-
for (i = 0; i < num_routes; i++) {
ret = of_property_read_string_index(np, propname,
- 2 * i, &routes[old_routes + i].sink);
+ 2 * i, &routes[i].sink);
if (ret) {
dev_err(card->dev,
"ASoC: Property '%s' index %d could not be read: %d\n",
@@ -3270,7 +3266,7 @@
return -EINVAL;
}
ret = of_property_read_string_index(np, propname,
- (2 * i) + 1, &routes[old_routes + i].source);
+ (2 * i) + 1, &routes[i].source);
if (ret) {
dev_err(card->dev,
"ASoC: Property '%s' index %d could not be read: %d\n",
@@ -3279,7 +3275,7 @@
}
}
- card->num_dapm_routes += num_routes;
+ card->num_dapm_routes = num_routes;
card->dapm_routes = routes;
return 0;
diff --git a/tools/power/cpupower/utils/cpupower.c b/tools/power/cpupower/utils/cpupower.c
index 7cdcf88..9ea9143 100644
--- a/tools/power/cpupower/utils/cpupower.c
+++ b/tools/power/cpupower/utils/cpupower.c
@@ -199,7 +199,7 @@
}
get_cpu_info(0, &cpupower_cpu_info);
- run_as_root = !getuid();
+ run_as_root = !geteuid();
if (run_as_root) {
ret = uname(&uts);
if (!ret && !strcmp(uts.machine, "x86_64") &&
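geteuid() reports the effective UID, so a set-uid-root cpupower started by an ordinary user is still recognized as privileged; getuid() would have returned the invoking user's real UID and wrongly disabled the privileged paths. A runnable demonstration of the difference:

/* Under a set-uid-root binary started by an ordinary user,
 * getuid() != 0 but geteuid() == 0. */
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	printf("real uid: %u, effective uid: %u\n",
	       (unsigned)getuid(), (unsigned)geteuid());
	printf("run_as_root (old, getuid):  %d\n", !getuid());
	printf("run_as_root (new, geteuid): %d\n", !geteuid());
	return 0;
}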
diff --git a/tools/power/cpupower/utils/helpers/sysfs.c b/tools/power/cpupower/utils/helpers/sysfs.c
index 09afe5d..4e8fe2c 100644
--- a/tools/power/cpupower/utils/helpers/sysfs.c
+++ b/tools/power/cpupower/utils/helpers/sysfs.c
@@ -361,7 +361,7 @@
snprintf(file, SYSFS_PATH_MAX, PATH_TO_CPU "cpuidle");
if (stat(file, &statbuf) != 0 || !S_ISDIR(statbuf.st_mode))
- return -ENODEV;
+ return 0;
snprintf(file, SYSFS_PATH_MAX, PATH_TO_CPU "cpu%u/cpuidle/state0", cpu);
if (stat(file, &statbuf) != 0 || !S_ISDIR(statbuf.st_mode))
diff --git a/tools/testing/selftests/exec/execveat.c b/tools/testing/selftests/exec/execveat.c
index 33a5c06..d273624 100644
--- a/tools/testing/selftests/exec/execveat.c
+++ b/tools/testing/selftests/exec/execveat.c
@@ -179,11 +179,11 @@
*/
fd = open(longpath, O_RDONLY);
if (fd > 0) {
- printf("Invoke copy of '%s' via filename of length %lu:\n",
+ printf("Invoke copy of '%s' via filename of length %zu:\n",
src, strlen(longpath));
fail += check_execveat(fd, "", AT_EMPTY_PATH);
} else {
- printf("Failed to open length %lu filename, errno=%d (%s)\n",
+ printf("Failed to open length %zu filename, errno=%d (%s)\n",
strlen(longpath), errno, strerror(errno));
fail++;
}
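strlen() returns size_t, and %zu is its only portable conversion; %lu merely happens to match on LP64 targets and warns or misprints on 32-bit and LLP64 ABIs. A minimal runnable example:

/* %zu is the portable printf length modifier for size_t. */
#include <stdio.h>
#include <string.h>

int main(void)
{
	const char *path = "/tmp/execveat.longpath";

	printf("filename length: %zu\n", strlen(path));
	return 0;
}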
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index f528343..1cc6e2e 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -671,6 +671,7 @@
WARN_ON(mslots[i].id != id);
if (!new->npages) {
+ WARN_ON(!mslots[i].npages);
new->base_gfn = 0;
if (mslots[i].npages)
slots->used_slots--;
@@ -687,12 +688,25 @@
slots->id_to_index[mslots[i].id] = i;
i++;
}
- while (i > 0 &&
- new->base_gfn > mslots[i - 1].base_gfn) {
- mslots[i] = mslots[i - 1];
- slots->id_to_index[mslots[i].id] = i;
- i--;
- }
+
+ /*
+ * The ">=" is needed when creating a slot with base_gfn == 0,
+ * so that it moves before all those with base_gfn == npages == 0.
+ *
+ * On the other hand, if new->npages is zero, the above loop has
+ * already left i pointing to the beginning of the empty part of
+ * mslots, and the ">=" would move the hole backwards in this
+ * case---which is wrong. So skip the loop when deleting a slot.
+ */
+ if (new->npages) {
+ while (i > 0 &&
+ new->base_gfn >= mslots[i - 1].base_gfn) {
+ mslots[i] = mslots[i - 1];
+ slots->id_to_index[mslots[i].id] = i;
+ i--;
+ }
+ } else
+ WARN_ON_ONCE(i != slots->used_slots);
mslots[i] = *new;
slots->id_to_index[mslots[i].id] = i;
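The memslot array stays sorted by base_gfn in descending order, with empty (npages == 0) entries at the tail. A user-space model of the guarded insertion above, showing why the comparison must be ">=" and why the loop is skipped on deletion:

/* User-space model of the descending memslot insertion. i starts
 * one past an empty entry; ">=" is what lets a new slot with
 * base_gfn == 0 move in front of it. Values are illustrative. */
#include <stdio.h>

struct slot { unsigned long base_gfn, npages; };

int main(void)
{
	/* sorted descending; index 2 is an empty hole, i points past it */
	struct slot s[4] = { {400, 16}, {100, 8}, {0, 0}, {0, 0} };
	struct slot new = { 0, 4 };	/* creating a slot at gfn 0 */
	int i = 3;

	if (new.npages) {
		/* with ">" instead of ">=", new would stay at index 3,
		 * leaving the empty entry ahead of a live slot */
		while (i > 0 && new.base_gfn >= s[i - 1].base_gfn) {
			s[i] = s[i - 1];
			i--;
		}
	}
	/* when deleting (new.npages == 0) the loop is skipped, so the
	 * hole is not dragged backwards through the array */
	s[i] = new;

	for (int j = 0; j < 4; j++)
		printf("slot %d: base_gfn %3lu npages %lu\n",
		       j, s[j].base_gfn, s[j].npages);
	return 0;
}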