Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux into drm-next

There were some merge conflicts in -next and they weren't so pretty, so
backmerge now to avoid them.

Conflicts:
	drivers/gpu/drm/i915/i915_gem.c
	drivers/gpu/drm/i915/intel_modes.c
diff --git a/Documentation/DocBook/drm.tmpl b/Documentation/DocBook/drm.tmpl
index 196b8b9..b030052 100644
--- a/Documentation/DocBook/drm.tmpl
+++ b/Documentation/DocBook/drm.tmpl
@@ -6,11 +6,36 @@
   <bookinfo>
     <title>Linux DRM Developer's Guide</title>
 
+    <authorgroup>
+      <author>
+	<firstname>Jesse</firstname>
+	<surname>Barnes</surname>
+	<contrib>Initial version</contrib>
+	<affiliation>
+	  <orgname>Intel Corporation</orgname>
+	  <address>
+	    <email>jesse.barnes@intel.com</email>
+	  </address>
+	</affiliation>
+      </author>
+      <author>
+	<firstname>Laurent</firstname>
+	<surname>Pinchart</surname>
+	<contrib>Driver internals</contrib>
+	<affiliation>
+	  <orgname>Ideas on board SPRL</orgname>
+	  <address>
+	    <email>laurent.pinchart@ideasonboard.com</email>
+	  </address>
+	</affiliation>
+      </author>
+    </authorgroup>
+
     <copyright>
       <year>2008-2009</year>
-      <holder>
-	Intel Corporation (Jesse Barnes &lt;jesse.barnes@intel.com&gt;)
-      </holder>
+      <year>2012</year>
+      <holder>Intel Corporation</holder>
+      <holder>Laurent Pinchart</holder>
     </copyright>
 
     <legalnotice>
@@ -20,6 +45,17 @@
 	the kernel source COPYING file.
       </para>
     </legalnotice>
+
+    <revhistory>
+      <!-- Put document revisions here, newest first. -->
+      <revision>
+	<revnumber>1.0</revnumber>
+	<date>2012-07-13</date>
+	<authorinitials>LP</authorinitials>
+	<revremark>Added extensive documentation about driver internals.
+	</revremark>
+      </revision>
+    </revhistory>
   </bookinfo>
 
 <toc></toc>
@@ -72,342 +108,361 @@
       submission &amp; fencing, suspend/resume support, and DMA
       services.
     </para>
-    <para>
-      The core of every DRM driver is struct drm_driver.  Drivers
-      typically statically initialize a drm_driver structure,
-      then pass it to drm_init() at load time.
-    </para>
 
   <!-- Internals: driver init -->
 
   <sect1>
-    <title>Driver initialization</title>
+    <title>Driver Initialization</title>
     <para>
-      Before calling the DRM initialization routines, the driver must
-      first create and fill out a struct drm_driver structure.
+      At the core of every DRM driver is a <structname>drm_driver</structname>
+      structure. Drivers typically statically initialize it and then pass it
+      to one of the <function>drm_*_init()</function> functions to register it
+      with the DRM subsystem.
     </para>
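+    <para>
+      As a hedged minimal sketch (all foo_* names below are hypothetical
+      placeholders rather than symbols from a real driver):
+    </para>
+    <programlisting>
+      static struct drm_driver foo_driver = {
+	/* feature flags, see the list below */
+	.driver_features = DRIVER_GEM | DRIVER_MODESET,
+	/* device init and teardown entry points */
+	.load = foo_load,
+	.unload = foo_unload,
+	/* mandatory for GEM-enabled drivers */
+	.gem_free_object = foo_gem_free_object,
+	/* file operations, usually built from the drm_* helpers */
+	.fops = &amp;foo_fops,
+      };
+    </programlisting>
+    <para>
+      A PCI driver would then typically register this structure by calling
+      <function>drm_pci_init</function> from its module initialization
+      function; other bus types use the corresponding
+      <function>drm_*_init()</function> variant.
+    </para>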
-    <programlisting>
-      static struct drm_driver driver = {
-	/* Don't use MTRRs here; the Xserver or userspace app should
-	 * deal with them for Intel hardware.
-	 */
-	.driver_features =
-	    DRIVER_USE_AGP | DRIVER_REQUIRE_AGP |
-	    DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_MODESET,
-	.load = i915_driver_load,
-	.unload = i915_driver_unload,
-	.firstopen = i915_driver_firstopen,
-	.lastclose = i915_driver_lastclose,
-	.preclose = i915_driver_preclose,
-	.save = i915_save,
-	.restore = i915_restore,
-	.device_is_agp = i915_driver_device_is_agp,
-	.get_vblank_counter = i915_get_vblank_counter,
-	.enable_vblank = i915_enable_vblank,
-	.disable_vblank = i915_disable_vblank,
-	.irq_preinstall = i915_driver_irq_preinstall,
-	.irq_postinstall = i915_driver_irq_postinstall,
-	.irq_uninstall = i915_driver_irq_uninstall,
-	.irq_handler = i915_driver_irq_handler,
-	.reclaim_buffers = drm_core_reclaim_buffers,
-	.get_map_ofs = drm_core_get_map_ofs,
-	.get_reg_ofs = drm_core_get_reg_ofs,
-	.fb_probe = intelfb_probe,
-	.fb_remove = intelfb_remove,
-	.fb_resize = intelfb_resize,
-	.master_create = i915_master_create,
-	.master_destroy = i915_master_destroy,
-#if defined(CONFIG_DEBUG_FS)
-	.debugfs_init = i915_debugfs_init,
-	.debugfs_cleanup = i915_debugfs_cleanup,
-#endif
-	.gem_init_object = i915_gem_init_object,
-	.gem_free_object = i915_gem_free_object,
-	.gem_vm_ops = &amp;i915_gem_vm_ops,
-	.ioctls = i915_ioctls,
-	.fops = {
-		.owner = THIS_MODULE,
-		.open = drm_open,
-		.release = drm_release,
-		.ioctl = drm_ioctl,
-		.mmap = drm_mmap,
-		.poll = drm_poll,
-		.fasync = drm_fasync,
-#ifdef CONFIG_COMPAT
-		.compat_ioctl = i915_compat_ioctl,
-#endif
-		.llseek = noop_llseek,
-		},
-	.pci_driver = {
-		.name = DRIVER_NAME,
-		.id_table = pciidlist,
-		.probe = probe,
-		.remove = __devexit_p(drm_cleanup_pci),
-		},
-	.name = DRIVER_NAME,
-	.desc = DRIVER_DESC,
-	.date = DRIVER_DATE,
-	.major = DRIVER_MAJOR,
-	.minor = DRIVER_MINOR,
-	.patchlevel = DRIVER_PATCHLEVEL,
-      };
-    </programlisting>
     <para>
-      In the example above, taken from the i915 DRM driver, the driver
-      sets several flags indicating what core features it supports;
-      we go over the individual callbacks in later sections.  Since
-      flags indicate which features your driver supports to the DRM
-      core, you need to set most of them prior to calling drm_init().  Some,
-      like DRIVER_MODESET can be set later based on user supplied parameters,
-      but that's the exception rather than the rule.
+      The <structname>drm_driver</structname> structure contains static
+      information that describes the driver and the features it supports, and
+      pointers to methods that the DRM core will call to implement the DRM API.
+      We will first go through the <structname>drm_driver</structname> static
+      information fields, and will then describe individual operations in
+      detail as they get used in later sections.
     </para>
-    <variablelist>
-      <title>Driver flags</title>
-      <varlistentry>
-	<term>DRIVER_USE_AGP</term>
-	<listitem><para>
-	    Driver uses AGP interface
-	</para></listitem>
-      </varlistentry>
-      <varlistentry>
-	<term>DRIVER_REQUIRE_AGP</term>
-	<listitem><para>
-	    Driver needs AGP interface to function.
-	</para></listitem>
-      </varlistentry>
-      <varlistentry>
-	<term>DRIVER_USE_MTRR</term>
-	<listitem>
-	  <para>
-	    Driver uses MTRR interface for mapping memory.  Deprecated.
-	  </para>
-	</listitem>
-      </varlistentry>
-      <varlistentry>
-	<term>DRIVER_PCI_DMA</term>
-	<listitem><para>
-	    Driver is capable of PCI DMA.  Deprecated.
-	</para></listitem>
-      </varlistentry>
-      <varlistentry>
-	<term>DRIVER_SG</term>
-	<listitem><para>
-	    Driver can perform scatter/gather DMA.  Deprecated.
-	</para></listitem>
-      </varlistentry>
-      <varlistentry>
-	<term>DRIVER_HAVE_DMA</term>
-	<listitem><para>Driver supports DMA.  Deprecated.</para></listitem>
-      </varlistentry>
-      <varlistentry>
-	<term>DRIVER_HAVE_IRQ</term><term>DRIVER_IRQ_SHARED</term>
-	<listitem>
-	  <para>
-	    DRIVER_HAVE_IRQ indicates whether the driver has an IRQ
-	    handler.  DRIVER_IRQ_SHARED indicates whether the device &amp;
-	    handler support shared IRQs (note that this is required of
-	    PCI drivers).
-	  </para>
-	</listitem>
-      </varlistentry>
-      <varlistentry>
-	<term>DRIVER_DMA_QUEUE</term>
-	<listitem>
-	  <para>
-	    Should be set if the driver queues DMA requests and completes them
-	    asynchronously.  Deprecated.
-	  </para>
-	</listitem>
-      </varlistentry>
-      <varlistentry>
-	<term>DRIVER_FB_DMA</term>
-	<listitem>
-	  <para>
-	    Driver supports DMA to/from the framebuffer.  Deprecated.
-	  </para>
-	</listitem>
-      </varlistentry>
-      <varlistentry>
-	<term>DRIVER_MODESET</term>
-	<listitem>
-	  <para>
-	    Driver supports mode setting interfaces.
-	  </para>
-	</listitem>
-      </varlistentry>
-    </variablelist>
-    <para>
-      In this specific case, the driver requires AGP and supports
-      IRQs.  DMA, as discussed later, is handled by device-specific ioctls
-      in this case.  It also supports the kernel mode setting APIs, though
-      unlike in the actual i915 driver source, this example unconditionally
-      exports KMS capability.
-    </para>
+    <sect2>
+      <title>Driver Information</title>
+      <sect3>
+        <title>Driver Features</title>
+        <para>
+          Drivers inform the DRM core about their requirements and supported
+          features by setting appropriate flags in the
+          <structfield>driver_features</structfield> field. Since those flags
+          influence the DRM core behaviour from registration time onwards, most
+          of them must be set before registering the
+          <structname>drm_driver</structname> instance.
+        </para>
+        <synopsis>u32 driver_features;</synopsis>
+        <variablelist>
+          <title>Driver Feature Flags</title>
+          <varlistentry>
+            <term>DRIVER_USE_AGP</term>
+            <listitem><para>
+              Driver uses the AGP interface; the DRM core will manage AGP
+              resources.
+            </para></listitem>
+          </varlistentry>
+          <varlistentry>
+            <term>DRIVER_REQUIRE_AGP</term>
+            <listitem><para>
+              Driver needs the AGP interface to function; AGP initialization
+              failure is treated as a fatal error.
+            </para></listitem>
+          </varlistentry>
+          <varlistentry>
+            <term>DRIVER_USE_MTRR</term>
+            <listitem><para>
+              Driver uses the MTRR interface for mapping memory; the DRM core
+              will manage MTRR resources. Deprecated.
+            </para></listitem>
+          </varlistentry>
+          <varlistentry>
+            <term>DRIVER_PCI_DMA</term>
+            <listitem><para>
+              Driver is capable of PCI DMA; mapping of PCI DMA buffers to
+              userspace will be enabled. Deprecated.
+            </para></listitem>
+          </varlistentry>
+          <varlistentry>
+            <term>DRIVER_SG</term>
+            <listitem><para>
+              Driver can perform scatter/gather DMA; allocation and mapping of
+              scatter/gather buffers will be enabled. Deprecated.
+            </para></listitem>
+          </varlistentry>
+          <varlistentry>
+            <term>DRIVER_HAVE_DMA</term>
+            <listitem><para>
+              Driver supports DMA; the userspace DMA API will be supported.
+              Deprecated.
+            </para></listitem>
+          </varlistentry>
+          <varlistentry>
+            <term>DRIVER_HAVE_IRQ</term><term>DRIVER_IRQ_SHARED</term>
+            <listitem><para>
+              DRIVER_HAVE_IRQ indicates whether the driver has an IRQ handler. The
+              DRM core will automatically register an interrupt handler when the
+              flag is set. DRIVER_IRQ_SHARED indicates whether the device &amp;
+              handler support shared IRQs (note that this is required of PCI
+              drivers).
+            </para></listitem>
+          </varlistentry>
+          <varlistentry>
+            <term>DRIVER_IRQ_VBL</term>
+            <listitem><para>Unused. Deprecated.</para></listitem>
+          </varlistentry>
+          <varlistentry>
+            <term>DRIVER_DMA_QUEUE</term>
+            <listitem><para>
+              Should be set if the driver queues DMA requests and completes them
+              asynchronously.  Deprecated.
+            </para></listitem>
+          </varlistentry>
+          <varlistentry>
+            <term>DRIVER_FB_DMA</term>
+            <listitem><para>
+              Driver supports DMA to/from the framebuffer; mapping of
+              framebuffer DMA buffers to userspace will be supported. Deprecated.
+            </para></listitem>
+          </varlistentry>
+          <varlistentry>
+            <term>DRIVER_IRQ_VBL2</term>
+            <listitem><para>Unused. Deprecated.</para></listitem>
+          </varlistentry>
+          <varlistentry>
+            <term>DRIVER_GEM</term>
+            <listitem><para>
+              Driver uses the GEM memory manager.
+            </para></listitem>
+          </varlistentry>
+          <varlistentry>
+            <term>DRIVER_MODESET</term>
+            <listitem><para>
+              Driver supports mode setting interfaces (KMS).
+            </para></listitem>
+          </varlistentry>
+          <varlistentry>
+            <term>DRIVER_PRIME</term>
+            <listitem><para>
+              Driver implements DRM PRIME buffer sharing.
+            </para></listitem>
+          </varlistentry>
+        </variablelist>
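+        <para>
+          For instance, a KMS driver that uses GEM and a shared interrupt line
+          could combine the flags as follows (illustrative combination only):
+        </para>
+        <programlisting>
+	.driver_features = DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED |
+			   DRIVER_GEM | DRIVER_MODESET,
+        </programlisting>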
+      </sect3>
+      <sect3>
+        <title>Major, Minor and Patchlevel</title>
+        <synopsis>int major;
+int minor;
+int patchlevel;</synopsis>
+        <para>
+          The DRM core identifies driver versions by a major, minor and patch
+          level triplet. The information is printed to the kernel log at
+          initialization time and passed to userspace through the
+          DRM_IOCTL_VERSION ioctl.
+        </para>
+        <para>
+          The major and minor numbers are also used to verify the requested driver
+          API version passed to DRM_IOCTL_SET_VERSION. When the driver API changes
+          between minor versions, applications can call DRM_IOCTL_SET_VERSION to
+          select a specific version of the API. If the requested major isn't equal
+          to the driver major, or the requested minor is larger than the driver
+          minor, the DRM_IOCTL_SET_VERSION call will return an error. Otherwise
+          the driver's set_version() method will be called with the requested
+          version.
+        </para>
+      </sect3>
+      <sect3>
+        <title>Name, Description and Date</title>
+        <synopsis>char *name;
+char *desc;
+char *date;</synopsis>
+        <para>
+          The driver name is printed to the kernel log at initialization time,
+          used for IRQ registration and passed to userspace through
+          DRM_IOCTL_VERSION.
+        </para>
+        <para>
+          The driver description is a purely informative string passed to
+          userspace through the DRM_IOCTL_VERSION ioctl and otherwise unused by
+          the kernel.
+        </para>
+        <para>
+          The driver date, formatted as YYYYMMDD, is meant to identify the date of
+          the latest modification to the driver. However, as most drivers fail to
+          update it, its value is mostly useless. The DRM core prints it to the
+          kernel log at initialization time and passes it to userspace through the
+          DRM_IOCTL_VERSION ioctl.
+        </para>
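+        <para>
+          Taken together, the identification fields could be filled in as
+          follows (the values are illustrative, not from a real driver):
+        </para>
+        <programlisting>
+	.name = "foo",
+	.desc = "Example DRM driver",
+	.date = "20120713",
+	.major = 1,
+	.minor = 0,
+	.patchlevel = 0,
+        </programlisting>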
+      </sect3>
+    </sect2>
+    <sect2>
+      <title>Driver Load</title>
+      <para>
+        The <methodname>load</methodname> method is the driver and device
+        initialization entry point. The method is responsible for allocating and
+        initializing driver private data, specifying supported performance
+        counters, performing resource allocation and mapping (e.g. acquiring
+        clocks, mapping registers or allocating command buffers), initializing
+        the memory manager (<xref linkend="drm-memory-management"/>), installing
+        the IRQ handler (<xref linkend="drm-irq-registration"/>), setting up
+        vertical blanking handling (<xref linkend="drm-vertical-blank"/>), mode
+        setting (<xref linkend="drm-mode-setting"/>) and initial output
+        configuration (<xref linkend="drm-kms-init"/>).
+      </para>
+      <note><para>
+        If compatibility is a concern (e.g. with drivers converted over from
+        User Mode Setting to Kernel Mode Setting), care must be taken to prevent
+        device initialization and control that is incompatible with currently
+        active userspace drivers. For instance, if user level mode setting
+        drivers are in use, it would be problematic to perform output discovery
+        &amp; configuration at load time. Likewise, if user-level drivers
+        unaware of memory management are in use, memory management and command
+        buffer setup may need to be omitted. These requirements are
+        driver-specific, and care needs to be taken to keep both old and new
+        applications and libraries working.
+      </para></note>
+      <synopsis>int (*load) (struct drm_device *, unsigned long flags);</synopsis>
+      <para>
+        The method takes two arguments, a pointer to the newly created
+	<structname>drm_device</structname> and flags. The flags are used to
+	pass the <structfield>driver_data</structfield> field of the device id
+	corresponding to the device passed to <function>drm_*_init()</function>.
+	Only PCI devices currently use this; USB and platform DRM drivers have
+	their <methodname>load</methodname> method called with flags set to 0.
+      </para>
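+      <para>
+        A skeleton <methodname>load</methodname> implementation could look as
+        follows; <structname>foo_device</structname> and
+        <function>foo_init_hw</function> are hypothetical.
+      </para>
+      <programlisting>
+      static int foo_load(struct drm_device *dev, unsigned long flags)
+      {
+	struct foo_device *fdev;
+	int ret;
+
+	fdev = kzalloc(sizeof(*fdev), GFP_KERNEL);
+	if (!fdev)
+		return -ENOMEM;
+	dev->dev_private = fdev;
+
+	/* map registers, acquire clocks, allocate command buffers, ... */
+	ret = foo_init_hw(fdev);
+	if (ret)
+		goto err_free;
+
+	/* memory manager, IRQ, vblank and mode setting initialization
+	 * would follow here. */
+	return 0;
+
+err_free:
+	kfree(fdev);
+	dev->dev_private = NULL;
+	return ret;
+      }
+      </programlisting>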
+      <sect3>
+        <title>Driver Private &amp; Performance Counters</title>
+        <para>
+          The driver private hangs off the main
+          <structname>drm_device</structname> structure and can be used for
+          tracking various device-specific bits of information, like register
+          offsets, command buffer status, register state for suspend/resume, etc.
+          At load time, a driver may simply allocate one and set
+          <structname>drm_device</structname>.<structfield>dev_private</structfield>
+          appropriately; it should be freed and
+          <structname>drm_device</structname>.<structfield>dev_private</structfield>
+          set to NULL when the driver is unloaded.
+        </para>
+        <para>
+          DRM supports several counters which were used for rough performance
+          characterization. This stat counter system is deprecated and should not
+          be used. If performance monitoring is desired, the developer should
+          investigate and potentially enhance the kernel perf and tracing
+          infrastructure to export GPU related performance information for
+          consumption by performance monitoring tools and applications.
+        </para>
+      </sect3>
+      <sect3 id="drm-irq-registration">
+        <title>IRQ Registration</title>
+        <para>
+          The DRM core tries to facilitate IRQ handler registration and
+          unregistration by providing <function>drm_irq_install</function> and
+          <function>drm_irq_uninstall</function> functions. Those functions only
+          support a single interrupt per device.
+        </para>
+  <!--!Fdrivers/gpu/drm/drm_irq.c drm_irq_install-->
+        <para>
+          Both functions get the device IRQ by calling
+          <function>drm_dev_to_irq</function>. This inline function will call a
+          bus-specific operation to retrieve the IRQ number. For platform devices,
+          <function>platform_get_irq</function>(..., 0) is used to retrieve the
+          IRQ number.
+        </para>
+        <para>
+          <function>drm_irq_install</function> starts by calling the
+          <methodname>irq_preinstall</methodname> driver operation. The
+          operation is optional; when implemented, it must make sure that the
+          interrupt will not get fired, by clearing all pending interrupt flags
+          or by disabling the interrupt.
+        </para>
+        <para>
+          The IRQ will then be requested by a call to
+          <function>request_irq</function>. If the DRIVER_IRQ_SHARED driver
+          feature flag is set, a shared (IRQF_SHARED) IRQ handler will be
+          requested.
+        </para>
+        <para>
+          The IRQ handler function must be provided as the mandatory irq_handler
+          driver operation. It will get passed directly to
+          <function>request_irq</function> and thus has the same prototype as all
+          IRQ handlers. It will get called with a pointer to the DRM device as the
+          second argument.
+        </para>
+        <para>
+          Finally the function calls the optional
+          <methodname>irq_postinstall</methodname> driver operation. The operation
+          usually enables interrupts (excluding the vblank interrupt, which is
+          enabled separately), but drivers may choose to enable/disable interrupts
+          at a different time.
+        </para>
+        <para>
+          <function>drm_irq_uninstall</function> is similarly used to uninstall an
+          IRQ handler. It starts by waking up all processes waiting on a vblank
+          interrupt to make sure they don't hang, and then calls the optional
+          <methodname>irq_uninstall</methodname> driver operation. The operation
+          must disable all hardware interrupts. Finally the function frees the IRQ
+          by calling <function>free_irq</function>.
+        </para>
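+        <para>
+          As a sketch, a driver could thus install its interrupt handler from
+          the <methodname>load</methodname> method with:
+        </para>
+        <programlisting>
+	/* irq_handler (and optionally irq_preinstall/irq_postinstall) must
+	 * be set in the drm_driver structure before this call. */
+	ret = drm_irq_install(dev);
+	if (ret)
+		return ret;
+        </programlisting>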
+      </sect3>
+      <sect3>
+        <title>Memory Manager Initialization</title>
+        <para>
+          Every DRM driver requires a memory manager which must be initialized at
+          load time. DRM currently contains two memory managers, the Translation
+          Table Manager (TTM) and the Graphics Execution Manager (GEM).
+          This document describes the use of the GEM memory manager only. See
+          <xref linkend="drm-memory-management"/> for details.
+        </para>
+      </sect3>
+      <sect3>
+        <title>Miscellaneous Device Configuration</title>
+        <para>
+          Another task that may be necessary for PCI devices during configuration
+          is mapping the video BIOS. On many devices, the VBIOS describes device
+          configuration, LCD panel timings (if any), and contains flags indicating
+          device state. Mapping the BIOS can be done using the pci_map_rom() call,
+          a convenience function that takes care of mapping the actual ROM,
+          whether it has been shadowed into memory (typically at address 0xc0000)
+          or exists on the PCI device in the ROM BAR. Note that after the ROM has
+          been mapped and any necessary information has been extracted, it should
+          be unmapped; on many devices, the ROM address decoder is shared with
+          other BARs, so leaving it mapped could cause undesired behaviour like
+          hangs or memory corruption.
+  <!--!Fdrivers/pci/rom.c pci_map_rom-->
+        </para>
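+        <para>
+          The usual map/parse/unmap pattern looks as follows (the parsing step
+          is a placeholder):
+        </para>
+        <programlisting>
+	void __iomem *rom;
+	size_t size;
+
+	rom = pci_map_rom(pdev, &amp;size);
+	if (rom) {
+		/* ... extract VBIOS information here ... */
+		pci_unmap_rom(pdev, rom);
+	}
+        </programlisting>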
+      </sect3>
+    </sect2>
   </sect1>
 
-  <!-- Internals: driver load -->
+  <!-- Internals: memory management -->
 
-  <sect1>
-    <title>Driver load</title>
+  <sect1 id="drm-memory-management">
+    <title>Memory management</title>
     <para>
-      In the previous section, we saw what a typical drm_driver
-      structure might look like.  One of the more important fields in
-      the structure is the hook for the load function.
-    </para>
-    <programlisting>
-      static struct drm_driver driver = {
-      	...
-      	.load = i915_driver_load,
-        ...
-      };
-    </programlisting>
-    <para>
-      The load function has many responsibilities: allocating a driver
-      private structure, specifying supported performance counters,
-      configuring the device (e.g. mapping registers &amp; command
-      buffers), initializing the memory manager, and setting up the
-      initial output configuration.
+      Modern Linux systems require large amounts of graphics memory to store
+      frame buffers, textures, vertices and other graphics-related data. Given
+      the very dynamic nature of much of that data, managing graphics memory
+      efficiently is thus crucial for the graphics stack and plays a central
+      role in the DRM infrastructure.
     </para>
     <para>
-      If compatibility is a concern (e.g. with drivers converted over
-      to the new interfaces from the old ones), care must be taken to
-      prevent device initialization and control that is incompatible with
-      currently active userspace drivers.  For instance, if user
-      level mode setting drivers are in use, it would be problematic
-      to perform output discovery &amp; configuration at load time.
-      Likewise, if user-level drivers unaware of memory management are
-      in use, memory management and command buffer setup may need to
-      be omitted.  These requirements are driver-specific, and care
-      needs to be taken to keep both old and new applications and
-      libraries working.  The i915 driver supports the "modeset"
-      module parameter to control whether advanced features are
-      enabled at load time or in legacy fashion.
+      The DRM core includes two memory managers, namely the Translation Table
+      Manager (TTM) and the Graphics Execution Manager (GEM). TTM was the first
+      DRM memory manager to be developed and tried to be a one-size-fits-all
+      solution. It provides a single userspace API to accommodate the needs of
+      all hardware, supporting both Unified Memory Architecture (UMA) devices
+      and devices with dedicated video RAM (i.e. most discrete video cards).
+      This resulted in a large, complex piece of code that turned out to be
+      hard to use for driver development.
     </para>
-
+    <para>
+      GEM started as an Intel-sponsored project in reaction to TTM's
+      complexity. Its design philosophy is completely different: instead of
+      providing a solution to every graphics memory-related problem, GEM
+      identified common code between drivers and created a support library to
+      share it. GEM has simpler initialization and execution requirements than
+      TTM, but has no video RAM management capabilities and is thus limited to
+      UMA devices.
+    </para>
     <sect2>
-      <title>Driver private &amp; performance counters</title>
+      <title>The Translation Table Manager (TTM)</title>
       <para>
-	The driver private hangs off the main drm_device structure and
-	can be used for tracking various device-specific bits of
-	information, like register offsets, command buffer status,
-	register state for suspend/resume, etc.  At load time, a
-	driver may simply allocate one and set drm_device.dev_priv
-	appropriately; it should be freed and drm_device.dev_priv set
-	to NULL when the driver is unloaded.
-      </para>
-      <para>
-	The DRM supports several counters which may be used for rough
-	performance characterization.  Note that the DRM stat counter
-	system is not often used by applications, and supporting
-	additional counters is completely optional.
-      </para>
-      <para>
-	These interfaces are deprecated and should not be used.  If performance
-	monitoring is desired, the developer should investigate and
-	potentially enhance the kernel perf and tracing infrastructure to export
-	GPU related performance information for consumption by performance
-	monitoring tools and applications.
-      </para>
-    </sect2>
-
-    <sect2>
-      <title>Configuring the device</title>
-      <para>
-	Obviously, device configuration is device-specific.
-	However, there are several common operations: finding a
-	device's PCI resources, mapping them, and potentially setting
-	up an IRQ handler.
-      </para>
-      <para>
-	Finding &amp; mapping resources is fairly straightforward.  The
-	DRM wrapper functions, drm_get_resource_start() and
-	drm_get_resource_len(), may be used to find BARs on the given
-	drm_device struct.  Once those values have been retrieved, the
-	driver load function can call drm_addmap() to create a new
-	mapping for the BAR in question.  Note that you probably want a
-	drm_local_map_t in your driver private structure to track any
-	mappings you create.
-<!-- !Fdrivers/gpu/drm/drm_bufs.c drm_get_resource_* -->
-<!-- !Finclude/drm/drmP.h drm_local_map_t -->
-      </para>
-      <para>
-	if compatibility with other operating systems isn't a concern
-	(DRM drivers can run under various BSD variants and OpenSolaris),
-	native Linux calls may be used for the above, e.g. pci_resource_*
-	and iomap*/iounmap.  See the Linux device driver book for more
-	info.
-      </para>
-      <para>
-	Once you have a register map, you may use the DRM_READn() and
-	DRM_WRITEn() macros to access the registers on your device, or
-	use driver-specific versions to offset into your MMIO space
-	relative to a driver-specific base pointer (see I915_READ for
-	an example).
-      </para>
-      <para>
-	If your device supports interrupt generation, you may want to
-	set up an interrupt handler when the driver is loaded.  This
-	is done using the drm_irq_install() function.  If your device
-	supports vertical blank interrupts, it should call
-	drm_vblank_init() to initialize the core vblank handling code before
-	enabling interrupts on your device.  This ensures the vblank related
-	structures are allocated and allows the core to handle vblank events.
-      </para>
-<!--!Fdrivers/char/drm/drm_irq.c drm_irq_install-->
-      <para>
-	Once your interrupt handler is registered (it uses your
-	drm_driver.irq_handler as the actual interrupt handling
-	function), you can safely enable interrupts on your device,
-	assuming any other state your interrupt handler uses is also
-	initialized.
-      </para>
-      <para>
-	Another task that may be necessary during configuration is
-	mapping the video BIOS.  On many devices, the VBIOS describes
-	device configuration, LCD panel timings (if any), and contains
-	flags indicating device state.  Mapping the BIOS can be done
-	using the pci_map_rom() call, a convenience function that
-	takes care of mapping the actual ROM, whether it has been
-	shadowed into memory (typically at address 0xc0000) or exists
-	on the PCI device in the ROM BAR.  Note that after the ROM
-	has been mapped and any necessary information has been extracted,
-	it should be unmapped; on many devices, the ROM address decoder is
-	shared with other BARs, so leaving it mapped could cause
-	undesired behavior like hangs or memory corruption.
-<!--!Fdrivers/pci/rom.c pci_map_rom-->
-      </para>
-    </sect2>
-
-    <sect2>
-      <title>Memory manager initialization</title>
-      <para>
-	In order to allocate command buffers, cursor memory, scanout
-	buffers, etc., as well as support the latest features provided
-	by packages like Mesa and the X.Org X server, your driver
-	should support a memory manager.
-      </para>
-      <para>
-	If your driver supports memory management (it should!), you
-	need to set that up at load time as well.  How you initialize
-	it depends on which memory manager you're using: TTM or GEM.
+	TTM design background and information belongs here.
       </para>
       <sect3>
 	<title>TTM initialization</title>
-	<para>
-	  TTM (for Translation Table Manager) manages video memory and
-	  aperture space for graphics devices. TTM supports both UMA devices
-	  and devices with dedicated video RAM (VRAM), i.e. most discrete
-	  graphics devices.  If your device has dedicated RAM, supporting
-	  TTM is desirable.  TTM also integrates tightly with your
-	  driver-specific buffer execution function.  See the radeon
-	  driver for examples.
-	</para>
-	<para>
-	  The core TTM structure is the ttm_bo_driver struct.  It contains
-	  several fields with function pointers for initializing the TTM,
-	  allocating and freeing memory, waiting for command completion
-	  and fence synchronization, and memory migration.  See the
-	  radeon_ttm.c file for an example of usage.
+        <warning><para>This section is outdated.</para></warning>
+        <para>
+          Drivers wishing to support TTM must fill out a ttm_bo_driver
+          structure. The structure contains several fields with function
+          pointers for initializing the TTM, allocating and freeing memory,
+          waiting for command completion and fence synchronization, and memory
+          migration. See the radeon_ttm.c file for an example of usage.
 	</para>
 	<para>
 	  The ttm_global_reference structure is made up of several fields:
@@ -445,82 +500,1081 @@
 	  count for the TTM, which will call your initialization function.
 	</para>
       </sect3>
-      <sect3>
-	<title>GEM initialization</title>
-	<para>
-	  GEM is an alternative to TTM, designed specifically for UMA
-	  devices.  It has simpler initialization and execution requirements
-	  than TTM, but has no VRAM management capability.  Core GEM
-	  is initialized by calling drm_mm_init() to create
-	  a GTT DRM MM object, which provides an address space pool for
-	  object allocation.  In a KMS configuration, the driver
-	  needs to allocate and initialize a command ring buffer following
-	  core GEM initialization.  A UMA device usually has what is called a
-	  "stolen" memory region, which provides space for the initial
-	  framebuffer and large, contiguous memory regions required by the
-	  device.  This space is not typically managed by GEM, and it must
-	  be initialized separately into its own DRM MM object.
-	</para>
-	<para>
-	  Initialization is driver-specific. In the case of Intel
-	  integrated graphics chips like 965GM, GEM initialization can
-	  be done by calling the internal GEM init function,
-	  i915_gem_do_init().  Since the 965GM is a UMA device
-	  (i.e. it doesn't have dedicated VRAM), GEM manages
-	  making regular RAM available for GPU operations.  Memory set
-	  aside by the BIOS (called "stolen" memory by the i915
-	  driver) is managed by the DRM memrange allocator; the
-	  rest of the aperture is managed by GEM.
-	  <programlisting>
-	    /* Basic memrange allocator for stolen space (aka vram) */
-	    drm_memrange_init(&amp;dev_priv->vram, 0, prealloc_size);
-	    /* Let GEM Manage from end of prealloc space to end of aperture */
-	    i915_gem_do_init(dev, prealloc_size, agp_size);
-	  </programlisting>
-<!--!Edrivers/char/drm/drm_memrange.c-->
-	</para>
-	<para>
-	  Once the memory manager has been set up, we may allocate the
-	  command buffer.  In the i915 case, this is also done with a
-	  GEM function, i915_gem_init_ringbuffer().
-	</para>
-      </sect3>
     </sect2>
-
-    <sect2>
-      <title>Output configuration</title>
+    <sect2 id="drm-gem">
+      <title>The Graphics Execution Manager (GEM)</title>
       <para>
-	The final initialization task is output configuration.  This involves:
+        The GEM design approach has resulted in a memory manager that doesn't
+        provide full coverage of all (or even all common) use cases in its
+        userspace or kernel API. GEM exposes a set of standard memory-related
+        operations to userspace and a set of helper functions to drivers, and let
+        drivers implement hardware-specific operations with their own private API.
+      </para>
+      <para>
+        The GEM userspace API is described in the
+        <ulink url="http://lwn.net/Articles/283798/"><citetitle>GEM - the Graphics
+        Execution Manager</citetitle></ulink> article on LWN. While slightly
+        outdated, the document provides a good overview of the GEM API principles.
+        Buffer allocation and read and write operations, described as part of the
+        common GEM API, are currently implemented using driver-specific ioctls.
+      </para>
+      <para>
+        GEM is data-agnostic. It manages abstract buffer objects without knowing
+        what individual buffers contain. APIs that require knowledge of buffer
+        contents or purpose, such as buffer allocation or synchronization
+        primitives, are thus outside of the scope of GEM and must be implemented
+        using driver-specific ioctls.
+      </para>
+      <para>
+	On a fundamental level, GEM involves several operations:
 	<itemizedlist>
-	  <listitem>
-	    Finding and initializing the CRTCs, encoders, and connectors
-	    for the device.
-	  </listitem>
-	  <listitem>
-	    Creating an initial configuration.
-	  </listitem>
-	  <listitem>
-	    Registering a framebuffer console driver.
-	  </listitem>
+	  <listitem>Memory allocation and freeing</listitem>
+	  <listitem>Command execution</listitem>
+	  <listitem>Aperture management at command execution time</listitem>
 	</itemizedlist>
+	Buffer object allocation is relatively straightforward and largely
+        handled by Linux's shmem layer, which provides memory to back each
+        object.
+      </para>
+      <para>
+        Device-specific operations, such as command execution, pinning, buffer
+	read &amp; write, mapping, and domain ownership transfers are left to
+        driver-specific ioctls.
       </para>
       <sect3>
-	<title>Output discovery and initialization</title>
-	<para>
-	  Several core functions exist to create CRTCs, encoders, and
-	  connectors, namely: drm_crtc_init(), drm_connector_init(), and
-	  drm_encoder_init(), along with several "helper" functions to
-	  perform common tasks.
+        <title>GEM Initialization</title>
+        <para>
+          Drivers that use GEM must set the DRIVER_GEM bit in the struct
+          <structname>drm_driver</structname>
+          <structfield>driver_features</structfield> field. The DRM core will
+          then automatically initialize the GEM core before calling the
+          <methodname>load</methodname> operation. Behind the scenes, this will
+          create a DRM Memory Manager object which provides an address space
+          pool for object allocation.
+        </para>
+        <para>
+          In a KMS configuration, drivers need to allocate and initialize a
+          command ring buffer following core GEM initialization if required by
+          the hardware. UMA devices usually have what is called a "stolen"
+          memory region, which provides space for the initial framebuffer and
+          large, contiguous memory regions required by the device. This space is
+          typically not managed by GEM, and must be initialized separately into
+          its own DRM MM object.
+        </para>
+      </sect3>
+      <sect3>
+        <title>GEM Object Creation</title>
+        <para>
+          GEM splits creation of GEM objects and allocation of the memory that
+          backs them into two distinct operations.
+        </para>
+        <para>
+          GEM objects are represented by an instance of struct
+          <structname>drm_gem_object</structname>. Drivers usually need to extend
+          GEM objects with private information and thus create a driver-specific
+          GEM object structure type that embeds an instance of struct
+          <structname>drm_gem_object</structname>.
+        </para>
+        <para>
+          To create a GEM object, a driver allocates memory for an instance of its
+          specific GEM object type and initializes the embedded struct
+          <structname>drm_gem_object</structname> with a call to
+          <function>drm_gem_object_init</function>. The function takes a pointer to
+          the DRM device, a pointer to the GEM object and the buffer object size
+          in bytes.
+        </para>
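+        <para>
+          A sketch of such a driver-specific type and its creation helper
+          (<structname>foo_gem_object</structname> and
+          <function>foo_gem_create</function> are hypothetical):
+        </para>
+        <programlisting>
+	struct foo_gem_object {
+		struct drm_gem_object base;
+		/* driver-private fields follow */
+	};
+
+	static struct foo_gem_object *foo_gem_create(struct drm_device *dev,
+						     size_t size)
+	{
+		struct foo_gem_object *obj;
+
+		obj = kzalloc(sizeof(*obj), GFP_KERNEL);
+		if (!obj)
+			return NULL;
+
+		/* size must be page-aligned */
+		if (drm_gem_object_init(dev, &amp;obj->base, size)) {
+			kfree(obj);
+			return NULL;
+		}
+		return obj;
+	}
+        </programlisting>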
+        <para>
+          GEM uses shmem to allocate anonymous pageable memory.
+          <function>drm_gem_object_init</function> will create an shmfs file of
+          the requested size and store it into the struct
+          <structname>drm_gem_object</structname> <structfield>filp</structfield>
+          field. The memory is used as either main storage for the object when the
+          graphics hardware uses system memory directly or as a backing store
+          otherwise.
+        </para>
+        <para>
+          Drivers are responsible for the actual physical page allocation by
+          calling <function>shmem_read_mapping_page_gfp</function> for each page.
+          Note that they can decide to allocate pages when initializing the GEM
+          object, or to delay allocation until the memory is needed (for instance
+          when a page fault occurs as a result of a userspace memory access or
+          when the driver needs to start a DMA transfer involving the memory).
+        </para>
+        <para>
+          Anonymous pageable memory allocation is not always desired, for instance
+          when the hardware requires physically contiguous system memory as is
+          often the case in embedded devices. Drivers can create GEM objects with
+          no shmfs backing (called private GEM objects) by initializing them with
+          a call to <function>drm_gem_private_object_init</function> instead of
+          <function>drm_gem_object_init</function>. Storage for private GEM
+          objects must be managed by drivers.
+        </para>
+        <para>
+          Drivers that do not need to extend GEM objects with private information
+          can call the <function>drm_gem_object_alloc</function> function to
+          allocate and initialize a struct <structname>drm_gem_object</structname>
+          instance. The GEM core will call the optional driver
+          <methodname>gem_init_object</methodname> operation after initializing
+          the GEM object with <function>drm_gem_object_init</function>.
+          <synopsis>int (*gem_init_object) (struct drm_gem_object *obj);</synopsis>
+        </para>
+        <para>
+          No alloc-and-init function exists for private GEM objects.
+        </para>
+      </sect3>
+      <sect3>
+        <title>GEM Object Lifetime</title>
+        <para>
+          All GEM objects are reference-counted by the GEM core. References can
+          be acquired and released by calling
+          <function>drm_gem_object_reference</function> and
+          <function>drm_gem_object_unreference</function> respectively. The
+          caller must hold the <structname>drm_device</structname>
+          <structfield>struct_mutex</structfield> lock. As a convenience, GEM
+          provides the <function>drm_gem_object_reference_unlocked</function> and
+          <function>drm_gem_object_unreference_unlocked</function> functions that
+          can be called without holding the lock.
+        </para>
+        <para>
+          When the last reference to a GEM object is released the GEM core calls
+          the <structname>drm_driver</structname>
+          <methodname>gem_free_object</methodname> operation. That operation is
+          mandatory for GEM-enabled drivers and must free the GEM object and all
+          associated resources.
+        </para>
+        <para>
+          <synopsis>void (*gem_free_object) (struct drm_gem_object *obj);</synopsis>
+          Drivers are responsible for freeing all GEM object resources, including
+          the resources created by the GEM core. If an mmap offset has been
+          created for the object (in which case
+          <structname>drm_gem_object</structname>::<structfield>map_list</structfield>::<structfield>map</structfield>
+          is not NULL) it must be freed by a call to
+          <function>drm_gem_free_mmap_offset</function>. The shmfs backing store
+          must be released by calling <function>drm_gem_object_release</function>
+          (that function can safely be called if no shmfs backing store has been
+          created).
+        </para>
+      </sect3>
+      <sect3>
+        <title>GEM Object Naming</title>
+        <para>
+          Communication between userspace and the kernel refers to GEM objects
+          using local handles, global names or, more recently, file descriptors.
+          All of those are 32-bit integer values; the usual Linux kernel limits
+          apply to the file descriptors.
+        </para>
+        <para>
+          GEM handles are local to a DRM file. Applications get a handle to a GEM
+          object through a driver-specific ioctl, and can use that handle to refer
+          to the GEM object in other standard or driver-specific ioctls. Closing a
+          DRM file handle frees all its GEM handles and dereferences the
+          associated GEM objects.
+        </para>
+        <para>
+          To create a handle for a GEM object drivers call
+          <function>drm_gem_handle_create</function>. The function takes a pointer
+          to the DRM file and the GEM object and returns a locally unique handle.
+          When the handle is no longer needed drivers delete it with a call to
+          <function>drm_gem_handle_delete</function>. Finally the GEM object
+          associated with a handle can be retrieved by a call to
+          <function>drm_gem_object_lookup</function>.
+        </para>
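+        <para>
+          Combined with the reference-drop rule explained in the next
+          paragraph, handle creation thus typically looks like this sketch,
+          where <literal>obj</literal> points to a newly created GEM object:
+        </para>
+        <programlisting>
+	u32 handle;
+	int ret;
+
+	ret = drm_gem_handle_create(file_priv, obj, &amp;handle);
+	/* the handle now holds its own reference, drop the creation one */
+	drm_gem_object_unreference_unlocked(obj);
+	if (ret)
+		return ret;
+        </programlisting>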
+        <para>
+          Handles don't take ownership of GEM objects, they only take a reference
+          to the object that will be dropped when the handle is destroyed. To
+          avoid leaking GEM objects, drivers must make sure they drop the
+          reference(s) they own (such as the initial reference taken at object
+          creation time) as appropriate, without any special consideration for the
+          handle. For example, in the particular case of combined GEM object and
+          handle creation in the implementation of the
+          <methodname>dumb_create</methodname> operation, drivers must drop the
+          initial reference to the GEM object before returning the handle.
+        </para>
+        <para>
+          GEM names are similar in purpose to handles but are not local to DRM
+          files. They can be passed between processes to reference a GEM object
+          globally. Names can't be used directly to refer to objects in the DRM
+          API, applications must convert handles to names and names to handles
+          using the DRM_IOCTL_GEM_FLINK and DRM_IOCTL_GEM_OPEN ioctls
+          respectively. The conversion is handled by the DRM core without any
+          driver-specific support.
+        </para>
+        <para>
+          Similar to global names, GEM file descriptors are also used to share GEM
+          objects across processes. They offer additional security: as file
+          descriptors must be explicitly sent over UNIX domain sockets to be shared
+          between applications, they can't be guessed like the globally unique GEM
+          names.
+        </para>
+        <para>
+          Drivers that support GEM file descriptors, also known as the DRM PRIME
+          API, must set the DRIVER_PRIME bit in the struct
+          <structname>drm_driver</structname>
+          <structfield>driver_features</structfield> field, and implement the
+          <methodname>prime_handle_to_fd</methodname> and
+          <methodname>prime_fd_to_handle</methodname> operations.
+        </para>
+        <para>
+          <synopsis>int (*prime_handle_to_fd)(struct drm_device *dev,
+                            struct drm_file *file_priv, uint32_t handle,
+                            uint32_t flags, int *prime_fd);
+  int (*prime_fd_to_handle)(struct drm_device *dev,
+                            struct drm_file *file_priv, int prime_fd,
+                            uint32_t *handle);</synopsis>
+          Those two operations convert a handle to a PRIME file descriptor and
+          vice versa. Drivers must use the kernel dma-buf buffer sharing framework
+          to manage the PRIME file descriptors.
+        </para>
+        <para>
+          While non-GEM drivers must implement the operations themselves, GEM
+          drivers must use the <function>drm_gem_prime_handle_to_fd</function>
+          and <function>drm_gem_prime_fd_to_handle</function> helper functions.
+          Those helpers rely on the driver
+          <methodname>gem_prime_export</methodname> and
+          <methodname>gem_prime_import</methodname> operations to create a dma-buf
+          instance from a GEM object (dma-buf exporter role) and to create a GEM
+          object from a dma-buf instance (dma-buf importer role).
+        </para>
+        <para>
+          <synopsis>struct dma_buf * (*gem_prime_export)(struct drm_device *dev,
+                                       struct drm_gem_object *obj,
+                                       int flags);
+  struct drm_gem_object * (*gem_prime_import)(struct drm_device *dev,
+                                              struct dma_buf *dma_buf);</synopsis>
+          These two operations are mandatory for GEM drivers that support DRM
+          PRIME.
+        </para>
+      </sect3>
+      <sect3 id="drm-gem-objects-mapping">
+        <title>GEM Object Mapping</title>
+        <para>
+          Because mapping operations are fairly heavyweight, GEM favours
+          read/write-like access to buffers, implemented through driver-specific
+          ioctls, over mapping buffers to userspace. However, when random access
+          to the buffer is needed (to perform software rendering for instance),
+          direct access to the object can be more efficient.
+        </para>
+        <para>
+          The mmap system call can't be used directly to map GEM objects, as they
+          don't have their own file handle. Two alternative methods currently
+          co-exist to map GEM objects to userspace. The first method uses a
+          driver-specific ioctl to perform the mapping operation, calling
+          <function>do_mmap</function> under the hood. This is often considered
+          dubious, seems to be discouraged for new GEM-enabled drivers, and will
+          thus not be described here.
+        </para>
+        <para>
+          The second method uses the mmap system call on the DRM file handle.
+          <synopsis>void *mmap(void *addr, size_t length, int prot, int flags, int fd,
+             off_t offset);</synopsis>
+          DRM identifies the GEM object to be mapped by a fake offset passed
+          through the mmap offset argument. Prior to being mapped, a GEM object
+          must thus be associated with a fake offset. To do so, drivers must call
+          <function>drm_gem_create_mmap_offset</function> on the object. The
+          function allocates a fake offset range from a pool and stores the
+          offset divided by PAGE_SIZE in
+          <literal>obj-&gt;map_list.hash.key</literal>. Care must be taken not to
+          call <function>drm_gem_create_mmap_offset</function> if a fake offset
+          has already been allocated for the object. This can be tested by
+          <literal>obj-&gt;map_list.map</literal> being non-NULL.
+        </para>
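+        <para>
+          A guarded allocation could thus look like the following sketch, where
+          <literal>obj</literal> points to the GEM object:
+        </para>
+        <programlisting>
+	if (!obj->map_list.map) {
+		int ret = drm_gem_create_mmap_offset(obj);
+		if (ret)
+			return ret;
+	}
+	/* fake offset to report: obj->map_list.hash.key &lt;&lt; PAGE_SHIFT */
+        </programlisting>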
+        <para>
+          Once allocated, the fake offset value
+          (<literal>obj-&gt;map_list.hash.key &lt;&lt; PAGE_SHIFT</literal>)
+          must be passed to the application in a driver-specific way and can then
+          be used as the mmap offset argument.
+        </para>
+        <para>
+          The GEM core provides a helper method <function>drm_gem_mmap</function>
+          to handle object mapping. The method can be set directly as the mmap
+          file operation handler. It will look up the GEM object based on the
+          offset value and set the VMA operations to the
+          <structname>drm_driver</structname> <structfield>gem_vm_ops</structfield>
+          field. Note that <function>drm_gem_mmap</function> doesn't map memory to
+          userspace, but relies on the driver-provided fault handler to map pages
+          individually.
+        </para>
+        <para>
+          To use <function>drm_gem_mmap</function>, drivers must fill the struct
+          <structname>drm_driver</structname> <structfield>gem_vm_ops</structfield>
+          field with a pointer to VM operations.
+        </para>
+        <para>
+          <synopsis>struct vm_operations_struct *gem_vm_ops
+
+  struct vm_operations_struct {
+          void (*open)(struct vm_area_struct * area);
+          void (*close)(struct vm_area_struct * area);
+          int (*fault)(struct vm_area_struct *vma, struct vm_fault *vmf);
+  };</synopsis>
+        </para>
+        <para>
+          The <methodname>open</methodname> and <methodname>close</methodname>
+          operations must update the GEM object reference count. Drivers can use
+          the <function>drm_gem_vm_open</function> and
+          <function>drm_gem_vm_close</function> helper functions directly as open
+          and close handlers.
+        </para>
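+        <para>
+          For example (<function>foo_gem_fault</function> is a hypothetical
+          driver fault handler):
+        </para>
+        <programlisting>
+	static struct vm_operations_struct foo_gem_vm_ops = {
+		.fault = foo_gem_fault,
+		.open = drm_gem_vm_open,
+		.close = drm_gem_vm_close,
+	};
+        </programlisting>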
+        <para>
+          The fault operation handler is responsible for mapping individual pages
+          to userspace when a page fault occurs. Depending on the memory
+          allocation scheme, drivers can allocate pages at fault time, or can
+          decide to allocate memory for the GEM object at the time the object is
+          created.
+        </para>
+        <para>
+          Drivers that want to map the GEM object upfront instead of handling page
+          faults can implement their own mmap file operation handler.
+        </para>
+      </sect3>
+      <sect3>
+        <title>Dumb GEM Objects</title>
+        <para>
+          The GEM API doesn't standardize GEM object creation and leaves it to
+          driver-specific ioctls. While not an issue for full-fledged graphics
+          stacks that include device-specific userspace components (in libdrm
+          for instance), this limitation makes DRM-based early boot graphics
+          unnecessarily complex.
+        </para>
+        <para>
+          Dumb GEM objects partly alleviate the problem by providing a standard
+          API to create dumb buffers suitable for scanout, which can then be used
+          to create KMS frame buffers.
+        </para>
+        <para>
+          To support dumb GEM objects drivers must implement the
+          <methodname>dumb_create</methodname>,
+          <methodname>dumb_destroy</methodname> and
+          <methodname>dumb_map_offset</methodname> operations.
+        </para>
+        <itemizedlist>
+          <listitem>
+            <synopsis>int (*dumb_create)(struct drm_file *file_priv, struct drm_device *dev,
+                     struct drm_mode_create_dumb *args);</synopsis>
+            <para>
+              The <methodname>dumb_create</methodname> operation creates a GEM
+              object suitable for scanout based on the width, height and depth
+              from the struct <structname>drm_mode_create_dumb</structname>
+              argument. It fills the argument's <structfield>handle</structfield>,
+              <structfield>pitch</structfield> and <structfield>size</structfield>
+              fields with a handle for the newly created GEM object and its line
+              pitch and size in bytes (a hedged implementation sketch follows
+              this list).
+            </para>
+          </listitem>
+          <listitem>
+            <synopsis>int (*dumb_destroy)(struct drm_file *file_priv, struct drm_device *dev,
+                      uint32_t handle);</synopsis>
+            <para>
+              The <methodname>dumb_destroy</methodname> operation destroys a dumb
+              GEM object created by <methodname>dumb_create</methodname>.
+            </para>
+          </listitem>
+          <listitem>
+            <synopsis>int (*dumb_map_offset)(struct drm_file *file_priv, struct drm_device *dev,
+                         uint32_t handle, uint64_t *offset);</synopsis>
+            <para>
+              The <methodname>dumb_map_offset</methodname> operation associates an
+              mmap fake offset with the GEM object given by the handle and returns
+              it. Drivers must use the
+              <function>drm_gem_create_mmap_offset</function> function to
+              associate the fake offset as described in
+              <xref linkend="drm-gem-objects-mapping"/>.
+            </para>
+          </listitem>
+        </itemizedlist>
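+        <para>
+          A hedged sketch of a GEM-based <methodname>dumb_create</methodname>
+          implementation, reusing the hypothetical
+          <function>foo_gem_create</function> helper sketched earlier:
+        </para>
+        <programlisting>
+	static int foo_dumb_create(struct drm_file *file_priv,
+				   struct drm_device *dev,
+				   struct drm_mode_create_dumb *args)
+	{
+		struct foo_gem_object *obj;
+		int ret;
+
+		args->pitch = args->width * ((args->bpp + 7) / 8);
+		args->size = PAGE_ALIGN(args->pitch * args->height);
+
+		obj = foo_gem_create(dev, args->size);
+		if (!obj)
+			return -ENOMEM;
+
+		ret = drm_gem_handle_create(file_priv, &amp;obj->base,
+					    &amp;args->handle);
+		/* drop the initial reference, the handle keeps the object */
+		drm_gem_object_unreference_unlocked(&amp;obj->base);
+		return ret;
+	}
+        </programlisting>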
+      </sect3>
+      <sect3>
+        <title>Memory Coherency</title>
+        <para>
+          When mapped to the device or used in a command buffer, backing pages
+          for an object are flushed to memory and marked write combined so as to
+          be coherent with the GPU. Likewise, if the CPU accesses an object
+          after the GPU has finished rendering to the object, then the object
+          must be made coherent with the CPU's view of memory, usually involving
+          GPU cache flushing of various kinds. This core CPU&lt;-&gt;GPU
+          coherency management is provided by a device-specific ioctl, which
+          evaluates an object's current domain and performs any necessary
+          flushing or synchronization to put the object into the desired
+          coherency domain (note that the object may be busy, i.e. an active
+          render target; in that case, setting the domain blocks the client and
+          waits for rendering to complete before performing any necessary
+          flushing operations).
+        </para>
+      </sect3>
+      <sect3>
+        <title>Command Execution</title>
+        <para>
+	  Perhaps the most important GEM function for GPU devices is providing a
+          command execution interface to clients. Client programs construct
+          command buffers containing references to previously allocated memory
+          objects, and then submit them to GEM. At that point, GEM takes care to
+          bind all the objects into the GTT, execute the buffer, and provide
+          necessary synchronization between clients accessing the same buffers.
+          This often involves evicting some objects from the GTT and re-binding
+          others (a fairly expensive operation), and providing relocation
+          support which hides fixed GTT offsets from clients. Clients must take
+          care not to submit command buffers that reference more objects than
+          can fit in the GTT; otherwise, GEM will reject them and no rendering
+          will occur. Similarly, if several objects in the buffer require fence
+          registers to be allocated for correct rendering (e.g. 2D blits on
+          pre-965 chips), care must be taken not to require more fence registers
+          than are available to the client. Such resource management should be
+          abstracted from the client in libdrm.
+        </para>
+      </sect3>
+    </sect2>
+  </sect1>
+
+  <!-- Internals: mode setting -->
+
+  <sect1 id="drm-mode-setting">
+    <title>Mode Setting</title>
+    <para>
+      Drivers must initialize the mode setting core by calling
+      <function>drm_mode_config_init</function> on the DRM device. The function
+      initializes the <structname>drm_device</structname>
+      <structfield>mode_config</structfield> field and never fails. Once done,
+      mode configuration must be set up by initializing the following fields;
+      a short initialization sketch follows the list.
+    </para>
+    <itemizedlist>
+      <listitem>
+        <synopsis>int min_width, min_height;
+int max_width, max_height;</synopsis>
+        <para>
+	  Minimum and maximum width and height of the frame buffers in pixel
+	  units.
 	</para>
-	<para>
-	  Connectors should be registered with sysfs once they've been
-	  detected and initialized, using the
-	  drm_sysfs_connector_add() function.  Likewise, when they're
-	  removed from the system, they should be destroyed with
-	  drm_sysfs_connector_remove().
-	</para>
-	<programlisting>
-<![CDATA[
+      </listitem>
+      <listitem>
+        <synopsis>struct drm_mode_config_funcs *funcs;</synopsis>
+	<para>Mode setting functions.</para>
+      </listitem>
+    </itemizedlist>
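+    <para>
+      A minimal initialization sequence could look like this, where the limits
+      and the <structname>foo_mode_funcs</structname> structure are
+      driver-specific assumptions.
+    </para>
+    <programlisting><![CDATA[
+	drm_mode_config_init(dev);
+
+	dev->mode_config.min_width = 0;
+	dev->mode_config.min_height = 0;
+	dev->mode_config.max_width = 8192;
+	dev->mode_config.max_height = 8192;
+	dev->mode_config.funcs = &foo_mode_funcs;
+]]></programlisting>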
+    <sect2>
+      <title>Frame Buffer Creation</title>
+      <synopsis>struct drm_framebuffer *(*fb_create)(struct drm_device *dev,
+				     struct drm_file *file_priv,
+				     struct drm_mode_fb_cmd2 *mode_cmd);</synopsis>
+      <para>
+        Frame buffers are abstract memory objects that provide a source of
+        pixels to scanout to a CRTC. Applications explicitly request the
+        creation of frame buffers through the DRM_IOCTL_MODE_ADDFB(2) ioctls and
+        receive an opaque handle that can be passed to the KMS CRTC control,
+        plane configuration and page flip functions.
+      </para>
+      <para>
+        Frame buffers rely on the underlying memory manager for low-level memory
+        operations. When creating a frame buffer, applications pass a memory
+        handle (or a list of memory handles for multi-planar formats) through
+        the <parameter>drm_mode_fb_cmd2</parameter> argument. This document
+        assumes that the driver uses GEM; those handles thus reference GEM
+        objects.
+      </para>
+      <para>
+        Drivers must first validate the requested frame buffer parameters passed
+        through the mode_cmd argument. In particular this is where invalid
+        sizes, pixel formats or pitches can be caught.
+      </para>
+      <para>
+        If the parameters are deemed valid, drivers then create, initialize and
+        return an instance of struct <structname>drm_framebuffer</structname>.
+        If desired the instance can be embedded in a larger driver-specific
+        structure. The new instance is initialized with a call to
+        <function>drm_framebuffer_init</function> which takes a pointer to DRM
+        frame buffer operations (struct
+        <structname>drm_framebuffer_funcs</structname>). Frame buffer operations are
+        <itemizedlist>
+          <listitem>
+            <synopsis>int (*create_handle)(struct drm_framebuffer *fb,
+		     struct drm_file *file_priv, unsigned int *handle);</synopsis>
+            <para>
+              Create a handle to the frame buffer underlying memory object. If
+              the frame buffer uses a multi-plane format, the handle will
+              reference the memory object associated with the first plane.
+            </para>
+            <para>
+              Drivers call <function>drm_gem_handle_create</function> to create
+              the handle.
+            </para>
+          </listitem>
+          <listitem>
+            <synopsis>void (*destroy)(struct drm_framebuffer *framebuffer);</synopsis>
+            <para>
+              Destroy the frame buffer object and free all associated
+              resources. Drivers must call
+              <function>drm_framebuffer_cleanup</function> to free resources
+              allocated by the DRM core for the frame buffer object, and must
+              make sure to unreference all memory objects associated with the
+              frame buffer. Handles created by the
+              <methodname>create_handle</methodname> operation are released by
+              the DRM core.
+            </para>
+          </listitem>
+          <listitem>
+            <synopsis>int (*dirty)(struct drm_framebuffer *framebuffer,
+	     struct drm_file *file_priv, unsigned flags, unsigned color,
+	     struct drm_clip_rect *clips, unsigned num_clips);</synopsis>
+            <para>
+              This optional operation notifies the driver that a region of the
+              frame buffer has changed in response to a DRM_IOCTL_MODE_DIRTYFB
+              ioctl call.
+            </para>
+          </listitem>
+        </itemizedlist>
+      </para>
+      <para>
+        After initializing the <structname>drm_framebuffer</structname>
+        instance drivers must fill its <structfield>width</structfield>,
+        <structfield>height</structfield>, <structfield>pitches</structfield>,
+        <structfield>offsets</structfield>, <structfield>depth</structfield>,
+        <structfield>bits_per_pixel</structfield> and
+        <structfield>pixel_format</structfield> fields from the values passed
+        through the <parameter>drm_mode_fb_cmd2</parameter> argument. They
+        should call the <function>drm_helper_mode_fill_fb_struct</function>
+        helper function to do so.
+      </para>
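+      <para>
+        The following sketch outlines one possible
+        <methodname>fb_create</methodname> implementation. The
+        <structname>foo_framebuffer</structname> structure, which embeds
+        struct <structname>drm_framebuffer</structname>, and
+        <varname>foo_fb_funcs</varname> are hypothetical; GEM object lookup
+        and parameter validation are elided.
+      </para>
+      <programlisting><![CDATA[
+static struct drm_framebuffer *foo_fb_create(struct drm_device *dev,
+					     struct drm_file *file_priv,
+					     struct drm_mode_fb_cmd2 *mode_cmd)
+{
+	struct foo_framebuffer *foo_fb;
+	int ret;
+
+	/* Validate sizes, pixel format and pitches here. */
+
+	foo_fb = kzalloc(sizeof(*foo_fb), GFP_KERNEL);
+	if (!foo_fb)
+		return ERR_PTR(-ENOMEM);
+
+	ret = drm_framebuffer_init(dev, &foo_fb->base, &foo_fb_funcs);
+	if (ret) {
+		kfree(foo_fb);
+		return ERR_PTR(ret);
+	}
+
+	/* Fill width, height, pitches, offsets, depth, bits_per_pixel and
+	 * pixel_format from the ioctl argument. */
+	drm_helper_mode_fill_fb_struct(&foo_fb->base, mode_cmd);
+
+	return &foo_fb->base;
+}
+]]></programlisting>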
+    </sect2>
+    <sect2>
+      <title>Output Polling</title>
+      <synopsis>void (*output_poll_changed)(struct drm_device *dev);</synopsis>
+      <para>
+        This operation notifies the driver that the status of one or more
+        connectors has changed. Drivers that use the fb helper can just call the
+        <function>drm_fb_helper_hotplug_event</function> function to handle this
+        operation.
+      </para>
+    </sect2>
+  </sect1>
+
+  <!-- Internals: kms initialization and cleanup -->
+
+  <sect1 id="drm-kms-init">
+    <title>KMS Initialization and Cleanup</title>
+    <para>
+      A KMS device is abstracted and exposed as a set of planes, CRTCs, encoders
+      and connectors. KMS drivers must thus create and initialize all those
+      objects at load time after initializing mode setting.
+    </para>
+    <sect2>
+      <title>CRTCs (struct <structname>drm_crtc</structname>)</title>
+      <para>
+        A CRTC is an abstraction representing a part of the chip that contains a
+	pointer to a scanout buffer. Therefore, the number of CRTCs available
+	determines how many independent scanout buffers can be active at any
+	given time. The CRTC structure contains several fields to support this:
+	a pointer to some video memory (abstracted as a frame buffer object), a
+	display mode, and an (x, y) offset into the video memory to support
+	panning or configurations where one piece of video memory spans multiple
+	CRTCs.
+      </para>
+      <sect3>
+        <title>CRTC Initialization</title>
+        <para>
+          A KMS device must create and register at least one struct
+          <structname>drm_crtc</structname> instance. The instance is allocated
+          and zeroed by the driver, possibly as part of a larger structure, and
+          registered with a call to <function>drm_crtc_init</function> with a
+          pointer to CRTC functions.
+        </para>
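+        <para>
+          A minimal sketch, assuming a driver-specific
+          <structname>foo_crtc</structname> structure that embeds struct
+          <structname>drm_crtc</structname>:
+        </para>
+        <programlisting><![CDATA[
+	struct foo_crtc *foo_crtc;
+
+	foo_crtc = kzalloc(sizeof(*foo_crtc), GFP_KERNEL);
+	if (!foo_crtc)
+		return -ENOMEM;
+
+	/* foo_crtc_funcs is the driver's drm_crtc_funcs instance. */
+	drm_crtc_init(dev, &foo_crtc->base, &foo_crtc_funcs);
+]]></programlisting>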
+      </sect3>
+      <sect3>
+        <title>CRTC Operations</title>
+        <sect4>
+          <title>Set Configuration</title>
+          <synopsis>int (*set_config)(struct drm_mode_set *set);</synopsis>
+          <para>
+            Apply a new CRTC configuration to the device. The configuration
+            specifies a CRTC, a frame buffer to scan out from, an (x,y) position in
+            the frame buffer, a display mode and an array of connectors to drive
+            with the CRTC if possible.
+          </para>
+          <para>
+            If the frame buffer specified in the configuration is NULL, the driver
+            must detach all encoders connected to the CRTC and all connectors
+            attached to those encoders and disable them.
+          </para>
+          <para>
+            This operation is called with the mode config lock held.
+          </para>
+          <note><para>
+            FIXME: How should set_config interact with DPMS? If the CRTC is
+            suspended, should it be resumed?
+          </para></note>
+        </sect4>
+        <sect4>
+          <title>Page Flipping</title>
+          <synopsis>int (*page_flip)(struct drm_crtc *crtc, struct drm_framebuffer *fb,
+                   struct drm_pending_vblank_event *event);</synopsis>
+          <para>
+            Schedule a page flip to the given frame buffer for the CRTC. This
+            operation is called with the mode config mutex held.
+          </para>
+          <para>
+            Page flipping is a synchronization mechanism that replaces the frame
+            buffer being scanned out by the CRTC with a new frame buffer during
+            vertical blanking, avoiding tearing. When an application requests a page
+            flip the DRM core verifies that the new frame buffer is large enough to
+            be scanned out by the CRTC in the currently configured mode and then
+            calls the CRTC <methodname>page_flip</methodname> operation with a
+            pointer to the new frame buffer.
+          </para>
+          <para>
+            The <methodname>page_flip</methodname> operation schedules a page flip.
+            Once any pending rendering targeting the new frame buffer has
+            completed, the CRTC will be reprogrammed to display that frame buffer
+            after the next vertical refresh. The operation must return immediately
+            without waiting for rendering or page flip to complete and must block
+            any new rendering to the frame buffer until the page flip completes.
+          </para>
+          <para>
+            If a page flip is already pending, the
+            <methodname>page_flip</methodname> operation must return
+            -<errorname>EBUSY</errorname>.
+          </para>
+          <para>
+            To synchronize page flip to vertical blanking the driver will likely
+            need to enable vertical blanking interrupts. It should call
+            <function>drm_vblank_get</function> for that purpose, and call
+            <function>drm_vblank_put</function> after the page flip completes.
+          </para>
+          <para>
+            If the application has requested to be notified when page flip completes
+            the <methodname>page_flip</methodname> operation will be called with a
+            non-NULL <parameter>event</parameter> argument pointing to a
+            <structname>drm_pending_vblank_event</structname> instance. Upon page
+            flip completion the driver must fill the
+            <parameter>event</parameter>::<structfield>event</structfield>
+            <structfield>sequence</structfield>, <structfield>tv_sec</structfield>
+            and <structfield>tv_usec</structfield> fields with the associated
+            vertical blanking count and timestamp, add the event to the
+            <parameter>drm_file</parameter> list of events to be signaled, and wake
+            up any waiting process. This can be performed with
+            <programlisting><![CDATA[
+            struct timeval now;
+
+            event->event.sequence = drm_vblank_count_and_time(..., &now);
+            event->event.tv_sec = now.tv_sec;
+            event->event.tv_usec = now.tv_usec;
+
+            spin_lock_irqsave(&dev->event_lock, flags);
+            list_add_tail(&event->base.link, &event->base.file_priv->event_list);
+            wake_up_interruptible(&event->base.file_priv->event_wait);
+            spin_unlock_irqrestore(&dev->event_lock, flags);
+            ]]></programlisting>
+          </para>
+          <note><para>
+            FIXME: Could drivers that don't need to wait for rendering to complete
+            just add the event to <literal>dev-&gt;vblank_event_list</literal> and
+            let the DRM core handle everything, as for "normal" vertical blanking
+            events?
+          </para></note>
+          <para>
+            While waiting for the page flip to complete, the
+            <literal>event-&gt;base.link</literal> list head can be used freely by
+            the driver to store the pending event in a driver-specific list.
+          </para>
+          <para>
+            If the file handle is closed before the event is signaled, drivers must
+            take care to destroy the event in their
+            <methodname>preclose</methodname> operation (and, if needed, call
+            <function>drm_vblank_put</function>).
+          </para>
+        </sect4>
+        <sect4>
+          <title>Miscellaneous</title>
+          <itemizedlist>
+            <listitem>
+              <synopsis>void (*gamma_set)(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b,
+                        uint32_t start, uint32_t size);</synopsis>
+              <para>
+                Apply a gamma table to the device. The operation is optional.
+              </para>
+            </listitem>
+            <listitem>
+              <synopsis>void (*destroy)(struct drm_crtc *crtc);</synopsis>
+              <para>
+                Destroy the CRTC when not needed anymore. See
+                <xref linkend="drm-kms-init"/>.
+              </para>
+            </listitem>
+          </itemizedlist>
+        </sect4>
+      </sect3>
+    </sect2>
+    <sect2>
+      <title>Planes (struct <structname>drm_plane</structname>)</title>
+      <para>
+        A plane represents an image source that can be blended with or overlaid
+	on top of a CRTC during the scanout process. Planes are associated with
+	a frame buffer to crop a portion of the image memory (source) and
+	optionally scale it to a destination size. The result is then blended
+	with or overlaid on top of a CRTC.
+      </para>
+      <sect3>
+        <title>Plane Initialization</title>
+        <para>
+          Planes are optional. To create a plane, a KMS driver allocates and
+          zeroes an instance of struct <structname>drm_plane</structname>
+          (possibly as part of a larger structure) and registers it with a call
+          to <function>drm_plane_init</function>. The function takes a bitmask
+          of the CRTCs that can be associated with the plane, a pointer to the
+          plane functions and a list of supported formats.
+        </para>
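+        <para>
+          A minimal sketch, with a hypothetical
+          <varname>foo_plane_funcs</varname> structure and format list:
+        </para>
+        <programlisting><![CDATA[
+	static const uint32_t foo_formats[] = {
+		DRM_FORMAT_XRGB8888,
+		DRM_FORMAT_YUYV,
+	};
+
+	/* Make the plane usable on the first CRTC only; the final argument
+	 * marks the plane as not private. */
+	drm_plane_init(dev, &foo_plane->base, 1 << 0, &foo_plane_funcs,
+		       foo_formats, ARRAY_SIZE(foo_formats), false);
+]]></programlisting>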
+      </sect3>
+      <sect3>
+        <title>Plane Operations</title>
+        <itemizedlist>
+          <listitem>
+            <synopsis>int (*update_plane)(struct drm_plane *plane, struct drm_crtc *crtc,
+                        struct drm_framebuffer *fb, int crtc_x, int crtc_y,
+                        unsigned int crtc_w, unsigned int crtc_h,
+                        uint32_t src_x, uint32_t src_y,
+                        uint32_t src_w, uint32_t src_h);</synopsis>
+            <para>
+              Enable and configure the plane to use the given CRTC and frame buffer.
+            </para>
+            <para>
+              The source rectangle in frame buffer memory coordinates is given by
+              the <parameter>src_x</parameter>, <parameter>src_y</parameter>,
+              <parameter>src_w</parameter> and <parameter>src_h</parameter>
+              parameters (as 16.16 fixed point values). Devices that don't support
+              subpixel plane coordinates can ignore the fractional part.
+            </para>
+            <para>
+              The destination rectangle in CRTC coordinates is given by the
+              <parameter>crtc_x</parameter>, <parameter>crtc_y</parameter>,
+              <parameter>crtc_w</parameter> and <parameter>crtc_h</parameter>
+              parameters (as integer values). Devices scale the source rectangle to
+              the destination rectangle. If scaling is not supported, and the source
+              rectangle size doesn't match the destination rectangle size, the
+              driver must return a -<errorname>EINVAL</errorname> error.
+            </para>
+          </listitem>
+          <listitem>
+            <synopsis>int (*disable_plane)(struct drm_plane *plane);</synopsis>
+            <para>
+              Disable the plane. The DRM core calls this method in response to a
+              DRM_IOCTL_MODE_SETPLANE ioctl call with the frame buffer ID set to 0.
+              Disabled planes must not be processed by the CRTC.
+            </para>
+          </listitem>
+          <listitem>
+            <synopsis>void (*destroy)(struct drm_plane *plane);</synopsis>
+            <para>
+              Destroy the plane when not needed anymore. See
+              <xref linkend="drm-kms-init"/>.
+            </para>
+          </listitem>
+        </itemizedlist>
+      </sect3>
+    </sect2>
+    <sect2>
+      <title>Encoders (struct <structname>drm_encoder</structname>)</title>
+      <para>
+        An encoder takes pixel data from a CRTC and converts it to a format
+	suitable for any attached connectors. On some devices, it may be
+	possible to have a CRTC send data to more than one encoder. In that
+	case, both encoders would receive data from the same scanout buffer,
+	resulting in a "cloned" display configuration across the connectors
+	attached to each encoder.
+      </para>
+      <sect3>
+        <title>Encoder Initialization</title>
+        <para>
+          As for CRTCs, a KMS driver must create, initialize and register at
+          least one struct <structname>drm_encoder</structname> instance. The
+          instance is allocated and zeroed by the driver, possibly as part of a
+          larger structure.
+        </para>
+        <para>
+          Drivers must initialize the struct <structname>drm_encoder</structname>
+          <structfield>possible_crtcs</structfield> and
+          <structfield>possible_clones</structfield> fields before registering the
+          encoder. Both fields are bitmasks, respectively of the CRTCs that the
+          encoder can be connected to, and of the sibling encoders that are
+          candidates for cloning.
+        </para>
+        <para>
+          After being initialized, the encoder must be registered with a call to
+          <function>drm_encoder_init</function>. The function takes a pointer to
+          the encoder functions and an encoder type. Supported types are
+          <itemizedlist>
+            <listitem>
+              DRM_MODE_ENCODER_DAC for VGA and analog on DVI-I/DVI-A
+              </listitem>
+            <listitem>
+              DRM_MODE_ENCODER_TMDS for DVI, HDMI and (embedded) DisplayPort
+            </listitem>
+            <listitem>
+              DRM_MODE_ENCODER_LVDS for display panels
+            </listitem>
+            <listitem>
+              DRM_MODE_ENCODER_TVDAC for TV output (Composite, S-Video, Component,
+              SCART)
+            </listitem>
+            <listitem>
+              DRM_MODE_ENCODER_VIRTUAL for virtual machine displays
+            </listitem>
+          </itemizedlist>
+        </para>
+        <para>
+          Encoders must be attached to a CRTC to be used. DRM drivers leave
+          encoders unattached at initialization time. Applications (or the fbdev
+          compatibility layer when implemented) are responsible for attaching the
+          encoders they want to use to a CRTC.
+        </para>
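+        <para>
+          A minimal sketch for a hypothetical TMDS encoder embedded in a
+          driver-specific <structname>foo_encoder</structname> structure:
+        </para>
+        <programlisting><![CDATA[
+	/* Restrict the encoder to the first CRTC, with no cloning. */
+	foo_encoder->base.possible_crtcs = 1 << 0;
+	foo_encoder->base.possible_clones = 0;
+
+	drm_encoder_init(dev, &foo_encoder->base, &foo_encoder_funcs,
+			 DRM_MODE_ENCODER_TMDS);
+]]></programlisting>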
+      </sect3>
+      <sect3>
+        <title>Encoder Operations</title>
+        <itemizedlist>
+          <listitem>
+            <synopsis>void (*destroy)(struct drm_encoder *encoder);</synopsis>
+            <para>
+              Called to destroy the encoder when not needed anymore. See
+              <xref linkend="drm-kms-init"/>.
+            </para>
+          </listitem>
+        </itemizedlist>
+      </sect3>
+    </sect2>
+    <sect2>
+      <title>Connectors (struct <structname>drm_connector</structname>)</title>
+      <para>
+        A connector is the final destination for pixel data on a device, and
+	usually connects directly to an external display device like a monitor
+	or laptop panel. A connector can only be attached to one encoder at a
+	time. The connector is also the structure where information about the
+	attached display is kept, so it contains fields for display data, EDID
+	data, DPMS &amp; connection status, and information about modes
+	supported on the attached displays.
+      </para>
+      <sect3>
+        <title>Connector Initialization</title>
+        <para>
+          Finally a KMS driver must create, initialize, register and attach at
+          least one struct <structname>drm_connector</structname> instance. The
+          instance is created like other KMS objects, and initialized by setting
+          the following fields.
+        </para>
+        <variablelist>
+          <varlistentry>
+            <term><structfield>interlace_allowed</structfield></term>
+            <listitem><para>
+              Whether the connector can handle interlaced modes.
+            </para></listitem>
+          </varlistentry>
+          <varlistentry>
+            <term><structfield>doublescan_allowed</structfield></term>
+            <listitem><para>
+              Whether the connector can handle doublescan.
+            </para></listitem>
+          </varlistentry>
+          <varlistentry>
+            <term><structfield>display_info</structfield></term>
+            <listitem><para>
+              Display information is filled from EDID information when a display
+              is detected. For non-hot-pluggable displays such as flat panels in
+              embedded systems, the driver should initialize the
+              <structfield>display_info</structfield>.<structfield>width_mm</structfield>
+              and
+              <structfield>display_info</structfield>.<structfield>height_mm</structfield>
+              fields with the physical size of the display.
+            </para></listitem>
+          </varlistentry>
+          <varlistentry>
+            <term id="drm-kms-connector-polled"><structfield>polled</structfield></term>
+            <listitem><para>
+              Connector polling mode, a combination of
+              <variablelist>
+                <varlistentry>
+                  <term>DRM_CONNECTOR_POLL_HPD</term>
+                  <listitem><para>
+                    The connector generates hotplug events and doesn't need to be
+                    periodically polled. The CONNECT and DISCONNECT flags must not
+                    be set together with the HPD flag.
+                  </para></listitem>
+                </varlistentry>
+                <varlistentry>
+                  <term>DRM_CONNECTOR_POLL_CONNECT</term>
+                  <listitem><para>
+                    Periodically poll the connector for connection.
+                  </para></listitem>
+                </varlistentry>
+                <varlistentry>
+                  <term>DRM_CONNECTOR_POLL_DISCONNECT</term>
+                  <listitem><para>
+                    Periodically poll the connector for disconnection.
+                  </para></listitem>
+                </varlistentry>
+              </variablelist>
+              Set to 0 for connectors that don't support connection status
+              discovery.
+            </para></listitem>
+          </varlistentry>
+        </variablelist>
+        <para>
+          The connector is then registered with a call to
+          <function>drm_connector_init</function> with a pointer to the connector
+          functions and a connector type, and exposed through sysfs with a call to
+          <function>drm_sysfs_connector_add</function>.
+        </para>
+        <para>
+          Supported connector types are
+          <itemizedlist>
+            <listitem>DRM_MODE_CONNECTOR_VGA</listitem>
+            <listitem>DRM_MODE_CONNECTOR_DVII</listitem>
+            <listitem>DRM_MODE_CONNECTOR_DVID</listitem>
+            <listitem>DRM_MODE_CONNECTOR_DVIA</listitem>
+            <listitem>DRM_MODE_CONNECTOR_Composite</listitem>
+            <listitem>DRM_MODE_CONNECTOR_SVIDEO</listitem>
+            <listitem>DRM_MODE_CONNECTOR_LVDS</listitem>
+            <listitem>DRM_MODE_CONNECTOR_Component</listitem>
+            <listitem>DRM_MODE_CONNECTOR_9PinDIN</listitem>
+            <listitem>DRM_MODE_CONNECTOR_DisplayPort</listitem>
+            <listitem>DRM_MODE_CONNECTOR_HDMIA</listitem>
+            <listitem>DRM_MODE_CONNECTOR_HDMIB</listitem>
+            <listitem>DRM_MODE_CONNECTOR_TV</listitem>
+            <listitem>DRM_MODE_CONNECTOR_eDP</listitem>
+            <listitem>DRM_MODE_CONNECTOR_VIRTUAL</listitem>
+          </itemizedlist>
+        </para>
+        <para>
+          Connectors must be attached to an encoder to be used. For devices that
+          map connectors to encoders 1:1, the connector should be attached at
+          initialization time with a call to
+          <function>drm_mode_connector_attach_encoder</function>. The driver must
+          also set the <structname>drm_connector</structname>
+          <structfield>encoder</structfield> field to point to the attached
+          encoder.
+        </para>
+        <para>
+          Finally, drivers must initialize connector state change detection
+          with a call to <function>drm_kms_helper_poll_init</function>. If at
+          least one connector is pollable but can't generate hotplug interrupts
+          (indicated by the DRM_CONNECTOR_POLL_CONNECT and
+          DRM_CONNECTOR_POLL_DISCONNECT connector flags), a delayed work will
+          automatically be queued to periodically poll for changes. Connectors
+          that can generate hotplug interrupts must be marked with the
+          DRM_CONNECTOR_POLL_HPD flag instead, and their interrupt handler must
+          call <function>drm_helper_hpd_irq_event</function>. The function will
+          queue a delayed work to check the state of all connectors, but no
+          periodic polling will be done.
+        </para>
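+        <para>
+          Putting the steps together for a hypothetical HDMI connector mapped
+          1:1 to its encoder (error handling omitted):
+        </para>
+        <programlisting><![CDATA[
+	connector->interlace_allowed = true;
+	connector->polled = DRM_CONNECTOR_POLL_HPD;
+
+	drm_connector_init(dev, connector, &foo_connector_funcs,
+			   DRM_MODE_CONNECTOR_HDMIA);
+	drm_sysfs_connector_add(connector);
+
+	drm_mode_connector_attach_encoder(connector, encoder);
+	connector->encoder = encoder;
+
+	/* Once all connectors have been registered: */
+	drm_kms_helper_poll_init(dev);
+]]></programlisting>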
+      </sect3>
+      <sect3>
+        <title>Connector Operations</title>
+        <note><para>
+          Unless otherwise stated, all operations are mandatory.
+        </para></note>
+        <sect4>
+          <title>DPMS</title>
+          <synopsis>void (*dpms)(struct drm_connector *connector, int mode);</synopsis>
+          <para>
+            The DPMS operation sets the power state of a connector. The mode
+            argument is one of
+            <itemizedlist>
+              <listitem><para>DRM_MODE_DPMS_ON</para></listitem>
+              <listitem><para>DRM_MODE_DPMS_STANDBY</para></listitem>
+              <listitem><para>DRM_MODE_DPMS_SUSPEND</para></listitem>
+              <listitem><para>DRM_MODE_DPMS_OFF</para></listitem>
+            </itemizedlist>
+          </para>
+          <para>
+            In all but DPMS_ON mode the encoder to which the connector is attached
+            should put the display in low-power mode by driving its signals
+            appropriately. If more than one connector is attached to the encoder
+            care should be taken not to change the power state of other displays as
+            a side effect. Low-power mode should be propagated to the encoders and
+            CRTCs when all related connectors are put in low-power mode.
+          </para>
+        </sect4>
+        <sect4>
+          <title>Modes</title>
+          <synopsis>int (*fill_modes)(struct drm_connector *connector, uint32_t max_width,
+                      uint32_t max_height);</synopsis>
+          <para>
+            Fill the mode list with all supported modes for the connector. If the
+            <parameter>max_width</parameter> and <parameter>max_height</parameter>
+            arguments are non-zero, the implementation must ignore all modes wider
+            than <parameter>max_width</parameter> or higher than
+            <parameter>max_height</parameter>.
+          </para>
+          <para>
+            In this operation the connector must also fill its
+            <structfield>display_info</structfield>
+            <structfield>width_mm</structfield> and
+            <structfield>height_mm</structfield> fields with the physical size
+            of the connected display in millimeters. The fields should be set to
+            0 if the value isn't known or is not applicable (for instance for
+            projector devices).
+          </para>
+        </sect4>
+        <sect4>
+          <title>Connection Status</title>
+          <para>
+            The connection status is updated through polling or hotplug events when
+            supported (see <xref linkend="drm-kms-connector-polled"/>). The status
+            value is reported to userspace through ioctls and must not be used
+            inside the driver, as it only gets initialized by a call to
+            <function>drm_mode_getconnector</function> from userspace.
+          </para>
+          <synopsis>enum drm_connector_status (*detect)(struct drm_connector *connector,
+                                        bool force);</synopsis>
+          <para>
+            Check to see if anything is attached to the connector. The
+            <parameter>force</parameter> parameter is set to false whilst polling or
+            to true when checking the connector due to user request.
+            <parameter>force</parameter> can be used by the driver to avoid
+            expensive, destructive operations during automated probing.
+          </para>
+          <para>
+            Return connector_status_connected if something is connected to the
+            connector, connector_status_disconnected if nothing is connected and
+            connector_status_unknown if the connection state isn't known.
+          </para>
+          <para>
+            Drivers should only return connector_status_connected if the connection
+            status has really been probed as connected. Connectors that can't detect
+            the connection status, or failed connection status probes, should return
+            connector_status_unknown.
+          </para>
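+          <para>
+            A minimal sketch, where <function>foo_hpd_asserted</function> is a
+            hypothetical hardware hot-plug detect query:
+          </para>
+          <programlisting><![CDATA[
+static enum drm_connector_status
+foo_detect(struct drm_connector *connector, bool force)
+{
+	if (foo_hpd_asserted(connector))
+		return connector_status_connected;
+
+	return connector_status_disconnected;
+}
+]]></programlisting>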
+        </sect4>
+        <sect4>
+          <title>Miscellaneous</title>
+          <itemizedlist>
+            <listitem>
+              <synopsis>void (*destroy)(struct drm_connector *connector);</synopsis>
+              <para>
+                Destroy the connector when not needed anymore. See
+                <xref linkend="drm-kms-init"/>.
+              </para>
+            </listitem>
+          </itemizedlist>
+        </sect4>
+      </sect3>
+    </sect2>
+    <sect2>
+      <title>Cleanup</title>
+      <para>
+        The DRM core manages its objects' lifetime. When an object is not needed
+	anymore the core calls its destroy function, which must clean up and
+	free every resource allocated for the object. Every
+	<function>drm_*_init</function> call must be matched with a
+	corresponding <function>drm_*_cleanup</function> call to cleanup CRTCs
+	(<function>drm_crtc_cleanup</function>), planes
+	(<function>drm_plane_cleanup</function>), encoders
+	(<function>drm_encoder_cleanup</function>) and connectors
+	(<function>drm_connector_cleanup</function>). Furthermore, connectors
+	that have been added to sysfs must be removed by a call to
+	<function>drm_sysfs_connector_remove</function> before calling
+	<function>drm_connector_cleanup</function>.
+      </para>
+      <para>
+        Connector state change detection must be cleaned up with a call to
+	<function>drm_kms_helper_poll_fini</function>.
+      </para>
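+      <para>
+        For instance, a driver's destroy implementations could look like the
+        following sketch, where <function>to_foo_crtc</function> and
+        <function>to_foo_connector</function> are hypothetical
+        <function>container_of</function> wrappers.
+      </para>
+      <programlisting><![CDATA[
+static void foo_crtc_destroy(struct drm_crtc *crtc)
+{
+	drm_crtc_cleanup(crtc);
+	kfree(to_foo_crtc(crtc));
+}
+
+static void foo_connector_destroy(struct drm_connector *connector)
+{
+	/* Remove the connector from sysfs before cleaning it up. */
+	drm_sysfs_connector_remove(connector);
+	drm_connector_cleanup(connector);
+	kfree(to_foo_connector(connector));
+}
+]]></programlisting>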
+    </sect2>
+    <sect2>
+      <title>Output discovery and initialization example</title>
+      <programlisting><![CDATA[
 void intel_crt_init(struct drm_device *dev)
 {
 	struct drm_connector *connector;
@@ -556,252 +1610,741 @@
 	drm_connector_helper_add(connector, &intel_crt_connector_helper_funcs);
 
 	drm_sysfs_connector_add(connector);
-}
-]]>
+}]]></programlisting>
+      <para>
+        In the example above (taken from the i915 driver), a CRTC, connector and
+        encoder combination is created. A device-specific i2c bus is also
+        created for fetching EDID data and performing monitor detection. Once
+        the process is complete, the new connector is registered with sysfs to
+        make its properties available to applications.
+      </para>
+    </sect2>
+  </sect1>
+
+  <!-- Internals: mid-layer helper functions -->
+
+  <sect1>
+    <title>Mid-layer Helper Functions</title>
+    <para>
+      The CRTC, encoder and connector functions provided by the drivers
+      implement the DRM API. They're called by the DRM core and ioctl handlers
+      to handle device state changes and configuration requests. As implementing
+      those functions often requires logic not specific to drivers, mid-layer
+      helper functions are available to avoid duplicating boilerplate code.
+    </para>
+    <para>
+      The DRM core contains one mid-layer implementation. The mid-layer provides
+      implementations of several CRTC, encoder and connector functions (called
+      from the top of the mid-layer) that pre-process requests and call
+      lower-level functions provided by the driver (at the bottom of the
+      mid-layer). For instance, the
+      <function>drm_crtc_helper_set_config</function> function can be used to
+      fill the struct <structname>drm_crtc_funcs</structname>
+      <structfield>set_config</structfield> field. When called, it will split
+      the <methodname>set_config</methodname> operation into smaller, simpler
+      operations and call the driver to handle them.
+    </para>
+    <para>
+      To use the mid-layer, drivers call <function>drm_crtc_helper_add</function>,
+      <function>drm_encoder_helper_add</function> and
+      <function>drm_connector_helper_add</function> functions to install their
+      mid-layer bottom operations handlers, and fill the
+      <structname>drm_crtc_funcs</structname>,
+      <structname>drm_encoder_funcs</structname> and
+      <structname>drm_connector_funcs</structname> structures with pointers to
+      the mid-layer top API functions. Installing the mid-layer bottom operation
+      handlers is best done right after registering the corresponding KMS object.
+    </para>
+    <para>
+      The mid-layer is not split between CRTC, encoder and connector operations.
+      To use it, a driver must provide bottom functions for all three KMS
+      entities.
+    </para>
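+    <para>
+      A sketch of the resulting wiring for a CRTC, assuming hypothetical
+      <varname>foo_crtc_helper_funcs</varname> and
+      <function>foo_crtc_destroy</function> implementations:
+    </para>
+    <programlisting><![CDATA[
+static const struct drm_crtc_funcs foo_crtc_funcs = {
+	/* Mid-layer top API implementation provided by the DRM core. */
+	.set_config = drm_crtc_helper_set_config,
+	.destroy = foo_crtc_destroy,
+};
+
+void foo_crtc_register(struct drm_device *dev, struct foo_crtc *foo_crtc)
+{
+	drm_crtc_init(dev, &foo_crtc->base, &foo_crtc_funcs);
+	/* Install the mid-layer bottom operation handlers right after
+	 * registering the KMS object. */
+	drm_crtc_helper_add(&foo_crtc->base, &foo_crtc_helper_funcs);
+}
+]]></programlisting>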
+    <sect2>
+      <title>Helper Functions</title>
+      <itemizedlist>
+        <listitem>
+          <synopsis>int drm_crtc_helper_set_config(struct drm_mode_set *set);</synopsis>
+          <para>
+            The <function>drm_crtc_helper_set_config</function> helper function
+            is a CRTC <methodname>set_config</methodname> implementation. It
+            first tries to locate the best encoder for each connector by calling
+            the connector <methodname>best_encoder</methodname> helper
+            operation.
+          </para>
+          <para>
+            After locating the appropriate encoders, the helper function will
+            call the <methodname>mode_fixup</methodname> encoder and CRTC helper
+            operations to adjust the requested mode, or reject it completely in
+            which case an error will be returned to the application. If the new
+            configuration after mode adjustment is identical to the current
+            configuration the helper function will return without performing any
+            other operation.
+          </para>
+          <para>
+            If the adjusted mode is identical to the current mode but changes to
+            the frame buffer need to be applied, the
+            <function>drm_crtc_helper_set_config</function> function will call
+            the CRTC <methodname>mode_set_base</methodname> helper operation. If
+            the adjusted mode differs from the current mode, or if the
+            <methodname>mode_set_base</methodname> helper operation is not
+            provided, the helper function performs a full mode set sequence by
+            calling the <methodname>prepare</methodname>,
+            <methodname>mode_set</methodname> and
+            <methodname>commit</methodname> CRTC and encoder helper operations,
+            in that order.
+          </para>
+        </listitem>
+        <listitem>
+          <synopsis>void drm_helper_connector_dpms(struct drm_connector *connector, int mode);</synopsis>
+          <para>
+            The <function>drm_helper_connector_dpms</function> helper function
+            is a connector <methodname>dpms</methodname> implementation that
+            tracks power state of connectors. To use the function, drivers must
+            provide <methodname>dpms</methodname> helper operations for CRTCs
+            and encoders to apply the DPMS state to the device.
+          </para>
+          <para>
+            The mid-layer doesn't track the power state of CRTCs and encoders.
+            The <methodname>dpms</methodname> helper operations can thus be
+            called with a mode identical to the currently active mode.
+          </para>
+        </listitem>
+        <listitem>
+          <synopsis>int drm_helper_probe_single_connector_modes(struct drm_connector *connector,
+                                            uint32_t maxX, uint32_t maxY);</synopsis>
+          <para>
+            The <function>drm_helper_probe_single_connector_modes</function> helper
+            function is a connector <methodname>fill_modes</methodname>
+            implementation that updates the connection status for the connector
+            and then retrieves a list of modes by calling the connector
+            <methodname>get_modes</methodname> helper operation.
+          </para>
+          <para>
+            The function filters out modes larger than
+            <parameter>maxX</parameter> and <parameter>maxY</parameter>
+            if specified. It then calls the connector
+            <methodname>mode_valid</methodname> helper operation for each mode in
+            the probed list to check whether the mode is valid for the connector.
+          </para>
+        </listitem>
+      </itemizedlist>
+    </sect2>
+    <sect2>
+      <title>CRTC Helper Operations</title>
+      <itemizedlist>
+        <listitem id="drm-helper-crtc-mode-fixup">
+          <synopsis>bool (*mode_fixup)(struct drm_crtc *crtc,
+                       const struct drm_display_mode *mode,
+                       struct drm_display_mode *adjusted_mode);</synopsis>
+          <para>
+            Let CRTCs adjust the requested mode or reject it completely. This
+            operation returns true if the mode is accepted (possibly after being
+            adjusted) or false if it is rejected.
+          </para>
+          <para>
+            The <methodname>mode_fixup</methodname> operation should reject the
+            mode if it can't reasonably use it. The definition of "reasonable"
+            is currently fuzzy in this context. One possible behaviour would be
+            to set the adjusted mode to the panel timings when a fixed-mode
+            panel is used with hardware capable of scaling. Another behaviour
+            would be to accept any input mode and adjust it to the closest mode
+            supported by the hardware (FIXME: This needs to be clarified).
+          </para>
+        </listitem>
+        <listitem>
+          <synopsis>int (*mode_set_base)(struct drm_crtc *crtc, int x, int y,
+                     struct drm_framebuffer *old_fb)</synopsis>
+          <para>
+            Move the CRTC on the current frame buffer (stored in
+            <literal>crtc-&gt;fb</literal>) to position (x,y). Any of the frame
+            buffer, x position or y position may have been modified.
+          </para>
+          <para>
+            This helper operation is optional. If not provided, the
+            <function>drm_crtc_helper_set_config</function> function will fall
+            back to the <methodname>mode_set</methodname> helper operation.
+          </para>
+          <note><para>
+            FIXME: Why are x and y passed as arguments, as they can be accessed
+            through <literal>crtc-&gt;x</literal> and
+            <literal>crtc-&gt;y</literal>?
+          </para></note>
+        </listitem>
+        <listitem>
+          <synopsis>void (*prepare)(struct drm_crtc *crtc);</synopsis>
+          <para>
+            Prepare the CRTC for mode setting. This operation is called after
+            validating the requested mode. Drivers use it to perform
+            device-specific operations required before setting the new mode.
+          </para>
+        </listitem>
+        <listitem>
+          <synopsis>int (*mode_set)(struct drm_crtc *crtc, struct drm_display_mode *mode,
+                struct drm_display_mode *adjusted_mode, int x, int y,
+                struct drm_framebuffer *old_fb);</synopsis>
+          <para>
+            Set a new mode, position and frame buffer. Depending on the device
+            requirements, the mode can be stored internally by the driver and
+            applied in the <methodname>commit</methodname> operation, or
+            programmed to the hardware immediately.
+          </para>
+          <para>
+            The <methodname>mode_set</methodname> operation returns 0 on success
+	    or a negative error code if an error occurs.
+          </para>
+        </listitem>
+        <listitem>
+          <synopsis>void (*commit)(struct drm_crtc *crtc);</synopsis>
+          <para>
+            Commit a mode. This operation is called after setting the new mode.
+            Upon return the device must use the new mode and be fully
+            operational.
+          </para>
+        </listitem>
+      </itemizedlist>
+    </sect2>
+    <sect2>
+      <title>Encoder Helper Operations</title>
+      <itemizedlist>
+        <listitem>
+          <synopsis>bool (*mode_fixup)(struct drm_encoder *encoder,
+                       const struct drm_display_mode *mode,
+                       struct drm_display_mode *adjusted_mode);</synopsis>
+          <note><para>
+            FIXME: The mode argument should be const, but the i915 driver
+            modifies mode-&gt;clock in <function>intel_dp_mode_fixup</function>.
+          </para></note>
+          <para>
+            Let encoders adjust the requested mode or reject it completely. This
+            operation returns true if the mode is accepted (possibly after being
+            adjusted) or false if it is rejected. See the
+            <link linkend="drm-helper-crtc-mode-fixup">mode_fixup CRTC helper
+            operation</link> for an explanation of the allowed adjustments.
+          </para>
+        </listitem>
+        <listitem>
+          <synopsis>void (*prepare)(struct drm_encoder *encoder);</synopsis>
+          <para>
+            Prepare the encoder for mode setting. This operation is called after
+            validating the requested mode. Drivers use it to perform
+            device-specific operations required before setting the new mode.
+          </para>
+        </listitem>
+        <listitem>
+          <synopsis>void (*mode_set)(struct drm_encoder *encoder,
+                 struct drm_display_mode *mode,
+                 struct drm_display_mode *adjusted_mode);</synopsis>
+          <para>
+            Set a new mode. Depending on the device requirements, the mode can
+            be stored internally by the driver and applied in the
+            <methodname>commit</methodname> operation, or programmed to the
+            hardware immediately.
+          </para>
+        </listitem>
+        <listitem>
+          <synopsis>void (*commit)(struct drm_encoder *encoder);</synopsis>
+          <para>
+            Commit a mode. This operation is called after setting the new mode.
+            Upon return the device must use the new mode and be fully
+            operational.
+          </para>
+        </listitem>
+      </itemizedlist>
+    </sect2>
+    <sect2>
+      <title>Connector Helper Operations</title>
+      <itemizedlist>
+        <listitem>
+          <synopsis>struct drm_encoder *(*best_encoder)(struct drm_connector *connector);</synopsis>
+          <para>
+            Return a pointer to the best encoder for the connector. Devices that
+            map connectors to encoders 1:1 simply return the pointer to the
+            associated encoder. This operation is mandatory.
+          </para>
+        </listitem>
+        <listitem>
+          <synopsis>int (*get_modes)(struct drm_connector *connector);</synopsis>
+          <para>
+            Fill the connector's <structfield>probed_modes</structfield> list
+            by parsing EDID data with <function>drm_add_edid_modes</function> or
+            calling <function>drm_mode_probed_add</function> directly for every
+            supported mode and return the number of modes it has detected. This
+            operation is mandatory; a minimal EDID-based implementation is
+            sketched after this list.
+          </para>
+          <para>
+            When adding modes manually the driver creates each mode with a call to
+            <function>drm_mode_create</function> and must fill the following fields.
+            <itemizedlist>
+              <listitem>
+                <synopsis>__u32 type;</synopsis>
+                <para>
+                  Mode type bitmask, a combination of
+                  <variablelist>
+                    <varlistentry>
+                      <term>DRM_MODE_TYPE_BUILTIN</term>
+                      <listitem><para>not used?</para></listitem>
+                    </varlistentry>
+                    <varlistentry>
+                      <term>DRM_MODE_TYPE_CLOCK_C</term>
+                      <listitem><para>not used?</para></listitem>
+                    </varlistentry>
+                    <varlistentry>
+                      <term>DRM_MODE_TYPE_CRTC_C</term>
+                      <listitem><para>not used?</para></listitem>
+                    </varlistentry>
+                    <varlistentry>
+                      <term>DRM_MODE_TYPE_PREFERRED</term>
+                      <listitem><para>
+                        The preferred mode for the connector
+                      </para></listitem>
+                    </varlistentry>
+                    <varlistentry>
+                      <term>DRM_MODE_TYPE_DEFAULT</term>
+                      <listitem><para>not used?</para></listitem>
+                    </varlistentry>
+                    <varlistentry>
+                      <term>DRM_MODE_TYPE_USERDEF</term>
+                      <listitem><para>not used?</para></listitem>
+                    </varlistentry>
+                    <varlistentry>
+                      <term>DRM_MODE_TYPE_DRIVER</term>
+                      <listitem>
+                        <para>
+                          The mode has been created by the driver (as opposed
+                          to user-created modes).
+                        </para>
+                      </listitem>
+                    </varlistentry>
+                  </variablelist>
+                  Drivers must set the DRM_MODE_TYPE_DRIVER bit for all modes they
+                  create, and set the DRM_MODE_TYPE_PREFERRED bit for the preferred
+                  mode.
+                </para>
+              </listitem>
+              <listitem>
+                <synopsis>__u32 clock;</synopsis>
+                <para>Pixel clock frequency in kHz</para>
+              </listitem>
+              <listitem>
+                <synopsis>__u16 hdisplay, hsync_start, hsync_end, htotal;
+    __u16 vdisplay, vsync_start, vsync_end, vtotal;</synopsis>
+                <para>Horizontal and vertical timing information</para>
+                <screen><![CDATA[
+             Active                 Front           Sync           Back
+             Region                 Porch                          Porch
+    <-----------------------><----------------><-------------><-------------->
+
+      //////////////////////|
+     ////////////////////// |
+    //////////////////////  |..................               ................
+                                               _______________
+
+    <----- [hv]display ----->
+    <------------- [hv]sync_start ------------>
+    <--------------------- [hv]sync_end --------------------->
+    <-------------------------------- [hv]total ----------------------------->
+]]></screen>
+              </listitem>
+              <listitem>
+                <synopsis>__u16 hskew;
+    __u16 vscan;</synopsis>
+                <para>Unknown</para>
+              </listitem>
+              <listitem>
+                <synopsis>__u32 flags;</synopsis>
+                <para>
+                  Mode flags, a combination of
+                  <variablelist>
+                    <varlistentry>
+                      <term>DRM_MODE_FLAG_PHSYNC</term>
+                      <listitem><para>
+                        Horizontal sync is active high
+                      </para></listitem>
+                    </varlistentry>
+                    <varlistentry>
+                      <term>DRM_MODE_FLAG_NHSYNC</term>
+                      <listitem><para>
+                        Horizontal sync is active low
+                      </para></listitem>
+                    </varlistentry>
+                    <varlistentry>
+                      <term>DRM_MODE_FLAG_PVSYNC</term>
+                      <listitem><para>
+                        Vertical sync is active high
+                      </para></listitem>
+                    </varlistentry>
+                    <varlistentry>
+                      <term>DRM_MODE_FLAG_NVSYNC</term>
+                      <listitem><para>
+                        Vertical sync is active low
+                      </para></listitem>
+                    </varlistentry>
+                    <varlistentry>
+                      <term>DRM_MODE_FLAG_INTERLACE</term>
+                      <listitem><para>
+                        Mode is interlaced
+                      </para></listitem>
+                    </varlistentry>
+                    <varlistentry>
+                      <term>DRM_MODE_FLAG_DBLSCAN</term>
+                      <listitem><para>
+                        Mode uses doublescan
+                      </para></listitem>
+                    </varlistentry>
+                    <varlistentry>
+                      <term>DRM_MODE_FLAG_CSYNC</term>
+                      <listitem><para>
+                        Mode uses composite sync
+                      </para></listitem>
+                    </varlistentry>
+                    <varlistentry>
+                      <term>DRM_MODE_FLAG_PCSYNC</term>
+                      <listitem><para>
+                        Composite sync is active high
+                      </para></listitem>
+                    </varlistentry>
+                    <varlistentry>
+                      <term>DRM_MODE_FLAG_NCSYNC</term>
+                      <listitem><para>
+                        Composite sync is active low
+                      </para></listitem>
+                    </varlistentry>
+                    <varlistentry>
+                      <term>DRM_MODE_FLAG_HSKEW</term>
+                      <listitem><para>
+                        hskew provided (not used?)
+                      </para></listitem>
+                    </varlistentry>
+                    <varlistentry>
+                      <term>DRM_MODE_FLAG_BCAST</term>
+                      <listitem><para>
+                        not used?
+                      </para></listitem>
+                    </varlistentry>
+                    <varlistentry>
+                      <term>DRM_MODE_FLAG_PIXMUX</term>
+                      <listitem><para>
+                        not used?
+                      </para></listitem>
+                    </varlistentry>
+                    <varlistentry>
+                      <term>DRM_MODE_FLAG_DBLCLK</term>
+                      <listitem><para>
+                        not used?
+                      </para></listitem>
+                    </varlistentry>
+                    <varlistentry>
+                      <term>DRM_MODE_FLAG_CLKDIV2</term>
+                      <listitem><para>
+                        ?
+                      </para></listitem>
+                    </varlistentry>
+                  </variablelist>
+                </para>
+                <para>
+                  Note that modes marked with the INTERLACE or DBLSCAN flags will be
+                  filtered out by
+                  <function>drm_helper_probe_single_connector_modes</function> if
+                  the connector's <structfield>interlace_allowed</structfield> or
+                  <structfield>doublescan_allowed</structfield> field is set to 0.
+                </para>
+              </listitem>
+              <listitem>
+                <synopsis>char name[DRM_DISPLAY_MODE_LEN];</synopsis>
+                <para>
+                  Mode name. The driver must call
+                  <function>drm_mode_set_name</function> to fill the mode name from
+                  <structfield>hdisplay</structfield>,
+                  <structfield>vdisplay</structfield> and interlace flag after
+                  filling the corresponding fields.
+                </para>
+              </listitem>
+            </itemizedlist>
+          </para>
+          <para>
+            The <structfield>vrefresh</structfield> value is computed by
+            <function>drm_helper_probe_single_connector_modes</function>.
+          </para>
+          <para>
+            When parsing EDID data, <function>drm_add_edid_modes</function> fills the
+            connector <structfield>display_info</structfield>
+            <structfield>width_mm</structfield> and
+            <structfield>height_mm</structfield> fields. When creating modes
+            manually the <methodname>get_modes</methodname> helper operation must
+            set the <structfield>display_info</structfield>
+            <structfield>width_mm</structfield> and
+            <structfield>height_mm</structfield> fields if they haven't been set
+            already (for instance at initialization time when a fixed-size panel is
+            attached to the connector). The mode <structfield>width_mm</structfield>
+            and <structfield>height_mm</structfield> fields are only used internally
+            during EDID parsing and should not be set when creating modes manually.
+          </para>
+        </listitem>
+        <listitem>
+          <synopsis>int (*mode_valid)(struct drm_connector *connector,
+		  struct drm_display_mode *mode);</synopsis>
+          <para>
+            Verify whether a mode is valid for the connector. Return MODE_OK for
+            supported modes and one of the enum drm_mode_status values (MODE_*)
+            for unsupported modes. This operation is mandatory.
+          </para>
+          <para>
+            As the mode rejection reason is currently not used except for
+            immediately removing the unsupported mode, an implementation can
+            return MODE_BAD regardless of the exact reason why the mode is not
+            valid.
+          </para>
+          <note><para>
+            Note that the <methodname>mode_valid</methodname> helper operation is
+            only called for modes detected by the device, and
+            <emphasis>not</emphasis> for modes set by the user through the CRTC
+            <methodname>set_config</methodname> operation.
+          </para></note>
+        </listitem>
+      </itemizedlist>
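+      <para>
+        A minimal sketch of these two helper operations for a hypothetical
+        fixed-mode panel driver (the foo_* names and the static
+        foo_panel_fixed_mode template are invented for the example) could look
+        like this:
+        <programlisting>
+static int foo_connector_get_modes(struct drm_connector *connector)
+{
+	struct drm_display_mode *mode;
+
+	mode = drm_mode_duplicate(connector->dev, &amp;foo_panel_fixed_mode);
+	if (mode == NULL)
+		return 0;
+
+	drm_mode_set_name(mode);
+	drm_mode_probed_add(connector, mode);
+
+	/* Physical panel size, only set here as no EDID is parsed. */
+	connector->display_info.width_mm = 217;
+	connector->display_info.height_mm = 135;
+
+	return 1;
+}
+
+static int foo_connector_mode_valid(struct drm_connector *connector,
+				    struct drm_display_mode *mode)
+{
+	/* Reject modes the (hypothetical) hardware cannot scan out. */
+	if (mode->clock > 150000)
+		return MODE_CLOCK_HIGH;
+
+	return MODE_OK;
+}
+        </programlisting>
+      </para>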
+    </sect2>
+  </sect1>
+
+  <!-- Internals: vertical blanking -->
+
+  <sect1 id="drm-vertical-blank">
+    <title>Vertical Blanking</title>
+    <para>
+      Vertical blanking plays a major role in graphics rendering. To achieve
+      tear-free display, users must synchronize page flips and/or rendering to
+      vertical blanking. The DRM API offers ioctls to perform page flips
+      synchronized to vertical blanking and wait for vertical blanking.
+    </para>
+    <para>
+      The DRM core handles most of the vertical blanking management logic, which
+      involves filtering out spurious interrupts, keeping race-free blanking
+      counters, coping with counter wrap-around and resets, and keeping use
+      counts. It relies on the driver to generate vertical blanking interrupts
+      and optionally provide a hardware vertical blanking counter. Drivers must
+      implement the following operations.
+    </para>
+    <itemizedlist>
+      <listitem>
+        <synopsis>int (*enable_vblank) (struct drm_device *dev, int crtc);
+void (*disable_vblank) (struct drm_device *dev, int crtc);</synopsis>
+        <para>
+	  Enable or disable vertical blanking interrupts for the given CRTC.
+	</para>
+      </listitem>
+      <listitem>
+        <synopsis>u32 (*get_vblank_counter) (struct drm_device *dev, int crtc);</synopsis>
+        <para>
+	  Retrieve the value of the vertical blanking counter for the given
+	  CRTC. If the hardware maintains a vertical blanking counter its value
+	  should be returned. Otherwise drivers can use the
+	  <function>drm_vblank_count</function> helper function to handle this
+	  operation.
+	</para>
+      </listitem>
+    </itemizedlist>
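+    <para>
+      As a sketch, a driver for hypothetical hardware with a blanking counter
+      register (the FOO_* registers and foo_read/foo_write accessors below are
+      invented for the example) could implement those operations as follows.
+      <programlisting>
+static int foo_enable_vblank(struct drm_device *dev, int crtc)
+{
+	/* Unmask the vertical blanking interrupt for this CRTC. */
+	foo_write(dev, FOO_IMR(crtc), FOO_VBLANK_IRQ);
+	return 0;
+}
+
+static void foo_disable_vblank(struct drm_device *dev, int crtc)
+{
+	/* Mask the vertical blanking interrupt again. */
+	foo_write(dev, FOO_IMR(crtc), 0);
+}
+
+static u32 foo_get_vblank_counter(struct drm_device *dev, int crtc)
+{
+	/* Hardware without such a register would return
+	 * drm_vblank_count(dev, crtc) here instead.
+	 */
+	return foo_read(dev, FOO_VBLANK_COUNT(crtc));
+}
+      </programlisting>
+    </para>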
+    <para>
+      Drivers must initialize the vertical blanking handling core with a call to
+      <function>drm_vblank_init</function> in their
+      <methodname>load</methodname> operation. The function will set the struct
+      <structname>drm_device</structname>
+      <structfield>vblank_disable_allowed</structfield> field to 0. This will
+      keep vertical blanking interrupts enabled permanently until the first mode
+      set operation, where <structfield>vblank_disable_allowed</structfield> is
+      set to 1. The reason behind this is not clear. Drivers can set the field
+      to 1 after calling <function>drm_vblank_init</function> to make vertical
+      blanking interrupts dynamically managed from the beginning.
+    </para>
+    <para>
+      Vertical blanking interrupts can be enabled by the DRM core or by drivers
+      themselves (for instance to handle page flipping operations). The DRM core
+      maintains a vertical blanking use count to ensure that the interrupts are
+      not disabled while a user still needs them. To increment the use count,
+      drivers call <function>drm_vblank_get</function>. Upon return vertical
+      blanking interrupts are guaranteed to be enabled.
+    </para>
+    <para>
+      To decrement the use count drivers call
+      <function>drm_vblank_put</function>. When the use count drops to zero the
+      DRM core schedules a timer to disable the vertical blanking interrupts
+      after a delay. The delay is accessible through the vblankoffdelay
+      module parameter or the <varname>drm_vblank_offdelay</varname> global
+      variable and expressed in milliseconds. Its default value is 5000 ms.
+    </para>
+    <para>
+      When a vertical blanking interrupt occurs drivers only need to call the
+      <function>drm_handle_vblank</function> function to account for the
+      interrupt.
+    </para>
+    <para>
+      Resources allocated by <function>drm_vblank_init</function> must be freed
+      with a call to <function>drm_vblank_cleanup</function> in the driver
+      <methodname>unload</methodname> operation handler.
+    </para>
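+    <para>
+      A minimal sketch of the corresponding initialization and cleanup, for a
+      hypothetical single-CRTC KMS driver, is shown below.
+      <programlisting>
+static int foo_driver_load(struct drm_device *dev, unsigned long flags)
+{
+	int ret;
+
+	ret = drm_vblank_init(dev, 1);	/* one CRTC */
+	if (ret)
+		return ret;
+
+	/* Let the core manage vblank interrupts dynamically right away. */
+	dev->vblank_disable_allowed = 1;
+	return 0;
+}
+
+static int foo_driver_unload(struct drm_device *dev)
+{
+	drm_vblank_cleanup(dev);
+	return 0;
+}
+      </programlisting>
+    </para>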
+  </sect1>
+
+  <!-- Internals: open/close, file operations and ioctls -->
+
+  <sect1>
+    <title>Open/Close, File Operations and IOCTLs</title>
+    <sect2>
+      <title>Open and Close</title>
+      <synopsis>int (*firstopen) (struct drm_device *);
+void (*lastclose) (struct drm_device *);
+int (*open) (struct drm_device *, struct drm_file *);
+void (*preclose) (struct drm_device *, struct drm_file *);
+void (*postclose) (struct drm_device *, struct drm_file *);</synopsis>
+      <abstract>Open and close handlers. None of those methods are mandatory.
+      </abstract>
+      <para>
+        The <methodname>firstopen</methodname> method is called by the DRM core
+	when an application opens a device that has no other opened file handle.
+	Similarly the <methodname>lastclose</methodname> method is called when
+	the last application holding a file handle opened on the device closes
+	it. Both methods are mostly used by UMS (User Mode Setting) drivers to
+	acquire and release device resources; KMS drivers should acquire and
+	release those resources in the <methodname>load</methodname> and
+	<methodname>unload</methodname> methods instead.
+      </para>
+      <para>
+        Note that the <methodname>lastclose</methodname> method is also called
+	at module unload time or, for hot-pluggable devices, when the device is
+	unplugged. The <methodname>firstopen</methodname> and
+	<methodname>lastclose</methodname> calls can thus be unbalanced.
+      </para>
+      <para>
+        The <methodname>open</methodname> method is called every time the device
+	is opened by an application. Drivers can allocate per-file private data
+	in this method and store it in the struct
+	<structname>drm_file</structname> <structfield>driver_priv</structfield>
+	field. Note that the <methodname>open</methodname> method is called
+	before <methodname>firstopen</methodname>.
+      </para>
+      <para>
+        The close operation is split into <methodname>preclose</methodname> and
+	<methodname>postclose</methodname> methods. Drivers must stop and
+	clean up all per-file operations in the <methodname>preclose</methodname>
+	method. For instance pending vertical blanking and page flip events must
+	be cancelled. No per-file operation is allowed on the file handle after
+	returning from the <methodname>preclose</methodname> method.
+      </para>
+      <para>
+        Finally the <methodname>postclose</methodname> method is called as the
+	last step of the close operation, right before calling the
+	<methodname>lastclose</methodname> method if no other open file handle
+	exists for the device. Drivers that have allocated per-file private data
+	in the <methodname>open</methodname> method should free it here.
+      </para>
+      <para>
+        The <methodname>lastclose</methodname> method should restore CRTC and
+	plane properties to their default values, so that a subsequent open of the
+	device will not inherit state from the previous user.
+      </para>
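+      <para>
+        A short sketch of per-file private data handling, using a hypothetical
+        <structname>foo_file_private</structname> structure and a hypothetical
+        event cancellation helper, could look like this:
+        <programlisting>
+static int foo_open(struct drm_device *dev, struct drm_file *file)
+{
+	struct foo_file_private *file_priv;
+
+	file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL);
+	if (file_priv == NULL)
+		return -ENOMEM;
+
+	file->driver_priv = file_priv;
+	return 0;
+}
+
+static void foo_preclose(struct drm_device *dev, struct drm_file *file)
+{
+	/* Cancel pending per-file events (vblank, page flip, ...) here. */
+	foo_cancel_pending_events(dev, file);
+}
+
+static void foo_postclose(struct drm_device *dev, struct drm_file *file)
+{
+	kfree(file->driver_priv);
+}
+        </programlisting>
+      </para>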
+    </sect2>
+    <sect2>
+      <title>File Operations</title>
+      <synopsis>const struct file_operations *fops</synopsis>
+      <abstract>File operations for the DRM device node.</abstract>
+      <para>
+        Drivers must define the file operations structure that forms the DRM
+	userspace API entry point, even though most of those operations are
+	implemented in the DRM core. The <methodname>open</methodname>,
+	<methodname>release</methodname> and <methodname>ioctl</methodname>
+	operations are handled by
+	<programlisting>
+	.owner = THIS_MODULE,
+	.open = drm_open,
+	.release = drm_release,
+	.unlocked_ioctl = drm_ioctl,
+  #ifdef CONFIG_COMPAT
+	.compat_ioctl = drm_compat_ioctl,
+  #endif
+        </programlisting>
+      </para>
+      <para>
+        Drivers that implement private ioctls that require 32/64-bit
+	compatibility support must provide their own
+	<methodname>compat_ioctl</methodname> handler that processes private
+	ioctls and calls <function>drm_compat_ioctl</function> for core ioctls.
+      </para>
+      <para>
+        The <methodname>read</methodname> and <methodname>poll</methodname>
+	operations provide support for reading DRM events and polling for them. They
+	are implemented by
+	<programlisting>
+	.poll = drm_poll,
+	.read = drm_read,
+	.fasync = drm_fasync,
+	.llseek = no_llseek,
 	</programlisting>
+      </para>
+      <para>
+        The memory mapping implementation varies depending on how the driver
+	manages memory. Pre-GEM drivers will use <function>drm_mmap</function>,
+	while GEM-aware drivers will use <function>drm_gem_mmap</function>. See
+	<xref linkend="drm-gem"/>.
+	<programlisting>
+	.mmap = drm_gem_mmap,
+	</programlisting>
+      </para>
+      <para>
+        No other file operation is supported by the DRM API.
+      </para>
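+      <para>
+        Putting those fragments together, a GEM-based driver would thus
+        typically declare something like the following sketch.
+        <programlisting>
+static const struct file_operations foo_driver_fops = {
+	.owner = THIS_MODULE,
+	.open = drm_open,
+	.release = drm_release,
+	.unlocked_ioctl = drm_ioctl,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl = drm_compat_ioctl,
+#endif
+	.poll = drm_poll,
+	.read = drm_read,
+	.fasync = drm_fasync,
+	.llseek = no_llseek,
+	.mmap = drm_gem_mmap,
+};
+        </programlisting>
+      </para>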
+    </sect2>
+    <sect2>
+      <title>IOCTLs</title>
+      <synopsis>struct drm_ioctl_desc *ioctls;
+int num_ioctls;</synopsis>
+      <abstract>Driver-specific ioctl descriptor table.</abstract>
+      <para>
+        Driver-specific ioctl numbers start at DRM_COMMAND_BASE. The ioctl
+	descriptor table is indexed by the ioctl number offset from the base
+	value. Drivers can use the DRM_IOCTL_DEF_DRV() macro to initialize the
+	table entries; an example table is sketched at the end of this section.
+      </para>
+      <para>
+        <programlisting>DRM_IOCTL_DEF_DRV(ioctl, func, flags)</programlisting>
 	<para>
-	  In the example above (again, taken from the i915 driver), a
-	  CRT connector and encoder combination is created.  A device-specific
-	  i2c bus is also created for fetching EDID data and
-	  performing monitor detection.  Once the process is complete,
-	  the new connector is registered with sysfs to make its
-	  properties available to applications.
+	  <parameter>ioctl</parameter> is the ioctl name. Drivers must define
+	  the DRM_##ioctl and DRM_IOCTL_##ioctl macros to the ioctl number
+	  offset from DRM_COMMAND_BASE and the ioctl number respectively. The
+	  first macro is private to the device while the second must be exposed
+	  to userspace in a public header.
 	</para>
-	<sect4>
-	  <title>Helper functions and core functions</title>
-	  <para>
-	    Since many PC-class graphics devices have similar display output
-	    designs, the DRM provides a set of helper functions to make
-	    output management easier.  The core helper routines handle
-	    encoder re-routing and the disabling of unused functions following
-	    mode setting.  Using the helpers is optional, but recommended for
-	    devices with PC-style architectures (i.e. a set of display planes
-	    for feeding pixels to encoders which are in turn routed to
-	    connectors).  Devices with more complex requirements needing
-	    finer grained management may opt to use the core callbacks
-	    directly.
-	  </para>
-	  <para>
-	    [Insert typical diagram here.]  [Insert OMAP style config here.]
-	  </para>
-	</sect4>
 	<para>
-	  Each encoder object needs to provide:
+	  <parameter>func</parameter> is a pointer to the ioctl handler function
+	  compatible with the <type>drm_ioctl_t</type> type.
+	  <programlisting>typedef int drm_ioctl_t(struct drm_device *dev, void *data,
+		struct drm_file *file_priv);</programlisting>
+	</para>
+	<para>
+	  <parameter>flags</parameter> is a bitmask combination of the following
+	  values. It restricts how the ioctl is allowed to be called.
 	  <itemizedlist>
-	    <listitem>
-	      A DPMS (basically on/off) function.
-	    </listitem>
-	    <listitem>
-	      A mode-fixup function (for converting requested modes into
-	      native hardware timings).
-	    </listitem>
-	    <listitem>
-	      Functions (prepare, set, and commit) for use by the core DRM
-	      helper functions.
-	    </listitem>
+	    <listitem><para>
+	      DRM_AUTH - Only authenticated callers allowed
+	    </para></listitem>
+	    <listitem><para>
+	      DRM_MASTER - The ioctl can only be called on the master file
+	      handle
+	    </para></listitem>
+            <listitem><para>
+	      DRM_ROOT_ONLY - Only callers with the SYSADMIN capability allowed
+	    </para></listitem>
+            <listitem><para>
+	      DRM_CONTROL_ALLOW - The ioctl can only be called on a control
+	      device
+	    </para></listitem>
+            <listitem><para>
+	      DRM_UNLOCKED - The ioctl handler will be called without locking
+	      the DRM global mutex
+	    </para></listitem>
 	  </itemizedlist>
-	  Connector helpers need to provide functions (mode-fetch, validity,
-	  and encoder-matching) for returning an ideal encoder for a given
-	  connector.  The core connector functions include a DPMS callback,
-	  save/restore routines (deprecated), detection, mode probing,
-	  property handling, and cleanup functions.
 	</para>
-<!--!Edrivers/char/drm/drm_crtc.h-->
-<!--!Edrivers/char/drm/drm_crtc.c-->
-<!--!Edrivers/char/drm/drm_crtc_helper.c-->
-      </sect3>
-    </sect2>
-  </sect1>
-
-  <!-- Internals: vblank handling -->
-
-  <sect1>
-    <title>VBlank event handling</title>
-    <para>
-      The DRM core exposes two vertical blank related ioctls:
-      <variablelist>
-        <varlistentry>
-          <term>DRM_IOCTL_WAIT_VBLANK</term>
-          <listitem>
-            <para>
-              This takes a struct drm_wait_vblank structure as its argument,
-              and it is used to block or request a signal when a specified
-              vblank event occurs.
-            </para>
-          </listitem>
-        </varlistentry>
-        <varlistentry>
-          <term>DRM_IOCTL_MODESET_CTL</term>
-          <listitem>
-            <para>
-              This should be called by application level drivers before and
-              after mode setting, since on many devices the vertical blank
-              counter is reset at that time.  Internally, the DRM snapshots
-              the last vblank count when the ioctl is called with the
-              _DRM_PRE_MODESET command, so that the counter won't go backwards
-              (which is dealt with when _DRM_POST_MODESET is used).
-            </para>
-          </listitem>
-        </varlistentry>
-      </variablelist>
-<!--!Edrivers/char/drm/drm_irq.c-->
-    </para>
-    <para>
-      To support the functions above, the DRM core provides several
-      helper functions for tracking vertical blank counters, and
-      requires drivers to provide several callbacks:
-      get_vblank_counter(), enable_vblank() and disable_vblank().  The
-      core uses get_vblank_counter() to keep the counter accurate
-      across interrupt disable periods.  It should return the current
-      vertical blank event count, which is often tracked in a device
-      register.  The enable and disable vblank callbacks should enable
-      and disable vertical blank interrupts, respectively.  In the
-      absence of DRM clients waiting on vblank events, the core DRM
-      code uses the disable_vblank() function to disable
-      interrupts, which saves power.  They are re-enabled again when
-      a client calls the vblank wait ioctl above.
-    </para>
-    <para>
-      A device that doesn't provide a count register may simply use an
-      internal atomic counter incremented on every vertical blank
-      interrupt (and then treat the enable_vblank() and disable_vblank()
-      callbacks as no-ops).
-    </para>
-  </sect1>
-
-  <sect1>
-    <title>Memory management</title>
-    <para>
-      The memory manager lies at the heart of many DRM operations; it
-      is required to support advanced client features like OpenGL
-      pbuffers.  The DRM currently contains two memory managers: TTM
-      and GEM.
-    </para>
-
-    <sect2>
-      <title>The Translation Table Manager (TTM)</title>
-      <para>
-	TTM was developed by Tungsten Graphics, primarily by Thomas
-	Hellström, and is intended to be a flexible, high performance
-	graphics memory manager.
-      </para>
-      <para>
-	Drivers wishing to support TTM must fill out a drm_bo_driver
-	structure.
-      </para>
-      <para>
-	TTM design background and information belongs here.
       </para>
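+      <para>
+        As an example, a hypothetical driver exposing a single
+        DRM_FOO_GEM_CREATE ioctl (all names here are invented for the sketch)
+        could declare its table as follows.
+        <programlisting>
+static struct drm_ioctl_desc foo_ioctls[] = {
+	DRM_IOCTL_DEF_DRV(FOO_GEM_CREATE, foo_gem_create_ioctl,
+			  DRM_AUTH | DRM_UNLOCKED),
+};
+        </programlisting>
+        The table and its size are then stored in the driver's
+        <structfield>ioctls</structfield> and
+        <structfield>num_ioctls</structfield> fields.
+      </para>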
     </sect2>
-
-    <sect2>
-      <title>The Graphics Execution Manager (GEM)</title>
-      <para>
-	GEM is an Intel project, authored by Eric Anholt and Keith
-	Packard.  It provides simpler interfaces than TTM, and is well
-	suited for UMA devices.
-      </para>
-      <para>
-	GEM-enabled drivers must provide gem_init_object() and
-	gem_free_object() callbacks to support the core memory
-	allocation routines.  They should also provide several driver-specific
-	ioctls to support command execution, pinning, buffer
-	read &amp; write, mapping, and domain ownership transfers.
-      </para>
-      <para>
-	On a fundamental level, GEM involves several operations:
-	<itemizedlist>
-	  <listitem>Memory allocation and freeing</listitem>
-	  <listitem>Command execution</listitem>
-	  <listitem>Aperture management at command execution time</listitem>
-	</itemizedlist>
-	Buffer object allocation is relatively
-	straightforward and largely provided by Linux's shmem layer, which
-	provides memory to back each object.  When mapped into the GTT
-	or used in a command buffer, the backing pages for an object are
-	flushed to memory and marked write combined so as to be coherent
-	with the GPU.  Likewise, if the CPU accesses an object after the GPU
-	has finished rendering to the object, then the object must be made
-	coherent with the CPU's view
-	of memory, usually involving GPU cache flushing of various kinds.
-	This core CPU&lt;-&gt;GPU coherency management is provided by a
-	device-specific ioctl, which evaluates an object's current domain and
-	performs any necessary flushing or synchronization to put the object
-	into the desired coherency domain (note that the object may be busy,
-	i.e. an active render target; in that case, setting the domain
-	blocks the client and waits for rendering to complete before
-	performing any necessary flushing operations).
-      </para>
-      <para>
-	Perhaps the most important GEM function is providing a command
-	execution interface to clients.  Client programs construct command
-	buffers containing references to previously allocated memory objects,
-	and then submit them to GEM.  At that point, GEM takes care to bind
-	all the objects into the GTT, execute the buffer, and provide
-	necessary synchronization between clients accessing the same buffers.
-	This often involves evicting some objects from the GTT and re-binding
-	others (a fairly expensive operation), and providing relocation
-	support which hides fixed GTT offsets from clients.  Clients must
-	take care not to submit command buffers that reference more objects
-	than can fit in the GTT; otherwise, GEM will reject them and no rendering
-	will occur.  Similarly, if several objects in the buffer require
-	fence registers to be allocated for correct rendering (e.g. 2D blits
-	on pre-965 chips), care must be taken not to require more fence
-	registers than are available to the client.  Such resource management
-	should be abstracted from the client in libdrm.
-      </para>
-    </sect2>
-
-  </sect1>
-
-  <!-- Output management -->
-  <sect1>
-    <title>Output management</title>
-    <para>
-      At the core of the DRM output management code is a set of
-      structures representing CRTCs, encoders, and connectors.
-    </para>
-    <para>
-      A CRTC is an abstraction representing a part of the chip that
-      contains a pointer to a scanout buffer.  Therefore, the number
-      of CRTCs available determines how many independent scanout
-      buffers can be active at any given time.  The CRTC structure
-      contains several fields to support this: a pointer to some video
-      memory, a display mode, and an (x, y) offset into the video
-      memory to support panning or configurations where one piece of
-      video memory spans multiple CRTCs.
-    </para>
-    <para>
-      An encoder takes pixel data from a CRTC and converts it to a
-      format suitable for any attached connectors.  On some devices,
-      it may be possible to have a CRTC send data to more than one
-      encoder.  In that case, both encoders would receive data from
-      the same scanout buffer, resulting in a "cloned" display
-      configuration across the connectors attached to each encoder.
-    </para>
-    <para>
-      A connector is the final destination for pixel data on a device,
-      and usually connects directly to an external display device like
-      a monitor or laptop panel.  A connector can only be attached to
-      one encoder at a time.  The connector is also the structure
-      where information about the attached display is kept, so it
-      contains fields for display data, EDID data, DPMS &amp;
-      connection status, and information about modes supported on the
-      attached displays.
-    </para>
-<!--!Edrivers/char/drm/drm_crtc.c-->
-  </sect1>
-
-  <sect1>
-    <title>Framebuffer management</title>
-    <para>
-      Clients need to provide a framebuffer object which provides a source
-      of pixels for a CRTC to deliver to the encoder(s) and ultimately the
-      connector(s). A framebuffer is fundamentally a driver-specific memory
-      object, made into an opaque handle by the DRM's addfb() function.
-      Once a framebuffer has been created this way, it may be passed to the
-      KMS mode setting routines for use in a completed configuration.
-    </para>
   </sect1>
 
   <sect1>
@@ -812,15 +2355,24 @@
     </para>
   </sect1>
 
+  <!-- Internals: suspend/resume -->
+
   <sect1>
-    <title>Suspend/resume</title>
+    <title>Suspend/Resume</title>
     <para>
-      The DRM core provides some suspend/resume code, but drivers
-      wanting full suspend/resume support should provide save() and
-      restore() functions.  These are called at suspend,
-      hibernate, or resume time, and should perform any state save or
-      restore required by your device across suspend or hibernate
-      states.
+      The DRM core provides some suspend/resume code, but drivers wanting full
+      suspend/resume support should provide save() and restore() functions.
+      These are called at suspend, hibernate, or resume time, and should perform
+      any state save or restore required by your device across suspend or
+      hibernate states.
+    </para>
+    <synopsis>int (*suspend) (struct drm_device *, pm_message_t state);
+int (*resume) (struct drm_device *);</synopsis>
+    <para>
+      These are legacy suspend and resume methods. New drivers should use the
+      power management interface provided by their bus type (usually through
+      the struct <structname>device_driver</structname>
+      <structfield>pm</structfield> field, which points to a struct
+      <structname>dev_pm_ops</structname>) and set these methods to NULL.
     </para>
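+    <para>
+      A sketch of the preferred approach for a PCI device, with hypothetical
+      foo_pm_* handlers, is shown below.
+      <programlisting>
+static const struct dev_pm_ops foo_pm_ops = {
+	.suspend = foo_pm_suspend,
+	.resume = foo_pm_resume,
+	.freeze = foo_pm_freeze,
+	.thaw = foo_pm_thaw,
+};
+
+static struct pci_driver foo_pci_driver = {
+	.name = "foo",
+	.id_table = foo_pci_ids,
+	.probe = foo_pci_probe,
+	.remove = foo_pci_remove,
+	.driver.pm = &amp;foo_pm_ops,
+};
+      </programlisting>
+    </para>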
   </sect1>
 
@@ -833,6 +2385,35 @@
   </sect1>
   </chapter>
 
+<!-- TODO
+
+- Add a glossary
+- Document the struct_mutex catch-all lock
+- Document connector properties
+
+- Why is the load method optional?
+- What are drivers supposed to set the initial display state to, and how?
+  Connector's DPMS states are not initialized and are thus equal to
+  DRM_MODE_DPMS_ON. The fbcon compatibility layer calls
+  drm_helper_disable_unused_functions(), which disables unused encoders and
+  CRTCs, but doesn't touch the connectors' DPMS state, and
+  drm_helper_connector_dpms() in reaction to fbdev blanking events. Do drivers
+  that don't implement (or just don't use) fbcon compatibility need to call
+  those functions themselves?
+- KMS drivers must call drm_vblank_pre_modeset() and drm_vblank_post_modeset()
+  around mode setting. Should this be done in the DRM core?
+- vblank_disable_allowed is set to 1 in the first drm_vblank_post_modeset()
+  call and never set back to 0. It seems to be safe to permanently set it to 1
+  in drm_vblank_init() for KMS driver, and it might be safe for UMS drivers as
+  well. This should be investigated.
+- crtc and connector .save and .restore operations are only used internally in
+  drivers, should they be removed from the core?
+- encoder mid-layer .save and .restore operations are only used internally in
+  drivers, should they be removed from the core?
+- encoder mid-layer .detect operation is only used internally in drivers,
+  should it be removed from the core?
+-->
+
   <!-- External interfaces -->
 
   <chapter id="drmExternals">
@@ -853,6 +2434,42 @@
       Cover generic ioctls and sysfs layout here.  We only need high-level
       info, since man pages should cover the rest.
     </para>
+
+  <!-- External: vblank handling -->
+
+    <sect1>
+      <title>VBlank event handling</title>
+      <para>
+        The DRM core exposes two vertical blank related ioctls:
+        <variablelist>
+          <varlistentry>
+            <term>DRM_IOCTL_WAIT_VBLANK</term>
+            <listitem>
+              <para>
+                This takes a union drm_wait_vblank as its argument,
+                and it is used to block or request a signal when a specified
+                vblank event occurs.
+              </para>
+            </listitem>
+          </varlistentry>
+          <varlistentry>
+            <term>DRM_IOCTL_MODESET_CTL</term>
+            <listitem>
+              <para>
+                This should be called by application level drivers before and
+                after mode setting, since on many devices the vertical blank
+                counter is reset at that time.  Internally, the DRM snapshots
+                the last vblank count when the ioctl is called with the
+                _DRM_PRE_MODESET command, so that the counter won't go backwards
+                (which is dealt with when _DRM_POST_MODESET is used).
+              </para>
+            </listitem>
+          </varlistentry>
+        </variablelist>
+<!--!Edrivers/char/drm/drm_irq.c-->
+      </para>
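+      <para>
+        As a userspace sketch, waiting for the next vertical blanking period
+        on an open DRM file descriptor could look like this:
+        <programlisting>
+union drm_wait_vblank vbl;
+
+memset(&amp;vbl, 0, sizeof(vbl));
+vbl.request.type = _DRM_VBLANK_RELATIVE;
+vbl.request.sequence = 1;	/* wait for the next vblank */
+
+if (ioctl(fd, DRM_IOCTL_WAIT_VBLANK, &amp;vbl) == 0)
+	printf("vblank %u at %ld.%06ld\n", vbl.reply.sequence,
+	       vbl.reply.tval_sec, vbl.reply.tval_usec);
+        </programlisting>
+      </para>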
+    </sect1>
+
   </chapter>
 
   <!-- API reference -->
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index 90e2808..3a8c683 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -24,6 +24,7 @@
 	depends on DRM
 	depends on USB_ARCH_HAS_HCD
 	select USB
+	select USB_SUPPORT
 
 config DRM_KMS_HELPER
 	tristate
diff --git a/drivers/gpu/drm/ast/ast_drv.h b/drivers/gpu/drm/ast/ast_drv.h
index d4af9ed..20a437f 100644
--- a/drivers/gpu/drm/ast/ast_drv.h
+++ b/drivers/gpu/drm/ast/ast_drv.h
@@ -94,7 +94,6 @@
 		struct drm_global_reference mem_global_ref;
 		struct ttm_bo_global_ref bo_global_ref;
 		struct ttm_bo_device bdev;
-		atomic_t validate_sequence;
 	} ttm;
 
 	struct drm_gem_object *cursor_cache;
diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c
index 7282c08..866e9d4 100644
--- a/drivers/gpu/drm/ast/ast_mode.c
+++ b/drivers/gpu/drm/ast/ast_mode.c
@@ -737,6 +737,7 @@
 	if (edid) {
 		drm_mode_connector_update_edid_property(&ast_connector->base, edid);
 		ret = drm_add_edid_modes(connector, edid);
+		kfree(edid);
 		return ret;
 	} else
 		drm_mode_connector_update_edid_property(&ast_connector->base, NULL);
diff --git a/drivers/gpu/drm/cirrus/cirrus_drv.h b/drivers/gpu/drm/cirrus/cirrus_drv.h
index 64ea597cb..5d04564 100644
--- a/drivers/gpu/drm/cirrus/cirrus_drv.h
+++ b/drivers/gpu/drm/cirrus/cirrus_drv.h
@@ -143,7 +143,6 @@
 		struct drm_global_reference mem_global_ref;
 		struct ttm_bo_global_ref bo_global_ref;
 		struct ttm_bo_device bdev;
-		atomic_t validate_sequence;
 	} ttm;
 	bool mm_inited;
 };
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index a8743c3..bcc4725 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -399,10 +399,7 @@
 	if (drm_probe_ddc(adapter))
 		edid = (struct edid *)drm_do_get_edid(connector, adapter);
 
-	connector->display_info.raw_edid = (char *)edid;
-
 	return edid;
-
 }
 EXPORT_SYMBOL(drm_get_edid);
 
diff --git a/drivers/gpu/drm/drm_edid_load.c b/drivers/gpu/drm/drm_edid_load.c
index 0303935..186832e 100644
--- a/drivers/gpu/drm/drm_edid_load.c
+++ b/drivers/gpu/drm/drm_edid_load.c
@@ -114,8 +114,8 @@
 	},
 };
 
-static int edid_load(struct drm_connector *connector, char *name,
-		     char *connector_name)
+static u8 *edid_load(struct drm_connector *connector, char *name,
+			char *connector_name)
 {
 	const struct firmware *fw;
 	struct platform_device *pdev;
@@ -205,7 +205,6 @@
 		edid = new_edid;
 	}
 
-	connector->display_info.raw_edid = edid;
 	DRM_INFO("Got %s EDID base block and %d extension%s from "
 	    "\"%s\" for connector \"%s\"\n", builtin ? "built-in" :
 	    "external", valid_extensions, valid_extensions == 1 ? "" : "s",
@@ -215,7 +214,10 @@
 	release_firmware(fw);
 
 out:
-	return err;
+	if (err)
+		return ERR_PTR(err);
+
+	return edid;
 }
 
 int drm_load_edid_firmware(struct drm_connector *connector)
@@ -223,6 +225,7 @@
 	char *connector_name = drm_get_connector_name(connector);
 	char *edidname = edid_firmware, *last, *colon;
 	int ret;
+	struct edid *edid;
 
 	if (*edidname == '\0')
 		return 0;
@@ -240,13 +243,13 @@
 	if (*last == '\n')
 		*last = '\0';
 
-	ret = edid_load(connector, edidname, connector_name);
-	if (ret)
+	edid = (struct edid *) edid_load(connector, edidname, connector_name);
+	if (IS_ERR_OR_NULL(edid))
 		return 0;
 
-	drm_mode_connector_update_edid_property(connector,
-	    (struct edid *) connector->display_info.raw_edid);
+	drm_mode_connector_update_edid_property(connector, edid);
+	ret = drm_add_edid_modes(connector, edid);
+	kfree(edid);
 
-	return drm_add_edid_modes(connector, (struct edid *)
-	    connector->display_info.raw_edid);
+	return ret;
 }
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index f546d1e..061d26d 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -236,7 +236,7 @@
 }
 EXPORT_SYMBOL(drm_fb_helper_restore_fbdev_mode);
 
-bool drm_fb_helper_force_kernel_mode(void)
+static bool drm_fb_helper_force_kernel_mode(void)
 {
 	bool ret, error = false;
 	struct drm_fb_helper *helper;
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index 03f16f3..076c4a8 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -1236,7 +1236,7 @@
 	return ret;
 }
 
-void drm_handle_vblank_events(struct drm_device *dev, int crtc)
+static void drm_handle_vblank_events(struct drm_device *dev, int crtc)
 {
 	struct drm_pending_vblank_event *e, *t;
 	struct timeval now;
diff --git a/drivers/gpu/drm/drm_vm.c b/drivers/gpu/drm/drm_vm.c
index 961ee08..3f06166 100644
--- a/drivers/gpu/drm/drm_vm.c
+++ b/drivers/gpu/drm/drm_vm.c
@@ -62,7 +62,7 @@
 		tmp = pgprot_writecombine(tmp);
 	else
 		tmp = pgprot_noncached(tmp);
-#elif defined(__sparc__) || defined(__arm__)
+#elif defined(__sparc__) || defined(__arm__) || defined(__mips__)
 	tmp = pgprot_noncached(tmp);
 #endif
 	return tmp;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_connector.c b/drivers/gpu/drm/exynos/exynos_drm_connector.c
index d956819..9dce3b9 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_connector.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_connector.c
@@ -147,9 +147,7 @@
 
 		drm_mode_connector_update_edid_property(connector, edid);
 		count = drm_add_edid_modes(connector, edid);
-
-		kfree(connector->display_info.raw_edid);
-		connector->display_info.raw_edid = edid;
+		kfree(edid);
 	} else {
 		struct drm_display_mode *mode = drm_mode_create(connector->dev);
 		struct exynos_drm_panel_info *panel;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_vidi.c b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
index bb1550c..9239525 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_vidi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
@@ -102,7 +102,6 @@
 				u8 *edid, int len)
 {
 	struct vidi_context *ctx = get_vidi_context(dev);
-	struct edid *raw_edid;
 
 	DRM_DEBUG_KMS("%s\n", __FILE__);
 
@@ -115,18 +114,6 @@
 		return -EFAULT;
 	}
 
-	raw_edid = kzalloc(len, GFP_KERNEL);
-	if (!raw_edid) {
-		DRM_DEBUG_KMS("failed to allocate raw_edid.\n");
-		return -ENOMEM;
-	}
-
-	memcpy(raw_edid, ctx->raw_edid, min((1 + ctx->raw_edid->extensions)
-						* EDID_LENGTH, len));
-
-	/* attach the edid data to connector. */
-	connector->display_info.raw_edid = (char *)raw_edid;
-
 	memcpy(edid, ctx->raw_edid, min((1 + ctx->raw_edid->extensions)
 					* EDID_LENGTH, len));
 
diff --git a/drivers/gpu/drm/gma500/Makefile b/drivers/gpu/drm/gma500/Makefile
index abfa2a93..7a2d40a 100644
--- a/drivers/gpu/drm/gma500/Makefile
+++ b/drivers/gpu/drm/gma500/Makefile
@@ -3,7 +3,7 @@
 #
 ccflags-y += -I$(srctree)/include/drm
 
-gma500_gfx-y += gem_glue.o \
+gma500_gfx-y += \
 	  accel_2d.o \
 	  backlight.o \
 	  framebuffer.o \
@@ -30,7 +30,8 @@
 	  cdv_intel_crt.o \
 	  cdv_intel_display.o \
 	  cdv_intel_hdmi.o \
-	  cdv_intel_lvds.o
+	  cdv_intel_lvds.o \
+	  cdv_intel_dp.o
 
 gma500_gfx-$(CONFIG_DRM_GMA600) += oaktrail_device.o \
 	  oaktrail_crtc.o \
diff --git a/drivers/gpu/drm/gma500/backlight.c b/drivers/gpu/drm/gma500/backlight.c
index 2079395..143eba3 100644
--- a/drivers/gpu/drm/gma500/backlight.c
+++ b/drivers/gpu/drm/gma500/backlight.c
@@ -26,10 +26,55 @@
 #include "intel_bios.h"
 #include "power.h"
 
+static void do_gma_backlight_set(struct drm_device *dev)
+{
+#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	backlight_update_status(dev_priv->backlight_device);
+#endif	
+}
+
+void gma_backlight_enable(struct drm_device *dev)
+{
+#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	dev_priv->backlight_enabled = true;
+	if (dev_priv->backlight_device) {
+		dev_priv->backlight_device->props.brightness = dev_priv->backlight_level;
+		do_gma_backlight_set(dev);
+	}
+#endif	
+}
+
+void gma_backlight_disable(struct drm_device *dev)
+{
+#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	dev_priv->backlight_enabled = false;
+	if (dev_priv->backlight_device) {
+		dev_priv->backlight_device->props.brightness = 0;
+		do_gma_backlight_set(dev);
+	}
+#endif	
+}
+
+void gma_backlight_set(struct drm_device *dev, int v)
+{
+#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	dev_priv->backlight_level = v;
+	if (dev_priv->backlight_device && dev_priv->backlight_enabled) {
+		dev_priv->backlight_device->props.brightness = v;
+		do_gma_backlight_set(dev);
+	}
+#endif	
+}
+
 int gma_backlight_init(struct drm_device *dev)
 {
 #ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
 	struct drm_psb_private *dev_priv = dev->dev_private;
+	dev_priv->backlight_enabled = true;
 	return dev_priv->ops->backlight_init(dev);
 #else
 	return 0;
diff --git a/drivers/gpu/drm/gma500/cdv_device.c b/drivers/gpu/drm/gma500/cdv_device.c
index b7e7b49..bfc2f39 100644
--- a/drivers/gpu/drm/gma500/cdv_device.c
+++ b/drivers/gpu/drm/gma500/cdv_device.c
@@ -58,10 +58,17 @@
 	cdv_intel_lvds_init(dev, &dev_priv->mode_dev);
 
 	/* These bits indicate HDMI not SDVO on CDV */
-	if (REG_READ(SDVOB) & SDVO_DETECTED)
+	if (REG_READ(SDVOB) & SDVO_DETECTED) {
 		cdv_hdmi_init(dev, &dev_priv->mode_dev, SDVOB);
-	if (REG_READ(SDVOC) & SDVO_DETECTED)
+		if (REG_READ(DP_B) & DP_DETECTED)
+			cdv_intel_dp_init(dev, &dev_priv->mode_dev, DP_B);
+	}
+
+	if (REG_READ(SDVOC) & SDVO_DETECTED) {
 		cdv_hdmi_init(dev, &dev_priv->mode_dev, SDVOC);
+		if (REG_READ(DP_C) & DP_DETECTED)
+			cdv_intel_dp_init(dev, &dev_priv->mode_dev, DP_C);
+	}
 	return 0;
 }
 
@@ -163,6 +170,7 @@
 			cdv_get_brightness(cdv_backlight_device);
 	backlight_update_status(cdv_backlight_device);
 	dev_priv->backlight_device = cdv_backlight_device;
+	dev_priv->backlight_enabled = true;
 	return 0;
 }
 
@@ -449,6 +457,7 @@
 	case 6:
 	case 7:
 		dev_priv->core_freq = 266;
+		break;
 	default:
 		dev_priv->core_freq = 0;
 	}
@@ -488,6 +497,65 @@
 	}	
 }
 
+static const char *force_audio_names[] = {
+	"off",
+	"auto",
+	"on",
+};
+
+void cdv_intel_attach_force_audio_property(struct drm_connector *connector)
+{
+	struct drm_device *dev = connector->dev;
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	struct drm_property *prop;
+	int i;
+
+	prop = dev_priv->force_audio_property;
+	if (prop == NULL) {
+		prop = drm_property_create(dev, DRM_MODE_PROP_ENUM,
+					   "audio",
+					   ARRAY_SIZE(force_audio_names));
+		if (prop == NULL)
+			return;
+
+		for (i = 0; i < ARRAY_SIZE(force_audio_names); i++)
+			drm_property_add_enum(prop, i, i-1, force_audio_names[i]);
+
+		dev_priv->force_audio_property = prop;
+	}
+	drm_connector_attach_property(connector, prop, 0);
+}
+
+
+static const char *broadcast_rgb_names[] = {
+	"Full",
+	"Limited 16:235",
+};
+
+void cdv_intel_attach_broadcast_rgb_property(struct drm_connector *connector)
+{
+	struct drm_device *dev = connector->dev;
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	struct drm_property *prop;
+	int i;
+
+	prop = dev_priv->broadcast_rgb_property;
+	if (prop == NULL) {
+		prop = drm_property_create(dev, DRM_MODE_PROP_ENUM,
+					   "Broadcast RGB",
+					   ARRAY_SIZE(broadcast_rgb_names));
+		if (prop == NULL)
+			return;
+
+		for (i = 0; i < ARRAY_SIZE(broadcast_rgb_names); i++)
+			drm_property_add_enum(prop, i, i, broadcast_rgb_names[i]);
+
+		dev_priv->broadcast_rgb_property = prop;
+	}
+
+	drm_connector_attach_property(connector, prop, 0);
+}
+
 /* Cedarview */
 static const struct psb_offset cdv_regmap[2] = {
 	{
diff --git a/drivers/gpu/drm/gma500/cdv_intel_display.c b/drivers/gpu/drm/gma500/cdv_intel_display.c
index a68509b..3cfd093 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_display.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_display.c
@@ -57,15 +57,26 @@
 struct cdv_intel_limit_t {
 	struct cdv_intel_range_t dot, vco, n, m, m1, m2, p, p1;
 	struct cdv_intel_p2_t p2;
+	bool (*find_pll)(const struct cdv_intel_limit_t *, struct drm_crtc *,
+			int, int, struct cdv_intel_clock_t *);
 };
 
+static bool cdv_intel_find_best_PLL(const struct cdv_intel_limit_t *limit,
+	struct drm_crtc *crtc, int target, int refclk,
+	struct cdv_intel_clock_t *best_clock);
+static bool cdv_intel_find_dp_pll(const struct cdv_intel_limit_t *limit, struct drm_crtc *crtc, int target,
+				int refclk,
+				struct cdv_intel_clock_t *best_clock);
+
 #define CDV_LIMIT_SINGLE_LVDS_96	0
 #define CDV_LIMIT_SINGLE_LVDS_100	1
 #define CDV_LIMIT_DAC_HDMI_27		2
 #define CDV_LIMIT_DAC_HDMI_96		3
+#define CDV_LIMIT_DP_27			4
+#define CDV_LIMIT_DP_100		5
 
 static const struct cdv_intel_limit_t cdv_intel_limits[] = {
-	{			/* CDV_SIGNLE_LVDS_96MHz */
+	{			/* CDV_SINGLE_LVDS_96MHz */
 	 .dot = {.min = 20000, .max = 115500},
 	 .vco = {.min = 1800000, .max = 3600000},
 	 .n = {.min = 2, .max = 6},
@@ -76,6 +87,7 @@
 	 .p1 = {.min = 2, .max = 10},
 	 .p2 = {.dot_limit = 200000,
 		.p2_slow = 14, .p2_fast = 14},
+		.find_pll = cdv_intel_find_best_PLL,
 	 },
 	{			/* CDV_SINGLE_LVDS_100MHz */
 	 .dot = {.min = 20000, .max = 115500},
@@ -90,6 +102,7 @@
 	  * is 80-224Mhz.  Prefer single channel as much as possible.
 	  */
 	 .p2 = {.dot_limit = 200000, .p2_slow = 14, .p2_fast = 14},
+	.find_pll = cdv_intel_find_best_PLL,
 	 },
 	{			/* CDV_DAC_HDMI_27MHz */
 	 .dot = {.min = 20000, .max = 400000},
@@ -101,6 +114,7 @@
 	 .p = {.min = 5, .max = 90},
 	 .p1 = {.min = 1, .max = 9},
 	 .p2 = {.dot_limit = 225000, .p2_slow = 10, .p2_fast = 5},
+	.find_pll = cdv_intel_find_best_PLL,
 	 },
 	{			/* CDV_DAC_HDMI_96MHz */
 	 .dot = {.min = 20000, .max = 400000},
@@ -112,7 +126,32 @@
 	 .p = {.min = 5, .max = 100},
 	 .p1 = {.min = 1, .max = 10},
 	 .p2 = {.dot_limit = 225000, .p2_slow = 10, .p2_fast = 5},
+	.find_pll = cdv_intel_find_best_PLL,
 	 },
+	{			/* CDV_DP_27MHz */
+	 .dot = {.min = 160000, .max = 272000},
+	 .vco = {.min = 1809000, .max = 3564000},
+	 .n = {.min = 1, .max = 1},
+	 .m = {.min = 67, .max = 132},
+	 .m1 = {.min = 0, .max = 0},
+	 .m2 = {.min = 65, .max = 130},
+	 .p = {.min = 5, .max = 90},
+	 .p1 = {.min = 1, .max = 9},
+	 .p2 = {.dot_limit = 225000, .p2_slow = 10, .p2_fast = 10},
+	 .find_pll = cdv_intel_find_dp_pll,
+	 },
+	{			/* CDV_DP_100MHz */
+	 .dot = {.min = 160000, .max = 272000},
+	 .vco = {.min = 1800000, .max = 3600000},
+	 .n = {.min = 2, .max = 6},
+	 .m = {.min = 60, .max = 164},
+	 .m1 = {.min = 0, .max = 0},
+	 .m2 = {.min = 58, .max = 162},
+	 .p = {.min = 5, .max = 100},
+	 .p1 = {.min = 1, .max = 10},
+	 .p2 = {.dot_limit = 225000, .p2_slow = 10, .p2_fast = 10},
+	 .find_pll = cdv_intel_find_dp_pll,
+	 }	
 };
 
 #define _wait_for(COND, MS, W) ({ \
@@ -132,7 +171,7 @@
 #define wait_for(COND, MS) _wait_for(COND, MS, 1)
 
 
-static int cdv_sb_read(struct drm_device *dev, u32 reg, u32 *val)
+int cdv_sb_read(struct drm_device *dev, u32 reg, u32 *val)
 {
 	int ret;
 
@@ -159,7 +198,7 @@
 	return 0;
 }
 
-static int cdv_sb_write(struct drm_device *dev, u32 reg, u32 val)
+int cdv_sb_write(struct drm_device *dev, u32 reg, u32 val)
 {
 	int ret;
 	static bool dpio_debug = true;
@@ -201,7 +240,7 @@
 /* Reset the DPIO configuration register.  The BIOS does this at every
  * mode set.
  */
-static void cdv_sb_reset(struct drm_device *dev)
+void cdv_sb_reset(struct drm_device *dev)
 {
 
 	REG_WRITE(DPIO_CFG, 0);
@@ -216,7 +255,7 @@
  */
 static int
 cdv_dpll_set_clock_cdv(struct drm_device *dev, struct drm_crtc *crtc,
-			       struct cdv_intel_clock_t *clock, bool is_lvds)
+			       struct cdv_intel_clock_t *clock, bool is_lvds, u32 ddi_select)
 {
 	struct psb_intel_crtc *psb_crtc = to_psb_intel_crtc(crtc);
 	int pipe = psb_crtc->pipe;
@@ -259,7 +298,7 @@
 	ref_value &= ~(REF_CLK_MASK);
 
 	/* use DPLL_A for pipeB on CRT/HDMI */
-	if (pipe == 1 && !is_lvds) {
+	if (pipe == 1 && !is_lvds && !(ddi_select & DP_MASK)) {
 		DRM_DEBUG_KMS("use DPLLA for pipe B\n");
 		ref_value |= REF_CLK_DPLLA;
 	} else {
@@ -336,30 +375,33 @@
 	if (ret)
 		return ret;
 
-	lane_reg = PSB_LANE0;
-	cdv_sb_read(dev, lane_reg, &lane_value);
-	lane_value &= ~(LANE_PLL_MASK);
-	lane_value |= LANE_PLL_ENABLE | LANE_PLL_PIPE(pipe);
-	cdv_sb_write(dev, lane_reg, lane_value);
+	if (ddi_select) {
+		if ((ddi_select & DDI_MASK) == DDI0_SELECT) {
+			lane_reg = PSB_LANE0;
+			cdv_sb_read(dev, lane_reg, &lane_value);
+			lane_value &= ~(LANE_PLL_MASK);
+			lane_value |= LANE_PLL_ENABLE | LANE_PLL_PIPE(pipe);
+			cdv_sb_write(dev, lane_reg, lane_value);
 
-	lane_reg = PSB_LANE1;
-	cdv_sb_read(dev, lane_reg, &lane_value);
-	lane_value &= ~(LANE_PLL_MASK);
-	lane_value |= LANE_PLL_ENABLE | LANE_PLL_PIPE(pipe);
-	cdv_sb_write(dev, lane_reg, lane_value);
+			lane_reg = PSB_LANE1;
+			cdv_sb_read(dev, lane_reg, &lane_value);
+			lane_value &= ~(LANE_PLL_MASK);
+			lane_value |= LANE_PLL_ENABLE | LANE_PLL_PIPE(pipe);
+			cdv_sb_write(dev, lane_reg, lane_value);
+		} else {
+			lane_reg = PSB_LANE2;
+			cdv_sb_read(dev, lane_reg, &lane_value);
+			lane_value &= ~(LANE_PLL_MASK);
+			lane_value |= LANE_PLL_ENABLE | LANE_PLL_PIPE(pipe);
+			cdv_sb_write(dev, lane_reg, lane_value);
 
-	lane_reg = PSB_LANE2;
-	cdv_sb_read(dev, lane_reg, &lane_value);
-	lane_value &= ~(LANE_PLL_MASK);
-	lane_value |= LANE_PLL_ENABLE | LANE_PLL_PIPE(pipe);
-	cdv_sb_write(dev, lane_reg, lane_value);
-
-	lane_reg = PSB_LANE3;
-	cdv_sb_read(dev, lane_reg, &lane_value);
-	lane_value &= ~(LANE_PLL_MASK);
-	lane_value |= LANE_PLL_ENABLE | LANE_PLL_PIPE(pipe);
-	cdv_sb_write(dev, lane_reg, lane_value);
-
+			lane_reg = PSB_LANE3;
+			cdv_sb_read(dev, lane_reg, &lane_value);
+			lane_value &= ~(LANE_PLL_MASK);
+			lane_value |= LANE_PLL_ENABLE | LANE_PLL_PIPE(pipe);
+			cdv_sb_write(dev, lane_reg, lane_value);
+		}
+	}
 	return 0;
 }
 
@@ -396,6 +438,12 @@
 			limit = &cdv_intel_limits[CDV_LIMIT_SINGLE_LVDS_96];
 		else
 			limit = &cdv_intel_limits[CDV_LIMIT_SINGLE_LVDS_100];
+	} else if (psb_intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) ||
+			psb_intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP)) {
+		if (refclk == 27000)
+			limit = &cdv_intel_limits[CDV_LIMIT_DP_27];
+		else
+			limit = &cdv_intel_limits[CDV_LIMIT_DP_100];
 	} else {
 		if (refclk == 27000)
 			limit = &cdv_intel_limits[CDV_LIMIT_DAC_HDMI_27];
@@ -438,13 +486,12 @@
 	return true;
 }
 
-static bool cdv_intel_find_best_PLL(struct drm_crtc *crtc, int target,
-				int refclk,
-				struct cdv_intel_clock_t *best_clock)
+static bool cdv_intel_find_best_PLL(const struct cdv_intel_limit_t *limit,
+	struct drm_crtc *crtc, int target, int refclk,
+	struct cdv_intel_clock_t *best_clock)
 {
 	struct drm_device *dev = crtc->dev;
 	struct cdv_intel_clock_t clock;
-	const struct cdv_intel_limit_t *limit = cdv_intel_limit(crtc, refclk);
 	int err = target;
 
 
@@ -498,6 +545,49 @@
 	return err != target;
 }
 
+static bool cdv_intel_find_dp_pll(const struct cdv_intel_limit_t *limit, struct drm_crtc *crtc, int target,
+				int refclk,
+				struct cdv_intel_clock_t *best_clock)
+{
+	struct cdv_intel_clock_t clock;
+	if (refclk == 27000) {
+		if (target < 200000) {
+			clock.p1 = 2;
+			clock.p2 = 10;
+			clock.n = 1;
+			clock.m1 = 0;
+			clock.m2 = 118;
+		} else {
+			clock.p1 = 1;
+			clock.p2 = 10;
+			clock.n = 1;
+			clock.m1 = 0;
+			clock.m2 = 98;
+		}
+	} else if (refclk == 100000) {
+		if (target < 200000) {
+			clock.p1 = 2;
+			clock.p2 = 10;
+			clock.n = 5;
+			clock.m1 = 0;
+			clock.m2 = 160;
+		} else {
+			clock.p1 = 1;
+			clock.p2 = 10;
+			clock.n = 5;
+			clock.m1 = 0;
+			clock.m2 = 133;
+		}
+	} else
+		return false;
+	clock.m = clock.m2 + 2;
+	clock.p = clock.p1 * clock.p2;
+	clock.vco = (refclk * clock.m) / clock.n;
+	clock.dot = clock.vco / clock.p;
+	memcpy(best_clock, &clock, sizeof(struct cdv_intel_clock_t));
+	return true;
+}
+
 static int cdv_intel_pipe_set_base(struct drm_crtc *crtc,
 			    int x, int y, struct drm_framebuffer *old_fb)
 {
@@ -791,7 +881,7 @@
 	case DRM_MODE_DPMS_STANDBY:
 	case DRM_MODE_DPMS_SUSPEND:
 		if (psb_intel_crtc->active)
-			return;
+			break;
 
 		psb_intel_crtc->active = true;
 
@@ -835,17 +925,15 @@
 		REG_WRITE(map->status, temp);
 		REG_READ(map->status);
 
-		cdv_intel_update_watermark(dev, crtc);
 		cdv_intel_crtc_load_lut(crtc);
 
 		/* Give the overlay scaler a chance to enable
 		 * if it's on this pipe */
 		/* psb_intel_crtc_dpms_video(crtc, true); TODO */
-		psb_intel_crtc->crtc_enable = true;
 		break;
 	case DRM_MODE_DPMS_OFF:
 		if (!psb_intel_crtc->active)
-			return;
+			break;
 
 		psb_intel_crtc->active = false;
 
@@ -892,10 +980,9 @@
 
 		/* Wait for the clocks to turn off. */
 		udelay(150);
-		cdv_intel_update_watermark(dev, crtc);
-		psb_intel_crtc->crtc_enable = false;
 		break;
 	}
+	cdv_intel_update_watermark(dev, crtc);
 	/*Set FIFO Watermarks*/
 	REG_WRITE(DSPARB, 0x3F3E);
 }
@@ -952,9 +1039,12 @@
 	u32 dpll = 0, dspcntr, pipeconf;
 	bool ok;
 	bool is_crt = false, is_lvds = false, is_tv = false;
-	bool is_hdmi = false;
+	bool is_hdmi = false, is_dp = false;
 	struct drm_mode_config *mode_config = &dev->mode_config;
 	struct drm_connector *connector;
+	const struct cdv_intel_limit_t *limit;
+	u32 ddi_select = 0;
+	bool is_edp = false;
 
 	list_for_each_entry(connector, &mode_config->connector_list, head) {
 		struct psb_intel_encoder *psb_intel_encoder =
@@ -964,6 +1054,7 @@
 		    || connector->encoder->crtc != crtc)
 			continue;
 
+		ddi_select = psb_intel_encoder->ddi_select;
 		switch (psb_intel_encoder->type) {
 		case INTEL_OUTPUT_LVDS:
 			is_lvds = true;
@@ -977,6 +1068,15 @@
 		case INTEL_OUTPUT_HDMI:
 			is_hdmi = true;
 			break;
+		case INTEL_OUTPUT_DISPLAYPORT:
+			is_dp = true;
+			break;
+		case INTEL_OUTPUT_EDP:
+			is_edp = true;
+			break;
+		default:
+			DRM_ERROR("invalid output type.\n");
+			return 0;
 		}
 	}
 
@@ -986,6 +1086,20 @@
 	else
 		/* high-end sku, 27/100 mhz */
 		refclk = 27000;
+	if (is_dp || is_edp) {
+		/*
+		 * Based on the spec the low-end SKU has only CRT/LVDS. So it is
+		 * unnecessary to consider it for DP/eDP.
+		 * On the high-end SKU, it will use the 27/100M reference clk
+		 * for DP/eDP. When using SSC clock, the ref clk is 100MHz.
+		 * Otherwise it will be 27MHz. From the VBIOS code it seems
+		 * that pipe A chooses 27MHz for DP/eDP while pipe B chooses
+		 * 100MHz.
+		 */
+		if (pipe == 0)
+			refclk = 27000;
+		else
+			refclk = 100000;
+	}
 
 	if (is_lvds && dev_priv->lvds_use_ssc) {
 		refclk = dev_priv->lvds_ssc_freq * 1000;
@@ -993,8 +1107,10 @@
 	}
 
 	drm_mode_debug_printmodeline(adjusted_mode);
+	
+	limit = cdv_intel_limit(crtc, refclk);
 
-	ok = cdv_intel_find_best_PLL(crtc, adjusted_mode->clock, refclk,
+	ok = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk,
 				 &clock);
 	if (!ok) {
 		dev_err(dev->dev, "Couldn't find PLL settings for mode!\n");
@@ -1009,6 +1125,15 @@
 	}
 /*		dpll |= PLL_REF_INPUT_DREFCLK; */
 
+	if (is_dp || is_edp) {
+		cdv_intel_dp_set_m_n(crtc, mode, adjusted_mode);
+	} else {
+		REG_WRITE(PIPE_GMCH_DATA_M(pipe), 0);
+		REG_WRITE(PIPE_GMCH_DATA_N(pipe), 0);
+		REG_WRITE(PIPE_DP_LINK_M(pipe), 0);
+		REG_WRITE(PIPE_DP_LINK_N(pipe), 0);
+	}
+
 	dpll |= DPLL_SYNCLOCK_ENABLE;
 /*	if (is_lvds)
 		dpll |= DPLLB_MODE_LVDS;
@@ -1019,6 +1144,31 @@
 	/* setup pipeconf */
 	pipeconf = REG_READ(map->conf);
 
+	pipeconf &= ~(PIPE_BPC_MASK);
+	if (is_edp) {
+		switch (dev_priv->edp.bpp) {
+		case 24:
+			pipeconf |= PIPE_8BPC;
+			break;
+		case 18:
+			pipeconf |= PIPE_6BPC;
+			break;
+		case 30:
+			pipeconf |= PIPE_10BPC;
+			break;
+		default:
+			pipeconf |= PIPE_8BPC;
+			break;
+		}
+	} else if (is_lvds) {
+		/* the BPC will be 6 if it is 18-bit LVDS panel */
+		if ((REG_READ(LVDS) & LVDS_A3_POWER_MASK) == LVDS_A3_POWER_UP)
+			pipeconf |= PIPE_8BPC;
+		else
+			pipeconf |= PIPE_6BPC;
+	} else
+		pipeconf |= PIPE_8BPC;
+			
 	/* Set up the display plane register */
 	dspcntr = DISPPLANE_GAMMA_ENABLE;
 
@@ -1033,7 +1183,7 @@
 	REG_WRITE(map->dpll, dpll | DPLL_VGA_MODE_DIS | DPLL_SYNCLOCK_ENABLE);
 	REG_READ(map->dpll);
 
-	cdv_dpll_set_clock_cdv(dev, crtc, &clock, is_lvds);
+	cdv_dpll_set_clock_cdv(dev, crtc, &clock, is_lvds, ddi_select);
 
 	udelay(150);
 
diff --git a/drivers/gpu/drm/gma500/cdv_intel_dp.c b/drivers/gpu/drm/gma500/cdv_intel_dp.c
new file mode 100644
index 0000000..c9abc06
--- /dev/null
+++ b/drivers/gpu/drm/gma500/cdv_intel_dp.c
@@ -0,0 +1,1951 @@
+/*
+ * Copyright © 2012 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ *    Keith Packard <keithp@keithp.com>
+ *
+ */
+
+#include <linux/i2c.h>
+#include <linux/slab.h>
+#include "drmP.h"
+#include "drm.h"
+#include "drm_crtc.h"
+#include "drm_crtc_helper.h"
+#include "psb_drv.h"
+#include "psb_intel_drv.h"
+#include "psb_intel_reg.h"
+#include "drm_dp_helper.h"
+
+#define _wait_for(COND, MS, W) ({ \
+        unsigned long timeout__ = jiffies + msecs_to_jiffies(MS);       \
+        int ret__ = 0;                                                  \
+        while (! (COND)) {                                              \
+                if (time_after(jiffies, timeout__)) {                   \
+                        ret__ = -ETIMEDOUT;                             \
+                        break;                                          \
+                }                                                       \
+                if (W && !in_dbg_master()) msleep(W);                   \
+        }                                                               \
+        ret__;                                                          \
+})      
+
+#define wait_for(COND, MS) _wait_for(COND, MS, 1)
+
+#define DP_LINK_STATUS_SIZE	6
+#define DP_LINK_CHECK_TIMEOUT	(10 * 1000)
+
+#define DP_LINK_CONFIGURATION_SIZE	9
+
+#define CDV_FAST_LINK_TRAIN	1
+
+struct cdv_intel_dp {
+	uint32_t output_reg;
+	uint32_t DP;
+	uint8_t  link_configuration[DP_LINK_CONFIGURATION_SIZE];
+	bool has_audio;
+	int force_audio;
+	uint32_t color_range;
+	uint8_t link_bw;
+	uint8_t lane_count;
+	uint8_t dpcd[4];
+	struct psb_intel_encoder *encoder;
+	struct i2c_adapter adapter;
+	struct i2c_algo_dp_aux_data algo;
+	uint8_t	train_set[4];
+	uint8_t link_status[DP_LINK_STATUS_SIZE];
+	int panel_power_up_delay;
+	int panel_power_down_delay;
+	int panel_power_cycle_delay;
+	int backlight_on_delay;
+	int backlight_off_delay;
+	struct drm_display_mode *panel_fixed_mode;  /* for eDP */
+	bool panel_on;
+};
+
+struct ddi_regoff {
+	uint32_t	PreEmph1;
+	uint32_t	PreEmph2;
+	uint32_t	VSwing1;
+	uint32_t	VSwing2;
+	uint32_t	VSwing3;
+	uint32_t	VSwing4;
+	uint32_t	VSwing5;
+};
+
+static struct ddi_regoff ddi_DP_train_table[] = {
+	{.PreEmph1 = 0x812c, .PreEmph2 = 0x8124, .VSwing1 = 0x8154,
+	.VSwing2 = 0x8148, .VSwing3 = 0x814C, .VSwing4 = 0x8150,
+	.VSwing5 = 0x8158,},
+	{.PreEmph1 = 0x822c, .PreEmph2 = 0x8224, .VSwing1 = 0x8254,
+	.VSwing2 = 0x8248, .VSwing3 = 0x824C, .VSwing4 = 0x8250,
+	.VSwing5 = 0x8258,},
+};
+
+static uint32_t dp_vswing_premph_table[] = {
+        0x55338954,	0x4000,
+        0x554d8954,	0x2000,
+        0x55668954,	0,
+        0x559ac0d4,	0x6000,
+};
+/**
+ * is_edp - is the given port attached to an eDP panel (either CPU or PCH)
+ * @encoder: GMA encoder struct
+ *
+ * If a CPU or PCH DP output is attached to an eDP panel, this function
+ * will return true, and false otherwise.
+ */
+static bool is_edp(struct psb_intel_encoder *encoder)
+{
+	return encoder->type == INTEL_OUTPUT_EDP;
+}
+
+
+static void cdv_intel_dp_start_link_train(struct psb_intel_encoder *encoder);
+static void cdv_intel_dp_complete_link_train(struct psb_intel_encoder *encoder);
+static void cdv_intel_dp_link_down(struct psb_intel_encoder *encoder);
+
+static int
+cdv_intel_dp_max_lane_count(struct psb_intel_encoder *encoder)
+{
+	struct cdv_intel_dp *intel_dp = encoder->dev_priv;
+	int max_lane_count = 4;
+
+	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) {
+		max_lane_count = intel_dp->dpcd[DP_MAX_LANE_COUNT] & 0x1f;
+		switch (max_lane_count) {
+		case 1: case 2: case 4:
+			break;
+		default:
+			max_lane_count = 4;
+		}
+	}
+	return max_lane_count;
+}
+
+static int
+cdv_intel_dp_max_link_bw(struct psb_intel_encoder *encoder)
+{
+	struct cdv_intel_dp *intel_dp = encoder->dev_priv;
+	int max_link_bw = intel_dp->dpcd[DP_MAX_LINK_RATE];
+
+	switch (max_link_bw) {
+	case DP_LINK_BW_1_62:
+	case DP_LINK_BW_2_7:
+		break;
+	default:
+		max_link_bw = DP_LINK_BW_1_62;
+		break;
+	}
+	return max_link_bw;
+}
+
+static int
+cdv_intel_dp_link_clock(uint8_t link_bw)
+{
+	if (link_bw == DP_LINK_BW_2_7)
+		return 270000;
+	else
+		return 162000;
+}
+
+static int
+cdv_intel_dp_link_required(int pixel_clock, int bpp)
+{
+	return (pixel_clock * bpp + 7) / 8;
+}
+
+static int
+cdv_intel_dp_max_data_rate(int max_link_clock, int max_lanes)
+{
+	return (max_link_clock * max_lanes * 19) / 20;
+}
+
+static void cdv_intel_edp_panel_vdd_on(struct psb_intel_encoder *intel_encoder)
+{
+	struct drm_device *dev = intel_encoder->base.dev;
+	struct cdv_intel_dp *intel_dp = intel_encoder->dev_priv;
+	u32 pp;
+
+	if (intel_dp->panel_on) {
+		DRM_DEBUG_KMS("Skip VDD on because of panel on\n");
+		return;
+	}	
+	DRM_DEBUG_KMS("\n");
+
+	pp = REG_READ(PP_CONTROL);
+
+	pp |= EDP_FORCE_VDD;
+	REG_WRITE(PP_CONTROL, pp);
+	REG_READ(PP_CONTROL);
+	msleep(intel_dp->panel_power_up_delay);
+}
+
+static void cdv_intel_edp_panel_vdd_off(struct psb_intel_encoder *intel_encoder)
+{
+	struct drm_device *dev = intel_encoder->base.dev;
+	u32 pp;
+
+	DRM_DEBUG_KMS("\n");
+	pp = REG_READ(PP_CONTROL);
+
+	pp &= ~EDP_FORCE_VDD;
+	REG_WRITE(PP_CONTROL, pp);
+	REG_READ(PP_CONTROL);
+
+}
+
+/* Returns true if the panel was already on when called */
+static bool cdv_intel_edp_panel_on(struct psb_intel_encoder *intel_encoder)
+{
+	struct drm_device *dev = intel_encoder->base.dev;
+	struct cdv_intel_dp *intel_dp = intel_encoder->dev_priv;
+	u32 pp, idle_on_mask = PP_ON | PP_SEQUENCE_NONE;
+
+	if (intel_dp->panel_on)
+		return true;
+
+	DRM_DEBUG_KMS("\n");
+	pp = REG_READ(PP_CONTROL);
+	pp &= ~PANEL_UNLOCK_MASK;
+
+	pp |= (PANEL_UNLOCK_REGS | POWER_TARGET_ON);
+	REG_WRITE(PP_CONTROL, pp);
+	REG_READ(PP_CONTROL);
+
+	if (wait_for(((REG_READ(PP_STATUS) & idle_on_mask) == idle_on_mask), 1000)) {
+		DRM_DEBUG_KMS("Error in Powering up eDP panel, status %x\n", REG_READ(PP_STATUS));
+		intel_dp->panel_on = false;
+	} else {
+		intel_dp->panel_on = true;
+	}
+	msleep(intel_dp->panel_power_up_delay);
+
+	return false;
+}
+
+static void cdv_intel_edp_panel_off(struct psb_intel_encoder *intel_encoder)
+{
+	struct drm_device *dev = intel_encoder->base.dev;
+	u32 pp, idle_off_mask = PP_ON;
+	struct cdv_intel_dp *intel_dp = intel_encoder->dev_priv;
+
+	DRM_DEBUG_KMS("\n");
+
+	pp = REG_READ(PP_CONTROL);
+
+	if ((pp & POWER_TARGET_ON) == 0) 
+		return;
+
+	intel_dp->panel_on = false;
+	pp &= ~PANEL_UNLOCK_MASK;
+	/* ILK workaround: disable reset around power sequence */
+
+	pp &= ~POWER_TARGET_ON;
+	pp &= ~EDP_FORCE_VDD;
+	pp &= ~EDP_BLC_ENABLE;
+	REG_WRITE(PP_CONTROL, pp);
+	REG_READ(PP_CONTROL);
+	DRM_DEBUG_KMS("PP_STATUS %x\n", REG_READ(PP_STATUS));
+
+	if (wait_for((REG_READ(PP_STATUS) & idle_off_mask) == 0, 1000)) {
+		DRM_DEBUG_KMS("Error in turning off Panel\n");	
+	}
+
+	msleep(intel_dp->panel_power_cycle_delay);
+	DRM_DEBUG_KMS("Over\n");
+}
+
+static void cdv_intel_edp_backlight_on(struct psb_intel_encoder *intel_encoder)
+{
+	struct drm_device *dev = intel_encoder->base.dev;
+	u32 pp;
+
+	DRM_DEBUG_KMS("\n");
+	/*
+	 * If we enable the backlight right away following a panel power
+	 * on, we may see slight flicker as the panel syncs with the eDP
+	 * link.  So delay a bit to make sure the image is solid before
+	 * allowing it to appear.
+	 */
+	msleep(300);
+	pp = REG_READ(PP_CONTROL);
+
+	pp |= EDP_BLC_ENABLE;
+	REG_WRITE(PP_CONTROL, pp);
+	gma_backlight_enable(dev);
+}
+
+static void cdv_intel_edp_backlight_off(struct psb_intel_encoder *intel_encoder)
+{
+	struct drm_device *dev = intel_encoder->base.dev;
+	struct cdv_intel_dp *intel_dp = intel_encoder->dev_priv;
+	u32 pp;
+
+	DRM_DEBUG_KMS("\n");
+	gma_backlight_disable(dev);
+	msleep(10);
+	pp = REG_READ(PP_CONTROL);
+
+	pp &= ~EDP_BLC_ENABLE;
+	REG_WRITE(PP_CONTROL, pp);
+	msleep(intel_dp->backlight_off_delay);
+}
+
+static int
+cdv_intel_dp_mode_valid(struct drm_connector *connector,
+		    struct drm_display_mode *mode)
+{
+	struct psb_intel_encoder *encoder = psb_intel_attached_encoder(connector);
+	struct cdv_intel_dp *intel_dp = encoder->dev_priv;
+	int max_link_clock = cdv_intel_dp_link_clock(cdv_intel_dp_max_link_bw(encoder));
+	int max_lanes = cdv_intel_dp_max_lane_count(encoder);
+	struct drm_psb_private *dev_priv = connector->dev->dev_private;
+
+	if (is_edp(encoder) && intel_dp->panel_fixed_mode) {
+		if (mode->hdisplay > intel_dp->panel_fixed_mode->hdisplay)
+			return MODE_PANEL;
+		if (mode->vdisplay > intel_dp->panel_fixed_mode->vdisplay)
+			return MODE_PANEL;
+	}
+
+	/* Only refuse the mode on non-eDP, since we have seen some weird eDP
+	   panels which are outside spec tolerances but somehow work by magic */
+	if (!is_edp(encoder) &&
+	    (cdv_intel_dp_link_required(mode->clock, dev_priv->edp.bpp)
+	     > cdv_intel_dp_max_data_rate(max_link_clock, max_lanes)))
+		return MODE_CLOCK_HIGH;
+
+	if (is_edp(encoder)) {
+		if (cdv_intel_dp_link_required(mode->clock, 24)
+		    > cdv_intel_dp_max_data_rate(max_link_clock, max_lanes))
+			return MODE_CLOCK_HIGH;
+	}
+	if (mode->clock < 10000)
+		return MODE_CLOCK_LOW;
+
+	return MODE_OK;
+}
+
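+/*
+ * The AUX channel data registers carry up to four message bytes each,
+ * packed big-endian: byte 0 of the message occupies bits 31:24.
+ */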
+static uint32_t
+pack_aux(uint8_t *src, int src_bytes)
+{
+	int	i;
+	uint32_t v = 0;
+
+	if (src_bytes > 4)
+		src_bytes = 4;
+	for (i = 0; i < src_bytes; i++)
+		v |= ((uint32_t) src[i]) << ((3-i) * 8);
+	return v;
+}
+
+static void
+unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes)
+{
+	int i;
+	if (dst_bytes > 4)
+		dst_bytes = 4;
+	for (i = 0; i < dst_bytes; i++)
+		dst[i] = src >> ((3-i) * 8);
+}
+
+static int
+cdv_intel_dp_aux_ch(struct psb_intel_encoder *encoder,
+		uint8_t *send, int send_bytes,
+		uint8_t *recv, int recv_size)
+{
+	struct cdv_intel_dp *intel_dp = encoder->dev_priv;
+	uint32_t output_reg = intel_dp->output_reg;
+	struct drm_device *dev = encoder->base.dev;
+	uint32_t ch_ctl = output_reg + 0x10;
+	uint32_t ch_data = ch_ctl + 4;
+	int i;
+	int recv_bytes;
+	uint32_t status;
+	uint32_t aux_clock_divider;
+	int try, precharge;
+
+	/* The clock divider is based off the hrawclk, and the AUX channel
+	 * would like to run at 2MHz. So take the hrawclk value and divide
+	 * by 2 to get the divider. The CDV platform uses a 200MHz hrawclk.
+	 */
+	aux_clock_divider = 200 / 2;
+
+	precharge = 4;
+	if (is_edp(encoder))
+		precharge = 10;
+
+	if (REG_READ(ch_ctl) & DP_AUX_CH_CTL_SEND_BUSY) {
+		DRM_ERROR("dp_aux_ch not started status 0x%08x\n",
+			  REG_READ(ch_ctl));
+		return -EBUSY;
+	}
+
+	/* The DP spec requires at least 3 attempts; we allow up to 5 */
+	for (try = 0; try < 5; try++) {
+		/* Load the send data into the aux channel data registers */
+		for (i = 0; i < send_bytes; i += 4)
+			REG_WRITE(ch_data + i,
+				   pack_aux(send + i, send_bytes - i));
+	
+		/* Send the command and wait for it to complete */
+		REG_WRITE(ch_ctl,
+			   DP_AUX_CH_CTL_SEND_BUSY |
+			   DP_AUX_CH_CTL_TIME_OUT_400us |
+			   (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
+			   (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
+			   (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT) |
+			   DP_AUX_CH_CTL_DONE |
+			   DP_AUX_CH_CTL_TIME_OUT_ERROR |
+			   DP_AUX_CH_CTL_RECEIVE_ERROR);
+		for (;;) {
+			status = REG_READ(ch_ctl);
+			if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
+				break;
+			udelay(100);
+		}
+	
+		/* Clear done status and any errors */
+		REG_WRITE(ch_ctl,
+			   status |
+			   DP_AUX_CH_CTL_DONE |
+			   DP_AUX_CH_CTL_TIME_OUT_ERROR |
+			   DP_AUX_CH_CTL_RECEIVE_ERROR);
+		if (status & DP_AUX_CH_CTL_DONE)
+			break;
+	}
+
+	if ((status & DP_AUX_CH_CTL_DONE) == 0) {
+		DRM_ERROR("dp_aux_ch not done status 0x%08x\n", status);
+		return -EBUSY;
+	}
+
+	/* Check for timeout or receive error.
+	 * Timeouts occur when the sink is not connected
+	 */
+	if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
+		DRM_ERROR("dp_aux_ch receive error status 0x%08x\n", status);
+		return -EIO;
+	}
+
+	/* Timeouts occur when the device isn't connected, so they're
+	 * "normal" -- don't fill the kernel log with these */
+	if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
+		DRM_DEBUG_KMS("dp_aux_ch timeout status 0x%08x\n", status);
+		return -ETIMEDOUT;
+	}
+
+	/* Unload any bytes sent back from the other side */
+	recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >>
+		      DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);
+	if (recv_bytes > recv_size)
+		recv_bytes = recv_size;
+	
+	for (i = 0; i < recv_bytes; i += 4)
+		unpack_aux(REG_READ(ch_data + i),
+			   recv + i, recv_bytes - i);
+
+	return recv_bytes;
+}
+
+/* Write data to the aux channel in native mode */
+static int
+cdv_intel_dp_aux_native_write(struct psb_intel_encoder *encoder,
+			  uint16_t address, uint8_t *send, int send_bytes)
+{
+	int ret;
+	uint8_t	msg[20];
+	int msg_bytes;
+	uint8_t	ack;
+
+	if (send_bytes > 16)
+		return -1;
+	msg[0] = AUX_NATIVE_WRITE << 4;
+	msg[1] = address >> 8;
+	msg[2] = address & 0xff;
+	msg[3] = send_bytes - 1;
+	memcpy(&msg[4], send, send_bytes);
+	msg_bytes = send_bytes + 4;
+	for (;;) {
+		ret = cdv_intel_dp_aux_ch(encoder, msg, msg_bytes, &ack, 1);
+		if (ret < 0)
+			return ret;
+		if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK)
+			break;
+		else if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_DEFER)
+			udelay(100);
+		else
+			return -EIO;
+	}
+	return send_bytes;
+}
+
+/* Write a single byte to the aux channel in native mode */
+static int
+cdv_intel_dp_aux_native_write_1(struct psb_intel_encoder *encoder,
+			    uint16_t address, uint8_t byte)
+{
+	return cdv_intel_dp_aux_native_write(encoder, address, &byte, 1);
+}
+
+/* read bytes from a native aux channel */
+static int
+cdv_intel_dp_aux_native_read(struct psb_intel_encoder *encoder,
+			 uint16_t address, uint8_t *recv, int recv_bytes)
+{
+	uint8_t msg[4];
+	int msg_bytes;
+	uint8_t reply[20];
+	int reply_bytes;
+	uint8_t ack;
+	int ret;
+
+	msg[0] = AUX_NATIVE_READ << 4;
+	msg[1] = address >> 8;
+	msg[2] = address & 0xff;
+	msg[3] = recv_bytes - 1;
+
+	msg_bytes = 4;
+	reply_bytes = recv_bytes + 1;
+
+	for (;;) {
+		ret = cdv_intel_dp_aux_ch(encoder, msg, msg_bytes,
+				      reply, reply_bytes);
+		if (ret == 0)
+			return -EPROTO;
+		if (ret < 0)
+			return ret;
+		ack = reply[0];
+		if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK) {
+			memcpy(recv, reply + 1, ret - 1);
+			return ret - 1;
+		}
+		else if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_DEFER)
+			udelay(100);
+		else
+			return -EIO;
+	}
+}
+
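+/*
+ * I2C-over-AUX transfer: wrap a single I2C byte read/write in a DP AUX
+ * transaction. The Middle-Of-Transaction bit stays set until the caller
+ * requests an I2C stop, and native/I2C defers are retried after a delay.
+ */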
+static int
+cdv_intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
+		    uint8_t write_byte, uint8_t *read_byte)
+{
+	struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
+	struct cdv_intel_dp *intel_dp = container_of(adapter,
+						struct cdv_intel_dp,
+						adapter);
+	struct psb_intel_encoder *encoder = intel_dp->encoder;
+	uint16_t address = algo_data->address;
+	uint8_t msg[5];
+	uint8_t reply[2];
+	unsigned retry;
+	int msg_bytes;
+	int reply_bytes;
+	int ret;
+
+	/* Set up the command byte */
+	if (mode & MODE_I2C_READ)
+		msg[0] = AUX_I2C_READ << 4;
+	else
+		msg[0] = AUX_I2C_WRITE << 4;
+
+	if (!(mode & MODE_I2C_STOP))
+		msg[0] |= AUX_I2C_MOT << 4;
+
+	msg[1] = address >> 8;
+	msg[2] = address;
+
+	switch (mode) {
+	case MODE_I2C_WRITE:
+		msg[3] = 0;
+		msg[4] = write_byte;
+		msg_bytes = 5;
+		reply_bytes = 1;
+		break;
+	case MODE_I2C_READ:
+		msg[3] = 0;
+		msg_bytes = 4;
+		reply_bytes = 2;
+		break;
+	default:
+		msg_bytes = 3;
+		reply_bytes = 1;
+		break;
+	}
+
+	for (retry = 0; retry < 5; retry++) {
+		ret = cdv_intel_dp_aux_ch(encoder,
+				      msg, msg_bytes,
+				      reply, reply_bytes);
+		if (ret < 0) {
+			DRM_DEBUG_KMS("aux_ch failed %d\n", ret);
+			return ret;
+		}
+
+		switch (reply[0] & AUX_NATIVE_REPLY_MASK) {
+		case AUX_NATIVE_REPLY_ACK:
+			/* I2C-over-AUX Reply field is only valid
+			 * when paired with AUX ACK.
+			 */
+			break;
+		case AUX_NATIVE_REPLY_NACK:
+			DRM_DEBUG_KMS("aux_ch native nack\n");
+			return -EREMOTEIO;
+		case AUX_NATIVE_REPLY_DEFER:
+			udelay(100);
+			continue;
+		default:
+			DRM_ERROR("aux_ch invalid native reply 0x%02x\n",
+				  reply[0]);
+			return -EREMOTEIO;
+		}
+
+		switch (reply[0] & AUX_I2C_REPLY_MASK) {
+		case AUX_I2C_REPLY_ACK:
+			if (mode == MODE_I2C_READ) {
+				*read_byte = reply[1];
+			}
+			return reply_bytes - 1;
+		case AUX_I2C_REPLY_NACK:
+			DRM_DEBUG_KMS("aux_i2c nack\n");
+			return -EREMOTEIO;
+		case AUX_I2C_REPLY_DEFER:
+			DRM_DEBUG_KMS("aux_i2c defer\n");
+			udelay(100);
+			break;
+		default:
+			DRM_ERROR("aux_i2c invalid reply 0x%02x\n", reply[0]);
+			return -EREMOTEIO;
+		}
+	}
+
+	DRM_ERROR("too many retries, giving up\n");
+	return -EREMOTEIO;
+}
+
+static int
+cdv_intel_dp_i2c_init(struct psb_intel_connector *connector, struct psb_intel_encoder *encoder, const char *name)
+{
+	struct cdv_intel_dp *intel_dp = encoder->dev_priv;
+	int ret;
+
+	DRM_DEBUG_KMS("i2c_init %s\n", name);
+
+	intel_dp->algo.running = false;
+	intel_dp->algo.address = 0;
+	intel_dp->algo.aux_ch = cdv_intel_dp_i2c_aux_ch;
+
+	memset(&intel_dp->adapter, 0, sizeof(intel_dp->adapter));
+	intel_dp->adapter.owner = THIS_MODULE;
+	intel_dp->adapter.class = I2C_CLASS_DDC;
+	strncpy(intel_dp->adapter.name, name, sizeof(intel_dp->adapter.name) - 1);
+	intel_dp->adapter.name[sizeof(intel_dp->adapter.name) - 1] = '\0';
+	intel_dp->adapter.algo_data = &intel_dp->algo;
+	intel_dp->adapter.dev.parent = &connector->base.kdev;
+
+	if (is_edp(encoder))
+		cdv_intel_edp_panel_vdd_on(encoder);
+	ret = i2c_dp_aux_add_bus(&intel_dp->adapter);
+	if (is_edp(encoder))
+		cdv_intel_edp_panel_vdd_off(encoder);
+	
+	return ret;
+}
+
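+/*
+ * Force the panel's fixed (native) timings into the adjusted mode, so an
+ * eDP panel is always driven at its native mode regardless of the mode
+ * userspace requested.
+ */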
+void cdv_intel_fixed_panel_mode(struct drm_display_mode *fixed_mode,
+	struct drm_display_mode *adjusted_mode)
+{
+	adjusted_mode->hdisplay = fixed_mode->hdisplay;
+	adjusted_mode->hsync_start = fixed_mode->hsync_start;
+	adjusted_mode->hsync_end = fixed_mode->hsync_end;
+	adjusted_mode->htotal = fixed_mode->htotal;
+
+	adjusted_mode->vdisplay = fixed_mode->vdisplay;
+	adjusted_mode->vsync_start = fixed_mode->vsync_start;
+	adjusted_mode->vsync_end = fixed_mode->vsync_end;
+	adjusted_mode->vtotal = fixed_mode->vtotal;
+
+	adjusted_mode->clock = fixed_mode->clock;
+
+	drm_mode_set_crtcinfo(adjusted_mode, CRTC_INTERLACE_HALVE_V);
+}
+
+static bool
+cdv_intel_dp_mode_fixup(struct drm_encoder *encoder, const struct drm_display_mode *mode,
+		    struct drm_display_mode *adjusted_mode)
+{
+	struct drm_psb_private *dev_priv = encoder->dev->dev_private;
+	struct psb_intel_encoder *intel_encoder = to_psb_intel_encoder(encoder);
+	struct cdv_intel_dp *intel_dp = intel_encoder->dev_priv;
+	int lane_count, clock;
+	int max_lane_count = cdv_intel_dp_max_lane_count(intel_encoder);
+	int max_clock = cdv_intel_dp_max_link_bw(intel_encoder) == DP_LINK_BW_2_7 ? 1 : 0;
+	static int bws[2] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7 };
+	int refclock = mode->clock;
+	int bpp = 24;
+
+	if (is_edp(intel_encoder) && intel_dp->panel_fixed_mode) {
+		cdv_intel_fixed_panel_mode(intel_dp->panel_fixed_mode, adjusted_mode);
+		refclock = intel_dp->panel_fixed_mode->clock;
+		bpp = dev_priv->edp.bpp;
+	}
+
+	for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) {
+		for (clock = max_clock; clock >= 0; clock--) {
+			int link_avail = cdv_intel_dp_max_data_rate(cdv_intel_dp_link_clock(bws[clock]), lane_count);
+
+			if (cdv_intel_dp_link_required(refclock, bpp) <= link_avail) {
+				intel_dp->link_bw = bws[clock];
+				intel_dp->lane_count = lane_count;
+				adjusted_mode->clock = cdv_intel_dp_link_clock(intel_dp->link_bw);
+				DRM_DEBUG_KMS("Display port link bw %02x lane "
+						"count %d clock %d\n",
+				       intel_dp->link_bw, intel_dp->lane_count,
+				       adjusted_mode->clock);
+				return true;
+			}
+		}
+	}
+	if (is_edp(intel_encoder)) {
+		/* Okay, we failed; just pick the highest settings */
+		intel_dp->lane_count = max_lane_count;
+		intel_dp->link_bw = bws[max_clock];
+		adjusted_mode->clock = cdv_intel_dp_link_clock(intel_dp->link_bw);
+		DRM_DEBUG_KMS("Force picking display port link bw %02x lane "
+			      "count %d clock %d\n",
+			      intel_dp->link_bw, intel_dp->lane_count,
+			      adjusted_mode->clock);
+
+		return true;
+	}
+	return false;
+}
+
+struct cdv_intel_dp_m_n {
+	uint32_t	tu;
+	uint32_t	gmch_m;
+	uint32_t	gmch_n;
+	uint32_t	link_m;
+	uint32_t	link_n;
+};
+
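+/*
+ * Rescale num/den so the denominator becomes the fixed value 0x800000
+ * (2^23): the M/N register fields are 24 bits wide, and a fixed
+ * power-of-two denominator keeps precision for small ratios.
+ */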
+static void
+cdv_intel_reduce_ratio(uint32_t *num, uint32_t *den)
+{
+	uint64_t value, m;
+
+	m = *num;
+	value = m * 0x800000;
+	do_div(value, *den);
+	*num = value;
+	*den = 0x800000;
+}
+
+static void
+cdv_intel_dp_compute_m_n(int bpp,
+		     int nlanes,
+		     int pixel_clock,
+		     int link_clock,
+		     struct cdv_intel_dp_m_n *m_n)
+{
+	m_n->tu = 64;
+	m_n->gmch_m = (pixel_clock * bpp + 7) >> 3;
+	m_n->gmch_n = link_clock * nlanes;
+	cdv_intel_reduce_ratio(&m_n->gmch_m, &m_n->gmch_n);
+	m_n->link_m = pixel_clock;
+	m_n->link_n = link_clock;
+	cdv_intel_reduce_ratio(&m_n->link_m, &m_n->link_n);
+}
+
+void
+cdv_intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
+		 struct drm_display_mode *adjusted_mode)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	struct drm_mode_config *mode_config = &dev->mode_config;
+	struct drm_encoder *encoder;
+	struct psb_intel_crtc *intel_crtc = to_psb_intel_crtc(crtc);
+	int lane_count = 4, bpp = 24;
+	struct cdv_intel_dp_m_n m_n;
+	int pipe = intel_crtc->pipe;
+
+	/*
+	 * Find the lane count in the intel_encoder private
+	 */
+	list_for_each_entry(encoder, &mode_config->encoder_list, head) {
+		struct psb_intel_encoder *intel_encoder;
+		struct cdv_intel_dp *intel_dp;
+
+		if (encoder->crtc != crtc)
+			continue;
+
+		intel_encoder = to_psb_intel_encoder(encoder);
+		intel_dp = intel_encoder->dev_priv;
+		if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT) {
+			lane_count = intel_dp->lane_count;
+			break;
+		} else if (is_edp(intel_encoder)) {
+			lane_count = intel_dp->lane_count;
+			bpp = dev_priv->edp.bpp;
+			break;
+		}
+	}
+
+	/*
+	 * Compute the GMCH and link M/N ratios: the data ratio compares the
+	 * mode's payload (pixel clock times bytes per pixel) against the raw
+	 * link bandwidth (link clock times lane count).
+	 */
+	cdv_intel_dp_compute_m_n(bpp, lane_count,
+			     mode->clock, adjusted_mode->clock, &m_n);
+
+	REG_WRITE(PIPE_GMCH_DATA_M(pipe),
+		   ((m_n.tu - 1) << PIPE_GMCH_DATA_M_TU_SIZE_SHIFT) |
+		   m_n.gmch_m);
+	REG_WRITE(PIPE_GMCH_DATA_N(pipe), m_n.gmch_n);
+	REG_WRITE(PIPE_DP_LINK_M(pipe), m_n.link_m);
+	REG_WRITE(PIPE_DP_LINK_N(pipe), m_n.link_n);
+}
+
+static void
+cdv_intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
+		  struct drm_display_mode *adjusted_mode)
+{
+	struct psb_intel_encoder *intel_encoder = to_psb_intel_encoder(encoder);
+	struct drm_crtc *crtc = encoder->crtc;
+	struct psb_intel_crtc *intel_crtc = to_psb_intel_crtc(crtc);
+	struct cdv_intel_dp *intel_dp = intel_encoder->dev_priv;
+	struct drm_device *dev = encoder->dev;
+
+	intel_dp->DP = DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
+	intel_dp->DP |= intel_dp->color_range;
+
+	if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
+		intel_dp->DP |= DP_SYNC_HS_HIGH;
+	if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
+		intel_dp->DP |= DP_SYNC_VS_HIGH;
+
+	intel_dp->DP |= DP_LINK_TRAIN_OFF;
+
+	switch (intel_dp->lane_count) {
+	case 1:
+		intel_dp->DP |= DP_PORT_WIDTH_1;
+		break;
+	case 2:
+		intel_dp->DP |= DP_PORT_WIDTH_2;
+		break;
+	case 4:
+		intel_dp->DP |= DP_PORT_WIDTH_4;
+		break;
+	}
+	if (intel_dp->has_audio)
+		intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;
+
+	memset(intel_dp->link_configuration, 0, DP_LINK_CONFIGURATION_SIZE);
+	intel_dp->link_configuration[0] = intel_dp->link_bw;
+	intel_dp->link_configuration[1] = intel_dp->lane_count;
+
+	/*
+	 * Check for DPCD version > 1.1 and enhanced framing support
+	 */
+	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
+	    (intel_dp->dpcd[DP_MAX_LANE_COUNT] & DP_ENHANCED_FRAME_CAP)) {
+		intel_dp->link_configuration[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
+		intel_dp->DP |= DP_ENHANCED_FRAMING;
+	}
+
+	/* CPT DP's pipe select is decided in TRANS_DP_CTL */
+	if (intel_crtc->pipe == 1)
+		intel_dp->DP |= DP_PIPEB_SELECT;
+
+	REG_WRITE(intel_dp->output_reg, (intel_dp->DP | DP_PORT_EN));
+	DRM_DEBUG_KMS("DP expected reg is %x\n", intel_dp->DP);
+	if (is_edp(intel_encoder)) {
+		uint32_t pfit_control;
+		cdv_intel_edp_panel_on(intel_encoder);
+
+		if (mode->hdisplay != adjusted_mode->hdisplay ||
+			    mode->vdisplay != adjusted_mode->vdisplay)
+			pfit_control = PFIT_ENABLE;
+		else
+			pfit_control = 0;
+
+		pfit_control |= intel_crtc->pipe << PFIT_PIPE_SHIFT;
+
+		REG_WRITE(PFIT_CONTROL, pfit_control);
+	}
+}
+
+
+/* If the sink supports it, try to set the power state appropriately */
+static void cdv_intel_dp_sink_dpms(struct psb_intel_encoder *encoder, int mode)
+{
+	struct cdv_intel_dp *intel_dp = encoder->dev_priv;
+	int ret, i;
+
+	/* Should have a valid DPCD by this point */
+	if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
+		return;
+
+	if (mode != DRM_MODE_DPMS_ON) {
+		ret = cdv_intel_dp_aux_native_write_1(encoder, DP_SET_POWER,
+						  DP_SET_POWER_D3);
+		if (ret != 1)
+			DRM_DEBUG_DRIVER("failed to write sink power state\n");
+	} else {
+		/*
+		 * When turning on, retry the write a few times (1ms apart)
+		 * to give the sink time to wake up.
+		 */
+		for (i = 0; i < 3; i++) {
+			ret = cdv_intel_dp_aux_native_write_1(encoder,
+							  DP_SET_POWER,
+							  DP_SET_POWER_D0);
+			if (ret == 1)
+				break;
+			udelay(1000);
+		}
+	}
+}
+
+static void cdv_intel_dp_prepare(struct drm_encoder *encoder)
+{
+	struct psb_intel_encoder *intel_encoder = to_psb_intel_encoder(encoder);
+	int edp = is_edp(intel_encoder);
+
+	if (edp) {
+		cdv_intel_edp_backlight_off(intel_encoder);
+		cdv_intel_edp_panel_off(intel_encoder);
+		cdv_intel_edp_panel_vdd_on(intel_encoder);
+	}
+	/* Wake up the sink first */
+	cdv_intel_dp_sink_dpms(intel_encoder, DRM_MODE_DPMS_ON);
+	cdv_intel_dp_link_down(intel_encoder);
+	if (edp)
+		cdv_intel_edp_panel_vdd_off(intel_encoder);
+}
+
+static void cdv_intel_dp_commit(struct drm_encoder *encoder)
+{
+	struct psb_intel_encoder *intel_encoder = to_psb_intel_encoder(encoder);
+	int edp = is_edp(intel_encoder);
+
+	if (edp)
+		cdv_intel_edp_panel_on(intel_encoder);
+	cdv_intel_dp_start_link_train(intel_encoder);
+	cdv_intel_dp_complete_link_train(intel_encoder);
+	if (edp)
+		cdv_intel_edp_backlight_on(intel_encoder);
+}
+
+static void
+cdv_intel_dp_dpms(struct drm_encoder *encoder, int mode)
+{
+	struct psb_intel_encoder *intel_encoder = to_psb_intel_encoder(encoder);
+	struct cdv_intel_dp *intel_dp = intel_encoder->dev_priv;
+	struct drm_device *dev = encoder->dev;
+	uint32_t dp_reg = REG_READ(intel_dp->output_reg);
+	int edp = is_edp(intel_encoder);
+
+	if (mode != DRM_MODE_DPMS_ON) {
+		if (edp) {
+			cdv_intel_edp_backlight_off(intel_encoder);
+			cdv_intel_edp_panel_vdd_on(intel_encoder);
+		}
+		cdv_intel_dp_sink_dpms(intel_encoder, mode);
+		cdv_intel_dp_link_down(intel_encoder);
+		if (edp) {
+			cdv_intel_edp_panel_vdd_off(intel_encoder);
+			cdv_intel_edp_panel_off(intel_encoder);
+		}
+	} else {
+		if (edp)
+			cdv_intel_edp_panel_on(intel_encoder);
+		cdv_intel_dp_sink_dpms(intel_encoder, mode);
+		if (!(dp_reg & DP_PORT_EN)) {
+			cdv_intel_dp_start_link_train(intel_encoder);
+			cdv_intel_dp_complete_link_train(intel_encoder);
+		}
+		if (edp)
+			cdv_intel_edp_backlight_on(intel_encoder);
+	}
+}
+
+/*
+ * Native read with retry for link status and receiver capability reads for
+ * cases where the sink may still be asleep.
+ */
+static bool
+cdv_intel_dp_aux_native_read_retry(struct psb_intel_encoder *encoder, uint16_t address,
+			       uint8_t *recv, int recv_bytes)
+{
+	int ret, i;
+
+	/*
+	 * Sinks are *supposed* to come up within 1ms from an off state,
+	 * but we're also supposed to retry 3 times per the spec.
+	 */
+	for (i = 0; i < 3; i++) {
+		ret = cdv_intel_dp_aux_native_read(encoder, address, recv,
+					       recv_bytes);
+		if (ret == recv_bytes)
+			return true;
+		udelay(1000);
+	}
+
+	return false;
+}
+
+/*
+ * Fetch AUX CH registers 0x202 - 0x207 which contain
+ * link status information
+ */
+static bool
+cdv_intel_dp_get_link_status(struct psb_intel_encoder *encoder)
+{
+	struct cdv_intel_dp *intel_dp = encoder->dev_priv;
+	return cdv_intel_dp_aux_native_read_retry(encoder,
+					      DP_LANE0_1_STATUS,
+					      intel_dp->link_status,
+					      DP_LINK_STATUS_SIZE);
+}
+
+static uint8_t
+cdv_intel_dp_link_status(uint8_t link_status[DP_LINK_STATUS_SIZE],
+		     int r)
+{
+	return link_status[r - DP_LANE0_1_STATUS];
+}
+
+static uint8_t
+cdv_intel_get_adjust_request_voltage(uint8_t link_status[DP_LINK_STATUS_SIZE],
+				 int lane)
+{
+	int	    i = DP_ADJUST_REQUEST_LANE0_1 + (lane >> 1);
+	int	    s = ((lane & 1) ?
+			 DP_ADJUST_VOLTAGE_SWING_LANE1_SHIFT :
+			 DP_ADJUST_VOLTAGE_SWING_LANE0_SHIFT);
+	uint8_t l = cdv_intel_dp_link_status(link_status, i);
+
+	return ((l >> s) & 3) << DP_TRAIN_VOLTAGE_SWING_SHIFT;
+}
+
+static uint8_t
+cdv_intel_get_adjust_request_pre_emphasis(uint8_t link_status[DP_LINK_STATUS_SIZE],
+				      int lane)
+{
+	int	    i = DP_ADJUST_REQUEST_LANE0_1 + (lane >> 1);
+	int	    s = ((lane & 1) ?
+			 DP_ADJUST_PRE_EMPHASIS_LANE1_SHIFT :
+			 DP_ADJUST_PRE_EMPHASIS_LANE0_SHIFT);
+	uint8_t l = cdv_intel_dp_link_status(link_status, i);
+
+	return ((l >> s) & 3) << DP_TRAIN_PRE_EMPHASIS_SHIFT;
+}
+
+
+#if 0
+static char	*voltage_names[] = {
+	"0.4V", "0.6V", "0.8V", "1.2V"
+};
+static char	*pre_emph_names[] = {
+	"0dB", "3.5dB", "6dB", "9.5dB"
+};
+static char	*link_train_names[] = {
+	"pattern 1", "pattern 2", "idle", "off"
+};
+#endif
+
+#define CDV_DP_VOLTAGE_MAX	    DP_TRAIN_VOLTAGE_SWING_1200
+/*
+static uint8_t
+cdv_intel_dp_pre_emphasis_max(uint8_t voltage_swing)
+{
+	switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
+	case DP_TRAIN_VOLTAGE_SWING_400:
+		return DP_TRAIN_PRE_EMPHASIS_6;
+	case DP_TRAIN_VOLTAGE_SWING_600:
+		return DP_TRAIN_PRE_EMPHASIS_6;
+	case DP_TRAIN_VOLTAGE_SWING_800:
+		return DP_TRAIN_PRE_EMPHASIS_3_5;
+	case DP_TRAIN_VOLTAGE_SWING_1200:
+	default:
+		return DP_TRAIN_PRE_EMPHASIS_0;
+	}
+}
+*/
+static void
+cdv_intel_get_adjust_train(struct psb_intel_encoder *encoder)
+{
+	struct cdv_intel_dp *intel_dp = encoder->dev_priv;
+	uint8_t v = 0;
+	uint8_t p = 0;
+	int lane;
+
+	for (lane = 0; lane < intel_dp->lane_count; lane++) {
+		uint8_t this_v = cdv_intel_get_adjust_request_voltage(intel_dp->link_status, lane);
+		uint8_t this_p = cdv_intel_get_adjust_request_pre_emphasis(intel_dp->link_status, lane);
+
+		if (this_v > v)
+			v = this_v;
+		if (this_p > p)
+			p = this_p;
+	}
+	
+	if (v >= CDV_DP_VOLTAGE_MAX)
+		v = CDV_DP_VOLTAGE_MAX | DP_TRAIN_MAX_SWING_REACHED;
+
+	if (p == DP_TRAIN_PRE_EMPHASIS_MASK)
+		p |= DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
+		
+	for (lane = 0; lane < 4; lane++)
+		intel_dp->train_set[lane] = v | p;
+}
+
+
+static uint8_t
+cdv_intel_get_lane_status(uint8_t link_status[DP_LINK_STATUS_SIZE],
+		      int lane)
+{
+	int i = DP_LANE0_1_STATUS + (lane >> 1);
+	int s = (lane & 1) * 4;
+	uint8_t l = cdv_intel_dp_link_status(link_status, i);
+
+	return (l >> s) & 0xf;
+}
+
+/* Check for clock recovery is done on all channels */
+static bool
+cdv_intel_clock_recovery_ok(uint8_t link_status[DP_LINK_STATUS_SIZE], int lane_count)
+{
+	int lane;
+	uint8_t lane_status;
+
+	for (lane = 0; lane < lane_count; lane++) {
+		lane_status = cdv_intel_get_lane_status(link_status, lane);
+		if ((lane_status & DP_LANE_CR_DONE) == 0)
+			return false;
+	}
+	return true;
+}
+
+/* Check to see if channel eq is done on all channels */
+#define CHANNEL_EQ_BITS (DP_LANE_CR_DONE|\
+			 DP_LANE_CHANNEL_EQ_DONE|\
+			 DP_LANE_SYMBOL_LOCKED)
+static bool
+cdv_intel_channel_eq_ok(struct psb_intel_encoder *encoder)
+{
+	struct cdv_intel_dp *intel_dp = encoder->dev_priv;
+	uint8_t lane_align;
+	uint8_t lane_status;
+	int lane;
+
+	lane_align = cdv_intel_dp_link_status(intel_dp->link_status,
+					  DP_LANE_ALIGN_STATUS_UPDATED);
+	if ((lane_align & DP_INTERLANE_ALIGN_DONE) == 0)
+		return false;
+	for (lane = 0; lane < intel_dp->lane_count; lane++) {
+		lane_status = cdv_intel_get_lane_status(intel_dp->link_status, lane);
+		if ((lane_status & CHANNEL_EQ_BITS) != CHANNEL_EQ_BITS)
+			return false;
+	}
+	return true;
+}
+
+static bool
+cdv_intel_dp_set_link_train(struct psb_intel_encoder *encoder,
+			uint32_t dp_reg_value,
+			uint8_t dp_train_pat)
+{
+	struct drm_device *dev = encoder->base.dev;
+	struct cdv_intel_dp *intel_dp = encoder->dev_priv;
+	int ret;
+
+	REG_WRITE(intel_dp->output_reg, dp_reg_value);
+	REG_READ(intel_dp->output_reg);
+
+	ret = cdv_intel_dp_aux_native_write_1(encoder,
+				    DP_TRAINING_PATTERN_SET,
+				    dp_train_pat);
+
+	if (ret != 1) {
+		DRM_DEBUG_KMS("Failure in setting link pattern %x\n",
+				dp_train_pat);
+		return false;
+	}
+
+	return true;
+}
+
+
+static bool
+cdv_intel_dplink_set_level(struct psb_intel_encoder *encoder,
+			uint8_t dp_train_pat)
+{
+	struct cdv_intel_dp *intel_dp = encoder->dev_priv;
+	int ret;
+
+	ret = cdv_intel_dp_aux_native_write(encoder,
+					DP_TRAINING_LANE0_SET,
+					intel_dp->train_set,
+					intel_dp->lane_count);
+
+	if (ret != intel_dp->lane_count) {
+		DRM_DEBUG_KMS("Failure in setting level %d, lane_cnt= %d\n",
+				intel_dp->train_set[0], intel_dp->lane_count);
+		return false;
+	}
+	return true;
+}
+
+static void
+cdv_intel_dp_set_vswing_premph(struct psb_intel_encoder *encoder, uint8_t signal_level)
+{
+	struct drm_device *dev = encoder->base.dev;
+	struct cdv_intel_dp *intel_dp = encoder->dev_priv;
+	struct ddi_regoff *ddi_reg;
+	int vswing, premph, index;
+
+	if (intel_dp->output_reg == DP_B)
+		ddi_reg = &ddi_DP_train_table[0];
+	else
+		ddi_reg = &ddi_DP_train_table[1];
+
+	vswing = (signal_level & DP_TRAIN_VOLTAGE_SWING_MASK);
+	premph = ((signal_level & DP_TRAIN_PRE_EMPHASIS_MASK)) >>
+				DP_TRAIN_PRE_EMPHASIS_SHIFT;
+
+	if (vswing + premph > 3)
+		return;
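+	/* With CDV_FAST_LINK_TRAIN the DPIO programming below is skipped */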
+#ifdef CDV_FAST_LINK_TRAIN
+	return;
+#endif
+	DRM_DEBUG_KMS("Programming vswing %d, pre-emphasis %d\n",
+		      vswing, premph);
+	cdv_sb_reset(dev);
+	/* ;Swing voltage programming
+        ;gfx_dpio_set_reg(0xc058, 0x0505313A) */
+	cdv_sb_write(dev, ddi_reg->VSwing5, 0x0505313A);
+
+	/* ;gfx_dpio_set_reg(0x8154, 0x43406055) */
+	cdv_sb_write(dev, ddi_reg->VSwing1, 0x43406055);
+
+	/* ;gfx_dpio_set_reg(0x8148, 0x55338954)
+	 * The VSwing_PreEmph table is also considered based on the vswing/premp
+	 */
+	index = (vswing + premph) * 2;
+	if (premph == 1 && vswing == 1)
+		cdv_sb_write(dev, ddi_reg->VSwing2, 0x55738954);
+	else
+		cdv_sb_write(dev, ddi_reg->VSwing2, dp_vswing_premph_table[index]);
+
+	/* ;gfx_dpio_set_reg(0x814c, 0x40802040) */
+	if ((vswing + premph) == DP_TRAIN_VOLTAGE_SWING_1200)
+		cdv_sb_write(dev, ddi_reg->VSwing3, 0x70802040);
+	else
+		cdv_sb_write(dev, ddi_reg->VSwing3, 0x40802040);
+
+	/* ;gfx_dpio_set_reg(0x8150, 0x2b405555) */
+	/* cdv_sb_write(dev, ddi_reg->VSwing4, 0x2b405555); */
+
+	/* ;gfx_dpio_set_reg(0x8154, 0xc3406055) */
+	cdv_sb_write(dev, ddi_reg->VSwing1, 0xc3406055);
+
+	/* ;Pre emphasis programming
+	 * ;gfx_dpio_set_reg(0xc02c, 0x1f030040)
+	 */
+	cdv_sb_write(dev, ddi_reg->PreEmph1, 0x1f030040);
+
+	/* ;gfx_dpio_set_reg(0x8124, 0x00004000) */
+	index = 2 * premph + 1;
+	cdv_sb_write(dev, ddi_reg->PreEmph2, dp_vswing_premph_table[index]);
+}
+
+
+/* Enable corresponding port and start training pattern 1 */
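+/*
+ * Clock-recovery phase: drive training pattern 1 and keep adjusting the
+ * vswing/pre-emphasis levels per the sink's requests until every active
+ * lane reports CR_DONE, the maximum swing is reached, or the same voltage
+ * has been tried five times.
+ */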
+static void
+cdv_intel_dp_start_link_train(struct psb_intel_encoder *encoder)
+{
+	struct drm_device *dev = encoder->base.dev;
+	struct cdv_intel_dp *intel_dp = encoder->dev_priv;
+	int i;
+	uint8_t voltage;
+	bool clock_recovery = false;
+	int tries;
+	u32 reg;
+	uint32_t DP = intel_dp->DP;
+
+	DP |= DP_PORT_EN;
+	DP &= ~DP_LINK_TRAIN_MASK;
+
+	reg = DP;
+	reg |= DP_LINK_TRAIN_PAT_1;
+	/* Enable output, wait for it to become active */
+	REG_WRITE(intel_dp->output_reg, reg);
+	REG_READ(intel_dp->output_reg);
+	psb_intel_wait_for_vblank(dev);
+
+	DRM_DEBUG_KMS("Link config\n");
+	/* Write the link configuration data */
+	cdv_intel_dp_aux_native_write(encoder, DP_LINK_BW_SET,
+				  intel_dp->link_configuration,
+				  2);
+
+	memset(intel_dp->train_set, 0, 4);
+	voltage = 0;
+	tries = 0;
+	clock_recovery = false;
+
+	DRM_DEBUG_KMS("Start train\n");
+	reg = DP | DP_LINK_TRAIN_PAT_1;
+
+	for (;;) {
+		/* Use intel_dp->train_set[0] to set the voltage and pre emphasis values */
+		DRM_DEBUG_KMS("DP Link Train Set %x, Link_config %x, %x\n",
+				intel_dp->train_set[0],
+				intel_dp->link_configuration[0],
+				intel_dp->link_configuration[1]);
+
+		if (!cdv_intel_dp_set_link_train(encoder, reg, DP_TRAINING_PATTERN_1)) {
+			DRM_DEBUG_KMS("Failure in aux-transfer setting pattern 1\n");
+		}
+		cdv_intel_dp_set_vswing_premph(encoder, intel_dp->train_set[0]);
+		/* Set training pattern 1 */
+
+		cdv_intel_dplink_set_level(encoder, DP_TRAINING_PATTERN_1);
+
+		udelay(200);
+		if (!cdv_intel_dp_get_link_status(encoder))
+			break;
+
+		DRM_DEBUG_KMS("DP Link status %x, %x, %x, %x, %x, %x\n",
+				intel_dp->link_status[0], intel_dp->link_status[1], intel_dp->link_status[2],
+				intel_dp->link_status[3], intel_dp->link_status[4], intel_dp->link_status[5]);
+
+		if (cdv_intel_clock_recovery_ok(intel_dp->link_status, intel_dp->lane_count)) {
+			DRM_DEBUG_KMS("PT1 train is done\n");
+			clock_recovery = true;
+			break;
+		}
+
+		/* Check to see if we've tried the max voltage */
+		for (i = 0; i < intel_dp->lane_count; i++)
+			if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
+				break;
+		if (i == intel_dp->lane_count)
+			break;
+
+		/* Check to see if we've tried the same voltage 5 times */
+		if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) {
+			++tries;
+			if (tries == 5)
+				break;
+		} else
+			tries = 0;
+		voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
+
+		/* Compute new intel_dp->train_set as requested by target */
+		cdv_intel_get_adjust_train(encoder);
+
+	}
+
+	if (!clock_recovery)
+		DRM_DEBUG_KMS("failure in DP pattern 1 training, train set %x\n",
+			      intel_dp->train_set[0]);
+
+	intel_dp->DP = DP;
+}
+
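+/*
+ * Channel-equalization phase: drive training pattern 2 until every lane
+ * reports symbol lock, EQ done and inter-lane alignment; drop back to
+ * clock recovery if CR is lost, giving up after several CR restarts.
+ */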
+static void
+cdv_intel_dp_complete_link_train(struct psb_intel_encoder *encoder)
+{
+	struct drm_device *dev = encoder->base.dev;
+	struct cdv_intel_dp *intel_dp = encoder->dev_priv;
+	bool channel_eq = false;
+	int tries, cr_tries;
+	u32 reg;
+	uint32_t DP = intel_dp->DP;
+
+	/* channel equalization */
+	tries = 0;
+	cr_tries = 0;
+	channel_eq = false;
+
+	DRM_DEBUG_KMS("\n");
+	reg = DP | DP_LINK_TRAIN_PAT_2;
+
+	for (;;) {
+		DRM_DEBUG_KMS("DP Link Train Set %x, Link_config %x, %x\n",
+				intel_dp->train_set[0],
+				intel_dp->link_configuration[0],
+				intel_dp->link_configuration[1]);
+
+		/* channel eq pattern */
+
+		if (!cdv_intel_dp_set_link_train(encoder, reg,
+					     DP_TRAINING_PATTERN_2)) {
+			DRM_DEBUG_KMS("Failure in aux-transfer setting pattern 2\n");
+		}
+		/* Use intel_dp->train_set[0] to set the voltage and pre emphasis values */
+
+		if (cr_tries > 5) {
+			DRM_ERROR("failed to train DP, aborting\n");
+			cdv_intel_dp_link_down(encoder);
+			break;
+		}
+
+		cdv_intel_dp_set_vswing_premph(encoder, intel_dp->train_set[0]);
+
+		cdv_intel_dplink_set_level(encoder, DP_TRAINING_PATTERN_2);
+
+		udelay(1000);
+		if (!cdv_intel_dp_get_link_status(encoder))
+			break;
+
+		DRM_DEBUG_KMS("DP Link status %x, %x, %x, %x, %x, %x\n",
+				intel_dp->link_status[0], intel_dp->link_status[1], intel_dp->link_status[2],
+				intel_dp->link_status[3], intel_dp->link_status[4], intel_dp->link_status[5]);
+
+		/* Make sure clock is still ok */
+		if (!cdv_intel_clock_recovery_ok(intel_dp->link_status, intel_dp->lane_count)) {
+			cdv_intel_dp_start_link_train(encoder);
+			cr_tries++;
+			continue;
+		}
+
+		if (cdv_intel_channel_eq_ok(encoder)) {
+			DRM_DEBUG_KMS("PT2 train is done\n");
+			channel_eq = true;
+			break;
+		}
+
+		/* Try 5 times, then try clock recovery if that fails */
+		if (tries > 5) {
+			cdv_intel_dp_link_down(encoder);
+			cdv_intel_dp_start_link_train(encoder);
+			tries = 0;
+			cr_tries++;
+			continue;
+		}
+
+		/* Compute new intel_dp->train_set as requested by target */
+		cdv_intel_get_adjust_train(encoder);
+		++tries;
+
+	}
+
+	reg = DP | DP_LINK_TRAIN_OFF;
+
+	REG_WRITE(intel_dp->output_reg, reg);
+	REG_READ(intel_dp->output_reg);
+	cdv_intel_dp_aux_native_write_1(encoder,
+				    DP_TRAINING_PATTERN_SET, DP_TRAINING_PATTERN_DISABLE);
+}
+
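+/* Switch the port to the idle pattern, wait a frame, then disable it */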
+static void
+cdv_intel_dp_link_down(struct psb_intel_encoder *encoder)
+{
+	struct drm_device *dev = encoder->base.dev;
+	struct cdv_intel_dp *intel_dp = encoder->dev_priv;
+	uint32_t DP = intel_dp->DP;
+
+	if ((REG_READ(intel_dp->output_reg) & DP_PORT_EN) == 0)
+		return;
+
+	DRM_DEBUG_KMS("\n");
+
+	DP &= ~DP_LINK_TRAIN_MASK;
+	REG_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE);
+	REG_READ(intel_dp->output_reg);
+
+	msleep(17);
+
+	REG_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
+	REG_READ(intel_dp->output_reg);
+}
+
+static enum drm_connector_status
+cdv_dp_detect(struct psb_intel_encoder *encoder)
+{
+	struct cdv_intel_dp *intel_dp = encoder->dev_priv;
+	enum drm_connector_status status;
+
+	status = connector_status_disconnected;
+	if (cdv_intel_dp_aux_native_read(encoder, 0x000, intel_dp->dpcd,
+					 sizeof(intel_dp->dpcd)) == sizeof(intel_dp->dpcd)) {
+		if (intel_dp->dpcd[DP_DPCD_REV] != 0)
+			status = connector_status_connected;
+	}
+	if (status == connector_status_connected)
+		DRM_DEBUG_KMS("DPCD: Rev=%x LN_Rate=%x LN_CNT=%x LN_DOWNSP=%x\n",
+			intel_dp->dpcd[0], intel_dp->dpcd[1],
+			intel_dp->dpcd[2], intel_dp->dpcd[3]);
+	return status;
+}
+
+/**
+ * cdv_intel_dp_detect - probe for a DP/eDP sink
+ *
+ * Reads the sink's DPCD over the AUX channel (with panel VDD forced on
+ * for eDP) and reports the connection status. Also refreshes the cached
+ * audio capability from the EDID.
+ */
+static enum drm_connector_status
+cdv_intel_dp_detect(struct drm_connector *connector, bool force)
+{
+	struct psb_intel_encoder *encoder = psb_intel_attached_encoder(connector);
+	struct cdv_intel_dp *intel_dp = encoder->dev_priv;
+	enum drm_connector_status status;
+	struct edid *edid = NULL;
+	int edp = is_edp(encoder);
+
+	intel_dp->has_audio = false;
+
+	if (edp)
+		cdv_intel_edp_panel_vdd_on(encoder);
+	status = cdv_dp_detect(encoder);
+	if (status != connector_status_connected) {
+		if (edp)
+			cdv_intel_edp_panel_vdd_off(encoder);
+		return status;
+	}
+
+	if (intel_dp->force_audio) {
+		intel_dp->has_audio = intel_dp->force_audio > 0;
+	} else {
+		edid = drm_get_edid(connector, &intel_dp->adapter);
+		if (edid) {
+			intel_dp->has_audio = drm_detect_monitor_audio(edid);
+			kfree(edid);
+		}
+	}
+	if (edp)
+		cdv_intel_edp_panel_vdd_off(encoder);
+
+	return connector_status_connected;
+}
+
+static int cdv_intel_dp_get_modes(struct drm_connector *connector)
+{
+	struct psb_intel_encoder *intel_encoder = psb_intel_attached_encoder(connector);
+	struct cdv_intel_dp *intel_dp = intel_encoder->dev_priv;
+	struct edid *edid = NULL;
+	int ret = 0;
+	int edp = is_edp(intel_encoder);
+
+
+	edid = drm_get_edid(connector, &intel_dp->adapter);
+	if (edid) {
+		drm_mode_connector_update_edid_property(connector, edid);
+		ret = drm_add_edid_modes(connector, edid);
+		kfree(edid);
+	}
+
+	if (is_edp(intel_encoder)) {
+		struct drm_device *dev = connector->dev;
+		struct drm_psb_private *dev_priv = dev->dev_private;
+		
+		cdv_intel_edp_panel_vdd_off(intel_encoder);
+		if (ret) {
+			if (edp && !intel_dp->panel_fixed_mode) {
+				struct drm_display_mode *newmode;
+				list_for_each_entry(newmode, &connector->probed_modes,
+					    head) {
+					if (newmode->type & DRM_MODE_TYPE_PREFERRED) {
+						intel_dp->panel_fixed_mode =
+							drm_mode_duplicate(dev, newmode);
+						break;
+					}
+				}
+			}
+
+			return ret;
+		}
+		if (!intel_dp->panel_fixed_mode && dev_priv->lfp_lvds_vbt_mode) {
+			intel_dp->panel_fixed_mode =
+				drm_mode_duplicate(dev, dev_priv->lfp_lvds_vbt_mode);
+			if (intel_dp->panel_fixed_mode) {
+				intel_dp->panel_fixed_mode->type |=
+					DRM_MODE_TYPE_PREFERRED;
+			}
+		}
+		if (intel_dp->panel_fixed_mode != NULL) {
+			struct drm_display_mode *mode;
+			mode = drm_mode_duplicate(dev, intel_dp->panel_fixed_mode);
+			drm_mode_probed_add(connector, mode);
+			return 1;
+		}
+	}
+
+	return ret;
+}
+
+static bool
+cdv_intel_dp_detect_audio(struct drm_connector *connector)
+{
+	struct psb_intel_encoder *encoder = psb_intel_attached_encoder(connector);
+	struct cdv_intel_dp *intel_dp = encoder->dev_priv;
+	struct edid *edid;
+	bool has_audio = false;
+	int edp = is_edp(encoder);
+
+	if (edp)
+		cdv_intel_edp_panel_vdd_on(encoder);
+
+	edid = drm_get_edid(connector, &intel_dp->adapter);
+	if (edid) {
+		has_audio = drm_detect_monitor_audio(edid);
+		kfree(edid);
+	}
+	if (edp)
+		cdv_intel_edp_panel_vdd_off(encoder);
+
+	return has_audio;
+}
+
+static int
+cdv_intel_dp_set_property(struct drm_connector *connector,
+		      struct drm_property *property,
+		      uint64_t val)
+{
+	struct drm_psb_private *dev_priv = connector->dev->dev_private;
+	struct psb_intel_encoder *encoder = psb_intel_attached_encoder(connector);
+	struct cdv_intel_dp *intel_dp = encoder->dev_priv;
+	int ret;
+
+	ret = drm_connector_property_set_value(connector, property, val);
+	if (ret)
+		return ret;
+
+	if (property == dev_priv->force_audio_property) {
+		int i = val;
+		bool has_audio;
+
+		if (i == intel_dp->force_audio)
+			return 0;
+
+		intel_dp->force_audio = i;
+
+		if (i == 0)
+			has_audio = cdv_intel_dp_detect_audio(connector);
+		else
+			has_audio = i > 0;
+
+		if (has_audio == intel_dp->has_audio)
+			return 0;
+
+		intel_dp->has_audio = has_audio;
+		goto done;
+	}
+
+	if (property == dev_priv->broadcast_rgb_property) {
+		if (val == !!intel_dp->color_range)
+			return 0;
+
+		intel_dp->color_range = val ? DP_COLOR_RANGE_16_235 : 0;
+		goto done;
+	}
+
+	return -EINVAL;
+
+done:
+	if (encoder->base.crtc) {
+		struct drm_crtc *crtc = encoder->base.crtc;
+		drm_crtc_helper_set_mode(crtc, &crtc->mode,
+					 crtc->x, crtc->y,
+					 crtc->fb);
+	}
+
+	return 0;
+}
+
+static void
+cdv_intel_dp_destroy(struct drm_connector *connector)
+{
+	struct psb_intel_encoder *psb_intel_encoder =
+					psb_intel_attached_encoder(connector);
+	struct cdv_intel_dp *intel_dp = psb_intel_encoder->dev_priv;
+
+	if (is_edp(psb_intel_encoder)) {
+		/* cdv_intel_panel_destroy_backlight(connector->dev); */
+		if (intel_dp->panel_fixed_mode) {
+			kfree(intel_dp->panel_fixed_mode);
+			intel_dp->panel_fixed_mode = NULL;
+		}
+	}
+	i2c_del_adapter(&intel_dp->adapter);
+	drm_sysfs_connector_remove(connector);
+	drm_connector_cleanup(connector);
+	kfree(connector);
+}
+
+static void cdv_intel_dp_encoder_destroy(struct drm_encoder *encoder)
+{
+	drm_encoder_cleanup(encoder);
+}
+
+static const struct drm_encoder_helper_funcs cdv_intel_dp_helper_funcs = {
+	.dpms = cdv_intel_dp_dpms,
+	.mode_fixup = cdv_intel_dp_mode_fixup,
+	.prepare = cdv_intel_dp_prepare,
+	.mode_set = cdv_intel_dp_mode_set,
+	.commit = cdv_intel_dp_commit,
+};
+
+static const struct drm_connector_funcs cdv_intel_dp_connector_funcs = {
+	.dpms = drm_helper_connector_dpms,
+	.detect = cdv_intel_dp_detect,
+	.fill_modes = drm_helper_probe_single_connector_modes,
+	.set_property = cdv_intel_dp_set_property,
+	.destroy = cdv_intel_dp_destroy,
+};
+
+static const struct drm_connector_helper_funcs cdv_intel_dp_connector_helper_funcs = {
+	.get_modes = cdv_intel_dp_get_modes,
+	.mode_valid = cdv_intel_dp_mode_valid,
+	.best_encoder = psb_intel_best_encoder,
+};
+
+static const struct drm_encoder_funcs cdv_intel_dp_enc_funcs = {
+	.destroy = cdv_intel_dp_encoder_destroy,
+};
+
+
+static void cdv_intel_dp_add_properties(struct drm_connector *connector)
+{
+	cdv_intel_attach_force_audio_property(connector);
+	cdv_intel_attach_broadcast_rgb_property(connector);
+}
+
+/* Check the VBT to see whether the eDP is on the DP-C port */
+static bool cdv_intel_dpc_is_edp(struct drm_device *dev)
+{
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	struct child_device_config *p_child;
+	int i;
+
+	if (!dev_priv->child_dev_num)
+		return false;
+
+	for (i = 0; i < dev_priv->child_dev_num; i++) {
+		p_child = dev_priv->child_dev + i;
+
+		if (p_child->dvo_port == PORT_IDPC &&
+		    p_child->device_type == DEVICE_TYPE_eDP)
+			return true;
+	}
+	return false;
+}
+
+/* Cedarview display clock gating
+
+   We need this disabled to get correct behaviour while enabling
+   DP/eDP. TODO - investigate if we can turn it back to normality
+   after enabling */
+static void cdv_disable_intel_clock_gating(struct drm_device *dev)
+{
+	u32 reg_value;
+	reg_value = REG_READ(DSPCLK_GATE_D);
+
+	reg_value |= (DPUNIT_PIPEB_GATE_DISABLE |
+			DPUNIT_PIPEA_GATE_DISABLE |
+			DPCUNIT_CLOCK_GATE_DISABLE |
+			DPLSUNIT_CLOCK_GATE_DISABLE |
+			DPOUNIT_CLOCK_GATE_DISABLE |
+		 	DPIOUNIT_CLOCK_GATE_DISABLE);	
+
+	REG_WRITE(DSPCLK_GATE_D, reg_value);
+
+	udelay(500);		
+}
+
+void
+cdv_intel_dp_init(struct drm_device *dev, struct psb_intel_mode_device *mode_dev, int output_reg)
+{
+	struct psb_intel_encoder *psb_intel_encoder;
+	struct psb_intel_connector *psb_intel_connector;
+	struct drm_connector *connector;
+	struct drm_encoder *encoder;
+	struct cdv_intel_dp *intel_dp;
+	const char *name = NULL;
+	int type = DRM_MODE_CONNECTOR_DisplayPort;
+
+	psb_intel_encoder = kzalloc(sizeof(struct psb_intel_encoder), GFP_KERNEL);
+	if (!psb_intel_encoder)
+		return;
+	psb_intel_connector = kzalloc(sizeof(struct psb_intel_connector), GFP_KERNEL);
+	if (!psb_intel_connector)
+		goto err_connector;
+	intel_dp = kzalloc(sizeof(struct cdv_intel_dp), GFP_KERNEL);
+	if (!intel_dp)
+		goto err_priv;
+
+	if ((output_reg == DP_C) && cdv_intel_dpc_is_edp(dev))
+		type = DRM_MODE_CONNECTOR_eDP;
+
+	connector = &psb_intel_connector->base;
+	encoder = &psb_intel_encoder->base;
+
+	drm_connector_init(dev, connector, &cdv_intel_dp_connector_funcs, type);
+	drm_encoder_init(dev, encoder, &cdv_intel_dp_enc_funcs, DRM_MODE_ENCODER_TMDS);
+
+	psb_intel_connector_attach_encoder(psb_intel_connector, psb_intel_encoder);
+
+	if (type == DRM_MODE_CONNECTOR_DisplayPort)
+		psb_intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
+	else
+		psb_intel_encoder->type = INTEL_OUTPUT_EDP;
+
+	psb_intel_encoder->dev_priv = intel_dp;
+	intel_dp->encoder = psb_intel_encoder;
+	intel_dp->output_reg = output_reg;
+	
+	drm_encoder_helper_add(encoder, &cdv_intel_dp_helper_funcs);
+	drm_connector_helper_add(connector, &cdv_intel_dp_connector_helper_funcs);
+
+	connector->polled = DRM_CONNECTOR_POLL_HPD;
+	connector->interlace_allowed = false;
+	connector->doublescan_allowed = false;
+
+	drm_sysfs_connector_add(connector);
+
+	/* Set up the DDC bus. */
+	switch (output_reg) {
+	case DP_B:
+		name = "DPDDC-B";
+		psb_intel_encoder->ddi_select = (DP_MASK | DDI0_SELECT);
+		break;
+	case DP_C:
+		name = "DPDDC-C";
+		psb_intel_encoder->ddi_select = (DP_MASK | DDI1_SELECT);
+		break;
+	}
+
+	cdv_disable_intel_clock_gating(dev);
+
+	cdv_intel_dp_i2c_init(psb_intel_connector, psb_intel_encoder, name);
+	/* FIXME: check cdv_intel_dp_i2c_init() for failure */
+	cdv_intel_dp_add_properties(connector);
+
+	if (is_edp(psb_intel_encoder)) {
+		int ret;
+		struct edp_power_seq cur;
+		u32 pp_on, pp_off, pp_div;
+		u32 pwm_ctrl;
+
+		pp_on = REG_READ(PP_CONTROL);
+		pp_on &= ~PANEL_UNLOCK_MASK;
+		pp_on |= PANEL_UNLOCK_REGS;
+
+		REG_WRITE(PP_CONTROL, pp_on);
+
+		pwm_ctrl = REG_READ(BLC_PWM_CTL2);
+		pwm_ctrl |= PWM_PIPE_B;
+		REG_WRITE(BLC_PWM_CTL2, pwm_ctrl);
+
+		pp_on = REG_READ(PP_ON_DELAYS);
+		pp_off = REG_READ(PP_OFF_DELAYS);
+		pp_div = REG_READ(PP_DIVISOR);
+
+		/* Pull timing values out of registers */
+		cur.t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >>
+				PANEL_POWER_UP_DELAY_SHIFT;
+		cur.t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >>
+				PANEL_LIGHT_ON_DELAY_SHIFT;
+		cur.t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >>
+				PANEL_LIGHT_OFF_DELAY_SHIFT;
+		cur.t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >>
+				PANEL_POWER_DOWN_DELAY_SHIFT;
+		cur.t11_t12 = (pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >>
+				PANEL_POWER_CYCLE_DELAY_SHIFT;
+
+		DRM_DEBUG_KMS("cur t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
+			      cur.t1_t3, cur.t8, cur.t9, cur.t10, cur.t11_t12);
+
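+		/*
+		 * The register fields appear to count in 100us units
+		 * (t11_t12 in 100ms units); the driver keeps its delays
+		 * in ms, hence the conversions below.
+		 */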
+		intel_dp->panel_power_up_delay = cur.t1_t3 / 10;
+		intel_dp->backlight_on_delay = cur.t8 / 10;
+		intel_dp->backlight_off_delay = cur.t9 / 10;
+		intel_dp->panel_power_down_delay = cur.t10 / 10;
+		intel_dp->panel_power_cycle_delay = (cur.t11_t12 - 1) * 100;
+
+		DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n",
+			      intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay,
+			      intel_dp->panel_power_cycle_delay);
+
+		DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
+			      intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);
+
+		cdv_intel_edp_panel_vdd_on(psb_intel_encoder);
+		ret = cdv_intel_dp_aux_native_read(psb_intel_encoder, DP_DPCD_REV,
+					       intel_dp->dpcd,
+					       sizeof(intel_dp->dpcd));
+		cdv_intel_edp_panel_vdd_off(psb_intel_encoder);
+		if (ret == 0) {
+			/* if this fails, presume the device is a ghost */
+			DRM_INFO("failed to retrieve link info, disabling eDP\n");
+			cdv_intel_dp_encoder_destroy(encoder);
+			cdv_intel_dp_destroy(connector);
+			goto err_priv;
+		} else {
+			DRM_DEBUG_KMS("DPCD: Rev=%x LN_Rate=%x LN_CNT=%x LN_DOWNSP=%x\n",
+				      intel_dp->dpcd[0], intel_dp->dpcd[1],
+				      intel_dp->dpcd[2], intel_dp->dpcd[3]);
+		}
+		/* The CDV reference driver moves panel backlight setup into the
+		   displays that have a backlight: this is a good idea and one
+		   we should probably adopt, however we need to migrate all the
+		   drivers before we can do that */
+		/* cdv_intel_panel_setup_backlight(dev); */
+	}
+	return;
+
+err_priv:
+	kfree(psb_intel_connector);
+err_connector:
+	kfree(psb_intel_encoder);
+}
diff --git a/drivers/gpu/drm/gma500/cdv_intel_hdmi.c b/drivers/gpu/drm/gma500/cdv_intel_hdmi.c
index a86f87b..b1b77bb 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_hdmi.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_hdmi.c
@@ -157,8 +157,6 @@
 			hdmi_priv->has_hdmi_audio =
 						drm_detect_monitor_audio(edid);
 		}
-
-		psb_intel_connector->base.display_info.raw_edid = NULL;
 		kfree(edid);
 	}
 	return status;
@@ -352,9 +350,11 @@
 	switch (reg) {
 	case SDVOB:
 		ddc_bus = GPIOE;
+		psb_intel_encoder->ddi_select = DDI0_SELECT;
 		break;
 	case SDVOC:
 		ddc_bus = GPIOD;
+		psb_intel_encoder->ddi_select = DDI1_SELECT;
 		break;
 	default:
 		DRM_ERROR("unknown reg 0x%x for HDMI\n", reg);
diff --git a/drivers/gpu/drm/gma500/cdv_intel_lvds.c b/drivers/gpu/drm/gma500/cdv_intel_lvds.c
index c7f9468..b362dd3 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_lvds.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_lvds.c
@@ -506,16 +506,8 @@
 							property,
 							value))
 			return -1;
-		else {
-#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
-			struct drm_psb_private *dev_priv =
-						encoder->dev->dev_private;
-			struct backlight_device *bd =
-						dev_priv->backlight_device;
-			bd->props.brightness = value;
-			backlight_update_status(bd);
-#endif
-		}
+		else
+			gma_backlight_set(encoder->dev, value);
 	} else if (!strcmp(property->name, "DPMS") && encoder) {
 		struct drm_encoder_helper_funcs *helpers =
 					encoder->helper_private;
diff --git a/drivers/gpu/drm/gma500/framebuffer.c b/drivers/gpu/drm/gma500/framebuffer.c
index 5732b57..884ba73 100644
--- a/drivers/gpu/drm/gma500/framebuffer.c
+++ b/drivers/gpu/drm/gma500/framebuffer.c
@@ -764,6 +764,13 @@
 		        crtc_mask = dev_priv->ops->hdmi_mask;
 			clone_mask = (1 << INTEL_OUTPUT_HDMI);
 			break;
+		case INTEL_OUTPUT_DISPLAYPORT:
+			crtc_mask = (1 << 0) | (1 << 1);
+			clone_mask = (1 << INTEL_OUTPUT_DISPLAYPORT);
+			break;
+		case INTEL_OUTPUT_EDP:
+			crtc_mask = (1 << 1);
+			clone_mask = (1 << INTEL_OUTPUT_EDP);
+			break;
 		}
 		encoder->possible_crtcs = crtc_mask;
 		encoder->possible_clones =
diff --git a/drivers/gpu/drm/gma500/gem.c b/drivers/gpu/drm/gma500/gem.c
index fc7d144..df20546 100644
--- a/drivers/gpu/drm/gma500/gem.c
+++ b/drivers/gpu/drm/gma500/gem.c
@@ -36,7 +36,12 @@
 void psb_gem_free_object(struct drm_gem_object *obj)
 {
 	struct gtt_range *gtt = container_of(obj, struct gtt_range, gem);
-	drm_gem_object_release_wrap(obj);
+
+	/* Remove the list map if one is present */
+	if (obj->map_list.map)
+		drm_gem_free_mmap_offset(obj);
+	drm_gem_object_release(obj);
+
 	/* This must occur last as it frees up the memory of the GEM object */
 	psb_gtt_free_range(obj->dev, gtt);
 }
@@ -77,7 +82,7 @@
 
 	/* Make it mmapable */
 	if (!obj->map_list.map) {
-		ret = gem_create_mmap_offset(obj);
+		ret = drm_gem_create_mmap_offset(obj);
 		if (ret)
 			goto out;
 	}
diff --git a/drivers/gpu/drm/gma500/gem_glue.c b/drivers/gpu/drm/gma500/gem_glue.c
deleted file mode 100644
index 3c17634..0000000
--- a/drivers/gpu/drm/gma500/gem_glue.c
+++ /dev/null
@@ -1,90 +0,0 @@
-/**************************************************************************
- * Copyright (c) 2011, Intel Corporation.
- * All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
- *
- **************************************************************************/
-
-#include <drm/drmP.h>
-#include <drm/drm.h>
-#include "gem_glue.h"
-
-void drm_gem_object_release_wrap(struct drm_gem_object *obj)
-{
-	/* Remove the list map if one is present */
-	if (obj->map_list.map) {
-		struct drm_gem_mm *mm = obj->dev->mm_private;
-		struct drm_map_list *list = &obj->map_list;
-		drm_ht_remove_item(&mm->offset_hash, &list->hash);
-		drm_mm_put_block(list->file_offset_node);
-		kfree(list->map);
-		list->map = NULL;
-	}
-	drm_gem_object_release(obj);
-}
-
-/**
- *	gem_create_mmap_offset		-	invent an mmap offset
- *	@obj: our object
- *
- *	Standard implementation of offset generation for mmap as is
- *	duplicated in several drivers. This belongs in GEM.
- */
-int gem_create_mmap_offset(struct drm_gem_object *obj)
-{
-	struct drm_device *dev = obj->dev;
-	struct drm_gem_mm *mm = dev->mm_private;
-	struct drm_map_list *list;
-	struct drm_local_map *map;
-	int ret;
-
-	list = &obj->map_list;
-	list->map = kzalloc(sizeof(struct drm_map_list), GFP_KERNEL);
-	if (list->map == NULL)
-		return -ENOMEM;
-	map = list->map;
-	map->type = _DRM_GEM;
-	map->size = obj->size;
-	map->handle = obj;
-
-	list->file_offset_node = drm_mm_search_free(&mm->offset_manager,
-					obj->size / PAGE_SIZE, 0, 0);
-	if (!list->file_offset_node) {
-		dev_err(dev->dev, "failed to allocate offset for bo %d\n",
-								obj->name);
-		ret = -ENOSPC;
-		goto free_it;
-	}
-	list->file_offset_node = drm_mm_get_block(list->file_offset_node,
-					obj->size / PAGE_SIZE, 0);
-	if (!list->file_offset_node) {
-		ret = -ENOMEM;
-		goto free_it;
-	}
-	list->hash.key = list->file_offset_node->start;
-	ret = drm_ht_insert_item(&mm->offset_hash, &list->hash);
-	if (ret) {
-		dev_err(dev->dev, "failed to add to map hash\n");
-		goto free_mm;
-	}
-	return 0;
-
-free_mm:
-	drm_mm_put_block(list->file_offset_node);
-free_it:
-	kfree(list->map);
-	list->map = NULL;
-	return ret;
-}
diff --git a/drivers/gpu/drm/gma500/gem_glue.h b/drivers/gpu/drm/gma500/gem_glue.h
deleted file mode 100644
index ce5ce30..0000000
--- a/drivers/gpu/drm/gma500/gem_glue.h
+++ /dev/null
@@ -1,2 +0,0 @@
-extern void drm_gem_object_release_wrap(struct drm_gem_object *obj);
-extern int gem_create_mmap_offset(struct drm_gem_object *obj);
diff --git a/drivers/gpu/drm/gma500/intel_bios.c b/drivers/gpu/drm/gma500/intel_bios.c
index 8d7caf0..4fb79cf 100644
--- a/drivers/gpu/drm/gma500/intel_bios.c
+++ b/drivers/gpu/drm/gma500/intel_bios.c
@@ -54,6 +54,98 @@
 	return NULL;
 }
 
+static void
+parse_edp(struct drm_psb_private *dev_priv, struct bdb_header *bdb)
+{
+	struct bdb_edp *edp;
+	struct edp_power_seq *edp_pps;
+	struct edp_link_params *edp_link_params;
+	uint8_t	panel_type;
+
+	edp = find_section(bdb, BDB_EDP);
+
+	dev_priv->edp.bpp = 18;
+	if (!edp) {
+		if (dev_priv->edp.support) {
+			DRM_DEBUG_KMS("No eDP BDB found but eDP panel supported, assume %dbpp panel color depth.\n",
+				      dev_priv->edp.bpp);
+		}
+		return;
+	}
+
+	panel_type = dev_priv->panel_type;
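+	/* color_depth packs a two-bit depth code per panel type */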
+	switch ((edp->color_depth >> (panel_type * 2)) & 3) {
+	case EDP_18BPP:
+		dev_priv->edp.bpp = 18;
+		break;
+	case EDP_24BPP:
+		dev_priv->edp.bpp = 24;
+		break;
+	case EDP_30BPP:
+		dev_priv->edp.bpp = 30;
+		break;
+	}
+
+	/* Get the eDP sequencing and link info */
+	edp_pps = &edp->power_seqs[panel_type];
+	edp_link_params = &edp->link_params[panel_type];
+
+	dev_priv->edp.pps = *edp_pps;
+
+	DRM_DEBUG_KMS("EDP timing in vbt t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
+				dev_priv->edp.pps.t1_t3, dev_priv->edp.pps.t8, 
+				dev_priv->edp.pps.t9, dev_priv->edp.pps.t10,
+				dev_priv->edp.pps.t11_t12);
+
+	dev_priv->edp.rate = edp_link_params->rate ? DP_LINK_BW_2_7 :
+		DP_LINK_BW_1_62;
+	switch (edp_link_params->lanes) {
+	case 0:
+		dev_priv->edp.lanes = 1;
+		break;
+	case 1:
+		dev_priv->edp.lanes = 2;
+		break;
+	case 3:
+	default:
+		dev_priv->edp.lanes = 4;
+		break;
+	}
+	DRM_DEBUG_KMS("VBT reports EDP: Lane_count %d, Lane_rate %d, Bpp %d\n",
+			dev_priv->edp.lanes, dev_priv->edp.rate, dev_priv->edp.bpp);
+
+	switch (edp_link_params->preemphasis) {
+	case 0:
+		dev_priv->edp.preemphasis = DP_TRAIN_PRE_EMPHASIS_0;
+		break;
+	case 1:
+		dev_priv->edp.preemphasis = DP_TRAIN_PRE_EMPHASIS_3_5;
+		break;
+	case 2:
+		dev_priv->edp.preemphasis = DP_TRAIN_PRE_EMPHASIS_6;
+		break;
+	case 3:
+		dev_priv->edp.preemphasis = DP_TRAIN_PRE_EMPHASIS_9_5;
+		break;
+	}
+	switch (edp_link_params->vswing) {
+	case 0:
+		dev_priv->edp.vswing = DP_TRAIN_VOLTAGE_SWING_400;
+		break;
+	case 1:
+		dev_priv->edp.vswing = DP_TRAIN_VOLTAGE_SWING_600;
+		break;
+	case 2:
+		dev_priv->edp.vswing = DP_TRAIN_VOLTAGE_SWING_800;
+		break;
+	case 3:
+		dev_priv->edp.vswing = DP_TRAIN_VOLTAGE_SWING_1200;
+		break;
+	}
+	DRM_DEBUG_KMS("VBT reports EDP: VSwing  %d, Preemph %d\n",
+			dev_priv->edp.vswing, dev_priv->edp.preemphasis);
+}
+
 static u16
 get_blocksize(void *p)
 {
@@ -154,6 +246,8 @@
 		return;
 
 	dev_priv->lvds_dither = lvds_options->pixel_dither;
+	dev_priv->panel_type = lvds_options->panel_type;
+
 	if (lvds_options->panel_type == 0xff)
 		return;
 
@@ -340,6 +434,9 @@
 	if (!driver)
 		return;
 
+	if (driver->lvds_config == BDB_DRIVER_FEATURE_EDP)
+		dev_priv->edp.support = 1;
+
 	/* This bit means to use 96Mhz for DPLL_A or not */
 	if (driver->primary_lfp_id)
 		dev_priv->dplla_96mhz = true;
@@ -437,6 +534,9 @@
 	size_t size;
 	int i;
 
+
+	dev_priv->panel_type = 0xff;
+
 	/* XXX Should this validation be moved to intel_opregion.c? */
 	if (dev_priv->opregion.vbt) {
 		struct vbt_header *vbt = dev_priv->opregion.vbt;
@@ -477,6 +577,7 @@
 	parse_sdvo_device_mapping(dev_priv, bdb);
 	parse_device_mapping(dev_priv, bdb);
 	parse_backlight_data(dev_priv, bdb);
+	parse_edp(dev_priv, bdb);
 
 	if (bios)
 		pci_unmap_rom(pdev, bios);
diff --git a/drivers/gpu/drm/gma500/intel_bios.h b/drivers/gpu/drm/gma500/intel_bios.h
index 2e95523..c6267c9 100644
--- a/drivers/gpu/drm/gma500/intel_bios.h
+++ b/drivers/gpu/drm/gma500/intel_bios.h
@@ -23,6 +23,7 @@
 #define _I830_BIOS_H_
 
 #include <drm/drmP.h>
+#include <drm/drm_dp_helper.h>
 
 struct vbt_header {
 	u8 signature[20];		/**< Always starts with 'VBT$' */
@@ -93,6 +94,7 @@
 #define BDB_SDVO_LVDS_PNP_IDS	 24
 #define BDB_SDVO_LVDS_POWER_SEQ	 25
 #define BDB_TV_OPTIONS		 26
+#define BDB_EDP			 27
 #define BDB_LVDS_OPTIONS	 40
 #define BDB_LVDS_LFP_DATA_PTRS	 41
 #define BDB_LVDS_LFP_DATA	 42
@@ -391,6 +393,11 @@
 	u8 panel_misc_bits_4;
 } __attribute__((packed));
 
+#define BDB_DRIVER_FEATURE_NO_LVDS		0
+#define BDB_DRIVER_FEATURE_INT_LVDS		1
+#define BDB_DRIVER_FEATURE_SDVO_LVDS		2
+#define BDB_DRIVER_FEATURE_EDP			3
+
 struct bdb_driver_features {
 	u8 boot_dev_algorithm:1;
 	u8 block_display_switch:1;
@@ -431,6 +438,45 @@
 	u8 custom_vbt_version;
 } __attribute__((packed));
 
+#define EDP_18BPP	0
+#define EDP_24BPP	1
+#define EDP_30BPP	2
+#define EDP_RATE_1_62	0
+#define EDP_RATE_2_7	1
+#define EDP_LANE_1	0
+#define EDP_LANE_2	1
+#define EDP_LANE_4	3
+#define EDP_PREEMPHASIS_NONE	0
+#define EDP_PREEMPHASIS_3_5dB	1
+#define EDP_PREEMPHASIS_6dB	2
+#define EDP_PREEMPHASIS_9_5dB	3
+#define EDP_VSWING_0_4V		0
+#define EDP_VSWING_0_6V		1
+#define EDP_VSWING_0_8V		2
+#define EDP_VSWING_1_2V		3
+
+struct edp_power_seq {
+	u16 t1_t3;
+	u16 t8;
+	u16 t9;
+	u16 t10;
+	u16 t11_t12;
+} __attribute__ ((packed));
+
+struct edp_link_params {
+	u8 rate:4;
+	u8 lanes:4;
+	u8 preemphasis:4;
+	u8 vswing:4;
+} __attribute__ ((packed));
+
+struct bdb_edp {
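+	/* power_seqs[] and link_params[] are indexed by panel_type */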
+	struct edp_power_seq power_seqs[16];
+	u32 color_depth;
+	u32 sdrrs_msa_timing_delay;
+	struct edp_link_params link_params[16];
+} __attribute__ ((packed));
+
 extern int psb_intel_init_bios(struct drm_device *dev);
 extern void psb_intel_destroy_bios(struct drm_device *dev);
 
diff --git a/drivers/gpu/drm/gma500/mdfld_dsi_output.c b/drivers/gpu/drm/gma500/mdfld_dsi_output.c
index 5675d93..32dba2a 100644
--- a/drivers/gpu/drm/gma500/mdfld_dsi_output.c
+++ b/drivers/gpu/drm/gma500/mdfld_dsi_output.c
@@ -299,17 +299,8 @@
 		if (drm_connector_property_set_value(connector, property,
 									value))
 			goto set_prop_error;
-		else {
-#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
-			struct backlight_device *psb_bd;
-
-			psb_bd = mdfld_get_backlight_device();
-			if (psb_bd) {
-				psb_bd->props.brightness = value;
-				mdfld_set_brightness(psb_bd);
-			}
-#endif
-		}
+		else
+			gma_backlight_set(encoder->dev, value);
 	}
 set_prop_done:
 	return 0;
diff --git a/drivers/gpu/drm/gma500/mid_bios.c b/drivers/gpu/drm/gma500/mid_bios.c
index b2a790b..850cd3f 100644
--- a/drivers/gpu/drm/gma500/mid_bios.c
+++ b/drivers/gpu/drm/gma500/mid_bios.c
@@ -118,20 +118,20 @@
 					dev_priv->platform_rev_id);
 }
 
-struct vbt_header {
+struct mid_vbt_header {
 	u32 signature;
 	u8 revision;
 } __packed;
 
 /* The same for r0 and r1 */
 struct vbt_r0 {
-	struct vbt_header vbt_header;
+	struct mid_vbt_header vbt_header;
 	u8 size;
 	u8 checksum;
 } __packed;
 
 struct vbt_r10 {
-	struct vbt_header vbt_header;
+	struct mid_vbt_header vbt_header;
 	u8 checksum;
 	u16 size;
 	u8 panel_count;
@@ -281,7 +281,7 @@
 	struct drm_device *dev = dev_priv->dev;
 	u32 addr;
 	u8 __iomem *vbt_virtual;
-	struct vbt_header vbt_header;
+	struct mid_vbt_header vbt_header;
 	struct pci_dev *pci_gfx_root = pci_get_bus_and_slot(0, PCI_DEVFN(2, 0));
 	int ret = -1;
 
diff --git a/drivers/gpu/drm/gma500/oaktrail_hdmi.c b/drivers/gpu/drm/gma500/oaktrail_hdmi.c
index 2eb3dc4..69e51e90 100644
--- a/drivers/gpu/drm/gma500/oaktrail_hdmi.c
+++ b/drivers/gpu/drm/gma500/oaktrail_hdmi.c
@@ -252,7 +252,6 @@
 	if (edid) {
 		drm_mode_connector_update_edid_property(connector, edid);
 		ret = drm_add_edid_modes(connector, edid);
-		connector->display_info.raw_edid = NULL;
 	}
 
 	/*
diff --git a/drivers/gpu/drm/gma500/opregion.c b/drivers/gpu/drm/gma500/opregion.c
index c430bd4..ad0d6de 100644
--- a/drivers/gpu/drm/gma500/opregion.c
+++ b/drivers/gpu/drm/gma500/opregion.c
@@ -166,8 +166,7 @@
 
 	if (config_enabled(CONFIG_BACKLIGHT_CLASS_DEVICE)) {
 		int max = bd->props.max_brightness;
-		bd->props.brightness = bclp * max / 255;
-		backlight_update_status(bd);
+		gma_backlight_set(dev, bclp * max / 255);
 	}
 
 	asle->cblv = (bclp * 0x64) / 0xff | ASLE_CBLV_VALID;
diff --git a/drivers/gpu/drm/gma500/psb_device.c b/drivers/gpu/drm/gma500/psb_device.c
index 5971bc8..f1432f0 100644
--- a/drivers/gpu/drm/gma500/psb_device.c
+++ b/drivers/gpu/drm/gma500/psb_device.c
@@ -290,6 +290,7 @@
 	case 6:
 	case 7:
 		dev_priv->core_freq = 266;
+		break;
 	default:
 		dev_priv->core_freq = 0;
 	}
diff --git a/drivers/gpu/drm/gma500/psb_drv.h b/drivers/gpu/drm/gma500/psb_drv.h
index 1bd115e..223ff5b 100644
--- a/drivers/gpu/drm/gma500/psb_drv.h
+++ b/drivers/gpu/drm/gma500/psb_drv.h
@@ -24,10 +24,10 @@
 
 #include <drm/drmP.h>
 #include "drm_global.h"
-#include "gem_glue.h"
 #include "gma_drm.h"
 #include "psb_reg.h"
 #include "psb_intel_drv.h"
+#include "intel_bios.h"
 #include "gtt.h"
 #include "power.h"
 #include "opregion.h"
@@ -613,6 +613,8 @@
 	 */
 	struct backlight_device *backlight_device;
 	struct drm_property *backlight_property;
+	bool backlight_enabled;
+	int backlight_level;
 	uint32_t blc_adj1;
 	uint32_t blc_adj2;
 
@@ -640,6 +642,19 @@
 	int mdfld_panel_id;
 
 	bool dplla_96mhz;	/* DPLL data from the VBT */
+
+	struct {
+		int rate;
+		int lanes;
+		int preemphasis;
+		int vswing;
+
+		bool initialized;
+		bool support;
+		int bpp;
+		struct edp_power_seq pps;
+	} edp;
+	uint8_t panel_type;
 };
 
 
@@ -796,6 +811,9 @@
 /* backlight.c */
 int gma_backlight_init(struct drm_device *dev);
 void gma_backlight_exit(struct drm_device *dev);
+void gma_backlight_disable(struct drm_device *dev);
+void gma_backlight_enable(struct drm_device *dev);
+void gma_backlight_set(struct drm_device *dev, int v);
 
 /* oaktrail_crtc.c */
 extern const struct drm_crtc_helper_funcs oaktrail_helper_funcs;
diff --git a/drivers/gpu/drm/gma500/psb_intel_drv.h b/drivers/gpu/drm/gma500/psb_intel_drv.h
index ebe1a28..90f2d11 100644
--- a/drivers/gpu/drm/gma500/psb_intel_drv.h
+++ b/drivers/gpu/drm/gma500/psb_intel_drv.h
@@ -29,10 +29,6 @@
  * Display related stuff
  */
 
-/* store information about an Ixxx DVO */
-/* The i830->i865 use multiple DVOs with multiple i2cs */
-/* the i915, i945 have a single sDVO i2c bus - which is different */
-#define MAX_OUTPUTS 6
 /* maximum connectors per crtcs in the mode set */
 #define INTELFB_CONN_LIMIT 4
 
@@ -69,6 +65,8 @@
 #define INTEL_OUTPUT_HDMI 6
 #define INTEL_OUTPUT_MIPI 7
 #define INTEL_OUTPUT_MIPI2 8
+#define INTEL_OUTPUT_DISPLAYPORT 9
+#define INTEL_OUTPUT_EDP 10
 
 #define INTEL_DVO_CHIP_NONE 0
 #define INTEL_DVO_CHIP_LVDS 1
@@ -133,6 +131,11 @@
 	void (*hot_plug)(struct psb_intel_encoder *);
 	int crtc_mask;
 	int clone_mask;
+	u32 ddi_select;	/* Channel info */
+#define DDI0_SELECT	0x01
+#define DDI1_SELECT	0x02
+#define DP_MASK		0x8000
+#define DDI_MASK	0x03
 	void *dev_priv; /* For sdvo_priv, lvds_priv, etc... */
 
 	/* FIXME: Either make SDVO and LVDS store it's i2c here or give CDV it's
@@ -190,7 +193,6 @@
 	u32 mode_flags;
 
 	bool active;
-	bool crtc_enable;
 
 	/* Saved Crtc HW states */
 	struct psb_intel_crtc_state *crtc_state;
@@ -285,4 +287,20 @@
 extern void gma_intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit);
 extern void gma_intel_teardown_gmbus(struct drm_device *dev);
 
+/* DP support */
+extern void cdv_intel_dp_init(struct drm_device *dev, struct psb_intel_mode_device *mode_dev, int output_reg);
+extern void cdv_intel_dp_set_m_n(struct drm_crtc *crtc,
+					struct drm_display_mode *mode,
+					struct drm_display_mode *adjusted_mode);
+
+extern void psb_intel_attach_force_audio_property(struct drm_connector *connector);
+extern void psb_intel_attach_broadcast_rgb_property(struct drm_connector *connector);
+
+extern int cdv_sb_read(struct drm_device *dev, u32 reg, u32 *val);
+extern int cdv_sb_write(struct drm_device *dev, u32 reg, u32 val);
+extern void cdv_sb_reset(struct drm_device *dev);
+
+extern void cdv_intel_attach_force_audio_property(struct drm_connector *connector);
+extern void cdv_intel_attach_broadcast_rgb_property(struct drm_connector *connector);
+
 #endif				/* __INTEL_DRV_H__ */
diff --git a/drivers/gpu/drm/gma500/psb_intel_lvds.c b/drivers/gpu/drm/gma500/psb_intel_lvds.c
index 37adc9e..2a4c3a9 100644
--- a/drivers/gpu/drm/gma500/psb_intel_lvds.c
+++ b/drivers/gpu/drm/gma500/psb_intel_lvds.c
@@ -630,17 +630,8 @@
 							property,
 							value))
 			goto set_prop_error;
-		else {
-#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
-			struct drm_psb_private *devp =
-						encoder->dev->dev_private;
-			struct backlight_device *bd = devp->backlight_device;
-			if (bd) {
-				bd->props.brightness = value;
-				backlight_update_status(bd);
-			}
-#endif
-		}
+		else
+			gma_backlight_set(encoder->dev, value);
 	} else if (!strcmp(property->name, "DPMS")) {
 		struct drm_encoder_helper_funcs *hfuncs
 						= encoder->helper_private;
diff --git a/drivers/gpu/drm/gma500/psb_intel_reg.h b/drivers/gpu/drm/gma500/psb_intel_reg.h
index 8e8c8ef..d914719 100644
--- a/drivers/gpu/drm/gma500/psb_intel_reg.h
+++ b/drivers/gpu/drm/gma500/psb_intel_reg.h
@@ -173,15 +173,46 @@
 #define PP_SEQUENCE_ON			(1 << 28)
 #define PP_SEQUENCE_OFF			(2 << 28)
 #define PP_SEQUENCE_MASK		0x30000000
+#define	PP_CYCLE_DELAY_ACTIVE		(1 << 27)
+#define	PP_SEQUENCE_STATE_ON_IDLE	(1 << 3)
+#define	PP_SEQUENCE_STATE_MASK		0x0000000f
+
 #define PP_CONTROL		0x61204
 #define POWER_TARGET_ON			(1 << 0)
+#define	PANEL_UNLOCK_REGS		(0xabcd << 16)
+#define	PANEL_UNLOCK_MASK		(0xffff << 16)
+#define	EDP_FORCE_VDD			(1 << 3)
+#define	EDP_BLC_ENABLE			(1 << 2)
+#define	PANEL_POWER_RESET		(1 << 1)
+#define	PANEL_POWER_OFF			(0 << 0)
+#define	PANEL_POWER_ON			(1 << 0)
 
+/* Poulsbo/Oaktrail */
 #define LVDSPP_ON		0x61208
 #define LVDSPP_OFF		0x6120c
 #define PP_CYCLE		0x61210
 
+/* Cedartrail */
 #define PP_ON_DELAYS		0x61208		/* Cedartrail */
+#define PANEL_PORT_SELECT_MASK		(3 << 30)
+#define PANEL_PORT_SELECT_LVDS		(0 << 30)
+#define PANEL_PORT_SELECT_EDP		(1 << 30)
+#define PANEL_POWER_UP_DELAY_MASK	(0x1fff0000)
+#define PANEL_POWER_UP_DELAY_SHIFT	16
+#define PANEL_LIGHT_ON_DELAY_MASK	(0x1fff)
+#define PANEL_LIGHT_ON_DELAY_SHIFT	0
+
 #define PP_OFF_DELAYS		0x6120c		/* Cedartrail */
+#define PANEL_POWER_DOWN_DELAY_MASK	(0x1fff0000)
+#define PANEL_POWER_DOWN_DELAY_SHIFT	16
+#define PANEL_LIGHT_OFF_DELAY_MASK	(0x1fff)
+#define PANEL_LIGHT_OFF_DELAY_SHIFT	0
+
+#define PP_DIVISOR		0x61210		/* Cedartrail */
+#define  PP_REFERENCE_DIVIDER_MASK	(0xffffff00)
+#define  PP_REFERENCE_DIVIDER_SHIFT	8
+#define  PANEL_POWER_CYCLE_DELAY_MASK	(0x1f)
+#define  PANEL_POWER_CYCLE_DELAY_SHIFT	0
 
 #define PFIT_CONTROL		0x61230
 #define PFIT_ENABLE			(1 << 31)
@@ -1282,6 +1313,10 @@
 # define VRHUNIT_CLOCK_GATE_DISABLE		(1 << 28) /* Fixed value on CDV */
 # define DPOUNIT_CLOCK_GATE_DISABLE		(1 << 11)
 # define DPIOUNIT_CLOCK_GATE_DISABLE		(1 << 6)
+# define DPUNIT_PIPEB_GATE_DISABLE		(1 << 30)
+# define DPUNIT_PIPEA_GATE_DISABLE		(1 << 25)
+# define DPCUNIT_CLOCK_GATE_DISABLE		(1 << 24)
+# define DPLSUNIT_CLOCK_GATE_DISABLE		(1 << 13)
 
 #define RAMCLK_GATE_D		0x6210
 
@@ -1347,5 +1382,165 @@
 #define LANE_PLL_ENABLE		(0x3 << 20)
 #define LANE_PLL_PIPE(p)	(((p) == 0) ? (1 << 21) : (0 << 21))
 
+#define DP_B				0x64100
+#define DP_C				0x64200
+
+#define   DP_PORT_EN			(1 << 31)
+#define   DP_PIPEB_SELECT		(1 << 30)
+#define   DP_PIPE_MASK			(1 << 30)
+
+/* Link training mode - select a suitable mode for each stage */
+#define   DP_LINK_TRAIN_PAT_1		(0 << 28)
+#define   DP_LINK_TRAIN_PAT_2		(1 << 28)
+#define   DP_LINK_TRAIN_PAT_IDLE	(2 << 28)
+#define   DP_LINK_TRAIN_OFF		(3 << 28)
+#define   DP_LINK_TRAIN_MASK		(3 << 28)
+#define   DP_LINK_TRAIN_SHIFT		28
+
+/* Signal voltages. These are mostly controlled by the other end */
+#define   DP_VOLTAGE_0_4		(0 << 25)
+#define   DP_VOLTAGE_0_6		(1 << 25)
+#define   DP_VOLTAGE_0_8		(2 << 25)
+#define   DP_VOLTAGE_1_2		(3 << 25)
+#define   DP_VOLTAGE_MASK		(7 << 25)
+#define   DP_VOLTAGE_SHIFT		25
+
+/* Signal pre-emphasis levels, like voltages, the other end tells us what
+ * they want
+ */
+#define   DP_PRE_EMPHASIS_0		(0 << 22)
+#define   DP_PRE_EMPHASIS_3_5		(1 << 22)
+#define   DP_PRE_EMPHASIS_6		(2 << 22)
+#define   DP_PRE_EMPHASIS_9_5		(3 << 22)
+#define   DP_PRE_EMPHASIS_MASK		(7 << 22)
+#define   DP_PRE_EMPHASIS_SHIFT		22
+
+/* How many wires to use. I guess 3 was too hard */
+#define   DP_PORT_WIDTH_1		(0 << 19)
+#define   DP_PORT_WIDTH_2		(1 << 19)
+#define   DP_PORT_WIDTH_4		(3 << 19)
+#define   DP_PORT_WIDTH_MASK		(7 << 19)
+
+/* Mystic DPCD version 1.1 special mode */
+#define   DP_ENHANCED_FRAMING		(1 << 18)
+
+/** locked once port is enabled */
+#define   DP_PORT_REVERSAL		(1 << 15)
+
+/** sends the clock on lane 15 of the PEG for debug */
+#define   DP_CLOCK_OUTPUT_ENABLE	(1 << 13)
+
+#define   DP_SCRAMBLING_DISABLE		(1 << 12)
+#define   DP_SCRAMBLING_DISABLE_IRONLAKE	(1 << 7)
+
+/** limit RGB values to avoid confusing TVs */
+#define   DP_COLOR_RANGE_16_235		(1 << 8)
+
+/** Turn on the audio link */
+#define   DP_AUDIO_OUTPUT_ENABLE	(1 << 6)
+
+/** vs and hs sync polarity */
+#define   DP_SYNC_VS_HIGH		(1 << 4)
+#define   DP_SYNC_HS_HIGH		(1 << 3)
+
+/** A fantasy */
+#define   DP_DETECTED			(1 << 2)
+
+/** The aux channel provides a way to talk to the
+ * signal sink for DDC etc. Max packet size supported
+ * is 20 bytes in each direction, hence the 5 fixed
+ * data registers
+ */
+#define DPB_AUX_CH_CTL			0x64110
+#define DPB_AUX_CH_DATA1		0x64114
+#define DPB_AUX_CH_DATA2		0x64118
+#define DPB_AUX_CH_DATA3		0x6411c
+#define DPB_AUX_CH_DATA4		0x64120
+#define DPB_AUX_CH_DATA5		0x64124
+
+#define DPC_AUX_CH_CTL			0x64210
+#define DPC_AUX_CH_DATA1		0x64214
+#define DPC_AUX_CH_DATA2		0x64218
+#define DPC_AUX_CH_DATA3		0x6421c
+#define DPC_AUX_CH_DATA4		0x64220
+#define DPC_AUX_CH_DATA5		0x64224
+
+#define   DP_AUX_CH_CTL_SEND_BUSY	    (1 << 31)
+#define   DP_AUX_CH_CTL_DONE		    (1 << 30)
+#define   DP_AUX_CH_CTL_INTERRUPT	    (1 << 29)
+#define   DP_AUX_CH_CTL_TIME_OUT_ERROR	    (1 << 28)
+#define   DP_AUX_CH_CTL_TIME_OUT_400us	    (0 << 26)
+#define   DP_AUX_CH_CTL_TIME_OUT_600us	    (1 << 26)
+#define   DP_AUX_CH_CTL_TIME_OUT_800us	    (2 << 26)
+#define   DP_AUX_CH_CTL_TIME_OUT_1600us	    (3 << 26)
+#define   DP_AUX_CH_CTL_TIME_OUT_MASK	    (3 << 26)
+#define   DP_AUX_CH_CTL_RECEIVE_ERROR	    (1 << 25)
+#define   DP_AUX_CH_CTL_MESSAGE_SIZE_MASK    (0x1f << 20)
+#define   DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT   20
+#define   DP_AUX_CH_CTL_PRECHARGE_2US_MASK   (0xf << 16)
+#define   DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT  16
+#define   DP_AUX_CH_CTL_AUX_AKSV_SELECT	    (1 << 15)
+#define   DP_AUX_CH_CTL_MANCHESTER_TEST	    (1 << 14)
+#define   DP_AUX_CH_CTL_SYNC_TEST	    (1 << 13)
+#define   DP_AUX_CH_CTL_DEGLITCH_TEST	    (1 << 12)
+#define   DP_AUX_CH_CTL_PRECHARGE_TEST	    (1 << 11)
+#define   DP_AUX_CH_CTL_BIT_CLOCK_2X_MASK    (0x7ff)
+#define   DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT   0
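+
+/*
+ * Illustrative sketch (not part of this patch): the AUX data registers
+ * hold the message packed four bytes per register, so a 20-byte message
+ * fills DATA1..DATA5.  Assuming the first byte goes into the top byte of
+ * a register (as the analogous i915 DP code packs it), a packing helper
+ * could look like:
+ *
+ *	static u32 pack_aux(const u8 *src, int src_bytes)
+ *	{
+ *		u32 v = 0;
+ *		int i;
+ *
+ *		for (i = 0; i < src_bytes && i < 4; i++)
+ *			v |= (u32)src[i] << ((3 - i) * 8);
+ *		return v;
+ *	}
+ */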
+
+/*
+ * Computing GMCH M and N values for the Display Port link
+ *
+ * GMCH M/N = dot clock * bytes per pixel / ls_clk * # of lanes
+ *
+ * ls_clk (we assume) is the DP link clock (1.62 or 2.7 GHz)
+ *
+ * The GMCH value is used internally
+ *
+ * bytes_per_pixel is the number of bytes coming out of the plane,
+ * which is after the LUTs, so we want the bytes for our color format.
+ * For our current usage, this is always 3, one byte for R, G and B.
+ */
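+
+/*
+ * Illustrative sketch (hypothetical helper, not defined in this patch):
+ * the raw M/N pair from the formula above; the ratio must still be
+ * reduced to fit the 24-bit register fields below.
+ *
+ *	static void gmch_m_n(u32 dot_clock_khz, u32 bytes_per_pixel,
+ *			     u32 ls_clk_khz, u32 lane_count,
+ *			     u32 *m, u32 *n)
+ *	{
+ *		*m = dot_clock_khz * bytes_per_pixel;
+ *		*n = ls_clk_khz * lane_count;
+ *	}
+ */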
+
+#define _PIPEA_GMCH_DATA_M			0x70050
+#define _PIPEB_GMCH_DATA_M			0x71050
+
+/* Transfer unit size for display port - 1, default is 0x3f (for TU size 64) */
+#define   PIPE_GMCH_DATA_M_TU_SIZE_MASK		(0x3f << 25)
+#define   PIPE_GMCH_DATA_M_TU_SIZE_SHIFT	25
+
+#define   PIPE_GMCH_DATA_M_MASK			(0xffffff)
+
+#define _PIPEA_GMCH_DATA_N			0x70054
+#define _PIPEB_GMCH_DATA_N			0x71054
+#define   PIPE_GMCH_DATA_N_MASK			(0xffffff)
+
+/*
+ * Computing Link M and N values for the Display Port link
+ *
+ * Link M / N = pixel_clock / ls_clk
+ *
+ * (the DP spec calls pixel_clock the 'strm_clk')
+ *
+ * The Link value is transmitted in the Main Stream
+ * Attributes and VB-ID.
+ */
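+
+/*
+ * Illustrative note: with pixel_clock and ls_clk in the same units this
+ * is simply m = pixel_clock, n = ls_clk, again reduced to fit the
+ * 24-bit register fields below.
+ */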
+
+#define _PIPEA_DP_LINK_M				0x70060
+#define _PIPEB_DP_LINK_M				0x71060
+#define   PIPEA_DP_LINK_M_MASK			(0xffffff)
+
+#define _PIPEA_DP_LINK_N				0x70064
+#define _PIPEB_DP_LINK_N				0x71064
+#define   PIPEA_DP_LINK_N_MASK			(0xffffff)
+
+#define PIPE_GMCH_DATA_M(pipe) _PIPE(pipe, _PIPEA_GMCH_DATA_M, _PIPEB_GMCH_DATA_M)
+#define PIPE_GMCH_DATA_N(pipe) _PIPE(pipe, _PIPEA_GMCH_DATA_N, _PIPEB_GMCH_DATA_N)
+#define PIPE_DP_LINK_M(pipe) _PIPE(pipe, _PIPEA_DP_LINK_M, _PIPEB_DP_LINK_M)
+#define PIPE_DP_LINK_N(pipe) _PIPE(pipe, _PIPEA_DP_LINK_N, _PIPEB_DP_LINK_N)
+
+#define   PIPE_BPC_MASK				(7 << 5)
+#define   PIPE_8BPC				(0 << 5)
+#define   PIPE_10BPC				(1 << 5)
+#define   PIPE_6BPC				(2 << 5)
 
 #endif
diff --git a/drivers/gpu/drm/gma500/psb_intel_sdvo.c b/drivers/gpu/drm/gma500/psb_intel_sdvo.c
index 0466c7b..d35f93b 100644
--- a/drivers/gpu/drm/gma500/psb_intel_sdvo.c
+++ b/drivers/gpu/drm/gma500/psb_intel_sdvo.c
@@ -1292,7 +1292,6 @@
 
 	return drm_get_edid(connector,
 			    &dev_priv->gmbus[dev_priv->crt_ddc_pin].adapter);
-	return NULL;
 }
 
 static enum drm_connector_status
@@ -1343,7 +1342,6 @@
 			}
 		} else
 			status = connector_status_disconnected;
-		connector->display_info.raw_edid = NULL;
 		kfree(edid);
 	}
 
@@ -1404,7 +1402,6 @@
 				ret = connector_status_disconnected;
 			else
 				ret = connector_status_connected;
-			connector->display_info.raw_edid = NULL;
 			kfree(edid);
 		} else
 			ret = connector_status_connected;
@@ -1453,7 +1450,6 @@
 			drm_add_edid_modes(connector, edid);
 		}
 
-		connector->display_info.raw_edid = NULL;
 		kfree(edid);
 	}
 }
diff --git a/drivers/gpu/drm/i2c/ch7006_drv.c b/drivers/gpu/drm/i2c/ch7006_drv.c
index 36d9522..599099f 100644
--- a/drivers/gpu/drm/i2c/ch7006_drv.c
+++ b/drivers/gpu/drm/i2c/ch7006_drv.c
@@ -427,15 +427,10 @@
 	return 0;
 }
 
-static int ch7006_suspend(struct i2c_client *client, pm_message_t mesg)
+static int ch7006_resume(struct device *dev)
 {
-	ch7006_dbg(client, "\n");
+	struct i2c_client *client = to_i2c_client(dev);
 
-	return 0;
-}
-
-static int ch7006_resume(struct i2c_client *client)
-{
 	ch7006_dbg(client, "\n");
 
 	ch7006_write(client, 0x3d, 0x0);
@@ -499,15 +494,18 @@
 };
 MODULE_DEVICE_TABLE(i2c, ch7006_ids);
 
+static const struct dev_pm_ops ch7006_pm_ops = {
+	.resume = ch7006_resume,
+};
+
 static struct drm_i2c_encoder_driver ch7006_driver = {
 	.i2c_driver = {
 		.probe = ch7006_probe,
 		.remove = ch7006_remove,
-		.suspend = ch7006_suspend,
-		.resume = ch7006_resume,
 
 		.driver = {
 			.name = "ch7006",
+			.pm = &ch7006_pm_ops,
 		},
 
 		.id_table = ch7006_ids,
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index b0bacdb..0f2c549 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -40,6 +40,7 @@
 	  dvo_ivch.o \
 	  dvo_tfp410.o \
 	  dvo_sil164.o \
+	  dvo_ns2501.o \
 	  i915_gem_dmabuf.o
 
 i915-$(CONFIG_COMPAT)   += i915_ioc32.o
diff --git a/drivers/gpu/drm/i915/dvo.h b/drivers/gpu/drm/i915/dvo.h
index 5891469..0c8ac4d 100644
--- a/drivers/gpu/drm/i915/dvo.h
+++ b/drivers/gpu/drm/i915/dvo.h
@@ -140,5 +140,6 @@
 extern struct intel_dvo_dev_ops ivch_ops;
 extern struct intel_dvo_dev_ops tfp410_ops;
 extern struct intel_dvo_dev_ops ch7017_ops;
+extern struct intel_dvo_dev_ops ns2501_ops;
 
 #endif /* _INTEL_DVO_H */
diff --git a/drivers/gpu/drm/i915/dvo_ns2501.c b/drivers/gpu/drm/i915/dvo_ns2501.c
new file mode 100644
index 0000000..1a0bad9
--- /dev/null
+++ b/drivers/gpu/drm/i915/dvo_ns2501.c
@@ -0,0 +1,582 @@
+/*
+ *
+ * Copyright (c) 2012 Gilles Dartiguelongue, Thomas Richter
+ *
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "dvo.h"
+#include "i915_reg.h"
+#include "i915_drv.h"
+
+#define NS2501_VID 0x1305
+#define NS2501_DID 0x6726
+
+#define NS2501_VID_LO 0x00
+#define NS2501_VID_HI 0x01
+#define NS2501_DID_LO 0x02
+#define NS2501_DID_HI 0x03
+#define NS2501_REV 0x04
+#define NS2501_RSVD 0x05
+#define NS2501_FREQ_LO 0x06
+#define NS2501_FREQ_HI 0x07
+
+#define NS2501_REG8 0x08
+#define NS2501_8_VEN (1<<5)
+#define NS2501_8_HEN (1<<4)
+#define NS2501_8_DSEL (1<<3)
+#define NS2501_8_BPAS (1<<2)
+#define NS2501_8_RSVD (1<<1)
+#define NS2501_8_PD (1<<0)
+
+#define NS2501_REG9 0x09
+#define NS2501_9_VLOW (1<<7)
+#define NS2501_9_MSEL_MASK (0x7<<4)
+#define NS2501_9_TSEL (1<<3)
+#define NS2501_9_RSEN (1<<2)
+#define NS2501_9_RSVD (1<<1)
+#define NS2501_9_MDI (1<<0)
+
+#define NS2501_REGC 0x0c
+
+struct ns2501_priv {
+	/* I2CDevRec d; */
+	bool quiet;
+	int reg_8_shadow;
+	int reg_8_set;
+	/* Saved i915 registers, restored by restore_dvo() */
+	int dvoc;
+	int pll_a;
+	int srcdim;
+	int fw_blc;
+};
+
+#define NSPTR(d) ((NS2501Ptr)(d->DriverPrivate.ptr))
+
+/*
+ * Include the PLL launcher prototype
+ */
+extern void intel_enable_pll(struct drm_i915_private *dev_priv, enum pipe pipe);
+
+/*
+ * For reasons unclear to me, the ns2501 (at least on the Fujitsu/Siemens
+ * laptops) does not respond on the i2c bus unless both the PLL is running
+ * and the display is configured in its native resolution.
+ * This function forces the DVO on and saves the registers it touches, so
+ * that restore_dvo() can put them back to their regular values afterwards.
+ *
+ * This is pretty much a hack, but it works. Without it, ns2501_readb()
+ * and ns2501_writeb() fail when switching the resolution.
+ */
+
+static void enable_dvo(struct intel_dvo_device *dvo)
+{
+	struct ns2501_priv *ns = (struct ns2501_priv *)(dvo->dev_priv);
+	struct i2c_adapter *adapter = dvo->i2c_bus;
+	struct intel_gmbus *bus = container_of(adapter,
+					       struct intel_gmbus,
+					       adapter);
+	struct drm_i915_private *dev_priv = bus->dev_priv;
+
+	DRM_DEBUG_KMS("%s: Trying to re-enable the DVO\n", __func__);
+
+	ns->dvoc = I915_READ(DVO_C);
+	ns->pll_a = I915_READ(_DPLL_A);
+	ns->srcdim = I915_READ(DVOC_SRCDIM);
+	ns->fw_blc = I915_READ(FW_BLC);
+
+	I915_WRITE(DVOC, 0x10004084);
+	I915_WRITE(_DPLL_A, 0xd0820000);
+	I915_WRITE(DVOC_SRCDIM, 0x400300);	/* 1024x768 */
+	I915_WRITE(FW_BLC, 0x1080304);
+
+	intel_enable_pll(dev_priv, 0);
+
+	I915_WRITE(DVOC, 0x90004084);
+}
+
+/*
+ * Restore the I915 registers modified by the above
+ * trigger function.
+ */
+static void restore_dvo(struct intel_dvo_device *dvo)
+{
+	struct i2c_adapter *adapter = dvo->i2c_bus;
+	struct intel_gmbus *bus = container_of(adapter,
+					       struct intel_gmbus,
+					       adapter);
+	struct drm_i915_private *dev_priv = bus->dev_priv;
+	struct ns2501_priv *ns = (struct ns2501_priv *)(dvo->dev_priv);
+
+	I915_WRITE(DVOC, ns->dvoc);
+	I915_WRITE(_DPLL_A, ns->pll_a);
+	I915_WRITE(DVOC_SRCDIM, ns->srcdim);
+	I915_WRITE(FW_BLC, ns->fw_blc);
+}
+
+/*
+ * Read a register from the ns2501.
+ * Returns true if successful, false otherwise.
+ * If it returns false, it might be wise to enable the
+ * DVO with the function above.
+ */
+static bool ns2501_readb(struct intel_dvo_device *dvo, int addr, uint8_t * ch)
+{
+	struct ns2501_priv *ns = dvo->dev_priv;
+	struct i2c_adapter *adapter = dvo->i2c_bus;
+	u8 out_buf[2];
+	u8 in_buf[2];
+
+	struct i2c_msg msgs[] = {
+		{
+		 .addr = dvo->slave_addr,
+		 .flags = 0,
+		 .len = 1,
+		 .buf = out_buf,
+		 },
+		{
+		 .addr = dvo->slave_addr,
+		 .flags = I2C_M_RD,
+		 .len = 1,
+		 .buf = in_buf,
+		 }
+	};
+
+	out_buf[0] = addr;
+	out_buf[1] = 0;
+
+	if (i2c_transfer(adapter, msgs, 2) == 2) {
+		*ch = in_buf[0];
+		return true;
+	}
+
+	if (!ns->quiet) {
+		DRM_DEBUG_KMS
+		    ("Unable to read register 0x%02x from %s:0x%02x.\n", addr,
+		     adapter->name, dvo->slave_addr);
+	}
+
+	return false;
+}
+
+/*
+ * Write a register to the ns2501.
+ * Returns true if successful, false otherwise.
+ * If it returns false, it might be wise to enable the
+ * DVO with the function above.
+ */
+static bool ns2501_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch)
+{
+	struct ns2501_priv *ns = dvo->dev_priv;
+	struct i2c_adapter *adapter = dvo->i2c_bus;
+	uint8_t out_buf[2];
+
+	struct i2c_msg msg = {
+		.addr = dvo->slave_addr,
+		.flags = 0,
+		.len = 2,
+		.buf = out_buf,
+	};
+
+	out_buf[0] = addr;
+	out_buf[1] = ch;
+
+	if (i2c_transfer(adapter, &msg, 1) == 1) {
+		return true;
+	}
+
+	if (!ns->quiet) {
+		DRM_DEBUG_KMS("Unable to write register 0x%02x to %s:%d\n",
+			      addr, adapter->name, dvo->slave_addr);
+	}
+
+	return false;
+}
+
+/* National Semiconductor 2501 driver for a chip on the i2c bus:
+ * scan for the chip on the bus.
+ * Hope the VBIOS initialized the PLL correctly so we can
+ * talk to it. If not, the chip will not be seen and not detected.
+ * Bummer!
+ */
+static bool ns2501_init(struct intel_dvo_device *dvo,
+			struct i2c_adapter *adapter)
+{
+	/* this will detect the NS2501 chip on the specified i2c bus */
+	struct ns2501_priv *ns;
+	unsigned char ch;
+
+	ns = kzalloc(sizeof(struct ns2501_priv), GFP_KERNEL);
+	if (ns == NULL)
+		return false;
+
+	dvo->i2c_bus = adapter;
+	dvo->dev_priv = ns;
+	ns->quiet = true;
+
+	if (!ns2501_readb(dvo, NS2501_VID_LO, &ch))
+		goto out;
+
+	if (ch != (NS2501_VID & 0xff)) {
+		DRM_DEBUG_KMS("ns2501 not detected, got %d from %s slave %d\n",
+			      ch, adapter->name, dvo->slave_addr);
+		goto out;
+	}
+
+	if (!ns2501_readb(dvo, NS2501_DID_LO, &ch))
+		goto out;
+
+	if (ch != (NS2501_DID & 0xff)) {
+		DRM_DEBUG_KMS("ns2501 not detected, got %d from %s slave %d\n",
+			      ch, adapter->name, dvo->slave_addr);
+		goto out;
+	}
+	ns->quiet = false;
+	ns->reg_8_set = 0;
+	ns->reg_8_shadow =
+	    NS2501_8_PD | NS2501_8_BPAS | NS2501_8_VEN | NS2501_8_HEN;
+
+	DRM_DEBUG_KMS("initialized ns2501 dvo controller successfully\n");
+	return true;
+
+out:
+	kfree(ns);
+	return false;
+}
+
+static enum drm_connector_status ns2501_detect(struct intel_dvo_device *dvo)
+{
+	/*
+	 * This is a laptop display; it doesn't have hotplugging.
+	 * Even if it did, the detection bit of the 2501 is unreliable as
+	 * it only works for some display types.
+	 * It is even less reliable as the PLL must be active for
+	 * reading from the chip to work at all.
+	 */
+	return connector_status_connected;
+}
+
+static enum drm_mode_status ns2501_mode_valid(struct intel_dvo_device *dvo,
+					      struct drm_display_mode *mode)
+{
+	DRM_DEBUG_KMS("%s: is mode valid (hdisplay=%d,htotal=%d,vdisplay=%d,vtotal=%d)\n",
+		      __func__, mode->hdisplay, mode->htotal, mode->vdisplay,
+		      mode->vtotal);
+
+	/*
+	 * Currently, these are all the modes I have data for.
+	 * More might exist. It is unclear how to find the native
+	 * resolution of the panel from here; knowing it would let us
+	 * always accept a mode by disabling the scaler.
+	 */
+	if ((mode->hdisplay == 800 && mode->vdisplay == 600) ||
+	    (mode->hdisplay == 640 && mode->vdisplay == 480) ||
+	    (mode->hdisplay == 1024 && mode->vdisplay == 768)) {
+		return MODE_OK;
+	} else {
+		return MODE_ONE_SIZE;	/* Is this a reasonable error? */
+	}
+}
+
+static void ns2501_mode_set(struct intel_dvo_device *dvo,
+			    struct drm_display_mode *mode,
+			    struct drm_display_mode *adjusted_mode)
+{
+	bool ok;
+	bool restore = false;
+	struct ns2501_priv *ns = (struct ns2501_priv *)(dvo->dev_priv);
+
+	DRM_DEBUG_KMS("%s: set mode (hdisplay=%d,htotal=%d,vdisplay=%d,vtotal=%d).\n",
+		      __func__, mode->hdisplay, mode->htotal, mode->vdisplay,
+		      mode->vtotal);
+
+	/*
+	 * Where do I find the native resolution for which scaling is not
+	 * required???
+	 *
+	 * First trigger the DVO on, as otherwise the chip does not appear
+	 * on the i2c bus.
+	 */
+	do {
+		ok = true;
+
+		if (mode->hdisplay == 800 && mode->vdisplay == 600) {
+			/* mode 277 */
+			ns->reg_8_shadow &= ~NS2501_8_BPAS;
+			DRM_DEBUG_KMS("%s: switching to 800x600\n", __func__);
+
+			/*
+			 * No, I do not know where this data comes from.
+			 * It is just what the video bios left in the DVO, so
+			 * I'm just copying it over here.
+			 * This also means that I cannot support any modes
+			 * other than the ones supported by the bios.
+			 */
+			ok &= ns2501_writeb(dvo, 0x11, 0xc8);	/* 0xc7 also works */
+			ok &= ns2501_writeb(dvo, 0x1b, 0x19);
+			ok &= ns2501_writeb(dvo, 0x1c, 0x62);	/* VBIOS left 0x64 here, but 0x62 works nicer */
+			ok &= ns2501_writeb(dvo, 0x1d, 0x02);
+
+			ok &= ns2501_writeb(dvo, 0x34, 0x03);
+			ok &= ns2501_writeb(dvo, 0x35, 0xff);
+
+			ok &= ns2501_writeb(dvo, 0x80, 0x27);
+			ok &= ns2501_writeb(dvo, 0x81, 0x03);
+			ok &= ns2501_writeb(dvo, 0x82, 0x41);
+			ok &= ns2501_writeb(dvo, 0x83, 0x05);
+
+			ok &= ns2501_writeb(dvo, 0x8d, 0x02);
+			ok &= ns2501_writeb(dvo, 0x8e, 0x04);
+			ok &= ns2501_writeb(dvo, 0x8f, 0x00);
+
+			ok &= ns2501_writeb(dvo, 0x90, 0xfe);	/* vertical. VBIOS left 0xff here, but 0xfe works better */
+			ok &= ns2501_writeb(dvo, 0x91, 0x07);
+			ok &= ns2501_writeb(dvo, 0x94, 0x00);
+			ok &= ns2501_writeb(dvo, 0x95, 0x00);
+
+			ok &= ns2501_writeb(dvo, 0x96, 0x00);
+
+			ok &= ns2501_writeb(dvo, 0x99, 0x00);
+			ok &= ns2501_writeb(dvo, 0x9a, 0x88);
+
+			ok &= ns2501_writeb(dvo, 0x9c, 0x23);	/* Looks like first and last line of the image. */
+			ok &= ns2501_writeb(dvo, 0x9d, 0x00);
+			ok &= ns2501_writeb(dvo, 0x9e, 0x25);
+			ok &= ns2501_writeb(dvo, 0x9f, 0x03);
+
+			ok &= ns2501_writeb(dvo, 0xa4, 0x80);
+
+			ok &= ns2501_writeb(dvo, 0xb6, 0x00);
+
+			ok &= ns2501_writeb(dvo, 0xb9, 0xc8);	/* horizontal? */
+			ok &= ns2501_writeb(dvo, 0xba, 0x00);	/* horizontal? */
+
+			ok &= ns2501_writeb(dvo, 0xc0, 0x05);	/* horizontal? */
+			ok &= ns2501_writeb(dvo, 0xc1, 0xd7);
+
+			ok &= ns2501_writeb(dvo, 0xc2, 0x00);
+			ok &= ns2501_writeb(dvo, 0xc3, 0xf8);
+
+			ok &= ns2501_writeb(dvo, 0xc4, 0x03);
+			ok &= ns2501_writeb(dvo, 0xc5, 0x1a);
+
+			ok &= ns2501_writeb(dvo, 0xc6, 0x00);
+			ok &= ns2501_writeb(dvo, 0xc7, 0x73);
+			ok &= ns2501_writeb(dvo, 0xc8, 0x02);
+
+		} else if (mode->hdisplay == 640 && mode->vdisplay == 480) {
+			/* mode 274 */
+			DRM_DEBUG_KMS("%s: switching to 640x480\n", __func__);
+			/*
+			 * No, I do not know where this data comes from.
+			 * It is just what the video bios left in the DVO, so
+			 * I'm just copying it over here.
+			 * This also means that I cannot support any modes
+			 * other than the ones supported by the bios.
+			 */
+			ns->reg_8_shadow &= ~NS2501_8_BPAS;
+
+			ok &= ns2501_writeb(dvo, 0x11, 0xa0);
+			ok &= ns2501_writeb(dvo, 0x1b, 0x11);
+			ok &= ns2501_writeb(dvo, 0x1c, 0x54);
+			ok &= ns2501_writeb(dvo, 0x1d, 0x03);
+
+			ok &= ns2501_writeb(dvo, 0x34, 0x03);
+			ok &= ns2501_writeb(dvo, 0x35, 0xff);
+
+			ok &= ns2501_writeb(dvo, 0x80, 0xff);
+			ok &= ns2501_writeb(dvo, 0x81, 0x07);
+			ok &= ns2501_writeb(dvo, 0x82, 0x3d);
+			ok &= ns2501_writeb(dvo, 0x83, 0x05);
+
+			ok &= ns2501_writeb(dvo, 0x8d, 0x02);
+			ok &= ns2501_writeb(dvo, 0x8e, 0x10);
+			ok &= ns2501_writeb(dvo, 0x8f, 0x00);
+
+			ok &= ns2501_writeb(dvo, 0x90, 0xff);	/* vertical */
+			ok &= ns2501_writeb(dvo, 0x91, 0x07);
+			ok &= ns2501_writeb(dvo, 0x94, 0x00);
+			ok &= ns2501_writeb(dvo, 0x95, 0x00);
+
+			ok &= ns2501_writeb(dvo, 0x96, 0x05);
+
+			ok &= ns2501_writeb(dvo, 0x99, 0x00);
+			ok &= ns2501_writeb(dvo, 0x9a, 0x88);
+
+			ok &= ns2501_writeb(dvo, 0x9c, 0x24);
+			ok &= ns2501_writeb(dvo, 0x9d, 0x00);
+			ok &= ns2501_writeb(dvo, 0x9e, 0x25);
+			ok &= ns2501_writeb(dvo, 0x9f, 0x03);
+
+			ok &= ns2501_writeb(dvo, 0xa4, 0x84);
+
+			ok &= ns2501_writeb(dvo, 0xb6, 0x09);
+
+			ok &= ns2501_writeb(dvo, 0xb9, 0xa0);	/* horizontal? */
+			ok &= ns2501_writeb(dvo, 0xba, 0x00);	/* horizontal? */
+
+			ok &= ns2501_writeb(dvo, 0xc0, 0x05);	/* horizontal? */
+			ok &= ns2501_writeb(dvo, 0xc1, 0x90);
+
+			ok &= ns2501_writeb(dvo, 0xc2, 0x00);
+			ok &= ns2501_writeb(dvo, 0xc3, 0x0f);
+
+			ok &= ns2501_writeb(dvo, 0xc4, 0x03);
+			ok &= ns2501_writeb(dvo, 0xc5, 0x16);
+
+			ok &= ns2501_writeb(dvo, 0xc6, 0x00);
+			ok &= ns2501_writeb(dvo, 0xc7, 0x02);
+			ok &= ns2501_writeb(dvo, 0xc8, 0x02);
+
+		} else if (mode->hdisplay == 1024 && mode->vdisplay == 768) {
+			/* mode 280 */
+			DRM_DEBUG_KMS("%s: switching to 1024x768\n", __func__);
+			/*
+			 * This might or might not work, actually. I'm silently
+			 * assuming here that the native panel resolution is
+			 * 1024x768. If not, this leaves the scaler disabled
+			 * and generates a picture that is likely not the
+			 * expected one.
+			 *
+			 * The problem is that I do not know where to take the
+			 * panel dimensions from.
+			 *
+			 * Enable the bypass; scaling is not required.
+			 * The scaler registers are irrelevant here.
+			 */
+			ns->reg_8_shadow |= NS2501_8_BPAS;
+			ok &= ns2501_writeb(dvo, 0x37, 0x44);
+		} else {
+			/*
+			 * Data not known. Bummer!
+			 * Hopefully the code never gets here, as
+			 * ns2501_mode_valid() accepted no other modes.
+			 */
+			ns->reg_8_shadow |= NS2501_8_BPAS;
+		}
+		ok &= ns2501_writeb(dvo, NS2501_REG8, ns->reg_8_shadow);
+
+		if (!ok) {
+			if (restore)
+				restore_dvo(dvo);
+			enable_dvo(dvo);
+			restore = true;
+		}
+	} while (!ok);
+	/*
+	 * Restore the old i915 registers before
+	 * forcing the ns2501 on.
+	 */
+	if (restore)
+		restore_dvo(dvo);
+}
+
+/* set the NS2501 power state */
+static void ns2501_dpms(struct intel_dvo_device *dvo, int mode)
+{
+	bool ok;
+	bool restore = false;
+	struct ns2501_priv *ns = (struct ns2501_priv *)(dvo->dev_priv);
+	unsigned char ch;
+
+	DRM_DEBUG_KMS("%s: trying to set the DPMS of the DVO to %d\n",
+		      __func__, mode);
+
+	ch = ns->reg_8_shadow;
+
+	if (mode == DRM_MODE_DPMS_ON)
+		ch |= NS2501_8_PD;
+	else
+		ch &= ~NS2501_8_PD;
+
+	if (ns->reg_8_set == 0 || ns->reg_8_shadow != ch) {
+		ns->reg_8_set = 1;
+		ns->reg_8_shadow = ch;
+
+		do {
+			ok = true;
+			ok &= ns2501_writeb(dvo, NS2501_REG8, ch);
+			ok &= ns2501_writeb(dvo, 0x34,
+					    (mode == DRM_MODE_DPMS_ON) ? 0x03 : 0x00);
+			ok &= ns2501_writeb(dvo, 0x35,
+					    (mode == DRM_MODE_DPMS_ON) ? 0xff : 0x00);
+			if (!ok) {
+				if (restore)
+					restore_dvo(dvo);
+				enable_dvo(dvo);
+				restore = true;
+			}
+		} while (!ok);
+
+		if (restore)
+			restore_dvo(dvo);
+	}
+}
+
+static void ns2501_dump_regs(struct intel_dvo_device *dvo)
+{
+	uint8_t val;
+
+	ns2501_readb(dvo, NS2501_FREQ_LO, &val);
+	DRM_LOG_KMS("NS2501_FREQ_LO: 0x%02x\n", val);
+	ns2501_readb(dvo, NS2501_FREQ_HI, &val);
+	DRM_LOG_KMS("NS2501_FREQ_HI: 0x%02x\n", val);
+	ns2501_readb(dvo, NS2501_REG8, &val);
+	DRM_LOG_KMS("NS2501_REG8: 0x%02x\n", val);
+	ns2501_readb(dvo, NS2501_REG9, &val);
+	DRM_LOG_KMS("NS2501_REG9: 0x%02x\n", val);
+	ns2501_readb(dvo, NS2501_REGC, &val);
+	DRM_LOG_KMS("NS2501_REGC: 0x%02x\n", val);
+}
+
+static void ns2501_destroy(struct intel_dvo_device *dvo)
+{
+	struct ns2501_priv *ns = dvo->dev_priv;
+
+	if (ns) {
+		kfree(ns);
+		dvo->dev_priv = NULL;
+	}
+}
+
+struct intel_dvo_dev_ops ns2501_ops = {
+	.init = ns2501_init,
+	.detect = ns2501_detect,
+	.mode_valid = ns2501_mode_valid,
+	.mode_set = ns2501_mode_set,
+	.dpms = ns2501_dpms,
+	.dump_regs = ns2501_dump_regs,
+	.destroy = ns2501_destroy,
+};
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 359f6e8..a18e936 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -44,7 +44,6 @@
 
 enum {
 	ACTIVE_LIST,
-	FLUSHING_LIST,
 	INACTIVE_LIST,
 	PINNED_LIST,
 };
@@ -62,28 +61,11 @@
 
 	seq_printf(m, "gen: %d\n", info->gen);
 	seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(dev));
-#define B(x) seq_printf(m, #x ": %s\n", yesno(info->x))
-	B(is_mobile);
-	B(is_i85x);
-	B(is_i915g);
-	B(is_i945gm);
-	B(is_g33);
-	B(need_gfx_hws);
-	B(is_g4x);
-	B(is_pineview);
-	B(is_broadwater);
-	B(is_crestline);
-	B(has_fbc);
-	B(has_pipe_cxsr);
-	B(has_hotplug);
-	B(cursor_needs_physical);
-	B(has_overlay);
-	B(overlay_needs_physical);
-	B(supports_tv);
-	B(has_bsd_ring);
-	B(has_blt_ring);
-	B(has_llc);
-#undef B
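+/* Emit one "flag: yes/no" line per device info flag via the X-macro list. */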
+#define DEV_INFO_FLAG(x) seq_printf(m, #x ": %s\n", yesno(info->x))
+#define DEV_INFO_SEP ;
+	DEV_INFO_FLAGS;
+#undef DEV_INFO_FLAG
+#undef DEV_INFO_SEP
 
 	return 0;
 }
@@ -121,14 +103,15 @@
 static void
 describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
 {
-	seq_printf(m, "%p: %s%s %8zdKiB %04x %04x %d %d%s%s%s",
+	seq_printf(m, "%p: %s%s %8zdKiB %04x %04x %d %d %d%s%s%s",
 		   &obj->base,
 		   get_pin_flag(obj),
 		   get_tiling_flag(obj),
 		   obj->base.size / 1024,
 		   obj->base.read_domains,
 		   obj->base.write_domain,
-		   obj->last_rendering_seqno,
+		   obj->last_read_seqno,
+		   obj->last_write_seqno,
 		   obj->last_fenced_seqno,
 		   cache_level_str(obj->cache_level),
 		   obj->dirty ? " dirty" : "",
@@ -177,10 +160,6 @@
 		seq_printf(m, "Inactive:\n");
 		head = &dev_priv->mm.inactive_list;
 		break;
-	case FLUSHING_LIST:
-		seq_printf(m, "Flushing:\n");
-		head = &dev_priv->mm.flushing_list;
-		break;
 	default:
 		mutex_unlock(&dev->struct_mutex);
 		return -EINVAL;
@@ -238,7 +217,6 @@
 
 	size = count = mappable_size = mappable_count = 0;
 	count_objects(&dev_priv->mm.active_list, mm_list);
-	count_objects(&dev_priv->mm.flushing_list, mm_list);
 	seq_printf(m, "  %u [%u] active objects, %zu [%zu] bytes\n",
 		   count, mappable_count, size, mappable_size);
 
@@ -413,7 +391,7 @@
 {
 	if (ring->get_seqno) {
 		seq_printf(m, "Current sequence (%s): %d\n",
-			   ring->name, ring->get_seqno(ring));
+			   ring->name, ring->get_seqno(ring, false));
 	}
 }
 
@@ -630,12 +608,12 @@
 	seq_printf(m, "%s [%d]:\n", name, count);
 
 	while (count--) {
-		seq_printf(m, "  %08x %8u %04x %04x %08x%s%s%s%s%s%s%s",
+		seq_printf(m, "  %08x %8u %04x %04x %x %x%s%s%s%s%s%s%s",
 			   err->gtt_offset,
 			   err->size,
 			   err->read_domains,
 			   err->write_domain,
-			   err->seqno,
+			   err->rseqno, err->wseqno,
 			   pin_flag(err->pinned),
 			   tiling_flag(err->tiling),
 			   dirty_flag(err->dirty),
@@ -799,10 +777,14 @@
 	struct seq_file *m = filp->private_data;
 	struct i915_error_state_file_priv *error_priv = m->private;
 	struct drm_device *dev = error_priv->dev;
+	int ret;
 
 	DRM_DEBUG_DRIVER("Resetting error state\n");
 
-	mutex_lock(&dev->struct_mutex);
+	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	if (ret)
+		return ret;
+
 	i915_destroy_error_state(dev);
 	mutex_unlock(&dev->struct_mutex);
 
@@ -1292,7 +1274,8 @@
 
 	seq_printf(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\n");
 
-	for (gpu_freq = dev_priv->min_delay; gpu_freq <= dev_priv->max_delay;
+	for (gpu_freq = dev_priv->rps.min_delay;
+	     gpu_freq <= dev_priv->rps.max_delay;
 	     gpu_freq++) {
 		I915_WRITE(GEN6_PCODE_DATA, gpu_freq);
 		I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY |
@@ -1472,8 +1455,12 @@
 	struct drm_info_node *node = (struct drm_info_node *) m->private;
 	struct drm_device *dev = node->minor->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
+	int ret;
 
-	mutex_lock(&dev->struct_mutex);
+	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	if (ret)
+		return ret;
+
 	seq_printf(m, "bit6 swizzle for X-tiling = %s\n",
 		   swizzle_string(dev_priv->mm.bit_6_swizzle_x));
 	seq_printf(m, "bit6 swizzle for Y-tiling = %s\n",
@@ -1674,7 +1661,7 @@
 	struct drm_device *dev = filp->private_data;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	char buf[20];
-	int val = 0;
+	int val = 0, ret;
 
 	if (cnt > 0) {
 		if (cnt > sizeof(buf) - 1)
@@ -1689,7 +1676,10 @@
 
 	DRM_DEBUG_DRIVER("Stopping rings 0x%08x\n", val);
 
-	mutex_lock(&dev->struct_mutex);
+	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	if (ret)
+		return ret;
+
 	dev_priv->stop_rings = val;
 	mutex_unlock(&dev->struct_mutex);
 
@@ -1713,10 +1703,18 @@
 	struct drm_device *dev = filp->private_data;
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	char buf[80];
-	int len;
+	int len, ret;
+
+	if (!(IS_GEN6(dev) || IS_GEN7(dev)))
+		return -ENODEV;
+
+	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	if (ret)
+		return ret;
 
 	len = snprintf(buf, sizeof(buf),
-		       "max freq: %d\n", dev_priv->max_delay * 50);
+		       "max freq: %d\n", dev_priv->rps.max_delay * 50);
+	mutex_unlock(&dev->struct_mutex);
 
 	if (len > sizeof(buf))
 		len = sizeof(buf);
@@ -1733,7 +1731,10 @@
 	struct drm_device *dev = filp->private_data;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	char buf[20];
-	int val = 1;
+	int val = 1, ret;
+
+	if (!(IS_GEN6(dev) || IS_GEN7(dev)))
+		return -ENODEV;
 
 	if (cnt > 0) {
 		if (cnt > sizeof(buf) - 1)
@@ -1748,12 +1749,17 @@
 
 	DRM_DEBUG_DRIVER("Manually setting max freq to %d\n", val);
 
+	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	if (ret)
+		return ret;
+
 	/*
 	 * Turbo will still be enabled, but won't go above the set value.
 	 */
-	dev_priv->max_delay = val / 50;
+	dev_priv->rps.max_delay = val / 50;
 
 	gen6_set_rps(dev, val / 50);
+	mutex_unlock(&dev->struct_mutex);
 
 	return cnt;
 }
@@ -1773,10 +1779,18 @@
 	struct drm_device *dev = filp->private_data;
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	char buf[80];
-	int len;
+	int len, ret;
+
+	if (!(IS_GEN6(dev) || IS_GEN7(dev)))
+		return -ENODEV;
+
+	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	if (ret)
+		return ret;
 
 	len = snprintf(buf, sizeof(buf),
-		       "min freq: %d\n", dev_priv->min_delay * 50);
+		       "min freq: %d\n", dev_priv->rps.min_delay * 50);
+	mutex_unlock(&dev->struct_mutex);
 
 	if (len > sizeof(buf))
 		len = sizeof(buf);
@@ -1791,7 +1805,10 @@
 	struct drm_device *dev = filp->private_data;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	char buf[20];
-	int val = 1;
+	int val = 1, ret;
+
+	if (!(IS_GEN6(dev) || IS_GEN7(dev)))
+		return -ENODEV;
 
 	if (cnt > 0) {
 		if (cnt > sizeof(buf) - 1)
@@ -1806,12 +1823,17 @@
 
 	DRM_DEBUG_DRIVER("Manually setting min freq to %d\n", val);
 
+	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	if (ret)
+		return ret;
+
 	/*
 	 * Turbo will still be enabled, but won't go below the set value.
 	 */
-	dev_priv->min_delay = val / 50;
+	dev_priv->rps.min_delay = val / 50;
 
 	gen6_set_rps(dev, val / 50);
+	mutex_unlock(&dev->struct_mutex);
 
 	return cnt;
 }
@@ -1834,9 +1856,15 @@
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	char buf[80];
 	u32 snpcr;
-	int len;
+	int len, ret;
 
-	mutex_lock(&dev_priv->dev->struct_mutex);
+	if (!(IS_GEN6(dev) || IS_GEN7(dev)))
+		return -ENODEV;
+
+	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	if (ret)
+		return ret;
+
 	snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
 	mutex_unlock(&dev_priv->dev->struct_mutex);
 
@@ -1862,6 +1890,9 @@
 	u32 snpcr;
 	int val = 1;
 
+	if (!(IS_GEN6(dev) || IS_GEN7(dev)))
+		return -ENODEV;
+
 	if (cnt > 0) {
 		if (cnt > sizeof(buf) - 1)
 			return -EINVAL;
@@ -1925,16 +1956,11 @@
 {
 	struct drm_device *dev = inode->i_private;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	int ret;
 
 	if (INTEL_INFO(dev)->gen < 6)
 		return 0;
 
-	ret = mutex_lock_interruptible(&dev->struct_mutex);
-	if (ret)
-		return ret;
 	gen6_gt_force_wake_get(dev_priv);
-	mutex_unlock(&dev->struct_mutex);
 
 	return 0;
 }
@@ -1947,16 +1973,7 @@
 	if (INTEL_INFO(dev)->gen < 6)
 		return 0;
 
-	/*
-	 * It's bad that we can potentially hang userspace if struct_mutex gets
-	 * forever stuck.  However, if we cannot acquire this lock it means that
-	 * almost certainly the driver has hung, is not unload-able. Therefore
-	 * hanging here is probably a minor inconvenience not to be seen my
-	 * almost every user.
-	 */
-	mutex_lock(&dev->struct_mutex);
 	gen6_gt_force_wake_put(dev_priv);
-	mutex_unlock(&dev->struct_mutex);
 
 	return 0;
 }
@@ -2006,7 +2023,6 @@
 	{"i915_gem_gtt", i915_gem_gtt_info, 0},
 	{"i915_gem_pinned", i915_gem_gtt_info, 0, (void *) PINNED_LIST},
 	{"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST},
-	{"i915_gem_flushing", i915_gem_object_list_info, 0, (void *) FLUSHING_LIST},
 	{"i915_gem_inactive", i915_gem_object_list_info, 0, (void *) INACTIVE_LIST},
 	{"i915_gem_pageflip", i915_gem_pageflip_info, 0},
 	{"i915_gem_request", i915_gem_request_info, 0},
@@ -2067,6 +2083,7 @@
 				  &i915_cache_sharing_fops);
 	if (ret)
 		return ret;
+
 	ret = i915_debugfs_create(minor->debugfs_root, minor,
 				  "i915_ring_stop",
 				  &i915_ring_stop_fops);
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 9cf7dfe..0a1b64f 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -1009,6 +1009,9 @@
 	case I915_PARAM_HAS_WAIT_TIMEOUT:
 		value = 1;
 		break;
+	case I915_PARAM_HAS_SEMAPHORES:
+		value = i915_semaphore_is_enabled(dev);
+		break;
 	default:
 		DRM_DEBUG_DRIVER("Unknown parameter %d\n",
 				 param->param);
@@ -1425,6 +1428,21 @@
 	kfree(ap);
 }
 
+static void i915_dump_device_info(struct drm_i915_private *dev_priv)
+{
+	const struct intel_device_info *info = dev_priv->info;
+
+#define DEV_INFO_FLAG(name) info->name ? #name "," : ""
+#define DEV_INFO_SEP ,
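+/* DEV_INFO_FLAGS expands below to one "name," or "" argument per flag. */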
+	DRM_DEBUG_DRIVER("i915 device info: gen=%i, pciid=0x%04x flags="
+			 "%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s",
+			 info->gen,
+			 dev_priv->dev->pdev->device,
+			 DEV_INFO_FLAGS);
+#undef DEV_INFO_FLAG
+#undef DEV_INFO_SEP
+}
+
 /**
  * i915_driver_load - setup chip and create an initial config
  * @dev: DRM device
@@ -1449,7 +1467,6 @@
 	if (info->gen >= 6 && !drm_core_check_feature(dev, DRIVER_MODESET))
 		return -ENODEV;
 
-
 	/* i915 has 4 more counters */
 	dev->counters += 4;
 	dev->types[6] = _DRM_STAT_IRQ;
@@ -1465,6 +1482,8 @@
 	dev_priv->dev = dev;
 	dev_priv->info = info;
 
+	i915_dump_device_info(dev_priv);
+
 	if (i915_get_bridge_dev(dev)) {
 		ret = -EIO;
 		goto free_priv;
@@ -1586,7 +1605,7 @@
 
 	spin_lock_init(&dev_priv->irq_lock);
 	spin_lock_init(&dev_priv->error_lock);
-	spin_lock_init(&dev_priv->rps_lock);
+	spin_lock_init(&dev_priv->rps.lock);
 
 	if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
 		dev_priv->num_pipe = 3;
@@ -1835,6 +1854,8 @@
 	DRM_IOCTL_DEF_DRV(I915_GEM_PIN, i915_gem_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
 	DRM_IOCTL_DEF_DRV(I915_GEM_UNPIN, i915_gem_unpin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
 	DRM_IOCTL_DEF_DRV(I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(I915_GEM_SET_CACHEING, i915_gem_set_cacheing_ioctl, DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(I915_GEM_GET_CACHEING, i915_gem_get_cacheing_ioctl, DRM_UNLOCKED),
 	DRM_IOCTL_DEF_DRV(I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH|DRM_UNLOCKED),
 	DRM_IOCTL_DEF_DRV(I915_GEM_ENTERVT, i915_gem_entervt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
 	DRM_IOCTL_DEF_DRV(I915_GEM_LEAVEVT, i915_gem_leavevt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
@@ -1857,6 +1878,7 @@
 	DRM_IOCTL_DEF_DRV(I915_GEM_WAIT, i915_gem_wait_ioctl, DRM_AUTH|DRM_UNLOCKED),
 	DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_CREATE, i915_gem_context_create_ioctl, DRM_UNLOCKED),
 	DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_DESTROY, i915_gem_context_destroy_ioctl, DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(I915_REG_READ, i915_reg_read_ioctl, DRM_UNLOCKED),
 };
 
 int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls);
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index a24ffbe..7ebb13b 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -1060,7 +1060,7 @@
 	 * This should make it easier to transition modules over to the
 	 * new register block scheme, since we can do it incrementally.
 	 */
-	if (reg >= 0x180000)
+	if (reg >= VLV_DISPLAY_BASE)
 		return false;
 
 	if (reg >= RENDER_RING_BASE &&
@@ -1180,3 +1180,49 @@
 __i915_write(32, l)
 __i915_write(64, q)
 #undef __i915_write
+
+static const struct register_whitelist {
+	uint64_t offset;
+	uint32_t size;
+	uint32_t gen_bitmask; /* supported gens: 0x10 for gen4, 0x30 for gens 4 and 5, etc. */
+} whitelist[] = {
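+	/* 0xF0 = gens 4-7: bit (1 << gen) set for each supported generation */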
+	{ RING_TIMESTAMP(RENDER_RING_BASE), 8, 0xF0 },
+};
+
+int i915_reg_read_ioctl(struct drm_device *dev,
+			void *data, struct drm_file *file)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_reg_read *reg = data;
+	struct register_whitelist const *entry = whitelist;
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(whitelist); i++, entry++) {
+		if (entry->offset == reg->offset &&
+		    (1 << INTEL_INFO(dev)->gen & entry->gen_bitmask))
+			break;
+	}
+
+	if (i == ARRAY_SIZE(whitelist))
+		return -EINVAL;
+
+	switch (entry->size) {
+	case 8:
+		reg->val = I915_READ64(reg->offset);
+		break;
+	case 4:
+		reg->val = I915_READ(reg->offset);
+		break;
+	case 2:
+		reg->val = I915_READ16(reg->offset);
+		break;
+	case 1:
+		reg->val = I915_READ8(reg->offset);
+		break;
+	default:
+		WARN_ON(1);
+		return -EINVAL;
+	}
+
+	return 0;
+}
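+
+/*
+ * Illustrative userspace sketch (assumes the DRM_IOCTL_I915_REG_READ
+ * wrapper from the matching i915_drm.h/libdrm update; 0x2358 is
+ * RING_TIMESTAMP(RENDER_RING_BASE)):
+ *
+ *	struct drm_i915_reg_read rr = { .offset = 0x2358 };
+ *
+ *	if (drmIoctl(fd, DRM_IOCTL_I915_REG_READ, &rr) == 0)
+ *		printf("render ring timestamp: %llu\n",
+ *		       (unsigned long long)rr.val);
+ */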
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 627fe35..261fe21 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -109,6 +109,7 @@
 
 #define WATCH_COHERENCY	0
 #define WATCH_LISTS	0
+#define WATCH_GTT	0
 
 #define I915_GEM_PHYS_CURSOR_0 1
 #define I915_GEM_PHYS_CURSOR_1 2
@@ -221,7 +222,7 @@
 	struct drm_i915_error_buffer {
 		u32 size;
 		u32 name;
-		u32 seqno;
+		u32 rseqno, wseqno;
 		u32 gtt_offset;
 		u32 read_domains;
 		u32 write_domain;
@@ -248,7 +249,6 @@
 	void (*update_wm)(struct drm_device *dev);
 	void (*update_sprite_wm)(struct drm_device *dev, int pipe,
 				 uint32_t sprite_width, int pixel_size);
-	void (*sanitize_pm)(struct drm_device *dev);
 	void (*update_linetime_wm)(struct drm_device *dev, int pipe,
 				 struct drm_display_mode *mode);
 	int (*crtc_mode_set)(struct drm_crtc *crtc,
@@ -279,6 +279,32 @@
 	void (*force_wake_put)(struct drm_i915_private *dev_priv);
 };
 
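+/*
+ * X-macro list of device info flags: callers define DEV_INFO_FLAG() and
+ * DEV_INFO_SEP before expanding DEV_INFO_FLAGS (see i915_dump_device_info()
+ * and the debugfs capabilities file).
+ */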
+#define DEV_INFO_FLAGS \
+	DEV_INFO_FLAG(is_mobile) DEV_INFO_SEP \
+	DEV_INFO_FLAG(is_i85x) DEV_INFO_SEP \
+	DEV_INFO_FLAG(is_i915g) DEV_INFO_SEP \
+	DEV_INFO_FLAG(is_i945gm) DEV_INFO_SEP \
+	DEV_INFO_FLAG(is_g33) DEV_INFO_SEP \
+	DEV_INFO_FLAG(need_gfx_hws) DEV_INFO_SEP \
+	DEV_INFO_FLAG(is_g4x) DEV_INFO_SEP \
+	DEV_INFO_FLAG(is_pineview) DEV_INFO_SEP \
+	DEV_INFO_FLAG(is_broadwater) DEV_INFO_SEP \
+	DEV_INFO_FLAG(is_crestline) DEV_INFO_SEP \
+	DEV_INFO_FLAG(is_ivybridge) DEV_INFO_SEP \
+	DEV_INFO_FLAG(is_valleyview) DEV_INFO_SEP \
+	DEV_INFO_FLAG(is_haswell) DEV_INFO_SEP \
+	DEV_INFO_FLAG(has_force_wake) DEV_INFO_SEP \
+	DEV_INFO_FLAG(has_fbc) DEV_INFO_SEP \
+	DEV_INFO_FLAG(has_pipe_cxsr) DEV_INFO_SEP \
+	DEV_INFO_FLAG(has_hotplug) DEV_INFO_SEP \
+	DEV_INFO_FLAG(cursor_needs_physical) DEV_INFO_SEP \
+	DEV_INFO_FLAG(has_overlay) DEV_INFO_SEP \
+	DEV_INFO_FLAG(overlay_needs_physical) DEV_INFO_SEP \
+	DEV_INFO_FLAG(supports_tv) DEV_INFO_SEP \
+	DEV_INFO_FLAG(has_bsd_ring) DEV_INFO_SEP \
+	DEV_INFO_FLAG(has_blt_ring) DEV_INFO_SEP \
+	DEV_INFO_FLAG(has_llc)
+
 struct intel_device_info {
 	u8 gen;
 	u8 is_mobile:1;
@@ -696,17 +722,6 @@
 		struct list_head active_list;
 
 		/**
-		 * List of objects which are not in the ringbuffer but which
-		 * still have a write_domain which needs to be flushed before
-		 * unbinding.
-		 *
-		 * last_rendering_seqno is 0 while an object is in this list.
-		 *
-		 * A reference is held on the buffer while on this list.
-		 */
-		struct list_head flushing_list;
-
-		/**
 		 * LRU list of objects which are not in the ringbuffer and
 		 * are ready to unbind, but are still in the GTT.
 		 *
@@ -796,9 +811,6 @@
 	bool lvds_downclock_avail;
 	/* indicates the reduced downclock for LVDS*/
 	int lvds_downclock;
-	struct work_struct idle_work;
-	struct timer_list idle_timer;
-	bool busy;
 	u16 orig_clock;
 	int child_dev_num;
 	struct child_device_config *child_dev;
@@ -807,9 +819,21 @@
 
 	bool mchbar_need_disable;
 
-	struct work_struct rps_work;
-	spinlock_t rps_lock;
-	u32 pm_iir;
+	/* gen6+ rps state */
+	struct {
+		struct work_struct work;
+		u32 pm_iir;
+		/* lock - irqsave spinlock that protects the work_struct and
+		 * pm_iir. */
+		spinlock_t lock;
+
+		/* The below variables and all the rps hw state are protected
+		 * by dev->struct_mutex. */
+		u8 cur_delay;
+		u8 min_delay;
+		u8 max_delay;
+	} rps;
 
 	u8 cur_delay;
 	u8 min_delay;
@@ -826,7 +850,6 @@
 	int c_m;
 	int r_t;
 	u8 corr;
-	spinlock_t *mchdev_lock;
 
 	enum no_fbc_reason no_fbc_reason;
 
@@ -861,9 +884,9 @@
 };
 
 enum i915_cache_level {
-	I915_CACHE_NONE,
+	I915_CACHE_NONE = 0,
 	I915_CACHE_LLC,
-	I915_CACHE_LLC_MLC, /* gen6+ */
+	I915_CACHE_LLC_MLC, /* gen6+, in docs at least! */
 };
 
 struct drm_i915_gem_object {
@@ -873,18 +896,16 @@
 	struct drm_mm_node *gtt_space;
 	struct list_head gtt_list;
 
-	/** This object's place on the active/flushing/inactive lists */
+	/** This object's place on the active/inactive lists */
 	struct list_head ring_list;
 	struct list_head mm_list;
-	/** This object's place on GPU write list */
-	struct list_head gpu_write_list;
 	/** This object's place in the batchbuffer or on the eviction list */
 	struct list_head exec_list;
 
 	/**
-	 * This is set if the object is on the active or flushing lists
-	 * (has pending rendering), and is not set if it's on inactive (ready
-	 * to be unbound).
+	 * This is set if the object is on the active lists (has pending
+	 * rendering and so a non-zero seqno), and is not set if it is on
+	 * the inactive (ready to be unbound) list.
 	 */
 	unsigned int active:1;
 
@@ -895,12 +916,6 @@
 	unsigned int dirty:1;
 
 	/**
-	 * This is set if the object has been written to since the last
-	 * GPU flush.
-	 */
-	unsigned int pending_gpu_write:1;
-
-	/**
 	 * Fence register bits (if any) for this object.  Will be set
 	 * as needed when mapped into the GTT.
 	 * Protected by dev->struct_mutex.
@@ -992,7 +1007,8 @@
 	struct intel_ring_buffer *ring;
 
 	/** Breadcrumb of last rendering to the buffer. */
-	uint32_t last_rendering_seqno;
+	uint32_t last_read_seqno;
+	uint32_t last_write_seqno;
 	/** Breadcrumb of last fenced GPU access to the buffer. */
 	uint32_t last_fenced_seqno;
 
@@ -1135,6 +1151,8 @@
 
 #define HAS_FORCE_WAKE(dev) (INTEL_INFO(dev)->has_force_wake)
 
+#define HAS_L3_GPU_CACHE(dev) (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
+
 #include "i915_trace.h"
 
 /**
@@ -1256,6 +1274,10 @@
 			 struct drm_file *file_priv);
 int i915_gem_busy_ioctl(struct drm_device *dev, void *data,
 			struct drm_file *file_priv);
+int i915_gem_get_cacheing_ioctl(struct drm_device *dev, void *data,
+				struct drm_file *file);
+int i915_gem_set_cacheing_ioctl(struct drm_device *dev, void *data,
+				struct drm_file *file);
 int i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
 			    struct drm_file *file_priv);
 int i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
@@ -1274,9 +1296,6 @@
 			struct drm_file *file_priv);
 void i915_gem_load(struct drm_device *dev);
 int i915_gem_init_object(struct drm_gem_object *obj);
-int __must_check i915_gem_flush_ring(struct intel_ring_buffer *ring,
-				     uint32_t invalidate_domains,
-				     uint32_t flush_domains);
 struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
 						  size_t size);
 void i915_gem_free_object(struct drm_gem_object *obj);
@@ -1291,7 +1310,6 @@
 int i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj,
 				  gfp_t gfpmask);
 int __must_check i915_mutex_lock_interruptible(struct drm_device *dev);
-int __must_check i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj);
 int i915_gem_object_sync(struct drm_i915_gem_object *obj,
 			 struct intel_ring_buffer *to);
 void i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
@@ -1358,9 +1376,9 @@
 void i915_gem_cleanup_ringbuffer(struct drm_device *dev);
 int __must_check i915_gpu_idle(struct drm_device *dev);
 int __must_check i915_gem_idle(struct drm_device *dev);
-int __must_check i915_add_request(struct intel_ring_buffer *ring,
-				  struct drm_file *file,
-				  struct drm_i915_gem_request *request);
+int i915_add_request(struct intel_ring_buffer *ring,
+		     struct drm_file *file,
+		     struct drm_i915_gem_request *request);
 int __must_check i915_wait_seqno(struct intel_ring_buffer *ring,
 				 uint32_t seqno);
 int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
@@ -1429,7 +1447,9 @@
 
 /* i915_gem_evict.c */
 int __must_check i915_gem_evict_something(struct drm_device *dev, int min_size,
-					  unsigned alignment, bool mappable);
+					  unsigned alignment,
+					  unsigned cache_level,
+					  bool mappable);
 int i915_gem_evict_everything(struct drm_device *dev, bool purgeable_only);
 
 /* i915_gem_stolen.c */
@@ -1529,6 +1549,8 @@
 extern int intel_enable_rc6(const struct drm_device *dev);
 
 extern bool i915_semaphore_is_enabled(struct drm_device *dev);
+int i915_reg_read_ioctl(struct drm_device *dev, void *data,
+			struct drm_file *file);
 
 /* overlay */
 #ifdef CONFIG_DEBUG_FS
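
The rps fields gathered into the new sub-struct above follow a two-lock
scheme: rps.lock is an irqsave spinlock guarding only what the interrupt
handler touches (rps.work and rps.pm_iir), while the delay values stay
under dev->struct_mutex. A minimal sketch of that split, using the stock
kernel locking primitives and not reproducing the driver code itself:

	static void rps_irq_side(drm_i915_private_t *dev_priv, u32 pm_iir)
	{
		unsigned long flags;

		spin_lock_irqsave(&dev_priv->rps.lock, flags);
		dev_priv->rps.pm_iir |= pm_iir;		/* spinlock-protected */
		spin_unlock_irqrestore(&dev_priv->rps.lock, flags);
	}

	static void rps_process_side(drm_i915_private_t *dev_priv, u8 new_delay)
	{
		mutex_lock(&dev_priv->dev->struct_mutex);
		dev_priv->rps.cur_delay = new_delay;	/* struct_mutex-protected */
		mutex_unlock(&dev_priv->dev->struct_mutex);
	}
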
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 489e2b1..31054fa 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -37,7 +37,6 @@
 #include <linux/pci.h>
 #include <linux/dma-buf.h>
 
-static __must_check int i915_gem_object_flush_gpu_write_domain(struct drm_i915_gem_object *obj);
 static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj);
 static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj);
 static __must_check int i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
@@ -1441,7 +1440,7 @@
 	list_move_tail(&obj->mm_list, &dev_priv->mm.active_list);
 	list_move_tail(&obj->ring_list, &ring->active_list);
 
-	obj->last_rendering_seqno = seqno;
+	obj->last_read_seqno = seqno;
 
 	if (obj->fenced_gpu_access) {
 		obj->last_fenced_seqno = seqno;
@@ -1458,42 +1457,30 @@
 }
 
 static void
-i915_gem_object_move_off_active(struct drm_i915_gem_object *obj)
-{
-	list_del_init(&obj->ring_list);
-	obj->last_rendering_seqno = 0;
-	obj->last_fenced_seqno = 0;
-}
-
-static void
-i915_gem_object_move_to_flushing(struct drm_i915_gem_object *obj)
-{
-	struct drm_device *dev = obj->base.dev;
-	drm_i915_private_t *dev_priv = dev->dev_private;
-
-	BUG_ON(!obj->active);
-	list_move_tail(&obj->mm_list, &dev_priv->mm.flushing_list);
-
-	i915_gem_object_move_off_active(obj);
-}
-
-static void
 i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
 {
 	struct drm_device *dev = obj->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
+	BUG_ON(obj->base.write_domain & ~I915_GEM_GPU_DOMAINS);
+	BUG_ON(!obj->active);
+
+	if (obj->pin_count) /* are we a framebuffer? */
+		intel_mark_fb_idle(obj);
+
 	list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
 
-	BUG_ON(!list_empty(&obj->gpu_write_list));
-	BUG_ON(!obj->active);
+	list_del_init(&obj->ring_list);
 	obj->ring = NULL;
 
-	i915_gem_object_move_off_active(obj);
+	obj->last_read_seqno = 0;
+	obj->last_write_seqno = 0;
+	obj->base.write_domain = 0;
+
+	obj->last_fenced_seqno = 0;
 	obj->fenced_gpu_access = false;
 
 	obj->active = 0;
-	obj->pending_gpu_write = false;
 	drm_gem_object_unreference(&obj->base);
 
 	WARN_ON(i915_verify_lists(dev));
@@ -1525,30 +1512,6 @@
 	return obj->madv == I915_MADV_DONTNEED;
 }
 
-static void
-i915_gem_process_flushing_list(struct intel_ring_buffer *ring,
-			       uint32_t flush_domains)
-{
-	struct drm_i915_gem_object *obj, *next;
-
-	list_for_each_entry_safe(obj, next,
-				 &ring->gpu_write_list,
-				 gpu_write_list) {
-		if (obj->base.write_domain & flush_domains) {
-			uint32_t old_write_domain = obj->base.write_domain;
-
-			obj->base.write_domain = 0;
-			list_del_init(&obj->gpu_write_list);
-			i915_gem_object_move_to_active(obj, ring,
-						       i915_gem_next_request_seqno(ring));
-
-			trace_i915_gem_object_change_domain(obj,
-							    obj->base.read_domains,
-							    old_write_domain);
-		}
-	}
-}
-
 static u32
 i915_gem_get_seqno(struct drm_device *dev)
 {
@@ -1589,15 +1552,16 @@
 	 * is that the flush _must_ happen before the next request, no matter
 	 * what.
 	 */
-	if (ring->gpu_caches_dirty) {
-		ret = i915_gem_flush_ring(ring, 0, I915_GEM_GPU_DOMAINS);
-		if (ret)
-			return ret;
+	ret = intel_ring_flush_all_caches(ring);
+	if (ret)
+		return ret;
 
-		ring->gpu_caches_dirty = false;
+	if (request == NULL) {
+		request = kmalloc(sizeof(*request), GFP_KERNEL);
+		if (request == NULL)
+			return -ENOMEM;
 	}
 
-	BUG_ON(request == NULL);
 	seqno = i915_gem_next_request_seqno(ring);
 
 	/* Record the position of the start of the request so that
@@ -1608,8 +1572,10 @@
 	request_ring_position = intel_ring_get_tail(ring);
 
 	ret = ring->add_request(ring, &seqno);
-	if (ret)
-	    return ret;
+	if (ret) {
+		kfree(request);
+		return ret;
+	}
 
 	trace_i915_gem_request_add(ring, seqno);
 
@@ -1619,6 +1585,7 @@
 	request->emitted_jiffies = jiffies;
 	was_empty = list_empty(&ring->request_list);
 	list_add_tail(&request->list, &ring->request_list);
+	request->file_priv = NULL;
 
 	if (file) {
 		struct drm_i915_file_private *file_priv = file->driver_priv;
@@ -1638,13 +1605,13 @@
 				  jiffies +
 				  msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD));
 		}
-		if (was_empty)
+		if (was_empty) {
 			queue_delayed_work(dev_priv->wq,
 					   &dev_priv->mm.retire_work, HZ);
+			intel_mark_busy(dev_priv->dev);
+		}
 	}
 
-	WARN_ON(!list_empty(&ring->gpu_write_list));
-
 	return 0;
 }
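
With the hunk above, i915_add_request() takes over allocation of the
request when the caller passes NULL, and frees it again if emitting the
request fails. Fire-and-forget callers therefore collapse to a single
call; a sketch of the new contract (the error code can still be checked
where it matters):

	/* Sketch: emit any pending flush as a request without the old
	 * kzalloc/kfree dance at every call site. */
	static void flush_dirty_caches(struct intel_ring_buffer *ring)
	{
		if (ring->gpu_caches_dirty)
			(void)i915_add_request(ring, NULL, NULL);
	}
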
 
@@ -1686,8 +1653,6 @@
 				       struct drm_i915_gem_object,
 				       ring_list);
 
-		obj->base.write_domain = 0;
-		list_del_init(&obj->gpu_write_list);
 		i915_gem_object_move_to_inactive(obj);
 	}
 }
@@ -1723,20 +1688,6 @@
 	for_each_ring(ring, dev_priv, i)
 		i915_gem_reset_ring_lists(dev_priv, ring);
 
-	/* Remove anything from the flushing lists. The GPU cache is likely
-	 * to be lost on reset along with the data, so simply move the
-	 * lost bo to the inactive list.
-	 */
-	while (!list_empty(&dev_priv->mm.flushing_list)) {
-		obj = list_first_entry(&dev_priv->mm.flushing_list,
-				      struct drm_i915_gem_object,
-				      mm_list);
-
-		obj->base.write_domain = 0;
-		list_del_init(&obj->gpu_write_list);
-		i915_gem_object_move_to_inactive(obj);
-	}
-
 	/* Move everything out of the GPU domains to ensure we do any
 	 * necessary invalidation upon reuse.
 	 */
@@ -1765,7 +1716,7 @@
 
 	WARN_ON(i915_verify_lists(ring->dev));
 
-	seqno = ring->get_seqno(ring);
+	seqno = ring->get_seqno(ring, true);
 
 	for (i = 0; i < ARRAY_SIZE(ring->sync_seqno); i++)
 		if (seqno >= ring->sync_seqno[i])
@@ -1804,13 +1755,10 @@
 				      struct drm_i915_gem_object,
 				      ring_list);
 
-		if (!i915_seqno_passed(seqno, obj->last_rendering_seqno))
+		if (!i915_seqno_passed(seqno, obj->last_read_seqno))
 			break;
 
-		if (obj->base.write_domain != 0)
-			i915_gem_object_move_to_flushing(obj);
-		else
-			i915_gem_object_move_to_inactive(obj);
+		i915_gem_object_move_to_inactive(obj);
 	}
 
 	if (unlikely(ring->trace_irq_seqno &&
@@ -1859,20 +1807,16 @@
 	 */
 	idle = true;
 	for_each_ring(ring, dev_priv, i) {
-		if (ring->gpu_caches_dirty) {
-			struct drm_i915_gem_request *request;
-
-			request = kzalloc(sizeof(*request), GFP_KERNEL);
-			if (request == NULL ||
-			    i915_add_request(ring, NULL, request))
-			    kfree(request);
-		}
+		if (ring->gpu_caches_dirty)
+			i915_add_request(ring, NULL, NULL);
 
 		idle &= list_empty(&ring->request_list);
 	}
 
 	if (!dev_priv->mm.suspended && !idle)
 		queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
+	if (idle)
+		intel_mark_idle(dev);
 
 	mutex_unlock(&dev->struct_mutex);
 }
@@ -1913,25 +1857,13 @@
 static int
 i915_gem_check_olr(struct intel_ring_buffer *ring, u32 seqno)
 {
-	int ret = 0;
+	int ret;
 
 	BUG_ON(!mutex_is_locked(&ring->dev->struct_mutex));
 
-	if (seqno == ring->outstanding_lazy_request) {
-		struct drm_i915_gem_request *request;
-
-		request = kzalloc(sizeof(*request), GFP_KERNEL);
-		if (request == NULL)
-			return -ENOMEM;
-
-		ret = i915_add_request(ring, NULL, request);
-		if (ret) {
-			kfree(request);
-			return ret;
-		}
-
-		BUG_ON(seqno != request->seqno);
-	}
+	ret = 0;
+	if (seqno == ring->outstanding_lazy_request)
+		ret = i915_add_request(ring, NULL, NULL);
 
 	return ret;
 }
@@ -1956,7 +1888,7 @@
 	bool wait_forever = true;
 	int ret;
 
-	if (i915_seqno_passed(ring->get_seqno(ring), seqno))
+	if (i915_seqno_passed(ring->get_seqno(ring, true), seqno))
 		return 0;
 
 	trace_i915_gem_request_wait_begin(ring, seqno);
@@ -1975,7 +1907,7 @@
 	getrawmonotonic(&before);
 
 #define EXIT_COND \
-	(i915_seqno_passed(ring->get_seqno(ring), seqno) || \
+	(i915_seqno_passed(ring->get_seqno(ring, false), seqno) || \
 	atomic_read(&dev_priv->mm.wedged))
 	do {
 		if (interruptible)
@@ -2046,26 +1978,37 @@
  * Ensures that all rendering to the object has completed and the object is
  * safe to unbind from the GTT or access from the CPU.
  */
-int
-i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj)
+static __must_check int
+i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
+			       bool readonly)
 {
+	u32 seqno;
 	int ret;
 
-	/* This function only exists to support waiting for existing rendering,
-	 * not for emitting required flushes.
-	 */
-	BUG_ON((obj->base.write_domain & I915_GEM_GPU_DOMAINS) != 0);
-
 	/* If there is rendering queued on the buffer being evicted, wait for
 	 * it.
 	 */
-	if (obj->active) {
-		ret = i915_wait_seqno(obj->ring, obj->last_rendering_seqno);
-		if (ret)
-			return ret;
-		i915_gem_retire_requests_ring(obj->ring);
+	if (readonly)
+		seqno = obj->last_write_seqno;
+	else
+		seqno = obj->last_read_seqno;
+	if (seqno == 0)
+		return 0;
+
+	ret = i915_wait_seqno(obj->ring, seqno);
+	if (ret)
+		return ret;
+
+	/* Manually manage the write flush as we may not yet have retired
+	 * the buffer.
+	 */
+	if (obj->last_write_seqno &&
+	    i915_seqno_passed(seqno, obj->last_write_seqno)) {
+		obj->last_write_seqno = 0;
+		obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;
 	}
 
+	i915_gem_retire_requests_ring(obj->ring);
 	return 0;
 }
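
The readonly parameter above exploits the new read/write seqno split: a
read-only access only has to wait for the last GPU write to complete,
while a write must also wait for all outstanding reads. Reduced to its
core, the selection is:

	/* Which breadcrumb must pass before the CPU may proceed. */
	static u32 seqno_to_wait_for(const struct drm_i915_gem_object *obj,
				     bool readonly)
	{
		return readonly ? obj->last_write_seqno : obj->last_read_seqno;
	}
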
 
@@ -2080,14 +2023,10 @@
 	int ret;
 
 	if (obj->active) {
-		ret = i915_gem_object_flush_gpu_write_domain(obj);
+		ret = i915_gem_check_olr(obj->ring, obj->last_read_seqno);
 		if (ret)
 			return ret;
 
-		ret = i915_gem_check_olr(obj->ring,
-					 obj->last_rendering_seqno);
-		if (ret)
-			return ret;
 		i915_gem_retire_requests_ring(obj->ring);
 	}
 
@@ -2147,7 +2086,7 @@
 		goto out;
 
 	if (obj->active) {
-		seqno = obj->last_rendering_seqno;
+		seqno = obj->last_read_seqno;
 		ring = obj->ring;
 	}
 
@@ -2202,11 +2141,11 @@
 		return 0;
 
 	if (to == NULL || !i915_semaphore_is_enabled(obj->base.dev))
-		return i915_gem_object_wait_rendering(obj);
+		return i915_gem_object_wait_rendering(obj, false);
 
 	idx = intel_ring_sync_index(from, to);
 
-	seqno = obj->last_rendering_seqno;
+	seqno = obj->last_read_seqno;
 	if (seqno <= from->sync_seqno[idx])
 		return 0;
 
@@ -2318,42 +2257,11 @@
 	return ret;
 }
 
-int
-i915_gem_flush_ring(struct intel_ring_buffer *ring,
-		    uint32_t invalidate_domains,
-		    uint32_t flush_domains)
-{
-	int ret;
-
-	if (((invalidate_domains | flush_domains) & I915_GEM_GPU_DOMAINS) == 0)
-		return 0;
-
-	trace_i915_gem_ring_flush(ring, invalidate_domains, flush_domains);
-
-	ret = ring->flush(ring, invalidate_domains, flush_domains);
-	if (ret)
-		return ret;
-
-	if (flush_domains & I915_GEM_GPU_DOMAINS)
-		i915_gem_process_flushing_list(ring, flush_domains);
-
-	return 0;
-}
-
 static int i915_ring_idle(struct intel_ring_buffer *ring)
 {
-	int ret;
-
-	if (list_empty(&ring->gpu_write_list) && list_empty(&ring->active_list))
+	if (list_empty(&ring->active_list))
 		return 0;
 
-	if (!list_empty(&ring->gpu_write_list)) {
-		ret = i915_gem_flush_ring(ring,
-				    I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
-		if (ret)
-			return ret;
-	}
-
 	return i915_wait_seqno(ring, i915_gem_next_request_seqno(ring));
 }
 
@@ -2372,10 +2280,6 @@
 		ret = i915_ring_idle(ring);
 		if (ret)
 			return ret;
-
-		/* Is the device fubar? */
-		if (WARN_ON(!list_empty(&ring->gpu_write_list)))
-			return -EBUSY;
 	}
 
 	return 0;
@@ -2548,21 +2452,8 @@
 static int
 i915_gem_object_flush_fence(struct drm_i915_gem_object *obj)
 {
-	int ret;
-
-	if (obj->fenced_gpu_access) {
-		if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) {
-			ret = i915_gem_flush_ring(obj->ring,
-						  0, obj->base.write_domain);
-			if (ret)
-				return ret;
-		}
-
-		obj->fenced_gpu_access = false;
-	}
-
 	if (obj->last_fenced_seqno) {
-		ret = i915_wait_seqno(obj->ring, obj->last_fenced_seqno);
+		int ret = i915_wait_seqno(obj->ring, obj->last_fenced_seqno);
 		if (ret)
 			return ret;
 
@@ -2575,6 +2466,7 @@
 	if (obj->base.read_domains & I915_GEM_DOMAIN_GTT)
 		mb();
 
+	obj->fenced_gpu_access = false;
 	return 0;
 }
 
@@ -2694,6 +2586,76 @@
 	return 0;
 }
 
+static bool i915_gem_valid_gtt_space(struct drm_device *dev,
+				     struct drm_mm_node *gtt_space,
+				     unsigned long cache_level)
+{
+	struct drm_mm_node *other;
+
+	/* On non-LLC machines we have to be careful when putting differing
+	 * types of snoopable memory together to avoid the prefetcher
+	 * crossing memory domains and dying.
+	 */
+	if (HAS_LLC(dev))
+		return true;
+
+	if (gtt_space == NULL)
+		return true;
+
+	if (list_empty(&gtt_space->node_list))
+		return true;
+
+	other = list_entry(gtt_space->node_list.prev, struct drm_mm_node, node_list);
+	if (other->allocated && !other->hole_follows && other->color != cache_level)
+		return false;
+
+	other = list_entry(gtt_space->node_list.next, struct drm_mm_node, node_list);
+	if (other->allocated && !gtt_space->hole_follows && other->color != cache_level)
+		return false;
+
+	return true;
+}
+
+static void i915_gem_verify_gtt(struct drm_device *dev)
+{
+#if WATCH_GTT
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_gem_object *obj;
+	int err = 0;
+
+	list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) {
+		if (obj->gtt_space == NULL) {
+			printk(KERN_ERR "object found on GTT list with no space reserved\n");
+			err++;
+			continue;
+		}
+
+		if (obj->cache_level != obj->gtt_space->color) {
+			printk(KERN_ERR "object reserved space [%08lx, %08lx] with wrong color, cache_level=%x, color=%lx\n",
+			       obj->gtt_space->start,
+			       obj->gtt_space->start + obj->gtt_space->size,
+			       obj->cache_level,
+			       obj->gtt_space->color);
+			err++;
+			continue;
+		}
+
+		if (!i915_gem_valid_gtt_space(dev,
+					      obj->gtt_space,
+					      obj->cache_level)) {
+			printk(KERN_ERR "invalid GTT space found at [%08lx, %08lx] - color=%x\n",
+			       obj->gtt_space->start,
+			       obj->gtt_space->start + obj->gtt_space->size,
+			       obj->cache_level);
+			err++;
+			continue;
+		}
+	}
+
+	WARN_ON(err);
+#endif
+}
+
 /**
  * Finds free space in the GTT aperture and binds the object there.
  */
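
i915_gem_valid_gtt_space() above encodes the non-LLC placement rule: a
node may not directly abut an allocated neighbour of a different cache
level; a hole must separate them. A standalone model of that rule, with
deliberately simplified types for illustration:

	struct node_model { int allocated, hole_follows, color; };

	/* candidate is to be placed between prev and next */
	static int placement_ok(const struct node_model *prev,
				const struct node_model *candidate,
				const struct node_model *next)
	{
		if (prev->allocated && !prev->hole_follows &&
		    prev->color != candidate->color)
			return 0;	/* no hole between prev and candidate */
		if (next->allocated && !candidate->hole_follows &&
		    next->color != candidate->color)
			return 0;	/* no hole between candidate and next */
		return 1;
	}
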
@@ -2748,36 +2710,47 @@
  search_free:
 	if (map_and_fenceable)
 		free_space =
-			drm_mm_search_free_in_range(&dev_priv->mm.gtt_space,
-						    size, alignment,
-						    0, dev_priv->mm.gtt_mappable_end,
-						    0);
+			drm_mm_search_free_in_range_color(&dev_priv->mm.gtt_space,
+							  size, alignment, obj->cache_level,
+							  0, dev_priv->mm.gtt_mappable_end,
+							  false);
 	else
-		free_space = drm_mm_search_free(&dev_priv->mm.gtt_space,
-						size, alignment, 0);
+		free_space = drm_mm_search_free_color(&dev_priv->mm.gtt_space,
+						      size, alignment, obj->cache_level,
+						      false);
 
 	if (free_space != NULL) {
 		if (map_and_fenceable)
 			obj->gtt_space =
 				drm_mm_get_block_range_generic(free_space,
-							       size, alignment, 0,
+							       size, alignment, obj->cache_level,
 							       0, dev_priv->mm.gtt_mappable_end,
-							       0);
+							       false);
 		else
 			obj->gtt_space =
-				drm_mm_get_block(free_space, size, alignment);
+				drm_mm_get_block_generic(free_space,
+							 size, alignment, obj->cache_level,
+							 false);
 	}
 	if (obj->gtt_space == NULL) {
 		/* If the gtt is empty and we're still having trouble
 		 * fitting our object in, we're out of memory.
 		 */
 		ret = i915_gem_evict_something(dev, size, alignment,
+					       obj->cache_level,
 					       map_and_fenceable);
 		if (ret)
 			return ret;
 
 		goto search_free;
 	}
+	if (WARN_ON(!i915_gem_valid_gtt_space(dev,
+					      obj->gtt_space,
+					      obj->cache_level))) {
+		drm_mm_put_block(obj->gtt_space);
+		obj->gtt_space = NULL;
+		return -EINVAL;
+	}
 
 	ret = i915_gem_object_get_pages_gtt(obj, gfpmask);
 	if (ret) {
@@ -2840,6 +2813,7 @@
 	obj->map_and_fenceable = mappable && fenceable;
 
 	trace_i915_gem_object_bind(obj, map_and_fenceable);
+	i915_gem_verify_gtt(dev);
 	return 0;
 }
 
@@ -2869,17 +2843,6 @@
 	drm_clflush_pages(obj->pages, obj->base.size / PAGE_SIZE);
 }
 
-/** Flushes any GPU write domain for the object if it's dirty. */
-static int
-i915_gem_object_flush_gpu_write_domain(struct drm_i915_gem_object *obj)
-{
-	if ((obj->base.write_domain & I915_GEM_GPU_DOMAINS) == 0)
-		return 0;
-
-	/* Queue the GPU write cache flushing we need. */
-	return i915_gem_flush_ring(obj->ring, 0, obj->base.write_domain);
-}
-
 /** Flushes the GTT write domain for the object if it's dirty. */
 static void
 i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj)
@@ -2946,16 +2909,10 @@
 	if (obj->base.write_domain == I915_GEM_DOMAIN_GTT)
 		return 0;
 
-	ret = i915_gem_object_flush_gpu_write_domain(obj);
+	ret = i915_gem_object_wait_rendering(obj, !write);
 	if (ret)
 		return ret;
 
-	if (obj->pending_gpu_write || write) {
-		ret = i915_gem_object_wait_rendering(obj);
-		if (ret)
-			return ret;
-	}
-
 	i915_gem_object_flush_cpu_write_domain(obj);
 
 	old_write_domain = obj->base.write_domain;
@@ -2998,6 +2955,12 @@
 		return -EBUSY;
 	}
 
+	if (!i915_gem_valid_gtt_space(dev, obj->gtt_space, cache_level)) {
+		ret = i915_gem_object_unbind(obj);
+		if (ret)
+			return ret;
+	}
+
 	if (obj->gtt_space) {
 		ret = i915_gem_object_finish_gpu(obj);
 		if (ret)
@@ -3009,7 +2972,7 @@
 		 * registers with snooped memory, so relinquish any fences
 		 * currently pointing to our region in the aperture.
 		 */
-		if (INTEL_INFO(obj->base.dev)->gen < 6) {
+		if (INTEL_INFO(dev)->gen < 6) {
 			ret = i915_gem_object_put_fence(obj);
 			if (ret)
 				return ret;
@@ -3020,6 +2983,8 @@
 		if (obj->has_aliasing_ppgtt_mapping)
 			i915_ppgtt_bind_object(dev_priv->mm.aliasing_ppgtt,
 					       obj, cache_level);
+
+		obj->gtt_space->color = cache_level;
 	}
 
 	if (cache_level == I915_CACHE_NONE) {
@@ -3046,9 +3011,72 @@
 	}
 
 	obj->cache_level = cache_level;
+	i915_gem_verify_gtt(dev);
 	return 0;
 }
 
+int i915_gem_get_cacheing_ioctl(struct drm_device *dev, void *data,
+				struct drm_file *file)
+{
+	struct drm_i915_gem_cacheing *args = data;
+	struct drm_i915_gem_object *obj;
+	int ret;
+
+	ret = i915_mutex_lock_interruptible(dev);
+	if (ret)
+		return ret;
+
+	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
+	if (&obj->base == NULL) {
+		ret = -ENOENT;
+		goto unlock;
+	}
+
+	args->cacheing = obj->cache_level != I915_CACHE_NONE;
+
+	drm_gem_object_unreference(&obj->base);
+unlock:
+	mutex_unlock(&dev->struct_mutex);
+	return ret;
+}
+
+int i915_gem_set_cacheing_ioctl(struct drm_device *dev, void *data,
+				struct drm_file *file)
+{
+	struct drm_i915_gem_cacheing *args = data;
+	struct drm_i915_gem_object *obj;
+	enum i915_cache_level level;
+	int ret;
+
+	switch (args->cacheing) {
+	case I915_CACHEING_NONE:
+		level = I915_CACHE_NONE;
+		break;
+	case I915_CACHEING_CACHED:
+		level = I915_CACHE_LLC;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	ret = i915_mutex_lock_interruptible(dev);
+	if (ret)
+		return ret;
+
+	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
+	if (&obj->base == NULL) {
+		ret = -ENOENT;
+		goto unlock;
+	}
+
+	ret = i915_gem_object_set_cache_level(obj, level);
+
+	drm_gem_object_unreference(&obj->base);
+unlock:
+	mutex_unlock(&dev->struct_mutex);
+	return ret;
+}
+
 /*
  * Prepare buffer for display plane (scanout, cursors, etc).
  * Can be called from an uninterruptible phase (modesetting) and allows
@@ -3062,10 +3090,6 @@
 	u32 old_read_domains, old_write_domain;
 	int ret;
 
-	ret = i915_gem_object_flush_gpu_write_domain(obj);
-	if (ret)
-		return ret;
-
 	if (pipelined != obj->ring) {
 		ret = i915_gem_object_sync(obj, pipelined);
 		if (ret)
@@ -3101,7 +3125,7 @@
 	/* It should now be out of any other write domains, and we can update
 	 * the domain values for our changes.
 	 */
-	BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
+	obj->base.write_domain = 0;
 	obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
 
 	trace_i915_gem_object_change_domain(obj,
@@ -3119,13 +3143,7 @@
 	if ((obj->base.read_domains & I915_GEM_GPU_DOMAINS) == 0)
 		return 0;
 
-	if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) {
-		ret = i915_gem_flush_ring(obj->ring, 0, obj->base.write_domain);
-		if (ret)
-			return ret;
-	}
-
-	ret = i915_gem_object_wait_rendering(obj);
+	ret = i915_gem_object_wait_rendering(obj, false);
 	if (ret)
 		return ret;
 
@@ -3149,16 +3167,10 @@
 	if (obj->base.write_domain == I915_GEM_DOMAIN_CPU)
 		return 0;
 
-	ret = i915_gem_object_flush_gpu_write_domain(obj);
+	ret = i915_gem_object_wait_rendering(obj, !write);
 	if (ret)
 		return ret;
 
-	if (write || obj->pending_gpu_write) {
-		ret = i915_gem_object_wait_rendering(obj);
-		if (ret)
-			return ret;
-	}
-
 	i915_gem_object_flush_gtt_write_domain(obj);
 
 	old_write_domain = obj->base.write_domain;
@@ -3400,6 +3412,10 @@
 	ret = i915_gem_object_flush_active(obj);
 
 	args->busy = obj->active;
+	if (obj->ring) {
+		BUILD_BUG_ON(I915_NUM_RINGS > 16);
+		args->busy |= intel_ring_flag(obj->ring) << 16;
+	}
 
 	drm_gem_object_unreference(&obj->base);
 unlock:
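
The busy ioctl change above keeps bit 0 as the plain busy flag and packs
the ring flag into the top 16 bits (hence the BUILD_BUG_ON against more
than 16 rings). A hypothetical userspace decode, assuming nothing beyond
that layout:

	#include <stdint.h>

	static inline int busy_is_busy(uint32_t busy)
	{
		return busy & 1;
	}

	static inline uint16_t busy_ring_flags(uint32_t busy)
	{
		return (uint16_t)(busy >> 16);
	}
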
@@ -3517,7 +3533,6 @@
 	INIT_LIST_HEAD(&obj->gtt_list);
 	INIT_LIST_HEAD(&obj->ring_list);
 	INIT_LIST_HEAD(&obj->exec_list);
-	INIT_LIST_HEAD(&obj->gpu_write_list);
 	obj->madv = I915_MADV_WILLNEED;
 	/* Avoid an unnecessary call to unbind on the first bind. */
 	obj->map_and_fenceable = true;
@@ -3891,7 +3906,6 @@
 	}
 
 	BUG_ON(!list_empty(&dev_priv->mm.active_list));
-	BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
 	BUG_ON(!list_empty(&dev_priv->mm.inactive_list));
 	mutex_unlock(&dev->struct_mutex);
 
@@ -3939,7 +3953,6 @@
 {
 	INIT_LIST_HEAD(&ring->active_list);
 	INIT_LIST_HEAD(&ring->request_list);
-	INIT_LIST_HEAD(&ring->gpu_write_list);
 }
 
 void
@@ -3949,7 +3962,6 @@
 	drm_i915_private_t *dev_priv = dev->dev_private;
 
 	INIT_LIST_HEAD(&dev_priv->mm.active_list);
-	INIT_LIST_HEAD(&dev_priv->mm.flushing_list);
 	INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
 	INIT_LIST_HEAD(&dev_priv->mm.fence_list);
 	INIT_LIST_HEAD(&dev_priv->mm.gtt_list);
@@ -4200,12 +4212,7 @@
 i915_gpu_is_active(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	int lists_empty;
-
-	lists_empty = list_empty(&dev_priv->mm.flushing_list) &&
-		      list_empty(&dev_priv->mm.active_list);
-
-	return !lists_empty;
+	return !list_empty(&dev_priv->mm.active_list);
 }
 
 static int
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index a9d58d7..5c2d354 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -97,8 +97,7 @@
 
 static struct i915_hw_context *
 i915_gem_context_get(struct drm_i915_file_private *file_priv, u32 id);
-static int do_switch(struct drm_i915_gem_object *from_obj,
-		     struct i915_hw_context *to, u32 seqno);
+static int do_switch(struct i915_hw_context *to);
 
 static int get_context_size(struct drm_device *dev)
 {
@@ -113,7 +112,10 @@
 		break;
 	case 7:
 		reg = I915_READ(GEN7_CXT_SIZE);
-		ret = GEN7_CXT_TOTAL_SIZE(reg) * 64;
+		if (IS_HASWELL(dev))
+			ret = HSW_CXT_TOTAL_SIZE(reg) * 64;
+		else
+			ret = GEN7_CXT_TOTAL_SIZE(reg) * 64;
 		break;
 	default:
 		BUG();
@@ -220,19 +222,20 @@
 	 */
 	dev_priv->ring[RCS].default_context = ctx;
 	ret = i915_gem_object_pin(ctx->obj, CONTEXT_ALIGN, false);
-	if (ret) {
-		do_destroy(ctx);
-		return ret;
-	}
+	if (ret)
+		goto err_destroy;
 
-	ret = do_switch(NULL, ctx, 0);
-	if (ret) {
-		i915_gem_object_unpin(ctx->obj);
-		do_destroy(ctx);
-	} else {
-		DRM_DEBUG_DRIVER("Default HW context loaded\n");
-	}
+	ret = do_switch(ctx);
+	if (ret)
+		goto err_unpin;
 
+	DRM_DEBUG_DRIVER("Default HW context loaded\n");
+	return 0;
+
+err_unpin:
+	i915_gem_object_unpin(ctx->obj);
+err_destroy:
+	do_destroy(ctx);
 	return ret;
 }
 
@@ -359,17 +362,18 @@
 	return ret;
 }
 
-static int do_switch(struct drm_i915_gem_object *from_obj,
-		     struct i915_hw_context *to,
-		     u32 seqno)
+static int do_switch(struct i915_hw_context *to)
 {
-	struct intel_ring_buffer *ring = NULL;
+	struct intel_ring_buffer *ring = to->ring;
+	struct drm_i915_gem_object *from_obj = ring->last_context_obj;
 	u32 hw_flags = 0;
 	int ret;
 
-	BUG_ON(to == NULL);
 	BUG_ON(from_obj != NULL && from_obj->pin_count == 0);
 
+	if (from_obj == to->obj)
+		return 0;
+
 	ret = i915_gem_object_pin(to->obj, CONTEXT_ALIGN, false);
 	if (ret)
 		return ret;
@@ -393,7 +397,6 @@
 	else if (WARN_ON_ONCE(from_obj == to->obj)) /* not yet expected */
 		hw_flags |= MI_FORCE_RESTORE;
 
-	ring = to->ring;
 	ret = mi_set_context(ring, to, hw_flags);
 	if (ret) {
 		i915_gem_object_unpin(to->obj);
@@ -407,6 +410,7 @@
 	 * MI_SET_CONTEXT instead of when the next seqno has completed.
 	 */
 	if (from_obj != NULL) {
+		u32 seqno = i915_gem_next_request_seqno(ring);
 		from_obj->base.read_domains = I915_GEM_DOMAIN_INSTRUCTION;
 		i915_gem_object_move_to_active(from_obj, ring, seqno);
 		/* As long as MI_SET_CONTEXT is serializing, ie. it flushes the
@@ -417,7 +421,7 @@
 		 * swapped, but there is no way to do that yet.
 		 */
 		from_obj->dirty = 1;
-		BUG_ON(from_obj->ring != to->ring);
+		BUG_ON(from_obj->ring != ring);
 		i915_gem_object_unpin(from_obj);
 
 		drm_gem_object_unreference(&from_obj->base);
@@ -448,9 +452,7 @@
 			int to_id)
 {
 	struct drm_i915_private *dev_priv = ring->dev->dev_private;
-	struct drm_i915_file_private *file_priv = NULL;
 	struct i915_hw_context *to;
-	struct drm_i915_gem_object *from_obj = ring->last_context_obj;
 
 	if (dev_priv->hw_contexts_disabled)
 		return 0;
@@ -458,21 +460,18 @@
 	if (ring != &dev_priv->ring[RCS])
 		return 0;
 
-	if (file)
-		file_priv = file->driver_priv;
-
 	if (to_id == DEFAULT_CONTEXT_ID) {
 		to = ring->default_context;
 	} else {
-		to = i915_gem_context_get(file_priv, to_id);
+		if (file == NULL)
+			return -EINVAL;
+
+		to = i915_gem_context_get(file->driver_priv, to_id);
 		if (to == NULL)
 			return -ENOENT;
 	}
 
-	if (from_obj == to->obj)
-		return 0;
-
-	return do_switch(from_obj, to, i915_gem_next_request_seqno(to->ring));
+	return do_switch(to);
 }
 
 int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index eba0308..7279c31 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -44,7 +44,8 @@
 
 int
 i915_gem_evict_something(struct drm_device *dev, int min_size,
-			 unsigned alignment, bool mappable)
+			 unsigned alignment, unsigned cache_level,
+			 bool mappable)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct list_head eviction_list, unwind_list;
@@ -79,11 +80,11 @@
 	INIT_LIST_HEAD(&unwind_list);
 	if (mappable)
 		drm_mm_init_scan_with_range(&dev_priv->mm.gtt_space,
-					    min_size, alignment, 0,
+					    min_size, alignment, cache_level,
 					    0, dev_priv->mm.gtt_mappable_end);
 	else
 		drm_mm_init_scan(&dev_priv->mm.gtt_space,
-				 min_size, alignment, 0);
+				 min_size, alignment, cache_level);
 
 	/* First see if there is a large enough contiguous idle region... */
 	list_for_each_entry(obj, &dev_priv->mm.inactive_list, mm_list) {
@@ -93,23 +94,6 @@
 
 	/* Now merge in the soon-to-be-expired objects... */
 	list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
-		/* Does the object require an outstanding flush? */
-		if (obj->base.write_domain)
-			continue;
-
-		if (mark_free(obj, &unwind_list))
-			goto found;
-	}
-
-	/* Finally add anything with a pending flush (in order of retirement) */
-	list_for_each_entry(obj, &dev_priv->mm.flushing_list, mm_list) {
-		if (mark_free(obj, &unwind_list))
-			goto found;
-	}
-	list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
-		if (!obj->base.write_domain)
-			continue;
-
 		if (mark_free(obj, &unwind_list))
 			goto found;
 	}
@@ -172,7 +156,6 @@
 	int ret;
 
 	lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
-		       list_empty(&dev_priv->mm.flushing_list) &&
 		       list_empty(&dev_priv->mm.active_list));
 	if (lists_empty)
 		return -ENOSPC;
@@ -189,8 +172,6 @@
 
 	i915_gem_retire_requests(dev);
 
-	BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
-
 	/* Having flushed everything, unbind() should never raise an error */
 	list_for_each_entry_safe(obj, next,
 				 &dev_priv->mm.inactive_list, mm_list) {
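
The extra cache_level argument threads the colouring constraint into the
eviction scan, so drm_mm only proposes holes that
i915_gem_valid_gtt_space() would accept. The caller side, as used by the
bind path in i915_gem.c above, reduces to:

	/* On ENOSPC during bind: evict with the object's own colour. */
	ret = i915_gem_evict_something(dev, size, alignment,
				       obj->cache_level, map_and_fenceable);
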
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index ff2819e..afb312e 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -34,180 +34,6 @@
 #include "intel_drv.h"
 #include <linux/dma_remapping.h>
 
-struct change_domains {
-	uint32_t invalidate_domains;
-	uint32_t flush_domains;
-	uint32_t flush_rings;
-	uint32_t flips;
-};
-
-/*
- * Set the next domain for the specified object. This
- * may not actually perform the necessary flushing/invaliding though,
- * as that may want to be batched with other set_domain operations
- *
- * This is (we hope) the only really tricky part of gem. The goal
- * is fairly simple -- track which caches hold bits of the object
- * and make sure they remain coherent. A few concrete examples may
- * help to explain how it works. For shorthand, we use the notation
- * (read_domains, write_domain), e.g. (CPU, CPU) to indicate the
- * a pair of read and write domain masks.
- *
- * Case 1: the batch buffer
- *
- *	1. Allocated
- *	2. Written by CPU
- *	3. Mapped to GTT
- *	4. Read by GPU
- *	5. Unmapped from GTT
- *	6. Freed
- *
- *	Let's take these a step at a time
- *
- *	1. Allocated
- *		Pages allocated from the kernel may still have
- *		cache contents, so we set them to (CPU, CPU) always.
- *	2. Written by CPU (using pwrite)
- *		The pwrite function calls set_domain (CPU, CPU) and
- *		this function does nothing (as nothing changes)
- *	3. Mapped by GTT
- *		This function asserts that the object is not
- *		currently in any GPU-based read or write domains
- *	4. Read by GPU
- *		i915_gem_execbuffer calls set_domain (COMMAND, 0).
- *		As write_domain is zero, this function adds in the
- *		current read domains (CPU+COMMAND, 0).
- *		flush_domains is set to CPU.
- *		invalidate_domains is set to COMMAND
- *		clflush is run to get data out of the CPU caches
- *		then i915_dev_set_domain calls i915_gem_flush to
- *		emit an MI_FLUSH and drm_agp_chipset_flush
- *	5. Unmapped from GTT
- *		i915_gem_object_unbind calls set_domain (CPU, CPU)
- *		flush_domains and invalidate_domains end up both zero
- *		so no flushing/invalidating happens
- *	6. Freed
- *		yay, done
- *
- * Case 2: The shared render buffer
- *
- *	1. Allocated
- *	2. Mapped to GTT
- *	3. Read/written by GPU
- *	4. set_domain to (CPU,CPU)
- *	5. Read/written by CPU
- *	6. Read/written by GPU
- *
- *	1. Allocated
- *		Same as last example, (CPU, CPU)
- *	2. Mapped to GTT
- *		Nothing changes (assertions find that it is not in the GPU)
- *	3. Read/written by GPU
- *		execbuffer calls set_domain (RENDER, RENDER)
- *		flush_domains gets CPU
- *		invalidate_domains gets GPU
- *		clflush (obj)
- *		MI_FLUSH and drm_agp_chipset_flush
- *	4. set_domain (CPU, CPU)
- *		flush_domains gets GPU
- *		invalidate_domains gets CPU
- *		wait_rendering (obj) to make sure all drawing is complete.
- *		This will include an MI_FLUSH to get the data from GPU
- *		to memory
- *		clflush (obj) to invalidate the CPU cache
- *		Another MI_FLUSH in i915_gem_flush (eliminate this somehow?)
- *	5. Read/written by CPU
- *		cache lines are loaded and dirtied
- *	6. Read written by GPU
- *		Same as last GPU access
- *
- * Case 3: The constant buffer
- *
- *	1. Allocated
- *	2. Written by CPU
- *	3. Read by GPU
- *	4. Updated (written) by CPU again
- *	5. Read by GPU
- *
- *	1. Allocated
- *		(CPU, CPU)
- *	2. Written by CPU
- *		(CPU, CPU)
- *	3. Read by GPU
- *		(CPU+RENDER, 0)
- *		flush_domains = CPU
- *		invalidate_domains = RENDER
- *		clflush (obj)
- *		MI_FLUSH
- *		drm_agp_chipset_flush
- *	4. Updated (written) by CPU again
- *		(CPU, CPU)
- *		flush_domains = 0 (no previous write domain)
- *		invalidate_domains = 0 (no new read domains)
- *	5. Read by GPU
- *		(CPU+RENDER, 0)
- *		flush_domains = CPU
- *		invalidate_domains = RENDER
- *		clflush (obj)
- *		MI_FLUSH
- *		drm_agp_chipset_flush
- */
-static void
-i915_gem_object_set_to_gpu_domain(struct drm_i915_gem_object *obj,
-				  struct intel_ring_buffer *ring,
-				  struct change_domains *cd)
-{
-	uint32_t invalidate_domains = 0, flush_domains = 0;
-
-	/*
-	 * If the object isn't moving to a new write domain,
-	 * let the object stay in multiple read domains
-	 */
-	if (obj->base.pending_write_domain == 0)
-		obj->base.pending_read_domains |= obj->base.read_domains;
-
-	/*
-	 * Flush the current write domain if
-	 * the new read domains don't match. Invalidate
-	 * any read domains which differ from the old
-	 * write domain
-	 */
-	if (obj->base.write_domain &&
-	    (((obj->base.write_domain != obj->base.pending_read_domains ||
-	       obj->ring != ring)) ||
-	     (obj->fenced_gpu_access && !obj->pending_fenced_gpu_access))) {
-		flush_domains |= obj->base.write_domain;
-		invalidate_domains |=
-			obj->base.pending_read_domains & ~obj->base.write_domain;
-	}
-	/*
-	 * Invalidate any read caches which may have
-	 * stale data. That is, any new read domains.
-	 */
-	invalidate_domains |= obj->base.pending_read_domains & ~obj->base.read_domains;
-	if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_CPU)
-		i915_gem_clflush_object(obj);
-
-	if (obj->base.pending_write_domain)
-		cd->flips |= atomic_read(&obj->pending_flip);
-
-	/* The actual obj->write_domain will be updated with
-	 * pending_write_domain after we emit the accumulated flush for all
-	 * of our domain changes in execbuffers (which clears objects'
-	 * write_domains).  So if we have a current write domain that we
-	 * aren't changing, set pending_write_domain to that.
-	 */
-	if (flush_domains == 0 && obj->base.pending_write_domain == 0)
-		obj->base.pending_write_domain = obj->base.write_domain;
-
-	cd->invalidate_domains |= invalidate_domains;
-	cd->flush_domains |= flush_domains;
-	if (flush_domains & I915_GEM_GPU_DOMAINS)
-		cd->flush_rings |= intel_ring_flag(obj->ring);
-	if (invalidate_domains & I915_GEM_GPU_DOMAINS)
-		cd->flush_rings |= intel_ring_flag(ring);
-}
-
 struct eb_objects {
 	int and;
 	struct hlist_head buckets[0];
@@ -587,6 +413,7 @@
 
 		obj->base.pending_read_domains = 0;
 		obj->base.pending_write_domain = 0;
+		obj->pending_fenced_gpu_access = false;
 	}
 	list_splice(&ordered_objects, objects);
 
@@ -810,18 +637,6 @@
 	return ret;
 }
 
-static void
-i915_gem_execbuffer_flush(struct drm_device *dev,
-			  uint32_t invalidate_domains,
-			  uint32_t flush_domains)
-{
-	if (flush_domains & I915_GEM_DOMAIN_CPU)
-		intel_gtt_chipset_flush();
-
-	if (flush_domains & I915_GEM_DOMAIN_GTT)
-		wmb();
-}
-
 static int
 i915_gem_execbuffer_wait_for_flips(struct intel_ring_buffer *ring, u32 flips)
 {
@@ -854,48 +669,45 @@
 	return 0;
 }
 
-
 static int
 i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring,
 				struct list_head *objects)
 {
 	struct drm_i915_gem_object *obj;
-	struct change_domains cd;
+	uint32_t flush_domains = 0;
+	uint32_t flips = 0;
 	int ret;
 
-	memset(&cd, 0, sizeof(cd));
-	list_for_each_entry(obj, objects, exec_list)
-		i915_gem_object_set_to_gpu_domain(obj, ring, &cd);
-
-	if (cd.invalidate_domains | cd.flush_domains) {
-		i915_gem_execbuffer_flush(ring->dev,
-					  cd.invalidate_domains,
-					  cd.flush_domains);
-	}
-
-	if (cd.flips) {
-		ret = i915_gem_execbuffer_wait_for_flips(ring, cd.flips);
-		if (ret)
-			return ret;
-	}
-
 	list_for_each_entry(obj, objects, exec_list) {
 		ret = i915_gem_object_sync(obj, ring);
 		if (ret)
 			return ret;
+
+		if (obj->base.write_domain & I915_GEM_DOMAIN_CPU)
+			i915_gem_clflush_object(obj);
+
+		if (obj->base.pending_write_domain)
+			flips |= atomic_read(&obj->pending_flip);
+
+		flush_domains |= obj->base.write_domain;
 	}
 
+	if (flips) {
+		ret = i915_gem_execbuffer_wait_for_flips(ring, flips);
+		if (ret)
+			return ret;
+	}
+
+	if (flush_domains & I915_GEM_DOMAIN_CPU)
+		intel_gtt_chipset_flush();
+
+	if (flush_domains & I915_GEM_DOMAIN_GTT)
+		wmb();
+
 	/* Unconditionally invalidate gpu caches and ensure that we do flush
 	 * any residual writes from the previous batch.
 	 */
-	ret = i915_gem_flush_ring(ring,
-				  I915_GEM_GPU_DOMAINS,
-				  ring->gpu_caches_dirty ? I915_GEM_GPU_DOMAINS : 0);
-	if (ret)
-		return ret;
-
-	ring->gpu_caches_dirty = false;
-	return 0;
+	return intel_ring_invalidate_all_caches(ring);
 }
 
 static bool
@@ -943,9 +755,8 @@
 	struct drm_i915_gem_object *obj;
 
 	list_for_each_entry(obj, objects, exec_list) {
-		  u32 old_read = obj->base.read_domains;
-		  u32 old_write = obj->base.write_domain;
-
+		u32 old_read = obj->base.read_domains;
+		u32 old_write = obj->base.write_domain;
 
 		obj->base.read_domains = obj->base.pending_read_domains;
 		obj->base.write_domain = obj->base.pending_write_domain;
@@ -954,17 +765,13 @@
 		i915_gem_object_move_to_active(obj, ring, seqno);
 		if (obj->base.write_domain) {
 			obj->dirty = 1;
-			obj->pending_gpu_write = true;
-			list_move_tail(&obj->gpu_write_list,
-				       &ring->gpu_write_list);
+			obj->last_write_seqno = seqno;
 			if (obj->pin_count) /* check for potential scanout */
-				intel_mark_busy(ring->dev, obj);
+				intel_mark_fb_busy(obj);
 		}
 
 		trace_i915_gem_object_change_domain(obj, old_read, old_write);
 	}
-
-	intel_mark_busy(ring->dev, NULL);
 }
 
 static void
@@ -972,16 +779,11 @@
 				    struct drm_file *file,
 				    struct intel_ring_buffer *ring)
 {
-	struct drm_i915_gem_request *request;
-
 	/* Unconditionally force add_request to emit a full flush. */
 	ring->gpu_caches_dirty = true;
 
 	/* Add a breadcrumb for the completion of the batch buffer */
-	request = kzalloc(sizeof(*request), GFP_KERNEL);
-	if (request == NULL || i915_add_request(ring, file, request)) {
-		kfree(request);
-	}
+	(void)i915_add_request(ring, file, NULL);
 }
 
 static int
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index d9a5372..804d653 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -426,6 +426,23 @@
 	undo_idling(dev_priv, interruptible);
 }
 
+static void i915_gtt_color_adjust(struct drm_mm_node *node,
+				  unsigned long color,
+				  unsigned long *start,
+				  unsigned long *end)
+{
+	if (node->color != color)
+		*start += 4096;
+
+	if (!list_empty(&node->node_list)) {
+		node = list_entry(node->node_list.next,
+				  struct drm_mm_node,
+				  node_list);
+		if (node->allocated && node->color != color)
+			*end -= 4096;
+	}
+}
+
 void i915_gem_init_global_gtt(struct drm_device *dev,
 			      unsigned long start,
 			      unsigned long mappable_end,
@@ -435,6 +452,8 @@
 
 	/* Subtract the guard page ... */
 	drm_mm_init(&dev_priv->mm.gtt_space, start, end - start - PAGE_SIZE);
+	if (!HAS_LLC(dev))
+		dev_priv->mm.gtt_space.color_adjust = i915_gtt_color_adjust;
 
 	dev_priv->mm.gtt_start = start;
 	dev_priv->mm.gtt_mappable_end = mappable_end;
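
i915_gtt_color_adjust() above shrinks a candidate hole by one 4 KiB page
on each side that borders an allocated node of a different colour. A
worked example of its effect (the addresses are illustrative only):

	/* Hole [0x10000, 0x20000), both neighbours allocated with another
	 * cache level: each boundary retreats by one page, leaving
	 * [0x11000, 0x1f000) usable, so differently-cached objects never
	 * touch on non-LLC hardware. */
	unsigned long start = 0x10000, end = 0x20000;
	start += 4096;	/* previous node differs in colour */
	end   -= 4096;	/* next allocated node differs in colour */
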
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 8a38285..a61b41a 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -296,11 +296,21 @@
 	drm_helper_hpd_irq_event(dev);
 }
 
-static void i915_handle_rps_change(struct drm_device *dev)
+/* defined in intel_pm.c */
+extern spinlock_t mchdev_lock;
+
+static void ironlake_handle_rps_change(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	u32 busy_up, busy_down, max_avg, min_avg;
-	u8 new_delay = dev_priv->cur_delay;
+	u8 new_delay;
+	unsigned long flags;
+
+	spin_lock_irqsave(&mchdev_lock, flags);
+
+	I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));
+
+	new_delay = dev_priv->cur_delay;
 
 	I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG);
 	busy_up = I915_READ(RCPREVBSYTUPAVG);
@@ -324,6 +334,8 @@
 	if (ironlake_set_drps(dev, new_delay))
 		dev_priv->cur_delay = new_delay;
 
+	spin_unlock_irqrestore(&mchdev_lock, flags);
+
 	return;
 }
 
@@ -335,7 +347,7 @@
 	if (ring->obj == NULL)
 		return;
 
-	trace_i915_gem_request_complete(ring, ring->get_seqno(ring));
+	trace_i915_gem_request_complete(ring, ring->get_seqno(ring, false));
 
 	wake_up_all(&ring->irq_queue);
 	if (i915_enable_hangcheck) {
@@ -349,16 +361,16 @@
 static void gen6_pm_rps_work(struct work_struct *work)
 {
 	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
-						    rps_work);
+						    rps.work);
 	u32 pm_iir, pm_imr;
 	u8 new_delay;
 
-	spin_lock_irq(&dev_priv->rps_lock);
-	pm_iir = dev_priv->pm_iir;
-	dev_priv->pm_iir = 0;
+	spin_lock_irq(&dev_priv->rps.lock);
+	pm_iir = dev_priv->rps.pm_iir;
+	dev_priv->rps.pm_iir = 0;
 	pm_imr = I915_READ(GEN6_PMIMR);
 	I915_WRITE(GEN6_PMIMR, 0);
-	spin_unlock_irq(&dev_priv->rps_lock);
+	spin_unlock_irq(&dev_priv->rps.lock);
 
 	if ((pm_iir & GEN6_PM_DEFERRED_EVENTS) == 0)
 		return;
@@ -366,9 +378,9 @@
 	mutex_lock(&dev_priv->dev->struct_mutex);
 
 	if (pm_iir & GEN6_PM_RP_UP_THRESHOLD)
-		new_delay = dev_priv->cur_delay + 1;
+		new_delay = dev_priv->rps.cur_delay + 1;
 	else
-		new_delay = dev_priv->cur_delay - 1;
+		new_delay = dev_priv->rps.cur_delay - 1;
 
 	gen6_set_rps(dev_priv->dev, new_delay);
 
@@ -444,7 +456,7 @@
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
 	unsigned long flags;
 
-	if (!IS_IVYBRIDGE(dev))
+	if (!HAS_L3_GPU_CACHE(dev))
 		return;
 
 	spin_lock_irqsave(&dev_priv->irq_lock, flags);
@@ -488,19 +500,19 @@
 	 * IIR bits should never already be set because IMR should
 	 * prevent an interrupt from being shown in IIR. The warning
 	 * displays a case where we've unsafely cleared
-	 * dev_priv->pm_iir. Although missing an interrupt of the same
+	 * dev_priv->rps.pm_iir. Although missing an interrupt of the same
 	 * type is not a problem, it displays a problem in the logic.
 	 *
-	 * The mask bit in IMR is cleared by rps_work.
+	 * The mask bit in IMR is cleared by dev_priv->rps.work.
 	 */
 
-	spin_lock_irqsave(&dev_priv->rps_lock, flags);
-	dev_priv->pm_iir |= pm_iir;
-	I915_WRITE(GEN6_PMIMR, dev_priv->pm_iir);
+	spin_lock_irqsave(&dev_priv->rps.lock, flags);
+	dev_priv->rps.pm_iir |= pm_iir;
+	I915_WRITE(GEN6_PMIMR, dev_priv->rps.pm_iir);
 	POSTING_READ(GEN6_PMIMR);
-	spin_unlock_irqrestore(&dev_priv->rps_lock, flags);
+	spin_unlock_irqrestore(&dev_priv->rps.lock, flags);
 
-	queue_work(dev_priv->wq, &dev_priv->rps_work);
+	queue_work(dev_priv->wq, &dev_priv->rps.work);
 }
 
 static irqreturn_t valleyview_irq_handler(DRM_IRQ_ARGS)
@@ -793,10 +805,8 @@
 			ibx_irq_handler(dev, pch_iir);
 	}
 
-	if (de_iir & DE_PCU_EVENT) {
-		I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));
-		i915_handle_rps_change(dev);
-	}
+	if (IS_GEN5(dev) && (de_iir & DE_PCU_EVENT))
+		ironlake_handle_rps_change(dev);
 
 	if (IS_GEN6(dev) && pm_iir & GEN6_PM_DEFERRED_EVENTS)
 		gen6_queue_rps_work(dev_priv, pm_iir);
@@ -949,7 +959,8 @@
 {
 	err->size = obj->base.size;
 	err->name = obj->base.name;
-	err->seqno = obj->last_rendering_seqno;
+	err->rseqno = obj->last_read_seqno;
+	err->wseqno = obj->last_write_seqno;
 	err->gtt_offset = obj->gtt_offset;
 	err->read_domains = obj->base.read_domains;
 	err->write_domain = obj->base.write_domain;
@@ -1039,12 +1050,12 @@
 	if (!ring->get_seqno)
 		return NULL;
 
-	seqno = ring->get_seqno(ring);
+	seqno = ring->get_seqno(ring, false);
 	list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
 		if (obj->ring != ring)
 			continue;
 
-		if (i915_seqno_passed(seqno, obj->last_rendering_seqno))
+		if (i915_seqno_passed(seqno, obj->last_read_seqno))
 			continue;
 
 		if ((obj->base.read_domains & I915_GEM_DOMAIN_COMMAND) == 0)
@@ -1093,7 +1104,7 @@
 
 	error->waiting[ring->id] = waitqueue_active(&ring->irq_queue);
 	error->instpm[ring->id] = I915_READ(RING_INSTPM(ring->mmio_base));
-	error->seqno[ring->id] = ring->get_seqno(ring);
+	error->seqno[ring->id] = ring->get_seqno(ring, false);
 	error->acthd[ring->id] = intel_ring_get_active_head(ring);
 	error->head[ring->id] = I915_READ_HEAD(ring);
 	error->tail[ring->id] = I915_READ_TAIL(ring);
@@ -1590,7 +1601,8 @@
 static bool i915_hangcheck_ring_idle(struct intel_ring_buffer *ring, bool *err)
 {
 	if (list_empty(&ring->request_list) ||
-	    i915_seqno_passed(ring->get_seqno(ring), ring_last_seqno(ring))) {
+	    i915_seqno_passed(ring->get_seqno(ring, false),
+			      ring_last_seqno(ring))) {
 		/* Issue a wake-up to catch stuck h/w. */
 		if (waitqueue_active(&ring->irq_queue)) {
 			DRM_ERROR("Hangcheck timer elapsed... %s idle\n",
@@ -2647,7 +2659,7 @@
 
 	INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
 	INIT_WORK(&dev_priv->error_work, i915_error_work_func);
-	INIT_WORK(&dev_priv->rps_work, gen6_pm_rps_work);
+	INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
 	INIT_WORK(&dev_priv->parity_error_work, ivybridge_parity_work);
 
 	dev->driver->get_vblank_counter = i915_get_vblank_counter;
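
Every ring->get_seqno() caller above now passes a bool; from the call
sites it is assumed to select lazy coherency: true tolerates a slightly
stale cached value, false forces a fully coherent read. The pattern
across this patch:

	/* Polling from retire work: a stale value only delays retirement. */
	seqno = ring->get_seqno(ring, true);

	/* Interrupt, wait and error-capture paths: the value must be fresh. */
	seqno = ring->get_seqno(ring, false);
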
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 28725ce..ab8cffe 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -450,6 +450,7 @@
 #define RING_ACTHD(base)	((base)+0x74)
 #define RING_NOPID(base)	((base)+0x94)
 #define RING_IMR(base)		((base)+0xa8)
+#define RING_TIMESTAMP(base)	((base)+0x358)
 #define   TAIL_ADDR		0x001FFFF8
 #define   HEAD_WRAP_COUNT	0xFFE00000
 #define   HEAD_WRAP_ONE		0x00200000
@@ -529,6 +530,8 @@
 #define   GFX_PSMI_GRANULARITY		(1<<10)
 #define   GFX_PPGTT_ENABLE		(1<<9)
 
+#define VLV_DISPLAY_BASE 0x180000
+
 #define SCPD0		0x0209c /* 915+ only */
 #define IER		0x020a0
 #define IIR		0x020a4
@@ -1496,6 +1499,14 @@
 					 GEN7_CXT_EXTENDED_SIZE(ctx_reg) + \
 					 GEN7_CXT_GT1_SIZE(ctx_reg) + \
 					 GEN7_CXT_VFSTATE_SIZE(ctx_reg))
+#define HSW_CXT_POWER_SIZE(ctx_reg)	((ctx_reg >> 26) & 0x3f)
+#define HSW_CXT_RING_SIZE(ctx_reg)	((ctx_reg >> 23) & 0x7)
+#define HSW_CXT_RENDER_SIZE(ctx_reg)	((ctx_reg >> 15) & 0xff)
+#define HSW_CXT_TOTAL_SIZE(ctx_reg)	(HSW_CXT_POWER_SIZE(ctx_reg) + \
+					 HSW_CXT_RING_SIZE(ctx_reg) + \
+					 HSW_CXT_RENDER_SIZE(ctx_reg) + \
+					 GEN7_CXT_VFSTATE_SIZE(ctx_reg))
+
 
 /*
  * Overlay regs
@@ -1549,12 +1560,35 @@
 
 /* VGA port control */
 #define ADPA			0x61100
+#define PCH_ADPA                0xe1100
+#define VLV_ADPA		(VLV_DISPLAY_BASE + ADPA)
+
 #define   ADPA_DAC_ENABLE	(1<<31)
 #define   ADPA_DAC_DISABLE	0
 #define   ADPA_PIPE_SELECT_MASK	(1<<30)
 #define   ADPA_PIPE_A_SELECT	0
 #define   ADPA_PIPE_B_SELECT	(1<<30)
 #define   ADPA_PIPE_SELECT(pipe) ((pipe) << 30)
+/* CPT uses bits 29:30 for pch transcoder select */
+#define   ADPA_CRT_HOTPLUG_MASK  0x03ff0000 /* bit 25-16 */
+#define   ADPA_CRT_HOTPLUG_MONITOR_NONE  (0<<24)
+#define   ADPA_CRT_HOTPLUG_MONITOR_MASK  (3<<24)
+#define   ADPA_CRT_HOTPLUG_MONITOR_COLOR (3<<24)
+#define   ADPA_CRT_HOTPLUG_MONITOR_MONO  (2<<24)
+#define   ADPA_CRT_HOTPLUG_ENABLE        (1<<23)
+#define   ADPA_CRT_HOTPLUG_PERIOD_64     (0<<22)
+#define   ADPA_CRT_HOTPLUG_PERIOD_128    (1<<22)
+#define   ADPA_CRT_HOTPLUG_WARMUP_5MS    (0<<21)
+#define   ADPA_CRT_HOTPLUG_WARMUP_10MS   (1<<21)
+#define   ADPA_CRT_HOTPLUG_SAMPLE_2S     (0<<20)
+#define   ADPA_CRT_HOTPLUG_SAMPLE_4S     (1<<20)
+#define   ADPA_CRT_HOTPLUG_VOLTAGE_40    (0<<18)
+#define   ADPA_CRT_HOTPLUG_VOLTAGE_50    (1<<18)
+#define   ADPA_CRT_HOTPLUG_VOLTAGE_60    (2<<18)
+#define   ADPA_CRT_HOTPLUG_VOLTAGE_70    (3<<18)
+#define   ADPA_CRT_HOTPLUG_VOLREF_325MV  (0<<17)
+#define   ADPA_CRT_HOTPLUG_VOLREF_475MV  (1<<17)
+#define   ADPA_CRT_HOTPLUG_FORCE_TRIGGER (1<<16)
 #define   ADPA_USE_VGA_HVPOLARITY (1<<15)
 #define   ADPA_SETS_HVPOLARITY	0
 #define   ADPA_VSYNC_CNTL_DISABLE (1<<11)
@@ -3889,31 +3923,6 @@
 #define FDI_PLL_CTL_1           0xfe000
 #define FDI_PLL_CTL_2           0xfe004
 
-/* CRT */
-#define PCH_ADPA                0xe1100
-#define  ADPA_TRANS_SELECT_MASK (1<<30)
-#define  ADPA_TRANS_A_SELECT    0
-#define  ADPA_TRANS_B_SELECT    (1<<30)
-#define  ADPA_CRT_HOTPLUG_MASK  0x03ff0000 /* bit 25-16 */
-#define  ADPA_CRT_HOTPLUG_MONITOR_NONE  (0<<24)
-#define  ADPA_CRT_HOTPLUG_MONITOR_MASK  (3<<24)
-#define  ADPA_CRT_HOTPLUG_MONITOR_COLOR (3<<24)
-#define  ADPA_CRT_HOTPLUG_MONITOR_MONO  (2<<24)
-#define  ADPA_CRT_HOTPLUG_ENABLE        (1<<23)
-#define  ADPA_CRT_HOTPLUG_PERIOD_64     (0<<22)
-#define  ADPA_CRT_HOTPLUG_PERIOD_128    (1<<22)
-#define  ADPA_CRT_HOTPLUG_WARMUP_5MS    (0<<21)
-#define  ADPA_CRT_HOTPLUG_WARMUP_10MS   (1<<21)
-#define  ADPA_CRT_HOTPLUG_SAMPLE_2S     (0<<20)
-#define  ADPA_CRT_HOTPLUG_SAMPLE_4S     (1<<20)
-#define  ADPA_CRT_HOTPLUG_VOLTAGE_40    (0<<18)
-#define  ADPA_CRT_HOTPLUG_VOLTAGE_50    (1<<18)
-#define  ADPA_CRT_HOTPLUG_VOLTAGE_60    (2<<18)
-#define  ADPA_CRT_HOTPLUG_VOLTAGE_70    (3<<18)
-#define  ADPA_CRT_HOTPLUG_VOLREF_325MV  (0<<17)
-#define  ADPA_CRT_HOTPLUG_VOLREF_475MV  (1<<17)
-#define  ADPA_CRT_HOTPLUG_FORCE_TRIGGER (1<<16)
-
 /* or SDVOB */
 #define HDMIB   0xe1140
 #define  PORT_ENABLE    (1 << 31)
@@ -4270,194 +4279,184 @@
 #define   AUD_CONFIG_DISABLE_NCTS		(1 << 3)
 
 /* HSW Power Wells */
-#define HSW_PWR_WELL_CTL1		0x45400		/* BIOS */
-#define HSW_PWR_WELL_CTL2		0x45404		/* Driver */
-#define HSW_PWR_WELL_CTL3		0x45408		/* KVMR */
-#define HSW_PWR_WELL_CTL4		0x4540C		/* Debug */
-#define   HSW_PWR_WELL_ENABLE				(1<<31)
-#define   HSW_PWR_WELL_STATE				(1<<30)
-#define HSW_PWR_WELL_CTL5		0x45410
+#define HSW_PWR_WELL_CTL1			0x45400 /* BIOS */
+#define HSW_PWR_WELL_CTL2			0x45404 /* Driver */
+#define HSW_PWR_WELL_CTL3			0x45408 /* KVMR */
+#define HSW_PWR_WELL_CTL4			0x4540C /* Debug */
+#define   HSW_PWR_WELL_ENABLE			(1<<31)
+#define   HSW_PWR_WELL_STATE			(1<<30)
+#define HSW_PWR_WELL_CTL5			0x45410
 #define   HSW_PWR_WELL_ENABLE_SINGLE_STEP	(1<<31)
 #define   HSW_PWR_WELL_PWR_GATE_OVERRIDE	(1<<20)
-#define   HSW_PWR_WELL_FORCE_ON				(1<<19)
-#define HSW_PWR_WELL_CTL6		0x45414
+#define   HSW_PWR_WELL_FORCE_ON			(1<<19)
+#define HSW_PWR_WELL_CTL6			0x45414
 
 /* Per-pipe DDI Function Control */
-#define PIPE_DDI_FUNC_CTL_A			0x60400
-#define PIPE_DDI_FUNC_CTL_B			0x61400
-#define PIPE_DDI_FUNC_CTL_C			0x62400
+#define PIPE_DDI_FUNC_CTL_A		0x60400
+#define PIPE_DDI_FUNC_CTL_B		0x61400
+#define PIPE_DDI_FUNC_CTL_C		0x62400
 #define PIPE_DDI_FUNC_CTL_EDP		0x6F400
-#define DDI_FUNC_CTL(pipe) _PIPE(pipe, \
-					PIPE_DDI_FUNC_CTL_A, \
-					PIPE_DDI_FUNC_CTL_B)
+#define DDI_FUNC_CTL(pipe) _PIPE(pipe, PIPE_DDI_FUNC_CTL_A, \
+				       PIPE_DDI_FUNC_CTL_B)
 #define  PIPE_DDI_FUNC_ENABLE		(1<<31)
 /* Those bits are ignored by pipe EDP since it can only connect to DDI A */
-#define  PIPE_DDI_PORT_MASK			(7<<28)
-#define  PIPE_DDI_SELECT_PORT(x)		((x)<<28)
-#define  PIPE_DDI_MODE_SELECT_HDMI		(0<<24)
-#define  PIPE_DDI_MODE_SELECT_DVI		(1<<24)
+#define  PIPE_DDI_PORT_MASK		(7<<28)
+#define  PIPE_DDI_SELECT_PORT(x)	((x)<<28)
+#define  PIPE_DDI_MODE_SELECT_MASK	(7<<24)
+#define  PIPE_DDI_MODE_SELECT_HDMI	(0<<24)
+#define  PIPE_DDI_MODE_SELECT_DVI	(1<<24)
 #define  PIPE_DDI_MODE_SELECT_DP_SST	(2<<24)
 #define  PIPE_DDI_MODE_SELECT_DP_MST	(3<<24)
-#define  PIPE_DDI_MODE_SELECT_FDI		(4<<24)
-#define  PIPE_DDI_BPC_8					(0<<20)
-#define  PIPE_DDI_BPC_10				(1<<20)
-#define  PIPE_DDI_BPC_6					(2<<20)
-#define  PIPE_DDI_BPC_12				(3<<20)
-#define  PIPE_DDI_BFI_ENABLE			(1<<4)
-#define  PIPE_DDI_PORT_WIDTH_X1			(0<<1)
-#define  PIPE_DDI_PORT_WIDTH_X2			(1<<1)
-#define  PIPE_DDI_PORT_WIDTH_X4			(3<<1)
+#define  PIPE_DDI_MODE_SELECT_FDI	(4<<24)
+#define  PIPE_DDI_BPC_MASK		(7<<20)
+#define  PIPE_DDI_BPC_8			(0<<20)
+#define  PIPE_DDI_BPC_10		(1<<20)
+#define  PIPE_DDI_BPC_6			(2<<20)
+#define  PIPE_DDI_BPC_12		(3<<20)
+#define  PIPE_DDI_PVSYNC		(1<<17)
+#define  PIPE_DDI_PHSYNC		(1<<16)
+#define  PIPE_DDI_BFI_ENABLE		(1<<4)
+#define  PIPE_DDI_PORT_WIDTH_X1		(0<<1)
+#define  PIPE_DDI_PORT_WIDTH_X2		(1<<1)
+#define  PIPE_DDI_PORT_WIDTH_X4		(3<<1)
 
 /* DisplayPort Transport Control */
 #define DP_TP_CTL_A			0x64040
 #define DP_TP_CTL_B			0x64140
-#define DP_TP_CTL(port) _PORT(port, \
-					DP_TP_CTL_A, \
-					DP_TP_CTL_B)
-#define  DP_TP_CTL_ENABLE		(1<<31)
-#define  DP_TP_CTL_MODE_SST	(0<<27)
-#define  DP_TP_CTL_MODE_MST	(1<<27)
+#define DP_TP_CTL(port) _PORT(port, DP_TP_CTL_A, DP_TP_CTL_B)
+#define  DP_TP_CTL_ENABLE			(1<<31)
+#define  DP_TP_CTL_MODE_SST			(0<<27)
+#define  DP_TP_CTL_MODE_MST			(1<<27)
 #define  DP_TP_CTL_ENHANCED_FRAME_ENABLE	(1<<18)
-#define  DP_TP_CTL_FDI_AUTOTRAIN	(1<<15)
+#define  DP_TP_CTL_FDI_AUTOTRAIN		(1<<15)
 #define  DP_TP_CTL_LINK_TRAIN_MASK		(7<<8)
 #define  DP_TP_CTL_LINK_TRAIN_PAT1		(0<<8)
 #define  DP_TP_CTL_LINK_TRAIN_PAT2		(1<<8)
-#define  DP_TP_CTL_LINK_TRAIN_NORMAL	(3<<8)
+#define  DP_TP_CTL_LINK_TRAIN_NORMAL		(3<<8)
 
 /* DisplayPort Transport Status */
 #define DP_TP_STATUS_A			0x64044
 #define DP_TP_STATUS_B			0x64144
-#define DP_TP_STATUS(port) _PORT(port, \
-					DP_TP_STATUS_A, \
-					DP_TP_STATUS_B)
+#define DP_TP_STATUS(port) _PORT(port, DP_TP_STATUS_A, DP_TP_STATUS_B)
 #define  DP_TP_STATUS_AUTOTRAIN_DONE	(1<<12)
 
 /* DDI Buffer Control */
 #define DDI_BUF_CTL_A				0x64000
 #define DDI_BUF_CTL_B				0x64100
-#define DDI_BUF_CTL(port) _PORT(port, \
-					DDI_BUF_CTL_A, \
-					DDI_BUF_CTL_B)
-#define  DDI_BUF_CTL_ENABLE				(1<<31)
+#define DDI_BUF_CTL(port) _PORT(port, DDI_BUF_CTL_A, DDI_BUF_CTL_B)
+#define  DDI_BUF_CTL_ENABLE			(1<<31)
 #define  DDI_BUF_EMP_400MV_0DB_HSW		(0<<24)   /* Sel0 */
-#define  DDI_BUF_EMP_400MV_3_5DB_HSW	(1<<24)   /* Sel1 */
+#define  DDI_BUF_EMP_400MV_3_5DB_HSW		(1<<24)   /* Sel1 */
 #define  DDI_BUF_EMP_400MV_6DB_HSW		(2<<24)   /* Sel2 */
-#define  DDI_BUF_EMP_400MV_9_5DB_HSW	(3<<24)   /* Sel3 */
+#define  DDI_BUF_EMP_400MV_9_5DB_HSW		(3<<24)   /* Sel3 */
 #define  DDI_BUF_EMP_600MV_0DB_HSW		(4<<24)   /* Sel4 */
-#define  DDI_BUF_EMP_600MV_3_5DB_HSW	(5<<24)   /* Sel5 */
+#define  DDI_BUF_EMP_600MV_3_5DB_HSW		(5<<24)   /* Sel5 */
 #define  DDI_BUF_EMP_600MV_6DB_HSW		(6<<24)   /* Sel6 */
 #define  DDI_BUF_EMP_800MV_0DB_HSW		(7<<24)   /* Sel7 */
-#define  DDI_BUF_EMP_800MV_3_5DB_HSW	(8<<24)   /* Sel8 */
-#define  DDI_BUF_EMP_MASK				(0xf<<24)
-#define  DDI_BUF_IS_IDLE				(1<<7)
-#define  DDI_PORT_WIDTH_X1				(0<<1)
-#define  DDI_PORT_WIDTH_X2				(1<<1)
-#define  DDI_PORT_WIDTH_X4				(3<<1)
+#define  DDI_BUF_EMP_800MV_3_5DB_HSW		(8<<24)   /* Sel8 */
+#define  DDI_BUF_EMP_MASK			(0xf<<24)
+#define  DDI_BUF_IS_IDLE			(1<<7)
+#define  DDI_PORT_WIDTH_X1			(0<<1)
+#define  DDI_PORT_WIDTH_X2			(1<<1)
+#define  DDI_PORT_WIDTH_X4			(3<<1)
 #define  DDI_INIT_DISPLAY_DETECTED		(1<<0)
 
 /* DDI Buffer Translations */
 #define DDI_BUF_TRANS_A				0x64E00
 #define DDI_BUF_TRANS_B				0x64E60
-#define DDI_BUF_TRANS(port) _PORT(port, \
-					DDI_BUF_TRANS_A, \
-					DDI_BUF_TRANS_B)
+#define DDI_BUF_TRANS(port) _PORT(port, DDI_BUF_TRANS_A, DDI_BUF_TRANS_B)
 
 /* Sideband Interface (SBI) is programmed indirectly, via
  * SBI_ADDR, which contains the register offset; and SBI_DATA,
  * which contains the payload */
-#define SBI_ADDR				0xC6000
-#define SBI_DATA				0xC6004
+#define SBI_ADDR			0xC6000
+#define SBI_DATA			0xC6004
 #define SBI_CTL_STAT			0xC6008
 #define  SBI_CTL_OP_CRRD		(0x6<<8)
 #define  SBI_CTL_OP_CRWR		(0x7<<8)
 #define  SBI_RESPONSE_FAIL		(0x1<<1)
-#define  SBI_RESPONSE_SUCCESS	(0x0<<1)
-#define  SBI_BUSY				(0x1<<0)
-#define  SBI_READY				(0x0<<0)
+#define  SBI_RESPONSE_SUCCESS		(0x0<<1)
+#define  SBI_BUSY			(0x1<<0)
+#define  SBI_READY			(0x0<<0)
 
 /* SBI offsets */
-#define  SBI_SSCDIVINTPHASE6		0x0600
+#define  SBI_SSCDIVINTPHASE6			0x0600
 #define   SBI_SSCDIVINTPHASE_DIVSEL_MASK	((0x7f)<<1)
 #define   SBI_SSCDIVINTPHASE_DIVSEL(x)		((x)<<1)
 #define   SBI_SSCDIVINTPHASE_INCVAL_MASK	((0x7f)<<8)
 #define   SBI_SSCDIVINTPHASE_INCVAL(x)		((x)<<8)
-#define   SBI_SSCDIVINTPHASE_DIR(x)			((x)<<15)
+#define   SBI_SSCDIVINTPHASE_DIR(x)		((x)<<15)
 #define   SBI_SSCDIVINTPHASE_PROPAGATE		(1<<0)
-#define  SBI_SSCCTL					0x020c
+#define  SBI_SSCCTL				0x020c
 #define  SBI_SSCCTL6				0x060C
-#define   SBI_SSCCTL_DISABLE		(1<<0)
+#define   SBI_SSCCTL_DISABLE			(1<<0)
 #define  SBI_SSCAUXDIV6				0x0610
 #define   SBI_SSCAUXDIV_FINALDIV2SEL(x)		((x)<<4)
-#define  SBI_DBUFF0					0x2a00
+#define  SBI_DBUFF0				0x2a00
 
 /* LPT PIXCLK_GATE */
-#define PIXCLK_GATE				0xC6020
-#define  PIXCLK_GATE_UNGATE		1<<0
-#define  PIXCLK_GATE_GATE		0<<0
+#define PIXCLK_GATE			0xC6020
+#define  PIXCLK_GATE_UNGATE		(1<<0)
+#define  PIXCLK_GATE_GATE		(0<<0)
 
 /* SPLL */
-#define SPLL_CTL				0x46020
+#define SPLL_CTL			0x46020
 #define  SPLL_PLL_ENABLE		(1<<31)
 #define  SPLL_PLL_SCC			(1<<28)
 #define  SPLL_PLL_NON_SCC		(2<<28)
-#define  SPLL_PLL_FREQ_810MHz	(0<<26)
-#define  SPLL_PLL_FREQ_1350MHz	(1<<26)
+#define  SPLL_PLL_FREQ_810MHz		(0<<26)
+#define  SPLL_PLL_FREQ_1350MHz		(1<<26)
 
 /* WRPLL */
-#define WRPLL_CTL1				0x46040
-#define WRPLL_CTL2				0x46060
-#define  WRPLL_PLL_ENABLE				(1<<31)
-#define  WRPLL_PLL_SELECT_SSC			(0x01<<28)
-#define  WRPLL_PLL_SELECT_NON_SCC		(0x02<<28)
+#define WRPLL_CTL1			0x46040
+#define WRPLL_CTL2			0x46060
+#define  WRPLL_PLL_ENABLE		(1<<31)
+#define  WRPLL_PLL_SELECT_SSC		(0x01<<28)
+#define  WRPLL_PLL_SELECT_NON_SCC	(0x02<<28)
 #define  WRPLL_PLL_SELECT_LCPLL_2700	(0x03<<28)
 /* WRPLL divider programming */
-#define  WRPLL_DIVIDER_REFERENCE(x)		((x)<<0)
-#define  WRPLL_DIVIDER_POST(x)			((x)<<8)
-#define  WRPLL_DIVIDER_FEEDBACK(x)		((x)<<16)
+#define  WRPLL_DIVIDER_REFERENCE(x)	((x)<<0)
+#define  WRPLL_DIVIDER_POST(x)		((x)<<8)
+#define  WRPLL_DIVIDER_FEEDBACK(x)	((x)<<16)
 
 /* Port clock selection */
 #define PORT_CLK_SEL_A			0x46100
 #define PORT_CLK_SEL_B			0x46104
-#define PORT_CLK_SEL(port) _PORT(port, \
-					PORT_CLK_SEL_A, \
-					PORT_CLK_SEL_B)
+#define PORT_CLK_SEL(port) _PORT(port, PORT_CLK_SEL_A, PORT_CLK_SEL_B)
 #define  PORT_CLK_SEL_LCPLL_2700	(0<<29)
 #define  PORT_CLK_SEL_LCPLL_1350	(1<<29)
 #define  PORT_CLK_SEL_LCPLL_810		(2<<29)
-#define  PORT_CLK_SEL_SPLL			(3<<29)
+#define  PORT_CLK_SEL_SPLL		(3<<29)
 #define  PORT_CLK_SEL_WRPLL1		(4<<29)
 #define  PORT_CLK_SEL_WRPLL2		(5<<29)
 
 /* Pipe clock selection */
 #define PIPE_CLK_SEL_A			0x46140
 #define PIPE_CLK_SEL_B			0x46144
-#define PIPE_CLK_SEL(pipe) _PIPE(pipe, \
-					PIPE_CLK_SEL_A, \
-					PIPE_CLK_SEL_B)
+#define PIPE_CLK_SEL(pipe) _PIPE(pipe, PIPE_CLK_SEL_A, PIPE_CLK_SEL_B)
 /* For each pipe, we need to select the corresponding port clock */
-#define  PIPE_CLK_SEL_DISABLED	(0x0<<29)
-#define  PIPE_CLK_SEL_PORT(x)	((x+1)<<29)
+#define  PIPE_CLK_SEL_DISABLED		(0x0<<29)
+#define  PIPE_CLK_SEL_PORT(x)		(((x)+1)<<29)
 
 /* LCPLL Control */
-#define LCPLL_CTL				0x130040
+#define LCPLL_CTL			0x130040
 #define  LCPLL_PLL_DISABLE		(1<<31)
 #define  LCPLL_PLL_LOCK			(1<<30)
-#define  LCPLL_CD_CLOCK_DISABLE	(1<<25)
+#define  LCPLL_CD_CLOCK_DISABLE		(1<<25)
 #define  LCPLL_CD2X_CLOCK_DISABLE	(1<<23)
 
 /* Pipe WM_LINETIME - watermark line time */
 #define PIPE_WM_LINETIME_A		0x45270
 #define PIPE_WM_LINETIME_B		0x45274
-#define PIPE_WM_LINETIME(pipe) _PIPE(pipe, \
-					PIPE_WM_LINETIME_A, \
-					PIPE_WM_LINETIME_B)
-#define   PIPE_WM_LINETIME_MASK		(0x1ff)
-#define   PIPE_WM_LINETIME_TIME(x)			((x))
+#define PIPE_WM_LINETIME(pipe) _PIPE(pipe, PIPE_WM_LINETIME_A, \
+					   PIPE_WM_LINETIME_B)
+#define   PIPE_WM_LINETIME_MASK			(0x1ff)
+#define   PIPE_WM_LINETIME_TIME(x)		((x))
 #define   PIPE_WM_LINETIME_IPS_LINETIME_MASK	(0x1ff<<16)
-#define   PIPE_WM_LINETIME_IPS_LINETIME(x)		((x)<<16)
+#define   PIPE_WM_LINETIME_IPS_LINETIME(x)	((x)<<16)
 
 /* SFUSE_STRAP */
-#define SFUSE_STRAP				0xc2014
+#define SFUSE_STRAP			0xc2014
 #define  SFUSE_STRAP_DDIB_DETECTED	(1<<2)
 #define  SFUSE_STRAP_DDIC_DETECTED	(1<<1)
 #define  SFUSE_STRAP_DDID_DETECTED	(1<<0)
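
The parametrized register macros above (DP_TP_CTL(), DDI_BUF_CTL(),
PORT_CLK_SEL(), PIPE_WM_LINETIME(), ...) all rely on the same
two-instance trick. A minimal sketch of the helpers they assume,
extrapolating linearly from the A and B instances:

/* Sketch only: _PIPE()/_PORT() are assumed to be defined along these
 * lines elsewhere in i915_reg.h.
 */
#define _PIPE(pipe, a, b)	((a) + (pipe) * ((b) - (a)))
#define _PORT(port, a, b)	_PIPE(port, a, b)

/* e.g. DP_TP_CTL(PORT_B) = 0x64040 + 1 * (0x64140 - 0x64040) = 0x64140 */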
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c
index 7631807..c5ee7ee 100644
--- a/drivers/gpu/drm/i915/i915_sysfs.c
+++ b/drivers/gpu/drm/i915/i915_sysfs.c
@@ -213,7 +213,7 @@
 			DRM_ERROR("RC6 residency sysfs setup failed\n");
 	}
 
-	if (IS_IVYBRIDGE(dev)) {
+	if (HAS_L3_GPU_CACHE(dev)) {
 		ret = device_create_bin_file(&dev->primary->kdev, &dpf_attrs);
 		if (ret)
 			DRM_ERROR("l3 parity sysfs setup failed\n");
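
The IS_IVYBRIDGE() check becomes a feature test, so the l3 parity file
follows the capability rather than one platform. The macro itself is
introduced elsewhere in the series; a plausible definition (an
assumption, not shown in this hunk):

/* Hypothetical feature macro: expands to the platform check today; new
 * platforms with a GPU L3 cache only need to be added here.
 */
#define HAS_L3_GPU_CACHE(dev) (IS_IVYBRIDGE(dev))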
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index 23bdc8c..80bf311 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -47,6 +47,7 @@
 struct intel_crt {
 	struct intel_encoder base;
 	bool force_hotplug_required;
+	u32 adpa_reg;
 };
 
 static struct intel_crt *intel_attached_crt(struct drm_connector *connector)
@@ -55,6 +56,11 @@
 			    struct intel_crt, base);
 }
 
+static struct intel_crt *intel_encoder_to_crt(struct intel_encoder *encoder)
+{
+	return container_of(encoder, struct intel_crt, base);
+}
+
 static void pch_crt_dpms(struct drm_encoder *encoder, int mode)
 {
 	struct drm_device *dev = encoder->dev;
@@ -145,19 +151,15 @@
 
 	struct drm_device *dev = encoder->dev;
 	struct drm_crtc *crtc = encoder->crtc;
+	struct intel_crt *crt =
+		intel_encoder_to_crt(to_intel_encoder(encoder));
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int dpll_md_reg;
 	u32 adpa, dpll_md;
-	u32 adpa_reg;
 
 	dpll_md_reg = DPLL_MD(intel_crtc->pipe);
 
-	if (HAS_PCH_SPLIT(dev))
-		adpa_reg = PCH_ADPA;
-	else
-		adpa_reg = ADPA;
-
 	/*
 	 * Disable separate mode multiplier used when cloning SDVO to CRT
 	 * XXX this needs to be adjusted when we really are cloning
@@ -185,7 +187,7 @@
 	if (!HAS_PCH_SPLIT(dev))
 		I915_WRITE(BCLRPAT(intel_crtc->pipe), 0);
 
-	I915_WRITE(adpa_reg, adpa);
+	I915_WRITE(crt->adpa_reg, adpa);
 }
 
 static bool intel_ironlake_crt_detect_hotplug(struct drm_connector *connector)
@@ -688,9 +690,7 @@
 	intel_connector_attach_encoder(intel_connector, &crt->base);
 
 	crt->base.type = INTEL_OUTPUT_ANALOG;
-	crt->base.clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT |
-				1 << INTEL_ANALOG_CLONE_BIT |
-				1 << INTEL_SDVO_LVDS_CLONE_BIT);
+	crt->base.cloneable = true;
 	if (IS_HASWELL(dev))
 		crt->base.crtc_mask = (1 << 0);
 	else
@@ -707,6 +707,13 @@
 	else
 		encoder_helper_funcs = &gmch_encoder_funcs;
 
+	if (HAS_PCH_SPLIT(dev))
+		crt->adpa_reg = PCH_ADPA;
+	else if (IS_VALLEYVIEW(dev))
+		crt->adpa_reg = VLV_ADPA;
+	else
+		crt->adpa_reg = ADPA;
+
 	drm_encoder_helper_add(&crt->base.base, encoder_helper_funcs);
 	drm_connector_helper_add(connector, &intel_crt_connector_helper_funcs);
 
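intel_encoder_to_crt() works because struct intel_crt embeds its
struct intel_encoder as the member named base, so container_of() can
walk back from the embedded pointer to the containing object. A
self-contained sketch of the pattern, with stand-in types:

#include <stddef.h>

struct encoder { int type; };
struct crt {
	struct encoder base;	/* embedded, as in struct intel_crt */
	unsigned int adpa_reg;
};

/* Same idea as the kernel's container_of(), minus the type checking. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

static struct crt *encoder_to_crt(struct encoder *encoder)
{
	return container_of(encoder, struct crt, base);
}
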
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index 933c748..9584226 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -250,7 +250,7 @@
 	case PORT_B:
 	case PORT_C:
 	case PORT_D:
-		intel_hdmi_init(dev, DDI_BUF_CTL(port));
+		intel_hdmi_init(dev, DDI_BUF_CTL(port), port);
 		break;
 	default:
 		DRM_DEBUG_DRIVER("No handlers defined for port %d, skipping DDI initialization\n",
@@ -267,7 +267,8 @@
 	u16 r2;		/* Reference divider */
 };
 
-/* Table of matching values for WRPLL clocks programming for each frequency */
+/* Table of WRPLL divider values for each supported TMDS clock frequency.
+ * The lookup code assumes this table is sorted by ascending clock. */
 static const struct wrpll_tmds_clock wrpll_tmds_clock_table[] = {
 	{19750,	38,	25,	18},
 	{20000,	48,	32,	18},
@@ -277,7 +278,6 @@
 	{23000,	36,	23,	15},
 	{23500,	40,	40,	23},
 	{23750,	26,	16,	14},
-	{23750,	26,	16,	14},
 	{24000,	36,	24,	15},
 	{25000,	36,	25,	15},
 	{25175,	26,	40,	33},
@@ -437,7 +437,6 @@
 	{108000,	8,	24,	15},
 	{108108,	8,	173,	108},
 	{109000,	6,	23,	19},
-	{109000,	6,	23,	19},
 	{110000,	6,	22,	18},
 	{110013,	6,	22,	18},
 	{110250,	8,	49,	30},
@@ -614,7 +613,6 @@
 	{218250,	4,	42,	26},
 	{218750,	4,	34,	21},
 	{219000,	4,	47,	29},
-	{219000,	4,	47,	29},
 	{220000,	4,	44,	27},
 	{220640,	4,	49,	30},
 	{220750,	4,	36,	22},
@@ -658,7 +656,7 @@
 	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
 	int port = intel_hdmi->ddi_port;
 	int pipe = intel_crtc->pipe;
-	int p, n2, r2, valid=0;
+	int p, n2, r2;
 	u32 temp, i;
 
 	/* On Haswell, we need to enable the clocks and prepare DDI function to
@@ -666,26 +664,23 @@
 	 */
 	DRM_DEBUG_KMS("Preparing HDMI DDI mode for Haswell on port %c, pipe %c\n", port_name(port), pipe_name(pipe));
 
-	for (i=0; i < ARRAY_SIZE(wrpll_tmds_clock_table); i++) {
-		if (crtc->mode.clock == wrpll_tmds_clock_table[i].clock) {
-			p = wrpll_tmds_clock_table[i].p;
-			n2 = wrpll_tmds_clock_table[i].n2;
-			r2 = wrpll_tmds_clock_table[i].r2;
-
-			DRM_DEBUG_KMS("WR PLL clock: found settings for %dKHz refresh rate: p=%d, n2=%d, r2=%d\n",
-					crtc->mode.clock,
-					p, n2, r2);
-
-			valid = 1;
+	for (i = 0; i < ARRAY_SIZE(wrpll_tmds_clock_table); i++)
+		if (crtc->mode.clock <= wrpll_tmds_clock_table[i].clock)
 			break;
-		}
-	}
 
-	if (!valid) {
-		DRM_ERROR("Unable to find WR PLL clock settings for %dKHz refresh rate\n",
-				crtc->mode.clock);
-		return;
-	}
+	if (i == ARRAY_SIZE(wrpll_tmds_clock_table))
+		i--;
+
+	p = wrpll_tmds_clock_table[i].p;
+	n2 = wrpll_tmds_clock_table[i].n2;
+	r2 = wrpll_tmds_clock_table[i].r2;
+
+	if (wrpll_tmds_clock_table[i].clock != crtc->mode.clock)
+		DRM_INFO("WR PLL: using settings for %dKHz on %dKHz mode\n",
+			 wrpll_tmds_clock_table[i].clock, crtc->mode.clock);
+
+	DRM_DEBUG_KMS("WR PLL: %dKHz refresh rate with p=%d, n2=%d r2=%d\n",
+		      crtc->mode.clock, p, n2, r2);
 
 	/* Enable LCPLL if disabled */
 	temp = I915_READ(LCPLL_CTL);
@@ -723,15 +718,35 @@
 	}
 
 	/* Enable PIPE_DDI_FUNC_CTL for the pipe to work in HDMI mode */
-	temp = I915_READ(DDI_FUNC_CTL(pipe));
-	temp &= ~PIPE_DDI_PORT_MASK;
-	temp &= ~PIPE_DDI_BPC_12;
-	temp |= PIPE_DDI_SELECT_PORT(port) |
-			PIPE_DDI_MODE_SELECT_HDMI |
-			((intel_crtc->bpp > 24) ?
-				PIPE_DDI_BPC_12 :
-				PIPE_DDI_BPC_8) |
-			PIPE_DDI_FUNC_ENABLE;
+	temp = PIPE_DDI_FUNC_ENABLE | PIPE_DDI_SELECT_PORT(port);
+
+	switch (intel_crtc->bpp) {
+	case 18:
+		temp |= PIPE_DDI_BPC_6;
+		break;
+	case 24:
+		temp |= PIPE_DDI_BPC_8;
+		break;
+	case 30:
+		temp |= PIPE_DDI_BPC_10;
+		break;
+	case 36:
+		temp |= PIPE_DDI_BPC_12;
+		break;
+	default:
+		WARN(1, "%d bpp unsupported by pipe DDI function\n",
+		     intel_crtc->bpp);
+	}
+
+	if (intel_hdmi->has_hdmi_sink)
+		temp |= PIPE_DDI_MODE_SELECT_HDMI;
+	else
+		temp |= PIPE_DDI_MODE_SELECT_DVI;
+
+	if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
+		temp |= PIPE_DDI_PVSYNC;
+	if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
+		temp |= PIPE_DDI_PHSYNC;
 
 	I915_WRITE(DDI_FUNC_CTL(pipe), temp);
 
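The rewritten lookup no longer demands an exact clock match: because
the table is sorted by ascending clock, the loop stops at the first
entry at or above the requested clock and clamps to the last entry
when the request is off the end. A standalone sketch of the selection
logic (table entries abridged):

struct tmds_clock { int clock, p, n2, r2; };

/* Assumed sorted by ascending .clock, as the table comment requires. */
static const struct tmds_clock table[] = {
	{ 19750, 38, 25, 18 },
	{ 20000, 48, 32, 18 },
	{ 24000, 36, 24, 15 },
};
#define TABLE_LEN (sizeof(table) / sizeof(table[0]))

/* First entry with .clock >= the request, or the largest entry. */
static const struct tmds_clock *pick(int clock)
{
	unsigned int i;

	for (i = 0; i < TABLE_LEN; i++)
		if (clock <= table[i].clock)
			return &table[i];
	return &table[TABLE_LEN - 1];
}
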
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index a69a3d0..a9ab1af 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -1429,8 +1429,10 @@
  * protect mechanism may be enabled.
  *
  * Note!  This is for pre-ILK only.
+ *
+ * Unfortunately needed by dvo_ns2501, since that DVO depends on the PLL running.
  */
-static void intel_enable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
+void intel_enable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
 {
 	int reg;
 	u32 val;
@@ -2836,13 +2838,13 @@
 static bool intel_crtc_driving_pch(struct drm_crtc *crtc)
 {
 	struct drm_device *dev = crtc->dev;
-	struct intel_encoder *encoder;
+	struct intel_encoder *intel_encoder;
 
 	/*
 	 * If there's a non-PCH eDP on this crtc, it must be DP_A, and that
 	 * must be driven by its own crtc; no sharing is possible.
 	 */
-	for_each_encoder_on_crtc(dev, crtc, encoder) {
+	for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
 
 		/* On Haswell, LPT PCH handles the VGA connection via FDI, and Haswell
 		 * CPU handles all others */
@@ -2850,19 +2852,19 @@
 			/* It is still unclear how this will work on PPT, so throw up a warning */
 			WARN_ON(!HAS_PCH_LPT(dev));
 
-			if (encoder->type == DRM_MODE_ENCODER_DAC) {
+			if (intel_encoder->type == INTEL_OUTPUT_ANALOG) {
 				DRM_DEBUG_KMS("Haswell detected DAC encoder, assuming is PCH\n");
 				return true;
 			} else {
 				DRM_DEBUG_KMS("Haswell detected encoder %d, assuming is CPU\n",
-						encoder->type);
+					      intel_encoder->type);
 				return false;
 			}
 		}
 
-		switch (encoder->type) {
+		switch (intel_encoder->type) {
 		case INTEL_OUTPUT_EDP:
-			if (!intel_encoder_is_pch_edp(&encoder->base))
+			if (!intel_encoder_is_pch_edp(&intel_encoder->base))
 				return false;
 			continue;
 		}
@@ -5848,46 +5850,6 @@
 	return mode;
 }
 
-#define GPU_IDLE_TIMEOUT 500 /* ms */
-
-/* When this timer fires, we've been idle for awhile */
-static void intel_gpu_idle_timer(unsigned long arg)
-{
-	struct drm_device *dev = (struct drm_device *)arg;
-	drm_i915_private_t *dev_priv = dev->dev_private;
-
-	if (!list_empty(&dev_priv->mm.active_list)) {
-		/* Still processing requests, so just re-arm the timer. */
-		mod_timer(&dev_priv->idle_timer, jiffies +
-			  msecs_to_jiffies(GPU_IDLE_TIMEOUT));
-		return;
-	}
-
-	dev_priv->busy = false;
-	queue_work(dev_priv->wq, &dev_priv->idle_work);
-}
-
-#define CRTC_IDLE_TIMEOUT 1000 /* ms */
-
-static void intel_crtc_idle_timer(unsigned long arg)
-{
-	struct intel_crtc *intel_crtc = (struct intel_crtc *)arg;
-	struct drm_crtc *crtc = &intel_crtc->base;
-	drm_i915_private_t *dev_priv = crtc->dev->dev_private;
-	struct intel_framebuffer *intel_fb;
-
-	intel_fb = to_intel_framebuffer(crtc->fb);
-	if (intel_fb && intel_fb->obj->active) {
-		/* The framebuffer is still being accessed by the GPU. */
-		mod_timer(&intel_crtc->idle_timer, jiffies +
-			  msecs_to_jiffies(CRTC_IDLE_TIMEOUT));
-		return;
-	}
-
-	intel_crtc->busy = false;
-	queue_work(dev_priv->wq, &dev_priv->idle_work);
-}
-
 static void intel_increase_pllclock(struct drm_crtc *crtc)
 {
 	struct drm_device *dev = crtc->dev;
@@ -5917,10 +5879,6 @@
 		if (dpll & DISPLAY_RATE_SELECT_FPA1)
 			DRM_DEBUG_DRIVER("failed to upclock LVDS!\n");
 	}
-
-	/* Schedule downclock */
-	mod_timer(&intel_crtc->idle_timer, jiffies +
-		  msecs_to_jiffies(CRTC_IDLE_TIMEOUT));
 }
 
 static void intel_decrease_pllclock(struct drm_crtc *crtc)
@@ -5959,89 +5917,46 @@
 
 }
 
-/**
- * intel_idle_update - adjust clocks for idleness
- * @work: work struct
- *
- * Either the GPU or display (or both) went idle.  Check the busy status
- * here and adjust the CRTC and GPU clocks as necessary.
- */
-static void intel_idle_update(struct work_struct *work)
+void intel_mark_busy(struct drm_device *dev)
 {
-	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
-						    idle_work);
-	struct drm_device *dev = dev_priv->dev;
+	i915_update_gfx_val(dev->dev_private);
+}
+
+void intel_mark_idle(struct drm_device *dev)
+{
+}
+
+void intel_mark_fb_busy(struct drm_i915_gem_object *obj)
+{
+	struct drm_device *dev = obj->base.dev;
 	struct drm_crtc *crtc;
-	struct intel_crtc *intel_crtc;
 
 	if (!i915_powersave)
 		return;
 
-	mutex_lock(&dev->struct_mutex);
-
-	i915_update_gfx_val(dev_priv);
-
 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
-		/* Skip inactive CRTCs */
 		if (!crtc->fb)
 			continue;
 
-		intel_crtc = to_intel_crtc(crtc);
-		if (!intel_crtc->busy)
-			intel_decrease_pllclock(crtc);
+		if (to_intel_framebuffer(crtc->fb)->obj == obj)
+			intel_increase_pllclock(crtc);
 	}
-
-
-	mutex_unlock(&dev->struct_mutex);
 }
 
-/**
- * intel_mark_busy - mark the GPU and possibly the display busy
- * @dev: drm device
- * @obj: object we're operating on
- *
- * Callers can use this function to indicate that the GPU is busy processing
- * commands.  If @obj matches one of the CRTC objects (i.e. it's a scanout
- * buffer), we'll also mark the display as busy, so we know to increase its
- * clock frequency.
- */
-void intel_mark_busy(struct drm_device *dev, struct drm_i915_gem_object *obj)
+void intel_mark_fb_idle(struct drm_i915_gem_object *obj)
 {
-	drm_i915_private_t *dev_priv = dev->dev_private;
-	struct drm_crtc *crtc = NULL;
-	struct intel_framebuffer *intel_fb;
-	struct intel_crtc *intel_crtc;
+	struct drm_device *dev = obj->base.dev;
+	struct drm_crtc *crtc;
 
-	if (!drm_core_check_feature(dev, DRIVER_MODESET))
-		return;
-
-	if (!dev_priv->busy) {
-		intel_sanitize_pm(dev);
-		dev_priv->busy = true;
-	} else
-		mod_timer(&dev_priv->idle_timer, jiffies +
-			  msecs_to_jiffies(GPU_IDLE_TIMEOUT));
-
-	if (obj == NULL)
+	if (!i915_powersave)
 		return;
 
 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
 		if (!crtc->fb)
 			continue;
 
-		intel_crtc = to_intel_crtc(crtc);
-		intel_fb = to_intel_framebuffer(crtc->fb);
-		if (intel_fb->obj == obj) {
-			if (!intel_crtc->busy) {
-				/* Non-busy -> busy, upclock */
-				intel_increase_pllclock(crtc);
-				intel_crtc->busy = true;
-			} else {
-				/* Busy -> busy, put off timer */
-				mod_timer(&intel_crtc->idle_timer, jiffies +
-					  msecs_to_jiffies(CRTC_IDLE_TIMEOUT));
-			}
-		}
+		if (to_intel_framebuffer(crtc->fb)->obj == obj)
+			intel_decrease_pllclock(crtc);
 	}
 }
 
@@ -6392,7 +6307,7 @@
 	default:
 		WARN_ONCE(1, "unknown plane in flip command\n");
 		ret = -ENODEV;
-		goto err;
+		goto err_unpin;
 	}
 
 	ret = intel_ring_begin(ring, 4);
@@ -6500,7 +6415,7 @@
 		goto cleanup_pending;
 
 	intel_disable_fbc(dev);
-	intel_mark_busy(dev, obj);
+	intel_mark_fb_busy(obj);
 	mutex_unlock(&dev->struct_mutex);
 
 	trace_i915_flip_request(intel_crtc->plane, obj);
@@ -6666,11 +6581,6 @@
 	}
 
 	drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs);
-
-	intel_crtc->busy = false;
-
-	setup_timer(&intel_crtc->idle_timer, intel_crtc_idle_timer,
-		    (unsigned long)intel_crtc);
 }
 
 int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
@@ -6697,15 +6607,23 @@
 	return 0;
 }
 
-static int intel_encoder_clones(struct drm_device *dev, int type_mask)
+static int intel_encoder_clones(struct intel_encoder *encoder)
 {
-	struct intel_encoder *encoder;
+	struct drm_device *dev = encoder->base.dev;
+	struct intel_encoder *source_encoder;
 	int index_mask = 0;
 	int entry = 0;
 
-	list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
-		if (type_mask & encoder->clone_mask)
+	list_for_each_entry(source_encoder,
+			    &dev->mode_config.encoder_list, base.head) {
+
+		if (encoder == source_encoder)
 			index_mask |= (1 << entry);
+
+		/* Intel hw has only one MUX where encoders could be cloned. */
+		if (encoder->cloneable && source_encoder->cloneable)
+			index_mask |= (1 << entry);
+
 		entry++;
 	}
 
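With the per-encoder clone bit masks gone, possible_clones reduces to
two rules: every encoder can "clone" itself, and any two encoders
flagged cloneable can clone each other, since there is only the one
MUX. A standalone sketch of the same computation:

/* index_mask over n encoders for the encoder at index self. */
static int clones_mask(const int cloneable[], int n, int self)
{
	int entry, mask = 0;

	for (entry = 0; entry < n; entry++) {
		if (entry == self)
			mask |= 1 << entry;	/* always itself */
		else if (cloneable[self] && cloneable[entry])
			mask |= 1 << entry;	/* shared-MUX pairing */
	}
	return mask;
}
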
@@ -6746,10 +6664,10 @@
 		dpd_is_edp = intel_dpd_is_edp(dev);
 
 		if (has_edp_a(dev))
-			intel_dp_init(dev, DP_A);
+			intel_dp_init(dev, DP_A, PORT_A);
 
 		if (dpd_is_edp && (I915_READ(PCH_DP_D) & DP_DETECTED))
-			intel_dp_init(dev, PCH_DP_D);
+			intel_dp_init(dev, PCH_DP_D, PORT_D);
 	}
 
 	intel_crt_init(dev);
@@ -6780,22 +6698,22 @@
 			/* PCH SDVOB multiplex with HDMIB */
 			found = intel_sdvo_init(dev, PCH_SDVOB, true);
 			if (!found)
-				intel_hdmi_init(dev, HDMIB);
+				intel_hdmi_init(dev, HDMIB, PORT_B);
 			if (!found && (I915_READ(PCH_DP_B) & DP_DETECTED))
-				intel_dp_init(dev, PCH_DP_B);
+				intel_dp_init(dev, PCH_DP_B, PORT_B);
 		}
 
 		if (I915_READ(HDMIC) & PORT_DETECTED)
-			intel_hdmi_init(dev, HDMIC);
+			intel_hdmi_init(dev, HDMIC, PORT_C);
 
 		if (!dpd_is_edp && I915_READ(HDMID) & PORT_DETECTED)
-			intel_hdmi_init(dev, HDMID);
+			intel_hdmi_init(dev, HDMID, PORT_D);
 
 		if (I915_READ(PCH_DP_C) & DP_DETECTED)
-			intel_dp_init(dev, PCH_DP_C);
+			intel_dp_init(dev, PCH_DP_C, PORT_C);
 
 		if (!dpd_is_edp && (I915_READ(PCH_DP_D) & DP_DETECTED))
-			intel_dp_init(dev, PCH_DP_D);
+			intel_dp_init(dev, PCH_DP_D, PORT_D);
 	} else if (IS_VALLEYVIEW(dev)) {
 		int found;
 
@@ -6803,17 +6721,17 @@
 			/* SDVOB multiplex with HDMIB */
 			found = intel_sdvo_init(dev, SDVOB, true);
 			if (!found)
-				intel_hdmi_init(dev, SDVOB);
+				intel_hdmi_init(dev, SDVOB, PORT_B);
 			if (!found && (I915_READ(DP_B) & DP_DETECTED))
-				intel_dp_init(dev, DP_B);
+				intel_dp_init(dev, DP_B, PORT_B);
 		}
 
 		if (I915_READ(SDVOC) & PORT_DETECTED)
-			intel_hdmi_init(dev, SDVOC);
+			intel_hdmi_init(dev, SDVOC, PORT_C);
 
 		/* Shares lanes with HDMI on SDVOC */
 		if (I915_READ(DP_C) & DP_DETECTED)
-			intel_dp_init(dev, DP_C);
+			intel_dp_init(dev, DP_C, PORT_C);
 	} else if (SUPPORTS_DIGITAL_OUTPUTS(dev)) {
 		bool found = false;
 
@@ -6822,12 +6740,12 @@
 			found = intel_sdvo_init(dev, SDVOB, true);
 			if (!found && SUPPORTS_INTEGRATED_HDMI(dev)) {
 				DRM_DEBUG_KMS("probing HDMI on SDVOB\n");
-				intel_hdmi_init(dev, SDVOB);
+				intel_hdmi_init(dev, SDVOB, PORT_B);
 			}
 
 			if (!found && SUPPORTS_INTEGRATED_DP(dev)) {
 				DRM_DEBUG_KMS("probing DP_B\n");
-				intel_dp_init(dev, DP_B);
+				intel_dp_init(dev, DP_B, PORT_B);
 			}
 		}
 
@@ -6842,18 +6760,18 @@
 
 			if (SUPPORTS_INTEGRATED_HDMI(dev)) {
 				DRM_DEBUG_KMS("probing HDMI on SDVOC\n");
-				intel_hdmi_init(dev, SDVOC);
+				intel_hdmi_init(dev, SDVOC, PORT_C);
 			}
 			if (SUPPORTS_INTEGRATED_DP(dev)) {
 				DRM_DEBUG_KMS("probing DP_C\n");
-				intel_dp_init(dev, DP_C);
+				intel_dp_init(dev, DP_C, PORT_C);
 			}
 		}
 
 		if (SUPPORTS_INTEGRATED_DP(dev) &&
 		    (I915_READ(DP_D) & DP_DETECTED)) {
 			DRM_DEBUG_KMS("probing DP_D\n");
-			intel_dp_init(dev, DP_D);
+			intel_dp_init(dev, DP_D, PORT_D);
 		}
 	} else if (IS_GEN2(dev))
 		intel_dvo_init(dev);
@@ -6864,7 +6782,7 @@
 	list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
 		encoder->base.possible_crtcs = encoder->crtc_mask;
 		encoder->base.possible_clones =
-			intel_encoder_clones(dev, encoder->clone_mask);
+			intel_encoder_clones(encoder);
 	}
 
 	/* disable all the possible outputs/crtcs before entering KMS mode */
@@ -7229,10 +7147,6 @@
 	/* Just disable it once at startup */
 	i915_disable_vga(dev);
 	intel_setup_outputs(dev);
-
-	INIT_WORK(&dev_priv->idle_work, intel_idle_update);
-	setup_timer(&dev_priv->idle_timer, intel_gpu_idle_timer,
-		    (unsigned long)dev);
 }
 
 void intel_modeset_gem_init(struct drm_device *dev)
@@ -7278,19 +7192,11 @@
 	 * enqueue unpin/hotplug work. */
 	drm_irq_uninstall(dev);
 	cancel_work_sync(&dev_priv->hotplug_work);
-	cancel_work_sync(&dev_priv->rps_work);
+	cancel_work_sync(&dev_priv->rps.work);
 
 	/* flush any delayed tasks or pending work */
 	flush_scheduled_work();
 
-	/* Shut off idle work before the crtcs get freed. */
-	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
-		intel_crtc = to_intel_crtc(crtc);
-		del_timer_sync(&intel_crtc->idle_timer);
-	}
-	del_timer_sync(&dev_priv->idle_timer);
-	cancel_work_sync(&dev_priv->idle_work);
-
 	drm_mode_config_cleanup(dev);
 }
 
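With the idle timers gone, scanout buffers are upclocked the moment
the GPU touches them and downclocked when GEM reports them idle, so
the policy lives at explicit notification points. Hypothetical call
sites (an assumption; apart from the flip path above, the actual
wiring is in the GEM code, outside this section):

intel_mark_busy(ring->dev);	/* on request submission */
intel_mark_fb_busy(obj);	/* scanout object rendered to or flipped */
intel_mark_idle(dev);		/* GPU has retired all requests */
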
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index a6c426a..d14b1e3 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -36,42 +36,10 @@
 #include "intel_drv.h"
 #include "i915_drm.h"
 #include "i915_drv.h"
-#include "drm_dp_helper.h"
 
-#define DP_RECEIVER_CAP_SIZE	0xf
 #define DP_LINK_STATUS_SIZE	6
 #define DP_LINK_CHECK_TIMEOUT	(10 * 1000)
 
-#define DP_LINK_CONFIGURATION_SIZE	9
-
-struct intel_dp {
-	struct intel_encoder base;
-	uint32_t output_reg;
-	uint32_t DP;
-	uint8_t  link_configuration[DP_LINK_CONFIGURATION_SIZE];
-	bool has_audio;
-	enum hdmi_force_audio force_audio;
-	uint32_t color_range;
-	int dpms_mode;
-	uint8_t link_bw;
-	uint8_t lane_count;
-	uint8_t dpcd[DP_RECEIVER_CAP_SIZE];
-	struct i2c_adapter adapter;
-	struct i2c_algo_dp_aux_data algo;
-	bool is_pch_edp;
-	uint8_t	train_set[4];
-	int panel_power_up_delay;
-	int panel_power_down_delay;
-	int panel_power_cycle_delay;
-	int backlight_on_delay;
-	int backlight_off_delay;
-	struct drm_display_mode *panel_fixed_mode;  /* for eDP */
-	struct delayed_work panel_vdd_work;
-	bool want_panel_vdd;
-	struct edid *edid; /* cached EDID for eDP */
-	int edid_mode_count;
-};
-
 /**
  * is_edp - is the given port attached to an eDP panel (either CPU or PCH)
  * @intel_dp: DP struct
@@ -1668,6 +1636,45 @@
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int ret;
 
+	if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp))) {
+		dp_reg_value &= ~DP_LINK_TRAIN_MASK_CPT;
+
+		switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
+		case DP_TRAINING_PATTERN_DISABLE:
+			dp_reg_value |= DP_LINK_TRAIN_OFF_CPT;
+			break;
+		case DP_TRAINING_PATTERN_1:
+			dp_reg_value |= DP_LINK_TRAIN_PAT_1_CPT;
+			break;
+		case DP_TRAINING_PATTERN_2:
+			dp_reg_value |= DP_LINK_TRAIN_PAT_2_CPT;
+			break;
+		case DP_TRAINING_PATTERN_3:
+			DRM_ERROR("DP training pattern 3 not supported\n");
+			dp_reg_value |= DP_LINK_TRAIN_PAT_2_CPT;
+			break;
+		}
+
+	} else {
+		dp_reg_value &= ~DP_LINK_TRAIN_MASK;
+
+		switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
+		case DP_TRAINING_PATTERN_DISABLE:
+			dp_reg_value |= DP_LINK_TRAIN_OFF;
+			break;
+		case DP_TRAINING_PATTERN_1:
+			dp_reg_value |= DP_LINK_TRAIN_PAT_1;
+			break;
+		case DP_TRAINING_PATTERN_2:
+			dp_reg_value |= DP_LINK_TRAIN_PAT_2;
+			break;
+		case DP_TRAINING_PATTERN_3:
+			DRM_ERROR("DP training pattern 3 not supported\n");
+			dp_reg_value |= DP_LINK_TRAIN_PAT_2;
+			break;
+		}
+	}
+
 	I915_WRITE(intel_dp->output_reg, dp_reg_value);
 	POSTING_READ(intel_dp->output_reg);
 
@@ -1675,12 +1682,15 @@
 				    DP_TRAINING_PATTERN_SET,
 				    dp_train_pat);
 
-	ret = intel_dp_aux_native_write(intel_dp,
-					DP_TRAINING_LANE0_SET,
-					intel_dp->train_set,
-					intel_dp->lane_count);
-	if (ret != intel_dp->lane_count)
-		return false;
+	if ((dp_train_pat & DP_TRAINING_PATTERN_MASK) !=
+	    DP_TRAINING_PATTERN_DISABLE) {
+		ret = intel_dp_aux_native_write(intel_dp,
+						DP_TRAINING_LANE0_SET,
+						intel_dp->train_set,
+						intel_dp->lane_count);
+		if (ret != intel_dp->lane_count)
+			return false;
+	}
 
 	return true;
 }
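
intel_dp_set_link_train() now derives the CPT or non-CPT register bits
from the DPCD pattern value itself, so callers pass the generic
DP_TRAINING_PATTERN_* constants instead of pre-baked
DP_LINK_TRAIN_PAT_* bits, and disabling training drops to one call, as
the later hunks in this file show:

if (!intel_dp_set_link_train(intel_dp, DP,
			     DP_TRAINING_PATTERN_1 |
			     DP_LINK_SCRAMBLING_DISABLE))
	break;	/* sketch: bail out of the training loop */

/* ...and once training is complete: */
intel_dp_set_link_train(intel_dp, DP, DP_TRAINING_PATTERN_DISABLE);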
@@ -1696,7 +1706,6 @@
 	uint8_t voltage;
 	bool clock_recovery = false;
 	int voltage_tries, loop_tries;
-	u32 reg;
 	uint32_t DP = intel_dp->DP;
 
 	/*
@@ -1717,10 +1726,6 @@
 
 	DP |= DP_PORT_EN;
 
-	if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp)))
-		DP &= ~DP_LINK_TRAIN_MASK_CPT;
-	else
-		DP &= ~DP_LINK_TRAIN_MASK;
 	memset(intel_dp->train_set, 0, 4);
 	voltage = 0xff;
 	voltage_tries = 0;
@@ -1744,12 +1749,7 @@
 			DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels;
 		}
 
-		if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp)))
-			reg = DP | DP_LINK_TRAIN_PAT_1_CPT;
-		else
-			reg = DP | DP_LINK_TRAIN_PAT_1;
-
-		if (!intel_dp_set_link_train(intel_dp, reg,
+		if (!intel_dp_set_link_train(intel_dp, DP,
 					     DP_TRAINING_PATTERN_1 |
 					     DP_LINK_SCRAMBLING_DISABLE))
 			break;
@@ -1804,10 +1804,8 @@
 intel_dp_complete_link_train(struct intel_dp *intel_dp)
 {
 	struct drm_device *dev = intel_dp->base.base.dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
 	bool channel_eq = false;
 	int tries, cr_tries;
-	u32 reg;
 	uint32_t DP = intel_dp->DP;
 
 	/* channel equalization */
@@ -1836,13 +1834,8 @@
 			DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels;
 		}
 
-		if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp)))
-			reg = DP | DP_LINK_TRAIN_PAT_2_CPT;
-		else
-			reg = DP | DP_LINK_TRAIN_PAT_2;
-
 		/* channel eq pattern */
-		if (!intel_dp_set_link_train(intel_dp, reg,
+		if (!intel_dp_set_link_train(intel_dp, DP,
 					     DP_TRAINING_PATTERN_2 |
 					     DP_LINK_SCRAMBLING_DISABLE))
 			break;
@@ -1877,15 +1870,7 @@
 		++tries;
 	}
 
-	if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp)))
-		reg = DP | DP_LINK_TRAIN_OFF_CPT;
-	else
-		reg = DP | DP_LINK_TRAIN_OFF;
-
-	I915_WRITE(intel_dp->output_reg, reg);
-	POSTING_READ(intel_dp->output_reg);
-	intel_dp_aux_native_write_1(intel_dp,
-				    DP_TRAINING_PATTERN_SET, DP_TRAINING_PATTERN_DISABLE);
+	intel_dp_set_link_train(intel_dp, DP, DP_TRAINING_PATTERN_DISABLE);
 }
 
 static void
@@ -2160,7 +2145,6 @@
 		ret = drm_add_edid_modes(connector, intel_dp->edid);
 		drm_edid_to_eld(connector,
 				intel_dp->edid);
-		connector->display_info.raw_edid = NULL;
 		return intel_dp->edid_mode_count;
 	}
 
@@ -2206,7 +2190,6 @@
 		edid = intel_dp_get_edid(connector, &intel_dp->adapter);
 		if (edid) {
 			intel_dp->has_audio = drm_detect_monitor_audio(edid);
-			connector->display_info.raw_edid = NULL;
 			kfree(edid);
 		}
 	}
@@ -2271,8 +2254,6 @@
 	edid = intel_dp_get_edid(connector, &intel_dp->adapter);
 	if (edid) {
 		has_audio = drm_detect_monitor_audio(edid);
-
-		connector->display_info.raw_edid = NULL;
 		kfree(edid);
 	}
 
@@ -2441,7 +2422,7 @@
 }
 
 void
-intel_dp_init(struct drm_device *dev, int output_reg)
+intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_connector *connector;
@@ -2456,6 +2437,7 @@
 		return;
 
 	intel_dp->output_reg = output_reg;
+	intel_dp->port = port;
 	intel_dp->dpms_mode = -1;
 
 	intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
@@ -2483,18 +2465,10 @@
 
 	connector->polled = DRM_CONNECTOR_POLL_HPD;
 
-	if (output_reg == DP_B || output_reg == PCH_DP_B)
-		intel_encoder->clone_mask = (1 << INTEL_DP_B_CLONE_BIT);
-	else if (output_reg == DP_C || output_reg == PCH_DP_C)
-		intel_encoder->clone_mask = (1 << INTEL_DP_C_CLONE_BIT);
-	else if (output_reg == DP_D || output_reg == PCH_DP_D)
-		intel_encoder->clone_mask = (1 << INTEL_DP_D_CLONE_BIT);
+	intel_encoder->cloneable = false;
 
-	if (is_edp(intel_dp)) {
-		intel_encoder->clone_mask = (1 << INTEL_EDP_CLONE_BIT);
-		INIT_DELAYED_WORK(&intel_dp->panel_vdd_work,
-				  ironlake_panel_vdd_work);
-	}
+	INIT_DELAYED_WORK(&intel_dp->panel_vdd_work,
+			  ironlake_panel_vdd_work);
 
 	intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
 
@@ -2509,28 +2483,25 @@
 	drm_sysfs_connector_add(connector);
 
 	/* Set up the DDC bus. */
-	switch (output_reg) {
-		case DP_A:
-			name = "DPDDC-A";
-			break;
-		case DP_B:
-		case PCH_DP_B:
-			dev_priv->hotplug_supported_mask |=
-				DPB_HOTPLUG_INT_STATUS;
-			name = "DPDDC-B";
-			break;
-		case DP_C:
-		case PCH_DP_C:
-			dev_priv->hotplug_supported_mask |=
-				DPC_HOTPLUG_INT_STATUS;
-			name = "DPDDC-C";
-			break;
-		case DP_D:
-		case PCH_DP_D:
-			dev_priv->hotplug_supported_mask |=
-				DPD_HOTPLUG_INT_STATUS;
-			name = "DPDDC-D";
-			break;
+	switch (port) {
+	case PORT_A:
+		name = "DPDDC-A";
+		break;
+	case PORT_B:
+		dev_priv->hotplug_supported_mask |= DPB_HOTPLUG_INT_STATUS;
+		name = "DPDDC-B";
+		break;
+	case PORT_C:
+		dev_priv->hotplug_supported_mask |= DPC_HOTPLUG_INT_STATUS;
+		name = "DPDDC-C";
+		break;
+	case PORT_D:
+		dev_priv->hotplug_supported_mask |= DPD_HOTPLUG_INT_STATUS;
+		name = "DPDDC-D";
+		break;
+	default:
+		WARN(1, "Invalid port %c\n", port_name(port));
+		break;
 	}
 
 	intel_dp_i2c_init(intel_dp, intel_connector, name);
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index cd54cf8..e86b3a2 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -31,6 +31,7 @@
 #include "drm_crtc.h"
 #include "drm_crtc_helper.h"
 #include "drm_fb_helper.h"
+#include "drm_dp_helper.h"
 
 #define _wait_for(COND, MS, W) ({ \
 	unsigned long timeout__ = jiffies + msecs_to_jiffies(MS);	\
@@ -90,25 +91,6 @@
 #define INTEL_OUTPUT_DISPLAYPORT 7
 #define INTEL_OUTPUT_EDP 8
 
-/* Intel Pipe Clone Bit */
-#define INTEL_HDMIB_CLONE_BIT 1
-#define INTEL_HDMIC_CLONE_BIT 2
-#define INTEL_HDMID_CLONE_BIT 3
-#define INTEL_HDMIE_CLONE_BIT 4
-#define INTEL_HDMIF_CLONE_BIT 5
-#define INTEL_SDVO_NON_TV_CLONE_BIT 6
-#define INTEL_SDVO_TV_CLONE_BIT 7
-#define INTEL_SDVO_LVDS_CLONE_BIT 8
-#define INTEL_ANALOG_CLONE_BIT 9
-#define INTEL_TV_CLONE_BIT 10
-#define INTEL_DP_B_CLONE_BIT 11
-#define INTEL_DP_C_CLONE_BIT 12
-#define INTEL_DP_D_CLONE_BIT 13
-#define INTEL_LVDS_CLONE_BIT 14
-#define INTEL_DVO_TMDS_CLONE_BIT 15
-#define INTEL_DVO_LVDS_CLONE_BIT 16
-#define INTEL_EDP_CLONE_BIT 17
-
 #define INTEL_DVO_CHIP_NONE 0
 #define INTEL_DVO_CHIP_LVDS 1
 #define INTEL_DVO_CHIP_TMDS 2
@@ -153,9 +135,13 @@
 	struct drm_encoder base;
 	int type;
 	bool needs_tv_clock;
+	/*
+	 * Intel hw has only one MUX where encoders could be cloned, hence a
+	 * simple flag is enough to compute the possible_clones mask.
+	 */
+	bool cloneable;
 	void (*hot_plug)(struct intel_encoder *);
 	int crtc_mask;
-	int clone_mask;
 };
 
 struct intel_connector {
@@ -171,8 +157,6 @@
 	int dpms_mode;
 	bool active; /* is the crtc on? independent of the dpms mode */
 	bool primary_disabled; /* is the crtc obscured by a plane? */
-	bool busy; /* is scanout buffer being updated frequently? */
-	struct timer_list idle_timer;
 	bool lowfreq_avail;
 	struct intel_overlay *overlay;
 	struct intel_unpin_work *unpin_work;
@@ -311,6 +295,38 @@
 			       struct drm_display_mode *adjusted_mode);
 };
 
+#define DP_RECEIVER_CAP_SIZE		0xf
+#define DP_LINK_CONFIGURATION_SIZE	9
+
+struct intel_dp {
+	struct intel_encoder base;
+	uint32_t output_reg;
+	uint32_t DP;
+	uint8_t  link_configuration[DP_LINK_CONFIGURATION_SIZE];
+	bool has_audio;
+	enum hdmi_force_audio force_audio;
+	enum port port;
+	uint32_t color_range;
+	int dpms_mode;
+	uint8_t link_bw;
+	uint8_t lane_count;
+	uint8_t dpcd[DP_RECEIVER_CAP_SIZE];
+	struct i2c_adapter adapter;
+	struct i2c_algo_dp_aux_data algo;
+	bool is_pch_edp;
+	uint8_t train_set[4];
+	int panel_power_up_delay;
+	int panel_power_down_delay;
+	int panel_power_cycle_delay;
+	int backlight_on_delay;
+	int backlight_off_delay;
+	struct drm_display_mode *panel_fixed_mode;  /* for eDP */
+	struct delayed_work panel_vdd_work;
+	bool want_panel_vdd;
+	struct edid *edid; /* cached EDID for eDP */
+	int edid_mode_count;
+};
+
 static inline struct drm_crtc *
 intel_get_crtc_for_pipe(struct drm_device *dev, int pipe)
 {
@@ -350,17 +366,21 @@
 extern void intel_attach_broadcast_rgb_property(struct drm_connector *connector);
 
 extern void intel_crt_init(struct drm_device *dev);
-extern void intel_hdmi_init(struct drm_device *dev, int sdvox_reg);
+extern void intel_hdmi_init(struct drm_device *dev,
+			    int sdvox_reg, enum port port);
 extern struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder);
 extern void intel_dip_infoframe_csum(struct dip_infoframe *avi_if);
 extern bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg,
 			    bool is_sdvob);
 extern void intel_dvo_init(struct drm_device *dev);
 extern void intel_tv_init(struct drm_device *dev);
-extern void intel_mark_busy(struct drm_device *dev,
-			    struct drm_i915_gem_object *obj);
+extern void intel_mark_busy(struct drm_device *dev);
+extern void intel_mark_idle(struct drm_device *dev);
+extern void intel_mark_fb_busy(struct drm_i915_gem_object *obj);
+extern void intel_mark_fb_idle(struct drm_i915_gem_object *obj);
 extern bool intel_lvds_init(struct drm_device *dev);
-extern void intel_dp_init(struct drm_device *dev, int dp_reg);
+extern void intel_dp_init(struct drm_device *dev, int output_reg,
+			  enum port port);
 void
 intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
 		 struct drm_display_mode *adjusted_mode);
@@ -373,8 +393,6 @@
 extern void intel_flush_display_plane(struct drm_i915_private *dev_priv,
 				      enum plane plane);
 
-void intel_sanitize_pm(struct drm_device *dev);
-
 /* intel_panel.c */
 extern void intel_fixed_panel_mode(struct drm_display_mode *fixed_mode,
 				   struct drm_display_mode *adjusted_mode);
diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c
index 36c542e..03dfdff 100644
--- a/drivers/gpu/drm/i915/intel_dvo.c
+++ b/drivers/gpu/drm/i915/intel_dvo.c
@@ -37,6 +37,7 @@
 #define SIL164_ADDR	0x38
 #define CH7xxx_ADDR	0x76
 #define TFP410_ADDR	0x38
+#define NS2501_ADDR     0x38
 
 static const struct intel_dvo_device intel_dvo_devices[] = {
 	{
@@ -74,7 +75,14 @@
 		.slave_addr = 0x75,
 		.gpio = GMBUS_PORT_DPB,
 		.dev_ops = &ch7017_ops,
-	}
+	},
+	{
+		.type = INTEL_DVO_CHIP_TMDS,
+		.name = "ns2501",
+		.dvo_reg = DVOC,
+		.slave_addr = NS2501_ADDR,
+		.dev_ops = &ns2501_ops,
+	}
 };
 
 struct intel_dvo {
@@ -396,17 +404,14 @@
 		intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
 		switch (dvo->type) {
 		case INTEL_DVO_CHIP_TMDS:
-			intel_encoder->clone_mask =
-				(1 << INTEL_DVO_TMDS_CLONE_BIT) |
-				(1 << INTEL_ANALOG_CLONE_BIT);
+			intel_encoder->cloneable = true;
 			drm_connector_init(dev, connector,
 					   &intel_dvo_connector_funcs,
 					   DRM_MODE_CONNECTOR_DVII);
 			encoder_type = DRM_MODE_ENCODER_TMDS;
 			break;
 		case INTEL_DVO_CHIP_LVDS:
-			intel_encoder->clone_mask =
-				(1 << INTEL_DVO_LVDS_CLONE_BIT);
+			intel_encoder->cloneable = false;
 			drm_connector_init(dev, connector,
 					   &intel_dvo_connector_funcs,
 					   DRM_MODE_CONNECTOR_LVDS);
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index 98f6024..35a6ee7 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -737,7 +737,6 @@
 						drm_detect_hdmi_monitor(edid);
 			intel_hdmi->has_audio = drm_detect_monitor_audio(edid);
 		}
-		connector->display_info.raw_edid = NULL;
 		kfree(edid);
 	}
 
@@ -778,8 +777,6 @@
 	if (edid) {
 		if (edid->input & DRM_EDID_INPUT_DIGITAL)
 			has_audio = drm_detect_monitor_audio(edid);
-
-		connector->display_info.raw_edid = NULL;
 		kfree(edid);
 	}
 
@@ -889,7 +886,7 @@
 	intel_attach_broadcast_rgb_property(connector);
 }
 
-void intel_hdmi_init(struct drm_device *dev, int sdvox_reg)
+void intel_hdmi_init(struct drm_device *dev, int sdvox_reg, enum port port)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_connector *connector;
@@ -923,48 +920,25 @@
 	connector->doublescan_allowed = 0;
 	intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
 
-	/* Set up the DDC bus. */
-	if (sdvox_reg == SDVOB) {
-		intel_encoder->clone_mask = (1 << INTEL_HDMIB_CLONE_BIT);
+	intel_encoder->cloneable = false;
+
+	intel_hdmi->ddi_port = port;
+	switch (port) {
+	case PORT_B:
 		intel_hdmi->ddc_bus = GMBUS_PORT_DPB;
 		dev_priv->hotplug_supported_mask |= HDMIB_HOTPLUG_INT_STATUS;
-	} else if (sdvox_reg == SDVOC) {
-		intel_encoder->clone_mask = (1 << INTEL_HDMIC_CLONE_BIT);
+		break;
+	case PORT_C:
 		intel_hdmi->ddc_bus = GMBUS_PORT_DPC;
 		dev_priv->hotplug_supported_mask |= HDMIC_HOTPLUG_INT_STATUS;
-	} else if (sdvox_reg == HDMIB) {
-		intel_encoder->clone_mask = (1 << INTEL_HDMID_CLONE_BIT);
-		intel_hdmi->ddc_bus = GMBUS_PORT_DPB;
-		dev_priv->hotplug_supported_mask |= HDMIB_HOTPLUG_INT_STATUS;
-	} else if (sdvox_reg == HDMIC) {
-		intel_encoder->clone_mask = (1 << INTEL_HDMIE_CLONE_BIT);
-		intel_hdmi->ddc_bus = GMBUS_PORT_DPC;
-		dev_priv->hotplug_supported_mask |= HDMIC_HOTPLUG_INT_STATUS;
-	} else if (sdvox_reg == HDMID) {
-		intel_encoder->clone_mask = (1 << INTEL_HDMIF_CLONE_BIT);
+		break;
+	case PORT_D:
 		intel_hdmi->ddc_bus = GMBUS_PORT_DPD;
 		dev_priv->hotplug_supported_mask |= HDMID_HOTPLUG_INT_STATUS;
-	} else if (sdvox_reg == DDI_BUF_CTL(PORT_B)) {
-		DRM_DEBUG_DRIVER("LPT: detected output on DDI B\n");
-		intel_encoder->clone_mask = (1 << INTEL_HDMIB_CLONE_BIT);
-		intel_hdmi->ddc_bus = GMBUS_PORT_DPB;
-		intel_hdmi->ddi_port = PORT_B;
-		dev_priv->hotplug_supported_mask |= HDMIB_HOTPLUG_INT_STATUS;
-	} else if (sdvox_reg == DDI_BUF_CTL(PORT_C)) {
-		DRM_DEBUG_DRIVER("LPT: detected output on DDI C\n");
-		intel_encoder->clone_mask = (1 << INTEL_HDMIC_CLONE_BIT);
-		intel_hdmi->ddc_bus = GMBUS_PORT_DPC;
-		intel_hdmi->ddi_port = PORT_C;
-		dev_priv->hotplug_supported_mask |= HDMIC_HOTPLUG_INT_STATUS;
-	} else if (sdvox_reg == DDI_BUF_CTL(PORT_D)) {
-		DRM_DEBUG_DRIVER("LPT: detected output on DDI D\n");
-		intel_encoder->clone_mask = (1 << INTEL_HDMID_CLONE_BIT);
-		intel_hdmi->ddc_bus = GMBUS_PORT_DPD;
-		intel_hdmi->ddi_port = PORT_D;
-		dev_priv->hotplug_supported_mask |= HDMID_HOTPLUG_INT_STATUS;
-	} else {
-		/* If we got an unknown sdvox_reg, things are pretty much broken
-		 * in a way that we should let the kernel know about it */
+		break;
+	case PORT_A:
+		/* Internal port only for eDP. */
+	default:
 		BUG();
 	}
 
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index e05c0d3..d789fdad 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -967,7 +967,7 @@
 	intel_connector_attach_encoder(intel_connector, intel_encoder);
 	intel_encoder->type = INTEL_OUTPUT_LVDS;
 
-	intel_encoder->clone_mask = (1 << INTEL_LVDS_CLONE_BIT);
+	intel_encoder->cloneable = false;
 	if (HAS_PCH_SPLIT(dev))
 		intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
 	else if (IS_GEN4(dev))
diff --git a/drivers/gpu/drm/i915/intel_modes.c b/drivers/gpu/drm/i915/intel_modes.c
index 29b7259..4bc1c0f 100644
--- a/drivers/gpu/drm/i915/intel_modes.c
+++ b/drivers/gpu/drm/i915/intel_modes.c
@@ -45,7 +45,6 @@
 	drm_mode_connector_update_edid_property(connector, edid);
 	ret = drm_add_edid_modes(connector, edid);
 	drm_edid_to_eld(connector, edid);
-	connector->display_info.raw_edid = NULL;
 	kfree(edid);
 
 	return ret;
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 1881c8c..9b05f78 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -2160,11 +2160,28 @@
 	return NULL;
 }
 
+/**
+ * Lock protecting IPS related data structures
+ *   - i915_mch_dev
+ *   - dev_priv->max_delay
+ *   - dev_priv->min_delay
+ *   - dev_priv->fmax
+ *   - dev_priv->gpu_busy
+ *   - dev_priv->gfx_power
+ */
+DEFINE_SPINLOCK(mchdev_lock);
+
+/* Global for IPS driver to get at the current i915 device. Protected by
+ * mchdev_lock. */
+static struct drm_i915_private *i915_mch_dev;
+
 bool ironlake_set_drps(struct drm_device *dev, u8 val)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	u16 rgvswctl;
 
+	assert_spin_locked(&mchdev_lock);
+
 	rgvswctl = I915_READ16(MEMSWCTL);
 	if (rgvswctl & MEMCTL_CMD_STS) {
 		DRM_DEBUG("gpu busy, RCS change rejected\n");
@@ -2188,6 +2205,8 @@
 	u32 rgvmodectl = I915_READ(MEMMODECTL);
 	u8 fmax, fmin, fstart, vstart;
 
+	spin_lock_irq(&mchdev_lock);
+
 	/* Enable temp reporting */
 	I915_WRITE16(PMMISC, I915_READ(PMMISC) | MCPPCE_EN);
 	I915_WRITE16(TSC1, I915_READ(TSC1) | TSE);
@@ -2233,9 +2252,9 @@
 	rgvmodectl |= MEMMODE_SWMODE_EN;
 	I915_WRITE(MEMMODECTL, rgvmodectl);
 
-	if (wait_for((I915_READ(MEMSWCTL) & MEMCTL_CMD_STS) == 0, 10))
+	if (wait_for_atomic((I915_READ(MEMSWCTL) & MEMCTL_CMD_STS) == 0, 10))
 		DRM_ERROR("stuck trying to change perf mode\n");
-	msleep(1);
+	mdelay(1);
 
 	ironlake_set_drps(dev, fstart);
 
@@ -2244,12 +2263,18 @@
 	dev_priv->last_time1 = jiffies_to_msecs(jiffies);
 	dev_priv->last_count2 = I915_READ(0x112f4);
 	getrawmonotonic(&dev_priv->last_time2);
+
+	spin_unlock_irq(&mchdev_lock);
 }
 
 static void ironlake_disable_drps(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	u16 rgvswctl = I915_READ16(MEMSWCTL);
+	u16 rgvswctl;
+
+	spin_lock_irq(&mchdev_lock);
+
+	rgvswctl = I915_READ16(MEMSWCTL);
 
 	/* Ack interrupts, disable EFC interrupt */
 	I915_WRITE(MEMINTREN, I915_READ(MEMINTREN) & ~MEMINT_EVAL_CHG_EN);
@@ -2260,30 +2285,51 @@
 
 	/* Go back to the starting frequency */
 	ironlake_set_drps(dev, dev_priv->fstart);
-	msleep(1);
+	mdelay(1);
 	rgvswctl |= MEMCTL_CMD_STS;
 	I915_WRITE(MEMSWCTL, rgvswctl);
-	msleep(1);
+	mdelay(1);
 
+	spin_unlock_irq(&mchdev_lock);
+}
+
+/* There's a funny hw issue where the hw returns all 0 when reading from
+ * GEN6_RP_INTERRUPT_LIMITS. Hence we always need to compute the desired value
+ * ourselves, instead of doing a read-modify-write cycle (which might result
+ * in us clearing all limits, leaving the gpu stuck at its current frequency).
+ */
+static u32 gen6_rps_limits(struct drm_i915_private *dev_priv, u8 *val)
+{
+	u32 limits;
+
+	limits = 0;
+
+	if (*val >= dev_priv->rps.max_delay)
+		*val = dev_priv->rps.max_delay;
+	limits |= dev_priv->rps.max_delay << 24;
+
+	/* Only set the down limit when we've reached the lowest level to avoid
+	 * getting more interrupts, otherwise leave this clear. This prevents a
+	 * race in the hw when coming out of rc6: There's a tiny window where
+	 * the hw runs at the minimal clock before selecting the desired
+	 * frequency, if the down threshold expires in that window we will not
+	 * receive a down interrupt. */
+	if (*val <= dev_priv->rps.min_delay) {
+		*val = dev_priv->rps.min_delay;
+		limits |= dev_priv->rps.min_delay << 16;
+	}
+
+	return limits;
 }
 
 void gen6_set_rps(struct drm_device *dev, u8 val)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	u32 limits;
+	u32 limits = gen6_rps_limits(dev_priv, &val);
 
-	limits = 0;
-	if (val >= dev_priv->max_delay)
-		val = dev_priv->max_delay;
-	else
-		limits |= dev_priv->max_delay << 24;
+	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
 
-	if (val <= dev_priv->min_delay)
-		val = dev_priv->min_delay;
-	else
-		limits |= dev_priv->min_delay << 16;
-
-	if (val == dev_priv->cur_delay)
+	if (val == dev_priv->rps.cur_delay)
 		return;
 
 	I915_WRITE(GEN6_RPNSWREQ,
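gen6_rps_limits() both clamps *val in place and builds the
GEN6_RP_INTERRUPT_LIMITS word: the up limit (bits 31:24) is always
programmed, the down limit (bits 23:16) only once the floor is
reached. A standalone sketch with made-up caps:

static unsigned int rps_limits(unsigned int max, unsigned int min,
			       unsigned char *val)
{
	unsigned int limits = 0;

	if (*val >= max)
		*val = max;
	limits |= max << 24;		/* up limit: always */

	if (*val <= min) {
		*val = min;
		limits |= min << 16;	/* down limit: only at the floor */
	}
	return limits;
}

/* With max = 0x20 and min = 0x08 (illustrative): val 0x30 clamps to
 * 0x20 and yields 0x20000000; val 0x04 clamps to 0x08 and yields
 * 0x20080000. */
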
@@ -2296,7 +2342,7 @@
 	 */
 	I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, limits);
 
-	dev_priv->cur_delay = val;
+	dev_priv->rps.cur_delay = val;
 }
 
 static void gen6_disable_rps(struct drm_device *dev)
@@ -2312,40 +2358,35 @@
 	 * register (PMIMR) to mask PM interrupts. The only risk is in leaving
 	 * stale bits in PMIIR and PMIMR which gen6_enable_rps will clean up. */
 
-	spin_lock_irq(&dev_priv->rps_lock);
-	dev_priv->pm_iir = 0;
-	spin_unlock_irq(&dev_priv->rps_lock);
+	spin_lock_irq(&dev_priv->rps.lock);
+	dev_priv->rps.pm_iir = 0;
+	spin_unlock_irq(&dev_priv->rps.lock);
 
 	I915_WRITE(GEN6_PMIIR, I915_READ(GEN6_PMIIR));
 }
 
 int intel_enable_rc6(const struct drm_device *dev)
 {
-	/*
-	 * Respect the kernel parameter if it is set
-	 */
+	/* Respect the kernel parameter if it is set */
 	if (i915_enable_rc6 >= 0)
 		return i915_enable_rc6;
 
-	/*
-	 * Disable RC6 on Ironlake
-	 */
-	if (INTEL_INFO(dev)->gen == 5)
-		return 0;
-
-	/* On Haswell, only RC6 is available. So let's enable it by default to
-	 * provide better testing and coverage since the beginning.
-	 */
-	if (IS_HASWELL(dev))
+	if (INTEL_INFO(dev)->gen == 5) {
+		DRM_DEBUG_DRIVER("Ironlake: only RC6 available\n");
 		return INTEL_RC6_ENABLE;
+	}
 
-	/*
-	 * Disable rc6 on Sandybridge
-	 */
+	if (IS_HASWELL(dev)) {
+		DRM_DEBUG_DRIVER("Haswell: only RC6 available\n");
+		return INTEL_RC6_ENABLE;
+	}
+
+	/* snb/ivb have more than one rc6 state. */
 	if (INTEL_INFO(dev)->gen == 6) {
 		DRM_DEBUG_DRIVER("Sandybridge: deep RC6 disabled\n");
 		return INTEL_RC6_ENABLE;
 	}
+
 	DRM_DEBUG_DRIVER("RC6 and deep RC6 enabled\n");
 	return (INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE);
 }
@@ -2383,9 +2424,9 @@
 	gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
 
 	/* In units of 100MHz */
-	dev_priv->max_delay = rp_state_cap & 0xff;
-	dev_priv->min_delay = (rp_state_cap & 0xff0000) >> 16;
-	dev_priv->cur_delay = 0;
+	dev_priv->rps.max_delay = rp_state_cap & 0xff;
+	dev_priv->rps.min_delay = (rp_state_cap & 0xff0000) >> 16;
+	dev_priv->rps.cur_delay = 0;
 
 	/* disable the counters and set deterministic thresholds */
 	I915_WRITE(GEN6_RC_CONTROL, 0);
@@ -2438,8 +2479,8 @@
 
 	I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000);
 	I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
-		   dev_priv->max_delay << 24 |
-		   dev_priv->min_delay << 16);
+		   dev_priv->rps.max_delay << 24 |
+		   dev_priv->rps.min_delay << 16);
 
 	I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400);
 	I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 245000);
@@ -2477,7 +2518,7 @@
 		     500))
 		DRM_ERROR("timeout waiting for pcode mailbox to finish\n");
 	if (pcu_mbox & (1<<31)) { /* OC supported */
-		dev_priv->max_delay = pcu_mbox & 0xff;
+		dev_priv->rps.max_delay = pcu_mbox & 0xff;
 		DRM_DEBUG_DRIVER("overclocking supported, adjusting frequency max to %dMHz\n", pcu_mbox * 50);
 	}
 
@@ -2485,10 +2526,10 @@
 
 	/* requires MSI enabled */
 	I915_WRITE(GEN6_PMIER, GEN6_PM_DEFERRED_EVENTS);
-	spin_lock_irq(&dev_priv->rps_lock);
-	WARN_ON(dev_priv->pm_iir != 0);
+	spin_lock_irq(&dev_priv->rps.lock);
+	WARN_ON(dev_priv->rps.pm_iir != 0);
 	I915_WRITE(GEN6_PMIMR, 0);
-	spin_unlock_irq(&dev_priv->rps_lock);
+	spin_unlock_irq(&dev_priv->rps.lock);
 	/* enable all PM interrupts */
 	I915_WRITE(GEN6_PMINTRMSK, 0);
 
@@ -2520,9 +2561,9 @@
 	 * to use for memory access.  We do this by specifying the IA frequency
 	 * the PCU should use as a reference to determine the ring frequency.
 	 */
-	for (gpu_freq = dev_priv->max_delay; gpu_freq >= dev_priv->min_delay;
+	for (gpu_freq = dev_priv->rps.max_delay; gpu_freq >= dev_priv->rps.min_delay;
 	     gpu_freq--) {
-		int diff = dev_priv->max_delay - gpu_freq;
+		int diff = dev_priv->rps.max_delay - gpu_freq;
 
 		/*
 		 * For GPU frequencies less than 750MHz, just use the lowest
@@ -2693,6 +2734,8 @@
 	unsigned long now = jiffies_to_msecs(jiffies), diff1;
 	int i;
 
+	assert_spin_locked(&mchdev_lock);
+
 	diff1 = now - dev_priv->last_time1;
 
 	/* Prevent division-by-zero if we are asking too fast.
@@ -2894,15 +2937,14 @@
 		return v_table[pxvid].vd;
 }
 
-void i915_update_gfx_val(struct drm_i915_private *dev_priv)
+static void __i915_update_gfx_val(struct drm_i915_private *dev_priv)
 {
 	struct timespec now, diff1;
 	u64 diff;
 	unsigned long diffms;
 	u32 count;
 
-	if (dev_priv->info->gen != 5)
-		return;
+	assert_spin_locked(&mchdev_lock);
 
 	getrawmonotonic(&now);
 	diff1 = timespec_sub(now, dev_priv->last_time2);
@@ -2930,12 +2972,26 @@
 	dev_priv->gfx_power = diff;
 }
 
+void i915_update_gfx_val(struct drm_i915_private *dev_priv)
+{
+	if (dev_priv->info->gen != 5)
+		return;
+
+	spin_lock_irq(&mchdev_lock);
+
+	__i915_update_gfx_val(dev_priv);
+
+	spin_unlock_irq(&mchdev_lock);
+}
+
 unsigned long i915_gfx_val(struct drm_i915_private *dev_priv)
 {
 	unsigned long t, corr, state1, corr2, state2;
 	u32 pxvid, ext_v;
 
-	pxvid = I915_READ(PXVFREQ_BASE + (dev_priv->cur_delay * 4));
+	assert_spin_locked(&mchdev_lock);
+
+	pxvid = I915_READ(PXVFREQ_BASE + (dev_priv->rps.cur_delay * 4));
 	pxvid = (pxvid >> 24) & 0x7f;
 	ext_v = pvid_to_extvid(dev_priv, pxvid);
 
@@ -2960,23 +3016,11 @@
 	state2 = (corr2 * state1) / 10000;
 	state2 /= 100; /* convert to mW */
 
-	i915_update_gfx_val(dev_priv);
+	__i915_update_gfx_val(dev_priv);
 
 	return dev_priv->gfx_power + state2;
 }
 
-/* Global for IPS driver to get at the current i915 device */
-static struct drm_i915_private *i915_mch_dev;
-/*
- * Lock protecting IPS related data structures
- *   - i915_mch_dev
- *   - dev_priv->max_delay
- *   - dev_priv->min_delay
- *   - dev_priv->fmax
- *   - dev_priv->gpu_busy
- */
-static DEFINE_SPINLOCK(mchdev_lock);
-
 /**
  * i915_read_mch_val - return value for IPS use
  *
@@ -2988,7 +3032,7 @@
 	struct drm_i915_private *dev_priv;
 	unsigned long chipset_val, graphics_val, ret = 0;
 
-	spin_lock(&mchdev_lock);
+	spin_lock_irq(&mchdev_lock);
 	if (!i915_mch_dev)
 		goto out_unlock;
 	dev_priv = i915_mch_dev;
@@ -2999,7 +3043,7 @@
 	ret = chipset_val + graphics_val;
 
 out_unlock:
-	spin_unlock(&mchdev_lock);
+	spin_unlock_irq(&mchdev_lock);
 
 	return ret;
 }
@@ -3015,7 +3059,7 @@
 	struct drm_i915_private *dev_priv;
 	bool ret = true;
 
-	spin_lock(&mchdev_lock);
+	spin_lock_irq(&mchdev_lock);
 	if (!i915_mch_dev) {
 		ret = false;
 		goto out_unlock;
@@ -3026,7 +3070,7 @@
 		dev_priv->max_delay--;
 
 out_unlock:
-	spin_unlock(&mchdev_lock);
+	spin_unlock_irq(&mchdev_lock);
 
 	return ret;
 }
@@ -3043,7 +3087,7 @@
 	struct drm_i915_private *dev_priv;
 	bool ret = true;
 
-	spin_lock(&mchdev_lock);
+	spin_lock_irq(&mchdev_lock);
 	if (!i915_mch_dev) {
 		ret = false;
 		goto out_unlock;
@@ -3054,7 +3098,7 @@
 		dev_priv->max_delay++;
 
 out_unlock:
-	spin_unlock(&mchdev_lock);
+	spin_unlock_irq(&mchdev_lock);
 
 	return ret;
 }
@@ -3068,17 +3112,20 @@
 bool i915_gpu_busy(void)
 {
 	struct drm_i915_private *dev_priv;
+	struct intel_ring_buffer *ring;
 	bool ret = false;
+	int i;
 
-	spin_lock(&mchdev_lock);
+	spin_lock_irq(&mchdev_lock);
 	if (!i915_mch_dev)
 		goto out_unlock;
 	dev_priv = i915_mch_dev;
 
-	ret = dev_priv->busy;
+	for_each_ring(ring, dev_priv, i)
+		ret |= !list_empty(&ring->request_list);
 
 out_unlock:
-	spin_unlock(&mchdev_lock);
+	spin_unlock_irq(&mchdev_lock);
 
 	return ret;
 }
@@ -3095,7 +3142,7 @@
 	struct drm_i915_private *dev_priv;
 	bool ret = true;
 
-	spin_lock(&mchdev_lock);
+	spin_lock_irq(&mchdev_lock);
 	if (!i915_mch_dev) {
 		ret = false;
 		goto out_unlock;
@@ -3108,7 +3155,7 @@
 		ret = false;
 
 out_unlock:
-	spin_unlock(&mchdev_lock);
+	spin_unlock_irq(&mchdev_lock);
 
 	return ret;
 }
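
Every exported IPS hook now follows the same shape: take mchdev_lock
with interrupts off (the lock is presumably also taken from the
Ironlake RPS-change interrupt path, hence the _irq variants), bail out
if no device has registered, and only then touch the shared state. A
sketch using a hypothetical hook name:

bool i915_some_ips_hook(void)	/* hypothetical */
{
	bool ret = true;

	spin_lock_irq(&mchdev_lock);
	if (!i915_mch_dev) {	/* driver not registered (yet) */
		ret = false;
		goto out_unlock;
	}
	/* ... read or update i915_mch_dev fields under the lock ... */
out_unlock:
	spin_unlock_irq(&mchdev_lock);
	return ret;
}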
@@ -3136,19 +3183,20 @@
 
 void intel_gpu_ips_init(struct drm_i915_private *dev_priv)
 {
-	spin_lock(&mchdev_lock);
+	/* We only register the i915 ips part with intel-ips once everything is
+	 * set up, to avoid intel-ips sneaking in and reading bogus values. */
+	spin_lock_irq(&mchdev_lock);
 	i915_mch_dev = dev_priv;
-	dev_priv->mchdev_lock = &mchdev_lock;
-	spin_unlock(&mchdev_lock);
+	spin_unlock_irq(&mchdev_lock);
 
 	ips_ping_for_i915_load();
 }
 
 void intel_gpu_ips_teardown(void)
 {
-	spin_lock(&mchdev_lock);
+	spin_lock_irq(&mchdev_lock);
 	i915_mch_dev = NULL;
-	spin_unlock(&mchdev_lock);
+	spin_unlock_irq(&mchdev_lock);
 }
 static void intel_init_emon(struct drm_device *dev)
 {
@@ -3728,42 +3776,6 @@
 		dev_priv->display.init_pch_clock_gating(dev);
 }
 
-static void gen6_sanitize_pm(struct drm_device *dev)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	u32 limits, delay, old;
-
-	gen6_gt_force_wake_get(dev_priv);
-
-	old = limits = I915_READ(GEN6_RP_INTERRUPT_LIMITS);
-	/* Make sure we continue to get interrupts
-	 * until we hit the minimum or maximum frequencies.
-	 */
-	limits &= ~(0x3f << 16 | 0x3f << 24);
-	delay = dev_priv->cur_delay;
-	if (delay < dev_priv->max_delay)
-		limits |= (dev_priv->max_delay & 0x3f) << 24;
-	if (delay > dev_priv->min_delay)
-		limits |= (dev_priv->min_delay & 0x3f) << 16;
-
-	if (old != limits) {
-		/* Note that the known failure case is to read back 0. */
-		DRM_DEBUG_DRIVER("Power management discrepancy: GEN6_RP_INTERRUPT_LIMITS "
-				 "expected %08x, was %08x\n", limits, old);
-		I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, limits);
-	}
-
-	gen6_gt_force_wake_put(dev_priv);
-}
-
-void intel_sanitize_pm(struct drm_device *dev)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-
-	if (dev_priv->display.sanitize_pm)
-		dev_priv->display.sanitize_pm(dev);
-}
-
 /* Starting with Haswell, we have different power wells for
  * different parts of the GPU. This attempts to enable them all.
  */
@@ -3849,7 +3861,6 @@
 				dev_priv->display.update_wm = NULL;
 			}
 			dev_priv->display.init_clock_gating = gen6_init_clock_gating;
-			dev_priv->display.sanitize_pm = gen6_sanitize_pm;
 		} else if (IS_IVYBRIDGE(dev)) {
 			/* FIXME: detect B0+ stepping and use auto training */
 			if (SNB_READ_WM0_LATENCY()) {
@@ -3861,7 +3872,6 @@
 				dev_priv->display.update_wm = NULL;
 			}
 			dev_priv->display.init_clock_gating = ivybridge_init_clock_gating;
-			dev_priv->display.sanitize_pm = gen6_sanitize_pm;
 		} else if (IS_HASWELL(dev)) {
 			if (SNB_READ_WM0_LATENCY()) {
 				dev_priv->display.update_wm = sandybridge_update_wm;
@@ -3873,7 +3883,6 @@
 				dev_priv->display.update_wm = NULL;
 			}
 			dev_priv->display.init_clock_gating = haswell_init_clock_gating;
-			dev_priv->display.sanitize_pm = gen6_sanitize_pm;
 		} else
 			dev_priv->display.update_wm = NULL;
 	} else if (IS_VALLEYVIEW(dev)) {
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index e2a73b3..c828169 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -218,11 +218,6 @@
 	u32 scratch_addr = pc->gtt_offset + 128;
 	int ret;
 
-	/* Force SNB workarounds for PIPE_CONTROL flushes */
-	ret = intel_emit_post_sync_nonzero_flush(ring);
-	if (ret)
-		return ret;
-
 	/* Just flush everything.  Experiments have shown that reducing the
 	 * number of bits based on the write domains has little performance
 	 * impact.
@@ -262,6 +257,20 @@
 	return 0;
 }
 
+static int
+gen6_render_ring_flush__wa(struct intel_ring_buffer *ring,
+			   u32 invalidate_domains, u32 flush_domains)
+{
+	int ret;
+
+	/* Force SNB workarounds for PIPE_CONTROL flushes */
+	ret = intel_emit_post_sync_nonzero_flush(ring);
+	if (ret)
+		return ret;
+
+	return gen6_render_ring_flush(ring, invalidate_domains, flush_domains);
+}
+
 static void ring_write_tail(struct intel_ring_buffer *ring,
 			    u32 value)
 {
@@ -462,7 +471,7 @@
 	if (INTEL_INFO(dev)->gen >= 6)
 		I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));
 
-	if (IS_IVYBRIDGE(dev))
+	if (HAS_L3_GPU_CACHE(dev))
 		I915_WRITE_IMR(ring, ~GEN6_RENDER_L3_PARITY_ERROR);
 
 	return ret;
@@ -628,26 +637,24 @@
 }
 
 static u32
-gen6_ring_get_seqno(struct intel_ring_buffer *ring)
+gen6_ring_get_seqno(struct intel_ring_buffer *ring, bool lazy_coherency)
 {
-	struct drm_device *dev = ring->dev;
-
 	/* Workaround to force correct ordering between irq and seqno writes on
 	 * ivb (and maybe also on snb) by reading from a CS register (like
 	 * ACTHD) before reading the status page. */
-	if (IS_GEN6(dev) || IS_GEN7(dev))
+	if (!lazy_coherency)
 		intel_ring_get_active_head(ring);
 	return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
 }
 
 static u32
-ring_get_seqno(struct intel_ring_buffer *ring)
+ring_get_seqno(struct intel_ring_buffer *ring, bool lazy_coherency)
 {
 	return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
 }
 
 static u32
-pc_render_get_seqno(struct intel_ring_buffer *ring)
+pc_render_get_seqno(struct intel_ring_buffer *ring, bool lazy_coherency)
 {
 	struct pipe_control *pc = ring->private;
 	return pc->cpu_page[0];
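The hard-coded IS_GEN6/IS_GEN7 check becomes an explicit lazy_coherency parameter: callers that can tolerate a slightly stale (but still monotonic) seqno skip the expensive ACTHD kick, and callers that need the current value force it. A hedged sketch of the intended calling pattern (the helper is illustrative; i915_seqno_passed() is the driver's existing wrap-safe comparison):

	static bool example_seqno_passed(struct intel_ring_buffer *ring, u32 seqno)
	{
		/* Cheap read first: the status page value may be stale,
		 * but a hit avoids the kick entirely. */
		if (i915_seqno_passed(ring->get_seqno(ring, true), seqno))
			return true;

		/* Force a coherent read before concluding the GPU is behind. */
		return i915_seqno_passed(ring->get_seqno(ring, false), seqno);
	}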
@@ -852,7 +859,7 @@
 
 	spin_lock_irqsave(&dev_priv->irq_lock, flags);
 	if (ring->irq_refcount++ == 0) {
-		if (IS_IVYBRIDGE(dev) && ring->id == RCS)
+		if (HAS_L3_GPU_CACHE(dev) && ring->id == RCS)
 			I915_WRITE_IMR(ring, ~(ring->irq_enable_mask |
 						GEN6_RENDER_L3_PARITY_ERROR));
 		else
@@ -875,7 +882,7 @@
 
 	spin_lock_irqsave(&dev_priv->irq_lock, flags);
 	if (--ring->irq_refcount == 0) {
-		if (IS_IVYBRIDGE(dev) && ring->id == RCS)
+		if (HAS_L3_GPU_CACHE(dev) && ring->id == RCS)
 			I915_WRITE_IMR(ring, ~GEN6_RENDER_L3_PARITY_ERROR);
 		else
 			I915_WRITE_IMR(ring, ~0);
@@ -1010,7 +1017,6 @@
 	ring->dev = dev;
 	INIT_LIST_HEAD(&ring->active_list);
 	INIT_LIST_HEAD(&ring->request_list);
-	INIT_LIST_HEAD(&ring->gpu_write_list);
 	ring->size = 32 * PAGE_SIZE;
 
 	init_waitqueue_head(&ring->irq_queue);
@@ -1380,6 +1386,8 @@
 	if (INTEL_INFO(dev)->gen >= 6) {
 		ring->add_request = gen6_add_request;
 		ring->flush = gen6_render_ring_flush;
+		if (INTEL_INFO(dev)->gen == 6)
+			ring->flush = gen6_render_ring_flush__wa;
 		ring->irq_get = gen6_ring_get_irq;
 		ring->irq_put = gen6_ring_put_irq;
 		ring->irq_enable_mask = GT_USER_INTERRUPT;
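Note the ordering above: the common gen6 flush is installed first and then overridden for gen 6 proper, so only Sandybridge pays for the post-sync nonzero workaround that gen6_render_ring_flush__wa prepends. Written out explicitly, the selection is equivalent to:

	if (INTEL_INFO(dev)->gen == 6)
		ring->flush = gen6_render_ring_flush__wa;	/* SNB: kick first */
	else
		ring->flush = gen6_render_ring_flush;		/* gen7+: plain flush */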
@@ -1481,7 +1489,6 @@
 	ring->dev = dev;
 	INIT_LIST_HEAD(&ring->active_list);
 	INIT_LIST_HEAD(&ring->request_list);
-	INIT_LIST_HEAD(&ring->gpu_write_list);
 
 	ring->size = size;
 	ring->effective_size = ring->size;
@@ -1574,3 +1581,41 @@
 
 	return intel_init_ring_buffer(dev, ring);
 }
+
+int
+intel_ring_flush_all_caches(struct intel_ring_buffer *ring)
+{
+	int ret;
+
+	if (!ring->gpu_caches_dirty)
+		return 0;
+
+	ret = ring->flush(ring, 0, I915_GEM_GPU_DOMAINS);
+	if (ret)
+		return ret;
+
+	trace_i915_gem_ring_flush(ring, 0, I915_GEM_GPU_DOMAINS);
+
+	ring->gpu_caches_dirty = false;
+	return 0;
+}
+
+int
+intel_ring_invalidate_all_caches(struct intel_ring_buffer *ring)
+{
+	uint32_t flush_domains;
+	int ret;
+
+	flush_domains = 0;
+	if (ring->gpu_caches_dirty)
+		flush_domains = I915_GEM_GPU_DOMAINS;
+
+	ret = ring->flush(ring, I915_GEM_GPU_DOMAINS, flush_domains);
+	if (ret)
+		return ret;
+
+	trace_i915_gem_ring_flush(ring, I915_GEM_GPU_DOMAINS, flush_domains);
+
+	ring->gpu_caches_dirty = false;
+	return 0;
+}
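These two helpers centralize cache maintenance around the new ring->gpu_caches_dirty flag: the flush is skipped entirely while the caches are clean, and an invalidate folds any pending flush into the same emission. A hedged sketch of the intended call pattern around batch submission (a fragment for illustration, not code from this patch):

	/* Before running a batch: invalidate so the GPU re-reads
	 * everything, folding in any flush still owed. */
	ret = intel_ring_invalidate_all_caches(ring);
	if (ret)
		return ret;

	/* ... dispatch the batchbuffer; its writes dirty the caches ... */
	ring->gpu_caches_dirty = true;

	/* Before another agent consumes the results: flush if needed. */
	ret = intel_ring_flush_all_caches(ring);
	if (ret)
		return ret;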
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 1d3c81f..2ea7a31 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -72,7 +72,14 @@
 				  u32	flush_domains);
 	int		(*add_request)(struct intel_ring_buffer *ring,
 				       u32 *seqno);
-	u32		(*get_seqno)(struct intel_ring_buffer *ring);
+	/* Some chipsets are not quite as coherent as advertised and need
+	 * an expensive kick to force a true read of the up-to-date seqno.
+	 * However, the up-to-date seqno is not always required and the last
+	 * seen value is good enough. Note that the seqno will always be
+	 * monotonic, even if not coherent.
+	 */
+	u32		(*get_seqno)(struct intel_ring_buffer *ring,
+				     bool lazy_coherency);
 	int		(*dispatch_execbuffer)(struct intel_ring_buffer *ring,
 					       u32 offset, u32 length);
 	void		(*cleanup)(struct intel_ring_buffer *ring);
@@ -101,15 +108,6 @@
 	struct list_head request_list;
 
 	/**
-	 * List of objects currently pending a GPU write flush.
-	 *
-	 * All elements on this list will belong to either the
-	 * active_list or flushing_list, last_rendering_seqno can
-	 * be used to differentiate between the two elements.
-	 */
-	struct list_head gpu_write_list;
-
-	/**
 	 * Do we have some not yet emitted requests outstanding?
 	 */
 	u32 outstanding_lazy_request;
@@ -204,6 +202,8 @@
 void intel_ring_advance(struct intel_ring_buffer *ring);
 
 u32 intel_ring_get_seqno(struct intel_ring_buffer *ring);
+int intel_ring_flush_all_caches(struct intel_ring_buffer *ring);
+int intel_ring_invalidate_all_caches(struct intel_ring_buffer *ring);
 
 int intel_init_render_ring_buffer(struct drm_device *dev);
 int intel_init_bsd_ring_buffer(struct drm_device *dev);
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index d81bb0b..434b1d1 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -1345,7 +1345,6 @@
 			}
 		} else
 			status = connector_status_disconnected;
-		connector->display_info.raw_edid = NULL;
 		kfree(edid);
 	}
 
@@ -1419,7 +1418,6 @@
 			else
 				ret = connector_status_disconnected;
 
-			connector->display_info.raw_edid = NULL;
 			kfree(edid);
 		} else
 			ret = connector_status_connected;
@@ -1465,7 +1463,6 @@
 			drm_add_edid_modes(connector, edid);
 		}
 
-		connector->display_info.raw_edid = NULL;
 		kfree(edid);
 	}
 }
@@ -2082,8 +2079,7 @@
 		connector->connector_type = DRM_MODE_CONNECTOR_HDMIA;
 		intel_sdvo->is_hdmi = true;
 	}
-	intel_sdvo->base.clone_mask = ((1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
-				       (1 << INTEL_ANALOG_CLONE_BIT));
+	intel_sdvo->base.cloneable = true;
 
 	intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo);
 	if (intel_sdvo->is_hdmi)
@@ -2114,7 +2110,7 @@
 
 	intel_sdvo->is_tv = true;
 	intel_sdvo->base.needs_tv_clock = true;
-	intel_sdvo->base.clone_mask = 1 << INTEL_SDVO_TV_CLONE_BIT;
+	intel_sdvo->base.cloneable = false;
 
 	intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo);
 
@@ -2157,8 +2153,7 @@
 		intel_sdvo_connector->output_flag = SDVO_OUTPUT_RGB1;
 	}
 
-	intel_sdvo->base.clone_mask = ((1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
-				       (1 << INTEL_ANALOG_CLONE_BIT));
+	intel_sdvo->base.cloneable = true;
 
 	intel_sdvo_connector_init(intel_sdvo_connector,
 				  intel_sdvo);
@@ -2190,8 +2185,10 @@
 		intel_sdvo_connector->output_flag = SDVO_OUTPUT_LVDS1;
 	}
 
-	intel_sdvo->base.clone_mask = ((1 << INTEL_ANALOG_CLONE_BIT) |
-				       (1 << INTEL_SDVO_LVDS_CLONE_BIT));
+	/* SDVO LVDS is cloneable because the SDVO encoder does the upscaling,
+	 * as opposed to native LVDS, where we upscale with the panel-fitter
+	 * (and hence only the native LVDS resolution could be cloned). */
+	intel_sdvo->base.cloneable = true;
 
 	intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo);
 	if (!intel_sdvo_create_enhance_property(intel_sdvo, intel_sdvo_connector))
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index befce6c..1a0bab0 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -1622,7 +1622,7 @@
 	intel_connector_attach_encoder(intel_connector, intel_encoder);
 	intel_encoder->type = INTEL_OUTPUT_TVOUT;
 	intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
-	intel_encoder->clone_mask = (1 << INTEL_TV_CLONE_BIT);
+	intel_encoder->cloneable = false;
 	intel_encoder->base.possible_crtcs = ((1 << 0) | (1 << 1));
 	intel_encoder->base.possible_clones = (1 << INTEL_OUTPUT_TVOUT);
 	intel_tv->type = DRM_MODE_CONNECTOR_Unknown;
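The SDVO and TV hunks above replace the per-encoder clone_mask bitmask with a single cloneable flag, making cloning an all-or-nothing property of each encoder. Whether two encoders may share a CRTC then reduces to a check along these lines (a hedged sketch; the helper name is illustrative):

	static bool encoders_cloneable(const struct intel_encoder *a,
				       const struct intel_encoder *b)
	{
		/* An encoder trivially clones itself; otherwise both
		 * parties must have opted in. */
		return a == b || (a->cloneable && b->cloneable);
	}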
diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.h b/drivers/gpu/drm/mgag200/mgag200_drv.h
index 6f13b35..d22cbbf 100644
--- a/drivers/gpu/drm/mgag200/mgag200_drv.h
+++ b/drivers/gpu/drm/mgag200/mgag200_drv.h
@@ -195,7 +195,6 @@
 		struct drm_global_reference mem_global_ref;
 		struct ttm_bo_global_ref bo_global_ref;
 		struct ttm_bo_device bdev;
-		atomic_t validate_sequence;
 	} ttm;
 
 	u32 reg_1e24; /* SE model number */
diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c
index b69642d..c7420e8 100644
--- a/drivers/gpu/drm/mgag200/mgag200_mode.c
+++ b/drivers/gpu/drm/mgag200/mgag200_mode.c
@@ -1399,7 +1399,6 @@
 	if (edid) {
 		drm_mode_connector_update_edid_property(connector, edid);
 		ret = drm_add_edid_modes(connector, edid);
-		connector->display_info.raw_edid = NULL;
 		kfree(edid);
 	}
 	return ret;
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index f8187ea..0df71ea 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -472,7 +472,7 @@
 	else
 		tmp = pgprot_noncached(tmp);
 #endif
-#if defined(__sparc__)
+#if defined(__sparc__) || defined(__mips__)
 	if (!(caching_flags & TTM_PL_FLAG_CACHED))
 		tmp = pgprot_noncached(tmp);
 #endif
diff --git a/drivers/gpu/drm/udl/udl_connector.c b/drivers/gpu/drm/udl/udl_connector.c
index ba055e9..2d98ff9 100644
--- a/drivers/gpu/drm/udl/udl_connector.c
+++ b/drivers/gpu/drm/udl/udl_connector.c
@@ -57,11 +57,8 @@
 
 	edid = (struct edid *)udl_get_edid(udl);
 
-	connector->display_info.raw_edid = (char *)edid;
-
 	drm_mode_connector_update_edid_property(connector, edid);
 	ret = drm_add_edid_modes(connector, edid);
-	connector->display_info.raw_edid = NULL;
 	kfree(edid);
 	return ret;
 }
diff --git a/drivers/staging/omapdrm/omap_connector.c b/drivers/staging/omapdrm/omap_connector.c
index 5e2856c..9c2287b 100644
--- a/drivers/staging/omapdrm/omap_connector.c
+++ b/drivers/staging/omapdrm/omap_connector.c
@@ -177,14 +177,11 @@
 			drm_mode_connector_update_edid_property(
 					connector, edid);
 			n = drm_add_edid_modes(connector, edid);
-			kfree(connector->display_info.raw_edid);
-			connector->display_info.raw_edid = edid;
 		} else {
 			drm_mode_connector_update_edid_property(
 					connector, NULL);
-			connector->display_info.raw_edid = NULL;
-			kfree(edid);
 		}
+		kfree(edid);
 	} else {
 		struct drm_display_mode *mode = drm_mode_create(dev);
 		struct omap_video_timings timings;
diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h
index ced3625..617d87a 100644
--- a/include/drm/drm_crtc.h
+++ b/include/drm/drm_crtc.h
@@ -214,8 +214,6 @@
 	u32 color_formats;
 
 	u8 cea_rev;
-
-	char *raw_edid; /* if any */
 };
 
 struct drm_framebuffer_funcs {
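With raw_edid removed from struct drm_display_info, the core no longer holds a borrowed pointer into connector-owned EDID data, which is why each driver hunk above now simply frees the EDID it fetched. The resulting get_modes pattern looks like this (a hedged sketch; edid_read() stands in for the driver's own DDC fetch):

	static int example_get_modes(struct drm_connector *connector)
	{
		struct edid *edid = edid_read(connector);	/* hypothetical fetch */
		int count = 0;

		if (edid) {
			drm_mode_connector_update_edid_property(connector, edid);
			count = drm_add_edid_modes(connector, edid);
			kfree(edid);	/* no raw_edid pointer left dangling */
		}
		return count;
	}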
diff --git a/include/drm/drm_sarea.h b/include/drm/drm_sarea.h
index ee5389d..1d1a858 100644
--- a/include/drm/drm_sarea.h
+++ b/include/drm/drm_sarea.h
@@ -37,6 +37,8 @@
 /* SAREA area needs to be at least a page */
 #if defined(__alpha__)
 #define SAREA_MAX                       0x2000U
+#elif defined(__mips__)
+#define SAREA_MAX                       0x4000U
 #elif defined(__ia64__)
 #define SAREA_MAX                       0x10000U	/* 64kB */
 #else
diff --git a/include/drm/i915_drm.h b/include/drm/i915_drm.h
index 8cc7083..d8a79bf 100644
--- a/include/drm/i915_drm.h
+++ b/include/drm/i915_drm.h
@@ -203,6 +203,9 @@
 #define DRM_I915_GEM_WAIT	0x2c
 #define DRM_I915_GEM_CONTEXT_CREATE	0x2d
 #define DRM_I915_GEM_CONTEXT_DESTROY	0x2e
+#define DRM_I915_GEM_SET_CACHEING	0x2f
+#define DRM_I915_GEM_GET_CACHEING	0x30
+#define DRM_I915_REG_READ		0x31
 
 #define DRM_IOCTL_I915_INIT		DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t)
 #define DRM_IOCTL_I915_FLUSH		DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH)
@@ -227,6 +230,8 @@
 #define DRM_IOCTL_I915_GEM_PIN		DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_PIN, struct drm_i915_gem_pin)
 #define DRM_IOCTL_I915_GEM_UNPIN	DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_UNPIN, struct drm_i915_gem_unpin)
 #define DRM_IOCTL_I915_GEM_BUSY		DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_BUSY, struct drm_i915_gem_busy)
+#define DRM_IOCTL_I915_GEM_SET_CACHEING		DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_SET_CACHEING, struct drm_i915_gem_cacheing)
+#define DRM_IOCTL_I915_GEM_GET_CACHEING		DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_GET_CACHEING, struct drm_i915_gem_cacheing)
 #define DRM_IOCTL_I915_GEM_THROTTLE	DRM_IO ( DRM_COMMAND_BASE + DRM_I915_GEM_THROTTLE)
 #define DRM_IOCTL_I915_GEM_ENTERVT	DRM_IO(DRM_COMMAND_BASE + DRM_I915_GEM_ENTERVT)
 #define DRM_IOCTL_I915_GEM_LEAVEVT	DRM_IO(DRM_COMMAND_BASE + DRM_I915_GEM_LEAVEVT)
@@ -249,6 +254,7 @@
 #define DRM_IOCTL_I915_GEM_WAIT		DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_WAIT, struct drm_i915_gem_wait)
 #define DRM_IOCTL_I915_GEM_CONTEXT_CREATE	DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_CREATE, struct drm_i915_gem_context_create)
 #define DRM_IOCTL_I915_GEM_CONTEXT_DESTROY	DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_DESTROY, struct drm_i915_gem_context_destroy)
+#define DRM_IOCTL_I915_REG_READ			DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_REG_READ, struct drm_i915_reg_read)
 
 /* Allow drivers to submit batchbuffers directly to hardware, relying
  * on the security mechanisms provided by hardware.
@@ -305,6 +311,7 @@
 #define I915_PARAM_HAS_LLC     	 	 17
 #define I915_PARAM_HAS_ALIASING_PPGTT	 18
 #define I915_PARAM_HAS_WAIT_TIMEOUT	 19
+#define I915_PARAM_HAS_SEMAPHORES	 20
 
 typedef struct drm_i915_getparam {
 	int param;
@@ -698,10 +705,31 @@
 	/** Handle of the buffer to check for busy */
 	__u32 handle;
 
-	/** Return busy status (1 if busy, 0 if idle) */
+	/** Return busy status (1 if busy, 0 if idle).
+	 * The high word is used to indicate on which rings the object
+	 * currently resides:
+	 *  bits 16:31 - busy (r or r/w) rings (bit 16 render, 17 bsd, 18 blt, etc.)
+	 */
 	__u32 busy;
 };
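Userspace can now learn not only whether a buffer is busy but also on which rings, without a new ioctl. A hedged decoding sketch using libdrm's drmIoctl():

	#include <xf86drm.h>
	#include <drm/i915_drm.h>

	static unsigned int busy_rings(int fd, __u32 bo_handle)
	{
		struct drm_i915_gem_busy busy = { .handle = bo_handle };

		if (drmIoctl(fd, DRM_IOCTL_I915_GEM_BUSY, &busy))
			return 0;

		/* Low bit: classic busy flag. High word: one bit per ring,
		 * bit 16 = render, 17 = bsd, 18 = blt. */
		return (busy.busy & 1) ? (busy.busy >> 16) : 0;
	}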
 
+#define I915_CACHEING_NONE		0
+#define I915_CACHEING_CACHED		1
+
+struct drm_i915_gem_cacheing {
+	/**
+	 * Handle of the buffer whose caching level is to be set or queried. */
+	__u32 handle;
+
+	/**
+	 * Caching level to apply, or the current level on return.
+	 *
+	 * Bits 0-15 are for generic caching control (i.e. the values
+	 * defined above); bits 16-31 are reserved for platform-specific
+	 * variations (e.g. l3$ caching on gen7). */
+	__u32 cacheing;
+};
+
 #define I915_TILING_NONE	0
 #define I915_TILING_X		1
 #define I915_TILING_Y		2
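A hedged usage sketch for the new set/get pair (the spelling follows the patch; error handling trimmed):

	#include <xf86drm.h>
	#include <drm/i915_drm.h>

	static int make_bo_cached(int fd, __u32 bo_handle)
	{
		struct drm_i915_gem_cacheing arg = {
			.handle   = bo_handle,
			.cacheing = I915_CACHEING_CACHED,	/* request LLC caching */
		};

		if (drmIoctl(fd, DRM_IOCTL_I915_GEM_SET_CACHEING, &arg))
			return -1;

		arg.cacheing = 0;	/* kernel fills in the current level */
		return drmIoctl(fd, DRM_IOCTL_I915_GEM_GET_CACHEING, &arg);
	}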
@@ -918,4 +946,8 @@
 	__u32 pad;
 };
 
+struct drm_i915_reg_read {
+	__u64 offset;
+	__u64 val; /* Return value */
+};
 #endif				/* _I915_DRM_H_ */
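Finally, DRM_IOCTL_I915_REG_READ gives userspace a window onto MMIO registers, with the kernel deciding which offsets it will honour. A hedged usage sketch (the offset passed in is illustrative only):

	#include <stdio.h>
	#include <xf86drm.h>
	#include <drm/i915_drm.h>

	static void dump_reg(int fd, __u64 offset)
	{
		struct drm_i915_reg_read reg = { .offset = offset };

		if (drmIoctl(fd, DRM_IOCTL_I915_REG_READ, &reg) == 0)
			printf("reg 0x%llx = 0x%llx\n",
			       (unsigned long long)reg.offset,
			       (unsigned long long)reg.val);
	}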