Merge branch 'tty-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/tty-2.6

* 'tty-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/tty-2.6:
  n_gsm: Fix length handling
  n_gsm: Copy n2 over when configuring via ioctl interface
  serial: bfin_5xx: grab port lock before making port termios changes
  serial: bfin_5xx: disable CON_PRINTBUFFER for consoles
  serial: bfin_5xx: remove redundant SSYNC to improve TX speed
  serial: bfin_5xx: always include DMA headers
  vcs: make proper usage of the poll flags
  amiserial: Remove unused variable icount
  8250: Fix tcsetattr to avoid ioctl(TIOCMIWAIT) hang
  tty_ldisc: Fix BUG() on hangup
  TTY: restore tty_ldisc_wait_idle
  SERIAL: blacklist si3052 chip
  drivers/serial/bfin_5xx.c: Fix line continuation defects
  tty: prevent DOS in the flush_to_ldisc
  8250: add support for Kouwell KW-L221N-2
  nozomi: Fix warning from the previous TIOCGCOUNT changes
  tty: fix warning in synclink driver
  tty: Fix formatting in tty.h
  tty: the development tree is now done in git
diff --git a/Documentation/ABI/obsolete/proc-pid-oom_adj b/Documentation/ABI/obsolete/proc-pid-oom_adj
new file mode 100644
index 0000000..cf63f26
--- /dev/null
+++ b/Documentation/ABI/obsolete/proc-pid-oom_adj
@@ -0,0 +1,22 @@
+What:	/proc/<pid>/oom_adj
+When:	August 2012
+Why:	/proc/<pid>/oom_adj allows userspace to influence the oom killer's
+	badness heuristic used to determine which task to kill when the kernel
+	is out of memory.
+
+	The badness heuristic has been rewritten since the introduction of
+	this tunable such that its meaning is deprecated.  The value was
+	implemented as a bitshift on a score generated by the badness()
+	function that did not have any precise units of measure.  With the
+	rewrite, the score is given as a proportion of available memory to the
+	task allocating pages, so a bitshift which grows the score
+	exponentially makes fine-grained tuning impossible.
+
+	A much more powerful interface, /proc/<pid>/oom_score_adj, was
+	introduced with the oom killer rewrite that allows users to increase or
+	decrease the badness() score linearly.  This interface will replace
+	/proc/<pid>/oom_adj.
+
+	A warning will be emitted to the kernel log if an application uses this
+	deprecated interface.  After it is printed once, future warnings will be
+	suppressed until the kernel is rebooted.
diff --git a/Documentation/block/switching-sched.txt b/Documentation/block/switching-sched.txt
index d5af3f6..71cfbdc 100644
--- a/Documentation/block/switching-sched.txt
+++ b/Documentation/block/switching-sched.txt
@@ -16,7 +16,7 @@
 As of the Linux 2.6.10 kernel, it is now possible to change the
 IO scheduler for a given block device on the fly (thus making it possible,
 for instance, to set the CFQ scheduler for the system default, but
-set a specific device to use the anticipatory or noop schedulers - which
+set a specific device to use the deadline or noop schedulers - which
 can improve that device's throughput).
 
 To set a specific scheduler, simply do this:
@@ -31,7 +31,7 @@
 will be displayed, with the currently selected scheduler in brackets:
 
 # cat /sys/block/hda/queue/scheduler
-noop anticipatory deadline [cfq]
-# echo anticipatory > /sys/block/hda/queue/scheduler
+noop deadline [cfq]
+# echo deadline > /sys/block/hda/queue/scheduler
 # cat /sys/block/hda/queue/scheduler
-noop [anticipatory] deadline cfq
+noop [deadline] cfq
diff --git a/Documentation/filesystems/xfs-delayed-logging-design.txt b/Documentation/filesystems/xfs-delayed-logging-design.txt
index 96d0df2..7445bf3 100644
--- a/Documentation/filesystems/xfs-delayed-logging-design.txt
+++ b/Documentation/filesystems/xfs-delayed-logging-design.txt
@@ -794,17 +794,6 @@
 
 Roadmap:
 
-2.6.37 Remove experimental tag from mount option
-	=> should be roughly 6 months after initial merge
-	=> enough time to:
-		=> gain confidence and fix problems reported by early
-		   adopters (a.k.a. guinea pigs)
-		=> address worst performance regressions and undesired
-		   behaviours
-		=> start tuning/optimising code for parallelism
-		=> start tuning/optimising algorithms consuming
-		   excessive CPU time
-
 2.6.39 Switch default mount option to use delayed logging
 	=> should be roughly 12 months after initial merge
 	=> enough time to shake out remaining problems before next round of
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index ed45e98..92e83e5 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -706,7 +706,7 @@
 			arch/x86/kernel/cpu/cpufreq/elanfreq.c.
 
 	elevator=	[IOSCHED]
-			Format: {"anticipatory" | "cfq" | "deadline" | "noop"}
+			Format: {"cfq" | "deadline" | "noop"}
 			See Documentation/block/as-iosched.txt and
 			Documentation/block/deadline-iosched.txt for details.
 
diff --git a/Documentation/leds-class.txt b/Documentation/leds-class.txt
index 8fd5ca2..58b266b 100644
--- a/Documentation/leds-class.txt
+++ b/Documentation/leds-class.txt
@@ -60,15 +60,18 @@
 
 Some LEDs can be programmed to blink without any CPU interaction. To
 support this feature, a LED driver can optionally implement the
-blink_set() function (see <linux/leds.h>). If implemented, triggers can
-attempt to use it before falling back to software timers. The blink_set()
-function should return 0 if the blink setting is supported, or -EINVAL
-otherwise, which means that LED blinking will be handled by software.
+blink_set() function (see <linux/leds.h>). To set an LED to blinking,
+however, it is better to use the API function led_blink_set(),
+as it will check and implement software fallback if necessary.
 
-The blink_set() function should choose a user friendly blinking
-value if it is called with *delay_on==0 && *delay_off==0 parameters. In
-this case the driver should give back the chosen value through delay_on
-and delay_off parameters to the leds subsystem.
+To turn off blinking again, use the API function led_brightness_set()
+as that will not just set the LED brightness but also stop any software
+timers that may have been required for blinking.
+
+The blink_set() function should choose a user friendly blinking value
+if it is called with *delay_on==0 && *delay_off==0 parameters. In this
+case the driver should give back the chosen value through delay_on and
+delay_off parameters to the leds subsystem.
 
 Setting the brightness to zero with brightness_set() callback function
 should completely turn off the LED and cancel the previously programmed
diff --git a/Documentation/leds/leds-lp5521.txt b/Documentation/leds/leds-lp5521.txt
new file mode 100644
index 0000000..c4d8d15
--- /dev/null
+++ b/Documentation/leds/leds-lp5521.txt
@@ -0,0 +1,88 @@
+Kernel driver for lp5521
+========================
+
+* National Semiconductor LP5521 led driver chip
+* Datasheet: http://www.national.com/pf/LP/LP5521.html
+
+Authors: Mathias Nyman, Yuri Zaporozhets, Samu Onkalo
+Contact: Samu Onkalo (samu.p.onkalo-at-nokia.com)
+
+Description
+-----------
+
+LP5521 can drive up to 3 channels. Leds can be controlled directly via
+the led class control interface. Channels have generic names:
+lp5521:channelx, where x is 0 .. 2
+
+All three channels can be also controlled using the engine micro programs.
+More details of the instructions can be found in the public data sheet.
+
+Control interface for the engines:
+x is 1 .. 3
+enginex_mode : disabled, load, run
+enginex_load : store program (visible only in engine load mode)
+
+Example (start to blink the channel 2 led):
+cd   /sys/class/leds/lp5521:channel2/device
+echo "load" > engine3_mode
+echo "037f4d0003ff6000" > engine3_load
+echo "run" > engine3_mode
+
+stop the engine:
+echo "disabled" > engine3_mode
+
+sysfs contains a selftest entry.
+The test communicates with the chip and checks that
+the clock mode is automatically set to the requested one.
+
+Each channel has its own led current settings.
+/sys/class/leds/lp5521:channel0/led_current - RW
+/sys/class/leds/lp5521:channel0/max_current - RO
+Format: 10x mA, i.e. 10 means 1.0 mA
+
+Example platform data:
+
+Note: chan_nr can have values between 0 and 2.
+
+static struct lp5521_led_config lp5521_led_config[] = {
+        {
+                .chan_nr        = 0,
+                .led_current    = 50,
+		.max_current    = 130,
+        }, {
+                .chan_nr        = 1,
+                .led_current    = 0,
+		.max_current    = 130,
+        }, {
+                .chan_nr        = 2,
+                .led_current    = 0,
+		.max_current    = 130,
+        }
+};
+
+static int lp5521_setup(void)
+{
+	/* setup HW resources */
+}
+
+static void lp5521_release(void)
+{
+	/* Release HW resources */
+}
+
+static void lp5521_enable(bool state)
+{
+	/* Control of chip enable signal */
+}
+
+static struct lp5521_platform_data lp5521_platform_data = {
+        .led_config     = lp5521_led_config,
+        .num_channels   = ARRAY_SIZE(lp5521_led_config),
+        .clock_mode     = LP5521_CLOCK_EXT,
+        .setup_resources   = lp5521_setup,
+        .release_resources = lp5521_release,
+        .enable            = lp5521_enable,
+};
+
+If the current is set to 0 in the platform data, that channel is
+disabled and it is not visible in the sysfs.
diff --git a/Documentation/leds/leds-lp5523.txt b/Documentation/leds/leds-lp5523.txt
new file mode 100644
index 0000000..fad2feb
--- /dev/null
+++ b/Documentation/leds/leds-lp5523.txt
@@ -0,0 +1,83 @@
+Kernel driver for lp5523
+========================
+
+* National Semiconductor LP5523 led driver chip
+* Datasheet: http://www.national.com/pf/LP/LP5523.html
+
+Authors: Mathias Nyman, Yuri Zaporozhets, Samu Onkalo
+Contact: Samu Onkalo (samu.p.onkalo-at-nokia.com)
+
+Description
+-----------
+LP5523 can drive up to 9 channels. Leds can be controlled directly via
+the led class control interface. Channels have generic names:
+lp5523:channelx where x is 0...8
+
+The chip provides 3 engines. Each engine can control channels without
+interaction from the main CPU. Details of the micro engine code can be found
+in the public data sheet. Leds can be muxed to different channels.
+
+Control interface for the engines:
+x is 1 .. 3
+enginex_mode : disabled, load, run
+enginex_load : microcode load (visible only in load mode)
+enginex_leds : led mux control (visible only in load mode)
+
+cd /sys/class/leds/lp5523:channel2/device
+echo "load" > engine3_mode
+echo "9d80400004ff05ff437f0000" > engine3_load
+echo "111111111" > engine3_leds
+echo "run" > engine3_mode
+
+sysfs contains a selftest entry. It measures each channel's
+voltage level and checks whether it looks reasonable. If the level is too high,
+the led is missing; if the level is too low, there is a short circuit.
+
+The selftest always uses the current settings from the platform data.
+
+Each channel contains led current settings.
+/sys/class/leds/lp5523:channel2/led_current - RW
+/sys/class/leds/lp5523:channel2/max_current - RO
+Format: 10x mA, i.e. 10 means 1.0 mA
+
+Example platform data:
+
+Note - chan_nr can have values between 0 and 8.
+
+static struct lp5523_led_config lp5523_led_config[] = {
+        {
+                .chan_nr        = 0,
+                .led_current    = 50,
+		.max_current    = 130,
+        },
+...
+        }, {
+                .chan_nr        = 8,
+                .led_current    = 50,
+		.max_current    = 130,
+        }
+};
+
+static int lp5523_setup(void)
+{
+	/* Setup HW resources */
+}
+
+static void lp5523_release(void)
+{
+	/* Release HW resources */
+}
+
+static void lp5523_enable(bool state)
+{
+	/* Control chip enable signal */
+}
+
+static struct lp5523_platform_data lp5523_platform_data = {
+        .led_config     = lp5523_led_config,
+        .num_channels   = ARRAY_SIZE(lp5523_led_config),
+        .clock_mode     = LP5523_CLOCK_EXT,
+        .setup_resources   = lp5523_setup,
+        .release_resources = lp5523_release,
+        .enable            = lp5523_enable,
+};
diff --git a/Documentation/rbtree.txt b/Documentation/rbtree.txt
index 221f38b..19f8278 100644
--- a/Documentation/rbtree.txt
+++ b/Documentation/rbtree.txt
@@ -21,8 +21,8 @@
 To quote Linux Weekly News:
 
     There are a number of red-black trees in use in the kernel.
-    The anticipatory, deadline, and CFQ I/O schedulers all employ
-    rbtrees to track requests; the packet CD/DVD driver does the same.
+    The deadline and CFQ I/O schedulers employ rbtrees to
+    track requests; the packet CD/DVD driver does the same.
     The high-resolution timer code uses an rbtree to organize outstanding
     timer requests.  The ext3 filesystem tracks directory entries in a
     red-black tree.  Virtual memory areas (VMAs) are tracked with red-black
diff --git a/Documentation/sysctl/kernel.txt b/Documentation/sysctl/kernel.txt
index 3894eaa..209e158 100644
--- a/Documentation/sysctl/kernel.txt
+++ b/Documentation/sysctl/kernel.txt
@@ -28,6 +28,7 @@
 - core_uses_pid
 - ctrl-alt-del
 - dentry-state
+- dmesg_restrict
 - domainname
 - hostname
 - hotplug
@@ -213,6 +214,19 @@
 
 ==============================================================
 
+dmesg_restrict:
+
+This toggle indicates whether unprivileged users are prevented from using
+dmesg(8) to view messages from the kernel's log buffer.  When
+dmesg_restrict is set to (0) there are no restrictions.  When
+dmesg_restrict is set to (1), users must have CAP_SYS_ADMIN to use
+dmesg(8).
+
+The kernel config option CONFIG_SECURITY_DMESG_RESTRICT sets the default
+value of dmesg_restrict.
+
+==============================================================
+
 domainname & hostname:
 
 These files can be used to set the NIS/YP domainname and the
diff --git a/MAINTAINERS b/MAINTAINERS
index 4c43733..716c548 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -6598,14 +6598,14 @@
 
 XEN PCI SUBSYSTEM
 M:	Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
-L:	xen-devel@lists.xensource.com
+L:	xen-devel@lists.xensource.com (moderated for non-subscribers)
 S:	Supported
 F:	arch/x86/pci/*xen*
 F:	drivers/pci/*xen*
 
 XEN SWIOTLB SUBSYSTEM
 M:	Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
-L:	xen-devel@lists.xensource.com
+L:	xen-devel@lists.xensource.com (moderated for non-subscribers)
 S:	Supported
 F:	arch/x86/xen/*swiotlb*
 F:	drivers/xen/*swiotlb*
@@ -6613,7 +6613,7 @@
 XEN HYPERVISOR INTERFACE
 M:	Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
 M:	Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
-L:	xen-devel@lists.xen.org
+L:	xen-devel@lists.xensource.com (moderated for non-subscribers)
 L:	virtualization@lists.osdl.org
 S:	Supported
 F:	arch/x86/xen/
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index a19a526..8ae3d48 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -6,7 +6,7 @@
 	select HAVE_MEMBLOCK
 	select RTC_LIB
 	select SYS_SUPPORTS_APM_EMULATION
-	select GENERIC_ATOMIC64 if (!CPU_32v6K)
+	select GENERIC_ATOMIC64 if (!CPU_32v6K || !AEABI)
 	select HAVE_OPROFILE if (HAVE_PERF_EVENTS)
 	select HAVE_ARCH_KGDB
 	select HAVE_KPROBES if (!XIP_KERNEL)
diff --git a/arch/arm/common/gic.c b/arch/arm/common/gic.c
index ada6359..772f95f 100644
--- a/arch/arm/common/gic.c
+++ b/arch/arm/common/gic.c
@@ -251,15 +251,16 @@
 		writel(cpumask, base + GIC_DIST_TARGET + i * 4 / 4);
 
 	/*
-	 * Set priority on all interrupts.
+	 * Set priority on all global interrupts.
 	 */
-	for (i = 0; i < max_irq; i += 4)
+	for (i = 32; i < max_irq; i += 4)
 		writel(0xa0a0a0a0, base + GIC_DIST_PRI + i * 4 / 4);
 
 	/*
-	 * Disable all interrupts.
+	 * Disable all interrupts.  Leave the PPI and SGIs alone
+	 * as these enables are banked registers.
 	 */
-	for (i = 0; i < max_irq; i += 32)
+	for (i = 32; i < max_irq; i += 32)
 		writel(0xffffffff, base + GIC_DIST_ENABLE_CLEAR + i * 4 / 32);
 
 	/*
@@ -277,11 +278,30 @@
 
 void __cpuinit gic_cpu_init(unsigned int gic_nr, void __iomem *base)
 {
+	void __iomem *dist_base;
+	int i;
+
 	if (gic_nr >= MAX_GIC_NR)
 		BUG();
 
+	dist_base = gic_data[gic_nr].dist_base;
+	BUG_ON(!dist_base);
+
 	gic_data[gic_nr].cpu_base = base;
 
+	/*
+	 * Deal with the banked PPI and SGI interrupts - disable all
+	 * PPI interrupts, ensure all SGI interrupts are enabled.
+	 */
+	writel(0xffff0000, dist_base + GIC_DIST_ENABLE_CLEAR);
+	writel(0x0000ffff, dist_base + GIC_DIST_ENABLE_SET);
+
+	/*
+	 * Set priority on PPI and SGI interrupts
+	 */
+	for (i = 0; i < 32; i += 4)
+		writel(0xa0a0a0a0, dist_base + GIC_DIST_PRI + i * 4 / 4);
+
 	writel(0xf0, base + GIC_CPU_PRIMASK);
 	writel(1, base + GIC_CPU_CTRL);
 }
diff --git a/arch/arm/include/asm/hardware/it8152.h b/arch/arm/include/asm/hardware/it8152.h
index 6700c7f..21fa272 100644
--- a/arch/arm/include/asm/hardware/it8152.h
+++ b/arch/arm/include/asm/hardware/it8152.h
@@ -75,7 +75,7 @@
   IT8152_PD_IRQ(1)  USB (USBR)
   IT8152_PD_IRQ(0)  Audio controller (ACR)
  */
-#define IT8152_IRQ(x)   (IRQ_BOARD_END + (x))
+#define IT8152_IRQ(x)   (IRQ_BOARD_START + (x))
 
 /* IRQ-sources in 3 groups - local devices, LPC (serial), and external PCI */
 #define IT8152_LD_IRQ_COUNT     9
diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c
index 54593b0..21e3a4a 100644
--- a/arch/arm/kernel/hw_breakpoint.c
+++ b/arch/arm/kernel/hw_breakpoint.c
@@ -748,8 +748,7 @@
 		breakpoint_handler(addr, regs);
 		break;
 	case ARM_ENTRY_ASYNC_WATCHPOINT:
-		WARN_ON("Asynchronous watchpoint exception taken. "
-			"Debugging results may be unreliable");
+		WARN(1, "Asynchronous watchpoint exception taken. Debugging results may be unreliable\n");
 	case ARM_ENTRY_SYNC_WATCHPOINT:
 		watchpoint_handler(addr, regs);
 		break;
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index 49643b1..07a5035 100644
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
@@ -1749,7 +1749,7 @@
 static inline int armv7_pmnc_counter_has_overflowed(unsigned long pmnc,
 					enum armv7_counters counter)
 {
-	int ret;
+	int ret = 0;
 
 	if (counter == ARMV7_CYCLE_COUNTER)
 		ret = pmnc & ARMV7_FLAG_C;
diff --git a/arch/arm/kernel/stacktrace.c b/arch/arm/kernel/stacktrace.c
index 20b7411..c2e112e 100644
--- a/arch/arm/kernel/stacktrace.c
+++ b/arch/arm/kernel/stacktrace.c
@@ -28,7 +28,7 @@
 
 	/* only go to a higher address on the stack */
 	low = frame->sp;
-	high = ALIGN(low, THREAD_SIZE) + THREAD_SIZE;
+	high = ALIGN(low, THREAD_SIZE);
 
 	/* check current frame pointer is within bounds */
 	if (fp < (low + 12) || fp + 4 >= high)
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
index cda78d5..446aee9 100644
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -53,10 +53,7 @@
 void dump_backtrace_entry(unsigned long where, unsigned long from, unsigned long frame)
 {
 #ifdef CONFIG_KALLSYMS
-	char sym1[KSYM_SYMBOL_LEN], sym2[KSYM_SYMBOL_LEN];
-	sprint_symbol(sym1, where);
-	sprint_symbol(sym2, from);
-	printk("[<%08lx>] (%s) from [<%08lx>] (%s)\n", where, sym1, from, sym2);
+	printk("[<%08lx>] (%pS) from [<%08lx>] (%pS)\n", where, (void *)where, from, (void *)from);
 #else
 	printk("Function entered at [<%08lx>] from [<%08lx>]\n", where, from);
 #endif
diff --git a/arch/arm/kernel/unwind.c b/arch/arm/kernel/unwind.c
index 2a16176..d2cb0b3 100644
--- a/arch/arm/kernel/unwind.c
+++ b/arch/arm/kernel/unwind.c
@@ -279,7 +279,7 @@
 
 	/* only go to a higher address on the stack */
 	low = frame->sp;
-	high = ALIGN(low, THREAD_SIZE) + THREAD_SIZE;
+	high = ALIGN(low, THREAD_SIZE);
 
 	pr_debug("%s(pc = %08lx lr = %08lx sp = %08lx)\n", __func__,
 		 frame->pc, frame->lr, frame->sp);
diff --git a/arch/arm/mach-ep93xx/include/mach/dma.h b/arch/arm/mach-ep93xx/include/mach/dma.h
index 3a5961d..5e31b2b 100644
--- a/arch/arm/mach-ep93xx/include/mach/dma.h
+++ b/arch/arm/mach-ep93xx/include/mach/dma.h
@@ -1,5 +1,13 @@
-/*
- * arch/arm/mach-ep93xx/include/mach/dma.h
+/**
+ * DOC: EP93xx DMA M2P memory to peripheral and peripheral to memory engine
+ *
+ * The EP93xx DMA M2P subsystem handles DMA transfers between memory and
+ * peripherals. DMA M2P channels are available for audio, UARTs and IrDA.
+ * See chapter 10 of the EP93xx users guide for full details on the DMA M2P
+ * engine.
+ *
+ * See sound/soc/ep93xx/ep93xx-pcm.c for an example use of the DMA M2P code.
+ *
  */
 
 #ifndef __ASM_ARCH_DMA_H
@@ -8,12 +16,34 @@
 #include <linux/list.h>
 #include <linux/types.h>
 
+/**
+ * struct ep93xx_dma_buffer - Information about a buffer to be transferred
+ * using the DMA M2P engine
+ *
+ * @list: Entry in DMA buffer list
+ * @bus_addr: Physical address of the buffer
+ * @size: Size of the buffer in bytes
+ */
 struct ep93xx_dma_buffer {
 	struct list_head	list;
 	u32			bus_addr;
 	u16			size;
 };
 
+/**
+ * struct ep93xx_dma_m2p_client - Information about a DMA M2P client
+ *
+ * @name: Unique name for this client
+ * @flags: Client flags
+ * @cookie: User data to pass to callback functions
+ * @buffer_started: Non NULL function to call when a transfer is started.
+ * 			The arguments are the user data cookie and the DMA
+ *			buffer which is starting.
+ * @buffer_finished: Non NULL function to call when a transfer is completed.
+ *			The arguments are the user data cookie, the DMA buffer
+ *			which has completed, and a boolean flag indicating if
+ *			the transfer had an error.
+ */
 struct ep93xx_dma_m2p_client {
 	char			*name;
 	u8			flags;
@@ -24,10 +54,11 @@
 					struct ep93xx_dma_buffer *buf,
 					int bytes, int error);
 
-	/* Internal to the DMA code.  */
+	/* private: Internal use only */
 	void			*channel;
 };
 
+/* DMA M2P ports */
 #define EP93XX_DMA_M2P_PORT_I2S1	0x00
 #define EP93XX_DMA_M2P_PORT_I2S2	0x01
 #define EP93XX_DMA_M2P_PORT_AAC1	0x02
@@ -39,18 +70,80 @@
 #define EP93XX_DMA_M2P_PORT_UART3	0x08
 #define EP93XX_DMA_M2P_PORT_IRDA	0x09
 #define EP93XX_DMA_M2P_PORT_MASK	0x0f
-#define EP93XX_DMA_M2P_TX		0x00
-#define EP93XX_DMA_M2P_RX		0x10
-#define EP93XX_DMA_M2P_ABORT_ON_ERROR	0x20
-#define EP93XX_DMA_M2P_IGNORE_ERROR	0x40
-#define EP93XX_DMA_M2P_ERROR_MASK	0x60
 
-int  ep93xx_dma_m2p_client_register(struct ep93xx_dma_m2p_client *m2p);
+/* DMA M2P client flags */
+#define EP93XX_DMA_M2P_TX		0x00	/* Memory to peripheral */
+#define EP93XX_DMA_M2P_RX		0x10	/* Peripheral to memory */
+
+/*
+ * DMA M2P client error handling flags. See the EP93xx users guide
+ * documentation on the DMA M2P CONTROL register for more details
+ */
+#define EP93XX_DMA_M2P_ABORT_ON_ERROR	0x20	/* Abort on peripheral error */
+#define EP93XX_DMA_M2P_IGNORE_ERROR	0x40	/* Ignore peripheral errors */
+#define EP93XX_DMA_M2P_ERROR_MASK	0x60	/* Mask of error bits */
+
+/**
+ * ep93xx_dma_m2p_client_register - Register a client with the DMA M2P
+ * subsystem
+ *
+ * @m2p: Client information to register
+ * returns 0 on success
+ *
+ * The DMA M2P subsystem allocates a channel and an interrupt line for the DMA
+ * client
+ */
+int ep93xx_dma_m2p_client_register(struct ep93xx_dma_m2p_client *m2p);
+
+/**
+ * ep93xx_dma_m2p_client_unregister - Unregister a client from the DMA M2P
+ * subsystem
+ *
+ * @m2p: Client to unregister
+ *
+ * Any transfers currently in progress will be completed in hardware, but
+ * ignored in software.
+ */
 void ep93xx_dma_m2p_client_unregister(struct ep93xx_dma_m2p_client *m2p);
+
+/**
+ * ep93xx_dma_m2p_submit - Submit a DMA M2P transfer
+ *
+ * @m2p: DMA Client to submit the transfer on
+ * @buf: DMA Buffer to submit
+ *
+ * If the current or next transfer positions are free on the M2P client then
+ * the transfer is started immediately. If not, the transfer is added to the
+ * list of pending transfers. This function must not be called from the
+ * buffer_finished callback for an M2P channel.
+ *
+ */
 void ep93xx_dma_m2p_submit(struct ep93xx_dma_m2p_client *m2p,
 			   struct ep93xx_dma_buffer *buf);
+
+/**
+ * ep93xx_dma_m2p_submit_recursive - Put a DMA transfer on the pending list
+ * for an M2P channel
+ *
+ * @m2p: DMA Client to submit the transfer on
+ * @buf: DMA Buffer to submit
+ *
+ * This function must only be called from the buffer_finished callback for an
+ * M2P channel. It is commonly used to add the next transfer in a chained list
+ * of DMA transfers.
+ */
 void ep93xx_dma_m2p_submit_recursive(struct ep93xx_dma_m2p_client *m2p,
 				     struct ep93xx_dma_buffer *buf);
+
+/**
+ * ep93xx_dma_m2p_flush - Flush all pending transfers on a DMA M2P client
+ *
+ * @m2p: DMA client to flush transfers on
+ *
+ * Any transfers currently in progress will be completed in hardware, but
+ * ignored in software.
+ *
+ */
 void ep93xx_dma_m2p_flush(struct ep93xx_dma_m2p_client *m2p);
 
 #endif /* __ASM_ARCH_DMA_H */
diff --git a/arch/arm/mach-kirkwood/common.c b/arch/arm/mach-kirkwood/common.c
index 51ff23b..3688123 100644
--- a/arch/arm/mach-kirkwood/common.c
+++ b/arch/arm/mach-kirkwood/common.c
@@ -854,10 +854,9 @@
 
 	kirkwood_pcie_id(&dev, &rev);
 
-	if ((dev == MV88F6281_DEV_ID && (rev == MV88F6281_REV_A0 ||
-					rev == MV88F6281_REV_A1)) ||
-	    (dev == MV88F6282_DEV_ID))
-		return 200000000;
+	if (dev == MV88F6281_DEV_ID || dev == MV88F6282_DEV_ID)
+		if (((readl(SAMPLE_AT_RESET) >> 21) & 1) == 0)
+			return 200000000;
 
 	return 166666667;
 }
diff --git a/arch/arm/mach-kirkwood/d2net_v2-setup.c b/arch/arm/mach-kirkwood/d2net_v2-setup.c
index 4aa86e4..a31c949 100644
--- a/arch/arm/mach-kirkwood/d2net_v2-setup.c
+++ b/arch/arm/mach-kirkwood/d2net_v2-setup.c
@@ -225,5 +225,5 @@
 	.init_machine	= d2net_v2_init,
 	.map_io		= kirkwood_map_io,
 	.init_irq	= kirkwood_init_irq,
-	.timer		= &lacie_v2_timer,
+	.timer		= &kirkwood_timer,
 MACHINE_END
diff --git a/arch/arm/mach-kirkwood/lacie_v2-common.c b/arch/arm/mach-kirkwood/lacie_v2-common.c
index d3ea1b6..285edab 100644
--- a/arch/arm/mach-kirkwood/lacie_v2-common.c
+++ b/arch/arm/mach-kirkwood/lacie_v2-common.c
@@ -111,17 +111,3 @@
 			pr_err("Failed to power up HDD%d\n", i + 1);
 	}
 }
-
-/*****************************************************************************
- * Timer
- ****************************************************************************/
-
-static void lacie_v2_timer_init(void)
-{
-	kirkwood_tclk = 166666667;
-	orion_time_init(IRQ_KIRKWOOD_BRIDGE, kirkwood_tclk);
-}
-
-struct sys_timer lacie_v2_timer = {
-	.init = lacie_v2_timer_init,
-};
diff --git a/arch/arm/mach-kirkwood/lacie_v2-common.h b/arch/arm/mach-kirkwood/lacie_v2-common.h
index af52131..fc64f57 100644
--- a/arch/arm/mach-kirkwood/lacie_v2-common.h
+++ b/arch/arm/mach-kirkwood/lacie_v2-common.h
@@ -13,6 +13,4 @@
 void lacie_v2_register_i2c_devices(void);
 void lacie_v2_hdd_power_init(int hdd_num);
 
-extern struct sys_timer lacie_v2_timer;
-
 #endif
diff --git a/arch/arm/mach-kirkwood/mpp.c b/arch/arm/mach-kirkwood/mpp.c
index 065187d..27901f7 100644
--- a/arch/arm/mach-kirkwood/mpp.c
+++ b/arch/arm/mach-kirkwood/mpp.c
@@ -59,7 +59,7 @@
 	}
 	printk("\n");
 
-	while (*mpp_list) {
+	for ( ; *mpp_list; mpp_list++) {
 		unsigned int num = MPP_NUM(*mpp_list);
 		unsigned int sel = MPP_SEL(*mpp_list);
 		int shift, gpio_mode;
@@ -88,8 +88,6 @@
 		if (sel != 0)
 			gpio_mode = 0;
 		orion_gpio_set_valid(num, gpio_mode);
-
-		mpp_list++;
 	}
 
 	printk(KERN_DEBUG "  final MPP regs:");
diff --git a/arch/arm/mach-kirkwood/netspace_v2-setup.c b/arch/arm/mach-kirkwood/netspace_v2-setup.c
index 5ea66f1..65ee21f 100644
--- a/arch/arm/mach-kirkwood/netspace_v2-setup.c
+++ b/arch/arm/mach-kirkwood/netspace_v2-setup.c
@@ -262,7 +262,7 @@
 	.init_machine	= netspace_v2_init,
 	.map_io		= kirkwood_map_io,
 	.init_irq	= kirkwood_init_irq,
-	.timer		= &lacie_v2_timer,
+	.timer		= &kirkwood_timer,
 MACHINE_END
 #endif
 
@@ -272,7 +272,7 @@
 	.init_machine	= netspace_v2_init,
 	.map_io		= kirkwood_map_io,
 	.init_irq	= kirkwood_init_irq,
-	.timer		= &lacie_v2_timer,
+	.timer		= &kirkwood_timer,
 MACHINE_END
 #endif
 
@@ -282,6 +282,6 @@
 	.init_machine	= netspace_v2_init,
 	.map_io		= kirkwood_map_io,
 	.init_irq	= kirkwood_init_irq,
-	.timer		= &lacie_v2_timer,
+	.timer		= &kirkwood_timer,
 MACHINE_END
 #endif
diff --git a/arch/arm/mach-kirkwood/netxbig_v2-setup.c b/arch/arm/mach-kirkwood/netxbig_v2-setup.c
index a1b45d5..93afd3c 100644
--- a/arch/arm/mach-kirkwood/netxbig_v2-setup.c
+++ b/arch/arm/mach-kirkwood/netxbig_v2-setup.c
@@ -403,7 +403,7 @@
 	.init_machine	= netxbig_v2_init,
 	.map_io		= kirkwood_map_io,
 	.init_irq	= kirkwood_init_irq,
-	.timer		= &lacie_v2_timer,
+	.timer		= &kirkwood_timer,
 MACHINE_END
 #endif
 
@@ -413,6 +413,6 @@
 	.init_machine	= netxbig_v2_init,
 	.map_io		= kirkwood_map_io,
 	.init_irq	= kirkwood_init_irq,
-	.timer		= &lacie_v2_timer,
+	.timer		= &kirkwood_timer,
 MACHINE_END
 #endif
diff --git a/arch/arm/mach-kirkwood/ts41x-setup.c b/arch/arm/mach-kirkwood/ts41x-setup.c
index 8be09a0..3587a28 100644
--- a/arch/arm/mach-kirkwood/ts41x-setup.c
+++ b/arch/arm/mach-kirkwood/ts41x-setup.c
@@ -27,6 +27,10 @@
 #include "mpp.h"
 #include "tsx1x-common.h"
 
+/* for the PCIe reset workaround */
+#include <plat/pcie.h>
+
+
 #define QNAP_TS41X_JUMPER_JP1	45
 
 static struct i2c_board_info __initdata qnap_ts41x_i2c_rtc = {
@@ -140,8 +144,16 @@
 
 static int __init ts41x_pci_init(void)
 {
-	if (machine_is_ts41x())
+	if (machine_is_ts41x()) {
+		/*
+		 * Without this explicit reset, the PCIe SATA controller
+		 * (Marvell 88sx7042/sata_mv) is known to stop working
+		 * after a few minutes.
+		 */
+		orion_pcie_reset((void __iomem *)PCIE_VIRT_BASE);
+
 		kirkwood_pcie_init(KW_PCIE0);
+	}
 
    return 0;
 }
diff --git a/arch/arm/mach-mmp/include/mach/cputype.h b/arch/arm/mach-mmp/include/mach/cputype.h
index f43a68b..8a3b56d 100644
--- a/arch/arm/mach-mmp/include/mach/cputype.h
+++ b/arch/arm/mach-mmp/include/mach/cputype.h
@@ -46,7 +46,8 @@
 #ifdef CONFIG_CPU_MMP2
 static inline int cpu_is_mmp2(void)
 {
-	return (((cpu_readid_id() >> 8) & 0xff) == 0x58);
+	return (((read_cpuid_id() >> 8) & 0xff) == 0x58);
+}
 #else
 #define cpu_is_mmp2()	(0)
 #endif
diff --git a/arch/arm/mach-mv78xx0/mpp.c b/arch/arm/mach-mv78xx0/mpp.c
index 354ac51..84db2df 100644
--- a/arch/arm/mach-mv78xx0/mpp.c
+++ b/arch/arm/mach-mv78xx0/mpp.c
@@ -54,7 +54,7 @@
 	}
 	printk("\n");
 
-	while (*mpp_list) {
+	for ( ; *mpp_list; mpp_list++) {
 		unsigned int num = MPP_NUM(*mpp_list);
 		unsigned int sel = MPP_SEL(*mpp_list);
 		int shift, gpio_mode;
@@ -83,8 +83,6 @@
 		if (sel != 0)
 			gpio_mode = 0;
 		orion_gpio_set_valid(num, gpio_mode);
-
-		mpp_list++;
 	}
 
 	printk(KERN_DEBUG "  final MPP regs:");
diff --git a/arch/arm/mach-orion5x/mpp.c b/arch/arm/mach-orion5x/mpp.c
index bc4c3b9..db485d3 100644
--- a/arch/arm/mach-orion5x/mpp.c
+++ b/arch/arm/mach-orion5x/mpp.c
@@ -127,7 +127,7 @@
 	/* Initialize gpiolib. */
 	orion_gpio_init();
 
-	while (mode->mpp >= 0) {
+	for ( ; mode->mpp >= 0; mode++) {
 		u32 *reg;
 		int num_type;
 		int shift;
@@ -160,8 +160,6 @@
 			orion_gpio_set_unused(mode->mpp);
 
 		orion_gpio_set_valid(mode->mpp, !!(mode->type == MPP_GPIO));
-
-		mode++;
 	}
 
 	writel(mpp_0_7_ctrl, MPP_0_7_CTRL);
diff --git a/arch/arm/mach-orion5x/ts78xx-setup.c b/arch/arm/mach-orion5x/ts78xx-setup.c
index 16f1bd5..c1c1cd0 100644
--- a/arch/arm/mach-orion5x/ts78xx-setup.c
+++ b/arch/arm/mach-orion5x/ts78xx-setup.c
@@ -239,7 +239,7 @@
 static struct resource ts78xx_ts_nand_resources = {
 	.start		= TS_NAND_DATA,
 	.end		= TS_NAND_DATA + 4,
-	.flags		= IORESOURCE_IO,
+	.flags		= IORESOURCE_MEM,
 };
 
 static struct platform_device ts78xx_ts_nand_device = {
diff --git a/arch/arm/mach-pxa/cm-x2xx.c b/arch/arm/mach-pxa/cm-x2xx.c
index ac5598c..d34b99f 100644
--- a/arch/arm/mach-pxa/cm-x2xx.c
+++ b/arch/arm/mach-pxa/cm-x2xx.c
@@ -476,8 +476,6 @@
 
 static void __init cmx2xx_init_irq(void)
 {
-	pxa27x_init_irq();
-
 	if (cpu_is_pxa25x()) {
 		pxa25x_init_irq();
 		cmx2xx_pci_init_irq(CMX255_GPIO_IT8152_IRQ);
diff --git a/arch/arm/mach-pxa/saar.c b/arch/arm/mach-pxa/saar.c
index 4b521e0..ffa50e6 100644
--- a/arch/arm/mach-pxa/saar.c
+++ b/arch/arm/mach-pxa/saar.c
@@ -116,7 +116,7 @@
 	},
 };
 
-#if defined(CONFIG_FB_PXA) || (CONFIG_FB_PXA_MODULE)
+#if defined(CONFIG_FB_PXA) || defined(CONFIG_FB_PXA_MODULE)
 static uint16_t lcd_power_on[] = {
 	/* single frame */
 	SMART_CMD_NOOP,
diff --git a/arch/arm/mach-vexpress/ct-ca9x4.c b/arch/arm/mach-vexpress/ct-ca9x4.c
index c2e405a..fd25ccd 100644
--- a/arch/arm/mach-vexpress/ct-ca9x4.c
+++ b/arch/arm/mach-vexpress/ct-ca9x4.c
@@ -54,7 +54,9 @@
 
 static void __init ct_ca9x4_map_io(void)
 {
+#ifdef CONFIG_LOCAL_TIMERS
 	twd_base = MMIO_P2V(A9_MPCORE_TWD);
+#endif
 	v2m_map_io(ct_ca9x4_io_desc, ARRAY_SIZE(ct_ca9x4_io_desc));
 }
 
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index e4dd064..ac6a361 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -198,7 +198,7 @@
 	 * fragmentation of the DMA space, and also prevents allocations
 	 * smaller than a section from crossing a section boundary.
 	 */
-	bit = fls(size - 1) + 1;
+	bit = fls(size - 1);
 	if (bit > SECTION_SHIFT)
 		bit = SECTION_SHIFT;
 	align = 1 << bit;
diff --git a/arch/arm/plat-orion/include/plat/pcie.h b/arch/arm/plat-orion/include/plat/pcie.h
index 3ebfef7..cc99163 100644
--- a/arch/arm/plat-orion/include/plat/pcie.h
+++ b/arch/arm/plat-orion/include/plat/pcie.h
@@ -11,12 +11,15 @@
 #ifndef __PLAT_PCIE_H
 #define __PLAT_PCIE_H
 
+struct pci_bus;
+
 u32 orion_pcie_dev_id(void __iomem *base);
 u32 orion_pcie_rev(void __iomem *base);
 int orion_pcie_link_up(void __iomem *base);
 int orion_pcie_x4_mode(void __iomem *base);
 int orion_pcie_get_local_bus_nr(void __iomem *base);
 void orion_pcie_set_local_bus_nr(void __iomem *base, int nr);
+void orion_pcie_reset(void __iomem *base);
 void orion_pcie_setup(void __iomem *base,
 		      struct mbus_dram_target_info *dram);
 int orion_pcie_rd_conf(void __iomem *base, struct pci_bus *bus,
diff --git a/arch/arm/plat-orion/pcie.c b/arch/arm/plat-orion/pcie.c
index 779553a..af2d733 100644
--- a/arch/arm/plat-orion/pcie.c
+++ b/arch/arm/plat-orion/pcie.c
@@ -182,11 +182,6 @@
 	u32 mask;
 
 	/*
-	 * soft reset PCIe unit
-	 */
-	orion_pcie_reset(base);
-
-	/*
 	 * Point PCIe unit MBUS decode windows to DRAM space.
 	 */
 	orion_pcie_setup_wins(base, dram);
diff --git a/arch/um/include/asm/ptrace-generic.h b/arch/um/include/asm/ptrace-generic.h
index 2cd899f..b7c5bab 100644
--- a/arch/um/include/asm/ptrace-generic.h
+++ b/arch/um/include/asm/ptrace-generic.h
@@ -38,8 +38,8 @@
 
 struct task_struct;
 
-extern long subarch_ptrace(struct task_struct *child, long request, long addr,
-			   long data);
+extern long subarch_ptrace(struct task_struct *child, long request,
+	unsigned long addr, unsigned long data);
 extern unsigned long getreg(struct task_struct *child, int regno);
 extern int putreg(struct task_struct *child, int regno, unsigned long value);
 extern int get_fpregs(struct user_i387_struct __user *buf,
diff --git a/arch/um/kernel/ptrace.c b/arch/um/kernel/ptrace.c
index a5e33f2..701b672 100644
--- a/arch/um/kernel/ptrace.c
+++ b/arch/um/kernel/ptrace.c
@@ -122,7 +122,7 @@
 		break;
 
 	case PTRACE_SET_THREAD_AREA:
-		ret = ptrace_set_thread_area(child, addr, datavp);
+		ret = ptrace_set_thread_area(child, addr, vp);
 		break;
 
 	case PTRACE_FAULTINFO: {
diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
index 286de34..f6ce0bd 100644
--- a/arch/x86/include/asm/apic.h
+++ b/arch/x86/include/asm/apic.h
@@ -141,13 +141,13 @@
 
 static inline u32 native_apic_msr_read(u32 reg)
 {
-	u32 low, high;
+	u64 msr;
 
 	if (reg == APIC_DFR)
 		return -1;
 
-	rdmsr(APIC_BASE_MSR + (reg >> 4), low, high);
-	return low;
+	rdmsrl(APIC_BASE_MSR + (reg >> 4), msr);
+	return (u32)msr;
 }
 
 static inline void native_x2apic_wait_icr_idle(void)
@@ -181,12 +181,12 @@
 extern void x2apic_icr_write(u32 low, u32 id);
 static inline int x2apic_enabled(void)
 {
-	int msr, msr2;
+	u64 msr;
 
 	if (!cpu_has_x2apic)
 		return 0;
 
-	rdmsr(MSR_IA32_APICBASE, msr, msr2);
+	rdmsrl(MSR_IA32_APICBASE, msr);
 	if (msr & X2APIC_ENABLE)
 		return 1;
 	return 0;
diff --git a/arch/x86/include/asm/uv/uv_mmrs.h b/arch/x86/include/asm/uv/uv_mmrs.h
index b2f2d2e..6d90adf 100644
--- a/arch/x86/include/asm/uv/uv_mmrs.h
+++ b/arch/x86/include/asm/uv/uv_mmrs.h
@@ -806,6 +806,78 @@
 };
 
 /* ========================================================================= */
+/*                 UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR                  */
+/* ========================================================================= */
+#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR 0x16000c8UL
+
+#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_BASE_SHFT 24
+#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_BASE_MASK 0x00000000ff000000UL
+#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_M_ALIAS_SHFT 48
+#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_M_ALIAS_MASK 0x001f000000000000UL
+#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_ENABLE_SHFT 63
+#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_ENABLE_MASK 0x8000000000000000UL
+
+union uvh_rh_gam_alias210_overlay_config_0_mmr_u {
+    unsigned long	v;
+    struct uvh_rh_gam_alias210_overlay_config_0_mmr_s {
+	unsigned long	rsvd_0_23: 24;  /*    */
+	unsigned long	base    :  8;  /* RW */
+	unsigned long	rsvd_32_47: 16;  /*    */
+	unsigned long	m_alias :  5;  /* RW */
+	unsigned long	rsvd_53_62: 10;  /*    */
+	unsigned long	enable  :  1;  /* RW */
+    } s;
+};
+
+/* ========================================================================= */
+/*                 UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR                  */
+/* ========================================================================= */
+#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR 0x16000d8UL
+
+#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_BASE_SHFT 24
+#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_BASE_MASK 0x00000000ff000000UL
+#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_M_ALIAS_SHFT 48
+#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_M_ALIAS_MASK 0x001f000000000000UL
+#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_ENABLE_SHFT 63
+#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_ENABLE_MASK 0x8000000000000000UL
+
+union uvh_rh_gam_alias210_overlay_config_1_mmr_u {
+    unsigned long	v;
+    struct uvh_rh_gam_alias210_overlay_config_1_mmr_s {
+	unsigned long	rsvd_0_23: 24;  /*    */
+	unsigned long	base    :  8;  /* RW */
+	unsigned long	rsvd_32_47: 16;  /*    */
+	unsigned long	m_alias :  5;  /* RW */
+	unsigned long	rsvd_53_62: 10;  /*    */
+	unsigned long	enable  :  1;  /* RW */
+    } s;
+};
+
+/* ========================================================================= */
+/*                 UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR                  */
+/* ========================================================================= */
+#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR 0x16000e8UL
+
+#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_BASE_SHFT 24
+#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_BASE_MASK 0x00000000ff000000UL
+#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_M_ALIAS_SHFT 48
+#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_M_ALIAS_MASK 0x001f000000000000UL
+#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_ENABLE_SHFT 63
+#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_ENABLE_MASK 0x8000000000000000UL
+
+union uvh_rh_gam_alias210_overlay_config_2_mmr_u {
+    unsigned long	v;
+    struct uvh_rh_gam_alias210_overlay_config_2_mmr_s {
+	unsigned long	rsvd_0_23: 24;  /*    */
+	unsigned long	base    :  8;  /* RW */
+	unsigned long	rsvd_32_47: 16;  /*    */
+	unsigned long	m_alias :  5;  /* RW */
+	unsigned long	rsvd_53_62: 10;  /*    */
+	unsigned long	enable  :  1;  /* RW */
+    } s;
+};
+
+/* ========================================================================= */
 /*                UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR                  */
 /* ========================================================================= */
 #define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR 0x16000d0UL
@@ -857,6 +929,29 @@
 };
 
 /* ========================================================================= */
+/*                          UVH_RH_GAM_CONFIG_MMR                            */
+/* ========================================================================= */
+#define UVH_RH_GAM_CONFIG_MMR 0x1600000UL
+
+#define UVH_RH_GAM_CONFIG_MMR_M_SKT_SHFT 0
+#define UVH_RH_GAM_CONFIG_MMR_M_SKT_MASK 0x000000000000003fUL
+#define UVH_RH_GAM_CONFIG_MMR_N_SKT_SHFT 6
+#define UVH_RH_GAM_CONFIG_MMR_N_SKT_MASK 0x00000000000003c0UL
+#define UVH_RH_GAM_CONFIG_MMR_MMIOL_CFG_SHFT 12
+#define UVH_RH_GAM_CONFIG_MMR_MMIOL_CFG_MASK 0x0000000000001000UL
+
+union uvh_rh_gam_config_mmr_u {
+    unsigned long	v;
+    struct uvh_rh_gam_config_mmr_s {
+	unsigned long	m_skt     :  6;  /* RW */
+	unsigned long	n_skt     :  4;  /* RW */
+	unsigned long	rsvd_10_11:  2;  /*    */
+	unsigned long	mmiol_cfg :  1;  /* RW */
+	unsigned long	rsvd_13_63: 51;  /*    */
+    } s;
+};
+
+/* ========================================================================= */
 /*                    UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR                      */
 /* ========================================================================= */
 #define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR 0x1600010UL
@@ -987,97 +1082,5 @@
     } s;
 };
 
-/* ========================================================================= */
-/*                          UVH_SI_ADDR_MAP_CONFIG                           */
-/* ========================================================================= */
-#define UVH_SI_ADDR_MAP_CONFIG 0xc80000UL
 
-#define UVH_SI_ADDR_MAP_CONFIG_M_SKT_SHFT 0
-#define UVH_SI_ADDR_MAP_CONFIG_M_SKT_MASK 0x000000000000003fUL
-#define UVH_SI_ADDR_MAP_CONFIG_N_SKT_SHFT 8
-#define UVH_SI_ADDR_MAP_CONFIG_N_SKT_MASK 0x0000000000000f00UL
-
-union uvh_si_addr_map_config_u {
-    unsigned long	v;
-    struct uvh_si_addr_map_config_s {
-	unsigned long	m_skt :  6;  /* RW */
-	unsigned long	rsvd_6_7:  2;  /*    */
-	unsigned long	n_skt :  4;  /* RW */
-	unsigned long	rsvd_12_63: 52;  /*    */
-    } s;
-};
-
-/* ========================================================================= */
-/*                       UVH_SI_ALIAS0_OVERLAY_CONFIG                        */
-/* ========================================================================= */
-#define UVH_SI_ALIAS0_OVERLAY_CONFIG 0xc80008UL
-
-#define UVH_SI_ALIAS0_OVERLAY_CONFIG_BASE_SHFT 24
-#define UVH_SI_ALIAS0_OVERLAY_CONFIG_BASE_MASK 0x00000000ff000000UL
-#define UVH_SI_ALIAS0_OVERLAY_CONFIG_M_ALIAS_SHFT 48
-#define UVH_SI_ALIAS0_OVERLAY_CONFIG_M_ALIAS_MASK 0x001f000000000000UL
-#define UVH_SI_ALIAS0_OVERLAY_CONFIG_ENABLE_SHFT 63
-#define UVH_SI_ALIAS0_OVERLAY_CONFIG_ENABLE_MASK 0x8000000000000000UL
-
-union uvh_si_alias0_overlay_config_u {
-    unsigned long	v;
-    struct uvh_si_alias0_overlay_config_s {
-	unsigned long	rsvd_0_23: 24;  /*    */
-	unsigned long	base    :  8;  /* RW */
-	unsigned long	rsvd_32_47: 16;  /*    */
-	unsigned long	m_alias :  5;  /* RW */
-	unsigned long	rsvd_53_62: 10;  /*    */
-	unsigned long	enable  :  1;  /* RW */
-    } s;
-};
-
-/* ========================================================================= */
-/*                       UVH_SI_ALIAS1_OVERLAY_CONFIG                        */
-/* ========================================================================= */
-#define UVH_SI_ALIAS1_OVERLAY_CONFIG 0xc80010UL
-
-#define UVH_SI_ALIAS1_OVERLAY_CONFIG_BASE_SHFT 24
-#define UVH_SI_ALIAS1_OVERLAY_CONFIG_BASE_MASK 0x00000000ff000000UL
-#define UVH_SI_ALIAS1_OVERLAY_CONFIG_M_ALIAS_SHFT 48
-#define UVH_SI_ALIAS1_OVERLAY_CONFIG_M_ALIAS_MASK 0x001f000000000000UL
-#define UVH_SI_ALIAS1_OVERLAY_CONFIG_ENABLE_SHFT 63
-#define UVH_SI_ALIAS1_OVERLAY_CONFIG_ENABLE_MASK 0x8000000000000000UL
-
-union uvh_si_alias1_overlay_config_u {
-    unsigned long	v;
-    struct uvh_si_alias1_overlay_config_s {
-	unsigned long	rsvd_0_23: 24;  /*    */
-	unsigned long	base    :  8;  /* RW */
-	unsigned long	rsvd_32_47: 16;  /*    */
-	unsigned long	m_alias :  5;  /* RW */
-	unsigned long	rsvd_53_62: 10;  /*    */
-	unsigned long	enable  :  1;  /* RW */
-    } s;
-};
-
-/* ========================================================================= */
-/*                       UVH_SI_ALIAS2_OVERLAY_CONFIG                        */
-/* ========================================================================= */
-#define UVH_SI_ALIAS2_OVERLAY_CONFIG 0xc80018UL
-
-#define UVH_SI_ALIAS2_OVERLAY_CONFIG_BASE_SHFT 24
-#define UVH_SI_ALIAS2_OVERLAY_CONFIG_BASE_MASK 0x00000000ff000000UL
-#define UVH_SI_ALIAS2_OVERLAY_CONFIG_M_ALIAS_SHFT 48
-#define UVH_SI_ALIAS2_OVERLAY_CONFIG_M_ALIAS_MASK 0x001f000000000000UL
-#define UVH_SI_ALIAS2_OVERLAY_CONFIG_ENABLE_SHFT 63
-#define UVH_SI_ALIAS2_OVERLAY_CONFIG_ENABLE_MASK 0x8000000000000000UL
-
-union uvh_si_alias2_overlay_config_u {
-    unsigned long	v;
-    struct uvh_si_alias2_overlay_config_s {
-	unsigned long	rsvd_0_23: 24;  /*    */
-	unsigned long	base    :  8;  /* RW */
-	unsigned long	rsvd_32_47: 16;  /*    */
-	unsigned long	m_alias :  5;  /* RW */
-	unsigned long	rsvd_53_62: 10;  /*    */
-	unsigned long	enable  :  1;  /* RW */
-    } s;
-};
-
-
-#endif /* _ASM_X86_UV_UV_MMRS_H */
+#endif /* __ASM_UV_MMRS_X86_H__ */
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index 850657d..3f838d5 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -52,7 +52,6 @@
 #include <asm/mce.h>
 #include <asm/kvm_para.h>
 #include <asm/tsc.h>
-#include <asm/atomic.h>
 
 unsigned int num_processors;
 
diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
index ed4118d..194539a 100644
--- a/arch/x86/kernel/apic/x2apic_uv_x.c
+++ b/arch/x86/kernel/apic/x2apic_uv_x.c
@@ -379,14 +379,14 @@
 #define DEST_SHIFT UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR_DEST_BASE_SHFT
 
 static __initdata struct redir_addr redir_addrs[] = {
-	{UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR, UVH_SI_ALIAS0_OVERLAY_CONFIG},
-	{UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR, UVH_SI_ALIAS1_OVERLAY_CONFIG},
-	{UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR, UVH_SI_ALIAS2_OVERLAY_CONFIG},
+	{UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR, UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR},
+	{UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR, UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR},
+	{UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR, UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR},
 };
 
 static __init void get_lowmem_redirect(unsigned long *base, unsigned long *size)
 {
-	union uvh_si_alias0_overlay_config_u alias;
+	union uvh_rh_gam_alias210_overlay_config_2_mmr_u alias;
 	union uvh_rh_gam_alias210_redirect_config_2_mmr_u redirect;
 	int i;
 
@@ -660,7 +660,7 @@
 
 void __init uv_system_init(void)
 {
-	union uvh_si_addr_map_config_u m_n_config;
+	union uvh_rh_gam_config_mmr_u  m_n_config;
 	union uvh_node_id_u node_id;
 	unsigned long gnode_upper, lowmem_redir_base, lowmem_redir_size;
 	int bytes, nid, cpu, lcpu, pnode, blade, i, j, m_val, n_val;
@@ -670,7 +670,7 @@
 
 	map_low_mmrs();
 
-	m_n_config.v = uv_read_local_mmr(UVH_SI_ADDR_MAP_CONFIG);
+	m_n_config.v = uv_read_local_mmr(UVH_RH_GAM_CONFIG_MMR );
 	m_val = m_n_config.s.m_skt;
 	n_val = m_n_config.s.n_skt;
 	mmr_base =
diff --git a/arch/x86/kernel/cpu/perf_event_amd.c b/arch/x86/kernel/cpu/perf_event_amd.c
index 46d5844..e421b8c 100644
--- a/arch/x86/kernel/cpu/perf_event_amd.c
+++ b/arch/x86/kernel/cpu/perf_event_amd.c
@@ -280,11 +280,11 @@
 	struct amd_nb *nb;
 	int i;
 
-	nb = kmalloc(sizeof(struct amd_nb), GFP_KERNEL);
+	nb = kmalloc_node(sizeof(struct amd_nb), GFP_KERNEL | __GFP_ZERO,
+			  cpu_to_node(cpu));
 	if (!nb)
 		return NULL;
 
-	memset(nb, 0, sizeof(*nb));
 	nb->nb_id = nb_id;
 
 	/*
diff --git a/arch/x86/kernel/microcode_amd.c b/arch/x86/kernel/microcode_amd.c
index e1af7c0..ce0cb47 100644
--- a/arch/x86/kernel/microcode_amd.c
+++ b/arch/x86/kernel/microcode_amd.c
@@ -212,7 +212,7 @@
 		return 0;
 	}
 
-	equiv_cpu_table = (struct equiv_cpu_entry *) vmalloc(size);
+	equiv_cpu_table = vmalloc(size);
 	if (!equiv_cpu_table) {
 		pr_err("failed to allocate equivalent CPU table\n");
 		return 0;
diff --git a/arch/x86/kernel/mmconf-fam10h_64.c b/arch/x86/kernel/mmconf-fam10h_64.c
index 7182580..6da143c 100644
--- a/arch/x86/kernel/mmconf-fam10h_64.c
+++ b/arch/x86/kernel/mmconf-fam10h_64.c
@@ -217,13 +217,13 @@
 	wrmsrl(address, val);
 }
 
-static int __devinit set_check_enable_amd_mmconf(const struct dmi_system_id *d)
+static int __init set_check_enable_amd_mmconf(const struct dmi_system_id *d)
 {
         pci_probe |= PCI_CHECK_ENABLE_AMD_MMCONF;
         return 0;
 }
 
-static const struct dmi_system_id __cpuinitconst mmconf_dmi_table[] = {
+static const struct dmi_system_id __initconst mmconf_dmi_table[] = {
         {
                 .callback = set_check_enable_amd_mmconf,
                 .ident = "Sun Microsystems Machine",
@@ -234,7 +234,8 @@
 	{}
 };
 
-void __cpuinit check_enable_amd_mmconf_dmi(void)
+/* Called from a __cpuinit function, but only on the BSP. */
+void __ref check_enable_amd_mmconf_dmi(void)
 {
 	dmi_check_system(mmconf_dmi_table);
 }
diff --git a/arch/x86/kernel/pvclock.c b/arch/x86/kernel/pvclock.c
index bab3b9e..008b91e 100644
--- a/arch/x86/kernel/pvclock.c
+++ b/arch/x86/kernel/pvclock.c
@@ -41,44 +41,6 @@
 	valid_flags = flags;
 }
 
-/*
- * Scale a 64-bit delta by scaling and multiplying by a 32-bit fraction,
- * yielding a 64-bit result.
- */
-static inline u64 scale_delta(u64 delta, u32 mul_frac, int shift)
-{
-	u64 product;
-#ifdef __i386__
-	u32 tmp1, tmp2;
-#endif
-
-	if (shift < 0)
-		delta >>= -shift;
-	else
-		delta <<= shift;
-
-#ifdef __i386__
-	__asm__ (
-		"mul  %5       ; "
-		"mov  %4,%%eax ; "
-		"mov  %%edx,%4 ; "
-		"mul  %5       ; "
-		"xor  %5,%5    ; "
-		"add  %4,%%eax ; "
-		"adc  %5,%%edx ; "
-		: "=A" (product), "=r" (tmp1), "=r" (tmp2)
-		: "a" ((u32)delta), "1" ((u32)(delta >> 32)), "2" (mul_frac) );
-#elif defined(__x86_64__)
-	__asm__ (
-		"mul %%rdx ; shrd $32,%%rdx,%%rax"
-		: "=a" (product) : "0" (delta), "d" ((u64)mul_frac) );
-#else
-#error implement me!
-#endif
-
-	return product;
-}
-
 static u64 pvclock_get_nsec_offset(struct pvclock_shadow_time *shadow)
 {
 	u64 delta = native_read_tsc() - shadow->tsc_timestamp;
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index 4935848..12cdbb1 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -251,7 +251,7 @@
 	}
 }
 
-static int tlb_cpuhp_notify(struct notifier_block *n,
+static int __cpuinit tlb_cpuhp_notify(struct notifier_block *n,
 		unsigned long action, void *hcpu)
 {
 	switch (action & 0xf) {
diff --git a/arch/x86/pci/xen.c b/arch/x86/pci/xen.c
index 117f5b8..d7b5109 100644
--- a/arch/x86/pci/xen.c
+++ b/arch/x86/pci/xen.c
@@ -147,8 +147,10 @@
 		irq = xen_allocate_pirq(v[i], 0, /* not sharable */
 			(type == PCI_CAP_ID_MSIX) ?
 			"pcifront-msi-x" : "pcifront-msi");
-		if (irq < 0)
-			return -1;
+		if (irq < 0) {
+			ret = -1;
+			goto free;
+		}
 
 		ret = set_irq_msi(irq, msidesc);
 		if (ret)
@@ -164,7 +166,7 @@
 	if (ret == -ENODEV)
 		dev_err(&dev->dev, "Xen PCI frontend has not registered" \
 			" MSI/MSI-X support!\n");
-
+free:
 	kfree(v);
 	return ret;
 }
diff --git a/arch/x86/platform/uv/tlb_uv.c b/arch/x86/platform/uv/tlb_uv.c
index 20ea20a..a318194 100644
--- a/arch/x86/platform/uv/tlb_uv.c
+++ b/arch/x86/platform/uv/tlb_uv.c
@@ -1343,8 +1343,8 @@
 	 * each bau_desc is 64 bytes; there are 8 (UV_ITEMS_PER_DESCRIPTOR)
 	 * per cpu; and up to 32 (UV_ADP_SIZE) cpu's per uvhub
 	 */
-	bau_desc = (struct bau_desc *)kmalloc_node(sizeof(struct bau_desc)*
-		UV_ADP_SIZE*UV_ITEMS_PER_DESCRIPTOR, GFP_KERNEL, node);
+	bau_desc = kmalloc_node(sizeof(struct bau_desc) * UV_ADP_SIZE
+				* UV_ITEMS_PER_DESCRIPTOR, GFP_KERNEL, node);
 	BUG_ON(!bau_desc);
 
 	pa = uv_gpa(bau_desc); /* need the real nasid*/
@@ -1402,9 +1402,9 @@
 	struct bau_payload_queue_entry *pqp_malloc;
 	struct bau_control *bcp;
 
-	pqp = (struct bau_payload_queue_entry *) kmalloc_node(
-		(DEST_Q_SIZE + 1) * sizeof(struct bau_payload_queue_entry),
-		GFP_KERNEL, node);
+	pqp = kmalloc_node((DEST_Q_SIZE + 1)
+			   * sizeof(struct bau_payload_queue_entry),
+			   GFP_KERNEL, node);
 	BUG_ON(!pqp);
 	pqp_malloc = pqp;
 
@@ -1520,8 +1520,7 @@
 
 	timeout_us = calculate_destination_timeout();
 
-	uvhub_descs = (struct uvhub_desc *)
-		kmalloc(nuvhubs * sizeof(struct uvhub_desc), GFP_KERNEL);
+	uvhub_descs = kmalloc(nuvhubs * sizeof(struct uvhub_desc), GFP_KERNEL);
 	memset(uvhub_descs, 0, nuvhubs * sizeof(struct uvhub_desc));
 	uvhub_mask = kzalloc((nuvhubs+7)/8, GFP_KERNEL);
 	for_each_present_cpu(cpu) {
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index c237b81..21ed8d7 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -2126,7 +2126,7 @@
 {
 	pmd_t *kernel_pmd;
 
-	level2_kernel_pgt = extend_brk(sizeof(pmd_t *) * PTRS_PER_PMD, PAGE_SIZE);
+	level2_kernel_pgt = extend_brk(sizeof(pmd_t) * PTRS_PER_PMD, PAGE_SIZE);
 
 	max_pfn_mapped = PFN_DOWN(__pa(xen_start_info->pt_base) +
 				  xen_start_info->nr_pt_frames * PAGE_SIZE +
diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c
index b1dbdaa..769c4b0 100644
--- a/arch/x86/xen/setup.c
+++ b/arch/x86/xen/setup.c
@@ -118,16 +118,18 @@
 						     const struct e820map *e820)
 {
 	phys_addr_t max_addr = PFN_PHYS(max_pfn);
-	phys_addr_t last_end = 0;
+	phys_addr_t last_end = ISA_END_ADDRESS;
 	unsigned long released = 0;
 	int i;
 
+	/* Free any unused memory above the low 1Mbyte. */
 	for (i = 0; i < e820->nr_map && last_end < max_addr; i++) {
 		phys_addr_t end = e820->map[i].addr;
 		end = min(max_addr, end);
 
-		released += xen_release_chunk(last_end, end);
-		last_end = e820->map[i].addr + e820->map[i].size;
+		if (last_end < end)
+			released += xen_release_chunk(last_end, end);
+		last_end = max(last_end, e820->map[i].addr + e820->map[i].size);
 	}
 
 	if (last_end < max_addr)
@@ -164,6 +166,7 @@
 		XENMEM_memory_map;
 	rc = HYPERVISOR_memory_op(op, &memmap);
 	if (rc == -ENOSYS) {
+		BUG_ON(xen_initial_domain());
 		memmap.nr_entries = 1;
 		map[0].addr = 0ULL;
 		map[0].size = mem_end;
@@ -201,12 +204,13 @@
 	}
 
 	/*
-	 * Even though this is normal, usable memory under Xen, reserve
-	 * ISA memory anyway because too many things think they can poke
+	 * In domU, the ISA region is normal, usable memory, but we
+	 * reserve ISA memory anyway because too many things poke
 	 * about in there.
 	 *
-	 * In a dom0 kernel, this region is identity mapped with the
-	 * hardware ISA area, so it really is out of bounds.
+	 * In Dom0, the host E820 information can leave gaps in the
+	 * ISA range, which would cause us to release those pages.  To
+	 * avoid this, we unconditionally reserve them here.
 	 */
 	e820_add_region(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS,
 			E820_RESERVED);
diff --git a/block/blk-core.c b/block/blk-core.c
index f0834e2..4ce953f 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -1194,13 +1194,6 @@
 	int where = ELEVATOR_INSERT_SORT;
 	int rw_flags;
 
-	/* REQ_HARDBARRIER is no more */
-	if (WARN_ONCE(bio->bi_rw & REQ_HARDBARRIER,
-		"block: HARDBARRIER is deprecated, use FLUSH/FUA instead\n")) {
-		bio_endio(bio, -EOPNOTSUPP);
-		return 0;
-	}
-
 	/*
 	 * low level driver can indicate that it wants pages above a
 	 * certain limit bounced to low memory (ie for highmem, or even
@@ -1351,7 +1344,7 @@
 			bdevname(bio->bi_bdev, b),
 			bio->bi_rw,
 			(unsigned long long)bio->bi_sector + bio_sectors(bio),
-			(long long)(bio->bi_bdev->bd_inode->i_size >> 9));
+			(long long)(i_size_read(bio->bi_bdev->bd_inode) >> 9));
 
 	set_bit(BIO_EOF, &bio->bi_flags);
 }
@@ -1404,7 +1397,7 @@
 		return 0;
 
 	/* Test device or partition size, when known. */
-	maxsector = bio->bi_bdev->bd_inode->i_size >> 9;
+	maxsector = i_size_read(bio->bi_bdev->bd_inode) >> 9;
 	if (maxsector) {
 		sector_t sector = bio->bi_sector;
 
diff --git a/block/blk-ioc.c b/block/blk-ioc.c
index d22c4c5..3c7a339 100644
--- a/block/blk-ioc.c
+++ b/block/blk-ioc.c
@@ -153,20 +153,6 @@
 }
 EXPORT_SYMBOL(get_io_context);
 
-void copy_io_context(struct io_context **pdst, struct io_context **psrc)
-{
-	struct io_context *src = *psrc;
-	struct io_context *dst = *pdst;
-
-	if (src) {
-		BUG_ON(atomic_long_read(&src->refcount) == 0);
-		atomic_long_inc(&src->refcount);
-		put_io_context(dst);
-		*pdst = src;
-	}
-}
-EXPORT_SYMBOL(copy_io_context);
-
 static int __init blk_ioc_init(void)
 {
 	iocontext_cachep = kmem_cache_create("blkdev_ioc",
diff --git a/block/blk-map.c b/block/blk-map.c
index d4a586d..5d5dbe4 100644
--- a/block/blk-map.c
+++ b/block/blk-map.c
@@ -205,6 +205,8 @@
 			unaligned = 1;
 			break;
 		}
+		if (!iov[i].iov_len)
+			return -EINVAL;
 	}
 
 	if (unaligned || (q->dma_pad_mask & len) || map_data)
diff --git a/block/compat_ioctl.c b/block/compat_ioctl.c
index 119f07b..58c6ee5b 100644
--- a/block/compat_ioctl.c
+++ b/block/compat_ioctl.c
@@ -744,13 +744,13 @@
 		bdi->ra_pages = (arg * 512) / PAGE_CACHE_SIZE;
 		return 0;
 	case BLKGETSIZE:
-		size = bdev->bd_inode->i_size;
+		size = i_size_read(bdev->bd_inode);
 		if ((size >> 9) > ~0UL)
 			return -EFBIG;
 		return compat_put_ulong(arg, size >> 9);
 
 	case BLKGETSIZE64_32:
-		return compat_put_u64(arg, bdev->bd_inode->i_size);
+		return compat_put_u64(arg, i_size_read(bdev->bd_inode));
 
 	case BLKTRACESETUP32:
 	case BLKTRACESTART: /* compatible */
diff --git a/block/elevator.c b/block/elevator.c
index 282e830..2569512 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -429,7 +429,7 @@
 	q->nr_sorted--;
 
 	boundary = q->end_sector;
-	stop_flags = REQ_SOFTBARRIER | REQ_HARDBARRIER | REQ_STARTED;
+	stop_flags = REQ_SOFTBARRIER | REQ_STARTED;
 	list_for_each_prev(entry, &q->queue_head) {
 		struct request *pos = list_entry_rq(entry);
 
@@ -691,7 +691,7 @@
 void __elv_add_request(struct request_queue *q, struct request *rq, int where,
 		       int plug)
 {
-	if (rq->cmd_flags & (REQ_SOFTBARRIER | REQ_HARDBARRIER)) {
+	if (rq->cmd_flags & REQ_SOFTBARRIER) {
 		/* barriers are scheduling boundary, update end_sector */
 		if (rq->cmd_type == REQ_TYPE_FS ||
 		    (rq->cmd_flags & REQ_DISCARD)) {
diff --git a/block/ioctl.c b/block/ioctl.c
index d724ceb..3d866d0 100644
--- a/block/ioctl.c
+++ b/block/ioctl.c
@@ -125,7 +125,7 @@
 	start >>= 9;
 	len >>= 9;
 
-	if (start + len > (bdev->bd_inode->i_size >> 9))
+	if (start + len > (i_size_read(bdev->bd_inode) >> 9))
 		return -EINVAL;
 	if (secure)
 		flags |= BLKDEV_DISCARD_SECURE;
@@ -242,6 +242,7 @@
 		 * We need to set the startsect first, the driver may
 		 * want to override it.
 		 */
+		memset(&geo, 0, sizeof(geo));
 		geo.start = get_start_sect(bdev);
 		ret = disk->fops->getgeo(bdev, &geo);
 		if (ret)
@@ -307,12 +308,12 @@
 		ret = blkdev_reread_part(bdev);
 		break;
 	case BLKGETSIZE:
-		size = bdev->bd_inode->i_size;
+		size = i_size_read(bdev->bd_inode);
 		if ((size >> 9) > ~0UL)
 			return -EFBIG;
 		return put_ulong(arg, size >> 9);
 	case BLKGETSIZE64:
-		return put_u64(arg, bdev->bd_inode->i_size);
+		return put_u64(arg, i_size_read(bdev->bd_inode));
 	case BLKTRACESTART:
 	case BLKTRACESTOP:
 	case BLKTRACESETUP:
diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
index a8b5a10..4f4230b 100644
--- a/block/scsi_ioctl.c
+++ b/block/scsi_ioctl.c
@@ -321,33 +321,47 @@
 	if (hdr->iovec_count) {
 		const int size = sizeof(struct sg_iovec) * hdr->iovec_count;
 		size_t iov_data_len;
-		struct sg_iovec *iov;
+		struct sg_iovec *sg_iov;
+		struct iovec *iov;
+		int i;
 
-		iov = kmalloc(size, GFP_KERNEL);
-		if (!iov) {
+		sg_iov = kmalloc(size, GFP_KERNEL);
+		if (!sg_iov) {
 			ret = -ENOMEM;
 			goto out;
 		}
 
-		if (copy_from_user(iov, hdr->dxferp, size)) {
-			kfree(iov);
+		if (copy_from_user(sg_iov, hdr->dxferp, size)) {
+			kfree(sg_iov);
 			ret = -EFAULT;
 			goto out;
 		}
 
+		/*
+		 * Sum up the vecs, making sure they don't overflow
+		 */
+		iov = (struct iovec *) sg_iov;
+		iov_data_len = 0;
+		for (i = 0; i < hdr->iovec_count; i++) {
+			if (iov_data_len + iov[i].iov_len < iov_data_len) {
+				kfree(sg_iov);
+				ret = -EINVAL;
+				goto out;
+			}
+			iov_data_len += iov[i].iov_len;
+		}
+
 		/* SG_IO howto says that the shorter of the two wins */
-		iov_data_len = iov_length((struct iovec *)iov,
-					  hdr->iovec_count);
 		if (hdr->dxfer_len < iov_data_len) {
-			hdr->iovec_count = iov_shorten((struct iovec *)iov,
+			hdr->iovec_count = iov_shorten(iov,
 						       hdr->iovec_count,
 						       hdr->dxfer_len);
 			iov_data_len = hdr->dxfer_len;
 		}
 
-		ret = blk_rq_map_user_iov(q, rq, NULL, iov, hdr->iovec_count,
+		ret = blk_rq_map_user_iov(q, rq, NULL, sg_iov, hdr->iovec_count,
 					  iov_data_len, GFP_KERNEL);
-		kfree(iov);
+		kfree(sg_iov);
 	} else if (hdr->dxfer_len)
 		ret = blk_rq_map_user(q, rq, NULL, hdr->dxferp, hdr->dxfer_len,
 				      GFP_KERNEL);
diff --git a/crypto/pcrypt.c b/crypto/pcrypt.c
index de30782..75586f1 100644
--- a/crypto/pcrypt.c
+++ b/crypto/pcrypt.c
@@ -504,7 +504,6 @@
 
 static void pcrypt_fini_padata(struct padata_pcrypt *pcrypt)
 {
-	kobject_put(&pcrypt->pinst->kobj);
 	free_cpumask_var(pcrypt->cb_cpumask->mask);
 	kfree(pcrypt->cb_cpumask);
 
diff --git a/drivers/block/aoe/aoeblk.c b/drivers/block/aoe/aoeblk.c
index 541e188..528f631 100644
--- a/drivers/block/aoe/aoeblk.c
+++ b/drivers/block/aoe/aoeblk.c
@@ -180,9 +180,6 @@
 		BUG();
 		bio_endio(bio, -ENXIO);
 		return 0;
-	} else if (bio->bi_rw & REQ_HARDBARRIER) {
-		bio_endio(bio, -EOPNOTSUPP);
-		return 0;
 	} else if (bio->bi_io_vec == NULL) {
 		printk(KERN_ERR "aoe: bi_io_vec is NULL\n");
 		BUG();
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
index 2cc4dda..a67d0a6 100644
--- a/drivers/block/cciss.c
+++ b/drivers/block/cciss.c
@@ -113,6 +113,8 @@
 	{0x409D0E11, "Smart Array 6400 EM", &SA5_access},
 	{0x40910E11, "Smart Array 6i", &SA5_access},
 	{0x3225103C, "Smart Array P600", &SA5_access},
+	{0x3223103C, "Smart Array P800", &SA5_access},
+	{0x3234103C, "Smart Array P400", &SA5_access},
 	{0x3235103C, "Smart Array P400i", &SA5_access},
 	{0x3211103C, "Smart Array E200i", &SA5_access},
 	{0x3212103C, "Smart Array E200", &SA5_access},
@@ -3753,7 +3755,7 @@
 	for (i = 0; i < MAX_CONFIG_WAIT; i++) {
 		if (!(readl(h->vaddr + SA5_DOORBELL) & CFGTBL_ChangeReq))
 			break;
-		msleep(10);
+		usleep_range(10000, 20000);
 	}
 }
 
@@ -3937,10 +3939,9 @@
 	*board_id = ((subsystem_device_id << 16) & 0xffff0000) |
 			subsystem_vendor_id;
 
-	for (i = 0; i < ARRAY_SIZE(products); i++) {
+	for (i = 0; i < ARRAY_SIZE(products); i++)
 		if (*board_id == products[i].board_id)
 			return i;
-	}
 	dev_warn(&pdev->dev, "unrecognized board ID: 0x%08x, ignoring.\n",
 		*board_id);
 	return -ENODEV;
@@ -3971,18 +3972,31 @@
 	return -ENODEV;
 }
 
-static int __devinit cciss_wait_for_board_ready(ctlr_info_t *h)
+static int __devinit cciss_wait_for_board_state(struct pci_dev *pdev,
+	void __iomem *vaddr, int wait_for_ready)
+#define BOARD_READY 1
+#define BOARD_NOT_READY 0
 {
-	int i;
+	int i, iterations;
 	u32 scratchpad;
 
-	for (i = 0; i < CCISS_BOARD_READY_ITERATIONS; i++) {
-		scratchpad = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
-		if (scratchpad == CCISS_FIRMWARE_READY)
-			return 0;
+	if (wait_for_ready)
+		iterations = CCISS_BOARD_READY_ITERATIONS;
+	else
+		iterations = CCISS_BOARD_NOT_READY_ITERATIONS;
+
+	for (i = 0; i < iterations; i++) {
+		scratchpad = readl(vaddr + SA5_SCRATCHPAD_OFFSET);
+		if (wait_for_ready) {
+			if (scratchpad == CCISS_FIRMWARE_READY)
+				return 0;
+		} else {
+			if (scratchpad != CCISS_FIRMWARE_READY)
+				return 0;
+		}
 		msleep(CCISS_BOARD_READY_POLL_INTERVAL_MSECS);
 	}
-	dev_warn(&h->pdev->dev, "board not ready, timed out.\n");
+	dev_warn(&pdev->dev, "board not ready, timed out.\n");
 	return -ENODEV;
 }
 
@@ -4031,6 +4045,11 @@
 static void __devinit cciss_get_max_perf_mode_cmds(struct ctlr_info *h)
 {
 	h->max_commands = readl(&(h->cfgtable->MaxPerformantModeCommands));
+
+	/* Limit commands in memory limited kdump scenario. */
+	if (reset_devices && h->max_commands > 32)
+		h->max_commands = 32;
+
 	if (h->max_commands < 16) {
 		dev_warn(&h->pdev->dev, "Controller reports "
 			"max supported commands of %d, an obvious lie. "
@@ -4148,7 +4167,7 @@
 		err = -ENOMEM;
 		goto err_out_free_res;
 	}
-	err = cciss_wait_for_board_ready(h);
+	err = cciss_wait_for_board_state(h->pdev, h->vaddr, BOARD_READY);
 	if (err)
 		goto err_out_free_res;
 	err = cciss_find_cfgtables(h);
@@ -4313,36 +4332,6 @@
 #define cciss_soft_reset_controller(p) cciss_message(p, 1, 0)
 #define cciss_noop(p) cciss_message(p, 3, 0)
 
-static __devinit int cciss_reset_msi(struct pci_dev *pdev)
-{
-/* the #defines are stolen from drivers/pci/msi.h. */
-#define msi_control_reg(base)		(base + PCI_MSI_FLAGS)
-#define PCI_MSIX_FLAGS_ENABLE		(1 << 15)
-
-	int pos;
-	u16 control = 0;
-
-	pos = pci_find_capability(pdev, PCI_CAP_ID_MSI);
-	if (pos) {
-		pci_read_config_word(pdev, msi_control_reg(pos), &control);
-		if (control & PCI_MSI_FLAGS_ENABLE) {
-			dev_info(&pdev->dev, "resetting MSI\n");
-			pci_write_config_word(pdev, msi_control_reg(pos), control & ~PCI_MSI_FLAGS_ENABLE);
-		}
-	}
-
-	pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
-	if (pos) {
-		pci_read_config_word(pdev, msi_control_reg(pos), &control);
-		if (control & PCI_MSIX_FLAGS_ENABLE) {
-			dev_info(&pdev->dev, "resetting MSI-X\n");
-			pci_write_config_word(pdev, msi_control_reg(pos), control & ~PCI_MSIX_FLAGS_ENABLE);
-		}
-	}
-
-	return 0;
-}
-
 static int cciss_controller_hard_reset(struct pci_dev *pdev,
 	void * __iomem vaddr, bool use_doorbell)
 {
@@ -4397,17 +4386,17 @@
  * states or using the doorbell register. */
 static __devinit int cciss_kdump_hard_reset_controller(struct pci_dev *pdev)
 {
-	u16 saved_config_space[32];
 	u64 cfg_offset;
 	u32 cfg_base_addr;
 	u64 cfg_base_addr_index;
 	void __iomem *vaddr;
 	unsigned long paddr;
 	u32 misc_fw_support, active_transport;
-	int rc, i;
+	int rc;
 	CfgTable_struct __iomem *cfgtable;
 	bool use_doorbell;
 	u32 board_id;
+	u16 command_register;
 
 	/* For controllers as old as the P600, this is very nearly
 	 * the same thing as
@@ -4417,14 +4406,6 @@
 	 * pci_set_power_state(pci_dev, PCI_D0);
 	 * pci_restore_state(pci_dev);
 	 *
-	 * but we can't use these nice canned kernel routines on
-	 * kexec, because they also check the MSI/MSI-X state in PCI
-	 * configuration space and do the wrong thing when it is
-	 * set/cleared.  Also, the pci_save/restore_state functions
-	 * violate the ordering requirements for restoring the
-	 * configuration space from the CCISS document (see the
-	 * comment below).  So we roll our own ....
-	 *
 	 * For controllers newer than the P600, the pci power state
 	 * method of resetting doesn't work so we have another way
 	 * using the doorbell register.
@@ -4443,8 +4424,13 @@
 		return -ENODEV;
 	}
 
-	for (i = 0; i < 32; i++)
-		pci_read_config_word(pdev, 2*i, &saved_config_space[i]);
+	/* Save the PCI command register */
+	pci_read_config_word(pdev, 4, &command_register);
+	/* Turn the board off.  This is so that later pci_restore_state()
+	 * won't turn the board on before the rest of config space is ready.
+	 */
+	pci_disable_device(pdev);
+	pci_save_state(pdev);
 
 	/* find the first memory BAR, so we can find the cfg table */
 	rc = cciss_pci_find_memory_BAR(pdev, &paddr);
@@ -4479,26 +4465,32 @@
 	rc = cciss_controller_hard_reset(pdev, vaddr, use_doorbell);
 	if (rc)
 		goto unmap_cfgtable;
-
-	/* Restore the PCI configuration space.  The Open CISS
-	 * Specification says, "Restore the PCI Configuration
-	 * Registers, offsets 00h through 60h. It is important to
-	 * restore the command register, 16-bits at offset 04h,
-	 * last. Do not restore the configuration status register,
-	 * 16-bits at offset 06h."  Note that the offset is 2*i.
-	 */
-	for (i = 0; i < 32; i++) {
-		if (i == 2 || i == 3)
-			continue;
-		pci_write_config_word(pdev, 2*i, saved_config_space[i]);
+	pci_restore_state(pdev);
+	rc = pci_enable_device(pdev);
+	if (rc) {
+		dev_warn(&pdev->dev, "failed to enable device.\n");
+		goto unmap_cfgtable;
 	}
-	wmb();
-	pci_write_config_word(pdev, 4, saved_config_space[2]);
+	pci_write_config_word(pdev, 4, command_register);
 
 	/* Some devices (notably the HP Smart Array 5i Controller)
 	   need a little pause here */
 	msleep(CCISS_POST_RESET_PAUSE_MSECS);
 
+	/* Wait for board to become not ready, then ready. */
+	dev_info(&pdev->dev, "Waiting for board to become ready.\n");
+	rc = cciss_wait_for_board_state(pdev, vaddr, BOARD_NOT_READY);
+	if (rc) /* Don't bail, might be E500, etc. which can't be reset */
+		dev_warn(&pdev->dev,
+			"failed waiting for board to become not ready\n");
+	rc = cciss_wait_for_board_state(pdev, vaddr, BOARD_READY);
+	if (rc) {
+		dev_warn(&pdev->dev,
+			"failed waiting for board to become ready\n");
+		goto unmap_cfgtable;
+	}
+	dev_info(&pdev->dev, "board ready.\n");
+
 	/* Controller should be in simple mode at this point.  If it's not,
 	 * It means we're on one of those controllers which doesn't support
 	 * the doorbell reset method and on which the PCI power management reset
@@ -4539,8 +4531,6 @@
 		return 0; /* just try to do the kdump anyhow. */
 	if (rc)
 		return -ENODEV;
-	if (cciss_reset_msi(pdev))
-		return -ENODEV;
 
 	/* Now try to get the controller to respond to a no-op */
 	for (i = 0; i < CCISS_POST_RESET_NOOP_RETRIES; i++) {
@@ -4936,7 +4926,8 @@
 		}
 	}
 	kthread_stop(cciss_scan_thread);
-	remove_proc_entry("driver/cciss", NULL);
+	if (proc_cciss)
+		remove_proc_entry("driver/cciss", NULL);
 	bus_unregister(&cciss_bus_type);
 }
 
diff --git a/drivers/block/cciss.h b/drivers/block/cciss.h
index ae340ff..4b8933d 100644
--- a/drivers/block/cciss.h
+++ b/drivers/block/cciss.h
@@ -200,10 +200,14 @@
  * the above.
  */
 #define CCISS_BOARD_READY_WAIT_SECS (120)
+#define CCISS_BOARD_NOT_READY_WAIT_SECS (10)
 #define CCISS_BOARD_READY_POLL_INTERVAL_MSECS (100)
 #define CCISS_BOARD_READY_ITERATIONS \
 	((CCISS_BOARD_READY_WAIT_SECS * 1000) / \
 		CCISS_BOARD_READY_POLL_INTERVAL_MSECS)
+#define CCISS_BOARD_NOT_READY_ITERATIONS \
+	((CCISS_BOARD_NOT_READY_WAIT_SECS * 1000) / \
+		CCISS_BOARD_READY_POLL_INTERVAL_MSECS)
 #define CCISS_POST_RESET_PAUSE_MSECS (3000)
 #define CCISS_POST_RESET_NOOP_INTERVAL_MSECS (1000)
 #define CCISS_POST_RESET_NOOP_RETRIES (12)
diff --git a/drivers/block/drbd/drbd_actlog.c b/drivers/block/drbd/drbd_actlog.c
index ac04ef9..ba95cba 100644
--- a/drivers/block/drbd/drbd_actlog.c
+++ b/drivers/block/drbd/drbd_actlog.c
@@ -78,11 +78,10 @@
 	init_completion(&md_io.event);
 	md_io.error = 0;
 
-	if ((rw & WRITE) && !test_bit(MD_NO_BARRIER, &mdev->flags))
-		rw |= REQ_HARDBARRIER;
+	if ((rw & WRITE) && !test_bit(MD_NO_FUA, &mdev->flags))
+		rw |= REQ_FUA;
 	rw |= REQ_UNPLUG | REQ_SYNC;
 
- retry:
 	bio = bio_alloc(GFP_NOIO, 1);
 	bio->bi_bdev = bdev->md_bdev;
 	bio->bi_sector = sector;
@@ -100,17 +99,6 @@
 	wait_for_completion(&md_io.event);
 	ok = bio_flagged(bio, BIO_UPTODATE) && md_io.error == 0;
 
-	/* check for unsupported barrier op.
-	 * would rather check on EOPNOTSUPP, but that is not reliable.
-	 * don't try again for ANY return value != 0 */
-	if (unlikely((bio->bi_rw & REQ_HARDBARRIER) && !ok)) {
-		/* Try again with no barrier */
-		dev_warn(DEV, "Barriers not supported on meta data device - disabling\n");
-		set_bit(MD_NO_BARRIER, &mdev->flags);
-		rw &= ~REQ_HARDBARRIER;
-		bio_put(bio);
-		goto retry;
-	}
  out:
 	bio_put(bio);
 	return ok;
@@ -284,18 +272,32 @@
 	u32 xor_sum = 0;
 
 	if (!get_ldev(mdev)) {
-		dev_err(DEV, "get_ldev() failed in w_al_write_transaction\n");
+		dev_err(DEV,
+			"disk is %s, cannot start al transaction (-%d +%d)\n",
+			drbd_disk_str(mdev->state.disk), evicted, new_enr);
 		complete(&((struct update_al_work *)w)->event);
 		return 1;
 	}
 	/* do we have to do a bitmap write, first?
 	 * TODO reduce maximum latency:
 	 * submit both bios, then wait for both,
-	 * instead of doing two synchronous sector writes. */
+	 * instead of doing two synchronous sector writes.
+	 * For now, we must not write the transaction,
+	 * if we cannot write out the bitmap of the evicted extent. */
 	if (mdev->state.conn < C_CONNECTED && evicted != LC_FREE)
 		drbd_bm_write_sect(mdev, evicted/AL_EXT_PER_BM_SECT);
 
-	mutex_lock(&mdev->md_io_mutex); /* protects md_io_page, al_tr_cycle, ... */
+	/* The bitmap write may have failed, causing a state change. */
+	if (mdev->state.disk < D_INCONSISTENT) {
+		dev_err(DEV,
+			"disk is %s, cannot write al transaction (-%d +%d)\n",
+			drbd_disk_str(mdev->state.disk), evicted, new_enr);
+		complete(&((struct update_al_work *)w)->event);
+		put_ldev(mdev);
+		return 1;
+	}
+
+	mutex_lock(&mdev->md_io_mutex); /* protects md_io_buffer, al_tr_cycle, ... */
 	buffer = (struct al_transaction *)page_address(mdev->md_io_page);
 
 	buffer->magic = __constant_cpu_to_be32(DRBD_MAGIC);
@@ -739,7 +741,7 @@
 	unsigned int enr;
 	unsigned long add = 0;
 	char ppb[10];
-	int i;
+	int i, tmp;
 
 	wait_event(mdev->al_wait, lc_try_lock(mdev->act_log));
 
@@ -747,7 +749,9 @@
 		enr = lc_element_by_index(mdev->act_log, i)->lc_number;
 		if (enr == LC_FREE)
 			continue;
-		add += drbd_bm_ALe_set_all(mdev, enr);
+		tmp = drbd_bm_ALe_set_all(mdev, enr);
+		dynamic_dev_dbg(DEV, "AL: set %d bits in extent %u\n", tmp, enr);
+		add += tmp;
 	}
 
 	lc_unlock(mdev->act_log);
diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
index 9bdcf43..1ea1a34 100644
--- a/drivers/block/drbd/drbd_int.h
+++ b/drivers/block/drbd/drbd_int.h
@@ -114,11 +114,11 @@
 #define D_ASSERT(exp)	if (!(exp)) \
 	 dev_err(DEV, "ASSERT( " #exp " ) in %s:%d\n", __FILE__, __LINE__)
 
-#define ERR_IF(exp) if (({				\
-	int _b = (exp) != 0;				\
-	if (_b) dev_err(DEV, "%s: (%s) in %s:%d\n",	\
-		__func__, #exp, __FILE__, __LINE__);	\
-	 _b;						\
+#define ERR_IF(exp) if (({						\
+	int _b = (exp) != 0;						\
+	if (_b) dev_err(DEV, "ASSERT FAILED: %s: (%s) in %s:%d\n",	\
+			__func__, #exp, __FILE__, __LINE__);		\
+	_b;								\
 	}))
 
 /* Defines to control fault insertion */
@@ -749,17 +749,12 @@
 
 /* drbd_epoch flag bits */
 enum {
-	DE_BARRIER_IN_NEXT_EPOCH_ISSUED,
-	DE_BARRIER_IN_NEXT_EPOCH_DONE,
-	DE_CONTAINS_A_BARRIER,
 	DE_HAVE_BARRIER_NUMBER,
-	DE_IS_FINISHING,
 };
 
 enum epoch_event {
 	EV_PUT,
 	EV_GOT_BARRIER_NR,
-	EV_BARRIER_DONE,
 	EV_BECAME_LAST,
 	EV_CLEANUP = 32, /* used as flag */
 };
@@ -801,11 +796,6 @@
 	__EE_CALL_AL_COMPLETE_IO,
 	__EE_MAY_SET_IN_SYNC,
 
-	/* This epoch entry closes an epoch using a barrier.
-	 * On sucessful completion, the epoch is released,
-	 * and the P_BARRIER_ACK send. */
-	__EE_IS_BARRIER,
-
 	/* In case a barrier failed,
 	 * we need to resubmit without the barrier flag. */
 	__EE_RESUBMITTED,
@@ -820,7 +810,6 @@
 };
 #define EE_CALL_AL_COMPLETE_IO (1<<__EE_CALL_AL_COMPLETE_IO)
 #define EE_MAY_SET_IN_SYNC     (1<<__EE_MAY_SET_IN_SYNC)
-#define EE_IS_BARRIER          (1<<__EE_IS_BARRIER)
 #define	EE_RESUBMITTED         (1<<__EE_RESUBMITTED)
 #define EE_WAS_ERROR           (1<<__EE_WAS_ERROR)
 #define EE_HAS_DIGEST          (1<<__EE_HAS_DIGEST)
@@ -843,16 +832,15 @@
 				 * Gets cleared when the state.conn
 				 * goes into C_CONNECTED state. */
 	WRITE_BM_AFTER_RESYNC,	/* A kmalloc() during resync failed */
-	NO_BARRIER_SUPP,	/* underlying block device doesn't implement barriers */
 	CONSIDER_RESYNC,
 
-	MD_NO_BARRIER,		/* meta data device does not support barriers,
-				   so don't even try */
+	MD_NO_FUA,		/* User wants us not to use FUA/FLUSH on meta data dev */
 	SUSPEND_IO,		/* suspend application io */
 	BITMAP_IO,		/* suspend application io;
 				   once no more io in flight, start bitmap io */
 	BITMAP_IO_QUEUED,       /* Started bitmap IO */
-	GO_DISKLESS,		/* Disk failed, local_cnt reached zero, we are going diskless */
+	GO_DISKLESS,		/* Disk is being detached, on io-error or admin request. */
+	WAS_IO_ERROR,		/* Local disk failed, returned IO error */
 	RESYNC_AFTER_NEG,       /* Resync after online grow after the attach&negotiate finished. */
 	NET_CONGESTED,		/* The data socket is congested */
 
@@ -947,7 +935,6 @@
 	WO_none,
 	WO_drain_io,
 	WO_bdev_flush,
-	WO_bio_barrier
 };
 
 struct fifo_buffer {
@@ -1281,6 +1268,7 @@
 extern int drbd_bmio_clear_n_write(struct drbd_conf *mdev);
 extern int drbd_bitmap_io(struct drbd_conf *mdev, int (*io_fn)(struct drbd_conf *), char *why);
 extern void drbd_go_diskless(struct drbd_conf *mdev);
+extern void drbd_ldev_destroy(struct drbd_conf *mdev);
 
 
 /* Meta data layout
@@ -1798,17 +1786,17 @@
 	case EP_PASS_ON:
 		if (!forcedetach) {
 			if (__ratelimit(&drbd_ratelimit_state))
-				dev_err(DEV, "Local IO failed in %s."
-					     "Passing error on...\n", where);
+				dev_err(DEV, "Local IO failed in %s.\n", where);
 			break;
 		}
 		/* NOTE fall through to detach case if forcedetach set */
 	case EP_DETACH:
 	case EP_CALL_HELPER:
+		set_bit(WAS_IO_ERROR, &mdev->flags);
 		if (mdev->state.disk > D_FAILED) {
 			_drbd_set_state(_NS(mdev, disk, D_FAILED), CS_HARD, NULL);
-			dev_err(DEV, "Local IO failed in %s."
-				     "Detaching...\n", where);
+			dev_err(DEV,
+				"Local IO failed in %s. Detaching...\n", where);
 		}
 		break;
 	}
@@ -1874,7 +1862,7 @@
 static inline sector_t drbd_get_capacity(struct block_device *bdev)
 {
 	/* return bdev ? get_capacity(bdev->bd_disk) : 0; */
-	return bdev ? bdev->bd_inode->i_size >> 9 : 0;
+	return bdev ? i_size_read(bdev->bd_inode) >> 9 : 0;
 }
 
 /**
@@ -2127,7 +2115,11 @@
 	__release(local);
 	D_ASSERT(i >= 0);
 	if (i == 0) {
+		if (mdev->state.disk == D_DISKLESS)
+			/* even internal references gone, safe to destroy */
+			drbd_ldev_destroy(mdev);
 		if (mdev->state.disk == D_FAILED)
+			/* all application IO references gone. */
 			drbd_go_diskless(mdev);
 		wake_up(&mdev->misc_wait);
 	}
@@ -2138,6 +2130,10 @@
 {
 	int io_allowed;
 
+	/* never get a reference while D_DISKLESS */
+	if (mdev->state.disk == D_DISKLESS)
+		return 0;
+
 	atomic_inc(&mdev->local_cnt);
 	io_allowed = (mdev->state.disk >= mins);
 	if (!io_allowed)
@@ -2406,12 +2402,12 @@
 {
 	int r;
 
-	if (test_bit(MD_NO_BARRIER, &mdev->flags))
+	if (test_bit(MD_NO_FUA, &mdev->flags))
 		return;
 
 	r = blkdev_issue_flush(mdev->ldev->md_bdev, GFP_KERNEL, NULL);
 	if (r) {
-		set_bit(MD_NO_BARRIER, &mdev->flags);
+		set_bit(MD_NO_FUA, &mdev->flags);
 		dev_err(DEV, "meta data flush failed with status %d, disabling md-flushes\n", r);
 	}
 }
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index 25c7a73..6be5401 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -835,6 +835,15 @@
 	    ns.conn != C_UNCONNECTED && ns.conn != C_DISCONNECTING && ns.conn <= C_TEAR_DOWN)
 		ns.conn = os.conn;
 
+	/* we cannot fail (again) if we already detached */
+	if (ns.disk == D_FAILED && os.disk == D_DISKLESS)
+		ns.disk = D_DISKLESS;
+
+	/* if we are only D_ATTACHING yet,
+	 * we can (and should) go directly to D_DISKLESS. */
+	if (ns.disk == D_FAILED && os.disk == D_ATTACHING)
+		ns.disk = D_DISKLESS;
+
 	/* After C_DISCONNECTING only C_STANDALONE may follow */
 	if (os.conn == C_DISCONNECTING && ns.conn != C_STANDALONE)
 		ns.conn = os.conn;
@@ -1056,7 +1065,15 @@
 	    !test_and_set_bit(CONFIG_PENDING, &mdev->flags))
 		set_bit(DEVICE_DYING, &mdev->flags);
 
-	mdev->state.i = ns.i;
+	/* if we are going -> D_FAILED or D_DISKLESS, grab one extra reference
+	 * on the ldev here, to be sure the transition -> D_DISKLESS resp.
+	 * drbd_ldev_destroy() won't happen before our corresponding
+	 * after_state_ch works run, where we put_ldev again. */
+	if ((os.disk != D_FAILED && ns.disk == D_FAILED) ||
+	    (os.disk != D_DISKLESS && ns.disk == D_DISKLESS))
+		atomic_inc(&mdev->local_cnt);
+
+	mdev->state = ns;
 	wake_up(&mdev->misc_wait);
 	wake_up(&mdev->state_wait);
 
@@ -1268,7 +1285,6 @@
 			if (test_bit(NEW_CUR_UUID, &mdev->flags)) {
 				drbd_uuid_new_current(mdev);
 				clear_bit(NEW_CUR_UUID, &mdev->flags);
-				drbd_md_sync(mdev);
 			}
 			spin_lock_irq(&mdev->req_lock);
 			_drbd_set_state(_NS(mdev, susp_fen, 0), CS_VERBOSE, NULL);
@@ -1365,63 +1381,64 @@
 	    os.disk > D_INCONSISTENT && ns.disk == D_INCONSISTENT)
 		drbd_queue_bitmap_io(mdev, &drbd_bmio_set_n_write, NULL, "set_n_write from invalidate");
 
-	/* first half of local IO error */
-	if (os.disk > D_FAILED && ns.disk == D_FAILED) {
-		enum drbd_io_error_p eh = EP_PASS_ON;
+	/* first half of local IO error, failure to attach,
+	 * or administrative detach */
+	if (os.disk != D_FAILED && ns.disk == D_FAILED) {
+		enum drbd_io_error_p eh;
+		int was_io_error;
+		/* corresponding get_ldev was in __drbd_set_state, to serialize
+		 * our cleanup here with the transition to D_DISKLESS,
+		 * so it is safe to dereference ldev here. */
+		eh = mdev->ldev->dc.on_io_error;
+		was_io_error = test_and_clear_bit(WAS_IO_ERROR, &mdev->flags);
+
+		/* current state still has to be D_FAILED,
+		 * there is only one way out: to D_DISKLESS,
+		 * and that may only happen after our put_ldev below. */
+		if (mdev->state.disk != D_FAILED)
+			dev_err(DEV,
+				"ASSERT FAILED: disk is %s during detach\n",
+				drbd_disk_str(mdev->state.disk));
 
 		if (drbd_send_state(mdev))
-			dev_warn(DEV, "Notified peer that my disk is broken.\n");
+			dev_warn(DEV, "Notified peer that I am detaching my disk\n");
 		else
-			dev_err(DEV, "Sending state for drbd_io_error() failed\n");
+			dev_err(DEV, "Sending state for detaching disk failed\n");
 
 		drbd_rs_cancel_all(mdev);
 
-		if (get_ldev_if_state(mdev, D_FAILED)) {
-			eh = mdev->ldev->dc.on_io_error;
-			put_ldev(mdev);
-		}
-		if (eh == EP_CALL_HELPER)
+		/* In case we want to get something to stable storage still,
+		 * this may be the last chance.
+		 * Following put_ldev may transition to D_DISKLESS. */
+		drbd_md_sync(mdev);
+		put_ldev(mdev);
+
+		if (was_io_error && eh == EP_CALL_HELPER)
 			drbd_khelper(mdev, "local-io-error");
 	}
 
+	/* second half of local IO error, failure to attach,
+	 * or administrative detach,
+	 * after local_cnt references have reached zero again */
+	if (os.disk != D_DISKLESS && ns.disk == D_DISKLESS) {
+		/* We must still be diskless,
+		 * re-attach has to be serialized with this! */
+		if (mdev->state.disk != D_DISKLESS)
+			dev_err(DEV,
+				"ASSERT FAILED: disk is %s while going diskless\n",
+				drbd_disk_str(mdev->state.disk));
 
-	/* second half of local IO error handling,
-	 * after local_cnt references have reached zero: */
-	if (os.disk == D_FAILED && ns.disk == D_DISKLESS) {
-		mdev->rs_total = 0;
-		mdev->rs_failed = 0;
-		atomic_set(&mdev->rs_pending_cnt, 0);
-	}
+		mdev->rs_total = 0;
+		mdev->rs_failed = 0;
+		atomic_set(&mdev->rs_pending_cnt, 0);
 
-	if (os.disk > D_DISKLESS && ns.disk == D_DISKLESS) {
-		/* We must still be diskless,
-		 * re-attach has to be serialized with this! */
-		if (mdev->state.disk != D_DISKLESS)
-			dev_err(DEV,
-				"ASSERT FAILED: disk is %s while going diskless\n",
-				drbd_disk_str(mdev->state.disk));
-
-		/* we cannot assert local_cnt == 0 here, as get_ldev_if_state
-		 * will inc/dec it frequently. Since we became D_DISKLESS, no
-		 * one has touched the protected members anymore, though, so we
-		 * are safe to free them here. */
 		if (drbd_send_state(mdev))
-			dev_warn(DEV, "Notified peer that I detached my disk.\n");
+			dev_warn(DEV, "Notified peer that I'm now diskless.\n");
 		else
-			dev_err(DEV, "Sending state for detach failed\n");
-
-		lc_destroy(mdev->resync);
-		mdev->resync = NULL;
-		lc_destroy(mdev->act_log);
-		mdev->act_log = NULL;
-		__no_warn(local,
-			drbd_free_bc(mdev->ldev);
-			mdev->ldev = NULL;);
-
-		if (mdev->md_io_tmpp) {
-			__free_page(mdev->md_io_tmpp);
-			mdev->md_io_tmpp = NULL;
-		}
+			dev_err(DEV, "Sending state for being diskless failed\n");
+		/* corresponding get_ldev in __drbd_set_state
+		 * this may finally trigger drbd_ldev_destroy. */
+		put_ldev(mdev);
 	}
 
 	/* Disks got bigger while they were detached */
@@ -2772,11 +2789,6 @@
 
 	drbd_set_defaults(mdev);
 
-	/* for now, we do NOT yet support it,
-	 * even though we start some framework
-	 * to eventually support barriers */
-	set_bit(NO_BARRIER_SUPP, &mdev->flags);
-
 	atomic_set(&mdev->ap_bio_cnt, 0);
 	atomic_set(&mdev->ap_pending_cnt, 0);
 	atomic_set(&mdev->rs_pending_cnt, 0);
@@ -2842,7 +2854,7 @@
 	drbd_thread_init(mdev, &mdev->asender, drbd_asender);
 
 	mdev->agreed_pro_version = PRO_VERSION_MAX;
-	mdev->write_ordering = WO_bio_barrier;
+	mdev->write_ordering = WO_bdev_flush;
 	mdev->resync_wenr = LC_FREE;
 }
 
@@ -2899,7 +2911,6 @@
 	D_ASSERT(list_empty(&mdev->resync_work.list));
 	D_ASSERT(list_empty(&mdev->unplug_work.list));
 	D_ASSERT(list_empty(&mdev->go_diskless.list));
-
 }
 
 
@@ -3660,6 +3671,8 @@
 
 	get_random_bytes(&val, sizeof(u64));
 	_drbd_uuid_set(mdev, UI_CURRENT, val);
+	/* get it to stable storage _now_ */
+	drbd_md_sync(mdev);
 }
 
 void drbd_uuid_set_bm(struct drbd_conf *mdev, u64 val) __must_hold(local)
@@ -3756,19 +3769,31 @@
 	return 1;
 }
 
+void drbd_ldev_destroy(struct drbd_conf *mdev)
+{
+	lc_destroy(mdev->resync);
+	mdev->resync = NULL;
+	lc_destroy(mdev->act_log);
+	mdev->act_log = NULL;
+	__no_warn(local,
+		drbd_free_bc(mdev->ldev);
+		mdev->ldev = NULL;);
+
+	if (mdev->md_io_tmpp) {
+		__free_page(mdev->md_io_tmpp);
+		mdev->md_io_tmpp = NULL;
+	}
+	clear_bit(GO_DISKLESS, &mdev->flags);
+}
+
 static int w_go_diskless(struct drbd_conf *mdev, struct drbd_work *w, int unused)
 {
 	D_ASSERT(mdev->state.disk == D_FAILED);
 	/* we cannot assert local_cnt == 0 here, as get_ldev_if_state will
 	 * inc/dec it frequently. Once we are D_DISKLESS, no one will touch
-	 * the protected members anymore, though, so in the after_state_ch work
-	 * it will be safe to free them. */
+	 * the protected members anymore, though, so once put_ldev reaches zero
+	 * again, it will be safe to free them. */
 	drbd_force_state(mdev, NS(disk, D_DISKLESS));
-	/* We need to wait for return of references checked out while we still
-	 * have been D_FAILED, though (drbd_md_sync, bitmap io). */
-	wait_event(mdev->misc_wait, !atomic_read(&mdev->local_cnt));
-
-	clear_bit(GO_DISKLESS, &mdev->flags);
 	return 1;
 }
 
@@ -3777,9 +3802,6 @@
 	D_ASSERT(mdev->state.disk == D_FAILED);
 	if (!test_and_set_bit(GO_DISKLESS, &mdev->flags))
 		drbd_queue_work(&mdev->data.work, &mdev->go_diskless);
-		/* don't drbd_queue_work_front,
-		 * we need to serialize with the after_state_ch work
-		 * of the -> D_FAILED transition. */
 }
 
 /**
diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
index 87925e9..29e5c70 100644
--- a/drivers/block/drbd/drbd_nl.c
+++ b/drivers/block/drbd/drbd_nl.c
@@ -870,6 +870,11 @@
 		retcode = ERR_DISK_CONFIGURED;
 		goto fail;
 	}
+	/* It may just now have detached because of IO error.  Make sure
+	 * drbd_ldev_destroy is done already, we may end up here very fast,
+	 * e.g. if someone calls attach from the on-io-error handler,
+	 * to realize a "hot spare" feature (not that I'd recommend that) */
+	wait_event(mdev->misc_wait, !atomic_read(&mdev->local_cnt));
 
 	/* allocation not in the IO path, cqueue thread context */
 	nbc = kzalloc(sizeof(struct drbd_backing_dev), GFP_KERNEL);
@@ -1098,9 +1103,9 @@
 	/* Reset the "barriers don't work" bits here, then force meta data to
 	 * be written, to ensure we determine if barriers are supported. */
 	if (nbc->dc.no_md_flush)
-		set_bit(MD_NO_BARRIER, &mdev->flags);
+		set_bit(MD_NO_FUA, &mdev->flags);
 	else
-		clear_bit(MD_NO_BARRIER, &mdev->flags);
+		clear_bit(MD_NO_FUA, &mdev->flags);
 
 	/* Point of no return reached.
 	 * Devices and memory are no longer released by error cleanup below.
@@ -1112,8 +1117,8 @@
 	nbc = NULL;
 	resync_lru = NULL;
 
-	mdev->write_ordering = WO_bio_barrier;
-	drbd_bump_write_ordering(mdev, WO_bio_barrier);
+	mdev->write_ordering = WO_bdev_flush;
+	drbd_bump_write_ordering(mdev, WO_bdev_flush);
 
 	if (drbd_md_test_flag(mdev->ldev, MDF_CRASHED_PRIMARY))
 		set_bit(CRASHED_PRIMARY, &mdev->flags);
@@ -1262,7 +1267,7 @@
  force_diskless_dec:
 	put_ldev(mdev);
  force_diskless:
-	drbd_force_state(mdev, NS(disk, D_DISKLESS));
+	drbd_force_state(mdev, NS(disk, D_FAILED));
 	drbd_md_sync(mdev);
  release_bdev2_fail:
 	if (nbc)
@@ -1285,10 +1290,19 @@
 	return 0;
 }
 
+/* Detaching the disk is a process in multiple stages.  First we need to lock
+ * out application IO, in-flight IO, IO stuck in drbd_al_begin_io.
+ * Then we transition to D_DISKLESS, and wait for put_ldev() to return all
+ * internal references as well.
+ * Only then have we finally detached. */
 static int drbd_nl_detach(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
 			  struct drbd_nl_cfg_reply *reply)
 {
+	drbd_suspend_io(mdev); /* so no-one is stuck in drbd_al_begin_io */
 	reply->ret_code = drbd_request_state(mdev, NS(disk, D_DISKLESS));
+	if (mdev->state.disk == D_DISKLESS)
+		wait_event(mdev->misc_wait, !atomic_read(&mdev->local_cnt));
+	drbd_resume_io(mdev);
 	return 0;
 }
 
@@ -1953,7 +1967,6 @@
 	if (test_bit(NEW_CUR_UUID, &mdev->flags)) {
 		drbd_uuid_new_current(mdev);
 		clear_bit(NEW_CUR_UUID, &mdev->flags);
-		drbd_md_sync(mdev);
 	}
 	drbd_suspend_io(mdev);
 	reply->ret_code = drbd_request_state(mdev, NS3(susp, 0, susp_nod, 0, susp_fen, 0));
diff --git a/drivers/block/drbd/drbd_proc.c b/drivers/block/drbd/drbd_proc.c
index ad325c5..7e6ac30 100644
--- a/drivers/block/drbd/drbd_proc.c
+++ b/drivers/block/drbd/drbd_proc.c
@@ -158,7 +158,6 @@
 		[WO_none] = 'n',
 		[WO_drain_io] = 'd',
 		[WO_bdev_flush] = 'f',
-		[WO_bio_barrier] = 'b',
 	};
 
 	seq_printf(seq, "version: " REL_VERSION " (api:%d/proto:%d-%d)\n%s\n",
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
index efd6169..d299fe9 100644
--- a/drivers/block/drbd/drbd_receiver.c
+++ b/drivers/block/drbd/drbd_receiver.c
@@ -49,11 +49,6 @@
 
 #include "drbd_vli.h"
 
-struct flush_work {
-	struct drbd_work w;
-	struct drbd_epoch *epoch;
-};
-
 enum finish_epoch {
 	FE_STILL_LIVE,
 	FE_DESTROYED,
@@ -66,16 +61,6 @@
 static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *, struct drbd_epoch *, enum epoch_event);
 static int e_end_block(struct drbd_conf *, struct drbd_work *, int);
 
-static struct drbd_epoch *previous_epoch(struct drbd_conf *mdev, struct drbd_epoch *epoch)
-{
-	struct drbd_epoch *prev;
-	spin_lock(&mdev->epoch_lock);
-	prev = list_entry(epoch->list.prev, struct drbd_epoch, list);
-	if (prev == epoch || prev == mdev->current_epoch)
-		prev = NULL;
-	spin_unlock(&mdev->epoch_lock);
-	return prev;
-}
 
 #define GFP_TRY	(__GFP_HIGHMEM | __GFP_NOWARN)
 
@@ -981,7 +966,7 @@
 	return TRUE;
 }
 
-static enum finish_epoch drbd_flush_after_epoch(struct drbd_conf *mdev, struct drbd_epoch *epoch)
+static void drbd_flush(struct drbd_conf *mdev)
 {
 	int rv;
 
@@ -997,24 +982,6 @@
 		}
 		put_ldev(mdev);
 	}
-
-	return drbd_may_finish_epoch(mdev, epoch, EV_BARRIER_DONE);
-}
-
-static int w_flush(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
-{
-	struct flush_work *fw = (struct flush_work *)w;
-	struct drbd_epoch *epoch = fw->epoch;
-
-	kfree(w);
-
-	if (!test_and_set_bit(DE_BARRIER_IN_NEXT_EPOCH_ISSUED, &epoch->flags))
-		drbd_flush_after_epoch(mdev, epoch);
-
-	drbd_may_finish_epoch(mdev, epoch, EV_PUT |
-			      (mdev->state.conn < C_CONNECTED ? EV_CLEANUP : 0));
-
-	return 1;
 }
 
 /**
@@ -1027,15 +994,13 @@
 					       struct drbd_epoch *epoch,
 					       enum epoch_event ev)
 {
-	int finish, epoch_size;
+	int epoch_size;
 	struct drbd_epoch *next_epoch;
-	int schedule_flush = 0;
 	enum finish_epoch rv = FE_STILL_LIVE;
 
 	spin_lock(&mdev->epoch_lock);
 	do {
 		next_epoch = NULL;
-		finish = 0;
 
 		epoch_size = atomic_read(&epoch->epoch_size);
 
@@ -1045,16 +1010,6 @@
 			break;
 		case EV_GOT_BARRIER_NR:
 			set_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags);
-
-			/* Special case: If we just switched from WO_bio_barrier to
-			   WO_bdev_flush we should not finish the current epoch */
-			if (test_bit(DE_CONTAINS_A_BARRIER, &epoch->flags) && epoch_size == 1 &&
-			    mdev->write_ordering != WO_bio_barrier &&
-			    epoch == mdev->current_epoch)
-				clear_bit(DE_CONTAINS_A_BARRIER, &epoch->flags);
-			break;
-		case EV_BARRIER_DONE:
-			set_bit(DE_BARRIER_IN_NEXT_EPOCH_DONE, &epoch->flags);
 			break;
 		case EV_BECAME_LAST:
 			/* nothing to do*/
@@ -1063,23 +1018,7 @@
 
 		if (epoch_size != 0 &&
 		    atomic_read(&epoch->active) == 0 &&
-		    test_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags) &&
-		    epoch->list.prev == &mdev->current_epoch->list &&
-		    !test_bit(DE_IS_FINISHING, &epoch->flags)) {
-			/* Nearly all conditions are met to finish that epoch... */
-			if (test_bit(DE_BARRIER_IN_NEXT_EPOCH_DONE, &epoch->flags) ||
-			    mdev->write_ordering == WO_none ||
-			    (epoch_size == 1 && test_bit(DE_CONTAINS_A_BARRIER, &epoch->flags)) ||
-			    ev & EV_CLEANUP) {
-				finish = 1;
-				set_bit(DE_IS_FINISHING, &epoch->flags);
-			} else if (!test_bit(DE_BARRIER_IN_NEXT_EPOCH_ISSUED, &epoch->flags) &&
-				 mdev->write_ordering == WO_bio_barrier) {
-				atomic_inc(&epoch->active);
-				schedule_flush = 1;
-			}
-		}
-		if (finish) {
+		    test_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags)) {
 			if (!(ev & EV_CLEANUP)) {
 				spin_unlock(&mdev->epoch_lock);
 				drbd_send_b_ack(mdev, epoch->barrier_nr, epoch_size);
@@ -1102,6 +1041,7 @@
 				/* atomic_set(&epoch->active, 0); is already zero */
 				if (rv == FE_STILL_LIVE)
 					rv = FE_RECYCLED;
+				wake_up(&mdev->ee_wait);
 			}
 		}
 
@@ -1113,22 +1053,6 @@
 
 	spin_unlock(&mdev->epoch_lock);
 
-	if (schedule_flush) {
-		struct flush_work *fw;
-		fw = kmalloc(sizeof(*fw), GFP_ATOMIC);
-		if (fw) {
-			fw->w.cb = w_flush;
-			fw->epoch = epoch;
-			drbd_queue_work(&mdev->data.work, &fw->w);
-		} else {
-			dev_warn(DEV, "Could not kmalloc a flush_work obj\n");
-			set_bit(DE_BARRIER_IN_NEXT_EPOCH_ISSUED, &epoch->flags);
-			/* That is not a recursion, only one level */
-			drbd_may_finish_epoch(mdev, epoch, EV_BARRIER_DONE);
-			drbd_may_finish_epoch(mdev, epoch, EV_PUT);
-		}
-	}
-
 	return rv;
 }
 
@@ -1144,19 +1068,16 @@
 		[WO_none] = "none",
 		[WO_drain_io] = "drain",
 		[WO_bdev_flush] = "flush",
-		[WO_bio_barrier] = "barrier",
 	};
 
 	pwo = mdev->write_ordering;
 	wo = min(pwo, wo);
-	if (wo == WO_bio_barrier && mdev->ldev->dc.no_disk_barrier)
-		wo = WO_bdev_flush;
 	if (wo == WO_bdev_flush && mdev->ldev->dc.no_disk_flush)
 		wo = WO_drain_io;
 	if (wo == WO_drain_io && mdev->ldev->dc.no_disk_drain)
 		wo = WO_none;
 	mdev->write_ordering = wo;
-	if (pwo != mdev->write_ordering || wo == WO_bio_barrier)
+	if (pwo != mdev->write_ordering || wo == WO_bdev_flush)
 		dev_info(DEV, "Method to ensure write ordering: %s\n", write_ordering_str[mdev->write_ordering]);
 }
 
@@ -1192,7 +1113,7 @@
 	bio->bi_sector = sector;
 	bio->bi_bdev = mdev->ldev->backing_bdev;
 	/* we special case some flags in the multi-bio case, see below
-	 * (REQ_UNPLUG, REQ_HARDBARRIER) */
+	 * (REQ_UNPLUG) */
 	bio->bi_rw = rw;
 	bio->bi_private = e;
 	bio->bi_end_io = drbd_endio_sec;
@@ -1226,11 +1147,6 @@
 			bio->bi_rw &= ~REQ_UNPLUG;
 
 		drbd_generic_make_request(mdev, fault_type, bio);
-
-		/* strip off REQ_HARDBARRIER,
-		 * unless it is the first or last bio */
-		if (bios && bios->bi_next)
-			bios->bi_rw &= ~REQ_HARDBARRIER;
 	} while (bios);
 	maybe_kick_lo(mdev);
 	return 0;
@@ -1244,45 +1160,9 @@
 	return -ENOMEM;
 }
 
-/**
- * w_e_reissue() - Worker callback; Resubmit a bio, without REQ_HARDBARRIER set
- * @mdev:	DRBD device.
- * @w:		work object.
- * @cancel:	The connection will be closed anyways (unused in this callback)
- */
-int w_e_reissue(struct drbd_conf *mdev, struct drbd_work *w, int cancel) __releases(local)
-{
-	struct drbd_epoch_entry *e = (struct drbd_epoch_entry *)w;
-	/* We leave DE_CONTAINS_A_BARRIER and EE_IS_BARRIER in place,
-	   (and DE_BARRIER_IN_NEXT_EPOCH_ISSUED in the previous Epoch)
-	   so that we can finish that epoch in drbd_may_finish_epoch().
-	   That is necessary if we already have a long chain of Epochs, before
-	   we realize that REQ_HARDBARRIER is actually not supported */
-
-	/* As long as the -ENOTSUPP on the barrier is reported immediately
-	   that will never trigger. If it is reported late, we will just
-	   print that warning and continue correctly for all future requests
-	   with WO_bdev_flush */
-	if (previous_epoch(mdev, e->epoch))
-		dev_warn(DEV, "Write ordering was not enforced (one time event)\n");
-
-	/* we still have a local reference,
-	 * get_ldev was done in receive_Data. */
-
-	e->w.cb = e_end_block;
-	if (drbd_submit_ee(mdev, e, WRITE, DRBD_FAULT_DT_WR) != 0) {
-		/* drbd_submit_ee fails for one reason only:
-		 * if was not able to allocate sufficient bios.
-		 * requeue, try again later. */
-		e->w.cb = w_e_reissue;
-		drbd_queue_work(&mdev->data.work, &e->w);
-	}
-	return 1;
-}
-
 static int receive_Barrier(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
 {
-	int rv, issue_flush;
+	int rv;
 	struct p_barrier *p = &mdev->data.rbuf.barrier;
 	struct drbd_epoch *epoch;
 
@@ -1300,44 +1180,40 @@
 	 * Therefore we must send the barrier_ack after the barrier request was
 	 * completed. */
 	switch (mdev->write_ordering) {
-	case WO_bio_barrier:
 	case WO_none:
 		if (rv == FE_RECYCLED)
 			return TRUE;
-		break;
+
+		/* receiver context, in the writeout path of the other node.
+		 * avoid potential distributed deadlock */
+		epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
+		if (epoch)
+			break;
+		else
+			dev_warn(DEV, "Allocation of an epoch failed, slowing down\n");
+			/* Fall through */
 
 	case WO_bdev_flush:
 	case WO_drain_io:
-		if (rv == FE_STILL_LIVE) {
-			set_bit(DE_BARRIER_IN_NEXT_EPOCH_ISSUED, &mdev->current_epoch->flags);
-			drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
-			rv = drbd_flush_after_epoch(mdev, mdev->current_epoch);
-		}
-		if (rv == FE_RECYCLED)
-			return TRUE;
-
-		/* The asender will send all the ACKs and barrier ACKs out, since
-		   all EEs moved from the active_ee to the done_ee. We need to
-		   provide a new epoch object for the EEs that come in soon */
-		break;
-	}
-
-	/* receiver context, in the writeout path of the other node.
-	 * avoid potential distributed deadlock */
-	epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
-	if (!epoch) {
-		dev_warn(DEV, "Allocation of an epoch failed, slowing down\n");
-		issue_flush = !test_and_set_bit(DE_BARRIER_IN_NEXT_EPOCH_ISSUED, &mdev->current_epoch->flags);
 		drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
-		if (issue_flush) {
-			rv = drbd_flush_after_epoch(mdev, mdev->current_epoch);
-			if (rv == FE_RECYCLED)
-				return TRUE;
+		drbd_flush(mdev);
+
+		if (atomic_read(&mdev->current_epoch->epoch_size)) {
+			epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
+			if (epoch)
+				break;
 		}
 
-		drbd_wait_ee_list_empty(mdev, &mdev->done_ee);
+		epoch = mdev->current_epoch;
+		wait_event(mdev->ee_wait, atomic_read(&epoch->epoch_size) == 0);
+
+		D_ASSERT(atomic_read(&epoch->active) == 0);
+		D_ASSERT(epoch->flags == 0);
 
 		return TRUE;
+	default:
+		dev_err(DEV, "Strangeness in mdev->write_ordering %d\n", mdev->write_ordering);
+		return FALSE;
 	}
 
 	epoch->flags = 0;
@@ -1652,15 +1528,8 @@
 {
 	struct drbd_epoch_entry *e = (struct drbd_epoch_entry *)w;
 	sector_t sector = e->sector;
-	struct drbd_epoch *epoch;
 	int ok = 1, pcmd;
 
-	if (e->flags & EE_IS_BARRIER) {
-		epoch = previous_epoch(mdev, e->epoch);
-		if (epoch)
-			drbd_may_finish_epoch(mdev, epoch, EV_BARRIER_DONE + (cancel ? EV_CLEANUP : 0));
-	}
-
 	if (mdev->net_conf->wire_protocol == DRBD_PROT_C) {
 		if (likely((e->flags & EE_WAS_ERROR) == 0)) {
 			pcmd = (mdev->state.conn >= C_SYNC_SOURCE &&
@@ -1817,27 +1686,6 @@
 	e->epoch = mdev->current_epoch;
 	atomic_inc(&e->epoch->epoch_size);
 	atomic_inc(&e->epoch->active);
-
-	if (mdev->write_ordering == WO_bio_barrier && atomic_read(&e->epoch->epoch_size) == 1) {
-		struct drbd_epoch *epoch;
-		/* Issue a barrier if we start a new epoch, and the previous epoch
-		   was not a epoch containing a single request which already was
-		   a Barrier. */
-		epoch = list_entry(e->epoch->list.prev, struct drbd_epoch, list);
-		if (epoch == e->epoch) {
-			set_bit(DE_CONTAINS_A_BARRIER, &e->epoch->flags);
-			rw |= REQ_HARDBARRIER;
-			e->flags |= EE_IS_BARRIER;
-		} else {
-			if (atomic_read(&epoch->epoch_size) > 1 ||
-			    !test_bit(DE_CONTAINS_A_BARRIER, &epoch->flags)) {
-				set_bit(DE_BARRIER_IN_NEXT_EPOCH_ISSUED, &epoch->flags);
-				set_bit(DE_CONTAINS_A_BARRIER, &e->epoch->flags);
-				rw |= REQ_HARDBARRIER;
-				e->flags |= EE_IS_BARRIER;
-			}
-		}
-	}
 	spin_unlock(&mdev->epoch_lock);
 
 	dp_flags = be32_to_cpu(p->dp_flags);
@@ -1995,10 +1843,11 @@
 		break;
 	}
 
-	if (mdev->state.pdsk == D_DISKLESS) {
+	if (mdev->state.pdsk < D_INCONSISTENT) {
 		/* In case we have the only disk of the cluster, */
 		drbd_set_out_of_sync(mdev, e->sector, e->size);
 		e->flags |= EE_CALL_AL_COMPLETE_IO;
+		e->flags &= ~EE_MAY_SET_IN_SYNC;
 		drbd_al_begin_io(mdev, e->sector);
 	}
 
@@ -3362,7 +3211,7 @@
 		if (ns.conn == C_MASK) {
 			ns.conn = C_CONNECTED;
 			if (mdev->state.disk == D_NEGOTIATING) {
-				drbd_force_state(mdev, NS(disk, D_DISKLESS));
+				drbd_force_state(mdev, NS(disk, D_FAILED));
 			} else if (peer_state.disk == D_NEGOTIATING) {
 				dev_err(DEV, "Disk attach process on the peer node was aborted.\n");
 				peer_state.disk = D_DISKLESS;
diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c
index 9e91a25..11a75d3 100644
--- a/drivers/block/drbd/drbd_req.c
+++ b/drivers/block/drbd/drbd_req.c
@@ -258,7 +258,7 @@
 		if (!hlist_unhashed(&req->colision))
 			hlist_del(&req->colision);
 		else
-			D_ASSERT((s & RQ_NET_MASK) == 0);
+			D_ASSERT((s & (RQ_NET_MASK & ~RQ_NET_DONE)) == 0);
 
 		/* for writes we need to do some extra housekeeping */
 		if (rw == WRITE)
@@ -813,7 +813,8 @@
 			     mdev->state.conn >= C_CONNECTED));
 
 	if (!(local || remote) && !is_susp(mdev->state)) {
-		dev_err(DEV, "IO ERROR: neither local nor remote disk\n");
+		if (__ratelimit(&drbd_ratelimit_state))
+			dev_err(DEV, "IO ERROR: neither local nor remote disk\n");
 		goto fail_free_complete;
 	}
 
@@ -942,12 +943,21 @@
 	if (local) {
 		req->private_bio->bi_bdev = mdev->ldev->backing_bdev;
 
-		if (FAULT_ACTIVE(mdev, rw == WRITE ? DRBD_FAULT_DT_WR
-				     : rw == READ  ? DRBD_FAULT_DT_RD
-				     :               DRBD_FAULT_DT_RA))
+		/* State may have changed since we grabbed our reference on the
+		 * mdev->ldev member. Double check, and short-circuit to endio.
+		 * In case the last activity log transaction failed to get on
+		 * stable storage, and this is a WRITE, we may not even submit
+		 * this bio. */
+		if (get_ldev(mdev)) {
+			if (FAULT_ACTIVE(mdev, rw == WRITE ? DRBD_FAULT_DT_WR
+					     : rw == READ  ? DRBD_FAULT_DT_RD
+					     :               DRBD_FAULT_DT_RA))
+				bio_endio(req->private_bio, -EIO);
+			else
+				generic_make_request(req->private_bio);
+			put_ldev(mdev);
+		} else
 			bio_endio(req->private_bio, -EIO);
-		else
-			generic_make_request(req->private_bio);
 	}
 
 	/* we need to plug ALWAYS since we possibly need to kick lo_dev.
@@ -1022,20 +1032,6 @@
 		return 0;
 	}
 
-	/* Reject barrier requests if we know the underlying device does
-	 * not support them.
-	 * XXX: Need to get this info from peer as well some how so we
-	 * XXX: reject if EITHER side/data/metadata area does not support them.
-	 *
-	 * because of those XXX, this is not yet enabled,
-	 * i.e. in drbd_init_set_defaults we set the NO_BARRIER_SUPP bit.
-	 */
-	if (unlikely(bio->bi_rw & REQ_HARDBARRIER) && test_bit(NO_BARRIER_SUPP, &mdev->flags)) {
-		/* dev_warn(DEV, "Rejecting barrier request as underlying device does not support\n"); */
-		bio_endio(bio, -EOPNOTSUPP);
-		return 0;
-	}
-
 	/*
 	 * what we "blindly" assume:
 	 */
diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c
index 108d580..b0551ba 100644
--- a/drivers/block/drbd/drbd_worker.c
+++ b/drivers/block/drbd/drbd_worker.c
@@ -102,12 +102,6 @@
 	put_ldev(mdev);
 }
 
-static int is_failed_barrier(int ee_flags)
-{
-	return (ee_flags & (EE_IS_BARRIER|EE_WAS_ERROR|EE_RESUBMITTED))
-			== (EE_IS_BARRIER|EE_WAS_ERROR);
-}
-
 /* writes on behalf of the partner, or resync writes,
  * "submitted" by the receiver, final stage.  */
 static void drbd_endio_write_sec_final(struct drbd_epoch_entry *e) __releases(local)
@@ -119,21 +113,6 @@
 	int is_syncer_req;
 	int do_al_complete_io;
 
-	/* if this is a failed barrier request, disable use of barriers,
-	 * and schedule for resubmission */
-	if (is_failed_barrier(e->flags)) {
-		drbd_bump_write_ordering(mdev, WO_bdev_flush);
-		spin_lock_irqsave(&mdev->req_lock, flags);
-		list_del(&e->w.list);
-		e->flags = (e->flags & ~EE_WAS_ERROR) | EE_RESUBMITTED;
-		e->w.cb = w_e_reissue;
-		/* put_ldev actually happens below, once we come here again. */
-		__release(local);
-		spin_unlock_irqrestore(&mdev->req_lock, flags);
-		drbd_queue_work(&mdev->data.work, &e->w);
-		return;
-	}
-
 	D_ASSERT(e->block_id != ID_VACANT);
 
 	/* after we moved e to done_ee,
@@ -925,7 +904,7 @@
 	drbd_md_sync(mdev);
 
 	if (test_and_clear_bit(WRITE_BM_AFTER_RESYNC, &mdev->flags)) {
-		dev_warn(DEV, "Writing the whole bitmap, due to failed kmalloc\n");
+		dev_info(DEV, "Writing the whole bitmap\n");
 		drbd_queue_bitmap_io(mdev, &drbd_bm_write, NULL, "write from resync_finished");
 	}
 
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index 1e5284e..7ea0bea 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -481,12 +481,6 @@
 	if (bio_rw(bio) == WRITE) {
 		struct file *file = lo->lo_backing_file;
 
-		/* REQ_HARDBARRIER is deprecated */
-		if (bio->bi_rw & REQ_HARDBARRIER) {
-			ret = -EOPNOTSUPP;
-			goto out;
-		}
-
 		if (bio->bi_rw & REQ_FLUSH) {
 			ret = vfs_fsync(file, 0);
 			if (unlikely(ret && ret != -EINVAL)) {
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 06e2812..255035c 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -289,8 +289,6 @@
 
 	ring_req->operation = rq_data_dir(req) ?
 		BLKIF_OP_WRITE : BLKIF_OP_READ;
-	if (req->cmd_flags & REQ_HARDBARRIER)
-		ring_req->operation = BLKIF_OP_WRITE_BARRIER;
 
 	ring_req->nr_segments = blk_rq_map_sg(req->q, req, info->sg);
 	BUG_ON(ring_req->nr_segments > BLKIF_MAX_SEGMENTS_PER_REQUEST);
diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c
index 6b6760e..9272c38 100644
--- a/drivers/char/agp/intel-gtt.c
+++ b/drivers/char/agp/intel-gtt.c
@@ -1210,14 +1210,14 @@
 	unsigned int gfdt = flags & AGP_USER_CACHED_MEMORY_GFDT;
 	u32 pte_flags;
 
-	if (type_mask == AGP_USER_UNCACHED_MEMORY)
+	if (type_mask == AGP_USER_MEMORY)
 		pte_flags = GEN6_PTE_UNCACHED | I810_PTE_VALID;
 	else if (type_mask == AGP_USER_CACHED_MEMORY_LLC_MLC) {
-		pte_flags = GEN6_PTE_LLC | I810_PTE_VALID;
+		pte_flags = GEN6_PTE_LLC_MLC | I810_PTE_VALID;
 		if (gfdt)
 			pte_flags |= GEN6_PTE_GFDT;
 	} else { /* set 'normal'/'cached' to LLC by default */
-		pte_flags = GEN6_PTE_LLC_MLC | I810_PTE_VALID;
+		pte_flags = GEN6_PTE_LLC | I810_PTE_VALID;
 		if (gfdt)
 			pte_flags |= GEN6_PTE_GFDT;
 	}
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index dcbeb98..f7af91c 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -276,7 +276,7 @@
 	struct drm_crtc *tmp;
 	int crtc_mask = 1;
 
-	WARN(!crtc, "checking null crtc?");
+	WARN(!crtc, "checking null crtc?\n");
 
 	dev = crtc->dev;
 
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index c1a2621..a245d17 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -240,7 +240,7 @@
 			.addr	= DDC_ADDR,
 			.flags	= I2C_M_RD,
 			.len	= len,
-			.buf	= buf + start,
+			.buf	= buf,
 		}
 	};
 
@@ -253,7 +253,7 @@
 static u8 *
 drm_do_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter)
 {
-	int i, j = 0;
+	int i, j = 0, valid_extensions = 0;
 	u8 *block, *new;
 
 	if ((block = kmalloc(EDID_LENGTH, GFP_KERNEL)) == NULL)
@@ -280,14 +280,28 @@
 
 	for (j = 1; j <= block[0x7e]; j++) {
 		for (i = 0; i < 4; i++) {
-			if (drm_do_probe_ddc_edid(adapter, block, j,
-						  EDID_LENGTH))
+			if (drm_do_probe_ddc_edid(adapter,
+				  block + (valid_extensions + 1) * EDID_LENGTH,
+				  j, EDID_LENGTH))
 				goto out;
-			if (drm_edid_block_valid(block + j * EDID_LENGTH))
+			if (drm_edid_block_valid(block + (valid_extensions + 1) * EDID_LENGTH)) {
+				valid_extensions++;
 				break;
+			}
 		}
 		if (i == 4)
-			goto carp;
+			dev_warn(connector->dev->dev,
+			 "%s: Ignoring invalid EDID block %d.\n",
+			 drm_get_connector_name(connector), j);
+	}
+
+	if (valid_extensions != block[0x7e]) {
+		block[EDID_LENGTH-1] += block[0x7e] - valid_extensions;
+		block[0x7e] = valid_extensions;
+		new = krealloc(block, (valid_extensions + 1) * EDID_LENGTH, GFP_KERNEL);
+		if (!new)
+			goto out;
+		block = new;
 	}
 
 	return block;
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 3467dd4..80745f8 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -44,7 +44,7 @@
 module_param_named(fbpercrtc, i915_fbpercrtc, int, 0400);
 
 unsigned int i915_powersave = 1;
-module_param_named(powersave, i915_powersave, int, 0400);
+module_param_named(powersave, i915_powersave, int, 0600);
 
 unsigned int i915_lvds_downclock = 0;
 module_param_named(lvds_downclock, i915_lvds_downclock, int, 0400);
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 2c2c19b..90414ae 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1321,6 +1321,7 @@
 
 #define INTEL_PCH_TYPE(dev) (((struct drm_i915_private *)(dev)->dev_private)->pch_type)
 #define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT)
+#define HAS_PCH_IBX(dev) (INTEL_PCH_TYPE(dev) == PCH_IBX)
 
 #define PRIMARY_RINGBUFFER_SIZE         (128*1024)
 
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 8eb8453..ef188e3 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2172,7 +2172,7 @@
 static int i915_ring_idle(struct drm_device *dev,
 			  struct intel_ring_buffer *ring)
 {
-	if (list_empty(&ring->gpu_write_list))
+	if (list_empty(&ring->gpu_write_list) && list_empty(&ring->active_list))
 		return 0;
 
 	i915_gem_flush_ring(dev, NULL, ring,
@@ -2190,9 +2190,7 @@
 	int ret;
 
 	lists_empty = (list_empty(&dev_priv->mm.flushing_list) &&
-		       list_empty(&dev_priv->render_ring.active_list) &&
-		       list_empty(&dev_priv->bsd_ring.active_list) &&
-		       list_empty(&dev_priv->blt_ring.active_list));
+		       list_empty(&dev_priv->mm.active_list));
 	if (lists_empty)
 		return 0;
 
@@ -3108,7 +3106,8 @@
 	 * write domain
 	 */
 	if (obj->write_domain &&
-	    obj->write_domain != obj->pending_read_domains) {
+	    (obj->write_domain != obj->pending_read_domains ||
+	     obj_priv->ring != ring)) {
 		flush_domains |= obj->write_domain;
 		invalidate_domains |=
 			obj->pending_read_domains & ~obj->write_domain;
@@ -3497,6 +3496,52 @@
 	return 0;
 }
 
+static int
+i915_gem_execbuffer_move_to_gpu(struct drm_device *dev,
+				struct drm_file *file,
+				struct intel_ring_buffer *ring,
+				struct drm_gem_object **objects,
+				int count)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int ret, i;
+
+	/* Zero the global flush/invalidate flags. These
+	 * will be modified as new domains are computed
+	 * for each object
+	 */
+	dev->invalidate_domains = 0;
+	dev->flush_domains = 0;
+	dev_priv->mm.flush_rings = 0;
+	for (i = 0; i < count; i++)
+		i915_gem_object_set_to_gpu_domain(objects[i], ring);
+
+	if (dev->invalidate_domains | dev->flush_domains) {
+#if WATCH_EXEC
+		DRM_INFO("%s: invalidate_domains %08x flush_domains %08x\n",
+			  __func__,
+			 dev->invalidate_domains,
+			 dev->flush_domains);
+#endif
+		i915_gem_flush(dev, file,
+			       dev->invalidate_domains,
+			       dev->flush_domains,
+			       dev_priv->mm.flush_rings);
+	}
+
+	for (i = 0; i < count; i++) {
+		struct drm_i915_gem_object *obj = to_intel_bo(objects[i]);
+		/* XXX replace with semaphores */
+		if (obj->ring && ring != obj->ring) {
+			ret = i915_gem_object_wait_rendering(&obj->base, true);
+			if (ret)
+				return ret;
+		}
+	}
+
+	return 0;
+}
+
 /* Throttle our rendering by waiting until the ring has completed our requests
  * emitted over 20 msec ago.
  *
@@ -3757,33 +3802,10 @@
 		goto err;
 	}
 
-	/* Zero the global flush/invalidate flags. These
-	 * will be modified as new domains are computed
-	 * for each object
-	 */
-	dev->invalidate_domains = 0;
-	dev->flush_domains = 0;
-	dev_priv->mm.flush_rings = 0;
-
-	for (i = 0; i < args->buffer_count; i++) {
-		struct drm_gem_object *obj = object_list[i];
-
-		/* Compute new gpu domains and update invalidate/flush */
-		i915_gem_object_set_to_gpu_domain(obj, ring);
-	}
-
-	if (dev->invalidate_domains | dev->flush_domains) {
-#if WATCH_EXEC
-		DRM_INFO("%s: invalidate_domains %08x flush_domains %08x\n",
-			  __func__,
-			 dev->invalidate_domains,
-			 dev->flush_domains);
-#endif
-		i915_gem_flush(dev, file,
-			       dev->invalidate_domains,
-			       dev->flush_domains,
-			       dev_priv->mm.flush_rings);
-	}
+	ret = i915_gem_execbuffer_move_to_gpu(dev, file, ring,
+					      object_list, args->buffer_count);
+	if (ret)
+		goto err;
 
 	for (i = 0; i < args->buffer_count; i++) {
 		struct drm_gem_object *obj = object_list[i];
@@ -4043,8 +4065,7 @@
 			alignment = i915_gem_get_gtt_alignment(obj);
 		if (obj_priv->gtt_offset & (alignment - 1)) {
 			WARN(obj_priv->pin_count,
-			     "bo is already pinned with incorrect alignment:"
-			     " offset=%x, req.alignment=%x\n",
+			     "bo is already pinned with incorrect alignment: offset=%x, req.alignment=%x\n",
 			     obj_priv->gtt_offset, alignment);
 			ret = i915_gem_object_unbind(obj);
 			if (ret)
@@ -4856,17 +4877,24 @@
 		     struct drm_file *file_priv)
 {
 	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
-	void *obj_addr;
-	int ret;
-	char __user *user_data;
+	void *vaddr = obj_priv->phys_obj->handle->vaddr + args->offset;
+	char __user *user_data = (char __user *) (uintptr_t) args->data_ptr;
 
-	user_data = (char __user *) (uintptr_t) args->data_ptr;
-	obj_addr = obj_priv->phys_obj->handle->vaddr + args->offset;
+	DRM_DEBUG_DRIVER("vaddr %p, %lld\n", vaddr, args->size);
 
-	DRM_DEBUG_DRIVER("obj_addr %p, %lld\n", obj_addr, args->size);
-	ret = copy_from_user(obj_addr, user_data, args->size);
-	if (ret)
-		return -EFAULT;
+	if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) {
+		unsigned long unwritten;
+
+		/* The physical object once assigned is fixed for the lifetime
+		 * of the obj, so we can safely drop the lock and continue
+		 * to access vaddr.
+		 */
+		mutex_unlock(&dev->struct_mutex);
+		unwritten = copy_from_user(vaddr, user_data, args->size);
+		mutex_lock(&dev->struct_mutex);
+		if (unwritten)
+			return -EFAULT;
+	}
 
 	drm_agp_chipset_flush(dev);
 	return 0;
@@ -4900,9 +4928,7 @@
 	int lists_empty;
 
 	lists_empty = list_empty(&dev_priv->mm.flushing_list) &&
-		      list_empty(&dev_priv->render_ring.active_list) &&
-		      list_empty(&dev_priv->bsd_ring.active_list) &&
-		      list_empty(&dev_priv->blt_ring.active_list);
+		      list_empty(&dev_priv->mm.active_list);
 
 	return !lists_empty;
 }
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index 43a4013..d8ae7d1 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -165,9 +165,7 @@
 
 	lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
 		       list_empty(&dev_priv->mm.flushing_list) &&
-		       list_empty(&dev_priv->render_ring.active_list) &&
-		       list_empty(&dev_priv->bsd_ring.active_list) &&
-		       list_empty(&dev_priv->blt_ring.active_list));
+		       list_empty(&dev_priv->mm.active_list));
 	if (lists_empty)
 		return -ENOSPC;
 
@@ -184,9 +182,7 @@
 
 	lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
 		       list_empty(&dev_priv->mm.flushing_list) &&
-		       list_empty(&dev_priv->render_ring.active_list) &&
-		       list_empty(&dev_priv->bsd_ring.active_list) &&
-		       list_empty(&dev_priv->blt_ring.active_list));
+		       list_empty(&dev_priv->mm.active_list));
 	BUG_ON(!lists_empty);
 
 	return 0;
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index 989c19d..454c064 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -862,8 +862,10 @@
 	/* Clock gating state */
 	intel_init_clock_gating(dev);
 
-	if (HAS_PCH_SPLIT(dev))
+	if (HAS_PCH_SPLIT(dev)) {
 		ironlake_enable_drps(dev);
+		intel_init_emon(dev);
+	}
 
 	/* Cache mode state */
 	I915_WRITE (CACHE_MODE_0, dev_priv->saveCACHE_MODE_0 | 0xffff0000);
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 990f065..48d8fd6 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -1681,6 +1681,37 @@
 	udelay(500);
 }
 
+static void intel_fdi_normal_train(struct drm_crtc *crtc)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	int pipe = intel_crtc->pipe;
+	u32 reg, temp;
+
+	/* enable normal train */
+	reg = FDI_TX_CTL(pipe);
+	temp = I915_READ(reg);
+	temp &= ~FDI_LINK_TRAIN_NONE;
+	temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE;
+	I915_WRITE(reg, temp);
+
+	reg = FDI_RX_CTL(pipe);
+	temp = I915_READ(reg);
+	if (HAS_PCH_CPT(dev)) {
+		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
+		temp |= FDI_LINK_TRAIN_NORMAL_CPT;
+	} else {
+		temp &= ~FDI_LINK_TRAIN_NONE;
+		temp |= FDI_LINK_TRAIN_NONE;
+	}
+	I915_WRITE(reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);
+
+	/* wait one idle pattern time */
+	POSTING_READ(reg);
+	udelay(1000);
+}
+
 /* The FDI link training functions for ILK/Ibexpeak. */
 static void ironlake_fdi_link_train(struct drm_crtc *crtc)
 {
@@ -1767,27 +1798,6 @@
 
 	DRM_DEBUG_KMS("FDI train done\n");
 
-	/* enable normal train */
-	reg = FDI_TX_CTL(pipe);
-	temp = I915_READ(reg);
-	temp &= ~FDI_LINK_TRAIN_NONE;
-	temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE;
-	I915_WRITE(reg, temp);
-
-	reg = FDI_RX_CTL(pipe);
-	temp = I915_READ(reg);
-	if (HAS_PCH_CPT(dev)) {
-		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
-		temp |= FDI_LINK_TRAIN_NORMAL_CPT;
-	} else {
-		temp &= ~FDI_LINK_TRAIN_NONE;
-		temp |= FDI_LINK_TRAIN_NONE;
-	}
-	I915_WRITE(reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);
-
-	/* wait one idle pattern time */
-	POSTING_READ(reg);
-	udelay(1000);
 }
 
 static const int const snb_b_fdi_train_param [] = {
@@ -2090,6 +2100,8 @@
 	I915_WRITE(TRANS_VBLANK(pipe), I915_READ(VBLANK(pipe)));
 	I915_WRITE(TRANS_VSYNC(pipe),  I915_READ(VSYNC(pipe)));
 
+	intel_fdi_normal_train(crtc);
+
 	/* For PCH DP, enable TRANS_DP_CTL */
 	if (HAS_PCH_CPT(dev) &&
 	    intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) {
@@ -2200,9 +2212,10 @@
 	udelay(100);
 
 	/* Ironlake workaround, disable clock pointer after downing FDI */
-	I915_WRITE(FDI_RX_CHICKEN(pipe),
-		   I915_READ(FDI_RX_CHICKEN(pipe) &
-			     ~FDI_RX_PHASE_SYNC_POINTER_ENABLE));
+	if (HAS_PCH_IBX(dev))
+		I915_WRITE(FDI_RX_CHICKEN(pipe),
+			   I915_READ(FDI_RX_CHICKEN(pipe) &
+				     ~FDI_RX_PHASE_SYNC_POINTER_ENABLE));
 
 	/* still set train pattern 1 */
 	reg = FDI_TX_CTL(pipe);
@@ -5581,20 +5594,19 @@
 	fmin = (rgvmodectl & MEMMODE_FMIN_MASK);
 	fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >>
 		MEMMODE_FSTART_SHIFT;
-	fstart = fmax;
 
 	vstart = (I915_READ(PXVFREQ_BASE + (fstart * 4)) & PXVFREQ_PX_MASK) >>
 		PXVFREQ_PX_SHIFT;
 
-	dev_priv->fmax = fstart; /* IPS callback will increase this */
+	dev_priv->fmax = fmax; /* IPS callback will increase this */
 	dev_priv->fstart = fstart;
 
-	dev_priv->max_delay = fmax;
+	dev_priv->max_delay = fstart;
 	dev_priv->min_delay = fmin;
 	dev_priv->cur_delay = fstart;
 
-	DRM_DEBUG_DRIVER("fmax: %d, fmin: %d, fstart: %d\n", fmax, fmin,
-			 fstart);
+	DRM_DEBUG_DRIVER("fmax: %d, fmin: %d, fstart: %d\n",
+			 fmax, fmin, fstart);
 
 	I915_WRITE(MEMINTREN, MEMINT_CX_SUPR_EN | MEMINT_EVAL_CHG_EN);
 
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 891f4f1..c8e0055 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -1517,7 +1517,7 @@
 			status = connector_status_connected;
 	}
 
-	return bit;
+	return status;
 }
 
 /**
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 9af9f86..21551fe 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -296,6 +296,7 @@
 extern void intel_init_clock_gating(struct drm_device *dev);
 extern void ironlake_enable_drps(struct drm_device *dev);
 extern void ironlake_disable_drps(struct drm_device *dev);
+extern void intel_init_emon(struct drm_device *dev);
 
 extern int intel_pin_and_fence_fb_obj(struct drm_device *dev,
 				      struct drm_gem_object *obj,
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index f1a6499..4324a32 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -481,11 +481,8 @@
 	struct drm_device *dev = connector->dev;
 	struct drm_display_mode *mode;
 
-	if (intel_lvds->edid) {
-		drm_mode_connector_update_edid_property(connector,
-							intel_lvds->edid);
+	if (intel_lvds->edid)
 		return drm_add_edid_modes(connector, intel_lvds->edid);
-	}
 
 	mode = drm_mode_duplicate(dev, intel_lvds->fixed_mode);
 	if (mode == 0)
@@ -939,7 +936,16 @@
 	 */
 	intel_lvds->edid = drm_get_edid(connector,
 					&dev_priv->gmbus[pin].adapter);
-
+	if (intel_lvds->edid) {
+		if (drm_add_edid_modes(connector,
+				       intel_lvds->edid)) {
+			drm_mode_connector_update_edid_property(connector,
+								intel_lvds->edid);
+		} else {
+			kfree(intel_lvds->edid);
+			intel_lvds->edid = NULL;
+		}
+	}
 	if (!intel_lvds->edid) {
 		/* Didn't get an EDID, so
 		 * Set wide sync ranges so we get all modes
diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c
index 917c7dc..9b0d9a8 100644
--- a/drivers/gpu/drm/i915/intel_opregion.c
+++ b/drivers/gpu/drm/i915/intel_opregion.c
@@ -512,6 +512,6 @@
 	return 0;
 
 err_out:
-	iounmap(opregion->header);
+	iounmap(base);
 	return err;
 }
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index afb96d2..02ff0a4 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -946,7 +946,9 @@
 {
 	int uv_hscale = uv_hsubsampling(rec->flags);
 	int uv_vscale = uv_vsubsampling(rec->flags);
-	u32 stride_mask, depth, tmp;
+	u32 stride_mask;
+	int depth;
+	u32 tmp;
 
 	/* check src dimensions */
 	if (IS_845G(dev) || IS_I830(dev)) {
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 09f2dc3..b83306f 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -177,7 +177,7 @@
 
 	I915_WRITE_CTL(ring,
 			((ring->gem_object->size - PAGE_SIZE) & RING_NR_PAGES)
-			| RING_NO_REPORT | RING_VALID);
+			| RING_REPORT_64K | RING_VALID);
 
 	head = I915_READ_HEAD(ring) & HEAD_ADDR;
 	/* If the head is still not zero, the ring is dead */
@@ -654,6 +654,10 @@
 	i915_gem_object_unpin(ring->gem_object);
 	drm_gem_object_unreference(ring->gem_object);
 	ring->gem_object = NULL;
+
+	if (ring->cleanup)
+		ring->cleanup(ring);
+
 	cleanup_status_page(dev, ring);
 }
 
@@ -688,6 +692,17 @@
 {
 	unsigned long end;
 	drm_i915_private_t *dev_priv = dev->dev_private;
+	u32 head;
+
+	head = intel_read_status_page(ring, 4);
+	if (head) {
+		ring->head = head & HEAD_ADDR;
+		ring->space = ring->head - (ring->tail + 8);
+		if (ring->space < 0)
+			ring->space += ring->size;
+		if (ring->space >= n)
+			return 0;
+	}
 
 	trace_i915_ring_wait_begin (dev);
 	end = jiffies + 3 * HZ;
@@ -854,19 +869,125 @@
 	/* do nothing */
 }
 
+
+/* Workaround for some steppings of SNB:
+ * each time the BLT engine ring tail is moved,
+ * the first command in the ring to be parsed
+ * must be MI_BATCH_BUFFER_START
+ */
+#define NEED_BLT_WORKAROUND(dev) \
+	(IS_GEN6(dev) && (dev->pdev->revision < 8))
+
+static inline struct drm_i915_gem_object *
+to_blt_workaround(struct intel_ring_buffer *ring)
+{
+	return ring->private;
+}
+
+static int blt_ring_init(struct drm_device *dev,
+			 struct intel_ring_buffer *ring)
+{
+	if (NEED_BLT_WORKAROUND(dev)) {
+		struct drm_i915_gem_object *obj;
+		u32 __iomem *ptr;
+		int ret;
+
+		obj = to_intel_bo(i915_gem_alloc_object(dev, 4096));
+		if (obj == NULL)
+			return -ENOMEM;
+
+		ret = i915_gem_object_pin(&obj->base, 4096);
+		if (ret) {
+			drm_gem_object_unreference(&obj->base);
+			return ret;
+		}
+
+		ptr = kmap(obj->pages[0]);
+		iowrite32(MI_BATCH_BUFFER_END, ptr);
+		iowrite32(MI_NOOP, ptr+1);
+		kunmap(obj->pages[0]);
+
+		ret = i915_gem_object_set_to_gtt_domain(&obj->base, false);
+		if (ret) {
+			i915_gem_object_unpin(&obj->base);
+			drm_gem_object_unreference(&obj->base);
+			return ret;
+		}
+
+		ring->private = obj;
+	}
+
+	return init_ring_common(dev, ring);
+}
+
+static void blt_ring_begin(struct drm_device *dev,
+			   struct intel_ring_buffer *ring,
+			  int num_dwords)
+{
+	if (ring->private) {
+		intel_ring_begin(dev, ring, num_dwords+2);
+		intel_ring_emit(dev, ring, MI_BATCH_BUFFER_START);
+		intel_ring_emit(dev, ring, to_blt_workaround(ring)->gtt_offset);
+	} else
+		intel_ring_begin(dev, ring, 4);
+}
+
+static void blt_ring_flush(struct drm_device *dev,
+			   struct intel_ring_buffer *ring,
+			   u32 invalidate_domains,
+			   u32 flush_domains)
+{
+	blt_ring_begin(dev, ring, 4);
+	intel_ring_emit(dev, ring, MI_FLUSH_DW);
+	intel_ring_emit(dev, ring, 0);
+	intel_ring_emit(dev, ring, 0);
+	intel_ring_emit(dev, ring, 0);
+	intel_ring_advance(dev, ring);
+}
+
+static u32
+blt_ring_add_request(struct drm_device *dev,
+		     struct intel_ring_buffer *ring,
+		     u32 flush_domains)
+{
+	u32 seqno = i915_gem_get_seqno(dev);
+
+	blt_ring_begin(dev, ring, 4);
+	intel_ring_emit(dev, ring, MI_STORE_DWORD_INDEX);
+	intel_ring_emit(dev, ring,
+			I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
+	intel_ring_emit(dev, ring, seqno);
+	intel_ring_emit(dev, ring, MI_USER_INTERRUPT);
+	intel_ring_advance(dev, ring);
+
+	DRM_DEBUG_DRIVER("%s %d\n", ring->name, seqno);
+	return seqno;
+}
+
+static void blt_ring_cleanup(struct intel_ring_buffer *ring)
+{
+	if (!ring->private)
+		return;
+
+	i915_gem_object_unpin(ring->private);
+	drm_gem_object_unreference(ring->private);
+	ring->private = NULL;
+}
+
 static const struct intel_ring_buffer gen6_blt_ring = {
        .name			= "blt ring",
        .id			= RING_BLT,
        .mmio_base		= BLT_RING_BASE,
        .size			= 32 * PAGE_SIZE,
-       .init			= init_ring_common,
+       .init			= blt_ring_init,
        .write_tail		= ring_write_tail,
-       .flush			= gen6_ring_flush,
-       .add_request		= ring_add_request,
+       .flush			= blt_ring_flush,
+       .add_request		= blt_ring_add_request,
        .get_seqno		= ring_status_page_get_seqno,
        .user_irq_get		= blt_ring_get_user_irq,
        .user_irq_put		= blt_ring_put_user_irq,
        .dispatch_gem_execbuffer	= gen6_ring_dispatch_gem_execbuffer,
+       .cleanup			= blt_ring_cleanup,
 };
 
 int intel_init_render_ring_buffer(struct drm_device *dev)
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index a05aff0..3126c26 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -63,6 +63,7 @@
 			struct drm_i915_gem_execbuffer2 *exec,
 			struct drm_clip_rect *cliprects,
 			uint64_t exec_offset);
+	void		(*cleanup)(struct intel_ring_buffer *ring);
 
 	/**
 	 * List of objects currently involved in rendering from the
@@ -98,6 +99,8 @@
 
 	wait_queue_head_t irq_queue;
 	drm_local_map_t map;
+
+	void *private;
 };
 
 static inline u32
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index f12a5b3..488c36c 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -2033,7 +2033,7 @@
 	u32 grbm_int_cntl = 0;
 
 	if (!rdev->irq.installed) {
-		WARN(1, "Can't enable IRQ/MSI because no handler is installed.\n");
+		WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
 		return -EINVAL;
 	}
 	/* don't enable anything if the ih is disabled */
@@ -2295,6 +2295,7 @@
 			case 0: /* D1 vblank */
 				if (disp_int & LB_D1_VBLANK_INTERRUPT) {
 					drm_handle_vblank(rdev->ddev, 0);
+					rdev->pm.vblank_sync = true;
 					wake_up(&rdev->irq.vblank_queue);
 					disp_int &= ~LB_D1_VBLANK_INTERRUPT;
 					DRM_DEBUG("IH: D1 vblank\n");
@@ -2316,6 +2317,7 @@
 			case 0: /* D2 vblank */
 				if (disp_int_cont & LB_D2_VBLANK_INTERRUPT) {
 					drm_handle_vblank(rdev->ddev, 1);
+					rdev->pm.vblank_sync = true;
 					wake_up(&rdev->irq.vblank_queue);
 					disp_int_cont &= ~LB_D2_VBLANK_INTERRUPT;
 					DRM_DEBUG("IH: D2 vblank\n");
@@ -2337,6 +2339,7 @@
 			case 0: /* D3 vblank */
 				if (disp_int_cont2 & LB_D3_VBLANK_INTERRUPT) {
 					drm_handle_vblank(rdev->ddev, 2);
+					rdev->pm.vblank_sync = true;
 					wake_up(&rdev->irq.vblank_queue);
 					disp_int_cont2 &= ~LB_D3_VBLANK_INTERRUPT;
 					DRM_DEBUG("IH: D3 vblank\n");
@@ -2358,6 +2361,7 @@
 			case 0: /* D4 vblank */
 				if (disp_int_cont3 & LB_D4_VBLANK_INTERRUPT) {
 					drm_handle_vblank(rdev->ddev, 3);
+					rdev->pm.vblank_sync = true;
 					wake_up(&rdev->irq.vblank_queue);
 					disp_int_cont3 &= ~LB_D4_VBLANK_INTERRUPT;
 					DRM_DEBUG("IH: D4 vblank\n");
@@ -2379,6 +2383,7 @@
 			case 0: /* D5 vblank */
 				if (disp_int_cont4 & LB_D5_VBLANK_INTERRUPT) {
 					drm_handle_vblank(rdev->ddev, 4);
+					rdev->pm.vblank_sync = true;
 					wake_up(&rdev->irq.vblank_queue);
 					disp_int_cont4 &= ~LB_D5_VBLANK_INTERRUPT;
 					DRM_DEBUG("IH: D5 vblank\n");
@@ -2400,6 +2405,7 @@
 			case 0: /* D6 vblank */
 				if (disp_int_cont5 & LB_D6_VBLANK_INTERRUPT) {
 					drm_handle_vblank(rdev->ddev, 5);
+					rdev->pm.vblank_sync = true;
 					wake_up(&rdev->irq.vblank_queue);
 					disp_int_cont5 &= ~LB_D6_VBLANK_INTERRUPT;
 					DRM_DEBUG("IH: D6 vblank\n");
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index 0e8f28a..8e10aa9 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -442,7 +442,7 @@
 	int r;
 
 	if (rdev->gart.table.ram.ptr) {
-		WARN(1, "R100 PCI GART already initialized.\n");
+		WARN(1, "R100 PCI GART already initialized\n");
 		return 0;
 	}
 	/* Initialize common gart structure */
@@ -516,7 +516,7 @@
 	uint32_t tmp = 0;
 
 	if (!rdev->irq.installed) {
-		WARN(1, "Can't enable IRQ/MSI because no handler is installed.\n");
+		WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
 		WREG32(R_000040_GEN_INT_CNTL, 0);
 		return -EINVAL;
 	}
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c
index 34527e6..cde1d34 100644
--- a/drivers/gpu/drm/radeon/r300.c
+++ b/drivers/gpu/drm/radeon/r300.c
@@ -91,7 +91,7 @@
 	int r;
 
 	if (rdev->gart.table.vram.robj) {
-		WARN(1, "RV370 PCIE GART already initialized.\n");
+		WARN(1, "RV370 PCIE GART already initialized\n");
 		return 0;
 	}
 	/* Initialize common gart structure */
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 33952a1..0f806cc 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -97,14 +97,8 @@
 {
 	u32 temp = (RREG32(CG_THERMAL_STATUS) & ASIC_T_MASK) >>
 		ASIC_T_SHIFT;
-	u32 actual_temp = 0;
 
-	if ((temp >> 7) & 1)
-		actual_temp = 0;
-	else
-		actual_temp = (temp >> 1) & 0xff;
-
-	return actual_temp * 1000;
+	return temp * 1000;
 }
 
 void r600_pm_get_dynpm_state(struct radeon_device *rdev)
@@ -919,7 +913,7 @@
 	int r;
 
 	if (rdev->gart.table.vram.robj) {
-		WARN(1, "R600 PCIE GART already initialized.\n");
+		WARN(1, "R600 PCIE GART already initialized\n");
 		return 0;
 	}
 	/* Initialize common gart structure */
@@ -2995,7 +2989,7 @@
 	u32 hdmi1, hdmi2;
 
 	if (!rdev->irq.installed) {
-		WARN(1, "Can't enable IRQ/MSI because no handler is installed.\n");
+		WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
 		return -EINVAL;
 	}
 	/* don't enable anything if the ih is disabled */
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index 04cac7e..87ead09 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -526,8 +526,6 @@
 	if (crev < 2)
 		return false;
 
-	router.valid = false;
-
 	obj_header = (ATOM_OBJECT_HEADER *) (ctx->bios + data_offset);
 	path_obj = (ATOM_DISPLAY_OBJECT_PATH_TABLE *)
 	    (ctx->bios + data_offset +
@@ -624,6 +622,8 @@
 			if (connector_type == DRM_MODE_CONNECTOR_Unknown)
 				continue;
 
+			router.ddc_valid = false;
+			router.cd_valid = false;
 			for (j = 0; j < ((le16_to_cpu(path->usSize) - 8) / 2); j++) {
 				uint8_t grph_obj_id, grph_obj_num, grph_obj_type;
 
@@ -647,9 +647,8 @@
 								 usDeviceTag));
 
 				} else if (grph_obj_type == GRAPH_OBJECT_TYPE_ROUTER) {
-					router.valid = false;
 					for (k = 0; k < router_obj->ucNumberOfObjects; k++) {
-						u16 router_obj_id = le16_to_cpu(router_obj->asObjects[j].usObjectID);
+						u16 router_obj_id = le16_to_cpu(router_obj->asObjects[k].usObjectID);
 						if (le16_to_cpu(path->usGraphicObjIds[j]) == router_obj_id) {
 							ATOM_COMMON_RECORD_HEADER *record = (ATOM_COMMON_RECORD_HEADER *)
 								(ctx->bios + data_offset +
@@ -657,6 +656,7 @@
 							ATOM_I2C_RECORD *i2c_record;
 							ATOM_I2C_ID_CONFIG_ACCESS *i2c_config;
 							ATOM_ROUTER_DDC_PATH_SELECT_RECORD *ddc_path;
+							ATOM_ROUTER_DATA_CLOCK_PATH_SELECT_RECORD *cd_path;
 							ATOM_SRC_DST_TABLE_FOR_ONE_OBJECT *router_src_dst_table =
 								(ATOM_SRC_DST_TABLE_FOR_ONE_OBJECT *)
 								(ctx->bios + data_offset +
@@ -690,10 +690,18 @@
 								case ATOM_ROUTER_DDC_PATH_SELECT_RECORD_TYPE:
 									ddc_path = (ATOM_ROUTER_DDC_PATH_SELECT_RECORD *)
 										record;
-									router.valid = true;
-									router.mux_type = ddc_path->ucMuxType;
-									router.mux_control_pin = ddc_path->ucMuxControlPin;
-									router.mux_state = ddc_path->ucMuxState[enum_id];
+									router.ddc_valid = true;
+									router.ddc_mux_type = ddc_path->ucMuxType;
+									router.ddc_mux_control_pin = ddc_path->ucMuxControlPin;
+									router.ddc_mux_state = ddc_path->ucMuxState[enum_id];
+									break;
+								case ATOM_ROUTER_DATA_CLOCK_PATH_SELECT_RECORD_TYPE:
+									cd_path = (ATOM_ROUTER_DATA_CLOCK_PATH_SELECT_RECORD *)
+										record;
+									router.cd_valid = true;
+									router.cd_mux_type = cd_path->ucMuxType;
+									router.cd_mux_control_pin = cd_path->ucMuxControlPin;
+									router.cd_mux_state = cd_path->ucMuxState[enum_id];
 									break;
 								}
 								record = (ATOM_COMMON_RECORD_HEADER *)
@@ -860,7 +868,8 @@
 	size_t bc_size = sizeof(*bios_connectors) * ATOM_MAX_SUPPORTED_DEVICE;
 	struct radeon_router router;
 
-	router.valid = false;
+	router.ddc_valid = false;
+	router.cd_valid = false;
 
 	bios_connectors = kzalloc(bc_size, GFP_KERNEL);
 	if (!bios_connectors)
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index 4dac4b0..fe6c747 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -183,13 +183,13 @@
 					continue;
 
 				if (priority == true) {
-					DRM_INFO("1: conflicting encoders switching off %s\n", drm_get_connector_name(conflict));
-					DRM_INFO("in favor of %s\n", drm_get_connector_name(connector));
+					DRM_DEBUG_KMS("1: conflicting encoders switching off %s\n", drm_get_connector_name(conflict));
+					DRM_DEBUG_KMS("in favor of %s\n", drm_get_connector_name(connector));
 					conflict->status = connector_status_disconnected;
 					radeon_connector_update_scratch_regs(conflict, connector_status_disconnected);
 				} else {
-					DRM_INFO("2: conflicting encoders switching off %s\n", drm_get_connector_name(connector));
-					DRM_INFO("in favor of %s\n", drm_get_connector_name(conflict));
+					DRM_DEBUG_KMS("2: conflicting encoders switching off %s\n", drm_get_connector_name(connector));
+					DRM_DEBUG_KMS("in favor of %s\n", drm_get_connector_name(conflict));
 					current_status = connector_status_disconnected;
 				}
 				break;
@@ -432,13 +432,13 @@
 			    mode->vdisplay == native_mode->vdisplay) {
 				*native_mode = *mode;
 				drm_mode_set_crtcinfo(native_mode, CRTC_INTERLACE_HALVE_V);
-				DRM_INFO("Determined LVDS native mode details from EDID\n");
+				DRM_DEBUG_KMS("Determined LVDS native mode details from EDID\n");
 				break;
 			}
 		}
 	}
 	if (!native_mode->clock) {
-		DRM_INFO("No LVDS native mode details, disabling RMX\n");
+		DRM_DEBUG_KMS("No LVDS native mode details, disabling RMX\n");
 		radeon_encoder->rmx_type = RMX_OFF;
 	}
 }
@@ -1116,7 +1116,7 @@
 				radeon_connector->shared_ddc = true;
 				shared_ddc = true;
 			}
-			if (radeon_connector->router_bus && router->valid &&
+			if (radeon_connector->router_bus && router->ddc_valid &&
 			    (radeon_connector->router.router_id == router->router_id)) {
 				radeon_connector->shared_ddc = false;
 				shared_ddc = false;
@@ -1136,7 +1136,7 @@
 	radeon_connector->connector_object_id = connector_object_id;
 	radeon_connector->hpd = *hpd;
 	radeon_connector->router = *router;
-	if (router->valid) {
+	if (router->ddc_valid || router->cd_valid) {
 		radeon_connector->router_bus = radeon_i2c_lookup(rdev, &router->i2c_info);
 		if (!radeon_connector->router_bus)
 			goto failed;
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index 0383631..1df4dc6 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -315,10 +315,14 @@
 				 radeon_connector->ddc_bus->rec.en_data_reg,
 				 radeon_connector->ddc_bus->rec.y_clk_reg,
 				 radeon_connector->ddc_bus->rec.y_data_reg);
-			if (radeon_connector->router_bus)
+			if (radeon_connector->router.ddc_valid)
 				DRM_INFO("  DDC Router 0x%x/0x%x\n",
-					 radeon_connector->router.mux_control_pin,
-					 radeon_connector->router.mux_state);
+					 radeon_connector->router.ddc_mux_control_pin,
+					 radeon_connector->router.ddc_mux_state);
+			if (radeon_connector->router.cd_valid)
+				DRM_INFO("  Clock/Data Router 0x%x/0x%x\n",
+					 radeon_connector->router.cd_mux_control_pin,
+					 radeon_connector->router.cd_mux_state);
 		} else {
 			if (connector->connector_type == DRM_MODE_CONNECTOR_VGA ||
 			    connector->connector_type == DRM_MODE_CONNECTOR_DVII ||
@@ -398,8 +402,8 @@
 	int ret = 0;
 
 	/* on hw with routers, select right port */
-	if (radeon_connector->router.valid)
-		radeon_router_select_port(radeon_connector);
+	if (radeon_connector->router.ddc_valid)
+		radeon_router_select_ddc_port(radeon_connector);
 
 	if ((radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
 	    (radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP)) {
@@ -432,8 +436,8 @@
 	int ret = 0;
 
 	/* on hw with routers, select right port */
-	if (radeon_connector->router.valid)
-		radeon_router_select_port(radeon_connector);
+	if (radeon_connector->router.ddc_valid)
+		radeon_router_select_ddc_port(radeon_connector);
 
 	if (!radeon_connector->ddc_bus)
 		return -1;
diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c
index ae58b68..f678257 100644
--- a/drivers/gpu/drm/radeon/radeon_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_encoders.c
@@ -1520,6 +1520,7 @@
 static void radeon_atom_encoder_prepare(struct drm_encoder *encoder)
 {
 	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
 
 	if (radeon_encoder->active_device &
 	    (ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT)) {
@@ -1531,6 +1532,13 @@
 	radeon_atom_output_lock(encoder, true);
 	radeon_atom_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
 
+	/* select the clock/data port if it uses a router */
+	if (connector) {
+		struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+		if (radeon_connector->router.cd_valid)
+			radeon_router_select_cd_port(radeon_connector);
+	}
+
 	/* this is needed for the pll/ss setup to work correctly in some cases */
 	atombios_set_encoder_crtc_source(encoder);
 }
@@ -1547,6 +1555,23 @@
 	struct radeon_device *rdev = dev->dev_private;
 	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
 	struct radeon_encoder_atom_dig *dig;
+
+	/* check for pre-DCE3 cards with shared encoders;
+	 * can't really use the links individually, so don't disable
+	 * the encoder if it's in use by another connector
+	 */
+	if (!ASIC_IS_DCE3(rdev)) {
+		struct drm_encoder *other_encoder;
+		struct radeon_encoder *other_radeon_encoder;
+
+		list_for_each_entry(other_encoder, &dev->mode_config.encoder_list, head) {
+			other_radeon_encoder = to_radeon_encoder(other_encoder);
+			if ((radeon_encoder->encoder_id == other_radeon_encoder->encoder_id) &&
+			    drm_helper_encoder_in_use(other_encoder))
+				goto disable_done;
+		}
+	}
+
 	radeon_atom_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
 
 	switch (radeon_encoder->encoder_id) {
@@ -1586,6 +1611,7 @@
 		break;
 	}
 
+disable_done:
 	if (radeon_encoder_is_digital(encoder)) {
 		if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI)
 			r600_hdmi_disable(encoder);
diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
index 216392d..daacb28 100644
--- a/drivers/gpu/drm/radeon/radeon_fence.c
+++ b/drivers/gpu/drm/radeon/radeon_fence.c
@@ -240,7 +240,8 @@
 		 */
 		if (seq == rdev->fence_drv.last_seq && radeon_gpu_is_lockup(rdev)) {
 			/* good news we believe it's a lockup */
-			WARN(1, "GPU lockup (waiting for 0x%08X last fence id 0x%08X)\n", fence->seq, seq);
+			WARN(1, "GPU lockup (waiting for 0x%08X last fence id 0x%08X)\n",
+			     fence->seq, seq);
 			/* FIXME: what should we do ? marking everyone
 			 * as signaled for now
 			 */
diff --git a/drivers/gpu/drm/radeon/radeon_i2c.c b/drivers/gpu/drm/radeon/radeon_i2c.c
index 6a13ee3..0cfbba0 100644
--- a/drivers/gpu/drm/radeon/radeon_i2c.c
+++ b/drivers/gpu/drm/radeon/radeon_i2c.c
@@ -53,8 +53,8 @@
 	};
 
 	/* on hw with routers, select right port */
-	if (radeon_connector->router.valid)
-		radeon_router_select_port(radeon_connector);
+	if (radeon_connector->router.ddc_valid)
+		radeon_router_select_ddc_port(radeon_connector);
 
 	ret = i2c_transfer(&radeon_connector->ddc_bus->adapter, msgs, 2);
 	if (ret == 2)
@@ -1084,26 +1084,51 @@
 			  addr, val);
 }
 
-/* router switching */
-void radeon_router_select_port(struct radeon_connector *radeon_connector)
+/* ddc router switching */
+void radeon_router_select_ddc_port(struct radeon_connector *radeon_connector)
 {
 	u8 val;
 
-	if (!radeon_connector->router.valid)
+	if (!radeon_connector->router.ddc_valid)
 		return;
 
 	radeon_i2c_get_byte(radeon_connector->router_bus,
 			    radeon_connector->router.i2c_addr,
 			    0x3, &val);
-	val &= radeon_connector->router.mux_control_pin;
+	val &= ~radeon_connector->router.ddc_mux_control_pin;
 	radeon_i2c_put_byte(radeon_connector->router_bus,
 			    radeon_connector->router.i2c_addr,
 			    0x3, val);
 	radeon_i2c_get_byte(radeon_connector->router_bus,
 			    radeon_connector->router.i2c_addr,
 			    0x1, &val);
-	val &= radeon_connector->router.mux_control_pin;
-	val |= radeon_connector->router.mux_state;
+	val &= ~radeon_connector->router.ddc_mux_control_pin;
+	val |= radeon_connector->router.ddc_mux_state;
+	radeon_i2c_put_byte(radeon_connector->router_bus,
+			    radeon_connector->router.i2c_addr,
+			    0x1, val);
+}
+
+/* clock/data router switching */
+void radeon_router_select_cd_port(struct radeon_connector *radeon_connector)
+{
+	u8 val;
+
+	if (!radeon_connector->router.cd_valid)
+		return;
+
+	radeon_i2c_get_byte(radeon_connector->router_bus,
+			    radeon_connector->router.i2c_addr,
+			    0x3, &val);
+	val &= ~radeon_connector->router.cd_mux_control_pin;
+	radeon_i2c_put_byte(radeon_connector->router_bus,
+			    radeon_connector->router.i2c_addr,
+			    0x3, val);
+	radeon_i2c_get_byte(radeon_connector->router_bus,
+			    radeon_connector->router.i2c_addr,
+			    0x1, &val);
+	val &= ~radeon_connector->router.cd_mux_control_pin;
+	val |= radeon_connector->router.cd_mux_state;
 	radeon_i2c_put_byte(radeon_connector->router_bus,
 			    radeon_connector->router.i2c_addr,
 			    0x1, val);
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index 9245716..680f576 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -401,13 +401,19 @@
 };
 
 struct radeon_router {
-	bool valid;
 	u32 router_id;
 	struct radeon_i2c_bus_rec i2c_info;
 	u8 i2c_addr;
-	u8 mux_type;
-	u8 mux_control_pin;
-	u8 mux_state;
+	/* i2c mux */
+	bool ddc_valid;
+	u8 ddc_mux_type;
+	u8 ddc_mux_control_pin;
+	u8 ddc_mux_state;
+	/* clock/data mux */
+	bool cd_valid;
+	u8 cd_mux_type;
+	u8 cd_mux_control_pin;
+	u8 cd_mux_state;
 };
 
 struct radeon_connector {
@@ -488,7 +494,8 @@
 				u8 slave_addr,
 				u8 addr,
 				u8 val);
-extern void radeon_router_select_port(struct radeon_connector *radeon_connector);
+extern void radeon_router_select_ddc_port(struct radeon_connector *radeon_connector);
+extern void radeon_router_select_cd_port(struct radeon_connector *radeon_connector);
 extern bool radeon_ddc_probe(struct radeon_connector *radeon_connector);
 extern int radeon_ddc_get_modes(struct radeon_connector *radeon_connector);
 
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index d7ab914..8eb1834 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -102,6 +102,8 @@
 		type = ttm_bo_type_device;
 	}
 	*bo_ptr = NULL;
+
+retry:
 	bo = kzalloc(sizeof(struct radeon_bo), GFP_KERNEL);
 	if (bo == NULL)
 		return -ENOMEM;
@@ -109,8 +111,6 @@
 	bo->gobj = gobj;
 	bo->surface_reg = -1;
 	INIT_LIST_HEAD(&bo->list);
-
-retry:
 	radeon_ttm_placement_from_domain(bo, domain);
 	/* Kernel allocation are uninterruptible */
 	mutex_lock(&rdev->vram_mutex);
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index fe95bb3..01c2c73 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -689,7 +689,8 @@
 	gtt = container_of(backend, struct radeon_ttm_backend, backend);
 	gtt->offset = bo_mem->start << PAGE_SHIFT;
 	if (!gtt->num_pages) {
-		WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n", gtt->num_pages, bo_mem, backend);
+		WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n",
+		     gtt->num_pages, bo_mem, backend);
 	}
 	r = radeon_gart_bind(gtt->rdev, gtt->offset,
 			     gtt->num_pages, gtt->pages);
diff --git a/drivers/gpu/drm/radeon/rs400.c b/drivers/gpu/drm/radeon/rs400.c
index f683e51..5512e4e 100644
--- a/drivers/gpu/drm/radeon/rs400.c
+++ b/drivers/gpu/drm/radeon/rs400.c
@@ -78,7 +78,7 @@
 	int r;
 
 	if (rdev->gart.table.ram.ptr) {
-		WARN(1, "RS400 GART already initialized.\n");
+		WARN(1, "RS400 GART already initialized\n");
 		return 0;
 	}
 	/* Check gart size */
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
index b091a1f..f1c6e02c 100644
--- a/drivers/gpu/drm/radeon/rs600.c
+++ b/drivers/gpu/drm/radeon/rs600.c
@@ -375,7 +375,7 @@
 	int r;
 
 	if (rdev->gart.table.vram.robj) {
-		WARN(1, "RS600 GART already initialized.\n");
+		WARN(1, "RS600 GART already initialized\n");
 		return 0;
 	}
 	/* Initialize common gart structure */
@@ -505,7 +505,7 @@
 		~S_007D18_DC_HOT_PLUG_DETECT2_INT_EN(1);
 
 	if (!rdev->irq.installed) {
-		WARN(1, "Can't enable IRQ/MSI because no handler is installed.\n");
+		WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
 		WREG32(R_000040_GEN_INT_CNTL, 0);
 		return -EINVAL;
 	}
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index a1cb783..3ca77dc 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -27,14 +27,6 @@
 /*
  * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
  */
-/* Notes:
- *
- * We store bo pointer in drm_mm_node struct so we know which bo own a
- * specific node. There is no protection on the pointer, thus to make
- * sure things don't go berserk you have to access this pointer while
- * holding the global lru lock and make sure anytime you free a node you
- * reset the pointer to NULL.
- */
 
 #include "ttm/ttm_module.h"
 #include "ttm/ttm_bo_driver.h"
@@ -45,6 +37,7 @@
 #include <linux/mm.h>
 #include <linux/file.h>
 #include <linux/module.h>
+#include <asm/atomic.h>
 
 #define TTM_ASSERT_LOCKED(param)
 #define TTM_DEBUG(fmt, arg...)
@@ -452,6 +445,11 @@
 	ttm_bo_mem_put(bo, &bo->mem);
 
 	atomic_set(&bo->reserved, 0);
+
+	/*
+	 * Make sure processes trying to reserve really pick up the change.
+	 */
+	smp_mb__after_atomic_dec();
 	wake_up_all(&bo->event_queue);
 }
 
@@ -460,7 +458,7 @@
 	struct ttm_bo_device *bdev = bo->bdev;
 	struct ttm_bo_global *glob = bo->glob;
 	struct ttm_bo_driver *driver;
-	void *sync_obj;
+	void *sync_obj = NULL;
 	void *sync_obj_arg;
 	int put_count;
 	int ret;
@@ -495,17 +493,20 @@
 		spin_lock(&glob->lru_lock);
 	}
 queue:
-	sync_obj = bo->sync_obj;
-	sync_obj_arg = bo->sync_obj_arg;
 	driver = bdev->driver;
+	if (bo->sync_obj)
+		sync_obj = driver->sync_obj_ref(bo->sync_obj);
+	sync_obj_arg = bo->sync_obj_arg;
 
 	kref_get(&bo->list_kref);
 	list_add_tail(&bo->ddestroy, &bdev->ddestroy);
 	spin_unlock(&glob->lru_lock);
 	spin_unlock(&bo->lock);
 
-	if (sync_obj)
+	if (sync_obj) {
 		driver->sync_obj_flush(sync_obj, sync_obj_arg);
+		driver->sync_obj_unref(&sync_obj);
+	}
 	schedule_delayed_work(&bdev->wq,
 			      ((HZ / 100) < 1) ? 1 : HZ / 100);
 }
@@ -822,7 +823,6 @@
 					bool no_wait_gpu)
 {
 	struct ttm_bo_device *bdev = bo->bdev;
-	struct ttm_bo_global *glob = bdev->glob;
 	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
 	int ret;
 
@@ -832,12 +832,6 @@
 			return ret;
 		if (mem->mm_node)
 			break;
-		spin_lock(&glob->lru_lock);
-		if (list_empty(&man->lru)) {
-			spin_unlock(&glob->lru_lock);
-			break;
-		}
-		spin_unlock(&glob->lru_lock);
 		ret = ttm_mem_evict_first(bdev, mem_type, interruptible,
 						no_wait_reserve, no_wait_gpu);
 		if (unlikely(ret != 0))
@@ -1125,35 +1119,9 @@
 int ttm_bo_check_placement(struct ttm_buffer_object *bo,
 				struct ttm_placement *placement)
 {
-	int i;
+	BUG_ON((placement->fpfn || placement->lpfn) &&
+	       (bo->mem.num_pages > (placement->lpfn - placement->fpfn)));
 
-	if (placement->fpfn || placement->lpfn) {
-		if (bo->mem.num_pages > (placement->lpfn - placement->fpfn)) {
-			printk(KERN_ERR TTM_PFX "Page number range to small "
-				"Need %lu pages, range is [%u, %u]\n",
-				bo->mem.num_pages, placement->fpfn,
-				placement->lpfn);
-			return -EINVAL;
-		}
-	}
-	for (i = 0; i < placement->num_placement; i++) {
-		if (!capable(CAP_SYS_ADMIN)) {
-			if (placement->placement[i] & TTM_PL_FLAG_NO_EVICT) {
-				printk(KERN_ERR TTM_PFX "Need to be root to "
-					"modify NO_EVICT status.\n");
-				return -EINVAL;
-			}
-		}
-	}
-	for (i = 0; i < placement->num_busy_placement; i++) {
-		if (!capable(CAP_SYS_ADMIN)) {
-			if (placement->busy_placement[i] & TTM_PL_FLAG_NO_EVICT) {
-				printk(KERN_ERR TTM_PFX "Need to be root to "
-					"modify NO_EVICT status.\n");
-				return -EINVAL;
-			}
-		}
-	}
 	return 0;
 }
 
@@ -1176,6 +1144,10 @@
 	num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
 	if (num_pages == 0) {
 		printk(KERN_ERR TTM_PFX "Illegal buffer object size.\n");
+		if (destroy)
+			(*destroy)(bo);
+		else
+			kfree(bo);
 		return -EINVAL;
 	}
 	bo->destroy = destroy;
@@ -1369,18 +1341,9 @@
 	int ret = -EINVAL;
 	struct ttm_mem_type_manager *man;
 
-	if (type >= TTM_NUM_MEM_TYPES) {
-		printk(KERN_ERR TTM_PFX "Illegal memory type %d\n", type);
-		return ret;
-	}
-
+	BUG_ON(type >= TTM_NUM_MEM_TYPES);
 	man = &bdev->man[type];
-	if (man->has_type) {
-		printk(KERN_ERR TTM_PFX
-		       "Memory manager already initialized for type %d\n",
-		       type);
-		return ret;
-	}
+	BUG_ON(man->has_type);
 
 	ret = bdev->driver->init_mem_type(bdev, type, man);
 	if (ret)
@@ -1389,13 +1352,6 @@
 
 	ret = 0;
 	if (type != TTM_PL_SYSTEM) {
-		if (!p_size) {
-			printk(KERN_ERR TTM_PFX
-			       "Zero size memory manager type %d\n",
-			       type);
-			return ret;
-		}
-
 		ret = (*man->func->init)(man, p_size);
 		if (ret)
 			return ret;
diff --git a/drivers/gpu/drm/ttm/ttm_bo_manager.c b/drivers/gpu/drm/ttm/ttm_bo_manager.c
index 7410c19..038e947 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_manager.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_manager.c
@@ -1,6 +1,6 @@
 /**************************************************************************
  *
- * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
+ * Copyright (c) 2007-2010 VMware, Inc., Palo Alto, CA., USA
  * All Rights Reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
@@ -31,20 +31,29 @@
 #include "ttm/ttm_module.h"
 #include "ttm/ttm_bo_driver.h"
 #include "ttm/ttm_placement.h"
-#include <linux/jiffies.h>
+#include "drm_mm.h"
 #include <linux/slab.h>
-#include <linux/sched.h>
-#include <linux/mm.h>
-#include <linux/file.h>
+#include <linux/spinlock.h>
 #include <linux/module.h>
 
+/**
+ * Currently we use a spinlock for the lock, but a mutex *may* be
+ * more appropriate to reduce scheduling latency if the range manager
+ * ends up with very fragmented allocation patterns.
+ */
+
+struct ttm_range_manager {
+	struct drm_mm mm;
+	spinlock_t lock;
+};
+
 static int ttm_bo_man_get_node(struct ttm_mem_type_manager *man,
 			       struct ttm_buffer_object *bo,
 			       struct ttm_placement *placement,
 			       struct ttm_mem_reg *mem)
 {
-	struct ttm_bo_global *glob = man->bdev->glob;
-	struct drm_mm *mm = man->priv;
+	struct ttm_range_manager *rman = (struct ttm_range_manager *) man->priv;
+	struct drm_mm *mm = &rman->mm;
 	struct drm_mm_node *node = NULL;
 	unsigned long lpfn;
 	int ret;
@@ -57,19 +66,19 @@
 		if (unlikely(ret))
 			return ret;
 
-		spin_lock(&glob->lru_lock);
+		spin_lock(&rman->lock);
 		node = drm_mm_search_free_in_range(mm,
 					mem->num_pages, mem->page_alignment,
 					placement->fpfn, lpfn, 1);
 		if (unlikely(node == NULL)) {
-			spin_unlock(&glob->lru_lock);
+			spin_unlock(&rman->lock);
 			return 0;
 		}
 		node = drm_mm_get_block_atomic_range(node, mem->num_pages,
-							mem->page_alignment,
-							placement->fpfn,
-							lpfn);
-		spin_unlock(&glob->lru_lock);
+						     mem->page_alignment,
+						     placement->fpfn,
+						     lpfn);
+		spin_unlock(&rman->lock);
 	} while (node == NULL);
 
 	mem->mm_node = node;
@@ -80,12 +89,12 @@
 static void ttm_bo_man_put_node(struct ttm_mem_type_manager *man,
 				struct ttm_mem_reg *mem)
 {
-	struct ttm_bo_global *glob = man->bdev->glob;
+	struct ttm_range_manager *rman = (struct ttm_range_manager *) man->priv;
 
 	if (mem->mm_node) {
-		spin_lock(&glob->lru_lock);
+		spin_lock(&rman->lock);
 		drm_mm_put_block(mem->mm_node);
-		spin_unlock(&glob->lru_lock);
+		spin_unlock(&rman->lock);
 		mem->mm_node = NULL;
 	}
 }
@@ -93,49 +102,49 @@
 static int ttm_bo_man_init(struct ttm_mem_type_manager *man,
 			   unsigned long p_size)
 {
-	struct drm_mm *mm;
+	struct ttm_range_manager *rman;
 	int ret;
 
-	mm = kzalloc(sizeof(*mm), GFP_KERNEL);
-	if (!mm)
+	rman = kzalloc(sizeof(*rman), GFP_KERNEL);
+	if (!rman)
 		return -ENOMEM;
 
-	ret = drm_mm_init(mm, 0, p_size);
+	ret = drm_mm_init(&rman->mm, 0, p_size);
 	if (ret) {
-		kfree(mm);
+		kfree(rman);
 		return ret;
 	}
 
-	man->priv = mm;
+	spin_lock_init(&rman->lock);
+	man->priv = rman;
 	return 0;
 }
 
 static int ttm_bo_man_takedown(struct ttm_mem_type_manager *man)
 {
-	struct ttm_bo_global *glob = man->bdev->glob;
-	struct drm_mm *mm = man->priv;
-	int ret = 0;
+	struct ttm_range_manager *rman = (struct ttm_range_manager *) man->priv;
+	struct drm_mm *mm = &rman->mm;
 
-	spin_lock(&glob->lru_lock);
+	spin_lock(&rman->lock);
 	if (drm_mm_clean(mm)) {
 		drm_mm_takedown(mm);
-		kfree(mm);
+		spin_unlock(&rman->lock);
+		kfree(rman);
 		man->priv = NULL;
-	} else
-		ret = -EBUSY;
-	spin_unlock(&glob->lru_lock);
-	return ret;
+		return 0;
+	}
+	spin_unlock(&rman->lock);
+	return -EBUSY;
 }
 
 static void ttm_bo_man_debug(struct ttm_mem_type_manager *man,
 			     const char *prefix)
 {
-	struct ttm_bo_global *glob = man->bdev->glob;
-	struct drm_mm *mm = man->priv;
+	struct ttm_range_manager *rman = (struct ttm_range_manager *) man->priv;
 
-	spin_lock(&glob->lru_lock);
-	drm_mm_debug_table(mm, prefix);
-	spin_unlock(&glob->lru_lock);
+	spin_lock(&rman->lock);
+	drm_mm_debug_table(&rman->mm, prefix);
+	spin_unlock(&rman->lock);
 }
 
 const struct ttm_mem_type_manager_func ttm_bo_manager_func = {
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index a7bab87..af789dc 100644
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -440,10 +440,8 @@
 		return ret;
 
 	ret = be->func->bind(be, bo_mem);
-	if (ret) {
-		printk(KERN_ERR TTM_PFX "Couldn't bind backend.\n");
+	if (unlikely(ret != 0))
 		return ret;
-	}
 
 	ttm->state = tt_bound;
 
diff --git a/drivers/gpu/drm/via/via_dmablit.c b/drivers/gpu/drm/via/via_dmablit.c
index 9b5b4d9..3e038a3 100644
--- a/drivers/gpu/drm/via/via_dmablit.c
+++ b/drivers/gpu/drm/via/via_dmablit.c
@@ -235,9 +235,9 @@
 	vsg->num_pages = VIA_PFN(xfer->mem_addr + (xfer->num_lines * xfer->mem_stride - 1)) -
 		first_pfn + 1;
 
-	if (NULL == (vsg->pages = vmalloc(sizeof(struct page *) * vsg->num_pages)))
+	vsg->pages = vzalloc(sizeof(struct page *) * vsg->num_pages);
+	if (NULL == vsg->pages)
 		return -ENOMEM;
-	memset(vsg->pages, 0, sizeof(struct page *) * vsg->num_pages);
 	down_read(&current->mm->mmap_sem);
 	ret = get_user_pages(current, current->mm,
 			     (unsigned long)xfer->mem_addr,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index 51d9f9f..76954e3 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -691,6 +691,7 @@
 
 	fence_rep.error = ret;
 	fence_rep.fence_seq = (uint64_t) sequence;
+	fence_rep.pad64 = 0;
 
 	user_fence_rep = (struct drm_vmw_fence_rep __user *)
 	    (unsigned long)arg->fence_rep;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index 87c6e61..cceeb42 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -720,6 +720,8 @@
 			       &vmw_vram_ne_placement,
 			       false, &vmw_dmabuf_bo_free);
 	vmw_overlay_resume_all(dev_priv);
+	if (unlikely(ret != 0))
+		vfbs->buffer = NULL;
 
 	return ret;
 }
@@ -730,6 +732,9 @@
 	struct vmw_framebuffer_surface *vfbs =
 		vmw_framebuffer_to_vfbs(&vfb->base);
 
+	if (unlikely(vfbs->buffer == NULL))
+		return 0;
+
 	bo = &vfbs->buffer->base;
 	ttm_bo_unref(&bo);
 	vfbs->buffer = NULL;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
index a01c47d..29113c9 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
@@ -557,7 +557,7 @@
 		return -EINVAL;
 	}
 
-	dev_priv->ldu_priv = kmalloc(GFP_KERNEL, sizeof(*dev_priv->ldu_priv));
+	dev_priv->ldu_priv = kmalloc(sizeof(*dev_priv->ldu_priv), GFP_KERNEL);
 
 	if (!dev_priv->ldu_priv)
 		return -ENOMEM;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
index df2036e..f1a52f9 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
@@ -585,7 +585,7 @@
 		return -ENOSYS;
 	}
 
-	overlay = kmalloc(GFP_KERNEL, sizeof(*overlay));
+	overlay = kmalloc(sizeof(*overlay), GFP_KERNEL);
 	if (!overlay)
 		return -ENOMEM;
 
diff --git a/drivers/gpu/stub/Kconfig b/drivers/gpu/stub/Kconfig
index 742c423..0e1edd7 100644
--- a/drivers/gpu/stub/Kconfig
+++ b/drivers/gpu/stub/Kconfig
@@ -3,6 +3,9 @@
 	depends on PCI
 	# Poulsbo stub depends on ACPI_VIDEO when ACPI is enabled
 	# but for select to work, need to select ACPI_VIDEO's dependencies, ick
+	select VIDEO_OUTPUT_CONTROL if ACPI
+	select BACKLIGHT_CLASS_DEVICE if ACPI
+	select INPUT if ACPI
 	select ACPI_VIDEO if ACPI
 	help
 	  Choose this option if you have a system that has Intel GMA500
diff --git a/drivers/hwmon/ad7414.c b/drivers/hwmon/ad7414.c
index 1e4c21f..86d822a 100644
--- a/drivers/hwmon/ad7414.c
+++ b/drivers/hwmon/ad7414.c
@@ -178,11 +178,13 @@
 {
 	struct ad7414_data *data;
 	int conf;
-	int err = 0;
+	int err;
 
 	if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA |
-				     I2C_FUNC_SMBUS_READ_WORD_DATA))
+				     I2C_FUNC_SMBUS_READ_WORD_DATA)) {
+		err = -EOPNOTSUPP;
 		goto exit;
+	}
 
 	data = kzalloc(sizeof(struct ad7414_data), GFP_KERNEL);
 	if (!data) {
diff --git a/drivers/hwmon/adt7470.c b/drivers/hwmon/adt7470.c
index 9e77571..87d92a5 100644
--- a/drivers/hwmon/adt7470.c
+++ b/drivers/hwmon/adt7470.c
@@ -1286,8 +1286,10 @@
 	init_completion(&data->auto_update_stop);
 	data->auto_update = kthread_run(adt7470_update_thread, client,
 					dev_name(data->hwmon_dev));
-	if (IS_ERR(data->auto_update))
+	if (IS_ERR(data->auto_update)) {
+		err = PTR_ERR(data->auto_update);
 		goto exit_unregister;
+	}
 
 	return 0;
 
diff --git a/drivers/hwmon/gpio-fan.c b/drivers/hwmon/gpio-fan.c
index aa701a1..f141a1d 100644
--- a/drivers/hwmon/gpio-fan.c
+++ b/drivers/hwmon/gpio-fan.c
@@ -376,10 +376,6 @@
 		}
 	}
 
-	err = sysfs_create_group(&pdev->dev.kobj, &gpio_fan_ctrl_group);
-	if (err)
-		goto err_free_gpio;
-
 	fan_data->num_ctrl = num_ctrl;
 	fan_data->ctrl = ctrl;
 	fan_data->num_speed = pdata->num_speed;
@@ -391,6 +387,10 @@
 		goto err_free_gpio;
 	}
 
+	err = sysfs_create_group(&pdev->dev.kobj, &gpio_fan_ctrl_group);
+	if (err)
+		goto err_free_gpio;
+
 	return 0;
 
 err_free_gpio:
diff --git a/drivers/input/input.c b/drivers/input/input.c
index d092ef9..7f26ca6 100644
--- a/drivers/input/input.c
+++ b/drivers/input/input.c
@@ -74,6 +74,7 @@
  * dev->event_lock held and interrupts disabled.
  */
 static void input_pass_event(struct input_dev *dev,
+			     struct input_handler *src_handler,
 			     unsigned int type, unsigned int code, int value)
 {
 	struct input_handler *handler;
@@ -92,6 +93,15 @@
 				continue;
 
 			handler = handle->handler;
+
+			/*
+			 * If this is the handler that injected this
+			 * particular event we want to skip it to avoid
+			 * filters firing again and again.
+			 */
+			if (handler == src_handler)
+				continue;
+
 			if (!handler->filter) {
 				if (filtered)
 					break;
@@ -121,7 +131,7 @@
 	if (test_bit(dev->repeat_key, dev->key) &&
 	    is_event_supported(dev->repeat_key, dev->keybit, KEY_MAX)) {
 
-		input_pass_event(dev, EV_KEY, dev->repeat_key, 2);
+		input_pass_event(dev, NULL, EV_KEY, dev->repeat_key, 2);
 
 		if (dev->sync) {
 			/*
@@ -130,7 +140,7 @@
 			 * Otherwise assume that the driver will send
 			 * SYN_REPORT once it's done.
 			 */
-			input_pass_event(dev, EV_SYN, SYN_REPORT, 1);
+			input_pass_event(dev, NULL, EV_SYN, SYN_REPORT, 1);
 		}
 
 		if (dev->rep[REP_PERIOD])
@@ -163,6 +173,7 @@
 #define INPUT_PASS_TO_ALL	(INPUT_PASS_TO_HANDLERS | INPUT_PASS_TO_DEVICE)
 
 static int input_handle_abs_event(struct input_dev *dev,
+				  struct input_handler *src_handler,
 				  unsigned int code, int *pval)
 {
 	bool is_mt_event;
@@ -206,13 +217,15 @@
 	/* Flush pending "slot" event */
 	if (is_mt_event && dev->slot != input_abs_get_val(dev, ABS_MT_SLOT)) {
 		input_abs_set_val(dev, ABS_MT_SLOT, dev->slot);
-		input_pass_event(dev, EV_ABS, ABS_MT_SLOT, dev->slot);
+		input_pass_event(dev, src_handler,
+				 EV_ABS, ABS_MT_SLOT, dev->slot);
 	}
 
 	return INPUT_PASS_TO_HANDLERS;
 }
 
 static void input_handle_event(struct input_dev *dev,
+			       struct input_handler *src_handler,
 			       unsigned int type, unsigned int code, int value)
 {
 	int disposition = INPUT_IGNORE_EVENT;
@@ -265,7 +278,8 @@
 
 	case EV_ABS:
 		if (is_event_supported(code, dev->absbit, ABS_MAX))
-			disposition = input_handle_abs_event(dev, code, &value);
+			disposition = input_handle_abs_event(dev, src_handler,
+							     code, &value);
 
 		break;
 
@@ -323,7 +337,7 @@
 		dev->event(dev, type, code, value);
 
 	if (disposition & INPUT_PASS_TO_HANDLERS)
-		input_pass_event(dev, type, code, value);
+		input_pass_event(dev, src_handler, type, code, value);
 }
 
 /**
@@ -352,7 +366,7 @@
 
 		spin_lock_irqsave(&dev->event_lock, flags);
 		add_input_randomness(type, code, value);
-		input_handle_event(dev, type, code, value);
+		input_handle_event(dev, NULL, type, code, value);
 		spin_unlock_irqrestore(&dev->event_lock, flags);
 	}
 }
@@ -382,7 +396,8 @@
 		rcu_read_lock();
 		grab = rcu_dereference(dev->grab);
 		if (!grab || grab == handle)
-			input_handle_event(dev, type, code, value);
+			input_handle_event(dev, handle->handler,
+					   type, code, value);
 		rcu_read_unlock();
 
 		spin_unlock_irqrestore(&dev->event_lock, flags);
@@ -595,10 +610,10 @@
 		for (code = 0; code <= KEY_MAX; code++) {
 			if (is_event_supported(code, dev->keybit, KEY_MAX) &&
 			    __test_and_clear_bit(code, dev->key)) {
-				input_pass_event(dev, EV_KEY, code, 0);
+				input_pass_event(dev, NULL, EV_KEY, code, 0);
 			}
 		}
-		input_pass_event(dev, EV_SYN, SYN_REPORT, 1);
+		input_pass_event(dev, NULL, EV_SYN, SYN_REPORT, 1);
 	}
 }
 
@@ -873,9 +888,9 @@
 	    !is_event_supported(old_keycode, dev->keybit, KEY_MAX) &&
 	    __test_and_clear_bit(old_keycode, dev->key)) {
 
-		input_pass_event(dev, EV_KEY, old_keycode, 0);
+		input_pass_event(dev, NULL, EV_KEY, old_keycode, 0);
 		if (dev->sync)
-			input_pass_event(dev, EV_SYN, SYN_REPORT, 1);
+			input_pass_event(dev, NULL, EV_SYN, SYN_REPORT, 1);
 	}
 
  out:
@@ -1565,8 +1580,7 @@
 		}							\
 	} while (0)
 
-#ifdef CONFIG_PM
-static void input_dev_reset(struct input_dev *dev, bool activate)
+static void input_dev_toggle(struct input_dev *dev, bool activate)
 {
 	if (!dev->event)
 		return;
@@ -1580,12 +1594,44 @@
 	}
 }
 
+/**
+ * input_reset_device() - reset/restore the state of input device
+ * @dev: input device whose state needs to be reset
+ *
+ * This function tries to reset the state of an opened input device and
+ * bring its internal state and the state of the hardware in sync with each other.
+ * We mark all keys as released, restore LED state, repeat rate, etc.
+ */
+void input_reset_device(struct input_dev *dev)
+{
+	mutex_lock(&dev->mutex);
+
+	if (dev->users) {
+		input_dev_toggle(dev, true);
+
+		/*
+		 * Keys that have been pressed at suspend time are unlikely
+		 * to be still pressed when we resume.
+		 */
+		spin_lock_irq(&dev->event_lock);
+		input_dev_release_keys(dev);
+		spin_unlock_irq(&dev->event_lock);
+	}
+
+	mutex_unlock(&dev->mutex);
+}
+EXPORT_SYMBOL(input_reset_device);
+
+#ifdef CONFIG_PM
 static int input_dev_suspend(struct device *dev)
 {
 	struct input_dev *input_dev = to_input_dev(dev);
 
 	mutex_lock(&input_dev->mutex);
-	input_dev_reset(input_dev, false);
+
+	if (input_dev->users)
+		input_dev_toggle(input_dev, false);
+
 	mutex_unlock(&input_dev->mutex);
 
 	return 0;
@@ -1595,18 +1641,7 @@
 {
 	struct input_dev *input_dev = to_input_dev(dev);
 
-	mutex_lock(&input_dev->mutex);
-	input_dev_reset(input_dev, true);
-
-	/*
-	 * Keys that have been pressed at suspend time are unlikely
-	 * to be still pressed when we resume.
-	 */
-	spin_lock_irq(&input_dev->event_lock);
-	input_dev_release_keys(input_dev);
-	spin_unlock_irq(&input_dev->event_lock);
-
-	mutex_unlock(&input_dev->mutex);
+	input_reset_device(input_dev);
 
 	return 0;
 }
diff --git a/drivers/input/keyboard/adp5588-keys.c b/drivers/input/keyboard/adp5588-keys.c
index b92d1cd..af45d27 100644
--- a/drivers/input/keyboard/adp5588-keys.c
+++ b/drivers/input/keyboard/adp5588-keys.c
@@ -4,7 +4,7 @@
  *		 I2C QWERTY Keypad and IO Expander
  * Bugs: Enter bugs at http://blackfin.uclinux.org/
  *
- * Copyright (C) 2008-2009 Analog Devices Inc.
+ * Copyright (C) 2008-2010 Analog Devices Inc.
  * Licensed under the GPL-2 or later.
  */
 
@@ -24,29 +24,6 @@
 
 #include <linux/i2c/adp5588.h>
 
- /* Configuration Register1 */
-#define AUTO_INC	(1 << 7)
-#define GPIEM_CFG	(1 << 6)
-#define OVR_FLOW_M	(1 << 5)
-#define INT_CFG		(1 << 4)
-#define OVR_FLOW_IEN	(1 << 3)
-#define K_LCK_IM	(1 << 2)
-#define GPI_IEN		(1 << 1)
-#define KE_IEN		(1 << 0)
-
-/* Interrupt Status Register */
-#define CMP2_INT	(1 << 5)
-#define CMP1_INT	(1 << 4)
-#define OVR_FLOW_INT	(1 << 3)
-#define K_LCK_INT	(1 << 2)
-#define GPI_INT		(1 << 1)
-#define KE_INT		(1 << 0)
-
-/* Key Lock and Event Counter Register */
-#define K_LCK_EN	(1 << 6)
-#define LCK21		0x30
-#define KEC		0xF
-
 /* Key Event Register xy */
 #define KEY_EV_PRESSED		(1 << 7)
 #define KEY_EV_MASK		(0x7F)
@@ -55,10 +32,6 @@
 
 #define KEYP_MAX_EVENT		10
 
-#define MAXGPIO			18
-#define ADP_BANK(offs)		((offs) >> 3)
-#define ADP_BIT(offs)		(1u << ((offs) & 0x7))
-
 /*
  * Early pre 4.0 Silicon required to delay readout by at least 25ms,
  * since the Event Counter Register updated 25ms after the interrupt
@@ -75,7 +48,7 @@
 	const struct adp5588_gpi_map *gpimap;
 	unsigned short gpimapsize;
 #ifdef CONFIG_GPIOLIB
-	unsigned char gpiomap[MAXGPIO];
+	unsigned char gpiomap[ADP5588_MAXGPIO];
 	bool export_gpio;
 	struct gpio_chip gc;
 	struct mutex gpio_lock;	/* Protect cached dir, dat_out */
@@ -103,8 +76,8 @@
 static int adp5588_gpio_get_value(struct gpio_chip *chip, unsigned off)
 {
 	struct adp5588_kpad *kpad = container_of(chip, struct adp5588_kpad, gc);
-	unsigned int bank = ADP_BANK(kpad->gpiomap[off]);
-	unsigned int bit = ADP_BIT(kpad->gpiomap[off]);
+	unsigned int bank = ADP5588_BANK(kpad->gpiomap[off]);
+	unsigned int bit = ADP5588_BIT(kpad->gpiomap[off]);
 
 	return !!(adp5588_read(kpad->client, GPIO_DAT_STAT1 + bank) & bit);
 }
@@ -113,8 +86,8 @@
 				   unsigned off, int val)
 {
 	struct adp5588_kpad *kpad = container_of(chip, struct adp5588_kpad, gc);
-	unsigned int bank = ADP_BANK(kpad->gpiomap[off]);
-	unsigned int bit = ADP_BIT(kpad->gpiomap[off]);
+	unsigned int bank = ADP5588_BANK(kpad->gpiomap[off]);
+	unsigned int bit = ADP5588_BIT(kpad->gpiomap[off]);
 
 	mutex_lock(&kpad->gpio_lock);
 
@@ -132,8 +105,8 @@
 static int adp5588_gpio_direction_input(struct gpio_chip *chip, unsigned off)
 {
 	struct adp5588_kpad *kpad = container_of(chip, struct adp5588_kpad, gc);
-	unsigned int bank = ADP_BANK(kpad->gpiomap[off]);
-	unsigned int bit = ADP_BIT(kpad->gpiomap[off]);
+	unsigned int bank = ADP5588_BANK(kpad->gpiomap[off]);
+	unsigned int bit = ADP5588_BIT(kpad->gpiomap[off]);
 	int ret;
 
 	mutex_lock(&kpad->gpio_lock);
@@ -150,8 +123,8 @@
 					 unsigned off, int val)
 {
 	struct adp5588_kpad *kpad = container_of(chip, struct adp5588_kpad, gc);
-	unsigned int bank = ADP_BANK(kpad->gpiomap[off]);
-	unsigned int bit = ADP_BIT(kpad->gpiomap[off]);
+	unsigned int bank = ADP5588_BANK(kpad->gpiomap[off]);
+	unsigned int bit = ADP5588_BIT(kpad->gpiomap[off]);
 	int ret;
 
 	mutex_lock(&kpad->gpio_lock);
@@ -176,7 +149,7 @@
 static int __devinit adp5588_build_gpiomap(struct adp5588_kpad *kpad,
 				const struct adp5588_kpad_platform_data *pdata)
 {
-	bool pin_used[MAXGPIO];
+	bool pin_used[ADP5588_MAXGPIO];
 	int n_unused = 0;
 	int i;
 
@@ -191,7 +164,7 @@
 	for (i = 0; i < kpad->gpimapsize; i++)
 		pin_used[kpad->gpimap[i].pin - GPI_PIN_BASE] = true;
 
-	for (i = 0; i < MAXGPIO; i++)
+	for (i = 0; i < ADP5588_MAXGPIO; i++)
 		if (!pin_used[i])
 			kpad->gpiomap[n_unused++] = i;
 
@@ -234,7 +207,7 @@
 		return error;
 	}
 
-	for (i = 0; i <= ADP_BANK(MAXGPIO); i++) {
+	for (i = 0; i <= ADP5588_BANK(ADP5588_MAXGPIO); i++) {
 		kpad->dat_out[i] = adp5588_read(kpad->client,
 						GPIO_DAT_OUT1 + i);
 		kpad->dir[i] = adp5588_read(kpad->client, GPIO_DIR1 + i);
@@ -318,11 +291,11 @@
 
 	status = adp5588_read(client, INT_STAT);
 
-	if (status & OVR_FLOW_INT)	/* Unlikely and should never happen */
+	if (status & ADP5588_OVR_FLOW_INT)	/* Unlikely and should never happen */
 		dev_err(&client->dev, "Event Overflow Error\n");
 
-	if (status & KE_INT) {
-		ev_cnt = adp5588_read(client, KEY_LCK_EC_STAT) & KEC;
+	if (status & ADP5588_KE_INT) {
+		ev_cnt = adp5588_read(client, KEY_LCK_EC_STAT) & ADP5588_KEC;
 		if (ev_cnt) {
 			adp5588_report_events(kpad, ev_cnt);
 			input_sync(kpad->input);
@@ -360,7 +333,7 @@
 	if (pdata->en_keylock) {
 		ret |= adp5588_write(client, UNLOCK1, pdata->unlock_key1);
 		ret |= adp5588_write(client, UNLOCK2, pdata->unlock_key2);
-		ret |= adp5588_write(client, KEY_LCK_EC_STAT, K_LCK_EN);
+		ret |= adp5588_write(client, KEY_LCK_EC_STAT, ADP5588_K_LCK_EN);
 	}
 
 	for (i = 0; i < KEYP_MAX_EVENT; i++)
@@ -384,7 +357,7 @@
 	}
 
 	if (gpio_data) {
-		for (i = 0; i <= ADP_BANK(MAXGPIO); i++) {
+		for (i = 0; i <= ADP5588_BANK(ADP5588_MAXGPIO); i++) {
 			int pull_mask = gpio_data->pullup_dis_mask;
 
 			ret |= adp5588_write(client, GPIO_PULL1 + i,
@@ -392,11 +365,14 @@
 		}
 	}
 
-	ret |= adp5588_write(client, INT_STAT, CMP2_INT | CMP1_INT |
-					OVR_FLOW_INT | K_LCK_INT |
-					GPI_INT | KE_INT); /* Status is W1C */
+	ret |= adp5588_write(client, INT_STAT,
+				ADP5588_CMP2_INT | ADP5588_CMP1_INT |
+				ADP5588_OVR_FLOW_INT | ADP5588_K_LCK_INT |
+				ADP5588_GPI_INT | ADP5588_KE_INT); /* Status is W1C */
 
-	ret |= adp5588_write(client, CFG, INT_CFG | OVR_FLOW_IEN | KE_IEN);
+	ret |= adp5588_write(client, CFG, ADP5588_INT_CFG |
+					  ADP5588_OVR_FLOW_IEN |
+					  ADP5588_KE_IEN);
 
 	if (ret < 0) {
 		dev_err(&client->dev, "Write Error\n");
diff --git a/drivers/input/keyboard/atkbd.c b/drivers/input/keyboard/atkbd.c
index d358ef8..11478eb 100644
--- a/drivers/input/keyboard/atkbd.c
+++ b/drivers/input/keyboard/atkbd.c
@@ -63,6 +63,10 @@
 module_param_named(extra, atkbd_extra, bool, 0);
 MODULE_PARM_DESC(extra, "Enable extra LEDs and keys on IBM RapidAcces, EzKey and similar keyboards");
 
+static bool atkbd_terminal;
+module_param_named(terminal, atkbd_terminal, bool, 0);
+MODULE_PARM_DESC(terminal, "Enable break codes on an IBM Terminal keyboard connected via AT/PS2");
+
 /*
  * Scancode to keycode tables. These are just the default setting, and
  * are loadable via a userland utility.
@@ -136,7 +140,8 @@
 #define ATKBD_CMD_ENABLE	0x00f4
 #define ATKBD_CMD_RESET_DIS	0x00f5	/* Reset to defaults and disable */
 #define ATKBD_CMD_RESET_DEF	0x00f6	/* Reset to defaults */
-#define ATKBD_CMD_SETALL_MBR	0x00fa
+#define ATKBD_CMD_SETALL_MB	0x00f8	/* Set all keys to give break codes */
+#define ATKBD_CMD_SETALL_MBR	0x00fa  /* ... and repeat */
 #define ATKBD_CMD_RESET_BAT	0x02ff
 #define ATKBD_CMD_RESEND	0x00fe
 #define ATKBD_CMD_EX_ENABLE	0x10ea
@@ -764,6 +769,11 @@
 		}
 	}
 
+	if (atkbd_terminal) {
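+		/* Make all keys send break codes but no typematic repeat */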
+		ps2_command(ps2dev, param, ATKBD_CMD_SETALL_MB);
+		return 3;
+	}
+
 	if (target_set != 3)
 		return 2;
 
diff --git a/drivers/input/misc/pcf8574_keypad.c b/drivers/input/misc/pcf8574_keypad.c
index 4b42ffc..d1583ae 100644
--- a/drivers/input/misc/pcf8574_keypad.c
+++ b/drivers/input/misc/pcf8574_keypad.c
@@ -127,14 +127,6 @@
 	idev->id.product = 0x0001;
 	idev->id.version = 0x0100;
 
-	input_set_drvdata(idev, lp);
-
-	ret = input_register_device(idev);
-	if (ret) {
-		dev_err(&client->dev, "input_register_device() failed\n");
-		goto fail_register;
-	}
-
 	lp->laststate = read_state(lp);
 
 	ret = request_threaded_irq(client->irq, NULL, pcf8574_kp_irq_handler,
@@ -142,16 +134,21 @@
 				   DRV_NAME, lp);
 	if (ret) {
 		dev_err(&client->dev, "IRQ %d is not free\n", client->irq);
-		goto fail_irq;
+		goto fail_free_device;
+	}
+
+	ret = input_register_device(idev);
+	if (ret) {
+		dev_err(&client->dev, "input_register_device() failed\n");
+		goto fail_free_irq;
 	}
 
 	i2c_set_clientdata(client, lp);
 	return 0;
 
- fail_irq:
-	input_unregister_device(idev);
- fail_register:
-	input_set_drvdata(idev, NULL);
+ fail_free_irq:
+	free_irq(client->irq, lp);
+ fail_free_device:
 	input_free_device(idev);
  fail_allocate:
 	kfree(lp);
diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
index ed7ad74..a5475b5 100644
--- a/drivers/input/serio/i8042-x86ia64io.h
+++ b/drivers/input/serio/i8042-x86ia64io.h
@@ -351,6 +351,17 @@
 		},
 	},
 	{
+		/*
+		 * Most (all?) VAIOs do not have external PS/2 ports, nor do
+		 * they implement active multiplexing properly, and MUX
+		 * discovery usually messes up the keyboard/touchpad.
+		 */
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
+			DMI_MATCH(DMI_BOARD_NAME, "VAIO"),
+		},
+	},
+	{
 		/* Amoi M636/A737 */
 		.matches = {
 			DMI_MATCH(DMI_SYS_VENDOR, "Amoi Electronics CO.,LTD."),
diff --git a/drivers/input/tablet/acecad.c b/drivers/input/tablet/acecad.c
index aea9a93..d94f7e9 100644
--- a/drivers/input/tablet/acecad.c
+++ b/drivers/input/tablet/acecad.c
@@ -229,12 +229,13 @@
 
 	err = input_register_device(acecad->input);
 	if (err)
-		goto fail2;
+		goto fail3;
 
 	usb_set_intfdata(intf, acecad);
 
 	return 0;
 
+ fail3:	usb_free_urb(acecad->irq);
  fail2:	usb_free_coherent(dev, 8, acecad->data, acecad->data_dma);
  fail1: input_free_device(input_dev);
 	kfree(acecad);
diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig
index cc2a88d..77b8fd2 100644
--- a/drivers/leds/Kconfig
+++ b/drivers/leds/Kconfig
@@ -10,7 +10,7 @@
 if NEW_LEDS
 
 config LEDS_CLASS
-	tristate "LED Class Support"
+	bool "LED Class Support"
 	help
 	  This option enables the led sysfs class in /sys/class/leds.  You'll
 	  need this to do anything useful with LEDs.  If unsure, say N.
@@ -176,6 +176,24 @@
 	  To compile this driver as a module, choose M here: the
 	  module will be called leds-lp3944.
 
+config LEDS_LP5521
+	tristate "LED Support for N.S. LP5521 LED driver chip"
+	depends on LEDS_CLASS && I2C
+	help
+	  If you say yes here you get support for the National Semiconductor
+	  LP5521 LED driver. It is a 3-channel chip with programmable engines.
+	  The driver provides direct control via the LED class and an
+	  interface for programming the engines.
+
+config LEDS_LP5523
+	tristate "LED Support for N.S. LP5523 LED driver chip"
+	depends on LEDS_CLASS && I2C
+	help
+	  If you say yes here you get support for the National Semiconductor
+	  LP5523 LED driver. It is a 9-channel chip with programmable engines.
+	  The driver provides direct control via the LED class and an
+	  interface for programming the engines.
+
 config LEDS_CLEVO_MAIL
 	tristate "Mail LED on Clevo notebook"
 	depends on X86 && SERIO_I8042 && DMI
diff --git a/drivers/leds/Makefile b/drivers/leds/Makefile
index 9c96db4..aae6989 100644
--- a/drivers/leds/Makefile
+++ b/drivers/leds/Makefile
@@ -23,6 +23,8 @@
 obj-$(CONFIG_LEDS_PCA9532)		+= leds-pca9532.o
 obj-$(CONFIG_LEDS_GPIO)			+= leds-gpio.o
 obj-$(CONFIG_LEDS_LP3944)		+= leds-lp3944.o
+obj-$(CONFIG_LEDS_LP5521)		+= leds-lp5521.o
+obj-$(CONFIG_LEDS_LP5523)		+= leds-lp5523.o
 obj-$(CONFIG_LEDS_CLEVO_MAIL)		+= leds-clevo-mail.o
 obj-$(CONFIG_LEDS_HP6XX)		+= leds-hp6xx.o
 obj-$(CONFIG_LEDS_FSG)			+= leds-fsg.o
diff --git a/drivers/leds/led-class.c b/drivers/leds/led-class.c
index 2606600..211e21f 100644
--- a/drivers/leds/led-class.c
+++ b/drivers/leds/led-class.c
@@ -81,6 +81,79 @@
 	__ATTR_NULL,
 };
 
+static void led_timer_function(unsigned long data)
+{
+	struct led_classdev *led_cdev = (void *)data;
+	unsigned long brightness;
+	unsigned long delay;
+
+	if (!led_cdev->blink_delay_on || !led_cdev->blink_delay_off) {
+		led_set_brightness(led_cdev, LED_OFF);
+		return;
+	}
+
+	brightness = led_get_brightness(led_cdev);
+	if (!brightness) {
+		/* Time to switch the LED on. */
+		brightness = led_cdev->blink_brightness;
+		delay = led_cdev->blink_delay_on;
+	} else {
+		/* Store the current brightness value to be able
+		 * to restore it when the delay_off period is over.
+		 */
+		led_cdev->blink_brightness = brightness;
+		brightness = LED_OFF;
+		delay = led_cdev->blink_delay_off;
+	}
+
+	led_set_brightness(led_cdev, brightness);
+
+	mod_timer(&led_cdev->blink_timer, jiffies + msecs_to_jiffies(delay));
+}
+
+static void led_stop_software_blink(struct led_classdev *led_cdev)
+{
+	/* deactivate previous settings */
+	del_timer_sync(&led_cdev->blink_timer);
+	led_cdev->blink_delay_on = 0;
+	led_cdev->blink_delay_off = 0;
+}
+
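+/*
+ * Configure software blinking: remember which brightness to restore and,
+ * unless one of the delays is zero, arm the timer that toggles the LED.
+ */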
+static void led_set_software_blink(struct led_classdev *led_cdev,
+				   unsigned long delay_on,
+				   unsigned long delay_off)
+{
+	int current_brightness;
+
+	current_brightness = led_get_brightness(led_cdev);
+	if (current_brightness)
+		led_cdev->blink_brightness = current_brightness;
+	if (!led_cdev->blink_brightness)
+		led_cdev->blink_brightness = led_cdev->max_brightness;
+
+	if (delay_on == led_cdev->blink_delay_on &&
+	    delay_off == led_cdev->blink_delay_off)
+		return;
+
+	led_stop_software_blink(led_cdev);
+
+	led_cdev->blink_delay_on = delay_on;
+	led_cdev->blink_delay_off = delay_off;
+
+	/* never on - don't blink */
+	if (!delay_on)
+		return;
+
+	/* never off - just set to brightness */
+	if (!delay_off) {
+		led_set_brightness(led_cdev, led_cdev->blink_brightness);
+		return;
+	}
+
+	mod_timer(&led_cdev->blink_timer, jiffies + 1);
+}
+
+
 /**
  * led_classdev_suspend - suspend an led_classdev.
  * @led_cdev: the led_classdev to suspend.
@@ -148,6 +221,10 @@
 
 	led_update_brightness(led_cdev);
 
+	init_timer(&led_cdev->blink_timer);
+	led_cdev->blink_timer.function = led_timer_function;
+	led_cdev->blink_timer.data = (unsigned long)led_cdev;
+
 #ifdef CONFIG_LEDS_TRIGGERS
 	led_trigger_set_default(led_cdev);
 #endif
@@ -157,7 +234,6 @@
 
 	return 0;
 }
-
 EXPORT_SYMBOL_GPL(led_classdev_register);
 
 /**
@@ -175,6 +251,9 @@
 	up_write(&led_cdev->trigger_lock);
 #endif
 
+	/* Stop blinking */
+	led_brightness_set(led_cdev, LED_OFF);
+
 	device_unregister(led_cdev->dev);
 
 	down_write(&leds_list_lock);
@@ -183,6 +262,30 @@
 }
 EXPORT_SYMBOL_GPL(led_classdev_unregister);
 
+void led_blink_set(struct led_classdev *led_cdev,
+		   unsigned long *delay_on,
+		   unsigned long *delay_off)
+{
+	if (led_cdev->blink_set &&
+	    led_cdev->blink_set(led_cdev, delay_on, delay_off))
+		return;
+
+	/* blink with 1 Hz as default if nothing specified */
+	if (!*delay_on && !*delay_off)
+		*delay_on = *delay_off = 500;
+
+	led_set_software_blink(led_cdev, *delay_on, *delay_off);
+}
+EXPORT_SYMBOL(led_blink_set);
+
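+/* Set a fixed brightness, cancelling any software blink that is running. */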
+void led_brightness_set(struct led_classdev *led_cdev,
+			enum led_brightness brightness)
+{
+	led_stop_software_blink(led_cdev);
+	led_cdev->brightness_set(led_cdev, brightness);
+}
+EXPORT_SYMBOL(led_brightness_set);
+
 static int __init leds_init(void)
 {
 	leds_class = class_create(THIS_MODULE, "leds");
diff --git a/drivers/leds/led-triggers.c b/drivers/leds/led-triggers.c
index f1c00db..c41eb61 100644
--- a/drivers/leds/led-triggers.c
+++ b/drivers/leds/led-triggers.c
@@ -113,7 +113,7 @@
 		if (led_cdev->trigger->deactivate)
 			led_cdev->trigger->deactivate(led_cdev);
 		led_cdev->trigger = NULL;
-		led_set_brightness(led_cdev, LED_OFF);
+		led_brightness_set(led_cdev, LED_OFF);
 	}
 	if (trigger) {
 		write_lock_irqsave(&trigger->leddev_list_lock, flags);
diff --git a/drivers/leds/leds-gpio.c b/drivers/leds/leds-gpio.c
index ea57e05..4d9fa38 100644
--- a/drivers/leds/leds-gpio.c
+++ b/drivers/leds/leds-gpio.c
@@ -316,7 +316,7 @@
 
 static int __init gpio_led_init(void)
 {
-	int ret;
+	int ret = 0;
 
 #ifdef CONFIG_LEDS_GPIO_PLATFORM	
 	ret = platform_driver_register(&gpio_led_driver);
diff --git a/drivers/leds/leds-lp5521.c b/drivers/leds/leds-lp5521.c
new file mode 100644
index 0000000..3782f31
--- /dev/null
+++ b/drivers/leds/leds-lp5521.c
@@ -0,0 +1,821 @@
+/*
+ * LP5521 LED chip driver.
+ *
+ * Copyright (C) 2010 Nokia Corporation
+ *
+ * Contact: Samu Onkalo <samu.p.onkalo@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.	 See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/i2c.h>
+#include <linux/mutex.h>
+#include <linux/gpio.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/ctype.h>
+#include <linux/spinlock.h>
+#include <linux/wait.h>
+#include <linux/leds.h>
+#include <linux/leds-lp5521.h>
+#include <linux/workqueue.h>
+#include <linux/slab.h>
+
+#define LP5521_PROGRAM_LENGTH		32	/* in bytes */
+
+#define LP5521_MAX_LEDS			3	/* Maximum number of LEDs */
+#define LP5521_MAX_ENGINES		3	/* Maximum number of engines */
+
+#define LP5521_ENG_MASK_BASE		0x30	/* 00110000 */
+#define LP5521_ENG_STATUS_MASK		0x07	/* 00000111 */
+
+#define LP5521_CMD_LOAD			0x15	/* 00010101 */
+#define LP5521_CMD_RUN			0x2a	/* 00101010 */
+#define LP5521_CMD_DIRECT		0x3f	/* 00111111 */
+#define LP5521_CMD_DISABLED		0x00	/* 00000000 */
+
+/* Registers */
+#define LP5521_REG_ENABLE		0x00
+#define LP5521_REG_OP_MODE		0x01
+#define LP5521_REG_R_PWM		0x02
+#define LP5521_REG_G_PWM		0x03
+#define LP5521_REG_B_PWM		0x04
+#define LP5521_REG_R_CURRENT		0x05
+#define LP5521_REG_G_CURRENT		0x06
+#define LP5521_REG_B_CURRENT		0x07
+#define LP5521_REG_CONFIG		0x08
+#define LP5521_REG_R_CHANNEL_PC		0x09
+#define LP5521_REG_G_CHANNEL_PC		0x0A
+#define LP5521_REG_B_CHANNEL_PC		0x0B
+#define LP5521_REG_STATUS		0x0C
+#define LP5521_REG_RESET		0x0D
+#define LP5521_REG_GPO			0x0E
+#define LP5521_REG_R_PROG_MEM		0x10
+#define LP5521_REG_G_PROG_MEM		0x30
+#define LP5521_REG_B_PROG_MEM		0x50
+
+#define LP5521_PROG_MEM_BASE		LP5521_REG_R_PROG_MEM
+#define LP5521_PROG_MEM_SIZE		0x20
+
+/* Base register to set LED current */
+#define LP5521_REG_LED_CURRENT_BASE	LP5521_REG_R_CURRENT
+
+/* Base register to set the brightness */
+#define LP5521_REG_LED_PWM_BASE		LP5521_REG_R_PWM
+
+/* Bits in ENABLE register */
+#define LP5521_MASTER_ENABLE		0x40	/* Chip master enable */
+#define LP5521_LOGARITHMIC_PWM		0x80	/* Logarithmic PWM adjustment */
+#define LP5521_EXEC_RUN			0x2A
+
+/* Bits in CONFIG register */
+#define LP5521_PWM_HF			0x40	/* PWM: 0 = 256Hz, 1 = 558Hz */
+#define LP5521_PWRSAVE_EN		0x20	/* 1 = Power save mode */
+#define LP5521_CP_MODE_OFF		0	/* Charge pump (CP) off */
+#define LP5521_CP_MODE_BYPASS		8	/* CP forced to bypass mode */
+#define LP5521_CP_MODE_1X5		0x10	/* CP forced to 1.5x mode */
+#define LP5521_CP_MODE_AUTO		0x18	/* Automatic mode selection */
+#define LP5521_R_TO_BATT		4	/* R out: 0 = CP, 1 = Vbat */
+#define LP5521_CLK_SRC_EXT		0	/* Ext-clk source (CLK_32K) */
+#define LP5521_CLK_INT			1	/* Internal clock */
+#define LP5521_CLK_AUTO			2	/* Automatic clock selection */
+
+/* Status */
+#define LP5521_EXT_CLK_USED		0x08
+
+struct lp5521_engine {
+	const struct attribute_group *attributes;
+	int		id;
+	u8		mode;
+	u8		prog_page;
+	u8		engine_mask;
+};
+
+struct lp5521_led {
+	int			id;
+	u8			chan_nr;
+	u8			led_current;
+	u8			max_current;
+	struct led_classdev	cdev;
+	struct work_struct	brightness_work;
+	u8			brightness;
+};
+
+struct lp5521_chip {
+	struct lp5521_platform_data *pdata;
+	struct mutex		lock; /* Serialize control */
+	struct i2c_client	*client;
+	struct lp5521_engine	engines[LP5521_MAX_ENGINES];
+	struct lp5521_led	leds[LP5521_MAX_LEDS];
+	u8			num_channels;
+	u8			num_leds;
+};
+
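+/* container_of helpers: classdev to led, and engine/led back to the chip */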
+#define cdev_to_led(c)		container_of(c, struct lp5521_led, cdev)
+#define engine_to_lp5521(eng)	container_of((eng), struct lp5521_chip, \
+						engines[(eng)->id - 1])
+#define led_to_lp5521(led)	container_of((led), struct lp5521_chip, \
+						leds[(led)->id])
+
+static void lp5521_led_brightness_work(struct work_struct *work);
+
+static inline int lp5521_write(struct i2c_client *client, u8 reg, u8 value)
+{
+	return i2c_smbus_write_byte_data(client, reg, value);
+}
+
+static int lp5521_read(struct i2c_client *client, u8 reg, u8 *buf)
+{
+	s32 ret;
+
+	ret = i2c_smbus_read_byte_data(client, reg);
+	if (ret < 0)
+		return -EIO;
+
+	*buf = ret;
+	return 0;
+}
+
+static int lp5521_set_engine_mode(struct lp5521_engine *engine, u8 mode)
+{
+	struct lp5521_chip *chip = engine_to_lp5521(engine);
+	struct i2c_client *client = chip->client;
+	int ret;
+	u8 engine_state;
+
+	/* Only transitions between RUN and DIRECT mode are handled here */
+	if (mode == LP5521_CMD_LOAD)
+		return 0;
+
+	if (mode == LP5521_CMD_DISABLED)
+		mode = LP5521_CMD_DIRECT;
+
+	ret = lp5521_read(client, LP5521_REG_OP_MODE, &engine_state);
+
+	/* set mode only for this engine */
+	engine_state &= ~(engine->engine_mask);
+	mode &= engine->engine_mask;
+	engine_state |= mode;
+	ret |= lp5521_write(client, LP5521_REG_OP_MODE, engine_state);
+
+	return ret;
+}
+
+static int lp5521_load_program(struct lp5521_engine *eng, const u8 *pattern)
+{
+	struct lp5521_chip *chip = engine_to_lp5521(eng);
+	struct i2c_client *client = chip->client;
+	int ret;
+	int addr;
+	u8 mode;
+
+	/* move current engine to direct mode and remember the state */
+	ret = lp5521_set_engine_mode(eng, LP5521_CMD_DIRECT);
+	usleep_range(1000, 10000);
+	ret |= lp5521_read(client, LP5521_REG_OP_MODE, &mode);
+
+	/* For loading, switch all the engines to load mode */
+	lp5521_write(client, LP5521_REG_OP_MODE, LP5521_CMD_DIRECT);
+	usleep_range(1000, 10000);
+	lp5521_write(client, LP5521_REG_OP_MODE, LP5521_CMD_LOAD);
+	usleep_range(1000, 10000);
+
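+	/* Each engine uses its own 32-byte page of program memory */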
+	addr = LP5521_PROG_MEM_BASE + eng->prog_page * LP5521_PROG_MEM_SIZE;
+	i2c_smbus_write_i2c_block_data(client,
+				addr,
+				LP5521_PROG_MEM_SIZE,
+				pattern);
+
+	ret |= lp5521_write(client, LP5521_REG_OP_MODE, mode);
+	return ret;
+}
+
+static int lp5521_set_led_current(struct lp5521_chip *chip, int led, u8 curr)
+{
+	return lp5521_write(chip->client,
+		    LP5521_REG_LED_CURRENT_BASE + chip->leds[led].chan_nr,
+		    curr);
+}
+
+static void lp5521_init_engine(struct lp5521_chip *chip,
+			const struct attribute_group *attr_group)
+{
+	int i;
+	for (i = 0; i < ARRAY_SIZE(chip->engines); i++) {
+		chip->engines[i].id = i + 1;
+		chip->engines[i].engine_mask = LP5521_ENG_MASK_BASE >> (i * 2);
+		chip->engines[i].prog_page = i;
+		chip->engines[i].attributes = &attr_group[i];
+	}
+}
+
+static int lp5521_configure(struct i2c_client *client,
+			const struct attribute_group *attr_group)
+{
+	struct lp5521_chip *chip = i2c_get_clientdata(client);
+	int ret;
+
+	lp5521_init_engine(chip, attr_group);
+
+	lp5521_write(client, LP5521_REG_RESET, 0xff);
+
+	usleep_range(10000, 20000);
+
+	/* Set all PWMs to direct control mode */
+	ret = lp5521_write(client, LP5521_REG_OP_MODE, 0x3F);
+
+	/* Enable auto-powersave, set charge pump to auto, red to battery */
+	ret |= lp5521_write(client, LP5521_REG_CONFIG,
+		LP5521_PWRSAVE_EN | LP5521_CP_MODE_AUTO | LP5521_R_TO_BATT);
+
+	/* Initialize all channels PWM to zero -> leds off */
+	ret |= lp5521_write(client, LP5521_REG_R_PWM, 0);
+	ret |= lp5521_write(client, LP5521_REG_G_PWM, 0);
+	ret |= lp5521_write(client, LP5521_REG_B_PWM, 0);
+
+	/* Engines are set to the run state when OP_MODE enables them */
+	ret |= lp5521_write(client, LP5521_REG_ENABLE,
+			LP5521_MASTER_ENABLE | LP5521_LOGARITHMIC_PWM |
+			LP5521_EXEC_RUN);
+	/* enable takes 500us */
+	usleep_range(500, 20000);
+
+	return ret;
+}
+
+static int lp5521_run_selftest(struct lp5521_chip *chip, char *buf)
+{
+	int ret;
+	u8 status;
+
+	ret = lp5521_read(chip->client, LP5521_REG_STATUS, &status);
+	if (ret < 0)
+		return ret;
+
+	/* Check that ext clock is really in use if requested */
+	if (chip->pdata && chip->pdata->clock_mode == LP5521_CLOCK_EXT)
+		if  ((status & LP5521_EXT_CLK_USED) == 0)
+			return -EIO;
+	return 0;
+}
+
+static void lp5521_set_brightness(struct led_classdev *cdev,
+			     enum led_brightness brightness)
+{
+	struct lp5521_led *led = cdev_to_led(cdev);
+	led->brightness = (u8)brightness;
+	schedule_work(&led->brightness_work);
+}
+
+static void lp5521_led_brightness_work(struct work_struct *work)
+{
+	struct lp5521_led *led = container_of(work,
+					      struct lp5521_led,
+					      brightness_work);
+	struct lp5521_chip *chip = led_to_lp5521(led);
+	struct i2c_client *client = chip->client;
+
+	mutex_lock(&chip->lock);
+	lp5521_write(client, LP5521_REG_LED_PWM_BASE + led->chan_nr,
+		led->brightness);
+	mutex_unlock(&chip->lock);
+}
+
+/* Detect the chip by setting its ENABLE register and reading it back. */
+static int lp5521_detect(struct i2c_client *client)
+{
+	int ret;
+	u8 buf;
+
+	ret = lp5521_write(client, LP5521_REG_ENABLE,
+			LP5521_MASTER_ENABLE | LP5521_LOGARITHMIC_PWM);
+	if (ret)
+		return ret;
+	usleep_range(1000, 10000);
+	ret = lp5521_read(client, LP5521_REG_ENABLE, &buf);
+	if (ret)
+		return ret;
+	if (buf != (LP5521_MASTER_ENABLE | LP5521_LOGARITHMIC_PWM))
+		return -ENODEV;
+
+	return 0;
+}
+
+/* Set engine mode and create appropriate sysfs attributes, if required. */
+static int lp5521_set_mode(struct lp5521_engine *engine, u8 mode)
+{
+	struct lp5521_chip *chip = engine_to_lp5521(engine);
+	struct i2c_client *client = chip->client;
+	struct device *dev = &client->dev;
+	int ret = 0;
+
+	/* if already in that mode, do nothing (except for run) */
+	if (mode == engine->mode && mode != LP5521_CMD_RUN)
+		return 0;
+
+	if (mode == LP5521_CMD_RUN) {
+		ret = lp5521_set_engine_mode(engine, LP5521_CMD_RUN);
+	} else if (mode == LP5521_CMD_LOAD) {
+		lp5521_set_engine_mode(engine, LP5521_CMD_DISABLED);
+		lp5521_set_engine_mode(engine, LP5521_CMD_LOAD);
+
+		ret = sysfs_create_group(&dev->kobj, engine->attributes);
+		if (ret)
+			return ret;
+	} else if (mode == LP5521_CMD_DISABLED) {
+		lp5521_set_engine_mode(engine, LP5521_CMD_DISABLED);
+	}
+
+	/* remove load attribute from sysfs if not in load mode */
+	if (engine->mode == LP5521_CMD_LOAD && mode != LP5521_CMD_LOAD)
+		sysfs_remove_group(&dev->kobj, engine->attributes);
+
+	engine->mode = mode;
+
+	return ret;
+}
+
+static int lp5521_do_store_load(struct lp5521_engine *engine,
+				const char *buf, size_t len)
+{
+	struct lp5521_chip *chip = engine_to_lp5521(engine);
+	struct i2c_client *client = chip->client;
+	int  ret, nrchars, offset = 0, i = 0;
+	char c[3];
+	unsigned cmd;
+	u8 pattern[LP5521_PROGRAM_LENGTH] = {0};
+
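+	/* Parse the buffer as a sequence of two-digit hex bytes */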
+	while ((offset < len - 1) && (i < LP5521_PROGRAM_LENGTH)) {
+		/* separate sscanfs because the length specifier works only for %s */
+		ret = sscanf(buf + offset, "%2s%n ", c, &nrchars);
+		ret = sscanf(c, "%2x", &cmd);
+		if (ret != 1)
+			goto fail;
+		pattern[i] = (u8)cmd;
+
+		offset += nrchars;
+		i++;
+	}
+
+	/* Each instruction is 16 bits long. Check that the length is even */
+	if (i % 2)
+		goto fail;
+
+	mutex_lock(&chip->lock);
+	ret = lp5521_load_program(engine, pattern);
+	mutex_unlock(&chip->lock);
+
+	if (ret) {
+		dev_err(&client->dev, "failed loading pattern\n");
+		return ret;
+	}
+
+	return len;
+fail:
+	dev_err(&client->dev, "wrong pattern format\n");
+	return -EINVAL;
+}
+
+static ssize_t store_engine_load(struct device *dev,
+				     struct device_attribute *attr,
+				     const char *buf, size_t len, int nr)
+{
+	struct i2c_client *client = to_i2c_client(dev);
+	struct lp5521_chip *chip = i2c_get_clientdata(client);
+	return lp5521_do_store_load(&chip->engines[nr - 1], buf, len);
+}
+
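+/* Generate the per-engine sysfs "load" store functions (engine1..3_load) */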
+#define store_load(nr)							\
+static ssize_t store_engine##nr##_load(struct device *dev,		\
+				     struct device_attribute *attr,	\
+				     const char *buf, size_t len)	\
+{									\
+	return store_engine_load(dev, attr, buf, len, nr);		\
+}
+store_load(1)
+store_load(2)
+store_load(3)
+
+static ssize_t show_engine_mode(struct device *dev,
+				struct device_attribute *attr,
+				char *buf, int nr)
+{
+	struct i2c_client *client = to_i2c_client(dev);
+	struct lp5521_chip *chip = i2c_get_clientdata(client);
+	switch (chip->engines[nr - 1].mode) {
+	case LP5521_CMD_RUN:
+		return sprintf(buf, "run\n");
+	case LP5521_CMD_LOAD:
+		return sprintf(buf, "load\n");
+	case LP5521_CMD_DISABLED:
+		return sprintf(buf, "disabled\n");
+	default:
+		return sprintf(buf, "disabled\n");
+	}
+}
+
+#define show_mode(nr)							\
+static ssize_t show_engine##nr##_mode(struct device *dev,		\
+				    struct device_attribute *attr,	\
+				    char *buf)				\
+{									\
+	return show_engine_mode(dev, attr, buf, nr);			\
+}
+show_mode(1)
+show_mode(2)
+show_mode(3)
+
+static ssize_t store_engine_mode(struct device *dev,
+				 struct device_attribute *attr,
+				 const char *buf, size_t len, int nr)
+{
+	struct i2c_client *client = to_i2c_client(dev);
+	struct lp5521_chip *chip = i2c_get_clientdata(client);
+	struct lp5521_engine *engine = &chip->engines[nr - 1];
+	mutex_lock(&chip->lock);
+
+	if (!strncmp(buf, "run", 3))
+		lp5521_set_mode(engine, LP5521_CMD_RUN);
+	else if (!strncmp(buf, "load", 4))
+		lp5521_set_mode(engine, LP5521_CMD_LOAD);
+	else if (!strncmp(buf, "disabled", 8))
+		lp5521_set_mode(engine, LP5521_CMD_DISABLED);
+
+	mutex_unlock(&chip->lock);
+	return len;
+}
+
+#define store_mode(nr)							\
+static ssize_t store_engine##nr##_mode(struct device *dev,		\
+				     struct device_attribute *attr,	\
+				     const char *buf, size_t len)	\
+{									\
+	return store_engine_mode(dev, attr, buf, len, nr);		\
+}
+store_mode(1)
+store_mode(2)
+store_mode(3)
+
+static ssize_t show_max_current(struct device *dev,
+			    struct device_attribute *attr,
+			    char *buf)
+{
+	struct led_classdev *led_cdev = dev_get_drvdata(dev);
+	struct lp5521_led *led = cdev_to_led(led_cdev);
+
+	return sprintf(buf, "%d\n", led->max_current);
+}
+
+static ssize_t show_current(struct device *dev,
+			    struct device_attribute *attr,
+			    char *buf)
+{
+	struct led_classdev *led_cdev = dev_get_drvdata(dev);
+	struct lp5521_led *led = cdev_to_led(led_cdev);
+
+	return sprintf(buf, "%d\n", led->led_current);
+}
+
+static ssize_t store_current(struct device *dev,
+			     struct device_attribute *attr,
+			     const char *buf, size_t len)
+{
+	struct led_classdev *led_cdev = dev_get_drvdata(dev);
+	struct lp5521_led *led = cdev_to_led(led_cdev);
+	struct lp5521_chip *chip = led_to_lp5521(led);
+	ssize_t ret;
+	unsigned long curr;
+
+	if (strict_strtoul(buf, 0, &curr))
+		return -EINVAL;
+
+	if (curr > led->max_current)
+		return -EINVAL;
+
+	mutex_lock(&chip->lock);
+	ret = lp5521_set_led_current(chip, led->id, curr);
+	mutex_unlock(&chip->lock);
+
+	if (ret < 0)
+		return ret;
+
+	led->led_current = (u8)curr;
+
+	return len;
+}
+
+static ssize_t lp5521_selftest(struct device *dev,
+			       struct device_attribute *attr,
+			       char *buf)
+{
+	struct i2c_client *client = to_i2c_client(dev);
+	struct lp5521_chip *chip = i2c_get_clientdata(client);
+	int ret;
+
+	mutex_lock(&chip->lock);
+	ret = lp5521_run_selftest(chip, buf);
+	mutex_unlock(&chip->lock);
+	return sprintf(buf, "%s\n", ret ? "FAIL" : "OK");
+}
+
+/* led class device attributes */
+static DEVICE_ATTR(led_current, S_IRUGO | S_IWUGO, show_current, store_current);
+static DEVICE_ATTR(max_current, S_IRUGO, show_max_current, NULL);
+
+static struct attribute *lp5521_led_attributes[] = {
+	&dev_attr_led_current.attr,
+	&dev_attr_max_current.attr,
+	NULL,
+};
+
+static struct attribute_group lp5521_led_attribute_group = {
+	.attrs = lp5521_led_attributes
+};
+
+/* device attributes */
+static DEVICE_ATTR(engine1_mode, S_IRUGO | S_IWUGO,
+		   show_engine1_mode, store_engine1_mode);
+static DEVICE_ATTR(engine2_mode, S_IRUGO | S_IWUGO,
+		   show_engine2_mode, store_engine2_mode);
+static DEVICE_ATTR(engine3_mode, S_IRUGO | S_IWUGO,
+		   show_engine3_mode, store_engine3_mode);
+static DEVICE_ATTR(engine1_load, S_IWUGO, NULL, store_engine1_load);
+static DEVICE_ATTR(engine2_load, S_IWUGO, NULL, store_engine2_load);
+static DEVICE_ATTR(engine3_load, S_IWUGO, NULL, store_engine3_load);
+static DEVICE_ATTR(selftest, S_IRUGO, lp5521_selftest, NULL);
+
+static struct attribute *lp5521_attributes[] = {
+	&dev_attr_engine1_mode.attr,
+	&dev_attr_engine2_mode.attr,
+	&dev_attr_engine3_mode.attr,
+	&dev_attr_selftest.attr,
+	NULL
+};
+
+static struct attribute *lp5521_engine1_attributes[] = {
+	&dev_attr_engine1_load.attr,
+	NULL
+};
+
+static struct attribute *lp5521_engine2_attributes[] = {
+	&dev_attr_engine2_load.attr,
+	NULL
+};
+
+static struct attribute *lp5521_engine3_attributes[] = {
+	&dev_attr_engine3_load.attr,
+	NULL
+};
+
+static const struct attribute_group lp5521_group = {
+	.attrs = lp5521_attributes,
+};
+
+static const struct attribute_group lp5521_engine_group[] = {
+	{.attrs = lp5521_engine1_attributes },
+	{.attrs = lp5521_engine2_attributes },
+	{.attrs = lp5521_engine3_attributes },
+};
+
+static int lp5521_register_sysfs(struct i2c_client *client)
+{
+	struct device *dev = &client->dev;
+	return sysfs_create_group(&dev->kobj, &lp5521_group);
+}
+
+static void lp5521_unregister_sysfs(struct i2c_client *client)
+{
+	struct lp5521_chip *chip = i2c_get_clientdata(client);
+	struct device *dev = &client->dev;
+	int i;
+
+	sysfs_remove_group(&dev->kobj, &lp5521_group);
+
+	for (i = 0; i <  ARRAY_SIZE(chip->engines); i++) {
+		if (chip->engines[i].mode == LP5521_CMD_LOAD)
+			sysfs_remove_group(&dev->kobj,
+					chip->engines[i].attributes);
+	}
+
+	for (i = 0; i < chip->num_leds; i++)
+		sysfs_remove_group(&chip->leds[i].cdev.dev->kobj,
+				&lp5521_led_attribute_group);
+}
+
+static int __init lp5521_init_led(struct lp5521_led *led,
+				struct i2c_client *client,
+				int chan, struct lp5521_platform_data *pdata)
+{
+	struct device *dev = &client->dev;
+	char name[32];
+	int res;
+
+	if (chan >= LP5521_MAX_LEDS)
+		return -EINVAL;
+
+	if (pdata->led_config[chan].led_current == 0)
+		return 0;
+
+	led->led_current = pdata->led_config[chan].led_current;
+	led->max_current = pdata->led_config[chan].max_current;
+	led->chan_nr = pdata->led_config[chan].chan_nr;
+
+	if (led->chan_nr >= LP5521_MAX_LEDS) {
+		dev_err(dev, "Use channel numbers between 0 and %d\n",
+			LP5521_MAX_LEDS - 1);
+		return -EINVAL;
+	}
+
+	snprintf(name, sizeof(name), "%s:channel%d", client->name, chan);
+	led->cdev.brightness_set = lp5521_set_brightness;
+	led->cdev.name = name;
+	res = led_classdev_register(dev, &led->cdev);
+	if (res < 0) {
+		dev_err(dev, "couldn't register led on channel %d\n", chan);
+		return res;
+	}
+
+	res = sysfs_create_group(&led->cdev.dev->kobj,
+			&lp5521_led_attribute_group);
+	if (res < 0) {
+		dev_err(dev, "couldn't register current attribute\n");
+		led_classdev_unregister(&led->cdev);
+		return res;
+	}
+	return 0;
+}
+
+static int lp5521_probe(struct i2c_client *client,
+			const struct i2c_device_id *id)
+{
+	struct lp5521_chip		*chip;
+	struct lp5521_platform_data	*pdata;
+	int ret, i, led;
+
+	chip = kzalloc(sizeof(*chip), GFP_KERNEL);
+	if (!chip)
+		return -ENOMEM;
+
+	i2c_set_clientdata(client, chip);
+	chip->client = client;
+
+	pdata = client->dev.platform_data;
+
+	if (!pdata) {
+		dev_err(&client->dev, "no platform data\n");
+		ret = -EINVAL;
+		goto fail1;
+	}
+
+	mutex_init(&chip->lock);
+
+	chip->pdata   = pdata;
+
+	if (pdata->setup_resources) {
+		ret = pdata->setup_resources();
+		if (ret < 0)
+			goto fail1;
+	}
+
+	if (pdata->enable) {
+		pdata->enable(0);
+		usleep_range(1000, 10000);
+		pdata->enable(1);
+		usleep_range(1000, 10000); /* Spec says min 500us */
+	}
+
+	ret = lp5521_detect(client);
+
+	if (ret) {
+		dev_err(&client->dev, "Chip not found\n");
+		goto fail2;
+	}
+
+	dev_info(&client->dev, "%s programmable led chip found\n", id->name);
+
+	ret = lp5521_configure(client, lp5521_engine_group);
+	if (ret < 0) {
+		dev_err(&client->dev, "error configuring chip\n");
+		goto fail2;
+	}
+
+	/* Initialize leds */
+	chip->num_channels = pdata->num_channels;
+	chip->num_leds = 0;
+	led = 0;
+	for (i = 0; i < pdata->num_channels; i++) {
+		/* Do not initialize channels that are not connected */
+		if (pdata->led_config[i].led_current == 0)
+			continue;
+
+		ret = lp5521_init_led(&chip->leds[led], client, i, pdata);
+		if (ret) {
+			dev_err(&client->dev, "error initializing leds\n");
+			goto fail3;
+		}
+		chip->num_leds++;
+
+		chip->leds[led].id = led;
+		/* Set initial LED current */
+		lp5521_set_led_current(chip, led,
+				chip->leds[led].led_current);
+
+		INIT_WORK(&(chip->leds[led].brightness_work),
+			lp5521_led_brightness_work);
+
+		led++;
+	}
+
+	ret = lp5521_register_sysfs(client);
+	if (ret) {
+		dev_err(&client->dev, "registering sysfs failed\n");
+		goto fail3;
+	}
+	return ret;
+fail3:
+	for (i = 0; i < chip->num_leds; i++) {
+		led_classdev_unregister(&chip->leds[i].cdev);
+		cancel_work_sync(&chip->leds[i].brightness_work);
+	}
+fail2:
+	if (pdata->enable)
+		pdata->enable(0);
+	if (pdata->release_resources)
+		pdata->release_resources();
+fail1:
+	kfree(chip);
+	return ret;
+}
+
+static int lp5521_remove(struct i2c_client *client)
+{
+	struct lp5521_chip *chip = i2c_get_clientdata(client);
+	int i;
+
+	lp5521_unregister_sysfs(client);
+
+	for (i = 0; i < chip->num_leds; i++) {
+		led_classdev_unregister(&chip->leds[i].cdev);
+		cancel_work_sync(&chip->leds[i].brightness_work);
+	}
+
+	if (chip->pdata->enable)
+		chip->pdata->enable(0);
+	if (chip->pdata->release_resources)
+		chip->pdata->release_resources();
+	kfree(chip);
+	return 0;
+}
+
+static const struct i2c_device_id lp5521_id[] = {
+	{ "lp5521", 0 }, /* Three channel chip */
+	{ }
+};
+MODULE_DEVICE_TABLE(i2c, lp5521_id);
+
+static struct i2c_driver lp5521_driver = {
+	.driver = {
+		.name	= "lp5521",
+	},
+	.probe		= lp5521_probe,
+	.remove		= lp5521_remove,
+	.id_table	= lp5521_id,
+};
+
+static int __init lp5521_init(void)
+{
+	int ret;
+
+	ret = i2c_add_driver(&lp5521_driver);
+
+	if (ret < 0)
+		printk(KERN_ALERT "Adding lp5521 driver failed\n");
+
+	return ret;
+}
+
+static void __exit lp5521_exit(void)
+{
+	i2c_del_driver(&lp5521_driver);
+}
+
+module_init(lp5521_init);
+module_exit(lp5521_exit);
+
+MODULE_AUTHOR("Mathias Nyman, Yuri Zaporozhets, Samu Onkalo");
+MODULE_DESCRIPTION("LP5521 LED engine");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/leds/leds-lp5523.c b/drivers/leds/leds-lp5523.c
new file mode 100644
index 0000000..1e11fcc
--- /dev/null
+++ b/drivers/leds/leds-lp5523.c
@@ -0,0 +1,1065 @@
+/*
+ * lp5523.c - LP5523 LED Driver
+ *
+ * Copyright (C) 2010 Nokia Corporation
+ *
+ * Contact: Samu Onkalo <samu.p.onkalo@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/i2c.h>
+#include <linux/mutex.h>
+#include <linux/gpio.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/ctype.h>
+#include <linux/spinlock.h>
+#include <linux/wait.h>
+#include <linux/leds.h>
+#include <linux/leds-lp5523.h>
+#include <linux/workqueue.h>
+#include <linux/slab.h>
+
+#define LP5523_REG_ENABLE		0x00
+#define LP5523_REG_OP_MODE		0x01
+#define LP5523_REG_RATIOMETRIC_MSB	0x02
+#define LP5523_REG_RATIOMETRIC_LSB	0x03
+#define LP5523_REG_ENABLE_LEDS_MSB	0x04
+#define LP5523_REG_ENABLE_LEDS_LSB	0x05
+#define LP5523_REG_LED_CNTRL_BASE	0x06
+#define LP5523_REG_LED_PWM_BASE		0x16
+#define LP5523_REG_LED_CURRENT_BASE	0x26
+#define LP5523_REG_CONFIG		0x36
+#define LP5523_REG_CHANNEL1_PC		0x37
+#define LP5523_REG_CHANNEL2_PC		0x38
+#define LP5523_REG_CHANNEL3_PC		0x39
+#define LP5523_REG_STATUS		0x3a
+#define LP5523_REG_GPO			0x3b
+#define LP5523_REG_VARIABLE		0x3c
+#define LP5523_REG_RESET		0x3d
+#define LP5523_REG_TEMP_CTRL		0x3e
+#define LP5523_REG_TEMP_READ		0x3f
+#define LP5523_REG_TEMP_WRITE		0x40
+#define LP5523_REG_LED_TEST_CTRL	0x41
+#define LP5523_REG_LED_TEST_ADC		0x42
+#define LP5523_REG_ENG1_VARIABLE	0x45
+#define LP5523_REG_ENG2_VARIABLE	0x46
+#define LP5523_REG_ENG3_VARIABLE	0x47
+#define LP5523_REG_MASTER_FADER1	0x48
+#define LP5523_REG_MASTER_FADER2	0x49
+#define LP5523_REG_MASTER_FADER3	0x4a
+#define LP5523_REG_CH1_PROG_START	0x4c
+#define LP5523_REG_CH2_PROG_START	0x4d
+#define LP5523_REG_CH3_PROG_START	0x4e
+#define LP5523_REG_PROG_PAGE_SEL	0x4f
+#define LP5523_REG_PROG_MEM		0x50
+
+#define LP5523_CMD_LOAD			0x15 /* 00010101 */
+#define LP5523_CMD_RUN			0x2a /* 00101010 */
+#define LP5523_CMD_DISABLED		0x00 /* 00000000 */
+
+#define LP5523_ENABLE			0x40
+#define LP5523_AUTO_INC			0x40
+#define LP5523_PWR_SAVE			0x20
+#define LP5523_PWM_PWR_SAVE		0x04
+#define LP5523_CP_1			0x08
+#define LP5523_CP_1_5			0x10
+#define LP5523_CP_AUTO			0x18
+#define LP5523_INT_CLK			0x01
+#define LP5523_AUTO_CLK			0x02
+#define LP5523_EN_LEDTEST		0x80
+#define LP5523_LEDTEST_DONE		0x80
+
+#define LP5523_DEFAULT_CURRENT		50 /* microAmps */
+#define LP5523_PROGRAM_LENGTH		32 /* in bytes */
+#define LP5523_PROGRAM_PAGES		6
+#define LP5523_ADC_SHORTCIRC_LIM	80
+
+#define LP5523_LEDS			9
+#define LP5523_ENGINES			3
+
+#define LP5523_ENG_MASK_BASE		0x30 /* 00110000 */
+
+#define LP5523_ENG_STATUS_MASK          0x07 /* 00000111 */
+
+#define LP5523_IRQ_FLAGS                IRQF_TRIGGER_FALLING
+
+#define LP5523_EXT_CLK_USED		0x08
+
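+/*
+ * LED_ACTIVE tests whether a channel is selected in an engine's 16-bit mux.
+ * SHIFT_MASK gives the shift of engine 'id's two mode bits from the base mask.
+ */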
+#define LED_ACTIVE(mux, led)		(!!(mux & (0x0001 << led)))
+#define SHIFT_MASK(id)			(((id) - 1) * 2)
+
+struct lp5523_engine {
+	const struct attribute_group *attributes;
+	int		id;
+	u8		mode;
+	u8		prog_page;
+	u8		mux_page;
+	u16		led_mux;
+	u8		engine_mask;
+};
+
+struct lp5523_led {
+	int			id;
+	u8			chan_nr;
+	u8			led_current;
+	u8			max_current;
+	struct led_classdev     cdev;
+	struct work_struct	brightness_work;
+	u8			brightness;
+};
+
+struct lp5523_chip {
+	struct mutex		lock; /* Serialize control */
+	struct i2c_client	*client;
+	struct lp5523_engine	engines[LP5523_ENGINES];
+	struct lp5523_led	leds[LP5523_LEDS];
+	struct lp5523_platform_data *pdata;
+	u8			num_channels;
+	u8			num_leds;
+};
+
+#define cdev_to_led(c)          container_of(c, struct lp5523_led, cdev)
+
+static struct lp5523_chip *engine_to_lp5523(struct lp5523_engine *engine)
+{
+	return container_of(engine, struct lp5523_chip,
+			    engines[engine->id - 1]);
+}
+
+static struct lp5523_chip *led_to_lp5523(struct lp5523_led *led)
+{
+	return container_of(led, struct lp5523_chip,
+			    leds[led->id]);
+}
+
+static int lp5523_set_mode(struct lp5523_engine *engine, u8 mode);
+static int lp5523_set_engine_mode(struct lp5523_engine *engine, u8 mode);
+static int lp5523_load_program(struct lp5523_engine *engine, u8 *pattern);
+
+static void lp5523_led_brightness_work(struct work_struct *work);
+
+static int lp5523_write(struct i2c_client *client, u8 reg, u8 value)
+{
+	return i2c_smbus_write_byte_data(client, reg, value);
+}
+
+static int lp5523_read(struct i2c_client *client, u8 reg, u8 *buf)
+{
+	s32 ret = i2c_smbus_read_byte_data(client, reg);
+
+	if (ret < 0)
+		return -EIO;
+
+	*buf = ret;
+	return 0;
+}
+
+static int lp5523_detect(struct i2c_client *client)
+{
+	int ret;
+	u8 buf;
+
+	ret = lp5523_write(client, LP5523_REG_ENABLE, 0x40);
+	if (ret)
+		return ret;
+	ret = lp5523_read(client, LP5523_REG_ENABLE, &buf);
+	if (ret)
+		return ret;
+	if (buf == 0x40)
+		return 0;
+	else
+		return -ENODEV;
+}
+
+static int lp5523_configure(struct i2c_client *client)
+{
+	struct lp5523_chip *chip = i2c_get_clientdata(client);
+	int ret = 0;
+	u8 status;
+
+	/* one pattern per engine, setting the LED mux start and stop addresses */
+	u8 pattern[][LP5523_PROGRAM_LENGTH] =  {
+		{ 0x9c, 0x30, 0x9c, 0xb0, 0x9d, 0x80, 0xd8, 0x00, 0},
+		{ 0x9c, 0x40, 0x9c, 0xc0, 0x9d, 0x80, 0xd8, 0x00, 0},
+		{ 0x9c, 0x50, 0x9c, 0xd0, 0x9d, 0x80, 0xd8, 0x00, 0},
+	};
+
+	lp5523_write(client, LP5523_REG_RESET, 0xff);
+
+	usleep_range(10000, 100000);
+
+	ret |= lp5523_write(client, LP5523_REG_ENABLE, LP5523_ENABLE);
+	/* Chip startup time after reset is 500 us */
+	usleep_range(1000, 10000);
+
+	ret |= lp5523_write(client, LP5523_REG_CONFIG,
+			    LP5523_AUTO_INC | LP5523_PWR_SAVE |
+			    LP5523_CP_AUTO | LP5523_AUTO_CLK |
+			    LP5523_PWM_PWR_SAVE);
+
+	/* turn on all leds */
+	ret |= lp5523_write(client, LP5523_REG_ENABLE_LEDS_MSB, 0x01);
+	ret |= lp5523_write(client, LP5523_REG_ENABLE_LEDS_LSB, 0xff);
+
+	/* hardcode 32 bytes of memory for each engine from program memory */
+	ret |= lp5523_write(client, LP5523_REG_CH1_PROG_START, 0x00);
+	ret |= lp5523_write(client, LP5523_REG_CH2_PROG_START, 0x10);
+	ret |= lp5523_write(client, LP5523_REG_CH3_PROG_START, 0x20);
+
+	/* write led mux address space for each channel */
+	ret |= lp5523_load_program(&chip->engines[0], pattern[0]);
+	ret |= lp5523_load_program(&chip->engines[1], pattern[1]);
+	ret |= lp5523_load_program(&chip->engines[2], pattern[2]);
+
+	if (ret) {
+		dev_err(&client->dev, "could not load mux programs\n");
+		return -1;
+	}
+
+	/* set all engines exec state and mode to run 00101010 */
+	ret |= lp5523_write(client, LP5523_REG_ENABLE,
+			    (LP5523_CMD_RUN | LP5523_ENABLE));
+
+	ret |= lp5523_write(client, LP5523_REG_OP_MODE, LP5523_CMD_RUN);
+
+	if (ret) {
+		dev_err(&client->dev, "could not start mux programs\n");
+		return -1;
+	}
+
+	/* Wait 3ms and check the engine status */
+	usleep_range(3000, 20000);
+	lp5523_read(client, LP5523_REG_STATUS, &status);
+	status &= LP5523_ENG_STATUS_MASK;
+
+	if (status == LP5523_ENG_STATUS_MASK) {
+		dev_dbg(&client->dev, "all engines configured\n");
+	} else {
+		dev_info(&client->dev, "status == %x\n", status);
+		dev_err(&client->dev, "could not configure LED engine\n");
+		return -1;
+	}
+
+	dev_info(&client->dev, "disabling engines\n");
+
+	ret |= lp5523_write(client, LP5523_REG_OP_MODE, LP5523_CMD_DISABLED);
+
+	return ret;
+}
+
+static int lp5523_set_engine_mode(struct lp5523_engine *engine, u8 mode)
+{
+	struct lp5523_chip *chip = engine_to_lp5523(engine);
+	struct i2c_client *client = chip->client;
+	int ret;
+	u8 engine_state;
+
+	ret = lp5523_read(client, LP5523_REG_OP_MODE, &engine_state);
+	if (ret)
+		goto fail;
+
+	engine_state &= ~(engine->engine_mask);
+
+	/* set mode only for this engine */
+	mode &= engine->engine_mask;
+
+	engine_state |= mode;
+
+	ret |= lp5523_write(client, LP5523_REG_OP_MODE, engine_state);
+fail:
+	return ret;
+}
+
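+/* Write an engine's 16-bit LED mux into its dedicated page of program memory */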
+static int lp5523_load_mux(struct lp5523_engine *engine, u16 mux)
+{
+	struct lp5523_chip *chip = engine_to_lp5523(engine);
+	struct i2c_client *client = chip->client;
+	int ret = 0;
+
+	ret |= lp5523_set_engine_mode(engine, LP5523_CMD_LOAD);
+
+	ret |= lp5523_write(client, LP5523_REG_PROG_PAGE_SEL, engine->mux_page);
+	ret |= lp5523_write(client, LP5523_REG_PROG_MEM,
+			    (u8)(mux >> 8));
+	ret |= lp5523_write(client, LP5523_REG_PROG_MEM + 1, (u8)(mux));
+	engine->led_mux = mux;
+
+	return ret;
+}
+
+static int lp5523_load_program(struct lp5523_engine *engine, u8 *pattern)
+{
+	struct lp5523_chip *chip = engine_to_lp5523(engine);
+	struct i2c_client *client = chip->client;
+
+	int ret = 0;
+
+	ret |= lp5523_set_engine_mode(engine, LP5523_CMD_LOAD);
+
+	ret |= lp5523_write(client, LP5523_REG_PROG_PAGE_SEL,
+			    engine->prog_page);
+	ret |= i2c_smbus_write_i2c_block_data(client, LP5523_REG_PROG_MEM,
+					      LP5523_PROGRAM_LENGTH, pattern);
+
+	return ret;
+}
+
+static int lp5523_run_program(struct lp5523_engine *engine)
+{
+	struct lp5523_chip *chip = engine_to_lp5523(engine);
+	struct i2c_client *client = chip->client;
+	int ret;
+
+	ret = lp5523_write(client, LP5523_REG_ENABLE,
+					LP5523_CMD_RUN | LP5523_ENABLE);
+	if (ret)
+		goto fail;
+
+	ret = lp5523_set_engine_mode(engine, LP5523_CMD_RUN);
+fail:
+	return ret;
+}
+
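+/* Parse a string of '0'/'1' characters, one per channel, into the LED mux */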
+static int lp5523_mux_parse(const char *buf, u16 *mux, size_t len)
+{
+	int i;
+	u16 tmp_mux = 0;
+	len = len < LP5523_LEDS ? len : LP5523_LEDS;
+	for (i = 0; i < len; i++) {
+		switch (buf[i]) {
+		case '1':
+			tmp_mux |= (1 << i);
+			break;
+		case '0':
+			break;
+		case '\n':
+			i = len;
+			break;
+		default:
+			return -1;
+		}
+	}
+	*mux = tmp_mux;
+
+	return 0;
+}
+
+static void lp5523_mux_to_array(u16 led_mux, char *array)
+{
+	int i, pos = 0;
+	for (i = 0; i < LP5523_LEDS; i++)
+		pos += sprintf(array + pos, "%x", LED_ACTIVE(led_mux, i));
+
+	array[pos] = '\0';
+}
+
+/*--------------------------------------------------------------*/
+/*			Sysfs interface				*/
+/*--------------------------------------------------------------*/
+
+static ssize_t show_engine_leds(struct device *dev,
+			    struct device_attribute *attr,
+			    char *buf, int nr)
+{
+	struct i2c_client *client = to_i2c_client(dev);
+	struct lp5523_chip *chip = i2c_get_clientdata(client);
+	char mux[LP5523_LEDS + 1];
+
+	lp5523_mux_to_array(chip->engines[nr - 1].led_mux, mux);
+
+	return sprintf(buf, "%s\n", mux);
+}
+
+#define show_leds(nr)							\
+static ssize_t show_engine##nr##_leds(struct device *dev,		\
+			    struct device_attribute *attr,		\
+			    char *buf)					\
+{									\
+	return show_engine_leds(dev, attr, buf, nr);			\
+}
+show_leds(1)
+show_leds(2)
+show_leds(3)
+
+static ssize_t store_engine_leds(struct device *dev,
+			     struct device_attribute *attr,
+			     const char *buf, size_t len, int nr)
+{
+	struct i2c_client *client = to_i2c_client(dev);
+	struct lp5523_chip *chip = i2c_get_clientdata(client);
+	u16 mux = 0;
+
+	if (lp5523_mux_parse(buf, &mux, len))
+		return -EINVAL;
+
+	if (lp5523_load_mux(&chip->engines[nr - 1], mux))
+		return -EINVAL;
+
+	return len;
+}
+
+#define store_leds(nr)						\
+static ssize_t store_engine##nr##_leds(struct device *dev,	\
+			     struct device_attribute *attr,	\
+			     const char *buf, size_t len)	\
+{								\
+	return store_engine_leds(dev, attr, buf, len, nr);	\
+}
+store_leds(1)
+store_leds(2)
+store_leds(3)
+
+static ssize_t lp5523_selftest(struct device *dev,
+			       struct device_attribute *attr,
+			       char *buf)
+{
+	struct i2c_client *client = to_i2c_client(dev);
+	struct lp5523_chip *chip = i2c_get_clientdata(client);
+	int i, ret, pos = 0;
+	int led = 0;
+	u8 status, adc, vdd;
+
+	mutex_lock(&chip->lock);
+
+	ret = lp5523_read(chip->client, LP5523_REG_STATUS, &status);
+	if (ret < 0)
+		goto fail;
+
+	/* Check that ext clock is really in use if requested */
+	if ((chip->pdata) && (chip->pdata->clock_mode == LP5523_CLOCK_EXT))
+		if  ((status & LP5523_EXT_CLK_USED) == 0)
+			goto fail;
+
+	/* Measure VDD (i.e. VBAT) first (channel 16 corresponds to VDD) */
+	lp5523_write(chip->client, LP5523_REG_LED_TEST_CTRL,
+				    LP5523_EN_LEDTEST | 16);
+	usleep_range(3000, 10000);
+	ret = lp5523_read(chip->client, LP5523_REG_STATUS, &status);
+	if (!(status & LP5523_LEDTEST_DONE))
+		usleep_range(3000, 10000);
+
+	ret |= lp5523_read(chip->client, LP5523_REG_LED_TEST_ADC, &vdd);
+	vdd--;	/* There may be some fluctuation in measurement */
+
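+	/*
+	 * Drive each connected LED and report a failure if its test ADC
+	 * reading is not between the short-circuit limit and VDD.
+	 */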
+	for (i = 0; i < LP5523_LEDS; i++) {
+		/* Skip non-existing channels */
+		if (chip->pdata->led_config[i].led_current == 0)
+			continue;
+
+		/* Set default current */
+		lp5523_write(chip->client,
+			LP5523_REG_LED_CURRENT_BASE + i,
+			chip->pdata->led_config[i].led_current);
+
+		lp5523_write(chip->client, LP5523_REG_LED_PWM_BASE + i, 0xff);
+		/* let current stabilize 2ms before measurements start */
+		usleep_range(2000, 10000);
+		lp5523_write(chip->client,
+			     LP5523_REG_LED_TEST_CTRL,
+			     LP5523_EN_LEDTEST | i);
+		/* ledtest takes 2.7ms */
+		usleep_range(3000, 10000);
+		ret = lp5523_read(chip->client, LP5523_REG_STATUS, &status);
+		if (!(status & LP5523_LEDTEST_DONE))
+			usleep_range(3000, 10000);
+		ret |= lp5523_read(chip->client, LP5523_REG_LED_TEST_ADC, &adc);
+
+		if (adc >= vdd || adc < LP5523_ADC_SHORTCIRC_LIM)
+			pos += sprintf(buf + pos, "LED %d FAIL\n", i);
+
+		lp5523_write(chip->client, LP5523_REG_LED_PWM_BASE + i, 0x00);
+
+		/* Restore current */
+		lp5523_write(chip->client,
+			LP5523_REG_LED_CURRENT_BASE + i,
+			chip->leds[led].led_current);
+		led++;
+	}
+	if (pos == 0)
+		pos = sprintf(buf, "OK\n");
+	goto release_lock;
+fail:
+	pos = sprintf(buf, "FAIL\n");
+
+release_lock:
+	mutex_unlock(&chip->lock);
+
+	return pos;
+}
+
+static void lp5523_set_brightness(struct led_classdev *cdev,
+			     enum led_brightness brightness)
+{
+	struct lp5523_led *led = cdev_to_led(cdev);
+
+	led->brightness = (u8)brightness;
+
+	schedule_work(&led->brightness_work);
+}
+
+static void lp5523_led_brightness_work(struct work_struct *work)
+{
+	struct lp5523_led *led = container_of(work,
+					      struct lp5523_led,
+					      brightness_work);
+	struct lp5523_chip *chip = led_to_lp5523(led);
+	struct i2c_client *client = chip->client;
+
+	mutex_lock(&chip->lock);
+
+	lp5523_write(client, LP5523_REG_LED_PWM_BASE + led->chan_nr,
+		     led->brightness);
+
+	mutex_unlock(&chip->lock);
+}
+
+static int lp5523_do_store_load(struct lp5523_engine *engine,
+				const char *buf, size_t len)
+{
+	struct lp5523_chip *chip = engine_to_lp5523(engine);
+	struct i2c_client *client = chip->client;
+	int  ret, nrchars, offset = 0, i = 0;
+	char c[3];
+	unsigned cmd;
+	u8 pattern[LP5523_PROGRAM_LENGTH] = {0};
+
+	while ((offset < len - 1) && (i < LP5523_PROGRAM_LENGTH)) {
+		/* separate sscanfs because the length specifier works only for %s */
+		ret = sscanf(buf + offset, "%2s%n ", c, &nrchars);
+		ret = sscanf(c, "%2x", &cmd);
+		if (ret != 1)
+			goto fail;
+		pattern[i] = (u8)cmd;
+
+		offset += nrchars;
+		i++;
+	}
+
+	/* Each instruction is 16 bits long. Check that the length is even */
+	if (i % 2)
+		goto fail;
+
+	mutex_lock(&chip->lock);
+
+	ret = lp5523_load_program(engine, pattern);
+	mutex_unlock(&chip->lock);
+
+	if (ret) {
+		dev_err(&client->dev, "failed loading pattern\n");
+		return ret;
+	}
+
+	return len;
+fail:
+	dev_err(&client->dev, "wrong pattern format\n");
+	return -EINVAL;
+}
+
+static ssize_t store_engine_load(struct device *dev,
+				     struct device_attribute *attr,
+				     const char *buf, size_t len, int nr)
+{
+	struct i2c_client *client = to_i2c_client(dev);
+	struct lp5523_chip *chip = i2c_get_clientdata(client);
+	return lp5523_do_store_load(&chip->engines[nr - 1], buf, len);
+}
+
+#define store_load(nr)							\
+static ssize_t store_engine##nr##_load(struct device *dev,		\
+				     struct device_attribute *attr,	\
+				     const char *buf, size_t len)	\
+{									\
+	return store_engine_load(dev, attr, buf, len, nr);		\
+}
+store_load(1)
+store_load(2)
+store_load(3)
+
+static ssize_t show_engine_mode(struct device *dev,
+				struct device_attribute *attr,
+				char *buf, int nr)
+{
+	struct i2c_client *client = to_i2c_client(dev);
+	struct lp5523_chip *chip = i2c_get_clientdata(client);
+	switch (chip->engines[nr - 1].mode) {
+	case LP5523_CMD_RUN:
+		return sprintf(buf, "run\n");
+	case LP5523_CMD_LOAD:
+		return sprintf(buf, "load\n");
+	case LP5523_CMD_DISABLED:
+		return sprintf(buf, "disabled\n");
+	default:
+		return sprintf(buf, "disabled\n");
+	}
+}
+
+#define show_mode(nr)							\
+static ssize_t show_engine##nr##_mode(struct device *dev,		\
+				    struct device_attribute *attr,	\
+				    char *buf)				\
+{									\
+	return show_engine_mode(dev, attr, buf, nr);			\
+}
+show_mode(1)
+show_mode(2)
+show_mode(3)
+
+static ssize_t store_engine_mode(struct device *dev,
+				 struct device_attribute *attr,
+				 const char *buf, size_t len, int nr)
+{
+	struct i2c_client *client = to_i2c_client(dev);
+	struct lp5523_chip *chip = i2c_get_clientdata(client);
+	struct lp5523_engine *engine = &chip->engines[nr - 1];
+	mutex_lock(&chip->lock);
+
+	if (!strncmp(buf, "run", 3))
+		lp5523_set_mode(engine, LP5523_CMD_RUN);
+	else if (!strncmp(buf, "load", 4))
+		lp5523_set_mode(engine, LP5523_CMD_LOAD);
+	else if (!strncmp(buf, "disabled", 8))
+		lp5523_set_mode(engine, LP5523_CMD_DISABLED);
+
+	mutex_unlock(&chip->lock);
+	return len;
+}
+
+#define store_mode(nr)							\
+static ssize_t store_engine##nr##_mode(struct device *dev,		\
+				     struct device_attribute *attr,	\
+				     const char *buf, size_t len)	\
+{									\
+	return store_engine_mode(dev, attr, buf, len, nr);		\
+}
+store_mode(1)
+store_mode(2)
+store_mode(3)
+
+static ssize_t show_max_current(struct device *dev,
+			    struct device_attribute *attr,
+			    char *buf)
+{
+	struct led_classdev *led_cdev = dev_get_drvdata(dev);
+	struct lp5523_led *led = cdev_to_led(led_cdev);
+
+	return sprintf(buf, "%d\n", led->max_current);
+}
+
+static ssize_t show_current(struct device *dev,
+			    struct device_attribute *attr,
+			    char *buf)
+{
+	struct led_classdev *led_cdev = dev_get_drvdata(dev);
+	struct lp5523_led *led = cdev_to_led(led_cdev);
+
+	return sprintf(buf, "%d\n", led->led_current);
+}
+
+static ssize_t store_current(struct device *dev,
+			     struct device_attribute *attr,
+			     const char *buf, size_t len)
+{
+	struct led_classdev *led_cdev = dev_get_drvdata(dev);
+	struct lp5523_led *led = cdev_to_led(led_cdev);
+	struct lp5523_chip *chip = led_to_lp5523(led);
+	ssize_t ret;
+	unsigned long curr;
+
+	if (strict_strtoul(buf, 0, &curr))
+		return -EINVAL;
+
+	if (curr > led->max_current)
+		return -EINVAL;
+
+	mutex_lock(&chip->lock);
+	ret = lp5523_write(chip->client,
+			LP5523_REG_LED_CURRENT_BASE + led->chan_nr,
+			(u8)curr);
+	mutex_unlock(&chip->lock);
+
+	if (ret < 0)
+		return ret;
+
+	led->led_current = (u8)curr;
+
+	return len;
+}
+
+/* led class device attributes */
+static DEVICE_ATTR(led_current, S_IRUGO | S_IWUGO, show_current, store_current);
+static DEVICE_ATTR(max_current, S_IRUGO, show_max_current, NULL);
+
+static struct attribute *lp5523_led_attributes[] = {
+	&dev_attr_led_current.attr,
+	&dev_attr_max_current.attr,
+	NULL,
+};
+
+static struct attribute_group lp5523_led_attribute_group = {
+	.attrs = lp5523_led_attributes
+};
+
+/* device attributes */
+static DEVICE_ATTR(engine1_mode, S_IRUGO | S_IWUGO,
+		   show_engine1_mode, store_engine1_mode);
+static DEVICE_ATTR(engine2_mode, S_IRUGO | S_IWUGO,
+		   show_engine2_mode, store_engine2_mode);
+static DEVICE_ATTR(engine3_mode, S_IRUGO | S_IWUGO,
+		   show_engine3_mode, store_engine3_mode);
+static DEVICE_ATTR(engine1_leds, S_IRUGO | S_IWUGO,
+		   show_engine1_leds, store_engine1_leds);
+static DEVICE_ATTR(engine2_leds, S_IRUGO | S_IWUGO,
+		   show_engine2_leds, store_engine2_leds);
+static DEVICE_ATTR(engine3_leds, S_IRUGO | S_IWUGO,
+		   show_engine3_leds, store_engine3_leds);
+static DEVICE_ATTR(engine1_load, S_IWUGO, NULL, store_engine1_load);
+static DEVICE_ATTR(engine2_load, S_IWUGO, NULL, store_engine2_load);
+static DEVICE_ATTR(engine3_load, S_IWUGO, NULL, store_engine3_load);
+static DEVICE_ATTR(selftest, S_IRUGO, lp5523_selftest, NULL);
+
+static struct attribute *lp5523_attributes[] = {
+	&dev_attr_engine1_mode.attr,
+	&dev_attr_engine2_mode.attr,
+	&dev_attr_engine3_mode.attr,
+	&dev_attr_selftest.attr,
+	NULL
+};
+
+static struct attribute *lp5523_engine1_attributes[] = {
+	&dev_attr_engine1_load.attr,
+	&dev_attr_engine1_leds.attr,
+	NULL
+};
+
+static struct attribute *lp5523_engine2_attributes[] = {
+	&dev_attr_engine2_load.attr,
+	&dev_attr_engine2_leds.attr,
+	NULL
+};
+
+static struct attribute *lp5523_engine3_attributes[] = {
+	&dev_attr_engine3_load.attr,
+	&dev_attr_engine3_leds.attr,
+	NULL
+};
+
+static const struct attribute_group lp5523_group = {
+	.attrs = lp5523_attributes,
+};
+
+static const struct attribute_group lp5523_engine_group[] = {
+	{.attrs = lp5523_engine1_attributes },
+	{.attrs = lp5523_engine2_attributes },
+	{.attrs = lp5523_engine3_attributes },
+};
+
+static int lp5523_register_sysfs(struct i2c_client *client)
+{
+	struct device *dev = &client->dev;
+	int ret;
+
+	ret = sysfs_create_group(&dev->kobj, &lp5523_group);
+	if (ret < 0)
+		return ret;
+
+	return 0;
+}
+
+static void lp5523_unregister_sysfs(struct i2c_client *client)
+{
+	struct lp5523_chip *chip = i2c_get_clientdata(client);
+	struct device *dev = &client->dev;
+	int i;
+
+	sysfs_remove_group(&dev->kobj, &lp5523_group);
+
+	for (i = 0; i < ARRAY_SIZE(chip->engines); i++)
+		if (chip->engines[i].mode == LP5523_CMD_LOAD)
+			sysfs_remove_group(&dev->kobj, &lp5523_engine_group[i]);
+
+	for (i = 0; i < chip->num_leds; i++)
+		sysfs_remove_group(&chip->leds[i].cdev.dev->kobj,
+				&lp5523_led_attribute_group);
+}
+
+/*--------------------------------------------------------------*/
+/*			Set chip operating mode			*/
+/*--------------------------------------------------------------*/
+static int lp5523_set_mode(struct lp5523_engine *engine, u8 mode)
+{
+	/*  engine to chip */
+	struct lp5523_chip *chip = engine_to_lp5523(engine);
+	struct i2c_client *client = chip->client;
+	struct device *dev = &client->dev;
+	int ret = 0;
+
+	/* if already in that mode, do nothing, except for run */
+	if (mode == engine->mode && mode != LP5523_CMD_RUN)
+		return 0;
+
+	if (mode == LP5523_CMD_RUN) {
+		ret = lp5523_run_program(engine);
+	} else if (mode == LP5523_CMD_LOAD) {
+		lp5523_set_engine_mode(engine, LP5523_CMD_DISABLED);
+		lp5523_set_engine_mode(engine, LP5523_CMD_LOAD);
+
+		ret = sysfs_create_group(&dev->kobj, engine->attributes);
+		if (ret)
+			return ret;
+	} else if (mode == LP5523_CMD_DISABLED) {
+		lp5523_set_engine_mode(engine, LP5523_CMD_DISABLED);
+	}
+
+	/* remove load attribute from sysfs if not in load mode */
+	if (engine->mode == LP5523_CMD_LOAD && mode != LP5523_CMD_LOAD)
+		sysfs_remove_group(&dev->kobj, engine->attributes);
+
+	engine->mode = mode;
+
+	return ret;
+}
+
+/*--------------------------------------------------------------*/
+/*			Probe, Attach, Remove			*/
+/*--------------------------------------------------------------*/
+static int __init lp5523_init_engine(struct lp5523_engine *engine, int id)
+{
+	if (id < 1 || id > LP5523_ENGINES)
+		return -1;
+	engine->id = id;
+	engine->engine_mask = LP5523_ENG_MASK_BASE >> SHIFT_MASK(id);
+	engine->prog_page = id - 1;
+	engine->mux_page = id + 2;
+	engine->attributes = &lp5523_engine_group[id - 1];
+
+	return 0;
+}
+
+static int __init lp5523_init_led(struct lp5523_led *led, struct device *dev,
+			   int chan, struct lp5523_platform_data *pdata)
+{
+	char name[32];
+	int res;
+
+	if (chan >= LP5523_LEDS)
+		return -EINVAL;
+
+	if (pdata->led_config[chan].led_current) {
+		led->led_current = pdata->led_config[chan].led_current;
+		led->max_current = pdata->led_config[chan].max_current;
+		led->chan_nr = pdata->led_config[chan].chan_nr;
+
+		if (led->chan_nr >= LP5523_LEDS) {
+			dev_err(dev, "Use channel numbers between 0 and %d\n",
+				LP5523_LEDS - 1);
+			return -EINVAL;
+		}
+
+		snprintf(name, 32, "lp5523:channel%d", chan);
+
+		led->cdev.name = name;
+		led->cdev.brightness_set = lp5523_set_brightness;
+		res = led_classdev_register(dev, &led->cdev);
+		if (res < 0) {
+			dev_err(dev, "couldn't register led on channel %d\n",
+				chan);
+			return res;
+		}
+		res = sysfs_create_group(&led->cdev.dev->kobj,
+				&lp5523_led_attribute_group);
+		if (res < 0) {
+			dev_err(dev, "couldn't register current attribute\n");
+			led_classdev_unregister(&led->cdev);
+			return res;
+		}
+	} else {
+		led->led_current = 0;
+	}
+	return 0;
+}
+
+static struct i2c_driver lp5523_driver;
+
+static int lp5523_probe(struct i2c_client *client,
+			const struct i2c_device_id *id)
+{
+	struct lp5523_chip		*chip;
+	struct lp5523_platform_data	*pdata;
+	int ret, i, led;
+
+	chip = kzalloc(sizeof(*chip), GFP_KERNEL);
+	if (!chip)
+		return -ENOMEM;
+
+	i2c_set_clientdata(client, chip);
+	chip->client = client;
+
+	pdata = client->dev.platform_data;
+
+	if (!pdata) {
+		dev_err(&client->dev, "no platform data\n");
+		ret = -EINVAL;
+		goto fail1;
+	}
+
+	mutex_init(&chip->lock);
+
+	chip->pdata   = pdata;
+
+	if (pdata->setup_resources) {
+		ret = pdata->setup_resources();
+		if (ret < 0)
+			goto fail1;
+	}
+
+	if (pdata->enable) {
+		pdata->enable(0);
+		usleep_range(1000, 10000);
+		pdata->enable(1);
+		usleep_range(1000, 10000); /* Spec says min 500us */
+	}
+
+	ret = lp5523_detect(client);
+	if (ret)
+		goto fail2;
+
+	dev_info(&client->dev, "LP5523 Programmable led chip found\n");
+
+	/* Initialize engines */
+	for (i = 0; i < ARRAY_SIZE(chip->engines); i++) {
+		ret = lp5523_init_engine(&chip->engines[i], i + 1);
+		if (ret) {
+			dev_err(&client->dev, "error initializing engine\n");
+			goto fail2;
+		}
+	}
+	ret = lp5523_configure(client);
+	if (ret < 0) {
+		dev_err(&client->dev, "error configuring chip\n");
+		goto fail2;
+	}
+
+	/* Initialize leds */
+	chip->num_channels = pdata->num_channels;
+	chip->num_leds = 0;
+	led = 0;
+	for (i = 0; i < pdata->num_channels; i++) {
+		/* Do not initialize channels that are not connected */
+		if (pdata->led_config[i].led_current == 0)
+			continue;
+
+		ret = lp5523_init_led(&chip->leds[led], &client->dev, i, pdata);
+		if (ret) {
+			dev_err(&client->dev, "error initializing leds\n");
+			goto fail3;
+		}
+		chip->num_leds++;
+
+		chip->leds[led].id = led;
+		/* Set LED current */
+		lp5523_write(client,
+			  LP5523_REG_LED_CURRENT_BASE + chip->leds[led].chan_nr,
+			  chip->leds[led].led_current);
+
+		INIT_WORK(&(chip->leds[led].brightness_work),
+			lp5523_led_brightness_work);
+
+		led++;
+	}
+
+	ret = lp5523_register_sysfs(client);
+	if (ret) {
+		dev_err(&client->dev, "registering sysfs failed\n");
+		goto fail3;
+	}
+	return ret;
+fail3:
+	for (i = 0; i < chip->num_leds; i++) {
+		led_classdev_unregister(&chip->leds[i].cdev);
+		cancel_work_sync(&chip->leds[i].brightness_work);
+	}
+fail2:
+	if (pdata->enable)
+		pdata->enable(0);
+	if (pdata->release_resources)
+		pdata->release_resources();
+fail1:
+	kfree(chip);
+	return ret;
+}
+
+static int lp5523_remove(struct i2c_client *client)
+{
+	struct lp5523_chip *chip = i2c_get_clientdata(client);
+	int i;
+
+	lp5523_unregister_sysfs(client);
+
+	for (i = 0; i < chip->num_leds; i++) {
+		led_classdev_unregister(&chip->leds[i].cdev);
+		cancel_work_sync(&chip->leds[i].brightness_work);
+	}
+
+	if (chip->pdata->enable)
+		chip->pdata->enable(0);
+	if (chip->pdata->release_resources)
+		chip->pdata->release_resources();
+	kfree(chip);
+	return 0;
+}
+
+static const struct i2c_device_id lp5523_id[] = {
+	{ "lp5523", 0 },
+	{ }
+};
+
+MODULE_DEVICE_TABLE(i2c, lp5523_id);
+
+static struct i2c_driver lp5523_driver = {
+	.driver = {
+		.name	= "lp5523",
+	},
+	.probe		= lp5523_probe,
+	.remove		= lp5523_remove,
+	.id_table	= lp5523_id,
+};
+
+static int __init lp5523_init(void)
+{
+	int ret;
+
+	ret = i2c_add_driver(&lp5523_driver);
+
+	if (ret < 0)
+		printk(KERN_ALERT "Adding lp5523 driver failed\n");
+
+	return ret;
+}
+
+static void __exit lp5523_exit(void)
+{
+	i2c_del_driver(&lp5523_driver);
+}
+
+module_init(lp5523_init);
+module_exit(lp5523_exit);
+
+MODULE_AUTHOR("Mathias Nyman <mathias.nyman@nokia.com>");
+MODULE_DESCRIPTION("LP5523 LED engine");
+MODULE_LICENSE("GPL");
diff --git a/drivers/leds/ledtrig-timer.c b/drivers/leds/ledtrig-timer.c
index 82b77bd..b09bcbe 100644
--- a/drivers/leds/ledtrig-timer.c
+++ b/drivers/leds/ledtrig-timer.c
@@ -12,73 +12,25 @@
  */
 
 #include <linux/module.h>
-#include <linux/jiffies.h>
 #include <linux/kernel.h>
 #include <linux/init.h>
-#include <linux/list.h>
-#include <linux/spinlock.h>
 #include <linux/device.h>
-#include <linux/sysdev.h>
-#include <linux/timer.h>
 #include <linux/ctype.h>
 #include <linux/leds.h>
-#include <linux/slab.h>
 #include "leds.h"
 
-struct timer_trig_data {
-	int brightness_on;		/* LED brightness during "on" period.
-					 * (LED_OFF < brightness_on <= LED_FULL)
-					 */
-	unsigned long delay_on;		/* milliseconds on */
-	unsigned long delay_off;	/* milliseconds off */
-	struct timer_list timer;
-};
-
-static void led_timer_function(unsigned long data)
-{
-	struct led_classdev *led_cdev = (struct led_classdev *) data;
-	struct timer_trig_data *timer_data = led_cdev->trigger_data;
-	unsigned long brightness;
-	unsigned long delay;
-
-	if (!timer_data->delay_on || !timer_data->delay_off) {
-		led_set_brightness(led_cdev, LED_OFF);
-		return;
-	}
-
-	brightness = led_get_brightness(led_cdev);
-	if (!brightness) {
-		/* Time to switch the LED on. */
-		brightness = timer_data->brightness_on;
-		delay = timer_data->delay_on;
-	} else {
-		/* Store the current brightness value to be able
-		 * to restore it when the delay_off period is over.
-		 */
-		timer_data->brightness_on = brightness;
-		brightness = LED_OFF;
-		delay = timer_data->delay_off;
-	}
-
-	led_set_brightness(led_cdev, brightness);
-
-	mod_timer(&timer_data->timer, jiffies + msecs_to_jiffies(delay));
-}
-
 static ssize_t led_delay_on_show(struct device *dev,
 		struct device_attribute *attr, char *buf)
 {
 	struct led_classdev *led_cdev = dev_get_drvdata(dev);
-	struct timer_trig_data *timer_data = led_cdev->trigger_data;
 
-	return sprintf(buf, "%lu\n", timer_data->delay_on);
+	return sprintf(buf, "%lu\n", led_cdev->blink_delay_on);
 }
 
 static ssize_t led_delay_on_store(struct device *dev,
 		struct device_attribute *attr, const char *buf, size_t size)
 {
 	struct led_classdev *led_cdev = dev_get_drvdata(dev);
-	struct timer_trig_data *timer_data = led_cdev->trigger_data;
 	int ret = -EINVAL;
 	char *after;
 	unsigned long state = simple_strtoul(buf, &after, 10);
@@ -88,21 +40,7 @@
 		count++;
 
 	if (count == size) {
-		if (timer_data->delay_on != state) {
-			/* the new value differs from the previous */
-			timer_data->delay_on = state;
-
-			/* deactivate previous settings */
-			del_timer_sync(&timer_data->timer);
-
-			/* try to activate hardware acceleration, if any */
-			if (!led_cdev->blink_set ||
-			    led_cdev->blink_set(led_cdev,
-			      &timer_data->delay_on, &timer_data->delay_off)) {
-				/* no hardware acceleration, blink via timer */
-				mod_timer(&timer_data->timer, jiffies + 1);
-			}
-		}
+		led_blink_set(led_cdev, &state, &led_cdev->blink_delay_off);
 		ret = count;
 	}
 
@@ -113,16 +51,14 @@
 		struct device_attribute *attr, char *buf)
 {
 	struct led_classdev *led_cdev = dev_get_drvdata(dev);
-	struct timer_trig_data *timer_data = led_cdev->trigger_data;
 
-	return sprintf(buf, "%lu\n", timer_data->delay_off);
+	return sprintf(buf, "%lu\n", led_cdev->blink_delay_off);
 }
 
 static ssize_t led_delay_off_store(struct device *dev,
 		struct device_attribute *attr, const char *buf, size_t size)
 {
 	struct led_classdev *led_cdev = dev_get_drvdata(dev);
-	struct timer_trig_data *timer_data = led_cdev->trigger_data;
 	int ret = -EINVAL;
 	char *after;
 	unsigned long state = simple_strtoul(buf, &after, 10);
@@ -132,21 +68,7 @@
 		count++;
 
 	if (count == size) {
-		if (timer_data->delay_off != state) {
-			/* the new value differs from the previous */
-			timer_data->delay_off = state;
-
-			/* deactivate previous settings */
-			del_timer_sync(&timer_data->timer);
-
-			/* try to activate hardware acceleration, if any */
-			if (!led_cdev->blink_set ||
-			    led_cdev->blink_set(led_cdev,
-			      &timer_data->delay_on, &timer_data->delay_off)) {
-				/* no hardware acceleration, blink via timer */
-				mod_timer(&timer_data->timer, jiffies + 1);
-			}
-		}
+		led_blink_set(led_cdev, &led_cdev->blink_delay_on, &state);
 		ret = count;
 	}
 
@@ -158,60 +80,34 @@
 
 static void timer_trig_activate(struct led_classdev *led_cdev)
 {
-	struct timer_trig_data *timer_data;
 	int rc;
 
-	timer_data = kzalloc(sizeof(struct timer_trig_data), GFP_KERNEL);
-	if (!timer_data)
-		return;
-
-	timer_data->brightness_on = led_get_brightness(led_cdev);
-	if (timer_data->brightness_on == LED_OFF)
-		timer_data->brightness_on = led_cdev->max_brightness;
-	led_cdev->trigger_data = timer_data;
-
-	init_timer(&timer_data->timer);
-	timer_data->timer.function = led_timer_function;
-	timer_data->timer.data = (unsigned long) led_cdev;
+	led_cdev->trigger_data = NULL;
 
 	rc = device_create_file(led_cdev->dev, &dev_attr_delay_on);
 	if (rc)
-		goto err_out;
+		return;
 	rc = device_create_file(led_cdev->dev, &dev_attr_delay_off);
 	if (rc)
 		goto err_out_delayon;
 
-	/* If there is hardware support for blinking, start one
-	 * user friendly blink rate chosen by the driver.
-	 */
-	if (led_cdev->blink_set)
-		led_cdev->blink_set(led_cdev,
-			&timer_data->delay_on, &timer_data->delay_off);
+	led_cdev->trigger_data = (void *)1;
 
 	return;
 
 err_out_delayon:
 	device_remove_file(led_cdev->dev, &dev_attr_delay_on);
-err_out:
-	led_cdev->trigger_data = NULL;
-	kfree(timer_data);
 }
 
 static void timer_trig_deactivate(struct led_classdev *led_cdev)
 {
-	struct timer_trig_data *timer_data = led_cdev->trigger_data;
-	unsigned long on = 0, off = 0;
-
-	if (timer_data) {
+	if (led_cdev->trigger_data) {
 		device_remove_file(led_cdev->dev, &dev_attr_delay_on);
 		device_remove_file(led_cdev->dev, &dev_attr_delay_off);
-		del_timer_sync(&timer_data->timer);
-		kfree(timer_data);
 	}
 
-	/* If there is hardware support for blinking, stop it */
-	if (led_cdev->blink_set)
-		led_cdev->blink_set(led_cdev, &on, &off);
+	/* Stop blinking */
+	led_brightness_set(led_cdev, LED_OFF);
 }
 
 static struct led_trigger timer_led_trigger = {
diff --git a/drivers/macintosh/adb-iop.c b/drivers/macintosh/adb-iop.c
index 4446966..f5f4da3 100644
--- a/drivers/macintosh/adb-iop.c
+++ b/drivers/macintosh/adb-iop.c
@@ -80,7 +80,7 @@
 static void adb_iop_complete(struct iop_msg *msg)
 {
 	struct adb_request *req;
-	uint flags;
+	unsigned long flags;
 
 	local_irq_save(flags);
 
@@ -103,7 +103,7 @@
 {
 	struct adb_iopmsg *amsg = (struct adb_iopmsg *) msg->message;
 	struct adb_request *req;
-	uint flags;
+	unsigned long flags;
 #ifdef DEBUG_ADB_IOP
 	int i;
 #endif
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 4e957f3..324a366 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -706,7 +706,7 @@
 /* return the offset of the super block in 512byte sectors */
 static inline sector_t calc_dev_sboffset(struct block_device *bdev)
 {
-	sector_t num_sectors = bdev->bd_inode->i_size / 512;
+	sector_t num_sectors = i_size_read(bdev->bd_inode) / 512;
 	return MD_NEW_SIZE_SECTORS(num_sectors);
 }
 
@@ -1386,7 +1386,7 @@
 	 */
 	switch(minor_version) {
 	case 0:
-		sb_start = rdev->bdev->bd_inode->i_size >> 9;
+		sb_start = i_size_read(rdev->bdev->bd_inode) >> 9;
 		sb_start -= 8*2;
 		sb_start &= ~(sector_t)(4*2-1);
 		break;
@@ -1472,7 +1472,7 @@
 			ret = 0;
 	}
 	if (minor_version)
-		rdev->sectors = (rdev->bdev->bd_inode->i_size >> 9) -
+		rdev->sectors = (i_size_read(rdev->bdev->bd_inode) >> 9) -
 			le64_to_cpu(sb->data_offset);
 	else
 		rdev->sectors = rdev->sb_start;
@@ -1680,7 +1680,7 @@
 		return 0; /* component must fit device */
 	if (rdev->sb_start < rdev->data_offset) {
 		/* minor versions 1 and 2; superblock before data */
-		max_sectors = rdev->bdev->bd_inode->i_size >> 9;
+		max_sectors = i_size_read(rdev->bdev->bd_inode) >> 9;
 		max_sectors -= rdev->data_offset;
 		if (!num_sectors || num_sectors > max_sectors)
 			num_sectors = max_sectors;
@@ -1690,7 +1690,7 @@
 	} else {
 		/* minor version 0; superblock after data */
 		sector_t sb_start;
-		sb_start = (rdev->bdev->bd_inode->i_size >> 9) - 8*2;
+		sb_start = (i_size_read(rdev->bdev->bd_inode) >> 9) - 8*2;
 		sb_start &= ~(sector_t)(4*2 - 1);
 		max_sectors = rdev->sectors + sb_start - rdev->sb_start;
 		if (!num_sectors || num_sectors > max_sectors)
@@ -2584,7 +2584,7 @@
 			if (!sectors)
 				return -EBUSY;
 		} else if (!sectors)
-			sectors = (rdev->bdev->bd_inode->i_size >> 9) -
+			sectors = (i_size_read(rdev->bdev->bd_inode) >> 9) -
 				rdev->data_offset;
 	}
 	if (sectors < my_mddev->dev_sectors)
@@ -2797,7 +2797,7 @@
 
 	kobject_init(&rdev->kobj, &rdev_ktype);
 
-	size = rdev->bdev->bd_inode->i_size >> BLOCK_SIZE_BITS;
+	size = i_size_read(rdev->bdev->bd_inode) >> BLOCK_SIZE_BITS;
 	if (!size) {
 		printk(KERN_WARNING 
 			"md: %s has zero or unknown size, marking faulty!\n",
@@ -5235,8 +5235,8 @@
 
 		if (!mddev->persistent) {
 			printk(KERN_INFO "md: nonpersistent superblock ...\n");
-			rdev->sb_start = rdev->bdev->bd_inode->i_size / 512;
-		} else 
+			rdev->sb_start = i_size_read(rdev->bdev->bd_inode) / 512;
+		} else
 			rdev->sb_start = calc_dev_sboffset(rdev->bdev);
 		rdev->sectors = rdev->sb_start;
 
@@ -5306,7 +5306,7 @@
 	if (mddev->persistent)
 		rdev->sb_start = calc_dev_sboffset(rdev->bdev);
 	else
-		rdev->sb_start = rdev->bdev->bd_inode->i_size / 512;
+		rdev->sb_start = i_size_read(rdev->bdev->bd_inode) / 512;
 
 	rdev->sectors = rdev->sb_start;
 
diff --git a/drivers/misc/apds9802als.c b/drivers/misc/apds9802als.c
index f9b91ba..0ed0935 100644
--- a/drivers/misc/apds9802als.c
+++ b/drivers/misc/apds9802als.c
@@ -123,7 +123,7 @@
 {
 	struct i2c_client *client = to_i2c_client(dev);
 	struct als_data *data = i2c_get_clientdata(client);
-	unsigned int ret_val;
+	int ret_val;
 	unsigned long val;
 
 	if (strict_strtoul(buf, 10, &val))
diff --git a/drivers/misc/bh1770glc.c b/drivers/misc/bh1770glc.c
index cee632e..d79a972 100644
--- a/drivers/misc/bh1770glc.c
+++ b/drivers/misc/bh1770glc.c
@@ -649,7 +649,7 @@
 {
 	struct bh1770_chip *chip =  dev_get_drvdata(dev);
 	unsigned long value;
-	size_t ret;
+	ssize_t ret;
 
 	if (strict_strtoul(buf, 0, &value))
 		return -EINVAL;
@@ -659,8 +659,12 @@
 		pm_runtime_get_sync(dev);
 
 		ret = bh1770_lux_rate(chip, chip->lux_rate_index);
-		ret |= bh1770_lux_interrupt_control(chip, BH1770_ENABLE);
+		if (ret < 0) {
+			pm_runtime_put(dev);
+			goto leave;
+		}
 
+		ret = bh1770_lux_interrupt_control(chip, BH1770_ENABLE);
 		if (ret < 0) {
 			pm_runtime_put(dev);
 			goto leave;
diff --git a/drivers/misc/isl29020.c b/drivers/misc/isl29020.c
index 34fe835..ca47e62 100644
--- a/drivers/misc/isl29020.c
+++ b/drivers/misc/isl29020.c
@@ -87,7 +87,7 @@
 		struct device_attribute *attr, const  char *buf, size_t count)
 {
 	struct i2c_client *client = to_i2c_client(dev);
-	unsigned int ret_val;
+	int ret_val;
 	unsigned long val;
 
 	if (strict_strtoul(buf, 10, &val))
@@ -106,6 +106,8 @@
 		val = 4;
 
 	ret_val = i2c_smbus_read_byte_data(client, 0x00);
+	if (ret_val < 0)
+		return ret_val;
 
 	ret_val &= 0xFC; /*reset the bit before setting them */
 	ret_val |= val - 1;
diff --git a/drivers/net/wireless/rt2x00/Kconfig b/drivers/net/wireless/rt2x00/Kconfig
index eea1ef2..4396d4b 100644
--- a/drivers/net/wireless/rt2x00/Kconfig
+++ b/drivers/net/wireless/rt2x00/Kconfig
@@ -221,9 +221,6 @@
 	boolean
 	default y if (RT2X00_LIB=y && LEDS_CLASS=y) || (RT2X00_LIB=m && LEDS_CLASS!=n)
 
-comment "rt2x00 leds support disabled due to modularized LEDS_CLASS and built-in rt2x00"
-	depends on RT2X00_LIB=y && LEDS_CLASS=m
-
 config RT2X00_LIB_DEBUGFS
 	bool "Ralink debugfs support"
 	depends on RT2X00_LIB && MAC80211_DEBUGFS
diff --git a/drivers/pci/xen-pcifront.c b/drivers/pci/xen-pcifront.c
index a87c498..3a5a6fc 100644
--- a/drivers/pci/xen-pcifront.c
+++ b/drivers/pci/xen-pcifront.c
@@ -13,7 +13,6 @@
 #include <linux/spinlock.h>
 #include <linux/pci.h>
 #include <linux/msi.h>
-#include <xen/xenbus.h>
 #include <xen/interface/io/pciif.h>
 #include <asm/xen/pci.h>
 #include <linux/interrupt.h>
@@ -576,8 +575,9 @@
 
 	pcidev = pci_get_bus_and_slot(bus, devfn);
 	if (!pcidev || !pcidev->driver) {
-		dev_err(&pcidev->dev,
-			"device or driver is NULL\n");
+		dev_err(&pdev->xdev->dev, "device or AER driver is NULL\n");
+		if (pcidev)
+			pci_dev_put(pcidev);
 		return result;
 	}
 	pdrv = pcidev->driver;
diff --git a/drivers/rapidio/rio.c b/drivers/rapidio/rio.c
index 68cf0c9..7b5080c 100644
--- a/drivers/rapidio/rio.c
+++ b/drivers/rapidio/rio.c
@@ -1159,11 +1159,11 @@
 
 	list_for_each_entry(port, &rio_mports, node) {
 		if (!request_mem_region(port->iores.start,
-					port->iores.end - port->iores.start,
+					resource_size(&port->iores),
 					port->name)) {
 			printk(KERN_ERR
 			       "RIO: Error requesting master port region 0x%016llx-0x%016llx\n",
-			       (u64)port->iores.start, (u64)port->iores.end - 1);
+			       (u64)port->iores.start, (u64)port->iores.end);
 			rc = -ENOMEM;
 			goto out;
 		}
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index 1de30eb..f3cf924 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -320,19 +320,11 @@
 				    "changed. The Linux SCSI layer does not "
 				    "automatically adjust these parameters.\n");
 
-		if (scmd->request->cmd_flags & REQ_HARDBARRIER)
-			/*
-			 * barrier requests should always retry on UA
-			 * otherwise block will get a spurious error
-			 */
-			return NEEDS_RETRY;
-		else
-			/*
-			 * for normal (non barrier) commands, pass the
-			 * UA upwards for a determination in the
-			 * completion functions
-			 */
-			return SUCCESS;
+		/*
+		 * Pass the UA upwards for a determination in the completion
+		 * functions.
+		 */
+		return SUCCESS;
 
 		/* these three are not supported */
 	case COPY_ABORTED:
diff --git a/drivers/serial/kgdboc.c b/drivers/serial/kgdboc.c
index d4b711c..3374618 100644
--- a/drivers/serial/kgdboc.c
+++ b/drivers/serial/kgdboc.c
@@ -18,6 +18,7 @@
 #include <linux/tty.h>
 #include <linux/console.h>
 #include <linux/vt_kern.h>
+#include <linux/input.h>
 
 #define MAX_CONFIG_LEN		40
 
@@ -37,6 +38,61 @@
 static int			kgdb_tty_line;
 
 #ifdef CONFIG_KDB_KEYBOARD
+static int kgdboc_reset_connect(struct input_handler *handler,
+				struct input_dev *dev,
+				const struct input_device_id *id)
+{
+	input_reset_device(dev);
+
+	/* Return an error - we do not want to bind, just to reset */
+	return -ENODEV;
+}
+
+static void kgdboc_reset_disconnect(struct input_handle *handle)
+{
+	/* We do not expect anyone to actually bind to us */
+	BUG();
+}
+
+static const struct input_device_id kgdboc_reset_ids[] = {
+	{
+		.flags = INPUT_DEVICE_ID_MATCH_EVBIT,
+		.evbit = { BIT_MASK(EV_KEY) },
+	},
+	{ }
+};
+
+static struct input_handler kgdboc_reset_handler = {
+	.connect	= kgdboc_reset_connect,
+	.disconnect	= kgdboc_reset_disconnect,
+	.name		= "kgdboc_reset",
+	.id_table	= kgdboc_reset_ids,
+};
+
+static DEFINE_MUTEX(kgdboc_reset_mutex);
+
+static void kgdboc_restore_input_helper(struct work_struct *dummy)
+{
+	/*
+	 * We need to take a mutex to prevent several instances of
+	 * this work running on different CPUs so they don't try
+	 * to register an already registered handler again.
+	 */
+	mutex_lock(&kgdboc_reset_mutex);
+
+	if (input_register_handler(&kgdboc_reset_handler) == 0)
+		input_unregister_handler(&kgdboc_reset_handler);
+
+	mutex_unlock(&kgdboc_reset_mutex);
+}
+
+static DECLARE_WORK(kgdboc_restore_input_work, kgdboc_restore_input_helper);
+
+static void kgdboc_restore_input(void)
+{
+	schedule_work(&kgdboc_restore_input_work);
+}
+
 static int kgdboc_register_kbd(char **cptr)
 {
 	if (strncmp(*cptr, "kbd", 3) == 0) {
@@ -64,10 +120,12 @@
 			i--;
 		}
 	}
+	flush_work_sync(&kgdboc_restore_input_work);
 }
 #else /* ! CONFIG_KDB_KEYBOARD */
 #define kgdboc_register_kbd(x) 0
 #define kgdboc_unregister_kbd()
+#define kgdboc_restore_input()
 #endif /* ! CONFIG_KDB_KEYBOARD */
 
 static int kgdboc_option_setup(char *opt)
@@ -231,6 +289,7 @@
 		dbg_restore_graphics = 0;
 		con_debug_leave();
 	}
+	kgdboc_restore_input();
 }
 
 static struct kgdb_io kgdboc_io_ops = {
diff --git a/drivers/usb/storage/uas.c b/drivers/usb/storage/uas.c
index 2054b1e..d126819 100644
--- a/drivers/usb/storage/uas.c
+++ b/drivers/usb/storage/uas.c
@@ -331,10 +331,7 @@
 
 	iu->iu_id = IU_ID_COMMAND;
 	iu->tag = cpu_to_be16(stream_id);
-	if (sdev->ordered_tags && (cmnd->request->cmd_flags & REQ_HARDBARRIER))
-		iu->prio_attr = UAS_ORDERED_TAG;
-	else
-		iu->prio_attr = UAS_SIMPLE_TAG;
+	iu->prio_attr = UAS_SIMPLE_TAG;
 	iu->len = len;
 	int_to_scsilun(sdev->lun, &iu->lun);
 	memcpy(iu->cdb, cmnd->cmnd, cmnd->cmd_len);
diff --git a/drivers/video/backlight/adp8860_bl.c b/drivers/video/backlight/adp8860_bl.c
index 3ec2460..734c650 100644
--- a/drivers/video/backlight/adp8860_bl.c
+++ b/drivers/video/backlight/adp8860_bl.c
@@ -502,8 +502,10 @@
 		struct device_attribute *attr, const char *buf, size_t count)
 {
 	struct adp8860_bl *data = dev_get_drvdata(dev);
+	int ret = strict_strtoul(buf, 10, &data->cached_daylight_max);
+	if (ret)
+		return ret;
 
-	strict_strtoul(buf, 10, &data->cached_daylight_max);
 	return adp8860_store(dev, buf, count, ADP8860_BLMX1);
 }
 static DEVICE_ATTR(l1_daylight_max, 0664, adp8860_bl_l1_daylight_max_show,
@@ -614,7 +616,7 @@
 	if (val == 0) {
 		/* Enable automatic ambient light sensing */
 		adp8860_set_bits(data->client, ADP8860_MDCR, CMP_AUTOEN);
-	} else if ((val > 0) && (val < 6)) {
+	} else if ((val > 0) && (val <= 3)) {
 		/* Disable automatic ambient light sensing */
 		adp8860_clr_bits(data->client, ADP8860_MDCR, CMP_AUTOEN);
 
@@ -622,7 +624,7 @@
 		mutex_lock(&data->lock);
 		adp8860_read(data->client, ADP8860_CFGR, &reg_val);
 		reg_val &= ~(CFGR_BLV_MASK << CFGR_BLV_SHIFT);
-		reg_val |= val << CFGR_BLV_SHIFT;
+		reg_val |= (val - 1) << CFGR_BLV_SHIFT;
 		adp8860_write(data->client, ADP8860_CFGR, reg_val);
 		mutex_unlock(&data->lock);
 	}
diff --git a/drivers/video/backlight/l4f00242t03.c b/drivers/video/backlight/l4f00242t03.c
index 9093ef0..c67801e 100644
--- a/drivers/video/backlight/l4f00242t03.c
+++ b/drivers/video/backlight/l4f00242t03.c
@@ -78,7 +78,7 @@
 	const u16 slpin = 0x10;
 	const u16 disoff = 0x28;
 
-	if (power) {
+	if (power <= FB_BLANK_NORMAL) {
 		if (priv->lcd_on)
 			return 0;
 
diff --git a/drivers/video/backlight/lms283gf05.c b/drivers/video/backlight/lms283gf05.c
index abc43a0..5d3cf33 100644
--- a/drivers/video/backlight/lms283gf05.c
+++ b/drivers/video/backlight/lms283gf05.c
@@ -129,7 +129,7 @@
 	struct spi_device *spi = st->spi;
 	struct lms283gf05_pdata *pdata = spi->dev.platform_data;
 
-	if (power) {
+	if (power <= FB_BLANK_NORMAL) {
 		if (pdata)
 			lms283gf05_reset(pdata->reset_gpio,
 					pdata->reset_inverted);
diff --git a/drivers/video/backlight/mbp_nvidia_bl.c b/drivers/video/backlight/mbp_nvidia_bl.c
index 9fb533f..1485f73 100644
--- a/drivers/video/backlight/mbp_nvidia_bl.c
+++ b/drivers/video/backlight/mbp_nvidia_bl.c
@@ -335,6 +335,24 @@
 		},
 		.driver_data	= (void *)&nvidia_chipset_data,
 	},
+	{
+		.callback	= mbp_dmi_match,
+		.ident		= "MacBookAir 3,1",
+		.matches	= {
+			DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
+			DMI_MATCH(DMI_PRODUCT_NAME, "MacBookAir3,1"),
+		},
+		.driver_data	= (void *)&nvidia_chipset_data,
+	},
+	{
+		.callback	= mbp_dmi_match,
+		.ident		= "MacBookAir 3,2",
+		.matches	= {
+			DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
+			DMI_MATCH(DMI_PRODUCT_NAME, "MacBookAir3,2"),
+		},
+		.driver_data	= (void *)&nvidia_chipset_data,
+	},
 	{ }
 };
 
diff --git a/drivers/video/backlight/pwm_bl.c b/drivers/video/backlight/pwm_bl.c
index 5504435..21866ec 100644
--- a/drivers/video/backlight/pwm_bl.c
+++ b/drivers/video/backlight/pwm_bl.c
@@ -25,6 +25,7 @@
 	struct pwm_device	*pwm;
 	struct device		*dev;
 	unsigned int		period;
+	unsigned int		lth_brightness;
 	int			(*notify)(struct device *,
 					  int brightness);
 };
@@ -48,7 +49,9 @@
 		pwm_config(pb->pwm, 0, pb->period);
 		pwm_disable(pb->pwm);
 	} else {
-		pwm_config(pb->pwm, brightness * pb->period / max, pb->period);
+		brightness = pb->lth_brightness +
+			(brightness * (pb->period - pb->lth_brightness) / max);
+		pwm_config(pb->pwm, brightness, pb->period);
 		pwm_enable(pb->pwm);
 	}
 	return 0;
@@ -92,6 +95,8 @@
 
 	pb->period = data->pwm_period_ns;
 	pb->notify = data->notify;
+	pb->lth_brightness = data->lth_brightness *
+		(data->pwm_period_ns / data->max_brightness);
 	pb->dev = &pdev->dev;
 
 	pb->pwm = pwm_request(data->pwm_id, "backlight");
diff --git a/drivers/video/backlight/s6e63m0.c b/drivers/video/backlight/s6e63m0.c
index a3128c9..5927db0 100644
--- a/drivers/video/backlight/s6e63m0.c
+++ b/drivers/video/backlight/s6e63m0.c
@@ -729,10 +729,10 @@
 
 	return strlen(buf);
 }
-static DEVICE_ATTR(gamma_table, 0644,
+static DEVICE_ATTR(gamma_table, 0444,
 		s6e63m0_sysfs_show_gamma_table, NULL);
 
-static int __init s6e63m0_probe(struct spi_device *spi)
+static int __devinit s6e63m0_probe(struct spi_device *spi)
 {
 	int ret = 0;
 	struct s6e63m0 *lcd = NULL;
@@ -829,6 +829,9 @@
 	struct s6e63m0 *lcd = dev_get_drvdata(&spi->dev);
 
 	s6e63m0_power(lcd, FB_BLANK_POWERDOWN);
+	device_remove_file(&spi->dev, &dev_attr_gamma_table);
+	device_remove_file(&spi->dev, &dev_attr_gamma_mode);
+	backlight_device_unregister(lcd->bd);
 	lcd_device_unregister(lcd->ld);
 	kfree(lcd);
 
diff --git a/drivers/xen/events.c b/drivers/xen/events.c
index 97612f5..321a0c8 100644
--- a/drivers/xen/events.c
+++ b/drivers/xen/events.c
@@ -1299,9 +1299,6 @@
 		evtchn_to_irq[evtchn] = irq;
 		irq_info[irq] = mk_virq_info(evtchn, virq);
 		bind_evtchn_to_cpu(evtchn, cpu);
-
-		/* Ready for use. */
-		unmask_evtchn(evtchn);
 	}
 }
 
@@ -1327,10 +1324,6 @@
 		evtchn_to_irq[evtchn] = irq;
 		irq_info[irq] = mk_ipi_info(evtchn, ipi);
 		bind_evtchn_to_cpu(evtchn, cpu);
-
-		/* Ready for use. */
-		unmask_evtchn(evtchn);
-
 	}
 }
 
@@ -1390,6 +1383,7 @@
 void xen_irq_resume(void)
 {
 	unsigned int cpu, irq, evtchn;
+	struct irq_desc *desc;
 
 	init_evtchn_cpu_bindings();
 
@@ -1408,6 +1402,23 @@
 		restore_cpu_virqs(cpu);
 		restore_cpu_ipis(cpu);
 	}
+
+	/*
+	 * Unmask any IRQF_NO_SUSPEND IRQs which are enabled. These
+	 * are not handled by the IRQ core.
+	 */
+	for_each_irq_desc(irq, desc) {
+		if (!desc->action || !(desc->action->flags & IRQF_NO_SUSPEND))
+			continue;
+		if (desc->status & IRQ_DISABLED)
+			continue;
+
+		evtchn = evtchn_from_irq(irq);
+		if (evtchn == -1)
+			continue;
+
+		unmask_evtchn(evtchn);
+	}
 }
 
 static struct irq_chip xen_dynamic_chip __read_mostly = {
diff --git a/fs/bio.c b/fs/bio.c
index 8abb2df..4bd454f 100644
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -370,6 +370,9 @@
 {
 	struct bio *bio;
 
+	if (nr_iovecs > UIO_MAXIOV)
+		return NULL;
+
 	bio = kmalloc(sizeof(struct bio) + nr_iovecs * sizeof(struct bio_vec),
 		      gfp_mask);
 	if (unlikely(!bio))
@@ -697,8 +700,12 @@
 static struct bio_map_data *bio_alloc_map_data(int nr_segs, int iov_count,
 					       gfp_t gfp_mask)
 {
-	struct bio_map_data *bmd = kmalloc(sizeof(*bmd), gfp_mask);
+	struct bio_map_data *bmd;
 
+	if (iov_count > UIO_MAXIOV)
+		return NULL;
+
+	bmd = kmalloc(sizeof(*bmd), gfp_mask);
 	if (!bmd)
 		return NULL;
 
@@ -827,6 +834,12 @@
 		end = (uaddr + iov[i].iov_len + PAGE_SIZE - 1) >> PAGE_SHIFT;
 		start = uaddr >> PAGE_SHIFT;
 
+		/*
+		 * Overflow, abort
+		 */
+		if (end < start)
+			return ERR_PTR(-EINVAL);
+
 		nr_pages += end - start;
 		len += iov[i].iov_len;
 	}
@@ -955,6 +968,12 @@
 		unsigned long end = (uaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
 		unsigned long start = uaddr >> PAGE_SHIFT;
 
+		/*
+		 * Overflow, abort
+		 */
+		if (end < start)
+			return ERR_PTR(-EINVAL);
+
 		nr_pages += end - start;
 		/*
 		 * buffer must be aligned to at least hardsector size for now
@@ -982,7 +1001,7 @@
 		unsigned long start = uaddr >> PAGE_SHIFT;
 		const int local_nr_pages = end - start;
 		const int page_limit = cur_page + local_nr_pages;
-		
+
 		ret = get_user_pages_fast(uaddr, local_nr_pages,
 				write_to_vm, &pages[cur_page]);
 		if (ret < local_nr_pages) {
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
index 39869c3c..ef3a55b 100644
--- a/fs/cifs/inode.c
+++ b/fs/cifs/inode.c
@@ -2177,7 +2177,6 @@
 
 	setattr_copy(inode, attrs);
 	mark_inode_dirty(inode);
-	return 0;
 
 cifs_setattr_exit:
 	kfree(full_path);
diff --git a/fs/cifs/ioctl.c b/fs/cifs/ioctl.c
index 2fa22f2..0c98672 100644
--- a/fs/cifs/ioctl.c
+++ b/fs/cifs/ioctl.c
@@ -38,10 +38,10 @@
 	struct cifs_sb_info *cifs_sb;
 #ifdef CONFIG_CIFS_POSIX
 	struct cifsFileInfo *pSMBFile = filep->private_data;
-	struct cifsTconInfo *tcon = tlink_tcon(pSMBFile->tlink);
+	struct cifsTconInfo *tcon;
 	__u64	ExtAttrBits = 0;
 	__u64	ExtAttrMask = 0;
-	__u64   caps = le64_to_cpu(tcon->fsUnixInfo.Capability);
+	__u64   caps;
 #endif /* CONFIG_CIFS_POSIX */
 
 	xid = GetXid();
@@ -62,6 +62,10 @@
 			break;
 #ifdef CONFIG_CIFS_POSIX
 		case FS_IOC_GETFLAGS:
+			if (pSMBFile == NULL)
+				break;
+			tcon = tlink_tcon(pSMBFile->tlink);
+			caps = le64_to_cpu(tcon->fsUnixInfo.Capability);
 			if (CIFS_UNIX_EXTATTR_CAP & caps) {
 				rc = CIFSGetExtAttr(xid, tcon, pSMBFile->netfid,
 					&ExtAttrBits, &ExtAttrMask);
@@ -73,6 +77,10 @@
 			break;
 
 		case FS_IOC_SETFLAGS:
+			if (pSMBFile == NULL)
+				break;
+			tcon = tlink_tcon(pSMBFile->tlink);
+			caps = le64_to_cpu(tcon->fsUnixInfo.Capability);
 			if (CIFS_UNIX_EXTATTR_CAP & caps) {
 				if (get_user(ExtAttrBits, (int __user *)arg)) {
 					rc = -EFAULT;
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index d6cfac1..a5fe681 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -932,8 +932,7 @@
 	if (creat_flags == HUGETLB_SHMFS_INODE && !can_do_hugetlb_shm()) {
 		*user = current_user();
 		if (user_shm_lock(size, *user)) {
-			WARN_ONCE(1,
-			  "Using mlock ulimits for SHM_HUGETLB deprecated\n");
+			printk_once(KERN_WARNING "Using mlock ulimits for SHM_HUGETLB is deprecated\n");
 		} else {
 			*user = NULL;
 			return ERR_PTR(-EPERM);
diff --git a/fs/ioprio.c b/fs/ioprio.c
index 748cfb9..2f7d05c 100644
--- a/fs/ioprio.c
+++ b/fs/ioprio.c
@@ -111,12 +111,14 @@
 	read_lock(&tasklist_lock);
 	switch (which) {
 		case IOPRIO_WHO_PROCESS:
+			rcu_read_lock();
 			if (!who)
 				p = current;
 			else
 				p = find_task_by_vpid(who);
 			if (p)
 				ret = set_task_ioprio(p, ioprio);
+			rcu_read_unlock();
 			break;
 		case IOPRIO_WHO_PGRP:
 			if (!who)
@@ -139,7 +141,12 @@
 				break;
 
 			do_each_thread(g, p) {
-				if (__task_cred(p)->uid != who)
+				int match;
+
+				rcu_read_lock();
+				match = __task_cred(p)->uid == who;
+				rcu_read_unlock();
+				if (!match)
 					continue;
 				ret = set_task_ioprio(p, ioprio);
 				if (ret)
@@ -200,12 +207,14 @@
 	read_lock(&tasklist_lock);
 	switch (which) {
 		case IOPRIO_WHO_PROCESS:
+			rcu_read_lock();
 			if (!who)
 				p = current;
 			else
 				p = find_task_by_vpid(who);
 			if (p)
 				ret = get_task_ioprio(p);
+			rcu_read_unlock();
 			break;
 		case IOPRIO_WHO_PGRP:
 			if (!who)
@@ -232,7 +241,12 @@
 				break;
 
 			do_each_thread(g, p) {
-				if (__task_cred(p)->uid != user->uid)
+				int match;
+
+				rcu_read_lock();
+				match = __task_cred(p)->uid == user->uid;
+				rcu_read_unlock();
+				if (!match)
 					continue;
 				tmpio = get_task_ioprio(p);
 				if (tmpio < 0)
diff --git a/fs/locks.c b/fs/locks.c
index 65765cb..0e62dd3 100644
--- a/fs/locks.c
+++ b/fs/locks.c
@@ -1504,9 +1504,8 @@
 
 static int do_fcntl_add_lease(unsigned int fd, struct file *filp, long arg)
 {
-	struct file_lock *fl;
+	struct file_lock *fl, *ret;
 	struct fasync_struct *new;
-	struct inode *inode = filp->f_path.dentry->d_inode;
 	int error;
 
 	fl = lease_alloc(filp, arg);
@@ -1518,13 +1517,16 @@
 		locks_free_lock(fl);
 		return -ENOMEM;
 	}
+	ret = fl;
 	lock_flocks();
-	error = __vfs_setlease(filp, arg, &fl);
+	error = __vfs_setlease(filp, arg, &ret);
 	if (error) {
 		unlock_flocks();
 		locks_free_lock(fl);
 		goto out_free_fasync;
 	}
+	if (ret != fl)
+		locks_free_lock(fl);
 
 	/*
 	 * fasync_insert_entry() returns the old entry if any.
@@ -1532,17 +1534,10 @@
 	 * inserted it into the fasync list. Clear new so that
 	 * we don't release it here.
 	 */
-	if (!fasync_insert_entry(fd, filp, &fl->fl_fasync, new))
+	if (!fasync_insert_entry(fd, filp, &ret->fl_fasync, new))
 		new = NULL;
 
-	if (error < 0) {
-		/* remove lease just inserted by setlease */
-		fl->fl_type = F_UNLCK | F_INPROGRESS;
-		fl->fl_break_time = jiffies - 10;
-		time_out_leases(inode);
-	} else {
-		error = __f_setown(filp, task_pid(current), PIDTYPE_PID, 0);
-	}
+	error = __f_setown(filp, task_pid(current), PIDTYPE_PID, 0);
 	unlock_flocks();
 
 out_free_fasync:
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index f1e5ec6..ad2bfa6 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -673,16 +673,17 @@
 	spin_unlock(&clp->cl_lock);
 }
 
-static void nfsd4_register_conn(struct nfsd4_conn *conn)
+static int nfsd4_register_conn(struct nfsd4_conn *conn)
 {
 	conn->cn_xpt_user.callback = nfsd4_conn_lost;
-	register_xpt_user(conn->cn_xprt, &conn->cn_xpt_user);
+	return register_xpt_user(conn->cn_xprt, &conn->cn_xpt_user);
 }
 
 static __be32 nfsd4_new_conn(struct svc_rqst *rqstp, struct nfsd4_session *ses)
 {
 	struct nfsd4_conn *conn;
 	u32 flags = NFS4_CDFC4_FORE;
+	int ret;
 
 	if (ses->se_flags & SESSION4_BACK_CHAN)
 		flags |= NFS4_CDFC4_BACK;
@@ -690,7 +691,10 @@
 	if (!conn)
 		return nfserr_jukebox;
 	nfsd4_hash_conn(conn, ses);
-	nfsd4_register_conn(conn);
+	ret = nfsd4_register_conn(conn);
+	if (ret)
+		/* oops; xprt is already down: */
+		nfsd4_conn_lost(&conn->cn_xpt_user);
 	return nfs_ok;
 }
 
@@ -1644,6 +1648,7 @@
 {
 	struct nfs4_client *clp = ses->se_client;
 	struct nfsd4_conn *c;
+	int ret;
 
 	spin_lock(&clp->cl_lock);
 	c = __nfsd4_find_conn(new->cn_xprt, ses);
@@ -1654,7 +1659,10 @@
 	}
 	__nfsd4_hash_conn(new, ses);
 	spin_unlock(&clp->cl_lock);
-	nfsd4_register_conn(new);
+	ret = nfsd4_register_conn(new);
+	if (ret)
+		/* oops; xprt is already down: */
+		nfsd4_conn_lost(&new->cn_xpt_user);
 	return;
 }
 
diff --git a/fs/openpromfs/inode.c b/fs/openpromfs/inode.c
index ddb1f41..911e61f 100644
--- a/fs/openpromfs/inode.c
+++ b/fs/openpromfs/inode.c
@@ -418,7 +418,7 @@
 static struct dentry *openprom_mount(struct file_system_type *fs_type,
 	int flags, const char *dev_name, void *data)
 {
-	return mount_single(fs_type, flags, data, openprom_fill_super)
+	return mount_single(fs_type, flags, data, openprom_fill_super);
 }
 
 static struct file_system_type openprom_fs_type = {
diff --git a/fs/xfs/linux-2.6/xfs_aops.c b/fs/xfs/linux-2.6/xfs_aops.c
index c9af48f..7d287af 100644
--- a/fs/xfs/linux-2.6/xfs_aops.c
+++ b/fs/xfs/linux-2.6/xfs_aops.c
@@ -1111,11 +1111,12 @@
 			uptodate = 0;
 
 		/*
-		 * A hole may still be marked uptodate because discard_buffer
-		 * leaves the flag set.
+		 * set_page_dirty dirties all buffers in a page, independent
+		 * of their state.  The dirty state however is entirely
+		 * meaningless for holes (!mapped && uptodate), so skip
+		 * buffers covering holes here.
 		 */
 		if (!buffer_mapped(bh) && buffer_uptodate(bh)) {
-			ASSERT(!buffer_dirty(bh));
 			imap_valid = 0;
 			continue;
 		}
diff --git a/fs/xfs/linux-2.6/xfs_buf.c b/fs/xfs/linux-2.6/xfs_buf.c
index 63fd2c0..aa1d353 100644
--- a/fs/xfs/linux-2.6/xfs_buf.c
+++ b/fs/xfs/linux-2.6/xfs_buf.c
@@ -1781,7 +1781,6 @@
 	INIT_LIST_HEAD(list);
 	spin_lock(dwlk);
 	list_for_each_entry_safe(bp, n, dwq, b_list) {
-		trace_xfs_buf_delwri_split(bp, _RET_IP_);
 		ASSERT(bp->b_flags & XBF_DELWRI);
 
 		if (!XFS_BUF_ISPINNED(bp) && !xfs_buf_cond_lock(bp)) {
@@ -1795,6 +1794,7 @@
 					 _XBF_RUN_QUEUES);
 			bp->b_flags |= XBF_WRITE;
 			list_move_tail(&bp->b_list, list);
+			trace_xfs_buf_delwri_split(bp, _RET_IP_);
 		} else
 			skipped++;
 	}
diff --git a/fs/xfs/linux-2.6/xfs_ioctl.c b/fs/xfs/linux-2.6/xfs_ioctl.c
index 2ea238f6..ad442d9 100644
--- a/fs/xfs/linux-2.6/xfs_ioctl.c
+++ b/fs/xfs/linux-2.6/xfs_ioctl.c
@@ -416,7 +416,7 @@
 	if (IS_ERR(dentry))
 		return PTR_ERR(dentry);
 
-	kbuf = kmalloc(al_hreq.buflen, GFP_KERNEL);
+	kbuf = kzalloc(al_hreq.buflen, GFP_KERNEL);
 	if (!kbuf)
 		goto out_dput;
 
diff --git a/fs/xfs/linux-2.6/xfs_iops.c b/fs/xfs/linux-2.6/xfs_iops.c
index 96107ef..94d5fd6 100644
--- a/fs/xfs/linux-2.6/xfs_iops.c
+++ b/fs/xfs/linux-2.6/xfs_iops.c
@@ -762,7 +762,8 @@
 	inode->i_state = I_NEW;
 
 	inode_sb_list_add(inode);
-	insert_inode_hash(inode);
+	/* make the inode look hashed for the writeback code */
+	hlist_add_fake(&inode->i_hash);
 
 	inode->i_mode	= ip->i_d.di_mode;
 	inode->i_nlink	= ip->i_d.di_nlink;
diff --git a/fs/xfs/linux-2.6/xfs_super.c b/fs/xfs/linux-2.6/xfs_super.c
index 9f3a78f..064f964 100644
--- a/fs/xfs/linux-2.6/xfs_super.c
+++ b/fs/xfs/linux-2.6/xfs_super.c
@@ -353,9 +353,6 @@
 			mp->m_qflags &= ~XFS_OQUOTA_ENFD;
 		} else if (!strcmp(this_char, MNTOPT_DELAYLOG)) {
 			mp->m_flags |= XFS_MOUNT_DELAYLOG;
-			cmn_err(CE_WARN,
-				"Enabling EXPERIMENTAL delayed logging feature "
-				"- use at your own risk.\n");
 		} else if (!strcmp(this_char, MNTOPT_NODELAYLOG)) {
 			mp->m_flags &= ~XFS_MOUNT_DELAYLOG;
 		} else if (!strcmp(this_char, "ihashsize")) {
diff --git a/fs/xfs/linux-2.6/xfs_sync.c b/fs/xfs/linux-2.6/xfs_sync.c
index 37d3325..afb0d7c 100644
--- a/fs/xfs/linux-2.6/xfs_sync.c
+++ b/fs/xfs/linux-2.6/xfs_sync.c
@@ -853,6 +853,7 @@
 		if (trylock) {
 			if (!mutex_trylock(&pag->pag_ici_reclaim_lock)) {
 				skipped++;
+				xfs_perag_put(pag);
 				continue;
 			}
 			first_index = pag->pag_ici_reclaim_cursor;
diff --git a/fs/xfs/xfs_filestream.c b/fs/xfs/xfs_filestream.c
index 9b715dc..9124425 100644
--- a/fs/xfs/xfs_filestream.c
+++ b/fs/xfs/xfs_filestream.c
@@ -744,9 +744,15 @@
 	 * If the file's parent directory is known, take its iolock in exclusive
 	 * mode to prevent two sibling files from racing each other to migrate
 	 * themselves and their parent to different AGs.
+	 *
+	 * Note that we lock the parent directory iolock inside the child
+	 * iolock here.  That's fine as we never hold both parent and child
+	 * iolock in any other place.  This is different from the ilock,
+	 * which requires locking of the child after the parent for namespace
+	 * operations.
 	 */
 	if (pip)
-		xfs_ilock(pip, XFS_IOLOCK_EXCL);
+		xfs_ilock(pip, XFS_IOLOCK_EXCL | XFS_IOLOCK_PARENT);
 
 	/*
 	 * A new AG needs to be found for the file.  If the file's parent
diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c
index b1498ab..19e9dfa 100644
--- a/fs/xfs/xfs_mount.c
+++ b/fs/xfs/xfs_mount.c
@@ -275,6 +275,7 @@
 		pag = radix_tree_delete(&mp->m_perag_tree, agno);
 		spin_unlock(&mp->m_perag_lock);
 		ASSERT(pag);
+		ASSERT(atomic_read(&pag->pag_ref) == 0);
 		call_rcu(&pag->rcu_head, __xfs_free_perag);
 	}
 }
diff --git a/fs/xfs/xfs_quota.h b/fs/xfs/xfs_quota.h
index e0e64b1..9bb6eda 100644
--- a/fs/xfs/xfs_quota.h
+++ b/fs/xfs/xfs_quota.h
@@ -346,8 +346,17 @@
 #define xfs_trans_mod_dquot_byino(tp, ip, fields, delta)
 #define xfs_trans_apply_dquot_deltas(tp)
 #define xfs_trans_unreserve_and_mod_dquots(tp)
-#define xfs_trans_reserve_quota_nblks(tp, ip, nblks, ninos, flags)	(0)
-#define xfs_trans_reserve_quota_bydquots(tp, mp, u, g, nb, ni, fl)	(0)
+static inline int xfs_trans_reserve_quota_nblks(struct xfs_trans *tp,
+		struct xfs_inode *ip, long nblks, long ninos, uint flags)
+{
+	return 0;
+}
+static inline int xfs_trans_reserve_quota_bydquots(struct xfs_trans *tp,
+		struct xfs_mount *mp, struct xfs_dquot *udqp,
+		struct xfs_dquot *gdqp, long nblks, long nions, uint flags)
+{
+	return 0;
+}
 #define xfs_qm_vop_create_dqattach(tp, ip, u, g)
 #define xfs_qm_vop_rename_dqattach(it)					(0)
 #define xfs_qm_vop_chown(tp, ip, old, new)				(NULL)
@@ -357,11 +366,14 @@
 #define xfs_qm_dqdetach(ip)
 #define xfs_qm_dqrele(d)
 #define xfs_qm_statvfs(ip, s)
-#define xfs_qm_sync(mp, fl)						(0)
+static inline int xfs_qm_sync(struct xfs_mount *mp, int flags)
+{
+	return 0;
+}
 #define xfs_qm_newmount(mp, a, b)					(0)
 #define xfs_qm_mount_quotas(mp)
 #define xfs_qm_unmount(mp)
-#define xfs_qm_unmount_quotas(mp)					(0)
+#define xfs_qm_unmount_quotas(mp)
 #endif /* CONFIG_XFS_QUOTA */
 
 #define xfs_trans_unreserve_quota_nblks(tp, ip, nblks, ninos, flags) \
diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h
index 5afa5b5..beafc15 100644
--- a/include/drm/ttm/ttm_bo_api.h
+++ b/include/drm/ttm/ttm_bo_api.h
@@ -432,6 +432,10 @@
  * together with the @destroy function,
  * enables driver-specific objects derived from a ttm_buffer_object.
  * On successful return, the object kref and list_kref are set to 1.
+ * If a failure occurs, the function will call the @destroy function, or
+ * kfree() if @destroy is NULL. Thus, after a failure, dereferencing @bo is
+ * illegal and will likely cause memory corruption.
+ *
  * Returns
  * -ENOMEM: Out of memory.
  * -EINVAL: Invalid placement flags.
diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h
index d01b4dd..8e0c848 100644
--- a/include/drm/ttm/ttm_bo_driver.h
+++ b/include/drm/ttm/ttm_bo_driver.h
@@ -206,14 +206,84 @@
 struct ttm_mem_type_manager;
 
 struct ttm_mem_type_manager_func {
+	/**
+	 * struct ttm_mem_type_manager member init
+	 *
+	 * @man: Pointer to a memory type manager.
+	 * @p_size: Implementation dependent, but typically the size of the
+	 * range to be managed in pages.
+	 *
+	 * Called to initialize a private range manager. The function is
+	 * expected to initialize the man::priv member.
+	 * Returns 0 on success, negative error code on failure.
+	 */
 	int  (*init)(struct ttm_mem_type_manager *man, unsigned long p_size);
+
+	/**
+	 * struct ttm_mem_type_manager member takedown
+	 *
+	 * @man: Pointer to a memory type manager.
+	 *
+	 * Called to undo the setup done in init. All allocated resources
+	 * should be freed.
+	 */
 	int  (*takedown)(struct ttm_mem_type_manager *man);
+
+	/**
+	 * struct ttm_mem_type_manager member get_node
+	 *
+	 * @man: Pointer to a memory type manager.
+	 * @bo: Pointer to the buffer object we're allocating space for.
+	 * @placement: Placement details.
+	 * @mem: Pointer to a struct ttm_mem_reg to be filled in.
+	 *
+	 * This function should allocate space in the memory type managed
+	 * by @man. Placement details if
+	 * applicable are given by @placement. If successful,
+	 * @mem::mm_node should be set to a non-null value, and
+	 * @mem::start should be set to a value identifying the beginning
+	 * of the range allocated, and the function should return zero.
+	 * If the memory region cannot accommodate the buffer object,
+	 * @mem::mm_node should be set to NULL and the function should return 0.
+	 * If a system error occurred, preventing the request from being fulfilled,
+	 * the function should return a negative error code.
+	 *
+	 * Note that @mem::mm_node will only be dereferenced by
+	 * struct ttm_mem_type_manager functions and optionally by the driver,
+	 * which has knowledge of the underlying type.
+	 *
+	 * This function may not be called from within atomic context, so
+	 * an implementation can and must use either a mutex or a spinlock to
+	 * protect any data structures managing the space.
+	 */
 	int  (*get_node)(struct ttm_mem_type_manager *man,
 			 struct ttm_buffer_object *bo,
 			 struct ttm_placement *placement,
 			 struct ttm_mem_reg *mem);
+
+	/**
+	 * struct ttm_mem_type_manager member put_node
+	 *
+	 * @man: Pointer to a memory type manager.
+	 * @mem: Pointer to a struct ttm_mem_reg to be filled in.
+	 *
+	 * This function frees memory type resources previously allocated
+	 * and identified by @mem::mm_node and @mem::start. May not
+	 * be called from within atomic context.
+	 */
 	void (*put_node)(struct ttm_mem_type_manager *man,
 			 struct ttm_mem_reg *mem);
+
+	/**
+	 * struct ttm_mem_type_manager member debug
+	 *
+	 * @man: Pointer to a memory type manager.
+	 * @prefix: Prefix to be used in printout to identify the caller.
+	 *
+	 * This function is called to print out the state of the memory
+	 * type manager to aid debugging of out-of-memory conditions.
+	 * It may not be called from within atomic context.
+	 */
 	void (*debug)(struct ttm_mem_type_manager *man, const char *prefix);
 };
 
@@ -231,14 +301,13 @@
 	uint64_t size;
 	uint32_t available_caching;
 	uint32_t default_caching;
-
-	/*
-	 * Protected by the bdev->lru_lock.
-	 * TODO: Consider one lru_lock per ttm_mem_type_manager.
-	 * Plays ill with list removal, though.
-	 */
 	const struct ttm_mem_type_manager_func *func;
 	void *priv;
+
+	/*
+	 * Protected by the global->lru_lock.
+	 */
+
 	struct list_head lru;
 };
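For readers unfamiliar with the pluggable manager interface documented above, here is a
minimal, hypothetical sketch of a driver-private manager wired into a
ttm_mem_type_manager_func table. The bump-style allocator, the my_mgr naming and the use
of man->priv as the only state are illustrative assumptions, not part of the TTM API or
of this patch; error handling and real space tracking are omitted.

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <drm/ttm/ttm_bo_driver.h>

/* illustrative only: trivial bump allocator that never reuses freed space */
struct my_mgr {
	spinlock_t lock;
	unsigned long next;	/* first free page offset */
	unsigned long size;	/* total pages managed */
};

static int my_mgr_init(struct ttm_mem_type_manager *man, unsigned long p_size)
{
	struct my_mgr *mgr = kzalloc(sizeof(*mgr), GFP_KERNEL);

	if (!mgr)
		return -ENOMEM;
	spin_lock_init(&mgr->lock);
	mgr->size = p_size;
	man->priv = mgr;
	return 0;
}

static int my_mgr_takedown(struct ttm_mem_type_manager *man)
{
	kfree(man->priv);
	man->priv = NULL;
	return 0;
}

static int my_mgr_get_node(struct ttm_mem_type_manager *man,
			   struct ttm_buffer_object *bo,
			   struct ttm_placement *placement,
			   struct ttm_mem_reg *mem)
{
	struct my_mgr *mgr = man->priv;

	spin_lock(&mgr->lock);
	if (mgr->next + mem->num_pages > mgr->size) {
		mem->mm_node = NULL;	/* no space: not an error */
	} else {
		mem->start = mgr->next;
		mem->mm_node = mgr;	/* any non-NULL cookie we recognize */
		mgr->next += mem->num_pages;
	}
	spin_unlock(&mgr->lock);
	return 0;
}

static void my_mgr_put_node(struct ttm_mem_type_manager *man,
			    struct ttm_mem_reg *mem)
{
	/* a real manager would return [start, start + num_pages) to its pool */
	mem->mm_node = NULL;
}

static void my_mgr_debug(struct ttm_mem_type_manager *man, const char *prefix)
{
	struct my_mgr *mgr = man->priv;

	printk(KERN_INFO "%s: %lu of %lu pages used\n",
	       prefix, mgr->next, mgr->size);
}

static const struct ttm_mem_type_manager_func my_mgr_func = {
	.init		= my_mgr_init,
	.takedown	= my_mgr_takedown,
	.get_node	= my_mgr_get_node,
	.put_node	= my_mgr_put_node,
	.debug		= my_mgr_debug,
};

A driver would point man->func at my_mgr_func when initializing the memory type.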
 
diff --git a/include/linux/atomic.h b/include/linux/atomic.h
new file mode 100644
index 0000000..96c038e
--- /dev/null
+++ b/include/linux/atomic.h
@@ -0,0 +1,37 @@
+#ifndef _LINUX_ATOMIC_H
+#define _LINUX_ATOMIC_H
+#include <asm/atomic.h>
+
+/**
+ * atomic_inc_not_zero_hint - increment if not null
+ * @v: pointer of type atomic_t
+ * @hint: probable value of the atomic before the increment
+ *
+ * This version of atomic_inc_not_zero() gives a hint of the probable
+ * value of the atomic. This helps the processor avoid reading the memory
+ * before doing the atomic read/modify/write cycle, lowering the
+ * number of bus transactions on some arches.
+ *
+ * Returns: 0 if increment was not done, 1 otherwise.
+ */
+#ifndef atomic_inc_not_zero_hint
+static inline int atomic_inc_not_zero_hint(atomic_t *v, int hint)
+{
+	int val, c = hint;
+
+	/* sanity test, should be removed by compiler if hint is a constant */
+	if (!hint)
+		return atomic_inc_not_zero(v);
+
+	do {
+		val = atomic_cmpxchg(v, c, c + 1);
+		if (val == c)
+			return 1;
+		c = val;
+	} while (c);
+
+	return 0;
+}
+#endif
+
+#endif /* _LINUX_ATOMIC_H */
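As a usage illustration (not part of the patch): the hint variant is a drop-in for
atomic_inc_not_zero() wherever the caller knows the counter's most likely value, such as
a reference count that is almost always 1 when a lookup succeeds. The cached_obj naming
below is a made-up example.

#include <linux/types.h>
#include <linux/atomic.h>

struct cached_obj {
	atomic_t refcnt;	/* usually 1 while the object sits in the cache */
};

/* try to take a reference; fails only if the object is already being freed */
static bool cached_obj_tryget(struct cached_obj *obj)
{
	/* the hint of 1 lets some arches skip the initial read of refcnt */
	return atomic_inc_not_zero_hint(&obj->refcnt, 1);
}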
diff --git a/include/linux/bio.h b/include/linux/bio.h
index ba67999..35dcdb3 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -66,10 +66,6 @@
 #define bio_offset(bio)		bio_iovec((bio))->bv_offset
 #define bio_segments(bio)	((bio)->bi_vcnt - (bio)->bi_idx)
 #define bio_sectors(bio)	((bio)->bi_size >> 9)
-#define bio_empty_barrier(bio) \
-	((bio->bi_rw & REQ_HARDBARRIER) && \
-	 !bio_has_data(bio) && \
-	 !(bio->bi_rw & REQ_DISCARD))
 
 static inline unsigned int bio_cur_bytes(struct bio *bio)
 {
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index 0437ab6..46ad519 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -122,7 +122,6 @@
 	__REQ_FAILFAST_TRANSPORT, /* no driver retries of transport errors */
 	__REQ_FAILFAST_DRIVER,	/* no driver retries of driver errors */
 
-	__REQ_HARDBARRIER,	/* may not be passed by drive either */
 	__REQ_SYNC,		/* request is sync (sync write or read) */
 	__REQ_META,		/* metadata io request */
 	__REQ_DISCARD,		/* request to discard sectors */
@@ -159,7 +158,6 @@
 #define REQ_FAILFAST_DEV	(1 << __REQ_FAILFAST_DEV)
 #define REQ_FAILFAST_TRANSPORT	(1 << __REQ_FAILFAST_TRANSPORT)
 #define REQ_FAILFAST_DRIVER	(1 << __REQ_FAILFAST_DRIVER)
-#define REQ_HARDBARRIER		(1 << __REQ_HARDBARRIER)
 #define REQ_SYNC		(1 << __REQ_SYNC)
 #define REQ_META		(1 << __REQ_META)
 #define REQ_DISCARD		(1 << __REQ_DISCARD)
@@ -168,8 +166,8 @@
 #define REQ_FAILFAST_MASK \
 	(REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER)
 #define REQ_COMMON_MASK \
-	(REQ_WRITE | REQ_FAILFAST_MASK | REQ_HARDBARRIER | REQ_SYNC | \
-	 REQ_META | REQ_DISCARD | REQ_NOIDLE | REQ_FLUSH | REQ_FUA)
+	(REQ_WRITE | REQ_FAILFAST_MASK | REQ_SYNC | REQ_META | REQ_DISCARD | \
+	 REQ_NOIDLE | REQ_FLUSH | REQ_FUA)
 #define REQ_CLONE_MASK		REQ_COMMON_MASK
 
 #define REQ_UNPLUG		(1 << __REQ_UNPLUG)
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 5027a59..aae86fd 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -552,8 +552,7 @@
  * it already be started by driver.
  */
 #define RQ_NOMERGE_FLAGS	\
-	(REQ_NOMERGE | REQ_STARTED | REQ_HARDBARRIER | REQ_SOFTBARRIER | \
-	 REQ_FLUSH | REQ_FUA)
+	(REQ_NOMERGE | REQ_STARTED | REQ_SOFTBARRIER | REQ_FLUSH | REQ_FUA)
 #define rq_mergeable(rq)	\
 	(!((rq)->cmd_flags & RQ_NOMERGE_FLAGS) && \
 	 (((rq)->cmd_flags & REQ_DISCARD) || \
diff --git a/include/linux/drbd.h b/include/linux/drbd.h
index 9b2a015..ef44c7a 100644
--- a/include/linux/drbd.h
+++ b/include/linux/drbd.h
@@ -53,7 +53,7 @@
 
 
 extern const char *drbd_buildtag(void);
-#define REL_VERSION "8.3.9rc2"
+#define REL_VERSION "8.3.9"
 #define API_VERSION 88
 #define PRO_VERSION_MIN 86
 #define PRO_VERSION_MAX 95
diff --git a/include/linux/highmem.h b/include/linux/highmem.h
index e913819..b676c58 100644
--- a/include/linux/highmem.h
+++ b/include/linux/highmem.h
@@ -5,6 +5,7 @@
 #include <linux/kernel.h>
 #include <linux/mm.h>
 #include <linux/uaccess.h>
+#include <linux/hardirq.h>
 
 #include <asm/cacheflush.h>
 
diff --git a/include/linux/i2c/adp5588.h b/include/linux/i2c/adp5588.h
index 3c5d6b6..cec17cf 100644
--- a/include/linux/i2c/adp5588.h
+++ b/include/linux/i2c/adp5588.h
@@ -1,7 +1,7 @@
 /*
  * Analog Devices ADP5588 I/O Expander and QWERTY Keypad Controller
  *
- * Copyright 2009 Analog Devices Inc.
+ * Copyright 2009-2010 Analog Devices Inc.
  *
  * Licensed under the GPL-2 or later.
  */
@@ -77,13 +77,26 @@
  /* Configuration Register1 */
 #define ADP5588_AUTO_INC	(1 << 7)
 #define ADP5588_GPIEM_CFG	(1 << 6)
+#define ADP5588_OVR_FLOW_M	(1 << 5)
 #define ADP5588_INT_CFG		(1 << 4)
+#define ADP5588_OVR_FLOW_IEN	(1 << 3)
+#define ADP5588_K_LCK_IM	(1 << 2)
 #define ADP5588_GPI_IEN		(1 << 1)
+#define ADP5588_KE_IEN		(1 << 0)
 
 /* Interrupt Status Register */
+#define ADP5588_CMP2_INT	(1 << 5)
+#define ADP5588_CMP1_INT	(1 << 4)
+#define ADP5588_OVR_FLOW_INT	(1 << 3)
+#define ADP5588_K_LCK_INT	(1 << 2)
 #define ADP5588_GPI_INT		(1 << 1)
 #define ADP5588_KE_INT		(1 << 0)
 
+/* Key Lock and Event Counter Register */
+#define ADP5588_K_LCK_EN	(1 << 6)
+#define ADP5588_LCK21		0x30
+#define ADP5588_KEC		0xF
+
 #define ADP5588_MAXGPIO		18
 #define ADP5588_BANK(offs)	((offs) >> 3)
 #define ADP5588_BIT(offs)	(1u << ((offs) & 0x7))
diff --git a/include/linux/input.h b/include/linux/input.h
index 51af441..6ef4446 100644
--- a/include/linux/input.h
+++ b/include/linux/input.h
@@ -1406,6 +1406,8 @@
 int __must_check input_register_device(struct input_dev *);
 void input_unregister_device(struct input_dev *);
 
+void input_reset_device(struct input_dev *);
+
 int __must_check input_register_handler(struct input_handler *);
 void input_unregister_handler(struct input_handler *);
 
@@ -1421,7 +1423,7 @@
 int input_open_device(struct input_handle *);
 void input_close_device(struct input_handle *);
 
-int input_flush_device(struct input_handle* handle, struct file* file);
+int input_flush_device(struct input_handle *handle, struct file *file);
 
 void input_event(struct input_dev *dev, unsigned int type, unsigned int code, int value);
 void input_inject_event(struct input_handle *handle, unsigned int type, unsigned int code, int value);
diff --git a/include/linux/iocontext.h b/include/linux/iocontext.h
index 3e70b21..b2eee89 100644
--- a/include/linux/iocontext.h
+++ b/include/linux/iocontext.h
@@ -76,7 +76,6 @@
 void exit_io_context(struct task_struct *task);
 struct io_context *get_io_context(gfp_t gfp_flags, int node);
 struct io_context *alloc_io_context(gfp_t gfp_flags, int node);
-void copy_io_context(struct io_context **pdst, struct io_context **psrc);
 #else
 static inline void exit_io_context(struct task_struct *task)
 {
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index 450092c..fc3da9e 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -60,7 +60,7 @@
 #define DIV_ROUND_UP(n,d) (((n) + (d) - 1) / (d))
 #define roundup(x, y) (					\
 {							\
-	typeof(y) __y = y;				\
+	const typeof(y) __y = y;			\
 	(((x) + (__y - 1)) / __y) * __y;		\
 }							\
 )
@@ -293,6 +293,7 @@
 				   unsigned int interval_msec);
 
 extern int printk_delay_msec;
+extern int dmesg_restrict;
 
 /*
  * Print a one-time message (analogous to WARN_ONCE() et al):
diff --git a/include/linux/leds-lp5521.h b/include/linux/leds-lp5521.h
new file mode 100644
index 0000000..38368d7
--- /dev/null
+++ b/include/linux/leds-lp5521.h
@@ -0,0 +1,47 @@
+/*
+ * LP5521 LED chip driver.
+ *
+ * Copyright (C) 2010 Nokia Corporation
+ *
+ * Contact: Samu Onkalo <samu.p.onkalo@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ */
+
+#ifndef __LINUX_LP5521_H
+#define __LINUX_LP5521_H
+
+/* See Documentation/leds/leds-lp5521.txt */
+
+struct lp5521_led_config {
+	u8		chan_nr;
+	u8		led_current; /* mA x10, 0 if led is not connected */
+	u8		max_current;
+};
+
+#define LP5521_CLOCK_AUTO	0
+#define LP5521_CLOCK_INT	1
+#define LP5521_CLOCK_EXT	2
+
+struct lp5521_platform_data {
+	struct lp5521_led_config *led_config;
+	u8	num_channels;
+	u8	clock_mode;
+	int	(*setup_resources)(void);
+	void	(*release_resources)(void);
+	void	(*enable)(bool state);
+};
+
+#endif /* __LINUX_LP5521_H */
diff --git a/include/linux/leds-lp5523.h b/include/linux/leds-lp5523.h
new file mode 100644
index 0000000..7967476
--- /dev/null
+++ b/include/linux/leds-lp5523.h
@@ -0,0 +1,47 @@
+/*
+ * LP5523 LED Driver
+ *
+ * Copyright (C) 2010 Nokia Corporation
+ *
+ * Contact: Samu Onkalo <samu.p.onkalo@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ */
+
+#ifndef __LINUX_LP5523_H
+#define __LINUX_LP5523_H
+
+/* See Documentation/leds/leds-lp5523.txt */
+
+struct lp5523_led_config {
+	u8		chan_nr;
+	u8		led_current; /* mA x10, 0 if led is not connected */
+	u8		max_current;
+};
+
+#define LP5523_CLOCK_AUTO	0
+#define LP5523_CLOCK_INT	1
+#define LP5523_CLOCK_EXT	2
+
+struct lp5523_platform_data {
+	struct lp5523_led_config *led_config;
+	u8	num_channels;
+	u8	clock_mode;
+	int	(*setup_resources)(void);
+	void	(*release_resources)(void);
+	void	(*enable)(bool state);
+};
+
+#endif /* __LINUX_LP5523_H */
diff --git a/include/linux/leds.h b/include/linux/leds.h
index ba6986a..0f19df9 100644
--- a/include/linux/leds.h
+++ b/include/linux/leds.h
@@ -15,6 +15,7 @@
 #include <linux/list.h>
 #include <linux/spinlock.h>
 #include <linux/rwsem.h>
+#include <linux/timer.h>
 
 struct device;
 /*
@@ -45,10 +46,14 @@
 	/* Get LED brightness level */
 	enum led_brightness (*brightness_get)(struct led_classdev *led_cdev);
 
-	/* Activate hardware accelerated blink, delays are in
-	 * miliseconds and if none is provided then a sensible default
-	 * should be chosen. The call can adjust the timings if it can't
-	 * match the values specified exactly. */
+	/*
+	 * Activate hardware-accelerated blink; delays are in milliseconds.
+	 * If both delays are zero, a sensible default should be chosen.
+	 * The call should adjust the timings in that case, and also when
+	 * it cannot match the requested values exactly.
+	 * Deactivate blinking again when the brightness is set to a fixed
+	 * value via the brightness_set() callback.
+	 */
 	int		(*blink_set)(struct led_classdev *led_cdev,
 				     unsigned long *delay_on,
 				     unsigned long *delay_off);
@@ -57,6 +62,10 @@
 	struct list_head	 node;			/* LED Device list */
 	const char		*default_trigger;	/* Trigger to use */
 
+	unsigned long		 blink_delay_on, blink_delay_off;
+	struct timer_list	 blink_timer;
+	int			 blink_brightness;
+
 #ifdef CONFIG_LEDS_TRIGGERS
 	/* Protects the trigger data below */
 	struct rw_semaphore	 trigger_lock;
@@ -73,6 +82,36 @@
 extern void led_classdev_suspend(struct led_classdev *led_cdev);
 extern void led_classdev_resume(struct led_classdev *led_cdev);
 
+/**
+ * led_blink_set - set blinking with software fallback
+ * @led_cdev: the LED to start blinking
+ * @delay_on: the time it should be on (in ms)
+ * @delay_off: the time it should be off (in ms)
+ *
+ * This function makes the LED blink, attempting to use the
+ * hardware acceleration if possible, but falling back to
+ * software blinking if there is no hardware blinking or if
+ * the LED refuses the passed values.
+ *
+ * Note that if software blinking is active, simply calling
+ * led_cdev->brightness_set() will not stop the blinking;
+ * use led_brightness_set() instead.
+ */
+extern void led_blink_set(struct led_classdev *led_cdev,
+			  unsigned long *delay_on,
+			  unsigned long *delay_off);
+/**
+ * led_brightness_set - set LED brightness
+ * @led_cdev: the LED to set
+ * @brightness: the brightness to set it to
+ *
+ * Set an LED's brightness, and, if necessary, cancel the
+ * software blink timer that implements blinking when the
+ * hardware doesn't.
+ */
+extern void led_brightness_set(struct led_classdev *led_cdev,
+			       enum led_brightness brightness);
+
 /*
  * LED Triggers
  */
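For context, here is a minimal sketch (not part of the patch) of how an LED trigger might use the two helpers declared above. The trigger callbacks, names and delay values are hypothetical; only led_blink_set(), led_brightness_set() and LED_OFF come from <linux/leds.h>.

#include <linux/leds.h>

/* Hypothetical trigger activation: request a 500 ms on / 500 ms off blink. */
static void example_trig_activate(struct led_classdev *led_cdev)
{
	unsigned long delay_on = 500;	/* ms; passing 0/0 asks for driver defaults */
	unsigned long delay_off = 500;

	/* Uses hardware blinking via blink_set() when available, otherwise
	 * falls back to the software blink_timer added in this patch. */
	led_blink_set(led_cdev, &delay_on, &delay_off);
}

static void example_trig_deactivate(struct led_classdev *led_cdev)
{
	/* led_brightness_set() also cancels software blinking; calling
	 * led_cdev->brightness_set() directly would not. */
	led_brightness_set(led_cdev, LED_OFF);
}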
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 057bf22..40150f3 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -747,6 +747,16 @@
 	u64				tstamp_running;
 	u64				tstamp_stopped;
 
+	/*
+	 * timestamp shadows the actual context timing but it can
+	 * be safely used in NMI interrupt context. It reflects the
+	 * context time as it was when the event was last scheduled in.
+	 *
+	 * ctx_time already accounts for ctx->timestamp. Therefore to
+	 * compute ctx_time for a sample, simply add perf_clock().
+	 */
+	u64				shadow_ctx_time;
+
 	struct perf_event_attr		attr;
 	struct hw_perf_event		hw;
 
diff --git a/include/linux/pwm_backlight.h b/include/linux/pwm_backlight.h
index 01b3d75..e031e1a 100644
--- a/include/linux/pwm_backlight.h
+++ b/include/linux/pwm_backlight.h
@@ -8,6 +8,7 @@
 	int pwm_id;
 	unsigned int max_brightness;
 	unsigned int dft_brightness;
+	unsigned int lth_brightness;
 	unsigned int pwm_period_ns;
 	int (*init)(struct device *dev);
 	int (*notify)(struct device *dev, int brightness);
diff --git a/include/linux/radix-tree.h b/include/linux/radix-tree.h
index a39cbed..ab2baa5 100644
--- a/include/linux/radix-tree.h
+++ b/include/linux/radix-tree.h
@@ -34,19 +34,13 @@
  * needed for RCU lookups (because root->height is unreliable). The only
  * time callers need worry about this is when doing a lookup_slot under
  * RCU.
+ *
+ * The indirect pointer is in fact also used to tag the last pointer of a
+ * node when it is shrunk, before we RCU-free the node. See the shrink code
+ * for details.
  */
 #define RADIX_TREE_INDIRECT_PTR	1
-#define RADIX_TREE_RETRY ((void *)-1UL)
 
-static inline void *radix_tree_ptr_to_indirect(void *ptr)
-{
-	return (void *)((unsigned long)ptr | RADIX_TREE_INDIRECT_PTR);
-}
-
-static inline void *radix_tree_indirect_to_ptr(void *ptr)
-{
-	return (void *)((unsigned long)ptr & ~RADIX_TREE_INDIRECT_PTR);
-}
 #define radix_tree_indirect_to_ptr(ptr) \
 	radix_tree_indirect_to_ptr((void __force *)(ptr))
 
@@ -140,16 +134,29 @@
  *		removed.
  *
  * For use with radix_tree_lookup_slot().  Caller must hold tree at least read
- * locked across slot lookup and dereference.  More likely, will be used with
- * radix_tree_replace_slot(), as well, so caller will hold tree write locked.
+ * locked across slot lookup and dereference. Not required if the write lock
+ * is held (i.e. items cannot be concurrently inserted).
+ *
+ * radix_tree_deref_retry must be used to confirm validity of the pointer if
+ * only the read lock is held.
  */
 static inline void *radix_tree_deref_slot(void **pslot)
 {
-	void *ret = rcu_dereference(*pslot);
-	if (unlikely(radix_tree_is_indirect_ptr(ret)))
-		ret = RADIX_TREE_RETRY;
-	return ret;
+	return rcu_dereference(*pslot);
 }
+
+/**
+ * radix_tree_deref_retry	- check radix_tree_deref_slot
+ * @arg:	pointer returned by radix_tree_deref_slot
+ * Returns:	0 if retry is not required, otherwise retry is required
+ *
+ * radix_tree_deref_retry must be used with radix_tree_deref_slot.
+ */
+static inline int radix_tree_deref_retry(void *arg)
+{
+	return unlikely((unsigned long)arg & RADIX_TREE_INDIRECT_PTR);
+}
+
 /**
  * radix_tree_replace_slot	- replace item in a slot
  * @pslot:	pointer to slot, returned by radix_tree_lookup_slot
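For context, a minimal sketch of the lookup protocol that radix_tree_deref_retry() enables under RCU; it mirrors the mm/filemap.c changes later in this series. The function and root names are placeholders, and a real caller would still need to pin the returned item (e.g. with a speculative reference) and re-verify it before dropping rcu_read_lock():

#include <linux/radix-tree.h>
#include <linux/rcupdate.h>

static void *example_rcu_lookup(struct radix_tree_root *root,
				unsigned long index)
{
	void **slot;
	void *item;

	rcu_read_lock();
repeat:
	item = NULL;
	slot = radix_tree_lookup_slot(root, index);
	if (slot) {
		item = radix_tree_deref_slot(slot);
		if (item && radix_tree_deref_retry(item)) {
			/* Slot went stale (e.g. the tree was shrunk or the
			 * root was replaced); redo the whole lookup. */
			goto repeat;
		}
	}
	rcu_read_unlock();
	return item;
}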
diff --git a/include/linux/resource.h b/include/linux/resource.h
index 88d36f9..d01c96c 100644
--- a/include/linux/resource.h
+++ b/include/linux/resource.h
@@ -2,6 +2,7 @@
 #define _LINUX_RESOURCE_H
 
 #include <linux/time.h>
+#include <linux/types.h>
 
 /*
  * Resource control/accounting header file for linux
diff --git a/include/linux/sunrpc/svc_xprt.h b/include/linux/sunrpc/svc_xprt.h
index bbdb680..aea0d438 100644
--- a/include/linux/sunrpc/svc_xprt.h
+++ b/include/linux/sunrpc/svc_xprt.h
@@ -82,13 +82,6 @@
 	struct net		*xpt_net;
 };
 
-static inline void register_xpt_user(struct svc_xprt *xpt, struct svc_xpt_user *u)
-{
-	spin_lock(&xpt->xpt_lock);
-	list_add(&u->list, &xpt->xpt_users);
-	spin_unlock(&xpt->xpt_lock);
-}
-
 static inline void unregister_xpt_user(struct svc_xprt *xpt, struct svc_xpt_user *u)
 {
 	spin_lock(&xpt->xpt_lock);
@@ -96,6 +89,23 @@
 	spin_unlock(&xpt->xpt_lock);
 }
 
+static inline int register_xpt_user(struct svc_xprt *xpt, struct svc_xpt_user *u)
+{
+	spin_lock(&xpt->xpt_lock);
+	if (test_bit(XPT_CLOSE, &xpt->xpt_flags)) {
+		/*
+		 * The connection is about to be deleted (or, worse,
+		 * may already have been deleted, in which case we
+		 * have already notified the xpt_users).
+		 */
+		spin_unlock(&xpt->xpt_lock);
+		return -ENOTCONN;
+	}
+	list_add(&u->list, &xpt->xpt_users);
+	spin_unlock(&xpt->xpt_lock);
+	return 0;
+}
+
 int	svc_reg_xprt_class(struct svc_xprt_class *);
 void	svc_unreg_xprt_class(struct svc_xprt_class *);
 void	svc_xprt_init(struct svc_xprt_class *, struct svc_xprt *,
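For context, a minimal sketch (not part of the patch) of how a caller would adapt to register_xpt_user() now returning an error; the wrapper name is a placeholder:

#include <linux/errno.h>
#include <linux/sunrpc/svc_xprt.h>

static int example_watch_xprt(struct svc_xprt *xprt, struct svc_xpt_user *u)
{
	int ret = register_xpt_user(xprt, u);

	if (ret == -ENOTCONN) {
		/* The transport is closing or already gone, so the
		 * registered close callback will never be invoked;
		 * clean up here instead of waiting for it. */
		return ret;
	}
	return 0;	/* callback will fire when the transport is closed */
}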
diff --git a/kernel/latencytop.c b/kernel/latencytop.c
index 877fb30..17110a4 100644
--- a/kernel/latencytop.c
+++ b/kernel/latencytop.c
@@ -194,14 +194,7 @@
 
 	account_global_scheduler_latency(tsk, &lat);
 
-	/*
-	 * short term hack; if we're > 32 we stop; future we recycle:
-	 */
-	tsk->latency_record_count++;
-	if (tsk->latency_record_count >= LT_SAVECOUNT)
-		goto out_unlock;
-
-	for (i = 0; i < LT_SAVECOUNT; i++) {
+	for (i = 0; i < tsk->latency_record_count; i++) {
 		struct latency_record *mylat;
 		int same = 1;
 
@@ -227,8 +220,14 @@
 		}
 	}
 
+	/*
+	 * short term hack; if we're > 32 we stop; future we recycle:
+	 */
+	if (tsk->latency_record_count >= LT_SAVECOUNT)
+		goto out_unlock;
+
 	/* Allocated a new one: */
-	i = tsk->latency_record_count;
+	i = tsk->latency_record_count++;
 	memcpy(&tsk->latency_record[i], &lat, sizeof(struct latency_record));
 
 out_unlock:
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index 517d827..cb6c0d2 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -674,6 +674,8 @@
 
 	event->tstamp_running += ctx->time - event->tstamp_stopped;
 
+	event->shadow_ctx_time = ctx->time - ctx->timestamp;
+
 	if (!is_software_event(event))
 		cpuctx->active_oncpu++;
 	ctx->nr_active++;
@@ -3396,7 +3398,8 @@
 }
 
 static void perf_output_read_one(struct perf_output_handle *handle,
-				 struct perf_event *event)
+				 struct perf_event *event,
+				 u64 enabled, u64 running)
 {
 	u64 read_format = event->attr.read_format;
 	u64 values[4];
@@ -3404,11 +3407,11 @@
 
 	values[n++] = perf_event_count(event);
 	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
-		values[n++] = event->total_time_enabled +
+		values[n++] = enabled +
 			atomic64_read(&event->child_total_time_enabled);
 	}
 	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
-		values[n++] = event->total_time_running +
+		values[n++] = running +
 			atomic64_read(&event->child_total_time_running);
 	}
 	if (read_format & PERF_FORMAT_ID)
@@ -3421,7 +3424,8 @@
  * XXX PERF_FORMAT_GROUP vs inherited events seems difficult.
  */
 static void perf_output_read_group(struct perf_output_handle *handle,
-			    struct perf_event *event)
+			    struct perf_event *event,
+			    u64 enabled, u64 running)
 {
 	struct perf_event *leader = event->group_leader, *sub;
 	u64 read_format = event->attr.read_format;
@@ -3431,10 +3435,10 @@
 	values[n++] = 1 + leader->nr_siblings;
 
 	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
-		values[n++] = leader->total_time_enabled;
+		values[n++] = enabled;
 
 	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
-		values[n++] = leader->total_time_running;
+		values[n++] = running;
 
 	if (leader != event)
 		leader->pmu->read(leader);
@@ -3459,13 +3463,35 @@
 	}
 }
 
+#define PERF_FORMAT_TOTAL_TIMES (PERF_FORMAT_TOTAL_TIME_ENABLED|\
+				 PERF_FORMAT_TOTAL_TIME_RUNNING)
+
 static void perf_output_read(struct perf_output_handle *handle,
 			     struct perf_event *event)
 {
+	u64 enabled = 0, running = 0, now, ctx_time;
+	u64 read_format = event->attr.read_format;
+
+	/*
+	 * compute total_time_enabled, total_time_running
+	 * based on snapshot values taken when the event
+	 * was last scheduled in.
+	 *
+	 * we cannot simply call update_context_time()
+	 * because of locking issues, as we are called in
+	 * NMI context
+	 */
+	if (read_format & PERF_FORMAT_TOTAL_TIMES) {
+		now = perf_clock();
+		ctx_time = event->shadow_ctx_time + now;
+		enabled = ctx_time - event->tstamp_enabled;
+		running = ctx_time - event->tstamp_running;
+	}
+
 	if (event->attr.read_format & PERF_FORMAT_GROUP)
-		perf_output_read_group(handle, event);
+		perf_output_read_group(handle, event, enabled, running);
 	else
-		perf_output_read_one(handle, event);
+		perf_output_read_one(handle, event, enabled, running);
 }
 
 void perf_output_sample(struct perf_output_handle *handle,
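To make the shadow_ctx_time comment in perf_event.h concrete, here is the same computation as an isolated sketch (the function name is a placeholder, and it assumes it sits next to perf_clock() in kernel/perf_event.c, like perf_output_read() above). Since shadow_ctx_time is saved at schedule-in as ctx->time - ctx->timestamp, adding the current perf_clock() value reconstructs what ctx->time would read now, without taking the context lock:

static void example_nmi_safe_times(struct perf_event *event,
				   u64 *enabled, u64 *running)
{
	/* ctx time as of "now", rebuilt from the schedule-in snapshot */
	u64 ctx_time = event->shadow_ctx_time + perf_clock();

	*enabled = ctx_time - event->tstamp_enabled;
	*running = ctx_time - event->tstamp_running;
}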
diff --git a/kernel/printk.c b/kernel/printk.c
index b2ebaee..38e7d58 100644
--- a/kernel/printk.c
+++ b/kernel/printk.c
@@ -261,6 +261,12 @@
 }
 #endif
 
+#ifdef CONFIG_SECURITY_DMESG_RESTRICT
+int dmesg_restrict = 1;
+#else
+int dmesg_restrict;
+#endif
+
 int do_syslog(int type, char __user *buf, int len, bool from_file)
 {
 	unsigned i, j, limit, count;
diff --git a/kernel/range.c b/kernel/range.c
index 471b66a..37fa9b9 100644
--- a/kernel/range.c
+++ b/kernel/range.c
@@ -119,7 +119,7 @@
 
 int clean_sort_range(struct range *range, int az)
 {
-	int i, j, k = az - 1, nr_range = 0;
+	int i, j, k = az - 1, nr_range = az;
 
 	for (i = 0; i < k; i++) {
 		if (range[i].end)
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index c33a1ed..b65bf63 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -704,6 +704,15 @@
 	},
 #endif
 	{
+		.procname	= "dmesg_restrict",
+		.data		= &dmesg_restrict,
+		.maxlen		= sizeof(int),
+		.mode		= 0644,
+		.proc_handler	= proc_dointvec_minmax,
+		.extra1		= &zero,
+		.extra2		= &one,
+	},
+	{
 		.procname	= "ngroups_max",
 		.data		= &ngroups_max,
 		.maxlen		= sizeof (int),
diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
index bc251ed..7b8ec02 100644
--- a/kernel/trace/blktrace.c
+++ b/kernel/trace/blktrace.c
@@ -168,7 +168,6 @@
 static const u32 ddir_act[2] = { BLK_TC_ACT(BLK_TC_READ),
 				 BLK_TC_ACT(BLK_TC_WRITE) };
 
-#define BLK_TC_HARDBARRIER	BLK_TC_BARRIER
 #define BLK_TC_RAHEAD		BLK_TC_AHEAD
 
 /* The ilog2() calls fall out because they're constant */
@@ -196,7 +195,6 @@
 		return;
 
 	what |= ddir_act[rw & WRITE];
-	what |= MASK_TC_BIT(rw, HARDBARRIER);
 	what |= MASK_TC_BIT(rw, SYNC);
 	what |= MASK_TC_BIT(rw, RAHEAD);
 	what |= MASK_TC_BIT(rw, META);
@@ -1807,8 +1805,6 @@
 
 	if (rw & REQ_RAHEAD)
 		rwbs[i++] = 'A';
-	if (rw & REQ_HARDBARRIER)
-		rwbs[i++] = 'B';
 	if (rw & REQ_SYNC)
 		rwbs[i++] = 'S';
 	if (rw & REQ_META)
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index 6f412ab4..5086bb9 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -82,6 +82,16 @@
 };
 static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };
 
+static inline void *ptr_to_indirect(void *ptr)
+{
+	return (void *)((unsigned long)ptr | RADIX_TREE_INDIRECT_PTR);
+}
+
+static inline void *indirect_to_ptr(void *ptr)
+{
+	return (void *)((unsigned long)ptr & ~RADIX_TREE_INDIRECT_PTR);
+}
+
 static inline gfp_t root_gfp_mask(struct radix_tree_root *root)
 {
 	return root->gfp_mask & __GFP_BITS_MASK;
@@ -265,7 +275,7 @@
 			return -ENOMEM;
 
 		/* Increase the height.  */
-		node->slots[0] = radix_tree_indirect_to_ptr(root->rnode);
+		node->slots[0] = indirect_to_ptr(root->rnode);
 
 		/* Propagate the aggregated tag info into the new root */
 		for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++) {
@@ -276,7 +286,7 @@
 		newheight = root->height+1;
 		node->height = newheight;
 		node->count = 1;
-		node = radix_tree_ptr_to_indirect(node);
+		node = ptr_to_indirect(node);
 		rcu_assign_pointer(root->rnode, node);
 		root->height = newheight;
 	} while (height > root->height);
@@ -309,7 +319,7 @@
 			return error;
 	}
 
-	slot = radix_tree_indirect_to_ptr(root->rnode);
+	slot = indirect_to_ptr(root->rnode);
 
 	height = root->height;
 	shift = (height-1) * RADIX_TREE_MAP_SHIFT;
@@ -325,8 +335,7 @@
 				rcu_assign_pointer(node->slots[offset], slot);
 				node->count++;
 			} else
-				rcu_assign_pointer(root->rnode,
-					radix_tree_ptr_to_indirect(slot));
+				rcu_assign_pointer(root->rnode, ptr_to_indirect(slot));
 		}
 
 		/* Go a level down */
@@ -374,7 +383,7 @@
 			return NULL;
 		return is_slot ? (void *)&root->rnode : node;
 	}
-	node = radix_tree_indirect_to_ptr(node);
+	node = indirect_to_ptr(node);
 
 	height = node->height;
 	if (index > radix_tree_maxindex(height))
@@ -393,7 +402,7 @@
 		height--;
 	} while (height > 0);
 
-	return is_slot ? (void *)slot:node;
+	return is_slot ? (void *)slot : indirect_to_ptr(node);
 }
 
 /**
@@ -455,7 +464,7 @@
 	height = root->height;
 	BUG_ON(index > radix_tree_maxindex(height));
 
-	slot = radix_tree_indirect_to_ptr(root->rnode);
+	slot = indirect_to_ptr(root->rnode);
 	shift = (height - 1) * RADIX_TREE_MAP_SHIFT;
 
 	while (height > 0) {
@@ -509,7 +518,7 @@
 
 	shift = (height - 1) * RADIX_TREE_MAP_SHIFT;
 	pathp->node = NULL;
-	slot = radix_tree_indirect_to_ptr(root->rnode);
+	slot = indirect_to_ptr(root->rnode);
 
 	while (height > 0) {
 		int offset;
@@ -579,7 +588,7 @@
 
 	if (!radix_tree_is_indirect_ptr(node))
 		return (index == 0);
-	node = radix_tree_indirect_to_ptr(node);
+	node = indirect_to_ptr(node);
 
 	height = node->height;
 	if (index > radix_tree_maxindex(height))
@@ -666,7 +675,7 @@
 	}
 
 	shift = (height - 1) * RADIX_TREE_MAP_SHIFT;
-	slot = radix_tree_indirect_to_ptr(root->rnode);
+	slot = indirect_to_ptr(root->rnode);
 
 	/*
 	 * we fill the path from (root->height - 2) to 0, leaving the index at
@@ -897,7 +906,7 @@
 		results[0] = node;
 		return 1;
 	}
-	node = radix_tree_indirect_to_ptr(node);
+	node = indirect_to_ptr(node);
 
 	max_index = radix_tree_maxindex(node->height);
 
@@ -916,7 +925,8 @@
 			slot = *(((void ***)results)[ret + i]);
 			if (!slot)
 				continue;
-			results[ret + nr_found] = rcu_dereference_raw(slot);
+			results[ret + nr_found] =
+				indirect_to_ptr(rcu_dereference_raw(slot));
 			nr_found++;
 		}
 		ret += nr_found;
@@ -965,7 +975,7 @@
 		results[0] = (void **)&root->rnode;
 		return 1;
 	}
-	node = radix_tree_indirect_to_ptr(node);
+	node = indirect_to_ptr(node);
 
 	max_index = radix_tree_maxindex(node->height);
 
@@ -1090,7 +1100,7 @@
 		results[0] = node;
 		return 1;
 	}
-	node = radix_tree_indirect_to_ptr(node);
+	node = indirect_to_ptr(node);
 
 	max_index = radix_tree_maxindex(node->height);
 
@@ -1109,7 +1119,8 @@
 			slot = *(((void ***)results)[ret + i]);
 			if (!slot)
 				continue;
-			results[ret + nr_found] = rcu_dereference_raw(slot);
+			results[ret + nr_found] =
+				indirect_to_ptr(rcu_dereference_raw(slot));
 			nr_found++;
 		}
 		ret += nr_found;
@@ -1159,7 +1170,7 @@
 		results[0] = (void **)&root->rnode;
 		return 1;
 	}
-	node = radix_tree_indirect_to_ptr(node);
+	node = indirect_to_ptr(node);
 
 	max_index = radix_tree_maxindex(node->height);
 
@@ -1195,7 +1206,7 @@
 		void *newptr;
 
 		BUG_ON(!radix_tree_is_indirect_ptr(to_free));
-		to_free = radix_tree_indirect_to_ptr(to_free);
+		to_free = indirect_to_ptr(to_free);
 
 		/*
 		 * The candidate node has more than one child, or its child
@@ -1208,16 +1219,39 @@
 
 		/*
 		 * We don't need rcu_assign_pointer(), since we are simply
-		 * moving the node from one part of the tree to another. If
-		 * it was safe to dereference the old pointer to it
+		 * moving the node from one part of the tree to another: if it
+		 * was safe to dereference the old pointer to it
 		 * (to_free->slots[0]), it will be safe to dereference the new
-		 * one (root->rnode).
+		 * one (root->rnode) as far as dependent read barriers go.
 		 */
 		newptr = to_free->slots[0];
 		if (root->height > 1)
-			newptr = radix_tree_ptr_to_indirect(newptr);
+			newptr = ptr_to_indirect(newptr);
 		root->rnode = newptr;
 		root->height--;
+
+		/*
+		 * We have a dilemma here. The node's slot[0] must not be
+		 * NULLed in case there are concurrent lookups expecting to
+		 * find the item. However if this was a bottom-level node,
+		 * then it may be subject to the slot pointer being visible
+		 * to callers dereferencing it. If the item corresponding to
+		 * slot[0] is subsequently deleted, these callers would expect
+		 * their slot to become empty sooner or later.
+		 *
+		 * For example, lockless pagecache will look up a slot, deref
+		 * the page pointer, and if the page is 0 refcount it means it
+		 * was concurrently deleted from pagecache so try the deref
+		 * again. Fortunately there is already a requirement for logic
+		 * to retry the entire slot lookup -- the indirect pointer
+		 * problem (replacing direct root node with an indirect pointer
+		 * also results in a stale slot). So tag the slot as indirect
+		 * to force callers to retry.
+		 */
+		if (root->height == 0)
+			*((unsigned long *)&to_free->slots[0]) |=
+						RADIX_TREE_INDIRECT_PTR;
+
 		radix_tree_node_free(to_free);
 	}
 }
@@ -1254,7 +1288,7 @@
 		root->rnode = NULL;
 		goto out;
 	}
-	slot = radix_tree_indirect_to_ptr(slot);
+	slot = indirect_to_ptr(slot);
 
 	shift = (height - 1) * RADIX_TREE_MAP_SHIFT;
 	pathp->node = NULL;
@@ -1296,8 +1330,7 @@
 			radix_tree_node_free(to_free);
 
 		if (pathp->node->count) {
-			if (pathp->node ==
-					radix_tree_indirect_to_ptr(root->rnode))
+			if (pathp->node == indirect_to_ptr(root->rnode))
 				radix_tree_shrink(root);
 			goto out;
 		}
diff --git a/mm/filemap.c b/mm/filemap.c
index 61ba5e4..ea89840 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -644,7 +644,9 @@
 	pagep = radix_tree_lookup_slot(&mapping->page_tree, offset);
 	if (pagep) {
 		page = radix_tree_deref_slot(pagep);
-		if (unlikely(!page || page == RADIX_TREE_RETRY))
+		if (unlikely(!page))
+			goto out;
+		if (radix_tree_deref_retry(page))
 			goto repeat;
 
 		if (!page_cache_get_speculative(page))
@@ -660,6 +662,7 @@
 			goto repeat;
 		}
 	}
+out:
 	rcu_read_unlock();
 
 	return page;
@@ -777,12 +780,11 @@
 		page = radix_tree_deref_slot((void **)pages[i]);
 		if (unlikely(!page))
 			continue;
-		/*
-		 * this can only trigger if nr_found == 1, making livelock
-		 * a non issue.
-		 */
-		if (unlikely(page == RADIX_TREE_RETRY))
+		if (radix_tree_deref_retry(page)) {
+			if (ret)
+				start = pages[ret-1]->index;
 			goto restart;
+		}
 
 		if (!page_cache_get_speculative(page))
 			goto repeat;
@@ -830,11 +832,7 @@
 		page = radix_tree_deref_slot((void **)pages[i]);
 		if (unlikely(!page))
 			continue;
-		/*
-		 * this can only trigger if nr_found == 1, making livelock
-		 * a non issue.
-		 */
-		if (unlikely(page == RADIX_TREE_RETRY))
+		if (radix_tree_deref_retry(page))
 			goto restart;
 
 		if (page->mapping == NULL || page->index != index)
@@ -887,11 +885,7 @@
 		page = radix_tree_deref_slot((void **)pages[i]);
 		if (unlikely(!page))
 			continue;
-		/*
-		 * this can only trigger if nr_found == 1, making livelock
-		 * a non issue.
-		 */
-		if (unlikely(page == RADIX_TREE_RETRY))
+		if (radix_tree_deref_retry(page))
 			goto restart;
 
 		if (!page_cache_get_speculative(page))
@@ -1029,6 +1023,9 @@
 				goto page_not_up_to_date;
 			if (!trylock_page(page))
 				goto page_not_up_to_date;
+			/* Did it get truncated before we got the lock? */
+			if (!page->mapping)
+				goto page_not_up_to_date_locked;
 			if (!mapping->a_ops->is_partially_uptodate(page,
 								desc, offset))
 				goto page_not_up_to_date_locked;
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 9a99cfa..2efa8ea 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -4208,15 +4208,17 @@
 
 	memset(mem, 0, size);
 	mem->stat = alloc_percpu(struct mem_cgroup_stat_cpu);
-	if (!mem->stat) {
-		if (size < PAGE_SIZE)
-			kfree(mem);
-		else
-			vfree(mem);
-		mem = NULL;
-	}
+	if (!mem->stat)
+		goto out_free;
 	spin_lock_init(&mem->pcp_counter_lock);
 	return mem;
+
+out_free:
+	if (size < PAGE_SIZE)
+		kfree(mem);
+	else
+		vfree(mem);
+	return NULL;
 }
 
 /*
diff --git a/mm/mprotect.c b/mm/mprotect.c
index 2d1bf7c..4c51338 100644
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -211,6 +211,7 @@
 	mmu_notifier_invalidate_range_end(mm, start, end);
 	vm_stat_account(mm, oldflags, vma->vm_file, -nrpages);
 	vm_stat_account(mm, newflags, vma->vm_file, nrpages);
+	perf_event_mmap(vma);
 	return 0;
 
 fail:
@@ -299,7 +300,6 @@
 		error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
 		if (error)
 			goto out;
-		perf_event_mmap(vma);
 		nstart = tmp;
 
 		if (nstart < prev->vm_end)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index b8a6fdc..d31d7ce 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -913,7 +913,7 @@
 	 * back off and wait for congestion to clear because further reclaim
 	 * will encounter the same problem
 	 */
-	if (nr_dirty == nr_congested)
+	if (nr_dirty == nr_congested && nr_dirty != 0)
 		zone_set_flag(zone, ZONE_CONGESTED);
 
 	free_page_list(&free_pages);
diff --git a/security/Kconfig b/security/Kconfig
index bd72ae6..e80da95 100644
--- a/security/Kconfig
+++ b/security/Kconfig
@@ -39,6 +39,18 @@
 
 	  If you are unsure as to whether this is required, answer N.
 
+config SECURITY_DMESG_RESTRICT
+	bool "Restrict unprivileged access to the kernel syslog"
+	default n
+	help
+	  This enforces restrictions on unprivileged users reading the kernel
+	  syslog via dmesg(8).
+
+	  If this option is not selected, no restrictions will be enforced
+	  unless the dmesg_restrict sysctl is explicitly set to 1.
+
+	  If you are unsure how to answer this question, answer N.
+
 config SECURITY
 	bool "Enable different security models"
 	depends on SYSFS
diff --git a/security/apparmor/lsm.c b/security/apparmor/lsm.c
index cf1de44..b7106f1 100644
--- a/security/apparmor/lsm.c
+++ b/security/apparmor/lsm.c
@@ -922,7 +922,7 @@
 	error = register_security(&apparmor_ops);
 	if (error) {
 		AA_ERROR("Unable to register AppArmor\n");
-		goto register_security_out;
+		goto set_init_cxt_out;
 	}
 
 	/* Report that AppArmor successfully initialized */
@@ -936,6 +936,9 @@
 
 	return error;
 
+set_init_cxt_out:
+	aa_free_task_context(current->real_cred->security);
+
 register_security_out:
 	aa_free_root_ns();
 
@@ -944,7 +947,6 @@
 
 	apparmor_enabled = 0;
 	return error;
-
 }
 
 security_initcall(apparmor_init);
diff --git a/security/apparmor/policy.c b/security/apparmor/policy.c
index 52cc865..4f0eade 100644
--- a/security/apparmor/policy.c
+++ b/security/apparmor/policy.c
@@ -306,7 +306,7 @@
 	return ns;
 
 fail_unconfined:
-	kzfree(ns->base.name);
+	kzfree(ns->base.hname);
 fail_ns:
 	kzfree(ns);
 	return NULL;
diff --git a/security/commoncap.c b/security/commoncap.c
index 5e632b4..04b80f9 100644
--- a/security/commoncap.c
+++ b/security/commoncap.c
@@ -895,6 +895,8 @@
 {
 	if (type != SYSLOG_ACTION_OPEN && from_file)
 		return 0;
+	if (dmesg_restrict && !capable(CAP_SYS_ADMIN))
+		return -EPERM;
 	if ((type != SYSLOG_ACTION_READ_ALL &&
 	     type != SYSLOG_ACTION_SIZE_BUFFER) && !capable(CAP_SYS_ADMIN))
 		return -EPERM;
diff --git a/tools/perf/Documentation/perf-trace.txt b/tools/perf/Documentation/perf-trace.txt
index 122ec9d..26aff6b 100644
--- a/tools/perf/Documentation/perf-trace.txt
+++ b/tools/perf/Documentation/perf-trace.txt
@@ -8,7 +8,11 @@
 SYNOPSIS
 --------
 [verse]
-'perf trace' {record <script> | report <script> [args] }
+'perf trace' [<options>]
+'perf trace' [<options>] record <script> [<record-options>] <command>
+'perf trace' [<options>] report <script> [script-args]
+'perf trace' [<options>] <script> <required-script-args> [<record-options>] <command>
+'perf trace' [<options>] <top-script> [script-args]
 
 DESCRIPTION
 -----------
@@ -24,23 +28,53 @@
   available via 'perf trace -l').  The following variants allow you to
   record and run those scripts:
 
-  'perf trace record <script>' to record the events required for 'perf
-  trace report'.  <script> is the name displayed in the output of
-  'perf trace --list' i.e. the actual script name minus any language
-  extension.
+  'perf trace record <script> <command>' to record the events required
+  for 'perf trace report'.  <script> is the name displayed in the
+  output of 'perf trace --list' i.e. the actual script name minus any
+  language extension.  If <command> is not specified, the events are
+  recorded using the -a (system-wide) 'perf record' option.
 
-  'perf trace report <script>' to run and display the results of
-  <script>.  <script> is the name displayed in the output of 'perf
+  'perf trace report <script> [args]' to run and display the results
+  of <script>.  <script> is the name displayed in the output of 'perf
   trace --list' i.e. the actual script name minus any language
   extension.  The perf.data output from a previous run of 'perf trace
   record <script>' is used and should be present for this command to
-  succeed.
+  succeed.  [args] refers to the (mainly optional) args expected by
+  the script.
+
+  'perf trace <script> <required-script-args> <command>' to both
+  record the events required for <script> and to run the <script>
+  using 'live-mode' i.e. without writing anything to disk.  <script>
+  is the name displayed in the output of 'perf trace --list' i.e. the
+  actual script name minus any language extension.  If <command> is
+  not specified, the events are recorded using the -a (system-wide)
+  'perf record' option.  If <script> has any required args, they
+  should be specified before <command>.  This mode doesn't allow for
+  optional script args to be specified; if optional script args are
+  desired, they can be specified using separate 'perf trace record'
+  and 'perf trace report' commands, with the stdout of the record step
+  piped to the stdin of the report script, using the '-o -' and '-i -'
+  options of the corresponding commands.
+
+  'perf trace <top-script>' to both record the events required for
+  <top-script> and to run the <top-script> using 'live-mode'
+  i.e. without writing anything to disk.  <top-script> is the name
+  displayed in the output of 'perf trace --list' i.e. the actual
+  script name minus any language extension; a <top-script> is defined
+  as any script name ending with the string 'top'.
+
+  [<record-options>] can be passed to the record steps of 'perf trace
+  record' and 'live-mode' variants; this isn't possible however for
+  <top-script> 'live-mode' or 'perf trace report' variants.
 
   See the 'SEE ALSO' section for links to language-specific
   information on how to write and run your own trace scripts.
 
 OPTIONS
 -------
+<command>...::
+	Any command you can specify in a shell.
+
 -D::
 --dump-raw-trace=::
         Display verbose dump of the trace data.
@@ -64,6 +98,13 @@
         Generate perf-trace.[ext] starter script for given language,
         using current perf.data.
 
+-a::
+        Force system-wide collection.  Scripts run without a <command>
+        normally use -a by default, while scripts run with a <command>
+        normally don't; this option allows the latter to be run in
+        system-wide mode.
+
+
 SEE ALSO
 --------
 linkperf:perf-record[1], linkperf:perf-trace-perl[1],
diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
index 4e75583..93bd2ff 100644
--- a/tools/perf/builtin-record.c
+++ b/tools/perf/builtin-record.c
@@ -790,7 +790,7 @@
 
 static bool force, append_file;
 
-static const struct option options[] = {
+const struct option record_options[] = {
 	OPT_CALLBACK('e', "event", NULL, "event",
 		     "event selector. use 'perf list' to list available events",
 		     parse_events),
@@ -839,16 +839,16 @@
 {
 	int i, j, err = -ENOMEM;
 
-	argc = parse_options(argc, argv, options, record_usage,
+	argc = parse_options(argc, argv, record_options, record_usage,
 			    PARSE_OPT_STOP_AT_NON_OPTION);
 	if (!argc && target_pid == -1 && target_tid == -1 &&
 		!system_wide && !cpu_list)
-		usage_with_options(record_usage, options);
+		usage_with_options(record_usage, record_options);
 
 	if (force && append_file) {
 		fprintf(stderr, "Can't overwrite and append at the same time."
 				" You need to choose between -f and -A");
-		usage_with_options(record_usage, options);
+		usage_with_options(record_usage, record_options);
 	} else if (append_file) {
 		write_mode = WRITE_APPEND;
 	} else {
@@ -871,7 +871,7 @@
 		if (thread_num <= 0) {
 			fprintf(stderr, "Can't find all threads of pid %d\n",
 					target_pid);
-			usage_with_options(record_usage, options);
+			usage_with_options(record_usage, record_options);
 		}
 	} else {
 		all_tids=malloc(sizeof(pid_t));
diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c
index b513e40..dd62580 100644
--- a/tools/perf/builtin-top.c
+++ b/tools/perf/builtin-top.c
@@ -69,7 +69,6 @@
 static pid_t			*all_tids			=      NULL;
 static int			thread_num			=      0;
 static bool			inherit				=  false;
-static int			profile_cpu			=     -1;
 static int			nr_cpus				=      0;
 static int			realtime_prio			=      0;
 static bool			group				=  false;
@@ -558,13 +557,13 @@
 	else
 		printf(" (all");
 
-	if (profile_cpu != -1)
-		printf(", cpu: %d)\n", profile_cpu);
+	if (cpu_list)
+		printf(", CPU%s: %s)\n", nr_cpus > 1 ? "s" : "", cpu_list);
 	else {
 		if (target_tid != -1)
 			printf(")\n");
 		else
-			printf(", %d CPUs)\n", nr_cpus);
+			printf(", %d CPU%s)\n", nr_cpus, nr_cpus > 1 ? "s" : "");
 	}
 
 	printf("%-*.*s\n", win_width, win_width, graph_dotted_line);
@@ -1187,11 +1186,10 @@
 static void start_counter(int i, int counter)
 {
 	struct perf_event_attr *attr;
-	int cpu;
+	int cpu = -1;
 	int thread_index;
 
-	cpu = profile_cpu;
-	if (target_tid == -1 && profile_cpu == -1)
+	if (target_tid == -1)
 		cpu = cpumap[i];
 
 	attr = attrs + counter;
diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c
index 2f8df45..86cfe38 100644
--- a/tools/perf/builtin-trace.c
+++ b/tools/perf/builtin-trace.c
@@ -10,6 +10,7 @@
 #include "util/symbol.h"
 #include "util/thread.h"
 #include "util/trace-event.h"
+#include "util/parse-options.h"
 #include "util/util.h"
 
 static char const		*script_name;
@@ -17,6 +18,7 @@
 static bool			debug_mode;
 static u64			last_timestamp;
 static u64			nr_unordered;
+extern const struct option	record_options[];
 
 static int default_start_script(const char *script __unused,
 				int argc __unused,
@@ -328,7 +330,7 @@
 {
 	struct script_desc *s = zalloc(sizeof(*s));
 
-	if (s != NULL)
+	if (s != NULL && name)
 		s->name = strdup(name);
 
 	return s;
@@ -337,6 +339,8 @@
 static void script_desc__delete(struct script_desc *s)
 {
 	free(s->name);
+	free(s->half_liner);
+	free(s->args);
 	free(s);
 }
 
@@ -537,8 +541,40 @@
 	return path;
 }
 
+static bool is_top_script(const char *script_path)
+{
+	return ends_with((char *)script_path, "top") == NULL ? false : true;
+}
+
+static int has_required_arg(char *script_path)
+{
+	struct script_desc *desc;
+	int n_args = 0;
+	char *p;
+
+	desc = script_desc__new(NULL);
+
+	if (read_script_info(desc, script_path))
+		goto out;
+
+	if (!desc->args)
+		goto out;
+
+	for (p = desc->args; *p; p++)
+		if (*p == '<')
+			n_args++;
+out:
+	script_desc__delete(desc);
+
+	return n_args;
+}
+
 static const char * const trace_usage[] = {
-	"perf trace [<options>] <command>",
+	"perf trace [<options>]",
+	"perf trace [<options>] record <script> [<record-options>] <command>",
+	"perf trace [<options>] report <script> [script-args]",
+	"perf trace [<options>] <script> [<record-options>] <command>",
+	"perf trace [<options>] <top-script> [script-args]",
 	NULL
 };
 
@@ -564,50 +600,81 @@
 	OPT_END()
 };
 
+static bool have_cmd(int argc, const char **argv)
+{
+	char **__argv = malloc(sizeof(const char *) * argc);
+
+	if (!__argv)
+		die("malloc");
+	memcpy(__argv, argv, sizeof(const char *) * argc);
+	argc = parse_options(argc, (const char **)__argv, record_options,
+			     NULL, PARSE_OPT_STOP_AT_NON_OPTION);
+	free(__argv);
+
+	return argc != 0;
+}
+
 int cmd_trace(int argc, const char **argv, const char *prefix __used)
 {
+	char *rec_script_path = NULL;
+	char *rep_script_path = NULL;
 	struct perf_session *session;
-	const char *suffix = NULL;
+	char *script_path = NULL;
 	const char **__argv;
-	char *script_path;
-	int i, err;
+	bool system_wide;
+	int i, j, err;
 
-	if (argc >= 2 && strncmp(argv[1], "rec", strlen("rec")) == 0) {
-		if (argc < 3) {
-			fprintf(stderr,
-				"Please specify a record script\n");
-			return -1;
-		}
-		suffix = RECORD_SUFFIX;
+	setup_scripting();
+
+	argc = parse_options(argc, argv, options, trace_usage,
+			     PARSE_OPT_STOP_AT_NON_OPTION);
+
+	if (argc > 1 && !strncmp(argv[0], "rec", strlen("rec"))) {
+		rec_script_path = get_script_path(argv[1], RECORD_SUFFIX);
+		if (!rec_script_path)
+			return cmd_record(argc, argv, NULL);
 	}
 
-	if (argc >= 2 && strncmp(argv[1], "rep", strlen("rep")) == 0) {
-		if (argc < 3) {
+	if (argc > 1 && !strncmp(argv[0], "rep", strlen("rep"))) {
+		rep_script_path = get_script_path(argv[1], REPORT_SUFFIX);
+		if (!rep_script_path) {
 			fprintf(stderr,
-				"Please specify a report script\n");
+				"Please specify a valid report script "
+				"(see 'perf trace -l' for listing)\n");
 			return -1;
 		}
-		suffix = REPORT_SUFFIX;
 	}
 
 	/* make sure PERF_EXEC_PATH is set for scripts */
 	perf_set_argv_exec_path(perf_exec_path());
 
-	if (!suffix && argc >= 2 && strncmp(argv[1], "-", strlen("-")) != 0) {
-		char *record_script_path, *report_script_path;
+	if (argc && !script_name && !rec_script_path && !rep_script_path) {
 		int live_pipe[2];
+		int rep_args;
 		pid_t pid;
 
-		record_script_path = get_script_path(argv[1], RECORD_SUFFIX);
-		if (!record_script_path) {
-			fprintf(stderr, "record script not found\n");
-			return -1;
+		rec_script_path = get_script_path(argv[0], RECORD_SUFFIX);
+		rep_script_path = get_script_path(argv[0], REPORT_SUFFIX);
+
+		if (!rec_script_path && !rep_script_path) {
+			fprintf(stderr, " Couldn't find script %s\n\n See perf"
+				" trace -l for available scripts.\n", argv[0]);
+			usage_with_options(trace_usage, options);
 		}
 
-		report_script_path = get_script_path(argv[1], REPORT_SUFFIX);
-		if (!report_script_path) {
-			fprintf(stderr, "report script not found\n");
-			return -1;
+		if (is_top_script(argv[0])) {
+			rep_args = argc - 1;
+		} else {
+			int rec_args;
+
+			rep_args = has_required_arg(rep_script_path);
+			rec_args = (argc - 1) - rep_args;
+			if (rec_args < 0) {
+				fprintf(stderr, " %s script requires options."
+					"\n\n See perf trace -l for available "
+					"scripts and options.\n", argv[0]);
+				usage_with_options(trace_usage, options);
+			}
 		}
 
 		if (pipe(live_pipe) < 0) {
@@ -622,60 +689,84 @@
 		}
 
 		if (!pid) {
+			system_wide = true;
+			j = 0;
+
 			dup2(live_pipe[1], 1);
 			close(live_pipe[0]);
 
-			__argv = malloc(6 * sizeof(const char *));
-			__argv[0] = "/bin/sh";
-			__argv[1] = record_script_path;
-			__argv[2] = "-q";
-			__argv[3] = "-o";
-			__argv[4] = "-";
-			__argv[5] = NULL;
+			if (!is_top_script(argv[0]))
+				system_wide = !have_cmd(argc - rep_args,
+							&argv[rep_args]);
+
+			__argv = malloc((argc + 6) * sizeof(const char *));
+			if (!__argv)
+				die("malloc");
+
+			__argv[j++] = "/bin/sh";
+			__argv[j++] = rec_script_path;
+			if (system_wide)
+				__argv[j++] = "-a";
+			__argv[j++] = "-q";
+			__argv[j++] = "-o";
+			__argv[j++] = "-";
+			for (i = rep_args + 1; i < argc; i++)
+				__argv[j++] = argv[i];
+			__argv[j++] = NULL;
 
 			execvp("/bin/sh", (char **)__argv);
+			free(__argv);
 			exit(-1);
 		}
 
 		dup2(live_pipe[0], 0);
 		close(live_pipe[1]);
 
-		__argv = malloc((argc + 3) * sizeof(const char *));
-		__argv[0] = "/bin/sh";
-		__argv[1] = report_script_path;
+		__argv = malloc((argc + 4) * sizeof(const char *));
+		if (!__argv)
+			die("malloc");
+		j = 0;
+		__argv[j++] = "/bin/sh";
+		__argv[j++] = rep_script_path;
+		for (i = 1; i < rep_args + 1; i++)
+			__argv[j++] = argv[i];
+		__argv[j++] = "-i";
+		__argv[j++] = "-";
+		__argv[j++] = NULL;
+
+		execvp("/bin/sh", (char **)__argv);
+		free(__argv);
+		exit(-1);
+	}
+
+	if (rec_script_path)
+		script_path = rec_script_path;
+	if (rep_script_path)
+		script_path = rep_script_path;
+
+	if (script_path) {
+		system_wide = false;
+		j = 0;
+
+		if (rec_script_path)
+			system_wide = !have_cmd(argc - 1, &argv[1]);
+
+		__argv = malloc((argc + 2) * sizeof(const char *));
+		if (!__argv)
+			die("malloc");
+		__argv[j++] = "/bin/sh";
+		__argv[j++] = script_path;
+		if (system_wide)
+			__argv[j++] = "-a";
 		for (i = 2; i < argc; i++)
-			__argv[i] = argv[i];
-		__argv[i++] = "-i";
-		__argv[i++] = "-";
-		__argv[i++] = NULL;
+			__argv[j++] = argv[i];
+		__argv[j++] = NULL;
 
 		execvp("/bin/sh", (char **)__argv);
+		free(__argv);
 		exit(-1);
 	}
 
-	if (suffix) {
-		script_path = get_script_path(argv[2], suffix);
-		if (!script_path) {
-			fprintf(stderr, "script not found\n");
-			return -1;
-		}
-
-		__argv = malloc((argc + 1) * sizeof(const char *));
-		__argv[0] = "/bin/sh";
-		__argv[1] = script_path;
-		for (i = 3; i < argc; i++)
-			__argv[i - 1] = argv[i];
-		__argv[argc - 1] = NULL;
-
-		execvp("/bin/sh", (char **)__argv);
-		exit(-1);
-	}
-
-	setup_scripting();
-
-	argc = parse_options(argc, argv, options, trace_usage,
-			     PARSE_OPT_STOP_AT_NON_OPTION);
-
 	if (symbol__init() < 0)
 		return -1;
 	if (!script_name)
diff --git a/tools/perf/scripts/perl/bin/failed-syscalls-record b/tools/perf/scripts/perl/bin/failed-syscalls-record
index eb5846b..8104895 100644
--- a/tools/perf/scripts/perl/bin/failed-syscalls-record
+++ b/tools/perf/scripts/perl/bin/failed-syscalls-record
@@ -1,2 +1,2 @@
 #!/bin/bash
-perf record -a -e raw_syscalls:sys_exit $@
+perf record -e raw_syscalls:sys_exit $@
diff --git a/tools/perf/scripts/perl/bin/rw-by-file-record b/tools/perf/scripts/perl/bin/rw-by-file-record
index 5bfaae5..33efc86 100644
--- a/tools/perf/scripts/perl/bin/rw-by-file-record
+++ b/tools/perf/scripts/perl/bin/rw-by-file-record
@@ -1,3 +1,3 @@
 #!/bin/bash
-perf record -a -e syscalls:sys_enter_read -e syscalls:sys_enter_write $@
+perf record -e syscalls:sys_enter_read -e syscalls:sys_enter_write $@
 
diff --git a/tools/perf/scripts/perl/bin/rw-by-pid-record b/tools/perf/scripts/perl/bin/rw-by-pid-record
index 6e0b2f7..7cb9db2 100644
--- a/tools/perf/scripts/perl/bin/rw-by-pid-record
+++ b/tools/perf/scripts/perl/bin/rw-by-pid-record
@@ -1,2 +1,2 @@
 #!/bin/bash
-perf record -a -e syscalls:sys_enter_read -e syscalls:sys_exit_read -e syscalls:sys_enter_write -e syscalls:sys_exit_write $@
+perf record -e syscalls:sys_enter_read -e syscalls:sys_exit_read -e syscalls:sys_enter_write -e syscalls:sys_exit_write $@
diff --git a/tools/perf/scripts/perl/bin/rwtop-record b/tools/perf/scripts/perl/bin/rwtop-record
index 6e0b2f7..7cb9db2 100644
--- a/tools/perf/scripts/perl/bin/rwtop-record
+++ b/tools/perf/scripts/perl/bin/rwtop-record
@@ -1,2 +1,2 @@
 #!/bin/bash
-perf record -a -e syscalls:sys_enter_read -e syscalls:sys_exit_read -e syscalls:sys_enter_write -e syscalls:sys_exit_write $@
+perf record -e syscalls:sys_enter_read -e syscalls:sys_exit_read -e syscalls:sys_enter_write -e syscalls:sys_exit_write $@
diff --git a/tools/perf/scripts/perl/bin/wakeup-latency-record b/tools/perf/scripts/perl/bin/wakeup-latency-record
index 9f2acaa..464251a 100644
--- a/tools/perf/scripts/perl/bin/wakeup-latency-record
+++ b/tools/perf/scripts/perl/bin/wakeup-latency-record
@@ -1,5 +1,5 @@
 #!/bin/bash
-perf record -a -e sched:sched_switch -e sched:sched_wakeup $@
+perf record -e sched:sched_switch -e sched:sched_wakeup $@
 
 
 
diff --git a/tools/perf/scripts/perl/bin/workqueue-stats-record b/tools/perf/scripts/perl/bin/workqueue-stats-record
index 85301f2..8edda90 100644
--- a/tools/perf/scripts/perl/bin/workqueue-stats-record
+++ b/tools/perf/scripts/perl/bin/workqueue-stats-record
@@ -1,2 +1,2 @@
 #!/bin/bash
-perf record -a -e workqueue:workqueue_creation -e workqueue:workqueue_destruction -e workqueue:workqueue_execution -e workqueue:workqueue_insertion $@
+perf record -e workqueue:workqueue_creation -e workqueue:workqueue_destruction -e workqueue:workqueue_execution -e workqueue:workqueue_insertion $@
diff --git a/tools/perf/scripts/python/bin/failed-syscalls-by-pid-record b/tools/perf/scripts/python/bin/failed-syscalls-by-pid-record
index eb5846b..8104895 100644
--- a/tools/perf/scripts/python/bin/failed-syscalls-by-pid-record
+++ b/tools/perf/scripts/python/bin/failed-syscalls-by-pid-record
@@ -1,2 +1,2 @@
 #!/bin/bash
-perf record -a -e raw_syscalls:sys_exit $@
+perf record -e raw_syscalls:sys_exit $@
diff --git a/tools/perf/scripts/python/bin/futex-contention-record b/tools/perf/scripts/python/bin/futex-contention-record
index 5ecbb43..b1495c9 100644
--- a/tools/perf/scripts/python/bin/futex-contention-record
+++ b/tools/perf/scripts/python/bin/futex-contention-record
@@ -1,2 +1,2 @@
 #!/bin/bash
-perf record -a -e syscalls:sys_enter_futex -e syscalls:sys_exit_futex $@
+perf record -e syscalls:sys_enter_futex -e syscalls:sys_exit_futex $@
diff --git a/tools/perf/scripts/python/bin/netdev-times-record b/tools/perf/scripts/python/bin/netdev-times-record
index d931a82..558754b 100644
--- a/tools/perf/scripts/python/bin/netdev-times-record
+++ b/tools/perf/scripts/python/bin/netdev-times-record
@@ -1,5 +1,5 @@
 #!/bin/bash
-perf record -a -e net:net_dev_xmit -e net:net_dev_queue		\
+perf record -e net:net_dev_xmit -e net:net_dev_queue		\
 		-e net:netif_receive_skb -e net:netif_rx		\
 		-e skb:consume_skb -e skb:kfree_skb			\
 		-e skb:skb_copy_datagram_iovec -e napi:napi_poll	\
diff --git a/tools/perf/scripts/python/bin/sched-migration-record b/tools/perf/scripts/python/bin/sched-migration-record
index 17a3e9b..7493fdd 100644
--- a/tools/perf/scripts/python/bin/sched-migration-record
+++ b/tools/perf/scripts/python/bin/sched-migration-record
@@ -1,2 +1,2 @@
 #!/bin/bash
-perf record -m 16384 -a -e sched:sched_wakeup -e sched:sched_wakeup_new -e sched:sched_switch -e sched:sched_migrate_task $@
+perf record -m 16384 -e sched:sched_wakeup -e sched:sched_wakeup_new -e sched:sched_switch -e sched:sched_migrate_task $@
diff --git a/tools/perf/scripts/python/bin/sctop-record b/tools/perf/scripts/python/bin/sctop-record
index 1fc5998..4efbfaa 100644
--- a/tools/perf/scripts/python/bin/sctop-record
+++ b/tools/perf/scripts/python/bin/sctop-record
@@ -1,2 +1,2 @@
 #!/bin/bash
-perf record -a -e raw_syscalls:sys_enter $@
+perf record -e raw_syscalls:sys_enter $@
diff --git a/tools/perf/scripts/python/bin/syscall-counts-by-pid-record b/tools/perf/scripts/python/bin/syscall-counts-by-pid-record
index 1fc5998..4efbfaa 100644
--- a/tools/perf/scripts/python/bin/syscall-counts-by-pid-record
+++ b/tools/perf/scripts/python/bin/syscall-counts-by-pid-record
@@ -1,2 +1,2 @@
 #!/bin/bash
-perf record -a -e raw_syscalls:sys_enter $@
+perf record -e raw_syscalls:sys_enter $@
diff --git a/tools/perf/scripts/python/bin/syscall-counts-record b/tools/perf/scripts/python/bin/syscall-counts-record
index 1fc5998..4efbfaa 100644
--- a/tools/perf/scripts/python/bin/syscall-counts-record
+++ b/tools/perf/scripts/python/bin/syscall-counts-record
@@ -1,2 +1,2 @@
 #!/bin/bash
-perf record -a -e raw_syscalls:sys_enter $@
+perf record -e raw_syscalls:sys_enter $@
diff --git a/tools/perf/util/ui/util.c b/tools/perf/util/ui/util.c
index 9706d9d..056c695 100644
--- a/tools/perf/util/ui/util.c
+++ b/tools/perf/util/ui/util.c
@@ -104,9 +104,10 @@
 	return rc;
 }
 
+static const char yes[] = "Yes", no[] = "No";
+
 bool ui__dialog_yesno(const char *msg)
 {
 	/* newtWinChoice should really be accepting const char pointers... */
-	char yes[] = "Yes", no[] = "No";
-	return newtWinChoice(NULL, yes, no, (char *)msg) == 1;
+	return newtWinChoice(NULL, (char *)yes, (char *)no, (char *)msg) == 1;
 }