Merge branch 'akpm'

* akpm: (182 commits)
  fbdev: bf54x-lq043fb: use kzalloc over kmalloc/memset
  fbdev: *bfin*: fix __dev{init,exit} markings
  fbdev: *bfin*: drop unnecessary calls to memset
  fbdev: bfin-t350mcqb-fb: drop unused local variables
  fbdev: blackfin has __raw I/O accessors, so use them in fb.h
  fbdev: s1d13xxxfb: add accelerated bitblt functions
  tcx: use standard fields for framebuffer physical address and length
  fbdev: add support for handoff from firmware to hw framebuffers
  intelfb: fix a bug when changing video timing
  fbdev: use framebuffer_release() for freeing fb_info structures
  radeon: P2G2CLK_ALWAYS_ONb tested twice, should 2nd be P2G2CLK_DAC_ALWAYS_ONb?
  s3c-fb: CPUFREQ frequency scaling support
  s3c-fb: fix resource releasing on error during probing
  carminefb: fix possible access beyond end of carmine_modedb[]
  acornfb: remove fb_mmap function
  mb862xxfb: use CONFIG_OF instead of CONFIG_PPC_OF
  mb862xxfb: restrict compliation of platform driver to PPC
  Samsung SoC Framebuffer driver: add Alpha Channel support
  atmel-lcdc: fix pixclock upper bound detection
  offb: use framebuffer_alloc() to allocate fb_info struct
  ...

Manually fix up conflicts due to kmemcheck in mm/slab.c
diff --git a/Documentation/DocBook/debugobjects.tmpl b/Documentation/DocBook/debugobjects.tmpl
index 7f5f218..08ff908 100644
--- a/Documentation/DocBook/debugobjects.tmpl
+++ b/Documentation/DocBook/debugobjects.tmpl
@@ -106,7 +106,7 @@
       number of errors are printk'ed including a full stack trace.
     </para>
     <para>
-      The statistics are available via debugfs/debug_objects/stats.
+      The statistics are available via /sys/kernel/debug/debug_objects/stats.
       They provide information about the number of warnings and the
       number of successful fixups along with information about the
       usage of the internal tracking objects and the state of the
diff --git a/Documentation/cdrom/packet-writing.txt b/Documentation/cdrom/packet-writing.txt
index cf1f812..1c40777 100644
--- a/Documentation/cdrom/packet-writing.txt
+++ b/Documentation/cdrom/packet-writing.txt
@@ -117,7 +117,7 @@
 
 To read pktcdvd device infos in human readable form, do:
 
-	# cat /debug/pktcdvd/pktcdvd[0-7]/info
+	# cat /sys/kernel/debug/pktcdvd/pktcdvd[0-7]/info
 
 For a description of the debugfs interface look into the file:
 
diff --git a/Documentation/driver-model/device.txt b/Documentation/driver-model/device.txt
index a7cbfff..a124f31 100644
--- a/Documentation/driver-model/device.txt
+++ b/Documentation/driver-model/device.txt
@@ -162,3 +162,35 @@
 
 The file name will be 'power' with a mode of 0644 (-rw-r--r--).
 
+Word of warning:  While the kernel allows device_create_file() and
+device_remove_file() to be called on a device at any time, userspace has
+strict expectations on when attributes get created.  When a new device is
+registered in the kernel, a uevent is generated to notify userspace (like
+udev) that a new device is available.  If attributes are added after the
+device is registered, then userspace won't get notified and userspace will
+not know about the new attributes.
+
+This is important for device drivers that need to publish additional
+attributes for a device at driver probe time.  If the device driver simply
+calls device_create_file() on the device structure passed to it, then
+userspace will never be notified of the new attributes.  Instead, it should
+probably use class_create() and class->dev_attrs to set up a list of
+desired attributes in the module_init function, and then, in the .probe()
+hook, use device_create() to create a new device as a child
+of the probed device.  The new device will generate a new uevent and
+properly advertise the new attributes to userspace.
+
+For example, if a driver wanted to add the following attributes:
+struct device_attribute mydriver_attribs[] = {
+	__ATTR(port_count, 0444, port_count_show, NULL),
+	__ATTR(serial_number, 0444, serial_number_show, NULL),
+	__ATTR_NULL
+};
+
+Then in the module init function it would do:
+	mydriver_class = class_create(THIS_MODULE, "my_attrs");
+	mydriver_class->dev_attrs = mydriver_attribs;
+
+And assuming 'dev' is the struct device passed into the probe hook, the driver
+probe function would do something like:
+	device_create(mydriver_class, dev, chrdev, &private_data, "my_name");
diff --git a/Documentation/fault-injection/fault-injection.txt b/Documentation/fault-injection/fault-injection.txt
index 4bc374a..0793056 100644
--- a/Documentation/fault-injection/fault-injection.txt
+++ b/Documentation/fault-injection/fault-injection.txt
@@ -29,16 +29,16 @@
 fault-inject-debugfs kernel module provides some debugfs entries for runtime
 configuration of fault-injection capabilities.
 
-- /debug/fail*/probability:
+- /sys/kernel/debug/fail*/probability:
 
 	likelihood of failure injection, in percent.
 	Format: <percent>
 
 	Note that one-failure-per-hundred is a very high error rate
 	for some testcases.  Consider setting probability=100 and configure
-	/debug/fail*/interval for such testcases.
+	/sys/kernel/debug/fail*/interval for such testcases.
 
-- /debug/fail*/interval:
+- /sys/kernel/debug/fail*/interval:
 
 	specifies the interval between failures, for calls to
 	should_fail() that pass all the other tests.
@@ -46,18 +46,18 @@
 	Note that if you enable this, by setting interval>1, you will
 	probably want to set probability=100.
 
-- /debug/fail*/times:
+- /sys/kernel/debug/fail*/times:
 
 	specifies how many times failures may happen at most.
 	A value of -1 means "no limit".
 
-- /debug/fail*/space:
+- /sys/kernel/debug/fail*/space:
 
 	specifies an initial resource "budget", decremented by "size"
 	on each call to should_fail(,size).  Failure injection is
 	suppressed until "space" reaches zero.
 
-- /debug/fail*/verbose
+- /sys/kernel/debug/fail*/verbose
 
 	Format: { 0 | 1 | 2 }
 	specifies the verbosity of the messages when failure is
@@ -65,17 +65,17 @@
 	log line per failure; '2' will print a call trace too -- useful
 	to debug the problems revealed by fault injection.
 
-- /debug/fail*/task-filter:
+- /sys/kernel/debug/fail*/task-filter:
 
 	Format: { 'Y' | 'N' }
 	A value of 'N' disables filtering by process (default).
 	Any positive value limits failures to only processes indicated by
 	/proc/<pid>/make-it-fail==1.
 
-- /debug/fail*/require-start:
-- /debug/fail*/require-end:
-- /debug/fail*/reject-start:
-- /debug/fail*/reject-end:
+- /sys/kernel/debug/fail*/require-start:
+- /sys/kernel/debug/fail*/require-end:
+- /sys/kernel/debug/fail*/reject-start:
+- /sys/kernel/debug/fail*/reject-end:
 
 	specifies the range of virtual addresses tested during
 	stacktrace walking.  Failure is injected only if some caller
@@ -84,26 +84,26 @@
 	Default required range is [0,ULONG_MAX) (whole of virtual address space).
 	Default rejected range is [0,0).
 
-- /debug/fail*/stacktrace-depth:
+- /sys/kernel/debug/fail*/stacktrace-depth:
 
 	specifies the maximum stacktrace depth walked during search
 	for a caller within [require-start,require-end) OR
 	[reject-start,reject-end).
 
-- /debug/fail_page_alloc/ignore-gfp-highmem:
+- /sys/kernel/debug/fail_page_alloc/ignore-gfp-highmem:
 
 	Format: { 'Y' | 'N' }
 	default is 'N', setting it to 'Y' won't inject failures into
 	highmem/user allocations.
 
-- /debug/failslab/ignore-gfp-wait:
-- /debug/fail_page_alloc/ignore-gfp-wait:
+- /sys/kernel/debug/failslab/ignore-gfp-wait:
+- /sys/kernel/debug/fail_page_alloc/ignore-gfp-wait:
 
 	Format: { 'Y' | 'N' }
 	default is 'N', setting it to 'Y' will inject failures
 	only into non-sleep allocations (GFP_ATOMIC allocations).
 
-- /debug/fail_page_alloc/min-order:
+- /sys/kernel/debug/fail_page_alloc/min-order:
 
 	specifies the minimum page allocation order to be injected
 	failures.
@@ -166,13 +166,13 @@
 #!/bin/bash
 
 FAILTYPE=failslab
-echo Y > /debug/$FAILTYPE/task-filter
-echo 10 > /debug/$FAILTYPE/probability
-echo 100 > /debug/$FAILTYPE/interval
-echo -1 > /debug/$FAILTYPE/times
-echo 0 > /debug/$FAILTYPE/space
-echo 2 > /debug/$FAILTYPE/verbose
-echo 1 > /debug/$FAILTYPE/ignore-gfp-wait
+echo Y > /sys/kernel/debug/$FAILTYPE/task-filter
+echo 10 > /sys/kernel/debug/$FAILTYPE/probability
+echo 100 > /sys/kernel/debug/$FAILTYPE/interval
+echo -1 > /sys/kernel/debug/$FAILTYPE/times
+echo 0 > /sys/kernel/debug/$FAILTYPE/space
+echo 2 > /sys/kernel/debug/$FAILTYPE/verbose
+echo 1 > /sys/kernel/debug/$FAILTYPE/ignore-gfp-wait
 
 faulty_system()
 {
@@ -217,20 +217,20 @@
 	exit 1
 fi
 
-cat /sys/module/$module/sections/.text > /debug/$FAILTYPE/require-start
-cat /sys/module/$module/sections/.data > /debug/$FAILTYPE/require-end
+cat /sys/module/$module/sections/.text > /sys/kernel/debug/$FAILTYPE/require-start
+cat /sys/module/$module/sections/.data > /sys/kernel/debug/$FAILTYPE/require-end
 
-echo N > /debug/$FAILTYPE/task-filter
-echo 10 > /debug/$FAILTYPE/probability
-echo 100 > /debug/$FAILTYPE/interval
-echo -1 > /debug/$FAILTYPE/times
-echo 0 > /debug/$FAILTYPE/space
-echo 2 > /debug/$FAILTYPE/verbose
-echo 1 > /debug/$FAILTYPE/ignore-gfp-wait
-echo 1 > /debug/$FAILTYPE/ignore-gfp-highmem
-echo 10 > /debug/$FAILTYPE/stacktrace-depth
+echo N > /sys/kernel/debug/$FAILTYPE/task-filter
+echo 10 > /sys/kernel/debug/$FAILTYPE/probability
+echo 100 > /sys/kernel/debug/$FAILTYPE/interval
+echo -1 > /sys/kernel/debug/$FAILTYPE/times
+echo 0 > /sys/kernel/debug/$FAILTYPE/space
+echo 2 > /sys/kernel/debug/$FAILTYPE/verbose
+echo 1 > /sys/kernel/debug/$FAILTYPE/ignore-gfp-wait
+echo 1 > /sys/kernel/debug/$FAILTYPE/ignore-gfp-highmem
+echo 10 > /sys/kernel/debug/$FAILTYPE/stacktrace-depth
 
-trap "echo 0 > /debug/$FAILTYPE/probability" SIGINT SIGTERM EXIT
+trap "echo 0 > /sys/kernel/debug/$FAILTYPE/probability" SIGINT SIGTERM EXIT
 
 echo "Injecting errors into the module $module... (interrupt to stop)"
 sleep 1000000
diff --git a/Documentation/filesystems/vfat.txt b/Documentation/filesystems/vfat.txt
index 5147be5..b58b84b 100644
--- a/Documentation/filesystems/vfat.txt
+++ b/Documentation/filesystems/vfat.txt
@@ -132,6 +132,11 @@
 		 If you want to use ATTR_RO as read-only flag even for
 		 the directory, set this option.
 
+errors=panic|continue|remount-ro
+	      -- specify FAT behavior on critical errors: panic, continue
+		 without doing anything, or remount the partition in
+		 read-only mode (default behavior).
+
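+		 For example, a mount invocation selecting the panic
+		 behavior might look like this (the device and mount point
+		 below are placeholder names):
+
+		 # /dev/sdb1 and /mnt/flash are placeholders
+		 mount -t vfat -o errors=panic /dev/sdb1 /mnt/flash
+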
 <bool>: 0,1,yes,no,true,false
 
 TODO
diff --git a/Documentation/firmware_class/README b/Documentation/firmware_class/README
index c3480aa..7eceaff 100644
--- a/Documentation/firmware_class/README
+++ b/Documentation/firmware_class/README
@@ -77,7 +77,8 @@
    seconds for the whole load operation.
 
  - request_firmware_nowait() is also provided for convenience in
-   non-user contexts.
+   user contexts to request firmware asynchronously, but can't be called
+   in atomic contexts.
 
 
  about in-kernel persistence:
diff --git a/Documentation/hwmon/f71882fg b/Documentation/hwmon/f71882fg
index a832126..bee4c30 100644
--- a/Documentation/hwmon/f71882fg
+++ b/Documentation/hwmon/f71882fg
@@ -2,14 +2,18 @@
 ======================
 
 Supported chips:
-  * Fintek F71882FG and F71883FG
-    Prefix: 'f71882fg'
+  * Fintek F71858FG
+    Prefix: 'f71858fg'
     Addresses scanned: none, address read from Super I/O config space
     Datasheet: Available from the Fintek website
   * Fintek F71862FG and F71863FG
     Prefix: 'f71862fg'
     Addresses scanned: none, address read from Super I/O config space
     Datasheet: Available from the Fintek website
+  * Fintek F71882FG and F71883FG
+    Prefix: 'f71882fg'
+    Addresses scanned: none, address read from Super I/O config space
+    Datasheet: Available from the Fintek website
   * Fintek F8000
     Prefix: 'f8000'
     Addresses scanned: none, address read from Super I/O config space
@@ -66,13 +70,13 @@
 
 Three different fan control modes are supported; the mode number is written
 to the pwm#_enable file. Note that not all modes are supported on all
-chips, and some modes may only be available in RPM / PWM mode on the F8000.
+chips, and some modes may only be available in RPM / PWM mode.
 Writing an unsupported mode will result in an invalid parameter error.
 
 * 1: Manual mode
   You ask for a specific PWM duty cycle / DC voltage or a specific % of
   fan#_full_speed by writing to the pwm# file. This mode is only
-  available on the F8000 if the fan channel is in RPM mode.
+  available on the F71858FG / F8000 if the fan channel is in RPM mode.
 
 * 2: Normal auto mode
   You can define a number of temperature/fan speed trip points, which % the
diff --git a/Documentation/hwmon/ibmaem b/Documentation/hwmon/ibmaem
index e98bdfe..1e0d59e 100644
--- a/Documentation/hwmon/ibmaem
+++ b/Documentation/hwmon/ibmaem
@@ -7,7 +7,7 @@
 Supported systems:
   * Any recent IBM System X server with AEM support.
     This includes the x3350, x3550, x3650, x3655, x3755, x3850 M2,
-    x3950 M2, and certain HS2x/LS2x/QS2x blades.  The IPMI host interface
+    x3950 M2, and certain HC10/HS2x/LS2x/QS2x blades.  The IPMI host interface
     driver ("ipmi-si") needs to be loaded for this driver to do anything.
     Prefix: 'ibmaem'
     Datasheet: Not available
diff --git a/Documentation/hwmon/sysfs-interface b/Documentation/hwmon/sysfs-interface
index 004ee16..dcbd502 100644
--- a/Documentation/hwmon/sysfs-interface
+++ b/Documentation/hwmon/sysfs-interface
@@ -70,6 +70,7 @@
 [0-*]	denotes any positive number starting from 0
 [1-*]	denotes any positive number starting from 1
 RO	read only value
+WO	write only value
 RW	read/write value
 
 Read/write values may be read-only for some chips, depending on the
@@ -295,6 +296,24 @@
 		user-space.
 		RO
 
+temp[1-*]_lowest
+		Historical minimum temperature
+		Unit: millidegree Celsius
+		RO
+
+temp[1-*]_highest
+		Historical maximum temperature
+		Unit: millidegree Celsius
+		RO
+
+temp[1-*]_reset_history
+		Reset temp_lowest and temp_highest
+		WO
+
+temp_reset_history
+		Reset temp_lowest and temp_highest for all sensors
+		WO
+
 Some chips measure temperature using external thermistors and an ADC, and
 report the temperature measurement as a voltage. Converting this voltage
 back to a temperature (or the other way around for limits) requires
diff --git a/Documentation/hwmon/tmp401 b/Documentation/hwmon/tmp401
new file mode 100644
index 0000000..9fc4472
--- /dev/null
+++ b/Documentation/hwmon/tmp401
@@ -0,0 +1,42 @@
+Kernel driver tmp401
+====================
+
+Supported chips:
+  * Texas Instruments TMP401
+    Prefix: 'tmp401'
+    Addresses scanned: I2C 0x4c
+    Datasheet: http://focus.ti.com/docs/prod/folders/print/tmp401.html
+  * Texas Instruments TMP411
+    Prefix: 'tmp411'
+    Addresses scanned: I2C 0x4c
+    Datasheet: http://focus.ti.com/docs/prod/folders/print/tmp411.html
+
+Authors:
+         Hans de Goede <hdegoede@redhat.com>
+	 Andre Prendel <andre.prendel@gmx.de>
+
+Description
+-----------
+
+This driver implements support for Texas Instruments TMP401 and
+TMP411 chips. These chips implements one remote and one local
+temperature sensor. Temperature is measured in degrees
+Celsius. Resolution of the remote sensor is 0.0625 degree. Local
+sensor resolution can be set to 0.5, 0.25, 0.125 or 0.0625 degree (not
+supported by the driver so far, so using the default resolution of 0.5
+degree).
+
+The driver provides the common sysfs-interface for temperatures (see
+Documentation/hwmon/sysfs-interface under Temperatures).
+
+The TMP411 chip is compatible with TMP401. It provides some additional
+features.
+
+* Minimum and Maximum temperature measured since power-on, chip-reset
+
+  Exported via sysfs attributes tempX_lowest and tempX_highest.
+
+* Reset of historical minimum/maximum temperature measurements
+
+  Exported via sysfs attribute temp_reset_history. Writing 1 to this
+  file triggers a reset.
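+
+For illustration, the extremes could be inspected and reset from the chip's
+sysfs directory roughly as follows (the path is a placeholder; the actual
+hwmon number and location depend on the system):
+
+  # placeholder path -- adjust to the actual hwmon device directory
+  cd /sys/class/hwmon/hwmon0/device
+  cat temp*_lowest temp*_highest
+  echo 1 > temp_reset_history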
diff --git a/Documentation/hwmon/w83627ehf b/Documentation/hwmon/w83627ehf
index b6eb593..02b7489 100644
--- a/Documentation/hwmon/w83627ehf
+++ b/Documentation/hwmon/w83627ehf
@@ -12,6 +12,10 @@
     Addresses scanned: ISA address retrieved from Super I/O registers
     Datasheet:
         http://www.nuvoton.com.tw/NR/rdonlyres/7885623D-A487-4CF9-A47F-30C5F73D6FE6/0/W83627DHG.pdf
+  * Winbond W83627DHG-P
+    Prefix: 'w83627dhg'
+    Addresses scanned: ISA address retrieved from Super I/O registers
+    Datasheet: not available
   * Winbond W83667HG
     Prefix: 'w83667hg'
     Addresses scanned: ISA address retrieved from Super I/O registers
@@ -28,8 +32,8 @@
 -----------
 
 This driver implements support for the Winbond W83627EHF, W83627EHG,
-W83627DHG and W83667HG super I/O chips. We will refer to them collectively
-as Winbond chips.
+W83627DHG, W83627DHG-P and W83667HG super I/O chips. We will refer to them
+collectively as Winbond chips.
 
 The chips implement three temperature sensors, five fan rotation
 speed sensors, ten analog voltage sensors (only nine for the 627DHG), one
@@ -135,3 +139,6 @@
 The DHG also supports PECI, where the DHG queries Intel CPU temperatures, and
 the ICH8 southbridge gets that data via PECI from the DHG, so that the
 southbridge drives the fans. And the DHG supports SST, a one-wire serial bus.
+
+The DHG-P has an additional automatic fan speed control mode named Smart Fan
+(TM) III+. This mode is not yet supported by the driver.
diff --git a/Documentation/i2c/busses/i2c-viapro b/Documentation/i2c/busses/i2c-viapro
index 22efedf..2e758b0 100644
--- a/Documentation/i2c/busses/i2c-viapro
+++ b/Documentation/i2c/busses/i2c-viapro
@@ -19,6 +19,9 @@
   * VIA Technologies, Inc. VX800/VX820
     Datasheet: available on http://linux.via.com.tw
 
+  * VIA Technologies, Inc. VX855/VX875
+    Datasheet: Availability unknown
+
 Authors:
 	Kyösti Mälkki <kmalkki@cc.hut.fi>,
 	Mark D. Studebaker <mdsxyz123@yahoo.com>,
@@ -53,6 +56,7 @@
  device 1106:3287   (VT8251)
  device 1106:8324   (CX700)
  device 1106:8353   (VX800/VX820)
+ device 1106:8409   (VX855/VX875)
 
 If none of these show up, you should look in the BIOS for settings like
 enable ACPI / SMBus or even USB.
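+
+One way to check whether the controller shows up at all is lspci; for the
+VX855/VX875, for instance (1106:8409 is just the numeric ID listed above):
+
+	lspci -n | grep 1106:8409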
diff --git a/Documentation/kmemcheck.txt b/Documentation/kmemcheck.txt
new file mode 100644
index 0000000..36304460
--- /dev/null
+++ b/Documentation/kmemcheck.txt
@@ -0,0 +1,773 @@
+GETTING STARTED WITH KMEMCHECK
+==============================
+
+Vegard Nossum <vegardno@ifi.uio.no>
+
+
+Contents
+========
+0. Introduction
+1. Downloading
+2. Configuring and compiling
+3. How to use
+3.1. Booting
+3.2. Run-time enable/disable
+3.3. Debugging
+3.4. Annotating false positives
+4. Reporting errors
+5. Technical description
+
+
+0. Introduction
+===============
+
+kmemcheck is a debugging feature for the Linux Kernel. More specifically, it
+is a dynamic checker that detects and warns about some uses of uninitialized
+memory.
+
+Userspace programmers might be familiar with Valgrind's memcheck. The main
+difference between memcheck and kmemcheck is that memcheck works for userspace
+programs only, and kmemcheck works for the kernel only. The implementations
+are of course vastly different. Because of this, kmemcheck is not as accurate
+as memcheck, but it turns out to be good enough in practice to discover real
+programmer errors that the compiler is not able to find through static
+analysis.
+
+Enabling kmemcheck on a kernel will probably slow it down to the extent that
+the machine will not be usable for normal workloads such as e.g. an
+interactive desktop. kmemcheck will also cause the kernel to use about twice
+as much memory as normal. For this reason, kmemcheck is strictly a debugging
+feature.
+
+
+1. Downloading
+==============
+
+kmemcheck can only be downloaded using git. If you want to write patches
+against the current code, you should use the kmemcheck development branch of
+the tip tree. It is also possible to use the linux-next tree, which also
+includes the latest version of kmemcheck.
+
+Assuming that you've already cloned the linux-2.6.git repository, all you
+have to do is add the -tip tree as a remote, like this:
+
+	$ git remote add tip git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip.git
+
+To actually download the tree, fetch the remote:
+
+	$ git fetch tip
+
+And to check out a new local branch with the kmemcheck code:
+
+	$ git checkout -b kmemcheck tip/kmemcheck
+
+General instructions for the -tip tree can be found here:
+http://people.redhat.com/mingo/tip.git/readme.txt
+
+
+2. Configuring and compiling
+============================
+
+kmemcheck only works for the x86 (both 32- and 64-bit) platform. A number of
+configuration variables must have specific settings in order for the kmemcheck
+menu to even appear in "menuconfig". These are:
+
+  o CONFIG_CC_OPTIMIZE_FOR_SIZE=n
+
+	This option is located under "General setup" / "Optimize for size".
+
+	Without this, gcc will use certain optimizations that usually lead to
+	false positive warnings from kmemcheck. An example of this is a 16-bit
+	field in a struct, where gcc may load 32 bits, then discard the upper
+	16 bits. kmemcheck sees only the 32-bit load, and may trigger a
+	warning for the upper 16 bits (if they're uninitialized).
+
+  o CONFIG_SLAB=y or CONFIG_SLUB=y
+
+	This option is located under "General setup" / "Choose SLAB
+	allocator".
+
+  o CONFIG_FUNCTION_TRACER=n
+
+	This option is located under "Kernel hacking" / "Tracers" / "Kernel
+	Function Tracer"
+
+	When function tracing is compiled in, gcc emits a call to another
+	function at the beginning of every function. This means that when the
+	page fault handler is called, the ftrace framework will be called
+	before kmemcheck has had a chance to handle the fault. If ftrace then
+	modifies memory that was tracked by kmemcheck, the result is an
+	endless recursive page fault.
+
+  o CONFIG_DEBUG_PAGEALLOC=n
+
+	This option is located under "Kernel hacking" / "Debug page memory
+	allocations".
+
+In addition, I highly recommend turning on CONFIG_DEBUG_INFO=y. This is also
+located under "Kernel hacking". With this, you will be able to get line number
+information from the kmemcheck warnings, which is extremely valuable in
+debugging a problem. This option is not mandatory, however, because it slows
+down the compilation process and produces a much bigger kernel image.
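+
+To summarize, a .config fragment satisfying the above might look roughly
+like this (SLUB is chosen here purely as an example; SLAB works just as
+well):
+
+	# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
+	CONFIG_SLUB=y
+	# CONFIG_FUNCTION_TRACER is not set
+	# CONFIG_DEBUG_PAGEALLOC is not set
+	CONFIG_DEBUG_INFO=y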
+
+Now the kmemcheck menu should be visible (under "Kernel hacking" / "kmemcheck:
+trap use of uninitialized memory"). Here follows a description of the
+kmemcheck configuration variables:
+
+  o CONFIG_KMEMCHECK
+
+	This must be enabled in order to use kmemcheck at all...
+
+  o CONFIG_KMEMCHECK_[DISABLED | ENABLED | ONESHOT]_BY_DEFAULT
+
+	This option controls the status of kmemcheck at boot-time. "Enabled"
+	will enable kmemcheck right from the start, "disabled" will boot the
+	kernel as normal (but with the kmemcheck code compiled in, so it can
+	be enabled at run-time after the kernel has booted), and "one-shot" is
+	a special mode which will turn kmemcheck off automatically after
+	detecting the first use of uninitialized memory.
+
+	If you are using kmemcheck to actively debug a problem, then you
+	probably want to choose "enabled" here.
+
+	The one-shot mode is mostly useful in automated test setups because it
+	can prevent floods of warnings and increase the chances of the machine
+	surviving in case something is really wrong. In other cases, the one-
+	shot mode could actually be counter-productive because it would turn
+	itself off at the very first error -- in the case of a false positive
+too -- and this would get in the way of debugging the specific
+	problem you were interested in.
+
+	If you would like to use your kernel as normal, but with a chance to
+	enable kmemcheck in case of some problem, it might be a good idea to
+	choose "disabled" here. When kmemcheck is disabled, most of the run-
+	time overhead is not incurred, and the kernel will be almost as fast
+	as normal.
+
+  o CONFIG_KMEMCHECK_QUEUE_SIZE
+
+	Select the maximum number of error reports to store in an internal
+	(fixed-size) buffer. Since errors can occur virtually anywhere and in
+	any context, we need a temporary storage area which is guaranteed not
+	to generate any other page faults when accessed. The queue will be
+	emptied as soon as a tasklet may be scheduled. If the queue is full,
+	new error reports will be lost.
+
+	The default value of 64 is probably fine. If some code produces more
+	than 64 errors within an irqs-off section, then the code is likely to
+	produce many, many more, too, and these additional reports seldom give
+	any more information (the first report is usually the most valuable
+	anyway).
+
+	This number might have to be adjusted if you are not using serial
+	console or similar to capture the kernel log. If you are using the
+	"dmesg" command to save the log, then getting a lot of kmemcheck
+	warnings might overflow the kernel log itself, and the earlier reports
+	will get lost in that way instead. Try setting this to 10 or so on
+	such a setup.
+
+  o CONFIG_KMEMCHECK_SHADOW_COPY_SHIFT
+
+	Select the number of shadow bytes to save along with each entry of the
+	error-report queue. These bytes indicate what parts of an allocation
+	are initialized, uninitialized, etc. and will be displayed when an
+	error is detected to help the debugging of a particular problem.
+
+	The number entered here is actually the base-2 logarithm of the number
+	of bytes that will be saved. So if you pick for example 5 here, kmemcheck
+	will save 2^5 = 32 bytes.
+
+	The default value should be fine for debugging most problems. It also
+	fits nicely within 80 columns.
+
+  o CONFIG_KMEMCHECK_PARTIAL_OK
+
+	This option (when enabled) works around certain GCC optimizations that
+	produce 32-bit reads from 16-bit variables where the upper 16 bits are
+	thrown away afterwards.
+
+	The default value (enabled) is recommended. This may of course hide
+	some real errors, but disabling it would probably produce a lot of
+	false positives.
+
+  o CONFIG_KMEMCHECK_BITOPS_OK
+
+	This option silences warnings that would be generated for bit-field
+	accesses where not all the bits are initialized at the same time. This
+	may also hide some real bugs.
+
+	This option is probably obsolete, or it should be replaced with
+	the kmemcheck-/bitfield-annotations for the code in question. The
+	default value is therefore fine.
+
+Now compile the kernel as usual.
+
+
+3. How to use
+=============
+
+3.1. Booting
+============
+
+First some information about the command-line options. There is only one
+option specific to kmemcheck, and this is called "kmemcheck". It can be used
+to override the default mode as chosen by the CONFIG_KMEMCHECK_*_BY_DEFAULT
+option. Its possible settings are:
+
+  o kmemcheck=0 (disabled)
+  o kmemcheck=1 (enabled)
+  o kmemcheck=2 (one-shot mode)
+
+If SLUB debugging has been enabled in the kernel, it may take precedence over
+kmemcheck in such a way that the slab caches which are under SLUB debugging
+will not be tracked by kmemcheck. In order to ensure that this doesn't happen
+(even though it shouldn't by default), use SLUB's boot option "slub_debug",
+like this: slub_debug=-
+
+In fact, this option may also be used for fine-grained control over SLUB vs.
+kmemcheck. For example, if the command line includes "kmemcheck=1
+slub_debug=,dentry", then SLUB debugging will be used only for the "dentry"
+slab cache, and with kmemcheck tracking all the other caches. This is advanced
+usage, however, and is not generally recommended.
+
+
+3.2. Run-time enable/disable
+============================
+
+When the kernel has booted, it is possible to enable or disable kmemcheck at
+run-time. WARNING: This feature is still experimental and may cause false
+positive warnings to appear. Therefore, try not to use this. If you find that
+it doesn't work properly (e.g. you see an unreasonable number of warnings), I
+will be happy to take bug reports.
+
+Use the file /proc/sys/kernel/kmemcheck for this purpose, e.g.:
+
+	$ echo 0 > /proc/sys/kernel/kmemcheck # disables kmemcheck
+
+The numbers are the same as for the kmemcheck= command-line option.
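+
+So, by the same mapping, for example:
+
+	$ echo 1 > /proc/sys/kernel/kmemcheck # enables kmemcheck
+	$ echo 2 > /proc/sys/kernel/kmemcheck # one-shot mode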
+
+
+3.3. Debugging
+==============
+
+A typical report will look something like this:
+
+WARNING: kmemcheck: Caught 32-bit read from uninitialized memory (ffff88003e4a2024)
+80000000000000000000000000000000000000000088ffff0000000000000000
+ i i i i u u u u i i i i i i i i u u u u u u u u u u u u u u u u
+         ^
+
+Pid: 1856, comm: ntpdate Not tainted 2.6.29-rc5 #264 945P-A
+RIP: 0010:[<ffffffff8104ede8>]  [<ffffffff8104ede8>] __dequeue_signal+0xc8/0x190
+RSP: 0018:ffff88003cdf7d98  EFLAGS: 00210002
+RAX: 0000000000000030 RBX: ffff88003d4ea968 RCX: 0000000000000009
+RDX: ffff88003e5d6018 RSI: ffff88003e5d6024 RDI: ffff88003cdf7e84
+RBP: ffff88003cdf7db8 R08: ffff88003e5d6000 R09: 0000000000000000
+R10: 0000000000000080 R11: 0000000000000000 R12: 000000000000000e
+R13: ffff88003cdf7e78 R14: ffff88003d530710 R15: ffff88003d5a98c8
+FS:  0000000000000000(0000) GS:ffff880001982000(0063) knlGS:00000
+CS:  0010 DS: 002b ES: 002b CR0: 0000000080050033
+CR2: ffff88003f806ea0 CR3: 000000003c036000 CR4: 00000000000006a0
+DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
+DR3: 0000000000000000 DR6: 00000000ffff4ff0 DR7: 0000000000000400
+ [<ffffffff8104f04e>] dequeue_signal+0x8e/0x170
+ [<ffffffff81050bd8>] get_signal_to_deliver+0x98/0x390
+ [<ffffffff8100b87d>] do_notify_resume+0xad/0x7d0
+ [<ffffffff8100c7b5>] int_signal+0x12/0x17
+ [<ffffffffffffffff>] 0xffffffffffffffff
+
+The single most valuable piece of information in this report is the RIP (or
+EIP on 32-bit) value. This will help us pinpoint exactly which instruction
+caused the warning.
+
+If your kernel was compiled with CONFIG_DEBUG_INFO=y, then all we have to do
+is give this address to the addr2line program, like this:
+
+	$ addr2line -e vmlinux -i ffffffff8104ede8
+	arch/x86/include/asm/string_64.h:12
+	include/asm-generic/siginfo.h:287
+	kernel/signal.c:380
+	kernel/signal.c:410
+
+The "-e vmlinux" tells addr2line which file to look in. IMPORTANT: This must
+be the vmlinux of the kernel that produced the warning in the first place! If
+not, the line number information will almost certainly be wrong.
+
+The "-i" tells addr2line to also print the line numbers of inlined functions.
+In this case, the flag was very important, because otherwise, it would only
+have printed the first line, which is just a call to memcpy(), which could be
+called from a thousand places in the kernel, and is therefore not very useful.
+These inlined functions would not show up in the stack trace above, simply
+because the kernel doesn't load the extra debugging information. This
+technique can of course be used with ordinary kernel oopses as well.
+
+In this case, it's the caller of memcpy() that is interesting, and it can be
+found in include/asm-generic/siginfo.h, line 287:
+
+281 static inline void copy_siginfo(struct siginfo *to, struct siginfo *from)
+282 {
+283         if (from->si_code < 0)
+284                 memcpy(to, from, sizeof(*to));
+285         else
+286                 /* _sigchld is currently the largest know union member */
+287                 memcpy(to, from, __ARCH_SI_PREAMBLE_SIZE + sizeof(from->_sifields._sigchld));
+288 }
+
+Since this was a read (kmemcheck usually warns about reads only, though it can
+warn about writes to unallocated or freed memory as well), it was probably the
+"from" argument which contained some uninitialized bytes. Following the chain
+of calls, we move upwards to see where "from" was allocated or initialized,
+kernel/signal.c, line 380:
+
+359 static void collect_signal(int sig, struct sigpending *list, siginfo_t *info)
+360 {
+...
+367         list_for_each_entry(q, &list->list, list) {
+368                 if (q->info.si_signo == sig) {
+369                         if (first)
+370                                 goto still_pending;
+371                         first = q;
+...
+377         if (first) {
+378 still_pending:
+379                 list_del_init(&first->list);
+380                 copy_siginfo(info, &first->info);
+381                 __sigqueue_free(first);
+...
+392         }
+393 }
+
+Here, it is &first->info that is being passed on to copy_siginfo(). The
+variable "first" was found on a list -- passed in as the second argument to
+collect_signal(). We continue our journey through the stack, to figure out
+where the item on "list" was allocated or initialized. We move to line 410:
+
+395 static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
+396                         siginfo_t *info)
+397 {
+...
+410                 collect_signal(sig, pending, info);
+...
+414 }
+
+Now we need to follow the "pending" pointer, since that is being passed on to
+collect_signal() as "list". At this point, we've run out of lines from the
+"addr2line" output. Not to worry, we just paste the next addresses from the
+kmemcheck stack dump, i.e.:
+
+ [<ffffffff8104f04e>] dequeue_signal+0x8e/0x170
+ [<ffffffff81050bd8>] get_signal_to_deliver+0x98/0x390
+ [<ffffffff8100b87d>] do_notify_resume+0xad/0x7d0
+ [<ffffffff8100c7b5>] int_signal+0x12/0x17
+
+	$ addr2line -e vmlinux -i ffffffff8104f04e ffffffff81050bd8 \
+		ffffffff8100b87d ffffffff8100c7b5
+	kernel/signal.c:446
+	kernel/signal.c:1806
+	arch/x86/kernel/signal.c:805
+	arch/x86/kernel/signal.c:871
+	arch/x86/kernel/entry_64.S:694
+
+Remember that since these addresses were found on the stack and not as the
+RIP value, they actually point to the _next_ instruction (they are return
+addresses). This becomes obvious when we look at the code for line 446:
+
+422 int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
+423 {
+...
+431                 signr = __dequeue_signal(&tsk->signal->shared_pending,
+432                                          mask, info);
+433                 /*
+434                  * itimer signal ?
+435                  *
+436                  * itimers are process shared and we restart periodic
+437                  * itimers in the signal delivery path to prevent DoS
+438                  * attacks in the high resolution timer case. This is
+439                  * compliant with the old way of self restarting
+440                  * itimers, as the SIGALRM is a legacy signal and only
+441                  * queued once. Changing the restart behaviour to
+442                  * restart the timer in the signal dequeue path is
+443                  * reducing the timer noise on heavy loaded !highres
+444                  * systems too.
+445                  */
+446                 if (unlikely(signr == SIGALRM)) {
+...
+489 }
+
+So instead of looking at 446, we should be looking at 431, which is the line
+that executes just before 446. Here we see that what we are looking for is
+&tsk->signal->shared_pending.
+
+Our next task is to figure out which function puts items on this
+"shared_pending" list. A crude but efficient tool is git grep:
+
+	$ git grep -n 'shared_pending' kernel/
+	...
+	kernel/signal.c:828:    pending = group ? &t->signal->shared_pending : &t->pending;
+	kernel/signal.c:1339:   pending = group ? &t->signal->shared_pending : &t->pending;
+	...
+
+There were more results, but none of them were related to list operations,
+and these were the only assignments. We inspect the line numbers more closely
+and find that this is indeed where items are being added to the list:
+
+816 static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
+817                         int group)
+818 {
+...
+828         pending = group ? &t->signal->shared_pending : &t->pending;
+...
+851         q = __sigqueue_alloc(t, GFP_ATOMIC, (sig < SIGRTMIN &&
+852                                              (is_si_special(info) ||
+853                                               info->si_code >= 0)));
+854         if (q) {
+855                 list_add_tail(&q->list, &pending->list);
+...
+890 }
+
+and:
+
+1309 int send_sigqueue(struct sigqueue *q, struct task_struct *t, int group)
+1310 {
+....
+1339         pending = group ? &t->signal->shared_pending : &t->pending;
+1340         list_add_tail(&q->list, &pending->list);
+....
+1347 }
+
+In the first case, the list element we are looking for, "q", is being returned
+from the function __sigqueue_alloc(), which looks like an allocation function.
+Let's take a look at it:
+
+187 static struct sigqueue *__sigqueue_alloc(struct task_struct *t, gfp_t flags,
+188                                          int override_rlimit)
+189 {
+190         struct sigqueue *q = NULL;
+191         struct user_struct *user;
+192 
+193         /*
+194          * We won't get problems with the target's UID changing under us
+195          * because changing it requires RCU be used, and if t != current, the
+196          * caller must be holding the RCU readlock (by way of a spinlock) and
+197          * we use RCU protection here
+198          */
+199         user = get_uid(__task_cred(t)->user);
+200         atomic_inc(&user->sigpending);
+201         if (override_rlimit ||
+202             atomic_read(&user->sigpending) <=
+203                         t->signal->rlim[RLIMIT_SIGPENDING].rlim_cur)
+204                 q = kmem_cache_alloc(sigqueue_cachep, flags);
+205         if (unlikely(q == NULL)) {
+206                 atomic_dec(&user->sigpending);
+207                 free_uid(user);
+208         } else {
+209                 INIT_LIST_HEAD(&q->list);
+210                 q->flags = 0;
+211                 q->user = user;
+212         }
+213 
+214         return q;
+215 }
+
+We see that this function initializes q->list, q->flags, and q->user. It seems
+that now is the time to look at the definition of "struct sigqueue", e.g.:
+
+14 struct sigqueue {
+15         struct list_head list;
+16         int flags;
+17         siginfo_t info;
+18         struct user_struct *user;
+19 };
+
+And, you might remember, it was a memcpy() on &first->info that caused the
+warning, so this makes perfect sense. It also seems reasonable to assume that
+it is the caller of __sigqueue_alloc() that has the responsibility of filling
+out (initializing) this member.
+
+But just which fields of the struct were uninitialized? Let's look at
+kmemcheck's report again:
+
+WARNING: kmemcheck: Caught 32-bit read from uninitialized memory (ffff88003e4a2024)
+80000000000000000000000000000000000000000088ffff0000000000000000
+ i i i i u u u u i i i i i i i i u u u u u u u u u u u u u u u u
+         ^
+
+These first two lines are the memory dump of the memory object itself, and the
+shadow bytemap, respectively. The memory object itself is in this case
+&first->info. Just beware that the start of this dump is NOT the start of the
+object itself! The position of the caret (^) corresponds with the address of
+the read (ffff88003e4a2024).
+
+The shadow bytemap dump legend is as follows:
+
+  i - initialized
+  u - uninitialized
+  a - unallocated (memory has been allocated by the slab layer, but has not
+      yet been handed off to anybody)
+  f - freed (memory has been allocated by the slab layer, but has been freed
+      by the previous owner)
+
+In order to figure out where (relative to the start of the object) the
+uninitialized memory was located, we have to look at the disassembly. For
+that, we'll need the RIP address again:
+
+RIP: 0010:[<ffffffff8104ede8>]  [<ffffffff8104ede8>] __dequeue_signal+0xc8/0x190
+
+	$ objdump -d --no-show-raw-insn vmlinux | grep -C 8 ffffffff8104ede8:
+	ffffffff8104edc8:       mov    %r8,0x8(%r8)
+	ffffffff8104edcc:       test   %r10d,%r10d
+	ffffffff8104edcf:       js     ffffffff8104ee88 <__dequeue_signal+0x168>
+	ffffffff8104edd5:       mov    %rax,%rdx
+	ffffffff8104edd8:       mov    $0xc,%ecx
+	ffffffff8104eddd:       mov    %r13,%rdi
+	ffffffff8104ede0:       mov    $0x30,%eax
+	ffffffff8104ede5:       mov    %rdx,%rsi
+	ffffffff8104ede8:       rep movsl %ds:(%rsi),%es:(%rdi)
+	ffffffff8104edea:       test   $0x2,%al
+	ffffffff8104edec:       je     ffffffff8104edf0 <__dequeue_signal+0xd0>
+	ffffffff8104edee:       movsw  %ds:(%rsi),%es:(%rdi)
+	ffffffff8104edf0:       test   $0x1,%al
+	ffffffff8104edf2:       je     ffffffff8104edf5 <__dequeue_signal+0xd5>
+	ffffffff8104edf4:       movsb  %ds:(%rsi),%es:(%rdi)
+	ffffffff8104edf5:       mov    %r8,%rdi
+	ffffffff8104edf8:       callq  ffffffff8104de60 <__sigqueue_free>
+
+As expected, it's the "rep movsl" instruction from the memcpy() that causes
+the warning. We know that REP MOVSL uses the register RCX to count
+the number of remaining iterations. By taking a look at the register dump
+again (from the kmemcheck report), we can figure out how many bytes were left
+to copy:
+
+RAX: 0000000000000030 RBX: ffff88003d4ea968 RCX: 0000000000000009
+
+By looking at the disassembly, we also see that %ecx is being loaded with the
+value $0xc just before (ffffffff8104edd8), so we are very lucky. Keep in mind
+that this is the number of iterations, not bytes. And since this is a "long"
+operation, we need to multiply by 4 to get the number of bytes. So this means
+that the uninitialized value was encountered at 4 * (0xc - 0x9) = 12 bytes
+from the start of the object.
+
+We can now try to figure out which field of the "struct siginfo" that was not
+initialized. This is the beginning of the struct:
+
+40 typedef struct siginfo {
+41         int si_signo;
+42         int si_errno;
+43         int si_code;
+44                 
+45         union {
+..
+92         } _sifields;
+93 } siginfo_t;
+
+On 64-bit, the int is 4 bytes long, so it must be the union member that has
+not been initialized. We can verify this using gdb:
+
+	$ gdb vmlinux
+	...
+	(gdb) p &((struct siginfo *) 0)->_sifields
+	$1 = (union {...} *) 0x10
+
+Actually, it seems that the union member is located at offset 0x10 -- which
+means that gcc has inserted 4 bytes of padding between the members si_code
+and _sifields. We can now get a fuller picture of the memory dump:
+
+         _----------------------------=> si_code
+        /        _--------------------=> (padding)
+       |        /        _------------=> _sifields(._kill._pid)
+       |       |        /        _----=> _sifields(._kill._uid)
+       |       |       |        / 
+-------|-------|-------|-------|
+80000000000000000000000000000000000000000088ffff0000000000000000
+ i i i i u u u u i i i i i i i i u u u u u u u u u u u u u u u u
+
+This allows us to realize another important fact: si_code contains the value
+0x80. Remember that x86 is little endian, so the first 4 bytes "80000000" are
+really the number 0x00000080. With a bit of research, we find that this is
+actually the constant SI_KERNEL defined in include/asm-generic/siginfo.h:
+
+144 #define SI_KERNEL       0x80            /* sent by the kernel from somewhere     */
+
+This macro is used in exactly one place in the x86 kernel: In send_signal()
+in kernel/signal.c:
+
+816 static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
+817                         int group)
+818 {
+...
+828         pending = group ? &t->signal->shared_pending : &t->pending;
+...
+851         q = __sigqueue_alloc(t, GFP_ATOMIC, (sig < SIGRTMIN &&
+852                                              (is_si_special(info) ||
+853                                               info->si_code >= 0)));
+854         if (q) {
+855                 list_add_tail(&q->list, &pending->list);
+856                 switch ((unsigned long) info) {
+...
+865                 case (unsigned long) SEND_SIG_PRIV:
+866                         q->info.si_signo = sig;
+867                         q->info.si_errno = 0;
+868                         q->info.si_code = SI_KERNEL;
+869                         q->info.si_pid = 0;
+870                         q->info.si_uid = 0;
+871                         break;
+...
+890 }
+
+Not only does this match with the .si_code member, it also matches the place
+we found earlier when looking for where siginfo_t objects are enqueued on the
+"shared_pending" list.
+
+So to sum up: It seems that it is the padding introduced by the compiler
+between two struct fields that is uninitialized, and this gets reported when
+we do a memcpy() on the struct. This means that we have identified a false
+positive warning.
+
+Normally, kmemcheck will not report uninitialized accesses in memcpy() calls
+when both the source and destination addresses are tracked. (Instead, we copy
+the shadow bytemap as well). In this case, the destination address clearly
+was not tracked. We can dig a little deeper into the stack trace from above:
+
+	arch/x86/kernel/signal.c:805
+	arch/x86/kernel/signal.c:871
+	arch/x86/kernel/entry_64.S:694
+
+And we clearly see that the destination siginfo object is located on the
+stack:
+
+782 static void do_signal(struct pt_regs *regs)
+783 {
+784         struct k_sigaction ka;
+785         siginfo_t info;
+...
+804         signr = get_signal_to_deliver(&info, &ka, regs, NULL);
+...
+854 }
+
+And this &info is what eventually gets passed to copy_siginfo() as the
+destination argument.
+
+Now, even though we didn't find an actual error here, the example is still a
+good one, because it shows how one would go about finding out what the
+report was all about.
+
+
+3.4. Annotating false positives
+===============================
+
+There are a few different ways to make annotations in the source code that
+will keep kmemcheck from checking and reporting certain allocations. Here
+they are:
+
+  o __GFP_NOTRACK_FALSE_POSITIVE
+
+	This flag can be passed to kmalloc() or kmem_cache_alloc() (therefore
+	also to other functions that end up calling one of these) to indicate
+	that the allocation should not be tracked because it would lead to
+	a false positive report. This is a "big hammer" way of silencing
+	kmemcheck; after all, even if the false positive pertains to a
+	particular field in a struct, for example, we will now lose the
+	ability to find (real) errors in other parts of the same struct.
+
+	Example:
+
+	    /* No warnings will ever trigger on accessing any part of x */
+	    x = kmalloc(sizeof *x, GFP_KERNEL | __GFP_NOTRACK_FALSE_POSITIVE);
+
+  o kmemcheck_bitfield_begin(name)/kmemcheck_bitfield_end(name) and
+	kmemcheck_annotate_bitfield(ptr, name)
+
+	The first two of these three macros can be used inside struct
+	definitions to signal, respectively, the beginning and end of a
+	bitfield. Additionally, this will assign the bitfield a name, which
+	is given as an argument to the macros.
+
+	Having used these markers, one can later use
+	kmemcheck_annotate_bitfield() at the point of allocation, to indicate
+	which parts of the allocation are part of a bitfield.
+
+	Example:
+
+	    struct foo {
+		int x;
+
+		kmemcheck_bitfield_begin(flags);
+		int flag_a:1;
+		int flag_b:1;
+		kmemcheck_bitfield_end(flags);
+
+		int y;
+	    };
+
+	    struct foo *x = kmalloc(sizeof *x, GFP_KERNEL);
+
+	    /* No warnings will trigger on accessing the bitfield of x */
+	    kmemcheck_annotate_bitfield(x, flags);
+
+	Note that kmemcheck_annotate_bitfield() can be used even before the
+	return value of kmalloc() is checked -- in other words, passing NULL
+	as the first argument is legal (and will do nothing).
+
+
+4. Reporting errors
+===================
+
+As we have seen, kmemcheck will produce false positive reports. Therefore, it
+is not very wise to blindly post kmemcheck warnings to mailing lists and
+maintainers. Instead, I encourage maintainers and developers to find errors
+in their own code. If you get a warning, you can try to work around it, try
+to figure out if it's a real error or not, or simply ignore it. Most
+developers know their own code and will quickly and efficiently determine the
+root cause of a kmemcheck report. This is therefore also the most efficient
+way to work with kmemcheck.
+
+That said, we (the kmemcheck maintainers) will always be on the lookout for
+false positives that we can annotate and silence. So whatever you find,
+please drop us a note privately! Kernel configs and steps to reproduce (if
+available) are of course a great help too.
+
+Happy hacking!
+
+
+5. Technical description
+========================
+
+kmemcheck works by marking memory pages non-present. This means that whenever
+somebody attempts to access the page, a page fault is generated. The page
+fault handler notices that the page was in fact only hidden, and so it calls
+on the kmemcheck code to make further investigations.
+
+When the investigations are completed, kmemcheck "shows" the page by marking
+it present (as it would be under normal circumstances). This way, the
+interrupted code can continue as usual.
+
+But after the instruction has been executed, we should hide the page again, so
+that we can catch the next access too! Now kmemcheck makes use of a debugging
+feature of the processor, namely single-stepping. When the processor has
+finished the one instruction that generated the memory access, a debug
+exception is raised. From here, we simply hide the page again and continue
+execution, this time with the single-stepping feature turned off.
+
+kmemcheck requires some assistance from the memory allocator in order to work.
+The memory allocator needs to
+
+  1. Tell kmemcheck about newly allocated pages and pages that are about to
+     be freed. This allows kmemcheck to set up and tear down the shadow memory
+     for the pages in question. The shadow memory stores the status of each
+     byte in the allocation proper, e.g. whether it is initialized or
+     uninitialized.
+
+  2. Tell kmemcheck which parts of memory should be marked uninitialized.
+     There are actually a few more states, such as "not yet allocated" and
+     "recently freed".
+
+If a slab cache is set up using the SLAB_NOTRACK flag, it will never return
+memory that can take page faults because of kmemcheck.
+
+If a slab cache is NOT set up using the SLAB_NOTRACK flag, callers can still
+request memory with the __GFP_NOTRACK or __GFP_NOTRACK_FALSE_POSITIVE flags.
+This does not prevent the page faults from occurring, but it marks the
+object in question as being initialized so that no warnings will ever be
+produced for this object.
+
+Currently, the SLAB and SLUB allocators are supported by kmemcheck.
diff --git a/Documentation/kprobes.txt b/Documentation/kprobes.txt
index 1e7a769..053037a 100644
--- a/Documentation/kprobes.txt
+++ b/Documentation/kprobes.txt
@@ -507,9 +507,9 @@
 Appendix A: The kprobes debugfs interface
 
 With recent kernels (> 2.6.20) the list of registered kprobes is visible
-under the /debug/kprobes/ directory (assuming debugfs is mounted at /debug).
+under the /sys/kernel/debug/kprobes/ directory (assuming debugfs is mounted at /sys/kernel/debug).
 
-/debug/kprobes/list: Lists all registered probes on the system
+/sys/kernel/debug/kprobes/list: Lists all registered probes on the system
 
 c015d71a  k  vfs_read+0x0
 c011a316  j  do_fork+0x0
@@ -525,7 +525,7 @@
 such probes are marked with [GONE]. If the probe is temporarily disabled,
 such probes are marked with [DISABLED].
 
-/debug/kprobes/enabled: Turn kprobes ON/OFF forcibly.
+/sys/kernel/debug/kprobes/enabled: Turn kprobes ON/OFF forcibly.
 
 Provides a knob to globally and forcibly turn registered kprobes ON or OFF.
 By default, all kprobes are enabled. By echoing "0" to this file, all
diff --git a/Documentation/trace/ftrace.txt b/Documentation/trace/ftrace.txt
index 7bd27f0..a39b3c7 100644
--- a/Documentation/trace/ftrace.txt
+++ b/Documentation/trace/ftrace.txt
@@ -7,7 +7,6 @@
                (dual licensed under the GPL v2)
 Reviewers:   Elias Oltmanns, Randy Dunlap, Andrew Morton,
 	     John Kacur, and David Teigland.
-
 Written for: 2.6.28-rc2
 
 Introduction
@@ -33,13 +32,26 @@
 Ftrace uses the debugfs file system to hold the control files as
 well as the files to display output.
 
-To mount the debugfs system:
+When debugfs is configured into the kernel (which selecting any ftrace
+option will do), the directory /sys/kernel/debug will be created. To mount
+this directory, you can add the following line to your /etc/fstab file:
 
-  # mkdir /debug
-  # mount -t debugfs nodev /debug
+ debugfs       /sys/kernel/debug          debugfs defaults        0       0
 
-( Note: it is more common to mount at /sys/kernel/debug, but for
-  simplicity this document will use /debug)
+Or you can mount it at run time with:
+
+ mount -t debugfs nodev /sys/kernel/debug
+
+For quicker access to that directory you may want to make a soft link to
+it:
+
+ ln -s /sys/kernel/debug /debug
+
+Any selected ftrace option will also create a directory called tracing
+within the debugfs. The rest of the document will assume that you are in
+the ftrace directory (cd /sys/kernel/debug/tracing) and will concentrate
+only on the files within that directory, rather than cluttering the content
+with the full "/sys/kernel/debug/tracing" path name.
 
 That's it! (assuming that you have ftrace configured into your kernel)
 
@@ -389,18 +401,18 @@
 The trace_options file is used to control what gets printed in
 the trace output. To see what is available, simply cat the file:
 
-  cat /debug/tracing/trace_options
+  cat trace_options
   print-parent nosym-offset nosym-addr noverbose noraw nohex nobin \
   noblock nostacktrace nosched-tree nouserstacktrace nosym-userobj
 
 To disable one of the options, echo in the option prepended with
 "no".
 
-  echo noprint-parent > /debug/tracing/trace_options
+  echo noprint-parent > trace_options
 
 To enable an option, leave off the "no".
 
-  echo sym-offset > /debug/tracing/trace_options
+  echo sym-offset > trace_options
 
 Here are the available options:
 
@@ -476,11 +488,11 @@
 This tracer simply records schedule switches. Here is an example
 of how to use it.
 
- # echo sched_switch > /debug/tracing/current_tracer
- # echo 1 > /debug/tracing/tracing_enabled
+ # echo sched_switch > current_tracer
+ # echo 1 > tracing_enabled
  # sleep 1
- # echo 0 > /debug/tracing/tracing_enabled
- # cat /debug/tracing/trace
+ # echo 0 > tracing_enabled
+ # cat trace
 
 # tracer: sched_switch
 #
@@ -583,13 +595,13 @@
 To reset the maximum, echo 0 into tracing_max_latency. Here is
 an example:
 
- # echo irqsoff > /debug/tracing/current_tracer
- # echo 0 > /debug/tracing/tracing_max_latency
- # echo 1 > /debug/tracing/tracing_enabled
+ # echo irqsoff > current_tracer
+ # echo 0 > tracing_max_latency
+ # echo 1 > tracing_enabled
  # ls -ltr
  [...]
- # echo 0 > /debug/tracing/tracing_enabled
- # cat /debug/tracing/latency_trace
+ # echo 0 > tracing_enabled
+ # cat latency_trace
 # tracer: irqsoff
 #
 irqsoff latency trace v1.1.5 on 2.6.26
@@ -690,13 +702,13 @@
 which preemption was disabled. The control of preemptoff tracer
 is much like the irqsoff tracer.
 
- # echo preemptoff > /debug/tracing/current_tracer
- # echo 0 > /debug/tracing/tracing_max_latency
- # echo 1 > /debug/tracing/tracing_enabled
+ # echo preemptoff > current_tracer
+ # echo 0 > tracing_max_latency
+ # echo 1 > tracing_enabled
  # ls -ltr
  [...]
- # echo 0 > /debug/tracing/tracing_enabled
- # cat /debug/tracing/latency_trace
+ # echo 0 > tracing_enabled
+ # cat latency_trace
 # tracer: preemptoff
 #
 preemptoff latency trace v1.1.5 on 2.6.26-rc8
@@ -837,13 +849,13 @@
 Again, using this trace is much like the irqsoff and preemptoff
 tracers.
 
- # echo preemptirqsoff > /debug/tracing/current_tracer
- # echo 0 > /debug/tracing/tracing_max_latency
- # echo 1 > /debug/tracing/tracing_enabled
+ # echo preemptirqsoff > current_tracer
+ # echo 0 > tracing_max_latency
+ # echo 1 > tracing_enabled
  # ls -ltr
  [...]
- # echo 0 > /debug/tracing/tracing_enabled
- # cat /debug/tracing/latency_trace
+ # echo 0 > tracing_enabled
+ # cat latency_trace
 # tracer: preemptirqsoff
 #
 preemptirqsoff latency trace v1.1.5 on 2.6.26-rc8
@@ -999,12 +1011,12 @@
 Instead of performing an 'ls', we will run 'sleep 1' under
 'chrt' which changes the priority of the task.
 
- # echo wakeup > /debug/tracing/current_tracer
- # echo 0 > /debug/tracing/tracing_max_latency
- # echo 1 > /debug/tracing/tracing_enabled
+ # echo wakeup > current_tracer
+ # echo 0 > tracing_max_latency
+ # echo 1 > tracing_enabled
  # chrt -f 5 sleep 1
- # echo 0 > /debug/tracing/tracing_enabled
- # cat /debug/tracing/latency_trace
+ # echo 0 > tracing_enabled
+ # cat latency_trace
 # tracer: wakeup
 #
 wakeup latency trace v1.1.5 on 2.6.26-rc8
@@ -1114,11 +1126,11 @@
 ftrace_enabled is set; otherwise this tracer is a nop.
 
  # sysctl kernel.ftrace_enabled=1
- # echo function > /debug/tracing/current_tracer
- # echo 1 > /debug/tracing/tracing_enabled
+ # echo function > current_tracer
+ # echo 1 > tracing_enabled
  # usleep 1
- # echo 0 > /debug/tracing/tracing_enabled
- # cat /debug/tracing/trace
+ # echo 0 > tracing_enabled
+ # cat trace
 # tracer: function
 #
 #           TASK-PID   CPU#    TIMESTAMP  FUNCTION
@@ -1155,7 +1167,7 @@
 [...]
 int main(int argc, char *argv[]) {
 	[...]
-	trace_fd = open("/debug/tracing/tracing_enabled", O_WRONLY);
+	trace_fd = open(tracing_file("tracing_enabled"), O_WRONLY);
 	[...]
 	if (condition_hit()) {
 		write(trace_fd, "0", 1);
@@ -1163,26 +1175,20 @@
 	[...]
 }
 
-Note: Here we hard coded the path name. The debugfs mount is not
-guaranteed to be at /debug (and is more commonly at
-/sys/kernel/debug). For simple one time traces, the above is
-sufficent. For anything else, a search through /proc/mounts may
-be needed to find where the debugfs file-system is mounted.
-
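+Note: the tracing_file() helper used above is defined in the full
+example program later in this document; it looks up the debugfs mount
+point via /proc/mounts at run time.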
 
 Single thread tracing
 ---------------------
 
-By writing into /debug/tracing/set_ftrace_pid you can trace a
+By writing into set_ftrace_pid you can trace a
 single thread. For example:
 
-# cat /debug/tracing/set_ftrace_pid
+# cat set_ftrace_pid
 no pid
-# echo 3111 > /debug/tracing/set_ftrace_pid
-# cat /debug/tracing/set_ftrace_pid
+# echo 3111 > set_ftrace_pid
+# cat set_ftrace_pid
 3111
-# echo function > /debug/tracing/current_tracer
-# cat /debug/tracing/trace | head
+# echo function > current_tracer
+# cat trace | head
  # tracer: function
  #
  #           TASK-PID    CPU#    TIMESTAMP  FUNCTION
@@ -1193,8 +1199,8 @@
      yum-updatesd-3111  [003]  1637.254683: lock_hrtimer_base <-hrtimer_try_to_cancel
      yum-updatesd-3111  [003]  1637.254685: fget_light <-do_sys_poll
      yum-updatesd-3111  [003]  1637.254686: pipe_poll <-do_sys_poll
-# echo -1 > /debug/tracing/set_ftrace_pid
-# cat /debug/tracing/trace |head
+# echo -1 > set_ftrace_pid
+# cat trace |head
  # tracer: function
  #
  #           TASK-PID    CPU#    TIMESTAMP  FUNCTION
@@ -1216,6 +1222,51 @@
 #include <fcntl.h>
 #include <unistd.h>
 
+#define _STR(x) #x
+#define STR(x) _STR(x)
+#define MAX_PATH 256
+
+const char *find_debugfs(void)
+{
+       static char debugfs[MAX_PATH+1];
+       static int debugfs_found;
+       char type[100];
+       FILE *fp;
+
+       if (debugfs_found)
+               return debugfs;
+
+       if ((fp = fopen("/proc/mounts","r")) == NULL) {
+               perror("/proc/mounts");
+               return NULL;
+       }
+
+       while (fscanf(fp, "%*s %"
+                     STR(MAX_PATH)
+                     "s %99s %*s %*d %*d\n",
+                     debugfs, type) == 2) {
+               if (strcmp(type, "debugfs") == 0)
+                       break;
+       }
+       fclose(fp);
+
+       if (strcmp(type, "debugfs") != 0) {
+               fprintf(stderr, "debugfs not mounted\n");
+               return NULL;
+       }
+
+       debugfs_found = 1;
+
+       return debugfs;
+}
+
+const char *tracing_file(const char *file_name)
+{
+       static char trace_file[MAX_PATH+1];
+       snprintf(trace_file, MAX_PATH, "%s/%s", find_debugfs(), file_name);
+       return trace_file;
+}
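+
+/*
+ * Note that tracing_file() returns a pointer to a static buffer, so
+ * each call overwrites the result of the previous one; copy the string
+ * if more than one path is needed at the same time.
+ */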
+
 int main (int argc, char **argv)
 {
         if (argc < 1)
@@ -1226,12 +1277,12 @@
                 char line[64];
                 int s;
 
-                ffd = open("/debug/tracing/current_tracer", O_WRONLY);
+                ffd = open(tracing_file("current_tracer"), O_WRONLY);
                 if (ffd < 0)
                         exit(-1);
                 write(ffd, "nop", 3);
 
-                fd = open("/debug/tracing/set_ftrace_pid", O_WRONLY);
+                fd = open(tracing_file("set_ftrace_pid"), O_WRONLY);
                 s = sprintf(line, "%d\n", getpid());
                 write(fd, line, s);
 
@@ -1383,22 +1434,22 @@
   tracing_cpu_mask file) or you might sometimes see unordered
  function calls when the trace switches between CPUs.
 
-	hide: echo nofuncgraph-cpu > /debug/tracing/trace_options
-	show: echo funcgraph-cpu > /debug/tracing/trace_options
+	hide: echo nofuncgraph-cpu > trace_options
+	show: echo funcgraph-cpu > trace_options
 
 - The duration (function's time of execution) is displayed on
   the closing bracket line of a function or on the same line
   as the current function in the case of a leaf function. It is
   enabled by default.
 
-	hide: echo nofuncgraph-duration > /debug/tracing/trace_options
-	show: echo funcgraph-duration > /debug/tracing/trace_options
+	hide: echo nofuncgraph-duration > trace_options
+	show: echo funcgraph-duration > trace_options
 
 - The overhead field precedes the duration field and marks durations
   that exceed certain thresholds.
 
-	hide: echo nofuncgraph-overhead > /debug/tracing/trace_options
-	show: echo funcgraph-overhead > /debug/tracing/trace_options
+	hide: echo nofuncgraph-overhead > trace_options
+	show: echo funcgraph-overhead > trace_options
 	depends on: funcgraph-duration
 
   ie:
@@ -1427,8 +1478,8 @@
 - The task/pid field displays the thread cmdline and pid which
   executed the function. It is disabled by default.
 
-	hide: echo nofuncgraph-proc > /debug/tracing/trace_options
-	show: echo funcgraph-proc > /debug/tracing/trace_options
+	hide: echo nofuncgraph-proc > trace_options
+	show: echo funcgraph-proc > trace_options
 
   ie:
 
@@ -1451,8 +1502,8 @@
   system clock since it started. A snapshot of this time is
   given on each entry/exit of functions.
 
-	hide: echo nofuncgraph-abstime > /debug/tracing/trace_options
-	show: echo funcgraph-abstime > /debug/tracing/trace_options
+	hide: echo nofuncgraph-abstime > trace_options
+	show: echo funcgraph-abstime > trace_options
 
   ie:
 
@@ -1549,7 +1600,7 @@
 
    available_filter_functions
 
- # cat /debug/tracing/available_filter_functions
+ # cat available_filter_functions
 put_prev_task_idle
 kmem_cache_create
 pick_next_task_rt
@@ -1561,12 +1612,12 @@
 If I am only interested in sys_nanosleep and hrtimer_interrupt:
 
  # echo sys_nanosleep hrtimer_interrupt \
-		> /debug/tracing/set_ftrace_filter
- # echo ftrace > /debug/tracing/current_tracer
- # echo 1 > /debug/tracing/tracing_enabled
+		> set_ftrace_filter
+ # echo ftrace > current_tracer
+ # echo 1 > tracing_enabled
  # usleep 1
- # echo 0 > /debug/tracing/tracing_enabled
- # cat /debug/tracing/trace
+ # echo 0 > tracing_enabled
+ # cat trace
 # tracer: ftrace
 #
 #           TASK-PID   CPU#    TIMESTAMP  FUNCTION
@@ -1577,7 +1628,7 @@
 
 To see which functions are being traced, you can cat the file:
 
- # cat /debug/tracing/set_ftrace_filter
+ # cat set_ftrace_filter
 hrtimer_interrupt
 sys_nanosleep
 
@@ -1597,7 +1648,7 @@
       otherwise the shell may expand the parameters into names
       of files in the local directory.
 
- # echo 'hrtimer_*' > /debug/tracing/set_ftrace_filter
+ # echo 'hrtimer_*' > set_ftrace_filter
 
 Produces:
 
@@ -1618,7 +1669,7 @@
 
 Notice that we lost the sys_nanosleep.
 
- # cat /debug/tracing/set_ftrace_filter
+ # cat set_ftrace_filter
 hrtimer_run_queues
 hrtimer_run_pending
 hrtimer_init
@@ -1644,17 +1695,17 @@
 To clear out a filter so that all functions will be recorded
 again:
 
- # echo > /debug/tracing/set_ftrace_filter
- # cat /debug/tracing/set_ftrace_filter
+ # echo > set_ftrace_filter
+ # cat set_ftrace_filter
  #
 
 Again, now we want to append.
 
- # echo sys_nanosleep > /debug/tracing/set_ftrace_filter
- # cat /debug/tracing/set_ftrace_filter
+ # echo sys_nanosleep > set_ftrace_filter
+ # cat set_ftrace_filter
 sys_nanosleep
- # echo 'hrtimer_*' >> /debug/tracing/set_ftrace_filter
- # cat /debug/tracing/set_ftrace_filter
+ # echo 'hrtimer_*' >> set_ftrace_filter
+ # cat set_ftrace_filter
 hrtimer_run_queues
 hrtimer_run_pending
 hrtimer_init
@@ -1677,7 +1728,7 @@
 The set_ftrace_notrace prevents those functions from being
 traced.
 
- # echo '*preempt*' '*lock*' > /debug/tracing/set_ftrace_notrace
+ # echo '*preempt*' '*lock*' > set_ftrace_notrace
 
 Produces:
 
@@ -1767,13 +1818,13 @@
 trace_pipe is consumed. This means that subsequent reads will be
 different. The trace is live.
 
- # echo function > /debug/tracing/current_tracer
- # cat /debug/tracing/trace_pipe > /tmp/trace.out &
+ # echo function > current_tracer
+ # cat trace_pipe > /tmp/trace.out &
 [1] 4153
- # echo 1 > /debug/tracing/tracing_enabled
+ # echo 1 > tracing_enabled
  # usleep 1
- # echo 0 > /debug/tracing/tracing_enabled
- # cat /debug/tracing/trace
+ # echo 0 > tracing_enabled
+ # cat trace
 # tracer: function
 #
 #           TASK-PID   CPU#    TIMESTAMP  FUNCTION
@@ -1809,7 +1860,7 @@
 CPU. To know the full size, multiply the number of possible CPUs
 by the number of entries.
 
- # cat /debug/tracing/buffer_size_kb
+ # cat buffer_size_kb
 1408 (units kilobytes)
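 
 For example, with 4 possible CPUs (a count assumed here purely for
 illustration) that corresponds to roughly 4 * 1408 = 5632 kilobytes
 of ring buffer in total.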
 
 Note, to modify this, you must have tracing completely disabled.
@@ -1817,18 +1868,18 @@
 current_tracer is not set to "nop", an EINVAL error will be
 returned.
 
- # echo nop > /debug/tracing/current_tracer
- # echo 10000 > /debug/tracing/buffer_size_kb
- # cat /debug/tracing/buffer_size_kb
+ # echo nop > current_tracer
+ # echo 10000 > buffer_size_kb
+ # cat buffer_size_kb
 10000 (units kilobytes)
 
 The number of pages which will be allocated is limited to a
 percentage of available memory. Allocating too much will produce
 an error.
 
- # echo 1000000000000 > /debug/tracing/buffer_size_kb
+ # echo 1000000000000 > buffer_size_kb
 -bash: echo: write error: Cannot allocate memory
- # cat /debug/tracing/buffer_size_kb
+ # cat buffer_size_kb
 85
 
 -----------
diff --git a/Documentation/trace/mmiotrace.txt b/Documentation/trace/mmiotrace.txt
index 5731c67..162effb 100644
--- a/Documentation/trace/mmiotrace.txt
+++ b/Documentation/trace/mmiotrace.txt
@@ -32,41 +32,41 @@
 Usage Quick Reference
 ---------------------
 
-$ mount -t debugfs debugfs /debug
-$ echo mmiotrace > /debug/tracing/current_tracer
-$ cat /debug/tracing/trace_pipe > mydump.txt &
+$ mount -t debugfs debugfs /sys/kernel/debug
+$ echo mmiotrace > /sys/kernel/debug/tracing/current_tracer
+$ cat /sys/kernel/debug/tracing/trace_pipe > mydump.txt &
 Start X or whatever.
-$ echo "X is up" > /debug/tracing/trace_marker
-$ echo nop > /debug/tracing/current_tracer
+$ echo "X is up" > /sys/kernel/debug/tracing/trace_marker
+$ echo nop > /sys/kernel/debug/tracing/current_tracer
 Check for lost events.
 
 
 Usage
 -----
 
-Make sure debugfs is mounted to /debug. If not, (requires root privileges)
-$ mount -t debugfs debugfs /debug
+Make sure debugfs is mounted to /sys/kernel/debug. If not, mount it (requires root privileges):
+$ mount -t debugfs debugfs /sys/kernel/debug
 
 Check that the driver you are about to trace is not loaded.
 
 Activate mmiotrace (requires root privileges):
-$ echo mmiotrace > /debug/tracing/current_tracer
+$ echo mmiotrace > /sys/kernel/debug/tracing/current_tracer
 
 Start storing the trace:
-$ cat /debug/tracing/trace_pipe > mydump.txt &
+$ cat /sys/kernel/debug/tracing/trace_pipe > mydump.txt &
 The 'cat' process should stay running (sleeping) in the background.
 
 Load the driver you want to trace and use it. Mmiotrace will only catch MMIO
 accesses to areas that are ioremapped while mmiotrace is active.
 
 During tracing you can place comments (markers) into the trace by
-$ echo "X is up" > /debug/tracing/trace_marker
+$ echo "X is up" > /sys/kernel/debug/tracing/trace_marker
 This makes it easier to see which part of the (huge) trace corresponds to
 which action. It is recommended to place descriptive markers about what you
 do.
 
 Shut down mmiotrace (requires root privileges):
-$ echo nop > /debug/tracing/current_tracer
+$ echo nop > /sys/kernel/debug/tracing/current_tracer
 The 'cat' process exits. If it does not, kill it by issuing the 'fg' command and
 pressing ctrl+c.
 
@@ -78,10 +78,10 @@
 events were lost, the trace is incomplete. You should enlarge the buffers and
 try again. Buffers are enlarged by first seeing how large the current buffers
 are:
-$ cat /debug/tracing/buffer_size_kb
+$ cat /sys/kernel/debug/tracing/buffer_size_kb
 gives you a number. Approximately double this number and write it back, for
 instance:
-$ echo 128000 > /debug/tracing/buffer_size_kb
+$ echo 128000 > /sys/kernel/debug/tracing/buffer_size_kb
 Then start again from the top.
 
 If you are doing a trace for a driver project, e.g. Nouveau, you should also
diff --git a/MAINTAINERS b/MAINTAINERS
index 06579ab..fb94add 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -163,9 +163,10 @@
 F:	drivers/net/r8169.c
 
 8250/16?50 (AND CLONE UARTS) SERIAL DRIVER
+P:	Alan Cox
+M:	alan@lxorguk.ukuu.org.uk
 L:	linux-serial@vger.kernel.org
 W:	http://serial.sourceforge.net
-M:	alan@lxorguk.ukuu.org.uk
 S:	Odd Fixes
 F:	drivers/serial/8250*
 F:	include/linux/serial_8250.h
@@ -3383,6 +3384,14 @@
 F:	include/linux/kgdb.h
 F:	kernel/kgdb.c
 
+KMEMCHECK
+P:	Vegard Nossum
+M:	vegardno@ifi.uio.no
+P:	Pekka Enberg
+M:	penberg@cs.helsinki.fi
+L:	linux-kernel@vger.kernel.org
+S:	Maintained
+
 KMEMLEAK
 P:	Catalin Marinas
 M:	catalin.marinas@arm.com
@@ -6104,6 +6113,12 @@
 S:	Maintained
 F:	drivers/net/wireless/rndis_wlan.c
 
+USB XHCI DRIVER
+P:	Sarah Sharp
+M:	sarah.a.sharp@intel.com
+L:	linux-usb@vger.kernel.org
+S:	Supported
+
 USB ZC0301 DRIVER
 P:	Luca Risolia
 M:	luca.risolia@studio.unibo.it
diff --git a/arch/arm/plat-s3c/include/plat/regs-usb-hsotg-phy.h b/arch/arm/plat-s3c/include/plat/regs-usb-hsotg-phy.h
new file mode 100644
index 0000000..36a85f5
--- /dev/null
+++ b/arch/arm/plat-s3c/include/plat/regs-usb-hsotg-phy.h
@@ -0,0 +1,50 @@
+/* arch/arm/plat-s3c/include/plat/regs-usb-hsotg-phy.h
+ *
+ * Copyright 2008 Openmoko, Inc.
+ * Copyright 2008 Simtec Electronics
+ *      http://armlinux.simtec.co.uk/
+ *      Ben Dooks <ben@simtec.co.uk>
+ *
+ * S3C - USB2.0 Highspeed/OtG device PHY registers
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+/* Note, this is a separate header file as some of the clock framework
+ * needs to touch this if the clk_48m is used as the USB OHCI or other
+ * peripheral source.
+*/
+
+#ifndef __PLAT_S3C64XX_REGS_USB_HSOTG_PHY_H
+#define __PLAT_S3C64XX_REGS_USB_HSOTG_PHY_H __FILE__
+
+/* S3C64XX_PA_USB_HSPHY */
+
+#define S3C_HSOTG_PHYREG(x)	((x) + S3C_VA_USB_HSPHY)
+
+#define S3C_PHYPWR				S3C_HSOTG_PHYREG(0x00)
+#define SRC_PHYPWR_OTG_DISABLE			(1 << 4)
+#define SRC_PHYPWR_ANALOG_POWERDOWN		(1 << 3)
+#define SRC_PHYPWR_FORCE_SUSPEND		(1 << 1)
+
+#define S3C_PHYCLK				S3C_HSOTG_PHYREG(0x04)
+#define S3C_PHYCLK_MODE_USB11			(1 << 6)
+#define S3C_PHYCLK_EXT_OSC			(1 << 5)
+#define S3C_PHYCLK_CLK_FORCE			(1 << 4)
+#define S3C_PHYCLK_ID_PULL			(1 << 2)
+#define S3C_PHYCLK_CLKSEL_MASK			(0x3 << 0)
+#define S3C_PHYCLK_CLKSEL_SHIFT			(0)
+#define S3C_PHYCLK_CLKSEL_48M			(0x0 << 0)
+#define S3C_PHYCLK_CLKSEL_12M			(0x2 << 0)
+#define S3C_PHYCLK_CLKSEL_24M			(0x3 << 0)
+
+#define S3C_RSTCON				S3C_HSOTG_PHYREG(0x08)
+#define S3C_RSTCON_PHYCLK			(1 << 2)
+#define S3C_RSTCON_HCLK				(1 << 2)
+#define S3C_RSTCON_PHY				(1 << 0)
+
+#define S3C_PHYTUNE				S3C_HSOTG_PHYREG(0x20)
+
+#endif /* __PLAT_S3C64XX_REGS_USB_HSOTG_PHY_H */
diff --git a/arch/arm/plat-s3c/include/plat/regs-usb-hsotg.h b/arch/arm/plat-s3c/include/plat/regs-usb-hsotg.h
new file mode 100644
index 0000000..8d18d9d
--- /dev/null
+++ b/arch/arm/plat-s3c/include/plat/regs-usb-hsotg.h
@@ -0,0 +1,377 @@
+/* arch/arm/plat-s3c/include/plat/regs-usb-hsotg.h
+ *
+ * Copyright 2008 Openmoko, Inc.
+ * Copyright 2008 Simtec Electronics
+ *      http://armlinux.simtec.co.uk/
+ *      Ben Dooks <ben@simtec.co.uk>
+ *
+ * S3C - USB2.0 Highspeed/OtG device block registers
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#ifndef __PLAT_S3C64XX_REGS_USB_HSOTG_H
+#define __PLAT_S3C64XX_REGS_USB_HSOTG_H __FILE__
+
+#define S3C_HSOTG_REG(x) (x)
+
+#define S3C_GOTGCTL				S3C_HSOTG_REG(0x000)
+#define S3C_GOTGCTL_BSESVLD			(1 << 19)
+#define S3C_GOTGCTL_ASESVLD			(1 << 18)
+#define S3C_GOTGCTL_DBNC_SHORT			(1 << 17)
+#define S3C_GOTGCTL_CONID_B			(1 << 16)
+#define S3C_GOTGCTL_DEVHNPEN			(1 << 11)
+#define S3C_GOTGCTL_HSSETHNPEN			(1 << 10)
+#define S3C_GOTGCTL_HNPREQ			(1 << 9)
+#define S3C_GOTGCTL_HSTNEGSCS			(1 << 8)
+#define S3C_GOTGCTL_SESREQ			(1 << 1)
+#define S3C_GOTGCTL_SESREQSCS			(1 << 0)
+
+#define S3C_GOTGINT				S3C_HSOTG_REG(0x004)
+#define S3C_GOTGINT_DbnceDone			(1 << 19)
+#define S3C_GOTGINT_ADevTOUTChg			(1 << 18)
+#define S3C_GOTGINT_HstNegDet			(1 << 17)
+#define S3C_GOTGINT_HstnegSucStsChng		(1 << 9)
+#define S3C_GOTGINT_SesReqSucStsChng		(1 << 8)
+#define S3C_GOTGINT_SesEndDet			(1 << 2)
+
+#define S3C_GAHBCFG				S3C_HSOTG_REG(0x008)
+#define S3C_GAHBCFG_PTxFEmpLvl			(1 << 8)
+#define S3C_GAHBCFG_NPTxFEmpLvl			(1 << 7)
+#define S3C_GAHBCFG_DMAEn			(1 << 5)
+#define S3C_GAHBCFG_HBstLen_MASK		(0xf << 1)
+#define S3C_GAHBCFG_HBstLen_SHIFT		(1)
+#define S3C_GAHBCFG_HBstLen_Single		(0x0 << 1)
+#define S3C_GAHBCFG_HBstLen_Incr		(0x1 << 1)
+#define S3C_GAHBCFG_HBstLen_Incr4		(0x3 << 1)
+#define S3C_GAHBCFG_HBstLen_Incr8		(0x5 << 1)
+#define S3C_GAHBCFG_HBstLen_Incr16		(0x7 << 1)
+#define S3C_GAHBCFG_GlblIntrEn			(1 << 0)
+
+#define S3C_GUSBCFG				S3C_HSOTG_REG(0x00C)
+#define S3C_GUSBCFG_PHYLPClkSel			(1 << 15)
+#define S3C_GUSBCFG_HNPCap			(1 << 9)
+#define S3C_GUSBCFG_SRPCap			(1 << 8)
+#define S3C_GUSBCFG_PHYIf16			(1 << 3)
+#define S3C_GUSBCFG_TOutCal_MASK		(0x7 << 0)
+#define S3C_GUSBCFG_TOutCal_SHIFT		(0)
+#define S3C_GUSBCFG_TOutCal_LIMIT		(0x7)
+#define S3C_GUSBCFG_TOutCal(_x)			((_x) << 0)
+
+#define S3C_GRSTCTL				S3C_HSOTG_REG(0x010)
+
+#define S3C_GRSTCTL_AHBIdle			(1 << 31)
+#define S3C_GRSTCTL_DMAReq			(1 << 30)
+#define S3C_GRSTCTL_TxFNum_MASK			(0x1f << 6)
+#define S3C_GRSTCTL_TxFNum_SHIFT		(6)
+#define S3C_GRSTCTL_TxFNum_LIMIT		(0x1f)
+#define S3C_GRSTCTL_TxFNum(_x)			((_x) << 6)
+#define S3C_GRSTCTL_TxFFlsh			(1 << 5)
+#define S3C_GRSTCTL_RxFFlsh			(1 << 4)
+#define S3C_GRSTCTL_INTknQFlsh			(1 << 3)
+#define S3C_GRSTCTL_FrmCntrRst			(1 << 2)
+#define S3C_GRSTCTL_HSftRst			(1 << 1)
+#define S3C_GRSTCTL_CSftRst			(1 << 0)
+
+#define S3C_GINTSTS				S3C_HSOTG_REG(0x014)
+#define S3C_GINTMSK				S3C_HSOTG_REG(0x018)
+
+#define S3C_GINTSTS_WkUpInt			(1 << 31)
+#define S3C_GINTSTS_SessReqInt			(1 << 30)
+#define S3C_GINTSTS_DisconnInt			(1 << 29)
+#define S3C_GINTSTS_ConIDStsChng		(1 << 28)
+#define S3C_GINTSTS_PTxFEmp			(1 << 26)
+#define S3C_GINTSTS_HChInt			(1 << 25)
+#define S3C_GINTSTS_PrtInt			(1 << 24)
+#define S3C_GINTSTS_FetSusp			(1 << 22)
+#define S3C_GINTSTS_incompIP			(1 << 21)
+#define S3C_GINTSTS_IncomplSOIN			(1 << 20)
+#define S3C_GINTSTS_OEPInt			(1 << 19)
+#define S3C_GINTSTS_IEPInt			(1 << 18)
+#define S3C_GINTSTS_EPMis			(1 << 17)
+#define S3C_GINTSTS_EOPF			(1 << 15)
+#define S3C_GINTSTS_ISOutDrop			(1 << 14)
+#define S3C_GINTSTS_EnumDone			(1 << 13)
+#define S3C_GINTSTS_USBRst			(1 << 12)
+#define S3C_GINTSTS_USBSusp			(1 << 11)
+#define S3C_GINTSTS_ErlySusp			(1 << 10)
+#define S3C_GINTSTS_GOUTNakEff			(1 << 7)
+#define S3C_GINTSTS_GINNakEff			(1 << 6)
+#define S3C_GINTSTS_NPTxFEmp			(1 << 5)
+#define S3C_GINTSTS_RxFLvl			(1 << 4)
+#define S3C_GINTSTS_SOF				(1 << 3)
+#define S3C_GINTSTS_OTGInt			(1 << 2)
+#define S3C_GINTSTS_ModeMis			(1 << 1)
+#define S3C_GINTSTS_CurMod_Host			(1 << 0)
+
+#define S3C_GRXSTSR				S3C_HSOTG_REG(0x01C)
+#define S3C_GRXSTSP				S3C_HSOTG_REG(0x020)
+
+#define S3C_GRXSTS_FN_MASK			(0x7f << 25)
+#define S3C_GRXSTS_FN_SHIFT			(25)
+
+#define S3C_GRXSTS_PktSts_MASK			(0xf << 17)
+#define S3C_GRXSTS_PktSts_SHIFT			(17)
+#define S3C_GRXSTS_PktSts_GlobalOutNAK		(0x1 << 17)
+#define S3C_GRXSTS_PktSts_OutRX			(0x2 << 17)
+#define S3C_GRXSTS_PktSts_OutDone		(0x3 << 17)
+#define S3C_GRXSTS_PktSts_SetupDone		(0x4 << 17)
+#define S3C_GRXSTS_PktSts_SetupRX		(0x6 << 17)
+
+#define S3C_GRXSTS_DPID_MASK			(0x3 << 15)
+#define S3C_GRXSTS_DPID_SHIFT			(15)
+#define S3C_GRXSTS_ByteCnt_MASK			(0x7ff << 4)
+#define S3C_GRXSTS_ByteCnt_SHIFT		(4)
+#define S3C_GRXSTS_EPNum_MASK			(0xf << 0)
+#define S3C_GRXSTS_EPNum_SHIFT			(0)
+
+#define S3C_GRXFSIZ				S3C_HSOTG_REG(0x024)
+
+#define S3C_GNPTXFSIZ				S3C_HSOTG_REG(0x028)
+
+#define S3C_GNPTXFSIZ_NPTxFDep_MASK		(0xffff << 16)
+#define S3C_GNPTXFSIZ_NPTxFDep_SHIFT		(16)
+#define S3C_GNPTXFSIZ_NPTxFDep_LIMIT		(0xffff)
+#define S3C_GNPTXFSIZ_NPTxFDep(_x)		((_x) << 16)
+#define S3C_GNPTXFSIZ_NPTxFStAddr_MASK		(0xffff << 0)
+#define S3C_GNPTXFSIZ_NPTxFStAddr_SHIFT		(0)
+#define S3C_GNPTXFSIZ_NPTxFStAddr_LIMIT		(0xffff)
+#define S3C_GNPTXFSIZ_NPTxFStAddr(_x)		((_x) << 0)
+
+#define S3C_GNPTXSTS				S3C_HSOTG_REG(0x02C)
+
+#define S3C_GNPTXSTS_NPtxQTop_MASK		(0x7f << 24)
+#define S3C_GNPTXSTS_NPtxQTop_SHIFT		(24)
+
+#define S3C_GNPTXSTS_NPTxQSpcAvail_MASK		(0xff << 16)
+#define S3C_GNPTXSTS_NPTxQSpcAvail_SHIFT	(16)
+#define S3C_GNPTXSTS_NPTxQSpcAvail_GET(_v)	(((_v) >> 16) & 0xff)
+
+#define S3C_GNPTXSTS_NPTxFSpcAvail_MASK		(0xffff << 0)
+#define S3C_GNPTXSTS_NPTxFSpcAvail_SHIFT	(0)
+#define S3C_GNPTXSTS_NPTxFSpcAvail_GET(_v)	(((_v) >> 0) & 0xffff)
+
+
+#define S3C_HPTXFSIZ				S3C_HSOTG_REG(0x100)
+
+#define S3C_DPTXFSIZn(_a)			S3C_HSOTG_REG(0x104 + (((_a) - 1) * 4))
+
+#define S3C_DPTXFSIZn_DPTxFSize_MASK		(0xffff << 16)
+#define S3C_DPTXFSIZn_DPTxFSize_SHIFT		(16)
+#define S3C_DPTXFSIZn_DPTxFSize_GET(_v)		(((_v) >> 16) & 0xffff)
+#define S3C_DPTXFSIZn_DPTxFSize_LIMIT		(0xffff)
+#define S3C_DPTXFSIZn_DPTxFSize(_x)		((_x) << 16)
+
+#define S3C_DPTXFSIZn_DPTxFStAddr_MASK		(0xffff << 0)
+#define S3C_DPTXFSIZn_DPTxFStAddr_SHIFT		(0)
+
+/* Device mode registers */
+#define S3C_DCFG				S3C_HSOTG_REG(0x800)
+
+#define S3C_DCFG_EPMisCnt_MASK			(0x1f << 18)
+#define S3C_DCFG_EPMisCnt_SHIFT			(18)
+#define S3C_DCFG_EPMisCnt_LIMIT			(0x1f)
+#define S3C_DCFG_EPMisCnt(_x)			((_x) << 18)
+
+#define S3C_DCFG_PerFrInt_MASK			(0x3 << 11)
+#define S3C_DCFG_PerFrInt_SHIFT			(11)
+#define S3C_DCFG_PerFrInt_LIMIT			(0x3)
+#define S3C_DCFG_PerFrInt(_x)			((_x) << 11)
+
+#define S3C_DCFG_DevAddr_MASK			(0x7f << 4)
+#define S3C_DCFG_DevAddr_SHIFT			(4)
+#define S3C_DCFG_DevAddr_LIMIT			(0x7f)
+#define S3C_DCFG_DevAddr(_x)			((_x) << 4)
+
+#define S3C_DCFG_NZStsOUTHShk			(1 << 2)
+
+#define S3C_DCFG_DevSpd_MASK			(0x3 << 0)
+#define S3C_DCFG_DevSpd_SHIFT			(0)
+#define S3C_DCFG_DevSpd_HS			(0x0 << 0)
+#define S3C_DCFG_DevSpd_FS			(0x1 << 0)
+#define S3C_DCFG_DevSpd_LS			(0x2 << 0)
+#define S3C_DCFG_DevSpd_FS48			(0x3 << 0)
+
+#define S3C_DCTL				S3C_HSOTG_REG(0x804)
+
+#define S3C_DCTL_PWROnPrgDone			(1 << 11)
+#define S3C_DCTL_CGOUTNak			(1 << 10)
+#define S3C_DCTL_SGOUTNak			(1 << 9)
+#define S3C_DCTL_CGNPInNAK			(1 << 8)
+#define S3C_DCTL_SGNPInNAK			(1 << 7)
+#define S3C_DCTL_TstCtl_MASK			(0x7 << 4)
+#define S3C_DCTL_TstCtl_SHIFT			(4)
+#define S3C_DCTL_GOUTNakSts			(1 << 3)
+#define S3C_DCTL_GNPINNakSts			(1 << 2)
+#define S3C_DCTL_SftDiscon			(1 << 1)
+#define S3C_DCTL_RmtWkUpSig			(1 << 0)
+
+#define S3C_DSTS				S3C_HSOTG_REG(0x808)
+
+#define S3C_DSTS_SOFFN_MASK			(0x3fff << 8)
+#define S3C_DSTS_SOFFN_SHIFT			(8)
+#define S3C_DSTS_SOFFN_LIMIT			(0x3fff)
+#define S3C_DSTS_SOFFN(_x)			((_x) << 8)
+#define S3C_DSTS_ErraticErr			(1 << 3)
+#define S3C_DSTS_EnumSpd_MASK			(0x3 << 1)
+#define S3C_DSTS_EnumSpd_SHIFT			(1)
+#define S3C_DSTS_EnumSpd_HS			(0x0 << 1)
+#define S3C_DSTS_EnumSpd_FS			(0x1 << 1)
+#define S3C_DSTS_EnumSpd_LS			(0x2 << 1)
+#define S3C_DSTS_EnumSpd_FS48			(0x3 << 1)
+
+#define S3C_DSTS_SuspSts			(1 << 0)
+
+#define S3C_DIEPMSK				S3C_HSOTG_REG(0x810)
+
+#define S3C_DIEPMSK_INEPNakEffMsk		(1 << 6)
+#define S3C_DIEPMSK_INTknEPMisMsk		(1 << 5)
+#define S3C_DIEPMSK_INTknTXFEmpMsk		(1 << 4)
+#define S3C_DIEPMSK_TimeOUTMsk			(1 << 3)
+#define S3C_DIEPMSK_AHBErrMsk			(1 << 2)
+#define S3C_DIEPMSK_EPDisbldMsk			(1 << 1)
+#define S3C_DIEPMSK_XferComplMsk		(1 << 0)
+
+#define S3C_DOEPMSK				S3C_HSOTG_REG(0x814)
+
+#define S3C_DOEPMSK_Back2BackSetup		(1 << 6)
+#define S3C_DOEPMSK_OUTTknEPdisMsk		(1 << 4)
+#define S3C_DOEPMSK_SetupMsk			(1 << 3)
+#define S3C_DOEPMSK_AHBErrMsk			(1 << 2)
+#define S3C_DOEPMSK_EPDisbldMsk			(1 << 1)
+#define S3C_DOEPMSK_XferComplMsk		(1 << 0)
+
+#define S3C_DAINT				S3C_HSOTG_REG(0x818)
+#define S3C_DAINTMSK				S3C_HSOTG_REG(0x81C)
+
+#define S3C_DAINT_OutEP_SHIFT			(16)
+#define S3C_DAINT_OutEP(x)			(1 << ((x) + 16))
+#define S3C_DAINT_InEP(x)			(1 << (x))
+
+#define S3C_DTKNQR1				S3C_HSOTG_REG(0x820)
+#define S3C_DTKNQR2				S3C_HSOTG_REG(0x824)
+#define S3C_DTKNQR3				S3C_HSOTG_REG(0x830)
+#define S3C_DTKNQR4				S3C_HSOTG_REG(0x834)
+
+#define S3C_DVBUSDIS				S3C_HSOTG_REG(0x828)
+#define S3C_DVBUSPULSE				S3C_HSOTG_REG(0x82C)
+
+#define S3C_DIEPCTL0				S3C_HSOTG_REG(0x900)
+#define S3C_DOEPCTL0				S3C_HSOTG_REG(0xB00)
+#define S3C_DIEPCTL(_a)				S3C_HSOTG_REG(0x900 + ((_a) * 0x20))
+#define S3C_DOEPCTL(_a)				S3C_HSOTG_REG(0xB00 + ((_a) * 0x20))
+
+/* EP0 specialness:
+ * bits[29..28] - reserved (no SetD0PID, SetD1PID)
+ * bits[25..22] - should always be zero, this isn't a periodic endpoint
+ * bits[10..0] - MPS setting different for EP0
+*/
+#define S3C_D0EPCTL_MPS_MASK			(0x3 << 0)
+#define S3C_D0EPCTL_MPS_SHIFT			(0)
+#define S3C_D0EPCTL_MPS_64			(0x0 << 0)
+#define S3C_D0EPCTL_MPS_32			(0x1 << 0)
+#define S3C_D0EPCTL_MPS_16			(0x2 << 0)
+#define S3C_D0EPCTL_MPS_8			(0x3 << 0)
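+
+/* Purely illustrative sketch (not taken from any driver): a device
+ * driver might enable EP0 for 64-byte packets with something like
+ *	writel(S3C_DxEPCTL_EPEna | S3C_DxEPCTL_CNAK | S3C_D0EPCTL_MPS_64,
+ *	       regs + S3C_DOEPCTL0);
+ * where "regs" is the (hypothetical) ioremapped base of the OtG block.
+ */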
+
+#define S3C_DxEPCTL_EPEna			(1 << 31)
+#define S3C_DxEPCTL_EPDis			(1 << 30)
+#define S3C_DxEPCTL_SetD1PID			(1 << 29)
+#define S3C_DxEPCTL_SetOddFr			(1 << 29)
+#define S3C_DxEPCTL_SetD0PID			(1 << 28)
+#define S3C_DxEPCTL_SetEvenFr			(1 << 28)
+#define S3C_DxEPCTL_SNAK			(1 << 27)
+#define S3C_DxEPCTL_CNAK			(1 << 26)
+#define S3C_DxEPCTL_TxFNum_MASK			(0xf << 22)
+#define S3C_DxEPCTL_TxFNum_SHIFT		(22)
+#define S3C_DxEPCTL_TxFNum_LIMIT		(0xf)
+#define S3C_DxEPCTL_TxFNum(_x)			((_x) << 22)
+
+#define S3C_DxEPCTL_Stall			(1 << 21)
+#define S3C_DxEPCTL_Snp				(1 << 20)
+#define S3C_DxEPCTL_EPType_MASK			(0x3 << 18)
+#define S3C_DxEPCTL_EPType_SHIFT		(18)
+#define S3C_DxEPCTL_EPType_Control		(0x0 << 18)
+#define S3C_DxEPCTL_EPType_Iso			(0x1 << 18)
+#define S3C_DxEPCTL_EPType_Bulk			(0x2 << 18)
+#define S3C_DxEPCTL_EPType_Intterupt		(0x3 << 18)
+
+#define S3C_DxEPCTL_NAKsts			(1 << 17)
+#define S3C_DxEPCTL_DPID			(1 << 16)
+#define S3C_DxEPCTL_EOFrNum			(1 << 16)
+#define S3C_DxEPCTL_USBActEp			(1 << 15)
+#define S3C_DxEPCTL_NextEp_MASK			(0xf << 11)
+#define S3C_DxEPCTL_NextEp_SHIFT		(11)
+#define S3C_DxEPCTL_NextEp_LIMIT		(0xf)
+#define S3C_DxEPCTL_NextEp(_x)			((_x) << 11)
+
+#define S3C_DxEPCTL_MPS_MASK			(0x7ff << 0)
+#define S3C_DxEPCTL_MPS_SHIFT			(0)
+#define S3C_DxEPCTL_MPS_LIMIT			(0x7ff)
+#define S3C_DxEPCTL_MPS(_x)			((_x) << 0)
+
+#define S3C_DIEPINT(_a)				S3C_HSOTG_REG(0x908 + ((_a) * 0x20))
+#define S3C_DOEPINT(_a)				S3C_HSOTG_REG(0xB08 + ((_a) * 0x20))
+
+#define S3C_DxEPINT_INEPNakEff			(1 << 6)
+#define S3C_DxEPINT_Back2BackSetup		(1 << 6)
+#define S3C_DxEPINT_INTknEPMis			(1 << 5)
+#define S3C_DxEPINT_INTknTXFEmp			(1 << 4)
+#define S3C_DxEPINT_OUTTknEPdis			(1 << 4)
+#define S3C_DxEPINT_Timeout			(1 << 3)
+#define S3C_DxEPINT_Setup			(1 << 3)
+#define S3C_DxEPINT_AHBErr			(1 << 2)
+#define S3C_DxEPINT_EPDisbld			(1 << 1)
+#define S3C_DxEPINT_XferCompl			(1 << 0)
+
+#define S3C_DIEPTSIZ0				S3C_HSOTG_REG(0x910)
+
+#define S3C_DIEPTSIZ0_PktCnt_MASK		(0x3 << 19)
+#define S3C_DIEPTSIZ0_PktCnt_SHIFT		(19)
+#define S3C_DIEPTSIZ0_PktCnt_LIMIT		(0x3)
+#define S3C_DIEPTSIZ0_PktCnt(_x)		((_x) << 19)
+
+#define S3C_DIEPTSIZ0_XferSize_MASK		(0x7f << 0)
+#define S3C_DIEPTSIZ0_XferSize_SHIFT		(0)
+#define S3C_DIEPTSIZ0_XferSize_LIMIT		(0x7f)
+#define S3C_DIEPTSIZ0_XferSize(_x)		((_x) << 0)
+
+
+#define DOEPTSIZ0				S3C_HSOTG_REG(0xB10)
+#define S3C_DOEPTSIZ0_SUPCnt_MASK		(0x3 << 29)
+#define S3C_DOEPTSIZ0_SUPCnt_SHIFT		(29)
+#define S3C_DOEPTSIZ0_SUPCnt_LIMIT		(0x3)
+#define S3C_DOEPTSIZ0_SUPCnt(_x)		((_x) << 29)
+
+#define S3C_DOEPTSIZ0_PktCnt			(1 << 19)
+#define S3C_DOEPTSIZ0_XferSize_MASK		(0x7f << 0)
+#define S3C_DOEPTSIZ0_XferSize_SHIFT		(0)
+
+#define S3C_DIEPTSIZ(_a)			S3C_HSOTG_REG(0x910 + ((_a) * 0x20))
+#define S3C_DOEPTSIZ(_a)			S3C_HSOTG_REG(0xB10 + ((_a) * 0x20))
+
+#define S3C_DxEPTSIZ_MC_MASK			(0x3 << 29)
+#define S3C_DxEPTSIZ_MC_SHIFT			(29)
+#define S3C_DxEPTSIZ_MC_LIMIT			(0x3)
+#define S3C_DxEPTSIZ_MC(_x)			((_x) << 29)
+
+#define S3C_DxEPTSIZ_PktCnt_MASK		(0x3ff << 19)
+#define S3C_DxEPTSIZ_PktCnt_SHIFT		(19)
+#define S3C_DxEPTSIZ_PktCnt_GET(_v)		(((_v) >> 19) & 0x3ff)
+#define S3C_DxEPTSIZ_PktCnt_LIMIT		(0x3ff)
+#define S3C_DxEPTSIZ_PktCnt(_x)			((_x) << 19)
+
+#define S3C_DxEPTSIZ_XferSize_MASK		(0x7ffff << 0)
+#define S3C_DxEPTSIZ_XferSize_SHIFT		(0)
+#define S3C_DxEPTSIZ_XferSize_GET(_v)		(((_v) >> 0) & 0x7ffff)
+#define S3C_DxEPTSIZ_XferSize_LIMIT		(0x7ffff)
+#define S3C_DxEPTSIZ_XferSize(_x)		((_x) << 0)
+
+
+#define S3C_DIEPDMA(_a)				S3C_HSOTG_REG(0x914 + ((_a) * 0x20))
+#define S3C_DOEPDMA(_a)				S3C_HSOTG_REG(0xB14 + ((_a) * 0x20))
+
+#define S3C_EPFIFO(_a)				S3C_HSOTG_REG(0x1000 + ((_a) * 0x1000))
+
+#endif /* __PLAT_S3C64XX_REGS_USB_HSOTG_H */
diff --git a/arch/blackfin/Kconfig b/arch/blackfin/Kconfig
index a60cfe7..8ea0d94 100644
--- a/arch/blackfin/Kconfig
+++ b/arch/blackfin/Kconfig
@@ -6,59 +6,65 @@
 mainmenu "Blackfin Kernel Configuration"
 
 config MMU
-	bool
-	default n
+	def_bool n
 
 config FPU
-	bool
-	default n
+	def_bool n
 
 config RWSEM_GENERIC_SPINLOCK
-	bool
-	default y
+	def_bool y
 
 config RWSEM_XCHGADD_ALGORITHM
-	bool
-	default n
+	def_bool n
 
 config BLACKFIN
-	bool
-	default y
+	def_bool y
+	select HAVE_FUNCTION_GRAPH_TRACER
+	select HAVE_FUNCTION_TRACER
 	select HAVE_IDE
+	select HAVE_KERNEL_GZIP
+	select HAVE_KERNEL_BZIP2
+	select HAVE_KERNEL_LZMA
 	select HAVE_OPROFILE
 	select ARCH_WANT_OPTIONAL_GPIOLIB
 
+config GENERIC_BUG
+	def_bool y
+	depends on BUG
+
 config ZONE_DMA
-	bool
-	default y
+	def_bool y
 
 config GENERIC_FIND_NEXT_BIT
-	bool
-	default y
+	def_bool y
 
 config GENERIC_HWEIGHT
-	bool
-	default y
+	def_bool y
 
 config GENERIC_HARDIRQS
-	bool
-	default y
+	def_bool y
 
 config GENERIC_IRQ_PROBE
-	bool
-	default y
+	def_bool y
 
 config GENERIC_GPIO
-	bool
-	default y
+	def_bool y
 
 config FORCE_MAX_ZONEORDER
 	int
 	default "14"
 
 config GENERIC_CALIBRATE_DELAY
-	bool
-	default y
+	def_bool y
+
+config LOCKDEP_SUPPORT
+	def_bool y
+
+config STACKTRACE_SUPPORT
+	def_bool y
+
+config TRACE_IRQFLAGS_SUPPORT
+	def_bool y
 
 source "init/Kconfig"
 
@@ -408,12 +414,12 @@
 
 config CLKIN_HZ
 	int "Frequency of the crystal on the board in Hz"
-	default "11059200" if BFIN533_STAMP
-	default "27000000" if BFIN533_EZKIT
-	default "25000000" if (BFIN537_STAMP || BFIN527_EZKIT || H8606_HVSISTEMAS || BLACKSTAMP || BFIN526_EZBRD || BFIN538_EZKIT || BFIN518F-EZBRD)
-	default "30000000" if BFIN561_EZKIT
-	default "24576000" if PNAV10
 	default "10000000" if BFIN532_IP0X
+	default "11059200" if BFIN533_STAMP
+	default "24576000" if PNAV10
+	default "25000000" # most people use this
+	default "27000000" if BFIN533_EZKIT
+	default "30000000" if BFIN561_EZKIT
 	help
 	  The frequency of CLKIN crystal oscillator on the board in Hz.
 	  Warning: This value should match the crystal on the board. Otherwise,
diff --git a/arch/blackfin/Makefile b/arch/blackfin/Makefile
index d54c828..6f9533c 100644
--- a/arch/blackfin/Makefile
+++ b/arch/blackfin/Makefile
@@ -137,7 +137,7 @@
 
 INSTALL_PATH ?= /tftpboot
 boot := arch/$(ARCH)/boot
-BOOT_TARGETS = vmImage
+BOOT_TARGETS = vmImage vmImage.bz2 vmImage.gz vmImage.lzma
 PHONY += $(BOOT_TARGETS) install
 KBUILD_IMAGE := $(boot)/vmImage
 
@@ -150,7 +150,10 @@
 	$(Q)$(MAKE) $(build)=$(boot) BOOTIMAGE=$(KBUILD_IMAGE) install
 
 define archhelp
-  echo  '* vmImage         - Kernel-only image for U-Boot (arch/$(ARCH)/boot/vmImage)'
+  echo  '* vmImage         - Alias to selected kernel format (vmImage.gz by default)'
+  echo  '  vmImage.bz2     - Kernel-only image for U-Boot (arch/$(ARCH)/boot/vmImage.bz2)'
+  echo  '* vmImage.gz      - Kernel-only image for U-Boot (arch/$(ARCH)/boot/vmImage.gz)'
+  echo  '  vmImage.lzma    - Kernel-only image for U-Boot (arch/$(ARCH)/boot/vmImage.lzma)'
   echo  '  install         - Install kernel using'
   echo  '                     (your) ~/bin/$(CROSS_COMPILE)installkernel or'
   echo  '                     (distribution) PATH: $(CROSS_COMPILE)installkernel or'
diff --git a/arch/blackfin/boot/.gitignore b/arch/blackfin/boot/.gitignore
index 3ae0399..229e508 100644
--- a/arch/blackfin/boot/.gitignore
+++ b/arch/blackfin/boot/.gitignore
@@ -1 +1,2 @@
-+vmImage
+vmImage*
+vmlinux*
diff --git a/arch/blackfin/boot/Makefile b/arch/blackfin/boot/Makefile
index e028d13..3ab6f23 100644
--- a/arch/blackfin/boot/Makefile
+++ b/arch/blackfin/boot/Makefile
@@ -8,24 +8,41 @@
 
 MKIMAGE := $(srctree)/scripts/mkuboot.sh
 
-targets := vmImage
-extra-y += vmlinux.bin vmlinux.gz
+targets := vmImage vmImage.bz2 vmImage.gz vmImage.lzma
+extra-y += vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2 vmlinux.bin.lzma
 
 quiet_cmd_uimage = UIMAGE  $@
       cmd_uimage = $(CONFIG_SHELL) $(MKIMAGE) -A $(ARCH) -O linux -T kernel \
-                   -C gzip -n 'Linux-$(KERNELRELEASE)' -a $(CONFIG_BOOT_LOAD) \
+                   -C $(2) -n 'Linux-$(KERNELRELEASE)' -a $(CONFIG_BOOT_LOAD) \
                    -e $(shell $(NM) vmlinux | awk '$$NF == "__start" {print $$1}') \
                    -d $< $@
 
 $(obj)/vmlinux.bin: vmlinux FORCE
 	$(call if_changed,objcopy)
 
-$(obj)/vmlinux.gz: $(obj)/vmlinux.bin FORCE
+$(obj)/vmlinux.bin.gz: $(obj)/vmlinux.bin FORCE
 	$(call if_changed,gzip)
 
-$(obj)/vmImage: $(obj)/vmlinux.gz
-	$(call if_changed,uimage)
-	@$(kecho) 'Kernel: $@ is ready'
+$(obj)/vmlinux.bin.bz2: $(obj)/vmlinux.bin FORCE
+	$(call if_changed,bzip2)
+
+$(obj)/vmlinux.bin.lzma: $(obj)/vmlinux.bin FORCE
+	$(call if_changed,lzma)
+
+$(obj)/vmImage.bz2: $(obj)/vmlinux.bin.bz2
+	$(call if_changed,uimage,bzip2)
+
+$(obj)/vmImage.gz: $(obj)/vmlinux.bin.gz
+	$(call if_changed,uimage,gzip)
+
+$(obj)/vmImage.lzma: $(obj)/vmlinux.bin.lzma
+	$(call if_changed,uimage,lzma)
+
+suffix-$(CONFIG_KERNEL_GZIP)  := gz
+suffix-$(CONFIG_KERNEL_BZIP2) := bz2
+suffix-$(CONFIG_KERNEL_LZMA)  := lzma
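+
+# A plain "make vmImage" follows the CONFIG_KERNEL_* compression choice
+# above; e.g. "make vmImage.lzma" can be used to build the LZMA image
+# directly.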
+$(obj)/vmImage: $(obj)/vmImage.$(suffix-y)
+	@ln -sf $(notdir $<) $@
 
 install:
 	sh $(srctree)/$(src)/install.sh $(KERNELRELEASE) $(BOOTIMAGE) System.map "$(INSTALL_PATH)"
diff --git a/arch/blackfin/include/asm/atomic.h b/arch/blackfin/include/asm/atomic.h
index 7bbf44e..b1d92f1 100644
--- a/arch/blackfin/include/asm/atomic.h
+++ b/arch/blackfin/include/asm/atomic.h
@@ -90,7 +90,7 @@
 
 static inline void atomic_add(int i, atomic_t *v)
 {
-	long flags;
+	unsigned long flags;
 
 	local_irq_save_hw(flags);
 	v->counter += i;
@@ -99,7 +99,7 @@
 
 static inline void atomic_sub(int i, atomic_t *v)
 {
-	long flags;
+	unsigned long flags;
 
 	local_irq_save_hw(flags);
 	v->counter -= i;
@@ -110,7 +110,7 @@
 static inline int atomic_add_return(int i, atomic_t *v)
 {
 	int __temp = 0;
-	long flags;
+	unsigned long flags;
 
 	local_irq_save_hw(flags);
 	v->counter += i;
@@ -124,7 +124,7 @@
 static inline int atomic_sub_return(int i, atomic_t *v)
 {
 	int __temp = 0;
-	long flags;
+	unsigned long flags;
 
 	local_irq_save_hw(flags);
 	v->counter -= i;
@@ -136,7 +136,7 @@
 
 static inline void atomic_inc(volatile atomic_t *v)
 {
-	long flags;
+	unsigned long flags;
 
 	local_irq_save_hw(flags);
 	v->counter++;
@@ -145,7 +145,7 @@
 
 static inline void atomic_dec(volatile atomic_t *v)
 {
-	long flags;
+	unsigned long flags;
 
 	local_irq_save_hw(flags);
 	v->counter--;
@@ -154,7 +154,7 @@
 
 static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
 {
-	long flags;
+	unsigned long flags;
 
 	local_irq_save_hw(flags);
 	v->counter &= ~mask;
@@ -163,7 +163,7 @@
 
 static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
 {
-	long flags;
+	unsigned long flags;
 
 	local_irq_save_hw(flags);
 	v->counter |= mask;
diff --git a/arch/blackfin/include/asm/bfin-global.h b/arch/blackfin/include/asm/bfin-global.h
index daffc06..e39277e 100644
--- a/arch/blackfin/include/asm/bfin-global.h
+++ b/arch/blackfin/include/asm/bfin-global.h
@@ -31,7 +31,7 @@
 
 #ifndef __ASSEMBLY__
 
-#include <asm-generic/sections.h>
+#include <asm/sections.h>
 #include <asm/ptrace.h>
 #include <asm/user.h>
 #include <linux/linkage.h>
@@ -99,15 +99,6 @@
 extern unsigned long bfin_sic_iwr[];
 extern unsigned vr_wakeup;
 extern u16 _bfin_swrst; /* shadow for Software Reset Register (SWRST) */
-extern unsigned long _ramstart, _ramend, _rambase;
-extern unsigned long memory_start, memory_end, physical_mem_end;
-extern char _stext_l1[], _etext_l1[], _sdata_l1[], _edata_l1[], _sbss_l1[],
-	_ebss_l1[], _l1_lma_start[], _sdata_b_l1[], _sbss_b_l1[], _ebss_b_l1[],
-	_stext_l2[], _etext_l2[], _sdata_l2[], _edata_l2[], _sbss_l2[],
-	_ebss_l2[], _l2_lma_start[];
-
-/* only used when MTD_UCLINUX */
-extern unsigned long memory_mtd_start, memory_mtd_end, mtd_size;
 
 #ifdef CONFIG_BFIN_ICACHE_LOCK
 extern void cache_grab_lock(int way);
diff --git a/arch/blackfin/include/asm/bitops.h b/arch/blackfin/include/asm/bitops.h
index 21b036e..75fee2f 100644
--- a/arch/blackfin/include/asm/bitops.h
+++ b/arch/blackfin/include/asm/bitops.h
@@ -109,7 +109,8 @@
 
 static inline void change_bit(int nr, volatile unsigned long *addr)
 {
-	int mask, flags;
+	int mask;
+	unsigned long flags;
 	unsigned long *ADDR = (unsigned long *)addr;
 
 	ADDR += nr >> 5;
diff --git a/arch/blackfin/include/asm/bug.h b/arch/blackfin/include/asm/bug.h
index 6d3e11b..655e495 100644
--- a/arch/blackfin/include/asm/bug.h
+++ b/arch/blackfin/include/asm/bug.h
@@ -2,13 +2,58 @@
 #define _BLACKFIN_BUG_H
 
 #ifdef CONFIG_BUG
-#define HAVE_ARCH_BUG
 
-#define BUG() do { \
-	dump_bfin_trace_buffer(); \
-	printk(KERN_EMERG "BUG: failure at %s:%d/%s()!\n", __FILE__, __LINE__, __func__); \
-	panic("BUG!"); \
-} while (0)
+#define BFIN_BUG_OPCODE	0xefcd
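+
+/*
+ * _BUG_OR_WARN() plants BFIN_BUG_OPCODE (an illegal instruction that
+ * raises an exception) at the call site and records a struct bug_entry
+ * (the trapping address, plus file, line and flags when
+ * CONFIG_DEBUG_BUGVERBOSE is set) in the __bug_table section, so the
+ * trap handler can identify and report the failure.
+ */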
+
+#ifdef CONFIG_DEBUG_BUGVERBOSE
+
+#define _BUG_OR_WARN(flags)						\
+	asm volatile(							\
+		"1:	.hword	%0\n"					\
+		"	.section __bug_table,\"a\",@progbits\n"		\
+		"2:	.long	1b\n"					\
+		"	.long	%1\n"					\
+		"	.short	%2\n"					\
+		"	.short	%3\n"					\
+		"	.org	2b + %4\n"				\
+		"	.previous"					\
+		:							\
+		: "i"(BFIN_BUG_OPCODE), "i"(__FILE__),			\
+		  "i"(__LINE__), "i"(flags),				\
+		  "i"(sizeof(struct bug_entry)))
+
+#else
+
+#define _BUG_OR_WARN(flags)						\
+	asm volatile(							\
+		"1:	.hword	%0\n"					\
+		"	.section __bug_table,\"a\",@progbits\n"		\
+		"2:	.long	1b\n"					\
+		"	.short	%1\n"					\
+		"	.org	2b + %2\n"				\
+		"	.previous"					\
+		:							\
+		: "i"(BFIN_BUG_OPCODE), "i"(flags),			\
+		  "i"(sizeof(struct bug_entry)))
+
+#endif /* CONFIG_DEBUG_BUGVERBOSE */
+
+#define BUG()								\
+	do {								\
+		_BUG_OR_WARN(0);					\
+		for (;;);						\
+	} while (0)
+
+#define WARN_ON(condition)							\
+	({								\
+		int __ret_warn_on = !!(condition);			\
+		if (unlikely(__ret_warn_on))				\
+			_BUG_OR_WARN(BUGFLAG_WARNING);			\
+		unlikely(__ret_warn_on);				\
+	})
+
+#define HAVE_ARCH_BUG
+#define HAVE_ARCH_WARN_ON
 
 #endif
 
diff --git a/arch/blackfin/include/asm/cache.h b/arch/blackfin/include/asm/cache.h
index 8663781..2ef669e 100644
--- a/arch/blackfin/include/asm/cache.h
+++ b/arch/blackfin/include/asm/cache.h
@@ -34,9 +34,13 @@
 #define L1_CACHE_SHIFT_MAX	5
 
 #if defined(CONFIG_SMP) && \
-    !defined(CONFIG_BFIN_CACHE_COHERENT) && \
-    defined(CONFIG_BFIN_DCACHE)
-#define __ARCH_SYNC_CORE_DCACHE
+    !defined(CONFIG_BFIN_CACHE_COHERENT)
+# if defined(CONFIG_BFIN_ICACHE)
+# define __ARCH_SYNC_CORE_ICACHE
+# endif
+# if defined(CONFIG_BFIN_DCACHE)
+# define __ARCH_SYNC_CORE_DCACHE
+# endif
 #ifndef __ASSEMBLY__
 asmlinkage void __raw_smp_mark_barrier_asm(void);
 asmlinkage void __raw_smp_check_barrier_asm(void);
@@ -51,6 +55,7 @@
 }
 
 void resync_core_dcache(void);
+void resync_core_icache(void);
 #endif
 #endif
 
diff --git a/arch/blackfin/include/asm/cacheflush.h b/arch/blackfin/include/asm/cacheflush.h
index 94697f0..5c17dee 100644
--- a/arch/blackfin/include/asm/cacheflush.h
+++ b/arch/blackfin/include/asm/cacheflush.h
@@ -37,6 +37,7 @@
 extern void blackfin_dcache_invalidate_range(unsigned long start_address, unsigned long end_address);
 extern void blackfin_dflush_page(void *page);
 extern void blackfin_invalidate_entire_dcache(void);
+extern void blackfin_invalidate_entire_icache(void);
 
 #define flush_dcache_mmap_lock(mapping)		do { } while (0)
 #define flush_dcache_mmap_unlock(mapping)	do { } while (0)
@@ -97,7 +98,7 @@
 extern unsigned long reserved_mem_dcache_on;
 extern unsigned long reserved_mem_icache_on;
 
-static inline int bfin_addr_dcachable(unsigned long addr)
+static inline int bfin_addr_dcacheable(unsigned long addr)
 {
 #ifdef CONFIG_BFIN_DCACHE
 	if (addr < (_ramend - DMA_UNCACHED_REGION))
diff --git a/arch/blackfin/include/asm/cpu.h b/arch/blackfin/include/asm/cpu.h
index c2594ef..565b813 100644
--- a/arch/blackfin/include/asm/cpu.h
+++ b/arch/blackfin/include/asm/cpu.h
@@ -34,6 +34,7 @@
 	unsigned int dmemctl;
 	unsigned long loops_per_jiffy;
 	unsigned long dcache_invld_count;
+	unsigned long icache_invld_count;
 };
 
 DECLARE_PER_CPU(struct blackfin_cpudata, cpu_data);
diff --git a/arch/blackfin/include/asm/ftrace.h b/arch/blackfin/include/asm/ftrace.h
index 40a8c17..8643680 100644
--- a/arch/blackfin/include/asm/ftrace.h
+++ b/arch/blackfin/include/asm/ftrace.h
@@ -1 +1,13 @@
-/* empty */
+/*
+ * Blackfin ftrace code
+ *
+ * Copyright 2009 Analog Devices Inc.
+ * Licensed under the GPL-2 or later.
+ */
+
+#ifndef __ASM_BFIN_FTRACE_H__
+#define __ASM_BFIN_FTRACE_H__
+
+#define MCOUNT_INSN_SIZE	8 /* sizeof mcount call: LINK + CALL */
+
+#endif
diff --git a/arch/blackfin/include/asm/ipipe.h b/arch/blackfin/include/asm/ipipe.h
index 51d0bf5..bbe1c37 100644
--- a/arch/blackfin/include/asm/ipipe.h
+++ b/arch/blackfin/include/asm/ipipe.h
@@ -35,10 +35,10 @@
 #include <asm/atomic.h>
 #include <asm/traps.h>
 
-#define IPIPE_ARCH_STRING     "1.9-01"
+#define IPIPE_ARCH_STRING     "1.10-00"
 #define IPIPE_MAJOR_NUMBER    1
-#define IPIPE_MINOR_NUMBER    9
-#define IPIPE_PATCH_NUMBER    1
+#define IPIPE_MINOR_NUMBER    10
+#define IPIPE_PATCH_NUMBER    0
 
 #ifdef CONFIG_SMP
 #error "I-pipe/blackfin: SMP not implemented"
@@ -54,10 +54,11 @@
 
 #define task_hijacked(p)						\
 	({								\
-		int __x__ = ipipe_current_domain != ipipe_root_domain;	\
-		/* We would need to clear the SYNC flag for the root domain */ \
-		/* over the current processor in SMP mode. */		\
-		local_irq_enable_hw(); __x__;				\
+		int __x__ = __ipipe_root_domain_p;			\
+		__clear_bit(IPIPE_SYNC_FLAG, &ipipe_root_cpudom_var(status)); \
+		if (__x__)						\
+			local_irq_enable_hw();				\
+		!__x__;							\
 	})
 
 struct ipipe_domain;
@@ -179,23 +180,24 @@
 
 #define __ipipe_run_isr(ipd, irq)					\
 	do {								\
-		if (ipd == ipipe_root_domain) {				\
+		if (!__ipipe_pipeline_head_p(ipd))			\
 			local_irq_enable_hw();				\
-			if (ipipe_virtual_irq_p(irq))			\
+		if (ipd == ipipe_root_domain) {				\
+			if (unlikely(ipipe_virtual_irq_p(irq))) {	\
+				irq_enter();				\
 				ipd->irqs[irq].handler(irq, ipd->irqs[irq].cookie); \
-			else						\
+				irq_exit();				\
+			} else 						\
 				ipd->irqs[irq].handler(irq, &__raw_get_cpu_var(__ipipe_tick_regs)); \
-			local_irq_disable_hw();				\
 		} else {						\
 			__clear_bit(IPIPE_SYNC_FLAG, &ipipe_cpudom_var(ipd, status)); \
-			local_irq_enable_nohead(ipd);			\
 			ipd->irqs[irq].handler(irq, ipd->irqs[irq].cookie); \
 			/* Attempt to exit the outer interrupt level before \
 			 * starting the deferred IRQ processing. */	\
-			local_irq_disable_nohead(ipd);			\
 			__ipipe_run_irqtail();				\
 			__set_bit(IPIPE_SYNC_FLAG, &ipipe_cpudom_var(ipd, status)); \
 		}							\
+		local_irq_disable_hw();					\
 	} while (0)
 
 #define __ipipe_syscall_watched_p(p, sc)	\
diff --git a/arch/blackfin/include/asm/irq.h b/arch/blackfin/include/asm/irq.h
index 7645e85..400bdd5 100644
--- a/arch/blackfin/include/asm/irq.h
+++ b/arch/blackfin/include/asm/irq.h
@@ -17,270 +17,17 @@
 #ifndef _BFIN_IRQ_H_
 #define _BFIN_IRQ_H_
 
-/* SYS_IRQS and NR_IRQS are defined in <mach-bf5xx/irq.h>*/
+#include <linux/irqflags.h>
+
+/* SYS_IRQS and NR_IRQS are defined in <mach-bf5xx/irq.h> */
 #include <mach/irq.h>
-#include <asm/pda.h>
-#include <asm/processor.h>
 
-#ifdef CONFIG_SMP
-/* Forward decl needed due to cdef inter dependencies */
-static inline uint32_t __pure bfin_dspid(void);
-# define blackfin_core_id() (bfin_dspid() & 0xff)
-# define bfin_irq_flags cpu_pda[blackfin_core_id()].imask
-#else
-extern unsigned long bfin_irq_flags;
-#endif
-
-#ifdef CONFIG_IPIPE
-
-#include <linux/ipipe_trace.h>
-
-void __ipipe_unstall_root(void);
-
-void __ipipe_restore_root(unsigned long flags);
-
-#ifdef CONFIG_DEBUG_HWERR
-# define __all_masked_irq_flags 0x3f
-# define __save_and_cli_hw(x) \
-	__asm__ __volatile__( \
-		"cli %0;" \
-		"sti %1;" \
-		: "=&d"(x) \
-		: "d" (0x3F) \
-	)
-#else
-# define __all_masked_irq_flags 0x1f
-# define __save_and_cli_hw(x) \
-	__asm__ __volatile__( \
-		"cli %0;" \
-		: "=&d"(x) \
-	)
-#endif
-
-#define irqs_enabled_from_flags_hw(x)	((x) != __all_masked_irq_flags)
-#define raw_irqs_disabled_flags(flags)	(!irqs_enabled_from_flags_hw(flags))
-#define local_test_iflag_hw(x)		irqs_enabled_from_flags_hw(x)
-
-#define local_save_flags(x)					 \
-	do {							 \
-		(x) = __ipipe_test_root() ?			 \
-			__all_masked_irq_flags : bfin_irq_flags; \
-		barrier();					 \
-	} while (0)
-
-#define local_irq_save(x)					 \
-	do {						 	 \
-		(x) = __ipipe_test_and_stall_root() ?		 \
-			__all_masked_irq_flags : bfin_irq_flags; \
-		barrier();					 \
-	} while (0)
-
-static inline void local_irq_restore(unsigned long x)
-{
-	barrier();
-	__ipipe_restore_root(x == __all_masked_irq_flags);
-}
-
-#define local_irq_disable()			\
-	do {					\
-		__ipipe_stall_root();		\
-		barrier();			\
-	} while (0)
-
-static inline void local_irq_enable(void)
-{
-	barrier();
-	__ipipe_unstall_root();
-}
-
-#define irqs_disabled()		__ipipe_test_root()
-
-#define local_save_flags_hw(x) \
-	__asm__ __volatile__( \
-		"cli %0;" \
-		"sti %0;" \
-		: "=d"(x) \
-	)
-
-#define	irqs_disabled_hw()				\
-	({						\
-		unsigned long flags;			\
-		local_save_flags_hw(flags);		\
-		!irqs_enabled_from_flags_hw(flags);	\
-	})
-
-static inline unsigned long raw_mangle_irq_bits(int virt, unsigned long real)
-{
-	/* Merge virtual and real interrupt mask bits into a single
-	   32bit word. */
-	return (real & ~(1 << 31)) | ((virt != 0) << 31);
-}
-
-static inline int raw_demangle_irq_bits(unsigned long *x)
-{
-	int virt = (*x & (1 << 31)) != 0;
-	*x &= ~(1L << 31);
-	return virt;
-}
-
-#ifdef CONFIG_IPIPE_TRACE_IRQSOFF
-
-#define local_irq_disable_hw()						\
-	do {								\
-		int _tmp_dummy;						\
-		if (!irqs_disabled_hw())				\
-			ipipe_trace_begin(0x80000000);			\
-		__asm__ __volatile__ ("cli %0;" : "=d" (_tmp_dummy) : );	\
-	} while (0)
-
-#define local_irq_enable_hw()						\
-	do {								\
-		if (irqs_disabled_hw())					\
-			ipipe_trace_end(0x80000000);			\
-		__asm__ __volatile__ ("sti %0;" : : "d"(bfin_irq_flags));	\
-	} while (0)
-
-#define local_irq_save_hw(x)				\
-	do {						\
-		__save_and_cli_hw(x);			\
-		if (local_test_iflag_hw(x))		\
-			ipipe_trace_begin(0x80000001);	\
-	} while (0)
-
-#define local_irq_restore_hw(x)				\
-	do {						\
-		if (local_test_iflag_hw(x)) {		\
-			ipipe_trace_end(0x80000001);	\
-			local_irq_enable_hw_notrace();	\
-		}					\
-	} while (0)
-
-#define local_irq_disable_hw_notrace()					\
-	do {								\
-		int _tmp_dummy;						\
-		__asm__ __volatile__ ("cli %0;" : "=d" (_tmp_dummy) : );	\
-	} while (0)
-
-#define local_irq_enable_hw_notrace() \
-	__asm__ __volatile__( \
-		"sti %0;" \
-		: \
-		: "d"(bfin_irq_flags) \
-	)
-
-#define local_irq_save_hw_notrace(x) __save_and_cli_hw(x)
-
-#define local_irq_restore_hw_notrace(x)			\
-	do {						\
-		if (local_test_iflag_hw(x))		\
-			local_irq_enable_hw_notrace();	\
-	} while (0)
-
-#else /* CONFIG_IPIPE_TRACE_IRQSOFF */
-
-#define local_irq_enable_hw() \
-	__asm__ __volatile__( \
-		"sti %0;" \
-		: \
-		: "d"(bfin_irq_flags) \
-	)
-
-#define local_irq_disable_hw()			\
-	do {					\
-		int _tmp_dummy;			\
-		__asm__ __volatile__ (		\
-			"cli %0;"		\
-			: "=d" (_tmp_dummy));	\
-	} while (0)
-
-#define local_irq_restore_hw(x) \
-	do { \
-		if (irqs_enabled_from_flags_hw(x)) \
-			local_irq_enable_hw(); \
-	} while (0)
-
-#define local_irq_save_hw(x)		__save_and_cli_hw(x)
-
-#define local_irq_disable_hw_notrace()	local_irq_disable_hw()
-#define local_irq_enable_hw_notrace()	local_irq_enable_hw()
-#define local_irq_save_hw_notrace(x)	local_irq_save_hw(x)
-#define local_irq_restore_hw_notrace(x)	local_irq_restore_hw(x)
-
-#endif  /* CONFIG_IPIPE_TRACE_IRQSOFF */
-
-#else /* !CONFIG_IPIPE */
-
-/*
- * Interrupt configuring macros.
- */
-#define local_irq_disable() \
-	do { \
-		int __tmp_dummy; \
-		__asm__ __volatile__( \
-			"cli %0;" \
-			: "=d" (__tmp_dummy) \
-		); \
-	} while (0)
-
-#define local_irq_enable() \
-	__asm__ __volatile__( \
-		"sti %0;" \
-		: \
-		: "d" (bfin_irq_flags) \
-	)
-
-#ifdef CONFIG_DEBUG_HWERR
-# define __save_and_cli(x) \
-	__asm__ __volatile__( \
-		"cli %0;" \
-		"sti %1;" \
-		: "=&d" (x) \
-		: "d" (0x3F) \
-	)
-#else
-# define __save_and_cli(x) \
-	__asm__ __volatile__( \
-		"cli %0;" \
-		: "=&d" (x) \
-	)
-#endif
-
-#define local_save_flags(x) \
-	__asm__ __volatile__( \
-		"cli %0;" \
-		"sti %0;" \
-		: "=d" (x) \
-	)
-
-#ifdef CONFIG_DEBUG_HWERR
-#define irqs_enabled_from_flags(x) (((x) & ~0x3f) != 0)
-#else
-#define irqs_enabled_from_flags(x) ((x) != 0x1f)
-#endif
-
-#define local_irq_restore(x) \
-	do { \
-		if (irqs_enabled_from_flags(x)) \
-			local_irq_enable(); \
-	} while (0)
-
-/* For spinlocks etc */
-#define local_irq_save(x) __save_and_cli(x)
-
-#define irqs_disabled()				\
-({						\
-	unsigned long flags;			\
-	local_save_flags(flags);		\
-	!irqs_enabled_from_flags(flags);	\
-})
-
-#define local_irq_save_hw(x)		local_irq_save(x)
-#define local_irq_restore_hw(x)		local_irq_restore(x)
-#define local_irq_enable_hw()		local_irq_enable()
-#define local_irq_disable_hw()		local_irq_disable()
-#define irqs_disabled_hw()		irqs_disabled()
-
-#endif /* !CONFIG_IPIPE */
+/* Xenomai IPIPE helpers */
+#define local_irq_restore_hw(x) local_irq_restore(x)
+#define local_irq_save_hw(x)    local_irq_save(x)
+#define local_irq_enable_hw(x)  local_irq_enable(x)
+#define local_irq_disable_hw(x) local_irq_disable(x)
+#define irqs_disabled_hw(x)     irqs_disabled(x)
 
 #if ANOMALY_05000244 && defined(CONFIG_BFIN_ICACHE)
 # define NOP_PAD_ANOMALY_05000244 "nop; nop;"
diff --git a/arch/blackfin/include/asm/irqflags.h b/arch/blackfin/include/asm/irqflags.h
new file mode 100644
index 0000000..139cba4
--- /dev/null
+++ b/arch/blackfin/include/asm/irqflags.h
@@ -0,0 +1,63 @@
+/*
+ * interface to Blackfin CEC
+ *
+ * Copyright 2009 Analog Devices Inc.
+ * Licensed under the GPL-2 or later.
+ */
+
+#ifndef __ASM_BFIN_IRQFLAGS_H__
+#define __ASM_BFIN_IRQFLAGS_H__
+
+#ifdef CONFIG_SMP
+# include <asm/pda.h>
+# include <asm/processor.h>
+/* Forward decl needed due to cdef inter dependencies */
+static inline uint32_t __pure bfin_dspid(void);
+# define blackfin_core_id() (bfin_dspid() & 0xff)
+# define bfin_irq_flags cpu_pda[blackfin_core_id()].imask
+#else
+extern unsigned long bfin_irq_flags;
+#endif
+
+static inline void bfin_sti(unsigned long flags)
+{
+	asm volatile("sti %0;" : : "d" (flags));
+}
+
+static inline unsigned long bfin_cli(void)
+{
+	unsigned long flags;
+	asm volatile("cli %0;" : "=d" (flags));
+	return flags;
+}
+
+static inline void raw_local_irq_disable(void)
+{
+	bfin_cli();
+}
+static inline void raw_local_irq_enable(void)
+{
+	bfin_sti(bfin_irq_flags);
+}
+
+#define raw_local_save_flags(flags) do { (flags) = bfin_read_IMASK(); } while (0)
+
+#define raw_irqs_disabled_flags(flags) (((flags) & ~0x3f) == 0)
+
+static inline void raw_local_irq_restore(unsigned long flags)
+{
+	if (!raw_irqs_disabled_flags(flags))
+		raw_local_irq_enable();
+}
+
+static inline unsigned long __raw_local_irq_save(void)
+{
+	unsigned long flags = bfin_cli();
+#ifdef CONFIG_DEBUG_HWERR
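+	/*
+	 * Keep the hardware error interrupt unmasked even while other
+	 * interrupts are disabled, so hardware errors are still reported
+	 * close to where they occur when CONFIG_DEBUG_HWERR is set.
+	 */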
+	bfin_sti(0x3f);
+#endif
+	return flags;
+}
+#define raw_local_irq_save(flags) do { (flags) = __raw_local_irq_save(); } while (0)
+
+#endif
diff --git a/arch/blackfin/include/asm/mutex-dec.h b/arch/blackfin/include/asm/mutex-dec.h
deleted file mode 100644
index 0134151..0000000
--- a/arch/blackfin/include/asm/mutex-dec.h
+++ /dev/null
@@ -1,112 +0,0 @@
-/*
- * include/asm-generic/mutex-dec.h
- *
- * Generic implementation of the mutex fastpath, based on atomic
- * decrement/increment.
- */
-#ifndef _ASM_GENERIC_MUTEX_DEC_H
-#define _ASM_GENERIC_MUTEX_DEC_H
-
-/**
- *  __mutex_fastpath_lock - try to take the lock by moving the count
- *                          from 1 to a 0 value
- *  @count: pointer of type atomic_t
- *  @fail_fn: function to call if the original value was not 1
- *
- * Change the count from 1 to a value lower than 1, and call <fail_fn> if
- * it wasn't 1 originally. This function MUST leave the value lower than
- * 1 even when the "1" assertion wasn't true.
- */
-static inline void
-__mutex_fastpath_lock(atomic_t *count, fastcall void (*fail_fn)(atomic_t *))
-{
-	if (unlikely(atomic_dec_return(count) < 0))
-		fail_fn(count);
-	else
-		smp_mb();
-}
-
-/**
- *  __mutex_fastpath_lock_retval - try to take the lock by moving the count
- *                                 from 1 to a 0 value
- *  @count: pointer of type atomic_t
- *  @fail_fn: function to call if the original value was not 1
- *
- * Change the count from 1 to a value lower than 1, and call <fail_fn> if
- * it wasn't 1 originally. This function returns 0 if the fastpath succeeds,
- * or anything the slow path function returns.
- */
-static inline int
-__mutex_fastpath_lock_retval(atomic_t *count, fastcall int (*fail_fn)(atomic_t *))
-{
-	if (unlikely(atomic_dec_return(count) < 0))
-		return fail_fn(count);
-	else {
-		smp_mb();
-		return 0;
-	}
-}
-
-/**
- *  __mutex_fastpath_unlock - try to promote the count from 0 to 1
- *  @count: pointer of type atomic_t
- *  @fail_fn: function to call if the original value was not 0
- *
- * Try to promote the count from 0 to 1. If it wasn't 0, call <fail_fn>.
- * In the failure case, this function is allowed to either set the value to
- * 1, or to set it to a value lower than 1.
- *
- * If the implementation sets it to a value of lower than 1, then the
- * __mutex_slowpath_needs_to_unlock() macro needs to return 1, it needs
- * to return 0 otherwise.
- */
-static inline void
-__mutex_fastpath_unlock(atomic_t *count, fastcall void (*fail_fn)(atomic_t *))
-{
-	smp_mb();
-	if (unlikely(atomic_inc_return(count) <= 0))
-		fail_fn(count);
-}
-
-#define __mutex_slowpath_needs_to_unlock()		1
-
-/**
- * __mutex_fastpath_trylock - try to acquire the mutex, without waiting
- *
- *  @count: pointer of type atomic_t
- *  @fail_fn: fallback function
- *
- * Change the count from 1 to a value lower than 1, and return 0 (failure)
- * if it wasn't 1 originally, or return 1 (success) otherwise. This function
- * MUST leave the value lower than 1 even when the "1" assertion wasn't true.
- * Additionally, if the value was < 0 originally, this function must not leave
- * it to 0 on failure.
- *
- * If the architecture has no effective trylock variant, it should call the
- * <fail_fn> spinlock-based trylock variant unconditionally.
- */
-static inline int
-__mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *))
-{
-	/*
-	 * We have two variants here. The cmpxchg based one is the best one
-	 * because it never induce a false contention state.  It is included
-	 * here because architectures using the inc/dec algorithms over the
-	 * xchg ones are much more likely to support cmpxchg natively.
-	 *
-	 * If not we fall back to the spinlock based variant - that is
-	 * just as efficient (and simpler) as a 'destructive' probing of
-	 * the mutex state would be.
-	 */
-#ifdef __HAVE_ARCH_CMPXCHG
-	if (likely(atomic_cmpxchg(count, 1, 0) == 1)) {
-		smp_mb();
-		return 1;
-	}
-	return 0;
-#else
-	return fail_fn(count);
-#endif
-}
-
-#endif
diff --git a/arch/blackfin/include/asm/sections.h b/arch/blackfin/include/asm/sections.h
index 1443c33..e7fd0ec 100644
--- a/arch/blackfin/include/asm/sections.h
+++ b/arch/blackfin/include/asm/sections.h
@@ -4,4 +4,15 @@
 /* nothing to see, move along */
 #include <asm-generic/sections.h>
 
+/* only used when MTD_UCLINUX */
+extern unsigned long memory_mtd_start, memory_mtd_end, mtd_size;
+
+extern unsigned long _ramstart, _ramend, _rambase;
+extern unsigned long memory_start, memory_end, physical_mem_end;
+
+extern char _stext_l1[], _etext_l1[], _sdata_l1[], _edata_l1[], _sbss_l1[],
+	_ebss_l1[], _l1_lma_start[], _sdata_b_l1[], _sbss_b_l1[], _ebss_b_l1[],
+	_stext_l2[], _etext_l2[], _sdata_l2[], _edata_l2[], _sbss_l2[],
+	_ebss_l2[], _l2_lma_start[];
+
 #endif
diff --git a/arch/blackfin/include/asm/system.h b/arch/blackfin/include/asm/system.h
index a4c8254..294dbda 100644
--- a/arch/blackfin/include/asm/system.h
+++ b/arch/blackfin/include/asm/system.h
@@ -35,10 +35,10 @@
 #define _BLACKFIN_SYSTEM_H
 
 #include <linux/linkage.h>
-#include <linux/compiler.h>
+#include <linux/irqflags.h>
 #include <mach/anomaly.h>
+#include <asm/cache.h>
 #include <asm/pda.h>
-#include <asm/processor.h>
 #include <asm/irq.h>
 
 /*
diff --git a/arch/blackfin/include/asm/unistd.h b/arch/blackfin/include/asm/unistd.h
index cf5066d..da35133 100644
--- a/arch/blackfin/include/asm/unistd.h
+++ b/arch/blackfin/include/asm/unistd.h
@@ -380,8 +380,9 @@
 #define __NR_inotify_init1	365
 #define __NR_preadv		366
 #define __NR_pwritev		367
+#define __NR_rt_tgsigqueueinfo	368
 
-#define __NR_syscall		368
+#define __NR_syscall		369
 #define NR_syscalls		__NR_syscall
 
 /* Old optional stuff no one actually uses */
diff --git a/arch/blackfin/kernel/Makefile b/arch/blackfin/kernel/Makefile
index fd4d432..3731088 100644
--- a/arch/blackfin/kernel/Makefile
+++ b/arch/blackfin/kernel/Makefile
@@ -15,6 +15,10 @@
     obj-y += time.o
 endif
 
+obj-$(CONFIG_FUNCTION_TRACER)        += ftrace-entry.o
+obj-$(CONFIG_FUNCTION_GRAPH_TRACER)  += ftrace.o
+CFLAGS_REMOVE_ftrace.o = -pg
+
 obj-$(CONFIG_IPIPE)                  += ipipe.o
 obj-$(CONFIG_IPIPE_TRACE_MCOUNT)     += mcount.o
 obj-$(CONFIG_BFIN_GPTIMERS)          += gptimers.o
@@ -23,6 +27,7 @@
 obj-$(CONFIG_KGDB)                   += kgdb.o
 obj-$(CONFIG_KGDB_TESTS)             += kgdb_test.o
 obj-$(CONFIG_EARLY_PRINTK)           += early_printk.o
+obj-$(CONFIG_STACKTRACE)             += stacktrace.o
 
 # the kgdb test puts code into L2 and without linker
 # relaxation, we need to force long calls to/from it
diff --git a/arch/blackfin/kernel/bfin_dma_5xx.c b/arch/blackfin/kernel/bfin_dma_5xx.c
index 763ed84..e0bf8cc 100644
--- a/arch/blackfin/kernel/bfin_dma_5xx.c
+++ b/arch/blackfin/kernel/bfin_dma_5xx.c
@@ -453,10 +453,10 @@
 	unsigned long src = (unsigned long)psrc;
 	size_t bulk, rest;
 
-	if (bfin_addr_dcachable(src))
+	if (bfin_addr_dcacheable(src))
 		blackfin_dcache_flush_range(src, src + size);
 
-	if (bfin_addr_dcachable(dst))
+	if (bfin_addr_dcacheable(dst))
 		blackfin_dcache_invalidate_range(dst, dst + size);
 
 	bulk = size & ~0xffff;
diff --git a/arch/blackfin/kernel/bfin_ksyms.c b/arch/blackfin/kernel/bfin_ksyms.c
index 53e893f..aa05e63 100644
--- a/arch/blackfin/kernel/bfin_ksyms.c
+++ b/arch/blackfin/kernel/bfin_ksyms.c
@@ -103,3 +103,8 @@
 EXPORT_SYMBOL(__raw_smp_check_barrier_asm);
 #endif
 #endif
+
+#ifdef CONFIG_FUNCTION_TRACER
+extern void _mcount(void);
+EXPORT_SYMBOL(_mcount);
+#endif
diff --git a/arch/blackfin/kernel/cplb-mpu/cplbmgr.c b/arch/blackfin/kernel/cplb-mpu/cplbmgr.c
index 87463ce..784923e 100644
--- a/arch/blackfin/kernel/cplb-mpu/cplbmgr.c
+++ b/arch/blackfin/kernel/cplb-mpu/cplbmgr.c
@@ -151,7 +151,7 @@
 
 	d_data = CPLB_SUPV_WR | CPLB_VALID | CPLB_DIRTY | PAGE_SIZE_4KB;
 #ifdef CONFIG_BFIN_DCACHE
-	if (bfin_addr_dcachable(addr)) {
+	if (bfin_addr_dcacheable(addr)) {
 		d_data |= CPLB_L1_CHBL | ANOMALY_05000158_WORKAROUND;
 #ifdef CONFIG_BFIN_WT
 		d_data |= CPLB_L1_AOW | CPLB_WT;
diff --git a/arch/blackfin/kernel/cplb-nompu/cplbmgr.c b/arch/blackfin/kernel/cplb-nompu/cplbmgr.c
index 8cbb47c..12b0308 100644
--- a/arch/blackfin/kernel/cplb-nompu/cplbmgr.c
+++ b/arch/blackfin/kernel/cplb-nompu/cplbmgr.c
@@ -28,6 +28,7 @@
 #include <asm/cplbinit.h>
 #include <asm/cplb.h>
 #include <asm/mmu_context.h>
+#include <asm/traps.h>
 
 /*
  * WARNING
@@ -100,28 +101,6 @@
 #endif
 }
 
-/*
- * Given the contents of the status register, return the index of the
- * CPLB that caused the fault.
- */
-static inline int faulting_cplb_index(int status)
-{
-	int signbits = __builtin_bfin_norm_fr1x32(status & 0xFFFF);
-	return 30 - signbits;
-}
-
-/*
- * Given the contents of the status register and the DCPLB_DATA contents,
- * return true if a write access should be permitted.
- */
-static inline int write_permitted(int status, unsigned long data)
-{
-	if (status & FAULT_USERSUPV)
-		return !!(data & CPLB_SUPV_WR);
-	else
-		return !!(data & CPLB_USER_WR);
-}
-
 /* Counters to implement round-robin replacement.  */
 static int icplb_rr_index[NR_CPUS] PDT_ATTR;
 static int dcplb_rr_index[NR_CPUS] PDT_ATTR;
@@ -245,43 +224,16 @@
 	return CPLB_RELOADED;
 }
 
-MGR_ATTR static noinline int dcplb_protection_fault(int cpu)
-{
-	int status = bfin_read_DCPLB_STATUS();
-
-	nr_dcplb_prot[cpu]++;
-
-	if (likely(status & FAULT_RW)) {
-		int idx = faulting_cplb_index(status);
-		unsigned long regaddr = DCPLB_DATA0 + idx * 4;
-		unsigned long data = bfin_read32(regaddr);
-
-		/* Check if fault is to dirty a clean page */
-		if (!(data & CPLB_WT) && !(data & CPLB_DIRTY) &&
-		    write_permitted(status, data)) {
-
-			dcplb_tbl[cpu][idx].data = data;
-			bfin_write32(regaddr, data);
-			return CPLB_RELOADED;
-		}
-	}
-
-	return CPLB_PROT_VIOL;
-}
-
 MGR_ATTR int cplb_hdr(int seqstat, struct pt_regs *regs)
 {
 	int cause = seqstat & 0x3f;
 	unsigned int cpu = smp_processor_id();
 	switch (cause) {
-	case 0x2C:
+	case VEC_CPLB_I_M:
 		return icplb_miss(cpu);
-	case 0x26:
+	case VEC_CPLB_M:
 		return dcplb_miss(cpu);
 	default:
-		if (unlikely(cause == 0x23))
-			return dcplb_protection_fault(cpu);
-
 		return CPLB_UNKNOWN_ERR;
 	}
 }
diff --git a/arch/blackfin/kernel/early_printk.c b/arch/blackfin/kernel/early_printk.c
index 3302719..2ab5681 100644
--- a/arch/blackfin/kernel/early_printk.c
+++ b/arch/blackfin/kernel/early_printk.c
@@ -202,11 +202,15 @@
 asmlinkage void __init early_trap_c(struct pt_regs *fp, void *retaddr)
 {
 	/* This can happen before the uart is initialized, so initialize
-	 * the UART now
+	 * the UART now (but only if we are running on the processor we think
+	 * we are compiled for - otherwise we write to MMRs that don't exist,
+	 * and cause other problems. Nothing comes out the UART, but it does
+	 * end up in the __buf_log).
 	 */
-	if (likely(early_console == NULL))
+	if (likely(early_console == NULL) && CPUID == bfin_cpuid())
 		setup_early_printk(DEFAULT_EARLY_PORT);
 
+	printk(KERN_EMERG "Early panic\n");
 	dump_bfin_mem(fp);
 	show_regs(fp);
 	dump_bfin_trace_buffer();
diff --git a/arch/blackfin/kernel/ftrace-entry.S b/arch/blackfin/kernel/ftrace-entry.S
new file mode 100644
index 0000000..6980b7a
--- /dev/null
+++ b/arch/blackfin/kernel/ftrace-entry.S
@@ -0,0 +1,140 @@
+/*
+ * mcount and friends -- ftrace stuff
+ *
+ * Copyright (C) 2009 Analog Devices Inc.
+ * Licensed under the GPL-2 or later.
+ */
+
+#include <linux/linkage.h>
+#include <asm/ftrace.h>
+
+.text
+
+/* GCC will have called us before setting up the function prologue, so we
+ * can clobber the normal scratch registers, but we need to make sure to
+ * save/restore the registers used for argument passing (R0-R2) in case
+ * the profiled function is using them.  With data registers, R3 is the
+ * only one we can blow away.  With pointer registers, we have P0-P2.
+ *
+ * Upon entry, the RETS will point to the top of the current profiled
+ * function.  And since GCC set up the frame for us, the previous function
+ * will be waiting there.  mmmm pie.
+ */
+ENTRY(__mcount)
+	/* save third function arg early so we can do testing below */
+	[--sp] = r2;
+
+	/* load the function pointer to the tracer */
+	p0.l = _ftrace_trace_function;
+	p0.h = _ftrace_trace_function;
+	r3 = [p0];
+
+	/* optional micro optimization: don't call the stub tracer */
+	r2.l = _ftrace_stub;
+	r2.h = _ftrace_stub;
+	cc = r2 == r3;
+	if ! cc jump .Ldo_trace;
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+	/* if the ftrace_graph_return function pointer is not set to
+	 * the ftrace_stub entry, call prepare_ftrace_return().
+	 */
+	p0.l = _ftrace_graph_return;
+	p0.h = _ftrace_graph_return;
+	r3 = [p0];
+	cc = r2 == r3;
+	if ! cc jump _ftrace_graph_caller;
+
+	/* similarly, if the ftrace_graph_entry function pointer is not
+	 * set to the ftrace_graph_entry_stub entry, ...
+	 */
+	p0.l = _ftrace_graph_entry;
+	p0.h = _ftrace_graph_entry;
+	r2.l = _ftrace_graph_entry_stub;
+	r2.h = _ftrace_graph_entry_stub;
+	r3 = [p0];
+	cc = r2 == r3;
+	if ! cc jump _ftrace_graph_caller;
+#endif
+
+	r2 = [sp++];
+	rts;
+
+.Ldo_trace:
+
+	/* save first/second function arg and the return register */
+	[--sp] = r0;
+	[--sp] = r1;
+	[--sp] = rets;
+
+	/* setup the tracer function */
+	p0 = r3;
+
+	/* tracer(ulong frompc, ulong selfpc):
+	 *  frompc: the pc that did the call to ...
+	 *  selfpc: ... this location
+	 * the selfpc itself will need adjusting for the mcount call
+	 */
+	r1 = rets;
+	r0 = [fp + 4];
+	r1 += -MCOUNT_INSN_SIZE;
+
+	/* call the tracer */
+	call (p0);
+
+	/* restore state and get out of dodge */
+.Lfinish_trace:
+	rets = [sp++];
+	r1 = [sp++];
+	r0 = [sp++];
+	r2 = [sp++];
+
+.globl _ftrace_stub
+_ftrace_stub:
+	rts;
+ENDPROC(__mcount)
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+/* The prepare_ftrace_return() function is similar to the trace function
+ * except that it takes a pointer to the location of the frompc.  This is
+ * so prepare_ftrace_return() can hijack it temporarily for probing
+ * purposes.
+ */
+ENTRY(_ftrace_graph_caller)
+	/* save first/second function arg and the return register */
+	[--sp] = r0;
+	[--sp] = r1;
+	[--sp] = rets;
+
+	r0 = fp;
+	r1 = rets;
+	r0 += 4;
+	r1 += -MCOUNT_INSN_SIZE;
+	call _prepare_ftrace_return;
+
+	jump .Lfinish_trace;
+ENDPROC(_ftrace_graph_caller)
+
+/* Undo the rewrite caused by ftrace_graph_caller().  The common function
+ * ftrace_return_to_handler() will return the original rets so we can
+ * restore it and be on our way.
+ */
+ENTRY(_return_to_handler)
+	/* make sure original return values are saved */
+	[--sp] = p0;
+	[--sp] = r0;
+	[--sp] = r1;
+
+	/* get original return address */
+	call _ftrace_return_to_handler;
+	rets = r0;
+
+	/* anomaly 05000371 - make sure we have at least three instructions
+	 * between rets setting and the return
+	 */
+	r1 = [sp++];
+	r0 = [sp++];
+	p0 = [sp++];
+	rts;
+ENDPROC(_return_to_handler)
+#endif
diff --git a/arch/blackfin/kernel/ftrace.c b/arch/blackfin/kernel/ftrace.c
new file mode 100644
index 0000000..905bfc4
--- /dev/null
+++ b/arch/blackfin/kernel/ftrace.c
@@ -0,0 +1,42 @@
+/*
+ * ftrace graph code
+ *
+ * Copyright (C) 2009 Analog Devices Inc.
+ * Licensed under the GPL-2 or later.
+ */
+
+#include <linux/ftrace.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <asm/atomic.h>
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+
+/*
+ * Hook the return address and push it onto the stack of return
+ * addresses in the current thread info.
+ */
+void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
+{
+	struct ftrace_graph_ent trace;
+	unsigned long return_hooker = (unsigned long)&return_to_handler;
+
+	if (unlikely(atomic_read(&current->tracing_graph_pause)))
+		return;
+
+	if (ftrace_push_return_trace(*parent, self_addr, &trace.depth) == -EBUSY)
+		return;
+
+	trace.func = self_addr;
+
+	/* Only trace if the calling function expects to */
+	if (!ftrace_graph_entry(&trace)) {
+		current->curr_ret_stack--;
+		return;
+	}
+
+	/* all is well in the world !  hijack RETS ... */
+	*parent = return_hooker;
+}
+
+#endif
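
prepare_ftrace_return() above only does work once graph-tracer hooks have been registered with the generic ftrace core. A hedged sketch of what such hooks look like (the hook names are hypothetical; register_ftrace_graph() is the generic registration call):

	static int sketch_graph_entry(struct ftrace_graph_ent *trace)
	{
		/* non-zero return: yes, trace this function */
		return 1;
	}

	static void sketch_graph_return(struct ftrace_graph_ret *trace)
	{
		/* runs via return_to_handler when the traced function exits */
	}

	/* registration, e.g. from module init code:
	 *	register_ftrace_graph(sketch_graph_return, sketch_graph_entry);
	 */
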
diff --git a/arch/blackfin/kernel/ipipe.c b/arch/blackfin/kernel/ipipe.c
index 5fc4248..d8cde1f 100644
--- a/arch/blackfin/kernel/ipipe.c
+++ b/arch/blackfin/kernel/ipipe.c
@@ -99,7 +99,7 @@
 	 * interrupt.
 	 */
 	m_ack = (regs == NULL || irq == IRQ_SYSTMR || irq == IRQ_CORETMR);
-	this_domain = ipipe_current_domain;
+	this_domain = __ipipe_current_domain;
 
 	if (unlikely(test_bit(IPIPE_STICKY_FLAG, &this_domain->irqs[irq].control)))
 		head = &this_domain->p_link;
@@ -212,7 +212,9 @@
 
 int __ipipe_syscall_root(struct pt_regs *regs)
 {
+	struct ipipe_percpu_domain_data *p;
 	unsigned long flags;
+	int ret;
 
 	/*
 	 * We need to run the IRQ tail hook whenever we don't
@@ -231,29 +233,31 @@
 	/*
 	 * This routine either returns:
 	 * 0 -- if the syscall is to be passed to Linux;
-	 * 1 -- if the syscall should not be passed to Linux, and no
+	 * >0 -- if the syscall should not be passed to Linux, and no
 	 * tail work should be performed;
-	 * -1 -- if the syscall should not be passed to Linux but the
+	 * <0 -- if the syscall should not be passed to Linux but the
 	 * tail work has to be performed (for handling signals etc).
 	 */
 
-	if (__ipipe_event_monitored_p(IPIPE_EVENT_SYSCALL) &&
-	    __ipipe_dispatch_event(IPIPE_EVENT_SYSCALL, regs) > 0) {
-		if (ipipe_root_domain_p && !in_atomic()) {
-			/*
-			 * Sync pending VIRQs before _TIF_NEED_RESCHED
-			 * is tested.
-			 */
-			local_irq_save_hw(flags);
-			if ((ipipe_root_cpudom_var(irqpend_himask) & IPIPE_IRQMASK_VIRT) != 0)
-				__ipipe_sync_pipeline(IPIPE_IRQMASK_VIRT);
-			local_irq_restore_hw(flags);
-			return -1;
-		}
+	if (!__ipipe_event_monitored_p(IPIPE_EVENT_SYSCALL))
+		return 0;
+
+	ret = __ipipe_dispatch_event(IPIPE_EVENT_SYSCALL, regs);
+
+	local_irq_save_hw(flags);
+
+	if (!__ipipe_root_domain_p) {
+		local_irq_restore_hw(flags);
 		return 1;
 	}
 
-	return 0;
+	p = ipipe_root_cpudom_ptr();
+	if ((p->irqpend_himask & IPIPE_IRQMASK_VIRT) != 0)
+		__ipipe_sync_pipeline(IPIPE_IRQMASK_VIRT);
+
+	local_irq_restore_hw(flags);
+
+	return -ret;
 }
 
 unsigned long ipipe_critical_enter(void (*syncfn) (void))
@@ -329,9 +333,7 @@
 
 void ___ipipe_sync_pipeline(unsigned long syncmask)
 {
-	struct ipipe_domain *ipd = ipipe_current_domain;
-
-	if (ipd == ipipe_root_domain) {
+	if (__ipipe_root_domain_p) {
 		if (test_bit(IPIPE_SYNCDEFER_FLAG, &ipipe_root_cpudom_var(status)))
 			return;
 	}
diff --git a/arch/blackfin/kernel/setup.c b/arch/blackfin/kernel/setup.c
index 80447f9..6454bab 100644
--- a/arch/blackfin/kernel/setup.c
+++ b/arch/blackfin/kernel/setup.c
@@ -1098,7 +1098,7 @@
 			CPUID, bfin_cpuid());
 
 	seq_printf(m, "model name\t: ADSP-%s %lu(MHz CCLK) %lu(MHz SCLK) (%s)\n"
-		"stepping\t: %d\n",
+		"stepping\t: %d ",
 		cpu, cclk/1000000, sclk/1000000,
 #ifdef CONFIG_MPU
 		"mpu on",
@@ -1107,7 +1107,16 @@
 #endif
 		revid);
 
-	seq_printf(m, "cpu MHz\t\t: %lu.%03lu/%lu.%03lu\n",
+	if (bfin_revid() != bfin_compiled_revid()) {
+		if (bfin_compiled_revid() == -1)
+			seq_printf(m, "(Compiled for Rev none)");
+		else if (bfin_compiled_revid() == 0xffff)
+			seq_printf(m, "(Compiled for Rev any)");
+		else
+			seq_printf(m, "(Compiled for Rev %d)", bfin_compiled_revid());
+	}
+
+	seq_printf(m, "\ncpu MHz\t\t: %lu.%03lu/%lu.%03lu\n",
 		cclk/1000000, cclk%1000000,
 		sclk/1000000, sclk%1000000);
 	seq_printf(m, "bogomips\t: %lu.%02lu\n"
@@ -1172,6 +1181,9 @@
 #ifdef __ARCH_SYNC_CORE_DCACHE
 	seq_printf(m, "SMP Dcache Flushes\t: %lu\n\n", cpudata->dcache_invld_count);
 #endif
+#ifdef __ARCH_SYNC_CORE_ICACHE
+	seq_printf(m, "SMP Icache Flushes\t: %lu\n\n", cpudata->icache_invld_count);
+#endif
 #ifdef CONFIG_BFIN_ICACHE_LOCK
 	switch ((cpudata->imemctl >> 3) & WAYALL_L) {
 	case WAY0_L:
diff --git a/arch/blackfin/kernel/stacktrace.c b/arch/blackfin/kernel/stacktrace.c
new file mode 100644
index 0000000..30301e1
--- /dev/null
+++ b/arch/blackfin/kernel/stacktrace.c
@@ -0,0 +1,53 @@
+/*
+ * Blackfin stacktrace code (mostly copied from avr32)
+ *
+ * Copyright 2009 Analog Devices Inc.
+ * Licensed under the GPL-2 or later.
+ */
+
+#include <linux/sched.h>
+#include <linux/stacktrace.h>
+#include <linux/thread_info.h>
+#include <linux/module.h>
+
+register unsigned long current_frame_pointer asm("FP");
+
+struct stackframe {
+	unsigned long fp;
+	unsigned long rets;
+};
+
+/*
+ * Save stack-backtrace addresses into a stack_trace buffer.
+ */
+void save_stack_trace(struct stack_trace *trace)
+{
+	unsigned long low, high;
+	unsigned long fp;
+	struct stackframe *frame;
+	int skip = trace->skip;
+
+	low = (unsigned long)task_stack_page(current);
+	high = low + THREAD_SIZE;
+	fp = current_frame_pointer;
+
+	while (fp >= low && fp <= (high - sizeof(*frame))) {
+		frame = (struct stackframe *)fp;
+
+		if (skip) {
+			skip--;
+		} else {
+			trace->entries[trace->nr_entries++] = frame->rets;
+			if (trace->nr_entries >= trace->max_entries)
+				break;
+		}
+
+		/*
+		 * The next frame must be at a higher address than the
+		 * current frame.
+		 */
+		low = fp + sizeof(*frame);
+		fp = frame->fp;
+	}
+}
+EXPORT_SYMBOL_GPL(save_stack_trace);
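
A hedged sketch of how this is typically driven through the generic <linux/stacktrace.h> interface; the wrapper function and buffer size below are hypothetical:

	static void sketch_dump_current_stack(void)
	{
		unsigned long entries[16];
		struct stack_trace trace = {
			.entries	= entries,
			.max_entries	= ARRAY_SIZE(entries),
			.skip		= 1,	/* skip this wrapper itself */
		};

		save_stack_trace(&trace);
		print_stack_trace(&trace, 0);	/* 0 = no extra indentation */
	}
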
diff --git a/arch/blackfin/kernel/traps.c b/arch/blackfin/kernel/traps.c
index aa76dfb..d279552 100644
--- a/arch/blackfin/kernel/traps.c
+++ b/arch/blackfin/kernel/traps.c
@@ -27,6 +27,7 @@
  * 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
  */
 
+#include <linux/bug.h>
 #include <linux/uaccess.h>
 #include <linux/interrupt.h>
 #include <linux/module.h>
@@ -238,6 +239,11 @@
 
 }
 
+static int kernel_mode_regs(struct pt_regs *regs)
+{
+	return regs->ipend & 0xffc0;
+}
+
 asmlinkage void trap_c(struct pt_regs *fp)
 {
 #ifdef CONFIG_DEBUG_BFIN_HWTRACE_ON
@@ -246,6 +252,7 @@
 #ifdef CONFIG_DEBUG_HUNT_FOR_ZERO
 	unsigned int cpu = smp_processor_id();
 #endif
+	const char *strerror = NULL;
 	int sig = 0;
 	siginfo_t info;
 	unsigned long trapnr = fp->seqstat & SEQSTAT_EXCAUSE;
@@ -259,27 +266,10 @@
 	 * double faults if the stack has become corrupt
 	 */
 
-	/* If the fault was caused by a kernel thread, or interrupt handler
-	 * we will kernel panic, so the system reboots.
-	 * If KGDB is enabled, don't set this for kernel breakpoints
-	*/
-
-	/* TODO: check to see if we are in some sort of deferred HWERR
-	 * that we should be able to recover from, not kernel panic
-	 */
-	if ((bfin_read_IPEND() & 0xFFC0) && (trapnr != VEC_STEP)
-#ifdef CONFIG_KGDB
-		&& (trapnr != VEC_EXCPT02)
+#ifndef CONFIG_KGDB
+	/* IPEND is skipped if KGDB isn't enabled (see entry code) */
+	fp->ipend = bfin_read_IPEND();
 #endif
-	){
-		console_verbose();
-		oops_in_progress = 1;
-	} else if (current) {
-		if (current->mm == NULL) {
-			console_verbose();
-			oops_in_progress = 1;
-		}
-	}
 
 	/* trap_c() will be called for exceptions. During exceptions
 	 * processing, the pc value should be set with retx value.
@@ -307,15 +297,15 @@
 		sig = SIGTRAP;
 		CHK_DEBUGGER_TRAP_MAYBE();
 		/* Check if this is a breakpoint in kernel space */
-		if (fp->ipend & 0xffc0)
-			return;
+		if (kernel_mode_regs(fp))
+			goto traps_done;
 		else
 			break;
 	/* 0x03 - User Defined, userspace stack overflow */
 	case VEC_EXCPT03:
 		info.si_code = SEGV_STACKFLOW;
 		sig = SIGSEGV;
-		verbose_printk(KERN_NOTICE EXC_0x03(KERN_NOTICE));
+		strerror = KERN_NOTICE EXC_0x03(KERN_NOTICE);
 		CHK_DEBUGGER_TRAP_MAYBE();
 		break;
 	/* 0x02 - KGDB initial connection and break signal trap */
@@ -324,7 +314,7 @@
 		info.si_code = TRAP_ILLTRAP;
 		sig = SIGTRAP;
 		CHK_DEBUGGER_TRAP();
-		return;
+		goto traps_done;
 #endif
 	/* 0x04 - User Defined */
 	/* 0x05 - User Defined */
@@ -344,7 +334,7 @@
 	case VEC_EXCPT04 ... VEC_EXCPT15:
 		info.si_code = ILL_ILLPARAOP;
 		sig = SIGILL;
-		verbose_printk(KERN_NOTICE EXC_0x04(KERN_NOTICE));
+		strerror = KERN_NOTICE EXC_0x04(KERN_NOTICE);
 		CHK_DEBUGGER_TRAP_MAYBE();
 		break;
 	/* 0x10 HW Single step, handled here */
@@ -353,15 +343,15 @@
 		sig = SIGTRAP;
 		CHK_DEBUGGER_TRAP_MAYBE();
 		/* Check if this is a single step in kernel space */
-		if (fp->ipend & 0xffc0)
-			return;
+		if (kernel_mode_regs(fp))
+			goto traps_done;
 		else
 			break;
 	/* 0x11 - Trace Buffer Full, handled here */
 	case VEC_OVFLOW:
 		info.si_code = TRAP_TRACEFLOW;
 		sig = SIGTRAP;
-		verbose_printk(KERN_NOTICE EXC_0x11(KERN_NOTICE));
+		strerror = KERN_NOTICE EXC_0x11(KERN_NOTICE);
 		CHK_DEBUGGER_TRAP_MAYBE();
 		break;
 	/* 0x12 - Reserved, Caught by default */
@@ -381,37 +371,54 @@
 	/* 0x20 - Reserved, Caught by default */
 	/* 0x21 - Undefined Instruction, handled here */
 	case VEC_UNDEF_I:
+#ifdef CONFIG_BUG
+		if (kernel_mode_regs(fp)) {
+			switch (report_bug(fp->pc, fp)) {
+			case BUG_TRAP_TYPE_NONE:
+				break;
+			case BUG_TRAP_TYPE_WARN:
+				dump_bfin_trace_buffer();
+				fp->pc += 2;
+				goto traps_done;
+			case BUG_TRAP_TYPE_BUG:
+				/* panic() will dump the trace; tracing is already
+				 * off at this point, so it won't be clobbered
+				 */
+				panic("BUG()");
+			}
+		}
+#endif
 		info.si_code = ILL_ILLOPC;
 		sig = SIGILL;
-		verbose_printk(KERN_NOTICE EXC_0x21(KERN_NOTICE));
+		strerror = KERN_NOTICE EXC_0x21(KERN_NOTICE);
 		CHK_DEBUGGER_TRAP_MAYBE();
 		break;
 	/* 0x22 - Illegal Instruction Combination, handled here */
 	case VEC_ILGAL_I:
 		info.si_code = ILL_ILLPARAOP;
 		sig = SIGILL;
-		verbose_printk(KERN_NOTICE EXC_0x22(KERN_NOTICE));
+		strerror = KERN_NOTICE EXC_0x22(KERN_NOTICE);
 		CHK_DEBUGGER_TRAP_MAYBE();
 		break;
 	/* 0x23 - Data CPLB protection violation, handled here */
 	case VEC_CPLB_VL:
 		info.si_code = ILL_CPLB_VI;
 		sig = SIGBUS;
-		verbose_printk(KERN_NOTICE EXC_0x23(KERN_NOTICE));
+		strerror = KERN_NOTICE EXC_0x23(KERN_NOTICE);
 		CHK_DEBUGGER_TRAP_MAYBE();
 		break;
 	/* 0x24 - Data access misaligned, handled here */
 	case VEC_MISALI_D:
 		info.si_code = BUS_ADRALN;
 		sig = SIGBUS;
-		verbose_printk(KERN_NOTICE EXC_0x24(KERN_NOTICE));
+		strerror = KERN_NOTICE EXC_0x24(KERN_NOTICE);
 		CHK_DEBUGGER_TRAP_MAYBE();
 		break;
 	/* 0x25 - Unrecoverable Event, handled here */
 	case VEC_UNCOV:
 		info.si_code = ILL_ILLEXCPT;
 		sig = SIGILL;
-		verbose_printk(KERN_NOTICE EXC_0x25(KERN_NOTICE));
+		strerror = KERN_NOTICE EXC_0x25(KERN_NOTICE);
 		CHK_DEBUGGER_TRAP_MAYBE();
 		break;
 	/* 0x26 - Data CPLB Miss, normal case is handled in _cplb_hdr,
@@ -419,7 +426,7 @@
 	case VEC_CPLB_M:
 		info.si_code = BUS_ADRALN;
 		sig = SIGBUS;
-		verbose_printk(KERN_NOTICE EXC_0x26(KERN_NOTICE));
+		strerror = KERN_NOTICE EXC_0x26(KERN_NOTICE);
 		break;
 	/* 0x27 - Data CPLB Multiple Hits - Linux Trap Zero, handled here */
 	case VEC_CPLB_MHIT:
@@ -427,10 +434,10 @@
 		sig = SIGSEGV;
 #ifdef CONFIG_DEBUG_HUNT_FOR_ZERO
 		if (cpu_pda[cpu].dcplb_fault_addr < FIXED_CODE_START)
-			verbose_printk(KERN_NOTICE "NULL pointer access\n");
+			strerror = KERN_NOTICE "NULL pointer access\n";
 		else
 #endif
-			verbose_printk(KERN_NOTICE EXC_0x27(KERN_NOTICE));
+			strerror = KERN_NOTICE EXC_0x27(KERN_NOTICE);
 		CHK_DEBUGGER_TRAP_MAYBE();
 		break;
 	/* 0x28 - Emulation Watchpoint, handled here */
@@ -440,8 +447,8 @@
 		pr_debug(EXC_0x28(KERN_DEBUG));
 		CHK_DEBUGGER_TRAP_MAYBE();
 		/* Check if this is a watchpoint in kernel space */
-		if (fp->ipend & 0xffc0)
-			return;
+		if (kernel_mode_regs(fp))
+			goto traps_done;
 		else
 			break;
 #ifdef CONFIG_BF535
@@ -449,7 +456,7 @@
 	case VEC_ISTRU_VL:      /* ADSP-BF535 only (MH) */
 		info.si_code = BUS_OPFETCH;
 		sig = SIGBUS;
-		verbose_printk(KERN_NOTICE "BF535: VEC_ISTRU_VL\n");
+		strerror = KERN_NOTICE "BF535: VEC_ISTRU_VL\n";
 		CHK_DEBUGGER_TRAP_MAYBE();
 		break;
 #else
@@ -459,21 +466,21 @@
 	case VEC_MISALI_I:
 		info.si_code = BUS_ADRALN;
 		sig = SIGBUS;
-		verbose_printk(KERN_NOTICE EXC_0x2A(KERN_NOTICE));
+		strerror = KERN_NOTICE EXC_0x2A(KERN_NOTICE);
 		CHK_DEBUGGER_TRAP_MAYBE();
 		break;
 	/* 0x2B - Instruction CPLB protection violation, handled here */
 	case VEC_CPLB_I_VL:
 		info.si_code = ILL_CPLB_VI;
 		sig = SIGBUS;
-		verbose_printk(KERN_NOTICE EXC_0x2B(KERN_NOTICE));
+		strerror = KERN_NOTICE EXC_0x2B(KERN_NOTICE);
 		CHK_DEBUGGER_TRAP_MAYBE();
 		break;
 	/* 0x2C - Instruction CPLB miss, handled in _cplb_hdr */
 	case VEC_CPLB_I_M:
 		info.si_code = ILL_CPLB_MISS;
 		sig = SIGBUS;
-		verbose_printk(KERN_NOTICE EXC_0x2C(KERN_NOTICE));
+		strerror = KERN_NOTICE EXC_0x2C(KERN_NOTICE);
 		break;
 	/* 0x2D - Instruction CPLB Multiple Hits, handled here */
 	case VEC_CPLB_I_MHIT:
@@ -481,17 +488,17 @@
 		sig = SIGSEGV;
 #ifdef CONFIG_DEBUG_HUNT_FOR_ZERO
 		if (cpu_pda[cpu].icplb_fault_addr < FIXED_CODE_START)
-			verbose_printk(KERN_NOTICE "Jump to NULL address\n");
+			strerror = KERN_NOTICE "Jump to NULL address\n";
 		else
 #endif
-			verbose_printk(KERN_NOTICE EXC_0x2D(KERN_NOTICE));
+			strerror = KERN_NOTICE EXC_0x2D(KERN_NOTICE);
 		CHK_DEBUGGER_TRAP_MAYBE();
 		break;
 	/* 0x2E - Illegal use of Supervisor Resource, handled here */
 	case VEC_ILL_RES:
 		info.si_code = ILL_PRVOPC;
 		sig = SIGILL;
-		verbose_printk(KERN_NOTICE EXC_0x2E(KERN_NOTICE));
+		strerror = KERN_NOTICE EXC_0x2E(KERN_NOTICE);
 		CHK_DEBUGGER_TRAP_MAYBE();
 		break;
 	/* 0x2F - Reserved, Caught by default */
@@ -519,17 +526,17 @@
 		case (SEQSTAT_HWERRCAUSE_SYSTEM_MMR):
 			info.si_code = BUS_ADRALN;
 			sig = SIGBUS;
-			verbose_printk(KERN_NOTICE HWC_x2(KERN_NOTICE));
+			strerror = KERN_NOTICE HWC_x2(KERN_NOTICE);
 			break;
 		/* External Memory Addressing Error */
 		case (SEQSTAT_HWERRCAUSE_EXTERN_ADDR):
 			info.si_code = BUS_ADRERR;
 			sig = SIGBUS;
-			verbose_printk(KERN_NOTICE HWC_x3(KERN_NOTICE));
+			strerror = KERN_NOTICE HWC_x3(KERN_NOTICE);
 			break;
 		/* Performance Monitor Overflow */
 		case (SEQSTAT_HWERRCAUSE_PERF_FLOW):
-			verbose_printk(KERN_NOTICE HWC_x12(KERN_NOTICE));
+			strerror = KERN_NOTICE HWC_x12(KERN_NOTICE);
 			break;
 		/* RAISE 5 instruction */
 		case (SEQSTAT_HWERRCAUSE_RAISE_5):
@@ -546,7 +553,6 @@
 	 * if we get here we hit a reserved one, so panic
 	 */
 	default:
-		oops_in_progress = 1;
 		info.si_code = ILL_ILLPARAOP;
 		sig = SIGILL;
 		verbose_printk(KERN_EMERG "Caught Unhandled Exception, code = %08lx\n",
@@ -557,6 +563,16 @@
 
 	BUG_ON(sig == 0);
 
+	/* If the fault was caused by a kernel thread or an interrupt handler,
+	 * we will panic the kernel, so the system reboots.
+	 */
+	if (kernel_mode_regs(fp) || (current && !current->mm)) {
+		console_verbose();
+		oops_in_progress = 1;
+		if (strerror)
+			verbose_printk(strerror);
+	}
+
 	if (sig != SIGTRAP) {
 		dump_bfin_process(fp);
 		dump_bfin_mem(fp);
@@ -606,8 +622,8 @@
 	if (ANOMALY_05000461 && trapnr == VEC_HWERR && !access_ok(VERIFY_READ, fp->pc, 8))
 		fp->pc = SAFE_USER_INSTRUCTION;
 
+ traps_done:
 	trace_buffer_restore(j);
-	return;
 }
 
 /* Typical exception handling routines	*/
@@ -792,6 +808,18 @@
 }
 EXPORT_SYMBOL(dump_bfin_trace_buffer);
 
+#ifdef CONFIG_BUG
+int is_valid_bugaddr(unsigned long addr)
+{
+	unsigned short opcode;
+
+	if (!get_instruction(&opcode, (unsigned short *)addr))
+		return 0;
+
+	return opcode == BFIN_BUG_OPCODE;
+}
+#endif
+
 /*
  * Checks to see if the address pointed to is either a
  * 16-bit CALL instruction, or a 32-bit CALL instruction
diff --git a/arch/blackfin/kernel/vmlinux.lds.S b/arch/blackfin/kernel/vmlinux.lds.S
index 8b67167..6ac307c 100644
--- a/arch/blackfin/kernel/vmlinux.lds.S
+++ b/arch/blackfin/kernel/vmlinux.lds.S
@@ -54,6 +54,7 @@
 		SCHED_TEXT
 #endif
 		LOCK_TEXT
+		IRQENTRY_TEXT
 		KPROBES_TEXT
 		*(.text.*)
 		*(.fixup)
@@ -166,6 +167,20 @@
 	}
 	PERCPU(4)
 	SECURITY_INIT
+
+	/* we have to discard exit text and such at runtime, not link time, to
+	 * handle embedded cross-section references (alt instructions, bug
+	 * table, eh_frame, etc...)
+	 */
+	.exit.text :
+	{
+		EXIT_TEXT
+	}
+	.exit.data :
+	{
+		EXIT_DATA
+	}
+
 	.init.ramfs :
 	{
 		. = ALIGN(4);
@@ -264,8 +279,6 @@
 
 	/DISCARD/ :
 	{
-		EXIT_TEXT
-		EXIT_DATA
 		*(.exitcall.exit)
 	}
 }
diff --git a/arch/blackfin/lib/checksum.c b/arch/blackfin/lib/checksum.c
index 762a7f0..cd605e7 100644
--- a/arch/blackfin/lib/checksum.c
+++ b/arch/blackfin/lib/checksum.c
@@ -116,6 +116,7 @@
 {
 	return (__force __sum16)~do_csum(buff, len);
 }
+EXPORT_SYMBOL(ip_compute_csum);
 
 /*
  * copy from fs while checksumming, otherwise like csum_partial
@@ -130,6 +131,7 @@
 	memcpy(dst, (__force void *)src, len);
 	return csum_partial(dst, len, sum);
 }
+EXPORT_SYMBOL(csum_partial_copy_from_user);
 
 /*
  * copy from ds while checksumming, otherwise like csum_partial
diff --git a/arch/blackfin/mach-bf518/boards/ezbrd.c b/arch/blackfin/mach-bf518/boards/ezbrd.c
index 62bba09..1382f03 100644
--- a/arch/blackfin/mach-bf518/boards/ezbrd.c
+++ b/arch/blackfin/mach-bf518/boards/ezbrd.c
@@ -246,7 +246,7 @@
 		.modalias = "m25p80", /* Name of spi_driver for this device */
 		.max_speed_hz = 25000000,     /* max spi clock (SCK) speed in HZ */
 		.bus_num = 0, /* Framework bus number */
-		.chip_select = 1, /* Framework chip select. On STAMP537 it is SPISSEL1*/
+		.chip_select = 2, /* On BF518F-EZBRD it's SPI0_SSEL2 */
 		.platform_data = &bfin_spi_flash_data,
 		.controller_data = &spi_flash_chip_info,
 		.mode = SPI_MODE_3,
@@ -369,6 +369,11 @@
 	[1] = {
 		.start = CH_SPI0,
 		.end   = CH_SPI0,
+		.flags = IORESOURCE_DMA,
+	},
+	[2] = {
+		.start = IRQ_SPI0,
+		.end   = IRQ_SPI0,
 		.flags = IORESOURCE_IRQ,
 	},
 };
@@ -399,6 +404,11 @@
 	[1] = {
 		.start = CH_SPI1,
 		.end   = CH_SPI1,
+		.flags = IORESOURCE_DMA,
+	},
+	[2] = {
+		.start = IRQ_SPI1,
+		.end   = IRQ_SPI1,
 		.flags = IORESOURCE_IRQ,
 	},
 };
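
The same IORESOURCE_DMA/IORESOURCE_IRQ pair is added to the SPI controller resources of all the Blackfin boards below. A hedged sketch of how a controller driver would typically pick these up at probe time (the probe function here is hypothetical):

	static int sketch_spi_probe(struct platform_device *pdev)
	{
		struct resource *dma_res;
		int irq;

		dma_res = platform_get_resource(pdev, IORESOURCE_DMA, 0);
		irq = platform_get_irq(pdev, 0);
		if (!dma_res || irq < 0)
			return -ENXIO;

		/* dma_res->start and irq now feed request_dma()/request_irq() */
		return 0;
	}
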
diff --git a/arch/blackfin/mach-bf527/boards/cm_bf527.c b/arch/blackfin/mach-bf527/boards/cm_bf527.c
index 6d6f9ef..1eaf27f 100644
--- a/arch/blackfin/mach-bf527/boards/cm_bf527.c
+++ b/arch/blackfin/mach-bf527/boards/cm_bf527.c
@@ -664,6 +664,11 @@
 	[1] = {
 		.start = CH_SPI,
 		.end   = CH_SPI,
+		.flags = IORESOURCE_DMA,
+	},
+	[2] = {
+		.start = IRQ_SPI,
+		.end   = IRQ_SPI,
 		.flags = IORESOURCE_IRQ,
 	},
 };
diff --git a/arch/blackfin/mach-bf527/boards/ezbrd.c b/arch/blackfin/mach-bf527/boards/ezbrd.c
index 1435c5d..9f9c000 100644
--- a/arch/blackfin/mach-bf527/boards/ezbrd.c
+++ b/arch/blackfin/mach-bf527/boards/ezbrd.c
@@ -467,6 +467,11 @@
 	[1] = {
 		.start = CH_SPI,
 		.end   = CH_SPI,
+		.flags = IORESOURCE_DMA,
+	},
+	[2] = {
+		.start = IRQ_SPI,
+		.end   = IRQ_SPI,
 		.flags = IORESOURCE_IRQ,
 	},
 };
diff --git a/arch/blackfin/mach-bf527/boards/ezkit.c b/arch/blackfin/mach-bf527/boards/ezkit.c
index 147edd1..3e5b7db 100644
--- a/arch/blackfin/mach-bf527/boards/ezkit.c
+++ b/arch/blackfin/mach-bf527/boards/ezkit.c
@@ -723,6 +723,11 @@
 	[1] = {
 		.start = CH_SPI,
 		.end   = CH_SPI,
+		.flags = IORESOURCE_DMA,
+	},
+	[2] = {
+		.start = IRQ_SPI,
+		.end   = IRQ_SPI,
 		.flags = IORESOURCE_IRQ,
 	},
 };
diff --git a/arch/blackfin/mach-bf533/boards/H8606.c b/arch/blackfin/mach-bf533/boards/H8606.c
index 895f213..38cf8ff 100644
--- a/arch/blackfin/mach-bf533/boards/H8606.c
+++ b/arch/blackfin/mach-bf533/boards/H8606.c
@@ -266,6 +266,11 @@
 	[1] = {
 		.start = CH_SPI,
 		.end   = CH_SPI,
+		.flags = IORESOURCE_DMA,
+	},
+	[2] = {
+		.start = IRQ_SPI,
+		.end   = IRQ_SPI,
 		.flags = IORESOURCE_IRQ,
 	}
 };
diff --git a/arch/blackfin/mach-bf533/boards/blackstamp.c b/arch/blackfin/mach-bf533/boards/blackstamp.c
index 0765872..9ecdc36 100644
--- a/arch/blackfin/mach-bf533/boards/blackstamp.c
+++ b/arch/blackfin/mach-bf533/boards/blackstamp.c
@@ -162,6 +162,11 @@
 	[1] = {
 		.start = CH_SPI,
 		.end   = CH_SPI,
+		.flags = IORESOURCE_DMA,
+	},
+	[2] = {
+		.start = IRQ_SPI,
+		.end   = IRQ_SPI,
 		.flags = IORESOURCE_IRQ,
 	}
 };
diff --git a/arch/blackfin/mach-bf533/boards/cm_bf533.c b/arch/blackfin/mach-bf533/boards/cm_bf533.c
index a727e53..1443e92 100644
--- a/arch/blackfin/mach-bf533/boards/cm_bf533.c
+++ b/arch/blackfin/mach-bf533/boards/cm_bf533.c
@@ -160,6 +160,11 @@
 	[1] = {
 		.start = CH_SPI,
 		.end   = CH_SPI,
+		.flags = IORESOURCE_DMA,
+	},
+	[2] = {
+		.start = IRQ_SPI,
+		.end   = IRQ_SPI,
 		.flags = IORESOURCE_IRQ,
 	}
 };
diff --git a/arch/blackfin/mach-bf533/boards/ezkit.c b/arch/blackfin/mach-bf533/boards/ezkit.c
index 842f1c9..89a5ec4 100644
--- a/arch/blackfin/mach-bf533/boards/ezkit.c
+++ b/arch/blackfin/mach-bf533/boards/ezkit.c
@@ -196,6 +196,11 @@
 	[1] = {
 		.start = CH_SPI,
 		.end   = CH_SPI,
+		.flags = IORESOURCE_DMA,
+	},
+	[2] = {
+		.start = IRQ_SPI,
+		.end   = IRQ_SPI,
 		.flags = IORESOURCE_IRQ,
 	}
 };
diff --git a/arch/blackfin/mach-bf533/boards/stamp.c b/arch/blackfin/mach-bf533/boards/stamp.c
index e19c565..a68ade8 100644
--- a/arch/blackfin/mach-bf533/boards/stamp.c
+++ b/arch/blackfin/mach-bf533/boards/stamp.c
@@ -299,6 +299,11 @@
 	[1] = {
 		.start = CH_SPI,
 		.end   = CH_SPI,
+		.flags = IORESOURCE_DMA,
+	},
+	[2] = {
+		.start = IRQ_SPI,
+		.end   = IRQ_SPI,
 		.flags = IORESOURCE_IRQ,
 	}
 };
diff --git a/arch/blackfin/mach-bf537/boards/cm_bf537.c b/arch/blackfin/mach-bf537/boards/cm_bf537.c
index 4fee196..2a87d1c 100644
--- a/arch/blackfin/mach-bf537/boards/cm_bf537.c
+++ b/arch/blackfin/mach-bf537/boards/cm_bf537.c
@@ -182,8 +182,13 @@
 	[1] = {
 		.start = CH_SPI,
 		.end   = CH_SPI,
+		.flags = IORESOURCE_DMA,
+	},
+	[2] = {
+		.start = IRQ_SPI,
+		.end   = IRQ_SPI,
 		.flags = IORESOURCE_IRQ,
-	}
+	},
 };
 
 /* SPI controller data */
diff --git a/arch/blackfin/mach-bf537/boards/minotaur.c b/arch/blackfin/mach-bf537/boards/minotaur.c
index 3c15981..399f81d 100644
--- a/arch/blackfin/mach-bf537/boards/minotaur.c
+++ b/arch/blackfin/mach-bf537/boards/minotaur.c
@@ -184,6 +184,11 @@
 	[1] = {
 		.start = CH_SPI,
 		.end   = CH_SPI,
+		.flags = IORESOURCE_DMA,
+	},
+	[2] = {
+		.start = IRQ_SPI,
+		.end   = IRQ_SPI,
 		.flags = IORESOURCE_IRQ,
 	},
 };
diff --git a/arch/blackfin/mach-bf537/boards/pnav10.c b/arch/blackfin/mach-bf537/boards/pnav10.c
index 26707ce..838240f 100644
--- a/arch/blackfin/mach-bf537/boards/pnav10.c
+++ b/arch/blackfin/mach-bf537/boards/pnav10.c
@@ -398,8 +398,13 @@
 	[1] = {
 		.start = CH_SPI,
 		.end   = CH_SPI,
+		.flags = IORESOURCE_DMA,
+	},
+	[2] = {
+		.start = IRQ_SPI,
+		.end   = IRQ_SPI,
 		.flags = IORESOURCE_IRQ,
-	}
+	},
 };
 
 /* SPI controller data */
diff --git a/arch/blackfin/mach-bf537/boards/stamp.c b/arch/blackfin/mach-bf537/boards/stamp.c
index dfb5036..ff7228c 100644
--- a/arch/blackfin/mach-bf537/boards/stamp.c
+++ b/arch/blackfin/mach-bf537/boards/stamp.c
@@ -1345,7 +1345,7 @@
 #if defined(CONFIG_PMIC_ADP5520) || defined(CONFIG_PMIC_ADP5520_MODULE)
 	{
 		I2C_BOARD_INFO("pmic-adp5520", 0x32),
-		.irq = IRQ_PF7,
+		.irq = IRQ_PG0,
 		.platform_data = (void *)&adp5520_pdev_data,
 	},
 #endif
diff --git a/arch/blackfin/mach-bf537/boards/tcm_bf537.c b/arch/blackfin/mach-bf537/boards/tcm_bf537.c
index 2805745..e523e6e 100644
--- a/arch/blackfin/mach-bf537/boards/tcm_bf537.c
+++ b/arch/blackfin/mach-bf537/boards/tcm_bf537.c
@@ -182,6 +182,11 @@
 	[1] = {
 		.start = CH_SPI,
 		.end   = CH_SPI,
+		.flags = IORESOURCE_DMA,
+	},
+	[2] = {
+		.start = IRQ_SPI,
+		.end   = IRQ_SPI,
 		.flags = IORESOURCE_IRQ,
 	}
 };
diff --git a/arch/blackfin/mach-bf538/boards/ezkit.c b/arch/blackfin/mach-bf538/boards/ezkit.c
index e37cb93..57695b4 100644
--- a/arch/blackfin/mach-bf538/boards/ezkit.c
+++ b/arch/blackfin/mach-bf538/boards/ezkit.c
@@ -352,6 +352,11 @@
 	[1] = {
 		.start = CH_SPI0,
 		.end   = CH_SPI0,
+		.flags = IORESOURCE_DMA,
+	},
+	[2] = {
+		.start = IRQ_SPI0,
+		.end   = IRQ_SPI0,
 		.flags = IORESOURCE_IRQ,
 	}
 };
@@ -366,6 +371,11 @@
 	[1] = {
 		.start = CH_SPI1,
 		.end   = CH_SPI1,
+		.flags = IORESOURCE_DMA,
+	},
+	[2] = {
+		.start = IRQ_SPI1,
+		.end   = IRQ_SPI1,
 		.flags = IORESOURCE_IRQ,
 	}
 };
diff --git a/arch/blackfin/mach-bf548/boards/cm_bf548.c b/arch/blackfin/mach-bf548/boards/cm_bf548.c
index f53ad68..f5a3c30 100644
--- a/arch/blackfin/mach-bf548/boards/cm_bf548.c
+++ b/arch/blackfin/mach-bf548/boards/cm_bf548.c
@@ -612,6 +612,11 @@
 	[1] = {
 		.start = CH_SPI0,
 		.end   = CH_SPI0,
+		.flags = IORESOURCE_DMA,
+	},
+	[2] = {
+		.start = IRQ_SPI0,
+		.end   = IRQ_SPI0,
 		.flags = IORESOURCE_IRQ,
 	}
 };
@@ -626,6 +631,11 @@
 	[1] = {
 		.start = CH_SPI1,
 		.end   = CH_SPI1,
+		.flags = IORESOURCE_DMA,
+	},
+	[2] = {
+		.start = IRQ_SPI1,
+		.end   = IRQ_SPI1,
 		.flags = IORESOURCE_IRQ,
 	}
 };
diff --git a/arch/blackfin/mach-bf548/boards/ezkit.c b/arch/blackfin/mach-bf548/boards/ezkit.c
index add5a17..805a57b 100644
--- a/arch/blackfin/mach-bf548/boards/ezkit.c
+++ b/arch/blackfin/mach-bf548/boards/ezkit.c
@@ -396,6 +396,8 @@
 #endif
 
 #if defined(CONFIG_SMSC911X) || defined(CONFIG_SMSC911X_MODULE)
+#include <linux/smsc911x.h>
+
 static struct resource smsc911x_resources[] = {
 	{
 		.name = "smsc911x-memory",
@@ -409,11 +411,22 @@
 		.flags = IORESOURCE_IRQ | IORESOURCE_IRQ_LOWLEVEL,
 	},
 };
+
+static struct smsc911x_platform_config smsc911x_config = {
+	.flags = SMSC911X_USE_32BIT,
+	.irq_polarity = SMSC911X_IRQ_POLARITY_ACTIVE_LOW,
+	.irq_type = SMSC911X_IRQ_TYPE_OPEN_DRAIN,
+	.phy_interface = PHY_INTERFACE_MODE_MII,
+};
+
 static struct platform_device smsc911x_device = {
 	.name = "smsc911x",
 	.id = 0,
 	.num_resources = ARRAY_SIZE(smsc911x_resources),
 	.resource = smsc911x_resources,
+	.dev = {
+		.platform_data = &smsc911x_config,
+	},
 };
 #endif
 
@@ -741,6 +754,11 @@
 	[1] = {
 		.start = CH_SPI0,
 		.end   = CH_SPI0,
+		.flags = IORESOURCE_DMA,
+	},
+	[2] = {
+		.start = IRQ_SPI0,
+		.end   = IRQ_SPI0,
 		.flags = IORESOURCE_IRQ,
 	}
 };
@@ -755,6 +773,11 @@
 	[1] = {
 		.start = CH_SPI1,
 		.end   = CH_SPI1,
+		.flags = IORESOURCE_DMA,
+	},
+	[2] = {
+		.start = IRQ_SPI1,
+		.end   = IRQ_SPI1,
 		.flags = IORESOURCE_IRQ,
 	}
 };
diff --git a/arch/blackfin/mach-bf561/boards/cm_bf561.c b/arch/blackfin/mach-bf561/boards/cm_bf561.c
index 0dd9685..0c9d72c 100644
--- a/arch/blackfin/mach-bf561/boards/cm_bf561.c
+++ b/arch/blackfin/mach-bf561/boards/cm_bf561.c
@@ -177,8 +177,13 @@
 	[1] = {
 		.start = CH_SPI,
 		.end   = CH_SPI,
+		.flags = IORESOURCE_DMA,
+	},
+	[2] = {
+		.start = IRQ_SPI,
+		.end   = IRQ_SPI,
 		.flags = IORESOURCE_IRQ,
-	}
+	},
 };
 
 /* SPI controller data */
diff --git a/arch/blackfin/mach-bf561/boards/ezkit.c b/arch/blackfin/mach-bf561/boards/ezkit.c
index 0e2178a..b5ef7ff 100644
--- a/arch/blackfin/mach-bf561/boards/ezkit.c
+++ b/arch/blackfin/mach-bf561/boards/ezkit.c
@@ -304,6 +304,11 @@
 	[1] = {
 		.start = CH_SPI,
 		.end   = CH_SPI,
+		.flags = IORESOURCE_DMA,
+	},
+	[2] = {
+		.start = IRQ_SPI,
+		.end   = IRQ_SPI,
 		.flags = IORESOURCE_IRQ,
 	}
 };
diff --git a/arch/blackfin/mach-common/cache-c.c b/arch/blackfin/mach-common/cache-c.c
index e6ab1f8..b59ce3c 100644
--- a/arch/blackfin/mach-common/cache-c.c
+++ b/arch/blackfin/mach-common/cache-c.c
@@ -16,9 +16,21 @@
 void blackfin_invalidate_entire_dcache(void)
 {
 	u32 dmem = bfin_read_DMEM_CONTROL();
-	SSYNC();
 	bfin_write_DMEM_CONTROL(dmem & ~0xc);
 	SSYNC();
 	bfin_write_DMEM_CONTROL(dmem);
 	SSYNC();
 }
+
+/* Invalidate the entire instruction cache by
+ * clearing the IMC bit
+ */
+void blackfin_invalidate_entire_icache(void)
+{
+	u32 imem = bfin_read_IMEM_CONTROL();
+	bfin_write_IMEM_CONTROL(imem & ~0x4);
+	SSYNC();
+	bfin_write_IMEM_CONTROL(imem);
+	SSYNC();
+}
+
diff --git a/arch/blackfin/mach-common/entry.S b/arch/blackfin/mach-common/entry.S
index da0558a..31fa313 100644
--- a/arch/blackfin/mach-common/entry.S
+++ b/arch/blackfin/mach-common/entry.S
@@ -42,6 +42,7 @@
 #include <asm/thread_info.h>  /* TIF_NEED_RESCHED */
 #include <asm/asm-offsets.h>
 #include <asm/trace.h>
+#include <asm/traps.h>
 
 #include <asm/context.S>
 
@@ -84,13 +85,15 @@
 	if !cc jump _bfin_return_from_exception;
 	/* fall through */
 	R7 = P4;
-	R6 = 0x26;	/* Data CPLB Miss */
+	R6 = VEC_CPLB_M;	/* Data CPLB Miss */
 	cc = R6 == R7;
 	if cc jump _ex_dcplb_miss (BP);
-	R6 = 0x23;	/* Data CPLB Miss */
+#ifdef CONFIG_MPU
+	R6 = VEC_CPLB_VL;	/* Data CPLB Violation */
 	cc = R6 == R7;
 	if cc jump _ex_dcplb_viol (BP);
-	/* Handle 0x23 Data CPLB Protection Violation
+#endif
+	/* Handle Data CPLB Protection Violation
 	 * and Data CPLB Multiple Hits - Linux Trap Zero
 	 */
 	jump _ex_trap_c;
@@ -270,7 +273,7 @@
 	r6.l = lo(SEQSTAT_EXCAUSE);
 	r6.h = hi(SEQSTAT_EXCAUSE);
 	r7 = r7 & r6;
-	r6 = 0x25;
+	r6 = VEC_UNCOV;
 	CC = R7 == R6;
 	if CC JUMP _double_fault;
 #endif
@@ -1605,6 +1608,7 @@
 	.long _sys_inotify_init1	/* 365 */
 	.long _sys_preadv
 	.long _sys_pwritev
+	.long _sys_rt_tgsigqueueinfo
 
 	.rept NR_syscalls-(.-_sys_call_table)/4
 	.long _sys_ni_syscall
diff --git a/arch/blackfin/mach-common/smp.c b/arch/blackfin/mach-common/smp.c
index 3b8ebae..6184005 100644
--- a/arch/blackfin/mach-common/smp.c
+++ b/arch/blackfin/mach-common/smp.c
@@ -144,7 +144,7 @@
 
 static irqreturn_t ipi_handler(int irq, void *dev_instance)
 {
-	struct ipi_message *msg, *mg;
+	struct ipi_message *msg;
 	struct ipi_message_queue *msg_queue;
 	unsigned int cpu = smp_processor_id();
 
@@ -154,7 +154,8 @@
 	msg_queue->count++;
 
 	spin_lock(&msg_queue->lock);
-	list_for_each_entry_safe(msg, mg, &msg_queue->head, list) {
+	while (!list_empty(&msg_queue->head)) {
+		msg = list_entry(msg_queue->head.next, typeof(*msg), list);
 		list_del(&msg->list);
 		switch (msg->type) {
 		case BFIN_IPI_RESCHEDULE:
@@ -221,7 +222,7 @@
 	for_each_cpu_mask(cpu, callmap) {
 		msg_queue = &per_cpu(ipi_msg_queue, cpu);
 		spin_lock_irqsave(&msg_queue->lock, flags);
-		list_add(&msg->list, &msg_queue->head);
+		list_add_tail(&msg->list, &msg_queue->head);
 		spin_unlock_irqrestore(&msg_queue->lock, flags);
 		platform_send_ipi_cpu(cpu);
 	}
@@ -261,7 +262,7 @@
 
 	msg_queue = &per_cpu(ipi_msg_queue, cpu);
 	spin_lock_irqsave(&msg_queue->lock, flags);
-	list_add(&msg->list, &msg_queue->head);
+	list_add_tail(&msg->list, &msg_queue->head);
 	spin_unlock_irqrestore(&msg_queue->lock, flags);
 	platform_send_ipi_cpu(cpu);
 
@@ -292,7 +293,7 @@
 
 	msg_queue = &per_cpu(ipi_msg_queue, cpu);
 	spin_lock_irqsave(&msg_queue->lock, flags);
-	list_add(&msg->list, &msg_queue->head);
+	list_add_tail(&msg->list, &msg_queue->head);
 	spin_unlock_irqrestore(&msg_queue->lock, flags);
 	platform_send_ipi_cpu(cpu);
 
@@ -320,7 +321,7 @@
 	for_each_cpu_mask(cpu, callmap) {
 		msg_queue = &per_cpu(ipi_msg_queue, cpu);
 		spin_lock_irqsave(&msg_queue->lock, flags);
-		list_add(&msg->list, &msg_queue->head);
+		list_add_tail(&msg->list, &msg_queue->head);
 		spin_unlock_irqrestore(&msg_queue->lock, flags);
 		platform_send_ipi_cpu(cpu);
 	}
@@ -468,6 +469,17 @@
 }
 EXPORT_SYMBOL_GPL(smp_icache_flush_range_others);
 
+#ifdef __ARCH_SYNC_CORE_ICACHE
+void resync_core_icache(void)
+{
+	unsigned int cpu = get_cpu();
+	blackfin_invalidate_entire_icache();
+	++per_cpu(cpu_data, cpu).icache_invld_count;
+	put_cpu();
+}
+EXPORT_SYMBOL(resync_core_icache);
+#endif
+
 #ifdef __ARCH_SYNC_CORE_DCACHE
 unsigned long barrier_mask __attribute__ ((__section__(".l2.bss")));
 
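
The list_add() to list_add_tail() changes above make the per-CPU IPI message queue FIFO; since ipi_handler() always pops from the head, tail insertion processes messages in arrival order. A self-contained sketch of the pattern, with hypothetical names:

	struct sketch_msg {
		struct list_head list;
		unsigned long type;
	};
	static LIST_HEAD(sketch_queue);

	static void sketch_enqueue(struct sketch_msg *msg)
	{
		/* list_add() would insert at the head (newest first, LIFO);
		 * list_add_tail() preserves arrival order (FIFO), which is
		 * what a message queue wants: */
		list_add_tail(&msg->list, &sketch_queue);
	}

	static struct sketch_msg *sketch_dequeue(void)
	{
		struct sketch_msg *msg;

		if (list_empty(&sketch_queue))
			return NULL;
		msg = list_entry(sketch_queue.next, struct sketch_msg, list);
		list_del(&msg->list);
		return msg;
	}
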
diff --git a/arch/mips/configs/bigsur_defconfig b/arch/mips/configs/bigsur_defconfig
index 783da85..d6d35b2 100644
--- a/arch/mips/configs/bigsur_defconfig
+++ b/arch/mips/configs/bigsur_defconfig
@@ -963,7 +963,7 @@
 CONFIG_SENSORS_PCF8574=y
 # CONFIG_PCF8575 is not set
 CONFIG_SENSORS_PCF8591=y
-CONFIG_SENSORS_MAX6875=y
+CONFIG_EEPROM_MAX6875=y
 # CONFIG_SENSORS_TSL2550 is not set
 CONFIG_I2C_DEBUG_CORE=y
 CONFIG_I2C_DEBUG_ALGO=y
diff --git a/arch/mips/configs/mtx1_defconfig b/arch/mips/configs/mtx1_defconfig
index 8426d3b..fadb351 100644
--- a/arch/mips/configs/mtx1_defconfig
+++ b/arch/mips/configs/mtx1_defconfig
@@ -1849,7 +1849,7 @@
 CONFIG_SENSORS_PCF8574=m
 CONFIG_SENSORS_PCA9539=m
 CONFIG_SENSORS_PCF8591=m
-CONFIG_SENSORS_MAX6875=m
+CONFIG_EEPROM_MAX6875=m
 # CONFIG_SENSORS_TSL2550 is not set
 # CONFIG_I2C_DEBUG_CORE is not set
 # CONFIG_I2C_DEBUG_ALGO is not set
diff --git a/arch/mips/sni/eisa.c b/arch/mips/sni/eisa.c
index 7396cd7..6827feb 100644
--- a/arch/mips/sni/eisa.c
+++ b/arch/mips/sni/eisa.c
@@ -38,7 +38,7 @@
 	if (!r)
 		return r;
 
-	eisa_root_dev.dev.driver_data = &eisa_bus_root;
+	dev_set_drvdata(&eisa_root_dev.dev, &eisa_bus_root);
 
 	if (eisa_root_register(&eisa_bus_root)) {
 		/* A real bridge may have been registered before
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 93a6189..9fb344d 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -93,10 +93,6 @@
 	bool
 	default y
 
-config GENERIC_CALIBRATE_DELAY
-	bool
-	default y
-
 config GENERIC_FIND_NEXT_BIT
 	bool
 	default y
@@ -129,6 +125,7 @@
 	select USE_GENERIC_SMP_HELPERS if SMP
 	select HAVE_OPROFILE
 	select HAVE_SYSCALL_WRAPPERS if PPC64
+	select GENERIC_ATOMIC64 if PPC32
 
 config EARLY_PRINTK
 	bool
diff --git a/arch/powerpc/boot/install.sh b/arch/powerpc/boot/install.sh
index 51b2387..98312d1 100644
--- a/arch/powerpc/boot/install.sh
+++ b/arch/powerpc/boot/install.sh
@@ -18,6 +18,9 @@
 #   $5 and more - kernel boot files; zImage*, uImage, cuImage.*, etc.
 #
 
+# Bail with error code if anything goes wrong
+set -e
+
 # User may have a custom install script
 
 if [ -x ~/bin/${CROSS_COMPILE}installkernel ]; then exec ~/bin/${CROSS_COMPILE}installkernel "$@"; fi
diff --git a/arch/powerpc/configs/ppc6xx_defconfig b/arch/powerpc/configs/ppc6xx_defconfig
index 7d044df..12dc7c4 100644
--- a/arch/powerpc/configs/ppc6xx_defconfig
+++ b/arch/powerpc/configs/ppc6xx_defconfig
@@ -1808,7 +1808,7 @@
 CONFIG_SENSORS_PCA9539=m
 CONFIG_SENSORS_PCF8591=m
 # CONFIG_TPS65010 is not set
-CONFIG_SENSORS_MAX6875=m
+CONFIG_EEPROM_MAX6875=m
 CONFIG_SENSORS_TSL2550=m
 CONFIG_MCU_MPC8349EMITX=m
 # CONFIG_I2C_DEBUG_CORE is not set
diff --git a/arch/powerpc/include/asm/atomic.h b/arch/powerpc/include/asm/atomic.h
index b7d2d07..4012483 100644
--- a/arch/powerpc/include/asm/atomic.h
+++ b/arch/powerpc/include/asm/atomic.h
@@ -470,6 +470,9 @@
 
 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
 
+#else  /* __powerpc64__ */
+#include <asm-generic/atomic64.h>
+
 #endif /* __powerpc64__ */
 
 #include <asm-generic/atomic-long.h>
diff --git a/arch/powerpc/include/asm/hw_irq.h b/arch/powerpc/include/asm/hw_irq.h
index 5351237..b7f8f4a 100644
--- a/arch/powerpc/include/asm/hw_irq.h
+++ b/arch/powerpc/include/asm/hw_irq.h
@@ -80,7 +80,7 @@
 	__asm__ __volatile__("wrteei 0": : :"memory");
 #else
 	unsigned long msr;
-	__asm__ __volatile__("": : :"memory");
+
 	msr = mfmsr();
 	SET_MSR_EE(msr & ~MSR_EE);
 #endif
@@ -92,7 +92,7 @@
 	__asm__ __volatile__("wrteei 1": : :"memory");
 #else
 	unsigned long msr;
-	__asm__ __volatile__("": : :"memory");
+
 	msr = mfmsr();
 	SET_MSR_EE(msr | MSR_EE);
 #endif
@@ -108,7 +108,6 @@
 #else
 	SET_MSR_EE(msr & ~MSR_EE);
 #endif
-	__asm__ __volatile__("": : :"memory");
 }
 
 #define local_save_flags(flags)	((flags) = mfmsr())
diff --git a/arch/powerpc/include/asm/iommu.h b/arch/powerpc/include/asm/iommu.h
index 7464c0d..7ead7c1 100644
--- a/arch/powerpc/include/asm/iommu.h
+++ b/arch/powerpc/include/asm/iommu.h
@@ -35,6 +35,16 @@
 #define IOMMU_PAGE_MASK       (~((1 << IOMMU_PAGE_SHIFT) - 1))
 #define IOMMU_PAGE_ALIGN(addr) _ALIGN_UP(addr, IOMMU_PAGE_SIZE)
 
+/* Cell page table entries */
+#define CBE_IOPTE_PP_W		0x8000000000000000ul /* protection: write */
+#define CBE_IOPTE_PP_R		0x4000000000000000ul /* protection: read */
+#define CBE_IOPTE_M		0x2000000000000000ul /* coherency required */
+#define CBE_IOPTE_SO_R		0x1000000000000000ul /* ordering: writes */
+#define CBE_IOPTE_SO_RW		0x1800000000000000ul /* ordering: r & w */
+#define CBE_IOPTE_RPN_Mask	0x07fffffffffff000ul /* RPN */
+#define CBE_IOPTE_H		0x0000000000000800ul /* cache hint */
+#define CBE_IOPTE_IOID_Mask	0x00000000000007fful /* ioid */
+
 /* Boot time flags */
 extern int iommu_is_off;
 extern int iommu_force_on;
diff --git a/arch/powerpc/include/asm/ps3.h b/arch/powerpc/include/asm/ps3.h
index cdb6fd8..7f065e1 100644
--- a/arch/powerpc/include/asm/ps3.h
+++ b/arch/powerpc/include/asm/ps3.h
@@ -53,6 +53,13 @@
 extern u64 ps3_os_area_get_rtc_diff(void);
 extern void ps3_os_area_set_rtc_diff(u64 rtc_diff);
 
+struct ps3_os_area_flash_ops {
+	ssize_t (*read)(void *buf, size_t count, loff_t pos);
+	ssize_t (*write)(const void *buf, size_t count, loff_t pos);
+};
+
+extern void ps3_os_area_flash_register(const struct ps3_os_area_flash_ops *ops);
+
 /* dma routines */
 
 enum ps3_dma_page_size {
@@ -418,15 +425,15 @@
  * @data: Data to set
  */
 
-static inline void ps3_system_bus_set_driver_data(
+static inline void ps3_system_bus_set_drvdata(
 	struct ps3_system_bus_device *dev, void *data)
 {
-	dev->core.driver_data = data;
+	dev_set_drvdata(&dev->core, data);
 }
-static inline void *ps3_system_bus_get_driver_data(
+static inline void *ps3_system_bus_get_drvdata(
 	struct ps3_system_bus_device *dev)
 {
-	return dev->core.driver_data;
+	return dev_get_drvdata(&dev->core);
 }
 
 /* These two need global scope for get_dma_ops(). */
@@ -520,7 +527,4 @@
 u32 ps3_get_hw_thread_id(int cpu);
 u64 ps3_get_spe_id(void *arg);
 
-/* mutex synchronizing GPU accesses and video mode changes */
-extern struct mutex ps3_gpu_mutex;
-
 #endif
diff --git a/arch/powerpc/include/asm/ps3gpu.h b/arch/powerpc/include/asm/ps3gpu.h
new file mode 100644
index 0000000..b2b8959
--- /dev/null
+++ b/arch/powerpc/include/asm/ps3gpu.h
@@ -0,0 +1,86 @@
+/*
+ *  PS3 GPU declarations.
+ *
+ *  Copyright 2009 Sony Corporation
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; version 2 of the License.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program.
+ *  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _ASM_POWERPC_PS3GPU_H
+#define _ASM_POWERPC_PS3GPU_H
+
+#include <linux/mutex.h>
+
+#include <asm/lv1call.h>
+
+
+#define L1GPU_CONTEXT_ATTRIBUTE_DISPLAY_SYNC	0x101
+#define L1GPU_CONTEXT_ATTRIBUTE_DISPLAY_FLIP	0x102
+
+#define L1GPU_CONTEXT_ATTRIBUTE_FB_SETUP	0x600
+#define L1GPU_CONTEXT_ATTRIBUTE_FB_BLIT		0x601
+#define L1GPU_CONTEXT_ATTRIBUTE_FB_BLIT_SYNC	0x602
+#define L1GPU_CONTEXT_ATTRIBUTE_FB_CLOSE	0x603
+
+#define L1GPU_FB_BLIT_WAIT_FOR_COMPLETION	(1ULL << 32)
+
+#define L1GPU_DISPLAY_SYNC_HSYNC		1
+#define L1GPU_DISPLAY_SYNC_VSYNC		2
+
+
+/* mutex synchronizing GPU accesses and video mode changes */
+extern struct mutex ps3_gpu_mutex;
+
+
+static inline int lv1_gpu_display_sync(u64 context_handle, u64 head,
+				       u64 ddr_offset)
+{
+	return lv1_gpu_context_attribute(context_handle,
+					 L1GPU_CONTEXT_ATTRIBUTE_DISPLAY_SYNC,
+					 head, ddr_offset, 0, 0);
+}
+
+static inline int lv1_gpu_display_flip(u64 context_handle, u64 head,
+				       u64 ddr_offset)
+{
+	return lv1_gpu_context_attribute(context_handle,
+					 L1GPU_CONTEXT_ATTRIBUTE_DISPLAY_FLIP,
+					 head, ddr_offset, 0, 0);
+}
+
+static inline int lv1_gpu_fb_setup(u64 context_handle, u64 xdr_lpar,
+				   u64 xdr_size, u64 ioif_offset)
+{
+	return lv1_gpu_context_attribute(context_handle,
+					 L1GPU_CONTEXT_ATTRIBUTE_FB_SETUP,
+					 xdr_lpar, xdr_size, ioif_offset, 0);
+}
+
+static inline int lv1_gpu_fb_blit(u64 context_handle, u64 ddr_offset,
+				  u64 ioif_offset, u64 sync_width, u64 pitch)
+{
+	return lv1_gpu_context_attribute(context_handle,
+					 L1GPU_CONTEXT_ATTRIBUTE_FB_BLIT,
+					 ddr_offset, ioif_offset, sync_width,
+					 pitch);
+}
+
+static inline int lv1_gpu_fb_close(u64 context_handle)
+{
+	return lv1_gpu_context_attribute(context_handle,
+					 L1GPU_CONTEXT_ATTRIBUTE_FB_CLOSE, 0,
+					 0, 0, 0);
+}
+
+#endif /* _ASM_POWERPC_PS3GPU_H */
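
A hedged example of calling one of the wrappers above; per the comment on ps3_gpu_mutex, GPU access is serialized with that mutex (the function name and head index are made up for illustration):

	static int sketch_flip_head0(u64 context_handle, u64 ddr_offset)
	{
		int status;

		mutex_lock(&ps3_gpu_mutex);
		status = lv1_gpu_display_flip(context_handle, 0 /* head */,
					      ddr_offset);
		mutex_unlock(&ps3_gpu_mutex);

		return status;
	}
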
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index fb359b0..a3c28e4 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
@@ -745,11 +745,11 @@
 			asm volatile("mfmsr %0" : "=r" (rval)); rval;})
 #ifdef CONFIG_PPC64
 #define __mtmsrd(v, l)	asm volatile("mtmsrd %0," __stringify(l) \
-				     : : "r" (v))
+				     : : "r" (v) : "memory")
 #define mtmsrd(v)	__mtmsrd((v), 0)
 #define mtmsr(v)	mtmsrd(v)
 #else
-#define mtmsr(v)	asm volatile("mtmsr %0" : : "r" (v))
+#define mtmsr(v)	asm volatile("mtmsr %0" : : "r" (v) : "memory")
 #endif
 
 #define mfspr(rn)	({unsigned long rval; \
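
The "memory" clobber added here makes mtmsr()/mtmsrd() act as a compiler barrier, which is presumably why the explicit empty asm barriers could be dropped from hw_irq.h in the earlier hunk. A minimal sketch of what the clobber guarantees (illustrative PPC32-only code, hypothetical variable):

	extern int critical_data;

	static inline void sketch_leave_critical(unsigned long msr_with_ee)
	{
		critical_data = 1;	/* must not be moved below ...      */
		asm volatile("mtmsr %0" : : "r" (msr_with_ee) : "memory");
					/* ... the write that re-enables EE */
	}
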
diff --git a/arch/powerpc/include/asm/systbl.h b/arch/powerpc/include/asm/systbl.h
index a0b92de..370600ca 100644
--- a/arch/powerpc/include/asm/systbl.h
+++ b/arch/powerpc/include/asm/systbl.h
@@ -325,3 +325,4 @@
 SYSCALL_SPU(perf_counter_open)
 COMPAT_SYS_SPU(preadv)
 COMPAT_SYS_SPU(pwritev)
+COMPAT_SYS(rt_tgsigqueueinfo)
diff --git a/arch/powerpc/include/asm/unistd.h b/arch/powerpc/include/asm/unistd.h
index 4badac2..cef080b 100644
--- a/arch/powerpc/include/asm/unistd.h
+++ b/arch/powerpc/include/asm/unistd.h
@@ -344,10 +344,11 @@
 #define __NR_perf_counter_open	319
 #define __NR_preadv		320
 #define __NR_pwritev		321
+#define __NR_rt_tgsigqueueinfo	322
 
 #ifdef __KERNEL__
 
-#define __NR_syscalls		322
+#define __NR_syscalls		323
 
 #define __NR__exit __NR_exit
 #define NR_syscalls	__NR_syscalls
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
index a7def5f..612b0c4 100644
--- a/arch/powerpc/kernel/Makefile
+++ b/arch/powerpc/kernel/Makefile
@@ -125,6 +125,7 @@
 systbl_chk: $(src)/systbl_chk.sh $(obj)/systbl_chk.i
 	$(call cmd,systbl_chk)
 
+ifeq ($(CONFIG_PPC_OF_BOOT_TRAMPOLINE),y)
 $(obj)/built-in.o:		prom_init_check
 
 quiet_cmd_prom_init_check = CALL    $<
@@ -133,5 +134,6 @@
 PHONY += prom_init_check
 prom_init_check: $(src)/prom_init_check.sh $(obj)/prom_init.o
 	$(call cmd,prom_init_check)
+endif
 
 clean-files := vmlinux.lds
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index f46548e..1f68160 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -424,8 +424,8 @@
 	printk("htab_hash_mask                = 0x%lx\n", htab_hash_mask);
 #endif /* CONFIG_PPC_STD_MMU_64 */
 	if (PHYSICAL_START > 0)
-		printk("physical_start                = 0x%lx\n",
-		       PHYSICAL_START);
+		printk("physical_start                = 0x%llx\n",
+		       (unsigned long long)PHYSICAL_START);
 	printk("-----------------------------------------------------\n");
 
 	DBG(" <- setup_system()\n");
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index bee1443..15391c2 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -52,6 +52,7 @@
 #include <linux/jiffies.h>
 #include <linux/posix-timers.h>
 #include <linux/irq.h>
+#include <linux/delay.h>
 
 #include <asm/io.h>
 #include <asm/processor.h>
@@ -1143,6 +1144,15 @@
 
 }
 
+/* We don't need to calibrate delay; we use the CPU timebase for that */
+void calibrate_delay(void)
+{
+	/* Some generic code (such as spinlock debug) uses loops_per_jiffy
+	 * as the number of __delay(1) calls in a jiffy, so make it so
+	 */
+	loops_per_jiffy = tb_ticks_per_jiffy;
+}
+
 static int __init rtc_init(void)
 {
 	struct platform_device *pdev;
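
Because loops_per_jiffy is taken straight from the timebase, the __delay()-based helpers keep their meaning; for instance, a busy-wait of roughly one jiffy is simply (illustrative only):

	static void sketch_spin_one_jiffy(void)
	{
		__delay(loops_per_jiffy);	/* ~1 jiffy, per the comment above */
	}
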
diff --git a/arch/powerpc/platforms/cell/axon_msi.c b/arch/powerpc/platforms/cell/axon_msi.c
index 0ce45c2..c71498d 100644
--- a/arch/powerpc/platforms/cell/axon_msi.c
+++ b/arch/powerpc/platforms/cell/axon_msi.c
@@ -329,7 +329,7 @@
 
 static int axon_msi_shutdown(struct of_device *device)
 {
-	struct axon_msic *msic = device->dev.platform_data;
+	struct axon_msic *msic = dev_get_drvdata(&device->dev);
 	u32 tmp;
 
 	pr_debug("axon_msi: disabling %s\n",
@@ -416,7 +416,7 @@
 	msic->read_offset = dcr_read(msic->dcr_host, MSIC_WRITE_OFFSET_REG)
 				& MSIC_FIFO_SIZE_MASK;
 
-	device->dev.platform_data = msic;
+	dev_set_drvdata(&device->dev, msic);
 
 	ppc_md.setup_msi_irqs = axon_msi_setup_msi_irqs;
 	ppc_md.teardown_msi_irqs = axon_msi_teardown_msi_irqs;
diff --git a/arch/powerpc/platforms/cell/iommu.c b/arch/powerpc/platforms/cell/iommu.c
index bed4690..5b34fc2 100644
--- a/arch/powerpc/platforms/cell/iommu.c
+++ b/arch/powerpc/platforms/cell/iommu.c
@@ -100,16 +100,6 @@
 #define IOSTE_PS_1M		0x0000000000000005ul /*   - 1MB  */
 #define IOSTE_PS_16M		0x0000000000000007ul /*   - 16MB */
 
-/* Page table entries */
-#define IOPTE_PP_W		0x8000000000000000ul /* protection: write */
-#define IOPTE_PP_R		0x4000000000000000ul /* protection: read */
-#define IOPTE_M			0x2000000000000000ul /* coherency required */
-#define IOPTE_SO_R		0x1000000000000000ul /* ordering: writes */
-#define IOPTE_SO_RW             0x1800000000000000ul /* ordering: r & w */
-#define IOPTE_RPN_Mask		0x07fffffffffff000ul /* RPN */
-#define IOPTE_H			0x0000000000000800ul /* cache hint */
-#define IOPTE_IOID_Mask		0x00000000000007fful /* ioid */
-
 
 /* IOMMU sizing */
 #define IO_SEGMENT_SHIFT	28
@@ -193,19 +183,21 @@
 	 */
 	const unsigned long prot = 0xc48;
 	base_pte =
-		((prot << (52 + 4 * direction)) & (IOPTE_PP_W | IOPTE_PP_R))
-		| IOPTE_M | IOPTE_SO_RW | (window->ioid & IOPTE_IOID_Mask);
+		((prot << (52 + 4 * direction)) &
+		 (CBE_IOPTE_PP_W | CBE_IOPTE_PP_R)) |
+		CBE_IOPTE_M | CBE_IOPTE_SO_RW |
+		(window->ioid & CBE_IOPTE_IOID_Mask);
 #else
-	base_pte = IOPTE_PP_W | IOPTE_PP_R | IOPTE_M | IOPTE_SO_RW |
-		(window->ioid & IOPTE_IOID_Mask);
+	base_pte = CBE_IOPTE_PP_W | CBE_IOPTE_PP_R | CBE_IOPTE_M |
+		CBE_IOPTE_SO_RW | (window->ioid & CBE_IOPTE_IOID_Mask);
 #endif
 	if (unlikely(dma_get_attr(DMA_ATTR_WEAK_ORDERING, attrs)))
-		base_pte &= ~IOPTE_SO_RW;
+		base_pte &= ~CBE_IOPTE_SO_RW;
 
 	io_pte = (unsigned long *)tbl->it_base + (index - tbl->it_offset);
 
 	for (i = 0; i < npages; i++, uaddr += IOMMU_PAGE_SIZE)
-		io_pte[i] = base_pte | (__pa(uaddr) & IOPTE_RPN_Mask);
+		io_pte[i] = base_pte | (__pa(uaddr) & CBE_IOPTE_RPN_Mask);
 
 	mb();
 
@@ -231,8 +223,9 @@
 #else
 	/* spider bridge does PCI reads after freeing - insert a mapping
 	 * to a scratch page instead of an invalid entry */
-	pte = IOPTE_PP_R | IOPTE_M | IOPTE_SO_RW | __pa(window->iommu->pad_page)
-		| (window->ioid & IOPTE_IOID_Mask);
+	pte = CBE_IOPTE_PP_R | CBE_IOPTE_M | CBE_IOPTE_SO_RW |
+		__pa(window->iommu->pad_page) |
+		(window->ioid & CBE_IOPTE_IOID_Mask);
 #endif
 
 	io_pte = (unsigned long *)tbl->it_base + (index - tbl->it_offset);
@@ -1001,7 +994,7 @@
 	pr_debug("iommu: addr %lx ptab %p segment %lx offset %lx\n",
 		  addr, ptab, segment, offset);
 
-	ptab[offset] = base_pte | (__pa(addr) & IOPTE_RPN_Mask);
+	ptab[offset] = base_pte | (__pa(addr) & CBE_IOPTE_RPN_Mask);
 }
 
 static void cell_iommu_setup_fixed_ptab(struct cbe_iommu *iommu,
@@ -1016,14 +1009,14 @@
 
 	pr_debug("iommu: mapping 0x%lx pages from 0x%lx\n", fsize, fbase);
 
-	base_pte = IOPTE_PP_W | IOPTE_PP_R | IOPTE_M
-		    | (cell_iommu_get_ioid(np) & IOPTE_IOID_Mask);
+	base_pte = CBE_IOPTE_PP_W | CBE_IOPTE_PP_R | CBE_IOPTE_M |
+		(cell_iommu_get_ioid(np) & CBE_IOPTE_IOID_Mask);
 
 	if (iommu_fixed_is_weak)
 		pr_info("IOMMU: Using weak ordering for fixed mapping\n");
 	else {
 		pr_info("IOMMU: Using strong ordering for fixed mapping\n");
-		base_pte |= IOPTE_SO_RW;
+		base_pte |= CBE_IOPTE_SO_RW;
 	}
 
 	for (uaddr = 0; uaddr < fsize; uaddr += (1 << 24)) {
diff --git a/arch/powerpc/platforms/iseries/dt.c b/arch/powerpc/platforms/iseries/dt.c
index 4543c4b..c5a87a7 100644
--- a/arch/powerpc/platforms/iseries/dt.c
+++ b/arch/powerpc/platforms/iseries/dt.c
@@ -204,7 +204,8 @@
 	dt_prop(dt, name, &data, sizeof(u32));
 }
 
-static void __init dt_prop_u64(struct iseries_flat_dt *dt, const char *name,
+static void __init __maybe_unused dt_prop_u64(struct iseries_flat_dt *dt,
+					      const char *name,
 		u64 data)
 {
 	dt_prop(dt, name, &data, sizeof(u64));
diff --git a/arch/powerpc/platforms/iseries/mf.c b/arch/powerpc/platforms/iseries/mf.c
index 3689c24..fef4d51 100644
--- a/arch/powerpc/platforms/iseries/mf.c
+++ b/arch/powerpc/platforms/iseries/mf.c
@@ -267,7 +267,8 @@
 	return ev;
 }
 
-static int signal_vsp_instruction(struct vsp_cmd_data *vsp_cmd)
+static int __maybe_unused
+signal_vsp_instruction(struct vsp_cmd_data *vsp_cmd)
 {
 	struct pending_event *ev = new_pending_event();
 	int rc;
diff --git a/arch/powerpc/platforms/ps3/mm.c b/arch/powerpc/platforms/ps3/mm.c
index 9a2b6d9..846eb8b 100644
--- a/arch/powerpc/platforms/ps3/mm.c
+++ b/arch/powerpc/platforms/ps3/mm.c
@@ -24,6 +24,7 @@
 #include <linux/lmb.h>
 
 #include <asm/firmware.h>
+#include <asm/iommu.h>
 #include <asm/prom.h>
 #include <asm/udbg.h>
 #include <asm/lv1call.h>
@@ -605,9 +606,8 @@
 				       r->ioid,
 				       iopte_flag);
 		if (result) {
-			printk(KERN_WARNING "%s:%d: lv1_map_device_dma_region "
-				"failed: %s\n", __func__, __LINE__,
-				ps3_result(result));
+			pr_warning("%s:%d: lv1_map_device_dma_region failed: %s\n",
+				   __func__, __LINE__, ps3_result(result));
 			goto fail_map;
 		}
 		DBG("%s: pg=%d bus=%#lx, lpar=%#lx, ioid=%#x\n", __func__,
@@ -1001,7 +1001,8 @@
 		if (len > r->len)
 			len = r->len;
 		result = dma_sb_map_area(r, virt_addr, len, &tmp,
-			IOPTE_PP_W | IOPTE_PP_R | IOPTE_SO_RW | IOPTE_M);
+			CBE_IOPTE_PP_W | CBE_IOPTE_PP_R | CBE_IOPTE_SO_RW |
+			CBE_IOPTE_M);
 		BUG_ON(result);
 	}
 
@@ -1014,7 +1015,8 @@
 		else
 			len -= map.rm.size - r->offset;
 		result = dma_sb_map_area(r, virt_addr, len, &tmp,
-			IOPTE_PP_W | IOPTE_PP_R | IOPTE_SO_RW | IOPTE_M);
+			CBE_IOPTE_PP_W | CBE_IOPTE_PP_R | CBE_IOPTE_SO_RW |
+			CBE_IOPTE_M);
 		BUG_ON(result);
 	}
 
diff --git a/arch/powerpc/platforms/ps3/os-area.c b/arch/powerpc/platforms/ps3/os-area.c
index cf1cd0f..d6487a9 100644
--- a/arch/powerpc/platforms/ps3/os-area.c
+++ b/arch/powerpc/platforms/ps3/os-area.c
@@ -226,6 +226,44 @@
 	.value = &saved_params.av_multi_out,
 };
 
+
+static DEFINE_MUTEX(os_area_flash_mutex);
+
+static const struct ps3_os_area_flash_ops *os_area_flash_ops;
+
+void ps3_os_area_flash_register(const struct ps3_os_area_flash_ops *ops)
+{
+	mutex_lock(&os_area_flash_mutex);
+	os_area_flash_ops = ops;
+	mutex_unlock(&os_area_flash_mutex);
+}
+EXPORT_SYMBOL_GPL(ps3_os_area_flash_register);
+
+static ssize_t os_area_flash_read(void *buf, size_t count, loff_t pos)
+{
+	ssize_t res = -ENODEV;
+
+	mutex_lock(&os_area_flash_mutex);
+	if (os_area_flash_ops)
+		res = os_area_flash_ops->read(buf, count, pos);
+	mutex_unlock(&os_area_flash_mutex);
+
+	return res;
+}
+
+static ssize_t os_area_flash_write(const void *buf, size_t count, loff_t pos)
+{
+	ssize_t res = -ENODEV;
+
+	mutex_lock(&os_area_flash_mutex);
+	if (os_area_flash_ops)
+		res = os_area_flash_ops->write(buf, count, pos);
+	mutex_unlock(&os_area_flash_mutex);
+
+	return res;
+}
+
+
 /**
  * os_area_set_property - Add or overwrite a saved_params value to the device tree.
  *
@@ -352,12 +390,12 @@
 	if (memcmp(db->magic_num, OS_AREA_DB_MAGIC_NUM,
 		sizeof(db->magic_num))) {
 		pr_debug("%s:%d magic_num failed\n", __func__, __LINE__);
-		return -1;
+		return -EINVAL;
 	}
 
 	if (db->version != 1) {
 		pr_debug("%s:%d version failed\n", __func__, __LINE__);
-		return -1;
+		return -EINVAL;
 	}
 
 	return 0;
@@ -578,59 +616,48 @@
  *
  */
 
-static void __maybe_unused update_flash_db(void)
+static int update_flash_db(void)
 {
-	int result;
-	int file;
-	off_t offset;
+	const unsigned int buf_len = 8 * OS_AREA_SEGMENT_SIZE;
+	struct os_area_header *header;
 	ssize_t count;
-	static const unsigned int buf_len = 8 * OS_AREA_SEGMENT_SIZE;
-	const struct os_area_header *header;
+	int error;
+	loff_t pos;
 	struct os_area_db* db;
 
 	/* Read in header and db from flash. */
 
-	file = sys_open("/dev/ps3flash", O_RDWR, 0);
-
-	if (file < 0) {
-		pr_debug("%s:%d sys_open failed\n", __func__, __LINE__);
-		goto fail_open;
-	}
-
 	header = kmalloc(buf_len, GFP_KERNEL);
-
 	if (!header) {
-		pr_debug("%s:%d kmalloc failed\n", __func__, __LINE__);
-		goto fail_malloc;
+		pr_debug("%s: kmalloc failed\n", __func__);
+		return -ENOMEM;
 	}
 
-	offset = sys_lseek(file, 0, SEEK_SET);
-
-	if (offset != 0) {
-		pr_debug("%s:%d sys_lseek failed\n", __func__, __LINE__);
-		goto fail_header_seek;
+	count = os_area_flash_read(header, buf_len, 0);
+	if (count < 0) {
+		pr_debug("%s: os_area_flash_read failed %zd\n", __func__,
+			 count);
+		error = count;
+		goto fail;
 	}
 
-	count = sys_read(file, (char __user *)header, buf_len);
-
-	result = count < OS_AREA_SEGMENT_SIZE || verify_header(header)
-		|| count < header->db_area_offset * OS_AREA_SEGMENT_SIZE;
-
-	if (result) {
-		pr_debug("%s:%d verify_header failed\n", __func__, __LINE__);
+	pos = header->db_area_offset * OS_AREA_SEGMENT_SIZE;
+	if (count < OS_AREA_SEGMENT_SIZE || verify_header(header) ||
+	    count < pos) {
+		pr_debug("%s: verify_header failed\n", __func__);
 		dump_header(header);
-		goto fail_header;
+		error = -EINVAL;
+		goto fail;
 	}
 
 	/* Now got a good db offset and some maybe good db data. */
 
-	db = (void*)header + header->db_area_offset * OS_AREA_SEGMENT_SIZE;
+	db = (void *)header + pos;
 
-	result = db_verify(db);
-
-	if (result) {
-		printk(KERN_NOTICE "%s:%d: Verify of flash database failed, "
-			"formatting.\n", __func__, __LINE__);
+	error = db_verify(db);
+	if (error) {
+		pr_notice("%s: Verify of flash database failed, formatting.\n",
+			  __func__);
 		dump_db(db);
 		os_area_db_init(db);
 	}
@@ -639,29 +666,16 @@
 
 	db_set_64(db, &os_area_db_id_rtc_diff, saved_params.rtc_diff);
 
-	offset = sys_lseek(file, header->db_area_offset * OS_AREA_SEGMENT_SIZE,
-		SEEK_SET);
-
-	if (offset != header->db_area_offset * OS_AREA_SEGMENT_SIZE) {
-		pr_debug("%s:%d sys_lseek failed\n", __func__, __LINE__);
-		goto fail_db_seek;
-	}
-
-	count = sys_write(file, (const char __user *)db,
-		sizeof(struct os_area_db));
-
+	count = os_area_flash_write(db, sizeof(struct os_area_db), pos);
 	if (count < sizeof(struct os_area_db)) {
-		pr_debug("%s:%d sys_write failed\n", __func__, __LINE__);
+		pr_debug("%s: os_area_flash_write failed %zd\n", __func__,
+			 count);
+		error = count < 0 ? count : -EIO;
 	}
 
-fail_db_seek:
-fail_header:
-fail_header_seek:
+fail:
 	kfree(header);
-fail_malloc:
-	sys_close(file);
-fail_open:
-	return;
+	return error;
 }
 
 /**
@@ -674,11 +688,11 @@
 static void os_area_queue_work_handler(struct work_struct *work)
 {
 	struct device_node *node;
+	int error;
 
 	pr_debug(" -> %s:%d\n", __func__, __LINE__);
 
 	node = of_find_node_by_path("/");
-
 	if (node) {
 		os_area_set_property(node, &property_rtc_diff);
 		of_node_put(node);
@@ -686,12 +700,10 @@
 		pr_debug("%s:%d of_find_node_by_path failed\n",
 			__func__, __LINE__);
 
-#if defined(CONFIG_PS3_FLASH) || defined(CONFIG_PS3_FLASH_MODULE)
-	update_flash_db();
-#else
-	printk(KERN_WARNING "%s:%d: No flash rom driver configured.\n",
-		__func__, __LINE__);
-#endif
+	error = update_flash_db();
+	if (error)
+		pr_warning("%s: Could not update FLASH ROM\n", __func__);
+
 	pr_debug(" <- %s:%d\n", __func__, __LINE__);
 }
 
@@ -808,7 +820,7 @@
 {
 	return saved_params.rtc_diff;
 }
-EXPORT_SYMBOL(ps3_os_area_get_rtc_diff);
+EXPORT_SYMBOL_GPL(ps3_os_area_get_rtc_diff);
 
 /**
  * ps3_os_area_set_rtc_diff - Set the rtc diff value.
@@ -824,7 +836,7 @@
 		os_area_queue_work();
 	}
 }
-EXPORT_SYMBOL(ps3_os_area_set_rtc_diff);
+EXPORT_SYMBOL_GPL(ps3_os_area_set_rtc_diff);
 
 /**
  * ps3_os_area_get_av_multi_out - Returns the default video mode.
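For reference, the os_area_flash_ops indirection added above is meant to be filled in by the ps3flash storage driver, replacing the old sys_open("/dev/ps3flash") path. A hedged sketch of a registration, assuming only the struct layout implied by the ops->read()/ops->write() calls above; the function names and bodies are illustrative:

static ssize_t ps3flash_kernel_read(void *buf, size_t count, loff_t pos)
{
	/* read 'count' bytes from FLASH at offset 'pos' into 'buf' */
	return count;
}

static ssize_t ps3flash_kernel_write(const void *buf, size_t count, loff_t pos)
{
	/* write 'count' bytes from 'buf' to FLASH at offset 'pos' */
	return count;
}

static const struct ps3_os_area_flash_ops ps3flash_kernel_ops = {
	.read	= ps3flash_kernel_read,
	.write	= ps3flash_kernel_write,
};

static int ps3flash_sketch_init(void)
{
	/* typically called from the flash driver's probe routine */
	ps3_os_area_flash_register(&ps3flash_kernel_ops);
	return 0;
}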
diff --git a/arch/powerpc/platforms/ps3/platform.h b/arch/powerpc/platforms/ps3/platform.h
index 136aa06..9a196a8 100644
--- a/arch/powerpc/platforms/ps3/platform.h
+++ b/arch/powerpc/platforms/ps3/platform.h
@@ -232,14 +232,4 @@
 int ps3_repository_read_vuart_av_port(unsigned int *port);
 int ps3_repository_read_vuart_sysmgr_port(unsigned int *port);
 
-/* Page table entries */
-#define IOPTE_PP_W		0x8000000000000000ul /* protection: write */
-#define IOPTE_PP_R		0x4000000000000000ul /* protection: read */
-#define IOPTE_M			0x2000000000000000ul /* coherency required */
-#define IOPTE_SO_R		0x1000000000000000ul /* ordering: writes */
-#define IOPTE_SO_RW             0x1800000000000000ul /* ordering: r & w */
-#define IOPTE_RPN_Mask		0x07fffffffffff000ul /* RPN */
-#define IOPTE_H			0x0000000000000800ul /* cache hint */
-#define IOPTE_IOID_Mask		0x00000000000007fful /* ioid */
-
 #endif
diff --git a/arch/powerpc/platforms/ps3/setup.c b/arch/powerpc/platforms/ps3/setup.c
index 1a7b5ae..149bea2 100644
--- a/arch/powerpc/platforms/ps3/setup.c
+++ b/arch/powerpc/platforms/ps3/setup.c
@@ -32,6 +32,7 @@
 #include <asm/udbg.h>
 #include <asm/prom.h>
 #include <asm/lv1call.h>
+#include <asm/ps3gpu.h>
 
 #include "platform.h"
 
diff --git a/arch/powerpc/platforms/ps3/system-bus.c b/arch/powerpc/platforms/ps3/system-bus.c
index 9a73d02..9fead0f 100644
--- a/arch/powerpc/platforms/ps3/system-bus.c
+++ b/arch/powerpc/platforms/ps3/system-bus.c
@@ -27,6 +27,7 @@
 #include <asm/udbg.h>
 #include <asm/lv1call.h>
 #include <asm/firmware.h>
+#include <asm/iommu.h>
 
 #include "platform.h"
 
@@ -531,7 +532,8 @@
 	}
 
 	result = ps3_dma_map(dev->d_region, virt_addr, size, dma_handle,
-			     IOPTE_PP_W | IOPTE_PP_R | IOPTE_SO_RW | IOPTE_M);
+			     CBE_IOPTE_PP_W | CBE_IOPTE_PP_R |
+			     CBE_IOPTE_SO_RW | CBE_IOPTE_M);
 
 	if (result) {
 		pr_debug("%s:%d: ps3_dma_map failed (%d)\n",
@@ -575,7 +577,8 @@
 
 	result = ps3_dma_map(dev->d_region, (unsigned long)ptr, size,
 			     &bus_addr,
-			     IOPTE_PP_R | IOPTE_PP_W | IOPTE_SO_RW | IOPTE_M);
+			     CBE_IOPTE_PP_R | CBE_IOPTE_PP_W |
+			     CBE_IOPTE_SO_RW | CBE_IOPTE_M);
 
 	if (result) {
 		pr_debug("%s:%d: ps3_dma_map failed (%d)\n",
@@ -596,16 +599,16 @@
 	u64 iopte_flag;
 	void *ptr = page_address(page) + offset;
 
-	iopte_flag = IOPTE_M;
+	iopte_flag = CBE_IOPTE_M;
 	switch (direction) {
 	case DMA_BIDIRECTIONAL:
-		iopte_flag |= IOPTE_PP_R | IOPTE_PP_W | IOPTE_SO_RW;
+		iopte_flag |= CBE_IOPTE_PP_R | CBE_IOPTE_PP_W | CBE_IOPTE_SO_RW;
 		break;
 	case DMA_TO_DEVICE:
-		iopte_flag |= IOPTE_PP_R | IOPTE_SO_R;
+		iopte_flag |= CBE_IOPTE_PP_R | CBE_IOPTE_SO_R;
 		break;
 	case DMA_FROM_DEVICE:
-		iopte_flag |= IOPTE_PP_W | IOPTE_SO_RW;
+		iopte_flag |= CBE_IOPTE_PP_W | CBE_IOPTE_SO_RW;
 		break;
 	default:
 		/* not happned */
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 99dc3de..a14dba0 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -348,6 +348,9 @@
 config ARCH_ENABLE_MEMORY_HOTREMOVE
 	def_bool y
 
+config ARCH_HIBERNATION_POSSIBLE
+       def_bool y if 64BIT
+
 source "mm/Kconfig"
 
 comment "I/O subsystem configuration"
@@ -592,6 +595,12 @@
 
 endmenu
 
+menu "Power Management"
+
+source "kernel/power/Kconfig"
+
+endmenu
+
 source "net/Kconfig"
 
 config PCMCIA
diff --git a/arch/s390/Makefile b/arch/s390/Makefile
index 578c61f..0ff387c 100644
--- a/arch/s390/Makefile
+++ b/arch/s390/Makefile
@@ -88,7 +88,9 @@
 head-y		:= arch/s390/kernel/head.o arch/s390/kernel/init_task.o
 
 core-y		+= arch/s390/mm/ arch/s390/kernel/ arch/s390/crypto/ \
-		   arch/s390/appldata/ arch/s390/hypfs/ arch/s390/kvm/
+		   arch/s390/appldata/ arch/s390/hypfs/ arch/s390/kvm/ \
+		   arch/s390/power/
+
 libs-y		+= arch/s390/lib/
 drivers-y	+= drivers/s390/
 drivers-$(CONFIG_MATHEMU) += arch/s390/math-emu/
diff --git a/arch/s390/appldata/appldata_base.c b/arch/s390/appldata/appldata_base.c
index 1dfc710..264528e 100644
--- a/arch/s390/appldata/appldata_base.c
+++ b/arch/s390/appldata/appldata_base.c
@@ -5,7 +5,7 @@
  * Exports appldata_register_ops() and appldata_unregister_ops() for the
  * data gathering modules.
  *
- * Copyright IBM Corp. 2003, 2008
+ * Copyright IBM Corp. 2003, 2009
  *
  * Author: Gerald Schaefer <gerald.schaefer@de.ibm.com>
  */
@@ -26,6 +26,8 @@
 #include <linux/notifier.h>
 #include <linux/cpu.h>
 #include <linux/workqueue.h>
+#include <linux/suspend.h>
+#include <linux/platform_device.h>
 #include <asm/appldata.h>
 #include <asm/timer.h>
 #include <asm/uaccess.h>
@@ -41,6 +43,9 @@
 
 #define TOD_MICRO	0x01000			/* nr. of TOD clock units
 						   for 1 microsecond */
+
+static struct platform_device *appldata_pdev;
+
 /*
  * /proc entries (sysctl)
  */
@@ -86,6 +91,7 @@
 static DEFINE_SPINLOCK(appldata_timer_lock);
 static int appldata_interval = APPLDATA_CPU_INTERVAL;
 static int appldata_timer_active;
+static int appldata_timer_suspended = 0;
 
 /*
  * Work queue
@@ -475,6 +481,93 @@
 /********************** module-ops management <END> **************************/
 
 
+/**************************** suspend / resume *******************************/
+static int appldata_freeze(struct device *dev)
+{
+	struct appldata_ops *ops;
+	int rc;
+	struct list_head *lh;
+
+	get_online_cpus();
+	spin_lock(&appldata_timer_lock);
+	if (appldata_timer_active) {
+		__appldata_vtimer_setup(APPLDATA_DEL_TIMER);
+		appldata_timer_suspended = 1;
+	}
+	spin_unlock(&appldata_timer_lock);
+	put_online_cpus();
+
+	mutex_lock(&appldata_ops_mutex);
+	list_for_each(lh, &appldata_ops_list) {
+		ops = list_entry(lh, struct appldata_ops, list);
+		if (ops->active == 1) {
+			rc = appldata_diag(ops->record_nr, APPLDATA_STOP_REC,
+					(unsigned long) ops->data, ops->size,
+					ops->mod_lvl);
+			if (rc != 0)
+				pr_err("Stopping the data collection for %s "
+				       "failed with rc=%d\n", ops->name, rc);
+		}
+	}
+	mutex_unlock(&appldata_ops_mutex);
+	return 0;
+}
+
+static int appldata_restore(struct device *dev)
+{
+	struct appldata_ops *ops;
+	int rc;
+	struct list_head *lh;
+
+	get_online_cpus();
+	spin_lock(&appldata_timer_lock);
+	if (appldata_timer_suspended) {
+		__appldata_vtimer_setup(APPLDATA_ADD_TIMER);
+		appldata_timer_suspended = 0;
+	}
+	spin_unlock(&appldata_timer_lock);
+	put_online_cpus();
+
+	mutex_lock(&appldata_ops_mutex);
+	list_for_each(lh, &appldata_ops_list) {
+		ops = list_entry(lh, struct appldata_ops, list);
+		if (ops->active == 1) {
+			ops->callback(ops->data);	// init record
+			rc = appldata_diag(ops->record_nr,
+					APPLDATA_START_INTERVAL_REC,
+					(unsigned long) ops->data, ops->size,
+					ops->mod_lvl);
+			if (rc != 0) {
+				pr_err("Starting the data collection for %s "
+				       "failed with rc=%d\n", ops->name, rc);
+			}
+		}
+	}
+	mutex_unlock(&appldata_ops_mutex);
+	return 0;
+}
+
+static int appldata_thaw(struct device *dev)
+{
+	return appldata_restore(dev);
+}
+
+static struct dev_pm_ops appldata_pm_ops = {
+	.freeze		= appldata_freeze,
+	.thaw		= appldata_thaw,
+	.restore	= appldata_restore,
+};
+
+static struct platform_driver appldata_pdrv = {
+	.driver = {
+		.name	= "appldata",
+		.owner	= THIS_MODULE,
+		.pm	= &appldata_pm_ops,
+	},
+};
+/************************* suspend / resume <END> ****************************/
+
+
 /******************************* init / exit *********************************/
 
 static void __cpuinit appldata_online_cpu(int cpu)
@@ -531,11 +624,23 @@
  */
 static int __init appldata_init(void)
 {
-	int i;
+	int i, rc;
 
+	rc = platform_driver_register(&appldata_pdrv);
+	if (rc)
+		return rc;
+
+	appldata_pdev = platform_device_register_simple("appldata", -1, NULL,
+							0);
+	if (IS_ERR(appldata_pdev)) {
+		rc = PTR_ERR(appldata_pdev);
+		goto out_driver;
+	}
 	appldata_wq = create_singlethread_workqueue("appldata");
-	if (!appldata_wq)
-		return -ENOMEM;
+	if (!appldata_wq) {
+		rc = -ENOMEM;
+		goto out_device;
+	}
 
 	get_online_cpus();
 	for_each_online_cpu(i)
@@ -547,6 +652,12 @@
 
 	appldata_sysctl_header = register_sysctl_table(appldata_dir_table);
 	return 0;
+
+out_device:
+	platform_device_unregister(appldata_pdev);
+out_driver:
+	platform_driver_unregister(&appldata_pdrv);
+	return rc;
 }
 
 __initcall(appldata_init);
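The appldata code above has no physical device of its own, so it registers a dummy platform device solely to receive freeze/thaw/restore callbacks during hibernation. The same pattern, boiled down to a sketch with placeholder names:

#include <linux/err.h>
#include <linux/module.h>
#include <linux/platform_device.h>

static int foo_freeze(struct device *dev)
{
	/* quiesce the subsystem before the hibernation image is written */
	return 0;
}

static int foo_restore(struct device *dev)
{
	/* restart activity after the image has been restored */
	return 0;
}

static struct dev_pm_ops foo_pm_ops = {
	.freeze		= foo_freeze,
	.thaw		= foo_restore,
	.restore	= foo_restore,
};

static struct platform_driver foo_pdrv = {
	.driver = {
		.name	= "foo",
		.owner	= THIS_MODULE,
		.pm	= &foo_pm_ops,
	},
};

static struct platform_device *foo_pdev;

static int __init foo_init(void)
{
	int rc = platform_driver_register(&foo_pdrv);

	if (rc)
		return rc;
	foo_pdev = platform_device_register_simple("foo", -1, NULL, 0);
	if (IS_ERR(foo_pdev)) {
		platform_driver_unregister(&foo_pdrv);
		return PTR_ERR(foo_pdev);
	}
	return 0;
}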
diff --git a/arch/s390/include/asm/ccwdev.h b/arch/s390/include/asm/ccwdev.h
index ba007d8..2a54195 100644
--- a/arch/s390/include/asm/ccwdev.h
+++ b/arch/s390/include/asm/ccwdev.h
@@ -1,11 +1,9 @@
 /*
- *  include/asm-s390/ccwdev.h
- *  include/asm-s390x/ccwdev.h
+ * Copyright  IBM Corp. 2002, 2009
  *
- *    Copyright (C) 2002 IBM Deutschland Entwicklung GmbH, IBM Corporation
- *    Author(s): Arnd Bergmann <arndb@de.ibm.com>
+ * Author(s): Arnd Bergmann <arndb@de.ibm.com>
  *
- *  Interface for CCW device drivers
+ * Interface for CCW device drivers
  */
 #ifndef _S390_CCWDEV_H_
 #define _S390_CCWDEV_H_
@@ -104,6 +102,11 @@
  * @set_offline: called when setting device offline
  * @notify: notify driver of device state changes
  * @shutdown: called at device shutdown
+ * @prepare: prepare for pm state transition
+ * @complete: undo work done in @prepare
+ * @freeze: callback for freezing during hibernation snapshotting
+ * @thaw: undo work done in @freeze
+ * @restore: callback for restoring after hibernation
  * @driver: embedded device driver structure
  * @name: device driver name
  */
@@ -116,6 +119,11 @@
 	int (*set_offline) (struct ccw_device *);
 	int (*notify) (struct ccw_device *, int);
 	void (*shutdown) (struct ccw_device *);
+	int (*prepare) (struct ccw_device *);
+	void (*complete) (struct ccw_device *);
+	int (*freeze)(struct ccw_device *);
+	int (*thaw) (struct ccw_device *);
+	int (*restore)(struct ccw_device *);
 	struct device_driver driver;
 	char *name;
 };
@@ -184,6 +192,7 @@
 #define to_ccwdrv(n) container_of(n, struct ccw_driver, driver)
 
 extern struct ccw_device *ccw_device_probe_console(void);
+extern int ccw_device_force_console(void);
 
 // FIXME: these have to go
 extern int _ccw_device_get_subchannel_number(struct ccw_device *);
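The new prepare/complete/freeze/thaw/restore hooks give CCW device drivers the same hibernation callbacks the driver core offers elsewhere. A hedged sketch of a driver wiring them up; only the structure members come from the header above, the names and bodies are illustrative:

static int foo_ccw_freeze(struct ccw_device *cdev)
{
	/* stop I/O and save device state before the image is written */
	return 0;
}

static int foo_ccw_restore(struct ccw_device *cdev)
{
	/* reprogram the device after resuming from the hibernation image */
	return 0;
}

static struct ccw_driver foo_ccw_driver = {
	.name		= "foo",
	.freeze		= foo_ccw_freeze,
	.thaw		= foo_ccw_restore,
	.restore	= foo_ccw_restore,
	/* .ids, .probe, .set_online, ... as in any CCW driver */
};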
diff --git a/arch/s390/include/asm/ccwgroup.h b/arch/s390/include/asm/ccwgroup.h
index a27f689..c79c1e7 100644
--- a/arch/s390/include/asm/ccwgroup.h
+++ b/arch/s390/include/asm/ccwgroup.h
@@ -38,6 +38,11 @@
  * @set_online: function called when device is set online
  * @set_offline: function called when device is set offline
  * @shutdown: function called when device is shut down
+ * @prepare: prepare for pm state transition
+ * @complete: undo work done in @prepare
+ * @freeze: callback for freezing during hibernation snapshotting
+ * @thaw: undo work done in @freeze
+ * @restore: callback for restoring after hibernation
  * @driver: embedded driver structure
  */
 struct ccwgroup_driver {
@@ -51,6 +56,11 @@
 	int (*set_online) (struct ccwgroup_device *);
 	int (*set_offline) (struct ccwgroup_device *);
 	void (*shutdown)(struct ccwgroup_device *);
+	int (*prepare) (struct ccwgroup_device *);
+	void (*complete) (struct ccwgroup_device *);
+	int (*freeze)(struct ccwgroup_device *);
+	int (*thaw) (struct ccwgroup_device *);
+	int (*restore)(struct ccwgroup_device *);
 
 	struct device_driver driver;
 };
diff --git a/arch/s390/include/asm/suspend.h b/arch/s390/include/asm/suspend.h
new file mode 100644
index 0000000..dc75c61
--- /dev/null
+++ b/arch/s390/include/asm/suspend.h
@@ -0,0 +1,10 @@
+#ifndef __ASM_S390_SUSPEND_H
+#define __ASM_S390_SUSPEND_H
+
+static inline int arch_prepare_suspend(void)
+{
+	return 0;
+}
+
+#endif
+
diff --git a/arch/s390/include/asm/system.h b/arch/s390/include/asm/system.h
index 3a8b26e..4fb83c1 100644
--- a/arch/s390/include/asm/system.h
+++ b/arch/s390/include/asm/system.h
@@ -1,11 +1,7 @@
 /*
- *  include/asm-s390/system.h
+ * Copyright IBM Corp. 1999, 2009
  *
- *  S390 version
- *    Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
- *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
- *
- *  Derived from "include/asm-i386/system.h"
+ * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
  */
 
 #ifndef __ASM_SYSTEM_H
@@ -469,6 +465,20 @@
 extern psw_t io_restore_trace_psw;
 #endif
 
+static inline int tprot(unsigned long addr)
+{
+	int rc = -EFAULT;
+
+	asm volatile(
+		"	tprot	0(%1),0\n"
+		"0:	ipm	%0\n"
+		"	srl	%0,28\n"
+		"1:\n"
+		EX_TABLE(0b,1b)
+		: "+d" (rc) : "a" (addr) : "cc");
+	return rc;
+}
+
 #endif /* __KERNEL__ */
 
 #endif
diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c
index fb26373..f9b1440 100644
--- a/arch/s390/kernel/early.c
+++ b/arch/s390/kernel/early.c
@@ -1,7 +1,7 @@
 /*
  *  arch/s390/kernel/early.c
  *
- *    Copyright IBM Corp. 2007
+ *    Copyright IBM Corp. 2007, 2009
  *    Author(s): Hongjie Yang <hongjie@us.ibm.com>,
  *		 Heiko Carstens <heiko.carstens@de.ibm.com>
  */
@@ -210,7 +210,7 @@
 		machine_flags |= MACHINE_FLAG_VM;
 }
 
-static __init void early_pgm_check_handler(void)
+static void early_pgm_check_handler(void)
 {
 	unsigned long addr;
 	const struct exception_table_entry *fixup;
@@ -222,7 +222,7 @@
 	S390_lowcore.program_old_psw.addr = fixup->fixup | PSW_ADDR_AMODE;
 }
 
-static noinline __init void setup_lowcore_early(void)
+void setup_lowcore_early(void)
 {
 	psw_t psw;
 
diff --git a/arch/s390/kernel/mem_detect.c b/arch/s390/kernel/mem_detect.c
index 9872999..559af0d 100644
--- a/arch/s390/kernel/mem_detect.c
+++ b/arch/s390/kernel/mem_detect.c
@@ -1,6 +1,7 @@
 /*
- *    Copyright IBM Corp. 2008
- *    Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
+ * Copyright IBM Corp. 2008, 2009
+ *
+ * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
  */
 
 #include <linux/kernel.h>
@@ -9,20 +10,6 @@
 #include <asm/sclp.h>
 #include <asm/setup.h>
 
-static inline int tprot(unsigned long addr)
-{
-	int rc = -EFAULT;
-
-	asm volatile(
-		"	tprot	0(%1),0\n"
-		"0:	ipm	%0\n"
-		"	srl	%0,28\n"
-		"1:\n"
-		EX_TABLE(0b,1b)
-		: "+d" (rc) : "a" (addr) : "cc");
-	return rc;
-}
-
 #define ADDR2G (1ULL << 31)
 
 static void find_memory_chunks(struct mem_chunk chunk[])
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index cc8c484..fd8e311 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -1,7 +1,7 @@
 /*
  *  arch/s390/kernel/smp.c
  *
- *    Copyright IBM Corp. 1999,2007
+ *    Copyright IBM Corp. 1999, 2009
  *    Author(s): Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
  *		 Martin Schwidefsky (schwidefsky@de.ibm.com)
  *		 Heiko Carstens (heiko.carstens@de.ibm.com)
@@ -1031,6 +1031,42 @@
 static SYSDEV_CLASS_ATTR(dispatching, 0644, dispatching_show,
 			 dispatching_store);
 
+/*
+ * If the resume kernel runs on another cpu than the suspended kernel,
+ * we have to switch the cpu IDs in the logical map.
+ */
+void smp_switch_boot_cpu_in_resume(u32 resume_phys_cpu_id,
+				   struct _lowcore *suspend_lowcore)
+{
+	int cpu, suspend_cpu_id, resume_cpu_id;
+	u32 suspend_phys_cpu_id;
+
+	suspend_phys_cpu_id = __cpu_logical_map[suspend_lowcore->cpu_nr];
+	suspend_cpu_id = suspend_lowcore->cpu_nr;
+
+	for_each_present_cpu(cpu) {
+		if (__cpu_logical_map[cpu] == resume_phys_cpu_id) {
+			resume_cpu_id = cpu;
+			goto found;
+		}
+	}
+	panic("Could not find resume cpu in logical map.\n");
+
+found:
+	printk("Resume  cpu ID: %i/%i\n", resume_phys_cpu_id, resume_cpu_id);
+	printk("Suspend cpu ID: %i/%i\n", suspend_phys_cpu_id, suspend_cpu_id);
+
+	__cpu_logical_map[resume_cpu_id] = suspend_phys_cpu_id;
+	__cpu_logical_map[suspend_cpu_id] = resume_phys_cpu_id;
+
+	lowcore_ptr[suspend_cpu_id]->cpu_addr = resume_phys_cpu_id;
+}
+
+u32 smp_get_phys_cpu_id(void)
+{
+	return __cpu_logical_map[smp_processor_id()];
+}
+
 static int __init topology_init(void)
 {
 	int cpu;
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index 4ca8e82..5656672 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -313,3 +313,22 @@
 	return 0;
 }
 EXPORT_SYMBOL_GPL(s390_enable_sie);
+
+#ifdef CONFIG_DEBUG_PAGEALLOC
+#ifdef CONFIG_HIBERNATION
+bool kernel_page_present(struct page *page)
+{
+	unsigned long addr;
+	int cc;
+
+	addr = page_to_phys(page);
+	asm("lra %1,0(%1)\n"
+	    "ipm %0\n"
+	    "srl %0,28"
+	    :"=d"(cc),"+a"(addr)::"cc");
+	return cc == 0;
+}
+
+#endif /* CONFIG_HIBERNATION */
+#endif /* CONFIG_DEBUG_PAGEALLOC */
+
diff --git a/arch/s390/power/Makefile b/arch/s390/power/Makefile
new file mode 100644
index 0000000..973bb45
--- /dev/null
+++ b/arch/s390/power/Makefile
@@ -0,0 +1,8 @@
+#
+# Makefile for s390 PM support
+#
+
+obj-$(CONFIG_HIBERNATION) += suspend.o
+obj-$(CONFIG_HIBERNATION) += swsusp.o
+obj-$(CONFIG_HIBERNATION) += swsusp_64.o
+obj-$(CONFIG_HIBERNATION) += swsusp_asm64.o
diff --git a/arch/s390/power/suspend.c b/arch/s390/power/suspend.c
new file mode 100644
index 0000000..b3351ec
--- /dev/null
+++ b/arch/s390/power/suspend.c
@@ -0,0 +1,40 @@
+/*
+ * Suspend support specific for s390.
+ *
+ * Copyright IBM Corp. 2009
+ *
+ * Author(s): Hans-Joachim Picht <hans@linux.vnet.ibm.com>
+ */
+
+#include <linux/mm.h>
+#include <linux/suspend.h>
+#include <linux/reboot.h>
+#include <linux/pfn.h>
+#include <asm/sections.h>
+#include <asm/ipl.h>
+
+/*
+ * References to section boundaries
+ */
+extern const void __nosave_begin, __nosave_end;
+
+/*
+ *  check if the given pfn is in the 'nosave' or the read-only NSS section
+ */
+int pfn_is_nosave(unsigned long pfn)
+{
+	unsigned long nosave_begin_pfn = __pa(&__nosave_begin) >> PAGE_SHIFT;
+	unsigned long nosave_end_pfn = PAGE_ALIGN(__pa(&__nosave_end))
+					>> PAGE_SHIFT;
+	unsigned long eshared_pfn = PFN_DOWN(__pa(&_eshared)) - 1;
+	unsigned long stext_pfn = PFN_DOWN(__pa(&_stext));
+
+	if (pfn >= nosave_begin_pfn && pfn < nosave_end_pfn)
+		return 1;
+	if (pfn >= stext_pfn && pfn <= eshared_pfn) {
+		if (ipl_info.type == IPL_TYPE_NSS)
+			return 1;
+	} else if ((tprot(pfn * PAGE_SIZE) && pfn > 0))
+		return 1;
+	return 0;
+}
diff --git a/arch/s390/power/swsusp.c b/arch/s390/power/swsusp.c
new file mode 100644
index 0000000..e6a4fe9
--- /dev/null
+++ b/arch/s390/power/swsusp.c
@@ -0,0 +1,30 @@
+/*
+ * Support for suspend and resume on s390
+ *
+ * Copyright IBM Corp. 2009
+ *
+ * Author(s): Hans-Joachim Picht <hans@linux.vnet.ibm.com>
+ *
+ */
+
+
+/*
+ * save CPU registers before creating a hibernation image and before
+ * restoring the memory state from it
+ */
+void save_processor_state(void)
+{
+	/* implementation contained in the
+	 * swsusp_arch_suspend function
+	 */
+}
+
+/*
+ * restore the contents of CPU registers
+ */
+void restore_processor_state(void)
+{
+	/* implementation contained in the
+	 * swsusp_arch_resume function
+	 */
+}
diff --git a/arch/s390/power/swsusp_64.c b/arch/s390/power/swsusp_64.c
new file mode 100644
index 0000000..9516a51
--- /dev/null
+++ b/arch/s390/power/swsusp_64.c
@@ -0,0 +1,17 @@
+/*
+ * Support for suspend and resume on s390
+ *
+ * Copyright IBM Corp. 2009
+ *
+ * Author(s): Hans-Joachim Picht <hans@linux.vnet.ibm.com>
+ *
+ */
+
+#include <asm/system.h>
+#include <linux/interrupt.h>
+
+void do_after_copyback(void)
+{
+	mb();
+}
+
diff --git a/arch/s390/power/swsusp_asm64.S b/arch/s390/power/swsusp_asm64.S
new file mode 100644
index 0000000..3c74e7d
--- /dev/null
+++ b/arch/s390/power/swsusp_asm64.S
@@ -0,0 +1,199 @@
+/*
+ * S390 64-bit swsusp implementation
+ *
+ * Copyright IBM Corp. 2009
+ *
+ * Author(s): Hans-Joachim Picht <hans@linux.vnet.ibm.com>
+ *	      Michael Holzheu <holzheu@linux.vnet.ibm.com>
+ */
+
+#include <asm/page.h>
+#include <asm/ptrace.h>
+#include <asm/asm-offsets.h>
+
+/*
+ * Save register context in absolute 0 lowcore and call swsusp_save() to
+ * create in-memory kernel image. The context is saved in the designated
+ * "store status" memory locations (see POP).
+ * We return from this function twice. The first time during the suspend to
+ * disk process. The second time via the swsusp_arch_resume() function
+ * (see below) in the resume process.
+ * This function runs with disabled interrupts.
+ */
+	.section .text
+	.align	2
+	.globl swsusp_arch_suspend
+swsusp_arch_suspend:
+	stmg	%r6,%r15,__SF_GPRS(%r15)
+	lgr	%r1,%r15
+	aghi	%r15,-STACK_FRAME_OVERHEAD
+	stg	%r1,__SF_BACKCHAIN(%r15)
+
+	/* Deactivate DAT */
+	stnsm	__SF_EMPTY(%r15),0xfb
+
+	/* Switch off lowcore protection */
+	stctg	%c0,%c0,__SF_EMPTY(%r15)
+	ni	__SF_EMPTY+4(%r15),0xef
+	lctlg	%c0,%c0,__SF_EMPTY(%r15)
+
+	/* Store prefix register on stack */
+	stpx	__SF_EMPTY(%r15)
+
+	/* Setup base register for lowcore (absolute 0) */
+	llgf	%r1,__SF_EMPTY(%r15)
+
+	/* Get pointer to save area */
+	aghi	%r1,0x1000
+
+	/* Store registers */
+	mvc	0x318(4,%r1),__SF_EMPTY(%r15)	/* move prefix to lowcore */
+	stfpc	0x31c(%r1)			/* store fpu control */
+	std	0,0x200(%r1)			/* store f0 */
+	std	1,0x208(%r1)			/* store f1 */
+	std	2,0x210(%r1)			/* store f2 */
+	std	3,0x218(%r1)			/* store f3 */
+	std	4,0x220(%r1)			/* store f4 */
+	std	5,0x228(%r1)			/* store f5 */
+	std	6,0x230(%r1)			/* store f6 */
+	std	7,0x238(%r1)			/* store f7 */
+	std	8,0x240(%r1)			/* store f8 */
+	std	9,0x248(%r1)			/* store f9 */
+	std	10,0x250(%r1)			/* store f10 */
+	std	11,0x258(%r1)			/* store f11 */
+	std	12,0x260(%r1)			/* store f12 */
+	std	13,0x268(%r1)			/* store f13 */
+	std	14,0x270(%r1)			/* store f14 */
+	std	15,0x278(%r1)			/* store f15 */
+	stam	%a0,%a15,0x340(%r1)		/* store access registers */
+	stctg	%c0,%c15,0x380(%r1)		/* store control registers */
+	stmg	%r0,%r15,0x280(%r1)		/* store general registers */
+
+	stpt	0x328(%r1)			/* store timer */
+	stckc	0x330(%r1)			/* store clock comparator */
+
+	/* Activate DAT */
+	stosm	__SF_EMPTY(%r15),0x04
+
+	/* Set prefix page to zero */
+	xc	__SF_EMPTY(4,%r15),__SF_EMPTY(%r15)
+	spx	__SF_EMPTY(%r15)
+
+	/* Setup lowcore */
+	brasl	%r14,setup_lowcore_early
+
+	/* Save image */
+	brasl	%r14,swsusp_save
+
+	/* Switch on lowcore protection */
+	stctg	%c0,%c0,__SF_EMPTY(%r15)
+	oi	__SF_EMPTY+4(%r15),0x10
+	lctlg	%c0,%c0,__SF_EMPTY(%r15)
+
+	/* Restore prefix register and return */
+	lghi	%r1,0x1000
+	spx	0x318(%r1)
+	lmg	%r6,%r15,STACK_FRAME_OVERHEAD + __SF_GPRS(%r15)
+	lghi	%r2,0
+	br	%r14
+
+/*
+ * Restore saved memory image to correct place and restore register context.
+ * Then we return to the function that called swsusp_arch_suspend().
+ * swsusp_arch_resume() runs with disabled interrupts.
+ */
+	.globl swsusp_arch_resume
+swsusp_arch_resume:
+	stmg	%r6,%r15,__SF_GPRS(%r15)
+	lgr	%r1,%r15
+	aghi	%r15,-STACK_FRAME_OVERHEAD
+	stg	%r1,__SF_BACKCHAIN(%r15)
+
+	/* Save boot cpu number */
+	brasl	%r14,smp_get_phys_cpu_id
+	lgr	%r10,%r2
+
+	/* Deactivate DAT */
+	stnsm	__SF_EMPTY(%r15),0xfb
+
+	/* Switch off lowcore protection */
+	stctg	%c0,%c0,__SF_EMPTY(%r15)
+	ni	__SF_EMPTY+4(%r15),0xef
+	lctlg	%c0,%c0,__SF_EMPTY(%r15)
+
+	/* Set prefix page to zero */
+	xc	__SF_EMPTY(4,%r15),__SF_EMPTY(%r15)
+	spx	__SF_EMPTY(%r15)
+
+	/* Restore saved image */
+	larl	%r1,restore_pblist
+	lg	%r1,0(%r1)
+	ltgr	%r1,%r1
+	jz	2f
+0:
+	lg	%r2,8(%r1)
+	lg	%r4,0(%r1)
+	lghi	%r3,PAGE_SIZE
+	lghi	%r5,PAGE_SIZE
+1:
+	mvcle	%r2,%r4,0
+	jo	1b
+	lg	%r1,16(%r1)
+	ltgr	%r1,%r1
+	jnz	0b
+2:
+	ptlb				/* flush tlb */
+
+	/* Restore registers */
+	lghi	%r13,0x1000		/* %r13 = pointer to save area */
+
+	spt	0x328(%r13)		/* reprogram timer */
+	//sckc	0x330(%r13)		/* set clock comparator */
+
+	lctlg	%c0,%c15,0x380(%r13)	/* load control registers */
+	lam	%a0,%a15,0x340(%r13)	/* load access registers */
+
+	lfpc	0x31c(%r13)		/* load fpu control */
+	ld	0,0x200(%r13)		/* load f0 */
+	ld	1,0x208(%r13)		/* load f1 */
+	ld	2,0x210(%r13)		/* load f2 */
+	ld	3,0x218(%r13)		/* load f3 */
+	ld	4,0x220(%r13)		/* load f4 */
+	ld	5,0x228(%r13)		/* load f5 */
+	ld	6,0x230(%r13)		/* load f6 */
+	ld	7,0x238(%r13)		/* load f7 */
+	ld	8,0x240(%r13)		/* load f8 */
+	ld	9,0x248(%r13)		/* load f9 */
+	ld	10,0x250(%r13)		/* load f10 */
+	ld	11,0x258(%r13)		/* load f11 */
+	ld	12,0x260(%r13)		/* load f12 */
+	ld	13,0x268(%r13)		/* load f13 */
+	ld	14,0x270(%r13)		/* load f14 */
+	ld	15,0x278(%r13)		/* load f15 */
+
+	/* Load old stack */
+	lg	%r15,0x2f8(%r13)
+
+	/* Pointer to save area */
+	lghi	%r13,0x1000
+
+	/* Switch CPUs */
+	lgr	%r2,%r10		/* get cpu id */
+	llgf	%r3,0x318(%r13)
+	brasl	%r14,smp_switch_boot_cpu_in_resume
+
+	/* Restore prefix register */
+	spx	0x318(%r13)
+
+	/* Switch on lowcore protection */
+	stctg	%c0,%c0,__SF_EMPTY(%r15)
+	oi	__SF_EMPTY+4(%r15),0x10
+	lctlg	%c0,%c0,__SF_EMPTY(%r15)
+
+	/* Activate DAT */
+	stosm	__SF_EMPTY(%r15),0x04
+
+	/* Return 0 */
+	lmg	%r6,%r15,STACK_FRAME_OVERHEAD + __SF_GPRS(%r15)
+	lghi	%r2,0
+	br	%r14
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index cc12cd4..3f8b6a9 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -37,6 +37,8 @@
 	select HAVE_KPROBES
 	select HAVE_LMB
 	select HAVE_SYSCALL_WRAPPERS
+	select HAVE_DYNAMIC_FTRACE
+	select HAVE_FTRACE_MCOUNT_RECORD
 	select USE_GENERIC_SMP_HELPERS if SMP
 	select RTC_DRV_CMOS
 	select RTC_DRV_BQ4802
@@ -93,6 +95,9 @@
 config HAVE_SETUP_PER_CPU_AREA
 	def_bool y if SPARC64
 
+config HAVE_DYNAMIC_PER_CPU_AREA
+	def_bool y if SPARC64
+
 config GENERIC_HARDIRQS_NO__DO_IRQ
 	bool
 	def_bool y if SPARC64
diff --git a/arch/sparc/configs/sparc64_defconfig b/arch/sparc/configs/sparc64_defconfig
index b5d63bd..0123a4c 100644
--- a/arch/sparc/configs/sparc64_defconfig
+++ b/arch/sparc/configs/sparc64_defconfig
@@ -1,7 +1,7 @@
 #
 # Automatically generated make config: don't edit
-# Linux kernel version: 2.6.30-rc2
-# Fri Apr 17 02:03:07 2009
+# Linux kernel version: 2.6.30
+# Tue Jun 16 04:59:36 2009
 #
 CONFIG_64BIT=y
 CONFIG_SPARC=y
@@ -19,6 +19,7 @@
 CONFIG_HAVE_LATENCYTOP_SUPPORT=y
 CONFIG_AUDIT_ARCH=y
 CONFIG_HAVE_SETUP_PER_CPU_AREA=y
+CONFIG_HAVE_DYNAMIC_PER_CPU_AREA=y
 CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y
 CONFIG_MMU=y
 CONFIG_ARCH_NO_VIRT_TO_BUS=y
@@ -82,7 +83,6 @@
 CONFIG_KALLSYMS=y
 # CONFIG_KALLSYMS_ALL is not set
 # CONFIG_KALLSYMS_EXTRA_PASS is not set
-# CONFIG_STRIP_ASM_SYMS is not set
 CONFIG_HOTPLUG=y
 CONFIG_PRINTK=y
 CONFIG_BUG=y
@@ -95,16 +95,21 @@
 CONFIG_EVENTFD=y
 CONFIG_SHMEM=y
 CONFIG_AIO=y
+
+#
+# Performance Counters
+#
 CONFIG_VM_EVENT_COUNTERS=y
 CONFIG_PCI_QUIRKS=y
 CONFIG_SLUB_DEBUG=y
+# CONFIG_STRIP_ASM_SYMS is not set
 # CONFIG_COMPAT_BRK is not set
 # CONFIG_SLAB is not set
 CONFIG_SLUB=y
 # CONFIG_SLOB is not set
 CONFIG_PROFILING=y
 CONFIG_TRACEPOINTS=y
-# CONFIG_MARKERS is not set
+CONFIG_MARKERS=y
 CONFIG_OPROFILE=m
 CONFIG_HAVE_OPROFILE=y
 CONFIG_KPROBES=y
@@ -202,6 +207,7 @@
 CONFIG_UNEVICTABLE_LRU=y
 CONFIG_HAVE_MLOCK=y
 CONFIG_HAVE_MLOCKED_PAGE_BIT=y
+CONFIG_DEFAULT_MMAP_MIN_ADDR=8192
 CONFIG_SCHED_SMT=y
 CONFIG_SCHED_MC=y
 # CONFIG_PREEMPT_NONE is not set
@@ -321,6 +327,7 @@
 # CONFIG_ECONET is not set
 # CONFIG_WAN_ROUTER is not set
 # CONFIG_PHONET is not set
+# CONFIG_IEEE802154 is not set
 # CONFIG_NET_SCHED is not set
 # CONFIG_DCB is not set
 
@@ -340,7 +347,11 @@
 CONFIG_WIRELESS_OLD_REGULATORY=y
 # CONFIG_WIRELESS_EXT is not set
 # CONFIG_LIB80211 is not set
-# CONFIG_MAC80211 is not set
+
+#
+# CFG80211 needs to be enabled for MAC80211
+#
+CONFIG_MAC80211_DEFAULT_PS_VALUE=0
 # CONFIG_WIMAX is not set
 # CONFIG_RFKILL is not set
 # CONFIG_NET_9P is not set
@@ -364,6 +375,7 @@
 CONFIG_CONNECTOR=m
 # CONFIG_MTD is not set
 CONFIG_OF_DEVICE=y
+CONFIG_OF_MDIO=m
 # CONFIG_PARPORT is not set
 CONFIG_BLK_DEV=y
 # CONFIG_BLK_DEV_FD is not set
@@ -399,6 +411,7 @@
 # CONFIG_EEPROM_AT24 is not set
 # CONFIG_EEPROM_LEGACY is not set
 # CONFIG_EEPROM_93CX6 is not set
+# CONFIG_CB710_CORE is not set
 CONFIG_HAVE_IDE=y
 CONFIG_IDE=y
 
@@ -477,10 +490,6 @@
 CONFIG_BLK_DEV_SR_VENDOR=y
 CONFIG_CHR_DEV_SG=m
 # CONFIG_CHR_DEV_SCH is not set
-
-#
-# Some SCSI devices (e.g. CD jukebox) support multiple LUNs
-#
 CONFIG_SCSI_MULTI_LUN=y
 CONFIG_SCSI_CONSTANTS=y
 # CONFIG_SCSI_LOGGING is not set
@@ -499,6 +508,7 @@
 CONFIG_SCSI_LOWLEVEL=y
 # CONFIG_ISCSI_TCP is not set
 # CONFIG_SCSI_CXGB3_ISCSI is not set
+# CONFIG_SCSI_BNX2_ISCSI is not set
 # CONFIG_BLK_DEV_3W_XXXX_RAID is not set
 # CONFIG_SCSI_3W_9XXX is not set
 # CONFIG_SCSI_ACARD is not set
@@ -507,6 +517,7 @@
 # CONFIG_SCSI_AIC7XXX_OLD is not set
 # CONFIG_SCSI_AIC79XX is not set
 # CONFIG_SCSI_AIC94XX is not set
+# CONFIG_SCSI_MVSAS is not set
 # CONFIG_SCSI_ARCMSR is not set
 # CONFIG_MEGARAID_NEWGEN is not set
 # CONFIG_MEGARAID_LEGACY is not set
@@ -521,7 +532,6 @@
 # CONFIG_SCSI_IPS is not set
 # CONFIG_SCSI_INITIO is not set
 # CONFIG_SCSI_INIA100 is not set
-# CONFIG_SCSI_MVSAS is not set
 # CONFIG_SCSI_STEX is not set
 # CONFIG_SCSI_SYM53C8XX_2 is not set
 # CONFIG_SCSI_QLOGIC_1280 is not set
@@ -569,7 +579,6 @@
 # CONFIG_IEEE1394 is not set
 # CONFIG_I2O is not set
 CONFIG_NETDEVICES=y
-CONFIG_COMPAT_NET_DEV_OPS=y
 # CONFIG_DUMMY is not set
 # CONFIG_BONDING is not set
 # CONFIG_MACVLAN is not set
@@ -635,6 +644,7 @@
 # CONFIG_SMSC9420 is not set
 # CONFIG_SUNDANCE is not set
 # CONFIG_TLAN is not set
+# CONFIG_KS8842 is not set
 # CONFIG_VIA_RHINE is not set
 # CONFIG_SC92031 is not set
 # CONFIG_ATL2 is not set
@@ -1127,6 +1137,11 @@
 # CONFIG_SND_VERBOSE_PRINTK is not set
 # CONFIG_SND_DEBUG is not set
 CONFIG_SND_VMASTER=y
+CONFIG_SND_RAWMIDI_SEQ=m
+# CONFIG_SND_OPL3_LIB_SEQ is not set
+# CONFIG_SND_OPL4_LIB_SEQ is not set
+# CONFIG_SND_SBAWE_SEQ is not set
+# CONFIG_SND_EMU10K1_SEQ is not set
 CONFIG_SND_MPU401_UART=m
 CONFIG_SND_AC97_CODEC=m
 CONFIG_SND_DRIVERS=y
@@ -1153,6 +1168,7 @@
 # CONFIG_SND_OXYGEN is not set
 # CONFIG_SND_CS4281 is not set
 # CONFIG_SND_CS46XX is not set
+# CONFIG_SND_CTXFI is not set
 # CONFIG_SND_DARLA20 is not set
 # CONFIG_SND_GINA20 is not set
 # CONFIG_SND_LAYLA20 is not set
@@ -1183,6 +1199,7 @@
 # CONFIG_SND_INTEL8X0 is not set
 # CONFIG_SND_INTEL8X0M is not set
 # CONFIG_SND_KORG1212 is not set
+# CONFIG_SND_LX6464ES is not set
 # CONFIG_SND_MAESTRO3 is not set
 # CONFIG_SND_MIXART is not set
 # CONFIG_SND_NM256 is not set
@@ -1229,6 +1246,7 @@
 CONFIG_HID_CHERRY=y
 CONFIG_HID_CHICONY=y
 CONFIG_HID_CYPRESS=y
+CONFIG_HID_DRAGONRISE=y
 # CONFIG_DRAGONRISE_FF is not set
 CONFIG_HID_EZKEY=y
 CONFIG_HID_KYE=y
@@ -1246,9 +1264,14 @@
 CONFIG_HID_SAMSUNG=y
 CONFIG_HID_SONY=y
 CONFIG_HID_SUNPLUS=y
+CONFIG_HID_GREENASIA=y
 # CONFIG_GREENASIA_FF is not set
+CONFIG_HID_SMARTJOYPLUS=y
+# CONFIG_SMARTJOYPLUS_FF is not set
 CONFIG_HID_TOPSEED=y
+CONFIG_HID_THRUSTMASTER=y
 # CONFIG_THRUSTMASTER_FF is not set
+CONFIG_HID_ZEROPLUS=y
 # CONFIG_ZEROPLUS_FF is not set
 CONFIG_USB_SUPPORT=y
 CONFIG_USB_ARCH_HAS_HCD=y
@@ -1462,6 +1485,7 @@
 # CONFIG_GFS2_FS is not set
 # CONFIG_OCFS2_FS is not set
 # CONFIG_BTRFS_FS is not set
+CONFIG_FSNOTIFY=y
 CONFIG_DNOTIFY=y
 CONFIG_INOTIFY=y
 CONFIG_INOTIFY_USER=y
@@ -1636,25 +1660,28 @@
 # CONFIG_DEBUG_PAGEALLOC is not set
 CONFIG_NOP_TRACER=y
 CONFIG_HAVE_FUNCTION_TRACER=y
+CONFIG_HAVE_DYNAMIC_FTRACE=y
+CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y
 CONFIG_RING_BUFFER=y
+CONFIG_EVENT_TRACING=y
+CONFIG_CONTEXT_SWITCH_TRACER=y
 CONFIG_TRACING=y
+CONFIG_GENERIC_TRACER=y
 CONFIG_TRACING_SUPPORT=y
-
-#
-# Tracers
-#
+CONFIG_FTRACE=y
 # CONFIG_FUNCTION_TRACER is not set
 # CONFIG_IRQSOFF_TRACER is not set
 # CONFIG_SCHED_TRACER is not set
-# CONFIG_CONTEXT_SWITCH_TRACER is not set
-# CONFIG_EVENT_TRACER is not set
 # CONFIG_BOOT_TRACER is not set
-# CONFIG_TRACE_BRANCH_PROFILING is not set
+CONFIG_BRANCH_PROFILE_NONE=y
+# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set
+# CONFIG_PROFILE_ALL_BRANCHES is not set
 # CONFIG_STACK_TRACER is not set
 # CONFIG_KMEMTRACE is not set
 # CONFIG_WORKQUEUE_TRACER is not set
 CONFIG_BLK_DEV_IO_TRACE=y
 # CONFIG_FTRACE_STARTUP_TEST is not set
+# CONFIG_RING_BUFFER_BENCHMARK is not set
 # CONFIG_DYNAMIC_DEBUG is not set
 # CONFIG_SAMPLES is not set
 CONFIG_HAVE_ARCH_KGDB=y
diff --git a/arch/sparc/include/asm/cpudata_64.h b/arch/sparc/include/asm/cpudata_64.h
index a11b89e..926397d 100644
--- a/arch/sparc/include/asm/cpudata_64.h
+++ b/arch/sparc/include/asm/cpudata_64.h
@@ -6,9 +6,6 @@
 #ifndef _SPARC64_CPUDATA_H
 #define _SPARC64_CPUDATA_H
 
-#include <asm/hypervisor.h>
-#include <asm/asi.h>
-
 #ifndef __ASSEMBLY__
 
 #include <linux/percpu.h>
@@ -38,202 +35,10 @@
 #define cpu_data(__cpu)		per_cpu(__cpu_data, (__cpu))
 #define local_cpu_data()	__get_cpu_var(__cpu_data)
 
-/* Trap handling code needs to get at a few critical values upon
- * trap entry and to process TSB misses.  These cannot be in the
- * per_cpu() area as we really need to lock them into the TLB and
- * thus make them part of the main kernel image.  As a result we
- * try to make this as small as possible.
- *
- * This is padded out and aligned to 64-bytes to avoid false sharing
- * on SMP.
- */
-
-/* If you modify the size of this structure, please update
- * TRAP_BLOCK_SZ_SHIFT below.
- */
-struct thread_info;
-struct trap_per_cpu {
-/* D-cache line 1: Basic thread information, cpu and device mondo queues */
-	struct thread_info	*thread;
-	unsigned long		pgd_paddr;
-	unsigned long		cpu_mondo_pa;
-	unsigned long		dev_mondo_pa;
-
-/* D-cache line 2: Error Mondo Queue and kernel buffer pointers */
-	unsigned long		resum_mondo_pa;
-	unsigned long		resum_kernel_buf_pa;
-	unsigned long		nonresum_mondo_pa;
-	unsigned long		nonresum_kernel_buf_pa;
-
-/* Dcache lines 3, 4, 5, and 6: Hypervisor Fault Status */
-	struct hv_fault_status	fault_info;
-
-/* Dcache line 7: Physical addresses of CPU send mondo block and CPU list.  */
-	unsigned long		cpu_mondo_block_pa;
-	unsigned long		cpu_list_pa;
-	unsigned long		tsb_huge;
-	unsigned long		tsb_huge_temp;
-
-/* Dcache line 8: IRQ work list, and keep trap_block a power-of-2 in size.  */
-	unsigned long		irq_worklist_pa;
-	unsigned int		cpu_mondo_qmask;
-	unsigned int		dev_mondo_qmask;
-	unsigned int		resum_qmask;
-	unsigned int		nonresum_qmask;
-	void			*hdesc;
-} __attribute__((aligned(64)));
-extern struct trap_per_cpu trap_block[NR_CPUS];
-extern void init_cur_cpu_trap(struct thread_info *);
-extern void setup_tba(void);
-extern int ncpus_probed;
 extern const struct seq_operations cpuinfo_op;
 
-extern unsigned long real_hard_smp_processor_id(void);
-
-struct cpuid_patch_entry {
-	unsigned int	addr;
-	unsigned int	cheetah_safari[4];
-	unsigned int	cheetah_jbus[4];
-	unsigned int	starfire[4];
-	unsigned int	sun4v[4];
-};
-extern struct cpuid_patch_entry __cpuid_patch, __cpuid_patch_end;
-
-struct sun4v_1insn_patch_entry {
-	unsigned int	addr;
-	unsigned int	insn;
-};
-extern struct sun4v_1insn_patch_entry __sun4v_1insn_patch,
-	__sun4v_1insn_patch_end;
-
-struct sun4v_2insn_patch_entry {
-	unsigned int	addr;
-	unsigned int	insns[2];
-};
-extern struct sun4v_2insn_patch_entry __sun4v_2insn_patch,
-	__sun4v_2insn_patch_end;
-
 #endif /* !(__ASSEMBLY__) */
 
-#define TRAP_PER_CPU_THREAD		0x00
-#define TRAP_PER_CPU_PGD_PADDR		0x08
-#define TRAP_PER_CPU_CPU_MONDO_PA	0x10
-#define TRAP_PER_CPU_DEV_MONDO_PA	0x18
-#define TRAP_PER_CPU_RESUM_MONDO_PA	0x20
-#define TRAP_PER_CPU_RESUM_KBUF_PA	0x28
-#define TRAP_PER_CPU_NONRESUM_MONDO_PA	0x30
-#define TRAP_PER_CPU_NONRESUM_KBUF_PA	0x38
-#define TRAP_PER_CPU_FAULT_INFO		0x40
-#define TRAP_PER_CPU_CPU_MONDO_BLOCK_PA	0xc0
-#define TRAP_PER_CPU_CPU_LIST_PA	0xc8
-#define TRAP_PER_CPU_TSB_HUGE		0xd0
-#define TRAP_PER_CPU_TSB_HUGE_TEMP	0xd8
-#define TRAP_PER_CPU_IRQ_WORKLIST_PA	0xe0
-#define TRAP_PER_CPU_CPU_MONDO_QMASK	0xe8
-#define TRAP_PER_CPU_DEV_MONDO_QMASK	0xec
-#define TRAP_PER_CPU_RESUM_QMASK	0xf0
-#define TRAP_PER_CPU_NONRESUM_QMASK	0xf4
-
-#define TRAP_BLOCK_SZ_SHIFT		8
-
-#include <asm/scratchpad.h>
-
-#define __GET_CPUID(REG)				\
-	/* Spitfire implementation (default). */	\
-661:	ldxa		[%g0] ASI_UPA_CONFIG, REG;	\
-	srlx		REG, 17, REG;			\
-	 and		REG, 0x1f, REG;			\
-	nop;						\
-	.section	.cpuid_patch, "ax";		\
-	/* Instruction location. */			\
-	.word		661b;				\
-	/* Cheetah Safari implementation. */		\
-	ldxa		[%g0] ASI_SAFARI_CONFIG, REG;	\
-	srlx		REG, 17, REG;			\
-	and		REG, 0x3ff, REG;		\
-	nop;						\
-	/* Cheetah JBUS implementation. */		\
-	ldxa		[%g0] ASI_JBUS_CONFIG, REG;	\
-	srlx		REG, 17, REG;			\
-	and		REG, 0x1f, REG;			\
-	nop;						\
-	/* Starfire implementation. */			\
-	sethi		%hi(0x1fff40000d0 >> 9), REG;	\
-	sllx		REG, 9, REG;			\
-	or		REG, 0xd0, REG;			\
-	lduwa		[REG] ASI_PHYS_BYPASS_EC_E, REG;\
-	/* sun4v implementation. */			\
-	mov		SCRATCHPAD_CPUID, REG;		\
-	ldxa		[REG] ASI_SCRATCHPAD, REG;	\
-	nop;						\
-	nop;						\
-	.previous;
-
-#ifdef CONFIG_SMP
-
-#define TRAP_LOAD_TRAP_BLOCK(DEST, TMP)		\
-	__GET_CPUID(TMP)			\
-	sethi	%hi(trap_block), DEST;		\
-	sllx	TMP, TRAP_BLOCK_SZ_SHIFT, TMP;	\
-	or	DEST, %lo(trap_block), DEST;	\
-	add	DEST, TMP, DEST;		\
-
-/* Clobbers TMP, current address space PGD phys address into DEST.  */
-#define TRAP_LOAD_PGD_PHYS(DEST, TMP)		\
-	TRAP_LOAD_TRAP_BLOCK(DEST, TMP)		\
-	ldx	[DEST + TRAP_PER_CPU_PGD_PADDR], DEST;
-
-/* Clobbers TMP, loads local processor's IRQ work area into DEST.  */
-#define TRAP_LOAD_IRQ_WORK_PA(DEST, TMP)	\
-	TRAP_LOAD_TRAP_BLOCK(DEST, TMP)		\
-	add	DEST, TRAP_PER_CPU_IRQ_WORKLIST_PA, DEST;
-
-/* Clobbers TMP, loads DEST with current thread info pointer.  */
-#define TRAP_LOAD_THREAD_REG(DEST, TMP)		\
-	TRAP_LOAD_TRAP_BLOCK(DEST, TMP)		\
-	ldx	[DEST + TRAP_PER_CPU_THREAD], DEST;
-
-/* Given the current thread info pointer in THR, load the per-cpu
- * area base of the current processor into DEST.  REG1, REG2, and REG3 are
- * clobbered.
- *
- * You absolutely cannot use DEST as a temporary in this code.  The
- * reason is that traps can happen during execution, and return from
- * trap will load the fully resolved DEST per-cpu base.  This can corrupt
- * the calculations done by the macro mid-stream.
- */
-#define LOAD_PER_CPU_BASE(DEST, THR, REG1, REG2, REG3)	\
-	lduh	[THR + TI_CPU], REG1;			\
-	sethi	%hi(__per_cpu_shift), REG3;		\
-	sethi	%hi(__per_cpu_base), REG2;		\
-	ldx	[REG3 + %lo(__per_cpu_shift)], REG3;	\
-	ldx	[REG2 + %lo(__per_cpu_base)], REG2;	\
-	sllx	REG1, REG3, REG3;			\
-	add	REG3, REG2, DEST;
-
-#else
-
-#define TRAP_LOAD_TRAP_BLOCK(DEST, TMP)		\
-	sethi	%hi(trap_block), DEST;		\
-	or	DEST, %lo(trap_block), DEST;	\
-
-/* Uniprocessor versions, we know the cpuid is zero.  */
-#define TRAP_LOAD_PGD_PHYS(DEST, TMP)		\
-	TRAP_LOAD_TRAP_BLOCK(DEST, TMP)		\
-	ldx	[DEST + TRAP_PER_CPU_PGD_PADDR], DEST;
-
-/* Clobbers TMP, loads local processor's IRQ work area into DEST.  */
-#define TRAP_LOAD_IRQ_WORK_PA(DEST, TMP)	\
-	TRAP_LOAD_TRAP_BLOCK(DEST, TMP)		\
-	add	DEST, TRAP_PER_CPU_IRQ_WORKLIST_PA, DEST;
-
-#define TRAP_LOAD_THREAD_REG(DEST, TMP)		\
-	TRAP_LOAD_TRAP_BLOCK(DEST, TMP)		\
-	ldx	[DEST + TRAP_PER_CPU_THREAD], DEST;
-
-/* No per-cpu areas on uniprocessor, so no need to load DEST.  */
-#define LOAD_PER_CPU_BASE(DEST, THR, REG1, REG2, REG3)
-
-#endif /* !(CONFIG_SMP) */
+#include <asm/trap_block.h>
 
 #endif /* _SPARC64_CPUDATA_H */
diff --git a/arch/sparc/include/asm/dma-mapping.h b/arch/sparc/include/asm/dma-mapping.h
index 0f4150e..204e4bf 100644
--- a/arch/sparc/include/asm/dma-mapping.h
+++ b/arch/sparc/include/asm/dma-mapping.h
@@ -1,8 +1,166 @@
 #ifndef ___ASM_SPARC_DMA_MAPPING_H
 #define ___ASM_SPARC_DMA_MAPPING_H
-#if defined(__sparc__) && defined(__arch64__)
-#include <asm/dma-mapping_64.h>
-#else
-#include <asm/dma-mapping_32.h>
-#endif
+
+#include <linux/scatterlist.h>
+#include <linux/mm.h>
+
+#define DMA_ERROR_CODE	(~(dma_addr_t)0x0)
+
+extern int dma_supported(struct device *dev, u64 mask);
+extern int dma_set_mask(struct device *dev, u64 dma_mask);
+
+#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
+#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
+#define dma_is_consistent(d, h)	(1)
+
+struct dma_ops {
+	void *(*alloc_coherent)(struct device *dev, size_t size,
+				dma_addr_t *dma_handle, gfp_t flag);
+	void (*free_coherent)(struct device *dev, size_t size,
+			      void *cpu_addr, dma_addr_t dma_handle);
+	dma_addr_t (*map_page)(struct device *dev, struct page *page,
+			       unsigned long offset, size_t size,
+			       enum dma_data_direction direction);
+	void (*unmap_page)(struct device *dev, dma_addr_t dma_addr,
+			   size_t size,
+			   enum dma_data_direction direction);
+	int (*map_sg)(struct device *dev, struct scatterlist *sg, int nents,
+		      enum dma_data_direction direction);
+	void (*unmap_sg)(struct device *dev, struct scatterlist *sg,
+			 int nhwentries,
+			 enum dma_data_direction direction);
+	void (*sync_single_for_cpu)(struct device *dev,
+				    dma_addr_t dma_handle, size_t size,
+				    enum dma_data_direction direction);
+	void (*sync_single_for_device)(struct device *dev,
+				       dma_addr_t dma_handle, size_t size,
+				       enum dma_data_direction direction);
+	void (*sync_sg_for_cpu)(struct device *dev, struct scatterlist *sg,
+				int nelems,
+				enum dma_data_direction direction);
+	void (*sync_sg_for_device)(struct device *dev,
+				   struct scatterlist *sg, int nents,
+				   enum dma_data_direction dir);
+};
+extern const struct dma_ops *dma_ops;
+
+static inline void *dma_alloc_coherent(struct device *dev, size_t size,
+				       dma_addr_t *dma_handle, gfp_t flag)
+{
+	return dma_ops->alloc_coherent(dev, size, dma_handle, flag);
+}
+
+static inline void dma_free_coherent(struct device *dev, size_t size,
+				     void *cpu_addr, dma_addr_t dma_handle)
+{
+	dma_ops->free_coherent(dev, size, cpu_addr, dma_handle);
+}
+
+static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
+					size_t size,
+					enum dma_data_direction direction)
+{
+	return dma_ops->map_page(dev, virt_to_page(cpu_addr),
+				 (unsigned long)cpu_addr & ~PAGE_MASK, size,
+				 direction);
+}
+
+static inline void dma_unmap_single(struct device *dev, dma_addr_t dma_addr,
+				    size_t size,
+				    enum dma_data_direction direction)
+{
+	dma_ops->unmap_page(dev, dma_addr, size, direction);
+}
+
+static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
+				      unsigned long offset, size_t size,
+				      enum dma_data_direction direction)
+{
+	return dma_ops->map_page(dev, page, offset, size, direction);
+}
+
+static inline void dma_unmap_page(struct device *dev, dma_addr_t dma_address,
+				  size_t size,
+				  enum dma_data_direction direction)
+{
+	dma_ops->unmap_page(dev, dma_address, size, direction);
+}
+
+static inline int dma_map_sg(struct device *dev, struct scatterlist *sg,
+			     int nents, enum dma_data_direction direction)
+{
+	return dma_ops->map_sg(dev, sg, nents, direction);
+}
+
+static inline void dma_unmap_sg(struct device *dev, struct scatterlist *sg,
+				int nents, enum dma_data_direction direction)
+{
+	dma_ops->unmap_sg(dev, sg, nents, direction);
+}
+
+static inline void dma_sync_single_for_cpu(struct device *dev,
+					   dma_addr_t dma_handle, size_t size,
+					   enum dma_data_direction direction)
+{
+	dma_ops->sync_single_for_cpu(dev, dma_handle, size, direction);
+}
+
+static inline void dma_sync_single_for_device(struct device *dev,
+					      dma_addr_t dma_handle,
+					      size_t size,
+					      enum dma_data_direction direction)
+{
+	if (dma_ops->sync_single_for_device)
+		dma_ops->sync_single_for_device(dev, dma_handle, size,
+						direction);
+}
+
+static inline void dma_sync_sg_for_cpu(struct device *dev,
+				       struct scatterlist *sg, int nelems,
+				       enum dma_data_direction direction)
+{
+	dma_ops->sync_sg_for_cpu(dev, sg, nelems, direction);
+}
+
+static inline void dma_sync_sg_for_device(struct device *dev,
+					  struct scatterlist *sg, int nelems,
+					  enum dma_data_direction direction)
+{
+	if (dma_ops->sync_sg_for_device)
+		dma_ops->sync_sg_for_device(dev, sg, nelems, direction);
+}
+
+static inline void dma_sync_single_range_for_cpu(struct device *dev,
+						 dma_addr_t dma_handle,
+						 unsigned long offset,
+						 size_t size,
+						 enum dma_data_direction dir)
+{
+	dma_sync_single_for_cpu(dev, dma_handle+offset, size, dir);
+}
+
+static inline void dma_sync_single_range_for_device(struct device *dev,
+						    dma_addr_t dma_handle,
+						    unsigned long offset,
+						    size_t size,
+						    enum dma_data_direction dir)
+{
+	dma_sync_single_for_device(dev, dma_handle+offset, size, dir);
+}
+
+
+static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
+{
+	return (dma_addr == DMA_ERROR_CODE);
+}
+
+static inline int dma_get_cache_alignment(void)
+{
+	/*
+	 * no easy way to get cache size on all processors, so return
+	 * the maximum possible, to be safe
+	 */
+	return (1 << INTERNODE_CACHE_SHIFT);
+}
+
 #endif
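With the 32-bit and 64-bit headers folded into the single file above, drivers on both sparc flavours now go through the same dma_ops indirection. A short usage sketch of the unchanged driver-facing API; the device and buffer names are illustrative:

#include <linux/dma-mapping.h>

static int foo_dma_xfer(struct device *dev, void *buf, size_t len)
{
	dma_addr_t handle;

	handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, handle))
		return -ENOMEM;

	/* ... program the hardware with 'handle' and wait for completion ... */

	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
	return 0;
}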
diff --git a/arch/sparc/include/asm/dma-mapping_32.h b/arch/sparc/include/asm/dma-mapping_32.h
deleted file mode 100644
index 8a57ea0..0000000
--- a/arch/sparc/include/asm/dma-mapping_32.h
+++ /dev/null
@@ -1,60 +0,0 @@
-#ifndef _ASM_SPARC_DMA_MAPPING_H
-#define _ASM_SPARC_DMA_MAPPING_H
-
-#include <linux/types.h>
-
-struct device;
-struct scatterlist;
-struct page;
-
-#define DMA_ERROR_CODE	(~(dma_addr_t)0x0)
-
-extern int dma_supported(struct device *dev, u64 mask);
-extern int dma_set_mask(struct device *dev, u64 dma_mask);
-extern void *dma_alloc_coherent(struct device *dev, size_t size,
-				dma_addr_t *dma_handle, gfp_t flag);
-extern void dma_free_coherent(struct device *dev, size_t size,
-			      void *cpu_addr, dma_addr_t dma_handle);
-extern dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
-				 size_t size,
-				 enum dma_data_direction direction);
-extern void dma_unmap_single(struct device *dev, dma_addr_t dma_addr,
-			     size_t size,
-			     enum dma_data_direction direction);
-extern dma_addr_t dma_map_page(struct device *dev, struct page *page,
-			       unsigned long offset, size_t size,
-			       enum dma_data_direction direction);
-extern void dma_unmap_page(struct device *dev, dma_addr_t dma_address,
-			   size_t size, enum dma_data_direction direction);
-extern int dma_map_sg(struct device *dev, struct scatterlist *sg,
-		      int nents, enum dma_data_direction direction);
-extern void dma_unmap_sg(struct device *dev, struct scatterlist *sg,
-			 int nents, enum dma_data_direction direction);
-extern void dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
-				    size_t size,
-				    enum dma_data_direction direction);
-extern void dma_sync_single_for_device(struct device *dev,
-				       dma_addr_t dma_handle,
-				       size_t size,
-				       enum dma_data_direction direction);
-extern void dma_sync_single_range_for_cpu(struct device *dev,
-					  dma_addr_t dma_handle,
-					  unsigned long offset,
-					  size_t size,
-					  enum dma_data_direction direction);
-extern void dma_sync_single_range_for_device(struct device *dev,
-					     dma_addr_t dma_handle,
-					     unsigned long offset, size_t size,
-					     enum dma_data_direction direction);
-extern void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
-				int nelems, enum dma_data_direction direction);
-extern void dma_sync_sg_for_device(struct device *dev,
-				   struct scatterlist *sg, int nelems,
-				   enum dma_data_direction direction);
-extern int dma_mapping_error(struct device *dev, dma_addr_t dma_addr);
-extern int dma_get_cache_alignment(void);
-
-#define dma_alloc_noncoherent	dma_alloc_coherent
-#define dma_free_noncoherent	dma_free_coherent
-
-#endif /* _ASM_SPARC_DMA_MAPPING_H */
diff --git a/arch/sparc/include/asm/dma-mapping_64.h b/arch/sparc/include/asm/dma-mapping_64.h
deleted file mode 100644
index bfa64f9..0000000
--- a/arch/sparc/include/asm/dma-mapping_64.h
+++ /dev/null
@@ -1,154 +0,0 @@
-#ifndef _ASM_SPARC64_DMA_MAPPING_H
-#define _ASM_SPARC64_DMA_MAPPING_H
-
-#include <linux/scatterlist.h>
-#include <linux/mm.h>
-
-#define DMA_ERROR_CODE	(~(dma_addr_t)0x0)
-
-struct dma_ops {
-	void *(*alloc_coherent)(struct device *dev, size_t size,
-				dma_addr_t *dma_handle, gfp_t flag);
-	void (*free_coherent)(struct device *dev, size_t size,
-			      void *cpu_addr, dma_addr_t dma_handle);
-	dma_addr_t (*map_single)(struct device *dev, void *cpu_addr,
-				 size_t size,
-				 enum dma_data_direction direction);
-	void (*unmap_single)(struct device *dev, dma_addr_t dma_addr,
-			     size_t size,
-			     enum dma_data_direction direction);
-	int (*map_sg)(struct device *dev, struct scatterlist *sg, int nents,
-		      enum dma_data_direction direction);
-	void (*unmap_sg)(struct device *dev, struct scatterlist *sg,
-			 int nhwentries,
-			 enum dma_data_direction direction);
-	void (*sync_single_for_cpu)(struct device *dev,
-				    dma_addr_t dma_handle, size_t size,
-				    enum dma_data_direction direction);
-	void (*sync_sg_for_cpu)(struct device *dev, struct scatterlist *sg,
-				int nelems,
-				enum dma_data_direction direction);
-};
-extern const struct dma_ops *dma_ops;
-
-extern int dma_supported(struct device *dev, u64 mask);
-extern int dma_set_mask(struct device *dev, u64 dma_mask);
-
-static inline void *dma_alloc_coherent(struct device *dev, size_t size,
-				       dma_addr_t *dma_handle, gfp_t flag)
-{
-	return dma_ops->alloc_coherent(dev, size, dma_handle, flag);
-}
-
-static inline void dma_free_coherent(struct device *dev, size_t size,
-				     void *cpu_addr, dma_addr_t dma_handle)
-{
-	dma_ops->free_coherent(dev, size, cpu_addr, dma_handle);
-}
-
-static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
-					size_t size,
-					enum dma_data_direction direction)
-{
-	return dma_ops->map_single(dev, cpu_addr, size, direction);
-}
-
-static inline void dma_unmap_single(struct device *dev, dma_addr_t dma_addr,
-				    size_t size,
-				    enum dma_data_direction direction)
-{
-	dma_ops->unmap_single(dev, dma_addr, size, direction);
-}
-
-static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
-				      unsigned long offset, size_t size,
-				      enum dma_data_direction direction)
-{
-	return dma_ops->map_single(dev, page_address(page) + offset,
-				   size, direction);
-}
-
-static inline void dma_unmap_page(struct device *dev, dma_addr_t dma_address,
-				  size_t size,
-				  enum dma_data_direction direction)
-{
-	dma_ops->unmap_single(dev, dma_address, size, direction);
-}
-
-static inline int dma_map_sg(struct device *dev, struct scatterlist *sg,
-			     int nents, enum dma_data_direction direction)
-{
-	return dma_ops->map_sg(dev, sg, nents, direction);
-}
-
-static inline void dma_unmap_sg(struct device *dev, struct scatterlist *sg,
-				int nents, enum dma_data_direction direction)
-{
-	dma_ops->unmap_sg(dev, sg, nents, direction);
-}
-
-static inline void dma_sync_single_for_cpu(struct device *dev,
-					   dma_addr_t dma_handle, size_t size,
-					   enum dma_data_direction direction)
-{
-	dma_ops->sync_single_for_cpu(dev, dma_handle, size, direction);
-}
-
-static inline void dma_sync_single_for_device(struct device *dev,
-					      dma_addr_t dma_handle,
-					      size_t size,
-					      enum dma_data_direction direction)
-{
-	/* No flushing needed to sync cpu writes to the device.  */
-}
-
-static inline void dma_sync_single_range_for_cpu(struct device *dev,
-						 dma_addr_t dma_handle,
-						 unsigned long offset,
-						 size_t size,
-						 enum dma_data_direction direction)
-{
-	dma_sync_single_for_cpu(dev, dma_handle+offset, size, direction);
-}
-
-static inline void dma_sync_single_range_for_device(struct device *dev,
-						    dma_addr_t dma_handle,
-						    unsigned long offset,
-						    size_t size,
-						    enum dma_data_direction direction)
-{
-	/* No flushing needed to sync cpu writes to the device.  */
-}
-
-
-static inline void dma_sync_sg_for_cpu(struct device *dev,
-				       struct scatterlist *sg, int nelems,
-				       enum dma_data_direction direction)
-{
-	dma_ops->sync_sg_for_cpu(dev, sg, nelems, direction);
-}
-
-static inline void dma_sync_sg_for_device(struct device *dev,
-					  struct scatterlist *sg, int nelems,
-					  enum dma_data_direction direction)
-{
-	/* No flushing needed to sync cpu writes to the device.  */
-}
-
-static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
-{
-	return (dma_addr == DMA_ERROR_CODE);
-}
-
-static inline int dma_get_cache_alignment(void)
-{
-	/* no easy way to get cache size on all processors, so return
-	 * the maximum possible, to be safe */
-	return (1 << INTERNODE_CACHE_SHIFT);
-}
-
-#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
-#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
-#define dma_is_consistent(d, h)	(1)
-
-#endif /* _ASM_SPARC64_DMA_MAPPING_H */
diff --git a/arch/sparc/include/asm/ftrace.h b/arch/sparc/include/asm/ftrace.h
index d27716c..b0f18e9 100644
--- a/arch/sparc/include/asm/ftrace.h
+++ b/arch/sparc/include/asm/ftrace.h
@@ -11,4 +11,15 @@
 
 #endif
 
+#ifdef CONFIG_DYNAMIC_FTRACE
+/* relocation of mcount call site is the same as the address */
+static inline unsigned long ftrace_call_adjust(unsigned long addr)
+{
+	return addr;
+}
+
+struct dyn_arch_ftrace {
+};
+#endif /*  CONFIG_DYNAMIC_FTRACE */
+
 #endif /* _ASM_SPARC64_FTRACE */
diff --git a/arch/sparc/include/asm/mdesc.h b/arch/sparc/include/asm/mdesc.h
index 1acc727..9faa046 100644
--- a/arch/sparc/include/asm/mdesc.h
+++ b/arch/sparc/include/asm/mdesc.h
@@ -71,7 +71,8 @@
 
 extern void mdesc_register_notifier(struct mdesc_notifier_client *client);
 
-extern void mdesc_fill_in_cpu_data(cpumask_t mask);
+extern void mdesc_fill_in_cpu_data(cpumask_t *mask);
+extern void mdesc_populate_present_mask(cpumask_t *mask);
 
 extern void sun4v_mdesc_init(void);
 
diff --git a/arch/sparc/include/asm/percpu_64.h b/arch/sparc/include/asm/percpu_64.h
index bee6459..007aafb 100644
--- a/arch/sparc/include/asm/percpu_64.h
+++ b/arch/sparc/include/asm/percpu_64.h
@@ -7,20 +7,16 @@
 
 #ifdef CONFIG_SMP
 
-extern void real_setup_per_cpu_areas(void);
+#include <asm/trap_block.h>
 
-extern unsigned long __per_cpu_base;
-extern unsigned long __per_cpu_shift;
 #define __per_cpu_offset(__cpu) \
-	(__per_cpu_base + ((unsigned long)(__cpu) << __per_cpu_shift))
+	(trap_block[(__cpu)].__per_cpu_base)
 #define per_cpu_offset(x) (__per_cpu_offset(x))
 
 #define __my_cpu_offset __local_per_cpu_offset
 
 #else /* ! SMP */
 
-#define real_setup_per_cpu_areas()		do { } while (0)
-
 #endif	/* SMP */
 
 #include <asm-generic/percpu.h>
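
With __per_cpu_base and __per_cpu_shift gone, the per-cpu offset is simply whatever was stored in that CPU's trap_block[] slot. A small illustrative model of the new addressing follows (not code from this patch; the variable offset parameter is hypothetical):

#include <asm/trap_block.h>

/* Model of __per_cpu_offset(cpu): the base now lives in trap_block[]. */
static unsigned long example_percpu_addr(int cpu, unsigned long var_offset)
{
	return trap_block[cpu].__per_cpu_base + var_offset;
}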
diff --git a/arch/sparc/include/asm/prom.h b/arch/sparc/include/asm/prom.h
index 900d447..be8d7aa 100644
--- a/arch/sparc/include/asm/prom.h
+++ b/arch/sparc/include/asm/prom.h
@@ -86,6 +86,8 @@
 #endif
 
 extern void prom_build_devicetree(void);
+extern void of_populate_present_mask(void);
+extern void of_fill_in_cpu_data(void);
 
 /* Dummy ref counting routines - to be implemented later */
 static inline struct device_node *of_node_get(struct device_node *node)
diff --git a/arch/sparc/include/asm/trap_block.h b/arch/sparc/include/asm/trap_block.h
new file mode 100644
index 0000000..7e26b2d
--- /dev/null
+++ b/arch/sparc/include/asm/trap_block.h
@@ -0,0 +1,207 @@
+#ifndef _SPARC_TRAP_BLOCK_H
+#define _SPARC_TRAP_BLOCK_H
+
+#include <asm/hypervisor.h>
+#include <asm/asi.h>
+
+#ifndef __ASSEMBLY__
+
+/* Trap handling code needs to get at a few critical values upon
+ * trap entry and to process TSB misses.  These cannot be in the
+ * per_cpu() area as we really need to lock them into the TLB and
+ * thus make them part of the main kernel image.  As a result we
+ * try to make this as small as possible.
+ *
+ * This is padded out and aligned to 64 bytes to avoid false sharing
+ * on SMP.
+ */
+
+/* If you modify the size of this structure, please update
+ * TRAP_BLOCK_SZ_SHIFT below.
+ */
+struct thread_info;
+struct trap_per_cpu {
+/* D-cache line 1: Basic thread information, cpu and device mondo queues */
+	struct thread_info	*thread;
+	unsigned long		pgd_paddr;
+	unsigned long		cpu_mondo_pa;
+	unsigned long		dev_mondo_pa;
+
+/* D-cache line 2: Error Mondo Queue and kernel buffer pointers */
+	unsigned long		resum_mondo_pa;
+	unsigned long		resum_kernel_buf_pa;
+	unsigned long		nonresum_mondo_pa;
+	unsigned long		nonresum_kernel_buf_pa;
+
+/* Dcache lines 3, 4, 5, and 6: Hypervisor Fault Status */
+	struct hv_fault_status	fault_info;
+
+/* Dcache line 7: Physical addresses of CPU send mondo block and CPU list.  */
+	unsigned long		cpu_mondo_block_pa;
+	unsigned long		cpu_list_pa;
+	unsigned long		tsb_huge;
+	unsigned long		tsb_huge_temp;
+
+/* Dcache line 8: IRQ work list, and keep trap_block a power-of-2 in size.  */
+	unsigned long		irq_worklist_pa;
+	unsigned int		cpu_mondo_qmask;
+	unsigned int		dev_mondo_qmask;
+	unsigned int		resum_qmask;
+	unsigned int		nonresum_qmask;
+	unsigned long		__per_cpu_base;
+} __attribute__((aligned(64)));
+extern struct trap_per_cpu trap_block[NR_CPUS];
+extern void init_cur_cpu_trap(struct thread_info *);
+extern void setup_tba(void);
+extern int ncpus_probed;
+
+extern unsigned long real_hard_smp_processor_id(void);
+
+struct cpuid_patch_entry {
+	unsigned int	addr;
+	unsigned int	cheetah_safari[4];
+	unsigned int	cheetah_jbus[4];
+	unsigned int	starfire[4];
+	unsigned int	sun4v[4];
+};
+extern struct cpuid_patch_entry __cpuid_patch, __cpuid_patch_end;
+
+struct sun4v_1insn_patch_entry {
+	unsigned int	addr;
+	unsigned int	insn;
+};
+extern struct sun4v_1insn_patch_entry __sun4v_1insn_patch,
+	__sun4v_1insn_patch_end;
+
+struct sun4v_2insn_patch_entry {
+	unsigned int	addr;
+	unsigned int	insns[2];
+};
+extern struct sun4v_2insn_patch_entry __sun4v_2insn_patch,
+	__sun4v_2insn_patch_end;
+
+
+#endif /* !(__ASSEMBLY__) */
+
+#define TRAP_PER_CPU_THREAD		0x00
+#define TRAP_PER_CPU_PGD_PADDR		0x08
+#define TRAP_PER_CPU_CPU_MONDO_PA	0x10
+#define TRAP_PER_CPU_DEV_MONDO_PA	0x18
+#define TRAP_PER_CPU_RESUM_MONDO_PA	0x20
+#define TRAP_PER_CPU_RESUM_KBUF_PA	0x28
+#define TRAP_PER_CPU_NONRESUM_MONDO_PA	0x30
+#define TRAP_PER_CPU_NONRESUM_KBUF_PA	0x38
+#define TRAP_PER_CPU_FAULT_INFO		0x40
+#define TRAP_PER_CPU_CPU_MONDO_BLOCK_PA	0xc0
+#define TRAP_PER_CPU_CPU_LIST_PA	0xc8
+#define TRAP_PER_CPU_TSB_HUGE		0xd0
+#define TRAP_PER_CPU_TSB_HUGE_TEMP	0xd8
+#define TRAP_PER_CPU_IRQ_WORKLIST_PA	0xe0
+#define TRAP_PER_CPU_CPU_MONDO_QMASK	0xe8
+#define TRAP_PER_CPU_DEV_MONDO_QMASK	0xec
+#define TRAP_PER_CPU_RESUM_QMASK	0xf0
+#define TRAP_PER_CPU_NONRESUM_QMASK	0xf4
+#define TRAP_PER_CPU_PER_CPU_BASE	0xf8
+
+#define TRAP_BLOCK_SZ_SHIFT		8
+
+#include <asm/scratchpad.h>
+
+#define __GET_CPUID(REG)				\
+	/* Spitfire implementation (default). */	\
+661:	ldxa		[%g0] ASI_UPA_CONFIG, REG;	\
+	srlx		REG, 17, REG;			\
+	 and		REG, 0x1f, REG;			\
+	nop;						\
+	.section	.cpuid_patch, "ax";		\
+	/* Instruction location. */			\
+	.word		661b;				\
+	/* Cheetah Safari implementation. */		\
+	ldxa		[%g0] ASI_SAFARI_CONFIG, REG;	\
+	srlx		REG, 17, REG;			\
+	and		REG, 0x3ff, REG;		\
+	nop;						\
+	/* Cheetah JBUS implementation. */		\
+	ldxa		[%g0] ASI_JBUS_CONFIG, REG;	\
+	srlx		REG, 17, REG;			\
+	and		REG, 0x1f, REG;			\
+	nop;						\
+	/* Starfire implementation. */			\
+	sethi		%hi(0x1fff40000d0 >> 9), REG;	\
+	sllx		REG, 9, REG;			\
+	or		REG, 0xd0, REG;			\
+	lduwa		[REG] ASI_PHYS_BYPASS_EC_E, REG;\
+	/* sun4v implementation. */			\
+	mov		SCRATCHPAD_CPUID, REG;		\
+	ldxa		[REG] ASI_SCRATCHPAD, REG;	\
+	nop;						\
+	nop;						\
+	.previous;
+
+#ifdef CONFIG_SMP
+
+#define TRAP_LOAD_TRAP_BLOCK(DEST, TMP)		\
+	__GET_CPUID(TMP)			\
+	sethi	%hi(trap_block), DEST;		\
+	sllx	TMP, TRAP_BLOCK_SZ_SHIFT, TMP;	\
+	or	DEST, %lo(trap_block), DEST;	\
+	add	DEST, TMP, DEST;		\
+
+/* Clobbers TMP, loads current address space PGD phys address into DEST.  */
+#define TRAP_LOAD_PGD_PHYS(DEST, TMP)		\
+	TRAP_LOAD_TRAP_BLOCK(DEST, TMP)		\
+	ldx	[DEST + TRAP_PER_CPU_PGD_PADDR], DEST;
+
+/* Clobbers TMP, loads local processor's IRQ work area into DEST.  */
+#define TRAP_LOAD_IRQ_WORK_PA(DEST, TMP)	\
+	TRAP_LOAD_TRAP_BLOCK(DEST, TMP)		\
+	add	DEST, TRAP_PER_CPU_IRQ_WORKLIST_PA, DEST;
+
+/* Clobbers TMP, loads DEST with current thread info pointer.  */
+#define TRAP_LOAD_THREAD_REG(DEST, TMP)		\
+	TRAP_LOAD_TRAP_BLOCK(DEST, TMP)		\
+	ldx	[DEST + TRAP_PER_CPU_THREAD], DEST;
+
+/* Given the current thread info pointer in THR, load the per-cpu
+ * area base of the current processor into DEST.  REG1, REG2, and REG3 are
+ * clobbered.
+ *
+ * You absolutely cannot use DEST as a temporary in this code.  The
+ * reason is that traps can happen during execution, and return from
+ * trap will load the fully resolved DEST per-cpu base.  This can corrupt
+ * the calculations done by the macro mid-stream.
+ */
+#define LOAD_PER_CPU_BASE(DEST, THR, REG1, REG2, REG3)	\
+	lduh	[THR + TI_CPU], REG1;			\
+	sethi	%hi(trap_block), REG2;			\
+	sllx	REG1, TRAP_BLOCK_SZ_SHIFT, REG1;	\
+	or	REG2, %lo(trap_block), REG2;		\
+	add	REG2, REG1, REG2;			\
+	ldx	[REG2 + TRAP_PER_CPU_PER_CPU_BASE], DEST;
+
+#else
+
+#define TRAP_LOAD_TRAP_BLOCK(DEST, TMP)		\
+	sethi	%hi(trap_block), DEST;		\
+	or	DEST, %lo(trap_block), DEST;	\
+
+/* Uniprocessor versions, we know the cpuid is zero.  */
+#define TRAP_LOAD_PGD_PHYS(DEST, TMP)		\
+	TRAP_LOAD_TRAP_BLOCK(DEST, TMP)		\
+	ldx	[DEST + TRAP_PER_CPU_PGD_PADDR], DEST;
+
+/* Clobbers TMP, loads local processor's IRQ work area into DEST.  */
+#define TRAP_LOAD_IRQ_WORK_PA(DEST, TMP)	\
+	TRAP_LOAD_TRAP_BLOCK(DEST, TMP)		\
+	add	DEST, TRAP_PER_CPU_IRQ_WORKLIST_PA, DEST;
+
+#define TRAP_LOAD_THREAD_REG(DEST, TMP)		\
+	TRAP_LOAD_TRAP_BLOCK(DEST, TMP)		\
+	ldx	[DEST + TRAP_PER_CPU_THREAD], DEST;
+
+/* No per-cpu areas on uniprocessor, so no need to load DEST.  */
+#define LOAD_PER_CPU_BASE(DEST, THR, REG1, REG2, REG3)
+
+#endif /* !(CONFIG_SMP) */
+
+#endif /* _SPARC_TRAP_BLOCK_H */
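
The assembly offsets above only stay valid if the C structure keeps exactly this layout. A consistency sketch, under the assumption that struct hv_fault_status occupies 0x80 bytes (which the jump from 0x40 to 0xc0 in the offsets implies); the check function itself is hypothetical:

#include <linux/kernel.h>
#include <asm/trap_block.h>

static inline void trap_block_layout_check(void)
{
	BUILD_BUG_ON(offsetof(struct trap_per_cpu, pgd_paddr) !=
		     TRAP_PER_CPU_PGD_PADDR);
	BUILD_BUG_ON(offsetof(struct trap_per_cpu, __per_cpu_base) !=
		     TRAP_PER_CPU_PER_CPU_BASE);
	/* One trap_block[] slot per CPU, 1 << TRAP_BLOCK_SZ_SHIFT bytes each. */
	BUILD_BUG_ON(sizeof(struct trap_per_cpu) != (1 << TRAP_BLOCK_SZ_SHIFT));
}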
diff --git a/arch/sparc/include/asm/unistd.h b/arch/sparc/include/asm/unistd.h
index b8eb71e..b2c406d 100644
--- a/arch/sparc/include/asm/unistd.h
+++ b/arch/sparc/include/asm/unistd.h
@@ -394,8 +394,9 @@
 #define __NR_accept4		323
 #define __NR_preadv		324
 #define __NR_pwritev		325
+#define __NR_rt_tgsigqueueinfo	326
 
-#define NR_SYSCALLS		326
+#define NR_SYSCALLS		327
 
 #ifdef __32bit_syscall_numbers__
 /* Sparc 32-bit only has the "setresuid32", "getresuid32" variants,
diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile
index 54742e5..475ce46 100644
--- a/arch/sparc/kernel/Makefile
+++ b/arch/sparc/kernel/Makefile
@@ -37,6 +37,7 @@
 obj-$(CONFIG_SPARC32)   += muldiv.o
 obj-y                   += prom_common.o
 obj-y                   += prom_$(BITS).o
+obj-y                   += of_device_common.o
 obj-y                   += of_device_$(BITS).o
 obj-$(CONFIG_SPARC64)   += prom_irqtrans.o
 
@@ -54,6 +55,7 @@
 obj-$(CONFIG_SPARC64)   += mdesc.o
 obj-$(CONFIG_SPARC64)	+= pcr.o
 obj-$(CONFIG_SPARC64)	+= nmi.o
+obj-$(CONFIG_SPARC64_SMP) += cpumap.o
 
 # sparc32 do not use GENERIC_HARDIRQS but uses the generic devres implementation
 obj-$(CONFIG_SPARC32)     += devres.o
diff --git a/arch/sparc/kernel/cpumap.c b/arch/sparc/kernel/cpumap.c
new file mode 100644
index 0000000..7430ed0
--- /dev/null
+++ b/arch/sparc/kernel/cpumap.c
@@ -0,0 +1,431 @@
+/* cpumap.c: used for optimizing CPU assignment
+ *
+ * Copyright (C) 2009 Hong H. Pham <hong.pham@windriver.com>
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/cpumask.h>
+#include <linux/spinlock.h>
+#include <asm/cpudata.h>
+#include "cpumap.h"
+
+
+enum {
+	CPUINFO_LVL_ROOT = 0,
+	CPUINFO_LVL_NODE,
+	CPUINFO_LVL_CORE,
+	CPUINFO_LVL_PROC,
+	CPUINFO_LVL_MAX,
+};
+
+enum {
+	ROVER_NO_OP              = 0,
+	/* Increment rover every time level is visited */
+	ROVER_INC_ON_VISIT       = 1 << 0,
+	/* Increment parent's rover every time rover wraps around */
+	ROVER_INC_PARENT_ON_LOOP = 1 << 1,
+};
+
+struct cpuinfo_node {
+	int id;
+	int level;
+	int num_cpus;    /* Number of CPUs in this hierarchy */
+	int parent_index;
+	int child_start; /* Array index of the first child node */
+	int child_end;   /* Array index of the last child node */
+	int rover;       /* Child node iterator */
+};
+
+struct cpuinfo_level {
+	int start_index; /* Index of first node of a level in a cpuinfo tree */
+	int end_index;   /* Index of last node of a level in a cpuinfo tree */
+	int num_nodes;   /* Number of nodes in a level in a cpuinfo tree */
+};
+
+struct cpuinfo_tree {
+	int total_nodes;
+
+	/* Offsets into nodes[] for each level of the tree */
+	struct cpuinfo_level level[CPUINFO_LVL_MAX];
+	struct cpuinfo_node  nodes[0];
+};
+
+
+static struct cpuinfo_tree *cpuinfo_tree;
+
+static u16 cpu_distribution_map[NR_CPUS];
+static DEFINE_SPINLOCK(cpu_map_lock);
+
+
+/* Niagara optimized cpuinfo tree traversal. */
+static const int niagara_iterate_method[] = {
+	[CPUINFO_LVL_ROOT] = ROVER_NO_OP,
+
+	/* Strands (or virtual CPUs) within a core may not run concurrently
+	 * on the Niagara, as instruction pipeline(s) are shared.  Distribute
+	 * work to strands in different cores first for better concurrency.
+	 * Go to next NUMA node when all cores are used.
+	 */
+	[CPUINFO_LVL_NODE] = ROVER_INC_ON_VISIT|ROVER_INC_PARENT_ON_LOOP,
+
+	/* Strands are grouped together by proc_id in cpuinfo_sparc, i.e.
+	 * a proc_id represents an instruction pipeline.  Distribute work to
+	 * strands in different proc_id groups if the core has multiple
+	 * instruction pipelines (e.g. the Niagara 2/2+ has two).
+	 */
+	[CPUINFO_LVL_CORE] = ROVER_INC_ON_VISIT,
+
+	/* Pick the next strand in the proc_id group. */
+	[CPUINFO_LVL_PROC] = ROVER_INC_ON_VISIT,
+};
+
+/* Generic cpuinfo tree traversal.  Distribute work round robin across NUMA
+ * nodes.
+ */
+static const int generic_iterate_method[] = {
+	[CPUINFO_LVL_ROOT] = ROVER_INC_ON_VISIT,
+	[CPUINFO_LVL_NODE] = ROVER_NO_OP,
+	[CPUINFO_LVL_CORE] = ROVER_INC_PARENT_ON_LOOP,
+	[CPUINFO_LVL_PROC] = ROVER_INC_ON_VISIT|ROVER_INC_PARENT_ON_LOOP,
+};
+
+
+static int cpuinfo_id(int cpu, int level)
+{
+	int id;
+
+	switch (level) {
+	case CPUINFO_LVL_ROOT:
+		id = 0;
+		break;
+	case CPUINFO_LVL_NODE:
+		id = cpu_to_node(cpu);
+		break;
+	case CPUINFO_LVL_CORE:
+		id = cpu_data(cpu).core_id;
+		break;
+	case CPUINFO_LVL_PROC:
+		id = cpu_data(cpu).proc_id;
+		break;
+	default:
+		id = -EINVAL;
+	}
+	return id;
+}
+
+/*
+ * Enumerate the CPU information in __cpu_data to determine the start index,
+ * end index, and number of nodes for each level in the cpuinfo tree.  The
+ * total number of cpuinfo nodes required to build the tree is returned.
+ */
+static int enumerate_cpuinfo_nodes(struct cpuinfo_level *tree_level)
+{
+	int prev_id[CPUINFO_LVL_MAX];
+	int i, n, num_nodes;
+
+	for (i = CPUINFO_LVL_ROOT; i < CPUINFO_LVL_MAX; i++) {
+		struct cpuinfo_level *lv = &tree_level[i];
+
+		prev_id[i] = -1;
+		lv->start_index = lv->end_index = lv->num_nodes = 0;
+	}
+
+	num_nodes = 1; /* Include the root node */
+
+	for (i = 0; i < num_possible_cpus(); i++) {
+		if (!cpu_online(i))
+			continue;
+
+		n = cpuinfo_id(i, CPUINFO_LVL_NODE);
+		if (n > prev_id[CPUINFO_LVL_NODE]) {
+			tree_level[CPUINFO_LVL_NODE].num_nodes++;
+			prev_id[CPUINFO_LVL_NODE] = n;
+			num_nodes++;
+		}
+		n = cpuinfo_id(i, CPUINFO_LVL_CORE);
+		if (n > prev_id[CPUINFO_LVL_CORE]) {
+			tree_level[CPUINFO_LVL_CORE].num_nodes++;
+			prev_id[CPUINFO_LVL_CORE] = n;
+			num_nodes++;
+		}
+		n = cpuinfo_id(i, CPUINFO_LVL_PROC);
+		if (n > prev_id[CPUINFO_LVL_PROC]) {
+			tree_level[CPUINFO_LVL_PROC].num_nodes++;
+			prev_id[CPUINFO_LVL_PROC] = n;
+			num_nodes++;
+		}
+	}
+
+	tree_level[CPUINFO_LVL_ROOT].num_nodes = 1;
+
+	n = tree_level[CPUINFO_LVL_NODE].num_nodes;
+	tree_level[CPUINFO_LVL_NODE].start_index = 1;
+	tree_level[CPUINFO_LVL_NODE].end_index   = n;
+
+	n++;
+	tree_level[CPUINFO_LVL_CORE].start_index = n;
+	n += tree_level[CPUINFO_LVL_CORE].num_nodes;
+	tree_level[CPUINFO_LVL_CORE].end_index   = n - 1;
+
+	tree_level[CPUINFO_LVL_PROC].start_index = n;
+	n += tree_level[CPUINFO_LVL_PROC].num_nodes;
+	tree_level[CPUINFO_LVL_PROC].end_index   = n - 1;
+
+	return num_nodes;
+}
+
+/* Build a tree representation of the CPU hierarchy using the per CPU
+ * information in __cpu_data.  Entries in __cpu_data[0..NR_CPUS] are
+ * assumed to be sorted in ascending order based on node, core_id, and
+ * proc_id (in order of significance).
+ */
+static struct cpuinfo_tree *build_cpuinfo_tree(void)
+{
+	struct cpuinfo_tree *new_tree;
+	struct cpuinfo_node *node;
+	struct cpuinfo_level tmp_level[CPUINFO_LVL_MAX];
+	int num_cpus[CPUINFO_LVL_MAX];
+	int level_rover[CPUINFO_LVL_MAX];
+	int prev_id[CPUINFO_LVL_MAX];
+	int n, id, cpu, prev_cpu, last_cpu, level;
+
+	n = enumerate_cpuinfo_nodes(tmp_level);
+
+	new_tree = kzalloc(sizeof(struct cpuinfo_tree) +
+	                   (sizeof(struct cpuinfo_node) * n), GFP_ATOMIC);
+	if (!new_tree)
+		return NULL;
+
+	new_tree->total_nodes = n;
+	memcpy(&new_tree->level, tmp_level, sizeof(tmp_level));
+
+	prev_cpu = cpu = first_cpu(cpu_online_map);
+
+	/* Initialize all levels in the tree with the first CPU */
+	for (level = CPUINFO_LVL_PROC; level >= CPUINFO_LVL_ROOT; level--) {
+		n = new_tree->level[level].start_index;
+
+		level_rover[level] = n;
+		node = &new_tree->nodes[n];
+
+		id = cpuinfo_id(cpu, level);
+		if (unlikely(id < 0)) {
+			kfree(new_tree);
+			return NULL;
+		}
+		node->id = id;
+		node->level = level;
+		node->num_cpus = 1;
+
+		node->parent_index = (level > CPUINFO_LVL_ROOT)
+		    ? new_tree->level[level - 1].start_index : -1;
+
+		node->child_start = node->child_end = node->rover =
+		    (level == CPUINFO_LVL_PROC)
+		    ? cpu : new_tree->level[level + 1].start_index;
+
+		prev_id[level] = node->id;
+		num_cpus[level] = 1;
+	}
+
+	for (last_cpu = (num_possible_cpus() - 1); last_cpu >= 0; last_cpu--) {
+		if (cpu_online(last_cpu))
+			break;
+	}
+
+	while (++cpu <= last_cpu) {
+		if (!cpu_online(cpu))
+			continue;
+
+		for (level = CPUINFO_LVL_PROC; level >= CPUINFO_LVL_ROOT;
+		     level--) {
+			id = cpuinfo_id(cpu, level);
+			if (unlikely(id < 0)) {
+				kfree(new_tree);
+				return NULL;
+			}
+
+			if ((id != prev_id[level]) || (cpu == last_cpu)) {
+				prev_id[level] = id;
+				node = &new_tree->nodes[level_rover[level]];
+				node->num_cpus = num_cpus[level];
+				num_cpus[level] = 1;
+
+				if (cpu == last_cpu)
+					node->num_cpus++;
+
+				/* Connect tree node to parent */
+				if (level == CPUINFO_LVL_ROOT)
+					node->parent_index = -1;
+				else
+					node->parent_index =
+					    level_rover[level - 1];
+
+				if (level == CPUINFO_LVL_PROC) {
+					node->child_end =
+					    (cpu == last_cpu) ? cpu : prev_cpu;
+				} else {
+					node->child_end =
+					    level_rover[level + 1] - 1;
+				}
+
+				/* Initialize the next node in the same level */
+				n = ++level_rover[level];
+				if (n <= new_tree->level[level].end_index) {
+					node = &new_tree->nodes[n];
+					node->id = id;
+					node->level = level;
+
+					/* Connect node to child */
+					node->child_start = node->child_end =
+					node->rover =
+					    (level == CPUINFO_LVL_PROC)
+					    ? cpu : level_rover[level + 1];
+				}
+			} else
+				num_cpus[level]++;
+		}
+		prev_cpu = cpu;
+	}
+
+	return new_tree;
+}
+
+static void increment_rover(struct cpuinfo_tree *t, int node_index,
+                            int root_index, const int *rover_inc_table)
+{
+	struct cpuinfo_node *node = &t->nodes[node_index];
+	int top_level, level;
+
+	top_level = t->nodes[root_index].level;
+	for (level = node->level; level >= top_level; level--) {
+		node->rover++;
+		if (node->rover <= node->child_end)
+			return;
+
+		node->rover = node->child_start;
+		/* If parent's rover does not need to be adjusted, stop here. */
+		if ((level == top_level) ||
+		    !(rover_inc_table[level] & ROVER_INC_PARENT_ON_LOOP))
+			return;
+
+		node = &t->nodes[node->parent_index];
+	}
+}
+
+static int iterate_cpu(struct cpuinfo_tree *t, unsigned int root_index)
+{
+	const int *rover_inc_table;
+	int level, new_index, index = root_index;
+
+	switch (sun4v_chip_type) {
+	case SUN4V_CHIP_NIAGARA1:
+	case SUN4V_CHIP_NIAGARA2:
+		rover_inc_table = niagara_iterate_method;
+		break;
+	default:
+		rover_inc_table = generic_iterate_method;
+	}
+
+	for (level = t->nodes[root_index].level; level < CPUINFO_LVL_MAX;
+	     level++) {
+		new_index = t->nodes[index].rover;
+		if (rover_inc_table[level] & ROVER_INC_ON_VISIT)
+			increment_rover(t, index, root_index, rover_inc_table);
+
+		index = new_index;
+	}
+	return index;
+}
+
+static void _cpu_map_rebuild(void)
+{
+	int i;
+
+	if (cpuinfo_tree) {
+		kfree(cpuinfo_tree);
+		cpuinfo_tree = NULL;
+	}
+
+	cpuinfo_tree = build_cpuinfo_tree();
+	if (!cpuinfo_tree)
+		return;
+
+	/* Build CPU distribution map that spans all online CPUs.  No need
+	 * to check if the CPU is online, as that is done when the cpuinfo
+	 * tree is being built.
+	 */
+	for (i = 0; i < cpuinfo_tree->nodes[0].num_cpus; i++)
+		cpu_distribution_map[i] = iterate_cpu(cpuinfo_tree, 0);
+}
+
+/* Fallback if the cpuinfo tree could not be built.  CPU mapping is linear
+ * round robin.
+ */
+static int simple_map_to_cpu(unsigned int index)
+{
+	int i, end, cpu_rover;
+
+	cpu_rover = 0;
+	end = index % num_online_cpus();
+	for (i = 0; i < num_possible_cpus(); i++) {
+		if (cpu_online(cpu_rover)) {
+			if (cpu_rover >= end)
+				return cpu_rover;
+
+			cpu_rover++;
+		}
+	}
+
+	/* Impossible, since num_online_cpus() <= num_possible_cpus() */
+	return first_cpu(cpu_online_map);
+}
+
+static int _map_to_cpu(unsigned int index)
+{
+	struct cpuinfo_node *root_node;
+
+	if (unlikely(!cpuinfo_tree)) {
+		_cpu_map_rebuild();
+		if (!cpuinfo_tree)
+			return simple_map_to_cpu(index);
+	}
+
+	root_node = &cpuinfo_tree->nodes[0];
+#ifdef CONFIG_HOTPLUG_CPU
+	if (unlikely(root_node->num_cpus != num_online_cpus())) {
+		_cpu_map_rebuild();
+		if (!cpuinfo_tree)
+			return simple_map_to_cpu(index);
+	}
+#endif
+	return cpu_distribution_map[index % root_node->num_cpus];
+}
+
+int map_to_cpu(unsigned int index)
+{
+	int mapped_cpu;
+	unsigned long flag;
+
+	spin_lock_irqsave(&cpu_map_lock, flag);
+	mapped_cpu = _map_to_cpu(index);
+
+#ifdef CONFIG_HOTPLUG_CPU
+	while (unlikely(!cpu_online(mapped_cpu)))
+		mapped_cpu = _map_to_cpu(index);
+#endif
+	spin_unlock_irqrestore(&cpu_map_lock, flag);
+	return mapped_cpu;
+}
+EXPORT_SYMBOL(map_to_cpu);
+
+void cpu_map_rebuild(void)
+{
+	unsigned long flag;
+
+	spin_lock_irqsave(&cpu_map_lock, flag);
+	_cpu_map_rebuild();
+	spin_unlock_irqrestore(&cpu_map_lock, flag);
+}
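
A usage sketch for the new mapper (the caller below is hypothetical): handing out consecutive indices spreads work across different cores before reusing strands on Niagara, and round-robins across NUMA nodes elsewhere, which is what the irq_64.c change later in this series relies on.

#include "cpumap.h"

/* Hypothetical caller: assign one CPU per work queue. */
static void example_spread_queues(int *queue_cpu, int nqueues)
{
	int i;

	for (i = 0; i < nqueues; i++)
		queue_cpu[i] = map_to_cpu(i);
}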
diff --git a/arch/sparc/kernel/cpumap.h b/arch/sparc/kernel/cpumap.h
new file mode 100644
index 0000000..e639880
--- /dev/null
+++ b/arch/sparc/kernel/cpumap.h
@@ -0,0 +1,16 @@
+#ifndef _CPUMAP_H
+#define _CPUMAP_H
+
+#ifdef CONFIG_SMP
+extern void cpu_map_rebuild(void);
+extern int  map_to_cpu(unsigned int index);
+#define cpu_map_init() cpu_map_rebuild()
+#else
+#define cpu_map_init() do {} while (0)
+static inline int map_to_cpu(unsigned int index)
+{
+	return raw_smp_processor_id();
+}
+#endif
+
+#endif
diff --git a/arch/sparc/kernel/dma.c b/arch/sparc/kernel/dma.c
index ebc8403..524c32f 100644
--- a/arch/sparc/kernel/dma.c
+++ b/arch/sparc/kernel/dma.c
@@ -35,8 +35,8 @@
 }
 EXPORT_SYMBOL(dma_set_mask);
 
-void *dma_alloc_coherent(struct device *dev, size_t size,
-			 dma_addr_t *dma_handle, gfp_t flag)
+static void *dma32_alloc_coherent(struct device *dev, size_t size,
+				  dma_addr_t *dma_handle, gfp_t flag)
 {
 #ifdef CONFIG_PCI
 	if (dev->bus == &pci_bus_type)
@@ -44,10 +44,9 @@
 #endif
 	return sbus_alloc_consistent(dev, size, dma_handle);
 }
-EXPORT_SYMBOL(dma_alloc_coherent);
 
-void dma_free_coherent(struct device *dev, size_t size,
-		       void *cpu_addr, dma_addr_t dma_handle)
+static void dma32_free_coherent(struct device *dev, size_t size,
+				void *cpu_addr, dma_addr_t dma_handle)
 {
 #ifdef CONFIG_PCI
 	if (dev->bus == &pci_bus_type) {
@@ -58,38 +57,10 @@
 #endif
 	sbus_free_consistent(dev, size, cpu_addr, dma_handle);
 }
-EXPORT_SYMBOL(dma_free_coherent);
 
-dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
-			  size_t size, enum dma_data_direction direction)
-{
-#ifdef CONFIG_PCI
-	if (dev->bus == &pci_bus_type)
-		return pci_map_single(to_pci_dev(dev), cpu_addr,
-				      size, (int)direction);
-#endif
-	return sbus_map_single(dev, cpu_addr, size, (int)direction);
-}
-EXPORT_SYMBOL(dma_map_single);
-
-void dma_unmap_single(struct device *dev, dma_addr_t dma_addr,
-		      size_t size,
-		      enum dma_data_direction direction)
-{
-#ifdef CONFIG_PCI
-	if (dev->bus == &pci_bus_type) {
-		pci_unmap_single(to_pci_dev(dev), dma_addr,
-				 size, (int)direction);
-		return;
-	}
-#endif
-	sbus_unmap_single(dev, dma_addr, size, (int)direction);
-}
-EXPORT_SYMBOL(dma_unmap_single);
-
-dma_addr_t dma_map_page(struct device *dev, struct page *page,
-			unsigned long offset, size_t size,
-			enum dma_data_direction direction)
+static dma_addr_t dma32_map_page(struct device *dev, struct page *page,
+				 unsigned long offset, size_t size,
+				 enum dma_data_direction direction)
 {
 #ifdef CONFIG_PCI
 	if (dev->bus == &pci_bus_type)
@@ -99,10 +70,9 @@
 	return sbus_map_single(dev, page_address(page) + offset,
 			       size, (int)direction);
 }
-EXPORT_SYMBOL(dma_map_page);
 
-void dma_unmap_page(struct device *dev, dma_addr_t dma_address,
-		    size_t size, enum dma_data_direction direction)
+static void dma32_unmap_page(struct device *dev, dma_addr_t dma_address,
+			     size_t size, enum dma_data_direction direction)
 {
 #ifdef CONFIG_PCI
 	if (dev->bus == &pci_bus_type) {
@@ -113,10 +83,9 @@
 #endif
 	sbus_unmap_single(dev, dma_address, size, (int)direction);
 }
-EXPORT_SYMBOL(dma_unmap_page);
 
-int dma_map_sg(struct device *dev, struct scatterlist *sg,
-			     int nents, enum dma_data_direction direction)
+static int dma32_map_sg(struct device *dev, struct scatterlist *sg,
+			int nents, enum dma_data_direction direction)
 {
 #ifdef CONFIG_PCI
 	if (dev->bus == &pci_bus_type)
@@ -124,10 +93,9 @@
 #endif
 	return sbus_map_sg(dev, sg, nents, direction);
 }
-EXPORT_SYMBOL(dma_map_sg);
 
-void dma_unmap_sg(struct device *dev, struct scatterlist *sg,
-		  int nents, enum dma_data_direction direction)
+static void dma32_unmap_sg(struct device *dev, struct scatterlist *sg,
+			   int nents, enum dma_data_direction direction)
 {
 #ifdef CONFIG_PCI
 	if (dev->bus == &pci_bus_type) {
@@ -137,10 +105,10 @@
 #endif
 	sbus_unmap_sg(dev, sg, nents, (int)direction);
 }
-EXPORT_SYMBOL(dma_unmap_sg);
 
-void dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
-			     size_t size, enum dma_data_direction direction)
+static void dma32_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
+				      size_t size,
+				      enum dma_data_direction direction)
 {
 #ifdef CONFIG_PCI
 	if (dev->bus == &pci_bus_type) {
@@ -151,10 +119,10 @@
 #endif
 	sbus_dma_sync_single_for_cpu(dev, dma_handle, size, (int) direction);
 }
-EXPORT_SYMBOL(dma_sync_single_for_cpu);
 
-void dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
-				size_t size, enum dma_data_direction direction)
+static void dma32_sync_single_for_device(struct device *dev,
+					 dma_addr_t dma_handle, size_t size,
+					 enum dma_data_direction direction)
 {
 #ifdef CONFIG_PCI
 	if (dev->bus == &pci_bus_type) {
@@ -165,28 +133,9 @@
 #endif
 	sbus_dma_sync_single_for_device(dev, dma_handle, size, (int) direction);
 }
-EXPORT_SYMBOL(dma_sync_single_for_device);
 
-void dma_sync_single_range_for_cpu(struct device *dev,
-				   dma_addr_t dma_handle,
-				   unsigned long offset,
-				   size_t size,
-				   enum dma_data_direction direction)
-{
-	dma_sync_single_for_cpu(dev, dma_handle+offset, size, direction);
-}
-EXPORT_SYMBOL(dma_sync_single_range_for_cpu);
-
-void dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
-				      unsigned long offset, size_t size,
-				      enum dma_data_direction direction)
-{
-	dma_sync_single_for_device(dev, dma_handle+offset, size, direction);
-}
-EXPORT_SYMBOL(dma_sync_single_range_for_device);
-
-void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
-			 int nelems, enum dma_data_direction direction)
+static void dma32_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
+				  int nelems, enum dma_data_direction direction)
 {
 #ifdef CONFIG_PCI
 	if (dev->bus == &pci_bus_type) {
@@ -197,11 +146,10 @@
 #endif
 	BUG();
 }
-EXPORT_SYMBOL(dma_sync_sg_for_cpu);
 
-void dma_sync_sg_for_device(struct device *dev,
-			    struct scatterlist *sg, int nelems,
-			    enum dma_data_direction direction)
+static void dma32_sync_sg_for_device(struct device *dev,
+				     struct scatterlist *sg, int nelems,
+				     enum dma_data_direction direction)
 {
 #ifdef CONFIG_PCI
 	if (dev->bus == &pci_bus_type) {
@@ -212,16 +160,19 @@
 #endif
 	BUG();
 }
-EXPORT_SYMBOL(dma_sync_sg_for_device);
 
-int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
-{
-	return (dma_addr == DMA_ERROR_CODE);
-}
-EXPORT_SYMBOL(dma_mapping_error);
+static const struct dma_ops dma32_dma_ops = {
+	.alloc_coherent		= dma32_alloc_coherent,
+	.free_coherent		= dma32_free_coherent,
+	.map_page		= dma32_map_page,
+	.unmap_page		= dma32_unmap_page,
+	.map_sg			= dma32_map_sg,
+	.unmap_sg		= dma32_unmap_sg,
+	.sync_single_for_cpu	= dma32_sync_single_for_cpu,
+	.sync_single_for_device	= dma32_sync_single_for_device,
+	.sync_sg_for_cpu	= dma32_sync_sg_for_cpu,
+	.sync_sg_for_device	= dma32_sync_sg_for_device,
+};
 
-int dma_get_cache_alignment(void)
-{
-	return 32;
-}
-EXPORT_SYMBOL(dma_get_cache_alignment);
+const struct dma_ops *dma_ops = &dma32_dma_ops;
+EXPORT_SYMBOL(dma_ops);
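
With dma.c reduced to a dma_ops table, the old exported entry points are expected to become thin inlines in the shared asm/dma-mapping.h. A sketch of how dma_map_single() would route through a ->map_page member with the signature of dma32_map_page() above; the wrapper name is illustrative, not the header's actual code:

#include <linux/mm.h>

static inline dma_addr_t sketch_dma_map_single(struct device *dev,
					       void *cpu_addr, size_t size,
					       enum dma_data_direction dir)
{
	/* Split the virtual address into a page and an offset, since the
	 * ops table only takes page-based mappings.
	 */
	return dma_ops->map_page(dev, virt_to_page(cpu_addr),
				 offset_in_page(cpu_addr), size, dir);
}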
diff --git a/arch/sparc/kernel/ds.c b/arch/sparc/kernel/ds.c
index 90350f83..4a700f4 100644
--- a/arch/sparc/kernel/ds.c
+++ b/arch/sparc/kernel/ds.c
@@ -544,7 +544,8 @@
 			     resp_len, ncpus, mask,
 			     DR_CPU_STAT_CONFIGURED);
 
-	mdesc_fill_in_cpu_data(*mask);
+	mdesc_populate_present_mask(mask);
+	mdesc_fill_in_cpu_data(mask);
 
 	for_each_cpu_mask(cpu, *mask) {
 		int err;
diff --git a/arch/sparc/kernel/ftrace.c b/arch/sparc/kernel/ftrace.c
index d0218e7..d3b1a30 100644
--- a/arch/sparc/kernel/ftrace.c
+++ b/arch/sparc/kernel/ftrace.c
@@ -7,14 +7,10 @@
 
 #include <asm/ftrace.h>
 
+#ifdef CONFIG_DYNAMIC_FTRACE
 static const u32 ftrace_nop = 0x01000000;
 
-unsigned char *ftrace_nop_replace(void)
-{
-	return (char *)&ftrace_nop;
-}
-
-unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr)
+static u32 ftrace_call_replace(unsigned long ip, unsigned long addr)
 {
 	static u32 call;
 	s32 off;
@@ -22,15 +18,11 @@
 	off = ((s32)addr - (s32)ip);
 	call = 0x40000000 | ((u32)off >> 2);
 
-	return (unsigned char *) &call;
+	return call;
 }
 
-int
-ftrace_modify_code(unsigned long ip, unsigned char *old_code,
-		   unsigned char *new_code)
+static int ftrace_modify_code(unsigned long ip, u32 old, u32 new)
 {
-	u32 old = *(u32 *)old_code;
-	u32 new = *(u32 *)new_code;
 	u32 replaced;
 	int faulted;
 
@@ -59,18 +51,43 @@
 	return faulted;
 }
 
+int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec, unsigned long addr)
+{
+	unsigned long ip = rec->ip;
+	u32 old, new;
+
+	old = ftrace_call_replace(ip, addr);
+	new = ftrace_nop;
+	return ftrace_modify_code(ip, old, new);
+}
+
+int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
+{
+	unsigned long ip = rec->ip;
+	u32 old, new;
+
+	old = ftrace_nop;
+	new = ftrace_call_replace(ip, addr);
+	return ftrace_modify_code(ip, old, new);
+}
+
 int ftrace_update_ftrace_func(ftrace_func_t func)
 {
 	unsigned long ip = (unsigned long)(&ftrace_call);
-	unsigned char old[MCOUNT_INSN_SIZE], *new;
+	u32 old, new;
 
-	memcpy(old, &ftrace_call, MCOUNT_INSN_SIZE);
+	old = *(u32 *) &ftrace_call;
 	new = ftrace_call_replace(ip, (unsigned long)func);
 	return ftrace_modify_code(ip, old, new);
 }
 
 int __init ftrace_dyn_arch_init(void *data)
 {
-	ftrace_mcount_set(data);
+	unsigned long *p = data;
+
+	*p = 0;
+
 	return 0;
 }
+#endif
+
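
For reference, a worked example of the encoding performed by ftrace_call_replace() above; the addresses are illustrative:

	ip   = 0x0000000000404000   (patched mcount call site)
	addr = 0x0000000000410000   (ftrace trampoline)
	off  = addr - ip            = 0xc000 bytes
	insn = 0x40000000 | (off >> 2) = 0x40003000

i.e. a sparc "call" instruction whose 30-bit displacement field holds the branch distance in words (0x3000 words = 0xc000 bytes).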
diff --git a/arch/sparc/kernel/head_64.S b/arch/sparc/kernel/head_64.S
index 91bf4c7..f8f2105 100644
--- a/arch/sparc/kernel/head_64.S
+++ b/arch/sparc/kernel/head_64.S
@@ -641,28 +641,6 @@
 	/* Not reached... */
 
 1:
-	/* If we boot on a non-zero cpu, all of the per-cpu
-	 * variable references we make before setting up the
-	 * per-cpu areas will use a bogus offset.  Put a
-	 * compensating factor into __per_cpu_base to handle
-	 * this cleanly.
-	 *
-	 * What the per-cpu code calculates is:
-	 *
-	 *	__per_cpu_base + (cpu << __per_cpu_shift)
-	 *
-	 * These two variables are zero initially, so to
-	 * make it all cancel out to zero we need to put
-	 * "0 - (cpu << 0)" into __per_cpu_base so that the
-	 * above formula evaluates to zero.
-	 *
-	 * We cannot even perform a printk() until this stuff
-	 * is setup as that calls cpu_clock() which uses
-	 * per-cpu variables.
-	 */
-	sub	%g0, %o0, %o1
-	sethi	%hi(__per_cpu_base), %o2
-	stx	%o1, [%o2 + %lo(__per_cpu_base)]
 #else
 	mov	0, %o0
 #endif
diff --git a/arch/sparc/kernel/iommu.c b/arch/sparc/kernel/iommu.c
index d8900e1..0aeaefe 100644
--- a/arch/sparc/kernel/iommu.c
+++ b/arch/sparc/kernel/iommu.c
@@ -351,8 +351,9 @@
 		free_pages((unsigned long)cpu, order);
 }
 
-static dma_addr_t dma_4u_map_single(struct device *dev, void *ptr, size_t sz,
-				    enum dma_data_direction direction)
+static dma_addr_t dma_4u_map_page(struct device *dev, struct page *page,
+				  unsigned long offset, size_t sz,
+				  enum dma_data_direction direction)
 {
 	struct iommu *iommu;
 	struct strbuf *strbuf;
@@ -368,7 +369,7 @@
 	if (unlikely(direction == DMA_NONE))
 		goto bad_no_ctx;
 
-	oaddr = (unsigned long)ptr;
+	oaddr = (unsigned long)(page_address(page) + offset);
 	npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK);
 	npages >>= IO_PAGE_SHIFT;
 
@@ -472,8 +473,8 @@
 		       vaddr, ctx, npages);
 }
 
-static void dma_4u_unmap_single(struct device *dev, dma_addr_t bus_addr,
-				size_t sz, enum dma_data_direction direction)
+static void dma_4u_unmap_page(struct device *dev, dma_addr_t bus_addr,
+			      size_t sz, enum dma_data_direction direction)
 {
 	struct iommu *iommu;
 	struct strbuf *strbuf;
@@ -824,8 +825,8 @@
 static const struct dma_ops sun4u_dma_ops = {
 	.alloc_coherent		= dma_4u_alloc_coherent,
 	.free_coherent		= dma_4u_free_coherent,
-	.map_single		= dma_4u_map_single,
-	.unmap_single		= dma_4u_unmap_single,
+	.map_page		= dma_4u_map_page,
+	.unmap_page		= dma_4u_unmap_page,
 	.map_sg			= dma_4u_map_sg,
 	.unmap_sg		= dma_4u_unmap_sg,
 	.sync_single_for_cpu	= dma_4u_sync_single_for_cpu,
diff --git a/arch/sparc/kernel/irq_64.c b/arch/sparc/kernel/irq_64.c
index e5e78f9..bd07505 100644
--- a/arch/sparc/kernel/irq_64.c
+++ b/arch/sparc/kernel/irq_64.c
@@ -45,6 +45,7 @@
 #include <asm/cacheflush.h>
 
 #include "entry.h"
+#include "cpumap.h"
 
 #define NUM_IVECS	(IMAP_INR + 1)
 
@@ -256,35 +257,13 @@
 	int cpuid;
 
 	cpumask_copy(&mask, irq_desc[virt_irq].affinity);
-	if (cpus_equal(mask, CPU_MASK_ALL)) {
-		static int irq_rover;
-		static DEFINE_SPINLOCK(irq_rover_lock);
-		unsigned long flags;
-
-		/* Round-robin distribution... */
-	do_round_robin:
-		spin_lock_irqsave(&irq_rover_lock, flags);
-
-		while (!cpu_online(irq_rover)) {
-			if (++irq_rover >= nr_cpu_ids)
-				irq_rover = 0;
-		}
-		cpuid = irq_rover;
-		do {
-			if (++irq_rover >= nr_cpu_ids)
-				irq_rover = 0;
-		} while (!cpu_online(irq_rover));
-
-		spin_unlock_irqrestore(&irq_rover_lock, flags);
+	if (cpus_equal(mask, cpu_online_map)) {
+		cpuid = map_to_cpu(virt_irq);
 	} else {
 		cpumask_t tmp;
 
 		cpus_and(tmp, cpu_online_map, mask);
-
-		if (cpus_empty(tmp))
-			goto do_round_robin;
-
-		cpuid = first_cpu(tmp);
+		cpuid = cpus_empty(tmp) ? map_to_cpu(virt_irq) : first_cpu(tmp);
 	}
 
 	return cpuid;
diff --git a/arch/sparc/kernel/mdesc.c b/arch/sparc/kernel/mdesc.c
index f0e6ed2..938da19 100644
--- a/arch/sparc/kernel/mdesc.c
+++ b/arch/sparc/kernel/mdesc.c
@@ -574,7 +574,7 @@
 	mdesc_release(hp);
 }
 
-static void __devinit fill_in_one_cache(cpuinfo_sparc *c,
+static void __cpuinit fill_in_one_cache(cpuinfo_sparc *c,
 					struct mdesc_handle *hp,
 					u64 mp)
 {
@@ -619,8 +619,7 @@
 	}
 }
 
-static void __devinit mark_core_ids(struct mdesc_handle *hp, u64 mp,
-				    int core_id)
+static void __cpuinit mark_core_ids(struct mdesc_handle *hp, u64 mp, int core_id)
 {
 	u64 a;
 
@@ -653,7 +652,7 @@
 	}
 }
 
-static void __devinit set_core_ids(struct mdesc_handle *hp)
+static void __cpuinit set_core_ids(struct mdesc_handle *hp)
 {
 	int idx;
 	u64 mp;
@@ -678,8 +677,7 @@
 	}
 }
 
-static void __devinit mark_proc_ids(struct mdesc_handle *hp, u64 mp,
-				    int proc_id)
+static void __cpuinit mark_proc_ids(struct mdesc_handle *hp, u64 mp, int proc_id)
 {
 	u64 a;
 
@@ -698,8 +696,7 @@
 	}
 }
 
-static void __devinit __set_proc_ids(struct mdesc_handle *hp,
-				     const char *exec_unit_name)
+static void __cpuinit __set_proc_ids(struct mdesc_handle *hp, const char *exec_unit_name)
 {
 	int idx;
 	u64 mp;
@@ -720,13 +717,13 @@
 	}
 }
 
-static void __devinit set_proc_ids(struct mdesc_handle *hp)
+static void __cpuinit set_proc_ids(struct mdesc_handle *hp)
 {
 	__set_proc_ids(hp, "exec_unit");
 	__set_proc_ids(hp, "exec-unit");
 }
 
-static void __devinit get_one_mondo_bits(const u64 *p, unsigned int *mask,
+static void __cpuinit get_one_mondo_bits(const u64 *p, unsigned int *mask,
 					 unsigned char def)
 {
 	u64 val;
@@ -745,7 +742,7 @@
 	*mask = ((1U << def) * 64U) - 1U;
 }
 
-static void __devinit get_mondo_data(struct mdesc_handle *hp, u64 mp,
+static void __cpuinit get_mondo_data(struct mdesc_handle *hp, u64 mp,
 				     struct trap_per_cpu *tb)
 {
 	const u64 *val;
@@ -763,23 +760,15 @@
 	get_one_mondo_bits(val, &tb->nonresum_qmask, 2);
 }
 
-void __cpuinit mdesc_fill_in_cpu_data(cpumask_t mask)
+static void * __cpuinit mdesc_iterate_over_cpus(void *(*func)(struct mdesc_handle *, u64, int, void *), void *arg, cpumask_t *mask)
 {
 	struct mdesc_handle *hp = mdesc_grab();
+	void *ret = NULL;
 	u64 mp;
 
-	ncpus_probed = 0;
 	mdesc_for_each_node_by_name(hp, mp, "cpu") {
 		const u64 *id = mdesc_get_property(hp, mp, "id", NULL);
-		const u64 *cfreq = mdesc_get_property(hp, mp, "clock-frequency", NULL);
-		struct trap_per_cpu *tb;
-		cpuinfo_sparc *c;
-		int cpuid;
-		u64 a;
-
-		ncpus_probed++;
-
-		cpuid = *id;
+		int cpuid = *id;
 
 #ifdef CONFIG_SMP
 		if (cpuid >= NR_CPUS) {
@@ -788,62 +777,104 @@
 			       cpuid, NR_CPUS);
 			continue;
 		}
-		if (!cpu_isset(cpuid, mask))
+		if (!cpu_isset(cpuid, *mask))
 			continue;
-#else
-		/* On uniprocessor we only want the values for the
-		 * real physical cpu the kernel booted onto, however
-		 * cpu_data() only has one entry at index 0.
-		 */
-		if (cpuid != real_hard_smp_processor_id())
-			continue;
-		cpuid = 0;
 #endif
 
-		c = &cpu_data(cpuid);
-		c->clock_tick = *cfreq;
+		ret = func(hp, mp, cpuid, arg);
+		if (ret)
+			goto out;
+	}
+out:
+	mdesc_release(hp);
+	return ret;
+}
 
-		tb = &trap_block[cpuid];
-		get_mondo_data(hp, mp, tb);
+static void * __cpuinit record_one_cpu(struct mdesc_handle *hp, u64 mp, int cpuid, void *arg)
+{
+	ncpus_probed++;
+#ifdef CONFIG_SMP
+	set_cpu_present(cpuid, true);
+#endif
+	return NULL;
+}
 
-		mdesc_for_each_arc(a, hp, mp, MDESC_ARC_TYPE_FWD) {
-			u64 j, t = mdesc_arc_target(hp, a);
-			const char *t_name;
+void __cpuinit mdesc_populate_present_mask(cpumask_t *mask)
+{
+	if (tlb_type != hypervisor)
+		return;
 
-			t_name = mdesc_node_name(hp, t);
-			if (!strcmp(t_name, "cache")) {
-				fill_in_one_cache(c, hp, t);
-				continue;
-			}
+	ncpus_probed = 0;
+	mdesc_iterate_over_cpus(record_one_cpu, NULL, mask);
+}
 
-			mdesc_for_each_arc(j, hp, t, MDESC_ARC_TYPE_FWD) {
-				u64 n = mdesc_arc_target(hp, j);
-				const char *n_name;
+static void * __cpuinit fill_in_one_cpu(struct mdesc_handle *hp, u64 mp, int cpuid, void *arg)
+{
+	const u64 *cfreq = mdesc_get_property(hp, mp, "clock-frequency", NULL);
+	struct trap_per_cpu *tb;
+	cpuinfo_sparc *c;
+	u64 a;
 
-				n_name = mdesc_node_name(hp, n);
-				if (!strcmp(n_name, "cache"))
-					fill_in_one_cache(c, hp, n);
-			}
+#ifndef CONFIG_SMP
+	/* On uniprocessor we only want the values for the
+	 * real physical cpu the kernel booted onto, however
+	 * cpu_data() only has one entry at index 0.
+	 */
+	if (cpuid != real_hard_smp_processor_id())
+		return NULL;
+	cpuid = 0;
+#endif
+
+	c = &cpu_data(cpuid);
+	c->clock_tick = *cfreq;
+
+	tb = &trap_block[cpuid];
+	get_mondo_data(hp, mp, tb);
+
+	mdesc_for_each_arc(a, hp, mp, MDESC_ARC_TYPE_FWD) {
+		u64 j, t = mdesc_arc_target(hp, a);
+		const char *t_name;
+
+		t_name = mdesc_node_name(hp, t);
+		if (!strcmp(t_name, "cache")) {
+			fill_in_one_cache(c, hp, t);
+			continue;
 		}
 
-#ifdef CONFIG_SMP
-		cpu_set(cpuid, cpu_present_map);
-#endif
+		mdesc_for_each_arc(j, hp, t, MDESC_ARC_TYPE_FWD) {
+			u64 n = mdesc_arc_target(hp, j);
+			const char *n_name;
 
-		c->core_id = 0;
-		c->proc_id = -1;
+			n_name = mdesc_node_name(hp, n);
+			if (!strcmp(n_name, "cache"))
+				fill_in_one_cache(c, hp, n);
+		}
 	}
 
+	c->core_id = 0;
+	c->proc_id = -1;
+
+	return NULL;
+}
+
+void __cpuinit mdesc_fill_in_cpu_data(cpumask_t *mask)
+{
+	struct mdesc_handle *hp;
+
+	mdesc_iterate_over_cpus(fill_in_one_cpu, NULL, mask);
+
 #ifdef CONFIG_SMP
 	sparc64_multi_core = 1;
 #endif
 
+	hp = mdesc_grab();
+
 	set_core_ids(hp);
 	set_proc_ids(hp);
 
-	smp_fill_in_sib_core_maps();
-
 	mdesc_release(hp);
+
+	smp_fill_in_sib_core_maps();
 }
 
 static ssize_t mdesc_read(struct file *file, char __user *buf,
@@ -887,7 +918,6 @@
 {
 	struct mdesc_handle *hp;
 	unsigned long len, real_len, status;
-	cpumask_t mask;
 
 	(void) sun4v_mach_desc(0UL, 0UL, &len);
 
@@ -911,7 +941,4 @@
 	cur_mdesc = hp;
 
 	report_platform_properties();
-
-	cpus_setall(mask);
-	mdesc_fill_in_cpu_data(mask);
 }
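
The refactor means future per-cpu passes over the machine description only need a callback with the record_one_cpu()/fill_in_one_cpu() signature. A hypothetical example (the callback and its output are illustrative, not part of this patch):

static void * __cpuinit example_report_one_cpu(struct mdesc_handle *hp,
					       u64 mp, int cpuid, void *arg)
{
	const u64 *cfreq = mdesc_get_property(hp, mp, "clock-frequency", NULL);

	printk(KERN_INFO "cpu%d: clock-frequency %llu\n", cpuid,
	       cfreq ? (unsigned long long) *cfreq : 0ULL);
	return NULL;	/* NULL keeps the walk going; non-NULL stops it */
}

/* invoked as: mdesc_iterate_over_cpus(example_report_one_cpu, NULL, mask); */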
diff --git a/arch/sparc/kernel/of_device_32.c b/arch/sparc/kernel/of_device_32.c
index c8f14c1..9039670 100644
--- a/arch/sparc/kernel/of_device_32.c
+++ b/arch/sparc/kernel/of_device_32.c
@@ -6,159 +6,11 @@
 #include <linux/mod_devicetable.h>
 #include <linux/slab.h>
 #include <linux/errno.h>
+#include <linux/irq.h>
 #include <linux/of_device.h>
 #include <linux/of_platform.h>
 
-static int node_match(struct device *dev, void *data)
-{
-	struct of_device *op = to_of_device(dev);
-	struct device_node *dp = data;
-
-	return (op->node == dp);
-}
-
-struct of_device *of_find_device_by_node(struct device_node *dp)
-{
-	struct device *dev = bus_find_device(&of_platform_bus_type, NULL,
-					     dp, node_match);
-
-	if (dev)
-		return to_of_device(dev);
-
-	return NULL;
-}
-EXPORT_SYMBOL(of_find_device_by_node);
-
-unsigned int irq_of_parse_and_map(struct device_node *node, int index)
-{
-	struct of_device *op = of_find_device_by_node(node);
-
-	if (!op || index >= op->num_irqs)
-		return 0;
-
-	return op->irqs[index];
-}
-EXPORT_SYMBOL(irq_of_parse_and_map);
-
-/* Take the archdata values for IOMMU, STC, and HOSTDATA found in
- * BUS and propagate to all child of_device objects.
- */
-void of_propagate_archdata(struct of_device *bus)
-{
-	struct dev_archdata *bus_sd = &bus->dev.archdata;
-	struct device_node *bus_dp = bus->node;
-	struct device_node *dp;
-
-	for (dp = bus_dp->child; dp; dp = dp->sibling) {
-		struct of_device *op = of_find_device_by_node(dp);
-
-		op->dev.archdata.iommu = bus_sd->iommu;
-		op->dev.archdata.stc = bus_sd->stc;
-		op->dev.archdata.host_controller = bus_sd->host_controller;
-		op->dev.archdata.numa_node = bus_sd->numa_node;
-
-		if (dp->child)
-			of_propagate_archdata(op);
-	}
-}
-
-struct bus_type of_platform_bus_type;
-EXPORT_SYMBOL(of_platform_bus_type);
-
-static inline u64 of_read_addr(const u32 *cell, int size)
-{
-	u64 r = 0;
-	while (size--)
-		r = (r << 32) | *(cell++);
-	return r;
-}
-
-static void __init get_cells(struct device_node *dp,
-			     int *addrc, int *sizec)
-{
-	if (addrc)
-		*addrc = of_n_addr_cells(dp);
-	if (sizec)
-		*sizec = of_n_size_cells(dp);
-}
-
-/* Max address size we deal with */
-#define OF_MAX_ADDR_CELLS	4
-
-struct of_bus {
-	const char	*name;
-	const char	*addr_prop_name;
-	int		(*match)(struct device_node *parent);
-	void		(*count_cells)(struct device_node *child,
-				       int *addrc, int *sizec);
-	int		(*map)(u32 *addr, const u32 *range,
-			       int na, int ns, int pna);
-	unsigned long	(*get_flags)(const u32 *addr, unsigned long);
-};
-
-/*
- * Default translator (generic bus)
- */
-
-static void of_bus_default_count_cells(struct device_node *dev,
-				       int *addrc, int *sizec)
-{
-	get_cells(dev, addrc, sizec);
-}
-
-/* Make sure the least significant 64-bits are in-range.  Even
- * for 3 or 4 cell values it is a good enough approximation.
- */
-static int of_out_of_range(const u32 *addr, const u32 *base,
-			   const u32 *size, int na, int ns)
-{
-	u64 a = of_read_addr(addr, na);
-	u64 b = of_read_addr(base, na);
-
-	if (a < b)
-		return 1;
-
-	b += of_read_addr(size, ns);
-	if (a >= b)
-		return 1;
-
-	return 0;
-}
-
-static int of_bus_default_map(u32 *addr, const u32 *range,
-			      int na, int ns, int pna)
-{
-	u32 result[OF_MAX_ADDR_CELLS];
-	int i;
-
-	if (ns > 2) {
-		printk("of_device: Cannot handle size cells (%d) > 2.", ns);
-		return -EINVAL;
-	}
-
-	if (of_out_of_range(addr, range, range + na + pna, na, ns))
-		return -EINVAL;
-
-	/* Start with the parent range base.  */
-	memcpy(result, range + na, pna * 4);
-
-	/* Add in the child address offset.  */
-	for (i = 0; i < na; i++)
-		result[pna - 1 - i] +=
-			(addr[na - 1 - i] -
-			 range[na - 1 - i]);
-
-	memcpy(addr, result, pna * 4);
-
-	return 0;
-}
-
-static unsigned long of_bus_default_get_flags(const u32 *addr, unsigned long flags)
-{
-	if (flags)
-		return flags;
-	return IORESOURCE_MEM;
-}
+#include "of_device_common.h"
 
 /*
  * PCI bus specific translator
@@ -240,47 +92,6 @@
 	return flags;
 }
 
-/*
- * SBUS bus specific translator
- */
-
-static int of_bus_sbus_match(struct device_node *np)
-{
-	struct device_node *dp = np;
-
-	while (dp) {
-		if (!strcmp(dp->name, "sbus") ||
-		    !strcmp(dp->name, "sbi"))
-			return 1;
-
-		/* Have a look at use_1to1_mapping().  We're trying
-		 * to match SBUS if that's the top-level bus and we
-		 * don't have some intervening real bus that provides
-		 * ranges based translations.
-		 */
-		if (of_find_property(dp, "ranges", NULL) != NULL)
-			break;
-
-		dp = dp->parent;
-	}
-
-	return 0;
-}
-
-static void of_bus_sbus_count_cells(struct device_node *child,
-				   int *addrc, int *sizec)
-{
-	if (addrc)
-		*addrc = 2;
-	if (sizec)
-		*sizec = 1;
-}
-
-static int of_bus_sbus_map(u32 *addr, const u32 *range, int na, int ns, int pna)
-{
-	return of_bus_default_map(addr, range, na, ns, pna);
-}
-
 static unsigned long of_bus_sbus_get_flags(const u32 *addr, unsigned long flags)
 {
 	return IORESOURCE_MEM;
@@ -307,7 +118,7 @@
 		.addr_prop_name = "reg",
 		.match = of_bus_sbus_match,
 		.count_cells = of_bus_sbus_count_cells,
-		.map = of_bus_sbus_map,
+		.map = of_bus_default_map,
 		.get_flags = of_bus_sbus_get_flags,
 	},
 	/* Default */
diff --git a/arch/sparc/kernel/of_device_64.c b/arch/sparc/kernel/of_device_64.c
index 5ac287a..881947e 100644
--- a/arch/sparc/kernel/of_device_64.c
+++ b/arch/sparc/kernel/of_device_64.c
@@ -10,6 +10,8 @@
 #include <linux/of_device.h>
 #include <linux/of_platform.h>
 
+#include "of_device_common.h"
+
 void __iomem *of_ioremap(struct resource *res, unsigned long offset, unsigned long size, char *name)
 {
 	unsigned long ret = res->start + offset;
@@ -35,156 +37,6 @@
 }
 EXPORT_SYMBOL(of_iounmap);
 
-static int node_match(struct device *dev, void *data)
-{
-	struct of_device *op = to_of_device(dev);
-	struct device_node *dp = data;
-
-	return (op->node == dp);
-}
-
-struct of_device *of_find_device_by_node(struct device_node *dp)
-{
-	struct device *dev = bus_find_device(&of_platform_bus_type, NULL,
-					     dp, node_match);
-
-	if (dev)
-		return to_of_device(dev);
-
-	return NULL;
-}
-EXPORT_SYMBOL(of_find_device_by_node);
-
-unsigned int irq_of_parse_and_map(struct device_node *node, int index)
-{
-	struct of_device *op = of_find_device_by_node(node);
-
-	if (!op || index >= op->num_irqs)
-		return 0;
-
-	return op->irqs[index];
-}
-EXPORT_SYMBOL(irq_of_parse_and_map);
-
-/* Take the archdata values for IOMMU, STC, and HOSTDATA found in
- * BUS and propagate to all child of_device objects.
- */
-void of_propagate_archdata(struct of_device *bus)
-{
-	struct dev_archdata *bus_sd = &bus->dev.archdata;
-	struct device_node *bus_dp = bus->node;
-	struct device_node *dp;
-
-	for (dp = bus_dp->child; dp; dp = dp->sibling) {
-		struct of_device *op = of_find_device_by_node(dp);
-
-		op->dev.archdata.iommu = bus_sd->iommu;
-		op->dev.archdata.stc = bus_sd->stc;
-		op->dev.archdata.host_controller = bus_sd->host_controller;
-		op->dev.archdata.numa_node = bus_sd->numa_node;
-
-		if (dp->child)
-			of_propagate_archdata(op);
-	}
-}
-
-struct bus_type of_platform_bus_type;
-EXPORT_SYMBOL(of_platform_bus_type);
-
-static inline u64 of_read_addr(const u32 *cell, int size)
-{
-	u64 r = 0;
-	while (size--)
-		r = (r << 32) | *(cell++);
-	return r;
-}
-
-static void get_cells(struct device_node *dp, int *addrc, int *sizec)
-{
-	if (addrc)
-		*addrc = of_n_addr_cells(dp);
-	if (sizec)
-		*sizec = of_n_size_cells(dp);
-}
-
-/* Max address size we deal with */
-#define OF_MAX_ADDR_CELLS	4
-
-struct of_bus {
-	const char	*name;
-	const char	*addr_prop_name;
-	int		(*match)(struct device_node *parent);
-	void		(*count_cells)(struct device_node *child,
-				       int *addrc, int *sizec);
-	int		(*map)(u32 *addr, const u32 *range,
-			       int na, int ns, int pna);
-	unsigned long	(*get_flags)(const u32 *addr, unsigned long);
-};
-
-/*
- * Default translator (generic bus)
- */
-
-static void of_bus_default_count_cells(struct device_node *dev,
-				       int *addrc, int *sizec)
-{
-	get_cells(dev, addrc, sizec);
-}
-
-/* Make sure the least significant 64-bits are in-range.  Even
- * for 3 or 4 cell values it is a good enough approximation.
- */
-static int of_out_of_range(const u32 *addr, const u32 *base,
-			   const u32 *size, int na, int ns)
-{
-	u64 a = of_read_addr(addr, na);
-	u64 b = of_read_addr(base, na);
-
-	if (a < b)
-		return 1;
-
-	b += of_read_addr(size, ns);
-	if (a >= b)
-		return 1;
-
-	return 0;
-}
-
-static int of_bus_default_map(u32 *addr, const u32 *range,
-			      int na, int ns, int pna)
-{
-	u32 result[OF_MAX_ADDR_CELLS];
-	int i;
-
-	if (ns > 2) {
-		printk("of_device: Cannot handle size cells (%d) > 2.", ns);
-		return -EINVAL;
-	}
-
-	if (of_out_of_range(addr, range, range + na + pna, na, ns))
-		return -EINVAL;
-
-	/* Start with the parent range base.  */
-	memcpy(result, range + na, pna * 4);
-
-	/* Add in the child address offset.  */
-	for (i = 0; i < na; i++)
-		result[pna - 1 - i] +=
-			(addr[na - 1 - i] -
-			 range[na - 1 - i]);
-
-	memcpy(addr, result, pna * 4);
-
-	return 0;
-}
-
-static unsigned long of_bus_default_get_flags(const u32 *addr, unsigned long flags)
-{
-	if (flags)
-		return flags;
-	return IORESOURCE_MEM;
-}
-
 /*
  * PCI bus specific translator
  */
@@ -295,42 +147,6 @@
 }
 
 /*
- * SBUS bus specific translator
- */
-
-static int of_bus_sbus_match(struct device_node *np)
-{
-	struct device_node *dp = np;
-
-	while (dp) {
-		if (!strcmp(dp->name, "sbus") ||
-		    !strcmp(dp->name, "sbi"))
-			return 1;
-
-		/* Have a look at use_1to1_mapping().  We're trying
-		 * to match SBUS if that's the top-level bus and we
-		 * don't have some intervening real bus that provides
-		 * ranges based translations.
-		 */
-		if (of_find_property(dp, "ranges", NULL) != NULL)
-			break;
-
-		dp = dp->parent;
-	}
-
-	return 0;
-}
-
-static void of_bus_sbus_count_cells(struct device_node *child,
-				   int *addrc, int *sizec)
-{
-	if (addrc)
-		*addrc = 2;
-	if (sizec)
-		*sizec = 1;
-}
-
-/*
  * FHC/Central bus specific translator.
  *
  * This is just needed to hard-code the address and size cell
diff --git a/arch/sparc/kernel/of_device_common.c b/arch/sparc/kernel/of_device_common.c
new file mode 100644
index 0000000..cb8eb79
--- /dev/null
+++ b/arch/sparc/kernel/of_device_common.c
@@ -0,0 +1,174 @@
+#include <linux/string.h>
+#include <linux/kernel.h>
+#include <linux/of.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/irq.h>
+#include <linux/of_device.h>
+#include <linux/of_platform.h>
+
+#include "of_device_common.h"
+
+static int node_match(struct device *dev, void *data)
+{
+	struct of_device *op = to_of_device(dev);
+	struct device_node *dp = data;
+
+	return (op->node == dp);
+}
+
+struct of_device *of_find_device_by_node(struct device_node *dp)
+{
+	struct device *dev = bus_find_device(&of_platform_bus_type, NULL,
+					     dp, node_match);
+
+	if (dev)
+		return to_of_device(dev);
+
+	return NULL;
+}
+EXPORT_SYMBOL(of_find_device_by_node);
+
+unsigned int irq_of_parse_and_map(struct device_node *node, int index)
+{
+	struct of_device *op = of_find_device_by_node(node);
+
+	if (!op || index >= op->num_irqs)
+		return 0;
+
+	return op->irqs[index];
+}
+EXPORT_SYMBOL(irq_of_parse_and_map);
+
+/* Take the archdata values for IOMMU, STC, and HOSTDATA found in
+ * BUS and propagate to all child of_device objects.
+ */
+void of_propagate_archdata(struct of_device *bus)
+{
+	struct dev_archdata *bus_sd = &bus->dev.archdata;
+	struct device_node *bus_dp = bus->node;
+	struct device_node *dp;
+
+	for (dp = bus_dp->child; dp; dp = dp->sibling) {
+		struct of_device *op = of_find_device_by_node(dp);
+
+		op->dev.archdata.iommu = bus_sd->iommu;
+		op->dev.archdata.stc = bus_sd->stc;
+		op->dev.archdata.host_controller = bus_sd->host_controller;
+		op->dev.archdata.numa_node = bus_sd->numa_node;
+
+		if (dp->child)
+			of_propagate_archdata(op);
+	}
+}
+
+struct bus_type of_platform_bus_type;
+EXPORT_SYMBOL(of_platform_bus_type);
+
+static void get_cells(struct device_node *dp, int *addrc, int *sizec)
+{
+	if (addrc)
+		*addrc = of_n_addr_cells(dp);
+	if (sizec)
+		*sizec = of_n_size_cells(dp);
+}
+
+/*
+ * Default translator (generic bus)
+ */
+
+void of_bus_default_count_cells(struct device_node *dev, int *addrc, int *sizec)
+{
+	get_cells(dev, addrc, sizec);
+}
+
+/* Make sure the least significant 64-bits are in-range.  Even
+ * for 3 or 4 cell values it is a good enough approximation.
+ */
+int of_out_of_range(const u32 *addr, const u32 *base,
+		    const u32 *size, int na, int ns)
+{
+	u64 a = of_read_addr(addr, na);
+	u64 b = of_read_addr(base, na);
+
+	if (a < b)
+		return 1;
+
+	b += of_read_addr(size, ns);
+	if (a >= b)
+		return 1;
+
+	return 0;
+}
+
+int of_bus_default_map(u32 *addr, const u32 *range, int na, int ns, int pna)
+{
+	u32 result[OF_MAX_ADDR_CELLS];
+	int i;
+
+	if (ns > 2) {
+		printk("of_device: Cannot handle size cells (%d) > 2.", ns);
+		return -EINVAL;
+	}
+
+	if (of_out_of_range(addr, range, range + na + pna, na, ns))
+		return -EINVAL;
+
+	/* Start with the parent range base.  */
+	memcpy(result, range + na, pna * 4);
+
+	/* Add in the child address offset.  */
+	for (i = 0; i < na; i++)
+		result[pna - 1 - i] +=
+			(addr[na - 1 - i] -
+			 range[na - 1 - i]);
+
+	memcpy(addr, result, pna * 4);
+
+	return 0;
+}
+
+unsigned long of_bus_default_get_flags(const u32 *addr, unsigned long flags)
+{
+	if (flags)
+		return flags;
+	return IORESOURCE_MEM;
+}
+
+/*
+ * SBUS bus specific translator
+ */
+
+int of_bus_sbus_match(struct device_node *np)
+{
+	struct device_node *dp = np;
+
+	while (dp) {
+		if (!strcmp(dp->name, "sbus") ||
+		    !strcmp(dp->name, "sbi"))
+			return 1;
+
+		/* Have a look at use_1to1_mapping().  We're trying
+		 * to match SBUS if that's the top-level bus and we
+		 * don't have some intervening real bus that provides
+		 * ranges based translations.
+		 */
+		if (of_find_property(dp, "ranges", NULL) != NULL)
+			break;
+
+		dp = dp->parent;
+	}
+
+	return 0;
+}
+
+void of_bus_sbus_count_cells(struct device_node *child, int *addrc, int *sizec)
+{
+	if (addrc)
+		*addrc = 2;
+	if (sizec)
+		*sizec = 1;
+}
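
The helpers above all operate on device-tree addresses stored as arrays of big-endian 32-bit cells, folded most-significant cell first. A minimal, self-contained sketch of that decoding (hypothetical values, userspace only, not part of the patch):

#include <stdint.h>
#include <stdio.h>

/* Same folding as of_read_addr(): most significant cell first. */
static uint64_t read_cells(const uint32_t *cell, int size)
{
	uint64_t r = 0;

	while (size--)
		r = (r << 32) | *(cell++);
	return r;
}

int main(void)
{
	/* A hypothetical 2-cell (64-bit) address. */
	uint32_t addr[2] = { 0x0000008f, 0xf0000000 };

	printf("decoded: 0x%llx\n", (unsigned long long)read_cells(addr, 2));
	return 0;
}
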
diff --git a/arch/sparc/kernel/of_device_common.h b/arch/sparc/kernel/of_device_common.h
new file mode 100644
index 0000000..cdfd239
--- /dev/null
+++ b/arch/sparc/kernel/of_device_common.h
@@ -0,0 +1,36 @@
+#ifndef _OF_DEVICE_COMMON_H
+#define _OF_DEVICE_COMMON_H
+
+static inline u64 of_read_addr(const u32 *cell, int size)
+{
+	u64 r = 0;
+	while (size--)
+		r = (r << 32) | *(cell++);
+	return r;
+}
+
+void of_bus_default_count_cells(struct device_node *dev, int *addrc,
+				int *sizec);
+int of_out_of_range(const u32 *addr, const u32 *base,
+		    const u32 *size, int na, int ns);
+int of_bus_default_map(u32 *addr, const u32 *range, int na, int ns, int pna);
+unsigned long of_bus_default_get_flags(const u32 *addr, unsigned long flags);
+
+int of_bus_sbus_match(struct device_node *np);
+void of_bus_sbus_count_cells(struct device_node *child, int *addrc, int *sizec);
+
+/* Max address size we deal with */
+#define OF_MAX_ADDR_CELLS	4
+
+struct of_bus {
+	const char	*name;
+	const char	*addr_prop_name;
+	int		(*match)(struct device_node *parent);
+	void		(*count_cells)(struct device_node *child,
+				       int *addrc, int *sizec);
+	int		(*map)(u32 *addr, const u32 *range,
+			       int na, int ns, int pna);
+	unsigned long	(*get_flags)(const u32 *addr, unsigned long);
+};
+
+#endif /* _OF_DEVICE_COMMON_H */
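
How these pieces are meant to plug together: each bus type supplies a struct of_bus entry pointing at its callbacks, with the generic helpers as the fallback. A rough, hedged sketch only; the field values below are placeholders and are not copied from the sparc code:

/* Illustrative only: a bus table wired to the shared helpers above.
 * The NULL .match is assumed to mean "default bus, always matches". */
static struct of_bus example_busses[] = {
	{
		.name		= "default",
		.addr_prop_name	= "reg",
		.match		= NULL,
		.count_cells	= of_bus_default_count_cells,
		.map		= of_bus_default_map,
		.get_flags	= of_bus_default_get_flags,
	},
	{
		.name		= "sbus",
		.addr_prop_name	= "reg",
		.match		= of_bus_sbus_match,
		.count_cells	= of_bus_sbus_count_cells,
		.map		= of_bus_default_map,
		.get_flags	= of_bus_default_get_flags,
	},
};
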
diff --git a/arch/sparc/kernel/pci_sun4v.c b/arch/sparc/kernel/pci_sun4v.c
index 5db5ebe..2485eaa 100644
--- a/arch/sparc/kernel/pci_sun4v.c
+++ b/arch/sparc/kernel/pci_sun4v.c
@@ -230,8 +230,9 @@
 		free_pages((unsigned long)cpu, order);
 }
 
-static dma_addr_t dma_4v_map_single(struct device *dev, void *ptr, size_t sz,
-				    enum dma_data_direction direction)
+static dma_addr_t dma_4v_map_page(struct device *dev, struct page *page,
+				  unsigned long offset, size_t sz,
+				  enum dma_data_direction direction)
 {
 	struct iommu *iommu;
 	unsigned long flags, npages, oaddr;
@@ -245,7 +246,7 @@
 	if (unlikely(direction == DMA_NONE))
 		goto bad;
 
-	oaddr = (unsigned long)ptr;
+	oaddr = (unsigned long)(page_address(page) + offset);
 	npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK);
 	npages >>= IO_PAGE_SHIFT;
 
@@ -294,8 +295,8 @@
 	return DMA_ERROR_CODE;
 }
 
-static void dma_4v_unmap_single(struct device *dev, dma_addr_t bus_addr,
-				size_t sz, enum dma_data_direction direction)
+static void dma_4v_unmap_page(struct device *dev, dma_addr_t bus_addr,
+			      size_t sz, enum dma_data_direction direction)
 {
 	struct pci_pbm_info *pbm;
 	struct iommu *iommu;
@@ -537,8 +538,8 @@
 static const struct dma_ops sun4v_dma_ops = {
 	.alloc_coherent			= dma_4v_alloc_coherent,
 	.free_coherent			= dma_4v_free_coherent,
-	.map_single			= dma_4v_map_single,
-	.unmap_single			= dma_4v_unmap_single,
+	.map_page			= dma_4v_map_page,
+	.unmap_page			= dma_4v_unmap_page,
 	.map_sg				= dma_4v_map_sg,
 	.unmap_sg			= dma_4v_unmap_sg,
 	.sync_single_for_cpu		= dma_4v_sync_single_for_cpu,
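
The map_single/unmap_single to map_page/unmap_page switch follows the usual reduction: a virtually addressed buffer is just a page plus an offset. Schematically (a hedged sketch, not code from this patch; the function name is hypothetical):

/* Sketch: how a map_single-style request reduces to the page hook. */
static dma_addr_t example_map_single(struct device *dev, void *ptr,
				     size_t sz,
				     enum dma_data_direction direction)
{
	return dma_4v_map_page(dev, virt_to_page(ptr),
			       (unsigned long)ptr & ~PAGE_MASK,
			       sz, direction);
}
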
diff --git a/arch/sparc/kernel/prom.h b/arch/sparc/kernel/prom.h
index bb0f0fd..453397f 100644
--- a/arch/sparc/kernel/prom.h
+++ b/arch/sparc/kernel/prom.h
@@ -22,7 +22,6 @@
 
 extern char *build_path_component(struct device_node *dp);
 extern void of_console_init(void);
-extern void of_fill_in_cpu_data(void);
 
 extern unsigned int prom_early_allocated;
 
diff --git a/arch/sparc/kernel/prom_64.c b/arch/sparc/kernel/prom_64.c
index ca55c70..fb06ac2 100644
--- a/arch/sparc/kernel/prom_64.c
+++ b/arch/sparc/kernel/prom_64.c
@@ -374,75 +374,26 @@
 	return (tlb_type == spitfire ? "upa-portid" : "portid");
 }
 
-struct device_node *of_find_node_by_cpuid(int cpuid)
-{
-	struct device_node *dp;
-	const char *mid_prop = get_mid_prop();
-
-	for_each_node_by_type(dp, "cpu") {
-		int id = of_getintprop_default(dp, mid_prop, -1);
-		const char *this_mid_prop = mid_prop;
-
-		if (id < 0) {
-			this_mid_prop = "cpuid";
-			id = of_getintprop_default(dp, this_mid_prop, -1);
-		}
-
-		if (id < 0) {
-			prom_printf("OF: Serious problem, cpu lacks "
-				    "%s property", this_mid_prop);
-			prom_halt();
-		}
-		if (cpuid == id)
-			return dp;
-	}
-	return NULL;
-}
-
-void __init of_fill_in_cpu_data(void)
+static void *of_iterate_over_cpus(void *(*func)(struct device_node *, int, int), int arg)
 {
 	struct device_node *dp;
 	const char *mid_prop;
 
-	if (tlb_type == hypervisor)
-		return;
-
 	mid_prop = get_mid_prop();
-	ncpus_probed = 0;
 	for_each_node_by_type(dp, "cpu") {
 		int cpuid = of_getintprop_default(dp, mid_prop, -1);
 		const char *this_mid_prop = mid_prop;
-		struct device_node *portid_parent;
-		int portid = -1;
+		void *ret;
 
-		portid_parent = NULL;
 		if (cpuid < 0) {
 			this_mid_prop = "cpuid";
 			cpuid = of_getintprop_default(dp, this_mid_prop, -1);
-			if (cpuid >= 0) {
-				int limit = 2;
-
-				portid_parent = dp;
-				while (limit--) {
-					portid_parent = portid_parent->parent;
-					if (!portid_parent)
-						break;
-					portid = of_getintprop_default(portid_parent,
-								       "portid", -1);
-					if (portid >= 0)
-						break;
-				}
-			}
 		}
-
 		if (cpuid < 0) {
 			prom_printf("OF: Serious problem, cpu lacks "
 				    "%s property", this_mid_prop);
 			prom_halt();
 		}
-
-		ncpus_probed++;
-
 #ifdef CONFIG_SMP
 		if (cpuid >= NR_CPUS) {
 			printk(KERN_WARNING "Ignoring CPU %d which is "
@@ -450,79 +401,142 @@
 			       cpuid, NR_CPUS);
 			continue;
 		}
-#else
-		/* On uniprocessor we only want the values for the
-		 * real physical cpu the kernel booted onto, however
-		 * cpu_data() only has one entry at index 0.
-		 */
-		if (cpuid != real_hard_smp_processor_id())
-			continue;
-		cpuid = 0;
 #endif
+		ret = func(dp, cpuid, arg);
+		if (ret)
+			return ret;
+	}
+	return NULL;
+}
 
-		cpu_data(cpuid).clock_tick =
-			of_getintprop_default(dp, "clock-frequency", 0);
+static void *check_cpu_node(struct device_node *dp, int cpuid, int id)
+{
+	if (id == cpuid)
+		return dp;
+	return NULL;
+}
 
-		if (portid_parent) {
-			cpu_data(cpuid).dcache_size =
-				of_getintprop_default(dp, "l1-dcache-size",
-						      16 * 1024);
-			cpu_data(cpuid).dcache_line_size =
-				of_getintprop_default(dp, "l1-dcache-line-size",
-						      32);
-			cpu_data(cpuid).icache_size =
-				of_getintprop_default(dp, "l1-icache-size",
-						      8 * 1024);
-			cpu_data(cpuid).icache_line_size =
-				of_getintprop_default(dp, "l1-icache-line-size",
-						      32);
-			cpu_data(cpuid).ecache_size =
-				of_getintprop_default(dp, "l2-cache-size", 0);
-			cpu_data(cpuid).ecache_line_size =
-				of_getintprop_default(dp, "l2-cache-line-size", 0);
-			if (!cpu_data(cpuid).ecache_size ||
-			    !cpu_data(cpuid).ecache_line_size) {
-				cpu_data(cpuid).ecache_size =
-					of_getintprop_default(portid_parent,
-							      "l2-cache-size",
-							      (4 * 1024 * 1024));
-				cpu_data(cpuid).ecache_line_size =
-					of_getintprop_default(portid_parent,
-							      "l2-cache-line-size", 64);
-			}
+struct device_node *of_find_node_by_cpuid(int cpuid)
+{
+	return of_iterate_over_cpus(check_cpu_node, cpuid);
+}
 
-			cpu_data(cpuid).core_id = portid + 1;
-			cpu_data(cpuid).proc_id = portid;
+static void *record_one_cpu(struct device_node *dp, int cpuid, int arg)
+{
+	ncpus_probed++;
 #ifdef CONFIG_SMP
-			sparc64_multi_core = 1;
+	set_cpu_present(cpuid, true);
+	set_cpu_possible(cpuid, true);
 #endif
-		} else {
-			cpu_data(cpuid).dcache_size =
-				of_getintprop_default(dp, "dcache-size", 16 * 1024);
-			cpu_data(cpuid).dcache_line_size =
-				of_getintprop_default(dp, "dcache-line-size", 32);
+	return NULL;
+}
 
-			cpu_data(cpuid).icache_size =
-				of_getintprop_default(dp, "icache-size", 16 * 1024);
-			cpu_data(cpuid).icache_line_size =
-				of_getintprop_default(dp, "icache-line-size", 32);
+void __init of_populate_present_mask(void)
+{
+	if (tlb_type == hypervisor)
+		return;
 
+	ncpus_probed = 0;
+	of_iterate_over_cpus(record_one_cpu, 0);
+}
+
+static void *fill_in_one_cpu(struct device_node *dp, int cpuid, int arg)
+{
+	struct device_node *portid_parent = NULL;
+	int portid = -1;
+
+	if (of_find_property(dp, "cpuid", NULL)) {
+		int limit = 2;
+
+		portid_parent = dp;
+		while (limit--) {
+			portid_parent = portid_parent->parent;
+			if (!portid_parent)
+				break;
+			portid = of_getintprop_default(portid_parent,
+						       "portid", -1);
+			if (portid >= 0)
+				break;
+		}
+	}
+
+#ifndef CONFIG_SMP
+	/* On uniprocessor we only want the values for the
+	 * real physical cpu the kernel booted onto, however
+	 * cpu_data() only has one entry at index 0.
+	 */
+	if (cpuid != real_hard_smp_processor_id())
+		return NULL;
+	cpuid = 0;
+#endif
+
+	cpu_data(cpuid).clock_tick =
+		of_getintprop_default(dp, "clock-frequency", 0);
+
+	if (portid_parent) {
+		cpu_data(cpuid).dcache_size =
+			of_getintprop_default(dp, "l1-dcache-size",
+					      16 * 1024);
+		cpu_data(cpuid).dcache_line_size =
+			of_getintprop_default(dp, "l1-dcache-line-size",
+					      32);
+		cpu_data(cpuid).icache_size =
+			of_getintprop_default(dp, "l1-icache-size",
+					      8 * 1024);
+		cpu_data(cpuid).icache_line_size =
+			of_getintprop_default(dp, "l1-icache-line-size",
+					      32);
+		cpu_data(cpuid).ecache_size =
+			of_getintprop_default(dp, "l2-cache-size", 0);
+		cpu_data(cpuid).ecache_line_size =
+			of_getintprop_default(dp, "l2-cache-line-size", 0);
+		if (!cpu_data(cpuid).ecache_size ||
+		    !cpu_data(cpuid).ecache_line_size) {
 			cpu_data(cpuid).ecache_size =
-				of_getintprop_default(dp, "ecache-size",
+				of_getintprop_default(portid_parent,
+						      "l2-cache-size",
 						      (4 * 1024 * 1024));
 			cpu_data(cpuid).ecache_line_size =
-				of_getintprop_default(dp, "ecache-line-size", 64);
-
-			cpu_data(cpuid).core_id = 0;
-			cpu_data(cpuid).proc_id = -1;
+				of_getintprop_default(portid_parent,
+						      "l2-cache-line-size", 64);
 		}
 
+		cpu_data(cpuid).core_id = portid + 1;
+		cpu_data(cpuid).proc_id = portid;
 #ifdef CONFIG_SMP
-		set_cpu_present(cpuid, true);
-		set_cpu_possible(cpuid, true);
+		sparc64_multi_core = 1;
 #endif
+	} else {
+		cpu_data(cpuid).dcache_size =
+			of_getintprop_default(dp, "dcache-size", 16 * 1024);
+		cpu_data(cpuid).dcache_line_size =
+			of_getintprop_default(dp, "dcache-line-size", 32);
+
+		cpu_data(cpuid).icache_size =
+			of_getintprop_default(dp, "icache-size", 16 * 1024);
+		cpu_data(cpuid).icache_line_size =
+			of_getintprop_default(dp, "icache-line-size", 32);
+
+		cpu_data(cpuid).ecache_size =
+			of_getintprop_default(dp, "ecache-size",
+					      (4 * 1024 * 1024));
+		cpu_data(cpuid).ecache_line_size =
+			of_getintprop_default(dp, "ecache-line-size", 64);
+
+		cpu_data(cpuid).core_id = 0;
+		cpu_data(cpuid).proc_id = -1;
 	}
 
+	return NULL;
+}
+
+void __init of_fill_in_cpu_data(void)
+{
+	if (tlb_type == hypervisor)
+		return;
+
+	of_iterate_over_cpus(fill_in_one_cpu, 0);
+
 	smp_fill_in_sib_core_maps();
 }
 
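
The rework above extracts the cpu-node walk into of_iterate_over_cpus() and expresses each user as a small callback, where returning non-NULL stops the iteration early. The shape of an additional consumer would be roughly as follows (illustrative only; the callback and counter names are made up):

/* Hypothetical callback: count cpu nodes advertising a given
 * clock-frequency value.  Returning NULL keeps the iteration going. */
static int matching_clock_count;

static void *count_clock_cb(struct device_node *dp, int cpuid, int arg)
{
	if (of_getintprop_default(dp, "clock-frequency", 0) == arg)
		matching_clock_count++;
	return NULL;
}
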
diff --git a/arch/sparc/kernel/prom_common.c b/arch/sparc/kernel/prom_common.c
index ff7b591..0fb5789 100644
--- a/arch/sparc/kernel/prom_common.c
+++ b/arch/sparc/kernel/prom_common.c
@@ -313,6 +313,4 @@
 
 	printk("PROM: Built device tree with %u bytes of memory.\n",
 	       prom_early_allocated);
-
-	of_fill_in_cpu_data();
 }
diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c
index f7642e5a..fa44eaf 100644
--- a/arch/sparc/kernel/smp_64.c
+++ b/arch/sparc/kernel/smp_64.c
@@ -20,7 +20,8 @@
 #include <linux/cache.h>
 #include <linux/jiffies.h>
 #include <linux/profile.h>
-#include <linux/lmb.h>
+#include <linux/bootmem.h>
+#include <linux/vmalloc.h>
 #include <linux/cpu.h>
 
 #include <asm/head.h>
@@ -47,6 +48,8 @@
 #include <asm/ldc.h>
 #include <asm/hypervisor.h>
 
+#include "cpumap.h"
+
 int sparc64_multi_core __read_mostly;
 
 DEFINE_PER_CPU(cpumask_t, cpu_sibling_map) = CPU_MASK_NONE;
@@ -278,7 +281,7 @@
 	return kern_base + (val - KERNBASE);
 }
 
-static void __cpuinit ldom_startcpu_cpuid(unsigned int cpu, unsigned long thread_reg)
+static void __cpuinit ldom_startcpu_cpuid(unsigned int cpu, unsigned long thread_reg, void **descrp)
 {
 	extern unsigned long sparc64_ttable_tl0;
 	extern unsigned long kern_locked_tte_data;
@@ -298,12 +301,12 @@
 		       "hvtramp_descr.\n");
 		return;
 	}
+	*descrp = hdesc;
 
 	hdesc->cpu = cpu;
 	hdesc->num_mappings = num_kernel_image_mappings;
 
 	tb = &trap_block[cpu];
-	tb->hdesc = hdesc;
 
 	hdesc->fault_info_va = (unsigned long) &tb->fault_info;
 	hdesc->fault_info_pa = kimage_addr_to_ra(&tb->fault_info);
@@ -341,12 +344,12 @@
 
 static int __cpuinit smp_boot_one_cpu(unsigned int cpu)
 {
-	struct trap_per_cpu *tb = &trap_block[cpu];
 	unsigned long entry =
 		(unsigned long)(&sparc64_cpu_startup);
 	unsigned long cookie =
 		(unsigned long)(&cpu_new_thread);
 	struct task_struct *p;
+	void *descr = NULL;
 	int timeout, ret;
 
 	p = fork_idle(cpu);
@@ -359,7 +362,8 @@
 #if defined(CONFIG_SUN_LDOMS) && defined(CONFIG_HOTPLUG_CPU)
 		if (ldom_domaining_enabled)
 			ldom_startcpu_cpuid(cpu,
-					    (unsigned long) cpu_new_thread);
+					    (unsigned long) cpu_new_thread,
+					    &descr);
 		else
 #endif
 			prom_startcpu_cpuid(cpu, entry, cookie);
@@ -383,10 +387,7 @@
 	}
 	cpu_new_thread = NULL;
 
-	if (tb->hdesc) {
-		kfree(tb->hdesc);
-		tb->hdesc = NULL;
-	}
+	kfree(descr);
 
 	return ret;
 }
@@ -1315,6 +1316,8 @@
 	cpu_clear(cpu, cpu_online_map);
 	ipi_call_unlock();
 
+	cpu_map_rebuild();
+
 	return 0;
 }
 
@@ -1373,36 +1376,171 @@
 {
 }
 
-unsigned long __per_cpu_base __read_mostly;
-unsigned long __per_cpu_shift __read_mostly;
-
-EXPORT_SYMBOL(__per_cpu_base);
-EXPORT_SYMBOL(__per_cpu_shift);
-
-void __init real_setup_per_cpu_areas(void)
+/**
+ * pcpu_alloc_bootmem - NUMA friendly alloc_bootmem wrapper for percpu
+ * @cpu: cpu to allocate for
+ * @size: size allocation in bytes
+ * @align: alignment
+ *
+ * Allocate @size bytes aligned at @align for cpu @cpu.  This wrapper
+ * does the right thing for NUMA regardless of the current
+ * configuration.
+ *
+ * RETURNS:
+ * Pointer to the allocated area on success, NULL on failure.
+ */
+static void * __init pcpu_alloc_bootmem(unsigned int cpu, unsigned long size,
+					unsigned long align)
 {
-	unsigned long paddr, goal, size, i;
-	char *ptr;
+	const unsigned long goal = __pa(MAX_DMA_ADDRESS);
+#ifdef CONFIG_NEED_MULTIPLE_NODES
+	int node = cpu_to_node(cpu);
+	void *ptr;
 
-	/* Copy section for each CPU (we discard the original) */
-	goal = PERCPU_ENOUGH_ROOM;
+	if (!node_online(node) || !NODE_DATA(node)) {
+		ptr = __alloc_bootmem(size, align, goal);
+		pr_info("cpu %d has no node %d or node-local memory\n",
+			cpu, node);
+		pr_debug("per cpu data for cpu%d %lu bytes at %016lx\n",
+			 cpu, size, __pa(ptr));
+	} else {
+		ptr = __alloc_bootmem_node(NODE_DATA(node),
+					   size, align, goal);
+		pr_debug("per cpu data for cpu%d %lu bytes on node%d at "
+			 "%016lx\n", cpu, size, node, __pa(ptr));
+	}
+	return ptr;
+#else
+	return __alloc_bootmem(size, align, goal);
+#endif
+}
 
-	__per_cpu_shift = PAGE_SHIFT;
-	for (size = PAGE_SIZE; size < goal; size <<= 1UL)
-		__per_cpu_shift++;
+static size_t pcpur_size __initdata;
+static void **pcpur_ptrs __initdata;
 
-	paddr = lmb_alloc(size * NR_CPUS, PAGE_SIZE);
-	if (!paddr) {
-		prom_printf("Cannot allocate per-cpu memory.\n");
-		prom_halt();
+static struct page * __init pcpur_get_page(unsigned int cpu, int pageno)
+{
+	size_t off = (size_t)pageno << PAGE_SHIFT;
+
+	if (off >= pcpur_size)
+		return NULL;
+
+	return virt_to_page(pcpur_ptrs[cpu] + off);
+}
+
+#define PCPU_CHUNK_SIZE (4UL * 1024UL * 1024UL)
+
+static void __init pcpu_map_range(unsigned long start, unsigned long end,
+				  struct page *page)
+{
+	unsigned long pfn = page_to_pfn(page);
+	unsigned long pte_base;
+
+	BUG_ON((pfn<<PAGE_SHIFT)&(PCPU_CHUNK_SIZE - 1UL));
+
+	pte_base = (_PAGE_VALID | _PAGE_SZ4MB_4U |
+		    _PAGE_CP_4U | _PAGE_CV_4U |
+		    _PAGE_P_4U | _PAGE_W_4U);
+	if (tlb_type == hypervisor)
+		pte_base = (_PAGE_VALID | _PAGE_SZ4MB_4V |
+			    _PAGE_CP_4V | _PAGE_CV_4V |
+			    _PAGE_P_4V | _PAGE_W_4V);
+
+	while (start < end) {
+		pgd_t *pgd = pgd_offset_k(start);
+		unsigned long this_end;
+		pud_t *pud;
+		pmd_t *pmd;
+		pte_t *pte;
+
+		pud = pud_offset(pgd, start);
+		if (pud_none(*pud)) {
+			pmd_t *new;
+
+			new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
+			pud_populate(&init_mm, pud, new);
+		}
+
+		pmd = pmd_offset(pud, start);
+		if (!pmd_present(*pmd)) {
+			pte_t *new;
+
+			new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
+			pmd_populate_kernel(&init_mm, pmd, new);
+		}
+
+		pte = pte_offset_kernel(pmd, start);
+		this_end = (start + PMD_SIZE) & PMD_MASK;
+		if (this_end > end)
+			this_end = end;
+
+		while (start < this_end) {
+			unsigned long paddr = pfn << PAGE_SHIFT;
+
+			pte_val(*pte) = (paddr | pte_base);
+
+			start += PAGE_SIZE;
+			pte++;
+			pfn++;
+		}
+	}
+}
+
+void __init setup_per_cpu_areas(void)
+{
+	size_t dyn_size, static_size = __per_cpu_end - __per_cpu_start;
+	static struct vm_struct vm;
+	unsigned long delta, cpu;
+	size_t pcpu_unit_size;
+	size_t ptrs_size;
+
+	pcpur_size = PFN_ALIGN(static_size + PERCPU_MODULE_RESERVE +
+			       PERCPU_DYNAMIC_RESERVE);
+	dyn_size = pcpur_size - static_size - PERCPU_MODULE_RESERVE;
+
+
+	ptrs_size = PFN_ALIGN(num_possible_cpus() * sizeof(pcpur_ptrs[0]));
+	pcpur_ptrs = alloc_bootmem(ptrs_size);
+
+	for_each_possible_cpu(cpu) {
+		pcpur_ptrs[cpu] = pcpu_alloc_bootmem(cpu, PCPU_CHUNK_SIZE,
+						     PCPU_CHUNK_SIZE);
+
+		free_bootmem(__pa(pcpur_ptrs[cpu] + pcpur_size),
+			     PCPU_CHUNK_SIZE - pcpur_size);
+
+		memcpy(pcpur_ptrs[cpu], __per_cpu_load, static_size);
 	}
 
-	ptr = __va(paddr);
-	__per_cpu_base = ptr - __per_cpu_start;
+	/* allocate address and map */
+	vm.flags = VM_ALLOC;
+	vm.size = num_possible_cpus() * PCPU_CHUNK_SIZE;
+	vm_area_register_early(&vm, PCPU_CHUNK_SIZE);
 
-	for (i = 0; i < NR_CPUS; i++, ptr += size)
-		memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start);
+	for_each_possible_cpu(cpu) {
+		unsigned long start = (unsigned long) vm.addr;
+		unsigned long end;
+
+		start += cpu * PCPU_CHUNK_SIZE;
+		end = start + PCPU_CHUNK_SIZE;
+		pcpu_map_range(start, end, virt_to_page(pcpur_ptrs[cpu]));
+	}
+
+	pcpu_unit_size = pcpu_setup_first_chunk(pcpur_get_page, static_size,
+						PERCPU_MODULE_RESERVE, dyn_size,
+						PCPU_CHUNK_SIZE, vm.addr, NULL);
+
+	free_bootmem(__pa(pcpur_ptrs), ptrs_size);
+
+	delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
+	for_each_possible_cpu(cpu) {
+		__per_cpu_offset(cpu) = delta + cpu * pcpu_unit_size;
+	}
 
 	/* Setup %g5 for the boot cpu.  */
 	__local_per_cpu_offset = __per_cpu_offset(smp_processor_id());
+
+	of_fill_in_cpu_data();
+	if (tlb_type == hypervisor)
+		mdesc_fill_in_cpu_data(cpu_all_mask);
 }
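
With the first per-cpu chunk registered, every per-cpu variable for cpu N lives at its link-time address plus __per_cpu_offset(N), which is what the delta computed above establishes. Conceptually (a sketch, not part of the patch; the variable name is illustrative):

/* Illustrative per-cpu access once the offsets are in place. */
DEFINE_PER_CPU(unsigned long, example_counter);

static unsigned long read_counter_on(int cpu)
{
	return per_cpu(example_counter, cpu);
}
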
diff --git a/arch/sparc/kernel/systbls_32.S b/arch/sparc/kernel/systbls_32.S
index 00ec3b1..6909016 100644
--- a/arch/sparc/kernel/systbls_32.S
+++ b/arch/sparc/kernel/systbls_32.S
@@ -81,4 +81,6 @@
 /*305*/	.long sys_set_mempolicy, sys_kexec_load, sys_move_pages, sys_getcpu, sys_epoll_pwait
 /*310*/	.long sys_utimensat, sys_signalfd, sys_timerfd_create, sys_eventfd, sys_fallocate
 /*315*/	.long sys_timerfd_settime, sys_timerfd_gettime, sys_signalfd4, sys_eventfd2, sys_epoll_create1
-/*320*/	.long sys_dup3, sys_pipe2, sys_inotify_init1, sys_accept4, sys_preadv, sys_pwritev
+/*320*/	.long sys_dup3, sys_pipe2, sys_inotify_init1, sys_accept4, sys_preadv
+/*325*/	.long sys_pwritev, sys_rt_tgsigqueueinfo
+
diff --git a/arch/sparc/kernel/systbls_64.S b/arch/sparc/kernel/systbls_64.S
index 82b5bf8..6b3ee88 100644
--- a/arch/sparc/kernel/systbls_64.S
+++ b/arch/sparc/kernel/systbls_64.S
@@ -82,7 +82,8 @@
 	.word compat_sys_set_mempolicy, compat_sys_kexec_load, compat_sys_move_pages, sys_getcpu, compat_sys_epoll_pwait
 /*310*/	.word compat_sys_utimensat, compat_sys_signalfd, sys_timerfd_create, sys_eventfd, compat_sys_fallocate
 	.word compat_sys_timerfd_settime, compat_sys_timerfd_gettime, compat_sys_signalfd4, sys_eventfd2, sys_epoll_create1
-/*320*/	.word sys_dup3, sys_pipe2, sys_inotify_init1, sys_accept4, compat_sys_preadv, compat_sys_pwritev
+/*320*/	.word sys_dup3, sys_pipe2, sys_inotify_init1, sys_accept4, compat_sys_preadv
+	.word compat_sys_pwritev, compat_sys_rt_tgsigqueueinfo
 
 #endif /* CONFIG_COMPAT */
 
@@ -156,4 +157,5 @@
 	.word sys_set_mempolicy, sys_kexec_load, sys_move_pages, sys_getcpu, sys_epoll_pwait
 /*310*/	.word sys_utimensat, sys_signalfd, sys_timerfd_create, sys_eventfd, sys_fallocate
 	.word sys_timerfd_settime, sys_timerfd_gettime, sys_signalfd4, sys_eventfd2, sys_epoll_create1
-/*320*/	.word sys_dup3, sys_pipe2, sys_inotify_init1, sys_accept4, sys_preadv, sys_pwritev
+/*320*/	.word sys_dup3, sys_pipe2, sys_inotify_init1, sys_accept4, sys_preadv
+	.word sys_pwritev, sys_rt_tgsigqueueinfo
diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c
index d809c4e..10f7bb9 100644
--- a/arch/sparc/kernel/traps_64.c
+++ b/arch/sparc/kernel/traps_64.c
@@ -2509,6 +2509,7 @@
 }
 
 struct trap_per_cpu trap_block[NR_CPUS];
+EXPORT_SYMBOL(trap_block);
 
 /* This can get invoked before sched_init() so play it super safe
  * and use hard_smp_processor_id().
@@ -2530,84 +2531,97 @@
 void __init trap_init(void)
 {
 	/* Compile time sanity check. */
-	if (TI_TASK != offsetof(struct thread_info, task) ||
-	    TI_FLAGS != offsetof(struct thread_info, flags) ||
-	    TI_CPU != offsetof(struct thread_info, cpu) ||
-	    TI_FPSAVED != offsetof(struct thread_info, fpsaved) ||
-	    TI_KSP != offsetof(struct thread_info, ksp) ||
-	    TI_FAULT_ADDR != offsetof(struct thread_info, fault_address) ||
-	    TI_KREGS != offsetof(struct thread_info, kregs) ||
-	    TI_UTRAPS != offsetof(struct thread_info, utraps) ||
-	    TI_EXEC_DOMAIN != offsetof(struct thread_info, exec_domain) ||
-	    TI_REG_WINDOW != offsetof(struct thread_info, reg_window) ||
-	    TI_RWIN_SPTRS != offsetof(struct thread_info, rwbuf_stkptrs) ||
-	    TI_GSR != offsetof(struct thread_info, gsr) ||
-	    TI_XFSR != offsetof(struct thread_info, xfsr) ||
-	    TI_USER_CNTD0 != offsetof(struct thread_info, user_cntd0) ||
-	    TI_USER_CNTD1 != offsetof(struct thread_info, user_cntd1) ||
-	    TI_KERN_CNTD0 != offsetof(struct thread_info, kernel_cntd0) ||
-	    TI_KERN_CNTD1 != offsetof(struct thread_info, kernel_cntd1) ||
-	    TI_PCR != offsetof(struct thread_info, pcr_reg) ||
-	    TI_PRE_COUNT != offsetof(struct thread_info, preempt_count) ||
-	    TI_NEW_CHILD != offsetof(struct thread_info, new_child) ||
-	    TI_SYS_NOERROR != offsetof(struct thread_info, syscall_noerror) ||
-	    TI_RESTART_BLOCK != offsetof(struct thread_info, restart_block) ||
-	    TI_KUNA_REGS != offsetof(struct thread_info, kern_una_regs) ||
-	    TI_KUNA_INSN != offsetof(struct thread_info, kern_una_insn) ||
-	    TI_FPREGS != offsetof(struct thread_info, fpregs) ||
-	    (TI_FPREGS & (64 - 1)))
-		thread_info_offsets_are_bolixed_dave();
+	BUILD_BUG_ON(TI_TASK != offsetof(struct thread_info, task) ||
+		     TI_FLAGS != offsetof(struct thread_info, flags) ||
+		     TI_CPU != offsetof(struct thread_info, cpu) ||
+		     TI_FPSAVED != offsetof(struct thread_info, fpsaved) ||
+		     TI_KSP != offsetof(struct thread_info, ksp) ||
+		     TI_FAULT_ADDR != offsetof(struct thread_info,
+					       fault_address) ||
+		     TI_KREGS != offsetof(struct thread_info, kregs) ||
+		     TI_UTRAPS != offsetof(struct thread_info, utraps) ||
+		     TI_EXEC_DOMAIN != offsetof(struct thread_info,
+						exec_domain) ||
+		     TI_REG_WINDOW != offsetof(struct thread_info,
+					       reg_window) ||
+		     TI_RWIN_SPTRS != offsetof(struct thread_info,
+					       rwbuf_stkptrs) ||
+		     TI_GSR != offsetof(struct thread_info, gsr) ||
+		     TI_XFSR != offsetof(struct thread_info, xfsr) ||
+		     TI_USER_CNTD0 != offsetof(struct thread_info,
+					       user_cntd0) ||
+		     TI_USER_CNTD1 != offsetof(struct thread_info,
+					       user_cntd1) ||
+		     TI_KERN_CNTD0 != offsetof(struct thread_info,
+					       kernel_cntd0) ||
+		     TI_KERN_CNTD1 != offsetof(struct thread_info,
+					       kernel_cntd1) ||
+		     TI_PCR != offsetof(struct thread_info, pcr_reg) ||
+		     TI_PRE_COUNT != offsetof(struct thread_info,
+					      preempt_count) ||
+		     TI_NEW_CHILD != offsetof(struct thread_info, new_child) ||
+		     TI_SYS_NOERROR != offsetof(struct thread_info,
+						syscall_noerror) ||
+		     TI_RESTART_BLOCK != offsetof(struct thread_info,
+						  restart_block) ||
+		     TI_KUNA_REGS != offsetof(struct thread_info,
+					      kern_una_regs) ||
+		     TI_KUNA_INSN != offsetof(struct thread_info,
+					      kern_una_insn) ||
+		     TI_FPREGS != offsetof(struct thread_info, fpregs) ||
+		     (TI_FPREGS & (64 - 1)));
 
-	if (TRAP_PER_CPU_THREAD != offsetof(struct trap_per_cpu, thread) ||
-	    (TRAP_PER_CPU_PGD_PADDR !=
-	     offsetof(struct trap_per_cpu, pgd_paddr)) ||
-	    (TRAP_PER_CPU_CPU_MONDO_PA !=
-	     offsetof(struct trap_per_cpu, cpu_mondo_pa)) ||
-	    (TRAP_PER_CPU_DEV_MONDO_PA !=
-	     offsetof(struct trap_per_cpu, dev_mondo_pa)) ||
-	    (TRAP_PER_CPU_RESUM_MONDO_PA !=
-	     offsetof(struct trap_per_cpu, resum_mondo_pa)) ||
-	    (TRAP_PER_CPU_RESUM_KBUF_PA !=
-	     offsetof(struct trap_per_cpu, resum_kernel_buf_pa)) ||
-	    (TRAP_PER_CPU_NONRESUM_MONDO_PA !=
-	     offsetof(struct trap_per_cpu, nonresum_mondo_pa)) ||
-	    (TRAP_PER_CPU_NONRESUM_KBUF_PA !=
-	     offsetof(struct trap_per_cpu, nonresum_kernel_buf_pa)) ||
-	    (TRAP_PER_CPU_FAULT_INFO !=
-	     offsetof(struct trap_per_cpu, fault_info)) ||
-	    (TRAP_PER_CPU_CPU_MONDO_BLOCK_PA !=
-	     offsetof(struct trap_per_cpu, cpu_mondo_block_pa)) ||
-	    (TRAP_PER_CPU_CPU_LIST_PA !=
-	     offsetof(struct trap_per_cpu, cpu_list_pa)) ||
-	    (TRAP_PER_CPU_TSB_HUGE !=
-	     offsetof(struct trap_per_cpu, tsb_huge)) ||
-	    (TRAP_PER_CPU_TSB_HUGE_TEMP !=
-	     offsetof(struct trap_per_cpu, tsb_huge_temp)) ||
-	    (TRAP_PER_CPU_IRQ_WORKLIST_PA !=
-	     offsetof(struct trap_per_cpu, irq_worklist_pa)) ||
-	    (TRAP_PER_CPU_CPU_MONDO_QMASK !=
-	     offsetof(struct trap_per_cpu, cpu_mondo_qmask)) ||
-	    (TRAP_PER_CPU_DEV_MONDO_QMASK !=
-	     offsetof(struct trap_per_cpu, dev_mondo_qmask)) ||
-	    (TRAP_PER_CPU_RESUM_QMASK !=
-	     offsetof(struct trap_per_cpu, resum_qmask)) ||
-	    (TRAP_PER_CPU_NONRESUM_QMASK !=
-	     offsetof(struct trap_per_cpu, nonresum_qmask)))
-		trap_per_cpu_offsets_are_bolixed_dave();
+	BUILD_BUG_ON(TRAP_PER_CPU_THREAD != offsetof(struct trap_per_cpu,
+						     thread) ||
+		     (TRAP_PER_CPU_PGD_PADDR !=
+		      offsetof(struct trap_per_cpu, pgd_paddr)) ||
+		     (TRAP_PER_CPU_CPU_MONDO_PA !=
+		      offsetof(struct trap_per_cpu, cpu_mondo_pa)) ||
+		     (TRAP_PER_CPU_DEV_MONDO_PA !=
+		      offsetof(struct trap_per_cpu, dev_mondo_pa)) ||
+		     (TRAP_PER_CPU_RESUM_MONDO_PA !=
+		      offsetof(struct trap_per_cpu, resum_mondo_pa)) ||
+		     (TRAP_PER_CPU_RESUM_KBUF_PA !=
+		      offsetof(struct trap_per_cpu, resum_kernel_buf_pa)) ||
+		     (TRAP_PER_CPU_NONRESUM_MONDO_PA !=
+		      offsetof(struct trap_per_cpu, nonresum_mondo_pa)) ||
+		     (TRAP_PER_CPU_NONRESUM_KBUF_PA !=
+		      offsetof(struct trap_per_cpu, nonresum_kernel_buf_pa)) ||
+		     (TRAP_PER_CPU_FAULT_INFO !=
+		      offsetof(struct trap_per_cpu, fault_info)) ||
+		     (TRAP_PER_CPU_CPU_MONDO_BLOCK_PA !=
+		      offsetof(struct trap_per_cpu, cpu_mondo_block_pa)) ||
+		     (TRAP_PER_CPU_CPU_LIST_PA !=
+		      offsetof(struct trap_per_cpu, cpu_list_pa)) ||
+		     (TRAP_PER_CPU_TSB_HUGE !=
+		      offsetof(struct trap_per_cpu, tsb_huge)) ||
+		     (TRAP_PER_CPU_TSB_HUGE_TEMP !=
+		      offsetof(struct trap_per_cpu, tsb_huge_temp)) ||
+		     (TRAP_PER_CPU_IRQ_WORKLIST_PA !=
+		      offsetof(struct trap_per_cpu, irq_worklist_pa)) ||
+		     (TRAP_PER_CPU_CPU_MONDO_QMASK !=
+		      offsetof(struct trap_per_cpu, cpu_mondo_qmask)) ||
+		     (TRAP_PER_CPU_DEV_MONDO_QMASK !=
+		      offsetof(struct trap_per_cpu, dev_mondo_qmask)) ||
+		     (TRAP_PER_CPU_RESUM_QMASK !=
+		      offsetof(struct trap_per_cpu, resum_qmask)) ||
+		     (TRAP_PER_CPU_NONRESUM_QMASK !=
+		      offsetof(struct trap_per_cpu, nonresum_qmask)) ||
+		     (TRAP_PER_CPU_PER_CPU_BASE !=
+		      offsetof(struct trap_per_cpu, __per_cpu_base)));
 
-	if ((TSB_CONFIG_TSB !=
-	     offsetof(struct tsb_config, tsb)) ||
-	    (TSB_CONFIG_RSS_LIMIT !=
-	     offsetof(struct tsb_config, tsb_rss_limit)) ||
-	    (TSB_CONFIG_NENTRIES !=
-	     offsetof(struct tsb_config, tsb_nentries)) ||
-	    (TSB_CONFIG_REG_VAL !=
-	     offsetof(struct tsb_config, tsb_reg_val)) ||
-	    (TSB_CONFIG_MAP_VADDR !=
-	     offsetof(struct tsb_config, tsb_map_vaddr)) ||
-	    (TSB_CONFIG_MAP_PTE !=
-	     offsetof(struct tsb_config, tsb_map_pte)))
-		tsb_config_offsets_are_bolixed_dave();
+	BUILD_BUG_ON((TSB_CONFIG_TSB !=
+		      offsetof(struct tsb_config, tsb)) ||
+		     (TSB_CONFIG_RSS_LIMIT !=
+		      offsetof(struct tsb_config, tsb_rss_limit)) ||
+		     (TSB_CONFIG_NENTRIES !=
+		      offsetof(struct tsb_config, tsb_nentries)) ||
+		     (TSB_CONFIG_REG_VAL !=
+		      offsetof(struct tsb_config, tsb_reg_val)) ||
+		     (TSB_CONFIG_MAP_VADDR !=
+		      offsetof(struct tsb_config, tsb_map_vaddr)) ||
+		     (TSB_CONFIG_MAP_PTE !=
+		      offsetof(struct tsb_config, tsb_map_pte)));
 
 	/* Attach to the address space of init_task.  On SMP we
 	 * do this in smp.c:smp_callin for other cpus.
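
Unlike the old *_are_bolixed_dave() link-error trick, BUILD_BUG_ON() fails at compile time. In simplified form, the idea behind it is:

/* Simplified sketch of the idea behind BUILD_BUG_ON(): a negative
 * array size makes the compiler reject the translation unit when
 * the condition is true. */
#define EXAMPLE_BUILD_BUG_ON(cond) ((void)sizeof(char[1 - 2 * !!(cond)]))
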
diff --git a/arch/sparc/mm/init_32.c b/arch/sparc/mm/init_32.c
index cbb282d..26bb391 100644
--- a/arch/sparc/mm/init_32.c
+++ b/arch/sparc/mm/init_32.c
@@ -358,6 +358,7 @@
 	protection_map[15] = PAGE_SHARED;
 	btfixup();
 	prom_build_devicetree();
+	of_fill_in_cpu_data();
 	device_scan();
 }
 
diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
index f26a352..ca92e2f 100644
--- a/arch/sparc/mm/init_64.c
+++ b/arch/sparc/mm/init_64.c
@@ -1679,11 +1679,6 @@
 static void sun4u_pgprot_init(void);
 static void sun4v_pgprot_init(void);
 
-/* Dummy function */
-void __init setup_per_cpu_areas(void)
-{
-}
-
 void __init paging_init(void)
 {
 	unsigned long end_pfn, shift, phys_base;
@@ -1799,16 +1794,13 @@
 	if (tlb_type == hypervisor)
 		sun4v_ktsb_register();
 
-	/* We must setup the per-cpu areas before we pull in the
-	 * PROM and the MDESC.  The code there fills in cpu and
-	 * other information into per-cpu data structures.
-	 */
-	real_setup_per_cpu_areas();
-
 	prom_build_devicetree();
+	of_populate_present_mask();
 
-	if (tlb_type == hypervisor)
+	if (tlb_type == hypervisor) {
 		sun4v_mdesc_init();
+		mdesc_populate_present_mask(cpu_all_mask);
+	}
 
 	/* Once the OF device tree and MDESC have been setup, we know
 	 * the list of possible cpus.  Therefore we can allocate the
diff --git a/arch/sparc/mm/srmmu.c b/arch/sparc/mm/srmmu.c
index 06c9a7d..ade4eb3 100644
--- a/arch/sparc/mm/srmmu.c
+++ b/arch/sparc/mm/srmmu.c
@@ -19,6 +19,7 @@
 #include <linux/fs.h>
 #include <linux/seq_file.h>
 #include <linux/kdebug.h>
+#include <linux/log2.h>
 
 #include <asm/bitext.h>
 #include <asm/page.h>
@@ -349,7 +350,7 @@
 		    vaddr, srmmu_nocache_end);
 		BUG();
 	}
-	if (size & (size-1)) {
+	if (!is_power_of_2(size)) {
 		printk("Size 0x%x is not a power of 2\n", size);
 		BUG();
 	}
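
is_power_of_2() from <linux/log2.h> encodes the same bit trick as the open-coded test, with one refinement:

/* Essentially what <linux/log2.h> provides (sketch): */
static inline bool example_is_power_of_2(unsigned long n)
{
	return n != 0 && (n & (n - 1)) == 0;
}
/* Note it also rejects 0, which the old "size & (size-1)" test let through. */
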
diff --git a/arch/um/drivers/net_kern.c b/arch/um/drivers/net_kern.c
index 434ba12..3b44b47 100644
--- a/arch/um/drivers/net_kern.c
+++ b/arch/um/drivers/net_kern.c
@@ -360,7 +360,7 @@
 
 static void net_device_release(struct device *dev)
 {
-	struct uml_net *device = dev->driver_data;
+	struct uml_net *device = dev_get_drvdata(dev);
 	struct net_device *netdev = device->dev;
 	struct uml_net_private *lp = netdev_priv(netdev);
 
@@ -440,7 +440,7 @@
 	device->pdev.id = n;
 	device->pdev.name = DRIVER_NAME;
 	device->pdev.dev.release = net_device_release;
-	device->pdev.dev.driver_data = device;
+	dev_set_drvdata(&device->pdev.dev, device);
 	if (platform_device_register(&device->pdev))
 		goto out_free_netdev;
 	SET_NETDEV_DEV(dev,&device->pdev.dev);
diff --git a/arch/um/drivers/ubd_kern.c b/arch/um/drivers/ubd_kern.c
index aa9e926e..8f05d4d 100644
--- a/arch/um/drivers/ubd_kern.c
+++ b/arch/um/drivers/ubd_kern.c
@@ -778,7 +778,7 @@
 
 static void ubd_device_release(struct device *dev)
 {
-	struct ubd *ubd_dev = dev->driver_data;
+	struct ubd *ubd_dev = dev_get_drvdata(dev);
 
 	blk_cleanup_queue(ubd_dev->queue);
 	*ubd_dev = ((struct ubd) DEFAULT_UBD);
@@ -807,7 +807,7 @@
 		ubd_devs[unit].pdev.id   = unit;
 		ubd_devs[unit].pdev.name = DRIVER_NAME;
 		ubd_devs[unit].pdev.dev.release = ubd_device_release;
-		ubd_devs[unit].pdev.dev.driver_data = &ubd_devs[unit];
+		dev_set_drvdata(&ubd_devs[unit].pdev.dev, &ubd_devs[unit]);
 		platform_device_register(&ubd_devs[unit].pdev);
 		disk->driverfs_dev = &ubd_devs[unit].pdev.dev;
 	}
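
Both UML hunks replace direct pokes at dev->driver_data with the accessor pair, which keeps drivers insulated from struct device layout changes. The pattern is simply (names here are illustrative):

/* Sketch of the accessor pattern used above. */
static void example_attach(struct platform_device *pdev, void *priv)
{
	dev_set_drvdata(&pdev->dev, priv);	/* at registration time */
}

static void *example_retrieve(struct device *dev)
{
	return dev_get_drvdata(dev);		/* in release()/callbacks */
}
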
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 356d2ec..cf42fc3 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -46,6 +46,7 @@
 	select HAVE_KERNEL_GZIP
 	select HAVE_KERNEL_BZIP2
 	select HAVE_KERNEL_LZMA
+	select HAVE_ARCH_KMEMCHECK
 
 config OUTPUT_FORMAT
 	string
diff --git a/arch/x86/Makefile b/arch/x86/Makefile
index edbd0ca..1b68659 100644
--- a/arch/x86/Makefile
+++ b/arch/x86/Makefile
@@ -81,6 +81,11 @@
         endif
 endif
 
+# Don't unroll struct assignments with kmemcheck enabled
+ifeq ($(CONFIG_KMEMCHECK),y)
+	KBUILD_CFLAGS += $(call cc-option,-fno-builtin-memcpy)
+endif
+
 # Stackpointer is addressed different for 32 bit and 64 bit x86
 sp-$(CONFIG_X86_32) := esp
 sp-$(CONFIG_X86_64) := rsp
diff --git a/arch/x86/include/asm/dma-mapping.h b/arch/x86/include/asm/dma-mapping.h
index f82fdc4..b93405b 100644
--- a/arch/x86/include/asm/dma-mapping.h
+++ b/arch/x86/include/asm/dma-mapping.h
@@ -6,6 +6,7 @@
  * Documentation/DMA-API.txt for documentation.
  */
 
+#include <linux/kmemcheck.h>
 #include <linux/scatterlist.h>
 #include <linux/dma-debug.h>
 #include <linux/dma-attrs.h>
@@ -60,6 +61,7 @@
 	dma_addr_t addr;
 
 	BUG_ON(!valid_dma_direction(dir));
+	kmemcheck_mark_initialized(ptr, size);
 	addr = ops->map_page(hwdev, virt_to_page(ptr),
 			     (unsigned long)ptr & ~PAGE_MASK, size,
 			     dir, NULL);
@@ -87,8 +89,12 @@
 {
 	struct dma_map_ops *ops = get_dma_ops(hwdev);
 	int ents;
+	struct scatterlist *s;
+	int i;
 
 	BUG_ON(!valid_dma_direction(dir));
+	for_each_sg(sg, s, nents, i)
+		kmemcheck_mark_initialized(sg_virt(s), s->length);
 	ents = ops->map_sg(hwdev, sg, nents, dir, NULL);
 	debug_dma_map_sg(hwdev, sg, nents, ents, dir);
 
@@ -200,6 +206,7 @@
 	dma_addr_t addr;
 
 	BUG_ON(!valid_dma_direction(dir));
+	kmemcheck_mark_initialized(page_address(page) + offset, size);
 	addr = ops->map_page(dev, page, offset, size, dir, NULL);
 	debug_dma_map_page(dev, page, offset, size, dir, addr, false);
 
diff --git a/arch/x86/include/asm/kmemcheck.h b/arch/x86/include/asm/kmemcheck.h
new file mode 100644
index 0000000..ed01518
--- /dev/null
+++ b/arch/x86/include/asm/kmemcheck.h
@@ -0,0 +1,42 @@
+#ifndef ASM_X86_KMEMCHECK_H
+#define ASM_X86_KMEMCHECK_H
+
+#include <linux/types.h>
+#include <asm/ptrace.h>
+
+#ifdef CONFIG_KMEMCHECK
+bool kmemcheck_active(struct pt_regs *regs);
+
+void kmemcheck_show(struct pt_regs *regs);
+void kmemcheck_hide(struct pt_regs *regs);
+
+bool kmemcheck_fault(struct pt_regs *regs,
+	unsigned long address, unsigned long error_code);
+bool kmemcheck_trap(struct pt_regs *regs);
+#else
+static inline bool kmemcheck_active(struct pt_regs *regs)
+{
+	return false;
+}
+
+static inline void kmemcheck_show(struct pt_regs *regs)
+{
+}
+
+static inline void kmemcheck_hide(struct pt_regs *regs)
+{
+}
+
+static inline bool kmemcheck_fault(struct pt_regs *regs,
+	unsigned long address, unsigned long error_code)
+{
+	return false;
+}
+
+static inline bool kmemcheck_trap(struct pt_regs *regs)
+{
+	return false;
+}
+#endif /* CONFIG_KMEMCHECK */
+
+#endif
diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
index 18ef7eb..3cc06e3 100644
--- a/arch/x86/include/asm/pgtable.h
+++ b/arch/x86/include/asm/pgtable.h
@@ -317,6 +317,11 @@
 	return pte_flags(a) & (_PAGE_PRESENT | _PAGE_PROTNONE);
 }
 
+static inline int pte_hidden(pte_t pte)
+{
+	return pte_flags(pte) & _PAGE_HIDDEN;
+}
+
 static inline int pmd_present(pmd_t pmd)
 {
 	return pmd_flags(pmd) & _PAGE_PRESENT;
diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
index 4d258ad..54cb697 100644
--- a/arch/x86/include/asm/pgtable_types.h
+++ b/arch/x86/include/asm/pgtable_types.h
@@ -18,7 +18,7 @@
 #define _PAGE_BIT_GLOBAL	8	/* Global TLB entry PPro+ */
 #define _PAGE_BIT_UNUSED1	9	/* available for programmer */
 #define _PAGE_BIT_IOMAP		10	/* flag used to indicate IO mapping */
-#define _PAGE_BIT_UNUSED3	11
+#define _PAGE_BIT_HIDDEN	11	/* hidden by kmemcheck */
 #define _PAGE_BIT_PAT_LARGE	12	/* On 2MB or 1GB pages */
 #define _PAGE_BIT_SPECIAL	_PAGE_BIT_UNUSED1
 #define _PAGE_BIT_CPA_TEST	_PAGE_BIT_UNUSED1
@@ -41,13 +41,18 @@
 #define _PAGE_GLOBAL	(_AT(pteval_t, 1) << _PAGE_BIT_GLOBAL)
 #define _PAGE_UNUSED1	(_AT(pteval_t, 1) << _PAGE_BIT_UNUSED1)
 #define _PAGE_IOMAP	(_AT(pteval_t, 1) << _PAGE_BIT_IOMAP)
-#define _PAGE_UNUSED3	(_AT(pteval_t, 1) << _PAGE_BIT_UNUSED3)
 #define _PAGE_PAT	(_AT(pteval_t, 1) << _PAGE_BIT_PAT)
 #define _PAGE_PAT_LARGE (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE)
 #define _PAGE_SPECIAL	(_AT(pteval_t, 1) << _PAGE_BIT_SPECIAL)
 #define _PAGE_CPA_TEST	(_AT(pteval_t, 1) << _PAGE_BIT_CPA_TEST)
 #define __HAVE_ARCH_PTE_SPECIAL
 
+#ifdef CONFIG_KMEMCHECK
+#define _PAGE_HIDDEN	(_AT(pteval_t, 1) << _PAGE_BIT_HIDDEN)
+#else
+#define _PAGE_HIDDEN	(_AT(pteval_t, 0))
+#endif
+
 #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
 #define _PAGE_NX	(_AT(pteval_t, 1) << _PAGE_BIT_NX)
 #else
diff --git a/arch/x86/include/asm/string_32.h b/arch/x86/include/asm/string_32.h
index 0e0e3ba..c86f452 100644
--- a/arch/x86/include/asm/string_32.h
+++ b/arch/x86/include/asm/string_32.h
@@ -177,10 +177,18 @@
  *	No 3D Now!
  */
 
+#ifndef CONFIG_KMEMCHECK
 #define memcpy(t, f, n)				\
 	(__builtin_constant_p((n))		\
 	 ? __constant_memcpy((t), (f), (n))	\
 	 : __memcpy((t), (f), (n)))
+#else
+/*
+ * kmemcheck becomes very happy if we use the REP instructions unconditionally,
+ * because it means that we know both memory operands in advance.
+ */
+#define memcpy(t, f, n) __memcpy((t), (f), (n))
+#endif
 
 #endif
 
diff --git a/arch/x86/include/asm/string_64.h b/arch/x86/include/asm/string_64.h
index 2afe164..19e2c46 100644
--- a/arch/x86/include/asm/string_64.h
+++ b/arch/x86/include/asm/string_64.h
@@ -27,6 +27,7 @@
    function. */
 
 #define __HAVE_ARCH_MEMCPY 1
+#ifndef CONFIG_KMEMCHECK
 #if (__GNUC__ == 4 && __GNUC_MINOR__ >= 3) || __GNUC__ > 4
 extern void *memcpy(void *to, const void *from, size_t len);
 #else
@@ -42,6 +43,13 @@
 	__ret;							\
 })
 #endif
+#else
+/*
+ * kmemcheck becomes very happy if we use the REP instructions unconditionally,
+ * because it means that we know both memory operands in advance.
+ */
+#define memcpy(dst, src, len) __inline_memcpy((dst), (src), (len))
+#endif
 
 #define __HAVE_ARCH_MEMSET
 void *memset(void *s, int c, size_t n);
diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
index 602c769..b078352 100644
--- a/arch/x86/include/asm/thread_info.h
+++ b/arch/x86/include/asm/thread_info.h
@@ -154,9 +154,9 @@
 
 /* thread information allocation */
 #ifdef CONFIG_DEBUG_STACK_USAGE
-#define THREAD_FLAGS (GFP_KERNEL | __GFP_ZERO)
+#define THREAD_FLAGS (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO)
 #else
-#define THREAD_FLAGS GFP_KERNEL
+#define THREAD_FLAGS (GFP_KERNEL | __GFP_NOTRACK)
 #endif
 
 #define __HAVE_ARCH_THREAD_INFO_ALLOCATOR
diff --git a/arch/x86/include/asm/xor.h b/arch/x86/include/asm/xor.h
index 11b3bb8..7fcf6f3 100644
--- a/arch/x86/include/asm/xor.h
+++ b/arch/x86/include/asm/xor.h
@@ -1,5 +1,10 @@
+#ifdef CONFIG_KMEMCHECK
+/* kmemcheck doesn't handle MMX/SSE/SSE2 instructions */
+# include <asm-generic/xor.h>
+#else
 #ifdef CONFIG_X86_32
 # include "xor_32.h"
 #else
 # include "xor_64.h"
 #endif
+#endif
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index daed39b..3260ab0 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -86,6 +86,29 @@
 	 */
 	if (c->x86 == 6 && c->x86_model < 15)
 		clear_cpu_cap(c, X86_FEATURE_PAT);
+
+#ifdef CONFIG_KMEMCHECK
+	/*
+	 * P4s have a "fast strings" feature which causes single-
+	 * stepping REP instructions to only generate a #DB on
+	 * cache-line boundaries.
+	 *
+	 * Ingo Molnar reported a Pentium D (model 6) and a Xeon
+	 * (model 2) with the same problem.
+	 */
+	if (c->x86 == 15) {
+		u64 misc_enable;
+
+		rdmsrl(MSR_IA32_MISC_ENABLE, misc_enable);
+
+		if (misc_enable & MSR_IA32_MISC_ENABLE_FAST_STRING) {
+			printk(KERN_INFO "kmemcheck: Disabling fast string operations\n");
+
+			misc_enable &= ~MSR_IA32_MISC_ENABLE_FAST_STRING;
+			wrmsrl(MSR_IA32_MISC_ENABLE, misc_enable);
+		}
+	}
+#endif
 }
 
 #ifdef CONFIG_X86_32
diff --git a/arch/x86/kernel/cpuid.c b/arch/x86/kernel/cpuid.c
index 2ac1f0c..b07af88 100644
--- a/arch/x86/kernel/cpuid.c
+++ b/arch/x86/kernel/cpuid.c
@@ -182,6 +182,11 @@
 	.notifier_call = cpuid_class_cpu_callback,
 };
 
+static char *cpuid_nodename(struct device *dev)
+{
+	return kasprintf(GFP_KERNEL, "cpu/%u/cpuid", MINOR(dev->devt));
+}
+
 static int __init cpuid_init(void)
 {
 	int i, err = 0;
@@ -198,6 +203,7 @@
 		err = PTR_ERR(cpuid_class);
 		goto out_chrdev;
 	}
+	cpuid_class->nodename = cpuid_nodename;
 	for_each_online_cpu(i) {
 		err = cpuid_device_create(i);
 		if (err != 0)
diff --git a/arch/x86/kernel/microcode_core.c b/arch/x86/kernel/microcode_core.c
index 9c44615..9371448 100644
--- a/arch/x86/kernel/microcode_core.c
+++ b/arch/x86/kernel/microcode_core.c
@@ -236,6 +236,7 @@
 static struct miscdevice microcode_dev = {
 	.minor			= MICROCODE_MINOR,
 	.name			= "microcode",
+	.devnode		= "cpu/microcode",
 	.fops			= &microcode_fops,
 };
 
diff --git a/arch/x86/kernel/msr.c b/arch/x86/kernel/msr.c
index 3cf3413..98fd6cd 100644
--- a/arch/x86/kernel/msr.c
+++ b/arch/x86/kernel/msr.c
@@ -196,6 +196,11 @@
 	.notifier_call = msr_class_cpu_callback,
 };
 
+static char *msr_nodename(struct device *dev)
+{
+	return kasprintf(GFP_KERNEL, "cpu/%u/msr", MINOR(dev->devt));
+}
+
 static int __init msr_init(void)
 {
 	int i, err = 0;
@@ -212,6 +217,7 @@
 		err = PTR_ERR(msr_class);
 		goto out_chrdev;
 	}
+	msr_class->nodename = msr_nodename;
 	for_each_online_cpu(i) {
 		err = msr_device_create(i);
 		if (err != 0)
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 3bb2be1..994dd6a 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -63,7 +63,7 @@
         task_xstate_cachep =
         	kmem_cache_create("task_xstate", xstate_size,
 				  __alignof__(union thread_xstate),
-				  SLAB_PANIC, NULL);
+				  SLAB_PANIC | SLAB_NOTRACK, NULL);
 }
 
 /*
diff --git a/arch/x86/kernel/stacktrace.c b/arch/x86/kernel/stacktrace.c
index 4aaf7e4..c3eb207 100644
--- a/arch/x86/kernel/stacktrace.c
+++ b/arch/x86/kernel/stacktrace.c
@@ -77,6 +77,13 @@
 }
 EXPORT_SYMBOL_GPL(save_stack_trace);
 
+void save_stack_trace_bp(struct stack_trace *trace, unsigned long bp)
+{
+	dump_trace(current, NULL, NULL, bp, &save_stack_ops, trace);
+	if (trace->nr_entries < trace->max_entries)
+		trace->entries[trace->nr_entries++] = ULONG_MAX;
+}
+
 void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
 {
 	dump_trace(tsk, NULL, NULL, 0, &save_stack_ops_nosched, trace);
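
save_stack_trace_bp() added here is the hook kmemcheck's error queue uses later in this same series: it records a trace starting from a caller-supplied frame pointer into a fixed buffer. A hedged usage sketch (buffer size and names are illustrative):

/* Capture a trace starting from regs->bp into a static buffer. */
static unsigned long example_entries[32];

static void example_capture(struct pt_regs *regs)
{
	struct stack_trace trace = {
		.entries	= example_entries,
		.max_entries	= ARRAY_SIZE(example_entries),
		.nr_entries	= 0,
		.skip		= 0,
	};

	save_stack_trace_bp(&trace, regs->bp);
}
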
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index 1e1e27b..5f935f0 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -45,6 +45,7 @@
 #include <linux/edac.h>
 #endif
 
+#include <asm/kmemcheck.h>
 #include <asm/stacktrace.h>
 #include <asm/processor.h>
 #include <asm/debugreg.h>
@@ -534,6 +535,10 @@
 
 	get_debugreg(condition, 6);
 
+	/* Catch kmemcheck conditions first of all! */
+	if (condition & DR_STEP && kmemcheck_trap(regs))
+		return;
+
 	/*
 	 * The processor cleared BTF, so don't mark that we need it set.
 	 */
diff --git a/arch/x86/mm/Makefile b/arch/x86/mm/Makefile
index fdd30d0..eefdeee 100644
--- a/arch/x86/mm/Makefile
+++ b/arch/x86/mm/Makefile
@@ -10,6 +10,8 @@
 
 obj-$(CONFIG_HIGHMEM)		+= highmem_32.o
 
+obj-$(CONFIG_KMEMCHECK)		+= kmemcheck/
+
 obj-$(CONFIG_MMIOTRACE)		+= mmiotrace.o
 mmiotrace-y			:= kmmio.o pf_in.o mmio-mod.o
 obj-$(CONFIG_MMIOTRACE_TEST)	+= testmmiotrace.o
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index c6acc63..baa0e86 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -14,6 +14,7 @@
 
 #include <asm/traps.h>			/* dotraplinkage, ...		*/
 #include <asm/pgalloc.h>		/* pgd_*(), ...			*/
+#include <asm/kmemcheck.h>		/* kmemcheck_*(), ...		*/
 
 /*
  * Page fault error code bits:
@@ -956,6 +957,13 @@
 	/* Get the faulting address: */
 	address = read_cr2();
 
+	/*
+	 * Detect and handle instructions that would cause a page fault for
+	 * both a tracked kernel page and a userspace page.
+	 */
+	if (kmemcheck_active(regs))
+		kmemcheck_hide(regs);
+
 	if (unlikely(kmmio_fault(regs, address)))
 		return;
 
@@ -973,9 +981,13 @@
 	 * protection error (error_code & 9) == 0.
 	 */
 	if (unlikely(fault_in_kernel_space(address))) {
-		if (!(error_code & (PF_RSVD|PF_USER|PF_PROT)) &&
-		    vmalloc_fault(address) >= 0)
-			return;
+		if (!(error_code & (PF_RSVD | PF_USER | PF_PROT))) {
+			if (vmalloc_fault(address) >= 0)
+				return;
+
+			if (kmemcheck_fault(regs, address, error_code))
+				return;
+		}
 
 		/* Can handle a stale RO->RW TLB: */
 		if (spurious_fault(error_code, address))
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index 34c1bfb..f53b57e 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -213,7 +213,7 @@
 	if (!after_bootmem)
 		init_gbpages();
 
-#ifdef CONFIG_DEBUG_PAGEALLOC
+#if defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_KMEMCHECK)
 	/*
 	 * For CONFIG_DEBUG_PAGEALLOC, identity mapping will use small pages.
 	 * This will simplify cpa(), which otherwise needs to support splitting
diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
index 9ff3c08..3cd7711 100644
--- a/arch/x86/mm/init_32.c
+++ b/arch/x86/mm/init_32.c
@@ -111,7 +111,7 @@
 		pte_t *page_table = NULL;
 
 		if (after_bootmem) {
-#ifdef CONFIG_DEBUG_PAGEALLOC
+#if defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_KMEMCHECK)
 			page_table = (pte_t *) alloc_bootmem_pages(PAGE_SIZE);
 #endif
 			if (!page_table)
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index 52bb951..9c54329 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -104,7 +104,7 @@
 	void *ptr;
 
 	if (after_bootmem)
-		ptr = (void *) get_zeroed_page(GFP_ATOMIC);
+		ptr = (void *) get_zeroed_page(GFP_ATOMIC | __GFP_NOTRACK);
 	else
 		ptr = alloc_bootmem_pages(PAGE_SIZE);
 
@@ -281,7 +281,7 @@
 	void *adr;
 
 	if (after_bootmem) {
-		adr = (void *)get_zeroed_page(GFP_ATOMIC);
+		adr = (void *)get_zeroed_page(GFP_ATOMIC | __GFP_NOTRACK);
 		*phys = __pa(adr);
 
 		return adr;
diff --git a/arch/x86/mm/kmemcheck/Makefile b/arch/x86/mm/kmemcheck/Makefile
new file mode 100644
index 0000000..520b3bc
--- /dev/null
+++ b/arch/x86/mm/kmemcheck/Makefile
@@ -0,0 +1 @@
+obj-y := error.o kmemcheck.o opcode.o pte.o selftest.o shadow.o
diff --git a/arch/x86/mm/kmemcheck/error.c b/arch/x86/mm/kmemcheck/error.c
new file mode 100644
index 0000000..4901d0d
--- /dev/null
+++ b/arch/x86/mm/kmemcheck/error.c
@@ -0,0 +1,228 @@
+#include <linux/interrupt.h>
+#include <linux/kdebug.h>
+#include <linux/kmemcheck.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/ptrace.h>
+#include <linux/stacktrace.h>
+#include <linux/string.h>
+
+#include "error.h"
+#include "shadow.h"
+
+enum kmemcheck_error_type {
+	KMEMCHECK_ERROR_INVALID_ACCESS,
+	KMEMCHECK_ERROR_BUG,
+};
+
+#define SHADOW_COPY_SIZE (1 << CONFIG_KMEMCHECK_SHADOW_COPY_SHIFT)
+
+struct kmemcheck_error {
+	enum kmemcheck_error_type type;
+
+	union {
+		/* KMEMCHECK_ERROR_INVALID_ACCESS */
+		struct {
+			/* Kind of access that caused the error */
+			enum kmemcheck_shadow state;
+			/* Address and size of the erroneous read */
+			unsigned long	address;
+			unsigned int	size;
+		};
+	};
+
+	struct pt_regs		regs;
+	struct stack_trace	trace;
+	unsigned long		trace_entries[32];
+
+	/* We compress it to a char. */
+	unsigned char		shadow_copy[SHADOW_COPY_SIZE];
+	unsigned char		memory_copy[SHADOW_COPY_SIZE];
+};
+
+/*
+ * Create a ring queue of errors to output. We can't call printk() directly
+ * from the kmemcheck traps, since this may call the console drivers and
+ * result in a recursive fault.
+ */
+static struct kmemcheck_error error_fifo[CONFIG_KMEMCHECK_QUEUE_SIZE];
+static unsigned int error_count;
+static unsigned int error_rd;
+static unsigned int error_wr;
+static unsigned int error_missed_count;
+
+static struct kmemcheck_error *error_next_wr(void)
+{
+	struct kmemcheck_error *e;
+
+	if (error_count == ARRAY_SIZE(error_fifo)) {
+		++error_missed_count;
+		return NULL;
+	}
+
+	e = &error_fifo[error_wr];
+	if (++error_wr == ARRAY_SIZE(error_fifo))
+		error_wr = 0;
+	++error_count;
+	return e;
+}
+
+static struct kmemcheck_error *error_next_rd(void)
+{
+	struct kmemcheck_error *e;
+
+	if (error_count == 0)
+		return NULL;
+
+	e = &error_fifo[error_rd];
+	if (++error_rd == ARRAY_SIZE(error_fifo))
+		error_rd = 0;
+	--error_count;
+	return e;
+}
+
+void kmemcheck_error_recall(void)
+{
+	static const char *desc[] = {
+		[KMEMCHECK_SHADOW_UNALLOCATED]		= "unallocated",
+		[KMEMCHECK_SHADOW_UNINITIALIZED]	= "uninitialized",
+		[KMEMCHECK_SHADOW_INITIALIZED]		= "initialized",
+		[KMEMCHECK_SHADOW_FREED]		= "freed",
+	};
+
+	static const char short_desc[] = {
+		[KMEMCHECK_SHADOW_UNALLOCATED]		= 'a',
+		[KMEMCHECK_SHADOW_UNINITIALIZED]	= 'u',
+		[KMEMCHECK_SHADOW_INITIALIZED]		= 'i',
+		[KMEMCHECK_SHADOW_FREED]		= 'f',
+	};
+
+	struct kmemcheck_error *e;
+	unsigned int i;
+
+	e = error_next_rd();
+	if (!e)
+		return;
+
+	switch (e->type) {
+	case KMEMCHECK_ERROR_INVALID_ACCESS:
+		printk(KERN_ERR  "WARNING: kmemcheck: Caught %d-bit read "
+			"from %s memory (%p)\n",
+			8 * e->size, e->state < ARRAY_SIZE(desc) ?
+				desc[e->state] : "(invalid shadow state)",
+			(void *) e->address);
+
+		printk(KERN_INFO);
+		for (i = 0; i < SHADOW_COPY_SIZE; ++i)
+			printk("%02x", e->memory_copy[i]);
+		printk("\n");
+
+		printk(KERN_INFO);
+		for (i = 0; i < SHADOW_COPY_SIZE; ++i) {
+			if (e->shadow_copy[i] < ARRAY_SIZE(short_desc))
+				printk(" %c", short_desc[e->shadow_copy[i]]);
+			else
+				printk(" ?");
+		}
+		printk("\n");
+		printk(KERN_INFO "%*c\n", 2 + 2
+			* (int) (e->address & (SHADOW_COPY_SIZE - 1)), '^');
+		break;
+	case KMEMCHECK_ERROR_BUG:
+		printk(KERN_EMERG "ERROR: kmemcheck: Fatal error\n");
+		break;
+	}
+
+	__show_regs(&e->regs, 1);
+	print_stack_trace(&e->trace, 0);
+}
+
+static void do_wakeup(unsigned long data)
+{
+	while (error_count > 0)
+		kmemcheck_error_recall();
+
+	if (error_missed_count > 0) {
+		printk(KERN_WARNING "kmemcheck: Lost %d error reports because "
+			"the queue was too small\n", error_missed_count);
+		error_missed_count = 0;
+	}
+}
+
+static DECLARE_TASKLET(kmemcheck_tasklet, &do_wakeup, 0);
+
+/*
+ * Save the context of an error report.
+ */
+void kmemcheck_error_save(enum kmemcheck_shadow state,
+	unsigned long address, unsigned int size, struct pt_regs *regs)
+{
+	static unsigned long prev_ip;
+
+	struct kmemcheck_error *e;
+	void *shadow_copy;
+	void *memory_copy;
+
+	/* Don't report several adjacent errors from the same EIP. */
+	if (regs->ip == prev_ip)
+		return;
+	prev_ip = regs->ip;
+
+	e = error_next_wr();
+	if (!e)
+		return;
+
+	e->type = KMEMCHECK_ERROR_INVALID_ACCESS;
+
+	e->state = state;
+	e->address = address;
+	e->size = size;
+
+	/* Save regs */
+	memcpy(&e->regs, regs, sizeof(*regs));
+
+	/* Save stack trace */
+	e->trace.nr_entries = 0;
+	e->trace.entries = e->trace_entries;
+	e->trace.max_entries = ARRAY_SIZE(e->trace_entries);
+	e->trace.skip = 0;
+	save_stack_trace_bp(&e->trace, regs->bp);
+
+	/* Round address down to nearest 16 bytes */
+	shadow_copy = kmemcheck_shadow_lookup(address
+		& ~(SHADOW_COPY_SIZE - 1));
+	BUG_ON(!shadow_copy);
+
+	memcpy(e->shadow_copy, shadow_copy, SHADOW_COPY_SIZE);
+
+	kmemcheck_show_addr(address);
+	memory_copy = (void *) (address & ~(SHADOW_COPY_SIZE - 1));
+	memcpy(e->memory_copy, memory_copy, SHADOW_COPY_SIZE);
+	kmemcheck_hide_addr(address);
+
+	tasklet_hi_schedule_first(&kmemcheck_tasklet);
+}
+
+/*
+ * Save the context of a kmemcheck bug.
+ */
+void kmemcheck_error_save_bug(struct pt_regs *regs)
+{
+	struct kmemcheck_error *e;
+
+	e = error_next_wr();
+	if (!e)
+		return;
+
+	e->type = KMEMCHECK_ERROR_BUG;
+
+	memcpy(&e->regs, regs, sizeof(*regs));
+
+	e->trace.nr_entries = 0;
+	e->trace.entries = e->trace_entries;
+	e->trace.max_entries = ARRAY_SIZE(e->trace_entries);
+	e->trace.skip = 1;
+	save_stack_trace(&e->trace);
+
+	tasklet_hi_schedule_first(&kmemcheck_tasklet);
+}
diff --git a/arch/x86/mm/kmemcheck/error.h b/arch/x86/mm/kmemcheck/error.h
new file mode 100644
index 0000000..0efc2e8
--- /dev/null
+++ b/arch/x86/mm/kmemcheck/error.h
@@ -0,0 +1,15 @@
+#ifndef ARCH__X86__MM__KMEMCHECK__ERROR_H
+#define ARCH__X86__MM__KMEMCHECK__ERROR_H
+
+#include <linux/ptrace.h>
+
+#include "shadow.h"
+
+void kmemcheck_error_save(enum kmemcheck_shadow state,
+	unsigned long address, unsigned int size, struct pt_regs *regs);
+
+void kmemcheck_error_save_bug(struct pt_regs *regs);
+
+void kmemcheck_error_recall(void);
+
+#endif
diff --git a/arch/x86/mm/kmemcheck/kmemcheck.c b/arch/x86/mm/kmemcheck/kmemcheck.c
new file mode 100644
index 0000000..2c55ed0
--- /dev/null
+++ b/arch/x86/mm/kmemcheck/kmemcheck.c
@@ -0,0 +1,640 @@
+/**
+ * kmemcheck - a heavyweight memory checker for the linux kernel
+ * Copyright (C) 2007, 2008  Vegard Nossum <vegardno@ifi.uio.no>
+ * (With a lot of help from Ingo Molnar and Pekka Enberg.)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License (version 2) as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/kallsyms.h>
+#include <linux/kernel.h>
+#include <linux/kmemcheck.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/page-flags.h>
+#include <linux/percpu.h>
+#include <linux/ptrace.h>
+#include <linux/string.h>
+#include <linux/types.h>
+
+#include <asm/cacheflush.h>
+#include <asm/kmemcheck.h>
+#include <asm/pgtable.h>
+#include <asm/tlbflush.h>
+
+#include "error.h"
+#include "opcode.h"
+#include "pte.h"
+#include "selftest.h"
+#include "shadow.h"
+
+
+#ifdef CONFIG_KMEMCHECK_DISABLED_BY_DEFAULT
+#  define KMEMCHECK_ENABLED 0
+#endif
+
+#ifdef CONFIG_KMEMCHECK_ENABLED_BY_DEFAULT
+#  define KMEMCHECK_ENABLED 1
+#endif
+
+#ifdef CONFIG_KMEMCHECK_ONESHOT_BY_DEFAULT
+#  define KMEMCHECK_ENABLED 2
+#endif
+
+int kmemcheck_enabled = KMEMCHECK_ENABLED;
+
+int __init kmemcheck_init(void)
+{
+#ifdef CONFIG_SMP
+	/*
+	 * Limit SMP to use a single CPU. We rely on the fact that this code
+	 * runs before SMP is set up.
+	 */
+	if (setup_max_cpus > 1) {
+		printk(KERN_INFO
+			"kmemcheck: Limiting number of CPUs to 1.\n");
+		setup_max_cpus = 1;
+	}
+#endif
+
+	if (!kmemcheck_selftest()) {
+		printk(KERN_INFO "kmemcheck: self-tests failed; disabling\n");
+		kmemcheck_enabled = 0;
+		return -EINVAL;
+	}
+
+	printk(KERN_INFO "kmemcheck: Initialized\n");
+	return 0;
+}
+
+early_initcall(kmemcheck_init);
+
+/*
+ * We need to parse the kmemcheck= option before any memory is allocated.
+ */
+static int __init param_kmemcheck(char *str)
+{
+	if (!str)
+		return -EINVAL;
+
+	sscanf(str, "%d", &kmemcheck_enabled);
+	return 0;
+}
+
+early_param("kmemcheck", param_kmemcheck);
+
+int kmemcheck_show_addr(unsigned long address)
+{
+	pte_t *pte;
+
+	pte = kmemcheck_pte_lookup(address);
+	if (!pte)
+		return 0;
+
+	set_pte(pte, __pte(pte_val(*pte) | _PAGE_PRESENT));
+	__flush_tlb_one(address);
+	return 1;
+}
+
+int kmemcheck_hide_addr(unsigned long address)
+{
+	pte_t *pte;
+
+	pte = kmemcheck_pte_lookup(address);
+	if (!pte)
+		return 0;
+
+	set_pte(pte, __pte(pte_val(*pte) & ~_PAGE_PRESENT));
+	__flush_tlb_one(address);
+	return 1;
+}
+
+struct kmemcheck_context {
+	bool busy;
+	int balance;
+
+	/*
+	 * There can be at most two memory operands to an instruction, but
+	 * each address can cross a page boundary -- so we may need up to
+	 * four addresses that must be hidden/revealed for each fault.
+	 */
+	unsigned long addr[4];
+	unsigned long n_addrs;
+	unsigned long flags;
+
+	/* Data size of the instruction that caused a fault. */
+	unsigned int size;
+};
+
+static DEFINE_PER_CPU(struct kmemcheck_context, kmemcheck_context);
+
+bool kmemcheck_active(struct pt_regs *regs)
+{
+	struct kmemcheck_context *data = &__get_cpu_var(kmemcheck_context);
+
+	return data->balance > 0;
+}
+
+/* Save an address that needs to be shown/hidden */
+static void kmemcheck_save_addr(unsigned long addr)
+{
+	struct kmemcheck_context *data = &__get_cpu_var(kmemcheck_context);
+
+	BUG_ON(data->n_addrs >= ARRAY_SIZE(data->addr));
+	data->addr[data->n_addrs++] = addr;
+}
+
+static unsigned int kmemcheck_show_all(void)
+{
+	struct kmemcheck_context *data = &__get_cpu_var(kmemcheck_context);
+	unsigned int i;
+	unsigned int n;
+
+	n = 0;
+	for (i = 0; i < data->n_addrs; ++i)
+		n += kmemcheck_show_addr(data->addr[i]);
+
+	return n;
+}
+
+static unsigned int kmemcheck_hide_all(void)
+{
+	struct kmemcheck_context *data = &__get_cpu_var(kmemcheck_context);
+	unsigned int i;
+	unsigned int n;
+
+	n = 0;
+	for (i = 0; i < data->n_addrs; ++i)
+		n += kmemcheck_hide_addr(data->addr[i]);
+
+	return n;
+}
+
+/*
+ * Called from the #PF handler.
+ */
+void kmemcheck_show(struct pt_regs *regs)
+{
+	struct kmemcheck_context *data = &__get_cpu_var(kmemcheck_context);
+
+	BUG_ON(!irqs_disabled());
+
+	if (unlikely(data->balance != 0)) {
+		kmemcheck_show_all();
+		kmemcheck_error_save_bug(regs);
+		data->balance = 0;
+		return;
+	}
+
+	/*
+	 * None of the addresses actually belonged to kmemcheck. Note that
+	 * this is not an error.
+	 */
+	if (kmemcheck_show_all() == 0)
+		return;
+
+	++data->balance;
+
+	/*
+	 * The IF needs to be cleared as well, so that the faulting
+	 * instruction can run "uninterrupted". Otherwise, we might take
+	 * an interrupt and start executing that before we've had a chance
+	 * to hide the page again.
+	 *
+	 * NOTE: In the rare case of multiple faults, we must not override
+	 * the original flags:
+	 */
+	if (!(regs->flags & X86_EFLAGS_TF))
+		data->flags = regs->flags;
+
+	regs->flags |= X86_EFLAGS_TF;
+	regs->flags &= ~X86_EFLAGS_IF;
+}
+
+/*
+ * Called from the #DB handler.
+ */
+void kmemcheck_hide(struct pt_regs *regs)
+{
+	struct kmemcheck_context *data = &__get_cpu_var(kmemcheck_context);
+	int n;
+
+	BUG_ON(!irqs_disabled());
+
+	if (data->balance == 0)
+		return;
+
+	if (unlikely(data->balance != 1)) {
+		kmemcheck_show_all();
+		kmemcheck_error_save_bug(regs);
+		data->n_addrs = 0;
+		data->balance = 0;
+
+		if (!(data->flags & X86_EFLAGS_TF))
+			regs->flags &= ~X86_EFLAGS_TF;
+		if (data->flags & X86_EFLAGS_IF)
+			regs->flags |= X86_EFLAGS_IF;
+		return;
+	}
+
+	if (kmemcheck_enabled)
+		n = kmemcheck_hide_all();
+	else
+		n = kmemcheck_show_all();
+
+	if (n == 0)
+		return;
+
+	--data->balance;
+
+	data->n_addrs = 0;
+
+	if (!(data->flags & X86_EFLAGS_TF))
+		regs->flags &= ~X86_EFLAGS_TF;
+	if (data->flags & X86_EFLAGS_IF)
+		regs->flags |= X86_EFLAGS_IF;
+}
+
+void kmemcheck_show_pages(struct page *p, unsigned int n)
+{
+	unsigned int i;
+
+	for (i = 0; i < n; ++i) {
+		unsigned long address;
+		pte_t *pte;
+		unsigned int level;
+
+		address = (unsigned long) page_address(&p[i]);
+		pte = lookup_address(address, &level);
+		BUG_ON(!pte);
+		BUG_ON(level != PG_LEVEL_4K);
+
+		set_pte(pte, __pte(pte_val(*pte) | _PAGE_PRESENT));
+		set_pte(pte, __pte(pte_val(*pte) & ~_PAGE_HIDDEN));
+		__flush_tlb_one(address);
+	}
+}
+
+bool kmemcheck_page_is_tracked(struct page *p)
+{
+	/* This will also check the "hidden" flag of the PTE. */
+	return kmemcheck_pte_lookup((unsigned long) page_address(p));
+}
+
+void kmemcheck_hide_pages(struct page *p, unsigned int n)
+{
+	unsigned int i;
+
+	for (i = 0; i < n; ++i) {
+		unsigned long address;
+		pte_t *pte;
+		unsigned int level;
+
+		address = (unsigned long) page_address(&p[i]);
+		pte = lookup_address(address, &level);
+		BUG_ON(!pte);
+		BUG_ON(level != PG_LEVEL_4K);
+
+		set_pte(pte, __pte(pte_val(*pte) & ~_PAGE_PRESENT));
+		set_pte(pte, __pte(pte_val(*pte) | _PAGE_HIDDEN));
+		__flush_tlb_one(address);
+	}
+}
+
+/* Access may NOT cross page boundary */
+static void kmemcheck_read_strict(struct pt_regs *regs,
+	unsigned long addr, unsigned int size)
+{
+	void *shadow;
+	enum kmemcheck_shadow status;
+
+	shadow = kmemcheck_shadow_lookup(addr);
+	if (!shadow)
+		return;
+
+	kmemcheck_save_addr(addr);
+	status = kmemcheck_shadow_test(shadow, size);
+	if (status == KMEMCHECK_SHADOW_INITIALIZED)
+		return;
+
+	if (kmemcheck_enabled)
+		kmemcheck_error_save(status, addr, size, regs);
+
+	if (kmemcheck_enabled == 2)
+		kmemcheck_enabled = 0;
+
+	/* Don't warn about it again. */
+	kmemcheck_shadow_set(shadow, size);
+}
+
+/* Access may cross page boundary */
+static void kmemcheck_read(struct pt_regs *regs,
+	unsigned long addr, unsigned int size)
+{
+	unsigned long page = addr & PAGE_MASK;
+	unsigned long next_addr = addr + size - 1;
+	unsigned long next_page = next_addr & PAGE_MASK;
+
+	if (likely(page == next_page)) {
+		kmemcheck_read_strict(regs, addr, size);
+		return;
+	}
+
+	/*
+	 * What we do is basically to split the access across the
+	 * two pages and handle each part separately. Yes, this means
+	 * that we may now see reads that are 3 + 5 bytes, for
+	 * example (and if both are uninitialized, there will be two
+	 * reports), but it makes the code a lot simpler.
+	 */
+	kmemcheck_read_strict(regs, addr, next_page - addr);
+	kmemcheck_read_strict(regs, next_page, next_addr - next_page);
+}
+
+static void kmemcheck_write_strict(struct pt_regs *regs,
+	unsigned long addr, unsigned int size)
+{
+	void *shadow;
+
+	shadow = kmemcheck_shadow_lookup(addr);
+	if (!shadow)
+		return;
+
+	kmemcheck_save_addr(addr);
+	kmemcheck_shadow_set(shadow, size);
+}
+
+static void kmemcheck_write(struct pt_regs *regs,
+	unsigned long addr, unsigned int size)
+{
+	unsigned long page = addr & PAGE_MASK;
+	unsigned long next_addr = addr + size - 1;
+	unsigned long next_page = next_addr & PAGE_MASK;
+
+	if (likely(page == next_page)) {
+		kmemcheck_write_strict(regs, addr, size);
+		return;
+	}
+
+	/* See comment in kmemcheck_read(). */
+	kmemcheck_write_strict(regs, addr, next_page - addr);
+	kmemcheck_write_strict(regs, next_page, next_addr - next_page);
+}
+
+/*
+ * Copying is hard. We have two addresses, each of which may be split across
+ * a page boundary (and each page will have different shadow addresses).
+ */
+static void kmemcheck_copy(struct pt_regs *regs,
+	unsigned long src_addr, unsigned long dst_addr, unsigned int size)
+{
+	uint8_t shadow[8];
+	enum kmemcheck_shadow status;
+
+	unsigned long page;
+	unsigned long next_addr;
+	unsigned long next_page;
+
+	uint8_t *x;
+	unsigned int i;
+	unsigned int n;
+
+	BUG_ON(size > sizeof(shadow));
+
+	page = src_addr & PAGE_MASK;
+	next_addr = src_addr + size - 1;
+	next_page = next_addr & PAGE_MASK;
+
+	if (likely(page == next_page)) {
+		/* Same page */
+		x = kmemcheck_shadow_lookup(src_addr);
+		if (x) {
+			kmemcheck_save_addr(src_addr);
+			for (i = 0; i < size; ++i)
+				shadow[i] = x[i];
+		} else {
+			for (i = 0; i < size; ++i)
+				shadow[i] = KMEMCHECK_SHADOW_INITIALIZED;
+		}
+	} else {
+		n = next_page - src_addr;
+		BUG_ON(n > sizeof(shadow));
+
+		/* First page */
+		x = kmemcheck_shadow_lookup(src_addr);
+		if (x) {
+			kmemcheck_save_addr(src_addr);
+			for (i = 0; i < n; ++i)
+				shadow[i] = x[i];
+		} else {
+			/* Not tracked */
+			for (i = 0; i < n; ++i)
+				shadow[i] = KMEMCHECK_SHADOW_INITIALIZED;
+		}
+
+		/* Second page */
+		x = kmemcheck_shadow_lookup(next_page);
+		if (x) {
+			kmemcheck_save_addr(next_page);
+			for (i = n; i < size; ++i)
+				shadow[i] = x[i - n];
+		} else {
+			/* Not tracked */
+			for (i = n; i < size; ++i)
+				shadow[i] = KMEMCHECK_SHADOW_INITIALIZED;
+		}
+	}
+
+	page = dst_addr & PAGE_MASK;
+	next_addr = dst_addr + size - 1;
+	next_page = next_addr & PAGE_MASK;
+
+	if (likely(page == next_page)) {
+		/* Same page */
+		x = kmemcheck_shadow_lookup(dst_addr);
+		if (x) {
+			kmemcheck_save_addr(dst_addr);
+			for (i = 0; i < size; ++i) {
+				x[i] = shadow[i];
+				shadow[i] = KMEMCHECK_SHADOW_INITIALIZED;
+			}
+		}
+	} else {
+		n = next_page - dst_addr;
+		BUG_ON(n > sizeof(shadow));
+
+		/* First page */
+		x = kmemcheck_shadow_lookup(dst_addr);
+		if (x) {
+			kmemcheck_save_addr(dst_addr);
+			for (i = 0; i < n; ++i) {
+				x[i] = shadow[i];
+				shadow[i] = KMEMCHECK_SHADOW_INITIALIZED;
+			}
+		}
+
+		/* Second page */
+		x = kmemcheck_shadow_lookup(next_page);
+		if (x) {
+			kmemcheck_save_addr(next_page);
+			for (i = n; i < size; ++i) {
+				x[i - n] = shadow[i];
+				shadow[i] = KMEMCHECK_SHADOW_INITIALIZED;
+			}
+		}
+	}
+
+	status = kmemcheck_shadow_test(shadow, size);
+	if (status == KMEMCHECK_SHADOW_INITIALIZED)
+		return;
+
+	if (kmemcheck_enabled)
+		kmemcheck_error_save(status, src_addr, size, regs);
+
+	if (kmemcheck_enabled == 2)
+		kmemcheck_enabled = 0;
+}
+
+enum kmemcheck_method {
+	KMEMCHECK_READ,
+	KMEMCHECK_WRITE,
+};
+
+static void kmemcheck_access(struct pt_regs *regs,
+	unsigned long fallback_address, enum kmemcheck_method fallback_method)
+{
+	const uint8_t *insn;
+	const uint8_t *insn_primary;
+	unsigned int size;
+
+	struct kmemcheck_context *data = &__get_cpu_var(kmemcheck_context);
+
+	/* Recursive fault -- ouch. */
+	if (data->busy) {
+		kmemcheck_show_addr(fallback_address);
+		kmemcheck_error_save_bug(regs);
+		return;
+	}
+
+	data->busy = true;
+
+	insn = (const uint8_t *) regs->ip;
+	insn_primary = kmemcheck_opcode_get_primary(insn);
+
+	kmemcheck_opcode_decode(insn, &size);
+
+	switch (insn_primary[0]) {
+#ifdef CONFIG_KMEMCHECK_BITOPS_OK
+		/* AND, OR, XOR */
+		/*
+		 * Unfortunately, these instructions have to be excluded from
+		 * our regular checking since they access only some (and not
+		 * all) bits. This clears out "bogus" bitfield-access warnings.
+		 */
+	case 0x80:
+	case 0x81:
+	case 0x82:
+	case 0x83:
+		switch ((insn_primary[1] >> 3) & 7) {
+			/* OR */
+		case 1:
+			/* AND */
+		case 4:
+			/* XOR */
+		case 6:
+			kmemcheck_write(regs, fallback_address, size);
+			goto out;
+
+			/* ADD */
+		case 0:
+			/* ADC */
+		case 2:
+			/* SBB */
+		case 3:
+			/* SUB */
+		case 5:
+			/* CMP */
+		case 7:
+			break;
+		}
+		break;
+#endif
+
+		/* MOVS, MOVSB, MOVSW, MOVSD */
+	case 0xa4:
+	case 0xa5:
+		/*
+		 * These instructions are special because they take two
+		 * addresses, but we only get one page fault.
+		 */
+		kmemcheck_copy(regs, regs->si, regs->di, size);
+		goto out;
+
+		/* CMPS, CMPSB, CMPSW, CMPSD */
+	case 0xa6:
+	case 0xa7:
+		kmemcheck_read(regs, regs->si, size);
+		kmemcheck_read(regs, regs->di, size);
+		goto out;
+	}
+
+	/*
+	 * If the opcode isn't special in any way, we use the data from the
+	 * page fault handler to determine the address and type of memory
+	 * access.
+	 */
+	switch (fallback_method) {
+	case KMEMCHECK_READ:
+		kmemcheck_read(regs, fallback_address, size);
+		goto out;
+	case KMEMCHECK_WRITE:
+		kmemcheck_write(regs, fallback_address, size);
+		goto out;
+	}
+
+out:
+	data->busy = false;
+}
+
+bool kmemcheck_fault(struct pt_regs *regs, unsigned long address,
+	unsigned long error_code)
+{
+	pte_t *pte;
+
+	/*
+	 * XXX: Is it safe to assume that memory accesses from virtual 86
+	 * mode or non-kernel code segments will _never_ access kernel
+	 * memory (e.g. tracked pages)? For now, we need this to avoid
+	 * invoking kmemcheck for PnP BIOS calls.
+	 */
+	if (regs->flags & X86_VM_MASK)
+		return false;
+	if (regs->cs != __KERNEL_CS)
+		return false;
+
+	pte = kmemcheck_pte_lookup(address);
+	if (!pte)
+		return false;
+
+	if (error_code & 2)
+		kmemcheck_access(regs, address, KMEMCHECK_WRITE);
+	else
+		kmemcheck_access(regs, address, KMEMCHECK_READ);
+
+	kmemcheck_show(regs);
+	return true;
+}
+
+bool kmemcheck_trap(struct pt_regs *regs)
+{
+	if (!kmemcheck_active(regs))
+		return false;
+
+	/* We're done. */
+	kmemcheck_hide(regs);
+	return true;
+}
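
As a quick reference, the values kmemcheck_enabled takes in the file above (set by the *_BY_DEFAULT Kconfig options or the kmemcheck= boot parameter) behave roughly as follows; this summary is an interpretation of the code, not a quote from the patch:

/*
 * kmemcheck=0   checking off: kmemcheck_hide() leaves faulted pages visible
 *               instead of re-hiding them, so they stop generating reports
 * kmemcheck=1   checking on: every invalid access is reported
 * kmemcheck=2   one-shot mode: the first report flips kmemcheck_enabled back
 *               to 0 (see kmemcheck_read_strict() and kmemcheck_copy())
 */
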
diff --git a/arch/x86/mm/kmemcheck/opcode.c b/arch/x86/mm/kmemcheck/opcode.c
new file mode 100644
index 0000000..63c19e2
--- /dev/null
+++ b/arch/x86/mm/kmemcheck/opcode.c
@@ -0,0 +1,106 @@
+#include <linux/types.h>
+
+#include "opcode.h"
+
+static bool opcode_is_prefix(uint8_t b)
+{
+	return
+		/* Group 1 */
+		b == 0xf0 || b == 0xf2 || b == 0xf3
+		/* Group 2 */
+		|| b == 0x2e || b == 0x36 || b == 0x3e || b == 0x26
+		|| b == 0x64 || b == 0x65 || b == 0x2e || b == 0x3e
+		/* Group 3 */
+		|| b == 0x66
+		/* Group 4 */
+		|| b == 0x67;
+}
+
+#ifdef CONFIG_X86_64
+static bool opcode_is_rex_prefix(uint8_t b)
+{
+	return (b & 0xf0) == 0x40;
+}
+#else
+static bool opcode_is_rex_prefix(uint8_t b)
+{
+	return false;
+}
+#endif
+
+#define REX_W (1 << 3)
+
+/*
+ * This is a VERY crude opcode decoder. We only need to find the size of the
+ * load/store that caused our #PF and this should work for all the opcodes
+ * that we care about. Moreover, the ones who invented this instruction set
+ * should be shot.
+ */
+void kmemcheck_opcode_decode(const uint8_t *op, unsigned int *size)
+{
+	/* Default operand size */
+	int operand_size_override = 4;
+
+	/* prefixes */
+	for (; opcode_is_prefix(*op); ++op) {
+		if (*op == 0x66)
+			operand_size_override = 2;
+	}
+
+	/* REX prefix */
+	if (opcode_is_rex_prefix(*op)) {
+		uint8_t rex = *op;
+
+		++op;
+		if (rex & REX_W) {
+			switch (*op) {
+			case 0x63:
+				*size = 4;
+				return;
+			case 0x0f:
+				++op;
+
+				switch (*op) {
+				case 0xb6:
+				case 0xbe:
+					*size = 1;
+					return;
+				case 0xb7:
+				case 0xbf:
+					*size = 2;
+					return;
+				}
+
+				break;
+			}
+
+			*size = 8;
+			return;
+		}
+	}
+
+	/* escape opcode */
+	if (*op == 0x0f) {
+		++op;
+
+		/*
+		 * This is move with zero-extend and sign-extend, respectively;
+		 * we don't have to think about 0xb6/0xbe, because this is
+		 * already handled in the conditional below.
+		 */
+		if (*op == 0xb7 || *op == 0xbf)
+			operand_size_override = 2;
+	}
+
+	*size = (*op & 1) ? operand_size_override : 1;
+}
+
+const uint8_t *kmemcheck_opcode_get_primary(const uint8_t *op)
+{
+	/* skip prefixes */
+	while (opcode_is_prefix(*op))
+		++op;
+	if (opcode_is_rex_prefix(*op))
+		++op;
+	return op;
+}
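
A hypothetical caller, included only to make the decoder's behaviour concrete (it assumes opcode.h is in scope and mirrors what the selftest below does):

static void example_decode(void)
{
	/* 0x66 0x0f 0xb7: movzx with a 16-bit memory operand; the trailing
	 * ModRM/displacement bytes do not affect the size calculation. */
	const uint8_t insn[] = "\x66\x0f\xb7\x51\xf8";
	unsigned int size;

	kmemcheck_opcode_decode(insn, &size);
	/* The 0x66 prefix narrows the default operand size to 2, and 0xb7 has
	 * its low bit set, so the decoder reports size == 2 here. */
}
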
diff --git a/arch/x86/mm/kmemcheck/opcode.h b/arch/x86/mm/kmemcheck/opcode.h
new file mode 100644
index 0000000..6956aad
--- /dev/null
+++ b/arch/x86/mm/kmemcheck/opcode.h
@@ -0,0 +1,9 @@
+#ifndef ARCH__X86__MM__KMEMCHECK__OPCODE_H
+#define ARCH__X86__MM__KMEMCHECK__OPCODE_H
+
+#include <linux/types.h>
+
+void kmemcheck_opcode_decode(const uint8_t *op, unsigned int *size);
+const uint8_t *kmemcheck_opcode_get_primary(const uint8_t *op);
+
+#endif
diff --git a/arch/x86/mm/kmemcheck/pte.c b/arch/x86/mm/kmemcheck/pte.c
new file mode 100644
index 0000000..4ead26e
--- /dev/null
+++ b/arch/x86/mm/kmemcheck/pte.c
@@ -0,0 +1,22 @@
+#include <linux/mm.h>
+
+#include <asm/pgtable.h>
+
+#include "pte.h"
+
+pte_t *kmemcheck_pte_lookup(unsigned long address)
+{
+	pte_t *pte;
+	unsigned int level;
+
+	pte = lookup_address(address, &level);
+	if (!pte)
+		return NULL;
+	if (level != PG_LEVEL_4K)
+		return NULL;
+	if (!pte_hidden(*pte))
+		return NULL;
+
+	return pte;
+}
+
diff --git a/arch/x86/mm/kmemcheck/pte.h b/arch/x86/mm/kmemcheck/pte.h
new file mode 100644
index 0000000..9f59664
--- /dev/null
+++ b/arch/x86/mm/kmemcheck/pte.h
@@ -0,0 +1,10 @@
+#ifndef ARCH__X86__MM__KMEMCHECK__PTE_H
+#define ARCH__X86__MM__KMEMCHECK__PTE_H
+
+#include <linux/mm.h>
+
+#include <asm/pgtable.h>
+
+pte_t *kmemcheck_pte_lookup(unsigned long address);
+
+#endif
diff --git a/arch/x86/mm/kmemcheck/selftest.c b/arch/x86/mm/kmemcheck/selftest.c
new file mode 100644
index 0000000..036efbe
--- /dev/null
+++ b/arch/x86/mm/kmemcheck/selftest.c
@@ -0,0 +1,69 @@
+#include <linux/kernel.h>
+
+#include "opcode.h"
+#include "selftest.h"
+
+struct selftest_opcode {
+	unsigned int expected_size;
+	const uint8_t *insn;
+	const char *desc;
+};
+
+static const struct selftest_opcode selftest_opcodes[] = {
+	/* REP MOVS */
+	{1, "\xf3\xa4", 		"rep movsb <mem8>, <mem8>"},
+	{4, "\xf3\xa5",			"rep movsl <mem32>, <mem32>"},
+
+	/* MOVZX / MOVZXD */
+	{1, "\x66\x0f\xb6\x51\xf8",	"movzwq <mem8>, <reg16>"},
+	{1, "\x0f\xb6\x51\xf8",		"movzwq <mem8>, <reg32>"},
+
+	/* MOVSX / MOVSXD */
+	{1, "\x66\x0f\xbe\x51\xf8",	"movswq <mem8>, <reg16>"},
+	{1, "\x0f\xbe\x51\xf8",		"movswq <mem8>, <reg32>"},
+
+#ifdef CONFIG_X86_64
+	/* MOVZX / MOVZXD */
+	{1, "\x49\x0f\xb6\x51\xf8",	"movzbq <mem8>, <reg64>"},
+	{2, "\x49\x0f\xb7\x51\xf8",	"movzbq <mem16>, <reg64>"},
+
+	/* MOVSX / MOVSXD */
+	{1, "\x49\x0f\xbe\x51\xf8",	"movsbq <mem8>, <reg64>"},
+	{2, "\x49\x0f\xbf\x51\xf8",	"movsbq <mem16>, <reg64>"},
+	{4, "\x49\x63\x51\xf8",		"movslq <mem32>, <reg64>"},
+#endif
+};
+
+static bool selftest_opcode_one(const struct selftest_opcode *op)
+{
+	unsigned size;
+
+	kmemcheck_opcode_decode(op->insn, &size);
+
+	if (size == op->expected_size)
+		return true;
+
+	printk(KERN_WARNING "kmemcheck: opcode %s: expected size %d, got %d\n",
+		op->desc, op->expected_size, size);
+	return false;
+}
+
+static bool selftest_opcodes_all(void)
+{
+	bool pass = true;
+	unsigned int i;
+
+	for (i = 0; i < ARRAY_SIZE(selftest_opcodes); ++i)
+		pass = pass && selftest_opcode_one(&selftest_opcodes[i]);
+
+	return pass;
+}
+
+bool kmemcheck_selftest(void)
+{
+	bool pass = true;
+
+	pass = pass && selftest_opcodes_all();
+
+	return pass;
+}
diff --git a/arch/x86/mm/kmemcheck/selftest.h b/arch/x86/mm/kmemcheck/selftest.h
new file mode 100644
index 0000000..8fed4fe
--- /dev/null
+++ b/arch/x86/mm/kmemcheck/selftest.h
@@ -0,0 +1,6 @@
+#ifndef ARCH_X86_MM_KMEMCHECK_SELFTEST_H
+#define ARCH_X86_MM_KMEMCHECK_SELFTEST_H
+
+bool kmemcheck_selftest(void);
+
+#endif
diff --git a/arch/x86/mm/kmemcheck/shadow.c b/arch/x86/mm/kmemcheck/shadow.c
new file mode 100644
index 0000000..e773b6b
--- /dev/null
+++ b/arch/x86/mm/kmemcheck/shadow.c
@@ -0,0 +1,162 @@
+#include <linux/kmemcheck.h>
+#include <linux/module.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+
+#include <asm/page.h>
+#include <asm/pgtable.h>
+
+#include "pte.h"
+#include "shadow.h"
+
+/*
+ * Return the shadow address for the given address. Returns NULL if the
+ * address is not tracked.
+ *
+ * We need to be extremely careful not to follow any invalid pointers,
+ * because this function can be called for *any* possible address.
+ */
+void *kmemcheck_shadow_lookup(unsigned long address)
+{
+	pte_t *pte;
+	struct page *page;
+
+	if (!virt_addr_valid(address))
+		return NULL;
+
+	pte = kmemcheck_pte_lookup(address);
+	if (!pte)
+		return NULL;
+
+	page = virt_to_page(address);
+	if (!page->shadow)
+		return NULL;
+	return page->shadow + (address & (PAGE_SIZE - 1));
+}
+
+static void mark_shadow(void *address, unsigned int n,
+	enum kmemcheck_shadow status)
+{
+	unsigned long addr = (unsigned long) address;
+	unsigned long last_addr = addr + n - 1;
+	unsigned long page = addr & PAGE_MASK;
+	unsigned long last_page = last_addr & PAGE_MASK;
+	unsigned int first_n;
+	void *shadow;
+
+	/* If the memory range crosses a page boundary, stop there. */
+	if (page == last_page)
+		first_n = n;
+	else
+		first_n = page + PAGE_SIZE - addr;
+
+	shadow = kmemcheck_shadow_lookup(addr);
+	if (shadow)
+		memset(shadow, status, first_n);
+
+	addr += first_n;
+	n -= first_n;
+
+	/* Do full-page memset()s. */
+	while (n >= PAGE_SIZE) {
+		shadow = kmemcheck_shadow_lookup(addr);
+		if (shadow)
+			memset(shadow, status, PAGE_SIZE);
+
+		addr += PAGE_SIZE;
+		n -= PAGE_SIZE;
+	}
+
+	/* Do the remaining page, if any. */
+	if (n > 0) {
+		shadow = kmemcheck_shadow_lookup(addr);
+		if (shadow)
+			memset(shadow, status, n);
+	}
+}
+
+void kmemcheck_mark_unallocated(void *address, unsigned int n)
+{
+	mark_shadow(address, n, KMEMCHECK_SHADOW_UNALLOCATED);
+}
+
+void kmemcheck_mark_uninitialized(void *address, unsigned int n)
+{
+	mark_shadow(address, n, KMEMCHECK_SHADOW_UNINITIALIZED);
+}
+
+/*
+ * Fill the shadow memory of the given address such that the memory at that
+ * address is marked as being initialized.
+ */
+void kmemcheck_mark_initialized(void *address, unsigned int n)
+{
+	mark_shadow(address, n, KMEMCHECK_SHADOW_INITIALIZED);
+}
+EXPORT_SYMBOL_GPL(kmemcheck_mark_initialized);
+
+void kmemcheck_mark_freed(void *address, unsigned int n)
+{
+	mark_shadow(address, n, KMEMCHECK_SHADOW_FREED);
+}
+
+void kmemcheck_mark_unallocated_pages(struct page *p, unsigned int n)
+{
+	unsigned int i;
+
+	for (i = 0; i < n; ++i)
+		kmemcheck_mark_unallocated(page_address(&p[i]), PAGE_SIZE);
+}
+
+void kmemcheck_mark_uninitialized_pages(struct page *p, unsigned int n)
+{
+	unsigned int i;
+
+	for (i = 0; i < n; ++i)
+		kmemcheck_mark_uninitialized(page_address(&p[i]), PAGE_SIZE);
+}
+
+void kmemcheck_mark_initialized_pages(struct page *p, unsigned int n)
+{
+	unsigned int i;
+
+	for (i = 0; i < n; ++i)
+		kmemcheck_mark_initialized(page_address(&p[i]), PAGE_SIZE);
+}
+
+enum kmemcheck_shadow kmemcheck_shadow_test(void *shadow, unsigned int size)
+{
+	uint8_t *x;
+	unsigned int i;
+
+	x = shadow;
+
+#ifdef CONFIG_KMEMCHECK_PARTIAL_OK
+	/*
+	 * Make sure _some_ bytes are initialized. Gcc frequently generates
+	 * code to access neighboring bytes.
+	 */
+	for (i = 0; i < size; ++i) {
+		if (x[i] == KMEMCHECK_SHADOW_INITIALIZED)
+			return x[i];
+	}
+#else
+	/* All bytes must be initialized. */
+	for (i = 0; i < size; ++i) {
+		if (x[i] != KMEMCHECK_SHADOW_INITIALIZED)
+			return x[i];
+	}
+#endif
+
+	return x[0];
+}
+
+void kmemcheck_shadow_set(void *shadow, unsigned int size)
+{
+	uint8_t *x;
+	unsigned int i;
+
+	x = shadow;
+	for (i = 0; i < size; ++i)
+		x[i] = KMEMCHECK_SHADOW_INITIALIZED;
+}
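
A small sketch (assuming shadow.h and <linux/types.h> are in scope) of how the two kmemcheck_shadow_test() variants above differ for a partially initialized buffer:

static enum kmemcheck_shadow example_shadow_test(void)
{
	uint8_t shadow[4] = {
		KMEMCHECK_SHADOW_INITIALIZED,
		KMEMCHECK_SHADOW_UNINITIALIZED,
		KMEMCHECK_SHADOW_INITIALIZED,
		KMEMCHECK_SHADOW_INITIALIZED,
	};

	/* With CONFIG_KMEMCHECK_PARTIAL_OK this returns
	 * KMEMCHECK_SHADOW_INITIALIZED (byte 0 is enough); without it, it
	 * returns KMEMCHECK_SHADOW_UNINITIALIZED (byte 1 fails the check). */
	return kmemcheck_shadow_test(shadow, sizeof(shadow));
}
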
diff --git a/arch/x86/mm/kmemcheck/shadow.h b/arch/x86/mm/kmemcheck/shadow.h
new file mode 100644
index 0000000..af46d9a
--- /dev/null
+++ b/arch/x86/mm/kmemcheck/shadow.h
@@ -0,0 +1,16 @@
+#ifndef ARCH__X86__MM__KMEMCHECK__SHADOW_H
+#define ARCH__X86__MM__KMEMCHECK__SHADOW_H
+
+enum kmemcheck_shadow {
+	KMEMCHECK_SHADOW_UNALLOCATED,
+	KMEMCHECK_SHADOW_UNINITIALIZED,
+	KMEMCHECK_SHADOW_INITIALIZED,
+	KMEMCHECK_SHADOW_FREED,
+};
+
+void *kmemcheck_shadow_lookup(unsigned long address);
+
+enum kmemcheck_shadow kmemcheck_shadow_test(void *shadow, unsigned int size);
+void kmemcheck_shadow_set(void *shadow, unsigned int size);
+
+#endif
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index 6ce9518..3cfe9ce 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -470,7 +470,7 @@
 
 	if (!debug_pagealloc)
 		spin_unlock(&cpa_lock);
-	base = alloc_pages(GFP_KERNEL, 0);
+	base = alloc_pages(GFP_KERNEL | __GFP_NOTRACK, 0);
 	if (!debug_pagealloc)
 		spin_lock(&cpa_lock);
 	if (!base)
diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
index 7aa03a5..8e43bdd 100644
--- a/arch/x86/mm/pgtable.c
+++ b/arch/x86/mm/pgtable.c
@@ -4,9 +4,11 @@
 #include <asm/tlb.h>
 #include <asm/fixmap.h>
 
+#define PGALLOC_GFP GFP_KERNEL | __GFP_NOTRACK | __GFP_REPEAT | __GFP_ZERO
+
 pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
 {
-	return (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
+	return (pte_t *)__get_free_page(PGALLOC_GFP);
 }
 
 pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
@@ -14,9 +16,9 @@
 	struct page *pte;
 
 #ifdef CONFIG_HIGHPTE
-	pte = alloc_pages(GFP_KERNEL|__GFP_HIGHMEM|__GFP_REPEAT|__GFP_ZERO, 0);
+	pte = alloc_pages(PGALLOC_GFP | __GFP_HIGHMEM, 0);
 #else
-	pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, 0);
+	pte = alloc_pages(PGALLOC_GFP, 0);
 #endif
 	if (pte)
 		pgtable_page_ctor(pte);
@@ -161,7 +163,7 @@
 	bool failed = false;
 
 	for(i = 0; i < PREALLOCATED_PMDS; i++) {
-		pmd_t *pmd = (pmd_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
+		pmd_t *pmd = (pmd_t *)__get_free_page(PGALLOC_GFP);
 		if (pmd == NULL)
 			failed = true;
 		pmds[i] = pmd;
@@ -228,7 +230,7 @@
 	pmd_t *pmds[PREALLOCATED_PMDS];
 	unsigned long flags;
 
-	pgd = (pgd_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
+	pgd = (pgd_t *)__get_free_page(PGALLOC_GFP);
 
 	if (pgd == NULL)
 		goto out;
diff --git a/block/blk-core.c b/block/blk-core.c
index f6452f6..b06cf5c 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -26,7 +26,6 @@
 #include <linux/swap.h>
 #include <linux/writeback.h>
 #include <linux/task_io_accounting_ops.h>
-#include <linux/blktrace_api.h>
 #include <linux/fault-inject.h>
 
 #define CREATE_TRACE_POINTS
@@ -498,6 +497,11 @@
 
 	q->backing_dev_info.unplug_io_fn = blk_backing_dev_unplug;
 	q->backing_dev_info.unplug_io_data = q;
+	q->backing_dev_info.ra_pages =
+			(VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE;
+	q->backing_dev_info.state = 0;
+	q->backing_dev_info.capabilities = BDI_CAP_MAP_COPY;
+
 	err = bdi_init(&q->backing_dev_info);
 	if (err) {
 		kmem_cache_free(blk_requestq_cachep, q);
diff --git a/block/blk-settings.c b/block/blk-settings.c
index d71cedc..7541ea4 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -96,6 +96,31 @@
 EXPORT_SYMBOL_GPL(blk_queue_lld_busy);
 
 /**
+ * blk_set_default_limits - reset limits to default values
+ * @lim:  the queue_limits structure to reset
+ *
+ * Description:
+ *   Returns a queue_limits struct to its default state.  Can be used by
+ *   stacking drivers like DM that stage table swaps and reuse an
+ *   existing device queue.
+ */
+void blk_set_default_limits(struct queue_limits *lim)
+{
+	lim->max_phys_segments = MAX_PHYS_SEGMENTS;
+	lim->max_hw_segments = MAX_HW_SEGMENTS;
+	lim->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK;
+	lim->max_segment_size = MAX_SEGMENT_SIZE;
+	lim->max_sectors = lim->max_hw_sectors = SAFE_MAX_SECTORS;
+	lim->logical_block_size = lim->physical_block_size = lim->io_min = 512;
+	lim->bounce_pfn = BLK_BOUNCE_ANY;
+	lim->alignment_offset = 0;
+	lim->io_opt = 0;
+	lim->misaligned = 0;
+	lim->no_cluster = 0;
+}
+EXPORT_SYMBOL(blk_set_default_limits);
+
+/**
  * blk_queue_make_request - define an alternate make_request function for a device
  * @q:  the request queue for the device to be affected
  * @mfn: the alternate make_request function
@@ -123,18 +148,8 @@
 	 * set defaults
 	 */
 	q->nr_requests = BLKDEV_MAX_RQ;
-	blk_queue_max_phys_segments(q, MAX_PHYS_SEGMENTS);
-	blk_queue_max_hw_segments(q, MAX_HW_SEGMENTS);
-	blk_queue_segment_boundary(q, BLK_SEG_BOUNDARY_MASK);
-	blk_queue_max_segment_size(q, MAX_SEGMENT_SIZE);
 
 	q->make_request_fn = mfn;
-	q->backing_dev_info.ra_pages =
-			(VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE;
-	q->backing_dev_info.state = 0;
-	q->backing_dev_info.capabilities = BDI_CAP_MAP_COPY;
-	blk_queue_max_sectors(q, SAFE_MAX_SECTORS);
-	blk_queue_logical_block_size(q, 512);
 	blk_queue_dma_alignment(q, 511);
 	blk_queue_congestion_threshold(q);
 	q->nr_batching = BLK_BATCH_REQ;
@@ -147,6 +162,8 @@
 	q->unplug_timer.function = blk_unplug_timeout;
 	q->unplug_timer.data = (unsigned long)q;
 
+	blk_set_default_limits(&q->limits);
+
 	/*
 	 * by default assume old behaviour and bounce for any highmem page
 	 */
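
The kernel-doc for blk_set_default_limits() above names stacking drivers as the intended caller; a rough sketch of that pattern, assuming <linux/blkdev.h>, with the function name and the combining step purely illustrative (only blk_set_default_limits() and q->limits come from this patch):

static void example_reset_stacked_limits(struct request_queue *q)
{
	struct queue_limits lim;

	/* Start from the defaults, exactly as blk_queue_make_request() now
	 * does for q->limits ... */
	blk_set_default_limits(&lim);

	/* ... fold in the limits of each underlying device here ... */

	q->limits = lim;
}
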
diff --git a/block/bsg.c b/block/bsg.c
index 5358f9a..54106f0 100644
--- a/block/bsg.c
+++ b/block/bsg.c
@@ -1065,6 +1065,11 @@
 
 static struct cdev bsg_cdev;
 
+static char *bsg_nodename(struct device *dev)
+{
+	return kasprintf(GFP_KERNEL, "bsg/%s", dev_name(dev));
+}
+
 static int __init bsg_init(void)
 {
 	int ret, i;
@@ -1085,6 +1090,7 @@
 		ret = PTR_ERR(bsg_class);
 		goto destroy_kmemcache;
 	}
+	bsg_class->nodename = bsg_nodename;
 
 	ret = alloc_chrdev_region(&devid, 0, BSG_MAX_DEVS, "bsg");
 	if (ret)
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index ef2f72d..833ec18 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -122,7 +122,6 @@
 	struct cfq_queue *async_idle_cfqq;
 
 	sector_t last_position;
-	unsigned long last_end_request;
 
 	/*
 	 * tunables, see top of file
@@ -1253,7 +1252,7 @@
 
 	BUG_ON(cfqd->busy_queues);
 
-	cfq_log(cfqd, "forced_dispatch=%d\n", dispatched);
+	cfq_log(cfqd, "forced_dispatch=%d", dispatched);
 	return dispatched;
 }
 
@@ -2164,9 +2163,6 @@
 	if (cfq_cfqq_sync(cfqq))
 		cfqd->sync_flight--;
 
-	if (!cfq_class_idle(cfqq))
-		cfqd->last_end_request = now;
-
 	if (sync)
 		RQ_CIC(rq)->last_end_request = now;
 
@@ -2479,7 +2475,6 @@
 
 	INIT_WORK(&cfqd->unplug_work, cfq_kick_queue);
 
-	cfqd->last_end_request = jiffies;
 	cfqd->cfq_quantum = cfq_quantum;
 	cfqd->cfq_fifo_expire[0] = cfq_fifo_expire[0];
 	cfqd->cfq_fifo_expire[1] = cfq_fifo_expire[1];
diff --git a/block/genhd.c b/block/genhd.c
index fe7ccc0..f4c64c2 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -996,10 +996,20 @@
 	.name		= "block",
 };
 
+static char *block_nodename(struct device *dev)
+{
+	struct gendisk *disk = dev_to_disk(dev);
+
+	if (disk->nodename)
+		return disk->nodename(disk);
+	return NULL;
+}
+
 static struct device_type disk_type = {
 	.name		= "disk",
 	.groups		= disk_attr_groups,
 	.release	= disk_release,
+	.nodename	= block_nodename,
 };
 
 #ifdef CONFIG_PROC_FS
diff --git a/crypto/xor.c b/crypto/xor.c
index 996b6ee..fc5b836f 100644
--- a/crypto/xor.c
+++ b/crypto/xor.c
@@ -101,7 +101,12 @@
 	void *b1, *b2;
 	struct xor_block_template *f, *fastest;
 
-	b1 = (void *) __get_free_pages(GFP_KERNEL, 2);
+	/*
+	 * Note: Since the memory is not actually used for _anything_ but to
+	 * test the XOR speed, we don't really want kmemcheck to warn about
+	 * reading uninitialized bytes here.
+	 */
+	b1 = (void *) __get_free_pages(GFP_KERNEL | __GFP_NOTRACK, 2);
 	if (!b1) {
 		printk(KERN_WARNING "xor: Yikes!  No memory available.\n");
 		return -ENOMEM;
diff --git a/drivers/acpi/acpica/acevents.h b/drivers/acpi/acpica/acevents.h
index 07e2013..0bba148 100644
--- a/drivers/acpi/acpica/acevents.h
+++ b/drivers/acpi/acpica/acevents.h
@@ -139,7 +139,7 @@
 acpi_status
 acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
 			       u32 function,
-			       acpi_physical_address address,
+			       u32 region_offset,
 			       u32 bit_width, acpi_integer * value);
 
 acpi_status
diff --git a/drivers/acpi/acpica/acglobal.h b/drivers/acpi/acpica/acglobal.h
index 16e5210..3d87362 100644
--- a/drivers/acpi/acpica/acglobal.h
+++ b/drivers/acpi/acpica/acglobal.h
@@ -362,9 +362,6 @@
 extern u8 acpi_gbl_abort_method;
 extern u8 acpi_gbl_db_terminate_threads;
 
-ACPI_EXTERN int optind;
-ACPI_EXTERN char *optarg;
-
 ACPI_EXTERN u8 acpi_gbl_db_opt_tables;
 ACPI_EXTERN u8 acpi_gbl_db_opt_stats;
 ACPI_EXTERN u8 acpi_gbl_db_opt_ini_methods;
diff --git a/drivers/acpi/acpica/aclocal.h b/drivers/acpi/acpica/aclocal.h
index 2ec394a..ee986ed 100644
--- a/drivers/acpi/acpica/aclocal.h
+++ b/drivers/acpi/acpica/aclocal.h
@@ -205,6 +205,7 @@
 #define ANOBJ_METHOD_LOCAL              0x08	/* Node is a method local */
 #define ANOBJ_SUBTREE_HAS_INI           0x10	/* Used to optimize device initialization */
 #define ANOBJ_EVALUATED                 0x20	/* Set on first evaluation of node */
+#define ANOBJ_ALLOCATED_BUFFER          0x40	/* Method AML buffer is dynamic (install_method) */
 
 #define ANOBJ_IS_EXTERNAL               0x08	/* i_aSL only: This object created via External() */
 #define ANOBJ_METHOD_NO_RETVAL          0x10	/* i_aSL only: Method has no return value */
@@ -788,11 +789,14 @@
 /* For control registers, both ignored and reserved bits must be preserved */
 
 /*
- * The ACPI spec says to ignore PM1_CTL.SCI_EN (bit 0)
- * but we need to be able to write ACPI_BITREG_SCI_ENABLE directly
- * as a BIOS workaround on some machines.
+ * For PM1 control, the SCI enable bit (bit 0, SCI_EN) is defined by the
+ * ACPI specification to be a "preserved" bit - "OSPM always preserves this
+ * bit position", section 4.7.3.2.1. However, on some machines the OS must
+ * write a one to this bit after resume for the machine to work properly.
+ * To enable this, we no longer attempt to preserve this bit. No machines
+ * are known to fail if the bit is not preserved. (May 2009)
  */
-#define ACPI_PM1_CONTROL_IGNORED_BITS           0x0200	/* Bits 9 */
+#define ACPI_PM1_CONTROL_IGNORED_BITS           0x0200	/* Bit 9 */
 #define ACPI_PM1_CONTROL_RESERVED_BITS          0xC1F8	/* Bits 14-15, 3-8 */
 #define ACPI_PM1_CONTROL_PRESERVED_BITS \
 	       (ACPI_PM1_CONTROL_IGNORED_BITS | ACPI_PM1_CONTROL_RESERVED_BITS)
diff --git a/drivers/acpi/acpica/acnamesp.h b/drivers/acpi/acpica/acnamesp.h
index 46cb5b4..94cdc2b 100644
--- a/drivers/acpi/acpica/acnamesp.h
+++ b/drivers/acpi/acpica/acnamesp.h
@@ -99,10 +99,19 @@
 		       acpi_walk_callback user_function,
 		       void *context, void **return_value);
 
-struct acpi_namespace_node *acpi_ns_get_next_node(acpi_object_type type, struct acpi_namespace_node
-						  *parent, struct acpi_namespace_node
+struct acpi_namespace_node *acpi_ns_get_next_node(struct acpi_namespace_node
+						  *parent,
+						  struct acpi_namespace_node
 						  *child);
 
+struct acpi_namespace_node *acpi_ns_get_next_node_typed(acpi_object_type type,
+							struct
+							acpi_namespace_node
+							*parent,
+							struct
+							acpi_namespace_node
+							*child);
+
 /*
  * nsparse - table parsing
  */
diff --git a/drivers/acpi/acpica/amlcode.h b/drivers/acpi/acpica/amlcode.h
index ff851c5..067f967 100644
--- a/drivers/acpi/acpica/amlcode.h
+++ b/drivers/acpi/acpica/amlcode.h
@@ -483,7 +483,7 @@
 
 #define AML_METHOD_ARG_COUNT        0x07
 #define AML_METHOD_SERIALIZED       0x08
-#define AML_METHOD_SYNCH_LEVEL      0xF0
+#define AML_METHOD_SYNC_LEVEL       0xF0
 
 /* METHOD_FLAGS_ARG_COUNT is not used internally, define additional flags */
 
diff --git a/drivers/acpi/acpica/dsobject.c b/drivers/acpi/acpica/dsobject.c
index dab3f48..02e6caa 100644
--- a/drivers/acpi/acpica/dsobject.c
+++ b/drivers/acpi/acpica/dsobject.c
@@ -734,7 +734,8 @@
 
 			/* Local ID (0-7) is (AML opcode - base AML_LOCAL_OP) */
 
-			obj_desc->reference.value = opcode - AML_LOCAL_OP;
+			obj_desc->reference.value =
+			    ((u32)opcode) - AML_LOCAL_OP;
 			obj_desc->reference.class = ACPI_REFCLASS_LOCAL;
 
 #ifndef ACPI_NO_METHOD_EXECUTION
@@ -754,7 +755,7 @@
 
 			/* Arg ID (0-6) is (AML opcode - base AML_ARG_OP) */
 
-			obj_desc->reference.value = opcode - AML_ARG_OP;
+			obj_desc->reference.value = ((u32)opcode) - AML_ARG_OP;
 			obj_desc->reference.class = ACPI_REFCLASS_ARG;
 
 #ifndef ACPI_NO_METHOD_EXECUTION
diff --git a/drivers/acpi/acpica/dsopcode.c b/drivers/acpi/acpica/dsopcode.c
index b4c87b5..584d766e 100644
--- a/drivers/acpi/acpica/dsopcode.c
+++ b/drivers/acpi/acpica/dsopcode.c
@@ -1386,14 +1386,19 @@
 
 	case AML_BREAK_POINT_OP:
 
-		/* Call up to the OS service layer to handle this */
+		/*
+		 * Set the single-step flag. This will cause the debugger (if present)
+		 * to break to the console within the AML debugger at the start of the
+		 * next AML instruction.
+		 */
+		ACPI_DEBUGGER_EXEC(acpi_gbl_cm_single_step = TRUE);
+		ACPI_DEBUGGER_EXEC(acpi_os_printf
+				   ("**break** Executed AML BreakPoint opcode\n"));
 
-		status =
-		    acpi_os_signal(ACPI_SIGNAL_BREAKPOINT,
-				   "Executed AML Breakpoint opcode");
+		/* Call to the OSL in case OS wants a piece of the action */
 
-		/* If and when it returns, all done. */
-
+		status = acpi_os_signal(ACPI_SIGNAL_BREAKPOINT,
+					"Executed AML Breakpoint opcode");
 		break;
 
 	case AML_BREAK_OP:
diff --git a/drivers/acpi/acpica/dswstate.c b/drivers/acpi/acpica/dswstate.c
index 40f92bf..e46c821 100644
--- a/drivers/acpi/acpica/dswstate.c
+++ b/drivers/acpi/acpica/dswstate.c
@@ -102,7 +102,7 @@
 	/* Return object of the top element and clean that top element result stack */
 
 	walk_state->result_count--;
-	index = walk_state->result_count % ACPI_RESULTS_FRAME_OBJ_NUM;
+	index = (u32)walk_state->result_count % ACPI_RESULTS_FRAME_OBJ_NUM;
 
 	*object = state->results.obj_desc[index];
 	if (!*object) {
@@ -186,7 +186,7 @@
 
 	/* Assign the address of object to the top free element of result stack */
 
-	index = walk_state->result_count % ACPI_RESULTS_FRAME_OBJ_NUM;
+	index = (u32)walk_state->result_count % ACPI_RESULTS_FRAME_OBJ_NUM;
 	state->results.obj_desc[index] = object;
 	walk_state->result_count++;
 
diff --git a/drivers/acpi/acpica/evregion.c b/drivers/acpi/acpica/evregion.c
index 538d632..98c7f9c 100644
--- a/drivers/acpi/acpica/evregion.c
+++ b/drivers/acpi/acpica/evregion.c
@@ -275,7 +275,7 @@
  *
  * PARAMETERS:  region_obj          - Internal region object
  *              Function            - Read or Write operation
- *              Address             - Where in the space to read or write
+ *              region_offset       - Where in the region to read or write
  *              bit_width           - Field width in bits (8, 16, 32, or 64)
  *              Value               - Pointer to in or out value, must be
  *                                    full 64-bit acpi_integer
@@ -290,7 +290,7 @@
 acpi_status
 acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
 			       u32 function,
-			       acpi_physical_address address,
+			       u32 region_offset,
 			       u32 bit_width, acpi_integer * value)
 {
 	acpi_status status;
@@ -396,7 +396,8 @@
 	ACPI_DEBUG_PRINT((ACPI_DB_OPREGION,
 			  "Handler %p (@%p) Address %8.8X%8.8X [%s]\n",
 			  &region_obj->region.handler->address_space, handler,
-			  ACPI_FORMAT_NATIVE_UINT(address),
+			  ACPI_FORMAT_NATIVE_UINT(region_obj->region.address +
+						  region_offset),
 			  acpi_ut_get_region_name(region_obj->region.
 						  space_id)));
 
@@ -412,8 +413,9 @@
 
 	/* Call the handler */
 
-	status = handler(function, address, bit_width, value,
-			 handler_desc->address_space.context,
+	status = handler(function,
+			 (region_obj->region.address + region_offset),
+			 bit_width, value, handler_desc->address_space.context,
 			 region_obj2->extra.region_context);
 
 	if (ACPI_FAILURE(status)) {
diff --git a/drivers/acpi/acpica/evxfevnt.c b/drivers/acpi/acpica/evxfevnt.c
index d0a0807..4721f58 100644
--- a/drivers/acpi/acpica/evxfevnt.c
+++ b/drivers/acpi/acpica/evxfevnt.c
@@ -51,7 +51,7 @@
 ACPI_MODULE_NAME("evxfevnt")
 
 /* Local prototypes */
-acpi_status
+static acpi_status
 acpi_ev_get_gpe_device(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
 		       struct acpi_gpe_block_info *gpe_block, void *context);
 
@@ -785,7 +785,7 @@
  *              block device. NULL if the GPE is one of the FADT-defined GPEs.
  *
  ******************************************************************************/
-acpi_status
+static acpi_status
 acpi_ev_get_gpe_device(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
 		       struct acpi_gpe_block_info *gpe_block, void *context)
 {
diff --git a/drivers/acpi/acpica/exconfig.c b/drivers/acpi/acpica/exconfig.c
index 3deb20a..277fd60 100644
--- a/drivers/acpi/acpica/exconfig.c
+++ b/drivers/acpi/acpica/exconfig.c
@@ -47,6 +47,7 @@
 #include "acnamesp.h"
 #include "actables.h"
 #include "acdispat.h"
+#include "acevents.h"
 
 #define _COMPONENT          ACPI_EXECUTER
 ACPI_MODULE_NAME("exconfig")
@@ -57,6 +58,10 @@
 		  struct acpi_namespace_node *parent_node,
 		  union acpi_operand_object **ddb_handle);
 
+static acpi_status
+acpi_ex_region_read(union acpi_operand_object *obj_desc,
+		    u32 length, u8 *buffer);
+
 /*******************************************************************************
  *
  * FUNCTION:    acpi_ex_add_table
@@ -91,6 +96,7 @@
 
 	/* Init the table handle */
 
+	obj_desc->common.flags |= AOPOBJ_DATA_VALID;
 	obj_desc->reference.class = ACPI_REFCLASS_TABLE;
 	*ddb_handle = obj_desc;
 
@@ -229,6 +235,8 @@
 				       walk_state);
 		if (ACPI_FAILURE(status)) {
 			(void)acpi_ex_unload_table(ddb_handle);
+
+			acpi_ut_remove_reference(ddb_handle);
 			return_ACPI_STATUS(status);
 		}
 	}
@@ -254,6 +262,47 @@
 
 /*******************************************************************************
  *
+ * FUNCTION:    acpi_ex_region_read
+ *
+ * PARAMETERS:  obj_desc        - Region descriptor
+ *              Length          - Number of bytes to read
+ *              Buffer          - Pointer to where to put the data
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Read data from an operation region. The read starts from the
+ *              beginning of the region.
+ *
+ ******************************************************************************/
+
+static acpi_status
+acpi_ex_region_read(union acpi_operand_object *obj_desc, u32 length, u8 *buffer)
+{
+	acpi_status status;
+	acpi_integer value;
+	u32 region_offset = 0;
+	u32 i;
+
+	/* Bytewise reads */
+
+	for (i = 0; i < length; i++) {
+		status = acpi_ev_address_space_dispatch(obj_desc, ACPI_READ,
+							region_offset, 8,
+							&value);
+		if (ACPI_FAILURE(status)) {
+			return status;
+		}
+
+		*buffer = (u8)value;
+		buffer++;
+		region_offset++;
+	}
+
+	return AE_OK;
+}
+
+/*******************************************************************************
+ *
  * FUNCTION:    acpi_ex_load_op
  *
  * PARAMETERS:  obj_desc        - Region or Buffer/Field where the table will be
@@ -314,18 +363,23 @@
 			}
 		}
 
-		/*
-		 * Map the table header and get the actual table length. The region
-		 * length is not guaranteed to be the same as the table length.
-		 */
-		table = acpi_os_map_memory(obj_desc->region.address,
-					   sizeof(struct acpi_table_header));
+		/* Get the table header first so we can get the table length */
+
+		table = ACPI_ALLOCATE(sizeof(struct acpi_table_header));
 		if (!table) {
 			return_ACPI_STATUS(AE_NO_MEMORY);
 		}
 
+		status =
+		    acpi_ex_region_read(obj_desc,
+					sizeof(struct acpi_table_header),
+					ACPI_CAST_PTR(u8, table));
 		length = table->length;
-		acpi_os_unmap_memory(table, sizeof(struct acpi_table_header));
+		ACPI_FREE(table);
+
+		if (ACPI_FAILURE(status)) {
+			return_ACPI_STATUS(status);
+		}
 
 		/* Must have at least an ACPI table header */
 
@@ -334,10 +388,19 @@
 		}
 
 		/*
-		 * The memory region is not guaranteed to remain stable and we must
-		 * copy the table to a local buffer. For example, the memory region
-		 * is corrupted after suspend on some machines. Dynamically loaded
-		 * tables are usually small, so this overhead is minimal.
+		 * The original implementation simply mapped the table, with no copy.
+		 * However, the memory region is not guaranteed to remain stable and
+		 * we must copy the table to a local buffer. For example, the memory
+		 * region is corrupted after suspend on some machines. Dynamically
+		 * loaded tables are usually small, so this overhead is minimal.
+		 *
+		 * The latest implementation (5/2009) does not use a mapping at all.
+		 * We use the low-level operation region interface to read the table
+		 * instead of the obvious optimization of using a direct mapping.
+		 * This maintains a consistent use of operation regions across the
+		 * entire subsystem. This is important if additional processing must
+		 * be performed in the (possibly user-installed) operation region
+		 * handler. For example, acpi_exec and ASLTS depend on this.
 		 */
 
 		/* Allocate a buffer for the table */
@@ -347,17 +410,16 @@
 			return_ACPI_STATUS(AE_NO_MEMORY);
 		}
 
-		/* Map the entire table and copy it */
+		/* Read the entire table */
 
-		table = acpi_os_map_memory(obj_desc->region.address, length);
-		if (!table) {
+		status = acpi_ex_region_read(obj_desc, length,
+					     ACPI_CAST_PTR(u8,
+							   table_desc.pointer));
+		if (ACPI_FAILURE(status)) {
 			ACPI_FREE(table_desc.pointer);
-			return_ACPI_STATUS(AE_NO_MEMORY);
+			return_ACPI_STATUS(status);
 		}
 
-		ACPI_MEMCPY(table_desc.pointer, table, length);
-		acpi_os_unmap_memory(table, length);
-
 		table_desc.address = obj_desc->region.address;
 		break;
 
@@ -454,6 +516,10 @@
 		return_ACPI_STATUS(status);
 	}
 
+	/* Remove the reference added by acpi_ex_store above */
+
+	acpi_ut_remove_reference(ddb_handle);
+
 	/* Invoke table handler if present */
 
 	if (acpi_gbl_table_handler) {
@@ -495,13 +561,18 @@
 
 	/*
 	 * Validate the handle
-	 * Although the handle is partially validated in acpi_ex_reconfiguration(),
+	 * Although the handle is partially validated in acpi_ex_reconfiguration()
 	 * when it calls acpi_ex_resolve_operands(), the handle is more completely
 	 * validated here.
+	 *
+	 * Handle must be a valid operand object of type reference. Also, the
+	 * ddb_handle must still be marked valid (table has not been previously
+	 * unloaded)
 	 */
 	if ((!ddb_handle) ||
 	    (ACPI_GET_DESCRIPTOR_TYPE(ddb_handle) != ACPI_DESC_TYPE_OPERAND) ||
-	    (ddb_handle->common.type != ACPI_TYPE_LOCAL_REFERENCE)) {
+	    (ddb_handle->common.type != ACPI_TYPE_LOCAL_REFERENCE) ||
+	    (!(ddb_handle->common.flags & AOPOBJ_DATA_VALID))) {
 		return_ACPI_STATUS(AE_BAD_PARAMETER);
 	}
 
@@ -509,6 +580,12 @@
 
 	table_index = table_desc->reference.value;
 
+	/* Ensure the table is still loaded */
+
+	if (!acpi_tb_is_table_loaded(table_index)) {
+		return_ACPI_STATUS(AE_NOT_EXIST);
+	}
+
 	/* Invoke table handler if present */
 
 	if (acpi_gbl_table_handler) {
@@ -530,8 +607,10 @@
 	(void)acpi_tb_release_owner_id(table_index);
 	acpi_tb_set_table_loaded_flag(table_index, FALSE);
 
-	/* Table unloaded, remove a reference to the ddb_handle object */
-
-	acpi_ut_remove_reference(ddb_handle);
+	/*
+	 * Invalidate the handle. We do this because the handle may be stored
+	 * in a named object and may not be actually deleted until much later.
+	 */
+	ddb_handle->common.flags &= ~AOPOBJ_DATA_VALID;
 	return_ACPI_STATUS(AE_OK);
 }
diff --git a/drivers/acpi/acpica/excreate.c b/drivers/acpi/acpica/excreate.c
index a57ad25..02b25d2 100644
--- a/drivers/acpi/acpica/excreate.c
+++ b/drivers/acpi/acpica/excreate.c
@@ -502,7 +502,7 @@
 		 * ACPI 2.0: sync_level = sync_level in method declaration
 		 */
 		obj_desc->method.sync_level = (u8)
-		    ((method_flags & AML_METHOD_SYNCH_LEVEL) >> 4);
+		    ((method_flags & AML_METHOD_SYNC_LEVEL) >> 4);
 	}
 
 	/* Attach the new object to the method Node */
diff --git a/drivers/acpi/acpica/exdump.c b/drivers/acpi/acpica/exdump.c
index 89d141f..ec52461 100644
--- a/drivers/acpi/acpica/exdump.c
+++ b/drivers/acpi/acpica/exdump.c
@@ -120,9 +120,11 @@
 	{ACPI_EXD_POINTER, ACPI_EXD_OFFSET(event.os_semaphore), "OsSemaphore"}
 };
 
-static struct acpi_exdump_info acpi_ex_dump_method[8] = {
+static struct acpi_exdump_info acpi_ex_dump_method[9] = {
 	{ACPI_EXD_INIT, ACPI_EXD_TABLE_SIZE(acpi_ex_dump_method), NULL},
-	{ACPI_EXD_UINT8, ACPI_EXD_OFFSET(method.param_count), "ParamCount"},
+	{ACPI_EXD_UINT8, ACPI_EXD_OFFSET(method.method_flags), "Method Flags"},
+	{ACPI_EXD_UINT8, ACPI_EXD_OFFSET(method.param_count),
+	 "Parameter Count"},
 	{ACPI_EXD_UINT8, ACPI_EXD_OFFSET(method.sync_level), "Sync Level"},
 	{ACPI_EXD_POINTER, ACPI_EXD_OFFSET(method.mutex), "Mutex"},
 	{ACPI_EXD_UINT8, ACPI_EXD_OFFSET(method.owner_id), "Owner Id"},
diff --git a/drivers/acpi/acpica/exfldio.c b/drivers/acpi/acpica/exfldio.c
index 99cee61..d4075b8 100644
--- a/drivers/acpi/acpica/exfldio.c
+++ b/drivers/acpi/acpica/exfldio.c
@@ -222,7 +222,7 @@
 {
 	acpi_status status;
 	union acpi_operand_object *rgn_desc;
-	acpi_physical_address address;
+	u32 region_offset;
 
 	ACPI_FUNCTION_TRACE(ex_access_region);
 
@@ -243,7 +243,7 @@
 	 * 3) The current offset into the field
 	 */
 	rgn_desc = obj_desc->common_field.region_obj;
-	address = rgn_desc->region.address +
+	region_offset =
 	    obj_desc->common_field.base_byte_offset + field_datum_byte_offset;
 
 	if ((function & ACPI_IO_MASK) == ACPI_READ) {
@@ -260,16 +260,18 @@
 			      obj_desc->common_field.access_byte_width,
 			      obj_desc->common_field.base_byte_offset,
 			      field_datum_byte_offset, ACPI_CAST_PTR(void,
-								     address)));
+								     (rgn_desc->
+								      region.
+								      address +
+								      region_offset))));
 
 	/* Invoke the appropriate address_space/op_region handler */
 
-	status = acpi_ev_address_space_dispatch(rgn_desc, function,
-						address,
-						ACPI_MUL_8(obj_desc->
-							   common_field.
-							   access_byte_width),
-						value);
+	status =
+	    acpi_ev_address_space_dispatch(rgn_desc, function, region_offset,
+					   ACPI_MUL_8(obj_desc->common_field.
+						      access_byte_width),
+					   value);
 
 	if (ACPI_FAILURE(status)) {
 		if (status == AE_NOT_IMPLEMENTED) {
diff --git a/drivers/acpi/acpica/exmutex.c b/drivers/acpi/acpica/exmutex.c
index d301c1f..2f01142 100644
--- a/drivers/acpi/acpica/exmutex.c
+++ b/drivers/acpi/acpica/exmutex.c
@@ -83,6 +83,15 @@
 
 	if (obj_desc->mutex.prev) {
 		(obj_desc->mutex.prev)->mutex.next = obj_desc->mutex.next;
+
+		/*
+		 * Migrate the previous sync level associated with this mutex to the
+		 * previous mutex on the list so that it may be preserved. This handles
+		 * the case where several mutexes have been acquired at the same level,
+		 * but are not released in opposite order.
+		 */
+		(obj_desc->mutex.prev)->mutex.original_sync_level =
+		    obj_desc->mutex.original_sync_level;
 	} else {
 		thread->acquired_mutex_list = obj_desc->mutex.next;
 	}
@@ -349,6 +358,7 @@
 		      struct acpi_walk_state *walk_state)
 {
 	acpi_status status = AE_OK;
+	u8 previous_sync_level;
 
 	ACPI_FUNCTION_TRACE(ex_release_mutex);
 
@@ -373,11 +383,12 @@
 	     walk_state->thread->thread_id)
 	    && (obj_desc != acpi_gbl_global_lock_mutex)) {
 		ACPI_ERROR((AE_INFO,
-			    "Thread %lX cannot release Mutex [%4.4s] acquired by thread %lX",
-			    (unsigned long)walk_state->thread->thread_id,
+			    "Thread %p cannot release Mutex [%4.4s] acquired by thread %p",
+			    ACPI_CAST_PTR(void, walk_state->thread->thread_id),
 			    acpi_ut_get_node_name(obj_desc->mutex.node),
-			    (unsigned long)obj_desc->mutex.owner_thread->
-			    thread_id));
+			    ACPI_CAST_PTR(void,
+					  obj_desc->mutex.owner_thread->
+					  thread_id)));
 		return_ACPI_STATUS(AE_AML_NOT_OWNER);
 	}
 
@@ -391,10 +402,14 @@
 	}
 
 	/*
-	 * The sync level of the mutex must be less than or equal to the current
-	 * sync level
+	 * The sync level of the mutex must be equal to the current sync level. In
+	 * other words, the current level means that at least one mutex at that
+	 * level is currently being held. Attempting to release a mutex of a
+	 * different level can only mean that the mutex ordering rule is being
+	 * violated. This behavior is clarified in ACPI 4.0 specification.
 	 */
-	if (obj_desc->mutex.sync_level > walk_state->thread->current_sync_level) {
+	if (obj_desc->mutex.sync_level !=
+	    walk_state->thread->current_sync_level) {
 		ACPI_ERROR((AE_INFO,
 			    "Cannot release Mutex [%4.4s], SyncLevel mismatch: mutex %d current %d",
 			    acpi_ut_get_node_name(obj_desc->mutex.node),
@@ -403,14 +418,24 @@
 		return_ACPI_STATUS(AE_AML_MUTEX_ORDER);
 	}
 
+	/*
+	 * Get the previous sync_level from the head of the acquired mutex list.
+	 * This handles the case where several mutexes at the same level have been
+	 * acquired, but are not released in reverse order.
+	 */
+	previous_sync_level =
+	    walk_state->thread->acquired_mutex_list->mutex.original_sync_level;
+
 	status = acpi_ex_release_mutex_object(obj_desc);
+	if (ACPI_FAILURE(status)) {
+		return_ACPI_STATUS(status);
+	}
 
 	if (obj_desc->mutex.acquisition_depth == 0) {
 
-		/* Restore the original sync_level */
+		/* Restore the previous sync_level */
 
-		walk_state->thread->current_sync_level =
-		    obj_desc->mutex.original_sync_level;
+		walk_state->thread->current_sync_level = previous_sync_level;
 	}
 	return_ACPI_STATUS(status);
 }
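
A worked trace of the two changes above; the sync levels are illustrative, and original_sync_level is taken to record the level that was current before each acquire:

/*
 *   start                      current_sync_level = 0
 *   acquire M1 (SyncLevel 4)   M1.original = 0, current = 4, list: M1
 *   acquire M2 (SyncLevel 4)   M2.original = 4, current = 4, list: M2, M1
 *   release M1 (out of order)  the unlink migrates M1.original (0) onto M2;
 *                              current stays 4, read from the list head
 *   release M2                 the list head now carries 0, so current drops
 *                              back to 0, the level in force before M1
 *
 * Without the migration, the final release would restore M2's original value
 * (4) and the thread would remain one sync level too high.
 */
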
diff --git a/drivers/acpi/acpica/exstore.c b/drivers/acpi/acpica/exstore.c
index 90d6061..6efd07a 100644
--- a/drivers/acpi/acpica/exstore.c
+++ b/drivers/acpi/acpica/exstore.c
@@ -193,10 +193,12 @@
 
 		case ACPI_REFCLASS_TABLE:
 
+			/* Case for ddb_handle */
+
 			ACPI_DEBUG_PRINT_RAW((ACPI_DB_DEBUG_OBJECT,
 					      "Table Index 0x%X\n",
 					      source_desc->reference.value));
-			break;
+			return;
 
 		default:
 			break;
diff --git a/drivers/acpi/acpica/hwregs.c b/drivers/acpi/acpica/hwregs.c
index 7b2fb60..23d5505 100644
--- a/drivers/acpi/acpica/hwregs.c
+++ b/drivers/acpi/acpica/hwregs.c
@@ -81,9 +81,9 @@
 
 	ACPI_FUNCTION_TRACE(hw_clear_acpi_status);
 
-	ACPI_DEBUG_PRINT((ACPI_DB_IO, "About to write %04X to %0llX\n",
+	ACPI_DEBUG_PRINT((ACPI_DB_IO, "About to write %04X to %8.8X%8.8X\n",
 			  ACPI_BITMASK_ALL_FIXED_STATUS,
-			  acpi_gbl_xpm1a_status.address));
+			  ACPI_FORMAT_UINT64(acpi_gbl_xpm1a_status.address)));
 
 	lock_flags = acpi_os_acquire_lock(acpi_gbl_hardware_lock);
 
diff --git a/drivers/acpi/acpica/nsalloc.c b/drivers/acpi/acpica/nsalloc.c
index aceb931..efc971a 100644
--- a/drivers/acpi/acpica/nsalloc.c
+++ b/drivers/acpi/acpica/nsalloc.c
@@ -334,9 +334,7 @@
 
 		/* Get the next node in this scope (NULL if none) */
 
-		child_node =
-		    acpi_ns_get_next_node(ACPI_TYPE_ANY, parent_node,
-					  child_node);
+		child_node = acpi_ns_get_next_node(parent_node, child_node);
 		if (child_node) {
 
 			/* Found a child node - detach any attached object */
@@ -345,8 +343,7 @@
 
 			/* Check if this node has any children */
 
-			if (acpi_ns_get_next_node
-			    (ACPI_TYPE_ANY, child_node, NULL)) {
+			if (child_node->child) {
 				/*
 				 * There is at least one child of this node,
 				 * visit the node
@@ -432,9 +429,7 @@
 		 * Get the next child of this parent node. When child_node is NULL,
 		 * the first child of the parent is returned
 		 */
-		child_node =
-		    acpi_ns_get_next_node(ACPI_TYPE_ANY, parent_node,
-					  child_node);
+		child_node = acpi_ns_get_next_node(parent_node, child_node);
 
 		if (deletion_node) {
 			acpi_ns_delete_children(deletion_node);
@@ -452,8 +447,7 @@
 
 			/* Check if this node has any children */
 
-			if (acpi_ns_get_next_node
-			    (ACPI_TYPE_ANY, child_node, NULL)) {
+			if (child_node->child) {
 				/*
 				 * There is at least one child of this node,
 				 * visit the node
diff --git a/drivers/acpi/acpica/nsnames.c b/drivers/acpi/acpica/nsnames.c
index ae3dc10..af8e6bc 100644
--- a/drivers/acpi/acpica/nsnames.c
+++ b/drivers/acpi/acpica/nsnames.c
@@ -149,7 +149,7 @@
 
 	name_buffer = ACPI_ALLOCATE_ZEROED(size);
 	if (!name_buffer) {
-		ACPI_ERROR((AE_INFO, "Allocation failure"));
+		ACPI_ERROR((AE_INFO, "Could not allocate %u bytes", (u32)size));
 		return_PTR(NULL);
 	}
 
diff --git a/drivers/acpi/acpica/nsobject.c b/drivers/acpi/acpica/nsobject.c
index 3eb20bf..60f3af0 100644
--- a/drivers/acpi/acpica/nsobject.c
+++ b/drivers/acpi/acpica/nsobject.c
@@ -213,6 +213,15 @@
 		return_VOID;
 	}
 
+	if (node->flags & ANOBJ_ALLOCATED_BUFFER) {
+
+		/* Free the dynamic aml buffer */
+
+		if (obj_desc->common.type == ACPI_TYPE_METHOD) {
+			ACPI_FREE(obj_desc->method.aml_start);
+		}
+	}
+
 	/* Clear the entry in all cases */
 
 	node->object = NULL;
diff --git a/drivers/acpi/acpica/nspredef.c b/drivers/acpi/acpica/nspredef.c
index d9e8cbc..7f8e066 100644
--- a/drivers/acpi/acpica/nspredef.c
+++ b/drivers/acpi/acpica/nspredef.c
@@ -144,7 +144,7 @@
 
 	pathname = acpi_ns_get_external_pathname(node);
 	if (!pathname) {
-		pathname = ACPI_CAST_PTR(char, predefined->info.name);
+		return AE_OK;	/* Could not get pathname, ignore */
 	}
 
 	/*
@@ -230,10 +230,7 @@
 	}
 
       exit:
-	if (pathname != predefined->info.name) {
-		ACPI_FREE(pathname);
-	}
-
+	ACPI_FREE(pathname);
 	return (status);
 }
 
diff --git a/drivers/acpi/acpica/nssearch.c b/drivers/acpi/acpica/nssearch.c
index f9b4f51..7e86563 100644
--- a/drivers/acpi/acpica/nssearch.c
+++ b/drivers/acpi/acpica/nssearch.c
@@ -45,6 +45,10 @@
 #include "accommon.h"
 #include "acnamesp.h"
 
+#ifdef ACPI_ASL_COMPILER
+#include "amlcode.h"
+#endif
+
 #define _COMPONENT          ACPI_NAMESPACE
 ACPI_MODULE_NAME("nssearch")
 
diff --git a/drivers/acpi/acpica/nswalk.c b/drivers/acpi/acpica/nswalk.c
index 83e3aa6..35539df 100644
--- a/drivers/acpi/acpica/nswalk.c
+++ b/drivers/acpi/acpica/nswalk.c
@@ -52,6 +52,51 @@
  *
  * FUNCTION:    acpi_ns_get_next_node
  *
+ * PARAMETERS:  parent_node         - Parent node whose children we are
+ *                                    getting
+ *              child_node          - Previous child that was found.
+ *                                    The NEXT child will be returned
+ *
+ * RETURN:      struct acpi_namespace_node - Pointer to the NEXT child or NULL if
+ *                                    none is found.
+ *
+ * DESCRIPTION: Return the next peer node within the namespace. If child_node
+ *              is valid, parent_node is ignored and the next peer of
+ *              child_node is returned. Otherwise, the first child of
+ *              parent_node is returned.
+ *
+ ******************************************************************************/
+struct acpi_namespace_node *acpi_ns_get_next_node(struct acpi_namespace_node
+						  *parent_node,
+						  struct acpi_namespace_node
+						  *child_node)
+{
+	ACPI_FUNCTION_ENTRY();
+
+	if (!child_node) {
+
+		/* It's really the parent's _scope_ that we want */
+
+		return parent_node->child;
+	}
+
+	/*
+	 * Get the next node.
+	 *
+	 * If we are at the end of this peer list, return NULL
+	 */
+	if (child_node->flags & ANOBJ_END_OF_PEER_LIST) {
+		return NULL;
+	}
+
+	/* Otherwise just return the next peer */
+
+	return child_node->peer;
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ns_get_next_node_typed
+ *
  * PARAMETERS:  Type                - Type of node to be searched for
  *              parent_node         - Parent node whose children we are
  *                                    getting
@@ -66,26 +111,21 @@
  *              within Scope is returned.
  *
  ******************************************************************************/
-struct acpi_namespace_node *acpi_ns_get_next_node(acpi_object_type type, struct acpi_namespace_node
-						  *parent_node, struct acpi_namespace_node
-						  *child_node)
+
+struct acpi_namespace_node *acpi_ns_get_next_node_typed(acpi_object_type type,
+							struct
+							acpi_namespace_node
+							*parent_node,
+							struct
+							acpi_namespace_node
+							*child_node)
 {
 	struct acpi_namespace_node *next_node = NULL;
 
 	ACPI_FUNCTION_ENTRY();
 
-	if (!child_node) {
+	next_node = acpi_ns_get_next_node(parent_node, child_node);
 
-		/* It's really the parent's _scope_ that we want */
-
-		next_node = parent_node->child;
-	}
-
-	else {
-		/* Start search at the NEXT node */
-
-		next_node = acpi_ns_get_next_valid_node(child_node);
-	}
 
 	/* If any type is OK, we are done */
 
@@ -186,9 +226,7 @@
 		/* Get the next node in this scope.  Null if not found */
 
 		status = AE_OK;
-		child_node =
-		    acpi_ns_get_next_node(ACPI_TYPE_ANY, parent_node,
-					  child_node);
+		child_node = acpi_ns_get_next_node(parent_node, child_node);
 		if (child_node) {
 
 			/* Found next child, get the type if we are not searching for ANY */
@@ -269,8 +307,7 @@
 			 * function has specified that the maximum depth has been reached.
 			 */
 			if ((level < max_depth) && (status != AE_CTRL_DEPTH)) {
-				if (acpi_ns_get_next_node
-				    (ACPI_TYPE_ANY, child_node, NULL)) {
+				if (child_node->child) {
 
 					/* There is at least one child of this node, visit it */
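
The rework above splits the walker in two: an untyped acpi_ns_get_next_node() that only follows the child/peer links, and a typed variant that filters on top of it; callers that used ACPI_TYPE_ANY can now simply test node->child. A condensed sketch of the idea (toy names, not the ACPICA code):

struct toy_node {
	int type;
	unsigned int flags;            /* TOY_END_OF_PEER_LIST ends the list */
	struct toy_node *child;
	struct toy_node *peer;
};

#define TOY_END_OF_PEER_LIST	0x1
#define TOY_TYPE_ANY		0

static struct toy_node *toy_get_next_node(struct toy_node *parent,
					  struct toy_node *child)
{
	if (!child)
		return parent->child;          /* first child of the scope */
	if (child->flags & TOY_END_OF_PEER_LIST)
		return NULL;                   /* end of this peer list */
	return child->peer;                    /* otherwise the next peer */
}

static struct toy_node *toy_get_next_node_typed(int type,
						struct toy_node *parent,
						struct toy_node *child)
{
	struct toy_node *next = toy_get_next_node(parent, child);

	if (type == TOY_TYPE_ANY)
		return next;
	while (next && next->type != type)    /* skip nodes of other types */
		next = toy_get_next_node(parent, next);
	return next;
}
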
 
diff --git a/drivers/acpi/acpica/nsxfname.c b/drivers/acpi/acpica/nsxfname.c
index 9589fea..f23593d 100644
--- a/drivers/acpi/acpica/nsxfname.c
+++ b/drivers/acpi/acpica/nsxfname.c
@@ -45,6 +45,8 @@
 #include <acpi/acpi.h>
 #include "accommon.h"
 #include "acnamesp.h"
+#include "acparser.h"
+#include "amlcode.h"
 
 #define _COMPONENT          ACPI_NAMESPACE
 ACPI_MODULE_NAME("nsxfname")
@@ -358,3 +360,151 @@
 }
 
 ACPI_EXPORT_SYMBOL(acpi_get_object_info)
+
+/******************************************************************************
+ *
+ * FUNCTION:    acpi_install_method
+ *
+ * PARAMETERS:  Buffer         - An ACPI table containing one control method
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Install a control method into the namespace. If the method
+ *              name already exists in the namespace, it is overwritten. The
+ *              input buffer must contain a valid DSDT or SSDT containing a
+ *              single control method.
+ *
+ ******************************************************************************/
+acpi_status acpi_install_method(u8 *buffer)
+{
+	struct acpi_table_header *table =
+	    ACPI_CAST_PTR(struct acpi_table_header, buffer);
+	u8 *aml_buffer;
+	u8 *aml_start;
+	char *path;
+	struct acpi_namespace_node *node;
+	union acpi_operand_object *method_obj;
+	struct acpi_parse_state parser_state;
+	u32 aml_length;
+	u16 opcode;
+	u8 method_flags;
+	acpi_status status;
+
+	/* Parameter validation */
+
+	if (!buffer) {
+		return AE_BAD_PARAMETER;
+	}
+
+	/* Table must be a DSDT or SSDT */
+
+	if (!ACPI_COMPARE_NAME(table->signature, ACPI_SIG_DSDT) &&
+	    !ACPI_COMPARE_NAME(table->signature, ACPI_SIG_SSDT)) {
+		return AE_BAD_HEADER;
+	}
+
+	/* First AML opcode in the table must be a control method */
+
+	parser_state.aml = buffer + sizeof(struct acpi_table_header);
+	opcode = acpi_ps_peek_opcode(&parser_state);
+	if (opcode != AML_METHOD_OP) {
+		return AE_BAD_PARAMETER;
+	}
+
+	/* Extract method information from the raw AML */
+
+	parser_state.aml += acpi_ps_get_opcode_size(opcode);
+	parser_state.pkg_end = acpi_ps_get_next_package_end(&parser_state);
+	path = acpi_ps_get_next_namestring(&parser_state);
+	method_flags = *parser_state.aml++;
+	aml_start = parser_state.aml;
+	aml_length = ACPI_PTR_DIFF(parser_state.pkg_end, aml_start);
+
+	/*
+	 * Allocate resources up-front. We don't want to have to delete a new
+	 * node from the namespace if we cannot allocate memory.
+	 */
+	aml_buffer = ACPI_ALLOCATE(aml_length);
+	if (!aml_buffer) {
+		return AE_NO_MEMORY;
+	}
+
+	method_obj = acpi_ut_create_internal_object(ACPI_TYPE_METHOD);
+	if (!method_obj) {
+		ACPI_FREE(aml_buffer);
+		return AE_NO_MEMORY;
+	}
+
+	/* Lock namespace for acpi_ns_lookup, we may be creating a new node */
+
+	status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
+	if (ACPI_FAILURE(status)) {
+		goto error_exit;
+	}
+
+	/* The lookup either returns an existing node or creates a new one */
+
+	status =
+	    acpi_ns_lookup(NULL, path, ACPI_TYPE_METHOD, ACPI_IMODE_LOAD_PASS1,
+			   ACPI_NS_DONT_OPEN_SCOPE | ACPI_NS_ERROR_IF_FOUND,
+			   NULL, &node);
+
+	(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
+
+	if (ACPI_FAILURE(status)) {	/* ns_lookup */
+		if (status != AE_ALREADY_EXISTS) {
+			goto error_exit;
+		}
+
+		/* Node existed previously, make sure it is a method node */
+
+		if (node->type != ACPI_TYPE_METHOD) {
+			status = AE_TYPE;
+			goto error_exit;
+		}
+	}
+
+	/* Copy the method AML to the local buffer */
+
+	ACPI_MEMCPY(aml_buffer, aml_start, aml_length);
+
+	/* Initialize the method object with the new method's information */
+
+	method_obj->method.aml_start = aml_buffer;
+	method_obj->method.aml_length = aml_length;
+
+	method_obj->method.param_count = (u8)
+	    (method_flags & AML_METHOD_ARG_COUNT);
+
+	method_obj->method.method_flags = (u8)
+	    (method_flags & ~AML_METHOD_ARG_COUNT);
+
+	if (method_flags & AML_METHOD_SERIALIZED) {
+		method_obj->method.sync_level = (u8)
+		    ((method_flags & AML_METHOD_SYNC_LEVEL) >> 4);
+	}
+
+	/*
+	 * Now that it is complete, we can attach the new method object to
+	 * the method Node (detaches/deletes any existing object)
+	 */
+	status = acpi_ns_attach_object(node, method_obj, ACPI_TYPE_METHOD);
+
+	/*
+	 * Flag indicates AML buffer is dynamic, must be deleted later.
+	 * Must be set only after attach above.
+	 */
+	node->flags |= ANOBJ_ALLOCATED_BUFFER;
+
+	/* Remove local reference to the method object */
+
+	acpi_ut_remove_reference(method_obj);
+	return status;
+
+error_exit:
+
+	ACPI_FREE(aml_buffer);
+	ACPI_FREE(method_obj);
+	return status;
+}
+ACPI_EXPORT_SYMBOL(acpi_install_method)
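
acpi_install_method() expects a DSDT or SSDT image whose first AML opcode after the table header is a Method. A hedged usage sketch; the AML symbol below is hypothetical and would normally come from an ASL compiler such as iasl:

#include <linux/kernel.h>
#include <acpi/acpi.h>

/*
 * The SSDT image is assumed to be the compiled form of something like:
 *
 *   DefinitionBlock ("", "SSDT", 2, "OEM", "EXAMPLE", 1)
 *   {
 *       Method (\_SB.MTH0, 0, NotSerialized) { Return (Zero) }
 *   }
 */
extern u8 example_method_aml[];	/* hypothetical, produced by iasl */

static acpi_status example_install(void)
{
	acpi_status status = acpi_install_method(example_method_aml);

	if (ACPI_FAILURE(status))
		pr_err("acpi_install_method failed: %s\n",
		       acpi_format_exception(status));
	return status;
}
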
diff --git a/drivers/acpi/acpica/nsxfobj.c b/drivers/acpi/acpica/nsxfobj.c
index 1c7efc1..4071bad 100644
--- a/drivers/acpi/acpica/nsxfobj.c
+++ b/drivers/acpi/acpica/nsxfobj.c
@@ -162,6 +162,7 @@
 acpi_status acpi_get_parent(acpi_handle handle, acpi_handle * ret_handle)
 {
 	struct acpi_namespace_node *node;
+	struct acpi_namespace_node *parent_node;
 	acpi_status status;
 
 	if (!ret_handle) {
@@ -189,12 +190,12 @@
 
 	/* Get the parent entry */
 
-	*ret_handle =
-	    acpi_ns_convert_entry_to_handle(acpi_ns_get_parent_node(node));
+	parent_node = acpi_ns_get_parent_node(node);
+	*ret_handle = acpi_ns_convert_entry_to_handle(parent_node);
 
 	/* Return exception if parent is null */
 
-	if (!acpi_ns_get_parent_node(node)) {
+	if (!parent_node) {
 		status = AE_NULL_ENTRY;
 	}
 
@@ -268,7 +269,7 @@
 
 	/* Internal function does the real work */
 
-	node = acpi_ns_get_next_node(type, parent_node, child_node);
+	node = acpi_ns_get_next_node_typed(type, parent_node, child_node);
 	if (!node) {
 		status = AE_NOT_FOUND;
 		goto unlock_and_exit;
diff --git a/drivers/acpi/acpica/rscalc.c b/drivers/acpi/acpica/rscalc.c
index 88b5a2c..3c4dcc3 100644
--- a/drivers/acpi/acpica/rscalc.c
+++ b/drivers/acpi/acpica/rscalc.c
@@ -547,7 +547,7 @@
 
 		if (!package_element ||
 		    (package_element->common.type != ACPI_TYPE_PACKAGE)) {
-			return_ACPI_STATUS (AE_AML_OPERAND_TYPE);
+			return_ACPI_STATUS(AE_AML_OPERAND_TYPE);
 		}
 
 		/*
@@ -593,9 +593,6 @@
 			} else {
 				temp_size_needed +=
 				    acpi_ns_get_pathname_length((*sub_object_list)->reference.node);
-				if (!temp_size_needed) {
-					return_ACPI_STATUS(AE_BAD_PARAMETER);
-				}
 			}
 		} else {
 			/*
diff --git a/drivers/acpi/acpica/rsxface.c b/drivers/acpi/acpica/rsxface.c
index 69a2aa5..395212b 100644
--- a/drivers/acpi/acpica/rsxface.c
+++ b/drivers/acpi/acpica/rsxface.c
@@ -338,13 +338,17 @@
 	switch (resource->type) {
 	case ACPI_RESOURCE_TYPE_ADDRESS16:
 
-		address16 = (struct acpi_resource_address16 *)&resource->data;
+		address16 =
+		    ACPI_CAST_PTR(struct acpi_resource_address16,
+				  &resource->data);
 		ACPI_COPY_ADDRESS(out, address16);
 		break;
 
 	case ACPI_RESOURCE_TYPE_ADDRESS32:
 
-		address32 = (struct acpi_resource_address32 *)&resource->data;
+		address32 =
+		    ACPI_CAST_PTR(struct acpi_resource_address32,
+				  &resource->data);
 		ACPI_COPY_ADDRESS(out, address32);
 		break;
 
diff --git a/drivers/acpi/acpica/tbfadt.c b/drivers/acpi/acpica/tbfadt.c
index 71e655d..82b02dc 100644
--- a/drivers/acpi/acpica/tbfadt.c
+++ b/drivers/acpi/acpica/tbfadt.c
@@ -284,9 +284,9 @@
 	if (length > sizeof(struct acpi_table_fadt)) {
 		ACPI_WARNING((AE_INFO,
 			      "FADT (revision %u) is longer than ACPI 2.0 version, "
-			      "truncating length 0x%X to 0x%zX",
-			      table->revision, (unsigned)length,
-			      sizeof(struct acpi_table_fadt)));
+			      "truncating length 0x%X to 0x%X",
+			      table->revision, length,
+			      (u32)sizeof(struct acpi_table_fadt)));
 	}
 
 	/* Clear the entire local FADT */
@@ -441,7 +441,7 @@
 								   &acpi_gbl_FADT,
 								   fadt_info_table
 								   [i].length),
-						     address32);
+						     (u64) address32);
 		}
 	}
 }
@@ -469,7 +469,6 @@
 static void acpi_tb_validate_fadt(void)
 {
 	char *name;
-	u32 *address32;
 	struct acpi_generic_address *address64;
 	u8 length;
 	u32 i;
@@ -505,15 +504,12 @@
 
 	for (i = 0; i < ACPI_FADT_INFO_ENTRIES; i++) {
 		/*
-		 * Generate pointers to the 32-bit and 64-bit addresses, get the
-		 * register length (width), and the register name
+		 * Generate pointer to the 64-bit address, get the register
+		 * length (width) and the register name
 		 */
 		address64 = ACPI_ADD_PTR(struct acpi_generic_address,
 					 &acpi_gbl_FADT,
 					 fadt_info_table[i].address64);
-		address32 =
-		    ACPI_ADD_PTR(u32, &acpi_gbl_FADT,
-				 fadt_info_table[i].address32);
 		length =
 		    *ACPI_ADD_PTR(u8, &acpi_gbl_FADT,
 				  fadt_info_table[i].length);
diff --git a/drivers/acpi/acpica/tbinstal.c b/drivers/acpi/acpica/tbinstal.c
index f865d5a..63e8232 100644
--- a/drivers/acpi/acpica/tbinstal.c
+++ b/drivers/acpi/acpica/tbinstal.c
@@ -472,7 +472,7 @@
 	 * lock may block, and also since the execution of a namespace walk
 	 * must be allowed to use the interpreter.
 	 */
-	acpi_ut_release_mutex(ACPI_MTX_INTERPRETER);
+	(void)acpi_ut_release_mutex(ACPI_MTX_INTERPRETER);
 	status = acpi_ut_acquire_write_lock(&acpi_gbl_namespace_rw_lock);
 
 	acpi_ns_delete_namespace_by_owner(owner_id);
diff --git a/drivers/acpi/acpica/utcopy.c b/drivers/acpi/acpica/utcopy.c
index 919624f..0f0c64b 100644
--- a/drivers/acpi/acpica/utcopy.c
+++ b/drivers/acpi/acpica/utcopy.c
@@ -676,6 +676,7 @@
 {
 	u16 reference_count;
 	union acpi_operand_object *next_object;
+	acpi_status status;
 
 	/* Save fields from destination that we don't want to overwrite */
 
@@ -768,6 +769,28 @@
 		}
 		break;
 
+		/*
+		 * For Mutex and Event objects, we cannot simply copy the underlying
+		 * OS object. We must create a new one.
+		 */
+	case ACPI_TYPE_MUTEX:
+
+		status = acpi_os_create_mutex(&dest_desc->mutex.os_mutex);
+		if (ACPI_FAILURE(status)) {
+			return status;
+		}
+		break;
+
+	case ACPI_TYPE_EVENT:
+
+		status = acpi_os_create_semaphore(ACPI_NO_UNIT_LIMIT, 0,
+						  &dest_desc->event.
+						  os_semaphore);
+		if (ACPI_FAILURE(status)) {
+			return status;
+		}
+		break;
+
 	default:
 		/* Nothing to do for other simple objects */
 		break;
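
The new cases above exist because a Mutex or Event operand object wraps an OS-level synchronization primitive, and a copy must get its own primitive instead of aliasing the source handle. A minimal hedged illustration of the same idea for the mutex case (union acpi_operand_object is an ACPICA-internal type, so this only makes sense inside ACPICA):

/* Hypothetical helper: give a copied Mutex object its own OS mutex. */
static acpi_status example_fixup_copied_mutex(union acpi_operand_object *copy)
{
	copy->mutex.os_mutex = NULL;	/* never reuse the source handle */
	return acpi_os_create_mutex(&copy->mutex.os_mutex);
}
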
diff --git a/drivers/acpi/acpica/utdebug.c b/drivers/acpi/acpica/utdebug.c
index 38821f5..527d729 100644
--- a/drivers/acpi/acpica/utdebug.c
+++ b/drivers/acpi/acpica/utdebug.c
@@ -179,9 +179,9 @@
 	if (thread_id != acpi_gbl_prev_thread_id) {
 		if (ACPI_LV_THREADS & acpi_dbg_level) {
 			acpi_os_printf
-			    ("\n**** Context Switch from TID %lX to TID %lX ****\n\n",
-			     (unsigned long)acpi_gbl_prev_thread_id,
-			     (unsigned long)thread_id);
+			    ("\n**** Context Switch from TID %p to TID %p ****\n\n",
+			     ACPI_CAST_PTR(void, acpi_gbl_prev_thread_id),
+			     ACPI_CAST_PTR(void, thread_id));
 		}
 
 		acpi_gbl_prev_thread_id = thread_id;
@@ -194,7 +194,7 @@
 	acpi_os_printf("%8s-%04ld ", module_name, line_number);
 
 	if (ACPI_LV_THREADS & acpi_dbg_level) {
-		acpi_os_printf("[%04lX] ", (unsigned long)thread_id);
+		acpi_os_printf("[%p] ", ACPI_CAST_PTR(void, thread_id));
 	}
 
 	acpi_os_printf("[%02ld] %-22.22s: ",
diff --git a/drivers/acpi/acpica/utdelete.c b/drivers/acpi/acpica/utdelete.c
index a5ee23b..bc17103 100644
--- a/drivers/acpi/acpica/utdelete.c
+++ b/drivers/acpi/acpica/utdelete.c
@@ -75,6 +75,7 @@
 	union acpi_operand_object *handler_desc;
 	union acpi_operand_object *second_desc;
 	union acpi_operand_object *next_desc;
+	union acpi_operand_object **last_obj_ptr;
 
 	ACPI_FUNCTION_TRACE_PTR(ut_delete_internal_obj, object);
 
@@ -223,6 +224,26 @@
 			 */
 			handler_desc = object->region.handler;
 			if (handler_desc) {
+				next_desc =
+				    handler_desc->address_space.region_list;
+				last_obj_ptr =
+				    &handler_desc->address_space.region_list;
+
+				/* Remove the region object from the handler's list */
+
+				while (next_desc) {
+					if (next_desc == object) {
+						*last_obj_ptr =
+						    next_desc->region.next;
+						break;
+					}
+
+					/* Walk the linked list of handlers */
+
+					last_obj_ptr = &next_desc->region.next;
+					next_desc = next_desc->region.next;
+				}
+
 				if (handler_desc->address_space.handler_flags &
 				    ACPI_ADDR_HANDLER_DEFAULT_INSTALLED) {
 
diff --git a/drivers/acpi/acpica/utmisc.c b/drivers/acpi/acpica/utmisc.c
index 1c9e250..fbe7823 100644
--- a/drivers/acpi/acpica/utmisc.c
+++ b/drivers/acpi/acpica/utmisc.c
@@ -1033,11 +1033,12 @@
 {
 	va_list args;
 
-	acpi_os_printf("ACPI Error (%s-%04d): ", module_name, line_number);
+	acpi_os_printf("ACPI Error: ");
 
 	va_start(args, format);
 	acpi_os_vprintf(format, args);
-	acpi_os_printf(" [%X]\n", ACPI_CA_VERSION);
+	acpi_os_printf(" %8.8X %s-%u\n", ACPI_CA_VERSION, module_name,
+		       line_number);
 	va_end(args);
 }
 
@@ -1047,12 +1048,12 @@
 {
 	va_list args;
 
-	acpi_os_printf("ACPI Exception (%s-%04d): %s, ", module_name,
-		       line_number, acpi_format_exception(status));
+	acpi_os_printf("ACPI Exception: %s, ", acpi_format_exception(status));
 
 	va_start(args, format);
 	acpi_os_vprintf(format, args);
-	acpi_os_printf(" [%X]\n", ACPI_CA_VERSION);
+	acpi_os_printf(" %8.8X %s-%u\n", ACPI_CA_VERSION, module_name,
+		       line_number);
 	va_end(args);
 }
 
@@ -1061,11 +1062,12 @@
 {
 	va_list args;
 
-	acpi_os_printf("ACPI Warning (%s-%04d): ", module_name, line_number);
+	acpi_os_printf("ACPI Warning: ");
 
 	va_start(args, format);
 	acpi_os_vprintf(format, args);
-	acpi_os_printf(" [%X]\n", ACPI_CA_VERSION);
+	acpi_os_printf(" %8.8X %s-%u\n", ACPI_CA_VERSION, module_name,
+		       line_number);
 	va_end(args);
 }
 
@@ -1074,10 +1076,6 @@
 {
 	va_list args;
 
-	/*
-	 * Removed module_name, line_number, and acpica version, not needed
-	 * for info output
-	 */
 	acpi_os_printf("ACPI: ");
 
 	va_start(args, format);
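
For reference, a hedged sketch of how the reformatted reports read after this change (version, module and line values are purely illustrative):

/*
 * With the reordered format a report now reads, for example:
 *
 *   ACPI Error: Some diagnostic text 20090521 utmisc-1033
 *
 * i.e. the ACPICA version and the module-line origin now trail the
 * message instead of prefixing it as "(utmisc-1033)".
 */
ACPI_ERROR((AE_INFO, "Some diagnostic text"));
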
diff --git a/drivers/acpi/acpica/utmutex.c b/drivers/acpi/acpica/utmutex.c
index 26c93a7..80bb651 100644
--- a/drivers/acpi/acpica/utmutex.c
+++ b/drivers/acpi/acpica/utmutex.c
@@ -230,17 +230,18 @@
 			if (acpi_gbl_mutex_info[i].thread_id == this_thread_id) {
 				if (i == mutex_id) {
 					ACPI_ERROR((AE_INFO,
-						    "Mutex [%s] already acquired by this thread [%X]",
+						    "Mutex [%s] already acquired by this thread [%p]",
 						    acpi_ut_get_mutex_name
 						    (mutex_id),
-						    this_thread_id));
+						    ACPI_CAST_PTR(void,
+								  this_thread_id)));
 
 					return (AE_ALREADY_ACQUIRED);
 				}
 
 				ACPI_ERROR((AE_INFO,
-					    "Invalid acquire order: Thread %X owns [%s], wants [%s]",
-					    this_thread_id,
+					    "Invalid acquire order: Thread %p owns [%s], wants [%s]",
+					    ACPI_CAST_PTR(void, this_thread_id),
 					    acpi_ut_get_mutex_name(i),
 					    acpi_ut_get_mutex_name(mutex_id)));
 
@@ -251,24 +252,24 @@
 #endif
 
 	ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
-			  "Thread %lX attempting to acquire Mutex [%s]\n",
-			  (unsigned long)this_thread_id,
+			  "Thread %p attempting to acquire Mutex [%s]\n",
+			  ACPI_CAST_PTR(void, this_thread_id),
 			  acpi_ut_get_mutex_name(mutex_id)));
 
 	status = acpi_os_acquire_mutex(acpi_gbl_mutex_info[mutex_id].mutex,
 				       ACPI_WAIT_FOREVER);
 	if (ACPI_SUCCESS(status)) {
 		ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
-				  "Thread %lX acquired Mutex [%s]\n",
-				  (unsigned long)this_thread_id,
+				  "Thread %p acquired Mutex [%s]\n",
+				  ACPI_CAST_PTR(void, this_thread_id),
 				  acpi_ut_get_mutex_name(mutex_id)));
 
 		acpi_gbl_mutex_info[mutex_id].use_count++;
 		acpi_gbl_mutex_info[mutex_id].thread_id = this_thread_id;
 	} else {
 		ACPI_EXCEPTION((AE_INFO, status,
-				"Thread %lX could not acquire Mutex [%X]",
-				(unsigned long)this_thread_id, mutex_id));
+				"Thread %p could not acquire Mutex [%X]",
+				ACPI_CAST_PTR(void, this_thread_id), mutex_id));
 	}
 
 	return (status);
@@ -293,9 +294,8 @@
 	ACPI_FUNCTION_NAME(ut_release_mutex);
 
 	this_thread_id = acpi_os_get_thread_id();
-	ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
-			  "Thread %lX releasing Mutex [%s]\n",
-			  (unsigned long)this_thread_id,
+	ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Thread %p releasing Mutex [%s]\n",
+			  ACPI_CAST_PTR(void, this_thread_id),
 			  acpi_ut_get_mutex_name(mutex_id)));
 
 	if (mutex_id > ACPI_MAX_MUTEX) {
diff --git a/drivers/base/core.c b/drivers/base/core.c
index 1977d4b..7ecb193 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -22,6 +22,7 @@
 #include <linux/kallsyms.h>
 #include <linux/semaphore.h>
 #include <linux/mutex.h>
+#include <linux/async.h>
 
 #include "base.h"
 #include "power/power.h"
@@ -161,10 +162,18 @@
 	struct device *dev = to_dev(kobj);
 	int retval = 0;
 
-	/* add the major/minor if present */
+	/* add device node properties if present */
 	if (MAJOR(dev->devt)) {
+		const char *tmp;
+		const char *name;
+
 		add_uevent_var(env, "MAJOR=%u", MAJOR(dev->devt));
 		add_uevent_var(env, "MINOR=%u", MINOR(dev->devt));
+		name = device_get_nodename(dev, &tmp);
+		if (name) {
+			add_uevent_var(env, "DEVNAME=%s", name);
+			kfree(tmp);
+		}
 	}
 
 	if (dev->type && dev->type->name)
@@ -874,7 +883,7 @@
 	 * the name, and force the use of dev_name()
 	 */
 	if (dev->init_name) {
-		dev_set_name(dev, dev->init_name);
+		dev_set_name(dev, "%s", dev->init_name);
 		dev->init_name = NULL;
 	}
 
@@ -1128,6 +1137,47 @@
 }
 
 /**
+ * device_get_nodename - path of device node file
+ * @dev: device
+ * @tmp: possibly allocated string
+ *
+ * Return the relative path of a possible device node.
+ * Non-default names may need to allocate memory to compose
+ * a name. This memory is returned in @tmp and must be
+ * freed by the caller.
+ */
+const char *device_get_nodename(struct device *dev, const char **tmp)
+{
+	char *s;
+
+	*tmp = NULL;
+
+	/* the device type may provide a specific name */
+	if (dev->type && dev->type->nodename)
+		*tmp = dev->type->nodename(dev);
+	if (*tmp)
+		return *tmp;
+
+	/* the class may provide a specific name */
+	if (dev->class && dev->class->nodename)
+		*tmp = dev->class->nodename(dev);
+	if (*tmp)
+		return *tmp;
+
+	/* return name without allocation, tmp == NULL */
+	if (strchr(dev_name(dev), '!') == NULL)
+		return dev_name(dev);
+
+	/* replace '!' in the name with '/' */
+	*tmp = kstrdup(dev_name(dev), GFP_KERNEL);
+	if (!*tmp)
+		return NULL;
+	while ((s = strchr(*tmp, '!')))
+		s[0] = '/';
+	return *tmp;
+}
+
+/**
  * device_for_each_child - device child iterator.
  * @parent: parent struct device.
  * @data: data for the callback.
@@ -1271,7 +1321,7 @@
 	if (!root)
 		return ERR_PTR(err);
 
-	err = dev_set_name(&root->dev, name);
+	err = dev_set_name(&root->dev, "%s", name);
 	if (err) {
 		kfree(root);
 		return ERR_PTR(err);
@@ -1665,4 +1715,5 @@
 	kobject_put(sysfs_dev_char_kobj);
 	kobject_put(sysfs_dev_block_kobj);
 	kobject_put(dev_kobj);
+	async_synchronize_full();
 }
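
Several hunks in this series (here and in firmware_class.c, platform.c and sys.c) stop passing caller-supplied names as the format string. A brief hedged sketch of the hazard being closed (names and contents hypothetical):

#include <linux/device.h>

static void example_name_device(struct device *dev, const char *fw_name)
{
	/* If fw_name happens to contain "%s" or "%n", this misparses it: */
	/* dev_set_name(dev, fw_name);          <-- unsafe                */

	/* Passing it through "%s" treats the name as plain data:         */
	dev_set_name(dev, "%s", fw_name);
}
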
diff --git a/drivers/base/dd.c b/drivers/base/dd.c
index 742cbe6..f010687 100644
--- a/drivers/base/dd.c
+++ b/drivers/base/dd.c
@@ -226,7 +226,7 @@
  * pair is found, break out and return.
  *
  * Returns 1 if the device was bound to a driver;
- * 0 if no matching device was found;
+ * 0 if no matching driver was found;
  * -ENODEV if the device is not registered.
  *
  * When called for a USB interface, @dev->parent->sem must be held.
@@ -320,6 +320,10 @@
 		devres_release_all(dev);
 		dev->driver = NULL;
 		klist_remove(&dev->p->knode_driver);
+		if (dev->bus)
+			blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
+						     BUS_NOTIFY_UNBOUND_DRIVER,
+						     dev);
 	}
 }
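
The new BUS_NOTIFY_UNBOUND_DRIVER event fires after the driver has been fully detached and its devres resources released. A hedged sketch of a subscriber (bus choice and handler body are hypothetical):

#include <linux/device.h>
#include <linux/notifier.h>

static int example_bus_notify(struct notifier_block *nb,
			      unsigned long action, void *data)
{
	struct device *dev = data;

	if (action == BUS_NOTIFY_UNBOUND_DRIVER)
		dev_info(dev, "driver has been unbound\n");

	return NOTIFY_OK;
}

static struct notifier_block example_nb = {
	.notifier_call = example_bus_notify,
};

/* Registered with: bus_register_notifier(&platform_bus_type, &example_nb); */
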
 
diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c
index 8a267c4..ddeb819 100644
--- a/drivers/base/firmware_class.c
+++ b/drivers/base/firmware_class.c
@@ -40,7 +40,7 @@
 static DEFINE_MUTEX(fw_lock);
 
 struct firmware_priv {
-	char fw_id[FIRMWARE_NAME_MAX];
+	char *fw_id;
 	struct completion completion;
 	struct bin_attribute attr_data;
 	struct firmware *fw;
@@ -355,8 +355,9 @@
 	for (i = 0; i < fw_priv->nr_pages; i++)
 		__free_page(fw_priv->pages[i]);
 	kfree(fw_priv->pages);
+	kfree(fw_priv->fw_id);
 	kfree(fw_priv);
-	kfree(dev);
+	put_device(dev);
 
 	module_put(THIS_MODULE);
 }
@@ -386,13 +387,19 @@
 
 	init_completion(&fw_priv->completion);
 	fw_priv->attr_data = firmware_attr_data_tmpl;
-	strlcpy(fw_priv->fw_id, fw_name, FIRMWARE_NAME_MAX);
+	fw_priv->fw_id = kstrdup(fw_name, GFP_KERNEL);
+	if (!fw_priv->fw_id) {
+		dev_err(device, "%s: Firmware name allocation failed\n",
+			__func__);
+		retval = -ENOMEM;
+		goto error_kfree;
+	}
 
 	fw_priv->timeout.function = firmware_class_timeout;
 	fw_priv->timeout.data = (u_long) fw_priv;
 	init_timer(&fw_priv->timeout);
 
-	dev_set_name(f_dev, dev_name(device));
+	dev_set_name(f_dev, "%s", dev_name(device));
 	f_dev->parent = device;
 	f_dev->class = &firmware_class;
 	dev_set_drvdata(f_dev, fw_priv);
@@ -400,14 +407,17 @@
 	retval = device_register(f_dev);
 	if (retval) {
 		dev_err(device, "%s: device_register failed\n", __func__);
-		goto error_kfree;
+		put_device(f_dev);
+		goto error_kfree_fw_id;
 	}
 	*dev_p = f_dev;
 	return 0;
 
+error_kfree_fw_id:
+	kfree(fw_priv->fw_id);
 error_kfree:
-	kfree(fw_priv);
 	kfree(f_dev);
+	kfree(fw_priv);
 	return retval;
 }
 
@@ -615,8 +625,9 @@
  * @cont: function will be called asynchronously when the firmware
  *	request is over.
  *
- *	Asynchronous variant of request_firmware() for contexts where
- *	it is not possible to sleep.
+ *	Asynchronous variant of request_firmware() for user contexts where
+ *	it is not possible to sleep for a long time. It cannot be called
+ *	from atomic contexts.
  **/
 int
 request_firmware_nowait(
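
The error-path changes above follow the driver-core rule that a struct device which has been through device_register() is reference counted and must be dropped with put_device(), so its release() callback runs, rather than kfree()d directly. A condensed hedged sketch of the pattern (names hypothetical):

#include <linux/device.h>
#include <linux/slab.h>

/* Hypothetical release callback: frees the containing structure. */
static void example_release(struct device *dev)
{
	kfree(dev);
}

static int example_register(struct device *parent)
{
	struct device *dev;
	int err;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return -ENOMEM;

	dev->parent = parent;
	dev->release = example_release;
	dev_set_name(dev, "example%d", 0);

	err = device_register(dev);
	if (err) {
		/* the kobject is live now: put_device(), never kfree() */
		put_device(dev);
		return err;
	}
	return 0;
}
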
diff --git a/drivers/base/platform.c b/drivers/base/platform.c
index ead3f64..81cb01b 100644
--- a/drivers/base/platform.c
+++ b/drivers/base/platform.c
@@ -69,7 +69,8 @@
  * @name: resource name
  */
 struct resource *platform_get_resource_byname(struct platform_device *dev,
-					      unsigned int type, char *name)
+					      unsigned int type,
+					      const char *name)
 {
 	int i;
 
@@ -88,7 +89,7 @@
  * @dev: platform device
  * @name: IRQ name
  */
-int platform_get_irq_byname(struct platform_device *dev, char *name)
+int platform_get_irq_byname(struct platform_device *dev, const char *name)
 {
 	struct resource *r = platform_get_resource_byname(dev, IORESOURCE_IRQ,
 							  name);
@@ -244,7 +245,7 @@
 	if (pdev->id != -1)
 		dev_set_name(&pdev->dev, "%s.%d", pdev->name,  pdev->id);
 	else
-		dev_set_name(&pdev->dev, pdev->name);
+		dev_set_name(&pdev->dev, "%s", pdev->name);
 
 	for (i = 0; i < pdev->num_resources; i++) {
 		struct resource *p, *r = &pdev->resource[i];
diff --git a/drivers/base/sys.c b/drivers/base/sys.c
index 9742a78..79a9ae5 100644
--- a/drivers/base/sys.c
+++ b/drivers/base/sys.c
@@ -131,6 +131,8 @@
 
 int sysdev_class_register(struct sysdev_class *cls)
 {
+	int retval;
+
 	pr_debug("Registering sysdev class '%s'\n", cls->name);
 
 	INIT_LIST_HEAD(&cls->drivers);
@@ -138,7 +140,11 @@
 	cls->kset.kobj.parent = &system_kset->kobj;
 	cls->kset.kobj.ktype = &ktype_sysdev_class;
 	cls->kset.kobj.kset = system_kset;
-	kobject_set_name(&cls->kset.kobj, cls->name);
+
+	retval = kobject_set_name(&cls->kset.kobj, "%s", cls->name);
+	if (retval)
+		return retval;
+
 	return kset_register(&cls->kset);
 }
 
diff --git a/drivers/block/aoe/aoechr.c b/drivers/block/aoe/aoechr.c
index 200efc4..1988835 100644
--- a/drivers/block/aoe/aoechr.c
+++ b/drivers/block/aoe/aoechr.c
@@ -266,6 +266,11 @@
 	.owner = THIS_MODULE,
 };
 
+static char *aoe_nodename(struct device *dev)
+{
+	return kasprintf(GFP_KERNEL, "etherd/%s", dev_name(dev));
+}
+
 int __init
 aoechr_init(void)
 {
@@ -283,6 +288,8 @@
 		unregister_chrdev(AOE_MAJOR, "aoechr");
 		return PTR_ERR(aoe_class);
 	}
+	aoe_class->nodename = aoe_nodename;
+
 	for (i = 0; i < ARRAY_SIZE(chardevs); ++i)
 		device_create(aoe_class, NULL,
 			      MKDEV(AOE_MAJOR, chardevs[i].minor), NULL,
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
index b22cec9..c7a527c 100644
--- a/drivers/block/cciss.c
+++ b/drivers/block/cciss.c
@@ -38,7 +38,6 @@
 #include <linux/hdreg.h>
 #include <linux/spinlock.h>
 #include <linux/compat.h>
-#include <linux/blktrace_api.h>
 #include <asm/uaccess.h>
 #include <asm/io.h>
 
diff --git a/drivers/block/mg_disk.c b/drivers/block/mg_disk.c
index 60de5a0..f703f54 100644
--- a/drivers/block/mg_disk.c
+++ b/drivers/block/mg_disk.c
@@ -22,13 +22,12 @@
 #include <linux/delay.h>
 #include <linux/platform_device.h>
 #include <linux/gpio.h>
+#include <linux/mg_disk.h>
 
 #define MG_RES_SEC (CONFIG_MG_DISK_RES << 1)
 
 /* name for block device */
 #define MG_DISK_NAME "mgd"
-/* name for platform device */
-#define MG_DEV_NAME "mg_disk"
 
 #define MG_DISK_MAJ 0
 #define MG_DISK_MAX_PART 16
@@ -103,33 +102,8 @@
 #define MG_TMAX_SWRST_TO_RDY	500
 #define MG_TMAX_RSTOUT		3000
 
-/* device attribution */
-/* use mflash as boot device */
-#define MG_BOOT_DEV		(1 << 0)
-/* use mflash as storage device */
-#define MG_STORAGE_DEV		(1 << 1)
-/* same as MG_STORAGE_DEV, but bootloader already done reset sequence */
-#define MG_STORAGE_DEV_SKIP_RST	(1 << 2)
-
 #define MG_DEV_MASK (MG_BOOT_DEV | MG_STORAGE_DEV | MG_STORAGE_DEV_SKIP_RST)
 
-/* names of GPIO resource */
-#define MG_RST_PIN	"mg_rst"
-/* except MG_BOOT_DEV, reset-out pin should be assigned */
-#define MG_RSTOUT_PIN	"mg_rstout"
-
-/* private driver data */
-struct mg_drv_data {
-	/* disk resource */
-	u32 use_polling;
-
-	/* device attribution */
-	u32 dev_attr;
-
-	/* internally used */
-	struct mg_host *host;
-};
-
 /* main structure for mflash driver */
 struct mg_host {
 	struct device *dev;
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index d57f117..83650e0 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -430,7 +430,7 @@
 /********************************************************************
   entries in debugfs
 
-  /debugfs/pktcdvd[0-7]/
+  /sys/kernel/debug/pktcdvd[0-7]/
 			info
 
  *******************************************************************/
@@ -2855,6 +2855,11 @@
 	.media_changed =	pkt_media_changed,
 };
 
+static char *pktcdvd_nodename(struct gendisk *gd)
+{
+	return kasprintf(GFP_KERNEL, "pktcdvd/%s", gd->disk_name);
+}
+
 /*
  * Set up mapping from pktcdvd device to CD-ROM device.
  */
@@ -2907,6 +2912,7 @@
 	disk->fops = &pktcdvd_ops;
 	disk->flags = GENHD_FL_REMOVABLE;
 	strcpy(disk->disk_name, pd->name);
+	disk->nodename = pktcdvd_nodename;
 	disk->private_data = pd;
 	disk->queue = blk_alloc_queue(GFP_KERNEL);
 	if (!disk->queue)
@@ -3062,6 +3068,7 @@
 static struct miscdevice pkt_misc = {
 	.minor 		= MISC_DYNAMIC_MINOR,
 	.name  		= DRIVER_NAME,
+	.nodename	= "pktcdvd/control",
 	.fops  		= &pkt_ctl_fops
 };
 
diff --git a/drivers/block/ps3disk.c b/drivers/block/ps3disk.c
index aaeeb54..34cbb7f 100644
--- a/drivers/block/ps3disk.c
+++ b/drivers/block/ps3disk.c
@@ -120,7 +120,7 @@
 static int ps3disk_submit_request_sg(struct ps3_storage_device *dev,
 				     struct request *req)
 {
-	struct ps3disk_private *priv = dev->sbd.core.driver_data;
+	struct ps3disk_private *priv = ps3_system_bus_get_drvdata(&dev->sbd);
 	int write = rq_data_dir(req), res;
 	const char *op = write ? "write" : "read";
 	u64 start_sector, sectors;
@@ -168,7 +168,7 @@
 static int ps3disk_submit_flush_request(struct ps3_storage_device *dev,
 					struct request *req)
 {
-	struct ps3disk_private *priv = dev->sbd.core.driver_data;
+	struct ps3disk_private *priv = ps3_system_bus_get_drvdata(&dev->sbd);
 	u64 res;
 
 	dev_dbg(&dev->sbd.core, "%s:%u: flush request\n", __func__, __LINE__);
@@ -213,7 +213,7 @@
 static void ps3disk_request(struct request_queue *q)
 {
 	struct ps3_storage_device *dev = q->queuedata;
-	struct ps3disk_private *priv = dev->sbd.core.driver_data;
+	struct ps3disk_private *priv = ps3_system_bus_get_drvdata(&dev->sbd);
 
 	if (priv->req) {
 		dev_dbg(&dev->sbd.core, "%s:%u busy\n", __func__, __LINE__);
@@ -245,7 +245,7 @@
 		return IRQ_HANDLED;
 	}
 
-	priv = dev->sbd.core.driver_data;
+	priv = ps3_system_bus_get_drvdata(&dev->sbd);
 	req = priv->req;
 	if (!req) {
 		dev_dbg(&dev->sbd.core,
@@ -364,7 +364,7 @@
 
 static int ps3disk_identify(struct ps3_storage_device *dev)
 {
-	struct ps3disk_private *priv = dev->sbd.core.driver_data;
+	struct ps3disk_private *priv = ps3_system_bus_get_drvdata(&dev->sbd);
 	struct lv1_ata_cmnd_block ata_cmnd;
 	u16 *id = dev->bounce_buf;
 	u64 res;
@@ -445,7 +445,7 @@
 		goto fail;
 	}
 
-	dev->sbd.core.driver_data = priv;
+	ps3_system_bus_set_drvdata(_dev, priv);
 	spin_lock_init(&priv->lock);
 
 	dev->bounce_size = BOUNCE_SIZE;
@@ -523,7 +523,7 @@
 	kfree(dev->bounce_buf);
 fail_free_priv:
 	kfree(priv);
-	dev->sbd.core.driver_data = NULL;
+	ps3_system_bus_set_drvdata(_dev, NULL);
 fail:
 	mutex_lock(&ps3disk_mask_mutex);
 	__clear_bit(devidx, &ps3disk_mask);
@@ -534,7 +534,7 @@
 static int ps3disk_remove(struct ps3_system_bus_device *_dev)
 {
 	struct ps3_storage_device *dev = to_ps3_storage_device(&_dev->core);
-	struct ps3disk_private *priv = dev->sbd.core.driver_data;
+	struct ps3disk_private *priv = ps3_system_bus_get_drvdata(&dev->sbd);
 
 	mutex_lock(&ps3disk_mask_mutex);
 	__clear_bit(MINOR(disk_devt(priv->gendisk)) / PS3DISK_MINORS,
@@ -548,7 +548,7 @@
 	ps3stor_teardown(dev);
 	kfree(dev->bounce_buf);
 	kfree(priv);
-	dev->sbd.core.driver_data = NULL;
+	ps3_system_bus_set_drvdata(_dev, NULL);
 	return 0;
 }
 
diff --git a/drivers/block/ps3vram.c b/drivers/block/ps3vram.c
index 8eddef3..095f97e 100644
--- a/drivers/block/ps3vram.c
+++ b/drivers/block/ps3vram.c
@@ -14,8 +14,10 @@
 #include <linux/seq_file.h>
 
 #include <asm/firmware.h>
+#include <asm/iommu.h>
 #include <asm/lv1call.h>
 #include <asm/ps3.h>
+#include <asm/ps3gpu.h>
 
 
 #define DEVICE_NAME		"ps3vram"
@@ -45,8 +47,6 @@
 #define NV_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN	0x0000030c
 #define NV_MEMORY_TO_MEMORY_FORMAT_NOTIFY	0x00000104
 
-#define L1GPU_CONTEXT_ATTRIBUTE_FB_BLIT 0x601
-
 #define CACHE_PAGE_PRESENT 1
 #define CACHE_PAGE_DIRTY   2
 
@@ -72,8 +72,7 @@
 	u64 memory_handle;
 	u64 context_handle;
 	u32 *ctrl;
-	u32 *reports;
-	u8 __iomem *ddr_base;
+	void *reports;
 	u8 *xdr_buf;
 
 	u32 *fifo_base;
@@ -81,8 +80,8 @@
 
 	struct ps3vram_cache cache;
 
-	/* Used to serialize cache/DMA operations */
-	struct mutex lock;
+	spinlock_t lock;	/* protecting list of bios */
+	struct bio_list list;
 };
 
 
@@ -103,15 +102,15 @@
 module_param(size, charp, 0);
 MODULE_PARM_DESC(size, "memory size");
 
-static u32 *ps3vram_get_notifier(u32 *reports, int notifier)
+static u32 *ps3vram_get_notifier(void *reports, int notifier)
 {
-	return (void *)reports + DMA_NOTIFIER_OFFSET_BASE +
+	return reports + DMA_NOTIFIER_OFFSET_BASE +
 	       DMA_NOTIFIER_SIZE * notifier;
 }
 
 static void ps3vram_notifier_reset(struct ps3_system_bus_device *dev)
 {
-	struct ps3vram_priv *priv = dev->core.driver_data;
+	struct ps3vram_priv *priv = ps3_system_bus_get_drvdata(dev);
 	u32 *notify = ps3vram_get_notifier(priv->reports, NOTIFIER);
 	int i;
 
@@ -122,7 +121,7 @@
 static int ps3vram_notifier_wait(struct ps3_system_bus_device *dev,
 				 unsigned int timeout_ms)
 {
-	struct ps3vram_priv *priv = dev->core.driver_data;
+	struct ps3vram_priv *priv = ps3_system_bus_get_drvdata(dev);
 	u32 *notify = ps3vram_get_notifier(priv->reports, NOTIFIER);
 	unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);
 
@@ -137,7 +136,7 @@
 
 static void ps3vram_init_ring(struct ps3_system_bus_device *dev)
 {
-	struct ps3vram_priv *priv = dev->core.driver_data;
+	struct ps3vram_priv *priv = ps3_system_bus_get_drvdata(dev);
 
 	priv->ctrl[CTRL_PUT] = FIFO_BASE + FIFO_OFFSET;
 	priv->ctrl[CTRL_GET] = FIFO_BASE + FIFO_OFFSET;
@@ -146,7 +145,7 @@
 static int ps3vram_wait_ring(struct ps3_system_bus_device *dev,
 			     unsigned int timeout_ms)
 {
-	struct ps3vram_priv *priv = dev->core.driver_data;
+	struct ps3vram_priv *priv = ps3_system_bus_get_drvdata(dev);
 	unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);
 
 	do {
@@ -175,7 +174,7 @@
 
 static void ps3vram_rewind_ring(struct ps3_system_bus_device *dev)
 {
-	struct ps3vram_priv *priv = dev->core.driver_data;
+	struct ps3vram_priv *priv = ps3_system_bus_get_drvdata(dev);
 	int status;
 
 	ps3vram_out_ring(priv, 0x20000000 | (FIFO_BASE + FIFO_OFFSET));
@@ -183,20 +182,17 @@
 	priv->ctrl[CTRL_PUT] = FIFO_BASE + FIFO_OFFSET;
 
 	/* asking the HV for a blit will kick the FIFO */
-	status = lv1_gpu_context_attribute(priv->context_handle,
-					   L1GPU_CONTEXT_ATTRIBUTE_FB_BLIT, 0,
-					   0, 0, 0);
+	status = lv1_gpu_fb_blit(priv->context_handle, 0, 0, 0, 0);
 	if (status)
-		dev_err(&dev->core,
-			"%s: lv1_gpu_context_attribute failed %d\n", __func__,
-			status);
+		dev_err(&dev->core, "%s: lv1_gpu_fb_blit failed %d\n",
+			__func__, status);
 
 	priv->fifo_ptr = priv->fifo_base;
 }
 
 static void ps3vram_fire_ring(struct ps3_system_bus_device *dev)
 {
-	struct ps3vram_priv *priv = dev->core.driver_data;
+	struct ps3vram_priv *priv = ps3_system_bus_get_drvdata(dev);
 	int status;
 
 	mutex_lock(&ps3_gpu_mutex);
@@ -205,13 +201,10 @@
 			       (priv->fifo_ptr - priv->fifo_base) * sizeof(u32);
 
 	/* asking the HV for a blit will kick the FIFO */
-	status = lv1_gpu_context_attribute(priv->context_handle,
-					   L1GPU_CONTEXT_ATTRIBUTE_FB_BLIT, 0,
-					   0, 0, 0);
+	status = lv1_gpu_fb_blit(priv->context_handle, 0, 0, 0, 0);
 	if (status)
-		dev_err(&dev->core,
-			"%s: lv1_gpu_context_attribute failed %d\n", __func__,
-			status);
+		dev_err(&dev->core, "%s: lv1_gpu_fb_blit failed %d\n",
+			__func__, status);
 
 	if ((priv->fifo_ptr - priv->fifo_base) * sizeof(u32) >
 	    FIFO_SIZE - 1024) {
@@ -225,7 +218,7 @@
 
 static void ps3vram_bind(struct ps3_system_bus_device *dev)
 {
-	struct ps3vram_priv *priv = dev->core.driver_data;
+	struct ps3vram_priv *priv = ps3_system_bus_get_drvdata(dev);
 
 	ps3vram_begin_ring(priv, UPLOAD_SUBCH, 0, 1);
 	ps3vram_out_ring(priv, 0x31337303);
@@ -248,7 +241,7 @@
 			  unsigned int src_offset, unsigned int dst_offset,
 			  int len, int count)
 {
-	struct ps3vram_priv *priv = dev->core.driver_data;
+	struct ps3vram_priv *priv = ps3_system_bus_get_drvdata(dev);
 
 	ps3vram_begin_ring(priv, UPLOAD_SUBCH,
 			   NV_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN, 8);
@@ -280,7 +273,7 @@
 			    unsigned int src_offset, unsigned int dst_offset,
 			    int len, int count)
 {
-	struct ps3vram_priv *priv = dev->core.driver_data;
+	struct ps3vram_priv *priv = ps3_system_bus_get_drvdata(dev);
 
 	ps3vram_begin_ring(priv, DOWNLOAD_SUBCH,
 			   NV_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN, 8);
@@ -310,7 +303,7 @@
 
 static void ps3vram_cache_evict(struct ps3_system_bus_device *dev, int entry)
 {
-	struct ps3vram_priv *priv = dev->core.driver_data;
+	struct ps3vram_priv *priv = ps3_system_bus_get_drvdata(dev);
 	struct ps3vram_cache *cache = &priv->cache;
 
 	if (!(cache->tags[entry].flags & CACHE_PAGE_DIRTY))
@@ -332,7 +325,7 @@
 static void ps3vram_cache_load(struct ps3_system_bus_device *dev, int entry,
 			       unsigned int address)
 {
-	struct ps3vram_priv *priv = dev->core.driver_data;
+	struct ps3vram_priv *priv = ps3_system_bus_get_drvdata(dev);
 	struct ps3vram_cache *cache = &priv->cache;
 
 	dev_dbg(&dev->core, "Fetching %d: 0x%08x\n", entry, address);
@@ -352,7 +345,7 @@
 
 static void ps3vram_cache_flush(struct ps3_system_bus_device *dev)
 {
-	struct ps3vram_priv *priv = dev->core.driver_data;
+	struct ps3vram_priv *priv = ps3_system_bus_get_drvdata(dev);
 	struct ps3vram_cache *cache = &priv->cache;
 	int i;
 
@@ -366,7 +359,7 @@
 static unsigned int ps3vram_cache_match(struct ps3_system_bus_device *dev,
 					loff_t address)
 {
-	struct ps3vram_priv *priv = dev->core.driver_data;
+	struct ps3vram_priv *priv = ps3_system_bus_get_drvdata(dev);
 	struct ps3vram_cache *cache = &priv->cache;
 	unsigned int base;
 	unsigned int offset;
@@ -400,7 +393,7 @@
 
 static int ps3vram_cache_init(struct ps3_system_bus_device *dev)
 {
-	struct ps3vram_priv *priv = dev->core.driver_data;
+	struct ps3vram_priv *priv = ps3_system_bus_get_drvdata(dev);
 
 	priv->cache.page_count = CACHE_PAGE_COUNT;
 	priv->cache.page_size = CACHE_PAGE_SIZE;
@@ -419,7 +412,7 @@
 
 static void ps3vram_cache_cleanup(struct ps3_system_bus_device *dev)
 {
-	struct ps3vram_priv *priv = dev->core.driver_data;
+	struct ps3vram_priv *priv = ps3_system_bus_get_drvdata(dev);
 
 	ps3vram_cache_flush(dev);
 	kfree(priv->cache.tags);
@@ -428,7 +421,7 @@
 static int ps3vram_read(struct ps3_system_bus_device *dev, loff_t from,
 			size_t len, size_t *retlen, u_char *buf)
 {
-	struct ps3vram_priv *priv = dev->core.driver_data;
+	struct ps3vram_priv *priv = ps3_system_bus_get_drvdata(dev);
 	unsigned int cached, count;
 
 	dev_dbg(&dev->core, "%s: from=0x%08x len=0x%zx\n", __func__,
@@ -449,8 +442,6 @@
 		offset = (unsigned int) (from & (priv->cache.page_size - 1));
 		avail  = priv->cache.page_size - offset;
 
-		mutex_lock(&priv->lock);
-
 		entry = ps3vram_cache_match(dev, from);
 		cached = CACHE_OFFSET + entry * priv->cache.page_size + offset;
 
@@ -462,8 +453,6 @@
 			avail = count;
 		memcpy(buf, priv->xdr_buf + cached, avail);
 
-		mutex_unlock(&priv->lock);
-
 		buf += avail;
 		count -= avail;
 		from += avail;
@@ -476,7 +465,7 @@
 static int ps3vram_write(struct ps3_system_bus_device *dev, loff_t to,
 			 size_t len, size_t *retlen, const u_char *buf)
 {
-	struct ps3vram_priv *priv = dev->core.driver_data;
+	struct ps3vram_priv *priv = ps3_system_bus_get_drvdata(dev);
 	unsigned int cached, count;
 
 	if (to >= priv->size)
@@ -494,8 +483,6 @@
 		offset = (unsigned int) (to & (priv->cache.page_size - 1));
 		avail  = priv->cache.page_size - offset;
 
-		mutex_lock(&priv->lock);
-
 		entry = ps3vram_cache_match(dev, to);
 		cached = CACHE_OFFSET + entry * priv->cache.page_size + offset;
 
@@ -509,8 +496,6 @@
 
 		priv->cache.tags[entry].flags |= CACHE_PAGE_DIRTY;
 
-		mutex_unlock(&priv->lock);
-
 		buf += avail;
 		count -= avail;
 		to += avail;
@@ -543,28 +528,26 @@
 
 static void __devinit ps3vram_proc_init(struct ps3_system_bus_device *dev)
 {
-	struct ps3vram_priv *priv = dev->core.driver_data;
+	struct ps3vram_priv *priv = ps3_system_bus_get_drvdata(dev);
 	struct proc_dir_entry *pde;
 
-	pde = proc_create(DEVICE_NAME, 0444, NULL, &ps3vram_proc_fops);
-	if (!pde) {
+	pde = proc_create_data(DEVICE_NAME, 0444, NULL, &ps3vram_proc_fops,
+			       priv);
+	if (!pde)
 		dev_warn(&dev->core, "failed to create /proc entry\n");
-		return;
-	}
-	pde->data = priv;
 }
 
-static int ps3vram_make_request(struct request_queue *q, struct bio *bio)
+static struct bio *ps3vram_do_bio(struct ps3_system_bus_device *dev,
+				  struct bio *bio)
 {
-	struct ps3_system_bus_device *dev = q->queuedata;
+	struct ps3vram_priv *priv = ps3_system_bus_get_drvdata(dev);
 	int write = bio_data_dir(bio) == WRITE;
 	const char *op = write ? "write" : "read";
 	loff_t offset = bio->bi_sector << 9;
 	int error = 0;
 	struct bio_vec *bvec;
 	unsigned int i;
-
-	dev_dbg(&dev->core, "%s\n", __func__);
+	struct bio *next;
 
 	bio_for_each_segment(bvec, bio, i) {
 		/* PS3 is ppc64, so we don't handle highmem */
@@ -585,6 +568,7 @@
 
 		if (retlen != len) {
 			dev_err(&dev->core, "Short %s\n", op);
+			error = -EIO;
 			goto out;
 		}
 
@@ -594,7 +578,35 @@
 	dev_dbg(&dev->core, "%s completed\n", op);
 
 out:
+	spin_lock_irq(&priv->lock);
+	bio_list_pop(&priv->list);
+	next = bio_list_peek(&priv->list);
+	spin_unlock_irq(&priv->lock);
+
 	bio_endio(bio, error);
+	return next;
+}
+
+static int ps3vram_make_request(struct request_queue *q, struct bio *bio)
+{
+	struct ps3_system_bus_device *dev = q->queuedata;
+	struct ps3vram_priv *priv = ps3_system_bus_get_drvdata(dev);
+	int busy;
+
+	dev_dbg(&dev->core, "%s\n", __func__);
+
+	spin_lock_irq(&priv->lock);
+	busy = !bio_list_empty(&priv->list);
+	bio_list_add(&priv->list, bio);
+	spin_unlock_irq(&priv->lock);
+
+	if (busy)
+		return 0;
+
+	do {
+		bio = ps3vram_do_bio(dev, bio);
+	} while (bio);
+
 	return 0;
 }
 
@@ -604,8 +616,8 @@
 	int error, status;
 	struct request_queue *queue;
 	struct gendisk *gendisk;
-	u64 ddr_lpar, ctrl_lpar, info_lpar, reports_lpar, ddr_size,
-	    reports_size;
+	u64 ddr_size, ddr_lpar, ctrl_lpar, info_lpar, reports_lpar,
+	    reports_size, xdr_lpar;
 	char *rest;
 
 	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
@@ -614,10 +626,9 @@
 		goto fail;
 	}
 
-	mutex_init(&priv->lock);
-	dev->core.driver_data = priv;
-
-	priv = dev->core.driver_data;
+	spin_lock_init(&priv->lock);
+	bio_list_init(&priv->list);
+	ps3_system_bus_set_drvdata(dev, priv);
 
 	/* Allocate XDR buffer (1MiB aligned) */
 	priv->xdr_buf = (void *)__get_free_pages(GFP_KERNEL,
@@ -636,7 +647,7 @@
 	if (ps3_open_hv_device(dev)) {
 		dev_err(&dev->core, "ps3_open_hv_device failed\n");
 		error = -EAGAIN;
-		goto out_close_gpu;
+		goto out_free_xdr_buf;
 	}
 
 	/* Request memory */
@@ -660,7 +671,7 @@
 		dev_err(&dev->core, "lv1_gpu_memory_allocate failed %d\n",
 			status);
 		error = -ENOMEM;
-		goto out_free_xdr_buf;
+		goto out_close_gpu;
 	}
 
 	/* Request context */
@@ -676,9 +687,11 @@
 	}
 
 	/* Map XDR buffer to RSX */
+	xdr_lpar = ps3_mm_phys_to_lpar(__pa(priv->xdr_buf));
 	status = lv1_gpu_context_iomap(priv->context_handle, XDR_IOIF,
-				       ps3_mm_phys_to_lpar(__pa(priv->xdr_buf)),
-				       XDR_BUF_SIZE, 0);
+				       xdr_lpar, XDR_BUF_SIZE,
+				       CBE_IOPTE_PP_W | CBE_IOPTE_PP_R |
+				       CBE_IOPTE_M);
 	if (status) {
 		dev_err(&dev->core, "lv1_gpu_context_iomap failed %d\n",
 			status);
@@ -686,19 +699,11 @@
 		goto out_free_context;
 	}
 
-	priv->ddr_base = ioremap_flags(ddr_lpar, ddr_size, _PAGE_NO_CACHE);
-
-	if (!priv->ddr_base) {
-		dev_err(&dev->core, "ioremap DDR failed\n");
-		error = -ENOMEM;
-		goto out_free_context;
-	}
-
 	priv->ctrl = ioremap(ctrl_lpar, 64 * 1024);
 	if (!priv->ctrl) {
 		dev_err(&dev->core, "ioremap CTRL failed\n");
 		error = -ENOMEM;
-		goto out_unmap_vram;
+		goto out_unmap_context;
 	}
 
 	priv->reports = ioremap(reports_lpar, reports_size);
@@ -775,8 +780,9 @@
 	iounmap(priv->reports);
 out_unmap_ctrl:
 	iounmap(priv->ctrl);
-out_unmap_vram:
-	iounmap(priv->ddr_base);
+out_unmap_context:
+	lv1_gpu_context_iomap(priv->context_handle, XDR_IOIF, xdr_lpar,
+			      XDR_BUF_SIZE, CBE_IOPTE_M);
 out_free_context:
 	lv1_gpu_context_free(priv->context_handle);
 out_free_memory:
@@ -787,14 +793,14 @@
 	free_pages((unsigned long) priv->xdr_buf, get_order(XDR_BUF_SIZE));
 fail_free_priv:
 	kfree(priv);
-	dev->core.driver_data = NULL;
+	ps3_system_bus_set_drvdata(dev, NULL);
 fail:
 	return error;
 }
 
 static int ps3vram_remove(struct ps3_system_bus_device *dev)
 {
-	struct ps3vram_priv *priv = dev->core.driver_data;
+	struct ps3vram_priv *priv = ps3_system_bus_get_drvdata(dev);
 
 	del_gendisk(priv->gendisk);
 	put_disk(priv->gendisk);
@@ -803,13 +809,15 @@
 	ps3vram_cache_cleanup(dev);
 	iounmap(priv->reports);
 	iounmap(priv->ctrl);
-	iounmap(priv->ddr_base);
+	lv1_gpu_context_iomap(priv->context_handle, XDR_IOIF,
+			      ps3_mm_phys_to_lpar(__pa(priv->xdr_buf)),
+			      XDR_BUF_SIZE, CBE_IOPTE_M);
 	lv1_gpu_context_free(priv->context_handle);
 	lv1_gpu_memory_free(priv->memory_handle);
 	ps3_close_hv_device(dev);
 	free_pages((unsigned long) priv->xdr_buf, get_order(XDR_BUF_SIZE));
 	kfree(priv);
-	dev->core.driver_data = NULL;
+	ps3_system_bus_set_drvdata(dev, NULL);
 	return 0;
 }
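
The bio handling above replaces the per-request mutex with a spinlock-protected bio list: submitters only queue their bio, and whichever thread finds the list empty drains it, so a single thread talks to the hypervisor at a time. A hedged sketch of the same scheme with simplified types:

#include <linux/bio.h>
#include <linux/spinlock.h>

struct example_queue {
	spinlock_t lock;
	struct bio_list list;
};

static struct bio *example_do_one(struct example_queue *q, struct bio *bio)
{
	struct bio *next;

	/* ... perform the actual transfer for 'bio' here ... */

	spin_lock_irq(&q->lock);
	bio_list_pop(&q->list);          /* remove the bio just completed */
	next = bio_list_peek(&q->list);  /* next one to process, if any   */
	spin_unlock_irq(&q->lock);

	bio_endio(bio, 0);
	return next;
}

static void example_submit(struct example_queue *q, struct bio *bio)
{
	int busy;

	spin_lock_irq(&q->lock);
	busy = !bio_list_empty(&q->list);
	bio_list_add(&q->list, bio);
	spin_unlock_irq(&q->lock);

	if (busy)
		return;          /* another thread is already draining */

	do {
		bio = example_do_one(q, bio);
	} while (bio);
}
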
 
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index c199682..e532847 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -753,12 +753,12 @@
 
 	/* Front end dir is a number, which is used as the id. */
 	info->handle = simple_strtoul(strrchr(dev->nodename, '/')+1, NULL, 0);
-	dev->dev.driver_data = info;
+	dev_set_drvdata(&dev->dev, info);
 
 	err = talk_to_backend(dev, info);
 	if (err) {
 		kfree(info);
-		dev->dev.driver_data = NULL;
+		dev_set_drvdata(&dev->dev, NULL);
 		return err;
 	}
 
@@ -843,7 +843,7 @@
  */
 static int blkfront_resume(struct xenbus_device *dev)
 {
-	struct blkfront_info *info = dev->dev.driver_data;
+	struct blkfront_info *info = dev_get_drvdata(&dev->dev);
 	int err;
 
 	dev_dbg(&dev->dev, "blkfront_resume: %s\n", dev->nodename);
@@ -922,7 +922,7 @@
  */
 static void blkfront_closing(struct xenbus_device *dev)
 {
-	struct blkfront_info *info = dev->dev.driver_data;
+	struct blkfront_info *info = dev_get_drvdata(&dev->dev);
 	unsigned long flags;
 
 	dev_dbg(&dev->dev, "blkfront_closing: %s removed\n", dev->nodename);
@@ -957,7 +957,7 @@
 static void backend_changed(struct xenbus_device *dev,
 			    enum xenbus_state backend_state)
 {
-	struct blkfront_info *info = dev->dev.driver_data;
+	struct blkfront_info *info = dev_get_drvdata(&dev->dev);
 	struct block_device *bd;
 
 	dev_dbg(&dev->dev, "blkfront:backend_changed.\n");
@@ -997,7 +997,7 @@
 
 static int blkfront_remove(struct xenbus_device *dev)
 {
-	struct blkfront_info *info = dev->dev.driver_data;
+	struct blkfront_info *info = dev_get_drvdata(&dev->dev);
 
 	dev_dbg(&dev->dev, "blkfront_remove: %s removed\n", dev->nodename);
 
@@ -1010,7 +1010,7 @@
 
 static int blkfront_is_ready(struct xenbus_device *dev)
 {
-	struct blkfront_info *info = dev->dev.driver_data;
+	struct blkfront_info *info = dev_get_drvdata(&dev->dev);
 
 	return info->is_ready;
 }
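
These hunks (like the ps3disk, ps3vram and hvcs ones above) switch from poking dev->driver_data directly to the dev_set_drvdata()/dev_get_drvdata() accessors. A minimal hedged sketch of the pattern (struct and function names hypothetical):

#include <linux/device.h>

struct example_info {
	int is_ready;
};

static void example_attach(struct device *dev, struct example_info *info)
{
	dev_set_drvdata(dev, info);	/* store per-device driver state */
}

static int example_is_ready(struct device *dev)
{
	struct example_info *info = dev_get_drvdata(dev);

	return info ? info->is_ready : 0;
}
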
diff --git a/drivers/char/hvc_iucv.c b/drivers/char/hvc_iucv.c
index 54481a8..86105ef 100644
--- a/drivers/char/hvc_iucv.c
+++ b/drivers/char/hvc_iucv.c
@@ -4,7 +4,7 @@
  * This HVC device driver provides terminal access using
  * z/VM IUCV communication paths.
  *
- * Copyright IBM Corp. 2008
+ * Copyright IBM Corp. 2008, 2009
  *
  * Author(s):	Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
  */
@@ -15,6 +15,7 @@
 #include <asm/ebcdic.h>
 #include <linux/ctype.h>
 #include <linux/delay.h>
+#include <linux/device.h>
 #include <linux/init.h>
 #include <linux/mempool.h>
 #include <linux/moduleparam.h>
@@ -74,6 +75,7 @@
 	wait_queue_head_t	sndbuf_waitq;	/* wait for send completion */
 	struct list_head	tty_outqueue;	/* outgoing IUCV messages */
 	struct list_head	tty_inqueue;	/* incoming IUCV messages */
+	struct device		*dev;		/* device structure */
 };
 
 struct iucv_tty_buffer {
@@ -542,7 +544,68 @@
 
 	if (sync_wait)
 		wait_event_timeout(priv->sndbuf_waitq,
-				   tty_outqueue_empty(priv), HZ);
+				   tty_outqueue_empty(priv), HZ/10);
+}
+
+/**
+ * hvc_iucv_hangup() - Sever IUCV path and schedule hvc tty hang up
+ * @priv:	Pointer to hvc_iucv_private structure
+ *
+ * This routine severs an existing IUCV communication path and hangs
+ * up the underlying HVC terminal device.
+ * The hang-up occurs only if an IUCV communication path is established;
+ * otherwise there is no need to hang up the terminal device.
+ *
+ * The IUCV HVC hang-up is separated into two steps:
+ * 1. After the IUCV path has been severed, the iucv_state is set to
+ *    IUCV_SEVERED.
+ * 2. Later, when the HVC thread calls hvc_iucv_get_chars(), the
+ *    IUCV_SEVERED state causes the tty hang-up in the HVC layer.
+ *
+ * If the tty has not yet been opened, clean up the hvc_iucv_private
+ * structure to allow re-connects.
+ * If the tty has been opened, let get_chars() return -EPIPE to signal
+ * the HVC layer to hang up the tty, and wake up the HVC thread so that
+ * it calls get_chars()...
+ *
+ * Special notes on hanging up an HVC terminal instantiated as console:
+ * Hang-up:	1. do_tty_hangup() replaces file ops (= hung_up_tty_fops)
+ *		2. do_tty_hangup() calls tty->ops->close() for console_filp
+ *			=> no hangup notifier is called by HVC (default)
+ *		3. hvc_close() returns because of tty_hung_up_p(filp)
+ *			=> no delete notifier is called!
+ * Finally, the back-end is not being notified, thus, the tty session is
+ * kept active (TTY_OPEN) to be ready for re-connects.
+ *
+ * Locking:	spin_lock(&priv->lock) w/o disabling bh
+ */
+static void hvc_iucv_hangup(struct hvc_iucv_private *priv)
+{
+	struct iucv_path *path;
+
+	path = NULL;
+	spin_lock(&priv->lock);
+	if (priv->iucv_state == IUCV_CONNECTED) {
+		path = priv->path;
+		priv->path = NULL;
+		priv->iucv_state = IUCV_SEVERED;
+		if (priv->tty_state == TTY_CLOSED)
+			hvc_iucv_cleanup(priv);
+		else
+			/* console is special (see above) */
+			if (priv->is_console) {
+				hvc_iucv_cleanup(priv);
+				priv->tty_state = TTY_OPENED;
+			} else
+				hvc_kick();
+	}
+	spin_unlock(&priv->lock);
+
+	/* finally sever path (outside of priv->lock due to lock ordering) */
+	if (path) {
+		iucv_path_sever(path, NULL);
+		iucv_path_free(path);
+	}
 }
 
 /**
@@ -735,11 +798,8 @@
  * @ipuser:	User specified data for this path
  *		(AF_IUCV: port/service name and originator port)
  *
- * The function also severs the path (as required by the IUCV protocol) and
- * sets the iucv state to IUCV_SEVERED for the associated struct
- * hvc_iucv_private instance. Later, the IUCV_SEVERED state triggers a tty
- * hangup (hvc_iucv_get_chars() / hvc_iucv_write()).
- * If tty portion of the HVC is closed, clean up the outqueue.
+ * This function calls the hvc_iucv_hangup() function for the
+ * respective IUCV HVC terminal.
  *
  * Locking:	struct hvc_iucv_private->lock
  */
@@ -747,33 +807,7 @@
 {
 	struct hvc_iucv_private *priv = path->private;
 
-	spin_lock(&priv->lock);
-	priv->iucv_state = IUCV_SEVERED;
-
-	/* If the tty has not yet been opened, clean up the hvc_iucv_private
-	 * structure to allow re-connects.
-	 * This is also done for our console device because console hangups
-	 * are handled specially and no notifier is called by HVC.
-	 * The tty session is active (TTY_OPEN) and ready for re-connects...
-	 *
-	 * If it has been opened, let get_chars() return -EPIPE to signal the
-	 * HVC layer to hang up the tty.
-	 * If so, we need to wake up the HVC thread to call get_chars()...
-	 */
-	priv->path = NULL;
-	if (priv->tty_state == TTY_CLOSED)
-		hvc_iucv_cleanup(priv);
-	else
-		if (priv->is_console) {
-			hvc_iucv_cleanup(priv);
-			priv->tty_state = TTY_OPENED;
-		} else
-			hvc_kick();
-	spin_unlock(&priv->lock);
-
-	/* finally sever path (outside of priv->lock due to lock ordering) */
-	iucv_path_sever(path, ipuser);
-	iucv_path_free(path);
+	hvc_iucv_hangup(priv);
 }
 
 /**
@@ -853,6 +887,37 @@
 	destroy_tty_buffer_list(&list_remove);
 }
 
+/**
+ * hvc_iucv_pm_freeze() - Freeze PM callback
+ * @dev:	IUCV HVC terminal device
+ *
+ * Sever an established IUCV communication path and
+ * trigger a hang-up of the underlying HVC terminal.
+ */
+static int hvc_iucv_pm_freeze(struct device *dev)
+{
+	struct hvc_iucv_private *priv = dev_get_drvdata(dev);
+
+	local_bh_disable();
+	hvc_iucv_hangup(priv);
+	local_bh_enable();
+
+	return 0;
+}
+
+/**
+ * hvc_iucv_pm_restore_thaw() - Thaw and restore PM callback
+ * @dev:	IUCV HVC terminal device
+ *
+ * Wake up the HVC thread to trigger hang-up and respective
+ * HVC back-end notifier invocations.
+ */
+static int hvc_iucv_pm_restore_thaw(struct device *dev)
+{
+	hvc_kick();
+	return 0;
+}
+
 
 /* HVC operations */
 static struct hv_ops hvc_iucv_ops = {
@@ -863,6 +928,20 @@
 	.notifier_hangup = hvc_iucv_notifier_hangup,
 };
 
+/* Suspend / resume device operations */
+static struct dev_pm_ops hvc_iucv_pm_ops = {
+	.freeze	  = hvc_iucv_pm_freeze,
+	.thaw	  = hvc_iucv_pm_restore_thaw,
+	.restore  = hvc_iucv_pm_restore_thaw,
+};
+
+/* IUCV HVC device driver */
+static struct device_driver hvc_iucv_driver = {
+	.name = KMSG_COMPONENT,
+	.bus  = &iucv_bus,
+	.pm   = &hvc_iucv_pm_ops,
+};
+
 /**
  * hvc_iucv_alloc() - Allocates a new struct hvc_iucv_private instance
  * @id:			hvc_iucv_table index
@@ -897,14 +976,12 @@
 	/* set console flag */
 	priv->is_console = is_console;
 
-	/* finally allocate hvc */
+	/* allocate hvc device */
 	priv->hvc = hvc_alloc(HVC_IUCV_MAGIC + id, /*		  PAGE_SIZE */
 			      HVC_IUCV_MAGIC + id, &hvc_iucv_ops, 256);
 	if (IS_ERR(priv->hvc)) {
 		rc = PTR_ERR(priv->hvc);
-		free_page((unsigned long) priv->sndbuf);
-		kfree(priv);
-		return rc;
+		goto out_error_hvc;
 	}
 
 	/* notify HVC thread instead of using polling */
@@ -915,8 +992,45 @@
 	memcpy(priv->srv_name, name, 8);
 	ASCEBC(priv->srv_name, 8);
 
+	/* create and setup device */
+	priv->dev = kzalloc(sizeof(*priv->dev), GFP_KERNEL);
+	if (!priv->dev) {
+		rc = -ENOMEM;
+		goto out_error_dev;
+	}
+	dev_set_name(priv->dev, "hvc_iucv%d", id);
+	dev_set_drvdata(priv->dev, priv);
+	priv->dev->bus = &iucv_bus;
+	priv->dev->parent = iucv_root;
+	priv->dev->driver = &hvc_iucv_driver;
+	priv->dev->release = (void (*)(struct device *)) kfree;
+	rc = device_register(priv->dev);
+	if (rc) {
+		kfree(priv->dev);
+		goto out_error_dev;
+	}
+
 	hvc_iucv_table[id] = priv;
 	return 0;
+
+out_error_dev:
+	hvc_remove(priv->hvc);
+out_error_hvc:
+	free_page((unsigned long) priv->sndbuf);
+	kfree(priv);
+
+	return rc;
+}
+
+/**
+ * hvc_iucv_destroy() - Destroy and free hvc_iucv_private instances
+ */
+static void __init hvc_iucv_destroy(struct hvc_iucv_private *priv)
+{
+	hvc_remove(priv->hvc);
+	device_unregister(priv->dev);
+	free_page((unsigned long) priv->sndbuf);
+	kfree(priv);
 }
 
 /**
@@ -1109,6 +1223,11 @@
 		goto out_error;
 	}
 
+	/* register IUCV HVC device driver */
+	rc = driver_register(&hvc_iucv_driver);
+	if (rc)
+		goto out_error;
+
 	/* parse hvc_iucv_allow string and create z/VM user ID filter list */
 	if (hvc_iucv_filter_string) {
 		rc = hvc_iucv_setup_filter(hvc_iucv_filter_string);
@@ -1183,15 +1302,14 @@
 	iucv_unregister(&hvc_iucv_handler, 0);
 out_error_hvc:
 	for (i = 0; i < hvc_iucv_devices; i++)
-		if (hvc_iucv_table[i]) {
-			if (hvc_iucv_table[i]->hvc)
-				hvc_remove(hvc_iucv_table[i]->hvc);
-			kfree(hvc_iucv_table[i]);
-		}
+		if (hvc_iucv_table[i])
+			hvc_iucv_destroy(hvc_iucv_table[i]);
 out_error_memory:
 	mempool_destroy(hvc_iucv_mempool);
 	kmem_cache_destroy(hvc_iucv_buffer_cache);
 out_error:
+	if (hvc_iucv_filter)
+		kfree(hvc_iucv_filter);
 	hvc_iucv_devices = 0; /* ensure that we do not provide any device */
 	return rc;
 }
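
The suspend/resume support added above works because each IUCV HVC terminal now owns a registered struct device on the IUCV bus, so the PM core can hand the per-terminal state to the callbacks via drvdata. As a rough sketch of the same pattern for a hypothetical "foo" driver (only the shape mirrors the patch; struct foo_state, foo_drop_connection(), foo_kick_worker() and foo_bus_type are made-up names):

	#include <linux/device.h>
	#include <linux/pm.h>

	static int foo_pm_freeze(struct device *dev)
	{
		struct foo_state *st = dev_get_drvdata(dev);

		foo_drop_connection(st);	/* sever links that cannot survive suspend */
		return 0;
	}

	static int foo_pm_restore_thaw(struct device *dev)
	{
		foo_kick_worker();		/* let the worker rebuild state lazily */
		return 0;
	}

	static struct dev_pm_ops foo_pm_ops = {
		.freeze	 = foo_pm_freeze,
		.thaw	 = foo_pm_restore_thaw,
		.restore = foo_pm_restore_thaw,
	};

	static struct device_driver foo_driver = {
		.name = "foo",
		.bus  = &foo_bus_type,
		.pm   = &foo_pm_ops,
	};

The device side then only needs dev_set_drvdata() before device_register(), exactly as hvc_iucv_alloc() does above.
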
diff --git a/drivers/char/hvcs.c b/drivers/char/hvcs.c
index c76bccf..7d64e42 100644
--- a/drivers/char/hvcs.c
+++ b/drivers/char/hvcs.c
@@ -347,7 +347,7 @@
 
 static inline struct hvcs_struct *from_vio_dev(struct vio_dev *viod)
 {
-	return viod->dev.driver_data;
+	return dev_get_drvdata(&viod->dev);
 }
 /* The sysfs interface for the driver and devices */
 
@@ -785,7 +785,7 @@
 	kref_init(&hvcsd->kref);
 
 	hvcsd->vdev = dev;
-	dev->dev.driver_data = hvcsd;
+	dev_set_drvdata(&dev->dev, hvcsd);
 
 	hvcsd->index = index;
 
@@ -831,7 +831,7 @@
 
 static int __devexit hvcs_remove(struct vio_dev *dev)
 {
-	struct hvcs_struct *hvcsd = dev->dev.driver_data;
+	struct hvcs_struct *hvcsd = dev_get_drvdata(&dev->dev);
 	unsigned long flags;
 	struct tty_struct *tty;
 
diff --git a/drivers/char/hw_random/core.c b/drivers/char/hw_random/core.c
index e5d583c..fc93e2f 100644
--- a/drivers/char/hw_random/core.c
+++ b/drivers/char/hw_random/core.c
@@ -153,6 +153,7 @@
 static struct miscdevice rng_miscdev = {
 	.minor		= RNG_MISCDEV_MINOR,
 	.name		= RNG_MODULE_NAME,
+	.devnode	= "hwrng",
 	.fops		= &rng_chrdev_ops,
 };
 
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
index 2596446..d2e6980 100644
--- a/drivers/char/ipmi/ipmi_si_intf.c
+++ b/drivers/char/ipmi/ipmi_si_intf.c
@@ -2375,14 +2375,14 @@
 		info->io.addr_data, info->io.regsize, info->io.regspacing,
 		info->irq);
 
-	dev->dev.driver_data = (void *) info;
+	dev_set_drvdata(&dev->dev, info);
 
 	return try_smi_init(info);
 }
 
 static int __devexit ipmi_of_remove(struct of_device *dev)
 {
-	cleanup_one_si(dev->dev.driver_data);
+	cleanup_one_si(dev_get_drvdata(&dev->dev));
 	return 0;
 }
 
diff --git a/drivers/char/misc.c b/drivers/char/misc.c
index a5e0db9..62c99fa 100644
--- a/drivers/char/misc.c
+++ b/drivers/char/misc.c
@@ -168,7 +168,6 @@
 	.open		= misc_open,
 };
 
-
 /**
  *	misc_register	-	register a miscellaneous device
  *	@misc: device structure
@@ -217,8 +216,8 @@
 		misc_minors[misc->minor >> 3] |= 1 << (misc->minor & 7);
 	dev = MKDEV(MISC_MAJOR, misc->minor);
 
-	misc->this_device = device_create(misc_class, misc->parent, dev, NULL,
-					  "%s", misc->name);
+	misc->this_device = device_create(misc_class, misc->parent, dev,
+					  misc, "%s", misc->name);
 	if (IS_ERR(misc->this_device)) {
 		err = PTR_ERR(misc->this_device);
 		goto out;
@@ -264,6 +263,15 @@
 EXPORT_SYMBOL(misc_register);
 EXPORT_SYMBOL(misc_deregister);
 
+static char *misc_nodename(struct device *dev)
+{
+	struct miscdevice *c = dev_get_drvdata(dev);
+
+	if (c->devnode)
+		return kstrdup(c->devnode, GFP_KERNEL);
+	return NULL;
+}
+
 static int __init misc_init(void)
 {
 	int err;
@@ -279,6 +287,7 @@
 	err = -EIO;
 	if (register_chrdev(MISC_MAJOR,"misc",&misc_fops))
 		goto fail_printk;
+	misc_class->nodename = misc_nodename;
 	return 0;
 
 fail_printk:
diff --git a/drivers/char/ps3flash.c b/drivers/char/ps3flash.c
index afbe456..f424d39 100644
--- a/drivers/char/ps3flash.c
+++ b/drivers/char/ps3flash.c
@@ -33,48 +33,64 @@
 
 struct ps3flash_private {
 	struct mutex mutex;	/* Bounce buffer mutex */
+	u64 chunk_sectors;
+	int tag;		/* Start sector of buffer, -1 if invalid */
+	bool dirty;
 };
 
 static struct ps3_storage_device *ps3flash_dev;
 
-static ssize_t ps3flash_read_write_sectors(struct ps3_storage_device *dev,
-					   u64 lpar, u64 start_sector,
-					   u64 sectors, int write)
+static int ps3flash_read_write_sectors(struct ps3_storage_device *dev,
+				       u64 start_sector, int write)
 {
-	u64 res = ps3stor_read_write_sectors(dev, lpar, start_sector, sectors,
+	struct ps3flash_private *priv = ps3_system_bus_get_drvdata(&dev->sbd);
+	u64 res = ps3stor_read_write_sectors(dev, dev->bounce_lpar,
+					     start_sector, priv->chunk_sectors,
 					     write);
 	if (res) {
 		dev_err(&dev->sbd.core, "%s:%u: %s failed 0x%llx\n", __func__,
 			__LINE__, write ? "write" : "read", res);
 		return -EIO;
 	}
-	return sectors;
+	return 0;
 }
 
-static ssize_t ps3flash_read_sectors(struct ps3_storage_device *dev,
-				     u64 start_sector, u64 sectors,
-				     unsigned int sector_offset)
+static int ps3flash_writeback(struct ps3_storage_device *dev)
 {
-	u64 max_sectors, lpar;
+	struct ps3flash_private *priv = ps3_system_bus_get_drvdata(&dev->sbd);
+	int res;
 
-	max_sectors = dev->bounce_size / dev->blk_size;
-	if (sectors > max_sectors) {
-		dev_dbg(&dev->sbd.core, "%s:%u Limiting sectors to %llu\n",
-			__func__, __LINE__, max_sectors);
-		sectors = max_sectors;
-	}
+	if (!priv->dirty || priv->tag < 0)
+		return 0;
 
-	lpar = dev->bounce_lpar + sector_offset * dev->blk_size;
-	return ps3flash_read_write_sectors(dev, lpar, start_sector, sectors,
-					   0);
+	res = ps3flash_read_write_sectors(dev, priv->tag, 1);
+	if (res)
+		return res;
+
+	priv->dirty = false;
+	return 0;
 }
 
-static ssize_t ps3flash_write_chunk(struct ps3_storage_device *dev,
-				    u64 start_sector)
+static int ps3flash_fetch(struct ps3_storage_device *dev, u64 start_sector)
 {
-       u64 sectors = dev->bounce_size / dev->blk_size;
-       return ps3flash_read_write_sectors(dev, dev->bounce_lpar, start_sector,
-					  sectors, 1);
+	struct ps3flash_private *priv = ps3_system_bus_get_drvdata(&dev->sbd);
+	int res;
+
+	if (start_sector == priv->tag)
+		return 0;
+
+	res = ps3flash_writeback(dev);
+	if (res)
+		return res;
+
+	priv->tag = -1;
+
+	res = ps3flash_read_write_sectors(dev, start_sector, 0);
+	if (res)
+		return res;
+
+	priv->tag = start_sector;
+	return 0;
 }
 
 static loff_t ps3flash_llseek(struct file *file, loff_t offset, int origin)
@@ -104,18 +120,19 @@
 	return res;
 }
 
-static ssize_t ps3flash_read(struct file *file, char __user *buf, size_t count,
-			     loff_t *pos)
+static ssize_t ps3flash_read(char __user *userbuf, void *kernelbuf,
+			     size_t count, loff_t *pos)
 {
 	struct ps3_storage_device *dev = ps3flash_dev;
-	struct ps3flash_private *priv = dev->sbd.core.driver_data;
-	u64 size, start_sector, end_sector, offset;
-	ssize_t sectors_read;
+	struct ps3flash_private *priv = ps3_system_bus_get_drvdata(&dev->sbd);
+	u64 size, sector, offset;
+	int res;
 	size_t remaining, n;
+	const void *src;
 
 	dev_dbg(&dev->sbd.core,
-		"%s:%u: Reading %zu bytes at position %lld to user 0x%p\n",
-		__func__, __LINE__, count, *pos, buf);
+		"%s:%u: Reading %zu bytes at position %lld to U0x%p/K0x%p\n",
+		__func__, __LINE__, count, *pos, userbuf, kernelbuf);
 
 	size = dev->regions[dev->region_idx].size*dev->blk_size;
 	if (*pos >= size || !count)
@@ -128,156 +145,40 @@
 		count = size - *pos;
 	}
 
-	start_sector = *pos / dev->blk_size;
-	offset = *pos % dev->blk_size;
-	end_sector = DIV_ROUND_UP(*pos + count, dev->blk_size);
-
-	remaining = count;
-	do {
-		mutex_lock(&priv->mutex);
-
-		sectors_read = ps3flash_read_sectors(dev, start_sector,
-						     end_sector-start_sector,
-						     0);
-		if (sectors_read < 0) {
-			mutex_unlock(&priv->mutex);
-			goto fail;
-		}
-
-		n = min_t(u64, remaining, sectors_read*dev->blk_size-offset);
-		dev_dbg(&dev->sbd.core,
-			"%s:%u: copy %lu bytes from 0x%p to user 0x%p\n",
-			__func__, __LINE__, n, dev->bounce_buf+offset, buf);
-		if (copy_to_user(buf, dev->bounce_buf+offset, n)) {
-			mutex_unlock(&priv->mutex);
-			sectors_read = -EFAULT;
-			goto fail;
-		}
-
-		mutex_unlock(&priv->mutex);
-
-		*pos += n;
-		buf += n;
-		remaining -= n;
-		start_sector += sectors_read;
-		offset = 0;
-	} while (remaining > 0);
-
-	return count;
-
-fail:
-	return sectors_read;
-}
-
-static ssize_t ps3flash_write(struct file *file, const char __user *buf,
-			      size_t count, loff_t *pos)
-{
-	struct ps3_storage_device *dev = ps3flash_dev;
-	struct ps3flash_private *priv = dev->sbd.core.driver_data;
-	u64 size, chunk_sectors, start_write_sector, end_write_sector,
-	    end_read_sector, start_read_sector, head, tail, offset;
-	ssize_t res;
-	size_t remaining, n;
-	unsigned int sec_off;
-
-	dev_dbg(&dev->sbd.core,
-		"%s:%u: Writing %zu bytes at position %lld from user 0x%p\n",
-		__func__, __LINE__, count, *pos, buf);
-
-	size = dev->regions[dev->region_idx].size*dev->blk_size;
-	if (*pos >= size || !count)
-		return 0;
-
-	if (*pos + count > size) {
-		dev_dbg(&dev->sbd.core,
-			"%s:%u Truncating count from %zu to %llu\n", __func__,
-			__LINE__, count, size - *pos);
-		count = size - *pos;
-	}
-
-	chunk_sectors = dev->bounce_size / dev->blk_size;
-
-	start_write_sector = *pos / dev->bounce_size * chunk_sectors;
+	sector = *pos / dev->bounce_size * priv->chunk_sectors;
 	offset = *pos % dev->bounce_size;
-	end_write_sector = DIV_ROUND_UP(*pos + count, dev->bounce_size) *
-			   chunk_sectors;
-
-	end_read_sector = DIV_ROUND_UP(*pos, dev->blk_size);
-	start_read_sector = (*pos + count) / dev->blk_size;
-
-	/*
-	 * As we have to write in 256 KiB chunks, while we can read in blk_size
-	 * (usually 512 bytes) chunks, we perform the following steps:
-	 *   1. Read from start_write_sector to end_read_sector ("head")
-	 *   2. Read from start_read_sector to end_write_sector ("tail")
-	 *   3. Copy data to buffer
-	 *   4. Write from start_write_sector to end_write_sector
-	 * All of this is complicated by using only one 256 KiB bounce buffer.
-	 */
-
-	head = end_read_sector - start_write_sector;
-	tail = end_write_sector - start_read_sector;
 
 	remaining = count;
 	do {
+		n = min_t(u64, remaining, dev->bounce_size - offset);
+		src = dev->bounce_buf + offset;
+
 		mutex_lock(&priv->mutex);
 
-		if (end_read_sector >= start_read_sector) {
-			/* Merge head and tail */
-			dev_dbg(&dev->sbd.core,
-				"Merged head and tail: %llu sectors at %llu\n",
-				chunk_sectors, start_write_sector);
-			res = ps3flash_read_sectors(dev, start_write_sector,
-						    chunk_sectors, 0);
-			if (res < 0)
-				goto fail;
-		} else {
-			if (head) {
-				/* Read head */
-				dev_dbg(&dev->sbd.core,
-					"head: %llu sectors at %llu\n", head,
-					start_write_sector);
-				res = ps3flash_read_sectors(dev,
-							    start_write_sector,
-							    head, 0);
-				if (res < 0)
-					goto fail;
-			}
-			if (start_read_sector <
-			    start_write_sector+chunk_sectors) {
-				/* Read tail */
-				dev_dbg(&dev->sbd.core,
-					"tail: %llu sectors at %llu\n", tail,
-					start_read_sector);
-				sec_off = start_read_sector-start_write_sector;
-				res = ps3flash_read_sectors(dev,
-							    start_read_sector,
-							    tail, sec_off);
-				if (res < 0)
-					goto fail;
-			}
-		}
+		res = ps3flash_fetch(dev, sector);
+		if (res)
+			goto fail;
 
-		n = min_t(u64, remaining, dev->bounce_size-offset);
 		dev_dbg(&dev->sbd.core,
-			"%s:%u: copy %lu bytes from user 0x%p to 0x%p\n",
-			__func__, __LINE__, n, buf, dev->bounce_buf+offset);
-		if (copy_from_user(dev->bounce_buf+offset, buf, n)) {
-			res = -EFAULT;
-			goto fail;
+			"%s:%u: copy %lu bytes from 0x%p to U0x%p/K0x%p\n",
+			__func__, __LINE__, n, src, userbuf, kernelbuf);
+		if (userbuf) {
+			if (copy_to_user(userbuf, src, n)) {
+				res = -EFAULT;
+				goto fail;
+			}
+			userbuf += n;
 		}
-
-		res = ps3flash_write_chunk(dev, start_write_sector);
-		if (res < 0)
-			goto fail;
+		if (kernelbuf) {
+			memcpy(kernelbuf, src, n);
+			kernelbuf += n;
+		}
 
 		mutex_unlock(&priv->mutex);
 
 		*pos += n;
-		buf += n;
 		remaining -= n;
-		start_write_sector += chunk_sectors;
-		head = 0;
+		sector += priv->chunk_sectors;
 		offset = 0;
 	} while (remaining > 0);
 
@@ -288,6 +189,126 @@
 	return res;
 }
 
+static ssize_t ps3flash_write(const char __user *userbuf,
+			      const void *kernelbuf, size_t count, loff_t *pos)
+{
+	struct ps3_storage_device *dev = ps3flash_dev;
+	struct ps3flash_private *priv = ps3_system_bus_get_drvdata(&dev->sbd);
+	u64 size, sector, offset;
+	int res = 0;
+	size_t remaining, n;
+	void *dst;
+
+	dev_dbg(&dev->sbd.core,
+		"%s:%u: Writing %zu bytes at position %lld from U0x%p/K0x%p\n",
+		__func__, __LINE__, count, *pos, userbuf, kernelbuf);
+
+	size = dev->regions[dev->region_idx].size*dev->blk_size;
+	if (*pos >= size || !count)
+		return 0;
+
+	if (*pos + count > size) {
+		dev_dbg(&dev->sbd.core,
+			"%s:%u Truncating count from %zu to %llu\n", __func__,
+			__LINE__, count, size - *pos);
+		count = size - *pos;
+	}
+
+	sector = *pos / dev->bounce_size * priv->chunk_sectors;
+	offset = *pos % dev->bounce_size;
+
+	remaining = count;
+	do {
+		n = min_t(u64, remaining, dev->bounce_size - offset);
+		dst = dev->bounce_buf + offset;
+
+		mutex_lock(&priv->mutex);
+
+		if (n != dev->bounce_size)
+			res = ps3flash_fetch(dev, sector);
+		else if (sector != priv->tag)
+			res = ps3flash_writeback(dev);
+		if (res)
+			goto fail;
+
+		dev_dbg(&dev->sbd.core,
+			"%s:%u: copy %lu bytes from U0x%p/K0x%p to 0x%p\n",
+			__func__, __LINE__, n, userbuf, kernelbuf, dst);
+		if (userbuf) {
+			if (copy_from_user(dst, userbuf, n)) {
+				res = -EFAULT;
+				goto fail;
+			}
+			userbuf += n;
+		}
+		if (kernelbuf) {
+			memcpy(dst, kernelbuf, n);
+			kernelbuf += n;
+		}
+
+		priv->tag = sector;
+		priv->dirty = true;
+
+		mutex_unlock(&priv->mutex);
+
+		*pos += n;
+		remaining -= n;
+		sector += priv->chunk_sectors;
+		offset = 0;
+	} while (remaining > 0);
+
+	return count;
+
+fail:
+	mutex_unlock(&priv->mutex);
+	return res;
+}
+
+static ssize_t ps3flash_user_read(struct file *file, char __user *buf,
+				  size_t count, loff_t *pos)
+{
+	return ps3flash_read(buf, NULL, count, pos);
+}
+
+static ssize_t ps3flash_user_write(struct file *file, const char __user *buf,
+				   size_t count, loff_t *pos)
+{
+	return ps3flash_write(buf, NULL, count, pos);
+}
+
+static ssize_t ps3flash_kernel_read(void *buf, size_t count, loff_t pos)
+{
+	return ps3flash_read(NULL, buf, count, &pos);
+}
+
+static ssize_t ps3flash_kernel_write(const void *buf, size_t count,
+				     loff_t pos)
+{
+	ssize_t res;
+	int wb;
+
+	res = ps3flash_write(NULL, buf, count, &pos);
+	if (res < 0)
+		return res;
+
+	/* Make kernel writes synchronous */
+	wb = ps3flash_writeback(ps3flash_dev);
+	if (wb)
+		return wb;
+
+	return res;
+}
+
+static int ps3flash_flush(struct file *file, fl_owner_t id)
+{
+	return ps3flash_writeback(ps3flash_dev);
+}
+
+static int ps3flash_fsync(struct file *file, struct dentry *dentry,
+			  int datasync)
+{
+	return ps3flash_writeback(ps3flash_dev);
+}
 
 static irqreturn_t ps3flash_interrupt(int irq, void *data)
 {
@@ -312,12 +333,18 @@
 	return IRQ_HANDLED;
 }
 
-
 static const struct file_operations ps3flash_fops = {
 	.owner	= THIS_MODULE,
 	.llseek	= ps3flash_llseek,
-	.read	= ps3flash_read,
-	.write	= ps3flash_write,
+	.read	= ps3flash_user_read,
+	.write	= ps3flash_user_write,
+	.flush	= ps3flash_flush,
+	.fsync	= ps3flash_fsync,
+};
+
+static const struct ps3_os_area_flash_ops ps3flash_kernel_ops = {
+	.read	= ps3flash_kernel_read,
+	.write	= ps3flash_kernel_write,
 };
 
 static struct miscdevice ps3flash_misc = {
@@ -366,11 +393,13 @@
 		goto fail;
 	}
 
-	dev->sbd.core.driver_data = priv;
+	ps3_system_bus_set_drvdata(&dev->sbd, priv);
 	mutex_init(&priv->mutex);
+	priv->tag = -1;
 
 	dev->bounce_size = ps3flash_bounce_buffer.size;
 	dev->bounce_buf = ps3flash_bounce_buffer.address;
+	priv->chunk_sectors = dev->bounce_size / dev->blk_size;
 
 	error = ps3stor_setup(dev, ps3flash_interrupt);
 	if (error)
@@ -386,13 +415,15 @@
 
 	dev_info(&dev->sbd.core, "%s:%u: registered misc device %d\n",
 		 __func__, __LINE__, ps3flash_misc.minor);
+
+	ps3_os_area_flash_register(&ps3flash_kernel_ops);
 	return 0;
 
 fail_teardown:
 	ps3stor_teardown(dev);
 fail_free_priv:
 	kfree(priv);
-	dev->sbd.core.driver_data = NULL;
+	ps3_system_bus_set_drvdata(&dev->sbd, NULL);
 fail:
 	ps3flash_dev = NULL;
 	return error;
@@ -402,10 +433,11 @@
 {
 	struct ps3_storage_device *dev = to_ps3_storage_device(&_dev->core);
 
+	ps3_os_area_flash_register(NULL);
 	misc_deregister(&ps3flash_misc);
 	ps3stor_teardown(dev);
-	kfree(dev->sbd.core.driver_data);
-	dev->sbd.core.driver_data = NULL;
+	kfree(ps3_system_bus_get_drvdata(&dev->sbd));
+	ps3_system_bus_set_drvdata(&dev->sbd, NULL);
 	ps3flash_dev = NULL;
 	return 0;
 }
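
The ps3flash rewrite replaces the old head/tail read-modify-write scheme with a single cached chunk: ps3flash_fetch() loads a chunk only when the cached one differs (writing back a dirty chunk first), full-chunk writes skip the read entirely, and flush/fsync force a writeback. Stripped of the hypervisor storage calls, the caching policy amounts to the sketch below, where read_chunk() and write_chunk() are hypothetical stand-ins for the actual storage I/O:

	/* One-chunk write-back cache, as used by ps3flash above (sketch only). */
	struct chunk_cache {
		long tag;	/* chunk currently held in the bounce buffer, -1 if none */
		bool dirty;	/* modified since it was last written out? */
	};

	static int cache_writeback(struct chunk_cache *c)
	{
		if (!c->dirty || c->tag < 0)
			return 0;
		if (write_chunk(c->tag))		/* hypothetical device write */
			return -EIO;
		c->dirty = false;
		return 0;
	}

	static int cache_fetch(struct chunk_cache *c, long tag)
	{
		int err;

		if (tag == c->tag)
			return 0;			/* already cached */
		err = cache_writeback(c);		/* flush the old chunk first */
		if (err)
			return err;
		c->tag = -1;				/* buffer invalid while reloading */
		if (read_chunk(tag))			/* hypothetical device read */
			return -EIO;
		c->tag = tag;
		return 0;
	}
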
diff --git a/drivers/char/pty.c b/drivers/char/pty.c
index 5acd29e..daebe1b 100644
--- a/drivers/char/pty.c
+++ b/drivers/char/pty.c
@@ -95,23 +95,34 @@
  * a count.
  *
  * FIXME: Our pty_write method is called with our ldisc lock held but
- * not our partners. We can't just take the other one blindly without
- * risking deadlocks.
+ * not our partner's. We can't just wait on the other one blindly without
+ * risking deadlocks. At some point when everything has settled down we need
+ * to look into making pty_write at least able to sleep over an ldisc change.
+ *
+ * The return on no ldisc is a bit counter intuitive but the logic works
+ * like this. During an ldisc change the other end will flush its buffers. We
+ * thus return the full length which is identical to the case where we had
+ * proper locking and happened to queue the bytes just before the flush during
+ * the ldisc change.
  */
 static int pty_write(struct tty_struct *tty, const unsigned char *buf,
 								int count)
 {
 	struct tty_struct *to = tty->link;
-	int	c;
+	struct tty_ldisc *ld;
+	int c = count;
 
 	if (!to || tty->stopped)
 		return 0;
+	ld = tty_ldisc_ref(to);
 
-	c = to->receive_room;
-	if (c > count)
-		c = count;
-	to->ldisc->ops->receive_buf(to, buf, NULL, c);
-
+	if (ld) {
+		c = to->receive_room;
+		if (c > count)
+			c = count;
+		ld->ops->receive_buf(to, buf, NULL, c);
+		tty_ldisc_deref(ld);
+	}
 	return c;
 }
 
@@ -145,14 +156,23 @@
 static int pty_chars_in_buffer(struct tty_struct *tty)
 {
 	struct tty_struct *to = tty->link;
-	int count;
+	struct tty_ldisc *ld;
+	int count = 0;
 
 	/* We should get the line discipline lock for "tty->link" */
-	if (!to || !to->ldisc->ops->chars_in_buffer)
+	if (!to)
+		return 0;
+	/* We cannot take a sleeping reference here without deadlocking with
+	   an ldisc change - but it doesn't really matter */
+	ld = tty_ldisc_ref(to);
+	if (ld == NULL)
 		return 0;
 
 	/* The ldisc must report 0 if no characters available to be read */
-	count = to->ldisc->ops->chars_in_buffer(to);
+	if (ld->ops->chars_in_buffer)
+		count = ld->ops->chars_in_buffer(to);
+
+	tty_ldisc_deref(ld);
 
 	if (tty->driver->subtype == PTY_TYPE_SLAVE)
 		return count;
@@ -182,12 +202,19 @@
 {
 	struct tty_struct *to = tty->link;
 	unsigned long flags;
+	struct tty_ldisc *ld;
 
 	if (!to)
 		return;
+	ld = tty_ldisc_ref(to);
 
-	if (to->ldisc->ops->flush_buffer)
-		to->ldisc->ops->flush_buffer(to);
+	/* The other end is changing discipline */
+	if (!ld)
+		return;
+
+	if (ld->ops->flush_buffer)
+		ld->ops->flush_buffer(to);
+	tty_ldisc_deref(ld);
 
 	if (to->packet) {
 		spin_lock_irqsave(&tty->ctrl_lock, flags);
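
All three converted pty paths follow the same pattern: take a reference on the other side's line discipline with tty_ldisc_ref(), treat a NULL return as "an ldisc change is in flight" and fall back to a harmless result, and drop the reference with tty_ldisc_deref() once the ops call returns. Condensed, with some_op standing in for receive_buf/chars_in_buffer/flush_buffer:

	struct tty_ldisc *ld = tty_ldisc_ref(to);
	int ret = 0;				/* harmless default while the ldisc changes */

	if (ld) {
		if (ld->ops->some_op)		/* ops hooks are optional */
			ret = ld->ops->some_op(to);
		tty_ldisc_deref(ld);
	}
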
diff --git a/drivers/char/raw.c b/drivers/char/raw.c
index db32f0e..05f9d18 100644
--- a/drivers/char/raw.c
+++ b/drivers/char/raw.c
@@ -261,6 +261,11 @@
 
 static struct cdev raw_cdev;
 
+static char *raw_nodename(struct device *dev)
+{
+	return kasprintf(GFP_KERNEL, "raw/%s", dev_name(dev));
+}
+
 static int __init raw_init(void)
 {
 	dev_t dev = MKDEV(RAW_MAJOR, 0);
@@ -284,6 +289,7 @@
 		ret = PTR_ERR(raw_class);
 		goto error_region;
 	}
+	raw_class->nodename = raw_nodename;
 	device_create(raw_class, NULL, MKDEV(RAW_MAJOR, 0), NULL, "rawctl");
 
 	return 0;
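
misc.c and raw.c both hook the new nodename callback so userspace (udev, and later devtmpfs) learns where a device node should live: misc devices pass a literal name via miscdevice.devnode, while raw asks for a raw/ subdirectory. For a hypothetical "foo" driver the two variants would look roughly like this (foo_fops and foo_class are assumed to exist elsewhere):

	/* Variant 1: misc device with an explicit node name (cf. hw_random above) */
	static struct miscdevice foo_miscdev = {
		.minor	 = MISC_DYNAMIC_MINOR,
		.name	 = "foo",
		.devnode = "foo",		/* node name requested from userspace */
		.fops	 = &foo_fops,
	};

	/* Variant 2: class whose nodes live in a subdirectory (cf. raw above) */
	static struct class *foo_class;

	static char *foo_nodename(struct device *dev)
	{
		return kasprintf(GFP_KERNEL, "foo/%s", dev_name(dev));
	}

	static int __init foo_init(void)
	{
		foo_class = class_create(THIS_MODULE, "foo");
		if (IS_ERR(foo_class))
			return PTR_ERR(foo_class);
		foo_class->nodename = foo_nodename;
		return 0;
	}
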
diff --git a/drivers/char/tty_io.c b/drivers/char/tty_io.c
index 939e198..a3afa0c 100644
--- a/drivers/char/tty_io.c
+++ b/drivers/char/tty_io.c
@@ -1263,7 +1263,9 @@
 	tty->count++;
 	tty->driver = driver; /* N.B. why do this every time?? */
 
+	mutex_lock(&tty->ldisc_mutex);
 	WARN_ON(!test_bit(TTY_LDISC, &tty->flags));
+	mutex_unlock(&tty->ldisc_mutex);
 
 	return 0;
 }
diff --git a/drivers/char/tty_ioctl.c b/drivers/char/tty_ioctl.c
index 8116bb1c..b24f6c6 100644
--- a/drivers/char/tty_ioctl.c
+++ b/drivers/char/tty_ioctl.c
@@ -947,7 +947,6 @@
 	void __user *p = (void __user *)arg;
 	int ret = 0;
 	struct ktermios kterm;
-	struct termiox ktermx;
 
 	if (tty->driver->type == TTY_DRIVER_TYPE_PTY &&
 	    tty->driver->subtype == PTY_TYPE_MASTER)
@@ -1049,7 +1048,8 @@
 		return ret;
 #endif
 #ifdef TCGETX
-	case TCGETX:
+	case TCGETX: {
+		struct termiox ktermx;
 		if (real_tty->termiox == NULL)
 			return -EINVAL;
 		mutex_lock(&real_tty->termios_mutex);
@@ -1058,6 +1058,7 @@
 		if (copy_to_user(p, &ktermx, sizeof(struct termiox)))
 			ret = -EFAULT;
 		return ret;
+	}
 	case TCSETX:
 		return set_termiox(real_tty, p, 0);
 	case TCSETXW:
diff --git a/drivers/char/tty_ldisc.c b/drivers/char/tty_ldisc.c
index 39c8f86..a19e935 100644
--- a/drivers/char/tty_ldisc.c
+++ b/drivers/char/tty_ldisc.c
@@ -148,8 +148,10 @@
 		}
 	}
 	spin_unlock_irqrestore(&tty_ldisc_lock, flags);
-	if (err)
+	if (err) {
+		kfree(ld);
 		return ERR_PTR(err);
+	}
 	return ld;
 }
 
@@ -205,6 +207,7 @@
 	ldo->refcount--;
 	module_put(ldo->owner);
 	spin_unlock_irqrestore(&tty_ldisc_lock, flags);
+	WARN_ON(ld->refcount);
 	kfree(ld);
 }
 
@@ -262,7 +265,7 @@
  *	@ld: line discipline
  *
  *	Install an instance of a line discipline into a tty structure. The
- *	ldisc must have a reference count above zero to ensure it remains/
+ *	ldisc must have a reference count above zero to ensure it remains.
  *	The tty instance refcount starts at zero.
  *
  *	Locking:
@@ -791,6 +794,8 @@
 		/* Avoid racing set_ldisc */
 		mutex_lock(&tty->ldisc_mutex);
 		/* Switch back to N_TTY */
+		tty_ldisc_halt(tty);
+		tty_ldisc_wait_idle(tty);
 		tty_ldisc_reinit(tty);
 		/* At this point we have a closed ldisc and we want to
 		   reopen it. We could defer this to the next open but
diff --git a/drivers/eisa/pci_eisa.c b/drivers/eisa/pci_eisa.c
index 74edb1d..0dd0f63 100644
--- a/drivers/eisa/pci_eisa.c
+++ b/drivers/eisa/pci_eisa.c
@@ -31,11 +31,11 @@
 	}
 
 	pci_eisa_root.dev              = &pdev->dev;
-	pci_eisa_root.dev->driver_data = &pci_eisa_root;
 	pci_eisa_root.res	       = pdev->bus->resource[0];
 	pci_eisa_root.bus_base_addr    = pdev->bus->resource[0]->start;
 	pci_eisa_root.slots	       = EISA_MAX_SLOTS;
 	pci_eisa_root.dma_mask         = pdev->dma_mask;
+	dev_set_drvdata(pci_eisa_root.dev, &pci_eisa_root);
 
 	if (eisa_root_register (&pci_eisa_root)) {
 		printk (KERN_ERR "pci_eisa : Could not register EISA root\n");
diff --git a/drivers/eisa/virtual_root.c b/drivers/eisa/virtual_root.c
index 3074879..535e4f9 100644
--- a/drivers/eisa/virtual_root.c
+++ b/drivers/eisa/virtual_root.c
@@ -57,7 +57,7 @@
 
 	eisa_bus_root.force_probe = force_probe;
 	
-	eisa_root_dev.dev.driver_data = &eisa_bus_root;
+	dev_set_drvdata(&eisa_root_dev.dev, &eisa_bus_root);
 
 	if (eisa_root_register (&eisa_bus_root)) {
 		/* A real bridge may have been registered before
diff --git a/drivers/firewire/Makefile b/drivers/firewire/Makefile
index a7c31e9..bc3b9bf 100644
--- a/drivers/firewire/Makefile
+++ b/drivers/firewire/Makefile
@@ -2,10 +2,10 @@
 # Makefile for the Linux IEEE 1394 implementation
 #
 
-firewire-core-y += fw-card.o fw-topology.o fw-transaction.o fw-iso.o \
-                   fw-device.o fw-cdev.o
-firewire-ohci-y += fw-ohci.o
-firewire-sbp2-y += fw-sbp2.o
+firewire-core-y += core-card.o core-cdev.o core-device.o \
+                   core-iso.o core-topology.o core-transaction.o
+firewire-ohci-y += ohci.o
+firewire-sbp2-y += sbp2.o
 
 obj-$(CONFIG_FIREWIRE) += firewire-core.o
 obj-$(CONFIG_FIREWIRE_OHCI) += firewire-ohci.o
diff --git a/drivers/firewire/fw-card.c b/drivers/firewire/core-card.c
similarity index 97%
rename from drivers/firewire/fw-card.c
rename to drivers/firewire/core-card.c
index 8b8c8c2..4c1be64 100644
--- a/drivers/firewire/fw-card.c
+++ b/drivers/firewire/core-card.c
@@ -16,18 +16,27 @@
  * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
  */
 
+#include <linux/bug.h>
 #include <linux/completion.h>
 #include <linux/crc-itu-t.h>
-#include <linux/delay.h>
 #include <linux/device.h>
 #include <linux/errno.h>
+#include <linux/firewire.h>
+#include <linux/firewire-constants.h>
+#include <linux/jiffies.h>
+#include <linux/kernel.h>
 #include <linux/kref.h>
+#include <linux/list.h>
 #include <linux/module.h>
 #include <linux/mutex.h>
+#include <linux/spinlock.h>
+#include <linux/timer.h>
+#include <linux/workqueue.h>
 
-#include "fw-transaction.h"
-#include "fw-topology.h"
-#include "fw-device.h"
+#include <asm/atomic.h>
+#include <asm/byteorder.h>
+
+#include "core.h"
 
 int fw_compute_block_crc(u32 *block)
 {
@@ -181,12 +190,6 @@
 	mutex_unlock(&card_mutex);
 }
 
-static int set_broadcast_channel(struct device *dev, void *data)
-{
-	fw_device_set_broadcast_channel(fw_device(dev), (long)data);
-	return 0;
-}
-
 static void allocate_broadcast_channel(struct fw_card *card, int generation)
 {
 	int channel, bandwidth = 0;
@@ -196,7 +199,7 @@
 	if (channel == 31) {
 		card->broadcast_channel_allocated = true;
 		device_for_each_child(card->device, (void *)(long)generation,
-				      set_broadcast_channel);
+				      fw_device_set_broadcast_channel);
 	}
 }
 
diff --git a/drivers/firewire/fw-cdev.c b/drivers/firewire/core-cdev.c
similarity index 99%
rename from drivers/firewire/fw-cdev.c
rename to drivers/firewire/core-cdev.c
index 7eb6594..d1d30c6 100644
--- a/drivers/firewire/fw-cdev.c
+++ b/drivers/firewire/core-cdev.c
@@ -22,6 +22,7 @@
 #include <linux/delay.h>
 #include <linux/device.h>
 #include <linux/errno.h>
+#include <linux/firewire.h>
 #include <linux/firewire-cdev.h>
 #include <linux/idr.h>
 #include <linux/jiffies.h>
@@ -34,16 +35,14 @@
 #include <linux/preempt.h>
 #include <linux/spinlock.h>
 #include <linux/time.h>
+#include <linux/uaccess.h>
 #include <linux/vmalloc.h>
 #include <linux/wait.h>
 #include <linux/workqueue.h>
 
 #include <asm/system.h>
-#include <asm/uaccess.h>
 
-#include "fw-device.h"
-#include "fw-topology.h"
-#include "fw-transaction.h"
+#include "core.h"
 
 struct client {
 	u32 version;
@@ -739,15 +738,11 @@
 static int ioctl_add_descriptor(struct client *client, void *buffer)
 {
 	struct fw_cdev_add_descriptor *request = buffer;
-	struct fw_card *card = client->device->card;
 	struct descriptor_resource *r;
 	int ret;
 
 	/* Access policy: Allow this ioctl only on local nodes' device files. */
-	spin_lock_irq(&card->lock);
-	ret = client->device->node_id != card->local_node->node_id;
-	spin_unlock_irq(&card->lock);
-	if (ret)
+	if (!client->device->is_local)
 		return -ENOSYS;
 
 	if (request->length > 256)
diff --git a/drivers/firewire/fw-device.c b/drivers/firewire/core-device.c
similarity index 88%
rename from drivers/firewire/fw-device.c
rename to drivers/firewire/core-device.c
index a47e212..97e656a 100644
--- a/drivers/firewire/fw-device.c
+++ b/drivers/firewire/core-device.c
@@ -22,10 +22,14 @@
 #include <linux/delay.h>
 #include <linux/device.h>
 #include <linux/errno.h>
+#include <linux/firewire.h>
+#include <linux/firewire-constants.h>
 #include <linux/idr.h>
 #include <linux/jiffies.h>
 #include <linux/kobject.h>
 #include <linux/list.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
 #include <linux/mutex.h>
 #include <linux/rwsem.h>
 #include <linux/semaphore.h>
@@ -33,11 +37,11 @@
 #include <linux/string.h>
 #include <linux/workqueue.h>
 
+#include <asm/atomic.h>
+#include <asm/byteorder.h>
 #include <asm/system.h>
 
-#include "fw-device.h"
-#include "fw-topology.h"
-#include "fw-transaction.h"
+#include "core.h"
 
 void fw_csr_iterator_init(struct fw_csr_iterator *ci, u32 * p)
 {
@@ -55,9 +59,10 @@
 }
 EXPORT_SYMBOL(fw_csr_iterator_next);
 
-static int is_fw_unit(struct device *dev);
+static bool is_fw_unit(struct device *dev);
 
-static int match_unit_directory(u32 * directory, const struct fw_device_id *id)
+static int match_unit_directory(u32 *directory, u32 match_flags,
+				const struct ieee1394_device_id *id)
 {
 	struct fw_csr_iterator ci;
 	int key, value, match;
@@ -65,31 +70,42 @@
 	match = 0;
 	fw_csr_iterator_init(&ci, directory);
 	while (fw_csr_iterator_next(&ci, &key, &value)) {
-		if (key == CSR_VENDOR && value == id->vendor)
-			match |= FW_MATCH_VENDOR;
-		if (key == CSR_MODEL && value == id->model)
-			match |= FW_MATCH_MODEL;
+		if (key == CSR_VENDOR && value == id->vendor_id)
+			match |= IEEE1394_MATCH_VENDOR_ID;
+		if (key == CSR_MODEL && value == id->model_id)
+			match |= IEEE1394_MATCH_MODEL_ID;
 		if (key == CSR_SPECIFIER_ID && value == id->specifier_id)
-			match |= FW_MATCH_SPECIFIER_ID;
+			match |= IEEE1394_MATCH_SPECIFIER_ID;
 		if (key == CSR_VERSION && value == id->version)
-			match |= FW_MATCH_VERSION;
+			match |= IEEE1394_MATCH_VERSION;
 	}
 
-	return (match & id->match_flags) == id->match_flags;
+	return (match & match_flags) == match_flags;
 }
 
 static int fw_unit_match(struct device *dev, struct device_driver *drv)
 {
 	struct fw_unit *unit = fw_unit(dev);
-	struct fw_driver *driver = fw_driver(drv);
-	int i;
+	struct fw_device *device;
+	const struct ieee1394_device_id *id;
 
 	/* We only allow binding to fw_units. */
 	if (!is_fw_unit(dev))
 		return 0;
 
-	for (i = 0; driver->id_table[i].match_flags != 0; i++) {
-		if (match_unit_directory(unit->directory, &driver->id_table[i]))
+	device = fw_parent_device(unit);
+	id = container_of(drv, struct fw_driver, driver)->id_table;
+
+	for (; id->match_flags != 0; id++) {
+		if (match_unit_directory(unit->directory, id->match_flags, id))
+			return 1;
+
+		/* Also check vendor ID in the root directory. */
+		if ((id->match_flags & IEEE1394_MATCH_VENDOR_ID) &&
+		    match_unit_directory(&device->config_rom[5],
+				IEEE1394_MATCH_VENDOR_ID, id) &&
+		    match_unit_directory(unit->directory, id->match_flags
+				& ~IEEE1394_MATCH_VENDOR_ID, id))
 			return 1;
 	}
 
@@ -98,7 +114,7 @@
 
 static int get_modalias(struct fw_unit *unit, char *buffer, size_t buffer_size)
 {
-	struct fw_device *device = fw_device(unit->device.parent);
+	struct fw_device *device = fw_parent_device(unit);
 	struct fw_csr_iterator ci;
 
 	int key, value;
@@ -292,8 +308,7 @@
 		group->attrs[j++] = &attr->attr;
 	}
 
-	BUG_ON(j >= ARRAY_SIZE(group->attrs));
-	group->attrs[j++] = NULL;
+	group->attrs[j] = NULL;
 	group->groups[0] = &group->group;
 	group->groups[1] = NULL;
 	group->group.attrs = group->attrs;
@@ -356,9 +371,56 @@
 	return ret;
 }
 
+static int units_sprintf(char *buf, u32 *directory)
+{
+	struct fw_csr_iterator ci;
+	int key, value;
+	int specifier_id = 0;
+	int version = 0;
+
+	fw_csr_iterator_init(&ci, directory);
+	while (fw_csr_iterator_next(&ci, &key, &value)) {
+		switch (key) {
+		case CSR_SPECIFIER_ID:
+			specifier_id = value;
+			break;
+		case CSR_VERSION:
+			version = value;
+			break;
+		}
+	}
+
+	return sprintf(buf, "0x%06x:0x%06x ", specifier_id, version);
+}
+
+static ssize_t units_show(struct device *dev,
+			  struct device_attribute *attr, char *buf)
+{
+	struct fw_device *device = fw_device(dev);
+	struct fw_csr_iterator ci;
+	int key, value, i = 0;
+
+	down_read(&fw_device_rwsem);
+	fw_csr_iterator_init(&ci, &device->config_rom[5]);
+	while (fw_csr_iterator_next(&ci, &key, &value)) {
+		if (key != (CSR_UNIT | CSR_DIRECTORY))
+			continue;
+		i += units_sprintf(&buf[i], ci.p + value - 1);
+		if (i >= PAGE_SIZE - (8 + 1 + 8 + 1))
+			break;
+	}
+	up_read(&fw_device_rwsem);
+
+	if (i)
+		buf[i - 1] = '\n';
+
+	return i;
+}
+
 static struct device_attribute fw_device_attributes[] = {
 	__ATTR_RO(config_rom),
 	__ATTR_RO(guid),
+	__ATTR_RO(units),
 	__ATTR_NULL,
 };
 
@@ -518,7 +580,9 @@
 
 	kfree(old_rom);
 	ret = 0;
-	device->cmc = rom[2] >> 30 & 1;
+	device->max_rec	= rom[2] >> 12 & 0xf;
+	device->cmc	= rom[2] >> 30 & 1;
+	device->irmc	= rom[2] >> 31 & 1;
  out:
 	kfree(rom);
 
@@ -537,7 +601,7 @@
 	.release	= fw_unit_release,
 };
 
-static int is_fw_unit(struct device *dev)
+static bool is_fw_unit(struct device *dev)
 {
 	return dev->type == &fw_unit_type;
 }
@@ -570,9 +634,13 @@
 		unit->device.parent = &device->device;
 		dev_set_name(&unit->device, "%s.%d", dev_name(&device->device), i++);
 
+		BUILD_BUG_ON(ARRAY_SIZE(unit->attribute_group.attrs) <
+				ARRAY_SIZE(fw_unit_attributes) +
+				ARRAY_SIZE(config_rom_attributes));
 		init_fw_attribute_group(&unit->device,
 					fw_unit_attributes,
 					&unit->attribute_group);
+
 		if (device_register(&unit->device) < 0)
 			goto skip_unit;
 
@@ -683,6 +751,11 @@
 	.release = fw_device_release,
 };
 
+static bool is_fw_device(struct device *dev)
+{
+	return dev->type == &fw_device_type;
+}
+
 static int update_unit(struct device *dev, void *data)
 {
 	struct fw_unit *unit = fw_unit(dev);
@@ -719,6 +792,9 @@
 	struct fw_card *card = new->card;
 	int match = 0;
 
+	if (!is_fw_device(dev))
+		return 0;
+
 	down_read(&fw_device_rwsem); /* serialize config_rom access */
 	spin_lock_irq(&card->lock);  /* serialize node access */
 
@@ -758,7 +834,7 @@
 
 enum { BC_UNKNOWN = 0, BC_UNIMPLEMENTED, BC_IMPLEMENTED, };
 
-void fw_device_set_broadcast_channel(struct fw_device *device, int generation)
+static void set_broadcast_channel(struct fw_device *device, int generation)
 {
 	struct fw_card *card = device->card;
 	__be32 data;
@@ -767,6 +843,20 @@
 	if (!card->broadcast_channel_allocated)
 		return;
 
+	/*
+	 * The Broadcast_Channel Valid bit is required by nodes which want to
+	 * transmit on this channel.  Such transmissions are practically
+	 * exclusive to IP over 1394 (RFC 2734).  IP capable nodes are required
+	 * to be IRM capable and have a max_rec of 8 or more.  We use this fact
+	 * to narrow down to which nodes we send Broadcast_Channel updates.
+	 */
+	if (!device->irmc || device->max_rec < 8)
+		return;
+
+	/*
+	 * Some 1394-1995 nodes crash if this 1394a-2000 register is written.
+	 * Perform a read test first.
+	 */
 	if (device->bc_implemented == BC_UNKNOWN) {
 		rcode = fw_run_transaction(card, TCODE_READ_QUADLET_REQUEST,
 				device->node_id, generation, device->max_speed,
@@ -794,6 +884,14 @@
 	}
 }
 
+int fw_device_set_broadcast_channel(struct device *dev, void *gen)
+{
+	if (is_fw_device(dev))
+		set_broadcast_channel(fw_device(dev), (long)gen);
+
+	return 0;
+}
+
 static void fw_device_init(struct work_struct *work)
 {
 	struct fw_device *device =
@@ -849,9 +947,13 @@
 	device->device.devt = MKDEV(fw_cdev_major, minor);
 	dev_set_name(&device->device, "fw%d", minor);
 
+	BUILD_BUG_ON(ARRAY_SIZE(device->attribute_group.attrs) <
+			ARRAY_SIZE(fw_device_attributes) +
+			ARRAY_SIZE(config_rom_attributes));
 	init_fw_attribute_group(&device->device,
 				fw_device_attributes,
 				&device->attribute_group);
+
 	if (device_add(&device->device)) {
 		fw_error("Failed to add device.\n");
 		goto error_with_cdev;
@@ -888,7 +990,7 @@
 				  1 << device->max_speed);
 		device->config_rom_retries = 0;
 
-		fw_device_set_broadcast_channel(device, device->generation);
+		set_broadcast_channel(device, device->generation);
 	}
 
 	/*
@@ -993,6 +1095,9 @@
 
 	create_units(device);
 
+	/* Userspace may want to re-read attributes. */
+	kobject_uevent(&device->device.kobj, KOBJ_CHANGE);
+
 	if (atomic_cmpxchg(&device->state,
 			   FW_DEVICE_INITIALIZING,
 			   FW_DEVICE_RUNNING) == FW_DEVICE_GONE)
@@ -1042,6 +1147,7 @@
 		device->node = fw_node_get(node);
 		device->node_id = node->node_id;
 		device->generation = card->generation;
+		device->is_local = node == card->local_node;
 		mutex_init(&device->client_list_mutex);
 		INIT_LIST_HEAD(&device->client_list);
 
@@ -1075,7 +1181,7 @@
 			    FW_DEVICE_INITIALIZING) == FW_DEVICE_RUNNING) {
 			PREPARE_DELAYED_WORK(&device->work, fw_device_refresh);
 			schedule_delayed_work(&device->work,
-				node == card->local_node ? 0 : INITIAL_DELAY);
+				device->is_local ? 0 : INITIAL_DELAY);
 		}
 		break;
 
diff --git a/drivers/firewire/fw-iso.c b/drivers/firewire/core-iso.c
similarity index 98%
rename from drivers/firewire/fw-iso.c
rename to drivers/firewire/core-iso.c
index 2baf100..28076c8 100644
--- a/drivers/firewire/fw-iso.c
+++ b/drivers/firewire/core-iso.c
@@ -22,14 +22,16 @@
 
 #include <linux/dma-mapping.h>
 #include <linux/errno.h>
+#include <linux/firewire.h>
 #include <linux/firewire-constants.h>
 #include <linux/kernel.h>
 #include <linux/mm.h>
 #include <linux/spinlock.h>
 #include <linux/vmalloc.h>
 
-#include "fw-topology.h"
-#include "fw-transaction.h"
+#include <asm/byteorder.h>
+
+#include "core.h"
 
 /*
  * Isochronous DMA context management
diff --git a/drivers/firewire/fw-topology.c b/drivers/firewire/core-topology.c
similarity index 97%
rename from drivers/firewire/fw-topology.c
rename to drivers/firewire/core-topology.c
index d0deecc..fddf2b3 100644
--- a/drivers/firewire/fw-topology.c
+++ b/drivers/firewire/core-topology.c
@@ -18,13 +18,22 @@
  * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
  */
 
-#include <linux/module.h>
-#include <linux/wait.h>
+#include <linux/bug.h>
 #include <linux/errno.h>
-#include <asm/bug.h>
+#include <linux/firewire.h>
+#include <linux/firewire-constants.h>
+#include <linux/jiffies.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+
+#include <asm/atomic.h>
 #include <asm/system.h>
-#include "fw-transaction.h"
-#include "fw-topology.h"
+
+#include "core.h"
 
 #define SELF_ID_PHY_ID(q)		(((q) >> 24) & 0x3f)
 #define SELF_ID_EXTENDED(q)		(((q) >> 23) & 0x01)
@@ -37,6 +46,11 @@
 
 #define SELF_ID_EXT_SEQUENCE(q)		(((q) >> 20) & 0x07)
 
+#define SELFID_PORT_CHILD	0x3
+#define SELFID_PORT_PARENT	0x2
+#define SELFID_PORT_NCONN	0x1
+#define SELFID_PORT_NONE	0x0
+
 static u32 *count_ports(u32 *sid, int *total_port_count, int *child_port_count)
 {
 	u32 q;
diff --git a/drivers/firewire/fw-transaction.c b/drivers/firewire/core-transaction.c
similarity index 97%
rename from drivers/firewire/fw-transaction.c
rename to drivers/firewire/core-transaction.c
index 283dac6..479b22f 100644
--- a/drivers/firewire/fw-transaction.c
+++ b/drivers/firewire/core-transaction.c
@@ -18,24 +18,28 @@
  * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
  */
 
+#include <linux/bug.h>
 #include <linux/completion.h>
-#include <linux/idr.h>
-#include <linux/kernel.h>
-#include <linux/kref.h>
-#include <linux/module.h>
-#include <linux/mutex.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/firewire.h>
+#include <linux/firewire-constants.h>
+#include <linux/fs.h>
 #include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/pci.h>
-#include <linux/delay.h>
-#include <linux/poll.h>
+#include <linux/idr.h>
+#include <linux/jiffies.h>
+#include <linux/kernel.h>
 #include <linux/list.h>
-#include <linux/kthread.h>
-#include <asm/uaccess.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/types.h>
 
-#include "fw-transaction.h"
-#include "fw-topology.h"
-#include "fw-device.h"
+#include <asm/byteorder.h>
+
+#include "core.h"
 
 #define HEADER_PRI(pri)			((pri) << 0)
 #define HEADER_TCODE(tcode)		((tcode) << 4)
@@ -60,6 +64,10 @@
 #define HEADER_DESTINATION_IS_BROADCAST(q) \
 	(((q) & HEADER_DESTINATION(0x3f)) == HEADER_DESTINATION(0x3f))
 
+#define PHY_PACKET_CONFIG	0x0
+#define PHY_PACKET_LINK_ON	0x1
+#define PHY_PACKET_SELF_ID	0x2
+
 #define PHY_CONFIG_GAP_COUNT(gap_count)	(((gap_count) << 16) | (1 << 22))
 #define PHY_CONFIG_ROOT_ID(node_id)	((((node_id) & 0x3f) << 24) | (1 << 23))
 #define PHY_IDENTIFIER(id)		((id) << 30)
@@ -74,7 +82,7 @@
 	list_for_each_entry(t, &card->transaction_list, link) {
 		if (t == transaction) {
 			list_del(&t->link);
-			card->tlabel_mask &= ~(1 << t->tlabel);
+			card->tlabel_mask &= ~(1ULL << t->tlabel);
 			break;
 		}
 	}
@@ -280,14 +288,14 @@
 	spin_lock_irqsave(&card->lock, flags);
 
 	tlabel = card->current_tlabel;
-	if (card->tlabel_mask & (1 << tlabel)) {
+	if (card->tlabel_mask & (1ULL << tlabel)) {
 		spin_unlock_irqrestore(&card->lock, flags);
 		callback(card, RCODE_SEND_ERROR, NULL, 0, callback_data);
 		return;
 	}
 
-	card->current_tlabel = (card->current_tlabel + 1) & 0x1f;
-	card->tlabel_mask |= (1 << tlabel);
+	card->current_tlabel = (card->current_tlabel + 1) & 0x3f;
+	card->tlabel_mask |= (1ULL << tlabel);
 
 	t->node_id = destination_id;
 	t->tlabel = tlabel;
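
The transaction-label hunks above double the label space from 32 to 64: current_tlabel now wraps at 0x3f instead of 0x1f, and tlabel_mask has to be a 64-bit quantity manipulated with 1ULL shifts, since labels 32 to 63 would not fit in a 32-bit mask. Reduced to its core, the allocate/free bookkeeping is the following sketch:

	/* Sketch of the 64-entry transaction-label bookkeeping used above. */
	static int tlabel_alloc(u64 *mask, int *next)
	{
		int tlabel = *next;

		if (*mask & (1ULL << tlabel))	/* 1ULL: labels 32..63 need 64-bit math */
			return -EBUSY;		/* label still in flight */
		*next = (*next + 1) & 0x3f;	/* wrap within 64 labels */
		*mask |= 1ULL << tlabel;
		return tlabel;
	}

	static void tlabel_free(u64 *mask, int tlabel)
	{
		*mask &= ~(1ULL << tlabel);
	}
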
diff --git a/drivers/firewire/core.h b/drivers/firewire/core.h
new file mode 100644
index 0000000..0a25a7b
--- /dev/null
+++ b/drivers/firewire/core.h
@@ -0,0 +1,293 @@
+#ifndef _FIREWIRE_CORE_H
+#define _FIREWIRE_CORE_H
+
+#include <linux/dma-mapping.h>
+#include <linux/fs.h>
+#include <linux/list.h>
+#include <linux/idr.h>
+#include <linux/mm_types.h>
+#include <linux/rwsem.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+
+#include <asm/atomic.h>
+
+struct device;
+struct fw_card;
+struct fw_device;
+struct fw_iso_buffer;
+struct fw_iso_context;
+struct fw_iso_packet;
+struct fw_node;
+struct fw_packet;
+
+
+/* -card */
+
+/* bitfields within the PHY registers */
+#define PHY_LINK_ACTIVE		0x80
+#define PHY_CONTENDER		0x40
+#define PHY_BUS_RESET		0x40
+#define PHY_BUS_SHORT_RESET	0x40
+
+#define BANDWIDTH_AVAILABLE_INITIAL	4915
+#define BROADCAST_CHANNEL_INITIAL	(1 << 31 | 31)
+#define BROADCAST_CHANNEL_VALID		(1 << 30)
+
+struct fw_card_driver {
+	/*
+	 * Enable the given card with the given initial config rom.
+	 * This function is expected to activate the card, and either
+	 * enable the PHY or set the link_on bit and initiate a bus
+	 * reset.
+	 */
+	int (*enable)(struct fw_card *card, u32 *config_rom, size_t length);
+
+	int (*update_phy_reg)(struct fw_card *card, int address,
+			      int clear_bits, int set_bits);
+
+	/*
+	 * Update the config rom for an enabled card.  This function
+	 * should change the config rom that is presented on the bus
+	 * and initiate a bus reset.
+	 */
+	int (*set_config_rom)(struct fw_card *card,
+			      u32 *config_rom, size_t length);
+
+	void (*send_request)(struct fw_card *card, struct fw_packet *packet);
+	void (*send_response)(struct fw_card *card, struct fw_packet *packet);
+	/* Calling cancel is valid once a packet has been submitted. */
+	int (*cancel_packet)(struct fw_card *card, struct fw_packet *packet);
+
+	/*
+	 * Allow the specified node ID to do direct DMA out and in of
+	 * host memory.  The card will disable this for all nodes when
+	 * a bus reset happens, so drivers need to re-enable this after a
+	 * bus reset.  Returns 0 on success, -ENODEV if the card
+	 * doesn't support this, -ESTALE if the generation doesn't
+	 * match.
+	 */
+	int (*enable_phys_dma)(struct fw_card *card,
+			       int node_id, int generation);
+
+	u64 (*get_bus_time)(struct fw_card *card);
+
+	struct fw_iso_context *
+	(*allocate_iso_context)(struct fw_card *card,
+				int type, int channel, size_t header_size);
+	void (*free_iso_context)(struct fw_iso_context *ctx);
+
+	int (*start_iso)(struct fw_iso_context *ctx,
+			 s32 cycle, u32 sync, u32 tags);
+
+	int (*queue_iso)(struct fw_iso_context *ctx,
+			 struct fw_iso_packet *packet,
+			 struct fw_iso_buffer *buffer,
+			 unsigned long payload);
+
+	int (*stop_iso)(struct fw_iso_context *ctx);
+};
+
+void fw_card_initialize(struct fw_card *card,
+		const struct fw_card_driver *driver, struct device *device);
+int fw_card_add(struct fw_card *card,
+		u32 max_receive, u32 link_speed, u64 guid);
+void fw_core_remove_card(struct fw_card *card);
+int fw_core_initiate_bus_reset(struct fw_card *card, int short_reset);
+int fw_compute_block_crc(u32 *block);
+void fw_schedule_bm_work(struct fw_card *card, unsigned long delay);
+
+struct fw_descriptor {
+	struct list_head link;
+	size_t length;
+	u32 immediate;
+	u32 key;
+	const u32 *data;
+};
+
+int fw_core_add_descriptor(struct fw_descriptor *desc);
+void fw_core_remove_descriptor(struct fw_descriptor *desc);
+
+
+/* -cdev */
+
+extern const struct file_operations fw_device_ops;
+
+void fw_device_cdev_update(struct fw_device *device);
+void fw_device_cdev_remove(struct fw_device *device);
+
+
+/* -device */
+
+extern struct rw_semaphore fw_device_rwsem;
+extern struct idr fw_device_idr;
+extern int fw_cdev_major;
+
+struct fw_device *fw_device_get_by_devt(dev_t devt);
+int fw_device_set_broadcast_channel(struct device *dev, void *gen);
+void fw_node_event(struct fw_card *card, struct fw_node *node, int event);
+
+
+/* -iso */
+
+/*
+ * The iso packet format allows for an immediate header/payload part
+ * stored in 'header' immediately after the packet info plus an
+ * indirect payload part that is pointed to by the 'payload' field.
+ * Applications can use one or the other or both to implement simple
+ * low-bandwidth streaming (e.g. audio) or more advanced
+ * scatter-gather streaming (e.g. assembling video frames automatically).
+ */
+struct fw_iso_packet {
+	u16 payload_length;	/* Length of indirect payload. */
+	u32 interrupt:1;	/* Generate interrupt on this packet */
+	u32 skip:1;		/* Set to not send packet at all. */
+	u32 tag:2;
+	u32 sy:4;
+	u32 header_length:8;	/* Length of immediate header. */
+	u32 header[0];
+};
+
+#define FW_ISO_CONTEXT_TRANSMIT	0
+#define FW_ISO_CONTEXT_RECEIVE	1
+
+#define FW_ISO_CONTEXT_MATCH_TAG0	 1
+#define FW_ISO_CONTEXT_MATCH_TAG1	 2
+#define FW_ISO_CONTEXT_MATCH_TAG2	 4
+#define FW_ISO_CONTEXT_MATCH_TAG3	 8
+#define FW_ISO_CONTEXT_MATCH_ALL_TAGS	15
+
+/*
+ * An iso buffer is just a set of pages mapped for DMA in the
+ * specified direction.  Since the pages are to be used for DMA, they
+ * are not mapped into the kernel virtual address space.  We store the
+ * DMA address in the page private. The helper function
+ * fw_iso_buffer_map() will map the pages into a given vma.
+ */
+struct fw_iso_buffer {
+	enum dma_data_direction direction;
+	struct page **pages;
+	int page_count;
+};
+
+typedef void (*fw_iso_callback_t)(struct fw_iso_context *context,
+				  u32 cycle, size_t header_length,
+				  void *header, void *data);
+
+struct fw_iso_context {
+	struct fw_card *card;
+	int type;
+	int channel;
+	int speed;
+	size_t header_size;
+	fw_iso_callback_t callback;
+	void *callback_data;
+};
+
+int fw_iso_buffer_init(struct fw_iso_buffer *buffer, struct fw_card *card,
+		       int page_count, enum dma_data_direction direction);
+int fw_iso_buffer_map(struct fw_iso_buffer *buffer, struct vm_area_struct *vma);
+void fw_iso_buffer_destroy(struct fw_iso_buffer *buffer, struct fw_card *card);
+
+struct fw_iso_context *fw_iso_context_create(struct fw_card *card,
+		int type, int channel, int speed, size_t header_size,
+		fw_iso_callback_t callback, void *callback_data);
+int fw_iso_context_queue(struct fw_iso_context *ctx,
+			 struct fw_iso_packet *packet,
+			 struct fw_iso_buffer *buffer,
+			 unsigned long payload);
+int fw_iso_context_start(struct fw_iso_context *ctx,
+			 int cycle, int sync, int tags);
+int fw_iso_context_stop(struct fw_iso_context *ctx);
+void fw_iso_context_destroy(struct fw_iso_context *ctx);
+
+void fw_iso_resource_manage(struct fw_card *card, int generation,
+		u64 channels_mask, int *channel, int *bandwidth, bool allocate);
+
+
+/* -topology */
+
+enum {
+	FW_NODE_CREATED,
+	FW_NODE_UPDATED,
+	FW_NODE_DESTROYED,
+	FW_NODE_LINK_ON,
+	FW_NODE_LINK_OFF,
+	FW_NODE_INITIATED_RESET,
+};
+
+struct fw_node {
+	u16 node_id;
+	u8 color;
+	u8 port_count;
+	u8 link_on:1;
+	u8 initiated_reset:1;
+	u8 b_path:1;
+	u8 phy_speed:2;	/* As in the self ID packet. */
+	u8 max_speed:2;	/* Minimum of all phy-speeds on the path from the
+			 * local node to this node. */
+	u8 max_depth:4;	/* Maximum depth to any leaf node */
+	u8 max_hops:4;	/* Max hops in this sub tree */
+	atomic_t ref_count;
+
+	/* For serializing node topology into a list. */
+	struct list_head link;
+
+	/* Upper layer specific data. */
+	void *data;
+
+	struct fw_node *ports[0];
+};
+
+static inline struct fw_node *fw_node_get(struct fw_node *node)
+{
+	atomic_inc(&node->ref_count);
+
+	return node;
+}
+
+static inline void fw_node_put(struct fw_node *node)
+{
+	if (atomic_dec_and_test(&node->ref_count))
+		kfree(node);
+}
+
+void fw_core_handle_bus_reset(struct fw_card *card, int node_id,
+			      int generation, int self_id_count, u32 *self_ids);
+void fw_destroy_nodes(struct fw_card *card);
+
+/*
+ * Check whether new_generation is the immediate successor of old_generation.
+ * Take counter roll-over at 255 (as per OHCI) into account.
+ */
+static inline bool is_next_generation(int new_generation, int old_generation)
+{
+	return (new_generation & 0xff) == ((old_generation + 1) & 0xff);
+}
+
+
+/* -transaction */
+
+#define TCODE_IS_READ_REQUEST(tcode)	(((tcode) & ~1) == 4)
+#define TCODE_IS_BLOCK_PACKET(tcode)	(((tcode) &  1) != 0)
+#define TCODE_IS_REQUEST(tcode)		(((tcode) &  2) == 0)
+#define TCODE_IS_RESPONSE(tcode)	(((tcode) &  2) != 0)
+#define TCODE_HAS_REQUEST_DATA(tcode)	(((tcode) & 12) != 4)
+#define TCODE_HAS_RESPONSE_DATA(tcode)	(((tcode) & 12) != 0)
+
+#define LOCAL_BUS 0xffc0
+
+void fw_core_handle_request(struct fw_card *card, struct fw_packet *request);
+void fw_core_handle_response(struct fw_card *card, struct fw_packet *packet);
+void fw_fill_response(struct fw_packet *response, u32 *request_header,
+		      int rcode, void *payload, size_t length);
+void fw_flush_transactions(struct fw_card *card);
+void fw_send_phy_config(struct fw_card *card,
+			int node_id, int generation, int gap_count);
+
+static inline int fw_stream_packet_destination_id(int tag, int channel, int sy)
+{
+	return tag << 14 | channel << 8 | sy;
+}
+
+#endif /* _FIREWIRE_CORE_H */
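
core.h also gathers a few helpers that used to be open-coded, for instance is_next_generation(), which accounts for the OHCI generation counter rolling over at 255. A typical use in a bus-reset handler would be along these lines (sketch only; the field names are those of struct fw_card):

	if (!is_next_generation(new_generation, card->generation)) {
		/* at least one topology change was missed */
		fw_notify("skipped bus generations, destroying all nodes\n");
		fw_destroy_nodes(card);
	}
	card->generation = new_generation;
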
diff --git a/drivers/firewire/fw-device.h b/drivers/firewire/fw-device.h
deleted file mode 100644
index 9758893..0000000
--- a/drivers/firewire/fw-device.h
+++ /dev/null
@@ -1,202 +0,0 @@
-/*
- * Copyright (C) 2005-2006  Kristian Hoegsberg <krh@bitplanet.net>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software Foundation,
- * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- */
-
-#ifndef __fw_device_h
-#define __fw_device_h
-
-#include <linux/device.h>
-#include <linux/fs.h>
-#include <linux/idr.h>
-#include <linux/kernel.h>
-#include <linux/list.h>
-#include <linux/mutex.h>
-#include <linux/rwsem.h>
-#include <linux/sysfs.h>
-#include <linux/types.h>
-#include <linux/workqueue.h>
-
-#include <asm/atomic.h>
-
-enum fw_device_state {
-	FW_DEVICE_INITIALIZING,
-	FW_DEVICE_RUNNING,
-	FW_DEVICE_GONE,
-	FW_DEVICE_SHUTDOWN,
-};
-
-struct fw_attribute_group {
-	struct attribute_group *groups[2];
-	struct attribute_group group;
-	struct attribute *attrs[11];
-};
-
-struct fw_node;
-struct fw_card;
-
-/*
- * Note, fw_device.generation always has to be read before fw_device.node_id.
- * Use SMP memory barriers to ensure this.  Otherwise requests will be sent
- * to an outdated node_id if the generation was updated in the meantime due
- * to a bus reset.
- *
- * Likewise, fw-core will take care to update .node_id before .generation so
- * that whenever fw_device.generation is current WRT the actual bus generation,
- * fw_device.node_id is guaranteed to be current too.
- *
- * The same applies to fw_device.card->node_id vs. fw_device.generation.
- *
- * fw_device.config_rom and fw_device.config_rom_length may be accessed during
- * the lifetime of any fw_unit belonging to the fw_device, before device_del()
- * was called on the last fw_unit.  Alternatively, they may be accessed while
- * holding fw_device_rwsem.
- */
-struct fw_device {
-	atomic_t state;
-	struct fw_node *node;
-	int node_id;
-	int generation;
-	unsigned max_speed;
-	struct fw_card *card;
-	struct device device;
-
-	struct mutex client_list_mutex;
-	struct list_head client_list;
-
-	u32 *config_rom;
-	size_t config_rom_length;
-	int config_rom_retries;
-	unsigned cmc:1;
-	unsigned bc_implemented:2;
-
-	struct delayed_work work;
-	struct fw_attribute_group attribute_group;
-};
-
-static inline struct fw_device *fw_device(struct device *dev)
-{
-	return container_of(dev, struct fw_device, device);
-}
-
-static inline int fw_device_is_shutdown(struct fw_device *device)
-{
-	return atomic_read(&device->state) == FW_DEVICE_SHUTDOWN;
-}
-
-static inline struct fw_device *fw_device_get(struct fw_device *device)
-{
-	get_device(&device->device);
-
-	return device;
-}
-
-static inline void fw_device_put(struct fw_device *device)
-{
-	put_device(&device->device);
-}
-
-struct fw_device *fw_device_get_by_devt(dev_t devt);
-int fw_device_enable_phys_dma(struct fw_device *device);
-void fw_device_set_broadcast_channel(struct fw_device *device, int generation);
-
-void fw_device_cdev_update(struct fw_device *device);
-void fw_device_cdev_remove(struct fw_device *device);
-
-extern struct rw_semaphore fw_device_rwsem;
-extern struct idr fw_device_idr;
-extern int fw_cdev_major;
-
-/*
- * fw_unit.directory must not be accessed after device_del(&fw_unit.device).
- */
-struct fw_unit {
-	struct device device;
-	u32 *directory;
-	struct fw_attribute_group attribute_group;
-};
-
-static inline struct fw_unit *fw_unit(struct device *dev)
-{
-	return container_of(dev, struct fw_unit, device);
-}
-
-static inline struct fw_unit *fw_unit_get(struct fw_unit *unit)
-{
-	get_device(&unit->device);
-
-	return unit;
-}
-
-static inline void fw_unit_put(struct fw_unit *unit)
-{
-	put_device(&unit->device);
-}
-
-#define CSR_OFFSET	0x40
-#define CSR_LEAF	0x80
-#define CSR_DIRECTORY	0xc0
-
-#define CSR_DESCRIPTOR		0x01
-#define CSR_VENDOR		0x03
-#define CSR_HARDWARE_VERSION	0x04
-#define CSR_NODE_CAPABILITIES	0x0c
-#define CSR_UNIT		0x11
-#define CSR_SPECIFIER_ID	0x12
-#define CSR_VERSION		0x13
-#define CSR_DEPENDENT_INFO	0x14
-#define CSR_MODEL		0x17
-#define CSR_INSTANCE		0x18
-#define CSR_DIRECTORY_ID	0x20
-
-struct fw_csr_iterator {
-	u32 *p;
-	u32 *end;
-};
-
-void fw_csr_iterator_init(struct fw_csr_iterator *ci, u32 *p);
-int fw_csr_iterator_next(struct fw_csr_iterator *ci,
-			 int *key, int *value);
-
-#define FW_MATCH_VENDOR		0x0001
-#define FW_MATCH_MODEL		0x0002
-#define FW_MATCH_SPECIFIER_ID	0x0004
-#define FW_MATCH_VERSION	0x0008
-
-struct fw_device_id {
-	u32 match_flags;
-	u32 vendor;
-	u32 model;
-	u32 specifier_id;
-	u32 version;
-	void *driver_data;
-};
-
-struct fw_driver {
-	struct device_driver driver;
-	/* Called when the parent device sits through a bus reset. */
-	void (*update) (struct fw_unit *unit);
-	const struct fw_device_id *id_table;
-};
-
-static inline struct fw_driver *fw_driver(struct device_driver *drv)
-{
-	return container_of(drv, struct fw_driver, driver);
-}
-
-extern const struct file_operations fw_device_ops;
-
-#endif /* __fw_device_h */
diff --git a/drivers/firewire/fw-topology.h b/drivers/firewire/fw-topology.h
deleted file mode 100644
index 3c497bb..0000000
--- a/drivers/firewire/fw-topology.h
+++ /dev/null
@@ -1,77 +0,0 @@
-/*
- * Copyright (C) 2003-2006 Kristian Hoegsberg <krh@bitplanet.net>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software Foundation,
- * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- */
-
-#ifndef __fw_topology_h
-#define __fw_topology_h
-
-#include <linux/list.h>
-#include <linux/slab.h>
-
-#include <asm/atomic.h>
-
-enum {
-	FW_NODE_CREATED,
-	FW_NODE_UPDATED,
-	FW_NODE_DESTROYED,
-	FW_NODE_LINK_ON,
-	FW_NODE_LINK_OFF,
-	FW_NODE_INITIATED_RESET,
-};
-
-struct fw_node {
-	u16 node_id;
-	u8 color;
-	u8 port_count;
-	u8 link_on : 1;
-	u8 initiated_reset : 1;
-	u8 b_path : 1;
-	u8 phy_speed : 2; /* As in the self ID packet. */
-	u8 max_speed : 2; /* Minimum of all phy-speeds on the path from the
-			   * local node to this node. */
-	u8 max_depth : 4; /* Maximum depth to any leaf node */
-	u8 max_hops : 4;  /* Max hops in this sub tree */
-	atomic_t ref_count;
-
-	/* For serializing node topology into a list. */
-	struct list_head link;
-
-	/* Upper layer specific data. */
-	void *data;
-
-	struct fw_node *ports[0];
-};
-
-static inline struct fw_node *fw_node_get(struct fw_node *node)
-{
-	atomic_inc(&node->ref_count);
-
-	return node;
-}
-
-static inline void fw_node_put(struct fw_node *node)
-{
-	if (atomic_dec_and_test(&node->ref_count))
-		kfree(node);
-}
-
-struct fw_card;
-void fw_destroy_nodes(struct fw_card *card);
-
-int fw_compute_block_crc(u32 *block);
-
-#endif /* __fw_topology_h */
diff --git a/drivers/firewire/fw-transaction.h b/drivers/firewire/fw-transaction.h
deleted file mode 100644
index dfa7990..0000000
--- a/drivers/firewire/fw-transaction.h
+++ /dev/null
@@ -1,446 +0,0 @@
-/*
- * Copyright (C) 2003-2006 Kristian Hoegsberg <krh@bitplanet.net>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software Foundation,
- * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- */
-
-#ifndef __fw_transaction_h
-#define __fw_transaction_h
-
-#include <linux/completion.h>
-#include <linux/device.h>
-#include <linux/dma-mapping.h>
-#include <linux/firewire-constants.h>
-#include <linux/kref.h>
-#include <linux/list.h>
-#include <linux/spinlock_types.h>
-#include <linux/timer.h>
-#include <linux/types.h>
-#include <linux/workqueue.h>
-
-#define TCODE_IS_READ_REQUEST(tcode)	(((tcode) & ~1) == 4)
-#define TCODE_IS_BLOCK_PACKET(tcode)	(((tcode) &  1) != 0)
-#define TCODE_IS_REQUEST(tcode)		(((tcode) &  2) == 0)
-#define TCODE_IS_RESPONSE(tcode)	(((tcode) &  2) != 0)
-#define TCODE_HAS_REQUEST_DATA(tcode)	(((tcode) & 12) != 4)
-#define TCODE_HAS_RESPONSE_DATA(tcode)	(((tcode) & 12) != 0)
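For example, TCODE_READ_BLOCK_REQUEST is 0x5 in linux/firewire-constants.h: 5 & ~1 == 4, so TCODE_IS_READ_REQUEST() is true; 5 & 1 != 0, so TCODE_IS_BLOCK_PACKET() is true; and 5 & 2 == 0, so TCODE_IS_REQUEST() is true.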
-
-#define LOCAL_BUS 0xffc0
-
-#define SELFID_PORT_CHILD	0x3
-#define SELFID_PORT_PARENT	0x2
-#define SELFID_PORT_NCONN	0x1
-#define SELFID_PORT_NONE	0x0
-
-#define PHY_PACKET_CONFIG	0x0
-#define PHY_PACKET_LINK_ON	0x1
-#define PHY_PACKET_SELF_ID	0x2
-
-/* Bit fields _within_ the PHY registers. */
-#define PHY_LINK_ACTIVE		0x80
-#define PHY_CONTENDER		0x40
-#define PHY_BUS_RESET		0x40
-#define PHY_BUS_SHORT_RESET	0x40
-
-#define CSR_REGISTER_BASE		0xfffff0000000ULL
-
-/* register offsets relative to CSR_REGISTER_BASE */
-#define CSR_STATE_CLEAR			0x0
-#define CSR_STATE_SET			0x4
-#define CSR_NODE_IDS			0x8
-#define CSR_RESET_START			0xc
-#define CSR_SPLIT_TIMEOUT_HI		0x18
-#define CSR_SPLIT_TIMEOUT_LO		0x1c
-#define CSR_CYCLE_TIME			0x200
-#define CSR_BUS_TIME			0x204
-#define CSR_BUSY_TIMEOUT		0x210
-#define CSR_BUS_MANAGER_ID		0x21c
-#define CSR_BANDWIDTH_AVAILABLE		0x220
-#define CSR_CHANNELS_AVAILABLE		0x224
-#define CSR_CHANNELS_AVAILABLE_HI	0x224
-#define CSR_CHANNELS_AVAILABLE_LO	0x228
-#define CSR_BROADCAST_CHANNEL		0x234
-#define CSR_CONFIG_ROM			0x400
-#define CSR_CONFIG_ROM_END		0x800
-#define CSR_FCP_COMMAND			0xB00
-#define CSR_FCP_RESPONSE		0xD00
-#define CSR_FCP_END			0xF00
-#define CSR_TOPOLOGY_MAP		0x1000
-#define CSR_TOPOLOGY_MAP_END		0x1400
-#define CSR_SPEED_MAP			0x2000
-#define CSR_SPEED_MAP_END		0x3000
-
-#define BANDWIDTH_AVAILABLE_INITIAL	4915
-#define BROADCAST_CHANNEL_INITIAL	(1 << 31 | 31)
-#define BROADCAST_CHANNEL_VALID		(1 << 30)
-
-#define fw_notify(s, args...) printk(KERN_NOTICE KBUILD_MODNAME ": " s, ## args)
-#define fw_error(s, args...) printk(KERN_ERR KBUILD_MODNAME ": " s, ## args)
-
-static inline void fw_memcpy_from_be32(void *_dst, void *_src, size_t size)
-{
-	u32    *dst = _dst;
-	__be32 *src = _src;
-	int i;
-
-	for (i = 0; i < size / 4; i++)
-		dst[i] = be32_to_cpu(src[i]);
-}
-
-static inline void fw_memcpy_to_be32(void *_dst, void *_src, size_t size)
-{
-	fw_memcpy_from_be32(_dst, _src, size);
-}
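(The conversion is its own inverse: on little-endian hosts both helpers byte-swap each quadlet, on big-endian hosts both are plain copies, which is why fw_memcpy_to_be32() can simply call fw_memcpy_from_be32().)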
-
-struct fw_card;
-struct fw_packet;
-struct fw_node;
-struct fw_request;
-
-struct fw_descriptor {
-	struct list_head link;
-	size_t length;
-	u32 immediate;
-	u32 key;
-	const u32 *data;
-};
-
-int fw_core_add_descriptor(struct fw_descriptor *desc);
-void fw_core_remove_descriptor(struct fw_descriptor *desc);
-
-typedef void (*fw_packet_callback_t)(struct fw_packet *packet,
-				     struct fw_card *card, int status);
-
-typedef void (*fw_transaction_callback_t)(struct fw_card *card, int rcode,
-					  void *data, size_t length,
-					  void *callback_data);
-
-/*
- * Important note:  The callback must guarantee that either fw_send_response()
- * or kfree() is called on the @request.
- */
-typedef void (*fw_address_callback_t)(struct fw_card *card,
-				      struct fw_request *request,
-				      int tcode, int destination, int source,
-				      int generation, int speed,
-				      unsigned long long offset,
-				      void *data, size_t length,
-				      void *callback_data);
-
-struct fw_packet {
-	int speed;
-	int generation;
-	u32 header[4];
-	size_t header_length;
-	void *payload;
-	size_t payload_length;
-	dma_addr_t payload_bus;
-	u32 timestamp;
-
-	/*
-	 * This callback is called when the packet transmission has
-	 * completed; for successful transmission, the status code is
-	 * the ack received from the destination, otherwise it's a
-	 * negative errno: ENOMEM, ESTALE, ETIMEDOUT, ENODEV, EIO.
-	 * The callback can be called from tasklet context and thus
-	 * must never block.
-	 */
-	fw_packet_callback_t callback;
-	int ack;
-	struct list_head link;
-	void *driver_data;
-};
-
-struct fw_transaction {
-	int node_id; /* The generation is implied; it is always the current. */
-	int tlabel;
-	int timestamp;
-	struct list_head link;
-
-	struct fw_packet packet;
-
-	/*
-	 * The data passed to the callback is valid only during the
-	 * callback.
-	 */
-	fw_transaction_callback_t callback;
-	void *callback_data;
-};
-
-struct fw_address_handler {
-	u64 offset;
-	size_t length;
-	fw_address_callback_t address_callback;
-	void *callback_data;
-	struct list_head link;
-};
-
-struct fw_address_region {
-	u64 start;
-	u64 end;
-};
-
-extern const struct fw_address_region fw_high_memory_region;
-
-int fw_core_add_address_handler(struct fw_address_handler *handler,
-				const struct fw_address_region *region);
-void fw_core_remove_address_handler(struct fw_address_handler *handler);
-void fw_fill_response(struct fw_packet *response, u32 *request_header,
-		      int rcode, void *payload, size_t length);
-void fw_send_response(struct fw_card *card,
-		      struct fw_request *request, int rcode);
-
-extern struct bus_type fw_bus_type;
-
-struct fw_card {
-	const struct fw_card_driver *driver;
-	struct device *device;
-	struct kref kref;
-	struct completion done;
-
-	int node_id;
-	int generation;
-	int current_tlabel, tlabel_mask;
-	struct list_head transaction_list;
-	struct timer_list flush_timer;
-	unsigned long reset_jiffies;
-
-	unsigned long long guid;
-	unsigned max_receive;
-	int link_speed;
-	int config_rom_generation;
-
-	spinlock_t lock; /* Take this lock when handling the lists in
-			  * this struct. */
-	struct fw_node *local_node;
-	struct fw_node *root_node;
-	struct fw_node *irm_node;
-	u8 color; /* must be u8 to match the definition in struct fw_node */
-	int gap_count;
-	bool beta_repeaters_present;
-
-	int index;
-
-	struct list_head link;
-
-	/* Work struct for BM duties. */
-	struct delayed_work work;
-	int bm_retries;
-	int bm_generation;
-
-	bool broadcast_channel_allocated;
-	u32 broadcast_channel;
-	u32 topology_map[(CSR_TOPOLOGY_MAP_END - CSR_TOPOLOGY_MAP) / 4];
-};
-
-static inline struct fw_card *fw_card_get(struct fw_card *card)
-{
-	kref_get(&card->kref);
-
-	return card;
-}
-
-void fw_card_release(struct kref *kref);
-
-static inline void fw_card_put(struct fw_card *card)
-{
-	kref_put(&card->kref, fw_card_release);
-}
-
-extern void fw_schedule_bm_work(struct fw_card *card, unsigned long delay);
-
-/*
- * Check whether new_generation is the immediate successor of old_generation.
- * Take counter roll-over at 255 (as per OHCI) into account.
- */
-static inline bool is_next_generation(int new_generation, int old_generation)
-{
-	return (new_generation & 0xff) == ((old_generation + 1) & 0xff);
-}
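For example, is_next_generation(0, 255) computes (0 & 0xff) == ((255 + 1) & 0xff), i.e. 0 == 0, so the roll-over from generation 255 back to 0 is correctly treated as the immediate successor.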
-
-/*
- * The iso packet format allows for an immediate header/payload part
- * stored in 'header' immediately after the packet info plus an
- * indirect payload part that is pointed to by the 'payload' field.
- * Applications can use one or the other or both to implement simple
- * low-bandwidth streaming (e.g. audio) or more advanced
- * scatter-gather streaming (e.g. assembling video frames automatically).
- */
-
-struct fw_iso_packet {
-	u16 payload_length;	/* Length of indirect payload. */
-	u32 interrupt : 1;	/* Generate interrupt on this packet */
-	u32 skip : 1;		/* Set to not send packet at all. */
-	u32 tag : 2;
-	u32 sy : 4;
-	u32 header_length : 8;	/* Length of immediate header. */
-	u32 header[0];
-};
-
-#define FW_ISO_CONTEXT_TRANSMIT	0
-#define FW_ISO_CONTEXT_RECEIVE	1
-
-#define FW_ISO_CONTEXT_MATCH_TAG0	 1
-#define FW_ISO_CONTEXT_MATCH_TAG1	 2
-#define FW_ISO_CONTEXT_MATCH_TAG2	 4
-#define FW_ISO_CONTEXT_MATCH_TAG3	 8
-#define FW_ISO_CONTEXT_MATCH_ALL_TAGS	15
-
-struct fw_iso_context;
-
-typedef void (*fw_iso_callback_t)(struct fw_iso_context *context,
-				  u32 cycle, size_t header_length,
-				  void *header, void *data);
-
-/*
- * An iso buffer is just a set of pages mapped for DMA in the
- * specified direction.  Since the pages are to be used for DMA, they
- * are not mapped into the kernel virtual address space.  We store the
- * DMA address in the page private. The helper function
- * fw_iso_buffer_map() will map the pages into a given vma.
- */
-
-struct fw_iso_buffer {
-	enum dma_data_direction direction;
-	struct page **pages;
-	int page_count;
-};
-
-struct fw_iso_context {
-	struct fw_card *card;
-	int type;
-	int channel;
-	int speed;
-	size_t header_size;
-	fw_iso_callback_t callback;
-	void *callback_data;
-};
-
-int fw_iso_buffer_init(struct fw_iso_buffer *buffer, struct fw_card *card,
-		       int page_count, enum dma_data_direction direction);
-int fw_iso_buffer_map(struct fw_iso_buffer *buffer, struct vm_area_struct *vma);
-void fw_iso_buffer_destroy(struct fw_iso_buffer *buffer, struct fw_card *card);
-
-struct fw_iso_context *fw_iso_context_create(struct fw_card *card,
-		int type, int channel, int speed, size_t header_size,
-		fw_iso_callback_t callback, void *callback_data);
-int fw_iso_context_queue(struct fw_iso_context *ctx,
-			 struct fw_iso_packet *packet,
-			 struct fw_iso_buffer *buffer,
-			 unsigned long payload);
-int fw_iso_context_start(struct fw_iso_context *ctx,
-			 int cycle, int sync, int tags);
-int fw_iso_context_stop(struct fw_iso_context *ctx);
-void fw_iso_context_destroy(struct fw_iso_context *ctx);
-
-void fw_iso_resource_manage(struct fw_card *card, int generation,
-		u64 channels_mask, int *channel, int *bandwidth, bool allocate);
-
-struct fw_card_driver {
-	/*
-	 * Enable the given card with the given initial config rom.
-	 * This function is expected to activate the card, and either
-	 * enable the PHY or set the link_on bit and initiate a bus
-	 * reset.
-	 */
-	int (*enable)(struct fw_card *card, u32 *config_rom, size_t length);
-
-	int (*update_phy_reg)(struct fw_card *card, int address,
-			      int clear_bits, int set_bits);
-
-	/*
-	 * Update the config rom for an enabled card.  This function
-	 * should change the config rom that is presented on the bus
- * and initiate a bus reset.
-	 */
-	int (*set_config_rom)(struct fw_card *card,
-			      u32 *config_rom, size_t length);
-
-	void (*send_request)(struct fw_card *card, struct fw_packet *packet);
-	void (*send_response)(struct fw_card *card, struct fw_packet *packet);
-	/* Calling cancel is valid once a packet has been submitted. */
-	int (*cancel_packet)(struct fw_card *card, struct fw_packet *packet);
-
-	/*
-	 * Allow the specified node ID to do direct DMA out and in of
- * host memory.  The card will disable this for all nodes when
- * a bus reset happens, so drivers need to re-enable this after
- * a bus reset.  Returns 0 on success, -ENODEV if the card
-	 * doesn't support this, -ESTALE if the generation doesn't
-	 * match.
-	 */
-	int (*enable_phys_dma)(struct fw_card *card,
-			       int node_id, int generation);
-
-	u64 (*get_bus_time)(struct fw_card *card);
-
-	struct fw_iso_context *
-	(*allocate_iso_context)(struct fw_card *card,
-				int type, int channel, size_t header_size);
-	void (*free_iso_context)(struct fw_iso_context *ctx);
-
-	int (*start_iso)(struct fw_iso_context *ctx,
-			 s32 cycle, u32 sync, u32 tags);
-
-	int (*queue_iso)(struct fw_iso_context *ctx,
-			 struct fw_iso_packet *packet,
-			 struct fw_iso_buffer *buffer,
-			 unsigned long payload);
-
-	int (*stop_iso)(struct fw_iso_context *ctx);
-};
-
-int fw_core_initiate_bus_reset(struct fw_card *card, int short_reset);
-
-void fw_send_request(struct fw_card *card, struct fw_transaction *t,
-		int tcode, int destination_id, int generation, int speed,
-		unsigned long long offset, void *payload, size_t length,
-		fw_transaction_callback_t callback, void *callback_data);
-int fw_cancel_transaction(struct fw_card *card,
-			  struct fw_transaction *transaction);
-void fw_flush_transactions(struct fw_card *card);
-int fw_run_transaction(struct fw_card *card, int tcode, int destination_id,
-		       int generation, int speed, unsigned long long offset,
-		       void *payload, size_t length);
-void fw_send_phy_config(struct fw_card *card,
-			int node_id, int generation, int gap_count);
-
-static inline int fw_stream_packet_destination_id(int tag, int channel, int sy)
-{
-	return tag << 14 | channel << 8 | sy;
-}
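For example, tag 3, channel 13, sy 0 gives 3 << 14 | 13 << 8 | 0 = 0xc000 | 0xd00 = 0xcd00.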
-
-/*
- * Called by the topology code to inform the device code of node
- * activity; found, lost, or updated nodes.
- */
-void fw_node_event(struct fw_card *card, struct fw_node *node, int event);
-
-/* API used by card level drivers */
-
-void fw_card_initialize(struct fw_card *card,
-		const struct fw_card_driver *driver, struct device *device);
-int fw_card_add(struct fw_card *card,
-		u32 max_receive, u32 link_speed, u64 guid);
-void fw_core_remove_card(struct fw_card *card);
-void fw_core_handle_bus_reset(struct fw_card *card, int node_id,
-		int generation, int self_id_count, u32 *self_ids);
-void fw_core_handle_request(struct fw_card *card, struct fw_packet *request);
-void fw_core_handle_response(struct fw_card *card, struct fw_packet *packet);
-
-extern int fw_irm_set_broadcast_channel_register(struct device *dev,
-						 void *data);
-
-#endif /* __fw_transaction_h */
diff --git a/drivers/firewire/fw-ohci.c b/drivers/firewire/ohci.c
similarity index 99%
rename from drivers/firewire/fw-ohci.c
rename to drivers/firewire/ohci.c
index 1180d0b..ecddd11 100644
--- a/drivers/firewire/fw-ohci.c
+++ b/drivers/firewire/ohci.c
@@ -20,17 +20,25 @@
 
 #include <linux/compiler.h>
 #include <linux/delay.h>
+#include <linux/device.h>
 #include <linux/dma-mapping.h>
+#include <linux/firewire.h>
+#include <linux/firewire-constants.h>
 #include <linux/gfp.h>
 #include <linux/init.h>
 #include <linux/interrupt.h>
+#include <linux/io.h>
 #include <linux/kernel.h>
+#include <linux/list.h>
 #include <linux/mm.h>
 #include <linux/module.h>
 #include <linux/moduleparam.h>
 #include <linux/pci.h>
 #include <linux/spinlock.h>
+#include <linux/string.h>
 
+#include <asm/atomic.h>
+#include <asm/byteorder.h>
 #include <asm/page.h>
 #include <asm/system.h>
 
@@ -38,8 +46,8 @@
 #include <asm/pmac_feature.h>
 #endif
 
-#include "fw-ohci.h"
-#include "fw-transaction.h"
+#include "core.h"
+#include "ohci.h"
 
 #define DESCRIPTOR_OUTPUT_MORE		0
 #define DESCRIPTOR_OUTPUT_LAST		(1 << 12)
@@ -178,7 +186,7 @@
 	int node_id;
 	int generation;
 	int request_generation;	/* for timestamping incoming requests */
-	u32 bus_seconds;
+	atomic_t bus_seconds;
 
 	bool use_dualbuffer;
 	bool old_uninorth;
@@ -231,7 +239,6 @@
 #define OHCI1394_MAX_AT_RESP_RETRIES	0x2
 #define OHCI1394_MAX_PHYS_RESP_RETRIES	0x8
 
-#define FW_OHCI_MAJOR			240
 #define OHCI1394_REGISTER_SIZE		0x800
 #define OHCI_LOOP_COUNT			500
 #define OHCI1394_PCI_HCI_Control	0x40
@@ -1434,7 +1441,7 @@
 	if (event & OHCI1394_cycle64Seconds) {
 		cycle_time = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
 		if ((cycle_time & 0x80000000) == 0)
-			ohci->bus_seconds++;
+			atomic_inc(&ohci->bus_seconds);
 	}
 
 	return IRQ_HANDLED;
@@ -1770,7 +1777,7 @@
 	u64 bus_time;
 
 	cycle_time = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
-	bus_time = ((u64) ohci->bus_seconds << 32) | cycle_time;
+	bus_time = ((u64)atomic_read(&ohci->bus_seconds) << 32) | cycle_time;
 
 	return bus_time;
 }
diff --git a/drivers/firewire/fw-ohci.h b/drivers/firewire/ohci.h
similarity index 98%
rename from drivers/firewire/fw-ohci.h
rename to drivers/firewire/ohci.h
index a2fbb62..ba492d8 100644
--- a/drivers/firewire/fw-ohci.h
+++ b/drivers/firewire/ohci.h
@@ -1,5 +1,5 @@
-#ifndef __fw_ohci_h
-#define __fw_ohci_h
+#ifndef _FIREWIRE_OHCI_H
+#define _FIREWIRE_OHCI_H
 
 /* OHCI register map */
 
@@ -154,4 +154,4 @@
 
 #define OHCI1394_phy_tcode		0xe
 
-#endif /* __fw_ohci_h */
+#endif /* _FIREWIRE_OHCI_H */
diff --git a/drivers/firewire/fw-sbp2.c b/drivers/firewire/sbp2.c
similarity index 96%
rename from drivers/firewire/fw-sbp2.c
rename to drivers/firewire/sbp2.c
index 2bcf515..24c4563 100644
--- a/drivers/firewire/fw-sbp2.c
+++ b/drivers/firewire/sbp2.c
@@ -30,18 +30,28 @@
 
 #include <linux/blkdev.h>
 #include <linux/bug.h>
+#include <linux/completion.h>
 #include <linux/delay.h>
 #include <linux/device.h>
 #include <linux/dma-mapping.h>
+#include <linux/firewire.h>
+#include <linux/firewire-constants.h>
+#include <linux/init.h>
+#include <linux/jiffies.h>
 #include <linux/kernel.h>
+#include <linux/kref.h>
+#include <linux/list.h>
 #include <linux/mod_devicetable.h>
 #include <linux/module.h>
 #include <linux/moduleparam.h>
 #include <linux/scatterlist.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
 #include <linux/string.h>
 #include <linux/stringify.h>
-#include <linux/timer.h>
 #include <linux/workqueue.h>
+
+#include <asm/byteorder.h>
 #include <asm/system.h>
 
 #include <scsi/scsi.h>
@@ -49,10 +59,6 @@
 #include <scsi/scsi_device.h>
 #include <scsi/scsi_host.h>
 
-#include "fw-device.h"
-#include "fw-topology.h"
-#include "fw-transaction.h"
-
 /*
  * So far only bridges from Oxford Semiconductor are known to support
  * concurrent logins. Depending on firmware, four or two concurrent logins
@@ -174,6 +180,11 @@
 	int blocked;	/* ditto */
 };
 
+static struct fw_device *target_device(struct sbp2_target *tgt)
+{
+	return fw_parent_device(tgt->unit);
+}
+
 /* Impossible login_id, to detect logout attempt before successful login */
 #define INVALID_LOGIN_ID 0x10000
 
@@ -482,7 +493,7 @@
 static void sbp2_send_orb(struct sbp2_orb *orb, struct sbp2_logical_unit *lu,
 			  int node_id, int generation, u64 offset)
 {
-	struct fw_device *device = fw_device(lu->tgt->unit->device.parent);
+	struct fw_device *device = target_device(lu->tgt);
 	unsigned long flags;
 
 	orb->pointer.high = 0;
@@ -504,7 +515,7 @@
 
 static int sbp2_cancel_orbs(struct sbp2_logical_unit *lu)
 {
-	struct fw_device *device = fw_device(lu->tgt->unit->device.parent);
+	struct fw_device *device = target_device(lu->tgt);
 	struct sbp2_orb *orb, *next;
 	struct list_head list;
 	unsigned long flags;
@@ -542,7 +553,7 @@
 				    int generation, int function,
 				    int lun_or_login_id, void *response)
 {
-	struct fw_device *device = fw_device(lu->tgt->unit->device.parent);
+	struct fw_device *device = target_device(lu->tgt);
 	struct sbp2_management_orb *orb;
 	unsigned int timeout;
 	int retval = -ENOMEM;
@@ -638,7 +649,7 @@
 
 static void sbp2_agent_reset(struct sbp2_logical_unit *lu)
 {
-	struct fw_device *device = fw_device(lu->tgt->unit->device.parent);
+	struct fw_device *device = target_device(lu->tgt);
 	__be32 d = 0;
 
 	fw_run_transaction(device->card, TCODE_WRITE_QUADLET_REQUEST,
@@ -655,7 +666,7 @@
 
 static void sbp2_agent_reset_no_wait(struct sbp2_logical_unit *lu)
 {
-	struct fw_device *device = fw_device(lu->tgt->unit->device.parent);
+	struct fw_device *device = target_device(lu->tgt);
 	struct fw_transaction *t;
 	static __be32 d;
 
@@ -694,7 +705,7 @@
 static void sbp2_conditionally_block(struct sbp2_logical_unit *lu)
 {
 	struct sbp2_target *tgt = lu->tgt;
-	struct fw_card *card = fw_device(tgt->unit->device.parent)->card;
+	struct fw_card *card = target_device(tgt)->card;
 	struct Scsi_Host *shost =
 		container_of((void *)tgt, struct Scsi_Host, hostdata[0]);
 	unsigned long flags;
@@ -718,7 +729,7 @@
 static void sbp2_conditionally_unblock(struct sbp2_logical_unit *lu)
 {
 	struct sbp2_target *tgt = lu->tgt;
-	struct fw_card *card = fw_device(tgt->unit->device.parent)->card;
+	struct fw_card *card = target_device(tgt)->card;
 	struct Scsi_Host *shost =
 		container_of((void *)tgt, struct Scsi_Host, hostdata[0]);
 	unsigned long flags;
@@ -743,7 +754,7 @@
  */
 static void sbp2_unblock(struct sbp2_target *tgt)
 {
-	struct fw_card *card = fw_device(tgt->unit->device.parent)->card;
+	struct fw_card *card = target_device(tgt)->card;
 	struct Scsi_Host *shost =
 		container_of((void *)tgt, struct Scsi_Host, hostdata[0]);
 	unsigned long flags;
@@ -773,7 +784,7 @@
 	struct Scsi_Host *shost =
 		container_of((void *)tgt, struct Scsi_Host, hostdata[0]);
 	struct scsi_device *sdev;
-	struct fw_device *device = fw_device(tgt->unit->device.parent);
+	struct fw_device *device = target_device(tgt);
 
 	/* prevent deadlocks */
 	sbp2_unblock(tgt);
@@ -846,7 +857,7 @@
  */
 static void sbp2_set_busy_timeout(struct sbp2_logical_unit *lu)
 {
-	struct fw_device *device = fw_device(lu->tgt->unit->device.parent);
+	struct fw_device *device = target_device(lu->tgt);
 	__be32 d = cpu_to_be32(SBP2_CYCLE_LIMIT | SBP2_RETRY_LIMIT);
 
 	fw_run_transaction(device->card, TCODE_WRITE_QUADLET_REQUEST,
@@ -862,7 +873,7 @@
 	struct sbp2_logical_unit *lu =
 		container_of(work, struct sbp2_logical_unit, work.work);
 	struct sbp2_target *tgt = lu->tgt;
-	struct fw_device *device = fw_device(tgt->unit->device.parent);
+	struct fw_device *device = target_device(tgt);
 	struct Scsi_Host *shost;
 	struct scsi_device *sdev;
 	struct sbp2_login_response response;
@@ -1110,7 +1121,7 @@
 static int sbp2_probe(struct device *dev)
 {
 	struct fw_unit *unit = fw_unit(dev);
-	struct fw_device *device = fw_device(unit->device.parent);
+	struct fw_device *device = fw_parent_device(unit);
 	struct sbp2_target *tgt;
 	struct sbp2_logical_unit *lu;
 	struct Scsi_Host *shost;
@@ -1125,7 +1136,7 @@
 		return -ENOMEM;
 
 	tgt = (struct sbp2_target *)shost->hostdata;
-	unit->device.driver_data = tgt;
+	dev_set_drvdata(&unit->device, tgt);
 	tgt->unit = unit;
 	kref_init(&tgt->kref);
 	INIT_LIST_HEAD(&tgt->lu_list);
@@ -1180,7 +1191,7 @@
 static int sbp2_remove(struct device *dev)
 {
 	struct fw_unit *unit = fw_unit(dev);
-	struct sbp2_target *tgt = unit->device.driver_data;
+	struct sbp2_target *tgt = dev_get_drvdata(&unit->device);
 
 	sbp2_target_put(tgt);
 	return 0;
@@ -1191,7 +1202,7 @@
 	struct sbp2_logical_unit *lu =
 		container_of(work, struct sbp2_logical_unit, work.work);
 	struct sbp2_target *tgt = lu->tgt;
-	struct fw_device *device = fw_device(tgt->unit->device.parent);
+	struct fw_device *device = target_device(tgt);
 	int generation, node_id, local_node_id;
 
 	if (fw_device_is_shutdown(device))
@@ -1240,10 +1251,10 @@
 
 static void sbp2_update(struct fw_unit *unit)
 {
-	struct sbp2_target *tgt = unit->device.driver_data;
+	struct sbp2_target *tgt = dev_get_drvdata(&unit->device);
 	struct sbp2_logical_unit *lu;
 
-	fw_device_enable_phys_dma(fw_device(unit->device.parent));
+	fw_device_enable_phys_dma(fw_parent_device(unit));
 
 	/*
 	 * Fw-core serializes sbp2_update() against sbp2_remove().
@@ -1259,9 +1270,10 @@
 #define SBP2_UNIT_SPEC_ID_ENTRY	0x0000609e
 #define SBP2_SW_VERSION_ENTRY	0x00010483
 
-static const struct fw_device_id sbp2_id_table[] = {
+static const struct ieee1394_device_id sbp2_id_table[] = {
 	{
-		.match_flags  = FW_MATCH_SPECIFIER_ID | FW_MATCH_VERSION,
+		.match_flags  = IEEE1394_MATCH_SPECIFIER_ID |
+				IEEE1394_MATCH_VERSION,
 		.specifier_id = SBP2_UNIT_SPEC_ID_ENTRY,
 		.version      = SBP2_SW_VERSION_ENTRY,
 	},
@@ -1335,7 +1347,7 @@
 {
 	struct sbp2_command_orb *orb =
 		container_of(base_orb, struct sbp2_command_orb, base);
-	struct fw_device *device = fw_device(orb->lu->tgt->unit->device.parent);
+	struct fw_device *device = target_device(orb->lu->tgt);
 	int result;
 
 	if (status != NULL) {
@@ -1442,7 +1454,7 @@
 static int sbp2_scsi_queuecommand(struct scsi_cmnd *cmd, scsi_done_fn_t done)
 {
 	struct sbp2_logical_unit *lu = cmd->device->hostdata;
-	struct fw_device *device = fw_device(lu->tgt->unit->device.parent);
+	struct fw_device *device = target_device(lu->tgt);
 	struct sbp2_command_orb *orb;
 	int generation, retval = SCSI_MLQUEUE_HOST_BUSY;
 
diff --git a/drivers/gpu/drm/drm_debugfs.c b/drivers/gpu/drm/drm_debugfs.c
index c77c6c6..6ce0e26 100644
--- a/drivers/gpu/drm/drm_debugfs.c
+++ b/drivers/gpu/drm/drm_debugfs.c
@@ -105,7 +105,7 @@
 		ent = debugfs_create_file(files[i].name, S_IFREG | S_IRUGO,
 					  root, tmp, &drm_debugfs_fops);
 		if (!ent) {
-			DRM_ERROR("Cannot create /debugfs/dri/%s/%s\n",
+			DRM_ERROR("Cannot create /sys/kernel/debug/dri/%s/%s\n",
 				  name, files[i].name);
 			drm_free(tmp, sizeof(struct drm_info_node),
 				 _DRM_DRIVER);
@@ -133,9 +133,9 @@
  * \param minor device minor number
  * \param root DRI debugfs dir entry.
  *
- * Create the DRI debugfs root entry "/debugfs/dri", the device debugfs root entry
- * "/debugfs/dri/%minor%/", and each entry in debugfs_list as
- * "/debugfs/dri/%minor%/%name%".
+ * Create the DRI debugfs root entry "/sys/kernel/debug/dri", the device debugfs root entry
+ * "/sys/kernel/debug/dri/%minor%/", and each entry in debugfs_list as
+ * "/sys/kernel/debug/dri/%minor%/%name%".
  */
 int drm_debugfs_init(struct drm_minor *minor, int minor_id,
 		     struct dentry *root)
@@ -148,7 +148,7 @@
 	sprintf(name, "%d", minor_id);
 	minor->debugfs_root = debugfs_create_dir(name, root);
 	if (!minor->debugfs_root) {
-		DRM_ERROR("Cannot create /debugfs/dri/%s\n", name);
+		DRM_ERROR("Cannot create /sys/kernel/debug/dri/%s\n", name);
 		return -1;
 	}
 
@@ -165,7 +165,7 @@
 		ret = dev->driver->debugfs_init(minor);
 		if (ret) {
 			DRM_ERROR("DRM: Driver failed to initialize "
-				  "/debugfs/dri.\n");
+				  "/sys/kernel/debug/dri.\n");
 			return ret;
 		}
 	}
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index 019b7c5..1bf7efd 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -339,7 +339,7 @@
 
 	drm_debugfs_root = debugfs_create_dir("dri", NULL);
 	if (!drm_debugfs_root) {
-		DRM_ERROR("Cannot create /debugfs/dri\n");
+		DRM_ERROR("Cannot create /sys/kernel/debug/dri\n");
 		ret = -1;
 		goto err_p3;
 	}
diff --git a/drivers/gpu/drm/drm_stub.c b/drivers/gpu/drm/drm_stub.c
index 8905068..387a8de 100644
--- a/drivers/gpu/drm/drm_stub.c
+++ b/drivers/gpu/drm/drm_stub.c
@@ -343,7 +343,7 @@
 #if defined(CONFIG_DEBUG_FS)
 	ret = drm_debugfs_init(new_minor, minor_id, drm_debugfs_root);
 	if (ret) {
-		DRM_ERROR("DRM: Failed to initialize /debugfs/dri.\n");
+		DRM_ERROR("DRM: Failed to initialize /sys/kernel/debug/dri.\n");
 		goto err_g2;
 	}
 #endif
diff --git a/drivers/gpu/drm/drm_sysfs.c b/drivers/gpu/drm/drm_sysfs.c
index 9987ab8..85ec31b 100644
--- a/drivers/gpu/drm/drm_sysfs.c
+++ b/drivers/gpu/drm/drm_sysfs.c
@@ -70,6 +70,11 @@
 		       CORE_MINOR, CORE_PATCHLEVEL, CORE_DATE);
 }
 
+static char *drm_nodename(struct device *dev)
+{
+	return kasprintf(GFP_KERNEL, "dri/%s", dev_name(dev));
+}
+
 static CLASS_ATTR(version, S_IRUGO, version_show, NULL);
 
 /**
@@ -101,6 +106,8 @@
 	if (err)
 		goto err_out_class;
 
+	class->nodename = drm_nodename;
+
 	return class;
 
 err_out_class:
diff --git a/drivers/hid/usbhid/hiddev.c b/drivers/hid/usbhid/hiddev.c
index e9b436d..9e94215 100644
--- a/drivers/hid/usbhid/hiddev.c
+++ b/drivers/hid/usbhid/hiddev.c
@@ -850,8 +850,14 @@
 #endif
 };
 
+static char *hiddev_nodename(struct device *dev)
+{
+	return kasprintf(GFP_KERNEL, "usb/%s", dev_name(dev));
+}
+
 static struct usb_class_driver hiddev_class = {
 	.name =		"hiddev%d",
+	.nodename =	hiddev_nodename,
 	.fops =		&hiddev_fops,
 	.minor_base =	HIDDEV_MINOR_BASE,
 };
@@ -955,7 +961,6 @@
 	return -ENODEV;
 }
 
-
 static /* const */ struct usb_driver hiddev_driver = {
 	.name =		"hiddev",
 	.probe =	hiddev_usbd_probe,
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index eec7dca..2d50166 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -306,11 +306,11 @@
 	  will be called f71805f.
 
 config SENSORS_F71882FG
-	tristate "Fintek F71862FG, F71882FG and F8000"
+	tristate "Fintek F71858FG, F71862FG, F71882FG and F8000"
 	depends on EXPERIMENTAL
 	help
 	  If you say yes here you get support for hardware monitoring
-	  features of the Fintek F71882FG/F71883FG, F71862FG/71863FG
+	  features of the Fintek F71858FG, F71862FG/71863FG, F71882FG/F71883FG
 	  and F8000 Super-I/O chips.
 
 	  This driver can also be built as a module.  If so, the module
@@ -418,7 +418,7 @@
 	  power sensors and capping hardware in various IBM System X
 	  servers that support Active Energy Manager.  This includes
 	  the x3350, x3550, x3650, x3655, x3755, x3850 M2, x3950 M2,
-	  and certain HS2x/LS2x/QS2x blades.
+	  and certain HC10/HS2x/LS2x/QS2x blades.
 
 	  This driver can also be built as a module.  If so, the module
 	  will be called ibmaem.
@@ -787,6 +787,16 @@
 	  This driver can also be built as a module.  If so, the module
 	  will be called thmc50.
 
+config SENSORS_TMP401
+	tristate "Texas Instruments TMP401 and compatibles"
+	depends on I2C && EXPERIMENTAL
+	help
+	  If you say yes here you get support for Texas Instruments TMP401 and
+	  TMP411 temperature sensor chips.
+
+	  This driver can also be built as a module.  If so, the module
+	  will be called tmp401.
+
 config SENSORS_VIA686A
 	tristate "VIA686A"
 	depends on PCI
diff --git a/drivers/hwmon/Makefile b/drivers/hwmon/Makefile
index 0ae2698..b793dce 100644
--- a/drivers/hwmon/Makefile
+++ b/drivers/hwmon/Makefile
@@ -82,6 +82,7 @@
 obj-$(CONFIG_SENSORS_SMSC47M1)	+= smsc47m1.o
 obj-$(CONFIG_SENSORS_SMSC47M192)+= smsc47m192.o
 obj-$(CONFIG_SENSORS_THMC50)	+= thmc50.o
+obj-$(CONFIG_SENSORS_TMP401)	+= tmp401.o
 obj-$(CONFIG_SENSORS_VIA686A)	+= via686a.o
 obj-$(CONFIG_SENSORS_VT1211)	+= vt1211.o
 obj-$(CONFIG_SENSORS_VT8231)	+= vt8231.o
diff --git a/drivers/hwmon/f71882fg.c b/drivers/hwmon/f71882fg.c
index 5f81ddf..4146105 100644
--- a/drivers/hwmon/f71882fg.c
+++ b/drivers/hwmon/f71882fg.c
@@ -1,6 +1,6 @@
 /***************************************************************************
  *   Copyright (C) 2006 by Hans Edgington <hans@edgington.nl>              *
- *   Copyright (C) 2007,2008 by Hans de Goede <hdegoede@redhat.com>        *
+ *   Copyright (C) 2007-2009 Hans de Goede <hdegoede@redhat.com>           *
  *                                                                         *
  *   This program is free software; you can redistribute it and/or modify  *
  *   it under the terms of the GNU General Public License as published by  *
@@ -32,6 +32,7 @@
 
 #define DRVNAME "f71882fg"
 
+#define SIO_F71858FG_LD_HWM	0x02	/* Hardware monitor logical device */
 #define SIO_F71882FG_LD_HWM	0x04	/* Hardware monitor logical device */
 #define SIO_UNLOCK_KEY		0x87	/* Key to enable Super-I/O */
 #define SIO_LOCK_KEY		0xAA	/* Key to disable Super-I/O */
@@ -44,6 +45,7 @@
 #define SIO_REG_ADDR		0x60	/* Logical device address (2 bytes) */
 
 #define SIO_FINTEK_ID		0x1934	/* Manufacturers ID */
+#define SIO_F71858_ID		0x0507  /* Chipset ID */
 #define SIO_F71862_ID		0x0601	/* Chipset ID */
 #define SIO_F71882_ID		0x0541	/* Chipset ID */
 #define SIO_F8000_ID		0x0581	/* Chipset ID */
@@ -70,6 +72,7 @@
 #define F71882FG_REG_TEMP_HIGH(nr)	(0x81 + 2 * (nr))
 #define F71882FG_REG_TEMP_STATUS	0x62
 #define F71882FG_REG_TEMP_BEEP		0x63
+#define F71882FG_REG_TEMP_CONFIG	0x69
 #define F71882FG_REG_TEMP_HYST(nr)	(0x6C + (nr))
 #define F71882FG_REG_TEMP_TYPE		0x6B
 #define F71882FG_REG_TEMP_DIODE_OPEN	0x6F
@@ -92,9 +95,10 @@
 module_param(force_id, ushort, 0);
 MODULE_PARM_DESC(force_id, "Override the detected device ID");
 
-enum chips { f71862fg, f71882fg, f8000 };
+enum chips { f71858fg, f71862fg, f71882fg, f8000 };
 
 static const char *f71882fg_names[] = {
+	"f71858fg",
 	"f71862fg",
 	"f71882fg",
 	"f8000",
@@ -119,6 +123,7 @@
 	struct device *hwmon_dev;
 
 	struct mutex update_lock;
+	int temp_start;			/* temp numbering start (0 or 1) */
 	char valid;			/* !=0 if following fields are valid */
 	unsigned long last_updated;	/* In jiffies */
 	unsigned long last_limits;	/* In jiffies */
@@ -136,7 +141,7 @@
 	/* Note: all models have only 3 temperature channels, but on some
 	   they are addressed as 0-2 and on others as 1-3, so for coding
 	   convenience we reserve space for 4 channels */
-	u8	temp[4];
+	u16	temp[4];
 	u8	temp_ovt[4];
 	u8	temp_high[4];
 	u8	temp_hyst[2]; /* 2 hysts stored per reg */
@@ -144,6 +149,7 @@
 	u8	temp_status;
 	u8	temp_beep;
 	u8	temp_diode_open;
+	u8	temp_config;
 	u8	pwm[4];
 	u8	pwm_enable;
 	u8	pwm_auto_point_hyst[2];
@@ -247,11 +253,55 @@
 		.name	= DRVNAME,
 	},
 	.probe		= f71882fg_probe,
-	.remove		= __devexit_p(f71882fg_remove),
+	.remove		= f71882fg_remove,
 };
 
 static DEVICE_ATTR(name, S_IRUGO, show_name, NULL);
 
+/* Temp and in attr for the f71858fg */
+static struct sensor_device_attribute_2 f71858fg_in_temp_attr[] = {
+	SENSOR_ATTR_2(in0_input, S_IRUGO, show_in, NULL, 0, 0),
+	SENSOR_ATTR_2(in1_input, S_IRUGO, show_in, NULL, 0, 1),
+	SENSOR_ATTR_2(in2_input, S_IRUGO, show_in, NULL, 0, 2),
+	SENSOR_ATTR_2(temp1_input, S_IRUGO, show_temp, NULL, 0, 0),
+	SENSOR_ATTR_2(temp1_max, S_IRUGO|S_IWUSR, show_temp_max,
+		store_temp_max, 0, 0),
+	SENSOR_ATTR_2(temp1_max_hyst, S_IRUGO|S_IWUSR, show_temp_max_hyst,
+		store_temp_max_hyst, 0, 0),
+	SENSOR_ATTR_2(temp1_max_alarm, S_IRUGO, show_temp_alarm, NULL, 0, 0),
+	SENSOR_ATTR_2(temp1_crit, S_IRUGO|S_IWUSR, show_temp_crit,
+		store_temp_crit, 0, 0),
+	SENSOR_ATTR_2(temp1_crit_hyst, S_IRUGO, show_temp_crit_hyst, NULL,
+		0, 0),
+	SENSOR_ATTR_2(temp1_crit_alarm, S_IRUGO, show_temp_alarm, NULL, 0, 4),
+	SENSOR_ATTR_2(temp1_fault, S_IRUGO, show_temp_fault, NULL, 0, 0),
+	SENSOR_ATTR_2(temp2_input, S_IRUGO, show_temp, NULL, 0, 1),
+	SENSOR_ATTR_2(temp2_max, S_IRUGO|S_IWUSR, show_temp_max,
+		store_temp_max, 0, 1),
+	SENSOR_ATTR_2(temp2_max_hyst, S_IRUGO|S_IWUSR, show_temp_max_hyst,
+		store_temp_max_hyst, 0, 1),
+	SENSOR_ATTR_2(temp2_max_alarm, S_IRUGO, show_temp_alarm, NULL, 0, 1),
+	SENSOR_ATTR_2(temp2_crit, S_IRUGO|S_IWUSR, show_temp_crit,
+		store_temp_crit, 0, 1),
+	SENSOR_ATTR_2(temp2_crit_hyst, S_IRUGO, show_temp_crit_hyst, NULL,
+		0, 1),
+	SENSOR_ATTR_2(temp2_crit_alarm, S_IRUGO, show_temp_alarm, NULL, 0, 5),
+	SENSOR_ATTR_2(temp2_type, S_IRUGO, show_temp_type, NULL, 0, 1),
+	SENSOR_ATTR_2(temp2_fault, S_IRUGO, show_temp_fault, NULL, 0, 1),
+	SENSOR_ATTR_2(temp3_input, S_IRUGO, show_temp, NULL, 0, 2),
+	SENSOR_ATTR_2(temp3_max, S_IRUGO|S_IWUSR, show_temp_max,
+		store_temp_max, 0, 2),
+	SENSOR_ATTR_2(temp3_max_hyst, S_IRUGO|S_IWUSR, show_temp_max_hyst,
+		store_temp_max_hyst, 0, 2),
+	SENSOR_ATTR_2(temp3_max_alarm, S_IRUGO, show_temp_alarm, NULL, 0, 2),
+	SENSOR_ATTR_2(temp3_crit, S_IRUGO|S_IWUSR, show_temp_crit,
+		store_temp_crit, 0, 2),
+	SENSOR_ATTR_2(temp3_crit_hyst, S_IRUGO, show_temp_crit_hyst, NULL,
+		0, 2),
+	SENSOR_ATTR_2(temp3_crit_alarm, S_IRUGO, show_temp_alarm, NULL, 0, 6),
+	SENSOR_ATTR_2(temp3_fault, S_IRUGO, show_temp_fault, NULL, 0, 2),
+};
+
 /* Temp and in attr common to both the f71862fg and f71882fg */
 static struct sensor_device_attribute_2 f718x2fg_in_temp_attr[] = {
 	SENSOR_ATTR_2(in0_input, S_IRUGO, show_in, NULL, 0, 0),
@@ -344,6 +394,7 @@
 	SENSOR_ATTR_2(temp1_max_hyst, S_IRUGO|S_IWUSR, show_temp_max,
 		store_temp_max, 0, 0),
 	SENSOR_ATTR_2(temp1_alarm, S_IRUGO, show_temp_alarm, NULL, 0, 4),
+	SENSOR_ATTR_2(temp1_fault, S_IRUGO, show_temp_fault, NULL, 0, 0),
 	SENSOR_ATTR_2(temp2_input, S_IRUGO, show_temp, NULL, 0, 1),
 	SENSOR_ATTR_2(temp2_max, S_IRUGO|S_IWUSR, show_temp_crit,
 		store_temp_crit, 0, 1),
@@ -351,12 +402,14 @@
 		store_temp_max, 0, 1),
 	SENSOR_ATTR_2(temp2_alarm, S_IRUGO, show_temp_alarm, NULL, 0, 5),
 	SENSOR_ATTR_2(temp2_type, S_IRUGO, show_temp_type, NULL, 0, 1),
+	SENSOR_ATTR_2(temp2_fault, S_IRUGO, show_temp_fault, NULL, 0, 1),
 	SENSOR_ATTR_2(temp3_input, S_IRUGO, show_temp, NULL, 0, 2),
 	SENSOR_ATTR_2(temp3_max, S_IRUGO|S_IWUSR, show_temp_crit,
 		store_temp_crit, 0, 2),
 	SENSOR_ATTR_2(temp3_max_hyst, S_IRUGO|S_IWUSR, show_temp_max,
 		store_temp_max, 0, 2),
 	SENSOR_ATTR_2(temp3_alarm, S_IRUGO, show_temp_alarm, NULL, 0, 6),
+	SENSOR_ATTR_2(temp3_fault, S_IRUGO, show_temp_fault, NULL, 0, 2),
 };
 
 /* Fan / PWM attr common to all models */
@@ -395,6 +448,9 @@
 		      show_pwm_auto_point_channel,
 		      store_pwm_auto_point_channel, 0, 1),
 
+	SENSOR_ATTR_2(pwm3, S_IRUGO|S_IWUSR, show_pwm, store_pwm, 0, 2),
+	SENSOR_ATTR_2(pwm3_enable, S_IRUGO|S_IWUSR, show_pwm_enable,
+		      store_pwm_enable, 0, 2),
 	SENSOR_ATTR_2(pwm3_interpolate, S_IRUGO|S_IWUSR,
 		      show_pwm_interpolate, store_pwm_interpolate, 0, 2),
 	SENSOR_ATTR_2(pwm3_auto_channels_temp, S_IRUGO|S_IWUSR,
@@ -450,9 +506,6 @@
 	SENSOR_ATTR_2(pwm2_auto_point2_temp_hyst, S_IRUGO,
 		      show_pwm_auto_point_temp_hyst, NULL, 3, 1),
 
-	SENSOR_ATTR_2(pwm3, S_IRUGO|S_IWUSR, show_pwm, store_pwm, 0, 2),
-	SENSOR_ATTR_2(pwm3_enable, S_IRUGO|S_IWUSR, show_pwm_enable,
-		      store_pwm_enable, 0, 2),
 	SENSOR_ATTR_2(pwm3_auto_point1_pwm, S_IRUGO|S_IWUSR,
 		      show_pwm_auto_point_pwm, store_pwm_auto_point_pwm,
 		      1, 2),
@@ -473,22 +526,8 @@
 		      show_pwm_auto_point_temp_hyst, NULL, 3, 2),
 };
 
-/* Fan / PWM attr for the f71882fg */
-static struct sensor_device_attribute_2 f71882fg_fan_attr[] = {
-	SENSOR_ATTR_2(fan1_beep, S_IRUGO|S_IWUSR, show_fan_beep,
-		store_fan_beep, 0, 0),
-	SENSOR_ATTR_2(fan2_beep, S_IRUGO|S_IWUSR, show_fan_beep,
-		store_fan_beep, 0, 1),
-	SENSOR_ATTR_2(fan3_beep, S_IRUGO|S_IWUSR, show_fan_beep,
-		store_fan_beep, 0, 2),
-	SENSOR_ATTR_2(fan4_input, S_IRUGO, show_fan, NULL, 0, 3),
-	SENSOR_ATTR_2(fan4_full_speed, S_IRUGO|S_IWUSR,
-		      show_fan_full_speed,
-		      store_fan_full_speed, 0, 3),
-	SENSOR_ATTR_2(fan4_beep, S_IRUGO|S_IWUSR, show_fan_beep,
-		store_fan_beep, 0, 3),
-	SENSOR_ATTR_2(fan4_alarm, S_IRUGO, show_fan_alarm, NULL, 0, 3),
-
+/* Fan / PWM attr common to both the f71882fg and f71858fg */
+static struct sensor_device_attribute_2 f71882fg_f71858fg_fan_attr[] = {
 	SENSOR_ATTR_2(pwm1_auto_point1_pwm, S_IRUGO|S_IWUSR,
 		      show_pwm_auto_point_pwm, store_pwm_auto_point_pwm,
 		      0, 0),
@@ -565,9 +604,6 @@
 	SENSOR_ATTR_2(pwm2_auto_point4_temp_hyst, S_IRUGO,
 		      show_pwm_auto_point_temp_hyst, NULL, 3, 1),
 
-	SENSOR_ATTR_2(pwm3, S_IRUGO|S_IWUSR, show_pwm, store_pwm, 0, 2),
-	SENSOR_ATTR_2(pwm3_enable, S_IRUGO|S_IWUSR, show_pwm_enable,
-		      store_pwm_enable, 0, 2),
 	SENSOR_ATTR_2(pwm3_auto_point1_pwm, S_IRUGO|S_IWUSR,
 		      show_pwm_auto_point_pwm, store_pwm_auto_point_pwm,
 		      0, 2),
@@ -605,6 +641,24 @@
 		      show_pwm_auto_point_temp_hyst, NULL, 2, 2),
 	SENSOR_ATTR_2(pwm3_auto_point4_temp_hyst, S_IRUGO,
 		      show_pwm_auto_point_temp_hyst, NULL, 3, 2),
+};
+
+/* Fan / PWM attr found on the f71882fg but not on the f71858fg */
+static struct sensor_device_attribute_2 f71882fg_fan_attr[] = {
+	SENSOR_ATTR_2(fan1_beep, S_IRUGO|S_IWUSR, show_fan_beep,
+		store_fan_beep, 0, 0),
+	SENSOR_ATTR_2(fan2_beep, S_IRUGO|S_IWUSR, show_fan_beep,
+		store_fan_beep, 0, 1),
+	SENSOR_ATTR_2(fan3_beep, S_IRUGO|S_IWUSR, show_fan_beep,
+		store_fan_beep, 0, 2),
+
+	SENSOR_ATTR_2(fan4_input, S_IRUGO, show_fan, NULL, 0, 3),
+	SENSOR_ATTR_2(fan4_full_speed, S_IRUGO|S_IWUSR,
+		      show_fan_full_speed,
+		      store_fan_full_speed, 0, 3),
+	SENSOR_ATTR_2(fan4_beep, S_IRUGO|S_IWUSR, show_fan_beep,
+		store_fan_beep, 0, 3),
+	SENSOR_ATTR_2(fan4_alarm, S_IRUGO, show_fan_alarm, NULL, 0, 3),
 
 	SENSOR_ATTR_2(pwm4, S_IRUGO|S_IWUSR, show_pwm, store_pwm, 0, 3),
 	SENSOR_ATTR_2(pwm4_enable, S_IRUGO|S_IWUSR, show_pwm_enable,
@@ -659,8 +713,6 @@
 static struct sensor_device_attribute_2 f8000_fan_attr[] = {
 	SENSOR_ATTR_2(fan4_input, S_IRUGO, show_fan, NULL, 0, 3),
 
-	SENSOR_ATTR_2(pwm3, S_IRUGO, show_pwm, NULL, 0, 2),
-
 	SENSOR_ATTR_2(temp1_auto_point1_pwm, S_IRUGO|S_IWUSR,
 		      show_pwm_auto_point_pwm, store_pwm_auto_point_pwm,
 		      0, 2),
@@ -857,13 +909,20 @@
 	outb(val & 255, data->addr + DATA_REG_OFFSET);
 }
 
+static u16 f71882fg_read_temp(struct f71882fg_data *data, int nr)
+{
+	if (data->type == f71858fg)
+		return f71882fg_read16(data, F71882FG_REG_TEMP(nr));
+	else
+		return f71882fg_read8(data, F71882FG_REG_TEMP(nr));
+}
+
 static struct f71882fg_data *f71882fg_update_device(struct device *dev)
 {
 	struct f71882fg_data *data = dev_get_drvdata(dev);
 	int nr, reg = 0, reg2;
 	int nr_fans = (data->type == f71882fg) ? 4 : 3;
-	int nr_ins = (data->type == f8000) ? 3 : 9;
-	int temp_start = (data->type == f8000) ? 0 : 1;
+	int nr_ins = (data->type == f71858fg || data->type == f8000) ? 3 : 9;
 
 	mutex_lock(&data->update_lock);
 
@@ -878,7 +937,7 @@
 		}
 
 		/* Get High & boundary temps*/
-		for (nr = temp_start; nr < 3 + temp_start; nr++) {
+		for (nr = data->temp_start; nr < 3 + data->temp_start; nr++) {
 			data->temp_ovt[nr] = f71882fg_read8(data,
 						F71882FG_REG_TEMP_OVT(nr));
 			data->temp_high[nr] = f71882fg_read8(data,
@@ -886,14 +945,17 @@
 		}
 
 		if (data->type != f8000) {
-			data->fan_beep = f71882fg_read8(data,
-						F71882FG_REG_FAN_BEEP);
-			data->temp_beep = f71882fg_read8(data,
-						F71882FG_REG_TEMP_BEEP);
 			data->temp_hyst[0] = f71882fg_read8(data,
 						F71882FG_REG_TEMP_HYST(0));
 			data->temp_hyst[1] = f71882fg_read8(data,
 						F71882FG_REG_TEMP_HYST(1));
+		}
+
+		if (data->type == f71862fg || data->type == f71882fg) {
+			data->fan_beep = f71882fg_read8(data,
+						F71882FG_REG_FAN_BEEP);
+			data->temp_beep = f71882fg_read8(data,
+						F71882FG_REG_TEMP_BEEP);
 			/* Have to hardcode type, because temp1 is special */
 			reg  = f71882fg_read8(data, F71882FG_REG_TEMP_TYPE);
 			data->temp_type[2] = (reg & 0x04) ? 2 : 4;
@@ -904,10 +966,10 @@
 			data->temp_type[1] = 6 /* PECI */;
 		else if ((reg2 & 0x03) == 0x02)
 			data->temp_type[1] = 5 /* AMDSI */;
-		else if (data->type != f8000)
+		else if (data->type == f71862fg || data->type == f71882fg)
 			data->temp_type[1] = (reg & 0x02) ? 2 : 4;
 		else
-			data->temp_type[1] = 2; /* F8000 only supports BJT */
+			data->temp_type[1] = 2; /* Only supports BJT */
 
 		data->pwm_enable = f71882fg_read8(data,
 						  F71882FG_REG_PWM_ENABLE);
@@ -963,9 +1025,8 @@
 						F71882FG_REG_TEMP_STATUS);
 		data->temp_diode_open = f71882fg_read8(data,
 						F71882FG_REG_TEMP_DIODE_OPEN);
-		for (nr = temp_start; nr < 3 + temp_start; nr++)
-			data->temp[nr] = f71882fg_read8(data,
-						F71882FG_REG_TEMP(nr));
+		for (nr = data->temp_start; nr < 3 + data->temp_start; nr++)
+			data->temp[nr] = f71882fg_read_temp(data, nr);
 
 		data->fan_status = f71882fg_read8(data,
 						F71882FG_REG_FAN_STATUS);
@@ -1168,8 +1229,24 @@
 {
 	struct f71882fg_data *data = f71882fg_update_device(dev);
 	int nr = to_sensor_dev_attr_2(devattr)->index;
+	int sign, temp;
 
-	return sprintf(buf, "%d\n", data->temp[nr] * 1000);
+	if (data->type == f71858fg) {
+		/* TEMP_TABLE_SEL 1 or 3 ? */
+		if (data->temp_config & 1) {
+			sign = data->temp[nr] & 0x0001;
+			temp = (data->temp[nr] >> 5) & 0x7ff;
+		} else {
+			sign = data->temp[nr] & 0x8000;
+			temp = (data->temp[nr] >> 5) & 0x3ff;
+		}
+		temp *= 125;
+		if (sign)
+			temp -= 128000;
+	} else
+		temp = data->temp[nr] * 1000;
+
+	return sprintf(buf, "%d\n", temp);
 }
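As a worked example of the conversion above, assuming the 11-bit table mode (temp_config bit 0 set): a raw reading of 0x2800 has sign bit 0 and (0x2800 >> 5) & 0x7ff = 0x140 = 320, so 320 * 125 = 40000 is reported, i.e. 40.0 degrees Celsius in the usual hwmon millidegree convention.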
 
 static ssize_t show_temp_max(struct device *dev, struct device_attribute
@@ -1440,6 +1517,10 @@
 	int nr = to_sensor_dev_attr_2(devattr)->index;
 	long val = simple_strtol(buf, NULL, 10);
 
+	/* Special case for F8000 pwm channel 3 which only does auto mode */
+	if (data->type == f8000 && nr == 2 && val != 2)
+		return -EINVAL;
+
 	mutex_lock(&data->update_lock);
 	data->pwm_enable = f71882fg_read8(data, F71882FG_REG_PWM_ENABLE);
 	/* Special case for F8000 auto PWM mode / Thermostat mode */
@@ -1458,6 +1539,12 @@
 	} else {
 		switch (val) {
 		case 1:
+			/* The f71858fg does not support manual RPM mode */
+			if (data->type == f71858fg &&
+			    ((data->pwm_enable >> (2 * nr)) & 1)) {
+				count = -EINVAL;
+				goto leave;
+			}
 			data->pwm_enable |= 2 << (2 * nr);
 			break;		/* Manual */
 		case 2:
@@ -1616,9 +1703,9 @@
 	int result;
 	struct f71882fg_data *data = f71882fg_update_device(dev);
 	int nr = to_sensor_dev_attr_2(devattr)->index;
-	int temp_start = (data->type == f8000) ? 0 : 1;
 
-	result = 1 << ((data->pwm_auto_point_mapping[nr] & 3) - temp_start);
+	result = 1 << ((data->pwm_auto_point_mapping[nr] & 3) -
+		       data->temp_start);
 
 	return sprintf(buf, "%d\n", result);
 }
@@ -1629,7 +1716,6 @@
 {
 	struct f71882fg_data *data = dev_get_drvdata(dev);
 	int nr = to_sensor_dev_attr_2(devattr)->index;
-	int temp_start = (data->type == f8000) ? 0 : 1;
 	long val = simple_strtol(buf, NULL, 10);
 
 	switch (val) {
@@ -1645,7 +1731,7 @@
 	default:
 		return -EINVAL;
 	}
-	val += temp_start;
+	val += data->temp_start;
 	mutex_lock(&data->update_lock);
 	data->pwm_auto_point_mapping[nr] =
 		f71882fg_read8(data, F71882FG_REG_POINT_MAPPING(nr));
@@ -1721,6 +1807,8 @@
 
 	data->addr = platform_get_resource(pdev, IORESOURCE_IO, 0)->start;
 	data->type = sio_data->type;
+	data->temp_start =
+	    (data->type == f71858fg || data->type == f8000) ? 0 : 1;
 	mutex_init(&data->update_lock);
 	platform_set_drvdata(pdev, data);
 
@@ -1736,19 +1824,6 @@
 		goto exit_free;
 	}
 
-	data->pwm_enable = f71882fg_read8(data, F71882FG_REG_PWM_ENABLE);
-	/* If it is a 71862 and the fan / pwm part is enabled sanity check
-	   the pwm settings */
-	if (data->type == f71862fg && (start_reg & 0x02)) {
-		if ((data->pwm_enable & 0x15) != 0x15) {
-			dev_err(&pdev->dev,
-				"Invalid (reserved) pwm settings: 0x%02x\n",
-				(unsigned int)data->pwm_enable);
-			err = -ENODEV;
-			goto exit_free;
-		}
-	}
-
 	/* Register sysfs interface files */
 	err = device_create_file(&pdev->dev, &dev_attr_name);
 	if (err)
@@ -1756,6 +1831,20 @@
 
 	if (start_reg & 0x01) {
 		switch (data->type) {
+		case f71858fg:
+			data->temp_config =
+				f71882fg_read8(data, F71882FG_REG_TEMP_CONFIG);
+			if (data->temp_config & 0x10)
+				/* The f71858fg temperature alarms behave as
+				   the f8000 alarms in this mode */
+				err = f71882fg_create_sysfs_files(pdev,
+					f8000_in_temp_attr,
+					ARRAY_SIZE(f8000_in_temp_attr));
+			else
+				err = f71882fg_create_sysfs_files(pdev,
+					f71858fg_in_temp_attr,
+					ARRAY_SIZE(f71858fg_in_temp_attr));
+			break;
 		case f71882fg:
 			err = f71882fg_create_sysfs_files(pdev,
 					f71882fg_in_temp_attr,
@@ -1779,6 +1868,35 @@
 	}
 
 	if (start_reg & 0x02) {
+		data->pwm_enable =
+			f71882fg_read8(data, F71882FG_REG_PWM_ENABLE);
+
+		/* Sanity check the pwm settings */
+		switch (data->type) {
+		case f71858fg:
+			err = 0;
+			for (i = 0; i < nr_fans; i++)
+				if (((data->pwm_enable >> (i * 2)) & 3) == 3)
+					err = 1;
+			break;
+		case f71862fg:
+			err = (data->pwm_enable & 0x15) != 0x15;
+			break;
+		case f71882fg:
+			err = 0;
+			break;
+		case f8000:
+			err = data->pwm_enable & 0x20;
+			break;
+		}
+		if (err) {
+			dev_err(&pdev->dev,
+				"Invalid (reserved) pwm settings: 0x%02x\n",
+				(unsigned int)data->pwm_enable);
+			err = -ENODEV;
+			goto exit_unregister_sysfs;
+		}
+
 		err = f71882fg_create_sysfs_files(pdev, fxxxx_fan_attr,
 					ARRAY_SIZE(fxxxx_fan_attr));
 		if (err)
@@ -1794,6 +1912,13 @@
 			err = f71882fg_create_sysfs_files(pdev,
 					f71882fg_fan_attr,
 					ARRAY_SIZE(f71882fg_fan_attr));
+			if (err)
+				goto exit_unregister_sysfs;
+			/* fall through! */
+		case f71858fg:
+			err = f71882fg_create_sysfs_files(pdev,
+					f71882fg_f71858fg_fan_attr,
+					ARRAY_SIZE(f71882fg_f71858fg_fan_attr));
 			break;
 		case f8000:
 			err = f71882fg_create_sysfs_files(pdev,
@@ -1878,6 +2003,9 @@
 
 	devid = force_id ? force_id : superio_inw(sioaddr, SIO_REG_DEVID);
 	switch (devid) {
+	case SIO_F71858_ID:
+		sio_data->type = f71858fg;
+		break;
 	case SIO_F71862_ID:
 		sio_data->type = f71862fg;
 		break;
@@ -1892,7 +2020,11 @@
 		goto exit;
 	}
 
-	superio_select(sioaddr, SIO_F71882FG_LD_HWM);
+	if (sio_data->type == f71858fg)
+		superio_select(sioaddr, SIO_F71858FG_LD_HWM);
+	else
+		superio_select(sioaddr, SIO_F71882FG_LD_HWM);
+
 	if (!(superio_inb(sioaddr, SIO_REG_ENABLE) & 0x01)) {
 		printk(KERN_WARNING DRVNAME ": Device not activated\n");
 		goto exit;
diff --git a/drivers/hwmon/hwmon.c b/drivers/hwmon/hwmon.c
index e15c3e7..29ea675 100644
--- a/drivers/hwmon/hwmon.c
+++ b/drivers/hwmon/hwmon.c
@@ -18,6 +18,7 @@
 #include <linux/hwmon.h>
 #include <linux/gfp.h>
 #include <linux/spinlock.h>
+#include <linux/pci.h>
 
 #define HWMON_ID_PREFIX "hwmon"
 #define HWMON_ID_FORMAT HWMON_ID_PREFIX "%d"
@@ -86,8 +87,36 @@
 			"hwmon_device_unregister() failed: bad class ID!\n");
 }
 
+static void __init hwmon_pci_quirks(void)
+{
+#if defined CONFIG_X86 && defined CONFIG_PCI
+	struct pci_dev *sb;
+	u16 base;
+	u8 enable;
+
+	/* Open access to 0x295-0x296 on MSI MS-7031 */
+	sb = pci_get_device(PCI_VENDOR_ID_ATI, 0x436c, NULL);
+	if (sb &&
+	    (sb->subsystem_vendor == 0x1462 &&	/* MSI */
+	     sb->subsystem_device == 0x0031)) {	/* MS-7031 */
+
+		pci_read_config_byte(sb, 0x48, &enable);
+		pci_read_config_word(sb, 0x64, &base);
+
+		if (base == 0 && !(enable & BIT(2))) {
+			dev_info(&sb->dev,
+				 "Opening wide generic port at 0x295\n");
+			pci_write_config_word(sb, 0x64, 0x295);
+			pci_write_config_byte(sb, 0x48, enable | BIT(2));
+		}
+	}
+#endif
+}
+
 static int __init hwmon_init(void)
 {
+	hwmon_pci_quirks();
+
 	hwmon_class = class_create(THIS_MODULE, "hwmon");
 	if (IS_ERR(hwmon_class)) {
 		printk(KERN_ERR "hwmon.c: couldn't create sysfs class\n");
diff --git a/drivers/hwmon/ibmaem.c b/drivers/hwmon/ibmaem.c
index fe74609..405d3fb 100644
--- a/drivers/hwmon/ibmaem.c
+++ b/drivers/hwmon/ibmaem.c
@@ -1127,3 +1127,4 @@
 MODULE_ALIAS("dmi:bvnIBM:*:pnIBMSystemx3655-*");
 MODULE_ALIAS("dmi:bvnIBM:*:pnIBMSystemx3755-*");
 MODULE_ALIAS("dmi:bvnIBM:*:pnIBM3850M2/x3950M2-*");
+MODULE_ALIAS("dmi:bvnIBM:*:pnIBMBladeHC10-*");
diff --git a/drivers/hwmon/max6650.c b/drivers/hwmon/max6650.c
index f27af6a..86142a8 100644
--- a/drivers/hwmon/max6650.c
+++ b/drivers/hwmon/max6650.c
@@ -12,7 +12,7 @@
  * also work with the MAX6651. It does not distinguish max6650 and max6651
  * chips.
  *
- * Tha datasheet was last seen at:
+ * The datasheet was last seen at:
  *
  *        http://pdfserv.maxim-ic.com/en/ds/MAX6650-MAX6651.pdf
  *
@@ -98,6 +98,16 @@
 #define MAX6650_CFG_MODE_OPEN_LOOP	0x30
 #define MAX6650_COUNT_MASK		0x03
 
+/*
+ * Alarm status register bits
+ */
+
+#define MAX6650_ALRM_MAX	0x01
+#define MAX6650_ALRM_MIN	0x02
+#define MAX6650_ALRM_TACH	0x04
+#define MAX6650_ALRM_GPIO1	0x08
+#define MAX6650_ALRM_GPIO2	0x10
+
 /* Minimum and maximum values of the FAN-RPM */
 #define FAN_RPM_MIN 240
 #define FAN_RPM_MAX 30000
@@ -151,6 +161,7 @@
 	u8 tach[4];
 	u8 count;
 	u8 dac;
+	u8 alarm;
 };
 
 static ssize_t get_fan(struct device *dev, struct device_attribute *devattr,
@@ -418,6 +429,33 @@
 	return count;
 }
 
+/*
+ * Get alarm stati:
+ * Possible values:
+ * 0 = no alarm
+ * 1 = alarm
+ */
+
+static ssize_t get_alarm(struct device *dev, struct device_attribute *devattr,
+			 char *buf)
+{
+	struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+	struct max6650_data *data = max6650_update_device(dev);
+	struct i2c_client *client = to_i2c_client(dev);
+	int alarm = 0;
+
+	if (data->alarm & attr->index) {
+		mutex_lock(&data->update_lock);
+		alarm = 1;
+		data->alarm &= ~attr->index;
+		data->alarm |= i2c_smbus_read_byte_data(client,
+							MAX6650_REG_ALARM);
+		mutex_unlock(&data->update_lock);
+	}
+
+	return sprintf(buf, "%d\n", alarm);
+}
+
 static SENSOR_DEVICE_ATTR(fan1_input, S_IRUGO, get_fan, NULL, 0);
 static SENSOR_DEVICE_ATTR(fan2_input, S_IRUGO, get_fan, NULL, 1);
 static SENSOR_DEVICE_ATTR(fan3_input, S_IRUGO, get_fan, NULL, 2);
@@ -426,7 +464,41 @@
 static DEVICE_ATTR(fan1_div, S_IWUSR | S_IRUGO, get_div, set_div);
 static DEVICE_ATTR(pwm1_enable, S_IWUSR | S_IRUGO, get_enable, set_enable);
 static DEVICE_ATTR(pwm1, S_IWUSR | S_IRUGO, get_pwm, set_pwm);
+static SENSOR_DEVICE_ATTR(fan1_max_alarm, S_IRUGO, get_alarm, NULL,
+			  MAX6650_ALRM_MAX);
+static SENSOR_DEVICE_ATTR(fan1_min_alarm, S_IRUGO, get_alarm, NULL,
+			  MAX6650_ALRM_MIN);
+static SENSOR_DEVICE_ATTR(fan1_fault, S_IRUGO, get_alarm, NULL,
+			  MAX6650_ALRM_TACH);
+static SENSOR_DEVICE_ATTR(gpio1_alarm, S_IRUGO, get_alarm, NULL,
+			  MAX6650_ALRM_GPIO1);
+static SENSOR_DEVICE_ATTR(gpio2_alarm, S_IRUGO, get_alarm, NULL,
+			  MAX6650_ALRM_GPIO2);
 
+static mode_t max6650_attrs_visible(struct kobject *kobj, struct attribute *a,
+				    int n)
+{
+	struct device *dev = container_of(kobj, struct device, kobj);
+	struct i2c_client *client = to_i2c_client(dev);
+	u8 alarm_en = i2c_smbus_read_byte_data(client, MAX6650_REG_ALARM_EN);
+	struct device_attribute *devattr;
+
+	/*
+	 * Hide the alarms that have not been enabled by the firmware
+	 */
+
+	devattr = container_of(a, struct device_attribute, attr);
+	if (devattr == &sensor_dev_attr_fan1_max_alarm.dev_attr
+	 || devattr == &sensor_dev_attr_fan1_min_alarm.dev_attr
+	 || devattr == &sensor_dev_attr_fan1_fault.dev_attr
+	 || devattr == &sensor_dev_attr_gpio1_alarm.dev_attr
+	 || devattr == &sensor_dev_attr_gpio2_alarm.dev_attr) {
+		if (!(alarm_en & to_sensor_dev_attr(devattr)->index))
+			return 0;
+	}
+
+	return a->mode;
+}
 
 static struct attribute *max6650_attrs[] = {
 	&sensor_dev_attr_fan1_input.dev_attr.attr,
@@ -437,11 +509,17 @@
 	&dev_attr_fan1_div.attr,
 	&dev_attr_pwm1_enable.attr,
 	&dev_attr_pwm1.attr,
+	&sensor_dev_attr_fan1_max_alarm.dev_attr.attr,
+	&sensor_dev_attr_fan1_min_alarm.dev_attr.attr,
+	&sensor_dev_attr_fan1_fault.dev_attr.attr,
+	&sensor_dev_attr_gpio1_alarm.dev_attr.attr,
+	&sensor_dev_attr_gpio2_alarm.dev_attr.attr,
 	NULL
 };
 
 static struct attribute_group max6650_attr_grp = {
 	.attrs = max6650_attrs,
+	.is_visible = max6650_attrs_visible,
 };
 
 /*
@@ -659,6 +737,12 @@
 							MAX6650_REG_COUNT);
 		data->dac = i2c_smbus_read_byte_data(client, MAX6650_REG_DAC);
 
+		/* Alarm bits are cleared on read once the condition that
+		 * caused them is gone. Latch the value here so the register
+		 * can still be reported through the individual alarm files. */
+		data->alarm |= i2c_smbus_read_byte_data(client,
+							MAX6650_REG_ALARM);
+
 		data->last_updated = jiffies;
 		data->valid = 1;
 	}
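
Illustration (not part of the patch): the max6650 changes above rely on the sysfs group .is_visible hook to hide alarm files whose enable bit the firmware has not set. A minimal sketch of that pattern for a hypothetical driver, with example_attrs and example_feature_enabled() standing in for real symbols:

/* Hypothetical sketch of the .is_visible pattern used above. */
static mode_t example_attrs_visible(struct kobject *kobj, struct attribute *a,
				    int n)
{
	struct device *dev = container_of(kobj, struct device, kobj);

	/* Returning 0 hides the attribute; returning a->mode exposes it. */
	if (!example_feature_enabled(dev))	/* hypothetical helper */
		return 0;
	return a->mode;
}

static struct attribute_group example_attr_grp = {
	.attrs		= example_attrs,	/* NULL-terminated array, assumed */
	.is_visible	= example_attrs_visible,
};
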
diff --git a/drivers/hwmon/sht15.c b/drivers/hwmon/sht15.c
index 6cbdc2f..56cd600 100644
--- a/drivers/hwmon/sht15.c
+++ b/drivers/hwmon/sht15.c
@@ -627,35 +627,35 @@
 			.owner = THIS_MODULE,
 		},
 		.probe = sht15_probe,
-		.remove = sht15_remove,
+		.remove = __devexit_p(sht15_remove),
 	}, {
 		.driver = {
 			.name = "sht11",
 			.owner = THIS_MODULE,
 		},
 		.probe = sht15_probe,
-		.remove = sht15_remove,
+		.remove = __devexit_p(sht15_remove),
 	}, {
 		.driver = {
 			.name = "sht15",
 			.owner = THIS_MODULE,
 		},
 		.probe = sht15_probe,
-		.remove = sht15_remove,
+		.remove = __devexit_p(sht15_remove),
 	}, {
 		.driver = {
 			.name = "sht71",
 			.owner = THIS_MODULE,
 		},
 		.probe = sht15_probe,
-		.remove = sht15_remove,
+		.remove = __devexit_p(sht15_remove),
 	}, {
 		.driver = {
 			.name = "sht75",
 			.owner = THIS_MODULE,
 		},
 		.probe = sht15_probe,
-		.remove = sht15_remove,
+		.remove = __devexit_p(sht15_remove),
 	},
 };
 
diff --git a/drivers/hwmon/tmp401.c b/drivers/hwmon/tmp401.c
new file mode 100644
index 0000000..7b34f2c
--- /dev/null
+++ b/drivers/hwmon/tmp401.c
@@ -0,0 +1,690 @@
+/* tmp401.c
+ *
+ * Copyright (C) 2007,2008 Hans de Goede <hdegoede@redhat.com>
+ * Preliminary tmp411 support by:
+ * Gabriel Konat, Sander Leget, Wouter Willems
+ * Copyright (C) 2009 Andre Prendel <andre.prendel@gmx.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+/*
+ * Driver for the Texas Instruments TMP401 SMBUS temperature sensor IC.
+ *
+ * Note this IC is in some aspects similar to the LM90, but it has quite a
+ * few differences too; for example, the local temp has a higher resolution
+ * and thus uses 16-bit registers for its value and limit instead of 8 bits.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/jiffies.h>
+#include <linux/i2c.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+#include <linux/err.h>
+#include <linux/mutex.h>
+#include <linux/sysfs.h>
+
+/* Addresses to scan */
+static const unsigned short normal_i2c[] = { 0x4c, I2C_CLIENT_END };
+
+/* Insmod parameters */
+I2C_CLIENT_INSMOD_2(tmp401, tmp411);
+
+/*
+ * The TMP401 registers, note some registers have different addresses for
+ * reading and writing
+ */
+#define TMP401_STATUS				0x02
+#define TMP401_CONFIG_READ			0x03
+#define TMP401_CONFIG_WRITE			0x09
+#define TMP401_CONVERSION_RATE_READ		0x04
+#define TMP401_CONVERSION_RATE_WRITE		0x0A
+#define TMP401_TEMP_CRIT_HYST			0x21
+#define TMP401_CONSECUTIVE_ALERT		0x22
+#define TMP401_MANUFACTURER_ID_REG		0xFE
+#define TMP401_DEVICE_ID_REG			0xFF
+#define TMP411_N_FACTOR_REG			0x18
+
+static const u8 TMP401_TEMP_MSB[2]			= { 0x00, 0x01 };
+static const u8 TMP401_TEMP_LSB[2]			= { 0x15, 0x10 };
+static const u8 TMP401_TEMP_LOW_LIMIT_MSB_READ[2]	= { 0x06, 0x08 };
+static const u8 TMP401_TEMP_LOW_LIMIT_MSB_WRITE[2]	= { 0x0C, 0x0E };
+static const u8 TMP401_TEMP_LOW_LIMIT_LSB[2]		= { 0x17, 0x14 };
+static const u8 TMP401_TEMP_HIGH_LIMIT_MSB_READ[2]	= { 0x05, 0x07 };
+static const u8 TMP401_TEMP_HIGH_LIMIT_MSB_WRITE[2]	= { 0x0B, 0x0D };
+static const u8 TMP401_TEMP_HIGH_LIMIT_LSB[2]		= { 0x16, 0x13 };
+/* These are called the THERM limit / hysteresis / mask in the datasheet */
+static const u8 TMP401_TEMP_CRIT_LIMIT[2]		= { 0x20, 0x19 };
+
+static const u8 TMP411_TEMP_LOWEST_MSB[2]		= { 0x30, 0x34 };
+static const u8 TMP411_TEMP_LOWEST_LSB[2]		= { 0x31, 0x35 };
+static const u8 TMP411_TEMP_HIGHEST_MSB[2]		= { 0x32, 0x36 };
+static const u8 TMP411_TEMP_HIGHEST_LSB[2]		= { 0x33, 0x37 };
+
+/* Flags */
+#define TMP401_CONFIG_RANGE		0x04
+#define TMP401_CONFIG_SHUTDOWN		0x40
+#define TMP401_STATUS_LOCAL_CRIT		0x01
+#define TMP401_STATUS_REMOTE_CRIT		0x02
+#define TMP401_STATUS_REMOTE_OPEN		0x04
+#define TMP401_STATUS_REMOTE_LOW		0x08
+#define TMP401_STATUS_REMOTE_HIGH		0x10
+#define TMP401_STATUS_LOCAL_LOW		0x20
+#define TMP401_STATUS_LOCAL_HIGH		0x40
+
+/* Manufacturer / Device ID's */
+#define TMP401_MANUFACTURER_ID			0x55
+#define TMP401_DEVICE_ID			0x11
+#define TMP411_DEVICE_ID			0x12
+
+/*
+ * Functions declarations
+ */
+
+static int tmp401_probe(struct i2c_client *client,
+			const struct i2c_device_id *id);
+static int tmp401_detect(struct i2c_client *client, int kind,
+			 struct i2c_board_info *info);
+static int tmp401_remove(struct i2c_client *client);
+static struct tmp401_data *tmp401_update_device(struct device *dev);
+
+/*
+ * Driver data (common to all clients)
+ */
+
+static const struct i2c_device_id tmp401_id[] = {
+	{ "tmp401", tmp401 },
+	{ "tmp411", tmp411 },
+	{ }
+};
+MODULE_DEVICE_TABLE(i2c, tmp401_id);
+
+static struct i2c_driver tmp401_driver = {
+	.class		= I2C_CLASS_HWMON,
+	.driver = {
+		.name	= "tmp401",
+	},
+	.probe		= tmp401_probe,
+	.remove		= tmp401_remove,
+	.id_table	= tmp401_id,
+	.detect		= tmp401_detect,
+	.address_data	= &addr_data,
+};
+
+/*
+ * Client data (each client gets its own)
+ */
+
+struct tmp401_data {
+	struct device *hwmon_dev;
+	struct mutex update_lock;
+	char valid; /* zero until following fields are valid */
+	unsigned long last_updated; /* in jiffies */
+	int kind;
+
+	/* register values */
+	u8 status;
+	u8 config;
+	u16 temp[2];
+	u16 temp_low[2];
+	u16 temp_high[2];
+	u8 temp_crit[2];
+	u8 temp_crit_hyst;
+	u16 temp_lowest[2];
+	u16 temp_highest[2];
+};
+
+/*
+ * Sysfs attr show / store functions
+ */
+
+static int tmp401_register_to_temp(u16 reg, u8 config)
+{
+	int temp = reg;
+
+	if (config & TMP401_CONFIG_RANGE)
+		temp -= 64 * 256;
+
+	return (temp * 625 + 80) / 160;
+}
+
+static u16 tmp401_temp_to_register(long temp, u8 config)
+{
+	if (config & TMP401_CONFIG_RANGE) {
+		temp = SENSORS_LIMIT(temp, -64000, 191000);
+		temp += 64000;
+	} else
+		temp = SENSORS_LIMIT(temp, 0, 127000);
+
+	return (temp * 160 + 312) / 625;
+}
+
+static int tmp401_crit_register_to_temp(u8 reg, u8 config)
+{
+	int temp = reg;
+
+	if (config & TMP401_CONFIG_RANGE)
+		temp -= 64;
+
+	return temp * 1000;
+}
+
+static u8 tmp401_crit_temp_to_register(long temp, u8 config)
+{
+	if (config & TMP401_CONFIG_RANGE) {
+		temp = SENSORS_LIMIT(temp, -64000, 191000);
+		temp += 64000;
+	} else
+		temp = SENSORS_LIMIT(temp, 0, 127000);
+
+	return (temp + 500) / 1000;
+}
+
+static ssize_t show_temp_value(struct device *dev,
+	struct device_attribute *devattr, char *buf)
+{
+	int index = to_sensor_dev_attr(devattr)->index;
+	struct tmp401_data *data = tmp401_update_device(dev);
+
+	return sprintf(buf, "%d\n",
+		tmp401_register_to_temp(data->temp[index], data->config));
+}
+
+static ssize_t show_temp_min(struct device *dev,
+	struct device_attribute *devattr, char *buf)
+{
+	int index = to_sensor_dev_attr(devattr)->index;
+	struct tmp401_data *data = tmp401_update_device(dev);
+
+	return sprintf(buf, "%d\n",
+		tmp401_register_to_temp(data->temp_low[index], data->config));
+}
+
+static ssize_t show_temp_max(struct device *dev,
+	struct device_attribute *devattr, char *buf)
+{
+	int index = to_sensor_dev_attr(devattr)->index;
+	struct tmp401_data *data = tmp401_update_device(dev);
+
+	return sprintf(buf, "%d\n",
+		tmp401_register_to_temp(data->temp_high[index], data->config));
+}
+
+static ssize_t show_temp_crit(struct device *dev,
+	struct device_attribute *devattr, char *buf)
+{
+	int index = to_sensor_dev_attr(devattr)->index;
+	struct tmp401_data *data = tmp401_update_device(dev);
+
+	return sprintf(buf, "%d\n",
+			tmp401_crit_register_to_temp(data->temp_crit[index],
+							data->config));
+}
+
+static ssize_t show_temp_crit_hyst(struct device *dev,
+	struct device_attribute *devattr, char *buf)
+{
+	int temp, index = to_sensor_dev_attr(devattr)->index;
+	struct tmp401_data *data = tmp401_update_device(dev);
+
+	mutex_lock(&data->update_lock);
+	temp = tmp401_crit_register_to_temp(data->temp_crit[index],
+						data->config);
+	temp -= data->temp_crit_hyst * 1000;
+	mutex_unlock(&data->update_lock);
+
+	return sprintf(buf, "%d\n", temp);
+}
+
+static ssize_t show_temp_lowest(struct device *dev,
+	struct device_attribute *devattr, char *buf)
+{
+	int index = to_sensor_dev_attr(devattr)->index;
+	struct tmp401_data *data = tmp401_update_device(dev);
+
+	return sprintf(buf, "%d\n",
+		tmp401_register_to_temp(data->temp_lowest[index],
+					data->config));
+}
+
+static ssize_t show_temp_highest(struct device *dev,
+	struct device_attribute *devattr, char *buf)
+{
+	int index = to_sensor_dev_attr(devattr)->index;
+	struct tmp401_data *data = tmp401_update_device(dev);
+
+	return sprintf(buf, "%d\n",
+		tmp401_register_to_temp(data->temp_highest[index],
+					data->config));
+}
+
+static ssize_t show_status(struct device *dev,
+	struct device_attribute *devattr, char *buf)
+{
+	int mask = to_sensor_dev_attr(devattr)->index;
+	struct tmp401_data *data = tmp401_update_device(dev);
+
+	if (data->status & mask)
+		return sprintf(buf, "1\n");
+	else
+		return sprintf(buf, "0\n");
+}
+
+static ssize_t store_temp_min(struct device *dev, struct device_attribute
+	*devattr, const char *buf, size_t count)
+{
+	int index = to_sensor_dev_attr(devattr)->index;
+	struct tmp401_data *data = tmp401_update_device(dev);
+	long val;
+	u16 reg;
+
+	if (strict_strtol(buf, 10, &val))
+		return -EINVAL;
+
+	reg = tmp401_temp_to_register(val, data->config);
+
+	mutex_lock(&data->update_lock);
+
+	i2c_smbus_write_byte_data(to_i2c_client(dev),
+		TMP401_TEMP_LOW_LIMIT_MSB_WRITE[index], reg >> 8);
+	i2c_smbus_write_byte_data(to_i2c_client(dev),
+		TMP401_TEMP_LOW_LIMIT_LSB[index], reg & 0xFF);
+
+	data->temp_low[index] = reg;
+
+	mutex_unlock(&data->update_lock);
+
+	return count;
+}
+
+static ssize_t store_temp_max(struct device *dev, struct device_attribute
+	*devattr, const char *buf, size_t count)
+{
+	int index = to_sensor_dev_attr(devattr)->index;
+	struct tmp401_data *data = tmp401_update_device(dev);
+	long val;
+	u16 reg;
+
+	if (strict_strtol(buf, 10, &val))
+		return -EINVAL;
+
+	reg = tmp401_temp_to_register(val, data->config);
+
+	mutex_lock(&data->update_lock);
+
+	i2c_smbus_write_byte_data(to_i2c_client(dev),
+		TMP401_TEMP_HIGH_LIMIT_MSB_WRITE[index], reg >> 8);
+	i2c_smbus_write_byte_data(to_i2c_client(dev),
+		TMP401_TEMP_HIGH_LIMIT_LSB[index], reg & 0xFF);
+
+	data->temp_high[index] = reg;
+
+	mutex_unlock(&data->update_lock);
+
+	return count;
+}
+
+static ssize_t store_temp_crit(struct device *dev, struct device_attribute
+	*devattr, const char *buf, size_t count)
+{
+	int index = to_sensor_dev_attr(devattr)->index;
+	struct tmp401_data *data = tmp401_update_device(dev);
+	long val;
+	u8 reg;
+
+	if (strict_strtol(buf, 10, &val))
+		return -EINVAL;
+
+	reg = tmp401_crit_temp_to_register(val, data->config);
+
+	mutex_lock(&data->update_lock);
+
+	i2c_smbus_write_byte_data(to_i2c_client(dev),
+		TMP401_TEMP_CRIT_LIMIT[index], reg);
+
+	data->temp_crit[index] = reg;
+
+	mutex_unlock(&data->update_lock);
+
+	return count;
+}
+
+static ssize_t store_temp_crit_hyst(struct device *dev, struct device_attribute
+	*devattr, const char *buf, size_t count)
+{
+	int temp, index = to_sensor_dev_attr(devattr)->index;
+	struct tmp401_data *data = tmp401_update_device(dev);
+	long val;
+	u8 reg;
+
+	if (strict_strtol(buf, 10, &val))
+		return -EINVAL;
+
+	if (data->config & TMP401_CONFIG_RANGE)
+		val = SENSORS_LIMIT(val, -64000, 191000);
+	else
+		val = SENSORS_LIMIT(val, 0, 127000);
+
+	mutex_lock(&data->update_lock);
+	temp = tmp401_crit_register_to_temp(data->temp_crit[index],
+						data->config);
+	val = SENSORS_LIMIT(val, temp - 255000, temp);
+	reg = ((temp - val) + 500) / 1000;
+
+	i2c_smbus_write_byte_data(to_i2c_client(dev),
+		TMP401_TEMP_CRIT_HYST, reg);
+
+	data->temp_crit_hyst = reg;
+
+	mutex_unlock(&data->update_lock);
+
+	return count;
+}
+
+/*
+ * Resets the historical measurements of minimum and maximum temperatures.
+ * This is done by writing any value to any of the minimum/maximum registers
+ * (0x30-0x37).
+ */
+static ssize_t reset_temp_history(struct device *dev,
+	struct device_attribute	*devattr, const char *buf, size_t count)
+{
+	long val;
+
+	if (strict_strtol(buf, 10, &val))
+		return -EINVAL;
+
+	if (val != 1) {
+		dev_err(dev, "temp_reset_history value %ld not"
+			" supported. Use 1 to reset the history!\n", val);
+		return -EINVAL;
+	}
+	i2c_smbus_write_byte_data(to_i2c_client(dev),
+		TMP411_TEMP_LOWEST_MSB[0], val);
+
+	return count;
+}
+
+static struct sensor_device_attribute tmp401_attr[] = {
+	SENSOR_ATTR(temp1_input, 0444, show_temp_value, NULL, 0),
+	SENSOR_ATTR(temp1_min, 0644, show_temp_min, store_temp_min, 0),
+	SENSOR_ATTR(temp1_max, 0644, show_temp_max, store_temp_max, 0),
+	SENSOR_ATTR(temp1_crit, 0644, show_temp_crit, store_temp_crit, 0),
+	SENSOR_ATTR(temp1_crit_hyst, 0644, show_temp_crit_hyst,
+		    store_temp_crit_hyst, 0),
+	SENSOR_ATTR(temp1_min_alarm, 0444, show_status, NULL,
+		    TMP401_STATUS_LOCAL_LOW),
+	SENSOR_ATTR(temp1_max_alarm, 0444, show_status, NULL,
+		    TMP401_STATUS_LOCAL_HIGH),
+	SENSOR_ATTR(temp1_crit_alarm, 0444, show_status, NULL,
+		    TMP401_STATUS_LOCAL_CRIT),
+	SENSOR_ATTR(temp2_input, 0444, show_temp_value, NULL, 1),
+	SENSOR_ATTR(temp2_min, 0644, show_temp_min, store_temp_min, 1),
+	SENSOR_ATTR(temp2_max, 0644, show_temp_max, store_temp_max, 1),
+	SENSOR_ATTR(temp2_crit, 0644, show_temp_crit, store_temp_crit, 1),
+	SENSOR_ATTR(temp2_crit_hyst, 0444, show_temp_crit_hyst, NULL, 1),
+	SENSOR_ATTR(temp2_fault, 0444, show_status, NULL,
+		    TMP401_STATUS_REMOTE_OPEN),
+	SENSOR_ATTR(temp2_min_alarm, 0444, show_status, NULL,
+		    TMP401_STATUS_REMOTE_LOW),
+	SENSOR_ATTR(temp2_max_alarm, 0444, show_status, NULL,
+		    TMP401_STATUS_REMOTE_HIGH),
+	SENSOR_ATTR(temp2_crit_alarm, 0444, show_status, NULL,
+		    TMP401_STATUS_REMOTE_CRIT),
+};
+
+/*
+ * Additional features of the TMP411 chip.
+ * The TMP411 stores the minimum and maximum
+ * temperature measured since power-on, chip-reset, or
+ * minimum and maximum register reset for both the local
+ * and remote channels.
+ */
+static struct sensor_device_attribute tmp411_attr[] = {
+	SENSOR_ATTR(temp1_highest, 0444, show_temp_highest, NULL, 0),
+	SENSOR_ATTR(temp1_lowest, 0444, show_temp_lowest, NULL, 0),
+	SENSOR_ATTR(temp2_highest, 0444, show_temp_highest, NULL, 1),
+	SENSOR_ATTR(temp2_lowest, 0444, show_temp_lowest, NULL, 1),
+	SENSOR_ATTR(temp_reset_history, 0200, NULL, reset_temp_history, 0),
+};
+
+/*
+ * Begin non-sysfs callback code (aka Real code)
+ */
+
+static void tmp401_init_client(struct i2c_client *client)
+{
+	int config, config_orig;
+
+	/* Set the conversion rate to 2 Hz */
+	i2c_smbus_write_byte_data(client, TMP401_CONVERSION_RATE_WRITE, 5);
+
+	/* Start conversions (disable shutdown if necessary) */
+	config = i2c_smbus_read_byte_data(client, TMP401_CONFIG_READ);
+	if (config < 0) {
+		dev_warn(&client->dev, "Initialization failed!\n");
+		return;
+	}
+
+	config_orig = config;
+	config &= ~TMP401_CONFIG_SHUTDOWN;
+
+	if (config != config_orig)
+		i2c_smbus_write_byte_data(client, TMP401_CONFIG_WRITE, config);
+}
+
+static int tmp401_detect(struct i2c_client *client, int kind,
+			 struct i2c_board_info *info)
+{
+	struct i2c_adapter *adapter = client->adapter;
+
+	if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA))
+		return -ENODEV;
+
+	/* Detect and identify the chip */
+	if (kind <= 0) {
+		u8 reg;
+
+		reg = i2c_smbus_read_byte_data(client,
+					       TMP401_MANUFACTURER_ID_REG);
+		if (reg != TMP401_MANUFACTURER_ID)
+			return -ENODEV;
+
+		reg = i2c_smbus_read_byte_data(client, TMP401_DEVICE_ID_REG);
+
+		switch (reg) {
+		case TMP401_DEVICE_ID:
+			kind = tmp401;
+			break;
+		case TMP411_DEVICE_ID:
+			kind = tmp411;
+			break;
+		default:
+			return -ENODEV;
+		}
+
+		reg = i2c_smbus_read_byte_data(client, TMP401_CONFIG_READ);
+		if (reg & 0x1b)
+			return -ENODEV;
+
+		reg = i2c_smbus_read_byte_data(client,
+					       TMP401_CONVERSION_RATE_READ);
+		/* Datasheet says: 0x1-0x6 */
+		if (reg > 15)
+			return -ENODEV;
+	}
+	strlcpy(info->type, tmp401_id[kind - 1].name, I2C_NAME_SIZE);
+
+	return 0;
+}
+
+static int tmp401_probe(struct i2c_client *client,
+			const struct i2c_device_id *id)
+{
+	int i, err = 0;
+	struct tmp401_data *data;
+	const char *names[] = { "TMP401", "TMP411" };
+
+	data = kzalloc(sizeof(struct tmp401_data), GFP_KERNEL);
+	if (!data)
+		return -ENOMEM;
+
+	i2c_set_clientdata(client, data);
+	mutex_init(&data->update_lock);
+	data->kind = id->driver_data;
+
+	/* Initialize the TMP401 chip */
+	tmp401_init_client(client);
+
+	/* Register sysfs hooks */
+	for (i = 0; i < ARRAY_SIZE(tmp401_attr); i++) {
+		err = device_create_file(&client->dev,
+					 &tmp401_attr[i].dev_attr);
+		if (err)
+			goto exit_remove;
+	}
+
+	/* Register additional tmp411 sysfs hooks */
+	if (data->kind == tmp411) {
+		for (i = 0; i < ARRAY_SIZE(tmp411_attr); i++) {
+			err = device_create_file(&client->dev,
+						 &tmp411_attr[i].dev_attr);
+			if (err)
+				goto exit_remove;
+		}
+	}
+
+	data->hwmon_dev = hwmon_device_register(&client->dev);
+	if (IS_ERR(data->hwmon_dev)) {
+		err = PTR_ERR(data->hwmon_dev);
+		data->hwmon_dev = NULL;
+		goto exit_remove;
+	}
+
+	dev_info(&client->dev, "Detected TI %s chip\n",
+		 names[data->kind - 1]);
+
+	return 0;
+
+exit_remove:
+	tmp401_remove(client); /* will also free data for us */
+	return err;
+}
+
+static int tmp401_remove(struct i2c_client *client)
+{
+	struct tmp401_data *data = i2c_get_clientdata(client);
+	int i;
+
+	if (data->hwmon_dev)
+		hwmon_device_unregister(data->hwmon_dev);
+
+	for (i = 0; i < ARRAY_SIZE(tmp401_attr); i++)
+		device_remove_file(&client->dev, &tmp401_attr[i].dev_attr);
+
+	if (data->kind == tmp411) {
+		for (i = 0; i < ARRAY_SIZE(tmp411_attr); i++)
+			device_remove_file(&client->dev,
+					   &tmp411_attr[i].dev_attr);
+	}
+
+	kfree(data);
+	return 0;
+}
+
+static struct tmp401_data *tmp401_update_device_reg16(
+	struct i2c_client *client, struct tmp401_data *data)
+{
+	int i;
+
+	for (i = 0; i < 2; i++) {
+		/*
+		 * High byte must be read first immediately followed
+		 * by the low byte
+		 */
+		data->temp[i] = i2c_smbus_read_byte_data(client,
+			TMP401_TEMP_MSB[i]) << 8;
+		data->temp[i] |= i2c_smbus_read_byte_data(client,
+			TMP401_TEMP_LSB[i]);
+		data->temp_low[i] = i2c_smbus_read_byte_data(client,
+			TMP401_TEMP_LOW_LIMIT_MSB_READ[i]) << 8;
+		data->temp_low[i] |= i2c_smbus_read_byte_data(client,
+			TMP401_TEMP_LOW_LIMIT_LSB[i]);
+		data->temp_high[i] = i2c_smbus_read_byte_data(client,
+			TMP401_TEMP_HIGH_LIMIT_MSB_READ[i]) << 8;
+		data->temp_high[i] |= i2c_smbus_read_byte_data(client,
+			TMP401_TEMP_HIGH_LIMIT_LSB[i]);
+		data->temp_crit[i] = i2c_smbus_read_byte_data(client,
+			TMP401_TEMP_CRIT_LIMIT[i]);
+
+		if (data->kind == tmp411) {
+			data->temp_lowest[i] = i2c_smbus_read_byte_data(client,
+				TMP411_TEMP_LOWEST_MSB[i]) << 8;
+			data->temp_lowest[i] |= i2c_smbus_read_byte_data(
+				client, TMP411_TEMP_LOWEST_LSB[i]);
+
+			data->temp_highest[i] = i2c_smbus_read_byte_data(
+				client, TMP411_TEMP_HIGHEST_MSB[i]) << 8;
+			data->temp_highest[i] |= i2c_smbus_read_byte_data(
+				client, TMP411_TEMP_HIGHEST_LSB[i]);
+		}
+	}
+	return data;
+}
+
+static struct tmp401_data *tmp401_update_device(struct device *dev)
+{
+	struct i2c_client *client = to_i2c_client(dev);
+	struct tmp401_data *data = i2c_get_clientdata(client);
+
+	mutex_lock(&data->update_lock);
+
+	if (time_after(jiffies, data->last_updated + HZ) || !data->valid) {
+		data->status = i2c_smbus_read_byte_data(client, TMP401_STATUS);
+		data->config = i2c_smbus_read_byte_data(client,
+						TMP401_CONFIG_READ);
+		tmp401_update_device_reg16(client, data);
+
+		data->temp_crit_hyst = i2c_smbus_read_byte_data(client,
+						TMP401_TEMP_CRIT_HYST);
+
+		data->last_updated = jiffies;
+		data->valid = 1;
+	}
+
+	mutex_unlock(&data->update_lock);
+
+	return data;
+}
+
+static int __init tmp401_init(void)
+{
+	return i2c_add_driver(&tmp401_driver);
+}
+
+static void __exit tmp401_exit(void)
+{
+	i2c_del_driver(&tmp401_driver);
+}
+
+MODULE_AUTHOR("Hans de Goede <hdegoede@redhat.com>");
+MODULE_DESCRIPTION("Texas Instruments TMP401 temperature sensor driver");
+MODULE_LICENSE("GPL");
+
+module_init(tmp401_init);
+module_exit(tmp401_exit);
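
As a worked illustration (not part of the patch) of the conversion helpers in the new tmp401 driver: the 16-bit temperature registers count in 1/256 degree C steps, and the extended-range configuration shifts readings up by 64 degrees C, which tmp401_register_to_temp() undoes before scaling to millidegrees. A small standalone C program with assumed sample values:

/* Illustrative only: mirrors the arithmetic of tmp401_register_to_temp(). */
#include <stdio.h>

static int register_to_millic(int reg, int extended_range)
{
	if (extended_range)
		reg -= 64 * 256;		/* remove the +64 degC offset */
	return (reg * 625 + 80) / 160;		/* 1/256 degC -> millidegrees */
}

int main(void)
{
	/* 0x1900 (= 6400) is 25.0 degC in standard range -> prints 25000 */
	printf("%d\n", register_to_millic(0x1900, 0));
	/* With the extended range bit set, the same raw value reads
	 * 64 degC lower, i.e. about -39 degC. */
	printf("%d\n", register_to_millic(0x1900, 1));
	return 0;
}
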
diff --git a/drivers/hwmon/w83627ehf.c b/drivers/hwmon/w83627ehf.c
index e64b420..0e97469 100644
--- a/drivers/hwmon/w83627ehf.c
+++ b/drivers/hwmon/w83627ehf.c
@@ -36,6 +36,7 @@
     w83627ehf   10      5       4       3      0x8850 0x88    0x5ca3
                                                0x8860 0xa1
     w83627dhg    9      5       4       3      0xa020 0xc1    0x5ca3
+    w83627dhg-p  9      5       4       3      0xb070 0xc1    0x5ca3
     w83667hg     9      5       3       3      0xa510 0xc1    0x5ca3
 */
 
@@ -53,12 +54,13 @@
 #include <asm/io.h>
 #include "lm75.h"
 
-enum kinds { w83627ehf, w83627dhg, w83667hg };
+enum kinds { w83627ehf, w83627dhg, w83627dhg_p, w83667hg };
 
 /* used to set data->name = w83627ehf_device_names[data->sio_kind] */
 static const char * w83627ehf_device_names[] = {
 	"w83627ehf",
 	"w83627dhg",
+	"w83627dhg",
 	"w83667hg",
 };
 
@@ -86,6 +88,7 @@
 #define SIO_W83627EHF_ID	0x8850
 #define SIO_W83627EHG_ID	0x8860
 #define SIO_W83627DHG_ID	0xa020
+#define SIO_W83627DHG_P_ID	0xb070
 #define SIO_W83667HG_ID 	0xa510
 #define SIO_ID_MASK		0xFFF0
 
@@ -1517,6 +1520,7 @@
 	static const char __initdata sio_name_W83627EHF[] = "W83627EHF";
 	static const char __initdata sio_name_W83627EHG[] = "W83627EHG";
 	static const char __initdata sio_name_W83627DHG[] = "W83627DHG";
+	static const char __initdata sio_name_W83627DHG_P[] = "W83627DHG-P";
 	static const char __initdata sio_name_W83667HG[] = "W83667HG";
 
 	u16 val;
@@ -1542,6 +1546,10 @@
 		sio_data->kind = w83627dhg;
 		sio_name = sio_name_W83627DHG;
 		break;
+	case SIO_W83627DHG_P_ID:
+		sio_data->kind = w83627dhg_p;
+		sio_name = sio_name_W83627DHG_P;
+		break;
 	case SIO_W83667HG_ID:
 		sio_data->kind = w83667hg;
 		sio_name = sio_name_W83667HG;
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index c8460fa..0d04d3e 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -211,7 +211,7 @@
 	  will be called i2c-via.
 
 config I2C_VIAPRO
-	tristate "VIA VT82C596/82C686/82xx and CX700/VX800/VX820"
+	tristate "VIA VT82C596/82C686/82xx and CX700/VX8xx"
 	depends on PCI
 	help
 	  If you say yes to this option, support will be included for the VIA
@@ -225,8 +225,8 @@
 	    VT8237R/A/S
 	    VT8251
 	    CX700
-	    VX800
-	    VX820
+	    VX800/VX820
+	    VX855/VX875
 
 	  This driver can also be built as a module.  If so, the module
 	  will be called i2c-viapro.
diff --git a/drivers/i2c/busses/i2c-viapro.c b/drivers/i2c/busses/i2c-viapro.c
index 02e6f72..54d810a 100644
--- a/drivers/i2c/busses/i2c-viapro.c
+++ b/drivers/i2c/busses/i2c-viapro.c
@@ -37,6 +37,7 @@
    VT8251             0x3287             yes
    CX700              0x8324             yes
    VX800/VX820        0x8353             yes
+   VX855/VX875        0x8409             yes
 
    Note: we assume there can only be one device, with one SMBus interface.
 */
@@ -404,6 +405,7 @@
 	switch (pdev->device) {
 	case PCI_DEVICE_ID_VIA_CX700:
 	case PCI_DEVICE_ID_VIA_VX800:
+	case PCI_DEVICE_ID_VIA_VX855:
 	case PCI_DEVICE_ID_VIA_8251:
 	case PCI_DEVICE_ID_VIA_8237:
 	case PCI_DEVICE_ID_VIA_8237A:
@@ -469,6 +471,8 @@
 	  .driver_data = SMBBA3 },
 	{ PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_VX800),
 	  .driver_data = SMBBA3 },
+	{ PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_VX855),
+	  .driver_data = SMBBA3 },
 	{ 0, }
 };
 
diff --git a/drivers/i2c/busses/i2c-voodoo3.c b/drivers/i2c/busses/i2c-voodoo3.c
index 1a474ac..7663d57 100644
--- a/drivers/i2c/busses/i2c-voodoo3.c
+++ b/drivers/i2c/busses/i2c-voodoo3.c
@@ -163,7 +163,6 @@
 
 static struct i2c_adapter voodoo3_i2c_adapter = {
 	.owner		= THIS_MODULE,
-	.class		= I2C_CLASS_TV_ANALOG, 
 	.name		= "I2C Voodoo3/Banshee adapter",
 	.algo_data	= &voo_i2c_bit_data,
 };
diff --git a/drivers/i2c/chips/Kconfig b/drivers/i2c/chips/Kconfig
index 8f8c81e..02d746c 100644
--- a/drivers/i2c/chips/Kconfig
+++ b/drivers/i2c/chips/Kconfig
@@ -64,21 +64,6 @@
 	  This driver is deprecated and will be dropped soon. Use
 	  drivers/gpio/pca953x.c instead.
 
-config SENSORS_MAX6875
-	tristate "Maxim MAX6875 Power supply supervisor"
-	depends on EXPERIMENTAL
-	help
-	  If you say yes here you get support for the Maxim MAX6875
-	  EEPROM-programmable, quad power-supply sequencer/supervisor.
-
-	  This provides an interface to program the EEPROM and reset the chip.
-
-	  This driver also supports the Maxim MAX6874 hex power-supply
-	  sequencer/supervisor if found at a compatible address.
-
-	  This driver can also be built as a module.  If so, the module
-	  will be called max6875.
-
 config SENSORS_TSL2550
 	tristate "Taos TSL2550 ambient light sensor"
 	depends on EXPERIMENTAL
diff --git a/drivers/i2c/chips/Makefile b/drivers/i2c/chips/Makefile
index 55a3760..f4680d1 100644
--- a/drivers/i2c/chips/Makefile
+++ b/drivers/i2c/chips/Makefile
@@ -11,7 +11,6 @@
 #
 
 obj-$(CONFIG_DS1682)		+= ds1682.o
-obj-$(CONFIG_SENSORS_MAX6875)	+= max6875.o
 obj-$(CONFIG_SENSORS_PCA9539)	+= pca9539.o
 obj-$(CONFIG_SENSORS_PCF8574)	+= pcf8574.o
 obj-$(CONFIG_PCF8575)		+= pcf8575.o
diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c
index 85e2e919..5ed622e 100644
--- a/drivers/i2c/i2c-core.c
+++ b/drivers/i2c/i2c-core.c
@@ -29,7 +29,6 @@
 #include <linux/i2c.h>
 #include <linux/init.h>
 #include <linux/idr.h>
-#include <linux/platform_device.h>
 #include <linux/mutex.h>
 #include <linux/completion.h>
 #include <linux/hardirq.h>
@@ -451,16 +450,6 @@
 
 	mutex_lock(&core_lock);
 
-	/* Add the adapter to the driver core.
-	 * If the parent pointer is not set up,
-	 * we add this adapter to the host bus.
-	 */
-	if (adap->dev.parent == NULL) {
-		adap->dev.parent = &platform_bus;
-		pr_debug("I2C adapter driver [%s] forgot to specify "
-			 "physical device\n", adap->name);
-	}
-
 	/* Set default timeout to 1 second if not already set */
 	if (adap->timeout == 0)
 		adap->timeout = HZ;
@@ -1022,7 +1011,8 @@
  */
 int i2c_transfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
 {
-	int ret;
+	unsigned long orig_jiffies;
+	int ret, try;
 
 	/* REVISIT the fault reporting model here is weak:
 	 *
@@ -1060,7 +1050,15 @@
 			mutex_lock_nested(&adap->bus_lock, adap->level);
 		}
 
-		ret = adap->algo->master_xfer(adap,msgs,num);
+		/* Retry automatically on arbitration loss */
+		orig_jiffies = jiffies;
+		for (ret = 0, try = 0; try <= adap->retries; try++) {
+			ret = adap->algo->master_xfer(adap, msgs, num);
+			if (ret != -EAGAIN)
+				break;
+			if (time_after(jiffies, orig_jiffies + adap->timeout))
+				break;
+		}
 		mutex_unlock(&adap->bus_lock);
 
 		return ret;
@@ -1509,7 +1507,7 @@
 	struct i2c_adapter *adapter;
 
 	mutex_lock(&core_lock);
-	adapter = (struct i2c_adapter *)idr_find(&i2c_adapter_idr, id);
+	adapter = idr_find(&i2c_adapter_idr, id);
 	if (adapter && !try_module_get(adapter->owner))
 		adapter = NULL;
 
@@ -1995,14 +1993,27 @@
 		   char read_write, u8 command, int protocol,
 		   union i2c_smbus_data *data)
 {
+	unsigned long orig_jiffies;
+	int try;
 	s32 res;
 
 	flags &= I2C_M_TEN | I2C_CLIENT_PEC;
 
 	if (adapter->algo->smbus_xfer) {
 		mutex_lock(&adapter->bus_lock);
-		res = adapter->algo->smbus_xfer(adapter,addr,flags,read_write,
-						command, protocol, data);
+
+		/* Retry automatically on arbitration loss */
+		orig_jiffies = jiffies;
+		for (res = 0, try = 0; try <= adapter->retries; try++) {
+			res = adapter->algo->smbus_xfer(adapter, addr, flags,
+							read_write, command,
+							protocol, data);
+			if (res != -EAGAIN)
+				break;
+			if (time_after(jiffies,
+				       orig_jiffies + adapter->timeout))
+				break;
+		}
 		mutex_unlock(&adapter->bus_lock);
 	} else
 		res = i2c_smbus_xfer_emulated(adapter,addr,flags,read_write,
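
The two hunks above give i2c_transfer() and i2c_smbus_xfer() the same bounded retry policy: an adapter whose master_xfer/smbus_xfer method returns -EAGAIN on arbitration loss is retried until either adap->retries attempts or adap->timeout jiffies are exhausted. A hypothetical bus driver opts in simply by setting those two fields before registering its adapter; a sketch (example_adapter and example_probe are assumed names, and the adapter's .algo setup is omitted):

/* Hypothetical adapter setup showing the knobs consulted by the new retry
 * loops; needs <linux/i2c.h>, <linux/jiffies.h>, <linux/platform_device.h>. */
static struct i2c_adapter example_adapter;	/* .algo etc. filled elsewhere */

static int example_probe(struct platform_device *pdev)
{
	example_adapter.retries = 3;			 /* up to 4 attempts */
	example_adapter.timeout = msecs_to_jiffies(100); /* overall deadline */
	return i2c_add_adapter(&example_adapter);
}
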
diff --git a/drivers/ide/ide-pm.c b/drivers/ide/ide-pm.c
index ba1488b..c14ca14 100644
--- a/drivers/ide/ide-pm.c
+++ b/drivers/ide/ide-pm.c
@@ -3,7 +3,8 @@
 
 int generic_ide_suspend(struct device *dev, pm_message_t mesg)
 {
-	ide_drive_t *drive = dev->driver_data, *pair = ide_get_pair_dev(drive);
+	ide_drive_t *drive = dev_get_drvdata(dev);
+	ide_drive_t *pair = ide_get_pair_dev(drive);
 	ide_hwif_t *hwif = drive->hwif;
 	struct request *rq;
 	struct request_pm_state rqpm;
@@ -34,7 +35,8 @@
 
 int generic_ide_resume(struct device *dev)
 {
-	ide_drive_t *drive = dev->driver_data, *pair = ide_get_pair_dev(drive);
+	ide_drive_t *drive = dev_get_drvdata(dev);
+	ide_drive_t *pair = ide_get_pair_dev(drive);
 	ide_hwif_t *hwif = drive->hwif;
 	struct request *rq;
 	struct request_pm_state rqpm;
diff --git a/drivers/ide/ide-probe.c b/drivers/ide/ide-probe.c
index f371b0d..79e0af3 100644
--- a/drivers/ide/ide-probe.c
+++ b/drivers/ide/ide-probe.c
@@ -535,7 +535,7 @@
 
 	/* register with global device tree */
 	dev_set_name(&hwif->gendev, hwif->name);
-	hwif->gendev.driver_data = hwif;
+	dev_set_drvdata(&hwif->gendev, hwif);
 	if (hwif->gendev.parent == NULL)
 		hwif->gendev.parent = hwif->dev;
 	hwif->gendev.release = hwif_release_dev;
@@ -987,9 +987,9 @@
 		int ret;
 
 		dev_set_name(dev, "%u.%u", hwif->index, i);
+		dev_set_drvdata(dev, drive);
 		dev->parent = &hwif->gendev;
 		dev->bus = &ide_bus_type;
-		dev->driver_data = drive;
 		dev->release = drive_release_dev;
 
 		ret = device_register(dev);
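
Several conversions in this series, like the ide ones above, replace direct dev->driver_data pokes with the dev_set_drvdata()/dev_get_drvdata() accessor pair. A minimal hypothetical probe/remove pair using the same pattern (struct example_priv and the function names are made up):

/* Hypothetical driver-private data handling via the accessors used above. */
static int example_probe(struct platform_device *pdev)
{
	struct example_priv *priv = kzalloc(sizeof(*priv), GFP_KERNEL);

	if (!priv)
		return -ENOMEM;
	dev_set_drvdata(&pdev->dev, priv);	/* stash for later callbacks */
	return 0;
}

static int __devexit example_remove(struct platform_device *pdev)
{
	struct example_priv *priv = dev_get_drvdata(&pdev->dev);

	kfree(priv);
	return 0;
}
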
diff --git a/drivers/ide/ide_platform.c b/drivers/ide/ide_platform.c
index ee9b55e..b579fbe 100644
--- a/drivers/ide/ide_platform.c
+++ b/drivers/ide/ide_platform.c
@@ -112,7 +112,7 @@
 
 static int __devexit plat_ide_remove(struct platform_device *pdev)
 {
-	struct ide_host *host = pdev->dev.driver_data;
+	struct ide_host *host = dev_get_drvdata(&pdev->dev);
 
 	ide_host_remove(host);
 
diff --git a/drivers/ieee1394/csr1212.c b/drivers/ieee1394/csr1212.c
index a6dfeb0..e76cac6 100644
--- a/drivers/ieee1394/csr1212.c
+++ b/drivers/ieee1394/csr1212.c
@@ -35,6 +35,7 @@
 
 #include <linux/errno.h>
 #include <linux/kernel.h>
+#include <linux/kmemcheck.h>
 #include <linux/string.h>
 #include <asm/bug.h>
 #include <asm/byteorder.h>
@@ -387,6 +388,7 @@
 	if (!kv)
 		return NULL;
 
+	kmemcheck_annotate_variable(kv->value.leaf.data[0]);
 	CSR1212_DESCRIPTOR_LEAF_SET_TYPE(kv, dtype);
 	CSR1212_DESCRIPTOR_LEAF_SET_SPECIFIER_ID(kv, specifier_id);
 
diff --git a/drivers/ieee1394/eth1394.c b/drivers/ieee1394/eth1394.c
index 4ca1035..f5c586c 100644
--- a/drivers/ieee1394/eth1394.c
+++ b/drivers/ieee1394/eth1394.c
@@ -361,7 +361,7 @@
 	node_info->pdg.sz = 0;
 	node_info->fifo = CSR1212_INVALID_ADDR_SPACE;
 
-	ud->device.driver_data = node_info;
+	dev_set_drvdata(&ud->device, node_info);
 	new_node->ud = ud;
 
 	priv = netdev_priv(hi->dev);
@@ -406,7 +406,7 @@
 	list_del(&old_node->list);
 	kfree(old_node);
 
-	node_info = (struct eth1394_node_info*)ud->device.driver_data;
+	node_info = dev_get_drvdata(&ud->device);
 
 	spin_lock_irqsave(&node_info->pdg.lock, flags);
 	/* The partial datagram list should be empty, but we'll just
@@ -416,7 +416,7 @@
 	spin_unlock_irqrestore(&node_info->pdg.lock, flags);
 
 	kfree(node_info);
-	ud->device.driver_data = NULL;
+	dev_set_drvdata(&ud->device, NULL);
 	return 0;
 }
 
@@ -688,7 +688,7 @@
 	ether1394_reset_priv(dev, 0);
 
 	list_for_each_entry(node, &priv->ip_node_list, list) {
-		node_info = node->ud->device.driver_data;
+		node_info = dev_get_drvdata(&node->ud->device);
 
 		spin_lock_irqsave(&node_info->pdg.lock, flags);
 
@@ -872,8 +872,7 @@
 		if (!node)
 			return cpu_to_be16(0);
 
-		node_info =
-		    (struct eth1394_node_info *)node->ud->device.driver_data;
+		node_info = dev_get_drvdata(&node->ud->device);
 
 		/* Update our speed/payload/fifo_offset table */
 		node_info->maxpayload =	maxpayload;
@@ -1080,7 +1079,7 @@
 		priv->ud_list[NODEID_TO_NODE(srcid)] = ud;
 	}
 
-	node_info = (struct eth1394_node_info *)ud->device.driver_data;
+	node_info = dev_get_drvdata(&ud->device);
 
 	/* First, did we receive a fragmented or unfragmented datagram? */
 	hdr->words.word1 = ntohs(hdr->words.word1);
@@ -1617,8 +1616,7 @@
 		if (!node)
 			goto fail;
 
-		node_info =
-		    (struct eth1394_node_info *)node->ud->device.driver_data;
+		node_info = dev_get_drvdata(&node->ud->device);
 		if (node_info->fifo == CSR1212_INVALID_ADDR_SPACE)
 			goto fail;
 
diff --git a/drivers/ieee1394/nodemgr.c b/drivers/ieee1394/nodemgr.c
index a6d55be..5122b5a 100644
--- a/drivers/ieee1394/nodemgr.c
+++ b/drivers/ieee1394/nodemgr.c
@@ -10,6 +10,7 @@
 
 #include <linux/bitmap.h>
 #include <linux/kernel.h>
+#include <linux/kmemcheck.h>
 #include <linux/list.h>
 #include <linux/slab.h>
 #include <linux/delay.h>
@@ -39,7 +40,10 @@
 	struct hpsb_host *host;
 	nodeid_t nodeid;
 	unsigned int generation;
+
+	kmemcheck_bitfield_begin(flags);
 	unsigned int speed_unverified:1;
+	kmemcheck_bitfield_end(flags);
 };
 
 
@@ -1293,6 +1297,7 @@
 	u8 *speed;
 
 	ci = kmalloc(sizeof(*ci), GFP_KERNEL);
+	kmemcheck_annotate_bitfield(ci, flags);
 	if (!ci)
 		return;
 
diff --git a/drivers/ieee1394/sbp2.c b/drivers/ieee1394/sbp2.c
index a51ab23..83b734a 100644
--- a/drivers/ieee1394/sbp2.c
+++ b/drivers/ieee1394/sbp2.c
@@ -718,7 +718,7 @@
 	struct scsi_device *sdev;
 
 	ud = container_of(dev, struct unit_directory, device);
-	lu = ud->device.driver_data;
+	lu = dev_get_drvdata(&ud->device);
 	if (!lu)
 		return 0;
 
@@ -746,7 +746,7 @@
 
 static int sbp2_update(struct unit_directory *ud)
 {
-	struct sbp2_lu *lu = ud->device.driver_data;
+	struct sbp2_lu *lu = dev_get_drvdata(&ud->device);
 
 	if (sbp2_reconnect_device(lu) != 0) {
 		/*
@@ -815,7 +815,7 @@
 	atomic_set(&lu->state, SBP2LU_STATE_RUNNING);
 	INIT_WORK(&lu->protocol_work, NULL);
 
-	ud->device.driver_data = lu;
+	dev_set_drvdata(&ud->device, lu);
 
 	hi = hpsb_get_hostinfo(&sbp2_highlevel, ud->ne->host);
 	if (!hi) {
@@ -1051,7 +1051,7 @@
 		hpsb_unregister_addrspace(&sbp2_highlevel, hi->host,
 					  lu->status_fifo_addr);
 
-	lu->ud->device.driver_data = NULL;
+	dev_set_drvdata(&lu->ud->device, NULL);
 
 	module_put(hi->host->driver->owner);
 no_hi:
diff --git a/drivers/infiniband/core/sysfs.c b/drivers/infiniband/core/sysfs.c
index 5c04cfb..158a214 100644
--- a/drivers/infiniband/core/sysfs.c
+++ b/drivers/infiniband/core/sysfs.c
@@ -760,9 +760,9 @@
 	int i;
 
 	class_dev->class      = &ib_class;
-	class_dev->driver_data = device;
 	class_dev->parent     = device->dma_device;
 	dev_set_name(class_dev, device->name);
+	dev_set_drvdata(class_dev, device);
 
 	INIT_LIST_HEAD(&device->port_list);
 
diff --git a/drivers/infiniband/hw/ehca/ehca_main.c b/drivers/infiniband/hw/ehca/ehca_main.c
index 85905ab..ce4e6ef 100644
--- a/drivers/infiniband/hw/ehca/ehca_main.c
+++ b/drivers/infiniband/hw/ehca/ehca_main.c
@@ -636,7 +636,7 @@
 	struct hipz_query_hca *rblock;				           \
 	int data;                                                          \
 									   \
-	shca = dev->driver_data;					   \
+	shca = dev_get_drvdata(dev);					   \
 									   \
 	rblock = ehca_alloc_fw_ctrlblock(GFP_KERNEL);			   \
 	if (!rblock) {						           \
@@ -680,7 +680,7 @@
 					struct device_attribute *attr,
 					char *buf)
 {
-	struct ehca_shca *shca = dev->driver_data;
+	struct ehca_shca *shca = dev_get_drvdata(dev);
 
 	return sprintf(buf, "%llx\n", shca->ipz_hca_handle.handle);
 
@@ -749,7 +749,7 @@
 
 	shca->ofdev = dev;
 	shca->ipz_hca_handle.handle = *handle;
-	dev->dev.driver_data = shca;
+	dev_set_drvdata(&dev->dev, shca);
 
 	ret = ehca_sense_attributes(shca);
 	if (ret < 0) {
@@ -878,7 +878,7 @@
 
 static int __devexit ehca_remove(struct of_device *dev)
 {
-	struct ehca_shca *shca = dev->dev.driver_data;
+	struct ehca_shca *shca = dev_get_drvdata(&dev->dev);
 	unsigned long flags;
 	int ret;
 
diff --git a/drivers/input/input.c b/drivers/input/input.c
index 5d445f4..7c237e6 100644
--- a/drivers/input/input.c
+++ b/drivers/input/input.c
@@ -1265,8 +1265,14 @@
 	.uevent		= input_dev_uevent,
 };
 
+static char *input_nodename(struct device *dev)
+{
+	return kasprintf(GFP_KERNEL, "input/%s", dev_name(dev));
+}
+
 struct class input_class = {
 	.name		= "input",
+	.nodename	= input_nodename,
 };
 EXPORT_SYMBOL_GPL(input_class);
 
diff --git a/drivers/input/touchscreen/wm97xx-core.c b/drivers/input/touchscreen/wm97xx-core.c
index 69af838..2957d48 100644
--- a/drivers/input/touchscreen/wm97xx-core.c
+++ b/drivers/input/touchscreen/wm97xx-core.c
@@ -569,7 +569,7 @@
 	mutex_init(&wm->codec_mutex);
 
 	wm->dev = dev;
-	dev->driver_data = wm;
+	dev_set_drvdata(dev, wm);
 	wm->ac97 = to_ac97_t(dev);
 
 	/* check that we have a supported codec */
diff --git a/drivers/input/xen-kbdfront.c b/drivers/input/xen-kbdfront.c
index 928d2ed..b115726 100644
--- a/drivers/input/xen-kbdfront.c
+++ b/drivers/input/xen-kbdfront.c
@@ -114,7 +114,7 @@
 		xenbus_dev_fatal(dev, -ENOMEM, "allocating info structure");
 		return -ENOMEM;
 	}
-	dev->dev.driver_data = info;
+	dev_set_drvdata(&dev->dev, info);
 	info->xbdev = dev;
 	info->irq = -1;
 	snprintf(info->phys, sizeof(info->phys), "xenbus/%s", dev->nodename);
@@ -186,7 +186,7 @@
 
 static int xenkbd_resume(struct xenbus_device *dev)
 {
-	struct xenkbd_info *info = dev->dev.driver_data;
+	struct xenkbd_info *info = dev_get_drvdata(&dev->dev);
 
 	xenkbd_disconnect_backend(info);
 	memset(info->page, 0, PAGE_SIZE);
@@ -195,7 +195,7 @@
 
 static int xenkbd_remove(struct xenbus_device *dev)
 {
-	struct xenkbd_info *info = dev->dev.driver_data;
+	struct xenkbd_info *info = dev_get_drvdata(&dev->dev);
 
 	xenkbd_disconnect_backend(info);
 	if (info->kbd)
@@ -266,7 +266,7 @@
 static void xenkbd_backend_changed(struct xenbus_device *dev,
 				   enum xenbus_state backend_state)
 {
-	struct xenkbd_info *info = dev->dev.driver_data;
+	struct xenkbd_info *info = dev_get_drvdata(&dev->dev);
 	int ret, val;
 
 	switch (backend_state) {
diff --git a/drivers/macintosh/therm_adt746x.c b/drivers/macintosh/therm_adt746x.c
index 0ddf904..fde377c 100644
--- a/drivers/macintosh/therm_adt746x.c
+++ b/drivers/macintosh/therm_adt746x.c
@@ -72,7 +72,7 @@
 		 "(default 0)");
 
 struct thermostat {
-	struct i2c_client	clt;
+	struct i2c_client	*clt;
 	u8			temps[3];
 	u8			cached_temp[3];
 	u8			initial_limits[3];
@@ -87,9 +87,6 @@
 static struct thermostat* thermostat;
 static struct task_struct *thread_therm = NULL;
 
-static int attach_one_thermostat(struct i2c_adapter *adapter, int addr,
-				 int busno);
-
 static void write_both_fan_speed(struct thermostat *th, int speed);
 static void write_fan_speed(struct thermostat *th, int speed, int fan);
 
@@ -101,7 +98,7 @@
 	
 	tmp[0] = reg;
 	tmp[1] = data;
-	rc = i2c_master_send(&th->clt, (const char *)tmp, 2);
+	rc = i2c_master_send(th->clt, (const char *)tmp, 2);
 	if (rc < 0)
 		return rc;
 	if (rc != 2)
@@ -116,12 +113,12 @@
 	int rc;
 
 	reg_addr = (u8)reg;
-	rc = i2c_master_send(&th->clt, &reg_addr, 1);
+	rc = i2c_master_send(th->clt, &reg_addr, 1);
 	if (rc < 0)
 		return rc;
 	if (rc != 1)
 		return -ENODEV;
-	rc = i2c_master_recv(&th->clt, (char *)&data, 1);
+	rc = i2c_master_recv(th->clt, (char *)&data, 1);
 	if (rc < 0)
 		return rc;
 	return data;
@@ -131,26 +128,36 @@
 attach_thermostat(struct i2c_adapter *adapter)
 {
 	unsigned long bus_no;
+	struct i2c_board_info info;
+	struct i2c_client *client;
 
 	if (strncmp(adapter->name, "uni-n", 5))
 		return -ENODEV;
 	bus_no = simple_strtoul(adapter->name + 6, NULL, 10);
 	if (bus_no != therm_bus)
 		return -ENODEV;
-	return attach_one_thermostat(adapter, therm_address, bus_no);
+
+	memset(&info, 0, sizeof(struct i2c_board_info));
+	strlcpy(info.type, "therm_adt746x", I2C_NAME_SIZE);
+	info.addr = therm_address;
+	client = i2c_new_device(adapter, &info);
+	if (!client)
+		return -ENODEV;
+
+	/*
+	 * Let i2c-core delete that device on driver removal.
+	 * This is safe because i2c-core holds the core_lock mutex for us.
+	 */
+	list_add_tail(&client->detected, &client->driver->clients);
+	return 0;
 }
 
 static int
-detach_thermostat(struct i2c_adapter *adapter)
+remove_thermostat(struct i2c_client *client)
 {
-	struct thermostat* th;
+	struct thermostat *th = i2c_get_clientdata(client);
 	int i;
 	
-	if (thermostat == NULL)
-		return 0;
-
-	th = thermostat;
-
 	if (thread_therm != NULL) {
 		kthread_stop(thread_therm);
 	}
@@ -166,8 +173,6 @@
 
 	write_both_fan_speed(th, -1);
 
-	i2c_detach_client(&th->clt);
-
 	thermostat = NULL;
 
 	kfree(th);
@@ -175,14 +180,6 @@
 	return 0;
 }
 
-static struct i2c_driver thermostat_driver = {  
-	.driver = {
-		.name	= "therm_adt746x",
-	},
-	.attach_adapter	= attach_thermostat,
-	.detach_adapter	= detach_thermostat,
-};
-
 static int read_fan_speed(struct thermostat *th, u8 addr)
 {
 	u8 tmp[2];
@@ -371,8 +368,8 @@
 		th->limits[i] = default_limits_local[i] + limit_adjust;
 }
 
-static int attach_one_thermostat(struct i2c_adapter *adapter, int addr,
-				 int busno)
+static int probe_thermostat(struct i2c_client *client,
+			    const struct i2c_device_id *id)
 {
 	struct thermostat* th;
 	int rc;
@@ -385,16 +382,12 @@
 	if (!th)
 		return -ENOMEM;
 
-	th->clt.addr = addr;
-	th->clt.adapter = adapter;
-	th->clt.driver = &thermostat_driver;
-	strcpy(th->clt.name, "thermostat");
+	i2c_set_clientdata(client, th);
+	th->clt = client;
 
 	rc = read_reg(th, 0);
 	if (rc < 0) {
-		printk(KERN_ERR "adt746x: Thermostat failed to read config "
-				"from bus %d !\n",
-				busno);
+		dev_err(&client->dev, "Thermostat failed to read config!\n");
 		kfree(th);
 		return -ENODEV;
 	}
@@ -423,14 +416,6 @@
 
 	thermostat = th;
 
-	if (i2c_attach_client(&th->clt)) {
-		printk(KERN_INFO "adt746x: Thermostat failed to attach "
-				 "client !\n");
-		thermostat = NULL;
-		kfree(th);
-		return -ENODEV;
-	}
-
 	/* be sure to really write fan speed the first time */
 	th->last_speed[0] = -2;
 	th->last_speed[1] = -2;
@@ -456,6 +441,21 @@
 	return 0;
 }
 
+static const struct i2c_device_id therm_adt746x_id[] = {
+	{ "therm_adt746x", 0 },
+	{ }
+};
+
+static struct i2c_driver thermostat_driver = {
+	.driver = {
+		.name	= "therm_adt746x",
+	},
+	.attach_adapter	= attach_thermostat,
+	.probe = probe_thermostat,
+	.remove = remove_thermostat,
+	.id_table = therm_adt746x_id,
+};
+
 /* 
  * Now, unfortunately, sysfs doesn't give us a nice void * we could
  * pass around to the attribute functions, so we don't really have
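
The therm_adt746x conversion above moves device creation out of a legacy attach_adapter callback and into i2c_new_device() plus a standard probe()/remove() pair. The core instantiation step, reduced to a hypothetical sketch (chip name and address are made up):

/* Hypothetical explicit instantiation of an i2c client, mirroring the
 * i2c_new_device() pattern adopted above; not part of this patch. */
static struct i2c_client *example_attach(struct i2c_adapter *adap)
{
	struct i2c_board_info info;

	memset(&info, 0, sizeof(info));
	strlcpy(info.type, "example_chip", I2C_NAME_SIZE);	/* assumed name */
	info.addr = 0x2e;					/* assumed address */
	return i2c_new_device(adap, &info);	/* NULL on failure */
}
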
diff --git a/drivers/macintosh/therm_pm72.c b/drivers/macintosh/therm_pm72.c
index 817607e..a028598 100644
--- a/drivers/macintosh/therm_pm72.c
+++ b/drivers/macintosh/therm_pm72.c
@@ -287,22 +287,6 @@
 };
 
 /*
- * i2c_driver structure to attach to the host i2c controller
- */
-
-static int therm_pm72_attach(struct i2c_adapter *adapter);
-static int therm_pm72_detach(struct i2c_adapter *adapter);
-
-static struct i2c_driver therm_pm72_driver =
-{
-	.driver = {
-		.name	= "therm_pm72",
-	},
-	.attach_adapter	= therm_pm72_attach,
-	.detach_adapter	= therm_pm72_detach,
-};
-
-/*
  * Utility function to create an i2c_client structure and
  * attach it to one of u3 adapters
  */
@@ -310,6 +294,7 @@
 {
 	struct i2c_client *clt;
 	struct i2c_adapter *adap;
+	struct i2c_board_info info;
 
 	if (id & 0x200)
 		adap = k2;
@@ -320,31 +305,21 @@
 	if (adap == NULL)
 		return NULL;
 
-	clt = kzalloc(sizeof(struct i2c_client), GFP_KERNEL);
-	if (clt == NULL)
-		return NULL;
-
-	clt->addr = (id >> 1) & 0x7f;
-	clt->adapter = adap;
-	clt->driver = &therm_pm72_driver;
-	strncpy(clt->name, name, I2C_NAME_SIZE-1);
-
-	if (i2c_attach_client(clt)) {
+	memset(&info, 0, sizeof(struct i2c_board_info));
+	info.addr = (id >> 1) & 0x7f;
+	strlcpy(info.type, "therm_pm72", I2C_NAME_SIZE);
+	clt = i2c_new_device(adap, &info);
+	if (!clt) {
 		printk(KERN_ERR "therm_pm72: Failed to attach to i2c ID 0x%x\n", id);
-		kfree(clt);
 		return NULL;
 	}
-	return clt;
-}
 
-/*
- * Utility function to get rid of the i2c_client structure
- * (will also detach from the adapter hopepfully)
- */
-static void detach_i2c_chip(struct i2c_client *clt)
-{
-	i2c_detach_client(clt);
-	kfree(clt);
+	/*
+	 * Let i2c-core delete that device on driver removal.
+	 * This is safe because i2c-core holds the core_lock mutex for us.
+	 */
+	list_add_tail(&clt->detected, &clt->driver->clients);
+	return clt;
 }
 
 /*
@@ -1203,8 +1178,6 @@
 
 	return 0;
  fail:
-	if (state->monitor)
-		detach_i2c_chip(state->monitor);
 	state->monitor = NULL;
 	
 	return -ENODEV;
@@ -1232,7 +1205,6 @@
 		device_remove_file(&of_dev->dev, &dev_attr_cpu1_intake_fan_rpm);
 	}
 
-	detach_i2c_chip(state->monitor);
 	state->monitor = NULL;
 }
 
@@ -1407,7 +1379,6 @@
 	device_remove_file(&of_dev->dev, &dev_attr_backside_temperature);
 	device_remove_file(&of_dev->dev, &dev_attr_backside_fan_pwm);
 
-	detach_i2c_chip(state->monitor);
 	state->monitor = NULL;
 }
  
@@ -1532,7 +1503,6 @@
 	device_remove_file(&of_dev->dev, &dev_attr_drives_temperature);
 	device_remove_file(&of_dev->dev, &dev_attr_drives_fan_rpm);
 
-	detach_i2c_chip(state->monitor);
 	state->monitor = NULL;
 }
 
@@ -1654,7 +1624,6 @@
 
 	device_remove_file(&of_dev->dev, &dev_attr_dimms_temperature);
 
-	detach_i2c_chip(state->monitor);
 	state->monitor = NULL;
 }
 
@@ -1779,7 +1748,6 @@
 	device_remove_file(&of_dev->dev, &dev_attr_slots_temperature);
 	device_remove_file(&of_dev->dev, &dev_attr_slots_fan_pwm);
 
-	detach_i2c_chip(state->monitor);
 	state->monitor = NULL;
 }
 
@@ -2008,8 +1976,6 @@
  */
 static void detach_fcu(void)
 {
-	if (fcu)
-		detach_i2c_chip(fcu);
 	fcu = NULL;
 }
 
@@ -2060,12 +2026,21 @@
 	return 0;
 }
 
+static int therm_pm72_probe(struct i2c_client *client,
+			    const struct i2c_device_id *id)
+{
+	/* Always succeed, the real work was done in therm_pm72_attach() */
+	return 0;
+}
+
 /*
- * Called on every adapter when the driver or the i2c controller
+ * Called when any of the devices which participate in thermal management
  * is going away.
  */
-static int therm_pm72_detach(struct i2c_adapter *adapter)
+static int therm_pm72_remove(struct i2c_client *client)
 {
+	struct i2c_adapter *adapter = client->adapter;
+
 	mutex_lock(&driver_lock);
 
 	if (state != state_detached)
@@ -2096,6 +2071,30 @@
 	return 0;
 }
 
+/*
+ * i2c_driver structure to attach to the host i2c controller
+ */
+
+static const struct i2c_device_id therm_pm72_id[] = {
+	/*
+	 * Fake device name; thermal management is done by several
+	 * chips, but we don't need to differentiate between them at
+	 * this point.
+	 */
+	{ "therm_pm72", 0 },
+	{ }
+};
+
+static struct i2c_driver therm_pm72_driver = {
+	.driver = {
+		.name	= "therm_pm72",
+	},
+	.attach_adapter	= therm_pm72_attach,
+	.probe		= therm_pm72_probe,
+	.remove		= therm_pm72_remove,
+	.id_table	= therm_pm72_id,
+};
+
 static int fan_check_loc_match(const char *loc, int fan)
 {
 	char	tmp[64];
diff --git a/drivers/macintosh/therm_windtunnel.c b/drivers/macintosh/therm_windtunnel.c
index 3da0a02..4002331 100644
--- a/drivers/macintosh/therm_windtunnel.c
+++ b/drivers/macintosh/therm_windtunnel.c
@@ -48,16 +48,6 @@
 
 #define LOG_TEMP		0			/* continously log temperature */
 
-static int 			do_probe( struct i2c_adapter *adapter, int addr, int kind);
-
-/* scan 0x48-0x4f (DS1775) and 0x2c-2x2f (ADM1030) */
-static const unsigned short	normal_i2c[] = { 0x48, 0x49, 0x4a, 0x4b,
-						 0x4c, 0x4d, 0x4e, 0x4f,
-						 0x2c, 0x2d, 0x2e, 0x2f,
-						 I2C_CLIENT_END };
-
-I2C_CLIENT_INSMOD;
-
 static struct {
 	volatile int		running;
 	struct task_struct	*poll_task;
@@ -315,53 +305,54 @@
 static int
 do_attach( struct i2c_adapter *adapter )
 {
-	int ret = 0;
+	/* scan 0x48-0x4f (DS1775) and 0x2c-2x2f (ADM1030) */
+	static const unsigned short scan_ds1775[] = {
+		0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f,
+		I2C_CLIENT_END
+	};
+	static const unsigned short scan_adm1030[] = {
+		0x2c, 0x2d, 0x2e, 0x2f,
+		I2C_CLIENT_END
+	};
 
 	if( strncmp(adapter->name, "uni-n", 5) )
 		return 0;
 
 	if( !x.running ) {
-		ret = i2c_probe( adapter, &addr_data, &do_probe );
+		struct i2c_board_info info;
+
+		memset(&info, 0, sizeof(struct i2c_board_info));
+		strlcpy(info.type, "therm_ds1775", I2C_NAME_SIZE);
+		i2c_new_probed_device(adapter, &info, scan_ds1775);
+
+		strlcpy(info.type, "therm_adm1030", I2C_NAME_SIZE);
+		i2c_new_probed_device(adapter, &info, scan_adm1030);
+
 		if( x.thermostat && x.fan ) {
 			x.running = 1;
 			x.poll_task = kthread_run(control_loop, NULL, "g4fand");
 		}
 	}
-	return ret;
+	return 0;
 }
 
 static int
-do_detach( struct i2c_client *client )
+do_remove(struct i2c_client *client)
 {
-	int err;
-
-	if( (err=i2c_detach_client(client)) )
-		printk(KERN_ERR "failed to detach thermostat client\n");
-	else {
-		if( x.running ) {
-			x.running = 0;
-			kthread_stop(x.poll_task);
-			x.poll_task = NULL;
-		}
-		if( client == x.thermostat )
-			x.thermostat = NULL;
-		else if( client == x.fan )
-			x.fan = NULL;
-		else {
-			printk(KERN_ERR "g4fan: bad client\n");
-		}
-		kfree( client );
+	if (x.running) {
+		x.running = 0;
+		kthread_stop(x.poll_task);
+		x.poll_task = NULL;
 	}
-	return err;
-}
+	if (client == x.thermostat)
+		x.thermostat = NULL;
+	else if (client == x.fan)
+		x.fan = NULL;
+	else
+		printk(KERN_ERR "g4fan: bad client\n");
 
-static struct i2c_driver g4fan_driver = {  
-	.driver = {
-		.name	= "therm_windtunnel",
-	},
-	.attach_adapter = do_attach,
-	.detach_client	= do_detach,
-};
+	return 0;
+}
 
 static int
 attach_fan( struct i2c_client *cl )
@@ -374,13 +365,8 @@
 		goto out;
 	printk("ADM1030 fan controller [@%02x]\n", cl->addr );
 
-	strlcpy( cl->name, "ADM1030 fan controller", sizeof(cl->name) );
-
-	if( !i2c_attach_client(cl) )
-		x.fan = cl;
+	x.fan = cl;
  out:
-	if( cl != x.fan )
-		kfree( cl );
 	return 0;
 }
 
@@ -412,39 +398,47 @@
 	x.temp = temp;
 	x.overheat_temp = os_temp;
 	x.overheat_hyst = hyst_temp;
-	
-	strlcpy( cl->name, "DS1775 thermostat", sizeof(cl->name) );
-
-	if( !i2c_attach_client(cl) )
-		x.thermostat = cl;
+	x.thermostat = cl;
 out:
-	if( cl != x.thermostat )
-		kfree( cl );
 	return 0;
 }
 
+enum chip { ds1775, adm1030 };
+
+static const struct i2c_device_id therm_windtunnel_id[] = {
+	{ "therm_ds1775", ds1775 },
+	{ "therm_adm1030", adm1030 },
+	{ }
+};
+
 static int
-do_probe( struct i2c_adapter *adapter, int addr, int kind )
+do_probe(struct i2c_client *cl, const struct i2c_device_id *id)
 {
-	struct i2c_client *cl;
+	struct i2c_adapter *adapter = cl->adapter;
 
 	if( !i2c_check_functionality(adapter, I2C_FUNC_SMBUS_WORD_DATA
 				     | I2C_FUNC_SMBUS_WRITE_BYTE) )
 		return 0;
 
-	if( !(cl=kzalloc(sizeof(*cl), GFP_KERNEL)) )
-		return -ENOMEM;
-
-	cl->addr = addr;
-	cl->adapter = adapter;
-	cl->driver = &g4fan_driver;
-	cl->flags = 0;
-
-	if( addr < 0x48 )
+	switch (id->driver_data) {
+	case adm1030:
 		return attach_fan( cl );
-	return attach_thermostat( cl );
+	case ds1775:
+		return attach_thermostat(cl);
+	}
+	return 0;
 }
 
+static struct i2c_driver g4fan_driver = {
+	.driver = {
+		.name	= "therm_windtunnel",
+	},
+	.attach_adapter = do_attach,
+	.probe		= do_probe,
+	.remove		= do_remove,
+	.id_table	= therm_windtunnel_id,
+};
+
 
 /************************************************************************/
 /*	initialization / cleanup					*/
diff --git a/drivers/macintosh/windfarm_lm75_sensor.c b/drivers/macintosh/windfarm_lm75_sensor.c
index b92b959..529886c 100644
--- a/drivers/macintosh/windfarm_lm75_sensor.c
+++ b/drivers/macintosh/windfarm_lm75_sensor.c
@@ -37,34 +37,22 @@
 struct wf_lm75_sensor {
 	int			ds1775 : 1;
 	int			inited : 1;
-	struct 	i2c_client	i2c;
+	struct 	i2c_client	*i2c;
 	struct 	wf_sensor	sens;
 };
 #define wf_to_lm75(c) container_of(c, struct wf_lm75_sensor, sens)
-#define i2c_to_lm75(c) container_of(c, struct wf_lm75_sensor, i2c)
-
-static int wf_lm75_attach(struct i2c_adapter *adapter);
-static int wf_lm75_detach(struct i2c_client *client);
-
-static struct i2c_driver wf_lm75_driver = {
-	.driver = {
-		.name	= "wf_lm75",
-	},
-	.attach_adapter	= wf_lm75_attach,
-	.detach_client	= wf_lm75_detach,
-};
 
 static int wf_lm75_get(struct wf_sensor *sr, s32 *value)
 {
 	struct wf_lm75_sensor *lm = wf_to_lm75(sr);
 	s32 data;
 
-	if (lm->i2c.adapter == NULL)
+	if (lm->i2c == NULL)
 		return -ENODEV;
 
 	/* Init chip if necessary */
 	if (!lm->inited) {
-		u8 cfg_new, cfg = (u8)i2c_smbus_read_byte_data(&lm->i2c, 1);
+		u8 cfg_new, cfg = (u8)i2c_smbus_read_byte_data(lm->i2c, 1);
 
 		DBG("wf_lm75: Initializing %s, cfg was: %02x\n",
 		    sr->name, cfg);
@@ -73,7 +61,7 @@
 		 * the firmware for now
 		 */
 		cfg_new = cfg & ~0x01;
-		i2c_smbus_write_byte_data(&lm->i2c, 1, cfg_new);
+		i2c_smbus_write_byte_data(lm->i2c, 1, cfg_new);
 		lm->inited = 1;
 
 		/* If we just powered it up, let's wait 200 ms */
@@ -81,7 +69,7 @@
 	}
 
 	/* Read temperature register */
-	data = (s32)le16_to_cpu(i2c_smbus_read_word_data(&lm->i2c, 0));
+	data = (s32)le16_to_cpu(i2c_smbus_read_word_data(lm->i2c, 0));
 	data <<= 8;
 	*value = data;
 
@@ -92,12 +80,6 @@
 {
 	struct wf_lm75_sensor *lm = wf_to_lm75(sr);
 
-	/* check if client is registered and detach from i2c */
-	if (lm->i2c.adapter) {
-		i2c_detach_client(&lm->i2c);
-		lm->i2c.adapter = NULL;
-	}
-
 	kfree(lm);
 }
 
@@ -107,59 +89,77 @@
 	.owner		= THIS_MODULE,
 };
 
-static struct wf_lm75_sensor *wf_lm75_create(struct i2c_adapter *adapter,
-					     u8 addr, int ds1775,
-					     const char *loc)
+static int wf_lm75_probe(struct i2c_client *client,
+			 const struct i2c_device_id *id)
 {
 	struct wf_lm75_sensor *lm;
 	int rc;
 
-	DBG("wf_lm75: creating  %s device at address 0x%02x\n",
-	    ds1775 ? "ds1775" : "lm75", addr);
-
 	lm = kzalloc(sizeof(struct wf_lm75_sensor), GFP_KERNEL);
 	if (lm == NULL)
-		return NULL;
+		return -ENODEV;
+
+	lm->inited = 0;
+	lm->ds1775 = id->driver_data;
+	lm->i2c = client;
+	lm->sens.name = client->dev.platform_data;
+	lm->sens.ops = &wf_lm75_ops;
+	i2c_set_clientdata(client, lm);
+
+	rc = wf_register_sensor(&lm->sens);
+	if (rc) {
+		i2c_set_clientdata(client, NULL);
+		kfree(lm);
+	}
+
+	return rc;
+}
+
+static struct i2c_client *wf_lm75_create(struct i2c_adapter *adapter,
+					     u8 addr, int ds1775,
+					     const char *loc)
+{
+	struct i2c_board_info info;
+	struct i2c_client *client;
+	char *name;
+
+	DBG("wf_lm75: creating  %s device at address 0x%02x\n",
+	    ds1775 ? "ds1775" : "lm75", addr);
 
 	/* Usual rant about sensor names not beeing very consistent in
 	 * the device-tree, oh well ...
 	 * Add more entries below as you deal with more setups
 	 */
 	if (!strcmp(loc, "Hard drive") || !strcmp(loc, "DRIVE BAY"))
-		lm->sens.name = "hd-temp";
+		name = "hd-temp";
 	else if (!strcmp(loc, "Incoming Air Temp"))
-		lm->sens.name = "incoming-air-temp";
+		name = "incoming-air-temp";
 	else if (!strcmp(loc, "ODD Temp"))
-		lm->sens.name = "optical-drive-temp";
+		name = "optical-drive-temp";
 	else if (!strcmp(loc, "HD Temp"))
-		lm->sens.name = "hard-drive-temp";
+		name = "hard-drive-temp";
 	else
 		goto fail;
 
-	lm->inited = 0;
-	lm->sens.ops = &wf_lm75_ops;
-	lm->ds1775 = ds1775;
-	lm->i2c.addr = (addr >> 1) & 0x7f;
-	lm->i2c.adapter = adapter;
-	lm->i2c.driver = &wf_lm75_driver;
-	strncpy(lm->i2c.name, lm->sens.name, I2C_NAME_SIZE-1);
+	memset(&info, 0, sizeof(struct i2c_board_info));
+	info.addr = (addr >> 1) & 0x7f;
+	info.platform_data = name;
+	strlcpy(info.type, ds1775 ? "wf_ds1775" : "wf_lm75", I2C_NAME_SIZE);
 
-	rc = i2c_attach_client(&lm->i2c);
-	if (rc) {
-		printk(KERN_ERR "windfarm: failed to attach %s %s to i2c,"
-		       " err %d\n", ds1775 ? "ds1775" : "lm75",
-		       lm->i2c.name, rc);
+	client = i2c_new_device(adapter, &info);
+	if (client == NULL) {
+		printk(KERN_ERR "windfarm: failed to attach %s %s to i2c\n",
+		       ds1775 ? "ds1775" : "lm75", name);
 		goto fail;
 	}
 
-	if (wf_register_sensor(&lm->sens)) {
-		i2c_detach_client(&lm->i2c);
-		goto fail;
-	}
-
-	return lm;
+	/*
+	 * Let i2c-core delete that device on driver removal.
+	 * This is safe because i2c-core holds the core_lock mutex for us.
+	 */
+	list_add_tail(&client->detected, &client->driver->clients);
+	return client;
  fail:
-	kfree(lm);
 	return NULL;
 }
 
@@ -202,21 +202,38 @@
 	return 0;
 }
 
-static int wf_lm75_detach(struct i2c_client *client)
+static int wf_lm75_remove(struct i2c_client *client)
 {
-	struct wf_lm75_sensor *lm = i2c_to_lm75(client);
+	struct wf_lm75_sensor *lm = i2c_get_clientdata(client);
 
 	DBG("wf_lm75: i2c detatch called for %s\n", lm->sens.name);
 
 	/* Mark client detached */
-	lm->i2c.adapter = NULL;
+	lm->i2c = NULL;
 
 	/* release sensor */
 	wf_unregister_sensor(&lm->sens);
 
+	i2c_set_clientdata(client, NULL);
 	return 0;
 }
 
+static const struct i2c_device_id wf_lm75_id[] = {
+	{ "wf_lm75", 0 },
+	{ "wf_ds1775", 1 },
+	{ }
+};
+
+static struct i2c_driver wf_lm75_driver = {
+	.driver = {
+		.name	= "wf_lm75",
+	},
+	.attach_adapter	= wf_lm75_attach,
+	.probe		= wf_lm75_probe,
+	.remove		= wf_lm75_remove,
+	.id_table	= wf_lm75_id,
+};
+
 static int __init wf_lm75_sensor_init(void)
 {
 	/* Don't register on old machines that use therm_pm72 for now */
diff --git a/drivers/macintosh/windfarm_max6690_sensor.c b/drivers/macintosh/windfarm_max6690_sensor.c
index e207a90..e2a55ec 100644
--- a/drivers/macintosh/windfarm_max6690_sensor.c
+++ b/drivers/macintosh/windfarm_max6690_sensor.c
@@ -26,34 +26,22 @@
 #define MAX6690_EXTERNAL_TEMP	1
 
 struct wf_6690_sensor {
-	struct i2c_client	i2c;
+	struct i2c_client	*i2c;
 	struct wf_sensor	sens;
 };
 
 #define wf_to_6690(x)	container_of((x), struct wf_6690_sensor, sens)
-#define i2c_to_6690(x)	container_of((x), struct wf_6690_sensor, i2c)
-
-static int wf_max6690_attach(struct i2c_adapter *adapter);
-static int wf_max6690_detach(struct i2c_client *client);
-
-static struct i2c_driver wf_max6690_driver = {
-	.driver = {
-		.name		= "wf_max6690",
-	},
-	.attach_adapter	= wf_max6690_attach,
-	.detach_client	= wf_max6690_detach,
-};
 
 static int wf_max6690_get(struct wf_sensor *sr, s32 *value)
 {
 	struct wf_6690_sensor *max = wf_to_6690(sr);
 	s32 data;
 
-	if (max->i2c.adapter == NULL)
+	if (max->i2c == NULL)
 		return -ENODEV;
 
 	/* chip gets initialized by firmware */
-	data = i2c_smbus_read_byte_data(&max->i2c, MAX6690_EXTERNAL_TEMP);
+	data = i2c_smbus_read_byte_data(max->i2c, MAX6690_EXTERNAL_TEMP);
 	if (data < 0)
 		return data;
 	*value = data << 16;
@@ -64,10 +52,6 @@
 {
 	struct wf_6690_sensor *max = wf_to_6690(sr);
 
-	if (max->i2c.adapter) {
-		i2c_detach_client(&max->i2c);
-		max->i2c.adapter = NULL;
-	}
 	kfree(max);
 }
 
@@ -77,19 +61,40 @@
 	.owner		= THIS_MODULE,
 };
 
-static void wf_max6690_create(struct i2c_adapter *adapter, u8 addr,
-			      const char *loc)
+static int wf_max6690_probe(struct i2c_client *client,
+			    const struct i2c_device_id *id)
 {
 	struct wf_6690_sensor *max;
-	char *name;
+	int rc;
 
 	max = kzalloc(sizeof(struct wf_6690_sensor), GFP_KERNEL);
 	if (max == NULL) {
-		printk(KERN_ERR "windfarm: Couldn't create MAX6690 sensor %s: "
-		       "no memory\n", loc);
-		return;
+		printk(KERN_ERR "windfarm: Couldn't create MAX6690 sensor: "
+		       "no memory\n");
+		return -ENOMEM;
 	}
 
+	max->i2c = client;
+	max->sens.name = client->dev.platform_data;
+	max->sens.ops = &wf_max6690_ops;
+	i2c_set_clientdata(client, max);
+
+	rc = wf_register_sensor(&max->sens);
+	if (rc) {
+		i2c_set_clientdata(client, NULL);
+		kfree(max);
+	}
+
+	return rc;
+}
+
+static struct i2c_client *wf_max6690_create(struct i2c_adapter *adapter,
+					    u8 addr, const char *loc)
+{
+	struct i2c_board_info info;
+	struct i2c_client *client;
+	char *name;
+
 	if (!strcmp(loc, "BACKSIDE"))
 		name = "backside-temp";
 	else if (!strcmp(loc, "NB Ambient"))
@@ -99,27 +104,26 @@
 	else
 		goto fail;
 
-	max->sens.ops = &wf_max6690_ops;
-	max->sens.name = name;
-	max->i2c.addr = addr >> 1;
-	max->i2c.adapter = adapter;
-	max->i2c.driver = &wf_max6690_driver;
-	strncpy(max->i2c.name, name, I2C_NAME_SIZE-1);
+	memset(&info, 0, sizeof(struct i2c_board_info));
+	info.addr = addr >> 1;
+	info.platform_data = name;
+	strlcpy(info.type, "wf_max6690", I2C_NAME_SIZE);
 
-	if (i2c_attach_client(&max->i2c)) {
+	client = i2c_new_device(adapter, &info);
+	if (client == NULL) {
 		printk(KERN_ERR "windfarm: failed to attach MAX6690 sensor\n");
 		goto fail;
 	}
 
-	if (wf_register_sensor(&max->sens)) {
-		i2c_detach_client(&max->i2c);
-		goto fail;
-	}
-
-	return;
+	/*
+	 * Let i2c-core delete that device on driver removal.
+	 * This is safe because i2c-core holds the core_lock mutex for us.
+	 */
+	list_add_tail(&client->detected, &client->driver->clients);
+	return client;
 
  fail:
-	kfree(max);
+	return NULL;
 }
 
 static int wf_max6690_attach(struct i2c_adapter *adapter)
@@ -154,16 +158,31 @@
 	return 0;
 }
 
-static int wf_max6690_detach(struct i2c_client *client)
+static int wf_max6690_remove(struct i2c_client *client)
 {
-	struct wf_6690_sensor *max = i2c_to_6690(client);
+	struct wf_6690_sensor *max = i2c_get_clientdata(client);
 
-	max->i2c.adapter = NULL;
+	max->i2c = NULL;
 	wf_unregister_sensor(&max->sens);
 
 	return 0;
 }
 
+static const struct i2c_device_id wf_max6690_id[] = {
+	{ "wf_max6690", 0 },
+	{ }
+};
+
+static struct i2c_driver wf_max6690_driver = {
+	.driver = {
+		.name		= "wf_max6690",
+	},
+	.attach_adapter	= wf_max6690_attach,
+	.probe		= wf_max6690_probe,
+	.remove		= wf_max6690_remove,
+	.id_table	= wf_max6690_id,
+};
+
 static int __init wf_max6690_sensor_init(void)
 {
 	/* Don't register on old machines that use therm_pm72 for now */
diff --git a/drivers/macintosh/windfarm_smu_sat.c b/drivers/macintosh/windfarm_smu_sat.c
index 7847e98..5da729e 100644
--- a/drivers/macintosh/windfarm_smu_sat.c
+++ b/drivers/macintosh/windfarm_smu_sat.c
@@ -39,7 +39,7 @@
 	struct mutex		mutex;
 	unsigned long		last_read; /* jiffies when cache last updated */
 	u8			cache[16];
-	struct i2c_client	i2c;
+	struct i2c_client	*i2c;
 	struct device_node	*node;
 };
 
@@ -54,18 +54,6 @@
 };
 
 #define wf_to_sat(c)	container_of(c, struct wf_sat_sensor, sens)
-#define i2c_to_sat(c)	container_of(c, struct wf_sat, i2c)
-
-static int wf_sat_attach(struct i2c_adapter *adapter);
-static int wf_sat_detach(struct i2c_client *client);
-
-static struct i2c_driver wf_sat_driver = {
-	.driver = {
-		.name		= "wf_smu_sat",
-	},
-	.attach_adapter	= wf_sat_attach,
-	.detach_client	= wf_sat_detach,
-};
 
 struct smu_sdbp_header *smu_sat_get_sdb_partition(unsigned int sat_id, int id,
 						  unsigned int *size)
@@ -81,13 +69,13 @@
 	if (sat_id > 1 || (sat = sats[sat_id]) == NULL)
 		return NULL;
 
-	err = i2c_smbus_write_word_data(&sat->i2c, 8, id << 8);
+	err = i2c_smbus_write_word_data(sat->i2c, 8, id << 8);
 	if (err) {
 		printk(KERN_ERR "smu_sat_get_sdb_part wr error %d\n", err);
 		return NULL;
 	}
 
-	err = i2c_smbus_read_word_data(&sat->i2c, 9);
+	err = i2c_smbus_read_word_data(sat->i2c, 9);
 	if (err < 0) {
 		printk(KERN_ERR "smu_sat_get_sdb_part rd len error\n");
 		return NULL;
@@ -105,7 +93,7 @@
 		return NULL;
 
 	for (i = 0; i < len; i += 4) {
-		err = i2c_smbus_read_i2c_block_data(&sat->i2c, 0xa, 4, data);
+		err = i2c_smbus_read_i2c_block_data(sat->i2c, 0xa, 4, data);
 		if (err < 0) {
 			printk(KERN_ERR "smu_sat_get_sdb_part rd err %d\n",
 			       err);
@@ -138,7 +126,7 @@
 {
 	int err;
 
-	err = i2c_smbus_read_i2c_block_data(&sat->i2c, 0x3f, 16, sat->cache);
+	err = i2c_smbus_read_i2c_block_data(sat->i2c, 0x3f, 16, sat->cache);
 	if (err < 0)
 		return err;
 	sat->last_read = jiffies;
@@ -161,7 +149,7 @@
 	int i, err;
 	s32 val;
 
-	if (sat->i2c.adapter == NULL)
+	if (sat->i2c == NULL)
 		return -ENODEV;
 
 	mutex_lock(&sat->mutex);
@@ -193,10 +181,6 @@
 	struct wf_sat *sat = sens->sat;
 
 	if (atomic_dec_and_test(&sat->refcnt)) {
-		if (sat->i2c.adapter) {
-			i2c_detach_client(&sat->i2c);
-			sat->i2c.adapter = NULL;
-		}
 		if (sat->nr >= 0)
 			sats[sat->nr] = NULL;
 		kfree(sat);
@@ -212,15 +196,10 @@
 
 static void wf_sat_create(struct i2c_adapter *adapter, struct device_node *dev)
 {
-	struct wf_sat *sat;
-	struct wf_sat_sensor *sens;
+	struct i2c_board_info info;
+	struct i2c_client *client;
 	const u32 *reg;
-	const char *loc, *type;
-	u8 addr, chip, core;
-	struct device_node *child;
-	int shift, cpu, index;
-	char *name;
-	int vsens[2], isens[2];
+	u8 addr;
 
 	reg = of_get_property(dev, "reg", NULL);
 	if (reg == NULL)
@@ -228,22 +207,47 @@
 	addr = *reg;
 	DBG(KERN_DEBUG "wf_sat: creating sat at address %x\n", addr);
 
+	memset(&info, 0, sizeof(struct i2c_board_info));
+	info.addr = (addr >> 1) & 0x7f;
+	info.platform_data = dev;
+	strlcpy(info.type, "wf_sat", I2C_NAME_SIZE);
+
+	client = i2c_new_device(adapter, &info);
+	if (client == NULL) {
+		printk(KERN_ERR "windfarm: failed to attach smu-sat to i2c\n");
+		return;
+	}
+
+	/*
+	 * Let i2c-core delete that device on driver removal.
+	 * This is safe because i2c-core holds the core_lock mutex for us.
+	 */
+	list_add_tail(&client->detected, &client->driver->clients);
+}
+
+static int wf_sat_probe(struct i2c_client *client,
+			const struct i2c_device_id *id)
+{
+	struct device_node *dev = client->dev.platform_data;
+	struct wf_sat *sat;
+	struct wf_sat_sensor *sens;
+	const u32 *reg;
+	const char *loc, *type;
+	u8 chip, core;
+	struct device_node *child;
+	int shift, cpu, index;
+	char *name;
+	int vsens[2], isens[2];
+
 	sat = kzalloc(sizeof(struct wf_sat), GFP_KERNEL);
 	if (sat == NULL)
-		return;
+		return -ENOMEM;
 	sat->nr = -1;
 	sat->node = of_node_get(dev);
 	atomic_set(&sat->refcnt, 0);
 	mutex_init(&sat->mutex);
-	sat->i2c.addr = (addr >> 1) & 0x7f;
-	sat->i2c.adapter = adapter;
-	sat->i2c.driver = &wf_sat_driver;
-	strncpy(sat->i2c.name, "smu-sat", I2C_NAME_SIZE-1);
-
-	if (i2c_attach_client(&sat->i2c)) {
-		printk(KERN_ERR "windfarm: failed to attach smu-sat to i2c\n");
-		goto fail;
-	}
+	sat->i2c = client;
+	i2c_set_clientdata(client, sat);
 
 	vsens[0] = vsens[1] = -1;
 	isens[0] = isens[1] = -1;
@@ -344,10 +348,7 @@
 	if (sat->nr >= 0)
 		sats[sat->nr] = sat;
 
-	return;
-
- fail:
-	kfree(sat);
+	return 0;
 }
 
 static int wf_sat_attach(struct i2c_adapter *adapter)
@@ -366,16 +367,32 @@
 	return 0;
 }
 
-static int wf_sat_detach(struct i2c_client *client)
+static int wf_sat_remove(struct i2c_client *client)
 {
-	struct wf_sat *sat = i2c_to_sat(client);
+	struct wf_sat *sat = i2c_get_clientdata(client);
 
 	/* XXX TODO */
 
-	sat->i2c.adapter = NULL;
+	sat->i2c = NULL;
+	i2c_set_clientdata(client, NULL);
 	return 0;
 }
 
+static const struct i2c_device_id wf_sat_id[] = {
+	{ "wf_sat", 0 },
+	{ }
+};
+
+static struct i2c_driver wf_sat_driver = {
+	.driver = {
+		.name		= "wf_smu_sat",
+	},
+	.attach_adapter	= wf_sat_attach,
+	.probe		= wf_sat_probe,
+	.remove		= wf_sat_remove,
+	.id_table	= wf_sat_id,
+};
+
 static int __init sat_sensors_init(void)
 {
 	return i2c_add_driver(&wf_sat_driver);
diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
index 823ceba..1128d3f 100644
--- a/drivers/md/dm-ioctl.c
+++ b/drivers/md/dm-ioctl.c
@@ -1513,6 +1513,7 @@
 static struct miscdevice _dm_misc = {
 	.minor 		= MISC_DYNAMIC_MINOR,
 	.name  		= DM_NAME,
+	.devnode	= "mapper/control",
 	.fops  		= &_ctl_fops
 };
 
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 3fd8b1e..48db308 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -19,7 +19,6 @@
 #include <linux/slab.h>
 #include <linux/idr.h>
 #include <linux/hdreg.h>
-#include <linux/blktrace_api.h>
 
 #include <trace/events/block.h>
 
diff --git a/drivers/media/common/tuners/tuner-xc2028.c b/drivers/media/common/tuners/tuner-xc2028.c
index 1adce9f..7636c33 100644
--- a/drivers/media/common/tuners/tuner-xc2028.c
+++ b/drivers/media/common/tuners/tuner-xc2028.c
@@ -48,7 +48,7 @@
 	"NICAM/A\n"
 	"NICAM/B\n");
 
-static char firmware_name[FIRMWARE_NAME_MAX];
+static char firmware_name[30];
 module_param_string(firmware_name, firmware_name, sizeof(firmware_name), 0);
 MODULE_PARM_DESC(firmware_name, "Firmware file name. Allows overriding the "
 				"default firmware name\n");
diff --git a/drivers/media/dvb/dvb-core/dvbdev.c b/drivers/media/dvb/dvb-core/dvbdev.c
index a454ee8..479dd05 100644
--- a/drivers/media/dvb/dvb-core/dvbdev.c
+++ b/drivers/media/dvb/dvb-core/dvbdev.c
@@ -447,6 +447,15 @@
 	return 0;
 }
 
+static char *dvb_nodename(struct device *dev)
+{
+	struct dvb_device *dvbdev = dev_get_drvdata(dev);
+
+	return kasprintf(GFP_KERNEL, "dvb/adapter%d/%s%d",
+		dvbdev->adapter->num, dnames[dvbdev->type], dvbdev->id);
+}
+
+
 static int __init init_dvbdev(void)
 {
 	int retval;
@@ -469,6 +478,7 @@
 		goto error;
 	}
 	dvb_class->dev_uevent = dvb_uevent;
+	dvb_class->nodename = dvb_nodename;
 	return 0;
 
 error:
diff --git a/drivers/media/dvb/dvb-usb/dvb-usb.h b/drivers/media/dvb/dvb-usb/dvb-usb.h
index 2d5352e..b515751 100644
--- a/drivers/media/dvb/dvb-usb/dvb-usb.h
+++ b/drivers/media/dvb/dvb-usb/dvb-usb.h
@@ -196,7 +196,7 @@
 #define CYPRESS_FX2     3
 	int        usb_ctrl;
 	int        (*download_firmware) (struct usb_device *, const struct firmware *);
-	const char firmware[FIRMWARE_NAME_MAX];
+	const char *firmware;
 	int        no_reconnect;
 
 	int size_of_priv;
diff --git a/drivers/media/dvb/firewire/firedtv-1394.c b/drivers/media/dvb/firewire/firedtv-1394.c
index 4e20765..2b6eeea 100644
--- a/drivers/media/dvb/firewire/firedtv-1394.c
+++ b/drivers/media/dvb/firewire/firedtv-1394.c
@@ -225,7 +225,7 @@
 
 static int node_remove(struct device *dev)
 {
-	struct firedtv *fdtv = dev->driver_data;
+	struct firedtv *fdtv = dev_get_drvdata(dev);
 
 	fdtv_dvb_unregister(fdtv);
 
@@ -242,7 +242,7 @@
 
 static int node_update(struct unit_directory *ud)
 {
-	struct firedtv *fdtv = ud->device.driver_data;
+	struct firedtv *fdtv = dev_get_drvdata(&ud->device);
 
 	if (fdtv->isochannel >= 0)
 		cmp_establish_pp_connection(fdtv, fdtv->subunit,
diff --git a/drivers/media/dvb/firewire/firedtv-dvb.c b/drivers/media/dvb/firewire/firedtv-dvb.c
index 9d308dd..5742fde 100644
--- a/drivers/media/dvb/firewire/firedtv-dvb.c
+++ b/drivers/media/dvb/firewire/firedtv-dvb.c
@@ -268,7 +268,7 @@
 	if (!fdtv)
 		return NULL;
 
-	dev->driver_data	= fdtv;
+	dev_set_drvdata(dev, fdtv);
 	fdtv->device		= dev;
 	fdtv->isochannel	= -1;
 	fdtv->voltage		= 0xff;
diff --git a/drivers/media/video/dabusb.c b/drivers/media/video/dabusb.c
index ba3709b..ec2f45d 100644
--- a/drivers/media/video/dabusb.c
+++ b/drivers/media/video/dabusb.c
@@ -747,8 +747,14 @@
 	.release =	dabusb_release,
 };
 
+static char *dabusb_nodename(struct device *dev)
+{
+	return kasprintf(GFP_KERNEL, "usb/%s", dev_name(dev));
+}
+
 static struct usb_class_driver dabusb_class = {
 	.name =		"dabusb%d",
+	.nodename =	dabusb_nodename,
 	.fops =		&dabusb_fops,
 	.minor_base =	DABUSB_MINOR,
 };
diff --git a/drivers/media/video/pvrusb2/pvrusb2-sysfs.c b/drivers/media/video/pvrusb2/pvrusb2-sysfs.c
index 299c1cb..6c23456 100644
--- a/drivers/media/video/pvrusb2/pvrusb2-sysfs.c
+++ b/drivers/media/video/pvrusb2/pvrusb2-sysfs.c
@@ -539,7 +539,7 @@
 					 &sfp->attr_unit_number);
 	}
 	pvr2_sysfs_trace("Destroying class_dev id=%p",sfp->class_dev);
-	sfp->class_dev->driver_data = NULL;
+	dev_set_drvdata(sfp->class_dev, NULL);
 	device_unregister(sfp->class_dev);
 	sfp->class_dev = NULL;
 }
@@ -549,7 +549,7 @@
 				     struct device_attribute *attr, char *buf)
 {
 	struct pvr2_sysfs *sfp;
-	sfp = (struct pvr2_sysfs *)class_dev->driver_data;
+	sfp = dev_get_drvdata(class_dev);
 	if (!sfp) return -EINVAL;
 	return scnprintf(buf,PAGE_SIZE,"%d\n",
 			 pvr2_hdw_v4l_get_minor_number(sfp->channel.hdw,
@@ -561,7 +561,7 @@
 			     struct device_attribute *attr, char *buf)
 {
 	struct pvr2_sysfs *sfp;
-	sfp = (struct pvr2_sysfs *)class_dev->driver_data;
+	sfp = dev_get_drvdata(class_dev);
 	if (!sfp) return -EINVAL;
 	return scnprintf(buf,PAGE_SIZE,"%s\n",
 			 pvr2_hdw_get_bus_info(sfp->channel.hdw));
@@ -572,7 +572,7 @@
 			     struct device_attribute *attr, char *buf)
 {
 	struct pvr2_sysfs *sfp;
-	sfp = (struct pvr2_sysfs *)class_dev->driver_data;
+	sfp = dev_get_drvdata(class_dev);
 	if (!sfp) return -EINVAL;
 	return scnprintf(buf,PAGE_SIZE,"%s\n",
 			 pvr2_hdw_get_type(sfp->channel.hdw));
@@ -583,7 +583,7 @@
 			     struct device_attribute *attr, char *buf)
 {
 	struct pvr2_sysfs *sfp;
-	sfp = (struct pvr2_sysfs *)class_dev->driver_data;
+	sfp = dev_get_drvdata(class_dev);
 	if (!sfp) return -EINVAL;
 	return scnprintf(buf,PAGE_SIZE,"%s\n",
 			 pvr2_hdw_get_desc(sfp->channel.hdw));
@@ -595,7 +595,7 @@
 					   char *buf)
 {
 	struct pvr2_sysfs *sfp;
-	sfp = (struct pvr2_sysfs *)class_dev->driver_data;
+	sfp = dev_get_drvdata(class_dev);
 	if (!sfp) return -EINVAL;
 	return scnprintf(buf,PAGE_SIZE,"%d\n",
 			 pvr2_hdw_v4l_get_minor_number(sfp->channel.hdw,
@@ -607,7 +607,7 @@
 				struct device_attribute *attr, char *buf)
 {
 	struct pvr2_sysfs *sfp;
-	sfp = (struct pvr2_sysfs *)class_dev->driver_data;
+	sfp = dev_get_drvdata(class_dev);
 	if (!sfp) return -EINVAL;
 	return scnprintf(buf,PAGE_SIZE,"%d\n",
 			 pvr2_hdw_get_unit_number(sfp->channel.hdw));
@@ -635,7 +635,7 @@
 	class_dev->parent = &usb_dev->dev;
 
 	sfp->class_dev = class_dev;
-	class_dev->driver_data = sfp;
+	dev_set_drvdata(class_dev, sfp);
 	ret = device_register(class_dev);
 	if (ret) {
 		pvr2_trace(PVR2_TRACE_ERROR_LEGS,
@@ -792,7 +792,7 @@
 			      struct device_attribute *attr, char *buf)
 {
 	struct pvr2_sysfs *sfp;
-	sfp = (struct pvr2_sysfs *)class_dev->driver_data;
+	sfp = dev_get_drvdata(class_dev);
 	if (!sfp) return -EINVAL;
 	pvr2_hdw_trigger_module_log(sfp->channel.hdw);
 	return pvr2_debugifc_print_info(sfp->channel.hdw,buf,PAGE_SIZE);
@@ -803,7 +803,7 @@
 			     struct device_attribute *attr, char *buf)
 {
 	struct pvr2_sysfs *sfp;
-	sfp = (struct pvr2_sysfs *)class_dev->driver_data;
+	sfp = dev_get_drvdata(class_dev);
 	if (!sfp) return -EINVAL;
 	return pvr2_debugifc_print_status(sfp->channel.hdw,buf,PAGE_SIZE);
 }
@@ -816,7 +816,7 @@
 	struct pvr2_sysfs *sfp;
 	int ret;
 
-	sfp = (struct pvr2_sysfs *)class_dev->driver_data;
+	sfp = dev_get_drvdata(class_dev);
 	if (!sfp) return -EINVAL;
 
 	ret = pvr2_debugifc_docmd(sfp->channel.hdw,buf,count);
diff --git a/drivers/mfd/htc-pasic3.c b/drivers/mfd/htc-pasic3.c
index 386da15..cb73051 100644
--- a/drivers/mfd/htc-pasic3.c
+++ b/drivers/mfd/htc-pasic3.c
@@ -35,7 +35,7 @@
  */
 void pasic3_write_register(struct device *dev, u32 reg, u8 val)
 {
-	struct pasic3_data *asic = dev->driver_data;
+	struct pasic3_data *asic = dev_get_drvdata(dev);
 	int bus_shift = asic->bus_shift;
 	void __iomem *addr = asic->mapping + (REG_ADDR << bus_shift);
 	void __iomem *data = asic->mapping + (REG_DATA << bus_shift);
@@ -50,7 +50,7 @@
  */
 u8 pasic3_read_register(struct device *dev, u32 reg)
 {
-	struct pasic3_data *asic = dev->driver_data;
+	struct pasic3_data *asic = dev_get_drvdata(dev);
 	int bus_shift = asic->bus_shift;
 	void __iomem *addr = asic->mapping + (REG_ADDR << bus_shift);
 	void __iomem *data = asic->mapping + (REG_DATA << bus_shift);
diff --git a/drivers/mfd/pcf50633-core.c b/drivers/mfd/pcf50633-core.c
index 11a6248..082c197 100644
--- a/drivers/mfd/pcf50633-core.c
+++ b/drivers/mfd/pcf50633-core.c
@@ -618,7 +618,7 @@
 
 		pdev->dev.parent = pcf->dev;
 		pdev->dev.platform_data = &pdata->reg_init_data[i];
-		pdev->dev.driver_data = pcf;
+		dev_set_drvdata(&pdev->dev, pcf);
 		pcf->regulator_pdev[i] = pdev;
 
 		platform_device_add(pdev);
diff --git a/drivers/mfd/wm8400-core.c b/drivers/mfd/wm8400-core.c
index cf30d06..7c21bf7 100644
--- a/drivers/mfd/wm8400-core.c
+++ b/drivers/mfd/wm8400-core.c
@@ -265,7 +265,7 @@
 
 	mutex_init(&wm8400->io_lock);
 
-	wm8400->dev->driver_data = wm8400;
+	dev_set_drvdata(wm8400->dev, wm8400);
 
 	/* Check that this is actually a WM8400 */
 	ret = wm8400->read_dev(wm8400->io_data, WM8400_RESET_ID, 1, &reg);
diff --git a/drivers/misc/c2port/core.c b/drivers/misc/c2port/core.c
index 0207dd5..b5346b4 100644
--- a/drivers/misc/c2port/core.c
+++ b/drivers/misc/c2port/core.c
@@ -15,6 +15,7 @@
 #include <linux/errno.h>
 #include <linux/err.h>
 #include <linux/kernel.h>
+#include <linux/kmemcheck.h>
 #include <linux/ctype.h>
 #include <linux/delay.h>
 #include <linux/idr.h>
@@ -891,6 +892,7 @@
 		return ERR_PTR(-EINVAL);
 
 	c2dev = kmalloc(sizeof(struct c2port_device), GFP_KERNEL);
+	kmemcheck_annotate_bitfield(c2dev, flags);
 	if (unlikely(!c2dev))
 		return ERR_PTR(-ENOMEM);
 
diff --git a/drivers/misc/eeprom/Kconfig b/drivers/misc/eeprom/Kconfig
index 89fec05..9118613 100644
--- a/drivers/misc/eeprom/Kconfig
+++ b/drivers/misc/eeprom/Kconfig
@@ -48,6 +48,20 @@
 	  This driver can also be built as a module.  If so, the module
 	  will be called eeprom.
 
+config EEPROM_MAX6875
+	tristate "Maxim MAX6874/5 power supply supervisor"
+	depends on I2C && EXPERIMENTAL
+	help
+	  If you say yes here you get read-only support for the user EEPROM of
+	  the Maxim MAX6874/5 EEPROM-programmable, quad power-supply
+	  sequencer/supervisor.
+
+	  All other features of this chip should be accessed via i2c-dev.
+
+	  This driver can also be built as a module.  If so, the module
+	  will be called max6875.
+
+
 config EEPROM_93CX6
 	tristate "EEPROM 93CX6 support"
 	help
diff --git a/drivers/misc/eeprom/Makefile b/drivers/misc/eeprom/Makefile
index 539dd8f..df3d68f 100644
--- a/drivers/misc/eeprom/Makefile
+++ b/drivers/misc/eeprom/Makefile
@@ -1,4 +1,5 @@
 obj-$(CONFIG_EEPROM_AT24)	+= at24.o
 obj-$(CONFIG_EEPROM_AT25)	+= at25.o
 obj-$(CONFIG_EEPROM_LEGACY)	+= eeprom.o
+obj-$(CONFIG_EEPROM_MAX6875)	+= max6875.o
 obj-$(CONFIG_EEPROM_93CX6)	+= eeprom_93cx6.o
diff --git a/drivers/i2c/chips/max6875.c b/drivers/misc/eeprom/max6875.c
similarity index 99%
rename from drivers/i2c/chips/max6875.c
rename to drivers/misc/eeprom/max6875.c
index 033d9d8..3c0c58e 100644
--- a/drivers/i2c/chips/max6875.c
+++ b/drivers/misc/eeprom/max6875.c
@@ -3,7 +3,7 @@
 
     Copyright (C) 2005 Ben Gardner <bgardner@wabtec.com>
 
-    Based on i2c/chips/eeprom.c
+    Based on eeprom.c
 
     The MAX6875 has a bank of registers and two banks of EEPROM.
     Address ranges are defined as follows:
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 01f282c..3b63831 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -2206,7 +2206,7 @@
        depends on SKGE && DEBUG_FS
        help
 	 This option adds the ability to dump driver state for debugging.
-	 The file debugfs/skge/ethX displays the state of the internal
+	 The file /sys/kernel/debug/skge/ethX displays the state of the internal
 	 transmit and receive rings.
 
 	 If unsure, say N.
@@ -2232,7 +2232,7 @@
        depends on SKY2 && DEBUG_FS
        help
 	 This option adds the ability to dump driver state for debugging.
-	 The file debugfs/sky2/ethX displays the state of the internal
+	 The file /sys/kernel/debug/sky2/ethX displays the state of the internal
 	 transmit and receive rings.
 
 	 If unsure, say N.
diff --git a/drivers/net/ps3_gelic_net.c b/drivers/net/ps3_gelic_net.c
index 30900b3..2b38f39 100644
--- a/drivers/net/ps3_gelic_net.c
+++ b/drivers/net/ps3_gelic_net.c
@@ -1648,7 +1648,7 @@
 		result = -ENOMEM;
 		goto fail_alloc_card;
 	}
-	ps3_system_bus_set_driver_data(dev, card);
+	ps3_system_bus_set_drvdata(dev, card);
 	card->dev = dev;
 
 	/* get internal vlan info */
@@ -1749,7 +1749,7 @@
 					       bus_id(card),
 					       0, 0);
 fail_status_indicator:
-	ps3_system_bus_set_driver_data(dev, NULL);
+	ps3_system_bus_set_drvdata(dev, NULL);
 	kfree(netdev_card(netdev)->unalign);
 	free_netdev(netdev);
 fail_alloc_card:
@@ -1766,7 +1766,7 @@
 
 static int ps3_gelic_driver_remove(struct ps3_system_bus_device *dev)
 {
-	struct gelic_card *card = ps3_system_bus_get_driver_data(dev);
+	struct gelic_card *card = ps3_system_bus_get_drvdata(dev);
 	struct net_device *netdev0;
 	pr_debug("%s: called\n", __func__);
 
@@ -1803,7 +1803,7 @@
 	kfree(netdev_card(netdev0)->unalign);
 	free_netdev(netdev0);
 
-	ps3_system_bus_set_driver_data(dev, NULL);
+	ps3_system_bus_set_drvdata(dev, NULL);
 
 	ps3_dma_region_free(dev->d_region);
 
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 811d351..11a0ba4 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -1366,6 +1366,7 @@
 static struct miscdevice tun_miscdev = {
 	.minor = TUN_MINOR,
 	.name = "tun",
+	.devnode = "net/tun",
 	.fops = &tun_fops,
 };
 
diff --git a/drivers/net/wimax/i2400m/i2400m.h b/drivers/net/wimax/i2400m/i2400m.h
index 1fe5da4..60330f3 100644
--- a/drivers/net/wimax/i2400m/i2400m.h
+++ b/drivers/net/wimax/i2400m/i2400m.h
@@ -432,7 +432,7 @@
 	unsigned ready:1;		/* all probing steps done */
 	unsigned rx_reorder:1;		/* RX reorder is enabled */
 	u8 trace_msg_from_user;		/* echo rx msgs to 'trace' pipe */
-					/* typed u8 so debugfs/u8 can tweak */
+					/* typed u8 so /sys/kernel/debug/u8 can tweak */
 	enum i2400m_system_state state;
 	wait_queue_head_t state_wq;	/* Woken up when on state updates */
 
diff --git a/drivers/net/wireless/ath/ath5k/Kconfig b/drivers/net/wireless/ath/ath5k/Kconfig
index 509b6f9..daf0c83 100644
--- a/drivers/net/wireless/ath/ath5k/Kconfig
+++ b/drivers/net/wireless/ath/ath5k/Kconfig
@@ -28,11 +28,10 @@
 	  Say Y, and you will get debug options for ath5k.
 	  To use this, you need to mount debugfs:
 
-	  mkdir /debug/
-	  mount -t debugfs debug /debug/
+	  mount -t debugfs debug /sys/kernel/debug
 
 	  You will get access to files under:
-	  /debug/ath5k/phy0/
+	  /sys/kernel/debug/ath5k/phy0/
 
 	  To enable debug, pass the debug level to the debug module
 	  parameter. For example:
diff --git a/drivers/net/wireless/libertas/README b/drivers/net/wireless/libertas/README
index d860fc3..ab6a2d5 100644
--- a/drivers/net/wireless/libertas/README
+++ b/drivers/net/wireless/libertas/README
@@ -72,7 +72,7 @@
 	location that is to be read.  This parameter must be specified in
 	hexadecimal (it's possible to precede the number with a "0x").
 
-	Path: /debugfs/libertas_wireless/ethX/registers/
+	Path: /sys/kernel/debug/libertas_wireless/ethX/registers/
 
 	Usage:
 		echo "0xa123" > rdmac ; cat rdmac
@@ -95,7 +95,7 @@
 sleepparams
 	This command is used to set the sleepclock configurations
 
-	Path: /debugfs/libertas_wireless/ethX/
+	Path: /sys/kernel/debug/libertas_wireless/ethX/
 
 	Usage:
 		cat sleepparams: reads the current sleepclock configuration
@@ -115,7 +115,7 @@
 	The subscribed_events directory contains the interface for the
 	subscribed events API.
 
-	Path: /debugfs/libertas_wireless/ethX/subscribed_events/
+	Path: /sys/kernel/debug/libertas_wireless/ethX/subscribed_events/
 
 	Each event is represented by a filename. Each filename consists of the
 	following three fields:
@@ -165,7 +165,7 @@
 extscan
 	This command is used to do a specific scan.
 
-	Path: /debugfs/libertas_wireless/ethX/
+	Path: /sys/kernel/debug/libertas_wireless/ethX/
 
 	Usage: echo "SSID" > extscan
 
@@ -179,7 +179,7 @@
 	Display the current contents of the driver scan table (ie. get the
 	scan results).
 
-	Path: /debugfs/libertas_wireless/ethX/
+	Path: /sys/kernel/debug/libertas_wireless/ethX/
 
 	Usage:
 		cat getscantable
@@ -188,7 +188,7 @@
 	Initiate a customized scan and retrieve the results
 
 
-	Path: /debugfs/libertas_wireless/ethX/
+	Path: /sys/kernel/debug/libertas_wireless/ethX/
 
     Usage:
        echo "[ARGS]" > setuserscan
diff --git a/drivers/net/wireless/libertas/if_spi.c b/drivers/net/wireless/libertas/if_spi.c
index f8c2898..06a46d7 100644
--- a/drivers/net/wireless/libertas/if_spi.c
+++ b/drivers/net/wireless/libertas/if_spi.c
@@ -43,8 +43,8 @@
 	struct lbs_private		*priv;
 	struct libertas_spi_platform_data *pdata;
 
-	char				helper_fw_name[FIRMWARE_NAME_MAX];
-	char				main_fw_name[FIRMWARE_NAME_MAX];
+	char				helper_fw_name[IF_SPI_FW_NAME_MAX];
+	char				main_fw_name[IF_SPI_FW_NAME_MAX];
 
 	/* The card ID and card revision, as reported by the hardware. */
 	u16				card_id;
@@ -1019,9 +1019,9 @@
 		lbs_pr_err("Unsupported chip_id: 0x%02x\n", card_id);
 		return -EAFNOSUPPORT;
 	}
-	snprintf(helper_fw, FIRMWARE_NAME_MAX, "libertas/gspi%d_hlp.bin",
+	snprintf(helper_fw, IF_SPI_FW_NAME_MAX, "libertas/gspi%d_hlp.bin",
 		 chip_id_to_device_name[i].name);
-	snprintf(main_fw, FIRMWARE_NAME_MAX, "libertas/gspi%d.bin",
+	snprintf(main_fw, IF_SPI_FW_NAME_MAX, "libertas/gspi%d.bin",
 		 chip_id_to_device_name[i].name);
 	return 0;
 }
diff --git a/drivers/net/wireless/libertas/if_spi.h b/drivers/net/wireless/libertas/if_spi.h
index 2103869..f87eec4 100644
--- a/drivers/net/wireless/libertas/if_spi.h
+++ b/drivers/net/wireless/libertas/if_spi.h
@@ -22,6 +22,9 @@
 #define IF_SPI_CMD_BUF_SIZE 2400
 
 /***************** Firmware *****************/
+
+#define IF_SPI_FW_NAME_MAX 30
+
 struct chip_ident {
 	u16 chip_id;
 	u16 name;
diff --git a/drivers/net/wireless/libertas/if_usb.c b/drivers/net/wireless/libertas/if_usb.c
index d649cae..1844c5a 100644
--- a/drivers/net/wireless/libertas/if_usb.c
+++ b/drivers/net/wireless/libertas/if_usb.c
@@ -61,11 +61,9 @@
 {
 	struct lbs_private *priv = to_net_dev(dev)->ml_priv;
 	struct if_usb_card *cardp = priv->card;
-	char fwname[FIRMWARE_NAME_MAX];
 	int ret;
 
-	sscanf(buf, "%29s", fwname); /* FIRMWARE_NAME_MAX - 1 = 29 */
-	ret = if_usb_prog_firmware(cardp, fwname, BOOT_CMD_UPDATE_FW);
+	ret = if_usb_prog_firmware(cardp, buf, BOOT_CMD_UPDATE_FW);
 	if (ret == 0)
 		return count;
 
@@ -88,11 +86,9 @@
 {
 	struct lbs_private *priv = to_net_dev(dev)->ml_priv;
 	struct if_usb_card *cardp = priv->card;
-	char fwname[FIRMWARE_NAME_MAX];
 	int ret;
 
-	sscanf(buf, "%29s", fwname); /* FIRMWARE_NAME_MAX - 1 = 29 */
-	ret = if_usb_prog_firmware(cardp, fwname, BOOT_CMD_UPDATE_BOOT2);
+	ret = if_usb_prog_firmware(cardp, buf, BOOT_CMD_UPDATE_BOOT2);
 	if (ret == 0)
 		return count;
 
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index f673253..8d88dae 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -1212,7 +1212,7 @@
 	}
 
 	info = netdev_priv(netdev);
-	dev->dev.driver_data = info;
+	dev_set_drvdata(&dev->dev, info);
 
 	err = register_netdev(info->netdev);
 	if (err) {
@@ -1233,7 +1233,7 @@
 
  fail:
 	free_netdev(netdev);
-	dev->dev.driver_data = NULL;
+	dev_set_drvdata(&dev->dev, NULL);
 	return err;
 }
 
@@ -1275,7 +1275,7 @@
  */
 static int netfront_resume(struct xenbus_device *dev)
 {
-	struct netfront_info *info = dev->dev.driver_data;
+	struct netfront_info *info = dev_get_drvdata(&dev->dev);
 
 	dev_dbg(&dev->dev, "%s\n", dev->nodename);
 
@@ -1600,7 +1600,7 @@
 static void backend_changed(struct xenbus_device *dev,
 			    enum xenbus_state backend_state)
 {
-	struct netfront_info *np = dev->dev.driver_data;
+	struct netfront_info *np = dev_get_drvdata(&dev->dev);
 	struct net_device *netdev = np->netdev;
 
 	dev_dbg(&dev->dev, "%s\n", xenbus_strstate(backend_state));
@@ -1774,7 +1774,7 @@
 
 static int __devexit xennet_remove(struct xenbus_device *dev)
 {
-	struct netfront_info *info = dev->dev.driver_data;
+	struct netfront_info *info = dev_get_drvdata(&dev->dev);
 
 	dev_dbg(&dev->dev, "%s\n", dev->nodename);
 
diff --git a/drivers/parisc/eisa.c b/drivers/parisc/eisa.c
index f415fdd..5b89f40 100644
--- a/drivers/parisc/eisa.c
+++ b/drivers/parisc/eisa.c
@@ -373,7 +373,7 @@
 	if (result >= 0) {
 		/* FIXME : Don't enumerate the bus twice. */
 		eisa_dev.root.dev = &dev->dev;
-		dev->dev.driver_data = &eisa_dev.root;
+		dev_set_drvdata(&dev->dev, &eisa_dev.root);
 		eisa_dev.root.bus_base_addr = 0;
 		eisa_dev.root.res = &eisa_dev.hba.io_space;
 		eisa_dev.root.slots = result;
diff --git a/drivers/parisc/sba_iommu.c b/drivers/parisc/sba_iommu.c
index e5999c4..d46dd57 100644
--- a/drivers/parisc/sba_iommu.c
+++ b/drivers/parisc/sba_iommu.c
@@ -2010,7 +2010,7 @@
 void * sba_get_iommu(struct parisc_device *pci_hba)
 {
 	struct parisc_device *sba_dev = parisc_parent(pci_hba);
-	struct sba_device *sba = sba_dev->dev.driver_data;
+	struct sba_device *sba = dev_get_drvdata(&sba_dev->dev);
 	char t = sba_dev->id.hw_type;
 	int iocnum = (pci_hba->hw_path >> 3);	/* rope # */
 
@@ -2031,7 +2031,7 @@
 void sba_directed_lmmio(struct parisc_device *pci_hba, struct resource *r)
 {
 	struct parisc_device *sba_dev = parisc_parent(pci_hba);
-	struct sba_device *sba = sba_dev->dev.driver_data;
+	struct sba_device *sba = dev_get_drvdata(&sba_dev->dev);
 	char t = sba_dev->id.hw_type;
 	int i;
 	int rope = (pci_hba->hw_path & (ROPES_PER_IOC-1));  /* rope # */
@@ -2073,7 +2073,7 @@
 void sba_distributed_lmmio(struct parisc_device *pci_hba, struct resource *r )
 {
 	struct parisc_device *sba_dev = parisc_parent(pci_hba);
-	struct sba_device *sba = sba_dev->dev.driver_data;
+	struct sba_device *sba = dev_get_drvdata(&sba_dev->dev);
 	char t = sba_dev->id.hw_type;
 	int base, size;
 	int rope = (pci_hba->hw_path & (ROPES_PER_IOC-1));  /* rope # */
diff --git a/drivers/parport/parport_gsc.c b/drivers/parport/parport_gsc.c
index ea31a45..5d6de38 100644
--- a/drivers/parport/parport_gsc.c
+++ b/drivers/parport/parport_gsc.c
@@ -376,14 +376,14 @@
 			/* PARPORT_IRQ_NONE */ PARPORT_DMA_NONE, NULL);
 	if (p)
 		parport_count++;
-	dev->dev.driver_data = p;
+	dev_set_drvdata(&dev->dev, p);
 
 	return 0;
 }
 
 static int __devexit parport_remove_chip(struct parisc_device *dev)
 {
-	struct parport *p = dev->dev.driver_data;
+	struct parport *p = dev_get_drvdata(&dev->dev);
 	if (p) {
 		struct parport_gsc_private *priv = p->private_data;
 		struct parport_operations *ops = p->ops;
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 1a91bf9..07bbb9b 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -24,6 +24,11 @@
 #include <asm/setup.h>
 #include "pci.h"
 
+const char *pci_power_names[] = {
+	"error", "D0", "D1", "D2", "D3hot", "D3cold", "unknown",
+};
+EXPORT_SYMBOL_GPL(pci_power_names);
+
 unsigned int pci_pm_d3_delay = PCI_PM_D3_WAIT;
 
 #ifdef CONFIG_PCI_DOMAINS
diff --git a/drivers/pci/pcie/portdrv_core.c b/drivers/pci/pcie/portdrv_core.c
index e399825..13ffdc3 100644
--- a/drivers/pci/pcie/portdrv_core.c
+++ b/drivers/pci/pcie/portdrv_core.c
@@ -275,7 +275,7 @@
 	memset(device, 0, sizeof(struct device));
 	device->bus = &pcie_port_bus_type;
 	device->driver = NULL;
-	device->driver_data = NULL;
+	dev_set_drvdata(device, NULL);
 	device->release = release_pcie_device;	/* callback to free pcie dev */
 	dev_set_name(device, "%s:pcie%02x",
 		 pci_name(parent), get_descriptor_id(port_type, service_type));
diff --git a/drivers/pcmcia/ds.c b/drivers/pcmcia/ds.c
index 47cab31..304ff6d 100644
--- a/drivers/pcmcia/ds.c
+++ b/drivers/pcmcia/ds.c
@@ -394,7 +394,7 @@
 	p_drv = to_pcmcia_drv(dev->driver);
 	s = p_dev->socket;
 
-	/* The PCMCIA code passes the match data in via dev->driver_data
+	/* The PCMCIA code passes the match data in via dev_set_drvdata(dev)
 	 * which is an ugly hack. Once the driver probe is called it may
 	 * and often will overwrite the match data so we must save it first
 	 *
@@ -404,7 +404,7 @@
 	 * call which will then check whether there are two
 	 * pseudo devices, and if not, add the second one.
 	 */
-	did = p_dev->dev.driver_data;
+	did = dev_get_drvdata(&p_dev->dev);
 
 	ds_dev_dbg(1, dev, "trying to bind to %s\n", p_drv->drv.name);
 
@@ -499,7 +499,7 @@
 	 * pseudo multi-function card, we need to unbind
 	 * all devices
 	 */
-	did = p_dev->dev.driver_data;
+	did = dev_get_drvdata(&p_dev->dev);
 	if (did && (did->match_flags & PCMCIA_DEV_ID_MATCH_DEVICE_NO) &&
 	    (p_dev->socket->device_count != 0) &&
 	    (p_dev->device_no == 0))
@@ -828,7 +828,6 @@
 {
 	struct pcmcia_socket *s = dev->socket;
 	const struct firmware *fw;
-	char path[FIRMWARE_NAME_MAX];
 	int ret = -ENOMEM;
 	int no_funcs;
 	int old_funcs;
@@ -839,16 +838,7 @@
 
 	ds_dev_dbg(1, &dev->dev, "trying to load CIS file %s\n", filename);
 
-	if (strlen(filename) > (FIRMWARE_NAME_MAX - 1)) {
-		dev_printk(KERN_WARNING, &dev->dev,
-			   "pcmcia: CIS filename is too long [%s]\n",
-			   filename);
-		return -EINVAL;
-	}
-
-	snprintf(path, sizeof(path), "%s", filename);
-
-	if (request_firmware(&fw, path, &dev->dev) == 0) {
+	if (request_firmware(&fw, filename, &dev->dev) == 0) {
 		if (fw->size >= CISTPL_MAX_CIS_SIZE) {
 			ret = -EINVAL;
 			dev_printk(KERN_ERR, &dev->dev,
@@ -988,7 +978,7 @@
 			return 0;
 	}
 
-	dev->dev.driver_data = (void *) did;
+	dev_set_drvdata(&dev->dev, did);
 
 	return 1;
 }
diff --git a/drivers/ps3/ps3-sys-manager.c b/drivers/ps3/ps3-sys-manager.c
index f17513d..88cb740 100644
--- a/drivers/ps3/ps3-sys-manager.c
+++ b/drivers/ps3/ps3-sys-manager.c
@@ -706,7 +706,7 @@
 	ps3_vuart_read_async(dev, PS3_SM_RX_MSG_LEN_MIN);
 }
 
-static int ps3_sys_manager_probe(struct ps3_system_bus_device *dev)
+static int __devinit ps3_sys_manager_probe(struct ps3_system_bus_device *dev)
 {
 	int result;
 	struct ps3_sys_manager_ops ops;
diff --git a/drivers/ps3/ps3av.c b/drivers/ps3/ps3av.c
index 235e87f..e82d8c9 100644
--- a/drivers/ps3/ps3av.c
+++ b/drivers/ps3/ps3av.c
@@ -80,12 +80,12 @@
 	{     0, }, /* auto */
 	{YUV444, XRGB, PS3AV_CMD_VIDEO_VID_480I,       A_N,  720,  480},
 	{YUV444, XRGB, PS3AV_CMD_VIDEO_VID_480P,       A_N,  720,  480},
-	{YUV444, XRGB, PS3AV_CMD_VIDEO_VID_720P_60HZ,  A_N, 1280,  720},
+	{YUV444, XRGB, PS3AV_CMD_VIDEO_VID_720P_60HZ,  A_W, 1280,  720},
 	{YUV444, XRGB, PS3AV_CMD_VIDEO_VID_1080I_60HZ, A_W, 1920, 1080},
 	{YUV444, XRGB, PS3AV_CMD_VIDEO_VID_1080P_60HZ, A_W, 1920, 1080},
 	{YUV444, XRGB, PS3AV_CMD_VIDEO_VID_576I,       A_N,  720,  576},
 	{YUV444, XRGB, PS3AV_CMD_VIDEO_VID_576P,       A_N,  720,  576},
-	{YUV444, XRGB, PS3AV_CMD_VIDEO_VID_720P_50HZ,  A_N, 1280,  720},
+	{YUV444, XRGB, PS3AV_CMD_VIDEO_VID_720P_50HZ,  A_W, 1280,  720},
 	{YUV444, XRGB, PS3AV_CMD_VIDEO_VID_1080I_50HZ, A_W, 1920, 1080},
 	{YUV444, XRGB, PS3AV_CMD_VIDEO_VID_1080P_50HZ, A_W, 1920, 1080},
 	{  RGB8, XRGB, PS3AV_CMD_VIDEO_VID_WXGA,       A_W, 1280,  768},
@@ -937,7 +937,7 @@
 
 EXPORT_SYMBOL_GPL(ps3av_audio_mute);
 
-static int ps3av_probe(struct ps3_system_bus_device *dev)
+static int __devinit ps3av_probe(struct ps3_system_bus_device *dev)
 {
 	int res;
 	int id;
@@ -1048,7 +1048,7 @@
 	.shutdown = ps3av_shutdown,
 };
 
-static int ps3av_module_init(void)
+static int __init ps3av_module_init(void)
 {
 	int error;
 
diff --git a/drivers/ps3/ps3av_cmd.c b/drivers/ps3/ps3av_cmd.c
index 716596e..f555fed 100644
--- a/drivers/ps3/ps3av_cmd.c
+++ b/drivers/ps3/ps3av_cmd.c
@@ -21,9 +21,10 @@
 #include <linux/module.h>
 #include <linux/kernel.h>
 #include <linux/delay.h>
+
 #include <asm/ps3av.h>
-#include <asm/ps3fb.h>
 #include <asm/ps3.h>
+#include <asm/ps3gpu.h>
 
 #include "vuart.h"
 
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index 442bb98..e5b84db 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -5,8 +5,7 @@
  *		    Carsten Otte <Cotte@de.ibm.com>
  *		    Martin Schwidefsky <schwidefsky@de.ibm.com>
  * Bugreports.to..: <Linux390@de.ibm.com>
- * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999-2001
- *
+ * Copyright IBM Corp. 1999, 2009
  */
 
 #define KMSG_COMPONENT "dasd"
@@ -61,6 +60,7 @@
 static void dasd_device_tasklet(struct dasd_device *);
 static void dasd_block_tasklet(struct dasd_block *);
 static void do_kick_device(struct work_struct *);
+static void do_restore_device(struct work_struct *);
 static void dasd_return_cqr_cb(struct dasd_ccw_req *, void *);
 static void dasd_device_timeout(unsigned long);
 static void dasd_block_timeout(unsigned long);
@@ -109,6 +109,7 @@
 	device->timer.function = dasd_device_timeout;
 	device->timer.data = (unsigned long) device;
 	INIT_WORK(&device->kick_work, do_kick_device);
+	INIT_WORK(&device->restore_device, do_restore_device);
 	device->state = DASD_STATE_NEW;
 	device->target = DASD_STATE_NEW;
 
@@ -512,6 +513,25 @@
 }
 
 /*
+ * dasd_restore_device will schedule a call to do_restore_device to the kernel
+ * event daemon.
+ */
+static void do_restore_device(struct work_struct *work)
+{
+	struct dasd_device *device = container_of(work, struct dasd_device,
+						  restore_device);
+	device->cdev->drv->restore(device->cdev);
+	dasd_put_device(device);
+}
+
+void dasd_restore_device(struct dasd_device *device)
+{
+	dasd_get_device(device);
+	/* queue call to dasd_restore_device to the kernel event daemon. */
+	schedule_work(&device->restore_device);
+}
+
+/*
  * Set the target state for a device and starts the state change.
  */
 void dasd_set_target_state(struct dasd_device *device, int target)
@@ -908,6 +928,12 @@
 		DBF_DEV_EVENT(DBF_DEBUG, device, "%s",
 			      "start_IO: -EIO device gone, retry");
 		break;
+	case -EINVAL:
+		/* most likely caused in power management context */
+		DBF_DEV_EVENT(DBF_DEBUG, device, "%s",
+			      "start_IO: -EINVAL device currently "
+			      "not accessible");
+		break;
 	default:
 		/* internal error 11 - unknown rc */
 		snprintf(errorstring, ERRORLENGTH, "11 %d", rc);
@@ -2400,6 +2426,12 @@
 	case CIO_OPER:
 		/* FIXME: add a sanity check. */
 		device->stopped &= ~DASD_STOPPED_DC_WAIT;
+		if (device->stopped & DASD_UNRESUMED_PM) {
+			device->stopped &= ~DASD_UNRESUMED_PM;
+			dasd_restore_device(device);
+			ret = 1;
+			break;
+		}
 		dasd_schedule_device_bh(device);
 		if (device->block)
 			dasd_schedule_block_bh(device->block);
@@ -2410,6 +2442,79 @@
 	return ret;
 }
 
+int dasd_generic_pm_freeze(struct ccw_device *cdev)
+{
+	struct dasd_ccw_req *cqr, *n;
+	int rc;
+	struct list_head freeze_queue;
+	struct dasd_device *device = dasd_device_from_cdev(cdev);
+
+	if (IS_ERR(device))
+		return PTR_ERR(device);
+	/* disallow new I/O  */
+	device->stopped |= DASD_STOPPED_PM;
+	/* clear active requests */
+	INIT_LIST_HEAD(&freeze_queue);
+	spin_lock_irq(get_ccwdev_lock(cdev));
+	rc = 0;
+	list_for_each_entry_safe(cqr, n, &device->ccw_queue, devlist) {
+		/* Check status and move request to flush_queue */
+		if (cqr->status == DASD_CQR_IN_IO) {
+			rc = device->discipline->term_IO(cqr);
+			if (rc) {
+				/* unable to terminate request */
+				dev_err(&device->cdev->dev,
+					"Unable to terminate request %p "
+					"on suspend\n", cqr);
+				spin_unlock_irq(get_ccwdev_lock(cdev));
+				dasd_put_device(device);
+				return rc;
+			}
+		}
+		list_move_tail(&cqr->devlist, &freeze_queue);
+	}
+
+	spin_unlock_irq(get_ccwdev_lock(cdev));
+
+	list_for_each_entry_safe(cqr, n, &freeze_queue, devlist) {
+		wait_event(dasd_flush_wq,
+			   (cqr->status != DASD_CQR_CLEAR_PENDING));
+		if (cqr->status == DASD_CQR_CLEARED)
+			cqr->status = DASD_CQR_QUEUED;
+	}
+	/* move freeze_queue to start of the ccw_queue */
+	spin_lock_irq(get_ccwdev_lock(cdev));
+	list_splice_tail(&freeze_queue, &device->ccw_queue);
+	spin_unlock_irq(get_ccwdev_lock(cdev));
+
+	if (device->discipline->freeze)
+		rc = device->discipline->freeze(device);
+
+	dasd_put_device(device);
+	return rc;
+}
+EXPORT_SYMBOL_GPL(dasd_generic_pm_freeze);
+
+int dasd_generic_restore_device(struct ccw_device *cdev)
+{
+	struct dasd_device *device = dasd_device_from_cdev(cdev);
+	int rc = 0;
+
+	if (IS_ERR(device))
+		return PTR_ERR(device);
+
+	dasd_schedule_device_bh(device);
+	if (device->block)
+		dasd_schedule_block_bh(device->block);
+
+	if (device->discipline->restore)
+		rc = device->discipline->restore(device);
+
+	dasd_put_device(device);
+	return rc;
+}
+EXPORT_SYMBOL_GPL(dasd_generic_restore_device);
+
 static struct dasd_ccw_req *dasd_generic_build_rdc(struct dasd_device *device,
 						   void *rdc_buffer,
 						   int rdc_buffer_size,
diff --git a/drivers/s390/block/dasd_devmap.c b/drivers/s390/block/dasd_devmap.c
index e77666c..4cac5b5 100644
--- a/drivers/s390/block/dasd_devmap.c
+++ b/drivers/s390/block/dasd_devmap.c
@@ -1098,6 +1098,7 @@
 	spin_unlock(&dasd_devmap_lock);
 	return 0;
 }
+EXPORT_SYMBOL_GPL(dasd_get_uid);
 
 /*
  * Register the given device unique identifier into devmap struct.
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index cf0cfdb..1c28ec3 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -5,10 +5,9 @@
  *		    Carsten Otte <Cotte@de.ibm.com>
  *		    Martin Schwidefsky <schwidefsky@de.ibm.com>
  * Bugreports.to..: <Linux390@de.ibm.com>
- * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999,2000
+ * Copyright IBM Corp. 1999, 2009
  * EMC Symmetrix ioctl Copyright EMC Corporation, 2008
  * Author.........: Nigel Hislop <hislop_nigel@emc.com>
- *
  */
 
 #define KMSG_COMPONENT "dasd"
@@ -104,17 +103,6 @@
 	return dasd_generic_set_online(cdev, &dasd_eckd_discipline);
 }
 
-static struct ccw_driver dasd_eckd_driver = {
-	.name        = "dasd-eckd",
-	.owner       = THIS_MODULE,
-	.ids         = dasd_eckd_ids,
-	.probe       = dasd_eckd_probe,
-	.remove      = dasd_generic_remove,
-	.set_offline = dasd_generic_set_offline,
-	.set_online  = dasd_eckd_set_online,
-	.notify      = dasd_generic_notify,
-};
-
 static const int sizes_trk0[] = { 28, 148, 84 };
 #define LABEL_SIZE 140
 
@@ -3236,6 +3224,98 @@
 		dasd_eckd_dump_sense_ccw(device, req, irb);
 }
 
+int dasd_eckd_pm_freeze(struct dasd_device *device)
+{
+	/*
+	 * the device should be disconnected from our LCU structure
+	 * on restore we will reconnect it and reread LCU specific
+	 * information like PAV support that might have changed
+	 */
+	dasd_alias_remove_device(device);
+	dasd_alias_disconnect_device_from_lcu(device);
+
+	return 0;
+}
+
+int dasd_eckd_restore_device(struct dasd_device *device)
+{
+	struct dasd_eckd_private *private;
+	int is_known, rc;
+	struct dasd_uid temp_uid;
+
+	/* allow new IO again */
+	device->stopped &= ~DASD_STOPPED_PM;
+
+	private = (struct dasd_eckd_private *) device->private;
+
+	/* Read Configuration Data */
+	rc = dasd_eckd_read_conf(device);
+	if (rc)
+		goto out_err;
+
+	/* Generate device unique id and register in devmap */
+	rc = dasd_eckd_generate_uid(device, &private->uid);
+	dasd_get_uid(device->cdev, &temp_uid);
+	if (memcmp(&private->uid, &temp_uid, sizeof(struct dasd_uid)) != 0)
+		dev_err(&device->cdev->dev, "The UID of the DASD has changed\n");
+	if (rc)
+		goto out_err;
+	dasd_set_uid(device->cdev, &private->uid);
+
+	/* register lcu with alias handling, enable PAV if this is a new lcu */
+	is_known = dasd_alias_make_device_known_to_lcu(device);
+	if (is_known < 0)
+		return is_known;
+	if (!is_known) {
+		/* new lcu found */
+		rc = dasd_eckd_validate_server(device); /* will switch pav on */
+		if (rc)
+			goto out_err;
+	}
+
+	/* Read Feature Codes */
+	rc = dasd_eckd_read_features(device);
+	if (rc)
+		goto out_err;
+
+	/* Read Device Characteristics */
+	memset(&private->rdc_data, 0, sizeof(private->rdc_data));
+	rc = dasd_generic_read_dev_chars(device, "ECKD",
+					 &private->rdc_data, 64);
+	if (rc) {
+		DBF_EVENT(DBF_WARNING,
+			  "Read device characteristics failed, rc=%d for "
+			  "device: %s", rc, dev_name(&device->cdev->dev));
+		goto out_err;
+	}
+
+	/* add device to alias management */
+	dasd_alias_add_device(device);
+
+	return 0;
+
+out_err:
+	/*
+	 * if the resume failed for the DASD we put it in
+	 * an UNRESUMED stop state
+	 */
+	device->stopped |= DASD_UNRESUMED_PM;
+	return 0;
+}
+
+static struct ccw_driver dasd_eckd_driver = {
+	.name	     = "dasd-eckd",
+	.owner	     = THIS_MODULE,
+	.ids	     = dasd_eckd_ids,
+	.probe	     = dasd_eckd_probe,
+	.remove      = dasd_generic_remove,
+	.set_offline = dasd_generic_set_offline,
+	.set_online  = dasd_eckd_set_online,
+	.notify      = dasd_generic_notify,
+	.freeze      = dasd_generic_pm_freeze,
+	.thaw	     = dasd_generic_restore_device,
+	.restore     = dasd_generic_restore_device,
+};
 
 /*
  * max_blocks is dependent on the amount of storage that is available
@@ -3274,6 +3354,8 @@
 	.dump_sense_dbf = dasd_eckd_dump_sense_dbf,
 	.fill_info = dasd_eckd_fill_info,
 	.ioctl = dasd_eckd_ioctl,
+	.freeze = dasd_eckd_pm_freeze,
+	.restore = dasd_eckd_restore_device,
 };
 
 static int __init
diff --git a/drivers/s390/block/dasd_fba.c b/drivers/s390/block/dasd_fba.c
index 597c6ff..e21ee73 100644
--- a/drivers/s390/block/dasd_fba.c
+++ b/drivers/s390/block/dasd_fba.c
@@ -2,8 +2,7 @@
  * File...........: linux/drivers/s390/block/dasd_fba.c
  * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
  * Bugreports.to..: <Linux390@de.ibm.com>
- * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999,2000
- *
+ * Copyright IBM Corp. 1999, 2009
  */
 
 #define KMSG_COMPONENT "dasd"
@@ -75,6 +74,9 @@
 	.set_offline = dasd_generic_set_offline,
 	.set_online  = dasd_fba_set_online,
 	.notify      = dasd_generic_notify,
+	.freeze      = dasd_generic_pm_freeze,
+	.thaw	     = dasd_generic_restore_device,
+	.restore     = dasd_generic_restore_device,
 };
 
 static void
diff --git a/drivers/s390/block/dasd_int.h b/drivers/s390/block/dasd_int.h
index f97ceb7..fd63b2f 100644
--- a/drivers/s390/block/dasd_int.h
+++ b/drivers/s390/block/dasd_int.h
@@ -4,8 +4,7 @@
  *		    Horst Hummel <Horst.Hummel@de.ibm.com>
  *		    Martin Schwidefsky <schwidefsky@de.ibm.com>
  * Bugreports.to..: <Linux390@de.ibm.com>
- * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999,2000
- *
+ * Copyright IBM Corp. 1999, 2009
  */
 
 #ifndef DASD_INT_H
@@ -295,6 +294,10 @@
 	int (*fill_geometry) (struct dasd_block *, struct hd_geometry *);
 	int (*fill_info) (struct dasd_device *, struct dasd_information2_t *);
 	int (*ioctl) (struct dasd_block *, unsigned int, void __user *);
+
+	/* suspend/resume functions */
+	int (*freeze) (struct dasd_device *);
+	int (*restore) (struct dasd_device *);
 };
 
 extern struct dasd_discipline *dasd_diag_discipline_pointer;
@@ -367,6 +370,7 @@
 	atomic_t tasklet_scheduled;
         struct tasklet_struct tasklet;
 	struct work_struct kick_work;
+	struct work_struct restore_device;
 	struct timer_list timer;
 
 	debug_info_t *debug_area;
@@ -410,6 +414,8 @@
 #define DASD_STOPPED_PENDING 4         /* long busy */
 #define DASD_STOPPED_DC_WAIT 8         /* disconnected, wait */
 #define DASD_STOPPED_SU      16        /* summary unit check handling */
+#define DASD_STOPPED_PM      32        /* pm state transition */
+#define DASD_UNRESUMED_PM    64        /* pm resume failed state */
 
 /* per device flags */
 #define DASD_FLAG_OFFLINE	3	/* device is in offline processing */
@@ -556,6 +562,7 @@
 void dasd_enable_device(struct dasd_device *);
 void dasd_set_target_state(struct dasd_device *, int);
 void dasd_kick_device(struct dasd_device *);
+void dasd_restore_device(struct dasd_device *);
 
 void dasd_add_request_head(struct dasd_ccw_req *);
 void dasd_add_request_tail(struct dasd_ccw_req *);
@@ -578,6 +585,8 @@
 int dasd_generic_set_offline (struct ccw_device *cdev);
 int dasd_generic_notify(struct ccw_device *, int);
 void dasd_generic_handle_state_change(struct dasd_device *);
+int dasd_generic_pm_freeze(struct ccw_device *);
+int dasd_generic_restore_device(struct ccw_device *);
 
 int dasd_generic_read_dev_chars(struct dasd_device *, char *, void *, int);
 char *dasd_get_sense(struct irb *);
diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c
index b21caf1..016f9e9 100644
--- a/drivers/s390/block/dcssblk.c
+++ b/drivers/s390/block/dcssblk.c
@@ -14,10 +14,11 @@
 #include <linux/init.h>
 #include <linux/slab.h>
 #include <linux/blkdev.h>
-#include <asm/extmem.h>
-#include <asm/io.h>
 #include <linux/completion.h>
 #include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <asm/extmem.h>
+#include <asm/io.h>
 
 #define DCSSBLK_NAME "dcssblk"
 #define DCSSBLK_MINORS_PER_DISK 1
@@ -940,11 +941,94 @@
 }
 
 /*
+ * Suspend / Resume
+ */
+static int dcssblk_freeze(struct device *dev)
+{
+	struct dcssblk_dev_info *dev_info;
+	int rc = 0;
+
+	list_for_each_entry(dev_info, &dcssblk_devices, lh) {
+		switch (dev_info->segment_type) {
+			case SEG_TYPE_SR:
+			case SEG_TYPE_ER:
+			case SEG_TYPE_SC:
+				if (!dev_info->is_shared)
+					rc = -EINVAL;
+				break;
+			default:
+				rc = -EINVAL;
+				break;
+		}
+		if (rc)
+			break;
+	}
+	if (rc)
+		pr_err("Suspend failed because device %s is writeable.\n",
+		       dev_info->segment_name);
+	return rc;
+}
+
+static int dcssblk_restore(struct device *dev)
+{
+	struct dcssblk_dev_info *dev_info;
+	struct segment_info *entry;
+	unsigned long start, end;
+	int rc = 0;
+
+	list_for_each_entry(dev_info, &dcssblk_devices, lh) {
+		list_for_each_entry(entry, &dev_info->seg_list, lh) {
+			segment_unload(entry->segment_name);
+			rc = segment_load(entry->segment_name, SEGMENT_SHARED,
+					  &start, &end);
+			if (rc < 0) {
+// TODO in_use check ?
+				segment_warning(rc, entry->segment_name);
+				goto out_panic;
+			}
+			if (start != entry->start || end != entry->end) {
+				pr_err("Mismatch of start / end address after "
+				       "resuming device %s\n",
+				       entry->segment_name);
+				goto out_panic;
+			}
+		}
+	}
+	return 0;
+out_panic:
+	panic("fatal dcssblk resume error\n");
+}
+
+static int dcssblk_thaw(struct device *dev)
+{
+	return 0;
+}
+
+static struct dev_pm_ops dcssblk_pm_ops = {
+	.freeze		= dcssblk_freeze,
+	.thaw		= dcssblk_thaw,
+	.restore	= dcssblk_restore,
+};
+
+static struct platform_driver dcssblk_pdrv = {
+	.driver = {
+		.name	= "dcssblk",
+		.owner	= THIS_MODULE,
+		.pm	= &dcssblk_pm_ops,
+	},
+};
+
+static struct platform_device *dcssblk_pdev;
+
+
+/*
  * The init/exit functions.
  */
 static void __exit
 dcssblk_exit(void)
 {
+	platform_device_unregister(dcssblk_pdev);
+	platform_driver_unregister(&dcssblk_pdrv);
 	root_device_unregister(dcssblk_root_dev);
 	unregister_blkdev(dcssblk_major, DCSSBLK_NAME);
 }
@@ -954,30 +1038,44 @@
 {
 	int rc;
 
+	rc = platform_driver_register(&dcssblk_pdrv);
+	if (rc)
+		return rc;
+
+	dcssblk_pdev = platform_device_register_simple("dcssblk", -1, NULL,
+							0);
+	if (IS_ERR(dcssblk_pdev)) {
+		rc = PTR_ERR(dcssblk_pdev);
+		goto out_pdrv;
+	}
+
 	dcssblk_root_dev = root_device_register("dcssblk");
-	if (IS_ERR(dcssblk_root_dev))
-		return PTR_ERR(dcssblk_root_dev);
+	if (IS_ERR(dcssblk_root_dev)) {
+		rc = PTR_ERR(dcssblk_root_dev);
+		goto out_pdev;
+	}
 	rc = device_create_file(dcssblk_root_dev, &dev_attr_add);
-	if (rc) {
-		root_device_unregister(dcssblk_root_dev);
-		return rc;
-	}
+	if (rc)
+		goto out_root;
 	rc = device_create_file(dcssblk_root_dev, &dev_attr_remove);
-	if (rc) {
-		root_device_unregister(dcssblk_root_dev);
-		return rc;
-	}
+	if (rc)
+		goto out_root;
 	rc = register_blkdev(0, DCSSBLK_NAME);
-	if (rc < 0) {
-		root_device_unregister(dcssblk_root_dev);
-		return rc;
-	}
+	if (rc < 0)
+		goto out_root;
 	dcssblk_major = rc;
 	init_rwsem(&dcssblk_devices_sem);
 
 	dcssblk_check_params();
-
 	return 0;
+
+out_root:
+	root_device_unregister(dcssblk_root_dev);
+out_pdev:
+	platform_device_unregister(dcssblk_pdev);
+out_pdrv:
+	platform_driver_unregister(&dcssblk_pdrv);
+	return rc;
 }
 
 module_init(dcssblk_init);
diff --git a/drivers/s390/block/xpram.c b/drivers/s390/block/xpram.c
index 0ae0c83..2e9e1ec 100644
--- a/drivers/s390/block/xpram.c
+++ b/drivers/s390/block/xpram.c
@@ -39,7 +39,10 @@
 #include <linux/hdreg.h>  /* HDIO_GETGEO */
 #include <linux/sysdev.h>
 #include <linux/bio.h>
+#include <linux/suspend.h>
+#include <linux/platform_device.h>
 #include <asm/uaccess.h>
+#include <asm/checksum.h>
 
 #define XPRAM_NAME	"xpram"
 #define XPRAM_DEVS	1	/* one partition */
@@ -48,6 +51,7 @@
 typedef struct {
 	unsigned int	size;		/* size of xpram segment in pages */
 	unsigned int	offset;		/* start page of xpram segment */
+	unsigned int	csum;		/* partition checksum for suspend */
 } xpram_device_t;
 
 static xpram_device_t xpram_devices[XPRAM_MAX_DEVS];
@@ -138,7 +142,7 @@
 /*
  * Check if xpram is available.
  */
-static int __init xpram_present(void)
+static int xpram_present(void)
 {
 	unsigned long mem_page;
 	int rc;
@@ -154,7 +158,7 @@
 /*
  * Return index of the last available xpram page.
  */
-static unsigned long __init xpram_highest_page_index(void)
+static unsigned long xpram_highest_page_index(void)
 {
 	unsigned int page_index, add_bit;
 	unsigned long mem_page;
@@ -383,6 +387,106 @@
 }
 
 /*
+ * Save checksums for all partitions.
+ */
+static int xpram_save_checksums(void)
+{
+	unsigned long mem_page;
+	int rc, i;
+
+	rc = 0;
+	mem_page = (unsigned long) __get_free_page(GFP_KERNEL);
+	if (!mem_page)
+		return -ENOMEM;
+	for (i = 0; i < xpram_devs; i++) {
+		rc = xpram_page_in(mem_page, xpram_devices[i].offset);
+		if (rc)
+			goto fail;
+		xpram_devices[i].csum = csum_partial((const void *) mem_page,
+						     PAGE_SIZE, 0);
+	}
+fail:
+	free_page(mem_page);
+	return rc ? -ENXIO : 0;
+}
+
+/*
+ * Verify checksums for all partitions.
+ */
+static int xpram_validate_checksums(void)
+{
+	unsigned long mem_page;
+	unsigned int csum;
+	int rc, i;
+
+	rc = 0;
+	mem_page = (unsigned long) __get_free_page(GFP_KERNEL);
+	if (!mem_page)
+		return -ENOMEM;
+	for (i = 0; i < xpram_devs; i++) {
+		rc = xpram_page_in(mem_page, xpram_devices[i].offset);
+		if (rc)
+			goto fail;
+		csum = csum_partial((const void *) mem_page, PAGE_SIZE, 0);
+		if (xpram_devices[i].csum != csum) {
+			rc = -EINVAL;
+			goto fail;
+		}
+	}
+fail:
+	free_page(mem_page);
+	return rc ? -ENXIO : 0;
+}
+
+/*
+ * Resume failed: Print error message and call panic.
+ */
+static void xpram_resume_error(const char *message)
+{
+	pr_err("Resume error: %s\n", message);
+	panic("xpram resume error\n");
+}
+
+/*
+ * Check if xpram setup changed between suspend and resume.
+ */
+static int xpram_restore(struct device *dev)
+{
+	if (!xpram_pages)
+		return 0;
+	if (xpram_present() != 0)
+		xpram_resume_error("xpram disappeared");
+	if (xpram_pages != xpram_highest_page_index() + 1)
+		xpram_resume_error("Size of xpram changed");
+	if (xpram_validate_checksums())
+		xpram_resume_error("Data of xpram changed");
+	return 0;
+}
+
+/*
+ * Save the necessary state (partition checksums) on suspend.
+ */
+static int xpram_freeze(struct device *dev)
+{
+	return xpram_save_checksums();
+}
+
+static struct dev_pm_ops xpram_pm_ops = {
+	.freeze		= xpram_freeze,
+	.restore	= xpram_restore,
+};
+
+static struct platform_driver xpram_pdrv = {
+	.driver = {
+		.name	= XPRAM_NAME,
+		.owner	= THIS_MODULE,
+		.pm	= &xpram_pm_ops,
+	},
+};
+
+static struct platform_device *xpram_pdev;
+
+/*
  * Finally, the init/exit functions.
  */
 static void __exit xpram_exit(void)
@@ -394,6 +498,8 @@
 		put_disk(xpram_disks[i]);
 	}
 	unregister_blkdev(XPRAM_MAJOR, XPRAM_NAME);
+	platform_device_unregister(xpram_pdev);
+	platform_driver_unregister(&xpram_pdrv);
 }
 
 static int __init xpram_init(void)
@@ -411,7 +517,24 @@
 	rc = xpram_setup_sizes(xpram_pages);
 	if (rc)
 		return rc;
-	return xpram_setup_blkdev();
+	rc = platform_driver_register(&xpram_pdrv);
+	if (rc)
+		return rc;
+	xpram_pdev = platform_device_register_simple(XPRAM_NAME, -1, NULL, 0);
+	if (IS_ERR(xpram_pdev)) {
+		rc = PTR_ERR(xpram_pdev);
+		goto fail_platform_driver_unregister;
+	}
+	rc = xpram_setup_blkdev();
+	if (rc)
+		goto fail_platform_device_unregister;
+	return 0;
+
+fail_platform_device_unregister:
+	platform_device_unregister(xpram_pdev);
+fail_platform_driver_unregister:
+	platform_driver_unregister(&xpram_pdrv);
+	return rc;
 }
 
 module_init(xpram_init);
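
Because the xpram contents are not part of the hibernation image, the driver records a checksum of each partition's first page at freeze time and refuses to resume silently if that data changed. Below is a stripped-down sketch of the same idea, using the generic crc32() helper instead of the arch-specific csum_partial() the driver calls; the foo_* names and the read helper are hypothetical:

/*
 * Sketch: detect altered device contents across hibernation by
 * checksumming a sample page on freeze and re-checking it on restore.
 * foo_read_first_page() is a hypothetical accessor used only here.
 */
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/crc32.h>
#include <linux/errno.h>

#define FOO_NDEVS	4

static u32 foo_csum[FOO_NDEVS];

static int foo_read_first_page(int i, void *page);	/* hypothetical */

static int foo_checksum_pass(int verify)
{
	unsigned long page;
	u32 csum;
	int i, rc = 0;

	page = __get_free_page(GFP_KERNEL);
	if (!page)
		return -ENOMEM;
	for (i = 0; i < FOO_NDEVS; i++) {
		rc = foo_read_first_page(i, (void *) page);
		if (rc)
			break;
		csum = crc32(0, (void *) page, PAGE_SIZE);
		if (!verify)
			foo_csum[i] = csum;	/* freeze: record */
		else if (foo_csum[i] != csum) {
			rc = -EINVAL;		/* restore: data changed */
			break;
		}
	}
	free_page(page);
	return rc;
}

Calling this with verify=0 from the freeze callback and verify=1 from restore mirrors xpram_save_checksums()/xpram_validate_checksums(); a non-zero return on restore is treated as fatal, which is why the driver ends up in xpram_resume_error().
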
diff --git a/drivers/s390/char/con3215.c b/drivers/s390/char/con3215.c
index 9ab06e0..04dc734 100644
--- a/drivers/s390/char/con3215.c
+++ b/drivers/s390/char/con3215.c
@@ -1,14 +1,12 @@
 /*
- *  drivers/s390/char/con3215.c
- *    3215 line mode terminal driver.
+ * 3215 line mode terminal driver.
  *
- *  S390 version
- *    Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
- *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
+ * Copyright IBM Corp. 1999, 2009
+ * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
  *
- *  Updated:
- *   Aug-2000: Added tab support
- *	       Dan Morrison, IBM Corporation (dmorriso@cse.buffalo.edu)
+ * Updated:
+ *  Aug-2000: Added tab support
+ *	      Dan Morrison, IBM Corporation <dmorriso@cse.buffalo.edu>
  */
 
 #include <linux/module.h>
@@ -56,6 +54,7 @@
 #define RAW3215_CLOSING	    32	      /* set while in close process */
 #define RAW3215_TIMER_RUNS  64	      /* set if the output delay timer is on */
 #define RAW3215_FLUSHING    128	      /* set to flush buffer (no delay) */
+#define RAW3215_FROZEN	    256	      /* set if 3215 is frozen for suspend */
 
 #define TAB_STOP_SIZE	    8	      /* tab stop size */
 
@@ -111,8 +110,8 @@
 /*
  * Get a request structure from the free list
  */
-static inline struct raw3215_req *
-raw3215_alloc_req(void) {
+static inline struct raw3215_req *raw3215_alloc_req(void)
+{
 	struct raw3215_req *req;
 	unsigned long flags;
 
@@ -126,8 +125,8 @@
 /*
  * Put a request structure back to the free list
  */
-static inline void
-raw3215_free_req(struct raw3215_req *req) {
+static inline void raw3215_free_req(struct raw3215_req *req)
+{
 	unsigned long flags;
 
 	if (req->type == RAW3215_FREE)
@@ -145,8 +144,7 @@
  * because a 3215 terminal won't accept a new read before the old one is
  * completed.
  */
-static void
-raw3215_mk_read_req(struct raw3215_info *raw)
+static void raw3215_mk_read_req(struct raw3215_info *raw)
 {
 	struct raw3215_req *req;
 	struct ccw1 *ccw;
@@ -174,8 +172,7 @@
  * buffer to the 3215 device. If a queued write exists it is replaced by
  * the new, probably lengthened request.
  */
-static void
-raw3215_mk_write_req(struct raw3215_info *raw)
+static void raw3215_mk_write_req(struct raw3215_info *raw)
 {
 	struct raw3215_req *req;
 	struct ccw1 *ccw;
@@ -251,8 +248,7 @@
 /*
  * Start a read or a write request
  */
-static void
-raw3215_start_io(struct raw3215_info *raw)
+static void raw3215_start_io(struct raw3215_info *raw)
 {
 	struct raw3215_req *req;
 	int res;
@@ -290,8 +286,7 @@
 /*
  * Function to start a delayed output after RAW3215_TIMEOUT seconds
  */
-static void
-raw3215_timeout(unsigned long __data)
+static void raw3215_timeout(unsigned long __data)
 {
 	struct raw3215_info *raw = (struct raw3215_info *) __data;
 	unsigned long flags;
@@ -300,8 +295,10 @@
 	if (raw->flags & RAW3215_TIMER_RUNS) {
 		del_timer(&raw->timer);
 		raw->flags &= ~RAW3215_TIMER_RUNS;
-		raw3215_mk_write_req(raw);
-		raw3215_start_io(raw);
+		if (!(raw->flags & RAW3215_FROZEN)) {
+			raw3215_mk_write_req(raw);
+			raw3215_start_io(raw);
+		}
 	}
 	spin_unlock_irqrestore(get_ccwdev_lock(raw->cdev), flags);
 }
@@ -312,10 +309,9 @@
  * amount of data is bigger than RAW3215_MIN_WRITE. If a write is not
  * done immediately a timer is started with a delay of RAW3215_TIMEOUT.
  */
-static inline void
-raw3215_try_io(struct raw3215_info *raw)
+static inline void raw3215_try_io(struct raw3215_info *raw)
 {
-	if (!(raw->flags & RAW3215_ACTIVE))
+	if (!(raw->flags & RAW3215_ACTIVE) || (raw->flags & RAW3215_FROZEN))
 		return;
 	if (raw->queued_read != NULL)
 		raw3215_start_io(raw);
@@ -359,8 +355,8 @@
 /*
  * Interrupt routine, called from common io layer
  */
-static void
-raw3215_irq(struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
+static void raw3215_irq(struct ccw_device *cdev, unsigned long intparm,
+			struct irb *irb)
 {
 	struct raw3215_info *raw;
 	struct raw3215_req *req;
@@ -368,7 +364,7 @@
 	int cstat, dstat;
 	int count;
 
-	raw = cdev->dev.driver_data;
+	raw = dev_get_drvdata(&cdev->dev);
 	req = (struct raw3215_req *) intparm;
 	cstat = irb->scsw.cmd.cstat;
 	dstat = irb->scsw.cmd.dstat;
@@ -459,14 +455,40 @@
 }
 
 /*
+ * Drop the oldest line from the output buffer.
+ */
+static void raw3215_drop_line(struct raw3215_info *raw)
+{
+	int ix;
+	char ch;
+
+	BUG_ON(raw->written != 0);
+	ix = (raw->head - raw->count) & (RAW3215_BUFFER_SIZE - 1);
+	while (raw->count > 0) {
+		ch = raw->buffer[ix];
+		ix = (ix + 1) & (RAW3215_BUFFER_SIZE - 1);
+		raw->count--;
+		if (ch == 0x15)
+			break;
+	}
+	raw->head = ix;
+}
+
+/*
  * Wait until length bytes are available in the output buffer.
  * Has to be called with the s390irq lock held. Can be called
  * disabled.
  */
-static void
-raw3215_make_room(struct raw3215_info *raw, unsigned int length)
+static void raw3215_make_room(struct raw3215_info *raw, unsigned int length)
 {
 	while (RAW3215_BUFFER_SIZE - raw->count < length) {
+		/* While console is frozen for suspend we have no other
+		 * choice but to drop message from the buffer to make
+		 * choice but to drop messages from the buffer to make
+		if (raw->flags & RAW3215_FROZEN) {
+			raw3215_drop_line(raw);
+			continue;
+		}
 		/* there might be a request pending */
 		raw->flags |= RAW3215_FLUSHING;
 		raw3215_mk_write_req(raw);
@@ -488,8 +510,8 @@
 /*
  * String write routine for 3215 devices
  */
-static void
-raw3215_write(struct raw3215_info *raw, const char *str, unsigned int length)
+static void raw3215_write(struct raw3215_info *raw, const char *str,
+			  unsigned int length)
 {
 	unsigned long flags;
 	int c, count;
@@ -529,8 +551,7 @@
 /*
  * Put character routine for 3215 devices
  */
-static void
-raw3215_putchar(struct raw3215_info *raw, unsigned char ch)
+static void raw3215_putchar(struct raw3215_info *raw, unsigned char ch)
 {
 	unsigned long flags;
 	unsigned int length, i;
@@ -566,8 +587,7 @@
  * Flush routine, it simply sets the flush flag and tries to start
  * pending IO.
  */
-static void
-raw3215_flush_buffer(struct raw3215_info *raw)
+static void raw3215_flush_buffer(struct raw3215_info *raw)
 {
 	unsigned long flags;
 
@@ -583,8 +603,7 @@
 /*
  * Fire up a 3215 device.
  */
-static int
-raw3215_startup(struct raw3215_info *raw)
+static int raw3215_startup(struct raw3215_info *raw)
 {
 	unsigned long flags;
 
@@ -602,8 +621,7 @@
 /*
  * Shutdown a 3215 device.
  */
-static void
-raw3215_shutdown(struct raw3215_info *raw)
+static void raw3215_shutdown(struct raw3215_info *raw)
 {
 	DECLARE_WAITQUEUE(wait, current);
 	unsigned long flags;
@@ -628,14 +646,13 @@
 	spin_unlock_irqrestore(get_ccwdev_lock(raw->cdev), flags);
 }
 
-static int
-raw3215_probe (struct ccw_device *cdev)
+static int raw3215_probe (struct ccw_device *cdev)
 {
 	struct raw3215_info *raw;
 	int line;
 
 	/* Console is special. */
-	if (raw3215[0] && (cdev->dev.driver_data == raw3215[0]))
+	if (raw3215[0] && (raw3215[0] == dev_get_drvdata(&cdev->dev)))
 		return 0;
 	raw = kmalloc(sizeof(struct raw3215_info) +
 		      RAW3215_INBUF_SIZE, GFP_KERNEL|GFP_DMA);
@@ -669,44 +686,41 @@
 	}
 	init_waitqueue_head(&raw->empty_wait);
 
-	cdev->dev.driver_data = raw;
+	dev_set_drvdata(&cdev->dev, raw);
 	cdev->handler = raw3215_irq;
 
 	return 0;
 }
 
-static void
-raw3215_remove (struct ccw_device *cdev)
+static void raw3215_remove (struct ccw_device *cdev)
 {
 	struct raw3215_info *raw;
 
 	ccw_device_set_offline(cdev);
-	raw = cdev->dev.driver_data;
+	raw = dev_get_drvdata(&cdev->dev);
 	if (raw) {
-		cdev->dev.driver_data = NULL;
+		dev_set_drvdata(&cdev->dev, NULL);
 		kfree(raw->buffer);
 		kfree(raw);
 	}
 }
 
-static int
-raw3215_set_online (struct ccw_device *cdev)
+static int raw3215_set_online (struct ccw_device *cdev)
 {
 	struct raw3215_info *raw;
 
-	raw = cdev->dev.driver_data;
+	raw = dev_get_drvdata(&cdev->dev);
 	if (!raw)
 		return -ENODEV;
 
 	return raw3215_startup(raw);
 }
 
-static int
-raw3215_set_offline (struct ccw_device *cdev)
+static int raw3215_set_offline (struct ccw_device *cdev)
 {
 	struct raw3215_info *raw;
 
-	raw = cdev->dev.driver_data;
+	raw = dev_get_drvdata(&cdev->dev);
 	if (!raw)
 		return -ENODEV;
 
@@ -715,6 +729,36 @@
 	return 0;
 }
 
+static int raw3215_pm_stop(struct ccw_device *cdev)
+{
+	struct raw3215_info *raw;
+	unsigned long flags;
+
+	/* Empty the output buffer, then prevent new I/O. */
+	raw = dev_get_drvdata(&cdev->dev);
+	spin_lock_irqsave(get_ccwdev_lock(raw->cdev), flags);
+	raw3215_make_room(raw, RAW3215_BUFFER_SIZE);
+	raw->flags |= RAW3215_FROZEN;
+	spin_unlock_irqrestore(get_ccwdev_lock(raw->cdev), flags);
+	return 0;
+}
+
+static int raw3215_pm_start(struct ccw_device *cdev)
+{
+	struct raw3215_info *raw;
+	unsigned long flags;
+
+	/* Allow I/O again and flush output buffer. */
+	raw = dev_get_drvdata(&cdev->dev);
+	spin_lock_irqsave(get_ccwdev_lock(raw->cdev), flags);
+	raw->flags &= ~RAW3215_FROZEN;
+	raw->flags |= RAW3215_FLUSHING;
+	raw3215_try_io(raw);
+	raw->flags &= ~RAW3215_FLUSHING;
+	spin_unlock_irqrestore(get_ccwdev_lock(raw->cdev), flags);
+	return 0;
+}
+
 static struct ccw_device_id raw3215_id[] = {
 	{ CCW_DEVICE(0x3215, 0) },
 	{ /* end of list */ },
@@ -728,14 +772,17 @@
 	.remove		= &raw3215_remove,
 	.set_online	= &raw3215_set_online,
 	.set_offline	= &raw3215_set_offline,
+	.freeze		= &raw3215_pm_stop,
+	.thaw		= &raw3215_pm_start,
+	.restore	= &raw3215_pm_start,
 };
 
 #ifdef CONFIG_TN3215_CONSOLE
 /*
  * Write a string to the 3215 console
  */
-static void
-con3215_write(struct console *co, const char *str, unsigned int count)
+static void con3215_write(struct console *co, const char *str,
+			  unsigned int count)
 {
 	struct raw3215_info *raw;
 	int i;
@@ -768,13 +815,17 @@
  * panic() calls con3215_flush through a panic_notifier
  * before the system enters a disabled, endless loop.
  */
-static void
-con3215_flush(void)
+static void con3215_flush(void)
 {
 	struct raw3215_info *raw;
 	unsigned long flags;
 
 	raw = raw3215[0];  /* console 3215 is the first one */
+	if (raw->flags & RAW3215_FROZEN)
+		/* The console is still frozen for suspend. */
+		if (ccw_device_force_console())
+			/* Forcing didn't work, no panic message .. */
+			return;
 	spin_lock_irqsave(get_ccwdev_lock(raw->cdev), flags);
 	raw3215_make_room(raw, RAW3215_BUFFER_SIZE);
 	spin_unlock_irqrestore(get_ccwdev_lock(raw->cdev), flags);
@@ -811,8 +862,7 @@
  * 3215 console initialization code called from console_init().
  * NOTE: This is called before kmalloc is available.
  */
-static int __init
-con3215_init(void)
+static int __init con3215_init(void)
 {
 	struct ccw_device *cdev;
 	struct raw3215_info *raw;
@@ -848,7 +898,7 @@
 	raw->buffer = (char *) alloc_bootmem_low(RAW3215_BUFFER_SIZE);
 	raw->inbuf = (char *) alloc_bootmem_low(RAW3215_INBUF_SIZE);
 	raw->cdev = cdev;
-	cdev->dev.driver_data = raw;
+	dev_set_drvdata(&cdev->dev, raw);
 	cdev->handler = raw3215_irq;
 
 	raw->flags |= RAW3215_FIXED;
@@ -875,8 +925,7 @@
  *
  * This routine is called whenever a 3215 tty is opened.
  */
-static int
-tty3215_open(struct tty_struct *tty, struct file * filp)
+static int tty3215_open(struct tty_struct *tty, struct file * filp)
 {
 	struct raw3215_info *raw;
 	int retval, line;
@@ -909,8 +958,7 @@
  * This routine is called when the 3215 tty is closed. We wait
  * for the remaining request to be completed. Then we clean up.
  */
-static void
-tty3215_close(struct tty_struct *tty, struct file * filp)
+static void tty3215_close(struct tty_struct *tty, struct file * filp)
 {
 	struct raw3215_info *raw;
 
@@ -927,8 +975,7 @@
 /*
  * Returns the amount of free space in the output buffer.
  */
-static int
-tty3215_write_room(struct tty_struct *tty)
+static int tty3215_write_room(struct tty_struct *tty)
 {
 	struct raw3215_info *raw;
 
@@ -944,9 +991,8 @@
 /*
  * String write routine for 3215 ttys
  */
-static int
-tty3215_write(struct tty_struct * tty,
-	      const unsigned char *buf, int count)
+static int tty3215_write(struct tty_struct * tty,
+			 const unsigned char *buf, int count)
 {
 	struct raw3215_info *raw;
 
@@ -960,8 +1006,7 @@
 /*
  * Put character routine for 3215 ttys
  */
-static int
-tty3215_put_char(struct tty_struct *tty, unsigned char ch)
+static int tty3215_put_char(struct tty_struct *tty, unsigned char ch)
 {
 	struct raw3215_info *raw;
 
@@ -972,16 +1017,14 @@
 	return 1;
 }
 
-static void
-tty3215_flush_chars(struct tty_struct *tty)
+static void tty3215_flush_chars(struct tty_struct *tty)
 {
 }
 
 /*
  * Returns the number of characters in the output buffer
  */
-static int
-tty3215_chars_in_buffer(struct tty_struct *tty)
+static int tty3215_chars_in_buffer(struct tty_struct *tty)
 {
 	struct raw3215_info *raw;
 
@@ -989,8 +1032,7 @@
 	return raw->count;
 }
 
-static void
-tty3215_flush_buffer(struct tty_struct *tty)
+static void tty3215_flush_buffer(struct tty_struct *tty)
 {
 	struct raw3215_info *raw;
 
@@ -1002,9 +1044,8 @@
 /*
  * Currently we don't have any io controls for 3215 ttys
  */
-static int
-tty3215_ioctl(struct tty_struct *tty, struct file * file,
-	      unsigned int cmd, unsigned long arg)
+static int tty3215_ioctl(struct tty_struct *tty, struct file * file,
+			 unsigned int cmd, unsigned long arg)
 {
 	if (tty->flags & (1 << TTY_IO_ERROR))
 		return -EIO;
@@ -1019,8 +1060,7 @@
 /*
  * Disable reading from a 3215 tty
  */
-static void
-tty3215_throttle(struct tty_struct * tty)
+static void tty3215_throttle(struct tty_struct * tty)
 {
 	struct raw3215_info *raw;
 
@@ -1031,8 +1071,7 @@
 /*
  * Enable reading from a 3215 tty
  */
-static void
-tty3215_unthrottle(struct tty_struct * tty)
+static void tty3215_unthrottle(struct tty_struct * tty)
 {
 	struct raw3215_info *raw;
 	unsigned long flags;
@@ -1049,8 +1088,7 @@
 /*
  * Disable writing to a 3215 tty
  */
-static void
-tty3215_stop(struct tty_struct *tty)
+static void tty3215_stop(struct tty_struct *tty)
 {
 	struct raw3215_info *raw;
 
@@ -1061,8 +1099,7 @@
 /*
  * Enable writing to a 3215 tty
  */
-static void
-tty3215_start(struct tty_struct *tty)
+static void tty3215_start(struct tty_struct *tty)
 {
 	struct raw3215_info *raw;
 	unsigned long flags;
@@ -1096,8 +1133,7 @@
  * 3215 tty registration code called from tty_init().
  * Most kernel services (incl. kmalloc) are available at this point.
  */
-static int __init
-tty3215_init(void)
+static int __init tty3215_init(void)
 {
 	struct tty_driver *driver;
 	int ret;
@@ -1142,8 +1178,7 @@
 	return 0;
 }
 
-static void __exit
-tty3215_exit(void)
+static void __exit tty3215_exit(void)
 {
 	tty_unregister_driver(tty3215_driver);
 	put_tty_driver(tty3215_driver);
diff --git a/drivers/s390/char/con3270.c b/drivers/s390/char/con3270.c
index ed5396d..44d02e3 100644
--- a/drivers/s390/char/con3270.c
+++ b/drivers/s390/char/con3270.c
@@ -1,11 +1,10 @@
 /*
- *  drivers/s390/char/con3270.c
- *    IBM/3270 Driver - console view.
+ * IBM/3270 Driver - console view.
  *
- *  Author(s):
- *    Original 3270 Code for 2.4 written by Richard Hitt (UTS Global)
- *    Rewritten for 2.5 by Martin Schwidefsky <schwidefsky@de.ibm.com>
- *	-- Copyright (C) 2003 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ * Author(s):
+ *   Original 3270 Code for 2.4 written by Richard Hitt (UTS Global)
+ *   Rewritten for 2.5 by Martin Schwidefsky <schwidefsky@de.ibm.com>
+ *     Copyright IBM Corp. 2003, 2009
  */
 
 #include <linux/bootmem.h>
@@ -530,6 +529,7 @@
 	cp = condev;
 	if (!cp->view.dev)
 		return;
+	raw3270_pm_unfreeze(&cp->view);
 	spin_lock_irqsave(&cp->view.lock, flags);
 	con3270_wait_write(cp);
 	cp->nr_up = 0;
diff --git a/drivers/s390/char/fs3270.c b/drivers/s390/char/fs3270.c
index 40759c3..097d384 100644
--- a/drivers/s390/char/fs3270.c
+++ b/drivers/s390/char/fs3270.c
@@ -1,11 +1,10 @@
 /*
- *  drivers/s390/char/fs3270.c
- *    IBM/3270 Driver - fullscreen driver.
+ * IBM/3270 Driver - fullscreen driver.
  *
- *  Author(s):
- *    Original 3270 Code for 2.4 written by Richard Hitt (UTS Global)
- *    Rewritten for 2.5/2.6 by Martin Schwidefsky <schwidefsky@de.ibm.com>
- *	-- Copyright (C) 2003 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ * Author(s):
+ *   Original 3270 Code for 2.4 written by Richard Hitt (UTS Global)
+ *   Rewritten for 2.5/2.6 by Martin Schwidefsky <schwidefsky@de.ibm.com>
+ *     Copyright IBM Corp. 2003, 2009
  */
 
 #include <linux/bootmem.h>
@@ -399,6 +398,11 @@
 static void
 fs3270_release(struct raw3270_view *view)
 {
+	struct fs3270 *fp;
+
+	fp = (struct fs3270 *) view;
+	if (fp->fs_pid)
+		kill_pid(fp->fs_pid, SIGHUP, 1);
 }
 
 /* View to a 3270 device. Can be console, tty or fullscreen. */
diff --git a/drivers/s390/char/monreader.c b/drivers/s390/char/monreader.c
index 97e63cf..75a8831 100644
--- a/drivers/s390/char/monreader.c
+++ b/drivers/s390/char/monreader.c
@@ -1,10 +1,9 @@
 /*
- * drivers/s390/char/monreader.c
- *
  * Character device driver for reading z/VM *MONITOR service records.
  *
- *   Copyright IBM Corp. 2004, 2008
- *   Author: Gerald Schaefer <gerald.schaefer@de.ibm.com>
+ * Copyright IBM Corp. 2004, 2009
+ *
+ * Author: Gerald Schaefer <gerald.schaefer@de.ibm.com>
  */
 
 #define KMSG_COMPONENT "monreader"
@@ -22,6 +21,7 @@
 #include <linux/spinlock.h>
 #include <linux/interrupt.h>
 #include <linux/poll.h>
+#include <linux/device.h>
 #include <net/iucv/iucv.h>
 #include <asm/uaccess.h>
 #include <asm/ebcdic.h>
@@ -78,6 +78,7 @@
 	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
 };
 
+static struct device *monreader_device;
 
 /******************************************************************************
  *                             helper functions                               *
@@ -319,11 +320,12 @@
 		goto out_path;
 	}
 	filp->private_data = monpriv;
+	monreader_device->driver_data = monpriv;
 	unlock_kernel();
 	return nonseekable_open(inode, filp);
 
 out_path:
-	kfree(monpriv->path);
+	iucv_path_free(monpriv->path);
 out_priv:
 	mon_free_mem(monpriv);
 out_use:
@@ -341,10 +343,13 @@
 	/*
 	 * Close IUCV connection and unregister
 	 */
-	rc = iucv_path_sever(monpriv->path, user_data_sever);
-	if (rc)
-		pr_warning("Disconnecting the z/VM *MONITOR system service "
-			   "failed with rc=%i\n", rc);
+	if (monpriv->path) {
+		rc = iucv_path_sever(monpriv->path, user_data_sever);
+		if (rc)
+			pr_warning("Disconnecting the z/VM *MONITOR system "
+				   "service failed with rc=%i\n", rc);
+		iucv_path_free(monpriv->path);
+	}
 
 	atomic_set(&monpriv->iucv_severed, 0);
 	atomic_set(&monpriv->iucv_connected, 0);
@@ -452,6 +457,94 @@
 	.minor      = MISC_DYNAMIC_MINOR,
 };
 
+
+/******************************************************************************
+ *				suspend / resume			      *
+ *****************************************************************************/
+static int monreader_freeze(struct device *dev)
+{
+	struct mon_private *monpriv = dev->driver_data;
+	int rc;
+
+	if (!monpriv)
+		return 0;
+	if (monpriv->path) {
+		rc = iucv_path_sever(monpriv->path, user_data_sever);
+		if (rc)
+			pr_warning("Disconnecting the z/VM *MONITOR system "
+				   "service failed with rc=%i\n", rc);
+		iucv_path_free(monpriv->path);
+	}
+	atomic_set(&monpriv->iucv_severed, 0);
+	atomic_set(&monpriv->iucv_connected, 0);
+	atomic_set(&monpriv->read_ready, 0);
+	atomic_set(&monpriv->msglim_count, 0);
+	monpriv->write_index  = 0;
+	monpriv->read_index   = 0;
+	monpriv->path = NULL;
+	return 0;
+}
+
+static int monreader_thaw(struct device *dev)
+{
+	struct mon_private *monpriv = dev->driver_data;
+	int rc;
+
+	if (!monpriv)
+		return 0;
+	rc = -ENOMEM;
+	monpriv->path = iucv_path_alloc(MON_MSGLIM, IUCV_IPRMDATA, GFP_KERNEL);
+	if (!monpriv->path)
+		goto out;
+	rc = iucv_path_connect(monpriv->path, &monreader_iucv_handler,
+			       MON_SERVICE, NULL, user_data_connect, monpriv);
+	if (rc) {
+		pr_err("Connecting to the z/VM *MONITOR system service "
+		       "failed with rc=%i\n", rc);
+		goto out_path;
+	}
+	wait_event(mon_conn_wait_queue,
+		   atomic_read(&monpriv->iucv_connected) ||
+		   atomic_read(&monpriv->iucv_severed));
+	if (atomic_read(&monpriv->iucv_severed))
+		goto out_path;
+	return 0;
+out_path:
+	rc = -EIO;
+	iucv_path_free(monpriv->path);
+	monpriv->path = NULL;
+out:
+	atomic_set(&monpriv->iucv_severed, 1);
+	return rc;
+}
+
+static int monreader_restore(struct device *dev)
+{
+	int rc;
+
+	segment_unload(mon_dcss_name);
+	rc = segment_load(mon_dcss_name, SEGMENT_SHARED,
+			  &mon_dcss_start, &mon_dcss_end);
+	if (rc < 0) {
+		segment_warning(rc, mon_dcss_name);
+		panic("fatal monreader resume error: no monitor dcss\n");
+	}
+	return monreader_thaw(dev);
+}
+
+static struct dev_pm_ops monreader_pm_ops = {
+	.freeze  = monreader_freeze,
+	.thaw	 = monreader_thaw,
+	.restore = monreader_restore,
+};
+
+static struct device_driver monreader_driver = {
+	.name = "monreader",
+	.bus  = &iucv_bus,
+	.pm   = &monreader_pm_ops,
+};
+
+
 /******************************************************************************
  *                              module init/exit                              *
  *****************************************************************************/
@@ -475,16 +568,33 @@
 		return rc;
 	}
 
+	rc = driver_register(&monreader_driver);
+	if (rc)
+		goto out_iucv;
+	monreader_device = kzalloc(sizeof(struct device), GFP_KERNEL);
+	if (!monreader_device)
+		goto out_driver;
+	dev_set_name(monreader_device, "monreader-dev");
+	monreader_device->bus = &iucv_bus;
+	monreader_device->parent = iucv_root;
+	monreader_device->driver = &monreader_driver;
+	monreader_device->release = (void (*)(struct device *))kfree;
+	rc = device_register(monreader_device);
+	if (rc) {
+		kfree(monreader_device);
+		goto out_driver;
+	}
+
 	rc = segment_type(mon_dcss_name);
 	if (rc < 0) {
 		segment_warning(rc, mon_dcss_name);
-		goto out_iucv;
+		goto out_device;
 	}
 	if (rc != SEG_TYPE_SC) {
 		pr_err("The specified *MONITOR DCSS %s does not have the "
 		       "required type SC\n", mon_dcss_name);
 		rc = -EINVAL;
-		goto out_iucv;
+		goto out_device;
 	}
 
 	rc = segment_load(mon_dcss_name, SEGMENT_SHARED,
@@ -492,7 +602,7 @@
 	if (rc < 0) {
 		segment_warning(rc, mon_dcss_name);
 		rc = -EINVAL;
-		goto out_iucv;
+		goto out_device;
 	}
 	dcss_mkname(mon_dcss_name, &user_data_connect[8]);
 
@@ -503,6 +613,10 @@
 
 out:
 	segment_unload(mon_dcss_name);
+out_device:
+	device_unregister(monreader_device);
+out_driver:
+	driver_unregister(&monreader_driver);
 out_iucv:
 	iucv_unregister(&monreader_iucv_handler, 1);
 	return rc;
@@ -512,6 +626,8 @@
 {
 	segment_unload(mon_dcss_name);
 	WARN_ON(misc_deregister(&mon_dev) != 0);
+	device_unregister(monreader_device);
+	driver_unregister(&monreader_driver);
 	iucv_unregister(&monreader_iucv_handler, 1);
 	return;
 }
diff --git a/drivers/s390/char/monwriter.c b/drivers/s390/char/monwriter.c
index c7d7483..66fb8eb 100644
--- a/drivers/s390/char/monwriter.c
+++ b/drivers/s390/char/monwriter.c
@@ -1,9 +1,7 @@
 /*
- * drivers/s390/char/monwriter.c
- *
  * Character device driver for writing z/VM *MONITOR service records.
  *
- * Copyright (C) IBM Corp. 2006
+ * Copyright IBM Corp. 2006, 2009
  *
  * Author(s): Melissa Howland <Melissa.Howland@us.ibm.com>
  */
@@ -22,6 +20,7 @@
 #include <linux/ctype.h>
 #include <linux/poll.h>
 #include <linux/mutex.h>
+#include <linux/platform_device.h>
 #include <asm/uaccess.h>
 #include <asm/ebcdic.h>
 #include <asm/io.h>
@@ -40,7 +39,10 @@
 	char *data;
 };
 
+static LIST_HEAD(mon_priv_list);
+
 struct mon_private {
+	struct list_head priv_list;
 	struct list_head list;
 	struct monwrite_hdr hdr;
 	size_t hdr_to_read;
@@ -188,6 +190,7 @@
 	monpriv->hdr_to_read = sizeof(monpriv->hdr);
 	mutex_init(&monpriv->thread_mutex);
 	filp->private_data = monpriv;
+	list_add_tail(&monpriv->priv_list, &mon_priv_list);
 	unlock_kernel();
 	return nonseekable_open(inode, filp);
 }
@@ -206,6 +209,7 @@
 		kfree(entry->data);
 		kfree(entry);
 	}
+	list_del(&monpriv->priv_list);
 	kfree(monpriv);
 	return 0;
 }
@@ -281,20 +285,102 @@
 };
 
 /*
+ * suspend/resume
+ */
+
+static int monwriter_freeze(struct device *dev)
+{
+	struct mon_private *monpriv;
+	struct mon_buf *monbuf;
+
+	list_for_each_entry(monpriv, &mon_priv_list, priv_list) {
+		list_for_each_entry(monbuf, &monpriv->list, list) {
+			if (monbuf->hdr.mon_function != MONWRITE_GEN_EVENT)
+				monwrite_diag(&monbuf->hdr, monbuf->data,
+					      APPLDATA_STOP_REC);
+		}
+	}
+	return 0;
+}
+
+static int monwriter_restore(struct device *dev)
+{
+	struct mon_private *monpriv;
+	struct mon_buf *monbuf;
+
+	list_for_each_entry(monpriv, &mon_priv_list, priv_list) {
+		list_for_each_entry(monbuf, &monpriv->list, list) {
+			if (monbuf->hdr.mon_function == MONWRITE_START_INTERVAL)
+				monwrite_diag(&monbuf->hdr, monbuf->data,
+					      APPLDATA_START_INTERVAL_REC);
+			if (monbuf->hdr.mon_function == MONWRITE_START_CONFIG)
+				monwrite_diag(&monbuf->hdr, monbuf->data,
+					      APPLDATA_START_CONFIG_REC);
+		}
+	}
+	return 0;
+}
+
+static int monwriter_thaw(struct device *dev)
+{
+	return monwriter_restore(dev);
+}
+
+static struct dev_pm_ops monwriter_pm_ops = {
+	.freeze		= monwriter_freeze,
+	.thaw		= monwriter_thaw,
+	.restore	= monwriter_restore,
+};
+
+static struct platform_driver monwriter_pdrv = {
+	.driver = {
+		.name	= "monwriter",
+		.owner	= THIS_MODULE,
+		.pm	= &monwriter_pm_ops,
+	},
+};
+
+static struct platform_device *monwriter_pdev;
+
+/*
  * module init/exit
  */
 
 static int __init mon_init(void)
 {
-	if (MACHINE_IS_VM)
-		return misc_register(&mon_dev);
-	else
+	int rc;
+
+	if (!MACHINE_IS_VM)
 		return -ENODEV;
+
+	rc = platform_driver_register(&monwriter_pdrv);
+	if (rc)
+		return rc;
+
+	monwriter_pdev = platform_device_register_simple("monwriter", -1, NULL,
+							0);
+	if (IS_ERR(monwriter_pdev)) {
+		rc = PTR_ERR(monwriter_pdev);
+		goto out_driver;
+	}
+
+	rc = misc_register(&mon_dev);
+	if (rc)
+		goto out_device;
+	return 0;
+
+out_device:
+	platform_device_unregister(monwriter_pdev);
+out_driver:
+	platform_driver_unregister(&monwriter_pdrv);
+	return rc;
 }
 
 static void __exit mon_exit(void)
 {
 	WARN_ON(misc_deregister(&mon_dev) != 0);
+	platform_device_unregister(monwriter_pdev);
+	platform_driver_unregister(&monwriter_pdrv);
 }
 
 module_init(mon_init);
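
monwriter takes a slightly different angle from the block drivers: every open file instance is linked onto a module-global list in open() and removed again in release(), so the freeze/restore callbacks can walk all active connections and stop or replay their monitor records. A rough sketch of that bookkeeping, with illustrative names (the real driver relies on userspace being frozen during the PM callbacks and therefore takes no lock there; the sketch locks for clarity):

/*
 * Sketch: keep every open instance on a global list so the suspend and
 * resume callbacks can iterate over them.  All names are illustrative.
 */
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/slab.h>

struct foo_priv {
	struct list_head priv_list;	/* link into foo_priv_list */
	int started;			/* per-instance state to replay */
};

static LIST_HEAD(foo_priv_list);
static DEFINE_MUTEX(foo_list_mutex);

static struct foo_priv *foo_open_instance(void)
{
	struct foo_priv *priv;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return NULL;
	mutex_lock(&foo_list_mutex);
	list_add_tail(&priv->priv_list, &foo_priv_list);
	mutex_unlock(&foo_list_mutex);
	return priv;
}

static void foo_release_instance(struct foo_priv *priv)
{
	mutex_lock(&foo_list_mutex);
	list_del(&priv->priv_list);
	mutex_unlock(&foo_list_mutex);
	kfree(priv);
}

static int foo_freeze(struct device *dev)
{
	struct foo_priv *priv;

	mutex_lock(&foo_list_mutex);
	list_for_each_entry(priv, &foo_priv_list, priv_list)
		priv->started = 0;	/* e.g. stop record collection */
	mutex_unlock(&foo_list_mutex);
	return 0;
}
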
diff --git a/drivers/s390/char/raw3270.c b/drivers/s390/char/raw3270.c
index 0b15cf1..acab7b2 100644
--- a/drivers/s390/char/raw3270.c
+++ b/drivers/s390/char/raw3270.c
@@ -1,11 +1,10 @@
 /*
- *  drivers/s390/char/raw3270.c
- *    IBM/3270 Driver - core functions.
+ * IBM/3270 Driver - core functions.
  *
- *  Author(s):
- *    Original 3270 Code for 2.4 written by Richard Hitt (UTS Global)
- *    Rewritten for 2.5 by Martin Schwidefsky <schwidefsky@de.ibm.com>
- *	-- Copyright (C) 2003 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ * Author(s):
+ *   Original 3270 Code for 2.4 written by Richard Hitt (UTS Global)
+ *   Rewritten for 2.5 by Martin Schwidefsky <schwidefsky@de.ibm.com>
+ *     Copyright IBM Corp. 2003, 2009
  */
 
 #include <linux/bootmem.h>
@@ -61,6 +60,7 @@
 #define RAW3270_FLAGS_ATTN	2	/* Device sent an ATTN interrupt */
 #define RAW3270_FLAGS_READY	4	/* Device is useable by views */
 #define RAW3270_FLAGS_CONSOLE	8	/* Device is the console. */
+#define RAW3270_FLAGS_FROZEN	16	/* set if 3270 is frozen for suspend */
 
 /* Semaphore to protect global data of raw3270 (devices, views, etc). */
 static DEFINE_MUTEX(raw3270_mutex);
@@ -306,7 +306,8 @@
 
 	spin_lock_irqsave(get_ccwdev_lock(view->dev->cdev), flags);
 	rp = view->dev;
-	if (!rp || rp->view != view)
+	if (!rp || rp->view != view ||
+	    test_bit(RAW3270_FLAGS_FROZEN, &rp->flags))
 		rc = -EACCES;
 	else if (!test_bit(RAW3270_FLAGS_READY, &rp->flags))
 		rc = -ENODEV;
@@ -323,7 +324,8 @@
 	int rc;
 
 	rp = view->dev;
-	if (!rp || rp->view != view)
+	if (!rp || rp->view != view ||
+	    test_bit(RAW3270_FLAGS_FROZEN, &rp->flags))
 		rc = -EACCES;
 	else if (!test_bit(RAW3270_FLAGS_READY, &rp->flags))
 		rc = -ENODEV;
@@ -355,7 +357,7 @@
 	struct raw3270_request *rq;
 	int rc;
 
-	rp = (struct raw3270 *) cdev->dev.driver_data;
+	rp = dev_get_drvdata(&cdev->dev);
 	if (!rp)
 		return;
 	rq = (struct raw3270_request *) intparm;
@@ -764,7 +766,8 @@
 	int rc;
 
 	rp = view->dev;
-	if (!rp || rp->view != view)
+	if (!rp || rp->view != view ||
+	    test_bit(RAW3270_FLAGS_FROZEN, &rp->flags))
 		rc = -EACCES;
 	else if (!test_bit(RAW3270_FLAGS_READY, &rp->flags))
 		rc = -ENODEV;
@@ -828,7 +831,7 @@
 	if (rp->minor == -1)
 		return -EUSERS;
 	rp->cdev = cdev;
-	cdev->dev.driver_data = rp;
+	dev_set_drvdata(&cdev->dev, rp);
 	cdev->handler = raw3270_irq;
 	return 0;
 }
@@ -922,6 +925,8 @@
 		rc = 0;
 	else if (!test_bit(RAW3270_FLAGS_READY, &rp->flags))
 		rc = -ENODEV;
+	else if (test_bit(RAW3270_FLAGS_FROZEN, &rp->flags))
+		rc = -EACCES;
 	else {
 		oldview = NULL;
 		if (rp->view) {
@@ -969,7 +974,8 @@
 		list_del_init(&view->list);
 		list_add_tail(&view->list, &rp->view_list);
 		/* Try to activate another view. */
-		if (test_bit(RAW3270_FLAGS_READY, &rp->flags)) {
+		if (test_bit(RAW3270_FLAGS_READY, &rp->flags) &&
+		    !test_bit(RAW3270_FLAGS_FROZEN, &rp->flags)) {
 			list_for_each_entry(view, &rp->view_list, list) {
 				rp->view = view;
 				if (view->fn->activate(view) == 0)
@@ -1068,7 +1074,8 @@
 		rp->view = NULL;
 	}
 	list_del_init(&view->list);
-	if (!rp->view && test_bit(RAW3270_FLAGS_READY, &rp->flags)) {
+	if (!rp->view && test_bit(RAW3270_FLAGS_READY, &rp->flags) &&
+	    !test_bit(RAW3270_FLAGS_FROZEN, &rp->flags)) {
 		/* Try to activate another view. */
 		list_for_each_entry(nv, &rp->view_list, list) {
 			if (nv->fn->activate(nv) == 0) {
@@ -1105,7 +1112,7 @@
 	/* Disconnect from ccw_device. */
 	cdev = rp->cdev;
 	rp->cdev = NULL;
-	cdev->dev.driver_data = NULL;
+	dev_set_drvdata(&cdev->dev, NULL);
 	cdev->handler = NULL;
 
 	/* Put ccw_device structure. */
@@ -1129,7 +1136,7 @@
 raw3270_model_show(struct device *dev, struct device_attribute *attr, char *buf)
 {
 	return snprintf(buf, PAGE_SIZE, "%i\n",
-			((struct raw3270 *) dev->driver_data)->model);
+			((struct raw3270 *) dev_get_drvdata(dev))->model);
 }
 static DEVICE_ATTR(model, 0444, raw3270_model_show, NULL);
 
@@ -1137,7 +1144,7 @@
 raw3270_rows_show(struct device *dev, struct device_attribute *attr, char *buf)
 {
 	return snprintf(buf, PAGE_SIZE, "%i\n",
-			((struct raw3270 *) dev->driver_data)->rows);
+			((struct raw3270 *) dev_get_drvdata(dev))->rows);
 }
 static DEVICE_ATTR(rows, 0444, raw3270_rows_show, NULL);
 
@@ -1145,7 +1152,7 @@
 raw3270_columns_show(struct device *dev, struct device_attribute *attr, char *buf)
 {
 	return snprintf(buf, PAGE_SIZE, "%i\n",
-			((struct raw3270 *) dev->driver_data)->cols);
+			((struct raw3270 *) dev_get_drvdata(dev))->cols);
 }
 static DEVICE_ATTR(columns, 0444, raw3270_columns_show, NULL);
 
@@ -1282,7 +1289,7 @@
 	struct raw3270_view *v;
 	struct raw3270_notifier *np;
 
-	rp = cdev->dev.driver_data;
+	rp = dev_get_drvdata(&cdev->dev);
 	/*
 	 * _remove is the opposite of _probe; it's probe that
 	 * should set up rp.  raw3270_remove gets entered for
@@ -1330,13 +1337,65 @@
 {
 	struct raw3270 *rp;
 
-	rp = cdev->dev.driver_data;
+	rp = dev_get_drvdata(&cdev->dev);
 	if (test_bit(RAW3270_FLAGS_CONSOLE, &rp->flags))
 		return -EBUSY;
 	raw3270_remove(cdev);
 	return 0;
 }
 
+static int raw3270_pm_stop(struct ccw_device *cdev)
+{
+	struct raw3270 *rp;
+	struct raw3270_view *view;
+	unsigned long flags;
+
+	rp = dev_get_drvdata(&cdev->dev);
+	if (!rp)
+		return 0;
+	spin_lock_irqsave(get_ccwdev_lock(rp->cdev), flags);
+	if (rp->view)
+		rp->view->fn->deactivate(rp->view);
+	if (!test_bit(RAW3270_FLAGS_CONSOLE, &rp->flags)) {
+		/*
+		 * Release tty and fullscreen for all non-console
+		 * devices.
+		 */
+		list_for_each_entry(view, &rp->view_list, list) {
+			if (view->fn->release)
+				view->fn->release(view);
+		}
+	}
+	set_bit(RAW3270_FLAGS_FROZEN, &rp->flags);
+	spin_unlock_irqrestore(get_ccwdev_lock(rp->cdev), flags);
+	return 0;
+}
+
+static int raw3270_pm_start(struct ccw_device *cdev)
+{
+	struct raw3270 *rp;
+	unsigned long flags;
+
+	rp = dev_get_drvdata(&cdev->dev);
+	if (!rp)
+		return 0;
+	spin_lock_irqsave(get_ccwdev_lock(rp->cdev), flags);
+	clear_bit(RAW3270_FLAGS_FROZEN, &rp->flags);
+	if (rp->view)
+		rp->view->fn->activate(rp->view);
+	spin_unlock_irqrestore(get_ccwdev_lock(rp->cdev), flags);
+	return 0;
+}
+
+void raw3270_pm_unfreeze(struct raw3270_view *view)
+{
+	struct raw3270 *rp;
+
+	rp = view->dev;
+	if (rp && test_bit(RAW3270_FLAGS_FROZEN, &rp->flags))
+		ccw_device_force_console();
+}
+
 static struct ccw_device_id raw3270_id[] = {
 	{ CCW_DEVICE(0x3270, 0) },
 	{ CCW_DEVICE(0x3271, 0) },
@@ -1360,6 +1419,9 @@
 	.remove		= &raw3270_remove,
 	.set_online	= &raw3270_set_online,
 	.set_offline	= &raw3270_set_offline,
+	.freeze		= &raw3270_pm_stop,
+	.thaw		= &raw3270_pm_start,
+	.restore	= &raw3270_pm_start,
 };
 
 static int
diff --git a/drivers/s390/char/raw3270.h b/drivers/s390/char/raw3270.h
index 90beaa8..ed34eb2 100644
--- a/drivers/s390/char/raw3270.h
+++ b/drivers/s390/char/raw3270.h
@@ -1,11 +1,10 @@
 /*
- *  drivers/s390/char/raw3270.h
- *    IBM/3270 Driver
+ * IBM/3270 Driver
  *
- *  Author(s):
- *    Original 3270 Code for 2.4 written by Richard Hitt (UTS Global)
- *    Rewritten for 2.5 by Martin Schwidefsky <schwidefsky@de.ibm.com>
- *	-- Copyright (C) 2003 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ * Author(s):
+ *   Original 3270 Code for 2.4 written by Richard Hitt (UTS Global)
+ *   Rewritten for 2.5 by Martin Schwidefsky <schwidefsky@de.ibm.com>
+ *     Copyright IBM Corp. 2003, 2009
  */
 
 #include <asm/idals.h>
@@ -195,6 +194,7 @@
 /* Notifier for device addition/removal */
 int raw3270_register_notifier(void (*notifier)(int, int));
 void raw3270_unregister_notifier(void (*notifier)(int, int));
+void raw3270_pm_unfreeze(struct raw3270_view *);
 
 /*
  * Little memory allocator for string objects. 
diff --git a/drivers/s390/char/sclp.c b/drivers/s390/char/sclp.c
index 4377e93..a983f50 100644
--- a/drivers/s390/char/sclp.c
+++ b/drivers/s390/char/sclp.c
@@ -1,11 +1,10 @@
 /*
- *  drivers/s390/char/sclp.c
- *     core function to access sclp interface
+ * core function to access sclp interface
  *
- *  S390 version
- *    Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
- *    Author(s): Martin Peschke <mpeschke@de.ibm.com>
- *		 Martin Schwidefsky <schwidefsky@de.ibm.com>
+ * Copyright IBM Corp. 1999, 2009
+ *
+ * Author(s): Martin Peschke <mpeschke@de.ibm.com>
+ *	      Martin Schwidefsky <schwidefsky@de.ibm.com>
  */
 
 #include <linux/module.h>
@@ -16,6 +15,9 @@
 #include <linux/reboot.h>
 #include <linux/jiffies.h>
 #include <linux/init.h>
+#include <linux/suspend.h>
+#include <linux/completion.h>
+#include <linux/platform_device.h>
 #include <asm/types.h>
 #include <asm/s390_ext.h>
 
@@ -47,6 +49,16 @@
 static char sclp_read_sccb[PAGE_SIZE] __attribute__((__aligned__(PAGE_SIZE)));
 static char sclp_init_sccb[PAGE_SIZE] __attribute__((__aligned__(PAGE_SIZE)));
 
+/* Suspend request */
+static DECLARE_COMPLETION(sclp_request_queue_flushed);
+
+static void sclp_suspend_req_cb(struct sclp_req *req, void *data)
+{
+	complete(&sclp_request_queue_flushed);
+}
+
+static struct sclp_req sclp_suspend_req;
+
 /* Timer for request retries. */
 static struct timer_list sclp_request_timer;
 
@@ -84,6 +96,12 @@
 	sclp_mask_state_initializing
 } sclp_mask_state = sclp_mask_state_idle;
 
+/* Internal state: is the driver suspended? */
+static enum sclp_suspend_state_t {
+	sclp_suspend_state_running,
+	sclp_suspend_state_suspended,
+} sclp_suspend_state = sclp_suspend_state_running;
+
 /* Maximum retry counts */
 #define SCLP_INIT_RETRY		3
 #define SCLP_MASK_RETRY		3
@@ -211,6 +229,8 @@
 	del_timer(&sclp_request_timer);
 	while (!list_empty(&sclp_req_queue)) {
 		req = list_entry(sclp_req_queue.next, struct sclp_req, list);
+		if (!req->sccb)
+			goto do_post;
 		rc = __sclp_start_request(req);
 		if (rc == 0)
 			break;
@@ -222,6 +242,7 @@
 						 sclp_request_timeout, 0);
 			break;
 		}
+do_post:
 		/* Post-processing for aborted request */
 		list_del(&req->list);
 		if (req->callback) {
@@ -233,6 +254,19 @@
 	spin_unlock_irqrestore(&sclp_lock, flags);
 }
 
+static int __sclp_can_add_request(struct sclp_req *req)
+{
+	if (req == &sclp_suspend_req || req == &sclp_init_req)
+		return 1;
+	if (sclp_suspend_state != sclp_suspend_state_running)
+		return 0;
+	if (sclp_init_state != sclp_init_state_initialized)
+		return 0;
+	if (sclp_activation_state != sclp_activation_state_active)
+		return 0;
+	return 1;
+}
+
 /* Queue a new request. Return zero on success, non-zero otherwise. */
 int
 sclp_add_request(struct sclp_req *req)
@@ -241,9 +275,7 @@
 	int rc;
 
 	spin_lock_irqsave(&sclp_lock, flags);
-	if ((sclp_init_state != sclp_init_state_initialized ||
-	     sclp_activation_state != sclp_activation_state_active) &&
-	    req != &sclp_init_req) {
+	if (!__sclp_can_add_request(req)) {
 		spin_unlock_irqrestore(&sclp_lock, flags);
 		return -EIO;
 	}
@@ -254,10 +286,16 @@
 	/* Start if request is first in list */
 	if (sclp_running_state == sclp_running_state_idle &&
 	    req->list.prev == &sclp_req_queue) {
+		if (!req->sccb) {
+			list_del(&req->list);
+			rc = -ENODATA;
+			goto out;
+		}
 		rc = __sclp_start_request(req);
 		if (rc)
 			list_del(&req->list);
 	}
+out:
 	spin_unlock_irqrestore(&sclp_lock, flags);
 	return rc;
 }
@@ -560,6 +598,7 @@
 	/* Trigger initial state change callback */
 	reg->sclp_receive_mask = 0;
 	reg->sclp_send_mask = 0;
+	reg->pm_event_posted = 0;
 	list_add(&reg->list, &sclp_reg_list);
 	spin_unlock_irqrestore(&sclp_lock, flags);
 	rc = sclp_init_mask(1);
@@ -880,20 +919,134 @@
 	.notifier_call = sclp_reboot_event
 };
 
+/*
+ * Suspend/resume SCLP notifier implementation
+ */
+
+static void sclp_pm_event(enum sclp_pm_event sclp_pm_event, int rollback)
+{
+	struct sclp_register *reg;
+	unsigned long flags;
+
+	if (!rollback) {
+		spin_lock_irqsave(&sclp_lock, flags);
+		list_for_each_entry(reg, &sclp_reg_list, list)
+			reg->pm_event_posted = 0;
+		spin_unlock_irqrestore(&sclp_lock, flags);
+	}
+	do {
+		spin_lock_irqsave(&sclp_lock, flags);
+		list_for_each_entry(reg, &sclp_reg_list, list) {
+			if (rollback && reg->pm_event_posted)
+				goto found;
+			if (!rollback && !reg->pm_event_posted)
+				goto found;
+		}
+		spin_unlock_irqrestore(&sclp_lock, flags);
+		return;
+found:
+		spin_unlock_irqrestore(&sclp_lock, flags);
+		if (reg->pm_event_fn)
+			reg->pm_event_fn(reg, sclp_pm_event);
+		reg->pm_event_posted = rollback ? 0 : 1;
+	} while (1);
+}
+
+/*
+ * Suspend/resume callbacks for the platform device
+ */
+
+static int sclp_freeze(struct device *dev)
+{
+	unsigned long flags;
+	int rc;
+
+	sclp_pm_event(SCLP_PM_EVENT_FREEZE, 0);
+
+	spin_lock_irqsave(&sclp_lock, flags);
+	sclp_suspend_state = sclp_suspend_state_suspended;
+	spin_unlock_irqrestore(&sclp_lock, flags);
+
+	/* Init suspend data */
+	memset(&sclp_suspend_req, 0, sizeof(sclp_suspend_req));
+	sclp_suspend_req.callback = sclp_suspend_req_cb;
+	sclp_suspend_req.status = SCLP_REQ_FILLED;
+	init_completion(&sclp_request_queue_flushed);
+
+	rc = sclp_add_request(&sclp_suspend_req);
+	if (rc == 0)
+		wait_for_completion(&sclp_request_queue_flushed);
+	else if (rc != -ENODATA)
+		goto fail_thaw;
+
+	rc = sclp_deactivate();
+	if (rc)
+		goto fail_thaw;
+	return 0;
+
+fail_thaw:
+	spin_lock_irqsave(&sclp_lock, flags);
+	sclp_suspend_state = sclp_suspend_state_running;
+	spin_unlock_irqrestore(&sclp_lock, flags);
+	sclp_pm_event(SCLP_PM_EVENT_THAW, 1);
+	return rc;
+}
+
+static int sclp_undo_suspend(enum sclp_pm_event event)
+{
+	unsigned long flags;
+	int rc;
+
+	rc = sclp_reactivate();
+	if (rc)
+		return rc;
+
+	spin_lock_irqsave(&sclp_lock, flags);
+	sclp_suspend_state = sclp_suspend_state_running;
+	spin_unlock_irqrestore(&sclp_lock, flags);
+
+	sclp_pm_event(event, 0);
+	return 0;
+}
+
+static int sclp_thaw(struct device *dev)
+{
+	return sclp_undo_suspend(SCLP_PM_EVENT_THAW);
+}
+
+static int sclp_restore(struct device *dev)
+{
+	return sclp_undo_suspend(SCLP_PM_EVENT_RESTORE);
+}
+
+static struct dev_pm_ops sclp_pm_ops = {
+	.freeze		= sclp_freeze,
+	.thaw		= sclp_thaw,
+	.restore	= sclp_restore,
+};
+
+static struct platform_driver sclp_pdrv = {
+	.driver = {
+		.name	= "sclp",
+		.owner	= THIS_MODULE,
+		.pm	= &sclp_pm_ops,
+	},
+};
+
+static struct platform_device *sclp_pdev;
+
 /* Initialize SCLP driver. Return zero if driver is operational, non-zero
  * otherwise. */
 static int
 sclp_init(void)
 {
 	unsigned long flags;
-	int rc;
+	int rc = 0;
 
 	spin_lock_irqsave(&sclp_lock, flags);
 	/* Check for previous or running initialization */
-	if (sclp_init_state != sclp_init_state_uninitialized) {
-		spin_unlock_irqrestore(&sclp_lock, flags);
-		return 0;
-	}
+	if (sclp_init_state != sclp_init_state_uninitialized)
+		goto fail_unlock;
 	sclp_init_state = sclp_init_state_initializing;
 	/* Set up variables */
 	INIT_LIST_HEAD(&sclp_req_queue);
@@ -904,27 +1057,17 @@
 	spin_unlock_irqrestore(&sclp_lock, flags);
 	rc = sclp_check_interface();
 	spin_lock_irqsave(&sclp_lock, flags);
-	if (rc) {
-		sclp_init_state = sclp_init_state_uninitialized;
-		spin_unlock_irqrestore(&sclp_lock, flags);
-		return rc;
-	}
+	if (rc)
+		goto fail_init_state_uninitialized;
 	/* Register reboot handler */
 	rc = register_reboot_notifier(&sclp_reboot_notifier);
-	if (rc) {
-		sclp_init_state = sclp_init_state_uninitialized;
-		spin_unlock_irqrestore(&sclp_lock, flags);
-		return rc;
-	}
+	if (rc)
+		goto fail_init_state_uninitialized;
 	/* Register interrupt handler */
 	rc = register_early_external_interrupt(0x2401, sclp_interrupt_handler,
 					       &ext_int_info_hwc);
-	if (rc) {
-		unregister_reboot_notifier(&sclp_reboot_notifier);
-		sclp_init_state = sclp_init_state_uninitialized;
-		spin_unlock_irqrestore(&sclp_lock, flags);
-		return rc;
-	}
+	if (rc)
+		goto fail_unregister_reboot_notifier;
 	sclp_init_state = sclp_init_state_initialized;
 	spin_unlock_irqrestore(&sclp_lock, flags);
 	/* Enable service-signal external interruption - needs to happen with
@@ -932,11 +1075,56 @@
 	ctl_set_bit(0, 9);
 	sclp_init_mask(1);
 	return 0;
+
+fail_unregister_reboot_notifier:
+	unregister_reboot_notifier(&sclp_reboot_notifier);
+fail_init_state_uninitialized:
+	sclp_init_state = sclp_init_state_uninitialized;
+fail_unlock:
+	spin_unlock_irqrestore(&sclp_lock, flags);
+	return rc;
 }
 
+/*
+ * SCLP panic notifier: If we are suspended, we thaw SCLP in order to be able
+ * to print the panic message.
+ */
+static int sclp_panic_notify(struct notifier_block *self,
+			     unsigned long event, void *data)
+{
+	if (sclp_suspend_state == sclp_suspend_state_suspended)
+		sclp_undo_suspend(SCLP_PM_EVENT_THAW);
+	return NOTIFY_OK;
+}
+
+static struct notifier_block sclp_on_panic_nb = {
+	.notifier_call = sclp_panic_notify,
+	.priority = SCLP_PANIC_PRIO,
+};
+
 static __init int sclp_initcall(void)
 {
+	int rc;
+
+	rc = platform_driver_register(&sclp_pdrv);
+	if (rc)
+		return rc;
+	sclp_pdev = platform_device_register_simple("sclp", -1, NULL, 0);
+	rc = IS_ERR(sclp_pdev) ? PTR_ERR(sclp_pdev) : 0;
+	if (rc)
+		goto fail_platform_driver_unregister;
+	rc = atomic_notifier_chain_register(&panic_notifier_list,
+					    &sclp_on_panic_nb);
+	if (rc)
+		goto fail_platform_device_unregister;
+
 	return sclp_init();
+
+fail_platform_device_unregister:
+	platform_device_unregister(sclp_pdev);
+fail_platform_driver_unregister:
+	platform_driver_unregister(&sclp_pdrv);
+	return rc;
 }
 
 arch_initcall(sclp_initcall);
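
The new panic notifier above is what keeps panic output visible after a suspend: if SCLP is still frozen when the kernel panics, the notifier thaws it at SCLP_PANIC_PRIO before the console clients (registered at the lower SCLP_PANIC_PRIO_CLIENT) flush their buffers. A condensed sketch of that hook, with illustrative names and a stubbed-out reactivation step:

/*
 * Sketch: reactivate a suspended console device from the panic path so
 * that the panic message can still be emitted.  Illustrative names only.
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/notifier.h>

static int foo_suspended;		/* set while the device is frozen */

static void foo_undo_suspend(void)
{
	/* placeholder: the real driver re-runs its reactivation path here */
	foo_suspended = 0;
}

static int foo_panic_notify(struct notifier_block *self,
			    unsigned long event, void *data)
{
	if (foo_suspended)
		foo_undo_suspend();
	return NOTIFY_OK;
}

static struct notifier_block foo_on_panic_nb = {
	.notifier_call	= foo_panic_notify,
	.priority	= 1,	/* run before the console flush notifiers */
};

static int __init foo_register_panic_hook(void)
{
	return atomic_notifier_chain_register(&panic_notifier_list,
					      &foo_on_panic_nb);
}
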
diff --git a/drivers/s390/char/sclp.h b/drivers/s390/char/sclp.h
index bac80e8..60e7cb0 100644
--- a/drivers/s390/char/sclp.h
+++ b/drivers/s390/char/sclp.h
@@ -1,10 +1,8 @@
 /*
- *  drivers/s390/char/sclp.h
+ * Copyright IBM Corp. 1999, 2009
  *
- *  S390 version
- *    Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
- *    Author(s): Martin Peschke <mpeschke@de.ibm.com>
- *		 Martin Schwidefsky <schwidefsky@de.ibm.com>
+ * Author(s): Martin Peschke <mpeschke@de.ibm.com>
+ *	      Martin Schwidefsky <schwidefsky@de.ibm.com>
  */
 
 #ifndef __SCLP_H__
@@ -17,7 +15,7 @@
 
 /* maximum number of pages concerning our own memory management */
 #define MAX_KMEM_PAGES (sizeof(unsigned long) << 3)
-#define MAX_CONSOLE_PAGES	4
+#define MAX_CONSOLE_PAGES	6
 
 #define EVTYP_OPCMD		0x01
 #define EVTYP_MSG		0x02
@@ -68,6 +66,15 @@
 
 #define GDS_KEY_SELFDEFTEXTMSG	0x31
 
+enum sclp_pm_event {
+	SCLP_PM_EVENT_FREEZE,
+	SCLP_PM_EVENT_THAW,
+	SCLP_PM_EVENT_RESTORE,
+};
+
+#define SCLP_PANIC_PRIO		1
+#define SCLP_PANIC_PRIO_CLIENT	0
+
 typedef u32 sccb_mask_t;	/* ATTENTION: assumes 32bit mask !!! */
 
 struct sccb_header {
@@ -134,6 +141,10 @@
 	void (*state_change_fn)(struct sclp_register *);
 	/* called for events in cp_receive_mask/sclp_receive_mask */
 	void (*receiver_fn)(struct evbuf_header *);
+	/* called for power management events */
+	void (*pm_event_fn)(struct sclp_register *, enum sclp_pm_event);
+	/* pm event posted flag */
+	int pm_event_posted;
 };
 
 /* externals from sclp.c */
diff --git a/drivers/s390/char/sclp_cmd.c b/drivers/s390/char/sclp_cmd.c
index 77ab6e3..5cc11c6 100644
--- a/drivers/s390/char/sclp_cmd.c
+++ b/drivers/s390/char/sclp_cmd.c
@@ -1,9 +1,8 @@
 /*
- *  drivers/s390/char/sclp_cmd.c
+ * Copyright IBM Corp. 2007, 2009
  *
- *    Copyright IBM Corp. 2007
- *    Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>,
- *		 Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
+ * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>,
+ *	      Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
  */
 
 #define KMSG_COMPONENT "sclp_cmd"
@@ -12,11 +11,13 @@
 #include <linux/completion.h>
 #include <linux/init.h>
 #include <linux/errno.h>
+#include <linux/err.h>
 #include <linux/slab.h>
 #include <linux/string.h>
 #include <linux/mm.h>
 #include <linux/mmzone.h>
 #include <linux/memory.h>
+#include <linux/platform_device.h>
 #include <asm/chpid.h>
 #include <asm/sclp.h>
 #include <asm/setup.h>
@@ -292,6 +293,7 @@
 static LIST_HEAD(sclp_mem_list);
 static u8 sclp_max_storage_id;
 static unsigned long sclp_storage_ids[256 / BITS_PER_LONG];
+static int sclp_mem_state_changed;
 
 struct memory_increment {
 	struct list_head list;
@@ -450,6 +452,8 @@
 		rc = -EINVAL;
 		break;
 	}
+	if (!rc)
+		sclp_mem_state_changed = 1;
 	mutex_unlock(&sclp_mem_mutex);
 	return rc ? NOTIFY_BAD : NOTIFY_OK;
 }
@@ -525,6 +529,14 @@
 	list_add(&new_incr->list, prev);
 }
 
+static int sclp_mem_freeze(struct device *dev)
+{
+	if (!sclp_mem_state_changed)
+		return 0;
+	pr_err("Memory hotplug state changed, suspend refused.\n");
+	return -EPERM;
+}
+
 struct read_storage_sccb {
 	struct sccb_header header;
 	u16 max_id;
@@ -534,8 +546,20 @@
 	u32 entries[0];
 } __packed;
 
+static struct dev_pm_ops sclp_mem_pm_ops = {
+	.freeze		= sclp_mem_freeze,
+};
+
+static struct platform_driver sclp_mem_pdrv = {
+	.driver = {
+		.name	= "sclp_mem",
+		.pm	= &sclp_mem_pm_ops,
+	},
+};
+
 static int __init sclp_detect_standby_memory(void)
 {
+	struct platform_device *sclp_pdev;
 	struct read_storage_sccb *sccb;
 	int i, id, assigned, rc;
 
@@ -588,7 +612,17 @@
 	rc = register_memory_notifier(&sclp_mem_nb);
 	if (rc)
 		goto out;
+	rc = platform_driver_register(&sclp_mem_pdrv);
+	if (rc)
+		goto out;
+	sclp_pdev = platform_device_register_simple("sclp_mem", -1, NULL, 0);
+	rc = IS_ERR(sclp_pdev) ? PTR_ERR(sclp_pdev) : 0;
+	if (rc)
+		goto out_driver;
 	sclp_add_standby_memory();
+	goto out;
+out_driver:
+	platform_driver_unregister(&sclp_mem_pdrv);
 out:
 	free_page((unsigned long) sccb);
 	return rc;
diff --git a/drivers/s390/char/sclp_con.c b/drivers/s390/char/sclp_con.c
index 9a25c4b..336811a 100644
--- a/drivers/s390/char/sclp_con.c
+++ b/drivers/s390/char/sclp_con.c
@@ -1,11 +1,9 @@
 /*
- *  drivers/s390/char/sclp_con.c
- *    SCLP line mode console driver
+ * SCLP line mode console driver
  *
- *  S390 version
- *    Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
- *    Author(s): Martin Peschke <mpeschke@de.ibm.com>
- *		 Martin Schwidefsky <schwidefsky@de.ibm.com>
+ * Copyright IBM Corp. 1999, 2009
+ * Author(s): Martin Peschke <mpeschke@de.ibm.com>
+ *	      Martin Schwidefsky <schwidefsky@de.ibm.com>
  */
 
 #include <linux/kmod.h>
@@ -32,13 +30,14 @@
 static struct list_head sclp_con_pages;
 /* List of full struct sclp_buffer structures ready for output */
 static struct list_head sclp_con_outqueue;
-/* Counter how many buffers are emitted (max 1) and how many */
-/* are on the output queue. */
-static int sclp_con_buffer_count;
 /* Pointer to current console buffer */
 static struct sclp_buffer *sclp_conbuf;
 /* Timer for delayed output of console messages */
 static struct timer_list sclp_con_timer;
+/* Suspend mode flag */
+static int sclp_con_suspended;
+/* Flag that output queue is currently running */
+static int sclp_con_queue_running;
 
 /* Output format for console messages */
 static unsigned short sclp_con_columns;
@@ -53,42 +52,71 @@
 	do {
 		page = sclp_unmake_buffer(buffer);
 		spin_lock_irqsave(&sclp_con_lock, flags);
+
 		/* Remove buffer from outqueue */
 		list_del(&buffer->list);
-		sclp_con_buffer_count--;
 		list_add_tail((struct list_head *) page, &sclp_con_pages);
+
 		/* Check if there is a pending buffer on the out queue. */
 		buffer = NULL;
 		if (!list_empty(&sclp_con_outqueue))
-			buffer = list_entry(sclp_con_outqueue.next,
-					    struct sclp_buffer, list);
+			buffer = list_first_entry(&sclp_con_outqueue,
+						  struct sclp_buffer, list);
+		if (!buffer || sclp_con_suspended) {
+			sclp_con_queue_running = 0;
+			spin_unlock_irqrestore(&sclp_con_lock, flags);
+			break;
+		}
 		spin_unlock_irqrestore(&sclp_con_lock, flags);
-	} while (buffer && sclp_emit_buffer(buffer, sclp_conbuf_callback));
+	} while (sclp_emit_buffer(buffer, sclp_conbuf_callback));
 }
 
-static void
-sclp_conbuf_emit(void)
+/*
+ * Finalize and emit first pending buffer.
+ */
+static void sclp_conbuf_emit(void)
 {
 	struct sclp_buffer* buffer;
 	unsigned long flags;
-	int count;
 	int rc;
 
 	spin_lock_irqsave(&sclp_con_lock, flags);
-	buffer = sclp_conbuf;
+	if (sclp_conbuf)
+		list_add_tail(&sclp_conbuf->list, &sclp_con_outqueue);
 	sclp_conbuf = NULL;
-	if (buffer == NULL) {
-		spin_unlock_irqrestore(&sclp_con_lock, flags);
-		return;
-	}
-	list_add_tail(&buffer->list, &sclp_con_outqueue);
-	count = sclp_con_buffer_count++;
+	if (sclp_con_queue_running || sclp_con_suspended)
+		goto out_unlock;
+	if (list_empty(&sclp_con_outqueue))
+		goto out_unlock;
+	buffer = list_first_entry(&sclp_con_outqueue, struct sclp_buffer,
+				  list);
+	sclp_con_queue_running = 1;
 	spin_unlock_irqrestore(&sclp_con_lock, flags);
-	if (count)
-		return;
+
 	rc = sclp_emit_buffer(buffer, sclp_conbuf_callback);
 	if (rc)
 		sclp_conbuf_callback(buffer, rc);
+	return;
+out_unlock:
+	spin_unlock_irqrestore(&sclp_con_lock, flags);
+}
+
+/*
+ * Wait until the output queue is empty
+ */
+static void sclp_console_sync_queue(void)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&sclp_con_lock, flags);
+	if (timer_pending(&sclp_con_timer))
+		del_timer_sync(&sclp_con_timer);
+	while (sclp_con_queue_running) {
+		spin_unlock_irqrestore(&sclp_con_lock, flags);
+		sclp_sync_wait();
+		spin_lock_irqsave(&sclp_con_lock, flags);
+	}
+	spin_unlock_irqrestore(&sclp_con_lock, flags);
 }
 
 /*
@@ -123,6 +151,8 @@
 		/* make sure we have a console output buffer */
 		if (sclp_conbuf == NULL) {
 			while (list_empty(&sclp_con_pages)) {
+				if (sclp_con_suspended)
+					goto out;
 				spin_unlock_irqrestore(&sclp_con_lock, flags);
 				sclp_sync_wait();
 				spin_lock_irqsave(&sclp_con_lock, flags);
@@ -157,6 +187,7 @@
 		sclp_con_timer.expires = jiffies + HZ/10;
 		add_timer(&sclp_con_timer);
 	}
+out:
 	spin_unlock_irqrestore(&sclp_con_lock, flags);
 }
 
@@ -168,30 +199,43 @@
 }
 
 /*
- * This routine is called from panic when the kernel
- * is going to give up. We have to make sure that all buffers
- * will be flushed to the SCLP.
+ * Make sure that all buffers will be flushed to the SCLP.
  */
 static void
 sclp_console_flush(void)
 {
-	unsigned long flags;
-
 	sclp_conbuf_emit();
-	spin_lock_irqsave(&sclp_con_lock, flags);
-	if (timer_pending(&sclp_con_timer))
-		del_timer(&sclp_con_timer);
-	while (sclp_con_buffer_count > 0) {
-		spin_unlock_irqrestore(&sclp_con_lock, flags);
-		sclp_sync_wait();
-		spin_lock_irqsave(&sclp_con_lock, flags);
-	}
-	spin_unlock_irqrestore(&sclp_con_lock, flags);
+	sclp_console_sync_queue();
 }
 
-static int
-sclp_console_notify(struct notifier_block *self,
-			  unsigned long event, void *data)
+/*
+ * Resume console: If there are cached messages, emit them.
+ */
+static void sclp_console_resume(void)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&sclp_con_lock, flags);
+	sclp_con_suspended = 0;
+	spin_unlock_irqrestore(&sclp_con_lock, flags);
+	sclp_conbuf_emit();
+}
+
+/*
+ * Suspend console: Set the suspend flag and flush the console
+ */
+static void sclp_console_suspend(void)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&sclp_con_lock, flags);
+	sclp_con_suspended = 1;
+	spin_unlock_irqrestore(&sclp_con_lock, flags);
+	sclp_console_flush();
+}
+
+static int sclp_console_notify(struct notifier_block *self,
+			       unsigned long event, void *data)
 {
 	sclp_console_flush();
 	return NOTIFY_OK;
@@ -199,7 +243,7 @@
 
 static struct notifier_block on_panic_nb = {
 	.notifier_call = sclp_console_notify,
-	.priority = 1,
+	.priority = SCLP_PANIC_PRIO_CLIENT,
 };
 
 static struct notifier_block on_reboot_nb = {
@@ -221,6 +265,22 @@
 };
 
 /*
+ * This function is called for SCLP suspend and resume events.
+ */
+void sclp_console_pm_event(enum sclp_pm_event sclp_pm_event)
+{
+	switch (sclp_pm_event) {
+	case SCLP_PM_EVENT_FREEZE:
+		sclp_console_suspend();
+		break;
+	case SCLP_PM_EVENT_RESTORE:
+	case SCLP_PM_EVENT_THAW:
+		sclp_console_resume();
+		break;
+	}
+}
+
+/*
  * called by console_init() in drivers/char/tty_io.c at boot-time.
  */
 static int __init
@@ -243,7 +303,6 @@
 	}
 	INIT_LIST_HEAD(&sclp_con_outqueue);
 	spin_lock_init(&sclp_con_lock);
-	sclp_con_buffer_count = 0;
 	sclp_conbuf = NULL;
 	init_timer(&sclp_con_timer);
 
diff --git a/drivers/s390/char/sclp_rw.c b/drivers/s390/char/sclp_rw.c
index 710af42..4be63be 100644
--- a/drivers/s390/char/sclp_rw.c
+++ b/drivers/s390/char/sclp_rw.c
@@ -1,11 +1,10 @@
 /*
- *  drivers/s390/char/sclp_rw.c
- *     driver: reading from and writing to system console on S/390 via SCLP
+ * driver: reading from and writing to system console on S/390 via SCLP
  *
- *  S390 version
- *    Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
- *    Author(s): Martin Peschke <mpeschke@de.ibm.com>
- *		 Martin Schwidefsky <schwidefsky@de.ibm.com>
+ * Copyright IBM Corp. 1999, 2009
+ *
+ * Author(s): Martin Peschke <mpeschke@de.ibm.com>
+ *	      Martin Schwidefsky <schwidefsky@de.ibm.com>
  */
 
 #include <linux/kmod.h>
@@ -26,9 +25,16 @@
  */
 #define MAX_SCCB_ROOM (PAGE_SIZE - sizeof(struct sclp_buffer))
 
+static void sclp_rw_pm_event(struct sclp_register *reg,
+			     enum sclp_pm_event sclp_pm_event)
+{
+	sclp_console_pm_event(sclp_pm_event);
+}
+
 /* Event type structure for write message and write priority message */
 static struct sclp_register sclp_rw_event = {
-	.send_mask = EVTYP_MSG_MASK | EVTYP_PMSGCMD_MASK
+	.send_mask = EVTYP_MSG_MASK | EVTYP_PMSGCMD_MASK,
+	.pm_event_fn = sclp_rw_pm_event,
 };
 
 /*
diff --git a/drivers/s390/char/sclp_rw.h b/drivers/s390/char/sclp_rw.h
index 6aa7a69..85f491e 100644
--- a/drivers/s390/char/sclp_rw.h
+++ b/drivers/s390/char/sclp_rw.h
@@ -1,11 +1,10 @@
 /*
- *  drivers/s390/char/sclp_rw.h
- *    interface to the SCLP-read/write driver
+ * interface to the SCLP-read/write driver
  *
- *  S390 version
- *    Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
- *    Author(s): Martin Peschke <mpeschke@de.ibm.com>
- *		 Martin Schwidefsky <schwidefsky@de.ibm.com>
+ * Copyright IBM Corporation 1999, 2009
+ *
+ * Author(s): Martin Peschke <mpeschke@de.ibm.com>
+ *	      Martin Schwidefsky <schwidefsky@de.ibm.com>
  */
 
 #ifndef __SCLP_RW_H__
@@ -93,4 +92,5 @@
 void sclp_set_htab(struct sclp_buffer *, unsigned short);
 int sclp_chars_in_buffer(struct sclp_buffer *);
 
+void sclp_console_pm_event(enum sclp_pm_event sclp_pm_event);
 #endif	/* __SCLP_RW_H__ */
diff --git a/drivers/s390/char/sclp_vt220.c b/drivers/s390/char/sclp_vt220.c
index a839aa5..5518e24 100644
--- a/drivers/s390/char/sclp_vt220.c
+++ b/drivers/s390/char/sclp_vt220.c
@@ -1,10 +1,9 @@
 /*
- *  drivers/s390/char/sclp_vt220.c
- *    SCLP VT220 terminal driver.
+ * SCLP VT220 terminal driver.
  *
- *  S390 version
- *    Copyright IBM Corp. 2003,2008
- *    Author(s): Peter Oberparleiter <Peter.Oberparleiter@de.ibm.com>
+ * Copyright IBM Corp. 2003, 2009
+ *
+ * Author(s): Peter Oberparleiter <Peter.Oberparleiter@de.ibm.com>
  */
 
 #include <linux/module.h>
@@ -69,8 +68,11 @@
 /* List of pending requests */
 static struct list_head sclp_vt220_outqueue;
 
-/* Number of requests in outqueue */
-static int sclp_vt220_outqueue_count;
+/* Suspend mode flag */
+static int sclp_vt220_suspended;
+
+/* Flag that output queue is currently running */
+static int sclp_vt220_queue_running;
 
 /* Timer used for delaying write requests to merge subsequent messages into
  * a single buffer */
@@ -92,6 +94,8 @@
 static int sclp_vt220_flush_later;
 
 static void sclp_vt220_receiver_fn(struct evbuf_header *evbuf);
+static void sclp_vt220_pm_event_fn(struct sclp_register *reg,
+				   enum sclp_pm_event sclp_pm_event);
 static int __sclp_vt220_emit(struct sclp_vt220_request *request);
 static void sclp_vt220_emit_current(void);
 
@@ -100,7 +104,8 @@
 	.send_mask		= EVTYP_VT220MSG_MASK,
 	.receive_mask		= EVTYP_VT220MSG_MASK,
 	.state_change_fn	= NULL,
-	.receiver_fn		= sclp_vt220_receiver_fn
+	.receiver_fn		= sclp_vt220_receiver_fn,
+	.pm_event_fn		= sclp_vt220_pm_event_fn,
 };
 
 
@@ -120,15 +125,19 @@
 		spin_lock_irqsave(&sclp_vt220_lock, flags);
 		/* Move request from outqueue to empty queue */
 		list_del(&request->list);
-		sclp_vt220_outqueue_count--;
 		list_add_tail((struct list_head *) page, &sclp_vt220_empty);
 		/* Check if there is a pending buffer on the out queue. */
 		request = NULL;
 		if (!list_empty(&sclp_vt220_outqueue))
 			request = list_entry(sclp_vt220_outqueue.next,
 					     struct sclp_vt220_request, list);
+		if (!request || sclp_vt220_suspended) {
+			sclp_vt220_queue_running = 0;
+			spin_unlock_irqrestore(&sclp_vt220_lock, flags);
+			break;
+		}
 		spin_unlock_irqrestore(&sclp_vt220_lock, flags);
-	} while (request && __sclp_vt220_emit(request));
+	} while (__sclp_vt220_emit(request));
 	if (request == NULL && sclp_vt220_flush_later)
 		sclp_vt220_emit_current();
 	/* Check if the tty needs a wake up call */
@@ -212,26 +221,7 @@
 }
 
 /*
- * Queue and emit given request.
- */
-static void
-sclp_vt220_emit(struct sclp_vt220_request *request)
-{
-	unsigned long flags;
-	int count;
-
-	spin_lock_irqsave(&sclp_vt220_lock, flags);
-	list_add_tail(&request->list, &sclp_vt220_outqueue);
-	count = sclp_vt220_outqueue_count++;
-	spin_unlock_irqrestore(&sclp_vt220_lock, flags);
-	/* Emit only the first buffer immediately - callback takes care of
-	 * the rest */
-	if (count == 0 && __sclp_vt220_emit(request))
-		sclp_vt220_process_queue(request);
-}
-
-/*
- * Queue and emit current request. Return zero on success, non-zero otherwise.
+ * Queue and emit current request.
  */
 static void
 sclp_vt220_emit_current(void)
@@ -241,22 +231,33 @@
 	struct sclp_vt220_sccb *sccb;
 
 	spin_lock_irqsave(&sclp_vt220_lock, flags);
-	request = NULL;
-	if (sclp_vt220_current_request != NULL) {
+	if (sclp_vt220_current_request) {
 		sccb = (struct sclp_vt220_sccb *) 
 				sclp_vt220_current_request->sclp_req.sccb;
 		/* Only emit buffers with content */
 		if (sccb->header.length != sizeof(struct sclp_vt220_sccb)) {
-			request = sclp_vt220_current_request;
+			list_add_tail(&sclp_vt220_current_request->list,
+				      &sclp_vt220_outqueue);
 			sclp_vt220_current_request = NULL;
 			if (timer_pending(&sclp_vt220_timer))
 				del_timer(&sclp_vt220_timer);
 		}
 		sclp_vt220_flush_later = 0;
 	}
+	if (sclp_vt220_queue_running || sclp_vt220_suspended)
+		goto out_unlock;
+	if (list_empty(&sclp_vt220_outqueue))
+		goto out_unlock;
+	request = list_first_entry(&sclp_vt220_outqueue,
+				   struct sclp_vt220_request, list);
+	sclp_vt220_queue_running = 1;
 	spin_unlock_irqrestore(&sclp_vt220_lock, flags);
-	if (request != NULL)
-		sclp_vt220_emit(request);
+
+	if (__sclp_vt220_emit(request))
+		sclp_vt220_process_queue(request);
+	return;
+out_unlock:
+	spin_unlock_irqrestore(&sclp_vt220_lock, flags);
 }
 
 #define SCLP_NORMAL_WRITE	0x00
@@ -396,7 +397,7 @@
 		if (sclp_vt220_current_request == NULL) {
 			while (list_empty(&sclp_vt220_empty)) {
 				spin_unlock_irqrestore(&sclp_vt220_lock, flags);
-				if (may_fail)
+				if (may_fail || sclp_vt220_suspended)
 					goto out;
 				else
 					sclp_sync_wait();
@@ -531,7 +532,7 @@
 static void
 sclp_vt220_flush_chars(struct tty_struct *tty)
 {
-	if (sclp_vt220_outqueue_count == 0)
+	if (!sclp_vt220_queue_running)
 		sclp_vt220_emit_current();
 	else
 		sclp_vt220_flush_later = 1;
@@ -635,7 +636,6 @@
 	init_timer(&sclp_vt220_timer);
 	sclp_vt220_current_request = NULL;
 	sclp_vt220_buffered_chars = 0;
-	sclp_vt220_outqueue_count = 0;
 	sclp_vt220_tty = NULL;
 	sclp_vt220_flush_later = 0;
 
@@ -736,7 +736,7 @@
 	spin_lock_irqsave(&sclp_vt220_lock, flags);
 	if (timer_pending(&sclp_vt220_timer))
 		del_timer(&sclp_vt220_timer);
-	while (sclp_vt220_outqueue_count > 0) {
+	while (sclp_vt220_queue_running) {
 		spin_unlock_irqrestore(&sclp_vt220_lock, flags);
 		sclp_sync_wait();
 		spin_lock_irqsave(&sclp_vt220_lock, flags);
@@ -744,6 +744,46 @@
 	spin_unlock_irqrestore(&sclp_vt220_lock, flags);
 }
 
+/*
+ * Resume console: If there are cached messages, emit them.
+ */
+static void sclp_vt220_resume(void)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&sclp_vt220_lock, flags);
+	sclp_vt220_suspended = 0;
+	spin_unlock_irqrestore(&sclp_vt220_lock, flags);
+	sclp_vt220_emit_current();
+}
+
+/*
+ * Suspend console: Set suspend flag and flush console
+ */
+static void sclp_vt220_suspend(void)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&sclp_vt220_lock, flags);
+	sclp_vt220_suspended = 1;
+	spin_unlock_irqrestore(&sclp_vt220_lock, flags);
+	__sclp_vt220_flush_buffer();
+}
+
+static void sclp_vt220_pm_event_fn(struct sclp_register *reg,
+				   enum sclp_pm_event sclp_pm_event)
+{
+	switch (sclp_pm_event) {
+	case SCLP_PM_EVENT_FREEZE:
+		sclp_vt220_suspend();
+		break;
+	case SCLP_PM_EVENT_RESTORE:
+	case SCLP_PM_EVENT_THAW:
+		sclp_vt220_resume();
+		break;
+	}
+}
+
 static int
 sclp_vt220_notify(struct notifier_block *self,
 			  unsigned long event, void *data)
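
Both console drivers replace the old outqueue counter with a "queue running" flag and drain the queue by busy-waiting with sclp_sync_wait(), dropping the lock around each wait so request completion can be processed. A condensed sketch of that drain loop (assumes the driver-internal sclp.h for sclp_sync_wait(); the other names are invented and not part of the patch):

#include <linux/spinlock.h>
#include "sclp.h"	/* sclp_sync_wait(), driver-internal */

static void example_drain_queue(spinlock_t *lock, int *queue_running)
{
	unsigned long flags;

	spin_lock_irqsave(lock, flags);
	while (*queue_running) {
		spin_unlock_irqrestore(lock, flags);
		sclp_sync_wait();	/* poll until pending requests complete */
		spin_lock_irqsave(lock, flags);
	}
	spin_unlock_irqrestore(lock, flags);
}
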
diff --git a/drivers/s390/char/tape.h b/drivers/s390/char/tape.h
index 5469e09..a263337 100644
--- a/drivers/s390/char/tape.h
+++ b/drivers/s390/char/tape.h
@@ -3,7 +3,7 @@
  *    tape device driver for 3480/3490E/3590 tapes.
  *
  *  S390 and zSeries version
- *    Copyright IBM Corp. 2001,2006
+ *    Copyright IBM Corp. 2001, 2009
  *    Author(s): Carsten Otte <cotte@de.ibm.com>
  *		 Tuan Ngo-Anh <ngoanh@de.ibm.com>
  *		 Martin Schwidefsky <schwidefsky@de.ibm.com>
@@ -286,6 +286,7 @@
 
 extern int tape_generic_online(struct tape_device *, struct tape_discipline *);
 extern int tape_generic_offline(struct ccw_device *);
+extern int tape_generic_pm_suspend(struct ccw_device *);
 
 /* Externals from tape_devmap.c */
 extern int tape_generic_probe(struct ccw_device *);
diff --git a/drivers/s390/char/tape_34xx.c b/drivers/s390/char/tape_34xx.c
index 2d00a38..5a519fa 100644
--- a/drivers/s390/char/tape_34xx.c
+++ b/drivers/s390/char/tape_34xx.c
@@ -2,7 +2,7 @@
  *  drivers/s390/char/tape_34xx.c
  *    tape device discipline for 3480/3490 tapes.
  *
- *    Copyright (C) IBM Corp. 2001,2006
+ *    Copyright IBM Corp. 2001, 2009
  *    Author(s): Carsten Otte <cotte@de.ibm.com>
  *		 Tuan Ngo-Anh <ngoanh@de.ibm.com>
  *		 Martin Schwidefsky <schwidefsky@de.ibm.com>
@@ -1289,7 +1289,7 @@
 tape_34xx_online(struct ccw_device *cdev)
 {
 	return tape_generic_online(
-		cdev->dev.driver_data,
+		dev_get_drvdata(&cdev->dev),
 		&tape_discipline_34xx
 	);
 }
@@ -1302,6 +1302,7 @@
 	.remove = tape_generic_remove,
 	.set_online = tape_34xx_online,
 	.set_offline = tape_generic_offline,
+	.freeze = tape_generic_pm_suspend,
 };
 
 static int
diff --git a/drivers/s390/char/tape_3590.c b/drivers/s390/char/tape_3590.c
index c453b2f..418f72d 100644
--- a/drivers/s390/char/tape_3590.c
+++ b/drivers/s390/char/tape_3590.c
@@ -2,7 +2,7 @@
  *  drivers/s390/char/tape_3590.c
  *    tape device discipline for 3590 tapes.
  *
- *    Copyright IBM Corp. 2001,2006
+ *    Copyright IBM Corp. 2001, 2009
  *    Author(s): Stefan Bader <shbader@de.ibm.com>
  *		 Michael Holzheu <holzheu@de.ibm.com>
  *		 Martin Schwidefsky <schwidefsky@de.ibm.com>
@@ -1703,7 +1703,7 @@
 static int
 tape_3590_online(struct ccw_device *cdev)
 {
-	return tape_generic_online(cdev->dev.driver_data,
+	return tape_generic_online(dev_get_drvdata(&cdev->dev),
 				   &tape_discipline_3590);
 }
 
@@ -1715,6 +1715,7 @@
 	.remove = tape_generic_remove,
 	.set_offline = tape_generic_offline,
 	.set_online = tape_3590_online,
+	.freeze = tape_generic_pm_suspend,
 };
 
 /*
diff --git a/drivers/s390/char/tape_core.c b/drivers/s390/char/tape_core.c
index 8a109f3..595aa04 100644
--- a/drivers/s390/char/tape_core.c
+++ b/drivers/s390/char/tape_core.c
@@ -3,7 +3,7 @@
  *    basic function of the tape device driver
  *
  *  S390 and zSeries version
- *    Copyright IBM Corp. 2001,2006
+ *    Copyright IBM Corp. 2001, 2009
  *    Author(s): Carsten Otte <cotte@de.ibm.com>
  *		 Michael Holzheu <holzheu@de.ibm.com>
  *		 Tuan Ngo-Anh <ngoanh@de.ibm.com>
@@ -92,7 +92,7 @@
 {
 	struct tape_device *tdev;
 
-	tdev = (struct tape_device *) dev->driver_data;
+	tdev = dev_get_drvdata(dev);
 	return scnprintf(buf, PAGE_SIZE, "%i\n", tdev->medium_state);
 }
 
@@ -104,7 +104,7 @@
 {
 	struct tape_device *tdev;
 
-	tdev = (struct tape_device *) dev->driver_data;
+	tdev = dev_get_drvdata(dev);
 	return scnprintf(buf, PAGE_SIZE, "%i\n", tdev->first_minor);
 }
 
@@ -116,7 +116,7 @@
 {
 	struct tape_device *tdev;
 
-	tdev = (struct tape_device *) dev->driver_data;
+	tdev = dev_get_drvdata(dev);
 	return scnprintf(buf, PAGE_SIZE, "%s\n", (tdev->first_minor < 0) ?
 		"OFFLINE" : tape_state_verbose[tdev->tape_state]);
 }
@@ -130,7 +130,7 @@
 	struct tape_device *tdev;
 	ssize_t rc;
 
-	tdev = (struct tape_device *) dev->driver_data;
+	tdev = dev_get_drvdata(dev);
 	if (tdev->first_minor < 0)
 		return scnprintf(buf, PAGE_SIZE, "N/A\n");
 
@@ -156,7 +156,7 @@
 {
 	struct tape_device *tdev;
 
-	tdev = (struct tape_device *) dev->driver_data;
+	tdev = dev_get_drvdata(dev);
 
 	return scnprintf(buf, PAGE_SIZE, "%i\n", tdev->char_data.block_size);
 }
@@ -380,6 +380,55 @@
 }
 
 /*
+ * Suspend device.
+ *
+ * Called by the common I/O layer if the drive should be suspended on user
+ * request. We refuse to suspend if the device is loaded or in use for the
+ * following reason:
+ * While the Linux guest is suspended, it might be logged off which causes
+ * devices to be detached. Tape devices are automatically rewound and unloaded
+ * during DETACH processing (unless the tape device was attached with the
+ * NOASSIGN or MULTIUSER option). After rewind/unload, there is no way to
+ * resume the original state of the tape device, since we would need to
+ * manually re-load the cartridge which was active at suspend time.
+ */
+int tape_generic_pm_suspend(struct ccw_device *cdev)
+{
+	struct tape_device *device;
+
+	device = dev_get_drvdata(&cdev->dev);
+	if (!device) {
+		return -ENODEV;
+	}
+
+	DBF_LH(3, "(%08x): tape_generic_pm_suspend(%p)\n",
+		device->cdev_id, device);
+
+	if (device->medium_state != MS_UNLOADED) {
+		pr_err("A cartridge is loaded in tape device %s, "
+		       "refusing to suspend\n", dev_name(&cdev->dev));
+		return -EBUSY;
+	}
+
+	spin_lock_irq(get_ccwdev_lock(device->cdev));
+	switch (device->tape_state) {
+	case TS_INIT:
+	case TS_NOT_OPER:
+	case TS_UNUSED:
+		spin_unlock_irq(get_ccwdev_lock(device->cdev));
+		break;
+	default:
+		pr_err("Tape device %s is busy, refusing to "
+		       "suspend\n", dev_name(&cdev->dev));
+		spin_unlock_irq(get_ccwdev_lock(device->cdev));
+		return -EBUSY;
+	}
+
+	DBF_LH(3, "(%08x): Drive suspended.\n", device->cdev_id);
+	return 0;
+}
+
+/*
  * Set device offline.
  *
  * Called by the common I/O layer if the drive should set offline on user
@@ -391,7 +440,7 @@
 {
 	struct tape_device *device;
 
-	device = cdev->dev.driver_data;
+	device = dev_get_drvdata(&cdev->dev);
 	if (!device) {
 		return -ENODEV;
 	}
@@ -534,7 +583,7 @@
 		tape_put_device(device);
 		return ret;
 	}
-	cdev->dev.driver_data = device;
+	dev_set_drvdata(&cdev->dev, device);
 	cdev->handler = __tape_do_irq;
 	device->cdev = cdev;
 	ccw_device_get_id(cdev, &dev_id);
@@ -573,7 +622,7 @@
 {
 	struct tape_device *	device;
 
-	device = cdev->dev.driver_data;
+	device = dev_get_drvdata(&cdev->dev);
 	if (!device) {
 		return;
 	}
@@ -613,9 +662,9 @@
 			tape_cleanup_device(device);
 	}
 
-	if (cdev->dev.driver_data != NULL) {
+	if (dev_get_drvdata(&cdev->dev)) {
 		sysfs_remove_group(&cdev->dev.kobj, &tape_attr_group);
-		cdev->dev.driver_data = tape_put_device(cdev->dev.driver_data);
+		dev_set_drvdata(&cdev->dev, tape_put_device(dev_get_drvdata(&cdev->dev)));
 	}
 }
 
@@ -1011,7 +1060,7 @@
 	struct tape_request *request;
 	int rc;
 
-	device = (struct tape_device *) cdev->dev.driver_data;
+	device = dev_get_drvdata(&cdev->dev);
 	if (device == NULL) {
 		return;
 	}
@@ -1273,6 +1322,7 @@
 EXPORT_SYMBOL(tape_generic_probe);
 EXPORT_SYMBOL(tape_generic_online);
 EXPORT_SYMBOL(tape_generic_offline);
+EXPORT_SYMBOL(tape_generic_pm_suspend);
 EXPORT_SYMBOL(tape_put_device);
 EXPORT_SYMBOL(tape_get_device_reference);
 EXPORT_SYMBOL(tape_state_verbose);
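
The tape driver wires tape_generic_pm_suspend() into the new .freeze callback of struct ccw_driver and simply refuses to suspend while a cartridge is loaded or I/O is active. The general shape of such a veto, sketched with invented names (assumes <asm/ccwdev.h>; only the -EBUSY return is what the PM core acts on):

#include <linux/device.h>
#include <linux/kernel.h>
#include <asm/ccwdev.h>

struct example_dev {
	int in_use;			/* set while requests are outstanding */
};

static int example_freeze(struct ccw_device *cdev)
{
	struct example_dev *edev = dev_get_drvdata(&cdev->dev);

	if (!edev)
		return -ENODEV;
	if (edev->in_use) {
		pr_err("%s is busy, refusing to suspend\n",
		       dev_name(&cdev->dev));
		return -EBUSY;		/* aborts the freeze transition */
	}
	return 0;
}

static struct ccw_driver example_ccw_driver = {
	.name	= "example",
	.freeze	= example_freeze,
	/* .ids, .probe, .set_online, ... as usual */
};
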
diff --git a/drivers/s390/char/vmlogrdr.c b/drivers/s390/char/vmlogrdr.c
index d8a2289..411cfa3 100644
--- a/drivers/s390/char/vmlogrdr.c
+++ b/drivers/s390/char/vmlogrdr.c
@@ -3,7 +3,7 @@
  *	character device driver for reading z/VM system service records
  *
  *
- *	Copyright 2004 IBM Corporation
+ *	Copyright IBM Corp. 2004, 2009
  *	character device driver for reading z/VM system service records,
  *	Version 1.0
  *	Author(s): Xenia Tkatschow <xenia@us.ibm.com>
@@ -504,7 +504,7 @@
 					struct device_attribute *attr,
 					const char * buf, size_t count)
 {
-	struct vmlogrdr_priv_t *priv = dev->driver_data;
+	struct vmlogrdr_priv_t *priv = dev_get_drvdata(dev);
 	ssize_t ret = count;
 
 	switch (buf[0]) {
@@ -525,7 +525,7 @@
 				       struct device_attribute *attr,
 				       char *buf)
 {
-	struct vmlogrdr_priv_t *priv = dev->driver_data;
+	struct vmlogrdr_priv_t *priv = dev_get_drvdata(dev);
 	return sprintf(buf, "%u\n", priv->autopurge);
 }
 
@@ -541,7 +541,7 @@
 
 	char cp_command[80];
 	char cp_response[80];
-	struct vmlogrdr_priv_t *priv = dev->driver_data;
+	struct vmlogrdr_priv_t *priv = dev_get_drvdata(dev);
 
 	if (buf[0] != '1')
 		return -EINVAL;
@@ -578,7 +578,7 @@
 					    struct device_attribute *attr,
 					    const char *buf, size_t count)
 {
-	struct vmlogrdr_priv_t *priv = dev->driver_data;
+	struct vmlogrdr_priv_t *priv = dev_get_drvdata(dev);
 	ssize_t ret = count;
 
 	switch (buf[0]) {
@@ -599,7 +599,7 @@
 					   struct device_attribute *attr,
 					   char *buf)
 {
-	struct vmlogrdr_priv_t *priv = dev->driver_data;
+	struct vmlogrdr_priv_t *priv = dev_get_drvdata(dev);
 	return sprintf(buf, "%u\n", priv->autorecording);
 }
 
@@ -612,7 +612,7 @@
 					struct device_attribute *attr,
 					const char * buf, size_t count)
 {
-	struct vmlogrdr_priv_t *priv = dev->driver_data;
+	struct vmlogrdr_priv_t *priv = dev_get_drvdata(dev);
 	ssize_t ret;
 
 	switch (buf[0]) {
@@ -660,6 +660,29 @@
 	NULL,
 };
 
+static int vmlogrdr_pm_prepare(struct device *dev)
+{
+	int rc;
+	struct vmlogrdr_priv_t *priv = dev_get_drvdata(dev);
+
+	rc = 0;
+	if (priv) {
+		spin_lock_bh(&priv->priv_lock);
+		if (priv->dev_in_use)
+			rc = -EBUSY;
+		spin_unlock_bh(&priv->priv_lock);
+	}
+	if (rc)
+		pr_err("vmlogrdr: device %s is busy. Refusing to suspend.\n",
+		       dev_name(dev));
+	return rc;
+}
+
+static struct dev_pm_ops vmlogrdr_pm_ops = {
+	.prepare = vmlogrdr_pm_prepare,
+};
+
 static struct attribute_group vmlogrdr_attr_group = {
 	.attrs = vmlogrdr_attrs,
 };
@@ -668,6 +691,7 @@
 static struct device_driver vmlogrdr_driver = {
 	.name = "vmlogrdr",
 	.bus  = &iucv_bus,
+	.pm = &vmlogrdr_pm_ops,
 };
 
 
@@ -729,6 +753,7 @@
 		dev->bus = &iucv_bus;
 		dev->parent = iucv_root;
 		dev->driver = &vmlogrdr_driver;
+		dev_set_drvdata(dev, priv);
 		/*
 		 * The release function could be called after the
 		 * module has been unloaded. Its _only_ task is to
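
vmlogrdr sits on the iucv bus with a plain struct device_driver, so it attaches a dev_pm_ops whose .prepare callback vetoes the transition while the device node is in use. A minimal sketch of that arrangement (identifiers invented, not part of the patch):

#include <linux/device.h>
#include <linux/pm.h>

struct example_priv {
	int dev_in_use;			/* set while the char device is open */
};

static int example_pm_prepare(struct device *dev)
{
	struct example_priv *priv = dev_get_drvdata(dev);

	/* refuse to suspend while the device node is in use */
	if (priv && priv->dev_in_use)
		return -EBUSY;
	return 0;
}

static struct dev_pm_ops example_pm_ops = {
	.prepare = example_pm_prepare,
};

static struct device_driver example_bus_driver = {
	.name = "example",
	.pm   = &example_pm_ops,
	/* .bus must be set before driver_register() */
};
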
diff --git a/drivers/s390/char/vmur.c b/drivers/s390/char/vmur.c
index 5dcef81..7d9e67c 100644
--- a/drivers/s390/char/vmur.c
+++ b/drivers/s390/char/vmur.c
@@ -2,7 +2,7 @@
  * Linux driver for System z and s390 unit record devices
  * (z/VM virtual punch, reader, printer)
  *
- * Copyright IBM Corp. 2001, 2007
+ * Copyright IBM Corp. 2001, 2009
  * Authors: Malcolm Beattie <beattiem@uk.ibm.com>
  *	    Michael Holzheu <holzheu@de.ibm.com>
  *	    Frank Munzert <munzert@de.ibm.com>
@@ -60,6 +60,7 @@
 static void ur_remove(struct ccw_device *cdev);
 static int ur_set_online(struct ccw_device *cdev);
 static int ur_set_offline(struct ccw_device *cdev);
+static int ur_pm_suspend(struct ccw_device *cdev);
 
 static struct ccw_driver ur_driver = {
 	.name		= "vmur",
@@ -69,6 +70,7 @@
 	.remove		= ur_remove,
 	.set_online	= ur_set_online,
 	.set_offline	= ur_set_offline,
+	.freeze		= ur_pm_suspend,
 };
 
 static DEFINE_MUTEX(vmur_mutex);
@@ -78,11 +80,11 @@
  *
  * Each ur device (urd) contains a reference to its corresponding ccw device
  * (cdev) using the urd->cdev pointer. Each ccw device has a reference to the
- * ur device using the cdev->dev.driver_data pointer.
+ * ur device, accessible via dev_get_drvdata(&cdev->dev).
  *
  * urd references:
  * - ur_probe gets a urd reference, ur_remove drops the reference
- *   (cdev->dev.driver_data)
+ *   dev_get_drvdata(&cdev->dev)
  * - ur_open gets a urd reference, ur_release drops the reference
  *   (urf->urd)
  *
@@ -90,7 +92,7 @@
  * - urdev_alloc get a cdev reference (urd->cdev)
  * - urdev_free drops the cdev reference (urd->cdev)
  *
- * Setting and clearing of cdev->dev.driver_data is protected by the ccwdev lock
+ * Setting and clearing of the ccw device's driver data is protected by the ccwdev lock
  */
 static struct urdev *urdev_alloc(struct ccw_device *cdev)
 {
@@ -129,7 +131,7 @@
 	unsigned long flags;
 
 	spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
-	urd = cdev->dev.driver_data;
+	urd = dev_get_drvdata(&cdev->dev);
 	if (urd)
 		urdev_get(urd);
 	spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
@@ -158,6 +160,28 @@
 }
 
 /*
+ * State and contents of ur devices can be changed by class D users issuing
+ * CP commands such as PURGE or TRANSFER, while the Linux guest is suspended.
+ * Also the Linux guest might be logged off, which causes all active spool
+ * files to be closed.
+ * So we cannot guarantee that spool files are still the same when the Linux
+ * guest is resumed. In order to avoid unpredictable results at resume time
+ * we simply refuse to suspend if a ur device node is open.
+ */
+static int ur_pm_suspend(struct ccw_device *cdev)
+{
+	struct urdev *urd = dev_get_drvdata(&cdev->dev);
+
+	TRACE("ur_pm_suspend: cdev=%p\n", cdev);
+	if (urd->open_flag) {
+		pr_err("Unit record device %s is busy, %s refusing to "
+		       "suspend.\n", dev_name(&cdev->dev), ur_banner);
+		return -EBUSY;
+	}
+	return 0;
+}
+
+/*
  * Low-level functions to do I/O to a ur device.
  *     alloc_chan_prog
  *     free_chan_prog
@@ -286,7 +310,7 @@
 		TRACE("ur_int_handler: unsolicited interrupt\n");
 		return;
 	}
-	urd = cdev->dev.driver_data;
+	urd = dev_get_drvdata(&cdev->dev);
 	BUG_ON(!urd);
 	/* On special conditions irb is an error pointer */
 	if (IS_ERR(irb))
@@ -832,7 +856,7 @@
 		goto fail_remove_attr;
 	}
 	spin_lock_irq(get_ccwdev_lock(cdev));
-	cdev->dev.driver_data = urd;
+	dev_set_drvdata(&cdev->dev, urd);
 	spin_unlock_irq(get_ccwdev_lock(cdev));
 
 	mutex_unlock(&vmur_mutex);
@@ -972,8 +996,8 @@
 	ur_remove_attributes(&cdev->dev);
 
 	spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
-	urdev_put(cdev->dev.driver_data);
-	cdev->dev.driver_data = NULL;
+	urdev_put(dev_get_drvdata(&cdev->dev));
+	dev_set_drvdata(&cdev->dev, NULL);
 	spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
 
 	mutex_unlock(&vmur_mutex);
diff --git a/drivers/s390/char/vmwatchdog.c b/drivers/s390/char/vmwatchdog.c
index 21a2a82..cb7854c 100644
--- a/drivers/s390/char/vmwatchdog.c
+++ b/drivers/s390/char/vmwatchdog.c
@@ -1,17 +1,23 @@
 /*
  * Watchdog implementation based on z/VM Watchdog Timer API
  *
+ * Copyright IBM Corp. 2004, 2009
+ *
  * The user space watchdog daemon can use this driver as
  * /dev/vmwatchdog to have z/VM execute the specified CP
  * command when the timeout expires. The default command is
  * "IPL", which will cause an immediate reboot.
  */
+#define KMSG_COMPONENT "vmwatchdog"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
 #include <linux/init.h>
 #include <linux/fs.h>
 #include <linux/kernel.h>
 #include <linux/miscdevice.h>
 #include <linux/module.h>
 #include <linux/moduleparam.h>
+#include <linux/suspend.h>
 #include <linux/watchdog.h>
 #include <linux/smp_lock.h>
 
@@ -43,6 +49,9 @@
 static unsigned long vmwdt_is_open;
 static int vmwdt_expect_close;
 
+#define VMWDT_OPEN	0	/* devnode is open or suspend in progress */
+#define VMWDT_RUNNING	1	/* The watchdog is armed */
+
 enum vmwdt_func {
 	/* function codes */
 	wdt_init   = 0,
@@ -92,6 +101,7 @@
 	EBC_TOUPPER(ebc_cmd, MAX_CMDLEN);
 
 	func = vmwdt_conceal ? (wdt_init | wdt_conceal) : wdt_init;
+	set_bit(VMWDT_RUNNING, &vmwdt_is_open);
 	ret = __diag288(func, vmwdt_interval, ebc_cmd, len);
 	WARN_ON(ret != 0);
 	kfree(ebc_cmd);
@@ -102,6 +112,7 @@
 {
 	int ret = __diag288(wdt_cancel, 0, "", 0);
 	WARN_ON(ret != 0);
+	clear_bit(VMWDT_RUNNING, &vmwdt_is_open);
 	return ret;
 }
 
@@ -123,13 +134,13 @@
 {
 	int ret;
 	lock_kernel();
-	if (test_and_set_bit(0, &vmwdt_is_open)) {
+	if (test_and_set_bit(VMWDT_OPEN, &vmwdt_is_open)) {
 		unlock_kernel();
 		return -EBUSY;
 	}
 	ret = vmwdt_keepalive();
 	if (ret)
-		clear_bit(0, &vmwdt_is_open);
+		clear_bit(VMWDT_OPEN, &vmwdt_is_open);
 	unlock_kernel();
 	return ret ? ret : nonseekable_open(i, f);
 }
@@ -139,7 +150,7 @@
 	if (vmwdt_expect_close == 42)
 		vmwdt_disable();
 	vmwdt_expect_close = 0;
-	clear_bit(0, &vmwdt_is_open);
+	clear_bit(VMWDT_OPEN, &vmwdt_is_open);
 	return 0;
 }
 
@@ -223,6 +234,57 @@
 	return count;
 }
 
+static int vmwdt_resume(void)
+{
+	clear_bit(VMWDT_OPEN, &vmwdt_is_open);
+	return NOTIFY_DONE;
+}
+
+/*
+ * It makes no sense to go into suspend while the watchdog is running.
+ * Depending on the memory size, the watchdog might trigger while we
+ * are still saving the memory image.
+ * We reuse the open flag to ensure that suspend and watchdog open are
+ * mutually exclusive operations.
+ */
+static int vmwdt_suspend(void)
+{
+	if (test_and_set_bit(VMWDT_OPEN, &vmwdt_is_open)) {
+		pr_err("The watchdog is in use. "
+			"This prevents hibernation or suspend.\n");
+		return NOTIFY_BAD;
+	}
+	if (test_bit(VMWDT_RUNNING, &vmwdt_is_open)) {
+		clear_bit(VMWDT_OPEN, &vmwdt_is_open);
+		pr_err("The watchdog is running. "
+			"This prevents hibernation or suspend.\n");
+		return NOTIFY_BAD;
+	}
+	return NOTIFY_DONE;
+}
+
+/*
+ * This function is called for suspend and resume.
+ */
+static int vmwdt_power_event(struct notifier_block *this, unsigned long event,
+			     void *ptr)
+{
+	switch (event) {
+	case PM_POST_HIBERNATION:
+	case PM_POST_SUSPEND:
+		return vmwdt_resume();
+	case PM_HIBERNATION_PREPARE:
+	case PM_SUSPEND_PREPARE:
+		return vmwdt_suspend();
+	default:
+		return NOTIFY_DONE;
+	}
+}
+
+static struct notifier_block vmwdt_power_notifier = {
+	.notifier_call = vmwdt_power_event,
+};
+
 static const struct file_operations vmwdt_fops = {
 	.open    = &vmwdt_open,
 	.release = &vmwdt_close,
@@ -244,12 +306,21 @@
 	ret = vmwdt_probe();
 	if (ret)
 		return ret;
-	return misc_register(&vmwdt_dev);
+	ret = register_pm_notifier(&vmwdt_power_notifier);
+	if (ret)
+		return ret;
+	ret = misc_register(&vmwdt_dev);
+	if (ret) {
+		unregister_pm_notifier(&vmwdt_power_notifier);
+		return ret;
+	}
+	return 0;
 }
 module_init(vmwdt_init);
 
 static void __exit vmwdt_exit(void)
 {
-	WARN_ON(misc_deregister(&vmwdt_dev) != 0);
+	unregister_pm_notifier(&vmwdt_power_notifier);
+	misc_deregister(&vmwdt_dev);
 }
 module_exit(vmwdt_exit);
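
The watchdog cannot use per-device callbacks either, so it registers a PM notifier and reuses its open-flag bit to make "device open" and "suspend in progress" mutually exclusive; NOTIFY_BAD from the PREPARE events aborts the transition. A sketch of that technique with invented names (simplified to the mutual-exclusion core, not part of the patch):

#include <linux/bitops.h>
#include <linux/notifier.h>
#include <linux/suspend.h>

#define EX_OPEN	0			/* device open or suspend in progress */

static unsigned long ex_flags;

static int ex_power_event(struct notifier_block *nb, unsigned long event,
			  void *ptr)
{
	switch (event) {
	case PM_HIBERNATION_PREPARE:
	case PM_SUSPEND_PREPARE:
		/* grab the "open" bit; if it is already set, veto suspend */
		if (test_and_set_bit(EX_OPEN, &ex_flags))
			return NOTIFY_BAD;
		return NOTIFY_DONE;
	case PM_POST_HIBERNATION:
	case PM_POST_SUSPEND:
		clear_bit(EX_OPEN, &ex_flags);
		return NOTIFY_DONE;
	default:
		return NOTIFY_DONE;
	}
}

static struct notifier_block ex_power_notifier = {
	.notifier_call = ex_power_event,
};

/* module init: register_pm_notifier(&ex_power_notifier); exit: unregister */
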
diff --git a/drivers/s390/cio/ccwgroup.c b/drivers/s390/cio/ccwgroup.c
index 22ce765..a5a62f1 100644
--- a/drivers/s390/cio/ccwgroup.c
+++ b/drivers/s390/cio/ccwgroup.c
@@ -1,11 +1,10 @@
 /*
- *  drivers/s390/cio/ccwgroup.c
  *  bus driver for ccwgroup
  *
- *    Copyright (C) 2002 IBM Deutschland Entwicklung GmbH,
- *                       IBM Corporation
- *    Author(s): Arnd Bergmann (arndb@de.ibm.com)
- *               Cornelia Huck (cornelia.huck@de.ibm.com)
+ *  Copyright IBM Corp. 2002, 2009
+ *
+ *  Author(s): Arnd Bergmann (arndb@de.ibm.com)
+ *	       Cornelia Huck (cornelia.huck@de.ibm.com)
  */
 #include <linux/module.h>
 #include <linux/errno.h>
@@ -501,6 +500,74 @@
 		gdrv->shutdown(gdev);
 }
 
+static int ccwgroup_pm_prepare(struct device *dev)
+{
+	struct ccwgroup_device *gdev = to_ccwgroupdev(dev);
+	struct ccwgroup_driver *gdrv = to_ccwgroupdrv(gdev->dev.driver);
+
+	/* Fail while device is being set online/offline. */
+	if (atomic_read(&gdev->onoff))
+		return -EAGAIN;
+
+	if (!gdev->dev.driver || gdev->state != CCWGROUP_ONLINE)
+		return 0;
+
+	return gdrv->prepare ? gdrv->prepare(gdev) : 0;
+}
+
+static void ccwgroup_pm_complete(struct device *dev)
+{
+	struct ccwgroup_device *gdev = to_ccwgroupdev(dev);
+	struct ccwgroup_driver *gdrv = to_ccwgroupdrv(dev->driver);
+
+	if (!gdev->dev.driver || gdev->state != CCWGROUP_ONLINE)
+		return;
+
+	if (gdrv->complete)
+		gdrv->complete(gdev);
+}
+
+static int ccwgroup_pm_freeze(struct device *dev)
+{
+	struct ccwgroup_device *gdev = to_ccwgroupdev(dev);
+	struct ccwgroup_driver *gdrv = to_ccwgroupdrv(gdev->dev.driver);
+
+	if (!gdev->dev.driver || gdev->state != CCWGROUP_ONLINE)
+		return 0;
+
+	return gdrv->freeze ? gdrv->freeze(gdev) : 0;
+}
+
+static int ccwgroup_pm_thaw(struct device *dev)
+{
+	struct ccwgroup_device *gdev = to_ccwgroupdev(dev);
+	struct ccwgroup_driver *gdrv = to_ccwgroupdrv(gdev->dev.driver);
+
+	if (!gdev->dev.driver || gdev->state != CCWGROUP_ONLINE)
+		return 0;
+
+	return gdrv->thaw ? gdrv->thaw(gdev) : 0;
+}
+
+static int ccwgroup_pm_restore(struct device *dev)
+{
+	struct ccwgroup_device *gdev = to_ccwgroupdev(dev);
+	struct ccwgroup_driver *gdrv = to_ccwgroupdrv(gdev->dev.driver);
+
+	if (!gdev->dev.driver || gdev->state != CCWGROUP_ONLINE)
+		return 0;
+
+	return gdrv->restore ? gdrv->restore(gdev) : 0;
+}
+
+static struct dev_pm_ops ccwgroup_pm_ops = {
+	.prepare = ccwgroup_pm_prepare,
+	.complete = ccwgroup_pm_complete,
+	.freeze = ccwgroup_pm_freeze,
+	.thaw = ccwgroup_pm_thaw,
+	.restore = ccwgroup_pm_restore,
+};
+
 static struct bus_type ccwgroup_bus_type = {
 	.name   = "ccwgroup",
 	.match  = ccwgroup_bus_match,
@@ -508,6 +575,7 @@
 	.probe  = ccwgroup_probe,
 	.remove = ccwgroup_remove,
 	.shutdown = ccwgroup_shutdown,
+	.pm = &ccwgroup_pm_ops,
 };
 
 
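
The ccwgroup bus gains a dev_pm_ops that merely forwards each event to the bound group driver, and only when a driver is bound and the device is online; a missing callback counts as success. Sketch of that delegation (the example_* types and the EXAMPLE_ONLINE state are invented, not part of the patch):

#include <linux/device.h>
#include <linux/kernel.h>

#define EXAMPLE_ONLINE	1

struct example_gdev {
	struct device dev;
	int state;
};

struct example_gdrv {
	struct device_driver drv;
	int (*freeze)(struct example_gdev *);
};

#define to_example_gdev(d)	container_of(d, struct example_gdev, dev)
#define to_example_gdrv(d)	container_of(d, struct example_gdrv, drv)

static int example_bus_freeze(struct device *dev)
{
	struct example_gdev *gdev = to_example_gdev(dev);
	struct example_gdrv *gdrv;

	/* no driver bound or device not online: nothing to quiesce */
	if (!dev->driver || gdev->state != EXAMPLE_ONLINE)
		return 0;
	gdrv = to_example_gdrv(dev->driver);
	/* a driver without a freeze callback implicitly agrees to suspend */
	return gdrv->freeze ? gdrv->freeze(gdev) : 0;
}
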
diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c
index 883f16f..1ecd3e5 100644
--- a/drivers/s390/cio/chsc.c
+++ b/drivers/s390/cio/chsc.c
@@ -549,8 +549,7 @@
 	return ret;
 }
 
-static int
-__chsc_do_secm(struct channel_subsystem *css, int enable, void *page)
+int __chsc_do_secm(struct channel_subsystem *css, int enable, void *page)
 {
 	struct {
 		struct chsc_header request;
diff --git a/drivers/s390/cio/chsc.h b/drivers/s390/cio/chsc.h
index ba59bce..425e8f8 100644
--- a/drivers/s390/cio/chsc.h
+++ b/drivers/s390/cio/chsc.h
@@ -90,6 +90,7 @@
 extern int chsc_enable_facility(int);
 struct channel_subsystem;
 extern int chsc_secm(struct channel_subsystem *, int);
+int __chsc_do_secm(struct channel_subsystem *css, int enable, void *page);
 
 int chsc_chp_vary(struct chp_id chpid, int on);
 int chsc_determine_channel_path_desc(struct chp_id chpid, int fmt, int rfmt,
diff --git a/drivers/s390/cio/chsc_sch.c b/drivers/s390/cio/chsc_sch.c
index 93eca17..cc5144b 100644
--- a/drivers/s390/cio/chsc_sch.c
+++ b/drivers/s390/cio/chsc_sch.c
@@ -1,7 +1,8 @@
 /*
  * Driver for s390 chsc subchannels
  *
- * Copyright IBM Corp. 2008
+ * Copyright IBM Corp. 2008, 2009
+ *
  * Author(s): Cornelia Huck <cornelia.huck@de.ibm.com>
  *
  */
@@ -112,6 +113,31 @@
 	cio_disable_subchannel(sch);
 }
 
+static int chsc_subchannel_prepare(struct subchannel *sch)
+{
+	int cc;
+	struct schib schib;
+	/*
+	 * Don't allow suspend while the subchannel is not idle
+	 * since we don't have a way to clear the subchannel and
+	 * cannot disable it with a request running.
+	 */
+	cc = stsch(sch->schid, &schib);
+	if (!cc && scsw_stctl(&schib.scsw))
+		return -EAGAIN;
+	return 0;
+}
+
+static int chsc_subchannel_freeze(struct subchannel *sch)
+{
+	return cio_disable_subchannel(sch);
+}
+
+static int chsc_subchannel_restore(struct subchannel *sch)
+{
+	return cio_enable_subchannel(sch, (u32)(unsigned long)sch);
+}
+
 static struct css_device_id chsc_subchannel_ids[] = {
 	{ .match_flags = 0x1, .type =SUBCHANNEL_TYPE_CHSC, },
 	{ /* end of list */ },
@@ -125,6 +151,10 @@
 	.probe = chsc_subchannel_probe,
 	.remove = chsc_subchannel_remove,
 	.shutdown = chsc_subchannel_shutdown,
+	.prepare = chsc_subchannel_prepare,
+	.freeze = chsc_subchannel_freeze,
+	.thaw = chsc_subchannel_restore,
+	.restore = chsc_subchannel_restore,
 	.name = "chsc_subchannel",
 };
 
diff --git a/drivers/s390/cio/cmf.c b/drivers/s390/cio/cmf.c
index dc98b2c..30f5161 100644
--- a/drivers/s390/cio/cmf.c
+++ b/drivers/s390/cio/cmf.c
@@ -1204,6 +1204,11 @@
 
 DEVICE_ATTR(cmb_enable, 0644, cmb_enable_show, cmb_enable_store);
 
+int ccw_set_cmf(struct ccw_device *cdev, int enable)
+{
+	return cmbops->set(cdev, enable ? 2 : 0);
+}
+
 /**
  * enable_cmf() - switch on the channel measurement for a specific device
  *  @cdev:	The ccw device to be enabled
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c
index 0085d89..85d43c6 100644
--- a/drivers/s390/cio/css.c
+++ b/drivers/s390/cio/css.c
@@ -1,10 +1,10 @@
 /*
- *  drivers/s390/cio/css.c
- *  driver for channel subsystem
+ * driver for channel subsystem
  *
- *    Copyright IBM Corp. 2002,2008
- *    Author(s): Arnd Bergmann (arndb@de.ibm.com)
- *		 Cornelia Huck (cornelia.huck@de.ibm.com)
+ * Copyright IBM Corp. 2002, 2009
+ *
+ * Author(s): Arnd Bergmann (arndb@de.ibm.com)
+ *	      Cornelia Huck (cornelia.huck@de.ibm.com)
  */
 
 #define KMSG_COMPONENT "cio"
@@ -17,6 +17,7 @@
 #include <linux/errno.h>
 #include <linux/list.h>
 #include <linux/reboot.h>
+#include <linux/suspend.h>
 #include <asm/isc.h>
 #include <asm/crw.h>
 
@@ -780,6 +781,79 @@
 };
 
 /*
+ * Since the css devices are not on a bus and have neither a class
+ * nor a special device type, we cannot stop/restart channel
+ * path measurements via the normal suspend/resume callbacks, but have
+ * to use notifiers.
+ */
+static int css_power_event(struct notifier_block *this, unsigned long event,
+			   void *ptr)
+{
+	void *secm_area;
+	int ret, i;
+
+	switch (event) {
+	case PM_HIBERNATION_PREPARE:
+	case PM_SUSPEND_PREPARE:
+		ret = NOTIFY_DONE;
+		for (i = 0; i <= __MAX_CSSID; i++) {
+			struct channel_subsystem *css;
+
+			css = channel_subsystems[i];
+			mutex_lock(&css->mutex);
+			if (!css->cm_enabled) {
+				mutex_unlock(&css->mutex);
+				continue;
+			}
+			secm_area = (void *)get_zeroed_page(GFP_KERNEL |
+							    GFP_DMA);
+			if (secm_area) {
+				if (__chsc_do_secm(css, 0, secm_area))
+					ret = NOTIFY_BAD;
+				free_page((unsigned long)secm_area);
+			} else
+				ret = NOTIFY_BAD;
+
+			mutex_unlock(&css->mutex);
+		}
+		break;
+	case PM_POST_HIBERNATION:
+	case PM_POST_SUSPEND:
+		ret = NOTIFY_DONE;
+		for (i = 0; i <= __MAX_CSSID; i++) {
+			struct channel_subsystem *css;
+
+			css = channel_subsystems[i];
+			mutex_lock(&css->mutex);
+			if (!css->cm_enabled) {
+				mutex_unlock(&css->mutex);
+				continue;
+			}
+			secm_area = (void *)get_zeroed_page(GFP_KERNEL |
+							    GFP_DMA);
+			if (secm_area) {
+				if (__chsc_do_secm(css, 1, secm_area))
+					ret = NOTIFY_BAD;
+				free_page((unsigned long)secm_area);
+			} else
+				ret = NOTIFY_BAD;
+
+			mutex_unlock(&css->mutex);
+		}
+		/* search for subchannels that appeared during hibernation */
+		css_schedule_reprobe();
+		break;
+	default:
+		ret = NOTIFY_DONE;
+	}
+	return ret;
+}
+
+static struct notifier_block css_power_notifier = {
+	.notifier_call = css_power_event,
+};
+
+/*
  * Now that the driver core is running, we can setup our channel subsystem.
  * The struct subchannel's are created during probing (except for the
  * static console subchannel).
@@ -852,6 +926,11 @@
 	ret = register_reboot_notifier(&css_reboot_notifier);
 	if (ret)
 		goto out_unregister;
+	ret = register_pm_notifier(&css_power_notifier);
+	if (ret) {
+		unregister_reboot_notifier(&css_reboot_notifier);
+		goto out_unregister;
+	}
 	css_init_done = 1;
 
 	/* Enable default isc for I/O subchannels. */
@@ -953,6 +1032,73 @@
 	return ret;
 }
 
+static int css_pm_prepare(struct device *dev)
+{
+	struct subchannel *sch = to_subchannel(dev);
+	struct css_driver *drv;
+
+	if (mutex_is_locked(&sch->reg_mutex))
+		return -EAGAIN;
+	if (!sch->dev.driver)
+		return 0;
+	drv = to_cssdriver(sch->dev.driver);
+	/* Notify drivers that they may not register children. */
+	return drv->prepare ? drv->prepare(sch) : 0;
+}
+
+static void css_pm_complete(struct device *dev)
+{
+	struct subchannel *sch = to_subchannel(dev);
+	struct css_driver *drv;
+
+	if (!sch->dev.driver)
+		return;
+	drv = to_cssdriver(sch->dev.driver);
+	if (drv->complete)
+		drv->complete(sch);
+}
+
+static int css_pm_freeze(struct device *dev)
+{
+	struct subchannel *sch = to_subchannel(dev);
+	struct css_driver *drv;
+
+	if (!sch->dev.driver)
+		return 0;
+	drv = to_cssdriver(sch->dev.driver);
+	return drv->freeze ? drv->freeze(sch) : 0;
+}
+
+static int css_pm_thaw(struct device *dev)
+{
+	struct subchannel *sch = to_subchannel(dev);
+	struct css_driver *drv;
+
+	if (!sch->dev.driver)
+		return 0;
+	drv = to_cssdriver(sch->dev.driver);
+	return drv->thaw ? drv->thaw(sch) : 0;
+}
+
+static int css_pm_restore(struct device *dev)
+{
+	struct subchannel *sch = to_subchannel(dev);
+	struct css_driver *drv;
+
+	if (!sch->dev.driver)
+		return 0;
+	drv = to_cssdriver(sch->dev.driver);
+	return drv->restore ? drv->restore(sch) : 0;
+}
+
+static struct dev_pm_ops css_pm_ops = {
+	.prepare = css_pm_prepare,
+	.complete = css_pm_complete,
+	.freeze = css_pm_freeze,
+	.thaw = css_pm_thaw,
+	.restore = css_pm_restore,
+};
+
 struct bus_type css_bus_type = {
 	.name     = "css",
 	.match    = css_bus_match,
@@ -960,6 +1106,7 @@
 	.remove   = css_remove,
 	.shutdown = css_shutdown,
 	.uevent   = css_uevent,
+	.pm = &css_pm_ops,
 };
 
 /**
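
css_power_event() above repeats the same block for the prepare and the post events: take the css mutex, skip channel subsystems without measurements enabled, allocate a zeroed page in the DMA zone for the chsc request, and toggle the secm facility. The duplicated core, factored into one helper for clarity (a sketch only; assumes the driver-internal css.h/chsc.h used above):

#include <linux/gfp.h>
#include <linux/notifier.h>
#include "css.h"	/* struct channel_subsystem, driver-internal */
#include "chsc.h"	/* __chsc_do_secm(), made non-static by this patch */

static int example_toggle_secm(struct channel_subsystem *css, int enable)
{
	void *secm_area;
	int ret = NOTIFY_DONE;

	secm_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!secm_area)
		return NOTIFY_BAD;
	if (__chsc_do_secm(css, enable, secm_area))
		ret = NOTIFY_BAD;
	free_page((unsigned long)secm_area);
	return ret;
}
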
diff --git a/drivers/s390/cio/css.h b/drivers/s390/cio/css.h
index 57ebf12..9763eee 100644
--- a/drivers/s390/cio/css.h
+++ b/drivers/s390/cio/css.h
@@ -70,6 +70,11 @@
  * @probe: function called on probe
  * @remove: function called on remove
  * @shutdown: called at device shutdown
+ * @prepare: prepare for pm state transition
+ * @complete: undo work done in @prepare
+ * @freeze: callback for freezing during hibernation snapshotting
+ * @thaw: undo work done in @freeze
+ * @restore: callback for restoring after hibernation
  * @name: name of the device driver
  */
 struct css_driver {
@@ -82,6 +87,11 @@
 	int (*probe)(struct subchannel *);
 	int (*remove)(struct subchannel *);
 	void (*shutdown)(struct subchannel *);
+	int (*prepare) (struct subchannel *);
+	void (*complete) (struct subchannel *);
+	int (*freeze)(struct subchannel *);
+	int (*thaw) (struct subchannel *);
+	int (*restore)(struct subchannel *);
 	const char *name;
 };
 
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c
index 35441fa..3c57c1a 100644
--- a/drivers/s390/cio/device.c
+++ b/drivers/s390/cio/device.c
@@ -138,6 +138,19 @@
 };
 MODULE_DEVICE_TABLE(css, io_subchannel_ids);
 
+static int io_subchannel_prepare(struct subchannel *sch)
+{
+	struct ccw_device *cdev;
+	/*
+	 * Don't allow suspend while a ccw device registration
+	 * is still outstanding.
+	 */
+	cdev = sch_get_cdev(sch);
+	if (cdev && !device_is_registered(&cdev->dev))
+		return -EAGAIN;
+	return 0;
+}
+
 static struct css_driver io_subchannel_driver = {
 	.owner = THIS_MODULE,
 	.subchannel_type = io_subchannel_ids,
@@ -148,6 +161,7 @@
 	.probe = io_subchannel_probe,
 	.remove = io_subchannel_remove,
 	.shutdown = io_subchannel_shutdown,
+	.prepare = io_subchannel_prepare,
 };
 
 struct workqueue_struct *ccw_device_work;
@@ -1775,6 +1789,15 @@
 	return &console_cdev;
 }
 
+static int ccw_device_pm_restore(struct device *dev);
+
+int ccw_device_force_console(void)
+{
+	if (!console_cdev_in_use)
+		return -ENODEV;
+	return ccw_device_pm_restore(&console_cdev.dev);
+}
+EXPORT_SYMBOL_GPL(ccw_device_force_console);
 
 const char *cio_get_console_cdev_name(struct subchannel *sch)
 {
@@ -1895,6 +1918,242 @@
 	disable_cmf(cdev);
 }
 
+static int ccw_device_pm_prepare(struct device *dev)
+{
+	struct ccw_device *cdev = to_ccwdev(dev);
+
+	if (work_pending(&cdev->private->kick_work))
+		return -EAGAIN;
+	/* Fail while device is being set online/offline. */
+	if (atomic_read(&cdev->private->onoff))
+		return -EAGAIN;
+
+	if (cdev->online && cdev->drv && cdev->drv->prepare)
+		return cdev->drv->prepare(cdev);
+
+	return 0;
+}
+
+static void ccw_device_pm_complete(struct device *dev)
+{
+	struct ccw_device *cdev = to_ccwdev(dev);
+
+	if (cdev->online && cdev->drv && cdev->drv->complete)
+		cdev->drv->complete(cdev);
+}
+
+static int ccw_device_pm_freeze(struct device *dev)
+{
+	struct ccw_device *cdev = to_ccwdev(dev);
+	struct subchannel *sch = to_subchannel(cdev->dev.parent);
+	int ret, cm_enabled;
+
+	/* Fail suspend while device is in transitional state. */
+	if (!dev_fsm_final_state(cdev))
+		return -EAGAIN;
+	if (!cdev->online)
+		return 0;
+	if (cdev->drv && cdev->drv->freeze) {
+		ret = cdev->drv->freeze(cdev);
+		if (ret)
+			return ret;
+	}
+
+	spin_lock_irq(sch->lock);
+	cm_enabled = cdev->private->cmb != NULL;
+	spin_unlock_irq(sch->lock);
+	if (cm_enabled) {
+		/* Keep the css from writing measurement data to memory. */
+		ret = ccw_set_cmf(cdev, 0);
+		if (ret)
+			return ret;
+	}
+	/* From here on, disallow device driver I/O. */
+	spin_lock_irq(sch->lock);
+	ret = cio_disable_subchannel(sch);
+	spin_unlock_irq(sch->lock);
+
+	return ret;
+}
+
+static int ccw_device_pm_thaw(struct device *dev)
+{
+	struct ccw_device *cdev = to_ccwdev(dev);
+	struct subchannel *sch = to_subchannel(cdev->dev.parent);
+	int ret, cm_enabled;
+
+	if (!cdev->online)
+		return 0;
+
+	spin_lock_irq(sch->lock);
+	/* Allow device driver I/O again. */
+	ret = cio_enable_subchannel(sch, (u32)(addr_t)sch);
+	cm_enabled = cdev->private->cmb != NULL;
+	spin_unlock_irq(sch->lock);
+	if (ret)
+		return ret;
+
+	if (cm_enabled) {
+		ret = ccw_set_cmf(cdev, 1);
+		if (ret)
+			return ret;
+	}
+
+	if (cdev->drv && cdev->drv->thaw)
+		ret = cdev->drv->thaw(cdev);
+
+	return ret;
+}
+
+static void __ccw_device_pm_restore(struct ccw_device *cdev)
+{
+	struct subchannel *sch = to_subchannel(cdev->dev.parent);
+	int ret;
+
+	if (cio_is_console(sch->schid))
+		goto out;
+	/*
+	 * While we were sleeping, devices may have gone or become
+	 * available again. Kick re-detection.
+	 */
+	spin_lock_irq(sch->lock);
+	cdev->private->flags.resuming = 1;
+	ret = ccw_device_recognition(cdev);
+	spin_unlock_irq(sch->lock);
+	if (ret) {
+		CIO_MSG_EVENT(0, "Couldn't start recognition for device "
+			      "%s (ret=%d)\n", dev_name(&cdev->dev), ret);
+		spin_lock_irq(sch->lock);
+		cdev->private->state = DEV_STATE_DISCONNECTED;
+		spin_unlock_irq(sch->lock);
+		/* notify driver after the resume cb */
+		goto out;
+	}
+	wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev) ||
+		   cdev->private->state == DEV_STATE_DISCONNECTED);
+
+out:
+	cdev->private->flags.resuming = 0;
+}
+
+static int resume_handle_boxed(struct ccw_device *cdev)
+{
+	cdev->private->state = DEV_STATE_BOXED;
+	if (ccw_device_notify(cdev, CIO_BOXED))
+		return 0;
+	ccw_device_schedule_sch_unregister(cdev);
+	return -ENODEV;
+}
+
+static int resume_handle_disc(struct ccw_device *cdev)
+{
+	cdev->private->state = DEV_STATE_DISCONNECTED;
+	if (ccw_device_notify(cdev, CIO_GONE))
+		return 0;
+	ccw_device_schedule_sch_unregister(cdev);
+	return -ENODEV;
+}
+
+static int ccw_device_pm_restore(struct device *dev)
+{
+	struct ccw_device *cdev = to_ccwdev(dev);
+	struct subchannel *sch = to_subchannel(cdev->dev.parent);
+	int ret = 0, cm_enabled;
+
+	__ccw_device_pm_restore(cdev);
+	spin_lock_irq(sch->lock);
+	if (cio_is_console(sch->schid)) {
+		cio_enable_subchannel(sch, (u32)(addr_t)sch);
+		spin_unlock_irq(sch->lock);
+		goto out_restore;
+	}
+	cdev->private->flags.donotify = 0;
+	/* check recognition results */
+	switch (cdev->private->state) {
+	case DEV_STATE_OFFLINE:
+		break;
+	case DEV_STATE_BOXED:
+		ret = resume_handle_boxed(cdev);
+		spin_unlock_irq(sch->lock);
+		if (ret)
+			goto out;
+		goto out_restore;
+	case DEV_STATE_DISCONNECTED:
+		goto out_disc_unlock;
+	default:
+		goto out_unreg_unlock;
+	}
+	/* check if the device id has changed */
+	if (sch->schib.pmcw.dev != cdev->private->dev_id.devno) {
+		CIO_MSG_EVENT(0, "resume: sch %s: failed (devno changed from "
+			      "%04x to %04x)\n", dev_name(&sch->dev),
+			      cdev->private->dev_id.devno,
+			      sch->schib.pmcw.dev);
+		goto out_unreg_unlock;
+	}
+	/* check if the device type has changed */
+	if (!ccw_device_test_sense_data(cdev)) {
+		ccw_device_update_sense_data(cdev);
+		PREPARE_WORK(&cdev->private->kick_work,
+			     ccw_device_do_unbind_bind);
+		queue_work(ccw_device_work, &cdev->private->kick_work);
+		ret = -ENODEV;
+		goto out_unlock;
+	}
+	if (!cdev->online) {
+		ret = 0;
+		goto out_unlock;
+	}
+	ret = ccw_device_online(cdev);
+	if (ret)
+		goto out_disc_unlock;
+
+	cm_enabled = cdev->private->cmb != NULL;
+	spin_unlock_irq(sch->lock);
+
+	wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev));
+	if (cdev->private->state != DEV_STATE_ONLINE) {
+		spin_lock_irq(sch->lock);
+		goto out_disc_unlock;
+	}
+	if (cm_enabled) {
+		ret = ccw_set_cmf(cdev, 1);
+		if (ret) {
+			CIO_MSG_EVENT(2, "resume: cdev %s: cmf failed "
+				      "(rc=%d)\n", dev_name(&cdev->dev), ret);
+			ret = 0;
+		}
+	}
+
+out_restore:
+	if (cdev->online && cdev->drv && cdev->drv->restore)
+		ret = cdev->drv->restore(cdev);
+out:
+	return ret;
+
+out_disc_unlock:
+	ret = resume_handle_disc(cdev);
+	spin_unlock_irq(sch->lock);
+	if (ret)
+		return ret;
+	goto out_restore;
+
+out_unreg_unlock:
+	ccw_device_schedule_sch_unregister(cdev);
+	ret = -ENODEV;
+out_unlock:
+	spin_unlock_irq(sch->lock);
+	return ret;
+}
+
+static struct dev_pm_ops ccw_pm_ops = {
+	.prepare = ccw_device_pm_prepare,
+	.complete = ccw_device_pm_complete,
+	.freeze = ccw_device_pm_freeze,
+	.thaw = ccw_device_pm_thaw,
+	.restore = ccw_device_pm_restore,
+};
+
 struct bus_type ccw_bus_type = {
 	.name   = "ccw",
 	.match  = ccw_bus_match,
@@ -1902,6 +2161,7 @@
 	.probe  = ccw_device_probe,
 	.remove = ccw_device_remove,
 	.shutdown = ccw_device_shutdown,
+	.pm = &ccw_pm_ops,
 };
 
 /**
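
For an online ccw device the freeze path above runs in a fixed order: first the device driver quiesces its own I/O, then channel measurement is switched off so the hardware stops writing to memory, and only then is the subchannel disabled; thaw reverses the order (enable subchannel, re-enable measurement, call the driver's thaw). Condensed into one sequence for reference (a sketch; assumes the driver-internal cio headers used in this file):

#include <linux/spinlock.h>
#include <asm/ccwdev.h>
#include "cio.h"	/* cio_disable_subchannel(), struct subchannel */
#include "device.h"	/* ccw_set_cmf(), added by this patch */

static int example_freeze_sequence(struct ccw_device *cdev,
				   struct subchannel *sch, int cm_enabled)
{
	int ret;

	/* 1. let the bound driver quiesce its own I/O first */
	if (cdev->drv && cdev->drv->freeze) {
		ret = cdev->drv->freeze(cdev);
		if (ret)
			return ret;
	}
	/* 2. stop the channel measurement facility writing to memory */
	if (cm_enabled) {
		ret = ccw_set_cmf(cdev, 0);
		if (ret)
			return ret;
	}
	/* 3. disable the subchannel so no further I/O can be started */
	spin_lock_irq(sch->lock);
	ret = cio_disable_subchannel(sch);
	spin_unlock_irq(sch->lock);
	return ret;
}
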
diff --git a/drivers/s390/cio/device.h b/drivers/s390/cio/device.h
index f1cbbd9..e397510 100644
--- a/drivers/s390/cio/device.h
+++ b/drivers/s390/cio/device.h
@@ -87,6 +87,8 @@
 int ccw_device_recognition(struct ccw_device *);
 int ccw_device_online(struct ccw_device *);
 int ccw_device_offline(struct ccw_device *);
+void ccw_device_update_sense_data(struct ccw_device *);
+int ccw_device_test_sense_data(struct ccw_device *);
 void ccw_device_schedule_sch_unregister(struct ccw_device *);
 int ccw_purge_blacklisted(void);
 
@@ -133,5 +135,6 @@
 void retry_set_schib(struct ccw_device *cdev);
 void cmf_retry_copy_block(struct ccw_device *);
 int cmf_reenable(struct ccw_device *);
+int ccw_set_cmf(struct ccw_device *cdev, int enable);
 extern struct device_attribute dev_attr_cmb_enable;
 #endif
diff --git a/drivers/s390/cio/device_fsm.c b/drivers/s390/cio/device_fsm.c
index e460492..3db88c5 100644
--- a/drivers/s390/cio/device_fsm.c
+++ b/drivers/s390/cio/device_fsm.c
@@ -177,29 +177,21 @@
 	panic("Can't stop i/o on subchannel.\n");
 }
 
-static int
-ccw_device_handle_oper(struct ccw_device *cdev)
+void ccw_device_update_sense_data(struct ccw_device *cdev)
 {
-	struct subchannel *sch;
+	memset(&cdev->id, 0, sizeof(cdev->id));
+	cdev->id.cu_type   = cdev->private->senseid.cu_type;
+	cdev->id.cu_model  = cdev->private->senseid.cu_model;
+	cdev->id.dev_type  = cdev->private->senseid.dev_type;
+	cdev->id.dev_model = cdev->private->senseid.dev_model;
+}
 
-	sch = to_subchannel(cdev->dev.parent);
-	cdev->private->flags.recog_done = 1;
-	/*
-	 * Check if cu type and device type still match. If
-	 * not, it is certainly another device and we have to
-	 * de- and re-register.
-	 */
-	if (cdev->id.cu_type != cdev->private->senseid.cu_type ||
-	    cdev->id.cu_model != cdev->private->senseid.cu_model ||
-	    cdev->id.dev_type != cdev->private->senseid.dev_type ||
-	    cdev->id.dev_model != cdev->private->senseid.dev_model) {
-		PREPARE_WORK(&cdev->private->kick_work,
-			     ccw_device_do_unbind_bind);
-		queue_work(ccw_device_work, &cdev->private->kick_work);
-		return 0;
-	}
-	cdev->private->flags.donotify = 1;
-	return 1;
+int ccw_device_test_sense_data(struct ccw_device *cdev)
+{
+	return cdev->id.cu_type == cdev->private->senseid.cu_type &&
+		cdev->id.cu_model == cdev->private->senseid.cu_model &&
+		cdev->id.dev_type == cdev->private->senseid.dev_type &&
+		cdev->id.dev_model == cdev->private->senseid.dev_model;
 }
 
 /*
@@ -233,7 +225,7 @@
 ccw_device_recog_done(struct ccw_device *cdev, int state)
 {
 	struct subchannel *sch;
-	int notify, old_lpm, same_dev;
+	int old_lpm;
 
 	sch = to_subchannel(cdev->dev.parent);
 
@@ -263,8 +255,12 @@
 		wake_up(&cdev->private->wait_q);
 		return;
 	}
-	notify = 0;
-	same_dev = 0; /* Keep the compiler quiet... */
+	if (cdev->private->flags.resuming) {
+		cdev->private->state = state;
+		cdev->private->flags.recog_done = 1;
+		wake_up(&cdev->private->wait_q);
+		return;
+	}
 	switch (state) {
 	case DEV_STATE_NOT_OPER:
 		CIO_MSG_EVENT(2, "SenseID : unknown device %04x on "
@@ -273,34 +269,31 @@
 			      sch->schid.ssid, sch->schid.sch_no);
 		break;
 	case DEV_STATE_OFFLINE:
-		if (cdev->online) {
-			same_dev = ccw_device_handle_oper(cdev);
-			notify = 1;
+		if (!cdev->online) {
+			ccw_device_update_sense_data(cdev);
+			/* Issue device info message. */
+			CIO_MSG_EVENT(4, "SenseID : device 0.%x.%04x reports: "
+				      "CU  Type/Mod = %04X/%02X, Dev Type/Mod "
+				      "= %04X/%02X\n",
+				      cdev->private->dev_id.ssid,
+				      cdev->private->dev_id.devno,
+				      cdev->id.cu_type, cdev->id.cu_model,
+				      cdev->id.dev_type, cdev->id.dev_model);
+			break;
 		}
-		/* fill out sense information */
-		memset(&cdev->id, 0, sizeof(cdev->id));
-		cdev->id.cu_type   = cdev->private->senseid.cu_type;
-		cdev->id.cu_model  = cdev->private->senseid.cu_model;
-		cdev->id.dev_type  = cdev->private->senseid.dev_type;
-		cdev->id.dev_model = cdev->private->senseid.dev_model;
-		if (notify) {
-			cdev->private->state = DEV_STATE_OFFLINE;
-			if (same_dev) {
-				/* Get device online again. */
-				ccw_device_online(cdev);
-				wake_up(&cdev->private->wait_q);
-			}
-			return;
+		cdev->private->state = DEV_STATE_OFFLINE;
+		cdev->private->flags.recog_done = 1;
+		if (ccw_device_test_sense_data(cdev)) {
+			cdev->private->flags.donotify = 1;
+			ccw_device_online(cdev);
+			wake_up(&cdev->private->wait_q);
+		} else {
+			ccw_device_update_sense_data(cdev);
+			PREPARE_WORK(&cdev->private->kick_work,
+				     ccw_device_do_unbind_bind);
+			queue_work(ccw_device_work, &cdev->private->kick_work);
 		}
-		/* Issue device info message. */
-		CIO_MSG_EVENT(4, "SenseID : device 0.%x.%04x reports: "
-			      "CU  Type/Mod = %04X/%02X, Dev Type/Mod = "
-			      "%04X/%02X\n",
-			      cdev->private->dev_id.ssid,
-			      cdev->private->dev_id.devno,
-			      cdev->id.cu_type, cdev->id.cu_model,
-			      cdev->id.dev_type, cdev->id.dev_model);
-		break;
+		return;
 	case DEV_STATE_BOXED:
 		CIO_MSG_EVENT(0, "SenseID : boxed device %04x on "
 			      " subchannel 0.%x.%04x\n",
@@ -502,9 +495,6 @@
 	struct subchannel *sch;
 	int ret;
 
-	if ((cdev->private->state != DEV_STATE_NOT_OPER) &&
-	    (cdev->private->state != DEV_STATE_BOXED))
-		return -EINVAL;
 	sch = to_subchannel(cdev->dev.parent);
 	ret = cio_enable_subchannel(sch, (u32)(addr_t)sch);
 	if (ret != 0)
diff --git a/drivers/s390/cio/device_ops.c b/drivers/s390/cio/device_ops.c
index bf0a24a..2d0efee 100644
--- a/drivers/s390/cio/device_ops.c
+++ b/drivers/s390/cio/device_ops.c
@@ -1,10 +1,8 @@
 /*
- *  drivers/s390/cio/device_ops.c
+ * Copyright IBM Corp. 2002, 2009
  *
- *    Copyright (C) 2002 IBM Deutschland Entwicklung GmbH,
- *			 IBM Corporation
- *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
- *               Cornelia Huck (cornelia.huck@de.ibm.com)
+ * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
+ *	      Cornelia Huck (cornelia.huck@de.ibm.com)
  */
 #include <linux/module.h>
 #include <linux/init.h>
@@ -116,12 +114,15 @@
 
 	if (!cdev || !cdev->dev.parent)
 		return -ENODEV;
+	sch = to_subchannel(cdev->dev.parent);
+	if (!sch->schib.pmcw.ena)
+		return -EINVAL;
 	if (cdev->private->state == DEV_STATE_NOT_OPER)
 		return -ENODEV;
 	if (cdev->private->state != DEV_STATE_ONLINE &&
 	    cdev->private->state != DEV_STATE_W4SENSE)
 		return -EINVAL;
-	sch = to_subchannel(cdev->dev.parent);
+
 	ret = cio_clear(sch);
 	if (ret == 0)
 		cdev->private->intparm = intparm;
@@ -162,6 +163,8 @@
 	if (!cdev || !cdev->dev.parent)
 		return -ENODEV;
 	sch = to_subchannel(cdev->dev.parent);
+	if (!sch->schib.pmcw.ena)
+		return -EINVAL;
 	if (cdev->private->state == DEV_STATE_NOT_OPER)
 		return -ENODEV;
 	if (cdev->private->state == DEV_STATE_VERIFY ||
@@ -337,12 +340,15 @@
 
 	if (!cdev || !cdev->dev.parent)
 		return -ENODEV;
+	sch = to_subchannel(cdev->dev.parent);
+	if (!sch->schib.pmcw.ena)
+		return -EINVAL;
 	if (cdev->private->state == DEV_STATE_NOT_OPER)
 		return -ENODEV;
 	if (cdev->private->state != DEV_STATE_ONLINE &&
 	    cdev->private->state != DEV_STATE_W4SENSE)
 		return -EINVAL;
-	sch = to_subchannel(cdev->dev.parent);
+
 	ret = cio_halt(sch);
 	if (ret == 0)
 		cdev->private->intparm = intparm;
@@ -369,6 +375,8 @@
 	if (!cdev || !cdev->dev.parent)
 		return -ENODEV;
 	sch = to_subchannel(cdev->dev.parent);
+	if (!sch->schib.pmcw.ena)
+		return -EINVAL;
 	if (cdev->private->state == DEV_STATE_NOT_OPER)
 		return -ENODEV;
 	if (cdev->private->state != DEV_STATE_ONLINE ||
@@ -580,6 +588,8 @@
 	int rc;
 
 	sch = to_subchannel(cdev->dev.parent);
+	if (!sch->schib.pmcw.ena)
+		return -EINVAL;
 	if (cdev->private->state != DEV_STATE_ONLINE)
 		return -EIO;
 	/* Adjust requested path mask to excluded varied off paths. */
@@ -669,6 +679,8 @@
 {
 	struct subchannel *sch = to_subchannel(cdev->dev.parent);
 
+	if (!sch->schib.pmcw.ena)
+		return -EINVAL;
 	if (cdev->private->state != DEV_STATE_ONLINE)
 		return -EIO;
 	if (!scsw_is_tm(&sch->schib.scsw) ||
diff --git a/drivers/s390/cio/io_sch.h b/drivers/s390/cio/io_sch.h
index c4f3e7c..0b8f381 100644
--- a/drivers/s390/cio/io_sch.h
+++ b/drivers/s390/cio/io_sch.h
@@ -107,6 +107,7 @@
 		unsigned int recog_done:1;  /* dev. recog. complete */
 		unsigned int fake_irb:1;    /* deliver faked irb */
 		unsigned int intretry:1;    /* retry internal operation */
+		unsigned int resuming:1;    /* recognition while resume */
 	} __attribute__((packed)) flags;
 	unsigned long intparm;	/* user interruption parameter */
 	struct qdio_irq *qdio_data;
diff --git a/drivers/s390/net/claw.c b/drivers/s390/net/claw.c
index 7b6f46d..f370f8d 100644
--- a/drivers/s390/net/claw.c
+++ b/drivers/s390/net/claw.c
@@ -3,12 +3,12 @@
  *    ESCON CLAW network driver
  *
  *  Linux for zSeries version
- *    Copyright (C) 2002,2005 IBM Corporation
+ *    Copyright IBM Corp. 2002, 2009
  *  Author(s) Original code written by:
- *              Kazuo Iimura (iimura@jp.ibm.com)
+ *		Kazuo Iimura <iimura@jp.ibm.com>
  *   	      Rewritten by
- *              Andy Richter (richtera@us.ibm.com)
- *              Marc Price (mwprice@us.ibm.com)
+ *		Andy Richter <richtera@us.ibm.com>
+ *		Marc Price <mwprice@us.ibm.com>
  *
  *    sysfs parms:
  *   group x.x.rrrr,x.x.wwww
@@ -253,6 +253,11 @@
 /* Functions for unpack reads   */
 static void unpack_read(struct net_device *dev);
 
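+/* CLAW has no suspend support: veto every power management transition. */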
+static int claw_pm_prepare(struct ccwgroup_device *gdev)
+{
+	return -EPERM;
+}
+
 /* ccwgroup table  */
 
 static struct ccwgroup_driver claw_group_driver = {
@@ -264,6 +269,7 @@
         .remove      = claw_remove_device,
         .set_online  = claw_new_device,
         .set_offline = claw_shutdown_device,
+	.prepare     = claw_pm_prepare,
 };
 
 /*
@@ -284,7 +290,7 @@
 	if (!get_device(&cgdev->dev))
 		return -ENODEV;
 	privptr = kzalloc(sizeof(struct claw_privbk), GFP_KERNEL);
-	cgdev->dev.driver_data = privptr;
+	dev_set_drvdata(&cgdev->dev, privptr);
 	if (privptr == NULL) {
 		probe_error(cgdev);
 		put_device(&cgdev->dev);
@@ -591,14 +597,14 @@
 
 	CLAW_DBF_TEXT(4, trace, "clawirq");
         /* Bypass all 'unsolicited interrupts' */
-	if (!cdev->dev.driver_data) {
+	privptr = dev_get_drvdata(&cdev->dev);
+	if (!privptr) {
 		dev_warn(&cdev->dev, "An uninitialized CLAW device received an"
 			" IRQ, c-%02x d-%02x\n",
 			irb->scsw.cmd.cstat, irb->scsw.cmd.dstat);
 		CLAW_DBF_TEXT(2, trace, "badirq");
                 return;
         }
-	privptr = (struct claw_privbk *)cdev->dev.driver_data;
 
 	/* Try to extract channel from driver data. */
 	if (privptr->channel[READ].cdev == cdev)
@@ -1980,9 +1986,9 @@
 	struct claw_privbk *privptr;
 
 	CLAW_DBF_TEXT(4, trace, "proberr");
-	privptr = (struct claw_privbk *) cgdev->dev.driver_data;
+	privptr = dev_get_drvdata(&cgdev->dev);
 	if (privptr != NULL) {
-		cgdev->dev.driver_data = NULL;
+		dev_set_drvdata(&cgdev->dev, NULL);
 		kfree(privptr->p_env);
 		kfree(privptr->p_mtc_envelope);
 		kfree(privptr);
@@ -2911,9 +2917,9 @@
 	dev_info(&cgdev->dev, "add for %s\n",
 		 dev_name(&cgdev->cdev[READ]->dev));
 	CLAW_DBF_TEXT(2, setup, "new_dev");
-	privptr = cgdev->dev.driver_data;
-	cgdev->cdev[READ]->dev.driver_data = privptr;
-	cgdev->cdev[WRITE]->dev.driver_data = privptr;
+	privptr = dev_get_drvdata(&cgdev->dev);
+	dev_set_drvdata(&cgdev->cdev[READ]->dev, privptr);
+	dev_set_drvdata(&cgdev->cdev[WRITE]->dev, privptr);
 	if (!privptr)
 		return -ENODEV;
 	p_env = privptr->p_env;
@@ -2950,9 +2956,9 @@
 		goto out;
 	}
 	dev->ml_priv = privptr;
-	cgdev->dev.driver_data = privptr;
-        cgdev->cdev[READ]->dev.driver_data = privptr;
-        cgdev->cdev[WRITE]->dev.driver_data = privptr;
+	dev_set_drvdata(&cgdev->dev, privptr);
+	dev_set_drvdata(&cgdev->cdev[READ]->dev, privptr);
+	dev_set_drvdata(&cgdev->cdev[WRITE]->dev, privptr);
 	/* sysfs magic */
         SET_NETDEV_DEV(dev, &cgdev->dev);
 	if (register_netdev(dev) != 0) {
@@ -3018,7 +3024,7 @@
 	int	ret;
 
 	CLAW_DBF_TEXT_(2, setup, "%s", dev_name(&cgdev->dev));
-	priv = cgdev->dev.driver_data;
+	priv = dev_get_drvdata(&cgdev->dev);
 	if (!priv)
 		return -ENODEV;
 	ndev = priv->channel[READ].ndev;
@@ -3048,7 +3054,7 @@
 
 	BUG_ON(!cgdev);
 	CLAW_DBF_TEXT_(2, setup, "%s", dev_name(&cgdev->dev));
-	priv = cgdev->dev.driver_data;
+	priv = dev_get_drvdata(&cgdev->dev);
 	BUG_ON(!priv);
 	dev_info(&cgdev->dev, " will be removed.\n");
 	if (cgdev->state == CCWGROUP_ONLINE)
@@ -3063,9 +3069,9 @@
 	kfree(priv->channel[1].irb);
 	priv->channel[1].irb=NULL;
 	kfree(priv);
-	cgdev->dev.driver_data=NULL;
-	cgdev->cdev[READ]->dev.driver_data = NULL;
-	cgdev->cdev[WRITE]->dev.driver_data = NULL;
+	dev_set_drvdata(&cgdev->dev, NULL);
+	dev_set_drvdata(&cgdev->cdev[READ]->dev, NULL);
+	dev_set_drvdata(&cgdev->cdev[WRITE]->dev, NULL);
 	put_device(&cgdev->dev);
 
 	return;
@@ -3081,7 +3087,7 @@
 	struct claw_privbk *priv;
 	struct claw_env *  p_env;
 
-	priv = dev->driver_data;
+	priv = dev_get_drvdata(dev);
 	if (!priv)
 		return -ENODEV;
 	p_env = priv->p_env;
@@ -3095,7 +3101,7 @@
 	struct claw_privbk *priv;
 	struct claw_env *  p_env;
 
-	priv = dev->driver_data;
+	priv = dev_get_drvdata(dev);
 	if (!priv)
 		return -ENODEV;
 	p_env = priv->p_env;
@@ -3119,7 +3125,7 @@
 	struct claw_privbk *priv;
 	struct claw_env *  p_env;
 
-	priv = dev->driver_data;
+	priv = dev_get_drvdata(dev);
 	if (!priv)
 		return -ENODEV;
 	p_env = priv->p_env;
@@ -3133,7 +3139,7 @@
 	struct claw_privbk *priv;
 	struct claw_env *  p_env;
 
-	priv = dev->driver_data;
+	priv = dev_get_drvdata(dev);
 	if (!priv)
 		return -ENODEV;
 	p_env = priv->p_env;
@@ -3157,7 +3163,7 @@
 	struct claw_privbk *priv;
 	struct claw_env *  p_env;
 
-	priv = dev->driver_data;
+	priv = dev_get_drvdata(dev);
 	if (!priv)
 		return -ENODEV;
 	p_env = priv->p_env;
@@ -3172,7 +3178,7 @@
 	struct claw_privbk *priv;
 	struct claw_env *  p_env;
 
-	priv = dev->driver_data;
+	priv = dev_get_drvdata(dev);
 	if (!priv)
 		return -ENODEV;
 	p_env = priv->p_env;
@@ -3206,7 +3212,7 @@
 	struct claw_privbk *priv;
 	struct claw_env * p_env;
 
-	priv = dev->driver_data;
+	priv = dev_get_drvdata(dev);
 	if (!priv)
 		return -ENODEV;
 	p_env = priv->p_env;
@@ -3221,7 +3227,7 @@
 	struct claw_env *  p_env;
 	int nnn,max;
 
-	priv = dev->driver_data;
+	priv = dev_get_drvdata(dev);
 	if (!priv)
 		return -ENODEV;
 	p_env = priv->p_env;
@@ -3248,7 +3254,7 @@
 	struct claw_privbk *priv;
 	struct claw_env *  p_env;
 
-	priv = dev->driver_data;
+	priv = dev_get_drvdata(dev);
 	if (!priv)
 		return -ENODEV;
 	p_env = priv->p_env;
@@ -3263,7 +3269,7 @@
 	struct claw_env *p_env;
 	int nnn,max;
 
-	priv = dev->driver_data;
+	priv = dev_get_drvdata(dev);
 	if (!priv)
 		return -ENODEV;
 	p_env = priv->p_env;
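
A minimal sketch of the conversion the claw.c hunks above apply throughout: direct pokes at dev->driver_data are replaced by the driver-data accessors. Illustrative only, not part of the patch; the helper names are hypothetical and claw_privbk is left opaque.

#include <linux/device.h>

struct claw_privbk;			/* driver-private data, opaque here */

/* was:	priv = (struct claw_privbk *) dev->driver_data; */
static struct claw_privbk *claw_priv_sketch_get(struct device *dev)
{
	return dev_get_drvdata(dev);
}

/* was:	dev->driver_data = priv; */
static void claw_priv_sketch_set(struct device *dev, struct claw_privbk *priv)
{
	dev_set_drvdata(dev, priv);
}
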
diff --git a/drivers/s390/net/ctcm_main.c b/drivers/s390/net/ctcm_main.c
index 54c4649..222e473 100644
--- a/drivers/s390/net/ctcm_main.c
+++ b/drivers/s390/net/ctcm_main.c
@@ -1,7 +1,7 @@
 /*
  * drivers/s390/net/ctcm_main.c
  *
- * Copyright IBM Corp. 2001, 2007
+ * Copyright IBM Corp. 2001, 2009
  * Author(s):
  *	Original CTC driver(s):
  *		Fritz Elfert (felfert@millenux.com)
@@ -1688,6 +1688,38 @@
 	put_device(&cgdev->dev);
 }
 
+static int ctcm_pm_suspend(struct ccwgroup_device *gdev)
+{
+	struct ctcm_priv *priv = dev_get_drvdata(&gdev->dev);
+
+	if (gdev->state == CCWGROUP_OFFLINE)
+		return 0;
+	netif_device_detach(priv->channel[READ]->netdev);
+	ctcm_close(priv->channel[READ]->netdev);
+	ccw_device_set_offline(gdev->cdev[1]);
+	ccw_device_set_offline(gdev->cdev[0]);
+	return 0;
+}
+
+static int ctcm_pm_resume(struct ccwgroup_device *gdev)
+{
+	struct ctcm_priv *priv = dev_get_drvdata(&gdev->dev);
+	int rc;
+
+	if (gdev->state == CCWGROUP_OFFLINE)
+		return 0;
+	rc = ccw_device_set_online(gdev->cdev[1]);
+	if (rc)
+		goto err_out;
+	rc = ccw_device_set_online(gdev->cdev[0]);
+	if (rc)
+		goto err_out;
+	ctcm_open(priv->channel[READ]->netdev);
+err_out:
+	netif_device_attach(priv->channel[READ]->netdev);
+	return rc;
+}
+
 static struct ccwgroup_driver ctcm_group_driver = {
 	.owner       = THIS_MODULE,
 	.name        = CTC_DRIVER_NAME,
@@ -1697,6 +1729,9 @@
 	.remove      = ctcm_remove_device,
 	.set_online  = ctcm_new_device,
 	.set_offline = ctcm_shutdown_device,
+	.freeze	     = ctcm_pm_suspend,
+	.thaw	     = ctcm_pm_resume,
+	.restore     = ctcm_pm_resume,
 };
 
 
diff --git a/drivers/s390/net/lcs.c b/drivers/s390/net/lcs.c
index a45bc24..8c67590 100644
--- a/drivers/s390/net/lcs.c
+++ b/drivers/s390/net/lcs.c
@@ -1,15 +1,12 @@
 /*
- *  linux/drivers/s390/net/lcs.c
- *
  *  Linux for S/390 Lan Channel Station Network Driver
  *
- *  Copyright (C)  1999-2001 IBM Deutschland Entwicklung GmbH,
- *			     IBM Corporation
- *    Author(s): Original Code written by
- *			  DJ Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com)
- *		 Rewritten by
- *			  Frank Pavlic (fpavlic@de.ibm.com) and
- *		 	  Martin Schwidefsky <schwidefsky@de.ibm.com>
+ *  Copyright IBM Corp. 1999, 2009
+ *  Author(s): Original Code written by
+ *			DJ Barrow <djbarrow@de.ibm.com,barrow_dj@yahoo.com>
+ *	       Rewritten by
+ *			Frank Pavlic <fpavlic@de.ibm.com> and
+ *			Martin Schwidefsky <schwidefsky@de.ibm.com>
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -1939,7 +1936,7 @@
 {
         struct lcs_card *card;
 
-	card = (struct lcs_card *)dev->driver_data;
+	card = dev_get_drvdata(dev);
 
         if (!card)
                 return 0;
@@ -1956,7 +1953,7 @@
         struct lcs_card *card;
         int value;
 
-	card = (struct lcs_card *)dev->driver_data;
+	card = dev_get_drvdata(dev);
 
         if (!card)
                 return 0;
@@ -1990,7 +1987,7 @@
 {
 	struct lcs_card *card;
 
-	card = (struct lcs_card *)dev->driver_data;
+	card = dev_get_drvdata(dev);
 
 	return card ? sprintf(buf, "%u\n", card->lancmd_timeout) : 0;
 }
@@ -2001,7 +1998,7 @@
         struct lcs_card *card;
         int value;
 
-	card = (struct lcs_card *)dev->driver_data;
+	card = dev_get_drvdata(dev);
 
         if (!card)
                 return 0;
@@ -2020,7 +2017,7 @@
 lcs_dev_recover_store(struct device *dev, struct device_attribute *attr,
 		      const char *buf, size_t count)
 {
-	struct lcs_card *card = dev->driver_data;
+	struct lcs_card *card = dev_get_drvdata(dev);
 	char *tmp;
 	int i;
 
@@ -2073,7 +2070,7 @@
 		put_device(&ccwgdev->dev);
 		return ret;
         }
-	ccwgdev->dev.driver_data = card;
+	dev_set_drvdata(&ccwgdev->dev, card);
 	ccwgdev->cdev[0]->handler = lcs_irq;
 	ccwgdev->cdev[1]->handler = lcs_irq;
 	card->gdev = ccwgdev;
@@ -2090,7 +2087,7 @@
 	struct lcs_card *card;
 
 	LCS_DBF_TEXT(2, setup, "regnetdv");
-	card = (struct lcs_card *)ccwgdev->dev.driver_data;
+	card = dev_get_drvdata(&ccwgdev->dev);
 	if (card->dev->reg_state != NETREG_UNINITIALIZED)
 		return 0;
 	SET_NETDEV_DEV(card->dev, &ccwgdev->dev);
@@ -2123,7 +2120,7 @@
 	enum lcs_dev_states recover_state;
 	int rc;
 
-	card = (struct lcs_card *)ccwgdev->dev.driver_data;
+	card = dev_get_drvdata(&ccwgdev->dev);
 	if (!card)
 		return -ENODEV;
 
@@ -2229,7 +2226,7 @@
 	int ret;
 
 	LCS_DBF_TEXT(3, setup, "shtdndev");
-	card = (struct lcs_card *)ccwgdev->dev.driver_data;
+	card = dev_get_drvdata(&ccwgdev->dev);
 	if (!card)
 		return -ENODEV;
 	if (recovery_mode == 0) {
@@ -2296,7 +2293,7 @@
 {
 	struct lcs_card *card;
 
-	card = (struct lcs_card *)ccwgdev->dev.driver_data;
+	card = dev_get_drvdata(&ccwgdev->dev);
 	if (!card)
 		return;
 
@@ -2313,6 +2310,60 @@
 	put_device(&ccwgdev->dev);
 }
 
+static int lcs_pm_suspend(struct lcs_card *card)
+{
+	if (card->dev)
+		netif_device_detach(card->dev);
+	lcs_set_allowed_threads(card, 0);
+	lcs_wait_for_threads(card, 0xffffffff);
+	if (card->state != DEV_STATE_DOWN)
+		__lcs_shutdown_device(card->gdev, 1);
+	return 0;
+}
+
+static int lcs_pm_resume(struct lcs_card *card)
+{
+	int rc = 0;
+
+	if (card->state == DEV_STATE_RECOVER)
+		rc = lcs_new_device(card->gdev);
+	if (card->dev)
+		netif_device_attach(card->dev);
+	if (rc) {
+		dev_warn(&card->gdev->dev, "The lcs device driver "
+			"failed to recover the device\n");
+	}
+	return rc;
+}
+
+static int lcs_prepare(struct ccwgroup_device *gdev)
+{
+	return 0;
+}
+
+static void lcs_complete(struct ccwgroup_device *gdev)
+{
+	return;
+}
+
+static int lcs_freeze(struct ccwgroup_device *gdev)
+{
+	struct lcs_card *card = dev_get_drvdata(&gdev->dev);
+	return lcs_pm_suspend(card);
+}
+
+static int lcs_thaw(struct ccwgroup_device *gdev)
+{
+	struct lcs_card *card = dev_get_drvdata(&gdev->dev);
+	return lcs_pm_resume(card);
+}
+
+static int lcs_restore(struct ccwgroup_device *gdev)
+{
+	struct lcs_card *card = dev_get_drvdata(&gdev->dev);
+	return lcs_pm_resume(card);
+}
+
 /**
  * LCS ccwgroup driver registration
  */
@@ -2325,6 +2376,11 @@
 	.remove      = lcs_remove_device,
 	.set_online  = lcs_new_device,
 	.set_offline = lcs_shutdown_device,
+	.prepare     = lcs_prepare,
+	.complete    = lcs_complete,
+	.freeze	     = lcs_freeze,
+	.thaw	     = lcs_thaw,
+	.restore     = lcs_restore,
 };
 
 /**
diff --git a/drivers/s390/net/lcs.h b/drivers/s390/net/lcs.h
index d58fea5..6d668642 100644
--- a/drivers/s390/net/lcs.h
+++ b/drivers/s390/net/lcs.h
@@ -34,8 +34,8 @@
  *	sysfs related stuff
  */
 #define CARD_FROM_DEV(cdev) \
-	(struct lcs_card *) \
-	((struct ccwgroup_device *)cdev->dev.driver_data)->dev.driver_data;
+	(struct lcs_card *) dev_get_drvdata( \
+		&((struct ccwgroup_device *)dev_get_drvdata(&cdev->dev))->dev);
 /**
  * CCW commands used in this driver
  */
diff --git a/drivers/s390/net/netiucv.c b/drivers/s390/net/netiucv.c
index aec9e5d..52574ce 100644
--- a/drivers/s390/net/netiucv.c
+++ b/drivers/s390/net/netiucv.c
@@ -1,11 +1,15 @@
 /*
  * IUCV network driver
  *
- * Copyright 2001 IBM Deutschland Entwicklung GmbH, IBM Corporation
- * Author(s): Fritz Elfert (elfert@de.ibm.com, felfert@millenux.com)
+ * Copyright IBM Corp. 2001, 2009
  *
- * Sysfs integration and all bugs therein by Cornelia Huck
- * (cornelia.huck@de.ibm.com)
+ * Author(s):
+ *	Original netiucv driver:
+ *		Fritz Elfert (elfert@de.ibm.com, felfert@millenux.com)
+ *	Sysfs integration and all bugs therein:
+ *		Cornelia Huck (cornelia.huck@de.ibm.com)
+ *	PM functions:
+ *		Ursula Braun (ursula.braun@de.ibm.com)
  *
  * Documentation used:
  *  the source of the original IUCV driver by:
@@ -149,10 +153,27 @@
 
 #define PRINTK_HEADER " iucv: "       /* for debugging */
 
+/* dummy device to make sure netiucv_pm functions are called */
+static struct device *netiucv_dev;
+
+static int netiucv_pm_prepare(struct device *);
+static void netiucv_pm_complete(struct device *);
+static int netiucv_pm_freeze(struct device *);
+static int netiucv_pm_restore_thaw(struct device *);
+
+static struct dev_pm_ops netiucv_pm_ops = {
+	.prepare = netiucv_pm_prepare,
+	.complete = netiucv_pm_complete,
+	.freeze = netiucv_pm_freeze,
+	.thaw = netiucv_pm_restore_thaw,
+	.restore = netiucv_pm_restore_thaw,
+};
+
 static struct device_driver netiucv_driver = {
 	.owner = THIS_MODULE,
 	.name = "netiucv",
 	.bus  = &iucv_bus,
+	.pm = &netiucv_pm_ops,
 };
 
 static int netiucv_callback_connreq(struct iucv_path *,
@@ -233,6 +254,7 @@
 	fsm_instance            *fsm;
         struct iucv_connection  *conn;
 	struct device           *dev;
+	int			 pm_state;
 };
 
 /**
@@ -1265,6 +1287,72 @@
 	return 0;
 }
 
+static int netiucv_pm_prepare(struct device *dev)
+{
+	IUCV_DBF_TEXT(trace, 3, __func__);
+	return 0;
+}
+
+static void netiucv_pm_complete(struct device *dev)
+{
+	IUCV_DBF_TEXT(trace, 3, __func__);
+	return;
+}
+
+/**
+ * netiucv_pm_freeze() - Freeze PM callback
+ * @dev:	netiucv device
+ *
+ * close open netiucv interfaces
+ */
+static int netiucv_pm_freeze(struct device *dev)
+{
+	struct netiucv_priv *priv = dev_get_drvdata(dev);
+	struct net_device *ndev = NULL;
+	int rc = 0;
+
+	IUCV_DBF_TEXT(trace, 3, __func__);
+	if (priv && priv->conn)
+		ndev = priv->conn->netdev;
+	if (!ndev)
+		goto out;
+	netif_device_detach(ndev);
+	priv->pm_state = fsm_getstate(priv->fsm);
+	rc = netiucv_close(ndev);
+out:
+	return rc;
+}
+
+/**
+ * netiucv_pm_restore_thaw() - Thaw and restore PM callback
+ * @dev:	netiucv device
+ *
+ * re-open netiucv interfaces closed during freeze
+ */
+static int netiucv_pm_restore_thaw(struct device *dev)
+{
+	struct netiucv_priv *priv = dev_get_drvdata(dev);
+	struct net_device *ndev = NULL;
+	int rc = 0;
+
+	IUCV_DBF_TEXT(trace, 3, __func__);
+	if (priv && priv->conn)
+		ndev = priv->conn->netdev;
+	if (!ndev)
+		goto out;
+	switch (priv->pm_state) {
+	case DEV_STATE_RUNNING:
+	case DEV_STATE_STARTWAIT:
+		rc = netiucv_open(ndev);
+		break;
+	default:
+		break;
+	}
+	netif_device_attach(ndev);
+out:
+	return rc;
+}
+
 /**
  * Start transmission of a packet.
  * Called from generic network device layer.
@@ -1364,7 +1452,7 @@
 static ssize_t user_show(struct device *dev, struct device_attribute *attr,
 			 char *buf)
 {
-	struct netiucv_priv *priv = dev->driver_data;
+	struct netiucv_priv *priv = dev_get_drvdata(dev);
 
 	IUCV_DBF_TEXT(trace, 5, __func__);
 	return sprintf(buf, "%s\n", netiucv_printname(priv->conn->userid));
@@ -1373,7 +1461,7 @@
 static ssize_t user_write(struct device *dev, struct device_attribute *attr,
 			  const char *buf, size_t count)
 {
-	struct netiucv_priv *priv = dev->driver_data;
+	struct netiucv_priv *priv = dev_get_drvdata(dev);
 	struct net_device *ndev = priv->conn->netdev;
 	char    *p;
 	char    *tmp;
@@ -1430,7 +1518,8 @@
 
 static ssize_t buffer_show (struct device *dev, struct device_attribute *attr,
 			    char *buf)
-{	struct netiucv_priv *priv = dev->driver_data;
+{
+	struct netiucv_priv *priv = dev_get_drvdata(dev);
 
 	IUCV_DBF_TEXT(trace, 5, __func__);
 	return sprintf(buf, "%d\n", priv->conn->max_buffsize);
@@ -1439,7 +1528,7 @@
 static ssize_t buffer_write (struct device *dev, struct device_attribute *attr,
 			     const char *buf, size_t count)
 {
-	struct netiucv_priv *priv = dev->driver_data;
+	struct netiucv_priv *priv = dev_get_drvdata(dev);
 	struct net_device *ndev = priv->conn->netdev;
 	char         *e;
 	int          bs1;
@@ -1487,7 +1576,7 @@
 static ssize_t dev_fsm_show (struct device *dev, struct device_attribute *attr,
 			     char *buf)
 {
-	struct netiucv_priv *priv = dev->driver_data;
+	struct netiucv_priv *priv = dev_get_drvdata(dev);
 
 	IUCV_DBF_TEXT(trace, 5, __func__);
 	return sprintf(buf, "%s\n", fsm_getstate_str(priv->fsm));
@@ -1498,7 +1587,7 @@
 static ssize_t conn_fsm_show (struct device *dev,
 			      struct device_attribute *attr, char *buf)
 {
-	struct netiucv_priv *priv = dev->driver_data;
+	struct netiucv_priv *priv = dev_get_drvdata(dev);
 
 	IUCV_DBF_TEXT(trace, 5, __func__);
 	return sprintf(buf, "%s\n", fsm_getstate_str(priv->conn->fsm));
@@ -1509,7 +1598,7 @@
 static ssize_t maxmulti_show (struct device *dev,
 			      struct device_attribute *attr, char *buf)
 {
-	struct netiucv_priv *priv = dev->driver_data;
+	struct netiucv_priv *priv = dev_get_drvdata(dev);
 
 	IUCV_DBF_TEXT(trace, 5, __func__);
 	return sprintf(buf, "%ld\n", priv->conn->prof.maxmulti);
@@ -1519,7 +1608,7 @@
 			       struct device_attribute *attr,
 			       const char *buf, size_t count)
 {
-	struct netiucv_priv *priv = dev->driver_data;
+	struct netiucv_priv *priv = dev_get_drvdata(dev);
 
 	IUCV_DBF_TEXT(trace, 4, __func__);
 	priv->conn->prof.maxmulti = 0;
@@ -1531,7 +1620,7 @@
 static ssize_t maxcq_show (struct device *dev, struct device_attribute *attr,
 			   char *buf)
 {
-	struct netiucv_priv *priv = dev->driver_data;
+	struct netiucv_priv *priv = dev_get_drvdata(dev);
 
 	IUCV_DBF_TEXT(trace, 5, __func__);
 	return sprintf(buf, "%ld\n", priv->conn->prof.maxcqueue);
@@ -1540,7 +1629,7 @@
 static ssize_t maxcq_write (struct device *dev, struct device_attribute *attr,
 			    const char *buf, size_t count)
 {
-	struct netiucv_priv *priv = dev->driver_data;
+	struct netiucv_priv *priv = dev_get_drvdata(dev);
 
 	IUCV_DBF_TEXT(trace, 4, __func__);
 	priv->conn->prof.maxcqueue = 0;
@@ -1552,7 +1641,7 @@
 static ssize_t sdoio_show (struct device *dev, struct device_attribute *attr,
 			   char *buf)
 {
-	struct netiucv_priv *priv = dev->driver_data;
+	struct netiucv_priv *priv = dev_get_drvdata(dev);
 
 	IUCV_DBF_TEXT(trace, 5, __func__);
 	return sprintf(buf, "%ld\n", priv->conn->prof.doios_single);
@@ -1561,7 +1650,7 @@
 static ssize_t sdoio_write (struct device *dev, struct device_attribute *attr,
 			    const char *buf, size_t count)
 {
-	struct netiucv_priv *priv = dev->driver_data;
+	struct netiucv_priv *priv = dev_get_drvdata(dev);
 
 	IUCV_DBF_TEXT(trace, 4, __func__);
 	priv->conn->prof.doios_single = 0;
@@ -1573,7 +1662,7 @@
 static ssize_t mdoio_show (struct device *dev, struct device_attribute *attr,
 			   char *buf)
 {
-	struct netiucv_priv *priv = dev->driver_data;
+	struct netiucv_priv *priv = dev_get_drvdata(dev);
 
 	IUCV_DBF_TEXT(trace, 5, __func__);
 	return sprintf(buf, "%ld\n", priv->conn->prof.doios_multi);
@@ -1582,7 +1671,7 @@
 static ssize_t mdoio_write (struct device *dev, struct device_attribute *attr,
 			    const char *buf, size_t count)
 {
-	struct netiucv_priv *priv = dev->driver_data;
+	struct netiucv_priv *priv = dev_get_drvdata(dev);
 
 	IUCV_DBF_TEXT(trace, 5, __func__);
 	priv->conn->prof.doios_multi = 0;
@@ -1594,7 +1683,7 @@
 static ssize_t txlen_show (struct device *dev, struct device_attribute *attr,
 			   char *buf)
 {
-	struct netiucv_priv *priv = dev->driver_data;
+	struct netiucv_priv *priv = dev_get_drvdata(dev);
 
 	IUCV_DBF_TEXT(trace, 5, __func__);
 	return sprintf(buf, "%ld\n", priv->conn->prof.txlen);
@@ -1603,7 +1692,7 @@
 static ssize_t txlen_write (struct device *dev, struct device_attribute *attr,
 			    const char *buf, size_t count)
 {
-	struct netiucv_priv *priv = dev->driver_data;
+	struct netiucv_priv *priv = dev_get_drvdata(dev);
 
 	IUCV_DBF_TEXT(trace, 4, __func__);
 	priv->conn->prof.txlen = 0;
@@ -1615,7 +1704,7 @@
 static ssize_t txtime_show (struct device *dev, struct device_attribute *attr,
 			    char *buf)
 {
-	struct netiucv_priv *priv = dev->driver_data;
+	struct netiucv_priv *priv = dev_get_drvdata(dev);
 
 	IUCV_DBF_TEXT(trace, 5, __func__);
 	return sprintf(buf, "%ld\n", priv->conn->prof.tx_time);
@@ -1624,7 +1713,7 @@
 static ssize_t txtime_write (struct device *dev, struct device_attribute *attr,
 			     const char *buf, size_t count)
 {
-	struct netiucv_priv *priv = dev->driver_data;
+	struct netiucv_priv *priv = dev_get_drvdata(dev);
 
 	IUCV_DBF_TEXT(trace, 4, __func__);
 	priv->conn->prof.tx_time = 0;
@@ -1636,7 +1725,7 @@
 static ssize_t txpend_show (struct device *dev, struct device_attribute *attr,
 			    char *buf)
 {
-	struct netiucv_priv *priv = dev->driver_data;
+	struct netiucv_priv *priv = dev_get_drvdata(dev);
 
 	IUCV_DBF_TEXT(trace, 5, __func__);
 	return sprintf(buf, "%ld\n", priv->conn->prof.tx_pending);
@@ -1645,7 +1734,7 @@
 static ssize_t txpend_write (struct device *dev, struct device_attribute *attr,
 			     const char *buf, size_t count)
 {
-	struct netiucv_priv *priv = dev->driver_data;
+	struct netiucv_priv *priv = dev_get_drvdata(dev);
 
 	IUCV_DBF_TEXT(trace, 4, __func__);
 	priv->conn->prof.tx_pending = 0;
@@ -1657,7 +1746,7 @@
 static ssize_t txmpnd_show (struct device *dev, struct device_attribute *attr,
 			    char *buf)
 {
-	struct netiucv_priv *priv = dev->driver_data;
+	struct netiucv_priv *priv = dev_get_drvdata(dev);
 
 	IUCV_DBF_TEXT(trace, 5, __func__);
 	return sprintf(buf, "%ld\n", priv->conn->prof.tx_max_pending);
@@ -1666,7 +1755,7 @@
 static ssize_t txmpnd_write (struct device *dev, struct device_attribute *attr,
 			     const char *buf, size_t count)
 {
-	struct netiucv_priv *priv = dev->driver_data;
+	struct netiucv_priv *priv = dev_get_drvdata(dev);
 
 	IUCV_DBF_TEXT(trace, 4, __func__);
 	priv->conn->prof.tx_max_pending = 0;
@@ -1731,7 +1820,6 @@
 	struct device *dev = kzalloc(sizeof(struct device), GFP_KERNEL);
 	int ret;
 
-
 	IUCV_DBF_TEXT(trace, 3, __func__);
 
 	if (dev) {
@@ -1758,7 +1846,7 @@
 	if (ret)
 		goto out_unreg;
 	priv->dev = dev;
-	dev->driver_data = priv;
+	dev_set_drvdata(dev, priv);
 	return 0;
 
 out_unreg:
@@ -2100,6 +2188,7 @@
 		netiucv_unregister_device(dev);
 	}
 
+	device_unregister(netiucv_dev);
 	driver_unregister(&netiucv_driver);
 	iucv_unregister(&netiucv_handler, 1);
 	iucv_unregister_dbf_views();
@@ -2125,10 +2214,25 @@
 		IUCV_DBF_TEXT_(setup, 2, "ret %d from driver_register\n", rc);
 		goto out_iucv;
 	}
-
+	/* establish dummy device */
+	netiucv_dev = kzalloc(sizeof(struct device), GFP_KERNEL);
+	if (!netiucv_dev) {
+		rc = -ENOMEM;
+		goto out_driver;
+	}
+	dev_set_name(netiucv_dev, "netiucv");
+	netiucv_dev->bus = &iucv_bus;
+	netiucv_dev->parent = iucv_root;
+	netiucv_dev->release = (void (*)(struct device *))kfree;
+	netiucv_dev->driver = &netiucv_driver;
+	rc = device_register(netiucv_dev);
+	if (rc)
+		goto out_driver;
 	netiucv_banner();
 	return rc;
 
+out_driver:
+	driver_unregister(&netiucv_driver);
 out_iucv:
 	iucv_unregister(&netiucv_handler, 1);
 out_dbf:
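
The netiucv hunks above (and the smsgiucv ones further down) depend on a detail that is easy to miss: dev_pm_ops attached to a device_driver are only invoked for devices bound to that driver, so a driver that may have no connection devices around at suspend time registers one permanent dummy device purely as a PM trigger. A rough sketch of that shape, assuming the iucv bus symbols and using hypothetical example_* names (not part of the patch):

#include <linux/device.h>
#include <linux/module.h>
#include <linux/slab.h>

/* both provided by the iucv core */
extern struct bus_type iucv_bus;
extern struct device *iucv_root;

static int example_freeze(struct device *dev)	{ return 0; }
static int example_thaw(struct device *dev)	{ return 0; }

static struct dev_pm_ops example_pm_ops = {
	.freeze	 = example_freeze,
	.thaw	 = example_thaw,
	.restore = example_thaw,
};

static struct device_driver example_driver = {
	.owner	= THIS_MODULE,
	.name	= "example",
	.bus	= &iucv_bus,
	.pm	= &example_pm_ops,	/* PM callbacks hang off the driver */
};

static struct device *example_dev;

static int example_register_dummy(void)
{
	int rc;

	/* without at least one registered device bound to the driver,
	 * the PM core never calls example_pm_ops on suspend/resume */
	example_dev = kzalloc(sizeof(*example_dev), GFP_KERNEL);
	if (!example_dev)
		return -ENOMEM;
	dev_set_name(example_dev, "example");
	example_dev->bus = &iucv_bus;
	example_dev->parent = iucv_root;
	example_dev->release = (void (*)(struct device *))kfree;
	/* example_driver is assumed to have been registered with
	 * driver_register() already, as netiucv does */
	example_dev->driver = &example_driver;
	rc = device_register(example_dev);
	if (rc)
		put_device(example_dev);	/* drops it via ->release */
	return rc;
}
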
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index 74c49d9..d53621c 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -1,7 +1,7 @@
 /*
  *  drivers/s390/net/qeth_core_main.c
  *
- *    Copyright IBM Corp. 2007
+ *    Copyright IBM Corp. 2007, 2009
  *    Author(s): Utz Bacher <utz.bacher@de.ibm.com>,
  *		 Frank Pavlic <fpavlic@de.ibm.com>,
  *		 Thomas Spatzier <tspat@de.ibm.com>,
@@ -4195,6 +4195,50 @@
 		card->discipline.ccwgdriver->shutdown(gdev);
 }
 
+static int qeth_core_prepare(struct ccwgroup_device *gdev)
+{
+	struct qeth_card *card = dev_get_drvdata(&gdev->dev);
+	if (card->discipline.ccwgdriver &&
+	    card->discipline.ccwgdriver->prepare)
+		return card->discipline.ccwgdriver->prepare(gdev);
+	return 0;
+}
+
+static void qeth_core_complete(struct ccwgroup_device *gdev)
+{
+	struct qeth_card *card = dev_get_drvdata(&gdev->dev);
+	if (card->discipline.ccwgdriver &&
+	    card->discipline.ccwgdriver->complete)
+		card->discipline.ccwgdriver->complete(gdev);
+}
+
+static int qeth_core_freeze(struct ccwgroup_device *gdev)
+{
+	struct qeth_card *card = dev_get_drvdata(&gdev->dev);
+	if (card->discipline.ccwgdriver &&
+	    card->discipline.ccwgdriver->freeze)
+		return card->discipline.ccwgdriver->freeze(gdev);
+	return 0;
+}
+
+static int qeth_core_thaw(struct ccwgroup_device *gdev)
+{
+	struct qeth_card *card = dev_get_drvdata(&gdev->dev);
+	if (card->discipline.ccwgdriver &&
+	    card->discipline.ccwgdriver->thaw)
+		return card->discipline.ccwgdriver->thaw(gdev);
+	return 0;
+}
+
+static int qeth_core_restore(struct ccwgroup_device *gdev)
+{
+	struct qeth_card *card = dev_get_drvdata(&gdev->dev);
+	if (card->discipline.ccwgdriver &&
+	    card->discipline.ccwgdriver->restore)
+		return card->discipline.ccwgdriver->restore(gdev);
+	return 0;
+}
+
 static struct ccwgroup_driver qeth_core_ccwgroup_driver = {
 	.owner = THIS_MODULE,
 	.name = "qeth",
@@ -4204,6 +4248,11 @@
 	.set_online = qeth_core_set_online,
 	.set_offline = qeth_core_set_offline,
 	.shutdown = qeth_core_shutdown,
+	.prepare = qeth_core_prepare,
+	.complete = qeth_core_complete,
+	.freeze = qeth_core_freeze,
+	.thaw = qeth_core_thaw,
+	.restore = qeth_core_restore,
 };
 
 static ssize_t
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index ecd3d06..81d7f26 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -1,7 +1,7 @@
 /*
  *  drivers/s390/net/qeth_l2_main.c
  *
- *    Copyright IBM Corp. 2007
+ *    Copyright IBM Corp. 2007, 2009
  *    Author(s): Utz Bacher <utz.bacher@de.ibm.com>,
  *		 Frank Pavlic <fpavlic@de.ibm.com>,
  *		 Thomas Spatzier <tspat@de.ibm.com>,
@@ -1141,12 +1141,62 @@
 	qeth_clear_qdio_buffers(card);
 }
 
+static int qeth_l2_pm_suspend(struct ccwgroup_device *gdev)
+{
+	struct qeth_card *card = dev_get_drvdata(&gdev->dev);
+
+	if (card->dev)
+		netif_device_detach(card->dev);
+	qeth_set_allowed_threads(card, 0, 1);
+	wait_event(card->wait_q, qeth_threads_running(card, 0xffffffff) == 0);
+	if (gdev->state == CCWGROUP_OFFLINE)
+		return 0;
+	if (card->state == CARD_STATE_UP) {
+		card->use_hard_stop = 1;
+		__qeth_l2_set_offline(card->gdev, 1);
+	} else
+		__qeth_l2_set_offline(card->gdev, 0);
+	return 0;
+}
+
+static int qeth_l2_pm_resume(struct ccwgroup_device *gdev)
+{
+	struct qeth_card *card = dev_get_drvdata(&gdev->dev);
+	int rc = 0;
+
+	if (gdev->state == CCWGROUP_OFFLINE)
+		goto out;
+
+	if (card->state == CARD_STATE_RECOVER) {
+		rc = __qeth_l2_set_online(card->gdev, 1);
+		if (rc) {
+			if (card->dev) {
+				rtnl_lock();
+				dev_close(card->dev);
+				rtnl_unlock();
+			}
+		}
+	} else
+		rc = __qeth_l2_set_online(card->gdev, 0);
+out:
+	qeth_set_allowed_threads(card, 0xffffffff, 0);
+	if (card->dev)
+		netif_device_attach(card->dev);
+	if (rc)
+		dev_warn(&card->gdev->dev, "The qeth device driver "
+			"failed to recover an error on the device\n");
+	return rc;
+}
+
 struct ccwgroup_driver qeth_l2_ccwgroup_driver = {
 	.probe = qeth_l2_probe_device,
 	.remove = qeth_l2_remove_device,
 	.set_online = qeth_l2_set_online,
 	.set_offline = qeth_l2_set_offline,
 	.shutdown = qeth_l2_shutdown,
+	.freeze = qeth_l2_pm_suspend,
+	.thaw = qeth_l2_pm_resume,
+	.restore = qeth_l2_pm_resume,
 };
 EXPORT_SYMBOL_GPL(qeth_l2_ccwgroup_driver);
 
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index 6f2386e..5487240 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -1,7 +1,7 @@
 /*
  *  drivers/s390/net/qeth_l3_main.c
  *
- *    Copyright IBM Corp. 2007
+ *    Copyright IBM Corp. 2007, 2009
  *    Author(s): Utz Bacher <utz.bacher@de.ibm.com>,
  *		 Frank Pavlic <fpavlic@de.ibm.com>,
  *		 Thomas Spatzier <tspat@de.ibm.com>,
@@ -3283,12 +3283,62 @@
 	qeth_clear_qdio_buffers(card);
 }
 
+static int qeth_l3_pm_suspend(struct ccwgroup_device *gdev)
+{
+	struct qeth_card *card = dev_get_drvdata(&gdev->dev);
+
+	if (card->dev)
+		netif_device_detach(card->dev);
+	qeth_set_allowed_threads(card, 0, 1);
+	wait_event(card->wait_q, qeth_threads_running(card, 0xffffffff) == 0);
+	if (gdev->state == CCWGROUP_OFFLINE)
+		return 0;
+	if (card->state == CARD_STATE_UP) {
+		card->use_hard_stop = 1;
+		__qeth_l3_set_offline(card->gdev, 1);
+	} else
+		__qeth_l3_set_offline(card->gdev, 0);
+	return 0;
+}
+
+static int qeth_l3_pm_resume(struct ccwgroup_device *gdev)
+{
+	struct qeth_card *card = dev_get_drvdata(&gdev->dev);
+	int rc = 0;
+
+	if (gdev->state == CCWGROUP_OFFLINE)
+		goto out;
+
+	if (card->state == CARD_STATE_RECOVER) {
+		rc = __qeth_l3_set_online(card->gdev, 1);
+		if (rc) {
+			if (card->dev) {
+				rtnl_lock();
+				dev_close(card->dev);
+				rtnl_unlock();
+			}
+		}
+	} else
+		rc = __qeth_l3_set_online(card->gdev, 0);
+out:
+	qeth_set_allowed_threads(card, 0xffffffff, 0);
+	if (card->dev)
+		netif_device_attach(card->dev);
+	if (rc)
+		dev_warn(&card->gdev->dev, "The qeth device driver "
+			"failed to recover an error on the device\n");
+	return rc;
+}
+
 struct ccwgroup_driver qeth_l3_ccwgroup_driver = {
 	.probe = qeth_l3_probe_device,
 	.remove = qeth_l3_remove_device,
 	.set_online = qeth_l3_set_online,
 	.set_offline = qeth_l3_set_offline,
 	.shutdown = qeth_l3_shutdown,
+	.freeze = qeth_l3_pm_suspend,
+	.thaw = qeth_l3_pm_resume,
+	.restore = qeth_l3_pm_resume,
 };
 EXPORT_SYMBOL_GPL(qeth_l3_ccwgroup_driver);
 
diff --git a/drivers/s390/net/smsgiucv.c b/drivers/s390/net/smsgiucv.c
index 164e090..e76a320 100644
--- a/drivers/s390/net/smsgiucv.c
+++ b/drivers/s390/net/smsgiucv.c
@@ -1,7 +1,8 @@
 /*
  * IUCV special message driver
  *
- * Copyright 2003 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ * Copyright IBM Corp. 2003, 2009
+ *
  * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
  *
  * This program is free software; you can redistribute it and/or modify
@@ -40,6 +41,8 @@
 MODULE_DESCRIPTION ("Linux for S/390 IUCV special message driver");
 
 static struct iucv_path *smsg_path;
+/* dummy device used as trigger for PM functions */
+static struct device *smsg_dev;
 
 static DEFINE_SPINLOCK(smsg_list_lock);
 static LIST_HEAD(smsg_list);
@@ -132,14 +135,51 @@
 	kfree(cb);
 }
 
+static int smsg_pm_freeze(struct device *dev)
+{
+#ifdef CONFIG_PM_DEBUG
+	printk(KERN_WARNING "smsg_pm_freeze\n");
+#endif
+	if (smsg_path)
+		iucv_path_sever(smsg_path, NULL);
+	return 0;
+}
+
+static int smsg_pm_restore_thaw(struct device *dev)
+{
+	int rc;
+
+#ifdef CONFIG_PM_DEBUG
+	printk(KERN_WARNING "smsg_pm_restore_thaw\n");
+#endif
+	if (smsg_path) {
+		memset(smsg_path, 0, sizeof(*smsg_path));
+		smsg_path->msglim = 255;
+		smsg_path->flags = 0;
+		rc = iucv_path_connect(smsg_path, &smsg_handler, "*MSG    ",
+				       NULL, NULL, NULL);
+		printk(KERN_ERR "iucv_path_connect returned with rc %i\n", rc);
+	}
+	return 0;
+}
+
+static struct dev_pm_ops smsg_pm_ops = {
+	.freeze = smsg_pm_freeze,
+	.thaw = smsg_pm_restore_thaw,
+	.restore = smsg_pm_restore_thaw,
+};
+
 static struct device_driver smsg_driver = {
+	.owner = THIS_MODULE,
 	.name = "SMSGIUCV",
 	.bus  = &iucv_bus,
+	.pm = &smsg_pm_ops,
 };
 
 static void __exit smsg_exit(void)
 {
 	cpcmd("SET SMSG IUCV", NULL, 0, NULL);
+	device_unregister(smsg_dev);
 	iucv_unregister(&smsg_handler, 1);
 	driver_unregister(&smsg_driver);
 }
@@ -166,12 +206,29 @@
 	rc = iucv_path_connect(smsg_path, &smsg_handler, "*MSG    ",
 			       NULL, NULL, NULL);
 	if (rc)
-		goto out_free;
+		goto out_free_path;
+	smsg_dev = kzalloc(sizeof(struct device), GFP_KERNEL);
+	if (!smsg_dev) {
+		rc = -ENOMEM;
+		goto out_free_path;
+	}
+	dev_set_name(smsg_dev, "smsg_iucv");
+	smsg_dev->bus = &iucv_bus;
+	smsg_dev->parent = iucv_root;
+	smsg_dev->release = (void (*)(struct device *))kfree;
+	smsg_dev->driver = &smsg_driver;
+	rc = device_register(smsg_dev);
+	if (rc)
+		goto out_free_dev;
+
 	cpcmd("SET SMSG IUCV", NULL, 0, NULL);
 	return 0;
 
-out_free:
+out_free_dev:
+	kfree(smsg_dev);
+out_free_path:
 	iucv_path_free(smsg_path);
+	smsg_path = NULL;
 out_register:
 	iucv_unregister(&smsg_handler, 1);
 out_driver:
diff --git a/drivers/s390/scsi/zfcp_ccw.c b/drivers/s390/scsi/zfcp_ccw.c
index b2fe5cd..d9da5c4 100644
--- a/drivers/s390/scsi/zfcp_ccw.c
+++ b/drivers/s390/scsi/zfcp_ccw.c
@@ -13,6 +13,36 @@
 
 #define ZFCP_MODEL_PRIV 0x4
 
+static int zfcp_ccw_suspend(struct ccw_device *cdev)
+
+{
+	struct zfcp_adapter *adapter = dev_get_drvdata(&cdev->dev);
+
+	down(&zfcp_data.config_sema);
+
+	zfcp_erp_adapter_shutdown(adapter, 0, "ccsusp1", NULL);
+	zfcp_erp_wait(adapter);
+
+	up(&zfcp_data.config_sema);
+
+	return 0;
+}
+
+static int zfcp_ccw_activate(struct ccw_device *cdev)
+
+{
+	struct zfcp_adapter *adapter = dev_get_drvdata(&cdev->dev);
+
+	zfcp_erp_modify_adapter_status(adapter, "ccresu1", NULL,
+				       ZFCP_STATUS_COMMON_RUNNING, ZFCP_SET);
+	zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED,
+				"ccresu2", NULL);
+	zfcp_erp_wait(adapter);
+	flush_work(&adapter->scan_work);
+
+	return 0;
+}
+
 static struct ccw_device_id zfcp_ccw_device_id[] = {
 	{ CCW_DEVICE_DEVTYPE(0x1731, 0x3, 0x1732, 0x3) },
 	{ CCW_DEVICE_DEVTYPE(0x1731, 0x3, 0x1732, ZFCP_MODEL_PRIV) },
@@ -227,6 +257,9 @@
 	.set_offline = zfcp_ccw_set_offline,
 	.notify      = zfcp_ccw_notify,
 	.shutdown    = zfcp_ccw_shutdown,
+	.freeze      = zfcp_ccw_suspend,
+	.thaw	     = zfcp_ccw_activate,
+	.restore     = zfcp_ccw_activate,
 };
 
 /**
diff --git a/drivers/sbus/char/openprom.c b/drivers/sbus/char/openprom.c
index 124f660..75ac19b 100644
--- a/drivers/sbus/char/openprom.c
+++ b/drivers/sbus/char/openprom.c
@@ -303,7 +303,7 @@
 				struct device_node *dp)
 {
 	DATA *data = file->private_data;
-	struct openpromio *opp;
+	struct openpromio *opp = NULL;
 	int bufsize, error = 0;
 	static int cnt;
 	void __user *argp = (void __user *)arg;
diff --git a/drivers/scsi/aha1740.c b/drivers/scsi/aha1740.c
index ed0e3e5..5381357 100644
--- a/drivers/scsi/aha1740.c
+++ b/drivers/scsi/aha1740.c
@@ -646,7 +646,7 @@
 
 static __devexit int aha1740_remove (struct device *dev)
 {
-	struct Scsi_Host *shpnt = dev->driver_data;
+	struct Scsi_Host *shpnt = dev_get_drvdata(dev);
 	struct aha1740_hostdata *host = HOSTDATA (shpnt);
 
 	scsi_remove_host(shpnt);
diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c
index 11d2602..869a11b 100644
--- a/drivers/scsi/ibmvscsi/ibmvscsi.c
+++ b/drivers/scsi/ibmvscsi/ibmvscsi.c
@@ -1877,7 +1877,7 @@
 	unsigned long wait_switch = 0;
 	int rc;
 
-	vdev->dev.driver_data = NULL;
+	dev_set_drvdata(&vdev->dev, NULL);
 
 	host = scsi_host_alloc(&driver_template, sizeof(*hostdata));
 	if (!host) {
@@ -1949,7 +1949,7 @@
 			scsi_scan_host(host);
 	}
 
-	vdev->dev.driver_data = hostdata;
+	dev_set_drvdata(&vdev->dev, hostdata);
 	return 0;
 
       add_srp_port_failed:
@@ -1968,7 +1968,7 @@
 
 static int ibmvscsi_remove(struct vio_dev *vdev)
 {
-	struct ibmvscsi_host_data *hostdata = vdev->dev.driver_data;
+	struct ibmvscsi_host_data *hostdata = dev_get_drvdata(&vdev->dev);
 	unmap_persist_bufs(hostdata);
 	release_event_pool(&hostdata->pool, hostdata);
 	ibmvscsi_ops->release_crq_queue(&hostdata->queue, hostdata,
diff --git a/drivers/scsi/ibmvscsi/ibmvstgt.c b/drivers/scsi/ibmvscsi/ibmvstgt.c
index e2dd6a4..d5eaf97 100644
--- a/drivers/scsi/ibmvscsi/ibmvstgt.c
+++ b/drivers/scsi/ibmvscsi/ibmvstgt.c
@@ -892,7 +892,7 @@
 
 static int ibmvstgt_remove(struct vio_dev *dev)
 {
-	struct srp_target *target = (struct srp_target *) dev->dev.driver_data;
+	struct srp_target *target = dev_get_drvdata(&dev->dev);
 	struct Scsi_Host *shost = target->shost;
 	struct vio_port *vport = target->ldata;
 
diff --git a/drivers/scsi/libsrp.c b/drivers/scsi/libsrp.c
index 15e2d13..2742ae8 100644
--- a/drivers/scsi/libsrp.c
+++ b/drivers/scsi/libsrp.c
@@ -135,7 +135,7 @@
 	INIT_LIST_HEAD(&target->cmd_queue);
 
 	target->dev = dev;
-	target->dev->driver_data = target;
+	dev_set_drvdata(target->dev, target);
 
 	target->srp_iu_size = iu_size;
 	target->rx_ring_size = nr;
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
index 2b02b1f..8d0f0de 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.c
+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
@@ -53,8 +53,7 @@
  * debugfs interface
  *
  * To access this interface the user should:
- * # mkdir /debug
- * # mount -t debugfs none /debug
+ * # mount -t debugfs none /sys/kernel/debug
  *
  * The lpfc debugfs directory hierarchy is:
  * lpfc/lpfcX/vportY
diff --git a/drivers/serial/atmel_serial.c b/drivers/serial/atmel_serial.c
index b3497d7..338b15c 100644
--- a/drivers/serial/atmel_serial.c
+++ b/drivers/serial/atmel_serial.c
@@ -1104,11 +1104,13 @@
 	/* update the per-port timeout */
 	uart_update_timeout(port, termios->c_cflag, baud);
 
-	/* save/disable interrupts and drain transmitter */
+	/*
+	 * save/disable interrupts. The tty layer will ensure that the
+	 * transmitter is empty if requested by the caller, so there's
+	 * no need to wait for it here.
+	 */
 	imr = UART_GET_IMR(port);
 	UART_PUT_IDR(port, -1);
-	while (!(UART_GET_CSR(port) & ATMEL_US_TXEMPTY))
-		cpu_relax();
 
 	/* disable receiver and transmitter */
 	UART_PUT_CR(port, ATMEL_US_TXDIS | ATMEL_US_RXDIS);
diff --git a/drivers/serial/imx.c b/drivers/serial/imx.c
index 285b414..5d7b58f 100644
--- a/drivers/serial/imx.c
+++ b/drivers/serial/imx.c
@@ -924,11 +924,13 @@
 	rational_best_approximation(16 * div * baud, sport->port.uartclk,
 		1 << 16, 1 << 16, &num, &denom);
 
-	tdiv64 = sport->port.uartclk;
-	tdiv64 *= num;
-	do_div(tdiv64, denom * 16 * div);
-	tty_encode_baud_rate(sport->port.info->port.tty,
-		(speed_t)tdiv64, (speed_t)tdiv64);
+	if (port->info && port->info->port.tty) {
+		tdiv64 = sport->port.uartclk;
+		tdiv64 *= num;
+		do_div(tdiv64, denom * 16 * div);
+		tty_encode_baud_rate(sport->port.info->port.tty,
+				(speed_t)tdiv64, (speed_t)tdiv64);
+	}
 
 	num -= 1;
 	denom -= 1;
diff --git a/drivers/serial/of_serial.c b/drivers/serial/of_serial.c
index 14f8fa9..54483cd 100644
--- a/drivers/serial/of_serial.c
+++ b/drivers/serial/of_serial.c
@@ -122,7 +122,7 @@
 
 	info->type = port_type;
 	info->line = ret;
-	ofdev->dev.driver_data = info;
+	dev_set_drvdata(&ofdev->dev, info);
 	return 0;
 out:
 	kfree(info);
@@ -135,7 +135,7 @@
  */
 static int of_platform_serial_remove(struct of_device *ofdev)
 {
-	struct of_serial_info *info = ofdev->dev.driver_data;
+	struct of_serial_info *info = dev_get_drvdata(&ofdev->dev);
 	switch (info->type) {
 #ifdef CONFIG_SERIAL_8250
 	case PORT_8250 ... PORT_MAX_8250:
diff --git a/drivers/spi/spi_bfin5xx.c b/drivers/spi/spi_bfin5xx.c
index f014cc2..011c5bd 100644
--- a/drivers/spi/spi_bfin5xx.c
+++ b/drivers/spi/spi_bfin5xx.c
@@ -803,7 +803,7 @@
 				drv_data->rx, drv_data->len_in_bytes);
 
 			/* invalidate caches, if needed */
-			if (bfin_addr_dcachable((unsigned long) drv_data->rx))
+			if (bfin_addr_dcacheable((unsigned long) drv_data->rx))
 				invalidate_dcache_range((unsigned long) drv_data->rx,
 							(unsigned long) (drv_data->rx +
 							drv_data->len_in_bytes));
@@ -816,7 +816,7 @@
 			dev_dbg(&drv_data->pdev->dev, "doing DMA out.\n");
 
 			/* flush caches, if needed */
-			if (bfin_addr_dcachable((unsigned long) drv_data->tx))
+			if (bfin_addr_dcacheable((unsigned long) drv_data->tx))
 				flush_dcache_range((unsigned long) drv_data->tx,
 						(unsigned long) (drv_data->tx +
 						drv_data->len_in_bytes));
diff --git a/drivers/staging/uc2322/aten2011.c b/drivers/staging/uc2322/aten2011.c
index 9c62f78..39d0926 100644
--- a/drivers/staging/uc2322/aten2011.c
+++ b/drivers/staging/uc2322/aten2011.c
@@ -2336,7 +2336,7 @@
 	return 0;
 }
 
-static void ATEN2011_shutdown(struct usb_serial *serial)
+static void ATEN2011_release(struct usb_serial *serial)
 {
 	int i;
 	struct ATENINTL_port *ATEN2011_port;
@@ -2382,7 +2382,7 @@
 	.tiocmget =		ATEN2011_tiocmget,
 	.tiocmset =		ATEN2011_tiocmset,
 	.attach =		ATEN2011_startup,
-	.shutdown =		ATEN2011_shutdown,
+	.release =		ATEN2011_release,
 	.read_bulk_callback =	ATEN2011_bulk_in_callback,
 	.read_int_callback =	ATEN2011_interrupt_callback,
 };
diff --git a/drivers/thermal/thermal_sys.c b/drivers/thermal/thermal_sys.c
index 5e38ba1..0a69672 100644
--- a/drivers/thermal/thermal_sys.c
+++ b/drivers/thermal/thermal_sys.c
@@ -417,7 +417,7 @@
 static ssize_t
 name_show(struct device *dev, struct device_attribute *attr, char *buf)
 {
-	struct thermal_hwmon_device *hwmon = dev->driver_data;
+	struct thermal_hwmon_device *hwmon = dev_get_drvdata(dev);
 	return sprintf(buf, "%s\n", hwmon->type);
 }
 static DEVICE_ATTR(name, 0444, name_show, NULL);
@@ -488,7 +488,7 @@
 		result = PTR_ERR(hwmon->device);
 		goto free_mem;
 	}
-	hwmon->device->driver_data = hwmon;
+	dev_set_drvdata(hwmon->device, hwmon);
 	result = device_create_file(hwmon->device, &dev_attr_name);
 	if (result)
 		goto unregister_hwmon_device;
diff --git a/drivers/usb/Kconfig b/drivers/usb/Kconfig
index 5eee3f8..dcd49f1 100644
--- a/drivers/usb/Kconfig
+++ b/drivers/usb/Kconfig
@@ -64,6 +64,7 @@
 config USB
 	tristate "Support for Host-side USB"
 	depends on USB_ARCH_HAS_HCD
+	select NLS  # for UTF-8 strings
 	---help---
 	  Universal Serial Bus (USB) is a specification for a serial bus
 	  subsystem which offers higher speeds and more features than the
diff --git a/drivers/usb/Makefile b/drivers/usb/Makefile
index 0a3dc5e..19cb7d5 100644
--- a/drivers/usb/Makefile
+++ b/drivers/usb/Makefile
@@ -14,6 +14,7 @@
 obj-$(CONFIG_USB_OHCI_HCD)	+= host/
 obj-$(CONFIG_USB_UHCI_HCD)	+= host/
 obj-$(CONFIG_USB_FHCI_HCD)	+= host/
+obj-$(CONFIG_USB_XHCI_HCD)	+= host/
 obj-$(CONFIG_USB_SL811_HCD)	+= host/
 obj-$(CONFIG_USB_U132_HCD)	+= host/
 obj-$(CONFIG_USB_R8A66597_HCD)	+= host/
diff --git a/drivers/usb/atm/ueagle-atm.c b/drivers/usb/atm/ueagle-atm.c
index 9cf9ff6..d171b56 100644
--- a/drivers/usb/atm/ueagle-atm.c
+++ b/drivers/usb/atm/ueagle-atm.c
@@ -306,6 +306,7 @@
 #define FW_GET_BYTE(p)	*((__u8 *) (p))
 
 #define FW_DIR "ueagle-atm/"
+#define UEA_FW_NAME_MAX 30
 #define NB_MODEM 4
 
 #define BULK_TIMEOUT 300
@@ -1564,9 +1565,9 @@
 		file = cmv_file[sc->modem_index];
 
 	strcpy(cmv_name, FW_DIR);
-	strlcat(cmv_name, file, FIRMWARE_NAME_MAX);
+	strlcat(cmv_name, file, UEA_FW_NAME_MAX);
 	if (ver == 2)
-		strlcat(cmv_name, ".v2", FIRMWARE_NAME_MAX);
+		strlcat(cmv_name, ".v2", UEA_FW_NAME_MAX);
 }
 
 static int request_cmvs_old(struct uea_softc *sc,
@@ -1574,7 +1575,7 @@
 {
 	int ret, size;
 	u8 *data;
-	char cmv_name[FIRMWARE_NAME_MAX]; /* 30 bytes stack variable */
+	char cmv_name[UEA_FW_NAME_MAX]; /* 30 bytes stack variable */
 
 	cmvs_file_name(sc, cmv_name, 1);
 	ret = request_firmware(fw, cmv_name, &sc->usb_dev->dev);
@@ -1608,7 +1609,7 @@
 	int ret, size;
 	u32 crc;
 	u8 *data;
-	char cmv_name[FIRMWARE_NAME_MAX]; /* 30 bytes stack variable */
+	char cmv_name[UEA_FW_NAME_MAX]; /* 30 bytes stack variable */
 
 	cmvs_file_name(sc, cmv_name, 2);
 	ret = request_firmware(fw, cmv_name, &sc->usb_dev->dev);
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index ddeb691..38bfdb0 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -937,9 +937,9 @@
 	int buflen = intf->altsetting->extralen;
 	struct usb_interface *control_interface;
 	struct usb_interface *data_interface;
-	struct usb_endpoint_descriptor *epctrl;
-	struct usb_endpoint_descriptor *epread;
-	struct usb_endpoint_descriptor *epwrite;
+	struct usb_endpoint_descriptor *epctrl = NULL;
+	struct usb_endpoint_descriptor *epread = NULL;
+	struct usb_endpoint_descriptor *epwrite = NULL;
 	struct usb_device *usb_dev = interface_to_usbdev(intf);
 	struct acm *acm;
 	int minor;
@@ -952,6 +952,7 @@
 	unsigned long quirks;
 	int num_rx_buf;
 	int i;
+	int combined_interfaces = 0;
 
 	/* normal quirks */
 	quirks = (unsigned long)id->driver_info;
@@ -1033,9 +1034,15 @@
 			data_interface = usb_ifnum_to_if(usb_dev, (data_interface_num = call_interface_num));
 			control_interface = intf;
 		} else {
-			dev_dbg(&intf->dev,
-					"No union descriptor, giving up\n");
-			return -ENODEV;
+			if (intf->cur_altsetting->desc.bNumEndpoints != 3) {
+				dev_dbg(&intf->dev,"No union descriptor, giving up\n");
+				return -ENODEV;
+			} else {
+				dev_warn(&intf->dev,"No union descriptor, testing for castrated device\n");
+				combined_interfaces = 1;
+				control_interface = data_interface = intf;
+				goto look_for_collapsed_interface;
+			}
 		}
 	} else {
 		control_interface = usb_ifnum_to_if(usb_dev, union_header->bMasterInterface0);
@@ -1049,6 +1056,36 @@
 	if (data_interface_num != call_interface_num)
 		dev_dbg(&intf->dev, "Separate call control interface. That is not fully supported.\n");
 
+	if (control_interface == data_interface) {
+		/* some broken devices designed for windows work this way */
+		dev_warn(&intf->dev,"Control and data interfaces are not separated!\n");
+		combined_interfaces = 1;
+		/* a popular other OS doesn't use it */
+		quirks |= NO_CAP_LINE;
+		if (data_interface->cur_altsetting->desc.bNumEndpoints != 3) {
+			dev_err(&intf->dev, "This needs exactly 3 endpoints\n");
+			return -EINVAL;
+		}
+look_for_collapsed_interface:
+		for (i = 0; i < 3; i++) {
+			struct usb_endpoint_descriptor *ep;
+			ep = &data_interface->cur_altsetting->endpoint[i].desc;
+
+			if (usb_endpoint_is_int_in(ep))
+				epctrl = ep;
+			else if (usb_endpoint_is_bulk_out(ep))
+				epwrite = ep;
+			else if (usb_endpoint_is_bulk_in(ep))
+				epread = ep;
+			else
+				return -EINVAL;
+		}
+		if (!epctrl || !epread || !epwrite)
+			return -ENODEV;
+		else
+			goto made_compressed_probe;
+	}
+
 skip_normal_probe:
 
 	/*workaround for switched interfaces */
@@ -1068,10 +1105,11 @@
 	}
 
 	/* Accept probe requests only for the control interface */
-	if (intf != control_interface)
+	if (!combined_interfaces && intf != control_interface)
 		return -ENODEV;
 
-	if (usb_interface_claimed(data_interface)) { /* valid in this context */
+	if (!combined_interfaces && usb_interface_claimed(data_interface)) {
+		/* valid in this context */
 		dev_dbg(&intf->dev, "The data interface isn't available\n");
 		return -EBUSY;
 	}
@@ -1095,6 +1133,7 @@
 		epread = epwrite;
 		epwrite = t;
 	}
+made_compressed_probe:
 	dbg("interfaces are valid");
 	for (minor = 0; minor < ACM_TTY_MINORS && acm_table[minor]; minor++);
 
@@ -1112,12 +1151,15 @@
 	ctrlsize = le16_to_cpu(epctrl->wMaxPacketSize);
 	readsize = le16_to_cpu(epread->wMaxPacketSize) *
 				(quirks == SINGLE_RX_URB ? 1 : 2);
+	acm->combined_interfaces = combined_interfaces;
 	acm->writesize = le16_to_cpu(epwrite->wMaxPacketSize) * 20;
 	acm->control = control_interface;
 	acm->data = data_interface;
 	acm->minor = minor;
 	acm->dev = usb_dev;
 	acm->ctrl_caps = ac_management_function;
+	if (quirks & NO_CAP_LINE)
+		acm->ctrl_caps &= ~USB_CDC_CAP_LINE;
 	acm->ctrlsize = ctrlsize;
 	acm->readsize = readsize;
 	acm->rx_buflimit = num_rx_buf;
@@ -1223,9 +1265,10 @@
 
 skip_countries:
 	usb_fill_int_urb(acm->ctrlurb, usb_dev,
-			usb_rcvintpipe(usb_dev, epctrl->bEndpointAddress),
-			acm->ctrl_buffer, ctrlsize, acm_ctrl_irq, acm,
-			epctrl->bInterval);
+			 usb_rcvintpipe(usb_dev, epctrl->bEndpointAddress),
+			 acm->ctrl_buffer, ctrlsize, acm_ctrl_irq, acm,
+			 /* works around buggy devices */
+			 epctrl->bInterval ? epctrl->bInterval : 0xff);
 	acm->ctrlurb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
 	acm->ctrlurb->transfer_dma = acm->ctrl_dma;
 
@@ -1312,7 +1355,8 @@
 								acm->ctrl_dma);
 	acm_read_buffers_free(acm);
 
-	usb_driver_release_interface(&acm_driver, intf == acm->control ?
+	if (!acm->combined_interfaces)
+		usb_driver_release_interface(&acm_driver, intf == acm->control ?
 					acm->data : acm->control);
 
 	if (acm->port.count == 0) {
@@ -1451,6 +1495,9 @@
 					   Maybe we should define a new
 					   quirk for this. */
 	},
+	{ USB_DEVICE(0x1bbb, 0x0003), /* Alcatel OT-I650 */
+	.driver_info = NO_UNION_NORMAL, /* reports zero length descriptor */
+	},
 
 	/* control interfaces with various AT-command sets */
 	{ USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ACM,
diff --git a/drivers/usb/class/cdc-acm.h b/drivers/usb/class/cdc-acm.h
index 4c38564..1602324 100644
--- a/drivers/usb/class/cdc-acm.h
+++ b/drivers/usb/class/cdc-acm.h
@@ -125,6 +125,7 @@
 	unsigned char clocal;				/* termios CLOCAL */
 	unsigned int ctrl_caps;				/* control capabilities from the class specific header */
 	unsigned int susp_count;			/* number of suspended interfaces */
+	unsigned int combined_interfaces:1;		/* control and data collapsed */
 	struct acm_wb *delayed_wb;			/* write queued for a device about to be woken */
 };
 
@@ -133,3 +134,4 @@
 /* constants describing various quirks and errors */
 #define NO_UNION_NORMAL			1
 #define SINGLE_RX_URB			2
+#define NO_CAP_LINE			4
diff --git a/drivers/usb/class/usblp.c b/drivers/usb/class/usblp.c
index d2747a4..26c09f0 100644
--- a/drivers/usb/class/usblp.c
+++ b/drivers/usb/class/usblp.c
@@ -1057,8 +1057,14 @@
 	.release =	usblp_release,
 };
 
+static char *usblp_nodename(struct device *dev)
+{
+	return kasprintf(GFP_KERNEL, "usb/%s", dev_name(dev));
+}
+
 static struct usb_class_driver usblp_class = {
 	.name =		"lp%d",
+	.nodename =	usblp_nodename,
 	.fops =		&usblp_fops,
 	.minor_base =	USBLP_MINOR_BASE,
 };
diff --git a/drivers/usb/class/usbtmc.c b/drivers/usb/class/usbtmc.c
index c40a9b2..3703789 100644
--- a/drivers/usb/class/usbtmc.c
+++ b/drivers/usb/class/usbtmc.c
@@ -927,21 +927,27 @@
 	switch (cmd) {
 	case USBTMC_IOCTL_CLEAR_OUT_HALT:
 		retval = usbtmc_ioctl_clear_out_halt(data);
+		break;
 
 	case USBTMC_IOCTL_CLEAR_IN_HALT:
 		retval = usbtmc_ioctl_clear_in_halt(data);
+		break;
 
 	case USBTMC_IOCTL_INDICATOR_PULSE:
 		retval = usbtmc_ioctl_indicator_pulse(data);
+		break;
 
 	case USBTMC_IOCTL_CLEAR:
 		retval = usbtmc_ioctl_clear(data);
+		break;
 
 	case USBTMC_IOCTL_ABORT_BULK_OUT:
 		retval = usbtmc_ioctl_abort_bulk_out(data);
+		break;
 
 	case USBTMC_IOCTL_ABORT_BULK_IN:
 		retval = usbtmc_ioctl_abort_bulk_in(data);
+		break;
 	}
 
 	mutex_unlock(&data->io_mutex);
diff --git a/drivers/usb/core/Kconfig b/drivers/usb/core/Kconfig
index e1759d1..69280c3 100644
--- a/drivers/usb/core/Kconfig
+++ b/drivers/usb/core/Kconfig
@@ -28,7 +28,7 @@
 	depends on USB
 
 config USB_DEVICEFS
-	bool "USB device filesystem"
+	bool "USB device filesystem (DEPRECATED)" if EMBEDDED
 	depends on USB
 	---help---
 	  If you say Y here (and to "/proc file system support" in the "File
@@ -46,11 +46,15 @@
 	  For the format of the various /proc/bus/usb/ files, please read
 	  <file:Documentation/usb/proc_usb_info.txt>.
 
-	  Usbfs files can't handle Access Control Lists (ACL), which are the
-	  default way to grant access to USB devices for untrusted users of a
-	  desktop system. The usbfs functionality is replaced by real
-	  device-nodes managed by udev. These nodes live in /dev/bus/usb and
-	  are used by libusb.
+	  Modern Linux systems do not use this.
+
+	  Usbfs entries are files and not character devices; usbfs can't
+	  handle Access Control Lists (ACLs), which are the default way to
+	  grant access to USB devices for untrusted users of a desktop
+	  system.
+
+	  The usbfs functionality is replaced by real device-nodes managed by
+	  udev.  These nodes live in /dev/bus/usb and are used by libusb.
 
 config USB_DEVICE_CLASS
 	bool "USB device class-devices (DEPRECATED)"
diff --git a/drivers/usb/core/Makefile b/drivers/usb/core/Makefile
index b607870..ec16e60 100644
--- a/drivers/usb/core/Makefile
+++ b/drivers/usb/core/Makefile
@@ -4,14 +4,14 @@
 
 usbcore-objs	:= usb.o hub.o hcd.o urb.o message.o driver.o \
 			config.o file.o buffer.o sysfs.o endpoint.o \
-			devio.o notify.o generic.o quirks.o
+			devio.o notify.o generic.o quirks.o devices.o
 
 ifeq ($(CONFIG_PCI),y)
 	usbcore-objs	+= hcd-pci.o
 endif
 
 ifeq ($(CONFIG_USB_DEVICEFS),y)
-	usbcore-objs	+= inode.o devices.o
+	usbcore-objs	+= inode.o
 endif
 
 obj-$(CONFIG_USB)	+= usbcore.o
diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c
index 568244c..24dfb33 100644
--- a/drivers/usb/core/config.c
+++ b/drivers/usb/core/config.c
@@ -19,6 +19,32 @@
 	return (n == 1 ? "" : "s");
 }
 
+/* FIXME: this is a kludge */
+static int find_next_descriptor_more(unsigned char *buffer, int size,
+    int dt1, int dt2, int dt3, int *num_skipped)
+{
+	struct usb_descriptor_header *h;
+	int n = 0;
+	unsigned char *buffer0 = buffer;
+
+	/* Find the next descriptor of type dt1 or dt2 or dt3 */
+	while (size > 0) {
+		h = (struct usb_descriptor_header *) buffer;
+		if (h->bDescriptorType == dt1 || h->bDescriptorType == dt2 ||
+				h->bDescriptorType == dt3)
+			break;
+		buffer += h->bLength;
+		size -= h->bLength;
+		++n;
+	}
+
+	/* Store the number of descriptors skipped and return the
+	 * number of bytes skipped */
+	if (num_skipped)
+		*num_skipped = n;
+	return buffer - buffer0;
+}
+
 static int find_next_descriptor(unsigned char *buffer, int size,
     int dt1, int dt2, int *num_skipped)
 {
@@ -43,6 +69,129 @@
 	return buffer - buffer0;
 }
 
+static int usb_parse_ss_endpoint_companion(struct device *ddev, int cfgno,
+		int inum, int asnum, struct usb_host_endpoint *ep,
+		int num_ep, unsigned char *buffer, int size)
+{
+	unsigned char *buffer_start = buffer;
+	struct usb_ss_ep_comp_descriptor	*desc;
+	int retval;
+	int num_skipped;
+	int max_tx;
+	int i;
+
+	/* Allocate space for the SS endpoint companion descriptor */
+	ep->ss_ep_comp = kzalloc(sizeof(struct usb_host_ss_ep_comp),
+			GFP_KERNEL);
+	if (!ep->ss_ep_comp)
+		return -ENOMEM;
+	desc = (struct usb_ss_ep_comp_descriptor *) buffer;
+	if (desc->bDescriptorType != USB_DT_SS_ENDPOINT_COMP) {
+		dev_warn(ddev, "No SuperSpeed endpoint companion for config %d "
+				" interface %d altsetting %d ep %d: "
+				"using minimum values\n",
+				cfgno, inum, asnum, ep->desc.bEndpointAddress);
+		ep->ss_ep_comp->desc.bLength = USB_DT_SS_EP_COMP_SIZE;
+		ep->ss_ep_comp->desc.bDescriptorType = USB_DT_SS_ENDPOINT_COMP;
+		ep->ss_ep_comp->desc.bMaxBurst = 0;
+		/*
+		 * Leave bmAttributes as zero, which will mean no streams for
+		 * bulk, and isoc won't support multiple bursts of packets.
+		 * With bursts of only one packet, and a Mult of 1, the max
+		 * amount of data moved per endpoint service interval is one
+		 * packet.
+		 */
+		if (usb_endpoint_xfer_isoc(&ep->desc) ||
+				usb_endpoint_xfer_int(&ep->desc))
+			ep->ss_ep_comp->desc.wBytesPerInterval =
+				ep->desc.wMaxPacketSize;
+		/*
+		 * The next descriptor is for an Endpoint or Interface,
+		 * no extra descriptors to copy into the companion structure,
+		 * and we didn't eat up any of the buffer.
+		 */
+		retval = 0;
+		goto valid;
+	}
+	memcpy(&ep->ss_ep_comp->desc, desc, USB_DT_SS_EP_COMP_SIZE);
+	desc = &ep->ss_ep_comp->desc;
+	buffer += desc->bLength;
+	size -= desc->bLength;
+
+	/* Eat up the other descriptors we don't care about */
+	ep->ss_ep_comp->extra = buffer;
+	i = find_next_descriptor(buffer, size, USB_DT_ENDPOINT,
+			USB_DT_INTERFACE, &num_skipped);
+	ep->ss_ep_comp->extralen = i;
+	buffer += i;
+	size -= i;
+	retval = buffer - buffer_start + i;
+	if (num_skipped > 0)
+		dev_dbg(ddev, "skipped %d descriptor%s after %s\n",
+				num_skipped, plural(num_skipped),
+				"SuperSpeed endpoint companion");
+
+	/* Check the various values */
+	if (usb_endpoint_xfer_control(&ep->desc) && desc->bMaxBurst != 0) {
+		dev_warn(ddev, "Control endpoint with bMaxBurst = %d in "
+				"config %d interface %d altsetting %d ep %d: "
+				"setting to zero\n", desc->bMaxBurst,
+				cfgno, inum, asnum, ep->desc.bEndpointAddress);
+		desc->bMaxBurst = 0;
+	}
+	if (desc->bMaxBurst > 15) {
+		dev_warn(ddev, "Endpoint with bMaxBurst = %d in "
+				"config %d interface %d altsetting %d ep %d: "
+				"setting to 15\n", desc->bMaxBurst,
+				cfgno, inum, asnum, ep->desc.bEndpointAddress);
+		desc->bMaxBurst = 15;
+	}
+	if ((usb_endpoint_xfer_control(&ep->desc) || usb_endpoint_xfer_int(&ep->desc))
+			&& desc->bmAttributes != 0) {
+		dev_warn(ddev, "%s endpoint with bmAttributes = %d in "
+				"config %d interface %d altsetting %d ep %d: "
+				"setting to zero\n",
+				usb_endpoint_xfer_control(&ep->desc) ? "Control" : "Bulk",
+				desc->bmAttributes,
+				cfgno, inum, asnum, ep->desc.bEndpointAddress);
+		desc->bmAttributes = 0;
+	}
+	if (usb_endpoint_xfer_bulk(&ep->desc) && desc->bmAttributes > 16) {
+		dev_warn(ddev, "Bulk endpoint with more than 65536 streams in "
+				"config %d interface %d altsetting %d ep %d: "
+				"setting to max\n",
+				cfgno, inum, asnum, ep->desc.bEndpointAddress);
+		desc->bmAttributes = 16;
+	}
+	if (usb_endpoint_xfer_isoc(&ep->desc) && desc->bmAttributes > 2) {
+		dev_warn(ddev, "Isoc endpoint has Mult of %d in "
+				"config %d interface %d altsetting %d ep %d: "
+				"setting to 3\n", desc->bmAttributes + 1,
+				cfgno, inum, asnum, ep->desc.bEndpointAddress);
+		desc->bmAttributes = 2;
+	}
+	if (usb_endpoint_xfer_isoc(&ep->desc)) {
+		max_tx = ep->desc.wMaxPacketSize * (desc->bMaxBurst + 1) *
+			(desc->bmAttributes + 1);
+	} else if (usb_endpoint_xfer_int(&ep->desc)) {
+		max_tx = ep->desc.wMaxPacketSize * (desc->bMaxBurst + 1);
+	} else {
+		goto valid;
+	}
+	if (desc->wBytesPerInterval > max_tx) {
+		dev_warn(ddev, "%s endpoint with wBytesPerInterval of %d in "
+				"config %d interface %d altsetting %d ep %d: "
+				"setting to %d\n",
+				usb_endpoint_xfer_isoc(&ep->desc) ? "Isoc" : "Int",
+				desc->wBytesPerInterval,
+				cfgno, inum, asnum, ep->desc.bEndpointAddress,
+				max_tx);
+		desc->wBytesPerInterval = max_tx;
+	}
+valid:
+	return retval;
+}
+
 static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum,
     int asnum, struct usb_host_interface *ifp, int num_ep,
     unsigned char *buffer, int size)
@@ -50,7 +199,7 @@
 	unsigned char *buffer0 = buffer;
 	struct usb_endpoint_descriptor *d;
 	struct usb_host_endpoint *endpoint;
-	int n, i, j;
+	int n, i, j, retval;
 
 	d = (struct usb_endpoint_descriptor *) buffer;
 	buffer += d->bLength;
@@ -92,6 +241,7 @@
 	if (usb_endpoint_xfer_int(d)) {
 		i = 1;
 		switch (to_usb_device(ddev)->speed) {
+		case USB_SPEED_SUPER:
 		case USB_SPEED_HIGH:
 			/* Many device manufacturers are using full-speed
 			 * bInterval values in high-speed interrupt endpoint
@@ -161,17 +311,39 @@
 				cfgno, inum, asnum, d->bEndpointAddress,
 				maxp);
 	}
+	/* Allocate room for and parse any SS endpoint companion descriptors */
+	if (to_usb_device(ddev)->speed == USB_SPEED_SUPER) {
+		endpoint->extra = buffer;
+		i = find_next_descriptor_more(buffer, size, USB_DT_SS_ENDPOINT_COMP,
+				USB_DT_ENDPOINT, USB_DT_INTERFACE, &n);
+		endpoint->extralen = i;
+		buffer += i;
+		size -= i;
 
-	/* Skip over any Class Specific or Vendor Specific descriptors;
-	 * find the next endpoint or interface descriptor */
-	endpoint->extra = buffer;
-	i = find_next_descriptor(buffer, size, USB_DT_ENDPOINT,
-	    USB_DT_INTERFACE, &n);
-	endpoint->extralen = i;
+		if (size > 0) {
+			retval = usb_parse_ss_endpoint_companion(ddev, cfgno,
+					inum, asnum, endpoint, num_ep, buffer,
+					size);
+			if (retval >= 0) {
+				buffer += retval;
+				retval = buffer - buffer0;
+			}
+		} else {
+			retval = buffer - buffer0;
+		}
+	} else {
+		/* Skip over any Class Specific or Vendor Specific descriptors;
+		 * find the next endpoint or interface descriptor */
+		endpoint->extra = buffer;
+		i = find_next_descriptor(buffer, size, USB_DT_ENDPOINT,
+				USB_DT_INTERFACE, &n);
+		endpoint->extralen = i;
+		retval = buffer - buffer0 + i;
+	}
 	if (n > 0)
 		dev_dbg(ddev, "skipped %d descriptor%s after %s\n",
 		    n, plural(n), "endpoint");
-	return buffer - buffer0 + i;
+	return retval;
 
 skip_to_next_endpoint_or_interface_descriptor:
 	i = find_next_descriptor(buffer, size, USB_DT_ENDPOINT,
@@ -452,6 +624,8 @@
 		kref_init(&intfc->ref);
 	}
 
+	/* FIXME: parse the BOS descriptor */
+
 	/* Skip over any Class Specific or Vendor Specific descriptors;
 	 * find the first interface descriptor */
 	config->extra = buffer;
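
Illustration only, not part of the patch: a stand-alone C sketch of the max_tx bound that usb_parse_ss_endpoint_companion() above enforces for isochronous endpoints. The values are assumed examples (1024-byte packets, the maximum burst of 15, a Mult field of 2), not taken from the patch.

/* max_tx = wMaxPacketSize * (bMaxBurst + 1) * (Mult + 1), as checked above */
#include <stdio.h>

int main(void)
{
	unsigned int wMaxPacketSize = 1024;	/* assumed example value */
	unsigned int bMaxBurst = 15;		/* assumed example value */
	unsigned int mult = 2;			/* isoc bmAttributes, assumed */
	unsigned int max_tx = wMaxPacketSize * (bMaxBurst + 1) * (mult + 1);

	printf("max bytes per service interval: %u\n", max_tx);	/* 49152 */
	return 0;
}
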
diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c
index d0a21a5..69e5773 100644
--- a/drivers/usb/core/driver.c
+++ b/drivers/usb/core/driver.c
@@ -154,16 +154,11 @@
 static int usb_probe_device(struct device *dev)
 {
 	struct usb_device_driver *udriver = to_usb_device_driver(dev->driver);
-	struct usb_device *udev;
+	struct usb_device *udev = to_usb_device(dev);
 	int error = -ENODEV;
 
 	dev_dbg(dev, "%s\n", __func__);
 
-	if (!is_usb_device(dev))	/* Sanity check */
-		return error;
-
-	udev = to_usb_device(dev);
-
 	/* TODO: Add real matching code */
 
 	/* The device should always appear to be in use
@@ -203,18 +198,13 @@
 static int usb_probe_interface(struct device *dev)
 {
 	struct usb_driver *driver = to_usb_driver(dev->driver);
-	struct usb_interface *intf;
-	struct usb_device *udev;
+	struct usb_interface *intf = to_usb_interface(dev);
+	struct usb_device *udev = interface_to_usbdev(intf);
 	const struct usb_device_id *id;
 	int error = -ENODEV;
 
 	dev_dbg(dev, "%s\n", __func__);
 
-	if (is_usb_device(dev))		/* Sanity check */
-		return error;
-
-	intf = to_usb_interface(dev);
-	udev = interface_to_usbdev(intf);
 	intf->needs_binding = 0;
 
 	if (udev->authorized == 0) {
@@ -385,7 +375,6 @@
 					struct usb_interface *iface)
 {
 	struct device *dev = &iface->dev;
-	struct usb_device *udev = interface_to_usbdev(iface);
 
 	/* this should never happen, don't release something that's not ours */
 	if (!dev->driver || dev->driver != &driver->drvwrap.driver)
@@ -394,23 +383,19 @@
 	/* don't release from within disconnect() */
 	if (iface->condition != USB_INTERFACE_BOUND)
 		return;
+	iface->condition = USB_INTERFACE_UNBINDING;
 
-	/* don't release if the interface hasn't been added yet */
+	/* Release via the driver core only if the interface
+	 * has already been registered
+	 */
 	if (device_is_registered(dev)) {
-		iface->condition = USB_INTERFACE_UNBINDING;
 		device_release_driver(dev);
 	} else {
-		iface->condition = USB_INTERFACE_UNBOUND;
-		usb_cancel_queued_reset(iface);
+		down(&dev->sem);
+		usb_unbind_interface(dev);
+		dev->driver = NULL;
+		up(&dev->sem);
 	}
-	dev->driver = NULL;
-	usb_set_intfdata(iface, NULL);
-
-	usb_pm_lock(udev);
-	iface->condition = USB_INTERFACE_UNBOUND;
-	mark_quiesced(iface);
-	iface->needs_remote_wakeup = 0;
-	usb_pm_unlock(udev);
 }
 EXPORT_SYMBOL_GPL(usb_driver_release_interface);
 
@@ -598,7 +583,7 @@
 		/* TODO: Add real matching code */
 		return 1;
 
-	} else {
+	} else if (is_usb_interface(dev)) {
 		struct usb_interface *intf;
 		struct usb_driver *usb_drv;
 		const struct usb_device_id *id;
@@ -630,11 +615,14 @@
 	/* driver is often null here; dev_dbg() would oops */
 	pr_debug("usb %s: uevent\n", dev_name(dev));
 
-	if (is_usb_device(dev))
+	if (is_usb_device(dev)) {
 		usb_dev = to_usb_device(dev);
-	else {
+	} else if (is_usb_interface(dev)) {
 		struct usb_interface *intf = to_usb_interface(dev);
+
 		usb_dev = interface_to_usbdev(intf);
+	} else {
+		return 0;
 	}
 
 	if (usb_dev->devnum < 0) {
@@ -1762,6 +1750,7 @@
 int usb_resume(struct device *dev, pm_message_t msg)
 {
 	struct usb_device	*udev;
+	int			status;
 
 	udev = to_usb_device(dev);
 
@@ -1771,7 +1760,14 @@
 	 */
 	if (udev->skip_sys_resume)
 		return 0;
-	return usb_external_resume_device(udev, msg);
+	status = usb_external_resume_device(udev, msg);
+
+	/* Avoid PM error messages for devices disconnected while suspended
+	 * as we'll display regular disconnect messages just a bit later.
+	 */
+	if (status == -ENODEV)
+		return 0;
+	return status;
 }
 
 #endif /* CONFIG_PM */
diff --git a/drivers/usb/core/endpoint.c b/drivers/usb/core/endpoint.c
index 40dee2a..bc39fc4 100644
--- a/drivers/usb/core/endpoint.c
+++ b/drivers/usb/core/endpoint.c
@@ -15,19 +15,18 @@
 #include <linux/usb.h>
 #include "usb.h"
 
-#define MAX_ENDPOINT_MINORS (64*128*32)
-static int usb_endpoint_major;
-static DEFINE_IDR(endpoint_idr);
-
 struct ep_device {
 	struct usb_endpoint_descriptor *desc;
 	struct usb_device *udev;
 	struct device dev;
-	int minor;
 };
 #define to_ep_device(_dev) \
 	container_of(_dev, struct ep_device, dev)
 
+struct device_type usb_ep_device_type = {
+	.name =		"usb_endpoint",
+};
+
 struct ep_attribute {
 	struct attribute attr;
 	ssize_t (*show)(struct usb_device *,
@@ -160,118 +159,10 @@
 	NULL
 };
 
-static int usb_endpoint_major_init(void)
-{
-	dev_t dev;
-	int error;
-
-	error = alloc_chrdev_region(&dev, 0, MAX_ENDPOINT_MINORS,
-				    "usb_endpoint");
-	if (error) {
-		printk(KERN_ERR "Unable to get a dynamic major for "
-		       "usb endpoints.\n");
-		return error;
-	}
-	usb_endpoint_major = MAJOR(dev);
-
-	return error;
-}
-
-static void usb_endpoint_major_cleanup(void)
-{
-	unregister_chrdev_region(MKDEV(usb_endpoint_major, 0),
-				 MAX_ENDPOINT_MINORS);
-}
-
-static int endpoint_get_minor(struct ep_device *ep_dev)
-{
-	static DEFINE_MUTEX(minor_lock);
-	int retval = -ENOMEM;
-	int id;
-
-	mutex_lock(&minor_lock);
-	if (idr_pre_get(&endpoint_idr, GFP_KERNEL) == 0)
-		goto exit;
-
-	retval = idr_get_new(&endpoint_idr, ep_dev, &id);
-	if (retval < 0) {
-		if (retval == -EAGAIN)
-			retval = -ENOMEM;
-		goto exit;
-	}
-	ep_dev->minor = id & MAX_ID_MASK;
-exit:
-	mutex_unlock(&minor_lock);
-	return retval;
-}
-
-static void endpoint_free_minor(struct ep_device *ep_dev)
-{
-	idr_remove(&endpoint_idr, ep_dev->minor);
-}
-
-static struct endpoint_class {
-	struct kref kref;
-	struct class *class;
-} *ep_class;
-
-static int init_endpoint_class(void)
-{
-	int result = 0;
-
-	if (ep_class != NULL) {
-		kref_get(&ep_class->kref);
-		goto exit;
-	}
-
-	ep_class = kmalloc(sizeof(*ep_class), GFP_KERNEL);
-	if (!ep_class) {
-		result = -ENOMEM;
-		goto exit;
-	}
-
-	kref_init(&ep_class->kref);
-	ep_class->class = class_create(THIS_MODULE, "usb_endpoint");
-	if (IS_ERR(ep_class->class)) {
-		result = PTR_ERR(ep_class->class);
-		goto class_create_error;
-	}
-
-	result = usb_endpoint_major_init();
-	if (result)
-		goto endpoint_major_error;
-
-	goto exit;
-
-endpoint_major_error:
-	class_destroy(ep_class->class);
-class_create_error:
-	kfree(ep_class);
-	ep_class = NULL;
-exit:
-	return result;
-}
-
-static void release_endpoint_class(struct kref *kref)
-{
-	/* Ok, we cheat as we know we only have one ep_class */
-	class_destroy(ep_class->class);
-	kfree(ep_class);
-	ep_class = NULL;
-	usb_endpoint_major_cleanup();
-}
-
-static void destroy_endpoint_class(void)
-{
-	if (ep_class)
-		kref_put(&ep_class->kref, release_endpoint_class);
-}
-
 static void ep_device_release(struct device *dev)
 {
 	struct ep_device *ep_dev = to_ep_device(dev);
 
-	endpoint_free_minor(ep_dev);
 	kfree(ep_dev);
 }
 
@@ -279,62 +170,32 @@
 			struct usb_host_endpoint *endpoint,
 			struct usb_device *udev)
 {
-	char name[8];
 	struct ep_device *ep_dev;
 	int retval;
 
-	retval = init_endpoint_class();
-	if (retval)
-		goto exit;
-
 	ep_dev = kzalloc(sizeof(*ep_dev), GFP_KERNEL);
 	if (!ep_dev) {
 		retval = -ENOMEM;
-		goto error_alloc;
-	}
-
-	retval = endpoint_get_minor(ep_dev);
-	if (retval) {
-		dev_err(parent, "can not allocate minor number for %s\n",
-			dev_name(&ep_dev->dev));
-		goto error_register;
+		goto exit;
 	}
 
 	ep_dev->desc = &endpoint->desc;
 	ep_dev->udev = udev;
 	ep_dev->dev.groups = ep_dev_groups;
-	ep_dev->dev.devt = MKDEV(usb_endpoint_major, ep_dev->minor);
-	ep_dev->dev.class = ep_class->class;
+	ep_dev->dev.type = &usb_ep_device_type;
 	ep_dev->dev.parent = parent;
 	ep_dev->dev.release = ep_device_release;
-	dev_set_name(&ep_dev->dev, "usbdev%d.%d_ep%02x",
-		 udev->bus->busnum, udev->devnum,
-		 endpoint->desc.bEndpointAddress);
+	dev_set_name(&ep_dev->dev, "ep_%02x", endpoint->desc.bEndpointAddress);
 
 	retval = device_register(&ep_dev->dev);
 	if (retval)
-		goto error_chrdev;
+		goto error_register;
 
-	/* create the symlink to the old-style "ep_XX" directory */
-	sprintf(name, "ep_%02x", endpoint->desc.bEndpointAddress);
-	retval = sysfs_create_link(&parent->kobj, &ep_dev->dev.kobj, name);
-	if (retval)
-		goto error_link;
 	endpoint->ep_dev = ep_dev;
 	return retval;
 
-error_link:
-	device_unregister(&ep_dev->dev);
-	destroy_endpoint_class();
-	return retval;
-
-error_chrdev:
-	endpoint_free_minor(ep_dev);
-
 error_register:
 	kfree(ep_dev);
-error_alloc:
-	destroy_endpoint_class();
 exit:
 	return retval;
 }
@@ -344,12 +205,7 @@
 	struct ep_device *ep_dev = endpoint->ep_dev;
 
 	if (ep_dev) {
-		char name[8];
-
-		sprintf(name, "ep_%02x", endpoint->desc.bEndpointAddress);
-		sysfs_remove_link(&ep_dev->dev.parent->kobj, name);
 		device_unregister(&ep_dev->dev);
 		endpoint->ep_dev = NULL;
-		destroy_endpoint_class();
 	}
 }
diff --git a/drivers/usb/core/file.c b/drivers/usb/core/file.c
index 997e659..5cef889 100644
--- a/drivers/usb/core/file.c
+++ b/drivers/usb/core/file.c
@@ -67,6 +67,16 @@
 	struct class *class;
 } *usb_class;
 
+static char *usb_nodename(struct device *dev)
+{
+	struct usb_class_driver *drv;
+
+	drv = dev_get_drvdata(dev);
+	if (!drv || !drv->nodename)
+		return NULL;
+	return drv->nodename(dev);
+}
+
 static int init_usb_class(void)
 {
 	int result = 0;
@@ -90,6 +100,7 @@
 		kfree(usb_class);
 		usb_class = NULL;
 	}
+	usb_class->class->nodename = usb_nodename;
 
 exit:
 	return result;
@@ -198,7 +209,7 @@
 	else
 		temp = name;
 	intf->usb_dev = device_create(usb_class->class, &intf->dev,
-				      MKDEV(USB_MAJOR, minor), NULL,
+				      MKDEV(USB_MAJOR, minor), class_driver,
 				      "%s", temp);
 	if (IS_ERR(intf->usb_dev)) {
 		down_write(&minor_rwsem);
diff --git a/drivers/usb/core/hcd-pci.c b/drivers/usb/core/hcd-pci.c
index a4301dc..91f2885 100644
--- a/drivers/usb/core/hcd-pci.c
+++ b/drivers/usb/core/hcd-pci.c
@@ -185,180 +185,6 @@
 }
 EXPORT_SYMBOL_GPL(usb_hcd_pci_remove);
 
-
-#ifdef	CONFIG_PM
-
-/**
- * usb_hcd_pci_suspend - power management suspend of a PCI-based HCD
- * @dev: USB Host Controller being suspended
- * @message: Power Management message describing this state transition
- *
- * Store this function in the HCD's struct pci_driver as .suspend.
- */
-int usb_hcd_pci_suspend(struct pci_dev *dev, pm_message_t message)
-{
-	struct usb_hcd		*hcd = pci_get_drvdata(dev);
-	int			retval = 0;
-	int			wake, w;
-	int			has_pci_pm;
-
-	/* Root hub suspend should have stopped all downstream traffic,
-	 * and all bus master traffic.  And done so for both the interface
-	 * and the stub usb_device (which we check here).  But maybe it
-	 * didn't; writing sysfs power/state files ignores such rules...
-	 *
-	 * We must ignore the FREEZE vs SUSPEND distinction here, because
-	 * otherwise the swsusp will save (and restore) garbage state.
-	 */
-	if (!(hcd->state == HC_STATE_SUSPENDED ||
-			hcd->state == HC_STATE_HALT)) {
-		dev_warn(&dev->dev, "Root hub is not suspended\n");
-		retval = -EBUSY;
-		goto done;
-	}
-
-	/* We might already be suspended (runtime PM -- not yet written) */
-	if (dev->current_state != PCI_D0)
-		goto done;
-
-	if (hcd->driver->pci_suspend) {
-		retval = hcd->driver->pci_suspend(hcd, message);
-		suspend_report_result(hcd->driver->pci_suspend, retval);
-		if (retval)
-			goto done;
-	}
-
-	synchronize_irq(dev->irq);
-
-	/* Downstream ports from this root hub should already be quiesced, so
-	 * there will be no DMA activity.  Now we can shut down the upstream
-	 * link (except maybe for PME# resume signaling) and enter some PCI
-	 * low power state, if the hardware allows.
-	 */
-	pci_disable_device(dev);
-
-	pci_save_state(dev);
-
-	/* Don't fail on error to enable wakeup.  We rely on pci code
-	 * to reject requests the hardware can't implement, rather
-	 * than coding the same thing.
-	 */
-	wake = (hcd->state == HC_STATE_SUSPENDED &&
-			device_may_wakeup(&dev->dev));
-	w = pci_wake_from_d3(dev, wake);
-	if (w < 0)
-		wake = w;
-	dev_dbg(&dev->dev, "wakeup: %d\n", wake);
-
-	/* Don't change state if we don't need to */
-	if (message.event == PM_EVENT_FREEZE ||
-			message.event == PM_EVENT_PRETHAW) {
-		dev_dbg(&dev->dev, "--> no state change\n");
-		goto done;
-	}
-
-	has_pci_pm = pci_find_capability(dev, PCI_CAP_ID_PM);
-	if (!has_pci_pm) {
-		dev_dbg(&dev->dev, "--> PCI D0 legacy\n");
-	} else {
-
-		/* NOTE:  dev->current_state becomes nonzero only here, and
-		 * only for devices that support PCI PM.  Also, exiting
-		 * PCI_D3 (but not PCI_D1 or PCI_D2) is allowed to reset
-		 * some device state (e.g. as part of clock reinit).
-		 */
-		retval = pci_set_power_state(dev, PCI_D3hot);
-		suspend_report_result(pci_set_power_state, retval);
-		if (retval == 0) {
-			dev_dbg(&dev->dev, "--> PCI D3\n");
-		} else {
-			dev_dbg(&dev->dev, "PCI D3 suspend fail, %d\n",
-					retval);
-			pci_restore_state(dev);
-		}
-	}
-
-#ifdef CONFIG_PPC_PMAC
-	if (retval == 0) {
-		/* Disable ASIC clocks for USB */
-		if (machine_is(powermac)) {
-			struct device_node	*of_node;
-
-			of_node = pci_device_to_OF_node(dev);
-			if (of_node)
-				pmac_call_feature(PMAC_FTR_USB_ENABLE,
-							of_node, 0, 0);
-		}
-	}
-#endif
-
- done:
-	return retval;
-}
-EXPORT_SYMBOL_GPL(usb_hcd_pci_suspend);
-
-/**
- * usb_hcd_pci_resume - power management resume of a PCI-based HCD
- * @dev: USB Host Controller being resumed
- *
- * Store this function in the HCD's struct pci_driver as .resume.
- */
-int usb_hcd_pci_resume(struct pci_dev *dev)
-{
-	struct usb_hcd		*hcd;
-	int			retval;
-
-#ifdef CONFIG_PPC_PMAC
-	/* Reenable ASIC clocks for USB */
-	if (machine_is(powermac)) {
-		struct device_node *of_node;
-
-		of_node = pci_device_to_OF_node(dev);
-		if (of_node)
-			pmac_call_feature(PMAC_FTR_USB_ENABLE,
-						of_node, 0, 1);
-	}
-#endif
-
-	pci_restore_state(dev);
-
-	hcd = pci_get_drvdata(dev);
-	if (hcd->state != HC_STATE_SUSPENDED) {
-		dev_dbg(hcd->self.controller,
-				"can't resume, not suspended!\n");
-		return 0;
-	}
-
-	pci_enable_wake(dev, PCI_D0, false);
-
-	retval = pci_enable_device(dev);
-	if (retval < 0) {
-		dev_err(&dev->dev, "can't re-enable after resume, %d!\n",
-				retval);
-		return retval;
-	}
-
-	pci_set_master(dev);
-
-	/* yes, ignore this result too... */
-	(void) pci_wake_from_d3(dev, 0);
-
-	clear_bit(HCD_FLAG_SAW_IRQ, &hcd->flags);
-
-	if (hcd->driver->pci_resume) {
-		retval = hcd->driver->pci_resume(hcd);
-		if (retval) {
-			dev_err(hcd->self.controller,
-				"PCI post-resume error %d!\n", retval);
-			usb_hc_died(hcd);
-		}
-	}
-	return retval;
-}
-EXPORT_SYMBOL_GPL(usb_hcd_pci_resume);
-
-#endif	/* CONFIG_PM */
-
 /**
  * usb_hcd_pci_shutdown - shutdown host controller
  * @dev: USB Host Controller being shutdown
@@ -376,3 +202,181 @@
 }
 EXPORT_SYMBOL_GPL(usb_hcd_pci_shutdown);
 
+#ifdef	CONFIG_PM_SLEEP
+
+static int check_root_hub_suspended(struct device *dev)
+{
+	struct pci_dev		*pci_dev = to_pci_dev(dev);
+	struct usb_hcd		*hcd = pci_get_drvdata(pci_dev);
+
+	if (!(hcd->state == HC_STATE_SUSPENDED ||
+			hcd->state == HC_STATE_HALT)) {
+		dev_warn(dev, "Root hub is not suspended\n");
+		return -EBUSY;
+	}
+	return 0;
+}
+
+static int hcd_pci_suspend(struct device *dev)
+{
+	struct pci_dev		*pci_dev = to_pci_dev(dev);
+	struct usb_hcd		*hcd = pci_get_drvdata(pci_dev);
+	int			retval;
+
+	/* Root hub suspend should have stopped all downstream traffic,
+	 * and all bus master traffic.  And done so for both the interface
+	 * and the stub usb_device (which we check here).  But maybe it
+	 * didn't; writing sysfs power/state files ignores such rules...
+	 */
+	retval = check_root_hub_suspended(dev);
+	if (retval)
+		return retval;
+
+	/* We might already be suspended (runtime PM -- not yet written) */
+	if (pci_dev->current_state != PCI_D0)
+		return retval;
+
+	if (hcd->driver->pci_suspend) {
+		retval = hcd->driver->pci_suspend(hcd);
+		suspend_report_result(hcd->driver->pci_suspend, retval);
+		if (retval)
+			return retval;
+	}
+
+	synchronize_irq(pci_dev->irq);
+
+	/* Downstream ports from this root hub should already be quiesced, so
+	 * there will be no DMA activity.  Now we can shut down the upstream
+	 * link (except maybe for PME# resume signaling).  We'll enter a
+	 * low power state during suspend_noirq, if the hardware allows.
+	 */
+	pci_disable_device(pci_dev);
+	return retval;
+}
+
+static int hcd_pci_suspend_noirq(struct device *dev)
+{
+	struct pci_dev		*pci_dev = to_pci_dev(dev);
+	struct usb_hcd		*hcd = pci_get_drvdata(pci_dev);
+	int			retval;
+
+	retval = check_root_hub_suspended(dev);
+	if (retval)
+		return retval;
+
+	pci_save_state(pci_dev);
+
+	/* If the root hub is HALTed rather than SUSPENDed,
+	 * disallow remote wakeup.
+	 */
+	if (hcd->state == HC_STATE_HALT)
+		device_set_wakeup_enable(dev, 0);
+	dev_dbg(dev, "wakeup: %d\n", device_may_wakeup(dev));
+
+	/* Possibly enable remote wakeup,
+	 * choose the appropriate low-power state, and go to that state.
+	 */
+	retval = pci_prepare_to_sleep(pci_dev);
+	if (retval == -EIO) {		/* Low-power not supported */
+		dev_dbg(dev, "--> PCI D0 legacy\n");
+		retval = 0;
+	} else if (retval == 0) {
+		dev_dbg(dev, "--> PCI %s\n",
+				pci_power_name(pci_dev->current_state));
+	} else {
+		suspend_report_result(pci_prepare_to_sleep, retval);
+		return retval;
+	}
+
+#ifdef CONFIG_PPC_PMAC
+	/* Disable ASIC clocks for USB */
+	if (machine_is(powermac)) {
+		struct device_node	*of_node;
+
+		of_node = pci_device_to_OF_node(pci_dev);
+		if (of_node)
+			pmac_call_feature(PMAC_FTR_USB_ENABLE, of_node, 0, 0);
+	}
+#endif
+	return retval;
+}
+
+static int hcd_pci_resume_noirq(struct device *dev)
+{
+	struct pci_dev		*pci_dev = to_pci_dev(dev);
+
+#ifdef CONFIG_PPC_PMAC
+	/* Reenable ASIC clocks for USB */
+	if (machine_is(powermac)) {
+		struct device_node *of_node;
+
+		of_node = pci_device_to_OF_node(pci_dev);
+		if (of_node)
+			pmac_call_feature(PMAC_FTR_USB_ENABLE,
+						of_node, 0, 1);
+	}
+#endif
+
+	/* Go back to D0 and disable remote wakeup */
+	pci_back_from_sleep(pci_dev);
+	return 0;
+}
+
+static int resume_common(struct device *dev, bool hibernated)
+{
+	struct pci_dev		*pci_dev = to_pci_dev(dev);
+	struct usb_hcd		*hcd = pci_get_drvdata(pci_dev);
+	int			retval;
+
+	if (hcd->state != HC_STATE_SUSPENDED) {
+		dev_dbg(dev, "can't resume, not suspended!\n");
+		return 0;
+	}
+
+	retval = pci_enable_device(pci_dev);
+	if (retval < 0) {
+		dev_err(dev, "can't re-enable after resume, %d!\n", retval);
+		return retval;
+	}
+
+	pci_set_master(pci_dev);
+
+	clear_bit(HCD_FLAG_SAW_IRQ, &hcd->flags);
+
+	if (hcd->driver->pci_resume) {
+		retval = hcd->driver->pci_resume(hcd, hibernated);
+		if (retval) {
+			dev_err(dev, "PCI post-resume error %d!\n", retval);
+			usb_hc_died(hcd);
+		}
+	}
+	return retval;
+}
+
+static int hcd_pci_resume(struct device *dev)
+{
+	return resume_common(dev, false);
+}
+
+static int hcd_pci_restore(struct device *dev)
+{
+	return resume_common(dev, true);
+}
+
+struct dev_pm_ops usb_hcd_pci_pm_ops = {
+	.suspend	= hcd_pci_suspend,
+	.suspend_noirq	= hcd_pci_suspend_noirq,
+	.resume_noirq	= hcd_pci_resume_noirq,
+	.resume		= hcd_pci_resume,
+	.freeze		= check_root_hub_suspended,
+	.freeze_noirq	= check_root_hub_suspended,
+	.thaw_noirq	= NULL,
+	.thaw		= NULL,
+	.poweroff	= hcd_pci_suspend,
+	.poweroff_noirq	= hcd_pci_suspend_noirq,
+	.restore_noirq	= hcd_pci_resume_noirq,
+	.restore	= hcd_pci_restore,
+};
+EXPORT_SYMBOL_GPL(usb_hcd_pci_pm_ops);
+
+#endif	/* CONFIG_PM_SLEEP */
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index 42b93da..ce3f453 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -128,6 +128,27 @@
 #define KERNEL_REL	((LINUX_VERSION_CODE >> 16) & 0x0ff)
 #define KERNEL_VER	((LINUX_VERSION_CODE >> 8) & 0x0ff)
 
+/* usb 3.0 root hub device descriptor */
+static const u8 usb3_rh_dev_descriptor[18] = {
+	0x12,       /*  __u8  bLength; */
+	0x01,       /*  __u8  bDescriptorType; Device */
+	0x00, 0x03, /*  __le16 bcdUSB; v3.0 */
+
+	0x09,	    /*  __u8  bDeviceClass; HUB_CLASSCODE */
+	0x00,	    /*  __u8  bDeviceSubClass; */
+	0x03,       /*  __u8  bDeviceProtocol; USB 3.0 hub */
+	0x09,       /*  __u8  bMaxPacketSize0; 2^9 = 512 Bytes */
+
+	0x6b, 0x1d, /*  __le16 idVendor; Linux Foundation */
+	0x02, 0x00, /*  __le16 idProduct; device 0x0002 */
+	KERNEL_VER, KERNEL_REL, /*  __le16 bcdDevice */
+
+	0x03,       /*  __u8  iManufacturer; */
+	0x02,       /*  __u8  iProduct; */
+	0x01,       /*  __u8  iSerialNumber; */
+	0x01        /*  __u8  bNumConfigurations; */
+};
+
 /* usb 2.0 root hub device descriptor */
 static const u8 usb2_rh_dev_descriptor [18] = {
 	0x12,       /*  __u8  bLength; */
@@ -273,6 +294,47 @@
 	0x0c        /*  __u8  ep_bInterval; (256ms -- usb 2.0 spec) */
 };
 
+static const u8 ss_rh_config_descriptor[] = {
+	/* one configuration */
+	0x09,       /*  __u8  bLength; */
+	0x02,       /*  __u8  bDescriptorType; Configuration */
+	0x19, 0x00, /*  __le16 wTotalLength; FIXME */
+	0x01,       /*  __u8  bNumInterfaces; (1) */
+	0x01,       /*  __u8  bConfigurationValue; */
+	0x00,       /*  __u8  iConfiguration; */
+	0xc0,       /*  __u8  bmAttributes;
+				 Bit 7: must be set,
+				     6: Self-powered,
+				     5: Remote wakeup,
+				     4..0: resvd */
+	0x00,       /*  __u8  MaxPower; */
+
+	/* one interface */
+	0x09,       /*  __u8  if_bLength; */
+	0x04,       /*  __u8  if_bDescriptorType; Interface */
+	0x00,       /*  __u8  if_bInterfaceNumber; */
+	0x00,       /*  __u8  if_bAlternateSetting; */
+	0x01,       /*  __u8  if_bNumEndpoints; */
+	0x09,       /*  __u8  if_bInterfaceClass; HUB_CLASSCODE */
+	0x00,       /*  __u8  if_bInterfaceSubClass; */
+	0x00,       /*  __u8  if_bInterfaceProtocol; */
+	0x00,       /*  __u8  if_iInterface; */
+
+	/* one endpoint (status change endpoint) */
+	0x07,       /*  __u8  ep_bLength; */
+	0x05,       /*  __u8  ep_bDescriptorType; Endpoint */
+	0x81,       /*  __u8  ep_bEndpointAddress; IN Endpoint 1 */
+	0x03,       /*  __u8  ep_bmAttributes; Interrupt */
+		    /* __le16 ep_wMaxPacketSize; 1 + (MAX_ROOT_PORTS / 8)
+		     * see hub.c:hub_configure() for details. */
+	(USB_MAXCHILDREN + 1 + 7) / 8, 0x00,
+	0x0c        /*  __u8  ep_bInterval; (256ms -- usb 2.0 spec) */
+	/*
+	 * All 3.0 hubs should have an endpoint companion descriptor,
+	 * but we're ignoring that for now.  FIXME?
+	 */
+};
+
 /*-------------------------------------------------------------------------*/
 
 /*
@@ -426,23 +488,39 @@
 	case DeviceRequest | USB_REQ_GET_DESCRIPTOR:
 		switch (wValue & 0xff00) {
 		case USB_DT_DEVICE << 8:
-			if (hcd->driver->flags & HCD_USB2)
+			switch (hcd->driver->flags & HCD_MASK) {
+			case HCD_USB3:
+				bufp = usb3_rh_dev_descriptor;
+				break;
+			case HCD_USB2:
 				bufp = usb2_rh_dev_descriptor;
-			else if (hcd->driver->flags & HCD_USB11)
+				break;
+			case HCD_USB11:
 				bufp = usb11_rh_dev_descriptor;
-			else
+				break;
+			default:
 				goto error;
+			}
 			len = 18;
 			if (hcd->has_tt)
 				patch_protocol = 1;
 			break;
 		case USB_DT_CONFIG << 8:
-			if (hcd->driver->flags & HCD_USB2) {
+			switch (hcd->driver->flags & HCD_MASK) {
+			case HCD_USB3:
+				bufp = ss_rh_config_descriptor;
+				len = sizeof ss_rh_config_descriptor;
+				break;
+			case HCD_USB2:
 				bufp = hs_rh_config_descriptor;
 				len = sizeof hs_rh_config_descriptor;
-			} else {
+				break;
+			case HCD_USB11:
 				bufp = fs_rh_config_descriptor;
 				len = sizeof fs_rh_config_descriptor;
+				break;
+			default:
+				goto error;
 			}
 			if (device_can_wakeup(&hcd->self.root_hub->dev))
 				patch_wakeup = 1;
@@ -755,23 +833,6 @@
 
 /*-------------------------------------------------------------------------*/
 
-static struct class *usb_host_class;
-
-int usb_host_init(void)
-{
-	int retval = 0;
-
-	usb_host_class = class_create(THIS_MODULE, "usb_host");
-	if (IS_ERR(usb_host_class))
-		retval = PTR_ERR(usb_host_class);
-	return retval;
-}
-
-void usb_host_cleanup(void)
-{
-	class_destroy(usb_host_class);
-}
-
 /**
  * usb_bus_init - shared initialization code
  * @bus: the bus structure being initialized
@@ -818,12 +879,6 @@
 	set_bit (busnum, busmap.busmap);
 	bus->busnum = busnum;
 
-	bus->dev = device_create(usb_host_class, bus->controller, MKDEV(0, 0),
-				 bus, "usb_host%d", busnum);
-	result = PTR_ERR(bus->dev);
-	if (IS_ERR(bus->dev))
-		goto error_create_class_dev;
-
 	/* Add it to the local list of buses */
 	list_add (&bus->bus_list, &usb_bus_list);
 	mutex_unlock(&usb_bus_list_lock);
@@ -834,8 +889,6 @@
 		  "number %d\n", bus->busnum);
 	return 0;
 
-error_create_class_dev:
-	clear_bit(busnum, busmap.busmap);
 error_find_busnum:
 	mutex_unlock(&usb_bus_list_lock);
 	return result;
@@ -865,8 +918,6 @@
 	usb_notify_remove_bus(bus);
 
 	clear_bit (bus->busnum, busmap.busmap);
-
-	device_unregister(bus->dev);
 }
 
 /**
@@ -1199,7 +1250,8 @@
 
 	/* Map the URB's buffers for DMA access.
 	 * Lower level HCD code should use *_dma exclusively,
-	 * unless it uses pio or talks to another transport.
+	 * unless it uses pio or talks to another transport,
+	 * or uses the provided scatter gather list for bulk.
 	 */
 	if (is_root_hub(urb->dev))
 		return 0;
@@ -1520,6 +1572,92 @@
 	}
 }
 
+/* Check whether a new configuration or alt setting for an interface
+ * will exceed the bandwidth for the bus (or the host controller resources).
+ * Only pass in a non-NULL config or interface, not both!
+ * Passing NULL for both new_config and new_intf means the device will be
+ * de-configured by issuing a set configuration 0 command.
+ */
+int usb_hcd_check_bandwidth(struct usb_device *udev,
+		struct usb_host_config *new_config,
+		struct usb_interface *new_intf)
+{
+	int num_intfs, i, j;
+	struct usb_interface_cache *intf_cache;
+	struct usb_host_interface *alt = 0;
+	int ret = 0;
+	struct usb_hcd *hcd;
+	struct usb_host_endpoint *ep;
+
+	hcd = bus_to_hcd(udev->bus);
+	if (!hcd->driver->check_bandwidth)
+		return 0;
+
+	/* Configuration is being removed - set configuration 0 */
+	if (!new_config && !new_intf) {
+		for (i = 1; i < 16; ++i) {
+			ep = udev->ep_out[i];
+			if (ep)
+				hcd->driver->drop_endpoint(hcd, udev, ep);
+			ep = udev->ep_in[i];
+			if (ep)
+				hcd->driver->drop_endpoint(hcd, udev, ep);
+		}
+		hcd->driver->check_bandwidth(hcd, udev);
+		return 0;
+	}
+	/* Check if the HCD says there's enough bandwidth.  Enable all endpoints
+	 * for each interface's alt setting 0 and ask the HCD to check the bandwidth
+	 * of the bus.  There will always be bandwidth for endpoint 0, so it's
+	 * ok to exclude it.
+	 */
+	if (new_config) {
+		num_intfs = new_config->desc.bNumInterfaces;
+		/* Remove endpoints (except endpoint 0, which is always on the
+		 * schedule) of the old config from the schedule
+		 */
+		for (i = 1; i < 16; ++i) {
+			ep = udev->ep_out[i];
+			if (ep) {
+				ret = hcd->driver->drop_endpoint(hcd, udev, ep);
+				if (ret < 0)
+					goto reset;
+			}
+			ep = udev->ep_in[i];
+			if (ep) {
+				ret = hcd->driver->drop_endpoint(hcd, udev, ep);
+				if (ret < 0)
+					goto reset;
+			}
+		}
+		for (i = 0; i < num_intfs; ++i) {
+
+			/* Dig the endpoints for alt setting 0 out of the
+			 * interface cache for this interface
+			 */
+			intf_cache = new_config->intf_cache[i];
+			for (j = 0; j < intf_cache->num_altsetting; j++) {
+				if (intf_cache->altsetting[j].desc.bAlternateSetting == 0)
+					alt = &intf_cache->altsetting[j];
+			}
+			if (!alt) {
+				printk(KERN_DEBUG "Did not find alt setting 0 for intf %d\n", i);
+				continue;
+			}
+			for (j = 0; j < alt->desc.bNumEndpoints; j++) {
+				ret = hcd->driver->add_endpoint(hcd, udev, &alt->endpoint[j]);
+				if (ret < 0)
+					goto reset;
+			}
+		}
+	}
+	ret = hcd->driver->check_bandwidth(hcd, udev);
+reset:
+	if (ret < 0)
+		hcd->driver->reset_bandwidth(hcd, udev);
+	return ret;
+}
+
 /* Disables the endpoint: synchronizes with the hcd to make sure all
  * endpoint state is gone from hardware.  usb_hcd_flush_endpoint() must
  * have been called previously.  Use for set_configuration, set_interface,
@@ -1897,8 +2035,20 @@
 		retval = -ENOMEM;
 		goto err_allocate_root_hub;
 	}
-	rhdev->speed = (hcd->driver->flags & HCD_USB2) ? USB_SPEED_HIGH :
-			USB_SPEED_FULL;
+
+	switch (hcd->driver->flags & HCD_MASK) {
+	case HCD_USB11:
+		rhdev->speed = USB_SPEED_FULL;
+		break;
+	case HCD_USB2:
+		rhdev->speed = USB_SPEED_HIGH;
+		break;
+	case HCD_USB3:
+		rhdev->speed = USB_SPEED_SUPER;
+		break;
+	default:
+		goto err_allocate_root_hub;
+	}
 	hcd->self.root_hub = rhdev;
 
 	/* wakeup flag init defaults to "everything works" for root hubs,
diff --git a/drivers/usb/core/hcd.h b/drivers/usb/core/hcd.h
index e7d4479..d397ecf 100644
--- a/drivers/usb/core/hcd.h
+++ b/drivers/usb/core/hcd.h
@@ -173,6 +173,8 @@
 #define	HCD_LOCAL_MEM	0x0002		/* HC needs local memory */
 #define	HCD_USB11	0x0010		/* USB 1.1 */
 #define	HCD_USB2	0x0020		/* USB 2.0 */
+#define	HCD_USB3	0x0040		/* USB 3.0 */
+#define	HCD_MASK	0x0070
 
 	/* called to init HCD and root hub */
 	int	(*reset) (struct usb_hcd *hcd);
@@ -182,10 +184,10 @@
 	 * a whole, not just the root hub; they're for PCI bus glue.
 	 */
 	/* called after suspending the hub, before entering D3 etc */
-	int	(*pci_suspend) (struct usb_hcd *hcd, pm_message_t message);
+	int	(*pci_suspend)(struct usb_hcd *hcd);
 
 	/* called after entering D0 (etc), before resuming the hub */
-	int	(*pci_resume) (struct usb_hcd *hcd);
+	int	(*pci_resume)(struct usb_hcd *hcd, bool hibernated);
 
 	/* cleanly make HCD stop writing memory and doing I/O */
 	void	(*stop) (struct usb_hcd *hcd);
@@ -224,6 +226,43 @@
 	void	(*relinquish_port)(struct usb_hcd *, int);
 		/* has a port been handed over to a companion? */
 	int	(*port_handed_over)(struct usb_hcd *, int);
+
+	/* xHCI specific functions */
+		/* Called by usb_alloc_dev to alloc HC device structures */
+	int	(*alloc_dev)(struct usb_hcd *, struct usb_device *);
+		/* Called by usb_release_dev to free HC device structures */
+	void	(*free_dev)(struct usb_hcd *, struct usb_device *);
+
+	/* Bandwidth computation functions */
+	/* Note that add_endpoint() can only be called once per endpoint before
+	 * check_bandwidth() or reset_bandwidth() must be called.
+	 * drop_endpoint() can only be called once per endpoint also.
+	 * A call to xhci_drop_endpoint() followed by a call to xhci_add_endpoint() will
+	 * add the endpoint to the schedule with possibly new parameters denoted by a
+	 * different endpoint descriptor in usb_host_endpoint.
+	 * A call to xhci_add_endpoint() followed by a call to xhci_drop_endpoint() is
+	 * not allowed.
+	 */
+		/* Allocate endpoint resources and add them to a new schedule */
+	int 	(*add_endpoint)(struct usb_hcd *, struct usb_device *, struct usb_host_endpoint *);
+		/* Drop an endpoint from a new schedule */
+	int 	(*drop_endpoint)(struct usb_hcd *, struct usb_device *, struct usb_host_endpoint *);
+		/* Check that a new hardware configuration, set using
+		 * add_endpoint and drop_endpoint, does not exceed bus
+		 * bandwidth.  This must be called before any set configuration
+		 * or set interface requests are sent to the device.
+		 */
+	int	(*check_bandwidth)(struct usb_hcd *, struct usb_device *);
+		/* Reset the device schedule to the last known good schedule,
+		 * which was set from a previous successful call to
+		 * check_bandwidth().  This reverts any add_endpoint() and
+		 * drop_endpoint() calls since that last successful call.
+		 * Used for when a check_bandwidth() call fails due to resource
+		 * or bandwidth constraints.
+		 */
+	void	(*reset_bandwidth)(struct usb_hcd *, struct usb_device *);
+		/* Returns the hardware-chosen device address */
+	int	(*address_device)(struct usb_hcd *, struct usb_device *udev);
 };
 
 extern int usb_hcd_link_urb_to_ep(struct usb_hcd *hcd, struct urb *urb);
@@ -242,6 +281,9 @@
 extern void usb_hcd_reset_endpoint(struct usb_device *udev,
 		struct usb_host_endpoint *ep);
 extern void usb_hcd_synchronize_unlinks(struct usb_device *udev);
+extern int usb_hcd_check_bandwidth(struct usb_device *udev,
+		struct usb_host_config *new_config,
+		struct usb_interface *new_intf);
 extern int usb_hcd_get_frame_number(struct usb_device *udev);
 
 extern struct usb_hcd *usb_create_hcd(const struct hc_driver *driver,
@@ -261,14 +303,11 @@
 extern int usb_hcd_pci_probe(struct pci_dev *dev,
 				const struct pci_device_id *id);
 extern void usb_hcd_pci_remove(struct pci_dev *dev);
-
-#ifdef CONFIG_PM
-extern int usb_hcd_pci_suspend(struct pci_dev *dev, pm_message_t msg);
-extern int usb_hcd_pci_resume(struct pci_dev *dev);
-#endif /* CONFIG_PM */
-
 extern void usb_hcd_pci_shutdown(struct pci_dev *dev);
 
+#ifdef CONFIG_PM_SLEEP
+extern struct dev_pm_ops	usb_hcd_pci_pm_ops;
+#endif
 #endif /* CONFIG_PCI */
 
 /* pci-ish (pdev null is ok) buffer alloc/mapping support */
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index be86ae3..2af3b4f 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -155,6 +155,8 @@
     		return "480 Mb/s";
 	else if (portstatus & (1 << USB_PORT_FEAT_LOWSPEED))
 		return "1.5 Mb/s";
+	else if (portstatus & (1 << USB_PORT_FEAT_SUPERSPEED))
+		return "5.0 Gb/s";
 	else
 		return "12 Mb/s";
 }
@@ -457,13 +459,13 @@
 
 	spin_lock_irqsave (&hub->tt.lock, flags);
 	while (--limit && !list_empty (&hub->tt.clear_list)) {
-		struct list_head	*temp;
+		struct list_head	*next;
 		struct usb_tt_clear	*clear;
 		struct usb_device	*hdev = hub->hdev;
 		int			status;
 
-		temp = hub->tt.clear_list.next;
-		clear = list_entry (temp, struct usb_tt_clear, clear_list);
+		next = hub->tt.clear_list.next;
+		clear = list_entry (next, struct usb_tt_clear, clear_list);
 		list_del (&clear->clear_list);
 
 		/* drop lock so HCD can concurrently report other TT errors */
@@ -951,6 +953,9 @@
 					ret);
 			hub->tt.hub = hdev;
 			break;
+		case 3:
+			/* USB 3.0 hubs don't have a TT */
+			break;
 		default:
 			dev_dbg(hub_dev, "Unrecognized hub protocol %d\n",
 				hdev->descriptor.bDeviceProtocol);
@@ -1323,6 +1328,11 @@
  * 0 is reserved by USB for default address; (b) Linux's USB stack
  * uses always #1 for the root hub of the controller. So USB stack's
  * port #1, which is wusb virtual-port #0 has address #2.
+ *
+ * Devices connected under xHCI are not as simple.  The host controller
+ * supports virtualization, so the hardware assigns device addresses and
+ * the HCD must set up data structures before issuing a set address
+ * command to the hardware.
  */
 static void choose_address(struct usb_device *udev)
 {
@@ -1642,6 +1652,9 @@
 	err = usb_configure_device(udev);	/* detect & probe dev/intfs */
 	if (err < 0)
 		goto fail;
+	dev_dbg(&udev->dev, "udev %d, busnum %d, minor = %d\n",
+			udev->devnum, udev->bus->busnum,
+			(((udev->bus->busnum-1) * 128) + (udev->devnum-1)));
 	/* export the usbdev device-node for libusb */
 	udev->dev.devt = MKDEV(USB_DEVICE_MAJOR,
 			(((udev->bus->busnum-1) * 128) + (udev->devnum-1)));
@@ -2395,19 +2408,29 @@
 static int hub_set_address(struct usb_device *udev, int devnum)
 {
 	int retval;
+	struct usb_hcd *hcd = bus_to_hcd(udev->bus);
 
-	if (devnum <= 1)
+	/*
+	 * The host controller will choose the device address,
+	 * instead of the core having chosen it earlier
+	 */
+	if (!hcd->driver->address_device && devnum <= 1)
 		return -EINVAL;
 	if (udev->state == USB_STATE_ADDRESS)
 		return 0;
 	if (udev->state != USB_STATE_DEFAULT)
 		return -EINVAL;
-	retval = usb_control_msg(udev, usb_sndaddr0pipe(),
-		USB_REQ_SET_ADDRESS, 0, devnum, 0,
-		NULL, 0, USB_CTRL_SET_TIMEOUT);
+	if (hcd->driver->address_device) {
+		retval = hcd->driver->address_device(hcd, udev);
+	} else {
+		retval = usb_control_msg(udev, usb_sndaddr0pipe(),
+				USB_REQ_SET_ADDRESS, 0, devnum, 0,
+				NULL, 0, USB_CTRL_SET_TIMEOUT);
+		if (retval == 0)
+			update_address(udev, devnum);
+	}
 	if (retval == 0) {
 		/* Device now using proper address. */
-		update_address(udev, devnum);
 		usb_set_device_state(udev, USB_STATE_ADDRESS);
 		usb_ep0_reinit(udev);
 	}
@@ -2430,6 +2453,7 @@
 	static DEFINE_MUTEX(usb_address0_mutex);
 
 	struct usb_device	*hdev = hub->hdev;
+	struct usb_hcd		*hcd = bus_to_hcd(hdev->bus);
 	int			i, j, retval;
 	unsigned		delay = HUB_SHORT_RESET_TIME;
 	enum usb_device_speed	oldspeed = udev->speed;
@@ -2452,11 +2476,24 @@
 
 	mutex_lock(&usb_address0_mutex);
 
-	/* Reset the device; full speed may morph to high speed */
-	retval = hub_port_reset(hub, port1, udev, delay);
-	if (retval < 0)		/* error or disconnect */
+	if ((hcd->driver->flags & HCD_USB3) && udev->config) {
+		/* FIXME this will need special handling by the xHCI driver. */
+		dev_dbg(&udev->dev,
+				"xHCI reset of configured device "
+				"not supported yet.\n");
+		retval = -EINVAL;
 		goto fail;
-				/* success, speed is known */
+	} else if (!udev->config && oldspeed == USB_SPEED_SUPER) {
+		/* Don't reset USB 3.0 devices during an initial setup */
+		usb_set_device_state(udev, USB_STATE_DEFAULT);
+	} else {
+		/* Reset the device; full speed may morph to high speed */
+		/* FIXME a USB 2.0 device may morph into SuperSpeed on reset. */
+		retval = hub_port_reset(hub, port1, udev, delay);
+		if (retval < 0)		/* error or disconnect */
+			goto fail;
+		/* success, speed is known */
+	}
 	retval = -ENODEV;
 
 	if (oldspeed != USB_SPEED_UNKNOWN && oldspeed != udev->speed) {
@@ -2471,6 +2508,7 @@
 	 * reported as 0xff in the device descriptor). WUSB1.0[4.8.1].
 	 */
 	switch (udev->speed) {
+	case USB_SPEED_SUPER:
 	case USB_SPEED_VARIABLE:	/* fixed at 512 */
 		udev->ep0.desc.wMaxPacketSize = cpu_to_le16(512);
 		break;
@@ -2496,16 +2534,20 @@
 	case USB_SPEED_LOW:	speed = "low";	break;
 	case USB_SPEED_FULL:	speed = "full";	break;
 	case USB_SPEED_HIGH:	speed = "high";	break;
+	case USB_SPEED_SUPER:
+				speed = "super";
+				break;
 	case USB_SPEED_VARIABLE:
 				speed = "variable";
 				type = "Wireless ";
 				break;
 	default: 		speed = "?";	break;
 	}
-	dev_info (&udev->dev,
-		  "%s %s speed %sUSB device using %s and address %d\n",
-		  (udev->config) ? "reset" : "new", speed, type,
-		  udev->bus->controller->driver->name, devnum);
+	if (udev->speed != USB_SPEED_SUPER)
+		dev_info(&udev->dev,
+				"%s %s speed %sUSB device using %s and address %d\n",
+				(udev->config) ? "reset" : "new", speed, type,
+				udev->bus->controller->driver->name, devnum);
 
 	/* Set up TT records, if needed  */
 	if (hdev->tt) {
@@ -2530,7 +2572,11 @@
 	 * value.
 	 */
 	for (i = 0; i < GET_DESCRIPTOR_TRIES; (++i, msleep(100))) {
-		if (USE_NEW_SCHEME(retry_counter)) {
+		/*
+		 * An xHCI controller cannot send any packets to a device until
+		 * a set address command successfully completes.
+		 */
+		if (USE_NEW_SCHEME(retry_counter) && !(hcd->driver->flags & HCD_USB3)) {
 			struct usb_device_descriptor *buf;
 			int r = 0;
 
@@ -2596,7 +2642,7 @@
  		 * unauthorized address in the Connect Ack sequence;
  		 * authorization will assign the final address.
  		 */
- 		if (udev->wusb == 0) {
+		if (udev->wusb == 0) {
 			for (j = 0; j < SET_ADDRESS_TRIES; ++j) {
 				retval = hub_set_address(udev, devnum);
 				if (retval >= 0)
@@ -2609,13 +2655,20 @@
 					devnum, retval);
 				goto fail;
 			}
+			if (udev->speed == USB_SPEED_SUPER) {
+				devnum = udev->devnum;
+				dev_info(&udev->dev,
+						"%s SuperSpeed USB device using %s and address %d\n",
+						(udev->config) ? "reset" : "new",
+						udev->bus->controller->driver->name, devnum);
+			}
 
 			/* cope with hardware quirkiness:
 			 *  - let SET_ADDRESS settle, some device hardware wants it
 			 *  - read ep0 maxpacket even for high and low speed,
 			 */
 			msleep(10);
-			if (USE_NEW_SCHEME(retry_counter))
+			if (USE_NEW_SCHEME(retry_counter) && !(hcd->driver->flags & HCD_USB3))
 				break;
   		}
 
@@ -2634,8 +2687,11 @@
 	if (retval)
 		goto fail;
 
-	i = udev->descriptor.bMaxPacketSize0 == 0xff?	/* wusb device? */
-	    512 : udev->descriptor.bMaxPacketSize0;
+	if (udev->descriptor.bMaxPacketSize0 == 0xff ||
+			udev->speed == USB_SPEED_SUPER)
+		i = 512;
+	else
+		i = udev->descriptor.bMaxPacketSize0;
 	if (le16_to_cpu(udev->ep0.desc.wMaxPacketSize) != i) {
 		if (udev->speed != USB_SPEED_FULL ||
 				!(i == 8 || i == 16 || i == 32 || i == 64)) {
@@ -2847,19 +2903,41 @@
 		}
 
 		usb_set_device_state(udev, USB_STATE_POWERED);
-		udev->speed = USB_SPEED_UNKNOWN;
  		udev->bus_mA = hub->mA_per_port;
 		udev->level = hdev->level + 1;
 		udev->wusb = hub_is_wusb(hub);
 
-		/* set the address */
-		choose_address(udev);
-		if (udev->devnum <= 0) {
-			status = -ENOTCONN;	/* Don't retry */
-			goto loop;
+		/*
+		 * USB 3.0 devices are reset automatically before the connect
+		 * port status change appears, and the root hub port status
+		 * shows the correct speed.  We also get port change
+		 * notifications for USB 3.0 devices from the USB 3.0 portion of
+		 * an external USB 3.0 hub, but this isn't handled correctly yet
+		 * FIXME.
+		 */
+
+		if (!(hcd->driver->flags & HCD_USB3))
+			udev->speed = USB_SPEED_UNKNOWN;
+		else if ((hdev->parent == NULL) &&
+				(portstatus & (1 << USB_PORT_FEAT_SUPERSPEED)))
+			udev->speed = USB_SPEED_SUPER;
+		else
+			udev->speed = USB_SPEED_UNKNOWN;
+
+		/*
+		 * xHCI needs to issue an address device command later
+		 * in the hub_port_init sequence for SS/HS/FS/LS devices.
+		 */
+		if (!(hcd->driver->flags & HCD_USB3)) {
+			/* set the address */
+			choose_address(udev);
+			if (udev->devnum <= 0) {
+				status = -ENOTCONN;	/* Don't retry */
+				goto loop;
+			}
 		}
 
-		/* reset and get descriptor */
+		/* reset (non-USB 3.0 devices) and get descriptor */
 		status = hub_port_init(hub, udev, port1, i);
 		if (status < 0)
 			goto loop;
diff --git a/drivers/usb/core/hub.h b/drivers/usb/core/hub.h
index 2a116ce..889c0f3 100644
--- a/drivers/usb/core/hub.h
+++ b/drivers/usb/core/hub.h
@@ -47,7 +47,10 @@
 #define USB_PORT_FEAT_L1		5	/* L1 suspend */
 #define USB_PORT_FEAT_POWER		8
 #define USB_PORT_FEAT_LOWSPEED		9
+/* This value was never in Table 11-17 */
 #define USB_PORT_FEAT_HIGHSPEED		10
+/* This value is also fake */
+#define USB_PORT_FEAT_SUPERSPEED	11
 #define USB_PORT_FEAT_C_CONNECTION	16
 #define USB_PORT_FEAT_C_ENABLE		17
 #define USB_PORT_FEAT_C_SUSPEND		18
diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
index b626283..2bed83c 100644
--- a/drivers/usb/core/message.c
+++ b/drivers/usb/core/message.c
@@ -10,6 +10,7 @@
 #include <linux/mm.h>
 #include <linux/timer.h>
 #include <linux/ctype.h>
+#include <linux/nls.h>
 #include <linux/device.h>
 #include <linux/scatterlist.h>
 #include <linux/usb/quirks.h>
@@ -364,6 +365,7 @@
 	int i;
 	int urb_flags;
 	int dma;
+	int use_sg;
 
 	if (!io || !dev || !sg
 			|| usb_pipecontrol(pipe)
@@ -391,7 +393,19 @@
 	if (io->entries <= 0)
 		return io->entries;
 
-	io->urbs = kmalloc(io->entries * sizeof *io->urbs, mem_flags);
+	/* If we're running on an xHCI host controller, queue the whole scatter
+	 * gather list with one call to urb_enqueue().  This is only for bulk,
+	 * as that endpoint type does not care how the data gets broken up
+	 * across frames.
+	 */
+	if (usb_pipebulk(pipe) &&
+			bus_to_hcd(dev->bus)->driver->flags & HCD_USB3) {
+		io->urbs = kmalloc(sizeof *io->urbs, mem_flags);
+		use_sg = true;
+	} else {
+		io->urbs = kmalloc(io->entries * sizeof *io->urbs, mem_flags);
+		use_sg = false;
+	}
 	if (!io->urbs)
 		goto nomem;
 
@@ -401,62 +415,92 @@
 	if (usb_pipein(pipe))
 		urb_flags |= URB_SHORT_NOT_OK;
 
-	for_each_sg(sg, sg, io->entries, i) {
-		unsigned len;
-
-		io->urbs[i] = usb_alloc_urb(0, mem_flags);
-		if (!io->urbs[i]) {
-			io->entries = i;
+	if (use_sg) {
+		io->urbs[0] = usb_alloc_urb(0, mem_flags);
+		if (!io->urbs[0]) {
+			io->entries = 0;
 			goto nomem;
 		}
 
-		io->urbs[i]->dev = NULL;
-		io->urbs[i]->pipe = pipe;
-		io->urbs[i]->interval = period;
-		io->urbs[i]->transfer_flags = urb_flags;
+		io->urbs[0]->dev = NULL;
+		io->urbs[0]->pipe = pipe;
+		io->urbs[0]->interval = period;
+		io->urbs[0]->transfer_flags = urb_flags;
 
-		io->urbs[i]->complete = sg_complete;
-		io->urbs[i]->context = io;
+		io->urbs[0]->complete = sg_complete;
+		io->urbs[0]->context = io;
+		/* A length of zero means transfer the whole sg list */
+		io->urbs[0]->transfer_buffer_length = length;
+		if (length == 0) {
+			for_each_sg(sg, sg, io->entries, i) {
+				io->urbs[0]->transfer_buffer_length +=
+					sg_dma_len(sg);
+			}
+		}
+		io->urbs[0]->sg = io;
+		io->urbs[0]->num_sgs = io->entries;
+		io->entries = 1;
+	} else {
+		for_each_sg(sg, sg, io->entries, i) {
+			unsigned len;
 
-		/*
-		 * Some systems need to revert to PIO when DMA is temporarily
-		 * unavailable.  For their sakes, both transfer_buffer and
-		 * transfer_dma are set when possible.  However this can only
-		 * work on systems without:
-		 *
-		 *  - HIGHMEM, since DMA buffers located in high memory are
-		 *    not directly addressable by the CPU for PIO;
-		 *
-		 *  - IOMMU, since dma_map_sg() is allowed to use an IOMMU to
-		 *    make virtually discontiguous buffers be "dma-contiguous"
-		 *    so that PIO and DMA need diferent numbers of URBs.
-		 *
-		 * So when HIGHMEM or IOMMU are in use, transfer_buffer is NULL
-		 * to prevent stale pointers and to help spot bugs.
-		 */
-		if (dma) {
-			io->urbs[i]->transfer_dma = sg_dma_address(sg);
-			len = sg_dma_len(sg);
+			io->urbs[i] = usb_alloc_urb(0, mem_flags);
+			if (!io->urbs[i]) {
+				io->entries = i;
+				goto nomem;
+			}
+
+			io->urbs[i]->dev = NULL;
+			io->urbs[i]->pipe = pipe;
+			io->urbs[i]->interval = period;
+			io->urbs[i]->transfer_flags = urb_flags;
+
+			io->urbs[i]->complete = sg_complete;
+			io->urbs[i]->context = io;
+
+			/*
+			 * Some systems need to revert to PIO when DMA is
+			 * temporarily unavailable.  For their sakes, both
+			 * transfer_buffer and transfer_dma are set when
+			 * possible.  However this can only work on systems
+			 * without:
+			 *
+			 *  - HIGHMEM, since DMA buffers located in high memory
+			 *    are not directly addressable by the CPU for PIO;
+			 *
+			 *  - IOMMU, since dma_map_sg() is allowed to use an
+			 *    IOMMU to make virtually discontiguous buffers be
+			 *    "dma-contiguous" so that PIO and DMA need different
+			 *    numbers of URBs.
+			 *
+			 * So when HIGHMEM or IOMMU are in use, transfer_buffer
+			 * is NULL to prevent stale pointers and to help spot
+			 * bugs.
+			 */
+			if (dma) {
+				io->urbs[i]->transfer_dma = sg_dma_address(sg);
+				len = sg_dma_len(sg);
 #if defined(CONFIG_HIGHMEM) || defined(CONFIG_GART_IOMMU)
-			io->urbs[i]->transfer_buffer = NULL;
+				io->urbs[i]->transfer_buffer = NULL;
 #else
-			io->urbs[i]->transfer_buffer = sg_virt(sg);
+				io->urbs[i]->transfer_buffer = sg_virt(sg);
 #endif
-		} else {
-			/* hc may use _only_ transfer_buffer */
-			io->urbs[i]->transfer_buffer = sg_virt(sg);
-			len = sg->length;
-		}
+			} else {
+				/* hc may use _only_ transfer_buffer */
+				io->urbs[i]->transfer_buffer = sg_virt(sg);
+				len = sg->length;
+			}
 
-		if (length) {
-			len = min_t(unsigned, len, length);
-			length -= len;
-			if (length == 0)
-				io->entries = i + 1;
+			if (length) {
+				len = min_t(unsigned, len, length);
+				length -= len;
+				if (length == 0)
+					io->entries = i + 1;
+			}
+			io->urbs[i]->transfer_buffer_length = len;
 		}
-		io->urbs[i]->transfer_buffer_length = len;
+		io->urbs[--i]->transfer_flags &= ~URB_NO_INTERRUPT;
 	}
-	io->urbs[--i]->transfer_flags &= ~URB_NO_INTERRUPT;
 
 	/* transaction state */
 	io->count = io->entries;
@@ -509,6 +553,10 @@
  * could be transferred.  That capability is less useful for low or full
  * speed interrupt endpoints, which allow at most one packet per millisecond,
  * of at most 8 or 64 bytes (respectively).
+ *
+ * It is not necessary to call this function to reserve bandwidth for devices
+ * under an xHCI host controller, as the bandwidth is reserved when the
+ * configuration or interface alt setting is selected.
  */
 void usb_sg_wait(struct usb_sg_request *io)
 {
@@ -759,7 +807,7 @@
 }
 
 /**
- * usb_string - returns ISO 8859-1 version of a string descriptor
+ * usb_string - returns UTF-8 version of a string descriptor
  * @dev: the device whose string descriptor is being retrieved
  * @index: the number of the descriptor
  * @buf: where to put the string
@@ -767,17 +815,10 @@
  * Context: !in_interrupt ()
  *
  * This converts the UTF-16LE encoded strings returned by devices, from
- * usb_get_string_descriptor(), to null-terminated ISO-8859-1 encoded ones
- * that are more usable in most kernel contexts.  Note that all characters
- * in the chosen descriptor that can't be encoded using ISO-8859-1
- * are converted to the question mark ("?") character, and this function
+ * usb_get_string_descriptor(), to null-terminated UTF-8 encoded ones
+ * that are more usable in most kernel contexts.  Note that this function
  * chooses strings in the first language supported by the device.
  *
- * The ASCII (or, redundantly, "US-ASCII") character set is the seven-bit
- * subset of ISO 8859-1. ISO-8859-1 is the eight-bit subset of Unicode,
- * and is appropriate for use many uses of English and several other
- * Western European languages.  (But it doesn't include the "Euro" symbol.)
- *
  * This call is synchronous, and may not be used in an interrupt context.
  *
  * Returns length of the string (>= 0) or usb_control_msg status (< 0).
@@ -786,7 +827,6 @@
 {
 	unsigned char *tbuf;
 	int err;
-	unsigned int u, idx;
 
 	if (dev->state == USB_STATE_SUSPENDED)
 		return -EHOSTUNREACH;
@@ -821,16 +861,9 @@
 		goto errout;
 
 	size--;		/* leave room for trailing NULL char in output buffer */
-	for (idx = 0, u = 2; u < err; u += 2) {
-		if (idx >= size)
-			break;
-		if (tbuf[u+1])			/* high byte */
-			buf[idx++] = '?';  /* non ISO-8859-1 character */
-		else
-			buf[idx++] = tbuf[u];
-	}
-	buf[idx] = 0;
-	err = idx;
+	err = utf16s_to_utf8s((wchar_t *) &tbuf[2], (err - 2) / 2,
+			UTF16_LITTLE_ENDIAN, buf, size);
+	buf[err] = 0;
 
 	if (tbuf[1] != USB_DT_STRING)
 		dev_dbg(&dev->dev,
@@ -843,6 +876,9 @@
 }
 EXPORT_SYMBOL_GPL(usb_string);
 
+/* one UTF-8-encoded 16-bit character has at most three bytes */
+#define MAX_USB_STRING_SIZE (127 * 3 + 1)
+
 /**
  * usb_cache_string - read a string descriptor and cache it for later use
  * @udev: the device whose string descriptor is being read
@@ -860,9 +896,9 @@
 	if (index <= 0)
 		return NULL;
 
-	buf = kmalloc(256, GFP_KERNEL);
+	buf = kmalloc(MAX_USB_STRING_SIZE, GFP_KERNEL);
 	if (buf) {
-		len = usb_string(udev, index, buf, 256);
+		len = usb_string(udev, index, buf, MAX_USB_STRING_SIZE);
 		if (len > 0) {
 			smallbuf = kmalloc(++len, GFP_KERNEL);
 			if (!smallbuf)
@@ -1664,6 +1700,21 @@
 	if (ret)
 		goto free_interfaces;
 
+	/* Make sure we have bandwidth (and available HCD resources) for this
+	 * configuration.  Remove endpoints from the schedule if we're dropping
+	 * this configuration to set configuration 0.  After this point, the
+	 * host controller will not allow submissions to dropped endpoints.  If
+	 * this call fails, the device state is unchanged.
+	 */
+	if (cp)
+		ret = usb_hcd_check_bandwidth(dev, cp, NULL);
+	else
+		ret = usb_hcd_check_bandwidth(dev, NULL, NULL);
+	if (ret < 0) {
+		usb_autosuspend_device(dev);
+		goto free_interfaces;
+	}
+
 	/* if it's already configured, clear out old state first.
 	 * getting rid of old interfaces means unbinding their drivers.
 	 */
@@ -1686,6 +1737,7 @@
 	dev->actconfig = cp;
 	if (!cp) {
 		usb_set_device_state(dev, USB_STATE_ADDRESS);
+		usb_hcd_check_bandwidth(dev, NULL, NULL);
 		usb_autosuspend_device(dev);
 		goto free_interfaces;
 	}
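
Illustration only, not part of the patch: the usb_string() change above now emits UTF-8 via utf16s_to_utf8s(), and MAX_USB_STRING_SIZE assumes at most three UTF-8 bytes per 16-bit character. The minimal user-space sketch below demonstrates that bound on an assumed sample string; it ignores surrogate pairs, which the kernel helper does handle.

#include <stdio.h>
#include <stdint.h>

/* Encode one UTF-16 code unit from the Basic Multilingual Plane as UTF-8. */
static int utf8_encode_bmp(uint16_t cu, unsigned char *out)
{
	if (cu < 0x80) {		/* 1 byte: ASCII */
		out[0] = cu;
		return 1;
	}
	if (cu < 0x800) {		/* 2 bytes */
		out[0] = 0xc0 | (cu >> 6);
		out[1] = 0x80 | (cu & 0x3f);
		return 2;
	}
	out[0] = 0xe0 | (cu >> 12);	/* 3 bytes: the rest of the BMP */
	out[1] = 0x80 | ((cu >> 6) & 0x3f);
	out[2] = 0x80 | (cu & 0x3f);
	return 3;
}

int main(void)
{
	/* Assumed sample: "€5" as the UTF-16LE code units a device might return */
	uint16_t units[] = { 0x20ac, 0x0035 };
	unsigned char buf[8];
	int i, total = 0;

	for (i = 0; i < 2; i++)
		total += utf8_encode_bmp(units[i], buf + total);
	printf("UTF-8 bytes used: %d (worst case is 3 per code unit)\n", total);
	return 0;
}
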
diff --git a/drivers/usb/core/sysfs.c b/drivers/usb/core/sysfs.c
index c667891..b5c72e4 100644
--- a/drivers/usb/core/sysfs.c
+++ b/drivers/usb/core/sysfs.c
@@ -552,8 +552,8 @@
 static mode_t dev_string_attrs_are_visible(struct kobject *kobj,
 		struct attribute *a, int n)
 {
-	struct usb_device *udev = to_usb_device(
-			container_of(kobj, struct device, kobj));
+	struct device *dev = container_of(kobj, struct device, kobj);
+	struct usb_device *udev = to_usb_device(dev);
 
 	if (a == &dev_attr_manufacturer.attr) {
 		if (udev->manufacturer == NULL)
@@ -585,8 +585,8 @@
 read_descriptors(struct kobject *kobj, struct bin_attribute *attr,
 		char *buf, loff_t off, size_t count)
 {
-	struct usb_device *udev = to_usb_device(
-			container_of(kobj, struct device, kobj));
+	struct device *dev = container_of(kobj, struct device, kobj);
+	struct usb_device *udev = to_usb_device(dev);
 	size_t nleft = count;
 	size_t srclen, n;
 	int cfgno;
@@ -786,8 +786,8 @@
 static mode_t intf_assoc_attrs_are_visible(struct kobject *kobj,
 		struct attribute *a, int n)
 {
-	struct usb_interface *intf = to_usb_interface(
-			container_of(kobj, struct device, kobj));
+	struct device *dev = container_of(kobj, struct device, kobj);
+	struct usb_interface *intf = to_usb_interface(dev);
 
 	if (intf->intf_assoc == NULL)
 		return 0;
diff --git a/drivers/usb/core/urb.c b/drivers/usb/core/urb.c
index 3376055..0885d4a 100644
--- a/drivers/usb/core/urb.c
+++ b/drivers/usb/core/urb.c
@@ -241,6 +241,12 @@
  * If the USB subsystem can't allocate sufficient bandwidth to perform
  * the periodic request, submitting such a periodic request should fail.
  *
+ * For devices under xHCI, the bandwidth is reserved at configuration time, or
+ * when the alt setting is selected.  If there is not enough bus bandwidth, the
+ * configuration/alt setting request will fail.  Therefore, submissions to
+ * periodic endpoints on devices under xHCI should never fail due to bandwidth
+ * constraints.
+ *
  * Device drivers must explicitly request that repetition, by ensuring that
  * some URB is always on the endpoint's queue (except possibly for short
 * periods during completion callbacks).  When there is no longer an urb
@@ -351,6 +357,7 @@
 	if (xfertype == USB_ENDPOINT_XFER_ISOC) {
 		int	n, len;
 
+		/* FIXME SuperSpeed isoc endpoints have up to 16 bursts */
 		/* "high bandwidth" mode, 1-3 packets/uframe? */
 		if (dev->speed == USB_SPEED_HIGH) {
 			int	mult = 1 + ((max >> 11) & 0x03);
@@ -426,6 +433,11 @@
 			return -EINVAL;
 		/* too big? */
 		switch (dev->speed) {
+		case USB_SPEED_SUPER:	/* units are 125us */
+			/* Handle up to 2^(16-1) microframes */
+			if (urb->interval > (1 << 15))
+				return -EINVAL;
+			max = 1 << 15;
 		case USB_SPEED_HIGH:	/* units are microframes */
 			/* NOTE usb handles 2^15 */
 			if (urb->interval > (1024 * 8))
diff --git a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c
index 7eee400..a26f738 100644
--- a/drivers/usb/core/usb.c
+++ b/drivers/usb/core/usb.c
@@ -34,6 +34,7 @@
 #include <linux/usb.h>
 #include <linux/mutex.h>
 #include <linux/workqueue.h>
+#include <linux/debugfs.h>
 
 #include <asm/io.h>
 #include <linux/scatterlist.h>
@@ -139,8 +140,7 @@
 	struct find_interface_arg *arg = data;
 	struct usb_interface *intf;
 
-	/* can't look at usb devices, only interfaces */
-	if (is_usb_device(dev))
+	if (!is_usb_interface(dev))
 		return 0;
 
 	intf = to_usb_interface(dev);
@@ -184,11 +184,16 @@
 static void usb_release_dev(struct device *dev)
 {
 	struct usb_device *udev;
+	struct usb_hcd *hcd;
 
 	udev = to_usb_device(dev);
+	hcd = bus_to_hcd(udev->bus);
 
 	usb_destroy_configuration(udev);
-	usb_put_hcd(bus_to_hcd(udev->bus));
+	/* Root hubs aren't real devices, so don't free HCD resources */
+	if (hcd->driver->free_dev && udev->parent)
+		hcd->driver->free_dev(hcd, udev);
+	usb_put_hcd(hcd);
 	kfree(udev->product);
 	kfree(udev->manufacturer);
 	kfree(udev->serial);
@@ -305,10 +310,21 @@
 
 #endif	/* CONFIG_PM */
 
+
+static char *usb_nodename(struct device *dev)
+{
+	struct usb_device *usb_dev;
+
+	usb_dev = to_usb_device(dev);
+	return kasprintf(GFP_KERNEL, "bus/usb/%03d/%03d",
+			 usb_dev->bus->busnum, usb_dev->devnum);
+}
+
 struct device_type usb_device_type = {
 	.name =		"usb_device",
 	.release =	usb_release_dev,
 	.uevent =	usb_dev_uevent,
+	.nodename = 	usb_nodename,
 	.pm =		&usb_device_pm_ops,
 };
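The new .nodename callback lets the node-creating layer (devtmpfs or udev, if it honours the hint) place each device node under bus/usb/, e.g. /dev/bus/usb/002/005.  A minimal sketch of the formatting, with made-up bus and device numbers:

#include <stdio.h>

/* sketch only: what usb_nodename() formats for bus 2, device 5 */
int main(void)
{
	char name[32];

	snprintf(name, sizeof(name), "bus/usb/%03d/%03d", 2, 5);
	printf("%s\n", name);	/* prints "bus/usb/002/005" */
	return 0;
}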
 
@@ -348,6 +364,13 @@
 		kfree(dev);
 		return NULL;
 	}
+	/* Root hubs aren't true devices, so don't allocate HCD resources */
+	if (usb_hcd->driver->alloc_dev && parent &&
+		!usb_hcd->driver->alloc_dev(usb_hcd, dev)) {
+		usb_put_hcd(bus_to_hcd(bus));
+		kfree(dev);
+		return NULL;
+	}
 
 	device_initialize(&dev->dev);
 	dev->dev.bus = &usb_bus_type;
@@ -375,18 +398,24 @@
 	 */
 	if (unlikely(!parent)) {
 		dev->devpath[0] = '0';
+		dev->route = 0;
 
 		dev->dev.parent = bus->controller;
 		dev_set_name(&dev->dev, "usb%d", bus->busnum);
 		root_hub = 1;
 	} else {
 		/* match any labeling on the hubs; it's one-based */
-		if (parent->devpath[0] == '0')
+		if (parent->devpath[0] == '0') {
 			snprintf(dev->devpath, sizeof dev->devpath,
 				"%d", port1);
-		else
+			/* Root ports are not counted in route string */
+			dev->route = 0;
+		} else {
 			snprintf(dev->devpath, sizeof dev->devpath,
 				"%s.%d", parent->devpath, port1);
+			dev->route = parent->route +
+				(port1 << ((parent->level - 1)*4));
+		}
 
 		dev->dev.parent = &parent->dev;
 		dev_set_name(&dev->dev, "%d-%s", bus->busnum, dev->devpath);
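The route string built above packs one 4-bit port number per hub tier below the root hub; root-hub ports are not encoded.  A hedged worked example (assuming 'level' is 1 for a hub plugged into a root port, which is how the hub code is expected to count tiers):

/*
 * Sketch, not part of the patch:
 *   root port 2 -> hub A:     route 0x00 (root ports not encoded)
 *   hub A port 3 -> hub B:    route 0x00 + (3 << 0) = 0x03
 *   hub B port 5 -> device:   route 0x03 + (5 << 4) = 0x53
 */
static unsigned int child_route(unsigned int parent_route,
				int parent_level, int port1)
{
	/* one nibble per tier below the root hub */
	return parent_route + (port1 << ((parent_level - 1) * 4));
}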
@@ -799,12 +828,12 @@
 		return;
 
 	if (controller->dma_mask) {
-		dma_sync_single(controller,
+		dma_sync_single_for_cpu(controller,
 			urb->transfer_dma, urb->transfer_buffer_length,
 			usb_pipein(urb->pipe)
 				? DMA_FROM_DEVICE : DMA_TO_DEVICE);
 		if (usb_pipecontrol(urb->pipe))
-			dma_sync_single(controller,
+			dma_sync_single_for_cpu(controller,
 					urb->setup_dma,
 					sizeof(struct usb_ctrlrequest),
 					DMA_TO_DEVICE);
@@ -922,8 +951,8 @@
 			|| !controller->dma_mask)
 		return;
 
-	dma_sync_sg(controller, sg, n_hw_ents,
-			is_in ? DMA_FROM_DEVICE : DMA_TO_DEVICE);
+	dma_sync_sg_for_cpu(controller, sg, n_hw_ents,
+			    is_in ? DMA_FROM_DEVICE : DMA_TO_DEVICE);
 }
 EXPORT_SYMBOL_GPL(usb_buffer_dmasync_sg);
 #endif
@@ -1001,6 +1030,35 @@
 	.notifier_call = usb_bus_notify,
 };
 
+struct dentry *usb_debug_root;
+EXPORT_SYMBOL_GPL(usb_debug_root);
+
+struct dentry *usb_debug_devices;
+
+static int usb_debugfs_init(void)
+{
+	usb_debug_root = debugfs_create_dir("usb", NULL);
+	if (!usb_debug_root)
+		return -ENOENT;
+
+	usb_debug_devices = debugfs_create_file("devices", 0444,
+						usb_debug_root, NULL,
+						&usbfs_devices_fops);
+	if (!usb_debug_devices) {
+		debugfs_remove(usb_debug_root);
+		usb_debug_root = NULL;
+		return -ENOENT;
+	}
+
+	return 0;
+}
+
+static void usb_debugfs_cleanup(void)
+{
+	debugfs_remove(usb_debug_devices);
+	debugfs_remove(usb_debug_root);
+}
+
 /*
  * Init
  */
@@ -1012,6 +1070,10 @@
 		return 0;
 	}
 
+	retval = usb_debugfs_init();
+	if (retval)
+		goto out;
+
 	retval = ksuspend_usb_init();
 	if (retval)
 		goto out;
@@ -1021,9 +1083,6 @@
 	retval = bus_register_notifier(&usb_bus_type, &usb_bus_nb);
 	if (retval)
 		goto bus_notifier_failed;
-	retval = usb_host_init();
-	if (retval)
-		goto host_init_failed;
 	retval = usb_major_init();
 	if (retval)
 		goto major_init_failed;
@@ -1053,8 +1112,6 @@
 driver_register_failed:
 	usb_major_cleanup();
 major_init_failed:
-	usb_host_cleanup();
-host_init_failed:
 	bus_unregister_notifier(&usb_bus_type, &usb_bus_nb);
 bus_notifier_failed:
 	bus_unregister(&usb_bus_type);
@@ -1079,10 +1136,10 @@
 	usb_deregister(&usbfs_driver);
 	usb_devio_cleanup();
 	usb_hub_cleanup();
-	usb_host_cleanup();
 	bus_unregister_notifier(&usb_bus_type, &usb_bus_nb);
 	bus_unregister(&usb_bus_type);
 	ksuspend_usb_cleanup();
+	usb_debugfs_cleanup();
 }
 
 subsys_initcall(usb_init);
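Since usb_debug_root is exported, other USB drivers can hang their own entries under /sys/kernel/debug/usb.  A minimal sketch of such a user, with made-up names (my_counter, my_dentry), built on the stock debugfs helpers:

#include <linux/debugfs.h>

extern struct dentry *usb_debug_root;

static u32 my_counter;
static struct dentry *my_dentry;

static int __init my_usb_debugfs_init(void)
{
	my_dentry = debugfs_create_u32("my_counter", 0444,
				       usb_debug_root, &my_counter);
	return my_dentry ? 0 : -ENOENT;
}

static void my_usb_debugfs_exit(void)
{
	debugfs_remove(my_dentry);
}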
diff --git a/drivers/usb/core/usb.h b/drivers/usb/core/usb.h
index 79d8a9e..e2a8cfa 100644
--- a/drivers/usb/core/usb.h
+++ b/drivers/usb/core/usb.h
@@ -41,8 +41,6 @@
 extern void usb_hub_cleanup(void);
 extern int usb_major_init(void);
 extern void usb_major_cleanup(void);
-extern int usb_host_init(void);
-extern void usb_host_cleanup(void);
 
 #ifdef	CONFIG_PM
 
@@ -106,6 +104,7 @@
 extern struct bus_type usb_bus_type;
 extern struct device_type usb_device_type;
 extern struct device_type usb_if_device_type;
+extern struct device_type usb_ep_device_type;
 extern struct usb_device_driver usb_generic_driver;
 
 static inline int is_usb_device(const struct device *dev)
@@ -113,6 +112,16 @@
 	return dev->type == &usb_device_type;
 }
 
+static inline int is_usb_interface(const struct device *dev)
+{
+	return dev->type == &usb_if_device_type;
+}
+
+static inline int is_usb_endpoint(const struct device *dev)
+{
+	return dev->type == &usb_ep_device_type;
+}
+
 /* Do the same for device drivers and interface drivers. */
 
 static inline int is_usb_device_driver(struct device_driver *drv)
diff --git a/drivers/usb/gadget/Kconfig b/drivers/usb/gadget/Kconfig
index 080bb1e..5d1ddf4 100644
--- a/drivers/usb/gadget/Kconfig
+++ b/drivers/usb/gadget/Kconfig
@@ -156,7 +156,7 @@
 
 config USB_GADGET_FSL_USB2
 	boolean "Freescale Highspeed USB DR Peripheral Controller"
-	depends on FSL_SOC
+	depends on FSL_SOC || ARCH_MXC
 	select USB_GADGET_DUALSPEED
 	help
 	   Some of Freescale PowerPC processors have a High Speed
@@ -253,7 +253,7 @@
 
 config USB_GADGET_PXA27X
 	boolean "PXA 27x"
-	depends on ARCH_PXA && PXA27x
+	depends on ARCH_PXA && (PXA27x || PXA3xx)
 	select USB_OTG_UTILS
 	help
 	   Intel's PXA 27x series XScale ARM v5TE processors include
@@ -272,6 +272,20 @@
 	default USB_GADGET
 	select USB_GADGET_SELECTED
 
+config USB_GADGET_S3C_HSOTG
+	boolean "S3C HS/OtG USB Device controller"
+	depends on S3C_DEV_USB_HSOTG
+	select USB_GADGET_S3C_HSOTG_PIO
+	help
+	  Driver for the Samsung S3C64XX USB 2.0 high-speed gadget
+	  controller integrated into the S3C64XX series SoCs.
+
+config USB_S3C_HSOTG
+	tristate
+	depends on USB_GADGET_S3C_HSOTG
+	default USB_GADGET
+	select USB_GADGET_SELECTED
+
 config USB_GADGET_S3C2410
 	boolean "S3C2410 USB Device Controller"
 	depends on ARCH_S3C2410
@@ -460,6 +474,27 @@
 	default USB_GADGET
 	select USB_GADGET_SELECTED
 
+config USB_GADGET_LANGWELL
+	boolean "Intel Langwell USB Device Controller"
+	depends on PCI
+	select USB_GADGET_DUALSPEED
+	help
+	   Intel Langwell USB Device Controller is a High-Speed USB
+	   On-The-Go device controller.
+
+	   The number of programmable endpoints differs between
+	   controller revisions.
+
+	   Say "y" to link the driver statically, or "m" to build a
+	   dynamically linked module called "langwell_udc" and force all
+	   gadget drivers to also be dynamically linked.
+
+config USB_LANGWELL
+	tristate
+	depends on USB_GADGET_LANGWELL
+	default USB_GADGET
+	select USB_GADGET_SELECTED
+
 
 #
 # LAST -- dummy/emulated controller
@@ -566,6 +601,20 @@
 	  the "B-Peripheral" role, that device will use HNP to let this
 	  one serve as the USB host instead (in the "B-Host" role).
 
+config USB_AUDIO
+	tristate "Audio Gadget (EXPERIMENTAL)"
+	depends on SND
+	help
+	  Gadget Audio is compatible with USB Audio Class specification 1.0.
+	  It will include at least one AudioControl interface, zero or more
+	  AudioStream interfaces and zero or more MIDIStream interfaces.
+
+	  Gadget Audio will use the on-board ALSA (CONFIG_SND) audio card
+	  to play back or capture the audio stream.
+
+	  Say "y" to link the driver statically, or "m" to build a
+	  dynamically linked module called "g_audio".
+
 config USB_ETH
 	tristate "Ethernet Gadget (with CDC Ethernet support)"
 	depends on NET
diff --git a/drivers/usb/gadget/Makefile b/drivers/usb/gadget/Makefile
index 39a51d7..e6017e6b 100644
--- a/drivers/usb/gadget/Makefile
+++ b/drivers/usb/gadget/Makefile
@@ -18,14 +18,21 @@
 obj-$(CONFIG_USB_AT91)		+= at91_udc.o
 obj-$(CONFIG_USB_ATMEL_USBA)	+= atmel_usba_udc.o
 obj-$(CONFIG_USB_FSL_USB2)	+= fsl_usb2_udc.o
+fsl_usb2_udc-objs		:= fsl_udc_core.o
+ifeq ($(CONFIG_ARCH_MXC),y)
+fsl_usb2_udc-objs		+= fsl_mx3_udc.o
+endif
 obj-$(CONFIG_USB_M66592)	+= m66592-udc.o
 obj-$(CONFIG_USB_FSL_QE)	+= fsl_qe_udc.o
 obj-$(CONFIG_USB_CI13XXX)	+= ci13xxx_udc.o
+obj-$(CONFIG_USB_S3C_HSOTG)	+= s3c-hsotg.o
+obj-$(CONFIG_USB_LANGWELL)	+= langwell_udc.o
 
 #
 # USB gadget drivers
 #
 g_zero-objs			:= zero.o
+g_audio-objs			:= audio.o
 g_ether-objs			:= ether.o
 g_serial-objs			:= serial.o
 g_midi-objs			:= gmidi.o
@@ -35,6 +42,7 @@
 g_cdc-objs			:= cdc2.o
 
 obj-$(CONFIG_USB_ZERO)		+= g_zero.o
+obj-$(CONFIG_USB_AUDIO)		+= g_audio.o
 obj-$(CONFIG_USB_ETH)		+= g_ether.o
 obj-$(CONFIG_USB_GADGETFS)	+= gadgetfs.o
 obj-$(CONFIG_USB_FILE_STORAGE)	+= g_file_storage.o
diff --git a/drivers/usb/gadget/at91_udc.c b/drivers/usb/gadget/at91_udc.c
index 0b2bb8f..72bae8f 100644
--- a/drivers/usb/gadget/at91_udc.c
+++ b/drivers/usb/gadget/at91_udc.c
@@ -485,7 +485,7 @@
 		return -ESHUTDOWN;
 	}
 
-	tmp = desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK;
+	tmp = usb_endpoint_type(desc);
 	switch (tmp) {
 	case USB_ENDPOINT_XFER_CONTROL:
 		DBG("only one control endpoint\n");
@@ -517,7 +517,7 @@
 	local_irq_save(flags);
 
 	/* initialize endpoint to match this descriptor */
-	ep->is_in = (desc->bEndpointAddress & USB_DIR_IN) != 0;
+	ep->is_in = usb_endpoint_dir_in(desc);
 	ep->is_iso = (tmp == USB_ENDPOINT_XFER_ISOC);
 	ep->stopped = 0;
 	if (ep->is_in)
@@ -1574,7 +1574,7 @@
 
 	udc->driver = driver;
 	udc->gadget.dev.driver = &driver->driver;
-	udc->gadget.dev.driver_data = &driver->driver;
+	dev_set_drvdata(&udc->gadget.dev, &driver->driver);
 	udc->enabled = 1;
 	udc->selfpowered = 1;
 
@@ -1583,7 +1583,7 @@
 		DBG("driver->bind() returned %d\n", retval);
 		udc->driver = NULL;
 		udc->gadget.dev.driver = NULL;
-		udc->gadget.dev.driver_data = NULL;
+		dev_set_drvdata(&udc->gadget.dev, NULL);
 		udc->enabled = 0;
 		udc->selfpowered = 0;
 		return retval;
@@ -1613,7 +1613,7 @@
 
 	driver->unbind(&udc->gadget);
 	udc->gadget.dev.driver = NULL;
-	udc->gadget.dev.driver_data = NULL;
+	dev_set_drvdata(&udc->gadget.dev, NULL);
 	udc->driver = NULL;
 
 	DBG("unbound from %s\n", driver->driver.name);
diff --git a/drivers/usb/gadget/atmel_usba_udc.c b/drivers/usb/gadget/atmel_usba_udc.c
index 05c913c..4e970cf 100644
--- a/drivers/usb/gadget/atmel_usba_udc.c
+++ b/drivers/usb/gadget/atmel_usba_udc.c
@@ -326,13 +326,7 @@
 	return 1;
 }
 
-#if defined(CONFIG_AVR32)
-
-static void toggle_bias(int is_on)
-{
-}
-
-#elif defined(CONFIG_ARCH_AT91)
+#if defined(CONFIG_ARCH_AT91SAM9RL)
 
 #include <mach/at91_pmc.h>
 
@@ -346,7 +340,13 @@
 		at91_sys_write(AT91_CKGR_UCKR, uckr & ~(AT91_PMC_BIASEN));
 }
 
-#endif /* CONFIG_ARCH_AT91 */
+#else
+
+static void toggle_bias(int is_on)
+{
+}
+
+#endif /* CONFIG_ARCH_AT91SAM9RL */
 
 static void next_fifo_transaction(struct usba_ep *ep, struct usba_request *req)
 {
@@ -550,12 +550,12 @@
 	DBG(DBG_HW, "%s: EPT_SIZE = %lu (maxpacket = %lu)\n",
 			ep->ep.name, ept_cfg, maxpacket);
 
-	if ((desc->bEndpointAddress & USB_ENDPOINT_DIR_MASK) == USB_DIR_IN) {
+	if (usb_endpoint_dir_in(desc)) {
 		ep->is_in = 1;
 		ept_cfg |= USBA_EPT_DIR_IN;
 	}
 
-	switch (desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) {
+	switch (usb_endpoint_type(desc)) {
 	case USB_ENDPOINT_XFER_CONTROL:
 		ept_cfg |= USBA_BF(EPT_TYPE, USBA_EPT_TYPE_CONTROL);
 		ept_cfg |= USBA_BF(BK_NUMBER, USBA_BK_NUMBER_ONE);
diff --git a/drivers/usb/gadget/audio.c b/drivers/usb/gadget/audio.c
new file mode 100644
index 0000000..94de7e8
--- /dev/null
+++ b/drivers/usb/gadget/audio.c
@@ -0,0 +1,302 @@
+/*
+ * audio.c -- Audio gadget driver
+ *
+ * Copyright (C) 2008 Bryan Wu <cooloney@kernel.org>
+ * Copyright (C) 2008 Analog Devices, Inc
+ *
+ * Enter bugs at http://blackfin.uclinux.org/
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+/* #define VERBOSE_DEBUG */
+
+#include <linux/kernel.h>
+#include <linux/utsname.h>
+
+#include "u_audio.h"
+
+#define DRIVER_DESC		"Linux USB Audio Gadget"
+#define DRIVER_VERSION		"Dec 18, 2008"
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * Kbuild is not very cooperative with respect to linking separately
+ * compiled library objects into one module.  So for now we won't use
+ * separate compilation ... ensuring init/exit sections work to shrink
+ * the runtime footprint, and giving us at least some parts of what
+ * a "gcc --combine ... part1.c part2.c part3.c ... " build would.
+ */
+#include "composite.c"
+#include "usbstring.c"
+#include "config.c"
+#include "epautoconf.c"
+
+#include "u_audio.c"
+#include "f_audio.c"
+
+/*-------------------------------------------------------------------------*/
+
+/* DO NOT REUSE THESE IDs with a protocol-incompatible driver!!  Ever!!
+ * Instead:  allocate your own, using normal USB-IF procedures.
+ */
+
+/* Thanks to NetChip Technologies for donating this product ID. */
+#define AUDIO_VENDOR_NUM		0x0525	/* NetChip */
+#define AUDIO_PRODUCT_NUM		0xa4a1	/* Linux-USB Audio Gadget */
+
+/*-------------------------------------------------------------------------*/
+
+static struct usb_device_descriptor device_desc = {
+	.bLength =		sizeof device_desc,
+	.bDescriptorType =	USB_DT_DEVICE,
+
+	.bcdUSB =		__constant_cpu_to_le16(0x200),
+
+	.bDeviceClass =		USB_CLASS_PER_INTERFACE,
+	.bDeviceSubClass =	0,
+	.bDeviceProtocol =	0,
+	/* .bMaxPacketSize0 = f(hardware) */
+
+	/* Vendor and product id defaults change according to what configs
+	 * we support.  (As does bNumConfigurations.)  These values can
+	 * also be overridden by module parameters.
+	 */
+	.idVendor =		__constant_cpu_to_le16(AUDIO_VENDOR_NUM),
+	.idProduct =		__constant_cpu_to_le16(AUDIO_PRODUCT_NUM),
+	/* .bcdDevice = f(hardware) */
+	/* .iManufacturer = DYNAMIC */
+	/* .iProduct = DYNAMIC */
+	/* NO SERIAL NUMBER */
+	.bNumConfigurations =	1,
+};
+
+static struct usb_otg_descriptor otg_descriptor = {
+	.bLength =		sizeof otg_descriptor,
+	.bDescriptorType =	USB_DT_OTG,
+
+	/* REVISIT SRP-only hardware is possible, although
+	 * it would not be called "OTG" ...
+	 */
+	.bmAttributes =		USB_OTG_SRP | USB_OTG_HNP,
+};
+
+static const struct usb_descriptor_header *otg_desc[] = {
+	(struct usb_descriptor_header *) &otg_descriptor,
+	NULL,
+};
+
+/*-------------------------------------------------------------------------*/
+
+/**
+ * Handle USB audio endpoint set/get command in setup class request
+ */
+
+static int audio_set_endpoint_req(struct usb_configuration *c,
+		const struct usb_ctrlrequest *ctrl)
+{
+	struct usb_composite_dev *cdev = c->cdev;
+	int			value = -EOPNOTSUPP;
+	u16			ep = le16_to_cpu(ctrl->wIndex);
+	u16			len = le16_to_cpu(ctrl->wLength);
+	u16			w_value = le16_to_cpu(ctrl->wValue);
+
+	DBG(cdev, "bRequest 0x%x, w_value 0x%04x, len %d, endpoint %d\n",
+			ctrl->bRequest, w_value, len, ep);
+
+	switch (ctrl->bRequest) {
+	case SET_CUR:
+		value = 0;
+		break;
+
+	case SET_MIN:
+		break;
+
+	case SET_MAX:
+		break;
+
+	case SET_RES:
+		break;
+
+	case SET_MEM:
+		break;
+
+	default:
+		break;
+	}
+
+	return value;
+}
+
+static int audio_get_endpoint_req(struct usb_configuration *c,
+		const struct usb_ctrlrequest *ctrl)
+{
+	struct usb_composite_dev *cdev = c->cdev;
+	int value = -EOPNOTSUPP;
+	u8 ep = ((le16_to_cpu(ctrl->wIndex) >> 8) & 0xFF);
+	u16 len = le16_to_cpu(ctrl->wLength);
+	u16 w_value = le16_to_cpu(ctrl->wValue);
+
+	DBG(cdev, "bRequest 0x%x, w_value 0x%04x, len %d, endpoint %d\n",
+			ctrl->bRequest, w_value, len, ep);
+
+	switch (ctrl->bRequest) {
+	case GET_CUR:
+	case GET_MIN:
+	case GET_MAX:
+	case GET_RES:
+		value = 3;
+		break;
+	case GET_MEM:
+		break;
+	default:
+		break;
+	}
+
+	return value;
+}
+
+static int
+audio_setup(struct usb_configuration *c, const struct usb_ctrlrequest *ctrl)
+{
+	struct usb_composite_dev *cdev = c->cdev;
+	struct usb_request *req = cdev->req;
+	int value = -EOPNOTSUPP;
+	u16 w_index = le16_to_cpu(ctrl->wIndex);
+	u16 w_value = le16_to_cpu(ctrl->wValue);
+	u16 w_length = le16_to_cpu(ctrl->wLength);
+
+	/* composite driver infrastructure handles everything except
+	 * Audio class messages; interface activation uses set_alt().
+	 */
+	switch (ctrl->bRequestType) {
+	case USB_AUDIO_SET_ENDPOINT:
+		value = audio_set_endpoint_req(c, ctrl);
+		break;
+
+	case USB_AUDIO_GET_ENDPOINT:
+		value = audio_get_endpoint_req(c, ctrl);
+		break;
+
+	default:
+		ERROR(cdev, "Invalid control req%02x.%02x v%04x i%04x l%d\n",
+			ctrl->bRequestType, ctrl->bRequest,
+			w_value, w_index, w_length);
+	}
+
+	/* respond with data transfer or status phase? */
+	if (value >= 0) {
+		DBG(cdev, "Audio req%02x.%02x v%04x i%04x l%d\n",
+			ctrl->bRequestType, ctrl->bRequest,
+			w_value, w_index, w_length);
+		req->zero = 0;
+		req->length = value;
+		value = usb_ep_queue(cdev->gadget->ep0, req, GFP_ATOMIC);
+		if (value < 0)
+			ERROR(cdev, "Audio response on err %d\n", value);
+	}
+
+	/* device either stalls (value < 0) or reports success */
+	return value;
+}
+
+/*-------------------------------------------------------------------------*/
+
+static int __init audio_do_config(struct usb_configuration *c)
+{
+	/* FIXME alloc iConfiguration string, set it in c->strings */
+
+	if (gadget_is_otg(c->cdev->gadget)) {
+		c->descriptors = otg_desc;
+		c->bmAttributes |= USB_CONFIG_ATT_WAKEUP;
+	}
+
+	audio_bind_config(c);
+
+	return 0;
+}
+
+static struct usb_configuration audio_config_driver = {
+	.label			= DRIVER_DESC,
+	.bind			= audio_do_config,
+	.setup			= audio_setup,
+	.bConfigurationValue	= 1,
+	/* .iConfiguration = DYNAMIC */
+	.bmAttributes		= USB_CONFIG_ATT_SELFPOWER,
+};
+
+/*-------------------------------------------------------------------------*/
+
+static int __init audio_bind(struct usb_composite_dev *cdev)
+{
+	int			gcnum;
+	int			status;
+
+	gcnum = usb_gadget_controller_number(cdev->gadget);
+	if (gcnum >= 0)
+		device_desc.bcdDevice = cpu_to_le16(0x0300 | gcnum);
+	else {
+		ERROR(cdev, "controller '%s' not recognized; trying %s\n",
+			cdev->gadget->name,
+			audio_config_driver.label);
+		device_desc.bcdDevice =
+			__constant_cpu_to_le16(0x0300 | 0x0099);
+	}
+
+	/* device descriptor strings: manufacturer, product */
+	snprintf(manufacturer, sizeof manufacturer, "%s %s with %s",
+		init_utsname()->sysname, init_utsname()->release,
+		cdev->gadget->name);
+	status = usb_string_id(cdev);
+	if (status < 0)
+		goto fail;
+	strings_dev[STRING_MANUFACTURER_IDX].id = status;
+	device_desc.iManufacturer = status;
+
+	status = usb_string_id(cdev);
+	if (status < 0)
+		goto fail;
+	strings_dev[STRING_PRODUCT_IDX].id = status;
+	device_desc.iProduct = status;
+
+	status = usb_add_config(cdev, &audio_config_driver);
+	if (status < 0)
+		goto fail;
+
+	INFO(cdev, "%s, version: %s\n", DRIVER_DESC, DRIVER_VERSION);
+	return 0;
+
+fail:
+	return status;
+}
+
+static int __exit audio_unbind(struct usb_composite_dev *cdev)
+{
+	return 0;
+}
+
+static struct usb_composite_driver audio_driver = {
+	.name		= "g_audio",
+	.dev		= &device_desc,
+	.strings	= audio_strings,
+	.bind		= audio_bind,
+	.unbind		= __exit_p(audio_unbind),
+};
+
+static int __init init(void)
+{
+	return usb_composite_register(&audio_driver);
+}
+module_init(init);
+
+static void __exit cleanup(void)
+{
+	usb_composite_unregister(&audio_driver);
+}
+module_exit(cleanup);
+
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_AUTHOR("Bryan Wu <cooloney@kernel.org>");
+MODULE_LICENSE("GPL");
+
diff --git a/drivers/usb/gadget/ci13xxx_udc.c b/drivers/usb/gadget/ci13xxx_udc.c
index 38e531e..c7cb87a 100644
--- a/drivers/usb/gadget/ci13xxx_udc.c
+++ b/drivers/usb/gadget/ci13xxx_udc.c
@@ -1977,9 +1977,9 @@
 	if (!list_empty(&mEp->qh[mEp->dir].queue))
 		warn("enabling a non-empty endpoint!");
 
-	mEp->dir  = (desc->bEndpointAddress & USB_ENDPOINT_DIR_MASK) ? TX : RX;
-	mEp->num  =  desc->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK;
-	mEp->type =  desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK;
+	mEp->dir  = usb_endpoint_dir_in(desc) ? TX : RX;
+	mEp->num  = usb_endpoint_num(desc);
+	mEp->type = usb_endpoint_type(desc);
 
 	mEp->ep.maxpacket = __constant_le16_to_cpu(desc->wMaxPacketSize);
 
diff --git a/drivers/usb/gadget/f_audio.c b/drivers/usb/gadget/f_audio.c
new file mode 100644
index 0000000..66527ba
--- /dev/null
+++ b/drivers/usb/gadget/f_audio.c
@@ -0,0 +1,707 @@
+/*
+ * f_audio.c -- USB Audio class function driver
+ *
+ * Copyright (C) 2008 Bryan Wu <cooloney@kernel.org>
+ * Copyright (C) 2008 Analog Devices, Inc
+ *
+ * Enter bugs at http://blackfin.uclinux.org/
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <asm/atomic.h>
+
+#include "u_audio.h"
+
+#define OUT_EP_MAX_PACKET_SIZE	200
+static int req_buf_size = OUT_EP_MAX_PACKET_SIZE;
+module_param(req_buf_size, int, S_IRUGO);
+MODULE_PARM_DESC(req_buf_size, "ISO OUT endpoint request buffer size");
+
+static int req_count = 256;
+module_param(req_count, int, S_IRUGO);
+MODULE_PARM_DESC(req_count, "ISO OUT endpoint request count");
+
+static int audio_buf_size = 48000;
+module_param(audio_buf_size, int, S_IRUGO);
+MODULE_PARM_DESC(audio_buf_size, "Audio buffer size");
+
+/*
+ * DESCRIPTORS ... most are static, but strings and full
+ * configuration descriptors are built on demand.
+ */
+
+/*
+ * We have two interfaces - AudioControl and AudioStreaming
+ * TODO: only support playback currently
+ */
+#define F_AUDIO_AC_INTERFACE	0
+#define F_AUDIO_AS_INTERFACE	1
+#define F_AUDIO_NUM_INTERFACES	2
+
+/* B.3.1  Standard AC Interface Descriptor */
+static struct usb_interface_descriptor ac_interface_desc __initdata = {
+	.bLength =		USB_DT_INTERFACE_SIZE,
+	.bDescriptorType =	USB_DT_INTERFACE,
+	.bNumEndpoints =	0,
+	.bInterfaceClass =	USB_CLASS_AUDIO,
+	.bInterfaceSubClass =	USB_SUBCLASS_AUDIOCONTROL,
+};
+
+DECLARE_USB_AC_HEADER_DESCRIPTOR(2);
+
+#define USB_DT_AC_HEADER_LENGH	USB_DT_AC_HEADER_SIZE(F_AUDIO_NUM_INTERFACES)
+/* B.3.2  Class-Specific AC Interface Descriptor */
+static struct usb_ac_header_descriptor_2 ac_header_desc = {
+	.bLength =		USB_DT_AC_HEADER_LENGH,
+	.bDescriptorType =	USB_DT_CS_INTERFACE,
+	.bDescriptorSubtype =	HEADER,
+	.bcdADC =		__constant_cpu_to_le16(0x0100),
+	.wTotalLength =		__constant_cpu_to_le16(USB_DT_AC_HEADER_LENGH),
+	.bInCollection =	F_AUDIO_NUM_INTERFACES,
+	.baInterfaceNr = {
+		[0] =		F_AUDIO_AC_INTERFACE,
+		[1] =		F_AUDIO_AS_INTERFACE,
+	}
+};
+
+#define INPUT_TERMINAL_ID	1
+static struct usb_input_terminal_descriptor input_terminal_desc = {
+	.bLength =		USB_DT_AC_INPUT_TERMINAL_SIZE,
+	.bDescriptorType =	USB_DT_CS_INTERFACE,
+	.bDescriptorSubtype =	INPUT_TERMINAL,
+	.bTerminalID =		INPUT_TERMINAL_ID,
+	.wTerminalType =	USB_AC_TERMINAL_STREAMING,
+	.bAssocTerminal =	0,
+	.wChannelConfig =	0x3,
+};
+
+DECLARE_USB_AC_FEATURE_UNIT_DESCRIPTOR(0);
+
+#define FEATURE_UNIT_ID		2
+static struct usb_ac_feature_unit_descriptor_0 feature_unit_desc = {
+	.bLength		= USB_DT_AC_FEATURE_UNIT_SIZE(0),
+	.bDescriptorType	= USB_DT_CS_INTERFACE,
+	.bDescriptorSubtype	= FEATURE_UNIT,
+	.bUnitID		= FEATURE_UNIT_ID,
+	.bSourceID		= INPUT_TERMINAL_ID,
+	.bControlSize		= 2,
+	.bmaControls[0]		= (FU_MUTE | FU_VOLUME),
+};
+
+static struct usb_audio_control mute_control = {
+	.list = LIST_HEAD_INIT(mute_control.list),
+	.name = "Mute Control",
+	.type = MUTE_CONTROL,
+	/* Todo: add real Mute control code */
+	.set = generic_set_cmd,
+	.get = generic_get_cmd,
+};
+
+static struct usb_audio_control volume_control = {
+	.list = LIST_HEAD_INIT(volume_control.list),
+	.name = "Volume Control",
+	.type = VOLUME_CONTROL,
+	/* Todo: add real Volume control code */
+	.set = generic_set_cmd,
+	.get = generic_get_cmd,
+};
+
+static struct usb_audio_control_selector feature_unit = {
+	.list = LIST_HEAD_INIT(feature_unit.list),
+	.id = FEATURE_UNIT_ID,
+	.name = "Mute & Volume Control",
+	.type = FEATURE_UNIT,
+	.desc = (struct usb_descriptor_header *)&feature_unit_desc,
+};
+
+#define OUTPUT_TERMINAL_ID	3
+static struct usb_output_terminal_descriptor output_terminal_desc = {
+	.bLength		= USB_DT_AC_OUTPUT_TERMINAL_SIZE,
+	.bDescriptorType	= USB_DT_CS_INTERFACE,
+	.bDescriptorSubtype	= OUTPUT_TERMINAL,
+	.bTerminalID		= OUTPUT_TERMINAL_ID,
+	.wTerminalType		= USB_AC_OUTPUT_TERMINAL_SPEAKER,
+	.bAssocTerminal		= FEATURE_UNIT_ID,
+	.bSourceID		= FEATURE_UNIT_ID,
+};
+
+/* B.4.1  Standard AS Interface Descriptor */
+static struct usb_interface_descriptor as_interface_alt_0_desc = {
+	.bLength =		USB_DT_INTERFACE_SIZE,
+	.bDescriptorType =	USB_DT_INTERFACE,
+	.bAlternateSetting =	0,
+	.bNumEndpoints =	0,
+	.bInterfaceClass =	USB_CLASS_AUDIO,
+	.bInterfaceSubClass =	USB_SUBCLASS_AUDIOSTREAMING,
+};
+
+static struct usb_interface_descriptor as_interface_alt_1_desc = {
+	.bLength =		USB_DT_INTERFACE_SIZE,
+	.bDescriptorType =	USB_DT_INTERFACE,
+	.bAlternateSetting =	1,
+	.bNumEndpoints =	1,
+	.bInterfaceClass =	USB_CLASS_AUDIO,
+	.bInterfaceSubClass =	USB_SUBCLASS_AUDIOSTREAMING,
+};
+
+/* B.4.2  Class-Specific AS Interface Descriptor */
+static struct usb_as_header_descriptor as_header_desc = {
+	.bLength =		USB_DT_AS_HEADER_SIZE,
+	.bDescriptorType =	USB_DT_CS_INTERFACE,
+	.bDescriptorSubtype =	AS_GENERAL,
+	.bTerminalLink =	INPUT_TERMINAL_ID,
+	.bDelay =		1,
+	.wFormatTag =		USB_AS_AUDIO_FORMAT_TYPE_I_PCM,
+};
+
+DECLARE_USB_AS_FORMAT_TYPE_I_DISCRETE_DESC(1);
+
+static struct usb_as_formate_type_i_discrete_descriptor_1 as_type_i_desc = {
+	.bLength =		USB_AS_FORMAT_TYPE_I_DISCRETE_DESC_SIZE(1),
+	.bDescriptorType =	USB_DT_CS_INTERFACE,
+	.bDescriptorSubtype =	FORMAT_TYPE,
+	.bFormatType =		USB_AS_FORMAT_TYPE_I,
+	.bSubframeSize =	2,
+	.bBitResolution =	16,
+	.bSamFreqType =		1,
+};
+
+/* Standard ISO OUT Endpoint Descriptor */
+static struct usb_endpoint_descriptor as_out_ep_desc __initdata = {
+	.bLength =		USB_DT_ENDPOINT_AUDIO_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+	.bEndpointAddress =	USB_DIR_OUT,
+	.bmAttributes =		USB_AS_ENDPOINT_ADAPTIVE
+				| USB_ENDPOINT_XFER_ISOC,
+	.wMaxPacketSize =	__constant_cpu_to_le16(OUT_EP_MAX_PACKET_SIZE),
+	.bInterval =		4,
+};
+
+/* Class-specific AS ISO OUT Endpoint Descriptor */
+static struct usb_as_iso_endpoint_descriptor as_iso_out_desc __initdata = {
+	.bLength =		USB_AS_ISO_ENDPOINT_DESC_SIZE,
+	.bDescriptorType =	USB_DT_CS_ENDPOINT,
+	.bDescriptorSubtype =	EP_GENERAL,
+	.bmAttributes = 	1,
+	.bLockDelayUnits =	1,
+	.wLockDelay =		__constant_cpu_to_le16(1),
+};
+
+static struct usb_descriptor_header *f_audio_desc[] __initdata = {
+	(struct usb_descriptor_header *)&ac_interface_desc,
+	(struct usb_descriptor_header *)&ac_header_desc,
+
+	(struct usb_descriptor_header *)&input_terminal_desc,
+	(struct usb_descriptor_header *)&output_terminal_desc,
+	(struct usb_descriptor_header *)&feature_unit_desc,
+
+	(struct usb_descriptor_header *)&as_interface_alt_0_desc,
+	(struct usb_descriptor_header *)&as_interface_alt_1_desc,
+	(struct usb_descriptor_header *)&as_header_desc,
+
+	(struct usb_descriptor_header *)&as_type_i_desc,
+
+	(struct usb_descriptor_header *)&as_out_ep_desc,
+	(struct usb_descriptor_header *)&as_iso_out_desc,
+	NULL,
+};
+
+/* string IDs are assigned dynamically */
+
+#define STRING_MANUFACTURER_IDX		0
+#define STRING_PRODUCT_IDX		1
+
+static char manufacturer[50];
+
+static struct usb_string strings_dev[] = {
+	[STRING_MANUFACTURER_IDX].s = manufacturer,
+	[STRING_PRODUCT_IDX].s = DRIVER_DESC,
+	{  } /* end of list */
+};
+
+static struct usb_gadget_strings stringtab_dev = {
+	.language	= 0x0409,	/* en-us */
+	.strings	= strings_dev,
+};
+
+static struct usb_gadget_strings *audio_strings[] = {
+	&stringtab_dev,
+	NULL,
+};
+
+/*
+ * This function driver implements a USB Audio Class 1.0 device backed
+ * by an ALSA sound card on the gadget side.
+ */
+
+/*-------------------------------------------------------------------------*/
+struct f_audio_buf {
+	u8 *buf;
+	int actual;
+	struct list_head list;
+};
+
+static struct f_audio_buf *f_audio_buffer_alloc(int buf_size)
+{
+	struct f_audio_buf *copy_buf;
+
+	copy_buf = kzalloc(sizeof *copy_buf, GFP_ATOMIC);
+	if (!copy_buf)
+		return (struct f_audio_buf *)-ENOMEM;
+
+	copy_buf->buf = kzalloc(buf_size, GFP_ATOMIC);
+	if (!copy_buf->buf) {
+		kfree(copy_buf);
+		return (struct f_audio_buf *)-ENOMEM;
+	}
+
+	return copy_buf;
+}
+
+static void f_audio_buffer_free(struct f_audio_buf *audio_buf)
+{
+	kfree(audio_buf->buf);
+	kfree(audio_buf);
+}
+/*-------------------------------------------------------------------------*/
+
+struct f_audio {
+	struct gaudio			card;
+
+	/* endpoints handle full and/or high speeds */
+	struct usb_ep			*out_ep;
+	struct usb_endpoint_descriptor	*out_desc;
+
+	spinlock_t			lock;
+	struct f_audio_buf *copy_buf;
+	struct work_struct playback_work;
+	struct list_head play_queue;
+
+	/* Control Set command */
+	struct list_head cs;
+	u8 set_cmd;
+	struct usb_audio_control *set_con;
+};
+
+static inline struct f_audio *func_to_audio(struct usb_function *f)
+{
+	return container_of(f, struct f_audio, card.func);
+}
+
+/*-------------------------------------------------------------------------*/
+
+static void f_audio_playback_work(struct work_struct *data)
+{
+	struct f_audio *audio = container_of(data, struct f_audio,
+					playback_work);
+	struct f_audio_buf *play_buf;
+
+	spin_lock_irq(&audio->lock);
+	if (list_empty(&audio->play_queue)) {
+		spin_unlock_irq(&audio->lock);
+		return;
+	}
+	play_buf = list_first_entry(&audio->play_queue,
+			struct f_audio_buf, list);
+	list_del(&play_buf->list);
+	spin_unlock_irq(&audio->lock);
+
+	u_audio_playback(&audio->card, play_buf->buf, play_buf->actual);
+	f_audio_buffer_free(play_buf);
+
+	return;
+}
+
+static int f_audio_out_ep_complete(struct usb_ep *ep, struct usb_request *req)
+{
+	struct f_audio *audio = req->context;
+	struct usb_composite_dev *cdev = audio->card.func.config->cdev;
+	struct f_audio_buf *copy_buf = audio->copy_buf;
+	int err;
+
+	if (!copy_buf)
+		return -EINVAL;
+
+	/* Copy buffer is full, add it to the play_queue */
+	if (audio_buf_size - copy_buf->actual < req->actual) {
+		list_add_tail(&copy_buf->list, &audio->play_queue);
+		schedule_work(&audio->playback_work);
+		copy_buf = f_audio_buffer_alloc(audio_buf_size);
+		if (copy_buf < 0)
+			return -ENOMEM;
+	}
+
+	memcpy(copy_buf->buf + copy_buf->actual, req->buf, req->actual);
+	copy_buf->actual += req->actual;
+	audio->copy_buf = copy_buf;
+
+	err = usb_ep_queue(ep, req, GFP_ATOMIC);
+	if (err)
+		ERROR(cdev, "%s queue req: %d\n", ep->name, err);
+
+	return 0;
+
+}
+
+static void f_audio_complete(struct usb_ep *ep, struct usb_request *req)
+{
+	struct f_audio *audio = req->context;
+	int status = req->status;
+	u32 data = 0;
+	struct usb_ep *out_ep = audio->out_ep;
+
+	switch (status) {
+
+	case 0:				/* normal completion? */
+		if (ep == out_ep)
+			f_audio_out_ep_complete(ep, req);
+		else if (audio->set_con) {
+			memcpy(&data, req->buf, req->length);
+			audio->set_con->set(audio->set_con, audio->set_cmd,
+					le16_to_cpu(data));
+			audio->set_con = NULL;
+		}
+		break;
+	default:
+		break;
+	}
+}
+
+static int audio_set_intf_req(struct usb_function *f,
+		const struct usb_ctrlrequest *ctrl)
+{
+	struct f_audio		*audio = func_to_audio(f);
+	struct usb_composite_dev *cdev = f->config->cdev;
+	struct usb_request	*req = cdev->req;
+	u8			id = ((le16_to_cpu(ctrl->wIndex) >> 8) & 0xFF);
+	u16			len = le16_to_cpu(ctrl->wLength);
+	u16			w_value = le16_to_cpu(ctrl->wValue);
+	u8			con_sel = (w_value >> 8) & 0xFF;
+	u8			cmd = (ctrl->bRequest & 0x0F);
+	struct usb_audio_control_selector *cs;
+	struct usb_audio_control *con;
+
+	DBG(cdev, "bRequest 0x%x, w_value 0x%04x, len %d, entity %d\n",
+			ctrl->bRequest, w_value, len, id);
+
+	list_for_each_entry(cs, &audio->cs, list) {
+		if (cs->id == id) {
+			list_for_each_entry(con, &cs->control, list) {
+				if (con->type == con_sel) {
+					audio->set_con = con;
+					break;
+				}
+			}
+			break;
+		}
+	}
+
+	audio->set_cmd = cmd;
+	req->context = audio;
+	req->complete = f_audio_complete;
+
+	return len;
+}
+
+static int audio_get_intf_req(struct usb_function *f,
+		const struct usb_ctrlrequest *ctrl)
+{
+	struct f_audio		*audio = func_to_audio(f);
+	struct usb_composite_dev *cdev = f->config->cdev;
+	struct usb_request	*req = cdev->req;
+	int			value = -EOPNOTSUPP;
+	u8			id = ((le16_to_cpu(ctrl->wIndex) >> 8) & 0xFF);
+	u16			len = le16_to_cpu(ctrl->wLength);
+	u16			w_value = le16_to_cpu(ctrl->wValue);
+	u8			con_sel = (w_value >> 8) & 0xFF;
+	u8			cmd = (ctrl->bRequest & 0x0F);
+	struct usb_audio_control_selector *cs;
+	struct usb_audio_control *con;
+
+	DBG(cdev, "bRequest 0x%x, w_value 0x%04x, len %d, entity %d\n",
+			ctrl->bRequest, w_value, len, id);
+
+	list_for_each_entry(cs, &audio->cs, list) {
+		if (cs->id == id) {
+			list_for_each_entry(con, &cs->control, list) {
+				if (con->type == con_sel && con->get) {
+					value = con->get(con, cmd);
+					break;
+				}
+			}
+			break;
+		}
+	}
+
+	req->context = audio;
+	req->complete = f_audio_complete;
+	memcpy(req->buf, &value, len);
+
+	return len;
+}
+
+static int
+f_audio_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl)
+{
+	struct usb_composite_dev *cdev = f->config->cdev;
+	struct usb_request	*req = cdev->req;
+	int			value = -EOPNOTSUPP;
+	u16			w_index = le16_to_cpu(ctrl->wIndex);
+	u16			w_value = le16_to_cpu(ctrl->wValue);
+	u16			w_length = le16_to_cpu(ctrl->wLength);
+
+	/* composite driver infrastructure handles everything except
+	 * Audio class messages; interface activation uses set_alt().
+	 */
+	switch (ctrl->bRequestType) {
+	case USB_AUDIO_SET_INTF:
+		value = audio_set_intf_req(f, ctrl);
+		break;
+
+	case USB_AUDIO_GET_INTF:
+		value = audio_get_intf_req(f, ctrl);
+		break;
+
+	default:
+		ERROR(cdev, "invalid control req%02x.%02x v%04x i%04x l%d\n",
+			ctrl->bRequestType, ctrl->bRequest,
+			w_value, w_index, w_length);
+	}
+
+	/* respond with data transfer or status phase? */
+	if (value >= 0) {
+		DBG(cdev, "audio req%02x.%02x v%04x i%04x l%d\n",
+			ctrl->bRequestType, ctrl->bRequest,
+			w_value, w_index, w_length);
+		req->zero = 0;
+		req->length = value;
+		value = usb_ep_queue(cdev->gadget->ep0, req, GFP_ATOMIC);
+		if (value < 0)
+			ERROR(cdev, "audio response on err %d\n", value);
+	}
+
+	/* device either stalls (value < 0) or reports success */
+	return value;
+}
+
+static int f_audio_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
+{
+	struct f_audio		*audio = func_to_audio(f);
+	struct usb_composite_dev *cdev = f->config->cdev;
+	struct usb_ep *out_ep = audio->out_ep;
+	struct usb_request *req;
+	int i = 0, err = 0;
+
+	DBG(cdev, "intf %d, alt %d\n", intf, alt);
+
+	if (intf == 1) {
+		if (alt == 1) {
+			usb_ep_enable(out_ep, audio->out_desc);
+			out_ep->driver_data = audio;
+			audio->copy_buf = f_audio_buffer_alloc(audio_buf_size);
+
+			/*
+			 * allocate a bunch of read buffers
+			 * and queue them all at once.
+			 */
+			for (i = 0; i < req_count && err == 0; i++) {
+				req = usb_ep_alloc_request(out_ep, GFP_ATOMIC);
+				if (req) {
+					req->buf = kzalloc(req_buf_size,
+							GFP_ATOMIC);
+					if (req->buf) {
+						req->length = req_buf_size;
+						req->context = audio;
+						req->complete =
+							f_audio_complete;
+						err = usb_ep_queue(out_ep,
+							req, GFP_ATOMIC);
+						if (err)
+							ERROR(cdev,
+							"%s queue req: %d\n",
+							out_ep->name, err);
+					} else
+						err = -ENOMEM;
+				} else
+					err = -ENOMEM;
+			}
+
+		} else {
+			struct f_audio_buf *copy_buf = audio->copy_buf;
+			if (copy_buf) {
+				list_add_tail(&copy_buf->list,
+						&audio->play_queue);
+				schedule_work(&audio->playback_work);
+			}
+		}
+	}
+
+	return err;
+}
+
+static void f_audio_disable(struct usb_function *f)
+{
+	return;
+}
+
+/*-------------------------------------------------------------------------*/
+
+static void f_audio_build_desc(struct f_audio *audio)
+{
+	struct gaudio *card = &audio->card;
+	u8 *sam_freq;
+	int rate;
+
+	/* Set channel numbers */
+	input_terminal_desc.bNrChannels = u_audio_get_playback_channels(card);
+	as_type_i_desc.bNrChannels = u_audio_get_playback_channels(card);
+
+	/* Set sample rates */
+	rate = u_audio_get_playback_rate(card);
+	sam_freq = as_type_i_desc.tSamFreq[0];
+	memcpy(sam_freq, &rate, 3);
+
+	/* Todo: Set Sample bits and other parameters */
+
+	return;
+}
+
+/* audio function driver setup/binding */
+static int __init
+f_audio_bind(struct usb_configuration *c, struct usb_function *f)
+{
+	struct usb_composite_dev *cdev = c->cdev;
+	struct f_audio		*audio = func_to_audio(f);
+	int			status;
+	struct usb_ep		*ep;
+
+	f_audio_build_desc(audio);
+
+	/* allocate instance-specific interface IDs, and patch descriptors */
+	status = usb_interface_id(c, f);
+	if (status < 0)
+		goto fail;
+	ac_interface_desc.bInterfaceNumber = status;
+
+	status = usb_interface_id(c, f);
+	if (status < 0)
+		goto fail;
+	as_interface_alt_0_desc.bInterfaceNumber = status;
+	as_interface_alt_1_desc.bInterfaceNumber = status;
+
+	status = -ENODEV;
+
+	/* allocate instance-specific endpoints */
+	ep = usb_ep_autoconfig(cdev->gadget, &as_out_ep_desc);
+	if (!ep)
+		goto fail;
+	audio->out_ep = ep;
+	ep->driver_data = cdev;	/* claim */
+
+	status = -ENOMEM;
+
+	/* support all relevant hardware speeds... we expect that when
+	 * hardware is dual speed, all bulk-capable endpoints work at
+	 * both speeds
+	 */
+
+	/* copy descriptors, and track endpoint copies */
+	if (gadget_is_dualspeed(c->cdev->gadget)) {
+		c->highspeed = true;
+		f->hs_descriptors = usb_copy_descriptors(f_audio_desc);
+	} else
+		f->descriptors = usb_copy_descriptors(f_audio_desc);
+
+	return 0;
+
+fail:
+
+	return status;
+}
+
+static void
+f_audio_unbind(struct usb_configuration *c, struct usb_function *f)
+{
+	struct f_audio		*audio = func_to_audio(f);
+
+	usb_free_descriptors(f->descriptors);
+	kfree(audio);
+}
+
+/*-------------------------------------------------------------------------*/
+
+/* Todo: add more control selectors dynamically */
+int __init control_selector_init(struct f_audio *audio)
+{
+	INIT_LIST_HEAD(&audio->cs);
+	list_add(&feature_unit.list, &audio->cs);
+
+	INIT_LIST_HEAD(&feature_unit.control);
+	list_add(&mute_control.list, &feature_unit.control);
+	list_add(&volume_control.list, &feature_unit.control);
+
+	volume_control.data[_CUR] = 0xffc0;
+	volume_control.data[_MIN] = 0xe3a0;
+	volume_control.data[_MAX] = 0xfff0;
+	volume_control.data[_RES] = 0x0030;
+
+	return 0;
+}
+
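For reference, the volume values above read most naturally if one assumes the USB Audio Class 1.0 convention of signed 16-bit values in 1/256 dB units: CUR -0.25 dB, MIN -28.375 dB, MAX -0.0625 dB, and a 0.1875 dB step.  A small conversion sketch (not part of the patch):

#include <stdio.h>

/* sketch: decode a UAC1-style volume word, assuming 1/256 dB units */
static double vol_db(unsigned int raw)
{
	return (double)(short)(raw & 0xffff) / 256.0;
}

int main(void)
{
	printf("CUR %.4f dB\n", vol_db(0xffc0));	/* -0.2500  */
	printf("MIN %.4f dB\n", vol_db(0xe3a0));	/* -28.3750 */
	printf("MAX %.4f dB\n", vol_db(0xfff0));	/* -0.0625  */
	printf("RES %.4f dB\n", vol_db(0x0030));	/*  0.1875  */
	return 0;
}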
+/**
+ * audio_bind_config - add USB audio function to a configuration
+ * @c: the configuration to support the USB audio function
+ * Context: single threaded during gadget setup
+ *
+ * Returns zero on success, else negative errno.
+ */
+int __init audio_bind_config(struct usb_configuration *c)
+{
+	struct f_audio *audio;
+	int status;
+
+	/* allocate and initialize one new instance */
+	audio = kzalloc(sizeof *audio, GFP_KERNEL);
+	if (!audio)
+		return -ENOMEM;
+
+	audio->card.func.name = "g_audio";
+	audio->card.gadget = c->cdev->gadget;
+
+	INIT_LIST_HEAD(&audio->play_queue);
+	spin_lock_init(&audio->lock);
+
+	/* set up ALSA audio devices */
+	status = gaudio_setup(&audio->card);
+	if (status < 0)
+		goto setup_fail;
+
+	audio->card.func.strings = audio_strings;
+	audio->card.func.bind = f_audio_bind;
+	audio->card.func.unbind = f_audio_unbind;
+	audio->card.func.set_alt = f_audio_set_alt;
+	audio->card.func.setup = f_audio_setup;
+	audio->card.func.disable = f_audio_disable;
+	audio->out_desc = &as_out_ep_desc;
+
+	control_selector_init(audio);
+
+	INIT_WORK(&audio->playback_work, f_audio_playback_work);
+
+	status = usb_add_function(c, &audio->card.func);
+	if (status)
+		goto add_fail;
+
+	INFO(c->cdev, "audio_buf_size %d, req_buf_size %d, req_count %d\n",
+		audio_buf_size, req_buf_size, req_count);
+
+	return status;
+
+add_fail:
+	gaudio_cleanup(&audio->card);
+setup_fail:
+	kfree(audio);
+	return status;
+}
diff --git a/drivers/usb/gadget/f_rndis.c b/drivers/usb/gadget/f_rndis.c
index 3279a47..424a37c 100644
--- a/drivers/usb/gadget/f_rndis.c
+++ b/drivers/usb/gadget/f_rndis.c
@@ -475,7 +475,9 @@
 		if (rndis->port.in_ep->driver_data) {
 			DBG(cdev, "reset rndis\n");
 			gether_disconnect(&rndis->port);
-		} else {
+		}
+
+		if (!rndis->port.in) {
 			DBG(cdev, "init rndis\n");
 			rndis->port.in = ep_choose(cdev->gadget,
 					rndis->hs.in, rndis->fs.in);
diff --git a/drivers/usb/gadget/file_storage.c b/drivers/usb/gadget/file_storage.c
index 381a53b..1e6aa50 100644
--- a/drivers/usb/gadget/file_storage.c
+++ b/drivers/usb/gadget/file_storage.c
@@ -248,6 +248,8 @@
 #include <linux/freezer.h>
 #include <linux/utsname.h>
 
+#include <asm/unaligned.h>
+
 #include <linux/usb/ch9.h>
 #include <linux/usb/gadget.h>
 
@@ -799,29 +801,9 @@
 
 /* Routines for unaligned data access */
 
-static u16 get_be16(u8 *buf)
+static u32 get_unaligned_be24(u8 *buf)
 {
-	return ((u16) buf[0] << 8) | ((u16) buf[1]);
-}
-
-static u32 get_be32(u8 *buf)
-{
-	return ((u32) buf[0] << 24) | ((u32) buf[1] << 16) |
-			((u32) buf[2] << 8) | ((u32) buf[3]);
-}
-
-static void put_be16(u8 *buf, u16 val)
-{
-	buf[0] = val >> 8;
-	buf[1] = val;
-}
-
-static void put_be32(u8 *buf, u32 val)
-{
-	buf[0] = val >> 24;
-	buf[1] = val >> 16;
-	buf[2] = val >> 8;
-	buf[3] = val & 0xff;
+	return 0xffffff & (u32) get_unaligned_be32(buf - 1);
 }
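The replacement helper reads 32 bits starting one byte before buf and masks off the top byte, which is only safe because the callers in this file pass &fsg->cmnd[1], so buf - 1 still lands inside the SCSI command buffer.  For comparison, a straightforward open-coded sketch (not part of the patch):

static u32 get_be24_sketch(const u8 *buf)
{
	/* plain 24-bit big-endian read, no access before buf */
	return ((u32)buf[0] << 16) | ((u32)buf[1] << 8) | buf[2];
}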
 
 
@@ -1582,9 +1564,9 @@
 	/* Get the starting Logical Block Address and check that it's
 	 * not too big */
 	if (fsg->cmnd[0] == SC_READ_6)
-		lba = (fsg->cmnd[1] << 16) | get_be16(&fsg->cmnd[2]);
+		lba = get_unaligned_be24(&fsg->cmnd[1]);
 	else {
-		lba = get_be32(&fsg->cmnd[2]);
+		lba = get_unaligned_be32(&fsg->cmnd[2]);
 
 		/* We allow DPO (Disable Page Out = don't save data in the
 		 * cache) and FUA (Force Unit Access = don't read from the
@@ -1717,9 +1699,9 @@
 	/* Get the starting Logical Block Address and check that it's
 	 * not too big */
 	if (fsg->cmnd[0] == SC_WRITE_6)
-		lba = (fsg->cmnd[1] << 16) | get_be16(&fsg->cmnd[2]);
+		lba = get_unaligned_be24(&fsg->cmnd[1]);
 	else {
-		lba = get_be32(&fsg->cmnd[2]);
+		lba = get_unaligned_be32(&fsg->cmnd[2]);
 
 		/* We allow DPO (Disable Page Out = don't save data in the
 		 * cache) and FUA (Force Unit Access = write directly to the
@@ -1940,7 +1922,7 @@
 
 	/* Get the starting Logical Block Address and check that it's
 	 * not too big */
-	lba = get_be32(&fsg->cmnd[2]);
+	lba = get_unaligned_be32(&fsg->cmnd[2]);
 	if (lba >= curlun->num_sectors) {
 		curlun->sense_data = SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
 		return -EINVAL;
@@ -1953,7 +1935,7 @@
 		return -EINVAL;
 	}
 
-	verification_length = get_be16(&fsg->cmnd[7]);
+	verification_length = get_unaligned_be16(&fsg->cmnd[7]);
 	if (unlikely(verification_length == 0))
 		return -EIO;		// No default reply
 
@@ -2103,7 +2085,7 @@
 	memset(buf, 0, 18);
 	buf[0] = valid | 0x70;			// Valid, current error
 	buf[2] = SK(sd);
-	put_be32(&buf[3], sdinfo);		// Sense information
+	put_unaligned_be32(sdinfo, &buf[3]);	/* Sense information */
 	buf[7] = 18 - 8;			// Additional sense length
 	buf[12] = ASC(sd);
 	buf[13] = ASCQ(sd);
@@ -2114,7 +2096,7 @@
 static int do_read_capacity(struct fsg_dev *fsg, struct fsg_buffhd *bh)
 {
 	struct lun	*curlun = fsg->curlun;
-	u32		lba = get_be32(&fsg->cmnd[2]);
+	u32		lba = get_unaligned_be32(&fsg->cmnd[2]);
 	int		pmi = fsg->cmnd[8];
 	u8		*buf = (u8 *) bh->buf;
 
@@ -2124,8 +2106,9 @@
 		return -EINVAL;
 	}
 
-	put_be32(&buf[0], curlun->num_sectors - 1);	// Max logical block
-	put_be32(&buf[4], 512);				// Block length
+	put_unaligned_be32(curlun->num_sectors - 1, &buf[0]);
+						/* Max logical block */
+	put_unaligned_be32(512, &buf[4]);	/* Block length */
 	return 8;
 }
 
@@ -2144,7 +2127,7 @@
 		dest[0] = 0;		/* Reserved */
 	} else {
 		/* Absolute sector */
-		put_be32(dest, addr);
+		put_unaligned_be32(addr, dest);
 	}
 }
 
@@ -2152,7 +2135,7 @@
 {
 	struct lun	*curlun = fsg->curlun;
 	int		msf = fsg->cmnd[1] & 0x02;
-	u32		lba = get_be32(&fsg->cmnd[2]);
+	u32		lba = get_unaligned_be32(&fsg->cmnd[2]);
 	u8		*buf = (u8 *) bh->buf;
 
 	if ((fsg->cmnd[1] & ~0x02) != 0) {		/* Mask away MSF */
@@ -2252,10 +2235,13 @@
 			buf[2] = 0x04;	// Write cache enable,
 					// Read cache not disabled
 					// No cache retention priorities
-			put_be16(&buf[4], 0xffff);  // Don't disable prefetch
-					// Minimum prefetch = 0
-			put_be16(&buf[8], 0xffff);  // Maximum prefetch
-			put_be16(&buf[10], 0xffff); // Maximum prefetch ceiling
+			put_unaligned_be16(0xffff, &buf[4]);
+					/* Don't disable prefetch */
+					/* Minimum prefetch = 0 */
+			put_unaligned_be16(0xffff, &buf[8]);
+					/* Maximum prefetch */
+			put_unaligned_be16(0xffff, &buf[10]);
+					/* Maximum prefetch ceiling */
 		}
 		buf += 12;
 	}
@@ -2272,7 +2258,7 @@
 	if (mscmnd == SC_MODE_SENSE_6)
 		buf0[0] = len - 1;
 	else
-		put_be16(buf0, len - 2);
+		put_unaligned_be16(len - 2, buf0);
 	return len;
 }
 
@@ -2360,9 +2346,10 @@
 	buf[3] = 8;		// Only the Current/Maximum Capacity Descriptor
 	buf += 4;
 
-	put_be32(&buf[0], curlun->num_sectors);		// Number of blocks
-	put_be32(&buf[4], 512);				// Block length
-	buf[4] = 0x02;					// Current capacity
+	put_unaligned_be32(curlun->num_sectors, &buf[0]);
+						/* Number of blocks */
+	put_unaligned_be32(512, &buf[4]);	/* Block length */
+	buf[4] = 0x02;				/* Current capacity */
 	return 12;
 }
 
@@ -2882,7 +2869,7 @@
 		break;
 
 	case SC_MODE_SELECT_10:
-		fsg->data_size_from_cmnd = get_be16(&fsg->cmnd[7]);
+		fsg->data_size_from_cmnd = get_unaligned_be16(&fsg->cmnd[7]);
 		if ((reply = check_command(fsg, 10, DATA_DIR_FROM_HOST,
 				(1<<1) | (3<<7), 0,
 				"MODE SELECT(10)")) == 0)
@@ -2898,7 +2885,7 @@
 		break;
 
 	case SC_MODE_SENSE_10:
-		fsg->data_size_from_cmnd = get_be16(&fsg->cmnd[7]);
+		fsg->data_size_from_cmnd = get_unaligned_be16(&fsg->cmnd[7]);
 		if ((reply = check_command(fsg, 10, DATA_DIR_TO_HOST,
 				(1<<1) | (1<<2) | (3<<7), 0,
 				"MODE SENSE(10)")) == 0)
@@ -2923,7 +2910,8 @@
 		break;
 
 	case SC_READ_10:
-		fsg->data_size_from_cmnd = get_be16(&fsg->cmnd[7]) << 9;
+		fsg->data_size_from_cmnd =
+				get_unaligned_be16(&fsg->cmnd[7]) << 9;
 		if ((reply = check_command(fsg, 10, DATA_DIR_TO_HOST,
 				(1<<1) | (0xf<<2) | (3<<7), 1,
 				"READ(10)")) == 0)
@@ -2931,7 +2919,8 @@
 		break;
 
 	case SC_READ_12:
-		fsg->data_size_from_cmnd = get_be32(&fsg->cmnd[6]) << 9;
+		fsg->data_size_from_cmnd =
+				get_unaligned_be32(&fsg->cmnd[6]) << 9;
 		if ((reply = check_command(fsg, 12, DATA_DIR_TO_HOST,
 				(1<<1) | (0xf<<2) | (0xf<<6), 1,
 				"READ(12)")) == 0)
@@ -2949,7 +2938,7 @@
 	case SC_READ_HEADER:
 		if (!mod_data.cdrom)
 			goto unknown_cmnd;
-		fsg->data_size_from_cmnd = get_be16(&fsg->cmnd[7]);
+		fsg->data_size_from_cmnd = get_unaligned_be16(&fsg->cmnd[7]);
 		if ((reply = check_command(fsg, 10, DATA_DIR_TO_HOST,
 				(3<<7) | (0x1f<<1), 1,
 				"READ HEADER")) == 0)
@@ -2959,7 +2948,7 @@
 	case SC_READ_TOC:
 		if (!mod_data.cdrom)
 			goto unknown_cmnd;
-		fsg->data_size_from_cmnd = get_be16(&fsg->cmnd[7]);
+		fsg->data_size_from_cmnd = get_unaligned_be16(&fsg->cmnd[7]);
 		if ((reply = check_command(fsg, 10, DATA_DIR_TO_HOST,
 				(7<<6) | (1<<1), 1,
 				"READ TOC")) == 0)
@@ -2967,7 +2956,7 @@
 		break;
 
 	case SC_READ_FORMAT_CAPACITIES:
-		fsg->data_size_from_cmnd = get_be16(&fsg->cmnd[7]);
+		fsg->data_size_from_cmnd = get_unaligned_be16(&fsg->cmnd[7]);
 		if ((reply = check_command(fsg, 10, DATA_DIR_TO_HOST,
 				(3<<7), 1,
 				"READ FORMAT CAPACITIES")) == 0)
@@ -3025,7 +3014,8 @@
 		break;
 
 	case SC_WRITE_10:
-		fsg->data_size_from_cmnd = get_be16(&fsg->cmnd[7]) << 9;
+		fsg->data_size_from_cmnd =
+				get_unaligned_be16(&fsg->cmnd[7]) << 9;
 		if ((reply = check_command(fsg, 10, DATA_DIR_FROM_HOST,
 				(1<<1) | (0xf<<2) | (3<<7), 1,
 				"WRITE(10)")) == 0)
@@ -3033,7 +3023,8 @@
 		break;
 
 	case SC_WRITE_12:
-		fsg->data_size_from_cmnd = get_be32(&fsg->cmnd[6]) << 9;
+		fsg->data_size_from_cmnd =
+				get_unaligned_be32(&fsg->cmnd[6]) << 9;
 		if ((reply = check_command(fsg, 12, DATA_DIR_FROM_HOST,
 				(1<<1) | (0xf<<2) | (0xf<<6), 1,
 				"WRITE(12)")) == 0)
diff --git a/drivers/usb/gadget/fsl_mx3_udc.c b/drivers/usb/gadget/fsl_mx3_udc.c
new file mode 100644
index 0000000..4bc2bf3
--- /dev/null
+++ b/drivers/usb/gadget/fsl_mx3_udc.c
@@ -0,0 +1,95 @@
+/*
+ * Copyright (C) 2009
+ * Guennadi Liakhovetski, DENX Software Engineering, <lg@denx.de>
+ *
+ * Description:
+ * Helper routines for i.MX3x SoCs from Freescale, needed by the fsl_usb2_udc.c
+ * driver to function correctly on these systems.
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ */
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/fsl_devices.h>
+#include <linux/platform_device.h>
+
+static struct clk *mxc_ahb_clk;
+static struct clk *mxc_usb_clk;
+
+int fsl_udc_clk_init(struct platform_device *pdev)
+{
+	struct fsl_usb2_platform_data *pdata;
+	unsigned long freq;
+	int ret;
+
+	pdata = pdev->dev.platform_data;
+
+	mxc_ahb_clk = clk_get(&pdev->dev, "usb_ahb");
+	if (IS_ERR(mxc_ahb_clk))
+		return PTR_ERR(mxc_ahb_clk);
+
+	ret = clk_enable(mxc_ahb_clk);
+	if (ret < 0) {
+		dev_err(&pdev->dev, "clk_enable(\"usb_ahb\") failed\n");
+		goto eenahb;
+	}
+
+	/* make sure USB_CLK is running at 60 MHz +/- 1000 Hz */
+	mxc_usb_clk = clk_get(&pdev->dev, "usb");
+	if (IS_ERR(mxc_usb_clk)) {
+		dev_err(&pdev->dev, "clk_get(\"usb\") failed\n");
+		ret = PTR_ERR(mxc_usb_clk);
+		goto egusb;
+	}
+
+	freq = clk_get_rate(mxc_usb_clk);
+	if (pdata->phy_mode != FSL_USB2_PHY_ULPI &&
+	    (freq < 59999000 || freq > 60001000)) {
+		dev_err(&pdev->dev, "USB_CLK=%lu, should be 60MHz\n", freq);
+		goto eclkrate;
+	}
+
+	ret = clk_enable(mxc_usb_clk);
+	if (ret < 0) {
+		dev_err(&pdev->dev, "clk_enable(\"usb_clk\") failed\n");
+		goto eenusb;
+	}
+
+	return 0;
+
+eenusb:
+eclkrate:
+	clk_put(mxc_usb_clk);
+	mxc_usb_clk = NULL;
+egusb:
+	clk_disable(mxc_ahb_clk);
+eenahb:
+	clk_put(mxc_ahb_clk);
+	return ret;
+}
+
+void fsl_udc_clk_finalize(struct platform_device *pdev)
+{
+	struct fsl_usb2_platform_data *pdata = pdev->dev.platform_data;
+
+	/* ULPI transceivers don't need usbpll */
+	if (pdata->phy_mode == FSL_USB2_PHY_ULPI) {
+		clk_disable(mxc_usb_clk);
+		clk_put(mxc_usb_clk);
+		mxc_usb_clk = NULL;
+	}
+}
+
+void fsl_udc_clk_release(void)
+{
+	if (mxc_usb_clk) {
+		clk_disable(mxc_usb_clk);
+		clk_put(mxc_usb_clk);
+	}
+	clk_disable(mxc_ahb_clk);
+	clk_put(mxc_ahb_clk);
+}
diff --git a/drivers/usb/gadget/fsl_usb2_udc.c b/drivers/usb/gadget/fsl_udc_core.c
similarity index 98%
rename from drivers/usb/gadget/fsl_usb2_udc.c
rename to drivers/usb/gadget/fsl_udc_core.c
index 9d7b95d..42a74b8 100644
--- a/drivers/usb/gadget/fsl_usb2_udc.c
+++ b/drivers/usb/gadget/fsl_udc_core.c
@@ -38,6 +38,7 @@
 #include <linux/platform_device.h>
 #include <linux/fsl_devices.h>
 #include <linux/dmapool.h>
+#include <linux/delay.h>
 
 #include <asm/byteorder.h>
 #include <asm/io.h>
@@ -57,7 +58,9 @@
 static const char driver_desc[] = DRIVER_DESC;
 
 static struct usb_dr_device *dr_regs;
+#ifndef CONFIG_ARCH_MXC
 static struct usb_sys_interface *usb_sys_regs;
+#endif
 
 /* it is initialized in probe()  */
 static struct fsl_udc *udc_controller = NULL;
@@ -174,10 +177,34 @@
 
 static int dr_controller_setup(struct fsl_udc *udc)
 {
-	unsigned int tmp = 0, portctrl = 0, ctrl = 0;
+	unsigned int tmp, portctrl;
+#ifndef CONFIG_ARCH_MXC
+	unsigned int ctrl;
+#endif
 	unsigned long timeout;
 #define FSL_UDC_RESET_TIMEOUT 1000
 
+	/* Config PHY interface */
+	portctrl = fsl_readl(&dr_regs->portsc1);
+	portctrl &= ~(PORTSCX_PHY_TYPE_SEL | PORTSCX_PORT_WIDTH);
+	switch (udc->phy_mode) {
+	case FSL_USB2_PHY_ULPI:
+		portctrl |= PORTSCX_PTS_ULPI;
+		break;
+	case FSL_USB2_PHY_UTMI_WIDE:
+		portctrl |= PORTSCX_PTW_16BIT;
+		/* fall through */
+	case FSL_USB2_PHY_UTMI:
+		portctrl |= PORTSCX_PTS_UTMI;
+		break;
+	case FSL_USB2_PHY_SERIAL:
+		portctrl |= PORTSCX_PTS_FSLS;
+		break;
+	default:
+		return -EINVAL;
+	}
+	fsl_writel(portctrl, &dr_regs->portsc1);
+
 	/* Stop and reset the usb controller */
 	tmp = fsl_readl(&dr_regs->usbcmd);
 	tmp &= ~USB_CMD_RUN_STOP;
@@ -215,31 +242,12 @@
 		udc->ep_qh, (int)tmp,
 		fsl_readl(&dr_regs->endpointlistaddr));
 
-	/* Config PHY interface */
-	portctrl = fsl_readl(&dr_regs->portsc1);
-	portctrl &= ~(PORTSCX_PHY_TYPE_SEL | PORTSCX_PORT_WIDTH);
-	switch (udc->phy_mode) {
-	case FSL_USB2_PHY_ULPI:
-		portctrl |= PORTSCX_PTS_ULPI;
-		break;
-	case FSL_USB2_PHY_UTMI_WIDE:
-		portctrl |= PORTSCX_PTW_16BIT;
-		/* fall through */
-	case FSL_USB2_PHY_UTMI:
-		portctrl |= PORTSCX_PTS_UTMI;
-		break;
-	case FSL_USB2_PHY_SERIAL:
-		portctrl |= PORTSCX_PTS_FSLS;
-		break;
-	default:
-		return -EINVAL;
-	}
-	fsl_writel(portctrl, &dr_regs->portsc1);
-
 	/* Config control enable i/o output, cpu endian register */
+#ifndef CONFIG_ARCH_MXC
 	ctrl = __raw_readl(&usb_sys_regs->control);
 	ctrl |= USB_CTRL_IOENB;
 	__raw_writel(ctrl, &usb_sys_regs->control);
+#endif
 
 #if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
 	/* Turn on cache snooping hardware, since some PowerPC platforms
@@ -2043,6 +2051,7 @@
 	size -= t;
 	next += t;
 
+#ifndef CONFIG_ARCH_MXC
 	tmp_reg = usb_sys_regs->snoop1;
 	t = scnprintf(next, size, "Snoop1 Reg : = [0x%x]\n\n", tmp_reg);
 	size -= t;
@@ -2053,6 +2062,7 @@
 			tmp_reg);
 	size -= t;
 	next += t;
+#endif
 
 	/* ------fsl_udc, fsl_ep, fsl_request structure information ----- */
 	ep = &udc->eps[0];
@@ -2263,14 +2273,21 @@
 		goto err_kfree;
 	}
 
-	dr_regs = ioremap(res->start, res->end - res->start + 1);
+	dr_regs = ioremap(res->start, resource_size(res));
 	if (!dr_regs) {
 		ret = -ENOMEM;
 		goto err_release_mem_region;
 	}
 
+#ifndef CONFIG_ARCH_MXC
 	usb_sys_regs = (struct usb_sys_interface *)
 			((u32)dr_regs + USB_DR_SYS_OFFSET);
+#endif
+
+	/* Initialize USB clocks */
+	ret = fsl_udc_clk_init(pdev);
+	if (ret < 0)
+		goto err_iounmap_noclk;
 
 	/* Read Device Controller Capability Parameters register */
 	dccparams = fsl_readl(&dr_regs->dccparams);
@@ -2308,6 +2325,8 @@
 	 * leave usbintr reg untouched */
 	dr_controller_setup(udc_controller);
 
+	fsl_udc_clk_finalize(pdev);
+
 	/* Setup gadget structure */
 	udc_controller->gadget.ops = &fsl_gadget_ops;
 	udc_controller->gadget.is_dualspeed = 1;
@@ -2362,6 +2381,8 @@
 err_free_irq:
 	free_irq(udc_controller->irq, udc_controller);
 err_iounmap:
+	fsl_udc_clk_release();
+err_iounmap_noclk:
 	iounmap(dr_regs);
 err_release_mem_region:
 	release_mem_region(res->start, res->end - res->start + 1);
@@ -2384,6 +2405,8 @@
 		return -ENODEV;
 	udc_controller->done = &done;
 
+	fsl_udc_clk_release();
+
 	/* DR has been stopped in usb_gadget_unregister_driver() */
 	remove_proc_file();
 
diff --git a/drivers/usb/gadget/fsl_usb2_udc.h b/drivers/usb/gadget/fsl_usb2_udc.h
index e63ef12..20aecee 100644
--- a/drivers/usb/gadget/fsl_usb2_udc.h
+++ b/drivers/usb/gadget/fsl_usb2_udc.h
@@ -563,4 +563,22 @@
 					* 2 + ((windex & USB_DIR_IN) ? 1 : 0))
 #define get_pipe_by_ep(EP)	(ep_index(EP) * 2 + ep_is_in(EP))
 
+struct platform_device;
+#ifdef CONFIG_ARCH_MXC
+int fsl_udc_clk_init(struct platform_device *pdev);
+void fsl_udc_clk_finalize(struct platform_device *pdev);
+void fsl_udc_clk_release(void);
+#else
+static inline int fsl_udc_clk_init(struct platform_device *pdev)
+{
+	return 0;
+}
+static inline void fsl_udc_clk_finalize(struct platform_device *pdev)
+{
+}
+static inline void fsl_udc_clk_release(void)
+{
+}
+#endif
+
 #endif
diff --git a/drivers/usb/gadget/gadget_chips.h b/drivers/usb/gadget/gadget_chips.h
index ec6d439..8e0e9a0 100644
--- a/drivers/usb/gadget/gadget_chips.h
+++ b/drivers/usb/gadget/gadget_chips.h
@@ -137,6 +137,12 @@
 #define gadget_is_musbhdrc(g)	0
 #endif
 
+#ifdef CONFIG_USB_GADGET_LANGWELL
+#define gadget_is_langwell(g)	(!strcmp("langwell_udc", (g)->name))
+#else
+#define gadget_is_langwell(g)	0
+#endif
+
 /* from Montavista kernel (?) */
 #ifdef CONFIG_USB_GADGET_MPC8272
 #define gadget_is_mpc8272(g)	!strcmp("mpc8272_udc", (g)->name)
@@ -231,6 +237,8 @@
 		return 0x22;
 	else if (gadget_is_ci13xxx(gadget))
 		return 0x23;
+	else if (gadget_is_langwell(gadget))
+		return 0x24;
 	return -ENOENT;
 }
 
diff --git a/drivers/usb/gadget/goku_udc.c b/drivers/usb/gadget/goku_udc.c
index de010c9..112bb40 100644
--- a/drivers/usb/gadget/goku_udc.c
+++ b/drivers/usb/gadget/goku_udc.c
@@ -110,10 +110,10 @@
 		return -EINVAL;
 	if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)
 		return -ESHUTDOWN;
-	if (ep->num != (desc->bEndpointAddress & 0x0f))
+	if (ep->num != usb_endpoint_num(desc))
 		return -EINVAL;
 
-	switch (desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) {
+	switch (usb_endpoint_type(desc)) {
 	case USB_ENDPOINT_XFER_BULK:
 	case USB_ENDPOINT_XFER_INT:
 		break;
@@ -142,7 +142,7 @@
 	/* ep1/ep2 dma direction is chosen early; it works in the other
 	 * direction, with pio.  be cautious with out-dma.
 	 */
-	ep->is_in = (USB_DIR_IN & desc->bEndpointAddress) != 0;
+	ep->is_in = usb_endpoint_dir_in(desc);
 	if (ep->is_in) {
 		mode |= 1;
 		ep->dma = (use_dma != 0) && (ep->num == UDC_MSTRD_ENDPOINT);
diff --git a/drivers/usb/gadget/imx_udc.c b/drivers/usb/gadget/imx_udc.c
index 168658b..c52a681 100644
--- a/drivers/usb/gadget/imx_udc.c
+++ b/drivers/usb/gadget/imx_udc.c
@@ -415,6 +415,13 @@
 	u8	*buf;
 	int	length, count, temp;
 
+	if (unlikely(__raw_readl(imx_ep->imx_usb->base +
+				 USB_EP_STAT(EP_NO(imx_ep))) & EPSTAT_ZLPS)) {
+		D_TRX(imx_ep->imx_usb->dev, "<%s> zlp still queued in EP %s\n",
+			__func__, imx_ep->ep.name);
+		return -1;
+	}
+
 	buf = req->req.buf + req->req.actual;
 	prefetch(buf);
 
@@ -734,9 +741,12 @@
 {
 	struct imx_request *req;
 
+	if (!usb_ep)
+		return NULL;
+
 	req = kzalloc(sizeof *req, gfp_flags);
-	if (!req || !usb_ep)
-		return 0;
+	if (!req)
+		return NULL;
 
 	INIT_LIST_HEAD(&req->queue);
 	req->in_use = 0;
diff --git a/drivers/usb/gadget/inode.c b/drivers/usb/gadget/inode.c
index d20937f..7d33f50 100644
--- a/drivers/usb/gadget/inode.c
+++ b/drivers/usb/gadget/inode.c
@@ -384,9 +384,8 @@
 		return value;
 
 	/* halt any endpoint by doing a "wrong direction" i/o call */
-	if (data->desc.bEndpointAddress & USB_DIR_IN) {
-		if ((data->desc.bmAttributes & USB_ENDPOINT_XFERTYPE_MASK)
-				== USB_ENDPOINT_XFER_ISOC)
+	if (usb_endpoint_dir_in(&data->desc)) {
+		if (usb_endpoint_xfer_isoc(&data->desc))
 			return -EINVAL;
 		DBG (data->dev, "%s halt\n", data->name);
 		spin_lock_irq (&data->dev->lock);
@@ -428,9 +427,8 @@
 		return value;
 
 	/* halt any endpoint by doing a "wrong direction" i/o call */
-	if (!(data->desc.bEndpointAddress & USB_DIR_IN)) {
-		if ((data->desc.bmAttributes & USB_ENDPOINT_XFERTYPE_MASK)
-				== USB_ENDPOINT_XFER_ISOC)
+	if (!usb_endpoint_dir_in(&data->desc)) {
+		if (usb_endpoint_xfer_isoc(&data->desc))
 			return -EINVAL;
 		DBG (data->dev, "%s halt\n", data->name);
 		spin_lock_irq (&data->dev->lock);
@@ -691,7 +689,7 @@
 	struct ep_data		*epdata = iocb->ki_filp->private_data;
 	char			*buf;
 
-	if (unlikely(epdata->desc.bEndpointAddress & USB_DIR_IN))
+	if (unlikely(usb_endpoint_dir_in(&epdata->desc)))
 		return -EINVAL;
 
 	buf = kmalloc(iocb->ki_left, GFP_KERNEL);
@@ -711,7 +709,7 @@
 	size_t			len = 0;
 	int			i = 0;
 
-	if (unlikely(!(epdata->desc.bEndpointAddress & USB_DIR_IN)))
+	if (unlikely(!usb_endpoint_dir_in(&epdata->desc)))
 		return -EINVAL;
 
 	buf = kmalloc(iocb->ki_left, GFP_KERNEL);
diff --git a/drivers/usb/gadget/langwell_udc.c b/drivers/usb/gadget/langwell_udc.c
new file mode 100644
index 0000000..6829d59
--- /dev/null
+++ b/drivers/usb/gadget/langwell_udc.c
@@ -0,0 +1,3373 @@
+/*
+ * Intel Langwell USB Device Controller driver
+ * Copyright (C) 2008-2009, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ */
+
+
+/* #undef	DEBUG */
+/* #undef	VERBOSE */
+
+#if defined(CONFIG_USB_LANGWELL_OTG)
+#define	OTG_TRANSCEIVER
+#endif
+
+
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/dma-mapping.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/ioport.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/smp_lock.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/timer.h>
+#include <linux/list.h>
+#include <linux/interrupt.h>
+#include <linux/moduleparam.h>
+#include <linux/device.h>
+#include <linux/usb/ch9.h>
+#include <linux/usb/gadget.h>
+#include <linux/usb/otg.h>
+#include <linux/pm.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <asm/system.h>
+#include <asm/unaligned.h>
+
+#include "langwell_udc.h"
+
+
+#define	DRIVER_DESC		"Intel Langwell USB Device Controller driver"
+#define	DRIVER_VERSION		"16 May 2009"
+
+static const char driver_name[] = "langwell_udc";
+static const char driver_desc[] = DRIVER_DESC;
+
+
+/* controller device global variable */
+static struct langwell_udc	*the_controller;
+
+/* for endpoint 0 operations */
+static const struct usb_endpoint_descriptor
+langwell_ep0_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+	.bEndpointAddress =	0,
+	.bmAttributes =		USB_ENDPOINT_XFER_CONTROL,
+	.wMaxPacketSize =	EP0_MAX_PKT_SIZE,
+};
+
+
+/*-------------------------------------------------------------------------*/
+/* debugging */
+
+#ifdef	DEBUG
+#define	DBG(dev, fmt, args...) \
+	pr_debug("%s %s: " fmt , driver_name, \
+			pci_name(dev->pdev), ## args)
+#else
+#define	DBG(dev, fmt, args...) \
+	do { } while (0)
+#endif /* DEBUG */
+
+
+#ifdef	VERBOSE
+#define	VDBG DBG
+#else
+#define	VDBG(dev, fmt, args...) \
+	do { } while (0)
+#endif	/* VERBOSE */
+
+
+#define	ERROR(dev, fmt, args...) \
+	pr_err("%s %s: " fmt , driver_name, \
+			pci_name(dev->pdev), ## args)
+
+#define	WARNING(dev, fmt, args...) \
+	pr_warning("%s %s: " fmt , driver_name, \
+			pci_name(dev->pdev), ## args)
+
+#define	INFO(dev, fmt, args...) \
+	pr_info("%s %s: " fmt , driver_name, \
+			pci_name(dev->pdev), ## args)
+
+
+#ifdef	VERBOSE
+static inline void print_all_registers(struct langwell_udc *dev)
+{
+	int	i;
+
+	/* Capability Registers */
+	printk(KERN_DEBUG "Capability Registers (offset: "
+			"0x%04x, length: 0x%08x)\n",
+			CAP_REG_OFFSET,
+			(u32)sizeof(struct langwell_cap_regs));
+	printk(KERN_DEBUG "caplength=0x%02x\n",
+			readb(&dev->cap_regs->caplength));
+	printk(KERN_DEBUG "hciversion=0x%04x\n",
+			readw(&dev->cap_regs->hciversion));
+	printk(KERN_DEBUG "hcsparams=0x%08x\n",
+			readl(&dev->cap_regs->hcsparams));
+	printk(KERN_DEBUG "hccparams=0x%08x\n",
+			readl(&dev->cap_regs->hccparams));
+	printk(KERN_DEBUG "dciversion=0x%04x\n",
+			readw(&dev->cap_regs->dciversion));
+	printk(KERN_DEBUG "dccparams=0x%08x\n",
+			readl(&dev->cap_regs->dccparams));
+
+	/* Operational Registers */
+	printk(KERN_DEBUG "Operational Registers (offset: "
+			"0x%04x, length: 0x%08x)\n",
+			OP_REG_OFFSET,
+			(u32)sizeof(struct langwell_op_regs));
+	printk(KERN_DEBUG "extsts=0x%08x\n",
+			readl(&dev->op_regs->extsts));
+	printk(KERN_DEBUG "extintr=0x%08x\n",
+			readl(&dev->op_regs->extintr));
+	printk(KERN_DEBUG "usbcmd=0x%08x\n",
+			readl(&dev->op_regs->usbcmd));
+	printk(KERN_DEBUG "usbsts=0x%08x\n",
+			readl(&dev->op_regs->usbsts));
+	printk(KERN_DEBUG "usbintr=0x%08x\n",
+			readl(&dev->op_regs->usbintr));
+	printk(KERN_DEBUG "frindex=0x%08x\n",
+			readl(&dev->op_regs->frindex));
+	printk(KERN_DEBUG "ctrldssegment=0x%08x\n",
+			readl(&dev->op_regs->ctrldssegment));
+	printk(KERN_DEBUG "deviceaddr=0x%08x\n",
+			readl(&dev->op_regs->deviceaddr));
+	printk(KERN_DEBUG "endpointlistaddr=0x%08x\n",
+			readl(&dev->op_regs->endpointlistaddr));
+	printk(KERN_DEBUG "ttctrl=0x%08x\n",
+			readl(&dev->op_regs->ttctrl));
+	printk(KERN_DEBUG "burstsize=0x%08x\n",
+			readl(&dev->op_regs->burstsize));
+	printk(KERN_DEBUG "txfilltuning=0x%08x\n",
+			readl(&dev->op_regs->txfilltuning));
+	printk(KERN_DEBUG "txttfilltuning=0x%08x\n",
+			readl(&dev->op_regs->txttfilltuning));
+	printk(KERN_DEBUG "ic_usb=0x%08x\n",
+			readl(&dev->op_regs->ic_usb));
+	printk(KERN_DEBUG "ulpi_viewport=0x%08x\n",
+			readl(&dev->op_regs->ulpi_viewport));
+	printk(KERN_DEBUG "configflag=0x%08x\n",
+			readl(&dev->op_regs->configflag));
+	printk(KERN_DEBUG "portsc1=0x%08x\n",
+			readl(&dev->op_regs->portsc1));
+	printk(KERN_DEBUG "devlc=0x%08x\n",
+			readl(&dev->op_regs->devlc));
+	printk(KERN_DEBUG "otgsc=0x%08x\n",
+			readl(&dev->op_regs->otgsc));
+	printk(KERN_DEBUG "usbmode=0x%08x\n",
+			readl(&dev->op_regs->usbmode));
+	printk(KERN_DEBUG "endptnak=0x%08x\n",
+			readl(&dev->op_regs->endptnak));
+	printk(KERN_DEBUG "endptnaken=0x%08x\n",
+			readl(&dev->op_regs->endptnaken));
+	printk(KERN_DEBUG "endptsetupstat=0x%08x\n",
+			readl(&dev->op_regs->endptsetupstat));
+	printk(KERN_DEBUG "endptprime=0x%08x\n",
+			readl(&dev->op_regs->endptprime));
+	printk(KERN_DEBUG "endptflush=0x%08x\n",
+			readl(&dev->op_regs->endptflush));
+	printk(KERN_DEBUG "endptstat=0x%08x\n",
+			readl(&dev->op_regs->endptstat));
+	printk(KERN_DEBUG "endptcomplete=0x%08x\n",
+			readl(&dev->op_regs->endptcomplete));
+
+	for (i = 0; i < dev->ep_max / 2; i++) {
+		printk(KERN_DEBUG "endptctrl[%d]=0x%08x\n",
+				i, readl(&dev->op_regs->endptctrl[i]));
+	}
+}
+#endif /* VERBOSE */
+
+
+/*-------------------------------------------------------------------------*/
+
+#define	DIR_STRING(bAddress)	(((bAddress) & USB_DIR_IN) ? "in" : "out")
+
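+/* ep0 direction is tracked in dev->ep0_dir; other eps use the descriptor */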
+#define is_in(ep)	(((ep)->ep_num == 0) ? ((ep)->dev->ep0_dir == \
+			USB_DIR_IN) : ((ep)->desc->bEndpointAddress \
+			& USB_DIR_IN) == USB_DIR_IN)
+
+
+#ifdef	DEBUG
+static char *type_string(u8 bmAttributes)
+{
+	switch ((bmAttributes) & USB_ENDPOINT_XFERTYPE_MASK) {
+	case USB_ENDPOINT_XFER_BULK:
+		return "bulk";
+	case USB_ENDPOINT_XFER_ISOC:
+		return "iso";
+	case USB_ENDPOINT_XFER_INT:
+		return "int";
+	}
+
+	return "control";
+}
+#endif
+
+
+/* configure endpoint control registers */
+static void ep_reset(struct langwell_ep *ep, unsigned char ep_num,
+		unsigned char is_in, unsigned char ep_type)
+{
+	struct langwell_udc	*dev;
+	u32			endptctrl;
+
+	dev = ep->dev;
+	VDBG(dev, "---> %s()\n", __func__);
+
+	endptctrl = readl(&dev->op_regs->endptctrl[ep_num]);
+	if (is_in) {	/* TX */
+		if (ep_num)
+			endptctrl |= EPCTRL_TXR;
+		endptctrl |= EPCTRL_TXE;
+		endptctrl |= ep_type << EPCTRL_TXT_SHIFT;
+	} else {	/* RX */
+		if (ep_num)
+			endptctrl |= EPCTRL_RXR;
+		endptctrl |= EPCTRL_RXE;
+		endptctrl |= ep_type << EPCTRL_RXT_SHIFT;
+	}
+
+	writel(endptctrl, &dev->op_regs->endptctrl[ep_num]);
+
+	VDBG(dev, "<--- %s()\n", __func__);
+}
+
+
+/* reset ep0 dQH and endptctrl */
+static void ep0_reset(struct langwell_udc *dev)
+{
+	struct langwell_ep	*ep;
+	int			i;
+
+	VDBG(dev, "---> %s()\n", __func__);
+
+	/* ep0 in and out */
+	for (i = 0; i < 2; i++) {
+		ep = &dev->ep[i];
+		ep->dev = dev;
+
+		/* ep0 dQH */
+		ep->dqh = &dev->ep_dqh[i];
+
+		/* configure ep0 endpoint capabilities in dQH */
+		ep->dqh->dqh_ios = 1;
+		ep->dqh->dqh_mpl = EP0_MAX_PKT_SIZE;
+
+		/* FIXME: enable ep0-in HW zero length termination select */
+		if (is_in(ep))
+			ep->dqh->dqh_zlt = 0;
+		ep->dqh->dqh_mult = 0;
+
+		/* configure ep0 control registers */
+		ep_reset(&dev->ep[0], 0, i, USB_ENDPOINT_XFER_CONTROL);
+	}
+
+	VDBG(dev, "<--- %s()\n", __func__);
+	return;
+}
+
+
+/*-------------------------------------------------------------------------*/
+
+/* endpoints operations */
+
+/* configure endpoint, making it usable */
+static int langwell_ep_enable(struct usb_ep *_ep,
+		const struct usb_endpoint_descriptor *desc)
+{
+	struct langwell_udc	*dev;
+	struct langwell_ep	*ep;
+	u16			max = 0;
+	unsigned long		flags;
+	int			retval = 0;
+	unsigned char		zlt, ios = 0, mult = 0;
+
+	ep = container_of(_ep, struct langwell_ep, ep);
+	dev = ep->dev;
+	VDBG(dev, "---> %s()\n", __func__);
+
+	if (!_ep || !desc || ep->desc
+			|| desc->bDescriptorType != USB_DT_ENDPOINT)
+		return -EINVAL;
+
+	if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)
+		return -ESHUTDOWN;
+
+	max = le16_to_cpu(desc->wMaxPacketSize);
+
+	/*
+	 * disable HW zero length termination select
+	 * driver handles zero length packet through req->req.zero
+	 */
+	zlt = 1;
+
+	/*
+	 * sanity check type, direction, address, and then
+	 * initialize the endpoint capabilities fields in dQH
+	 */
+	switch (desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) {
+	case USB_ENDPOINT_XFER_CONTROL:
+		ios = 1;
+		break;
+	case USB_ENDPOINT_XFER_BULK:
+		if ((dev->gadget.speed == USB_SPEED_HIGH
+					&& max != 512)
+				|| (dev->gadget.speed == USB_SPEED_FULL
+					&& max > 64)) {
+			goto done;
+		}
+		break;
+	case USB_ENDPOINT_XFER_INT:
+		if (strstr(ep->ep.name, "-iso")) /* bulk is ok */
+			goto done;
+
+		switch (dev->gadget.speed) {
+		case USB_SPEED_HIGH:
+			if (max <= 1024)
+				break;
+		case USB_SPEED_FULL:
+			if (max <= 64)
+				break;
+		default:
+			if (max <= 8)
+				break;
+			goto done;
+		}
+		break;
+	case USB_ENDPOINT_XFER_ISOC:
+		if (strstr(ep->ep.name, "-bulk")
+				|| strstr(ep->ep.name, "-int"))
+			goto done;
+
+		switch (dev->gadget.speed) {
+		case USB_SPEED_HIGH:
+			if (max <= 1024)
+				break;
+		case USB_SPEED_FULL:
+			if (max <= 1023)
+				break;
+		default:
+			goto done;
+		}
+		/*
+		 * FIXME:
+		 * calculate transactions needed for high bandwidth iso
+		 */
+		mult = (unsigned char)(1 + ((max >> 11) & 0x03));
+		max = max & 0x8ff;	/* bit 0~10 */
+		/* 3 transactions at most */
+		if (mult > 3)
+			goto done;
+		break;
+	default:
+		goto done;
+	}
+
+	spin_lock_irqsave(&dev->lock, flags);
+
+	/* configure endpoint capabilities in dQH */
+	ep->dqh->dqh_ios = ios;
+	ep->dqh->dqh_mpl = cpu_to_le16(max);
+	ep->dqh->dqh_zlt = zlt;
+	ep->dqh->dqh_mult = mult;
+
+	ep->ep.maxpacket = max;
+	ep->desc = desc;
+	ep->stopped = 0;
+	ep->ep_num = desc->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK;
+
+	/* ep_type */
+	ep->ep_type = desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK;
+
+	/* configure endpoint control registers */
+	ep_reset(ep, ep->ep_num, is_in(ep), ep->ep_type);
+
+	DBG(dev, "enabled %s (ep%d%s-%s), max %04x\n",
+			_ep->name,
+			ep->ep_num,
+			DIR_STRING(desc->bEndpointAddress),
+			type_string(desc->bmAttributes),
+			max);
+
+	spin_unlock_irqrestore(&dev->lock, flags);
+done:
+	VDBG(dev, "<--- %s()\n", __func__);
+	return retval;
+}
+
+
+/*-------------------------------------------------------------------------*/
+
+/* retire a request */
+static void done(struct langwell_ep *ep, struct langwell_request *req,
+		int status)
+{
+	struct langwell_udc	*dev = ep->dev;
+	unsigned		stopped = ep->stopped;
+	struct langwell_dtd	*curr_dtd, *next_dtd;
+	int			i;
+
+	VDBG(dev, "---> %s()\n", __func__);
+
+	/* remove the req from ep->queue */
+	list_del_init(&req->queue);
+
+	if (req->req.status == -EINPROGRESS)
+		req->req.status = status;
+	else
+		status = req->req.status;
+
+	/* free dTD for the request */
+	next_dtd = req->head;
+	for (i = 0; i < req->dtd_count; i++) {
+		curr_dtd = next_dtd;
+		if (i != req->dtd_count - 1)
+			next_dtd = curr_dtd->next_dtd_virt;
+		dma_pool_free(dev->dtd_pool, curr_dtd, curr_dtd->dtd_dma);
+	}
+
+	if (req->mapped) {
+		dma_unmap_single(&dev->pdev->dev, req->req.dma, req->req.length,
+			is_in(ep) ? PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE);
+		req->req.dma = DMA_ADDR_INVALID;
+		req->mapped = 0;
+	} else
+		dma_sync_single_for_cpu(&dev->pdev->dev, req->req.dma,
+				req->req.length,
+				is_in(ep) ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
+
+	if (status != -ESHUTDOWN)
+		DBG(dev, "complete %s, req %p, stat %d, len %u/%u\n",
+			ep->ep.name, &req->req, status,
+			req->req.actual, req->req.length);
+
+	/* don't modify queue heads during completion callback */
+	ep->stopped = 1;
+
+	spin_unlock(&dev->lock);
+	/* complete routine from gadget driver */
+	if (req->req.complete)
+		req->req.complete(&ep->ep, &req->req);
+
+	spin_lock(&dev->lock);
+	ep->stopped = stopped;
+
+	VDBG(dev, "<--- %s()\n", __func__);
+}
+
+
+static void langwell_ep_fifo_flush(struct usb_ep *_ep);
+
+/* delete all endpoint requests, called with spinlock held */
+static void nuke(struct langwell_ep *ep, int status)
+{
+	/* called with spinlock held */
+	ep->stopped = 1;
+
+	/* endpoint fifo flush */
+	if (&ep->ep && ep->desc)
+		langwell_ep_fifo_flush(&ep->ep);
+
+	while (!list_empty(&ep->queue)) {
+		struct langwell_request	*req = NULL;
+		req = list_entry(ep->queue.next, struct langwell_request,
+				queue);
+		done(ep, req, status);
+	}
+}
+
+
+/*-------------------------------------------------------------------------*/
+
+/* endpoint is no longer usable */
+static int langwell_ep_disable(struct usb_ep *_ep)
+{
+	struct langwell_ep	*ep;
+	unsigned long		flags;
+	struct langwell_udc	*dev;
+	int			ep_num;
+	u32			endptctrl;
+
+	ep = container_of(_ep, struct langwell_ep, ep);
+	dev = ep->dev;
+	VDBG(dev, "---> %s()\n", __func__);
+
+	if (!_ep || !ep->desc)
+		return -EINVAL;
+
+	spin_lock_irqsave(&dev->lock, flags);
+
+	/* disable endpoint control register */
+	ep_num = ep->ep_num;
+	endptctrl = readl(&dev->op_regs->endptctrl[ep_num]);
+	if (is_in(ep))
+		endptctrl &= ~EPCTRL_TXE;
+	else
+		endptctrl &= ~EPCTRL_RXE;
+	writel(endptctrl, &dev->op_regs->endptctrl[ep_num]);
+
+	/* nuke all pending requests (does flush) */
+	nuke(ep, -ESHUTDOWN);
+
+	ep->desc = NULL;
+	ep->stopped = 1;
+
+	spin_unlock_irqrestore(&dev->lock, flags);
+
+	DBG(dev, "disabled %s\n", _ep->name);
+	VDBG(dev, "<--- %s()\n", __func__);
+
+	return 0;
+}
+
+
+/* allocate a request object to use with this endpoint */
+static struct usb_request *langwell_alloc_request(struct usb_ep *_ep,
+		gfp_t gfp_flags)
+{
+	struct langwell_ep	*ep;
+	struct langwell_udc	*dev;
+	struct langwell_request	*req = NULL;
+
+	if (!_ep)
+		return NULL;
+
+	ep = container_of(_ep, struct langwell_ep, ep);
+	dev = ep->dev;
+	VDBG(dev, "---> %s()\n", __func__);
+
+	req = kzalloc(sizeof(*req), gfp_flags);
+	if (!req)
+		return NULL;
+
+	req->req.dma = DMA_ADDR_INVALID;
+	INIT_LIST_HEAD(&req->queue);
+
+	VDBG(dev, "alloc request for %s\n", _ep->name);
+	VDBG(dev, "<--- %s()\n", __func__);
+	return &req->req;
+}
+
+
+/* free a request object */
+static void langwell_free_request(struct usb_ep *_ep,
+		struct usb_request *_req)
+{
+	struct langwell_ep	*ep;
+	struct langwell_udc	*dev;
+	struct langwell_request	*req = NULL;
+
+	ep = container_of(_ep, struct langwell_ep, ep);
+	dev = ep->dev;
+	VDBG(dev, "---> %s()\n", __func__);
+
+	if (!_ep || !_req)
+		return;
+
+	req = container_of(_req, struct langwell_request, req);
+	WARN_ON(!list_empty(&req->queue));
+
+	kfree(req);
+
+	VDBG(dev, "free request for %s\n", _ep->name);
+	VDBG(dev, "<--- %s()\n", __func__);
+}
+
+
+/*-------------------------------------------------------------------------*/
+
+/* queue dTD and PRIME endpoint */
+static int queue_dtd(struct langwell_ep *ep, struct langwell_request *req)
+{
+	u32			bit_mask, usbcmd, endptstat, dtd_dma;
+	u8			dtd_status;
+	int			i;
+	struct langwell_dqh	*dqh;
+	struct langwell_udc	*dev;
+
+	dev = ep->dev;
+	VDBG(dev, "---> %s()\n", __func__);
+
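+	/* dQH index: two entries per endpoint number, OUT (0) then IN (1) */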
+	i = ep->ep_num * 2 + is_in(ep);
+	dqh = &dev->ep_dqh[i];
+
+	if (ep->ep_num)
+		VDBG(dev, "%s\n", ep->name);
+	else
+		/* ep0 */
+		VDBG(dev, "%s-%s\n", ep->name, is_in(ep) ? "in" : "out");
+
+	VDBG(dev, "ep_dqh[%d] addr: 0x%08x\n", i, (u32)&(dev->ep_dqh[i]));
+
+	bit_mask = is_in(ep) ?
+		(1 << (ep->ep_num + 16)) : (1 << (ep->ep_num));
+
+	VDBG(dev, "bit_mask = 0x%08x\n", bit_mask);
+
+	/* check if the pipe is empty */
+	if (!(list_empty(&ep->queue))) {
+		/* add dTD to the end of linked list */
+		struct langwell_request	*lastreq;
+		lastreq = list_entry(ep->queue.prev,
+				struct langwell_request, queue);
+
+		lastreq->tail->dtd_next =
+			cpu_to_le32(req->head->dtd_dma & DTD_NEXT_MASK);
+
+		/* read prime bit, if 1 goto out */
+		if (readl(&dev->op_regs->endptprime) & bit_mask)
+			goto out;
+
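+		/*
+		 * ATDTW tripwire: retry until the bit survives the
+		 * endptstat read, so the snapshot is consistent
+		 */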
+		do {
+			/* set ATDTW bit in USBCMD */
+			usbcmd = readl(&dev->op_regs->usbcmd);
+			writel(usbcmd | CMD_ATDTW, &dev->op_regs->usbcmd);
+
+			/* read correct status bit */
+			endptstat = readl(&dev->op_regs->endptstat) & bit_mask;
+
+		} while (!(readl(&dev->op_regs->usbcmd) & CMD_ATDTW));
+
+		/* write ATDTW bit to 0 */
+		usbcmd = readl(&dev->op_regs->usbcmd);
+		writel(usbcmd & ~CMD_ATDTW, &dev->op_regs->usbcmd);
+
+		if (endptstat)
+			goto out;
+	}
+
+	/* write dQH next pointer and terminate bit to 0 */
+	dtd_dma = req->head->dtd_dma & DTD_NEXT_MASK;
+	dqh->dtd_next = cpu_to_le32(dtd_dma);
+
+	/* clear active and halt bit */
+	dtd_status = (u8) ~(DTD_STS_ACTIVE | DTD_STS_HALTED);
+	dqh->dtd_status &= dtd_status;
+	VDBG(dev, "dqh->dtd_status = 0x%x\n", dqh->dtd_status);
+
+	/* write 1 to endptprime register to PRIME endpoint */
+	bit_mask = is_in(ep) ? (1 << (ep->ep_num + 16)) : (1 << ep->ep_num);
+	VDBG(dev, "endprime bit_mask = 0x%08x\n", bit_mask);
+	writel(bit_mask, &dev->op_regs->endptprime);
+out:
+	VDBG(dev, "<--- %s()\n", __func__);
+	return 0;
+}
+
+
+/* fill in the dTD structure to build a transfer descriptor */
+static struct langwell_dtd *build_dtd(struct langwell_request *req,
+		unsigned *length, dma_addr_t *dma, int *is_last)
+{
+	u32			 buf_ptr;
+	struct langwell_dtd	*dtd;
+	struct langwell_udc	*dev;
+	int			i;
+
+	dev = req->ep->dev;
+	VDBG(dev, "---> %s()\n", __func__);
+
+	/* the maximum transfer length, up to 16k bytes */
+	*length = min(req->req.length - req->req.actual,
+			(unsigned)DTD_MAX_TRANSFER_LENGTH);
+
+	/* create dTD dma_pool resource */
+	dtd = dma_pool_alloc(dev->dtd_pool, GFP_KERNEL, dma);
+	if (dtd == NULL)
+		return dtd;
+	dtd->dtd_dma = *dma;
+
+	/* initialize buffer page pointers */
+	buf_ptr = (u32)(req->req.dma + req->req.actual);
+	for (i = 0; i < 5; i++)
+		dtd->dtd_buf[i] = cpu_to_le32(buf_ptr + i * PAGE_SIZE);
+
+	req->req.actual += *length;
+
+	/* fill in total bytes with transfer size */
+	dtd->dtd_total = cpu_to_le16(*length);
+	VDBG(dev, "dtd->dtd_total = %d\n", dtd->dtd_total);
+
+	/* set is_last flag if req->req.zero is set or not */
+	if (req->req.zero) {
+		if (*length == 0 || (*length % req->ep->ep.maxpacket) != 0)
+			*is_last = 1;
+		else
+			*is_last = 0;
+	} else if (req->req.length == req->req.actual) {
+		*is_last = 1;
+	} else
+		*is_last = 0;
+
+	if (*is_last == 0)
+		VDBG(dev, "multi-dtd request!\n");
+
+	/* set interrupt on complete bit for the last dTD */
+	if (*is_last && !req->req.no_interrupt)
+		dtd->dtd_ioc = 1;
+
+	/* set multiplier override 0 for non-ISO and non-TX endpoint */
+	dtd->dtd_multo = 0;
+
+	/* set the active bit of status field to 1 */
+	dtd->dtd_status = DTD_STS_ACTIVE;
+	VDBG(dev, "dtd->dtd_status = 0x%02x\n", dtd->dtd_status);
+
+	VDBG(dev, "length = %d, dma addr= 0x%08x\n", *length, (int)*dma);
+	VDBG(dev, "<--- %s()\n", __func__);
+	return dtd;
+}
+
+
+/* generate dTD linked list for a request */
+static int req_to_dtd(struct langwell_request *req)
+{
+	unsigned		count;
+	int			is_last, is_first = 1;
+	struct langwell_dtd	*dtd, *last_dtd = NULL;
+	struct langwell_udc	*dev;
+	dma_addr_t		dma;
+
+	dev = req->ep->dev;
+	VDBG(dev, "---> %s()\n", __func__);
+	do {
+		dtd = build_dtd(req, &count, &dma, &is_last);
+		if (dtd == NULL)
+			return -ENOMEM;
+
+		if (is_first) {
+			is_first = 0;
+			req->head = dtd;
+		} else {
+			last_dtd->dtd_next = cpu_to_le32(dma);
+			last_dtd->next_dtd_virt = dtd;
+		}
+		last_dtd = dtd;
+		req->dtd_count++;
+	} while (!is_last);
+
+	/* set terminate bit to 1 for the last dTD */
+	dtd->dtd_next = DTD_TERM;
+
+	req->tail = dtd;
+
+	VDBG(dev, "<--- %s()\n", __func__);
+	return 0;
+}
+
+/*-------------------------------------------------------------------------*/
+
+/* queue (submits) an I/O request to an endpoint */
+static int langwell_ep_queue(struct usb_ep *_ep, struct usb_request *_req,
+		gfp_t gfp_flags)
+{
+	struct langwell_request	*req;
+	struct langwell_ep	*ep;
+	struct langwell_udc	*dev;
+	unsigned long		flags;
+	int			is_iso = 0, zlflag = 0;
+
+	/* always require a cpu-view buffer */
+	req = container_of(_req, struct langwell_request, req);
+	ep = container_of(_ep, struct langwell_ep, ep);
+
+	if (!_req || !_req->complete || !_req->buf
+			|| !list_empty(&req->queue)) {
+		return -EINVAL;
+	}
+
+	if (unlikely(!_ep || !ep->desc))
+		return -EINVAL;
+
+	dev = ep->dev;
+	req->ep = ep;
+	VDBG(dev, "---> %s()\n", __func__);
+
+	if (ep->desc->bmAttributes == USB_ENDPOINT_XFER_ISOC) {
+		if (req->req.length > ep->ep.maxpacket)
+			return -EMSGSIZE;
+		is_iso = 1;
+	}
+
+	if (unlikely(!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN))
+		return -ESHUTDOWN;
+
+	/* set up dma mapping in case the caller didn't */
+	if (_req->dma == DMA_ADDR_INVALID) {
+		/* WORKAROUND: WARN_ON(size == 0) */
+		if (_req->length == 0) {
+			VDBG(dev, "req->length: 0->1\n");
+			zlflag = 1;
+			_req->length++;
+		}
+
+		_req->dma = dma_map_single(&dev->pdev->dev,
+				_req->buf, _req->length,
+				is_in(ep) ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
+		if (zlflag && (_req->length == 1)) {
+			VDBG(dev, "req->length: 1->0\n");
+			zlflag = 0;
+			_req->length = 0;
+		}
+
+		req->mapped = 1;
+		VDBG(dev, "req->mapped = 1\n");
+	} else {
+		dma_sync_single_for_device(&dev->pdev->dev,
+				_req->dma, _req->length,
+				is_in(ep) ?  DMA_TO_DEVICE : DMA_FROM_DEVICE);
+		req->mapped = 0;
+		VDBG(dev, "req->mapped = 0\n");
+	}
+
+	DBG(dev, "%s queue req %p, len %u, buf %p, dma 0x%08x\n",
+			_ep->name,
+			_req, _req->length, _req->buf, _req->dma);
+
+	_req->status = -EINPROGRESS;
+	_req->actual = 0;
+	req->dtd_count = 0;
+
+	spin_lock_irqsave(&dev->lock, flags);
+
+	/* build and put dTDs to endpoint queue */
+	if (!req_to_dtd(req)) {
+		queue_dtd(ep, req);
+	} else {
+		spin_unlock_irqrestore(&dev->lock, flags);
+		return -ENOMEM;
+	}
+
+	/* update ep0 state */
+	if (ep->ep_num == 0)
+		dev->ep0_state = DATA_STATE_XMIT;
+
+	if (likely(req != NULL)) {
+		list_add_tail(&req->queue, &ep->queue);
+		VDBG(dev, "list_add_tail() \n");
+	}
+
+	spin_unlock_irqrestore(&dev->lock, flags);
+
+	VDBG(dev, "<--- %s()\n", __func__);
+	return 0;
+}
+
+
+/* dequeue (cancels, unlinks) an I/O request from an endpoint */
+static int langwell_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req)
+{
+	struct langwell_ep	*ep;
+	struct langwell_udc	*dev;
+	struct langwell_request	*req;
+	unsigned long		flags;
+	int			stopped, ep_num, retval = 0;
+	u32			endptctrl;
+
+	ep = container_of(_ep, struct langwell_ep, ep);
+	dev = ep->dev;
+	VDBG(dev, "---> %s()\n", __func__);
+
+	if (!_ep || !ep->desc || !_req)
+		return -EINVAL;
+
+	if (!dev->driver)
+		return -ESHUTDOWN;
+
+	spin_lock_irqsave(&dev->lock, flags);
+	stopped = ep->stopped;
+
+	/* quiesce dma while we patch the queue */
+	ep->stopped = 1;
+	ep_num = ep->ep_num;
+
+	/* disable endpoint control register */
+	endptctrl = readl(&dev->op_regs->endptctrl[ep_num]);
+	if (is_in(ep))
+		endptctrl &= ~EPCTRL_TXE;
+	else
+		endptctrl &= ~EPCTRL_RXE;
+	writel(endptctrl, &dev->op_regs->endptctrl[ep_num]);
+
+	/* make sure it's still queued on this endpoint */
+	list_for_each_entry(req, &ep->queue, queue) {
+		if (&req->req == _req)
+			break;
+	}
+
+	if (&req->req != _req) {
+		retval = -EINVAL;
+		goto done;
+	}
+
+	/* queue head may be partially complete. */
+	if (ep->queue.next == &req->queue) {
+		DBG(dev, "unlink (%s) dma\n", _ep->name);
+		_req->status = -ECONNRESET;
+		langwell_ep_fifo_flush(&ep->ep);
+
+		/* not the last request in endpoint queue */
+		if (likely(ep->queue.next == &req->queue)) {
+			struct langwell_dqh	*dqh;
+			struct langwell_request	*next_req;
+
+			dqh = ep->dqh;
+			next_req = list_entry(req->queue.next,
+					struct langwell_request, queue);
+
+			/* point the dQH to the first dTD of next request */
+			writel((u32) next_req->head, &dqh->dqh_current);
+		}
+	} else {
+		struct langwell_request	*prev_req;
+
+		prev_req = list_entry(req->queue.prev,
+				struct langwell_request, queue);
+		writel(readl(&req->tail->dtd_next),
+				&prev_req->tail->dtd_next);
+	}
+
+	done(ep, req, -ECONNRESET);
+
+done:
+	/* enable endpoint again */
+	endptctrl = readl(&dev->op_regs->endptctrl[ep_num]);
+	if (is_in(ep))
+		endptctrl |= EPCTRL_TXE;
+	else
+		endptctrl |= EPCTRL_RXE;
+	writel(endptctrl, &dev->op_regs->endptctrl[ep_num]);
+
+	ep->stopped = stopped;
+	spin_unlock_irqrestore(&dev->lock, flags);
+
+	VDBG(dev, "<--- %s()\n", __func__);
+	return retval;
+}
+
+
+/*-------------------------------------------------------------------------*/
+
+/* endpoint set/clear halt */
+static void ep_set_halt(struct langwell_ep *ep, int value)
+{
+	u32			endptctrl = 0;
+	int			ep_num;
+	struct langwell_udc	*dev = ep->dev;
+	VDBG(dev, "---> %s()\n", __func__);
+
+	ep_num = ep->ep_num;
+	endptctrl = readl(&dev->op_regs->endptctrl[ep_num]);
+
+	/* value: 1 - set halt, 0 - clear halt */
+	if (value) {
+		/* set the stall bit */
+		if (is_in(ep))
+			endptctrl |= EPCTRL_TXS;
+		else
+			endptctrl |= EPCTRL_RXS;
+	} else {
+		/* clear the stall bit and reset data toggle */
+		if (is_in(ep)) {
+			endptctrl &= ~EPCTRL_TXS;
+			endptctrl |= EPCTRL_TXR;
+		} else {
+			endptctrl &= ~EPCTRL_RXS;
+			endptctrl |= EPCTRL_RXR;
+		}
+	}
+
+	writel(endptctrl, &dev->op_regs->endptctrl[ep_num]);
+
+	VDBG(dev, "<--- %s()\n", __func__);
+}
+
+
+/* set the endpoint halt feature */
+static int langwell_ep_set_halt(struct usb_ep *_ep, int value)
+{
+	struct langwell_ep	*ep;
+	struct langwell_udc	*dev;
+	unsigned long		flags;
+	int			retval = 0;
+
+	ep = container_of(_ep, struct langwell_ep, ep);
+	dev = ep->dev;
+
+	VDBG(dev, "---> %s()\n", __func__);
+
+	if (!_ep || !ep->desc)
+		return -EINVAL;
+
+	if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)
+		return -ESHUTDOWN;
+
+	if (ep->desc && (ep->desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK)
+			== USB_ENDPOINT_XFER_ISOC)
+		return  -EOPNOTSUPP;
+
+	spin_lock_irqsave(&dev->lock, flags);
+
+	/*
+	 * an attempt to halt an IN ep will fail if any transfer requests
+	 * are still queued
+	 */
+	if (!list_empty(&ep->queue) && is_in(ep) && value) {
+		/* IN endpoint FIFO holds bytes */
+		DBG(dev, "%s FIFO holds bytes\n", _ep->name);
+		retval = -EAGAIN;
+		goto done;
+	}
+
+	/* endpoint set/clear halt */
+	if (ep->ep_num) {
+		ep_set_halt(ep, value);
+	} else { /* endpoint 0 */
+		dev->ep0_state = WAIT_FOR_SETUP;
+		dev->ep0_dir = USB_DIR_OUT;
+	}
+done:
+	spin_unlock_irqrestore(&dev->lock, flags);
+	DBG(dev, "%s %s halt\n", _ep->name, value ? "set" : "clear");
+	VDBG(dev, "<--- %s()\n", __func__);
+	return retval;
+}
+
+
+/* set the halt feature and ignore clear requests */
+static int langwell_ep_set_wedge(struct usb_ep *_ep)
+{
+	struct langwell_ep	*ep;
+	struct langwell_udc	*dev;
+
+	ep = container_of(_ep, struct langwell_ep, ep);
+	dev = ep->dev;
+
+	VDBG(dev, "---> %s()\n", __func__);
+
+	if (!_ep || !ep->desc)
+		return -EINVAL;
+
+	VDBG(dev, "<--- %s()\n", __func__);
+	return usb_ep_set_halt(_ep);
+}
+
+
+/* flush contents of a fifo */
+static void langwell_ep_fifo_flush(struct usb_ep *_ep)
+{
+	struct langwell_ep	*ep;
+	struct langwell_udc	*dev;
+	u32			flush_bit;
+	unsigned long		timeout;
+
+	ep = container_of(_ep, struct langwell_ep, ep);
+	dev = ep->dev;
+
+	VDBG(dev, "---> %s()\n", __func__);
+
+	if (!_ep || !ep->desc) {
+		VDBG(dev, "ep or ep->desc is NULL\n");
+		VDBG(dev, "<--- %s()\n", __func__);
+		return;
+	}
+
+	VDBG(dev, "%s-%s fifo flush\n", _ep->name, is_in(ep) ? "in" : "out");
+
+	/* flush endpoint buffer */
+	if (ep->ep_num == 0)
+		flush_bit = (1 << 16) | 1;
+	else if (is_in(ep))
+		flush_bit = 1 << (ep->ep_num + 16);	/* TX */
+	else
+		flush_bit = 1 << ep->ep_num;		/* RX */
+
+	/* wait until flush complete */
+	timeout = jiffies + FLUSH_TIMEOUT;
+	do {
+		writel(flush_bit, &dev->op_regs->endptflush);
+		while (readl(&dev->op_regs->endptflush)) {
+			if (time_after(jiffies, timeout)) {
+				ERROR(dev, "ep flush timeout\n");
+				goto done;
+			}
+			cpu_relax();
+		}
+	} while (readl(&dev->op_regs->endptstat) & flush_bit);
+done:
+	VDBG(dev, "<--- %s()\n", __func__);
+}
+
+
+/* endpoints operations structure */
+static const struct usb_ep_ops langwell_ep_ops = {
+
+	/* configure endpoint, making it usable */
+	.enable		= langwell_ep_enable,
+
+	/* endpoint is no longer usable */
+	.disable	= langwell_ep_disable,
+
+	/* allocate a request object to use with this endpoint */
+	.alloc_request	= langwell_alloc_request,
+
+	/* free a request object */
+	.free_request	= langwell_free_request,
+
+	/* queue (submits) an I/O request to an endpoint */
+	.queue		= langwell_ep_queue,
+
+	/* dequeue (cancels, unlinks) an I/O request from an endpoint */
+	.dequeue	= langwell_ep_dequeue,
+
+	/* set the endpoint halt feature */
+	.set_halt	= langwell_ep_set_halt,
+
+	/* set the halt feature and ignore clear requests */
+	.set_wedge	= langwell_ep_set_wedge,
+
+	/* flush contents of a fifo */
+	.fifo_flush	= langwell_ep_fifo_flush,
+};
+
+
+/*-------------------------------------------------------------------------*/
+
+/* device controller usb_gadget_ops structure */
+
+/* returns the current frame number */
+static int langwell_get_frame(struct usb_gadget *_gadget)
+{
+	struct langwell_udc	*dev;
+	u16			retval;
+
+	if (!_gadget)
+		return -ENODEV;
+
+	dev = container_of(_gadget, struct langwell_udc, gadget);
+	VDBG(dev, "---> %s()\n", __func__);
+
+	retval = readl(&dev->op_regs->frindex) & FRINDEX_MASK;
+
+	VDBG(dev, "<--- %s()\n", __func__);
+	return retval;
+}
+
+
+/* tries to wake up the host connected to this gadget */
+static int langwell_wakeup(struct usb_gadget *_gadget)
+{
+	struct langwell_udc	*dev;
+	u32 			portsc1, devlc;
+	unsigned long   	flags;
+
+	if (!_gadget)
+		return 0;
+
+	dev = container_of(_gadget, struct langwell_udc, gadget);
+	VDBG(dev, "---> %s()\n", __func__);
+
+	/* Remote Wakeup feature not enabled by host */
+	if (!dev->remote_wakeup)
+		return -ENOTSUPP;
+
+	spin_lock_irqsave(&dev->lock, flags);
+
+	portsc1 = readl(&dev->op_regs->portsc1);
+	if (!(portsc1 & PORTS_SUSP)) {
+		spin_unlock_irqrestore(&dev->lock, flags);
+		return 0;
+	}
+
+	/* LPM L1 to L0, remote wakeup */
+	if (dev->lpm && dev->lpm_state == LPM_L1) {
+		portsc1 |= PORTS_SLP;
+		writel(portsc1, &dev->op_regs->portsc1);
+	}
+
+	/* force port resume */
+	if (dev->usb_state == USB_STATE_SUSPENDED) {
+		portsc1 |= PORTS_FPR;
+		writel(portsc1, &dev->op_regs->portsc1);
+	}
+
+	/* exit PHY low power suspend */
+	devlc = readl(&dev->op_regs->devlc);
+	VDBG(dev, "devlc = 0x%08x\n", devlc);
+	devlc &= ~LPM_PHCD;
+	writel(devlc, &dev->op_regs->devlc);
+
+	spin_unlock_irqrestore(&dev->lock, flags);
+
+	VDBG(dev, "<--- %s()\n", __func__);
+	return 0;
+}
+
+
+/* notify controller that VBUS is powered or not */
+static int langwell_vbus_session(struct usb_gadget *_gadget, int is_active)
+{
+	struct langwell_udc	*dev;
+	unsigned long		flags;
+	u32             	usbcmd;
+
+	if (!_gadget)
+		return -ENODEV;
+
+	dev = container_of(_gadget, struct langwell_udc, gadget);
+	VDBG(dev, "---> %s()\n", __func__);
+
+	spin_lock_irqsave(&dev->lock, flags);
+	VDBG(dev, "VBUS status: %s\n", is_active ? "on" : "off");
+
+	dev->vbus_active = (is_active != 0);
+	if (dev->driver && dev->softconnected && dev->vbus_active) {
+		usbcmd = readl(&dev->op_regs->usbcmd);
+		usbcmd |= CMD_RUNSTOP;
+		writel(usbcmd, &dev->op_regs->usbcmd);
+	} else {
+		usbcmd = readl(&dev->op_regs->usbcmd);
+		usbcmd &= ~CMD_RUNSTOP;
+		writel(usbcmd, &dev->op_regs->usbcmd);
+	}
+
+	spin_unlock_irqrestore(&dev->lock, flags);
+
+	VDBG(dev, "<--- %s()\n", __func__);
+	return 0;
+}
+
+
+/* constrain controller's VBUS power usage */
+static int langwell_vbus_draw(struct usb_gadget *_gadget, unsigned mA)
+{
+	struct langwell_udc	*dev;
+
+	if (!_gadget)
+		return -ENODEV;
+
+	dev = container_of(_gadget, struct langwell_udc, gadget);
+	VDBG(dev, "---> %s()\n", __func__);
+
+	if (dev->transceiver) {
+		VDBG(dev, "otg_set_power\n");
+		VDBG(dev, "<--- %s()\n", __func__);
+		return otg_set_power(dev->transceiver, mA);
+	}
+
+	VDBG(dev, "<--- %s()\n", __func__);
+	return -ENOTSUPP;
+}
+
+
+/* D+ pullup, software-controlled connect/disconnect to USB host */
+static int langwell_pullup(struct usb_gadget *_gadget, int is_on)
+{
+	struct langwell_udc	*dev;
+	u32             	usbcmd;
+	unsigned long   	flags;
+
+	if (!_gadget)
+		return -ENODEV;
+
+	dev = container_of(_gadget, struct langwell_udc, gadget);
+
+	VDBG(dev, "---> %s()\n", __func__);
+
+	spin_lock_irqsave(&dev->lock, flags);
+	dev->softconnected = (is_on != 0);
+
+	if (dev->driver && dev->softconnected && dev->vbus_active) {
+		usbcmd = readl(&dev->op_regs->usbcmd);
+		usbcmd |= CMD_RUNSTOP;
+		writel(usbcmd, &dev->op_regs->usbcmd);
+	} else {
+		usbcmd = readl(&dev->op_regs->usbcmd);
+		usbcmd &= ~CMD_RUNSTOP;
+		writel(usbcmd, &dev->op_regs->usbcmd);
+	}
+	spin_unlock_irqrestore(&dev->lock, flags);
+
+	VDBG(dev, "<--- %s()\n", __func__);
+	return 0;
+}
+
+
+/* device controller usb_gadget_ops structure */
+static const struct usb_gadget_ops langwell_ops = {
+
+	/* returns the current frame number */
+	.get_frame	= langwell_get_frame,
+
+	/* tries to wake up the host connected to this gadget */
+	.wakeup		= langwell_wakeup,
+
+	/* set the device selfpowered feature, always selfpowered */
+	/* .set_selfpowered = langwell_set_selfpowered, */
+
+	/* notify controller that VBUS is powered or not */
+	.vbus_session	= langwell_vbus_session,
+
+	/* constrain controller's VBUS power usage */
+	.vbus_draw	= langwell_vbus_draw,
+
+	/* D+ pullup, software-controlled connect/disconnect to USB host */
+	.pullup		= langwell_pullup,
+};
+
+
+/*-------------------------------------------------------------------------*/
+
+/* device controller operations */
+
+/* reset device controller */
+static int langwell_udc_reset(struct langwell_udc *dev)
+{
+	u32		usbcmd, usbmode, devlc, endpointlistaddr;
+	unsigned long	timeout;
+
+	if (!dev)
+		return -EINVAL;
+
+	DBG(dev, "---> %s()\n", __func__);
+
+	/* set controller to stop state */
+	usbcmd = readl(&dev->op_regs->usbcmd);
+	usbcmd &= ~CMD_RUNSTOP;
+	writel(usbcmd, &dev->op_regs->usbcmd);
+
+	/* reset device controller */
+	usbcmd = readl(&dev->op_regs->usbcmd);
+	usbcmd |= CMD_RST;
+	writel(usbcmd, &dev->op_regs->usbcmd);
+
+	/* wait for reset to complete */
+	timeout = jiffies + RESET_TIMEOUT;
+	while (readl(&dev->op_regs->usbcmd) & CMD_RST) {
+		if (time_after(jiffies, timeout)) {
+			ERROR(dev, "device reset timeout\n");
+			return -ETIMEDOUT;
+		}
+		cpu_relax();
+	}
+
+	/* set controller to device mode */
+	usbmode = readl(&dev->op_regs->usbmode);
+	usbmode |= MODE_DEVICE;
+
+	/* turn setup lockout off, require setup tripwire in usbcmd */
+	usbmode |= MODE_SLOM;
+
+	writel(usbmode, &dev->op_regs->usbmode);
+	usbmode = readl(&dev->op_regs->usbmode);
+	VDBG(dev, "usbmode=0x%08x\n", usbmode);
+
+	/* Write-Clear setup status */
+	writel(0, &dev->op_regs->usbsts);
+
+	/* if USB LPM is supported, ACK all LPM tokens */
+	if (dev->lpm) {
+		devlc = readl(&dev->op_regs->devlc);
+		devlc &= ~LPM_STL;	/* don't STALL LPM token */
+		devlc &= ~LPM_NYT_ACK;	/* ACK LPM token */
+		writel(devlc, &dev->op_regs->devlc);
+	}
+
+	/* fill endpointlistaddr register */
+	endpointlistaddr = dev->ep_dqh_dma;
+	endpointlistaddr &= ENDPOINTLISTADDR_MASK;
+	writel(endpointlistaddr, &dev->op_regs->endpointlistaddr);
+
+	VDBG(dev, "dQH base (vir: %p, phy: 0x%08x), endpointlistaddr=0x%08x\n",
+			dev->ep_dqh, endpointlistaddr,
+			readl(&dev->op_regs->endpointlistaddr));
+	DBG(dev, "<--- %s()\n", __func__);
+	return 0;
+}
+
+
+/* reinitialize device controller endpoints */
+static int eps_reinit(struct langwell_udc *dev)
+{
+	struct langwell_ep	*ep;
+	char			name[14];
+	int			i;
+
+	VDBG(dev, "---> %s()\n", __func__);
+
+	/* initialize ep0 */
+	ep = &dev->ep[0];
+	ep->dev = dev;
+	strncpy(ep->name, "ep0", sizeof(ep->name));
+	ep->ep.name = ep->name;
+	ep->ep.ops = &langwell_ep_ops;
+	ep->stopped = 0;
+	ep->ep.maxpacket = EP0_MAX_PKT_SIZE;
+	ep->ep_num = 0;
+	ep->desc = &langwell_ep0_desc;
+	INIT_LIST_HEAD(&ep->queue);
+
+	ep->ep_type = USB_ENDPOINT_XFER_CONTROL;
+
+	/* initialize other endpoints */
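+	/* indices 0 and 1 are the ep0 OUT/IN pair, so start at index 2 */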
+	for (i = 2; i < dev->ep_max; i++) {
+		ep = &dev->ep[i];
+		if (i % 2)
+			snprintf(name, sizeof(name), "ep%din", i / 2);
+		else
+			snprintf(name, sizeof(name), "ep%dout", i / 2);
+		ep->dev = dev;
+		strncpy(ep->name, name, sizeof(ep->name));
+		ep->ep.name = ep->name;
+
+		ep->ep.ops = &langwell_ep_ops;
+		ep->stopped = 0;
+		ep->ep.maxpacket = (unsigned short) ~0;
+		ep->ep_num = i / 2;
+
+		INIT_LIST_HEAD(&ep->queue);
+		list_add_tail(&ep->ep.ep_list, &dev->gadget.ep_list);
+
+		ep->dqh = &dev->ep_dqh[i];
+	}
+
+	VDBG(dev, "<--- %s()\n", __func__);
+	return 0;
+}
+
+
+/* enable interrupt and set controller to run state */
+static void langwell_udc_start(struct langwell_udc *dev)
+{
+	u32	usbintr, usbcmd;
+	DBG(dev, "---> %s()\n", __func__);
+
+	/* enable interrupts */
+	usbintr = INTR_ULPIE	/* ULPI */
+		| INTR_SLE	/* suspend */
+		/* | INTR_SRE	SOF received */
+		| INTR_URE	/* USB reset */
+		| INTR_AAE	/* async advance */
+		| INTR_SEE	/* system error */
+		| INTR_FRE	/* frame list rollover */
+		| INTR_PCE	/* port change detect */
+		| INTR_UEE	/* USB error interrupt */
+		| INTR_UE;	/* USB interrupt */
+	writel(usbintr, &dev->op_regs->usbintr);
+
+	/* clear stopped bit */
+	dev->stopped = 0;
+
+	/* set controller to run */
+	usbcmd = readl(&dev->op_regs->usbcmd);
+	usbcmd |= CMD_RUNSTOP;
+	writel(usbcmd, &dev->op_regs->usbcmd);
+
+	DBG(dev, "<--- %s()\n", __func__);
+	return;
+}
+
+
+/* disable interrupt and set controller to stop state */
+static void langwell_udc_stop(struct langwell_udc *dev)
+{
+	u32	usbcmd;
+
+	DBG(dev, "---> %s()\n", __func__);
+
+	/* disable all interrupts */
+	writel(0, &dev->op_regs->usbintr);
+
+	/* set stopped bit */
+	dev->stopped = 1;
+
+	/* set controller to stop state */
+	usbcmd = readl(&dev->op_regs->usbcmd);
+	usbcmd &= ~CMD_RUNSTOP;
+	writel(usbcmd, &dev->op_regs->usbcmd);
+
+	DBG(dev, "<--- %s()\n", __func__);
+	return;
+}
+
+
+/* stop all USB activities */
+static void stop_activity(struct langwell_udc *dev,
+		struct usb_gadget_driver *driver)
+{
+	struct langwell_ep	*ep;
+	DBG(dev, "---> %s()\n", __func__);
+
+	nuke(&dev->ep[0], -ESHUTDOWN);
+
+	list_for_each_entry(ep, &dev->gadget.ep_list, ep.ep_list) {
+		nuke(ep, -ESHUTDOWN);
+	}
+
+	/* report disconnect; the driver is already quiesced */
+	if (driver) {
+		spin_unlock(&dev->lock);
+		driver->disconnect(&dev->gadget);
+		spin_lock(&dev->lock);
+	}
+
+	DBG(dev, "<--- %s()\n", __func__);
+}
+
+
+/*-------------------------------------------------------------------------*/
+
+/* device "function" sysfs attribute file */
+static ssize_t show_function(struct device *_dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct langwell_udc	*dev = the_controller;
+
+	if (!dev->driver || !dev->driver->function
+			|| strlen(dev->driver->function) > PAGE_SIZE)
+		return 0;
+
+	return scnprintf(buf, PAGE_SIZE, "%s\n", dev->driver->function);
+}
+static DEVICE_ATTR(function, S_IRUGO, show_function, NULL);
+
+
+/* device "langwell_udc" sysfs attribute file */
+static ssize_t show_langwell_udc(struct device *_dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct langwell_udc	*dev = the_controller;
+	struct langwell_request *req;
+	struct langwell_ep	*ep = NULL;
+	char			*next;
+	unsigned		size;
+	unsigned		t;
+	unsigned		i;
+	unsigned long		flags;
+	u32			tmp_reg;
+
+	next = buf;
+	size = PAGE_SIZE;
+	spin_lock_irqsave(&dev->lock, flags);
+
+	/* driver basic information */
+	t = scnprintf(next, size,
+			DRIVER_DESC "\n"
+			"%s version: %s\n"
+			"Gadget driver: %s\n\n",
+			driver_name, DRIVER_VERSION,
+			dev->driver ? dev->driver->driver.name : "(none)");
+	size -= t;
+	next += t;
+
+	/* device registers */
+	tmp_reg = readl(&dev->op_regs->usbcmd);
+	t = scnprintf(next, size,
+			"USBCMD reg:\n"
+			"SetupTW: %d\n"
+			"Run/Stop: %s\n\n",
+			(tmp_reg & CMD_SUTW) ? 1 : 0,
+			(tmp_reg & CMD_RUNSTOP) ? "Run" : "Stop");
+	size -= t;
+	next += t;
+
+	tmp_reg = readl(&dev->op_regs->usbsts);
+	t = scnprintf(next, size,
+			"USB Status Reg:\n"
+			"Device Suspend: %d\n"
+			"Reset Received: %d\n"
+			"System Error: %s\n"
+			"USB Error Interrupt: %s\n\n",
+			(tmp_reg & STS_SLI) ? 1 : 0,
+			(tmp_reg & STS_URI) ? 1 : 0,
+			(tmp_reg & STS_SEI) ? "Error" : "No error",
+			(tmp_reg & STS_UEI) ? "Error detected" : "No error");
+	size -= t;
+	next += t;
+
+	tmp_reg = readl(&dev->op_regs->usbintr);
+	t = scnprintf(next, size,
+			"USB Interrupt Enable Reg:\n"
+			"Sleep Enable: %d\n"
+			"SOF Received Enable: %d\n"
+			"Reset Enable: %d\n"
+			"System Error Enable: %d\n"
+			"Port Change Detected Enable: %d\n"
+			"USB Error Intr Enable: %d\n"
+			"USB Intr Enable: %d\n\n",
+			(tmp_reg & INTR_SLE) ? 1 : 0,
+			(tmp_reg & INTR_SRE) ? 1 : 0,
+			(tmp_reg & INTR_URE) ? 1 : 0,
+			(tmp_reg & INTR_SEE) ? 1 : 0,
+			(tmp_reg & INTR_PCE) ? 1 : 0,
+			(tmp_reg & INTR_UEE) ? 1 : 0,
+			(tmp_reg & INTR_UE) ? 1 : 0);
+	size -= t;
+	next += t;
+
+	tmp_reg = readl(&dev->op_regs->frindex);
+	t = scnprintf(next, size,
+			"USB Frame Index Reg:\n"
+			"Frame Number is 0x%08x\n\n",
+			(tmp_reg & FRINDEX_MASK));
+	size -= t;
+	next += t;
+
+	tmp_reg = readl(&dev->op_regs->deviceaddr);
+	t = scnprintf(next, size,
+			"USB Device Address Reg:\n"
+			"Device Addr is 0x%x\n\n",
+			USBADR(tmp_reg));
+	size -= t;
+	next += t;
+
+	tmp_reg = readl(&dev->op_regs->endpointlistaddr);
+	t = scnprintf(next, size,
+			"USB Endpoint List Address Reg:\n"
+			"Endpoint List Pointer is 0x%x\n\n",
+			EPBASE(tmp_reg));
+	size -= t;
+	next += t;
+
+	tmp_reg = readl(&dev->op_regs->portsc1);
+	t = scnprintf(next, size,
+		"USB Port Status & Control Reg:\n"
+		"Port Reset: %s\n"
+		"Port Suspend Mode: %s\n"
+		"Over-current Change: %s\n"
+		"Port Enable/Disable Change: %s\n"
+		"Port Enabled/Disabled: %s\n"
+		"Current Connect Status: %s\n\n",
+		(tmp_reg & PORTS_PR) ? "Reset" : "Not Reset",
+		(tmp_reg & PORTS_SUSP) ? "Suspend " : "Not Suspend",
+		(tmp_reg & PORTS_OCC) ? "Detected" : "No",
+		(tmp_reg & PORTS_PEC) ? "Changed" : "Not Changed",
+		(tmp_reg & PORTS_PE) ? "Enable" : "Not Correct",
+		(tmp_reg & PORTS_CCS) ?  "Attached" : "Not Attached");
+	size -= t;
+	next += t;
+
+	tmp_reg = readl(&dev->op_regs->devlc);
+	t = scnprintf(next, size,
+		"Device LPM Control Reg:\n"
+		"Parallel Transceiver : %d\n"
+		"Serial Transceiver : %d\n"
+		"Port Speed: %s\n"
+		"Port Force Full Speed Connect: %s\n"
+		"PHY Low Power Suspend Clock Disable: %s\n"
+		"BmAttributes: %d\n\n",
+		LPM_PTS(tmp_reg),
+		(tmp_reg & LPM_STS) ? 1 : 0,
+		({
+			char	*s;
+			switch (LPM_PSPD(tmp_reg)) {
+			case LPM_SPEED_FULL:
+				s = "Full Speed"; break;
+			case LPM_SPEED_LOW:
+				s = "Low Speed"; break;
+			case LPM_SPEED_HIGH:
+				s = "High Speed"; break;
+			default:
+				s = "Unknown Speed"; break;
+			}
+			s;
+		}),
+		(tmp_reg & LPM_PFSC) ? "Force Full Speed" : "Not Force",
+		(tmp_reg & LPM_PHCD) ? "Disabled" : "Enabled",
+		LPM_BA(tmp_reg));
+	size -= t;
+	next += t;
+
+	tmp_reg = readl(&dev->op_regs->usbmode);
+	t = scnprintf(next, size,
+			"USB Mode Reg:\n"
+			"Controller Mode is : %s\n\n", ({
+				char *s;
+				switch (MODE_CM(tmp_reg)) {
+				case MODE_IDLE:
+					s = "Idle"; break;
+				case MODE_DEVICE:
+					s = "Device Controller"; break;
+				case MODE_HOST:
+					s = "Host Controller"; break;
+				default:
+					s = "None"; break;
+				}
+				s;
+			}));
+	size -= t;
+	next += t;
+
+	tmp_reg = readl(&dev->op_regs->endptsetupstat);
+	t = scnprintf(next, size,
+			"Endpoint Setup Status Reg:\n"
+			"SETUP on ep 0x%04x\n\n",
+			tmp_reg & SETUPSTAT_MASK);
+	size -= t;
+	next += t;
+
+	for (i = 0; i < dev->ep_max / 2; i++) {
+		tmp_reg = readl(&dev->op_regs->endptctrl[i]);
+		t = scnprintf(next, size, "EP Ctrl Reg [%d]: 0x%08x\n",
+				i, tmp_reg);
+		size -= t;
+		next += t;
+	}
+	tmp_reg = readl(&dev->op_regs->endptprime);
+	t = scnprintf(next, size, "EP Prime Reg: 0x%08x\n\n", tmp_reg);
+	size -= t;
+	next += t;
+
+	/* langwell_udc, langwell_ep, langwell_request structure information */
+	ep = &dev->ep[0];
+	t = scnprintf(next, size, "%s MaxPacketSize: 0x%x, ep_num: %d\n",
+			ep->ep.name, ep->ep.maxpacket, ep->ep_num);
+	size -= t;
+	next += t;
+
+	if (list_empty(&ep->queue)) {
+		t = scnprintf(next, size, "its req queue is empty\n\n");
+		size -= t;
+		next += t;
+	} else {
+		list_for_each_entry(req, &ep->queue, queue) {
+			t = scnprintf(next, size,
+				"req %p actual 0x%x length 0x%x  buf %p\n",
+				&req->req, req->req.actual,
+				req->req.length, req->req.buf);
+			size -= t;
+			next += t;
+		}
+	}
+	/* other gadget->eplist ep */
+	list_for_each_entry(ep, &dev->gadget.ep_list, ep.ep_list) {
+		if (ep->desc) {
+			t = scnprintf(next, size,
+					"\n%s MaxPacketSize: 0x%x, "
+					"ep_num: %d\n",
+					ep->ep.name, ep->ep.maxpacket,
+					ep->ep_num);
+			size -= t;
+			next += t;
+
+			if (list_empty(&ep->queue)) {
+				t = scnprintf(next, size,
+						"its req queue is empty\n\n");
+				size -= t;
+				next += t;
+			} else {
+				list_for_each_entry(req, &ep->queue, queue) {
+					t = scnprintf(next, size,
+						"req %p actual 0x%x length "
+						"0x%x  buf %p\n",
+						&req->req, req->req.actual,
+						req->req.length, req->req.buf);
+					size -= t;
+					next += t;
+				}
+			}
+		}
+	}
+
+	spin_unlock_irqrestore(&dev->lock, flags);
+	return PAGE_SIZE - size;
+}
+static DEVICE_ATTR(langwell_udc, S_IRUGO, show_langwell_udc, NULL);
+
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * when a driver is successfully registered, it will receive
+ * control requests including set_configuration(), which enables
+ * non-control requests.  then usb traffic follows until a
+ * disconnect is reported.  then a host may connect again, or
+ * the driver might get unbound.
+ */
+
+int usb_gadget_register_driver(struct usb_gadget_driver *driver)
+{
+	struct langwell_udc	*dev = the_controller;
+	unsigned long		flags;
+	int			retval;
+
+	if (!dev)
+		return -ENODEV;
+
+	DBG(dev, "---> %s()\n", __func__);
+
+	if (dev->driver)
+		return -EBUSY;
+
+	spin_lock_irqsave(&dev->lock, flags);
+
+	/* hook up the driver ... */
+	driver->driver.bus = NULL;
+	dev->driver = driver;
+	dev->gadget.dev.driver = &driver->driver;
+
+	spin_unlock_irqrestore(&dev->lock, flags);
+
+	retval = driver->bind(&dev->gadget);
+	if (retval) {
+		DBG(dev, "bind to driver %s --> %d\n",
+				driver->driver.name, retval);
+		dev->driver = NULL;
+		dev->gadget.dev.driver = NULL;
+		return retval;
+	}
+
+	retval = device_create_file(&dev->pdev->dev, &dev_attr_function);
+	if (retval)
+		goto err_unbind;
+
+	dev->usb_state = USB_STATE_ATTACHED;
+	dev->ep0_state = WAIT_FOR_SETUP;
+	dev->ep0_dir = USB_DIR_OUT;
+
+	/* enable interrupt and set controller to run state */
+	if (dev->got_irq)
+		langwell_udc_start(dev);
+
+	VDBG(dev, "After langwell_udc_start(), print all registers:\n");
+#ifdef	VERBOSE
+	print_all_registers(dev);
+#endif
+
+	INFO(dev, "register driver: %s\n", driver->driver.name);
+	VDBG(dev, "<--- %s()\n", __func__);
+	return 0;
+
+err_unbind:
+	driver->unbind(&dev->gadget);
+	dev->gadget.dev.driver = NULL;
+	dev->driver = NULL;
+
+	DBG(dev, "<--- %s()\n", __func__);
+	return retval;
+}
+EXPORT_SYMBOL(usb_gadget_register_driver);
+
+
+/* unregister gadget driver */
+int usb_gadget_unregister_driver(struct usb_gadget_driver *driver)
+{
+	struct langwell_udc	*dev = the_controller;
+	unsigned long		flags;
+
+	if (!dev)
+		return -ENODEV;
+
+	DBG(dev, "---> %s()\n", __func__);
+
+	if (unlikely(!driver || !driver->bind || !driver->unbind))
+		return -EINVAL;
+
+	/* unbind OTG transceiver */
+	if (dev->transceiver)
+		(void)otg_set_peripheral(dev->transceiver, 0);
+
+	/* disable interrupt and set controller to stop state */
+	langwell_udc_stop(dev);
+
+	dev->usb_state = USB_STATE_ATTACHED;
+	dev->ep0_state = WAIT_FOR_SETUP;
+	dev->ep0_dir = USB_DIR_OUT;
+
+	spin_lock_irqsave(&dev->lock, flags);
+
+	/* stop all usb activities */
+	dev->gadget.speed = USB_SPEED_UNKNOWN;
+	stop_activity(dev, driver);
+	spin_unlock_irqrestore(&dev->lock, flags);
+
+	/* unbind gadget driver */
+	driver->unbind(&dev->gadget);
+	dev->gadget.dev.driver = NULL;
+	dev->driver = NULL;
+
+	device_remove_file(&dev->pdev->dev, &dev_attr_function);
+
+	INFO(dev, "unregistered driver '%s'\n", driver->driver.name);
+	DBG(dev, "<--- %s()\n", __func__);
+	return 0;
+}
+EXPORT_SYMBOL(usb_gadget_unregister_driver);
+
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * setup tripwire is used as a semaphore to ensure that the setup data
+ * payload is extracted from a dQH without being corrupted
+ */
+static void setup_tripwire(struct langwell_udc *dev)
+{
+	u32			usbcmd,
+				endptsetupstat;
+	unsigned long		timeout;
+	struct langwell_dqh	*dqh;
+
+	VDBG(dev, "---> %s()\n", __func__);
+
+	/* ep0 OUT dQH */
+	dqh = &dev->ep_dqh[EP_DIR_OUT];
+
+	/* Write-Clear endptsetupstat */
+	endptsetupstat = readl(&dev->op_regs->endptsetupstat);
+	writel(endptsetupstat, &dev->op_regs->endptsetupstat);
+
+	/* wait until endptsetupstat is cleared */
+	timeout = jiffies + SETUPSTAT_TIMEOUT;
+	while (readl(&dev->op_regs->endptsetupstat)) {
+		if (time_after(jiffies, timeout)) {
+			ERROR(dev, "setup_tripwire timeout\n");
+			break;
+		}
+		cpu_relax();
+	}
+
+	/* hardware clears SUTW if a new setup packet arrives; retry the copy */
+	do {
+		/* set setup tripwire bit */
+		usbcmd = readl(&dev->op_regs->usbcmd);
+		writel(usbcmd | CMD_SUTW, &dev->op_regs->usbcmd);
+
+		/* copy the setup packet to local buffer */
+		memcpy(&dev->local_setup_buff, &dqh->dqh_setup, 8);
+	} while (!(readl(&dev->op_regs->usbcmd) & CMD_SUTW));
+
+	/* Write-Clear setup tripwire bit */
+	usbcmd = readl(&dev->op_regs->usbcmd);
+	writel(usbcmd & ~CMD_SUTW, &dev->op_regs->usbcmd);
+
+	VDBG(dev, "<--- %s()\n", __func__);
+}
+
+
+/* protocol ep0 stall, will automatically be cleared on new transaction */
+static void ep0_stall(struct langwell_udc *dev)
+{
+	u32	endptctrl;
+
+	VDBG(dev, "---> %s()\n", __func__);
+
+	/* set TX and RX to stall */
+	endptctrl = readl(&dev->op_regs->endptctrl[0]);
+	endptctrl |= EPCTRL_TXS | EPCTRL_RXS;
+	writel(endptctrl, &dev->op_regs->endptctrl[0]);
+
+	/* update ep0 state */
+	dev->ep0_state = WAIT_FOR_SETUP;
+	dev->ep0_dir = USB_DIR_OUT;
+
+	VDBG(dev, "<--- %s()\n", __func__);
+}
+
+
+/* PRIME a status phase for ep0 */
+static int prime_status_phase(struct langwell_udc *dev, int dir)
+{
+	struct langwell_request	*req;
+	struct langwell_ep	*ep;
+	int			status = 0;
+
+	VDBG(dev, "---> %s()\n", __func__);
+
+	if (dir == EP_DIR_IN)
+		dev->ep0_dir = USB_DIR_IN;
+	else
+		dev->ep0_dir = USB_DIR_OUT;
+
+	ep = &dev->ep[0];
+	dev->ep0_state = WAIT_FOR_OUT_STATUS;
+
+	req = dev->status_req;
+
+	req->ep = ep;
+	req->req.length = 0;
+	req->req.status = -EINPROGRESS;
+	req->req.actual = 0;
+	req->req.complete = NULL;
+	req->dtd_count = 0;
+
+	if (!req_to_dtd(req))
+		status = queue_dtd(ep, req);
+	else
+		return -ENOMEM;
+
+	if (status)
+		ERROR(dev, "can't queue ep0 status request\n");
+
+	list_add_tail(&req->queue, &ep->queue);
+
+	VDBG(dev, "<--- %s()\n", __func__);
+	return status;
+}
+
+
+/* SET_ADDRESS request routine */
+static void set_address(struct langwell_udc *dev, u16 value,
+		u16 index, u16 length)
+{
+	VDBG(dev, "---> %s()\n", __func__);
+
+	/* save the new address to device struct */
+	dev->dev_addr = (u8) value;
+	VDBG(dev, "dev->dev_addr = %d\n", dev->dev_addr);
+
+	/* update usb state */
+	dev->usb_state = USB_STATE_ADDRESS;
+
+	/* STATUS phase */
+	if (prime_status_phase(dev, EP_DIR_IN))
+		ep0_stall(dev);
+
+	VDBG(dev, "<--- %s()\n", __func__);
+}
+
+
+/* return endpoint by windex */
+static struct langwell_ep *get_ep_by_windex(struct langwell_udc *dev,
+		u16 wIndex)
+{
+	struct langwell_ep		*ep;
+	VDBG(dev, "---> %s()\n", __func__);
+
+	if ((wIndex & USB_ENDPOINT_NUMBER_MASK) == 0)
+		return &dev->ep[0];
+
+	list_for_each_entry(ep, &dev->gadget.ep_list, ep.ep_list) {
+		u8	bEndpointAddress;
+		if (!ep->desc)
+			continue;
+
+		bEndpointAddress = ep->desc->bEndpointAddress;
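+		/* skip endpoints whose direction bit does not match wIndex */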
+		if ((wIndex ^ bEndpointAddress) & USB_DIR_IN)
+			continue;
+
+		if ((wIndex & USB_ENDPOINT_NUMBER_MASK)
+			== (bEndpointAddress & USB_ENDPOINT_NUMBER_MASK))
+			return ep;
+	}
+
+	VDBG(dev, "<--- %s()\n", __func__);
+	return NULL;
+}
+
+
+/* return whether endpoint is stalled, 0: not stalled; 1: stalled */
+static int ep_is_stall(struct langwell_ep *ep)
+{
+	struct langwell_udc	*dev = ep->dev;
+	u32			endptctrl;
+	int			retval;
+
+	VDBG(dev, "---> %s()\n", __func__);
+
+	endptctrl = readl(&dev->op_regs->endptctrl[ep->ep_num]);
+	if (is_in(ep))
+		retval = endptctrl & EPCTRL_TXS ? 1 : 0;
+	else
+		retval = endptctrl & EPCTRL_RXS ? 1 : 0;
+
+	VDBG(dev, "<--- %s()\n", __func__);
+	return retval;
+}
+
+
+/* GET_STATUS request routine */
+static void get_status(struct langwell_udc *dev, u8 request_type, u16 value,
+		u16 index, u16 length)
+{
+	struct langwell_request	*req;
+	struct langwell_ep	*ep;
+	u16	status_data = 0;	/* 16-bit status data, CPU byte order */
+	int	status = 0;
+
+	VDBG(dev, "---> %s()\n", __func__);
+
+	ep = &dev->ep[0];
+
+	if ((request_type & USB_RECIP_MASK) == USB_RECIP_DEVICE) {
+		/* get device status */
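+		/* D0: self powered, D1: remote wakeup (USB 2.0 section 9.4.5) */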
+		status_data = 1 << USB_DEVICE_SELF_POWERED;
+		status_data |= dev->remote_wakeup << USB_DEVICE_REMOTE_WAKEUP;
+	} else if ((request_type & USB_RECIP_MASK) == USB_RECIP_INTERFACE) {
+		/* get interface status */
+		status_data = 0;
+	} else if ((request_type & USB_RECIP_MASK) == USB_RECIP_ENDPOINT) {
+		/* get endpoint status */
+		struct langwell_ep	*epn;
+		epn = get_ep_by_windex(dev, index);
+		/* stall if endpoint doesn't exist */
+		if (!epn)
+			goto stall;
+
+		status_data = ep_is_stall(epn) << USB_ENDPOINT_HALT;
+	}
+
+	dev->ep0_dir = USB_DIR_IN;
+
+	/* borrow the per device status_req */
+	req = dev->status_req;
+
+	/* fill in the request structure */
+	*((u16 *) req->req.buf) = cpu_to_le16(status_data);
+	req->ep = ep;
+	req->req.length = 2;
+	req->req.status = -EINPROGRESS;
+	req->req.actual = 0;
+	req->req.complete = NULL;
+	req->dtd_count = 0;
+
+	/* prime the data phase */
+	if (!req_to_dtd(req))
+		status = queue_dtd(ep, req);
+	else			/* no mem */
+		goto stall;
+
+	if (status) {
+		ERROR(dev, "response error on GET_STATUS request\n");
+		goto stall;
+	}
+
+	list_add_tail(&req->queue, &ep->queue);
+	dev->ep0_state = DATA_STATE_XMIT;
+
+	VDBG(dev, "<--- %s()\n", __func__);
+	return;
+stall:
+	ep0_stall(dev);
+	VDBG(dev, "<--- %s()\n", __func__);
+}
+
+
+/* setup packet interrupt handler */
+static void handle_setup_packet(struct langwell_udc *dev,
+		struct usb_ctrlrequest *setup)
+{
+	u16	wValue = le16_to_cpu(setup->wValue);
+	u16	wIndex = le16_to_cpu(setup->wIndex);
+	u16	wLength = le16_to_cpu(setup->wLength);
+
+	VDBG(dev, "---> %s()\n", __func__);
+
+	/* ep0 fifo flush */
+	nuke(&dev->ep[0], -ESHUTDOWN);
+
+	DBG(dev, "SETUP %02x.%02x v%04x i%04x l%04x\n",
+			setup->bRequestType, setup->bRequest,
+			wValue, wIndex, wLength);
+
+	/* RNDIS gadget delegate */
+	if ((setup->bRequestType == 0x21) && (setup->bRequest == 0x00)) {
+		/* USB_CDC_SEND_ENCAPSULATED_COMMAND */
+		goto delegate;
+	}
+
+	/* RNDIS gadget delegate */
+	if ((setup->bRequestType == 0xa1) && (setup->bRequest == 0x01)) {
+		/* USB_CDC_GET_ENCAPSULATED_RESPONSE */
+		goto delegate;
+	}
+
+	/* We process some standard setup requests here */
+	switch (setup->bRequest) {
+	case USB_REQ_GET_STATUS:
+		DBG(dev, "SETUP: USB_REQ_GET_STATUS\n");
+		/* get status, DATA and STATUS phase */
+		if ((setup->bRequestType & (USB_DIR_IN | USB_TYPE_MASK))
+					!= (USB_DIR_IN | USB_TYPE_STANDARD))
+			break;
+		get_status(dev, setup->bRequestType, wValue, wIndex, wLength);
+		goto end;
+
+	case USB_REQ_SET_ADDRESS:
+		DBG(dev, "SETUP: USB_REQ_SET_ADDRESS\n");
+		/* STATUS phase */
+		if (setup->bRequestType != (USB_DIR_OUT | USB_TYPE_STANDARD
+						| USB_RECIP_DEVICE))
+			break;
+		set_address(dev, wValue, wIndex, wLength);
+		goto end;
+
+	case USB_REQ_CLEAR_FEATURE:
+	case USB_REQ_SET_FEATURE:
+		/* STATUS phase */
+	{
+		int rc = -EOPNOTSUPP;
+		if (setup->bRequest == USB_REQ_SET_FEATURE)
+			DBG(dev, "SETUP: USB_REQ_SET_FEATURE\n");
+		else if (setup->bRequest == USB_REQ_CLEAR_FEATURE)
+			DBG(dev, "SETUP: USB_REQ_CLEAR_FEATURE\n");
+
+		if ((setup->bRequestType & (USB_RECIP_MASK | USB_TYPE_MASK))
+				== (USB_RECIP_ENDPOINT | USB_TYPE_STANDARD)) {
+			struct langwell_ep	*epn;
+			epn = get_ep_by_windex(dev, wIndex);
+			/* stall if endpoint doesn't exist */
+			if (!epn) {
+				ep0_stall(dev);
+				goto end;
+			}
+
+			if (wValue != 0 || wLength != 0
+					|| epn->ep_num > dev->ep_max)
+				break;
+
+			spin_unlock(&dev->lock);
+			rc = langwell_ep_set_halt(&epn->ep,
+					(setup->bRequest == USB_REQ_SET_FEATURE)
+						? 1 : 0);
+			spin_lock(&dev->lock);
+
+		} else if ((setup->bRequestType & (USB_RECIP_MASK
+				| USB_TYPE_MASK)) == (USB_RECIP_DEVICE
+				| USB_TYPE_STANDARD)) {
+			if (!gadget_is_otg(&dev->gadget))
+				break;
+			else if (setup->bRequest == USB_DEVICE_B_HNP_ENABLE) {
+				dev->gadget.b_hnp_enable = 1;
+#ifdef	OTG_TRANSCEIVER
+				if (!dev->lotg->otg.default_a)
+					dev->lotg->hsm.b_hnp_enable = 1;
+#endif
+			} else if (setup->bRequest == USB_DEVICE_A_HNP_SUPPORT)
+				dev->gadget.a_hnp_support = 1;
+			else if (setup->bRequest ==
+					USB_DEVICE_A_ALT_HNP_SUPPORT)
+				dev->gadget.a_alt_hnp_support = 1;
+			else
+				break;
+			rc = 0;
+		} else
+			break;
+
+		if (rc == 0) {
+			if (prime_status_phase(dev, EP_DIR_IN))
+				ep0_stall(dev);
+		}
+		goto end;
+	}
+
+	case USB_REQ_GET_DESCRIPTOR:
+		DBG(dev, "SETUP: USB_REQ_GET_DESCRIPTOR\n");
+		goto delegate;
+
+	case USB_REQ_SET_DESCRIPTOR:
+		DBG(dev, "SETUP: USB_REQ_SET_DESCRIPTOR unsupported\n");
+		goto delegate;
+
+	case USB_REQ_GET_CONFIGURATION:
+		DBG(dev, "SETUP: USB_REQ_GET_CONFIGURATION\n");
+		goto delegate;
+
+	case USB_REQ_SET_CONFIGURATION:
+		DBG(dev, "SETUP: USB_REQ_SET_CONFIGURATION\n");
+		goto delegate;
+
+	case USB_REQ_GET_INTERFACE:
+		DBG(dev, "SETUP: USB_REQ_GET_INTERFACE\n");
+		goto delegate;
+
+	case USB_REQ_SET_INTERFACE:
+		DBG(dev, "SETUP: USB_REQ_SET_INTERFACE\n");
+		goto delegate;
+
+	case USB_REQ_SYNCH_FRAME:
+		DBG(dev, "SETUP: USB_REQ_SYNCH_FRAME unsupported\n");
+		goto delegate;
+
+	default:
+		/* delegate USB standard requests to the gadget driver */
+		goto delegate;
+delegate:
+		/* USB requests handled by gadget */
+		if (wLength) {
+			/* DATA phase from gadget, STATUS phase from udc */
+			dev->ep0_dir = (setup->bRequestType & USB_DIR_IN)
+					?  USB_DIR_IN : USB_DIR_OUT;
+			VDBG(dev, "dev->ep0_dir = 0x%x, wLength = %d\n",
+					dev->ep0_dir, wLength);
+			spin_unlock(&dev->lock);
+			if (dev->driver->setup(&dev->gadget,
+					&dev->local_setup_buff) < 0)
+				ep0_stall(dev);
+			spin_lock(&dev->lock);
+			dev->ep0_state = (setup->bRequestType & USB_DIR_IN)
+					?  DATA_STATE_XMIT : DATA_STATE_RECV;
+		} else {
+			/* no DATA phase, IN STATUS phase from gadget */
+			dev->ep0_dir = USB_DIR_IN;
+			VDBG(dev, "dev->ep0_dir = 0x%x, wLength = %d\n",
+					dev->ep0_dir, wLength);
+			spin_unlock(&dev->lock);
+			if (dev->driver->setup(&dev->gadget,
+					&dev->local_setup_buff) < 0)
+				ep0_stall(dev);
+			spin_lock(&dev->lock);
+			dev->ep0_state = WAIT_FOR_OUT_STATUS;
+		}
+		break;
+	}
+end:
+	VDBG(dev, "<--- %s()\n", __func__);
+	return;
+}
+
+
+/* transfer completion: process the endpoint request and free the completed
+ * dTDs for this request
+ */
+static int process_ep_req(struct langwell_udc *dev, int index,
+		struct langwell_request *curr_req)
+{
+	struct langwell_dtd	*curr_dtd;
+	struct langwell_dqh	*curr_dqh;
+	int			td_complete, actual, remaining_length;
+	int			i, dir;
+	u8			dtd_status = 0;
+	int			retval = 0;
+
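+	/* dQH index convention: even = OUT (RX) endpoint, odd = IN (TX) */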
+	curr_dqh = &dev->ep_dqh[index];
+	dir = index % 2;
+
+	curr_dtd = curr_req->head;
+	td_complete = 0;
+	actual = curr_req->req.length;
+
+	VDBG(dev, "---> %s()\n", __func__);
+
+	for (i = 0; i < curr_req->dtd_count; i++) {
+		remaining_length = le16_to_cpu(curr_dtd->dtd_total);
+		actual -= remaining_length;
+
+		/* dTD command execution status */
+		dtd_status = curr_dtd->dtd_status;
+
+		if (!dtd_status) {
+			/* transfers completed successfully */
+			if (!remaining_length) {
+				td_complete++;
+				VDBG(dev, "dTD transmitted successfully\n");
+			} else {
+				if (dir) {
+					VDBG(dev, "TX dTD remains data\n");
+					retval = -EPROTO;
+					break;
+
+				} else {
+					td_complete++;
+					break;
+				}
+			}
+		} else {
+			/* transfers completed with errors */
+			if (dtd_status & DTD_STS_ACTIVE) {
+				DBG(dev, "request not completed\n");
+				retval = 1;
+				return retval;
+			} else if (dtd_status & DTD_STS_HALTED) {
+				ERROR(dev, "dTD error %08x dQH[%d]\n",
+						dtd_status, index);
+				/* clear the errors and halt condition */
+				curr_dqh->dtd_status = 0;
+				retval = -EPIPE;
+				break;
+			} else if (dtd_status & DTD_STS_DBE) {
+				DBG(dev, "data buffer (overflow) error\n");
+				retval = -EPROTO;
+				break;
+			} else if (dtd_status & DTD_STS_TRE) {
+				DBG(dev, "transaction(ISO) error\n");
+				retval = -EILSEQ;
+				break;
+			} else
+				ERROR(dev, "unknown error (0x%x)!\n",
+						dtd_status);
+		}
+
+		if (i != curr_req->dtd_count - 1)
+			curr_dtd = (struct langwell_dtd *)
+				curr_dtd->next_dtd_virt;
+	}
+
+	if (retval)
+		return retval;
+
+	curr_req->req.actual = actual;
+
+	VDBG(dev, "<--- %s()\n", __func__);
+	return 0;
+}
+
+
+/* complete DATA or STATUS phase of ep0; prime the status phase if needed */
+static void ep0_req_complete(struct langwell_udc *dev,
+		struct langwell_ep *ep0, struct langwell_request *req)
+{
+	u32	new_addr;
+	VDBG(dev, "---> %s()\n", __func__);
+
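+	/*
+	 * Deferred SET_ADDRESS: set_address() only latches dev_addr; the
+	 * DEVICEADDR register is written here, once the status stage of
+	 * that request has completed.
+	 */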
+	if (dev->usb_state == USB_STATE_ADDRESS) {
+		/* set the new address */
+		new_addr = (u32)dev->dev_addr;
+		writel(new_addr << USBADR_SHIFT, &dev->op_regs->deviceaddr);
+
+		new_addr = USBADR(readl(&dev->op_regs->deviceaddr));
+		VDBG(dev, "new_addr = %d\n", new_addr);
+	}
+
+	done(ep0, req, 0);
+
+	switch (dev->ep0_state) {
+	case DATA_STATE_XMIT:
+		/* receive status phase */
+		if (prime_status_phase(dev, EP_DIR_OUT))
+			ep0_stall(dev);
+		break;
+	case DATA_STATE_RECV:
+		/* send status phase */
+		if (prime_status_phase(dev, EP_DIR_IN))
+			ep0_stall(dev);
+		break;
+	case WAIT_FOR_OUT_STATUS:
+		dev->ep0_state = WAIT_FOR_SETUP;
+		break;
+	case WAIT_FOR_SETUP:
+		ERROR(dev, "unexpected ep0 packets\n");
+		break;
+	default:
+		ep0_stall(dev);
+		break;
+	}
+
+	VDBG(dev, "<--- %s()\n", __func__);
+}
+
+
+/* USB transfer completion interrupt */
+static void handle_trans_complete(struct langwell_udc *dev)
+{
+	u32			complete_bits;
+	int			i, ep_num, dir, bit_mask, status;
+	struct langwell_ep	*epn;
+	struct langwell_request	*curr_req, *temp_req;
+
+	VDBG(dev, "---> %s()\n", __func__);
+
+	complete_bits = readl(&dev->op_regs->endptcomplete);
+	VDBG(dev, "endptcomplete register: 0x%08x\n", complete_bits);
+
+	/* Write-Clear the bits in endptcomplete register */
+	writel(complete_bits, &dev->op_regs->endptcomplete);
+
+	if (!complete_bits) {
+		DBG(dev, "complete_bits = 0\n");
+		goto done;
+	}
+
+	for (i = 0; i < dev->ep_max; i++) {
+		ep_num = i / 2;
+		dir = i % 2;
+
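+		/*
+		 * ENDPTCOMPLETE flags OUT (RX) endpoints in bits 15:0 and
+		 * IN (TX) endpoints in bits 31:16, hence the shift by 16
+		 * for the IN direction below.
+		 */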
+		bit_mask = 1 << (ep_num + 16 * dir);
+
+		if (!(complete_bits & bit_mask))
+			continue;
+
+		/* ep0 */
+		if (i == 1)
+			epn = &dev->ep[0];
+		else
+			epn = &dev->ep[i];
+
+		if (epn->name == NULL) {
+			WARNING(dev, "invalid endpoint\n");
+			continue;
+		}
+
+		if (i < 2)
+			/* ep0 in and out */
+			DBG(dev, "%s-%s transfer completed\n",
+					epn->name,
+					is_in(epn) ? "in" : "out");
+		else
+			DBG(dev, "%s transfer completed\n", epn->name);
+
+		/* process the req queue until an incomplete request is found */
+		list_for_each_entry_safe(curr_req, temp_req,
+				&epn->queue, queue) {
+			status = process_ep_req(dev, i, curr_req);
+			VDBG(dev, "%s req status: %d\n", epn->name, status);
+
+			if (status)
+				break;
+
+			/* write back status to req */
+			curr_req->req.status = status;
+
+			/* ep0 request completion */
+			if (ep_num == 0) {
+				ep0_req_complete(dev, epn, curr_req);
+				break;
+			} else {
+				done(epn, curr_req, status);
+			}
+		}
+	}
+done:
+	VDBG(dev, "<--- %s()\n", __func__);
+	return;
+}
+
+
+/* port change detect interrupt handler */
+static void handle_port_change(struct langwell_udc *dev)
+{
+	u32	portsc1, devlc;
+	u32	speed;
+
+	VDBG(dev, "---> %s()\n", __func__);
+
+	if (dev->bus_reset)
+		dev->bus_reset = 0;
+
+	portsc1 = readl(&dev->op_regs->portsc1);
+	devlc = readl(&dev->op_regs->devlc);
+	VDBG(dev, "portsc1 = 0x%08x, devlc = 0x%08x\n",
+			portsc1, devlc);
+
+	/* bus reset is finished */
+	if (!(portsc1 & PORTS_PR)) {
+		/* get the speed */
+		speed = LPM_PSPD(devlc);
+		switch (speed) {
+		case LPM_SPEED_HIGH:
+			dev->gadget.speed = USB_SPEED_HIGH;
+			break;
+		case LPM_SPEED_FULL:
+			dev->gadget.speed = USB_SPEED_FULL;
+			break;
+		case LPM_SPEED_LOW:
+			dev->gadget.speed = USB_SPEED_LOW;
+			break;
+		default:
+			dev->gadget.speed = USB_SPEED_UNKNOWN;
+			break;
+		}
+		VDBG(dev, "speed = %d, dev->gadget.speed = %d\n",
+				speed, dev->gadget.speed);
+	}
+
+	/* LPM L0 to L1 */
+	if (dev->lpm && dev->lpm_state == LPM_L0)
+		if (portsc1 & PORTS_SUSP && portsc1 & PORTS_SLP) {
+			INFO(dev, "LPM L0 to L1\n");
+			dev->lpm_state = LPM_L1;
+		}
+
+	/* LPM L1 to L0, force resume or remote wakeup finished */
+	if (dev->lpm && dev->lpm_state == LPM_L1)
+		if (!(portsc1 & PORTS_SUSP)) {
+			if (portsc1 & PORTS_SLP)
+				INFO(dev, "LPM L1 to L0, force resume\n");
+			else
+				INFO(dev, "LPM L1 to L0, remote wakeup\n");
+
+			dev->lpm_state = LPM_L0;
+		}
+
+	/* update USB state */
+	if (!dev->resume_state)
+		dev->usb_state = USB_STATE_DEFAULT;
+
+	VDBG(dev, "<--- %s()\n", __func__);
+}
+
+
+/* USB reset interrupt handler */
+static void handle_usb_reset(struct langwell_udc *dev)
+{
+	u32		deviceaddr,
+			endptsetupstat,
+			endptcomplete;
+	unsigned long	timeout;
+
+	VDBG(dev, "---> %s()\n", __func__);
+
+	/* Write-Clear the device address */
+	deviceaddr = readl(&dev->op_regs->deviceaddr);
+	writel(deviceaddr & ~USBADR_MASK, &dev->op_regs->deviceaddr);
+
+	dev->dev_addr = 0;
+
+	/* clear usb state */
+	dev->resume_state = 0;
+
+	/* LPM L1 to L0, reset */
+	if (dev->lpm)
+		dev->lpm_state = LPM_L0;
+
+	dev->ep0_dir = USB_DIR_OUT;
+	dev->ep0_state = WAIT_FOR_SETUP;
+	dev->remote_wakeup = 0;		/* default to 0 on reset */
+	dev->gadget.b_hnp_enable = 0;
+	dev->gadget.a_hnp_support = 0;
+	dev->gadget.a_alt_hnp_support = 0;
+
+	/* Write-Clear all the setup token semaphores */
+	endptsetupstat = readl(&dev->op_regs->endptsetupstat);
+	writel(endptsetupstat, &dev->op_regs->endptsetupstat);
+
+	/* Write-Clear all the endpoint complete status bits */
+	endptcomplete = readl(&dev->op_regs->endptcomplete);
+	writel(endptcomplete, &dev->op_regs->endptcomplete);
+
+	/* wait until all endptprime bits cleared */
+	timeout = jiffies + PRIME_TIMEOUT;
+	while (readl(&dev->op_regs->endptprime)) {
+		if (time_after(jiffies, timeout)) {
+			ERROR(dev, "USB reset timeout\n");
+			break;
+		}
+		cpu_relax();
+	}
+
+	/* write 1s to endptflush register to clear any primed buffers */
+	writel((u32) ~0, &dev->op_regs->endptflush);
+
+	if (readl(&dev->op_regs->portsc1) & PORTS_PR) {
+		VDBG(dev, "USB bus reset\n");
+		/* bus is resetting */
+		dev->bus_reset = 1;
+
+		/* reset all the queues, stop all USB activities */
+		stop_activity(dev, dev->driver);
+		dev->usb_state = USB_STATE_DEFAULT;
+	} else {
+		VDBG(dev, "device controller reset\n");
+		/* controller reset */
+		langwell_udc_reset(dev);
+
+		/* reset all the queues, stop all USB activities */
+		stop_activity(dev, dev->driver);
+
+		/* reset ep0 dQH and endptctrl */
+		ep0_reset(dev);
+
+		/* enable interrupt and set controller to run state */
+		langwell_udc_start(dev);
+
+		dev->usb_state = USB_STATE_ATTACHED;
+	}
+
+#ifdef	OTG_TRANSCEIVER
+	/* per USB OTG spec section 6.6.2.3, b_hnp_enable is cleared */
+	if (!dev->lotg->otg.default_a)
+		dev->lotg->hsm.b_hnp_enable = 0;
+#endif
+
+	VDBG(dev, "<--- %s()\n", __func__);
+}
+
+
+/* USB bus suspend/resume interrupt */
+static void handle_bus_suspend(struct langwell_udc *dev)
+{
+	u32		devlc;
+	DBG(dev, "---> %s()\n", __func__);
+
+	dev->resume_state = dev->usb_state;
+	dev->usb_state = USB_STATE_SUSPENDED;
+
+#ifdef	OTG_TRANSCEIVER
+	if (dev->lotg->otg.default_a) {
+		if (dev->lotg->hsm.b_bus_suspend_vld == 1) {
+			dev->lotg->hsm.b_bus_suspend = 1;
+			/* notify transceiver the state changes */
+			if (spin_trylock(&dev->lotg->wq_lock)) {
+				langwell_update_transceiver();
+				spin_unlock(&dev->lotg->wq_lock);
+			}
+		}
+		dev->lotg->hsm.b_bus_suspend_vld++;
+	} else {
+		if (!dev->lotg->hsm.a_bus_suspend) {
+			dev->lotg->hsm.a_bus_suspend = 1;
+			/* notify transceiver the state changes */
+			if (spin_trylock(&dev->lotg->wq_lock)) {
+				langwell_update_transceiver();
+				spin_unlock(&dev->lotg->wq_lock);
+			}
+		}
+	}
+#endif
+
+	/* report suspend to the driver */
+	if (dev->driver) {
+		if (dev->driver->suspend) {
+			spin_unlock(&dev->lock);
+			dev->driver->suspend(&dev->gadget);
+			spin_lock(&dev->lock);
+			DBG(dev, "suspend %s\n", dev->driver->driver.name);
+		}
+	}
+
+	/* enter PHY low power suspend */
+	devlc = readl(&dev->op_regs->devlc);
+	VDBG(dev, "devlc = 0x%08x\n", devlc);
+	devlc |= LPM_PHCD;
+	writel(devlc, &dev->op_regs->devlc);
+
+	DBG(dev, "<--- %s()\n", __func__);
+}
+
+
+static void handle_bus_resume(struct langwell_udc *dev)
+{
+	u32		devlc;
+	DBG(dev, "---> %s()\n", __func__);
+
+	dev->usb_state = dev->resume_state;
+	dev->resume_state = 0;
+
+	/* exit PHY low power suspend */
+	devlc = readl(&dev->op_regs->devlc);
+	VDBG(dev, "devlc = 0x%08x\n", devlc);
+	devlc &= ~LPM_PHCD;
+	writel(devlc, &dev->op_regs->devlc);
+
+#ifdef	OTG_TRANSCEIVER
+	if (dev->lotg->otg.default_a == 0)
+		dev->lotg->hsm.a_bus_suspend = 0;
+#endif
+
+	/* report resume to the driver */
+	if (dev->driver) {
+		if (dev->driver->resume) {
+			spin_unlock(&dev->lock);
+			dev->driver->resume(&dev->gadget);
+			spin_lock(&dev->lock);
+			DBG(dev, "resume %s\n", dev->driver->driver.name);
+		}
+	}
+
+	DBG(dev, "<--- %s()\n", __func__);
+}
+
+
+/* USB device controller interrupt handler */
+static irqreturn_t langwell_irq(int irq, void *_dev)
+{
+	struct langwell_udc	*dev = _dev;
+	u32			usbsts,
+				usbintr,
+				irq_sts,
+				portsc1;
+
+	VDBG(dev, "---> %s()\n", __func__);
+
+	if (dev->stopped) {
+		VDBG(dev, "handle IRQ_NONE\n");
+		VDBG(dev, "<--- %s()\n", __func__);
+		return IRQ_NONE;
+	}
+
+	spin_lock(&dev->lock);
+
+	/* USB status */
+	usbsts = readl(&dev->op_regs->usbsts);
+
+	/* USB interrupt enable */
+	usbintr = readl(&dev->op_regs->usbintr);
+
+	irq_sts = usbsts & usbintr;
+	VDBG(dev, "usbsts = 0x%08x, usbintr = 0x%08x, irq_sts = 0x%08x\n",
+			usbsts, usbintr, irq_sts);
+
+	if (!irq_sts) {
+		VDBG(dev, "handle IRQ_NONE\n");
+		VDBG(dev, "<--- %s()\n", __func__);
+		spin_unlock(&dev->lock);
+		return IRQ_NONE;
+	}
+
+	/* Write-Clear interrupt status bits */
+	writel(irq_sts, &dev->op_regs->usbsts);
+
+	/* resume from suspend */
+	portsc1 = readl(&dev->op_regs->portsc1);
+	if (dev->usb_state == USB_STATE_SUSPENDED)
+		if (!(portsc1 & PORTS_SUSP))
+			handle_bus_resume(dev);
+
+	/* USB interrupt */
+	if (irq_sts & STS_UI) {
+		VDBG(dev, "USB interrupt\n");
+
+		/* setup packet received from ep0 */
+		if (readl(&dev->op_regs->endptsetupstat)
+				& EP0SETUPSTAT_MASK) {
+			VDBG(dev, "USB SETUP packet received interrupt\n");
+			/* setup tripwire semaphore */
+			setup_tripwire(dev);
+			handle_setup_packet(dev, &dev->local_setup_buff);
+		}
+
+		/* USB transfer completion */
+		if (readl(&dev->op_regs->endptcomplete)) {
+			VDBG(dev, "USB transfer completion interrupt\n");
+			handle_trans_complete(dev);
+		}
+	}
+
+	/* SOF received interrupt (for ISO transfer) */
+	if (irq_sts & STS_SRI) {
+		/* FIXME */
+		/* VDBG(dev, "SOF received interrupt\n"); */
+	}
+
+	/* port change detect interrupt */
+	if (irq_sts & STS_PCI) {
+		VDBG(dev, "port change detect interrupt\n");
+		handle_port_change(dev);
+	}
+
+	/* suspend interrupt */
+	if (irq_sts & STS_SLI) {
+		VDBG(dev, "suspend interrupt\n");
+		handle_bus_suspend(dev);
+	}
+
+	/* USB reset interrupt */
+	if (irq_sts & STS_URI) {
+		VDBG(dev, "USB reset interrupt\n");
+		handle_usb_reset(dev);
+	}
+
+	/* USB error or system error interrupt */
+	if (irq_sts & (STS_UEI | STS_SEI)) {
+		/* FIXME */
+		WARNING(dev, "error IRQ, irq_sts: %x\n", irq_sts);
+	}
+
+	spin_unlock(&dev->lock);
+
+	VDBG(dev, "<--- %s()\n", __func__);
+	return IRQ_HANDLED;
+}
+
+
+/*-------------------------------------------------------------------------*/
+
+/* release device structure */
+static void gadget_release(struct device *_dev)
+{
+	struct langwell_udc	*dev = the_controller;
+
+	DBG(dev, "---> %s()\n", __func__);
+
+	complete(dev->done);
+
+	DBG(dev, "<--- %s()\n", __func__);
+	kfree(dev);
+}
+
+
+/* tear down the binding between this driver and the pci device */
+static void langwell_udc_remove(struct pci_dev *pdev)
+{
+	struct langwell_udc	*dev = the_controller;
+
+	DECLARE_COMPLETION(done);
+
+	BUG_ON(dev->driver);
+	DBG(dev, "---> %s()\n", __func__);
+
+	dev->done = &done;
+
+	/* free memory allocated in probe */
+	if (dev->dtd_pool)
+		dma_pool_destroy(dev->dtd_pool);
+
+	if (dev->status_req) {
+		kfree(dev->status_req->req.buf);
+		kfree(dev->status_req);
+	}
+
+	if (dev->ep_dqh)
+		dma_free_coherent(&pdev->dev, dev->ep_dqh_size,
+			dev->ep_dqh, dev->ep_dqh_dma);
+
+	kfree(dev->ep);
+
+	/* disable IRQ handler */
+	if (dev->got_irq)
+		free_irq(pdev->irq, dev);
+
+#ifndef	OTG_TRANSCEIVER
+	if (dev->cap_regs)
+		iounmap(dev->cap_regs);
+
+	if (dev->region)
+		release_mem_region(pci_resource_start(pdev, 0),
+				pci_resource_len(pdev, 0));
+
+	if (dev->enabled)
+		pci_disable_device(pdev);
+#else
+	if (dev->transceiver) {
+		otg_put_transceiver(dev->transceiver);
+		dev->transceiver = NULL;
+		dev->lotg = NULL;
+	}
+#endif
+
+	dev->cap_regs = NULL;
+
+	INFO(dev, "unbind\n");
+	DBG(dev, "<--- %s()\n", __func__);
+
+	device_unregister(&dev->gadget.dev);
+	device_remove_file(&pdev->dev, &dev_attr_langwell_udc);
+
+#ifndef	OTG_TRANSCEIVER
+	pci_set_drvdata(pdev, NULL);
+#endif
+
+	/* wait until gadget_release() has finished freeing dev */
+	wait_for_completion(&done);
+
+	the_controller = NULL;
+}
+
+
+/*
+ * wrap this driver around the specified device, but
+ * don't respond over USB until a gadget driver binds to us.
+ */
+static int langwell_udc_probe(struct pci_dev *pdev,
+		const struct pci_device_id *id)
+{
+	struct langwell_udc	*dev;
+#ifndef	OTG_TRANSCEIVER
+	unsigned long		resource, len;
+#endif
+	void			__iomem *base = NULL;
+	size_t			size;
+	int			retval;
+
+	if (the_controller) {
+		dev_warn(&pdev->dev, "ignoring\n");
+		return -EBUSY;
+	}
+
+	/* alloc, and start init */
+	dev = kzalloc(sizeof *dev, GFP_KERNEL);
+	if (dev == NULL) {
+		retval = -ENOMEM;
+		goto error;
+	}
+
+	/* initialize device spinlock */
+	spin_lock_init(&dev->lock);
+
+	dev->pdev = pdev;
+	DBG(dev, "---> %s()\n", __func__);
+
+#ifdef	OTG_TRANSCEIVER
+	/* PCI device is already enabled by otg_transceiver driver */
+	dev->enabled = 1;
+
+	/* mem region and register base */
+	dev->region = 1;
+	dev->transceiver = otg_get_transceiver();
+	dev->lotg = otg_to_langwell(dev->transceiver);
+	base = dev->lotg->regs;
+#else
+	pci_set_drvdata(pdev, dev);
+
+	/* now all the pci goodies ... */
+	if (pci_enable_device(pdev) < 0) {
+		retval = -ENODEV;
+		goto error;
+	}
+	dev->enabled = 1;
+
+	/* control register: BAR 0 */
+	resource = pci_resource_start(pdev, 0);
+	len = pci_resource_len(pdev, 0);
+	if (!request_mem_region(resource, len, driver_name)) {
+		ERROR(dev, "controller already in use\n");
+		retval = -EBUSY;
+		goto error;
+	}
+	dev->region = 1;
+
+	base = ioremap_nocache(resource, len);
+#endif
+	if (base == NULL) {
+		ERROR(dev, "can't map memory\n");
+		retval = -EFAULT;
+		goto error;
+	}
+
+	dev->cap_regs = (struct langwell_cap_regs __iomem *) base;
+	VDBG(dev, "dev->cap_regs: %p\n", dev->cap_regs);
+	dev->op_regs = (struct langwell_op_regs __iomem *)
+		(base + OP_REG_OFFSET);
+	VDBG(dev, "dev->op_regs: %p\n", dev->op_regs);
+
+	/* irq setup after old hardware is cleaned up */
+	if (!pdev->irq) {
+		ERROR(dev, "No IRQ. Check PCI setup!\n");
+		retval = -ENODEV;
+		goto error;
+	}
+
+#ifndef	OTG_TRANSCEIVER
+	INFO(dev, "irq %d, io mem: 0x%08lx, len: 0x%08lx, pci mem 0x%p\n",
+			pdev->irq, resource, len, base);
+	/* enables bus-mastering for device dev */
+	pci_set_master(pdev);
+
+	if (request_irq(pdev->irq, langwell_irq, IRQF_SHARED,
+				driver_name, dev) != 0) {
+		ERROR(dev, "request interrupt %d failed\n", pdev->irq);
+		retval = -EBUSY;
+		goto error;
+	}
+	dev->got_irq = 1;
+#endif
+
+	/* set stopped bit */
+	dev->stopped = 1;
+
+	/* capabilities and endpoint number */
+	dev->lpm = (readl(&dev->cap_regs->hccparams) & HCC_LEN) ? 1 : 0;
+	dev->dciversion = readw(&dev->cap_regs->dciversion);
+	dev->devcap = (readl(&dev->cap_regs->dccparams) & DEVCAP) ? 1 : 0;
+	VDBG(dev, "dev->lpm: %d\n", dev->lpm);
+	VDBG(dev, "dev->dciversion: 0x%04x\n", dev->dciversion);
+	VDBG(dev, "dccparams: 0x%08x\n", readl(&dev->cap_regs->dccparams));
+	VDBG(dev, "dev->devcap: %d\n", dev->devcap);
+	if (!dev->devcap) {
+		ERROR(dev, "can't support device mode\n");
+		retval = -ENODEV;
+		goto error;
+	}
+
+	/* a pair of endpoints (out/in) for each address */
+	dev->ep_max = DEN(readl(&dev->cap_regs->dccparams)) * 2;
+	VDBG(dev, "dev->ep_max: %d\n", dev->ep_max);
+
+	/* allocate endpoints memory */
+	dev->ep = kzalloc(sizeof(struct langwell_ep) * dev->ep_max,
+			GFP_KERNEL);
+	if (!dev->ep) {
+		ERROR(dev, "allocate endpoints memory failed\n");
+		retval = -ENOMEM;
+		goto error;
+	}
+
+	/* allocate device dQH memory */
+	size = dev->ep_max * sizeof(struct langwell_dqh);
+	VDBG(dev, "orig size = %d\n", size);
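+	/* round the dQH allocation up to a multiple of DQH_ALIGNMENT */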
+	if (size < DQH_ALIGNMENT)
+		size = DQH_ALIGNMENT;
+	else if ((size % DQH_ALIGNMENT) != 0) {
+		size += DQH_ALIGNMENT + 1;
+		size &= ~(DQH_ALIGNMENT - 1);
+	}
+	dev->ep_dqh = dma_alloc_coherent(&pdev->dev, size,
+					&dev->ep_dqh_dma, GFP_KERNEL);
+	if (!dev->ep_dqh) {
+		ERROR(dev, "allocate dQH memory failed\n");
+		retval = -ENOMEM;
+		goto error;
+	}
+	dev->ep_dqh_size = size;
+	VDBG(dev, "ep_dqh_size = %d\n", dev->ep_dqh_size);
+
+	/* initialize ep0 status request structure */
+	dev->status_req = kzalloc(sizeof(struct langwell_request), GFP_KERNEL);
+	if (!dev->status_req) {
+		ERROR(dev, "allocate status_req memory failed\n");
+		retval = -ENOMEM;
+		goto error;
+	}
+	INIT_LIST_HEAD(&dev->status_req->queue);
+
+	/* allocate a small amount of memory to get valid address */
+	dev->status_req->req.buf = kmalloc(8, GFP_KERNEL);
+	dev->status_req->req.dma = virt_to_phys(dev->status_req->req.buf);
+
+	dev->resume_state = USB_STATE_NOTATTACHED;
+	dev->usb_state = USB_STATE_POWERED;
+	dev->ep0_dir = USB_DIR_OUT;
+	dev->remote_wakeup = 0;	/* default to 0 on reset */
+
+#ifndef	OTG_TRANSCEIVER
+	/* reset device controller */
+	langwell_udc_reset(dev);
+#endif
+
+	/* initialize gadget structure */
+	dev->gadget.ops = &langwell_ops;	/* usb_gadget_ops */
+	dev->gadget.ep0 = &dev->ep[0].ep;	/* gadget ep0 */
+	INIT_LIST_HEAD(&dev->gadget.ep_list);	/* ep_list */
+	dev->gadget.speed = USB_SPEED_UNKNOWN;	/* speed */
+	dev->gadget.is_dualspeed = 1;		/* support dual speed */
+#ifdef	OTG_TRANSCEIVER
+	dev->gadget.is_otg = 1;			/* support otg mode */
+#endif
+
+	/* the "gadget" abstracts/virtualizes the controller */
+	dev_set_name(&dev->gadget.dev, "gadget");
+	dev->gadget.dev.parent = &pdev->dev;
+	dev->gadget.dev.dma_mask = pdev->dev.dma_mask;
+	dev->gadget.dev.release = gadget_release;
+	dev->gadget.name = driver_name;		/* gadget name */
+
+	/* controller endpoints reinit */
+	eps_reinit(dev);
+
+#ifndef	OTG_TRANSCEIVER
+	/* reset ep0 dQH and endptctrl */
+	ep0_reset(dev);
+#endif
+
+	/* create dTD dma_pool resource */
+	dev->dtd_pool = dma_pool_create("langwell_dtd",
+			&dev->pdev->dev,
+			sizeof(struct langwell_dtd),
+			DTD_ALIGNMENT,
+			DMA_BOUNDARY);
+
+	if (!dev->dtd_pool) {
+		retval = -ENOMEM;
+		goto error;
+	}
+
+	/* done */
+	INFO(dev, "%s\n", driver_desc);
+	INFO(dev, "irq %d, pci mem %p\n", pdev->irq, base);
+	INFO(dev, "Driver version: " DRIVER_VERSION "\n");
+	INFO(dev, "Support (max) %d endpoints\n", dev->ep_max);
+	INFO(dev, "Device interface version: 0x%04x\n", dev->dciversion);
+	INFO(dev, "Controller mode: %s\n", dev->devcap ? "Device" : "Host");
+	INFO(dev, "Support USB LPM: %s\n", dev->lpm ? "Yes" : "No");
+
+	VDBG(dev, "After langwell_udc_probe(), print all registers:\n");
+#ifdef	VERBOSE
+	print_all_registers(dev);
+#endif
+
+	the_controller = dev;
+
+	retval = device_register(&dev->gadget.dev);
+	if (retval)
+		goto error;
+
+	retval = device_create_file(&pdev->dev, &dev_attr_langwell_udc);
+	if (retval)
+		goto error;
+
+	VDBG(dev, "<--- %s()\n", __func__);
+	return 0;
+
+error:
+	if (dev) {
+		DBG(dev, "<--- %s()\n", __func__);
+		langwell_udc_remove(pdev);
+	}
+
+	return retval;
+}
+
+
+/* device controller suspend */
+static int langwell_udc_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+	struct langwell_udc	*dev = the_controller;
+	u32			devlc;
+
+	DBG(dev, "---> %s()\n", __func__);
+
+	/* disable interrupt and set controller to stop state */
+	langwell_udc_stop(dev);
+
+	/* disable IRQ handler */
+	if (dev->got_irq)
+		free_irq(pdev->irq, dev);
+	dev->got_irq = 0;
+
+
+	/* save PCI state */
+	pci_save_state(pdev);
+
+	/* set device power state */
+	pci_set_power_state(pdev, PCI_D3hot);
+
+	/* enter PHY low power suspend */
+	devlc = readl(&dev->op_regs->devlc);
+	VDBG(dev, "devlc = 0x%08x\n", devlc);
+	devlc |= LPM_PHCD;
+	writel(devlc, &dev->op_regs->devlc);
+
+	DBG(dev, "<--- %s()\n", __func__);
+	return 0;
+}
+
+
+/* device controller resume */
+static int langwell_udc_resume(struct pci_dev *pdev)
+{
+	struct langwell_udc	*dev = the_controller;
+	u32			devlc;
+
+	DBG(dev, "---> %s()\n", __func__);
+
+	/* exit PHY low power suspend */
+	devlc = readl(&dev->op_regs->devlc);
+	VDBG(dev, "devlc = 0x%08x\n", devlc);
+	devlc &= ~LPM_PHCD;
+	writel(devlc, &dev->op_regs->devlc);
+
+	/* set device D0 power state */
+	pci_set_power_state(pdev, PCI_D0);
+
+	/* restore PCI state */
+	pci_restore_state(pdev);
+
+	/* enable IRQ handler */
+	if (request_irq(pdev->irq, langwell_irq, IRQF_SHARED, driver_name, dev)
+			!= 0) {
+		ERROR(dev, "request interrupt %d failed\n", pdev->irq);
+		return -1;
+	}
+	dev->got_irq = 1;
+
+	/* reset and start controller to run state */
+	if (dev->stopped) {
+		/* reset device controller */
+		langwell_udc_reset(dev);
+
+		/* reset ep0 dQH and endptctrl */
+		ep0_reset(dev);
+
+		/* start device if gadget is loaded */
+		if (dev->driver)
+			langwell_udc_start(dev);
+	}
+
+	/* reset USB status */
+	dev->usb_state = USB_STATE_ATTACHED;
+	dev->ep0_state = WAIT_FOR_SETUP;
+	dev->ep0_dir = USB_DIR_OUT;
+
+	DBG(dev, "<--- %s()\n", __func__);
+	return 0;
+}
+
+
+/* pci driver shutdown */
+static void langwell_udc_shutdown(struct pci_dev *pdev)
+{
+	struct langwell_udc	*dev = the_controller;
+	u32			usbmode;
+
+	DBG(dev, "---> %s()\n", __func__);
+
+	/* reset controller mode to IDLE */
+	usbmode = readl(&dev->op_regs->usbmode);
+	DBG(dev, "usbmode = 0x%08x\n", usbmode);
+	usbmode &= (~3 | MODE_IDLE);
+	writel(usbmode, &dev->op_regs->usbmode);
+
+	DBG(dev, "<--- %s()\n", __func__);
+}
+
+/*-------------------------------------------------------------------------*/
+
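+/* Intel Langwell client controller: PCI class USB, prog-if 0xfe (device mode) */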
+static const struct pci_device_id pci_ids[] = { {
+	.class =	((PCI_CLASS_SERIAL_USB << 8) | 0xfe),
+	.class_mask =	~0,
+	.vendor =	0x8086,
+	.device =	0x0811,
+	.subvendor =	PCI_ANY_ID,
+	.subdevice =	PCI_ANY_ID,
+}, { /* end: all zeroes */ }
+};
+
+
+MODULE_DEVICE_TABLE(pci, pci_ids);
+
+
+static struct pci_driver langwell_pci_driver = {
+	.name =		(char *) driver_name,
+	.id_table =	pci_ids,
+
+	.probe =	langwell_udc_probe,
+	.remove =	langwell_udc_remove,
+
+	/* device controller suspend/resume */
+	.suspend =	langwell_udc_suspend,
+	.resume =	langwell_udc_resume,
+
+	.shutdown =	langwell_udc_shutdown,
+};
+
+
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_AUTHOR("Xiaochen Shen <xiaochen.shen@intel.com>");
+MODULE_VERSION(DRIVER_VERSION);
+MODULE_LICENSE("GPL");
+
+
+static int __init init(void)
+{
+#ifdef	OTG_TRANSCEIVER
+	return langwell_register_peripheral(&langwell_pci_driver);
+#else
+	return pci_register_driver(&langwell_pci_driver);
+#endif
+}
+module_init(init);
+
+
+static void __exit cleanup(void)
+{
+#ifdef	OTG_TRANSCEIVER
+	return langwell_unregister_peripheral(&langwell_pci_driver);
+#else
+	pci_unregister_driver(&langwell_pci_driver);
+#endif
+}
+module_exit(cleanup);
+
diff --git a/drivers/usb/gadget/langwell_udc.h b/drivers/usb/gadget/langwell_udc.h
new file mode 100644
index 0000000..9719934
--- /dev/null
+++ b/drivers/usb/gadget/langwell_udc.h
@@ -0,0 +1,228 @@
+/*
+ * Intel Langwell USB Device Controller driver
+ * Copyright (C) 2008-2009, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ */
+
+#include <linux/usb/langwell_udc.h>
+
+#if defined(CONFIG_USB_LANGWELL_OTG)
+#include <linux/usb/langwell_otg.h>
+#endif
+
+
+/*-------------------------------------------------------------------------*/
+
+/* driver data structures and utilities */
+
+/*
+ * dTD: Device Endpoint Transfer Descriptor
+ * describes to the device controller the location and quantity of
+ * data to be sent/received for a given transfer
+ */
+struct langwell_dtd {
+	u32	dtd_next;
+/* bits 31:5, next transfer element pointer */
+#define	DTD_NEXT(d)	(((d)>>5)&0x7ffffff)
+#define	DTD_NEXT_MASK	(0x7ffffff << 5)
+/* terminate */
+#define	DTD_TERM	BIT(0)
+	/* bits 7:0, execution back states */
+	u32	dtd_status:8;
+#define	DTD_STATUS(d)	(((d)>>0)&0xff)
+#define	DTD_STS_ACTIVE	BIT(7)	/* active */
+#define	DTD_STS_HALTED	BIT(6)	/* halted */
+#define	DTD_STS_DBE	BIT(5)	/* data buffer error */
+#define	DTD_STS_TRE	BIT(3)	/* transaction error  */
+	/* bits 9:8 */
+	u32	dtd_res0:2;
+	/* bits 11:10, multiplier override */
+	u32	dtd_multo:2;
+#define	DTD_MULTO	(BIT(11) | BIT(10))
+	/* bits 14:12 */
+	u32	dtd_res1:3;
+	/* bit 15, interrupt on complete */
+	u32	dtd_ioc:1;
+#define	DTD_IOC		BIT(15)
+	/* bits 30:16, total bytes */
+	u32	dtd_total:15;
+#define	DTD_TOTAL(d)	(((d)>>16)&0x7fff)
+#define	DTD_MAX_TRANSFER_LENGTH	0x4000
+	/* bit 31 */
+	u32	dtd_res2:1;
+	/* dTD buffer pointer page 0 to 4 */
+	u32	dtd_buf[5];
+#define	DTD_OFFSET_MASK	0xfff
+/* bits 31:12, buffer pointer */
+#define	DTD_BUFFER(d)	(((d)>>12)&0x3ff)
+/* bits 11:0, current offset */
+#define	DTD_C_OFFSET(d)	(((d)>>0)&0xfff)
+/* bits 10:0, frame number */
+#define	DTD_FRAME(d)	(((d)>>0)&0x7ff)
+
+	/* driver-private parts */
+
+	/* dtd dma address */
+	dma_addr_t		dtd_dma;
+	/* next dtd virtual address */
+	struct langwell_dtd	*next_dtd_virt;
+};
+
+
+/*
+ * dQH: Device Endpoint Queue Head
+ * describe where all transfers are managed
+ * 48-byte data structure, aligned on 64-byte boundary
+ *
+ * These are associated with dTD structure
+ */
+struct langwell_dqh {
+	/* endpoint capabilities and characteristics */
+	u32	dqh_res0:15;	/* bits 14:0 */
+	u32	dqh_ios:1;	/* bit 15, interrupt on setup */
+#define	DQH_IOS		BIT(15)
+	u32	dqh_mpl:11;	/* bits 26:16, maximum packet length */
+#define	DQH_MPL		(0x7ff << 16)
+	u32	dqh_res1:2;	/* bits 28:27 */
+	u32	dqh_zlt:1;	/* bit 29, zero length termination */
+#define	DQH_ZLT		BIT(29)
+	u32	dqh_mult:2;	/* bits 31:30 */
+#define	DQH_MULT	(BIT(30) | BIT(31))
+
+	/* current dTD pointer */
+	u32	dqh_current;	/* locate the transfer in progress */
+#define DQH_C_DTD(e)	\
+	(((e)>>5)&0x7ffffff)	/* bits 31:5, current dTD pointer */
+
+	/* transfer overlay, hardware parts of a struct langwell_dtd */
+	u32	dtd_next;
+	u32	dtd_status:8;	/* bits 7:0, execution back states */
+	u32	dtd_res0:2;	/* bits 9:8 */
+	u32	dtd_multo:2;	/* bits 11:10, multiplier override */
+	u32	dtd_res1:3;	/* bits 14:12 */
+	u32	dtd_ioc:1;	/* bit 15, interrupt on complete */
+	u32	dtd_total:15;	/* bits 30:16, total bytes */
+	u32	dtd_res2:1;	/* bit 31 */
+	u32	dtd_buf[5];	/* dTD buffer pointer page 0 to 4 */
+
+	u32	dqh_res2;
+	struct usb_ctrlrequest	dqh_setup;	/* setup packet buffer */
+} __attribute__ ((aligned(64)));
+
+
+/* endpoint data structure */
+struct langwell_ep {
+	struct usb_ep		ep;
+	dma_addr_t		dma;
+	struct langwell_udc	*dev;
+	unsigned long		irqs;
+	struct list_head	queue;
+	struct langwell_dqh	*dqh;
+	const struct usb_endpoint_descriptor	*desc;
+	char			name[14];
+	unsigned		stopped:1,
+				ep_type:2,
+				ep_num:8;
+};
+
+
+/* request data structure */
+struct langwell_request {
+	struct usb_request	req;
+	struct langwell_dtd	*dtd, *head, *tail;
+	struct langwell_ep	*ep;
+	dma_addr_t		dtd_dma;
+	struct list_head	queue;
+	unsigned		dtd_count;
+	unsigned		mapped:1;
+};
+
+
+/* ep0 transfer state */
+enum ep0_state {
+	WAIT_FOR_SETUP,
+	DATA_STATE_XMIT,
+	DATA_STATE_NEED_ZLP,
+	WAIT_FOR_OUT_STATUS,
+	DATA_STATE_RECV,
+};
+
+
+/* device suspend state */
+enum lpm_state {
+	LPM_L0,	/* on */
+	LPM_L1,	/* LPM L1 sleep */
+	LPM_L2,	/* suspend */
+	LPM_L3,	/* off */
+};
+
+
+/* device data structure */
+struct langwell_udc {
+	/* each pci device provides one gadget, several endpoints */
+	struct usb_gadget	gadget;
+	spinlock_t		lock;	/* device lock */
+	struct langwell_ep	*ep;
+	struct usb_gadget_driver	*driver;
+	struct otg_transceiver	*transceiver;
+	u8			dev_addr;
+	u32			usb_state;
+	u32			resume_state;
+	u32			bus_reset;
+	enum lpm_state		lpm_state;
+	enum ep0_state		ep0_state;
+	u32			ep0_dir;
+	u16			dciversion;
+	unsigned		ep_max;
+	unsigned		devcap:1,
+				enabled:1,
+				region:1,
+				got_irq:1,
+				powered:1,
+				remote_wakeup:1,
+				rate:1,
+				is_reset:1,
+				softconnected:1,
+				vbus_active:1,
+				suspended:1,
+				stopped:1,
+				lpm:1;	/* LPM capability */
+
+	/* pci state used to access those endpoints */
+	struct pci_dev		*pdev;
+
+	/* Langwell otg transceiver */
+	struct langwell_otg	*lotg;
+
+	/* control registers */
+	struct langwell_cap_regs	__iomem	*cap_regs;
+	struct langwell_op_regs		__iomem	*op_regs;
+
+	struct usb_ctrlrequest	local_setup_buff;
+	struct langwell_dqh	*ep_dqh;
+	size_t			ep_dqh_size;
+	dma_addr_t		ep_dqh_dma;
+
+	/* ep0 status request */
+	struct langwell_request	*status_req;
+
+	/* dma pool */
+	struct dma_pool		*dtd_pool;
+
+	/* make sure release() is done */
+	struct completion	*done;
+};
+
diff --git a/drivers/usb/gadget/pxa27x_udc.c b/drivers/usb/gadget/pxa27x_udc.c
index 8cc676e..1937d8c 100644
--- a/drivers/usb/gadget/pxa27x_udc.c
+++ b/drivers/usb/gadget/pxa27x_udc.c
@@ -38,7 +38,6 @@
 #include <linux/usb.h>
 #include <linux/usb/ch9.h>
 #include <linux/usb/gadget.h>
-#include <mach/pxa2xx-regs.h> /* FIXME: for PSSR */
 #include <mach/udc.h>
 
 #include "pxa27x_udc.h"
@@ -474,6 +473,23 @@
 }
 
 /**
+ * ep_write_UDCCSR - set bits in UDCCSR
+ * @udc: udc device
+ * @mask: bits to set in UDCCSR
+ *
+ * Sets bits in UDCCSR (UDCCSR0 and UDCCSR*).
+ *
+ * A specific case applies to ep0: the ACM bit is always set to 1, for
+ * SET_INTERFACE and SET_CONFIGURATION.
+ */
+static inline void ep_write_UDCCSR(struct pxa_ep *ep, int mask)
+{
+	if (is_ep0(ep))
+		mask |= UDCCSR0_ACM;
+	udc_ep_writel(ep, UDCCSR, mask);
+}
+
+/**
  * ep_count_bytes_remain - get how many bytes in udc endpoint
  * @ep: udc endpoint
  *
@@ -861,7 +877,7 @@
 		*buf++ = udc_ep_readl(ep, UDCDR);
 	req->req.actual += count;
 
-	udc_ep_writel(ep, UDCCSR, UDCCSR_PC);
+	ep_write_UDCCSR(ep, UDCCSR_PC);
 
 	return count;
 }
@@ -969,12 +985,12 @@
 		if (udccsr & UDCCSR_PC) {
 			ep_vdbg(ep, "Clearing Transmit Complete, udccsr=%x\n",
 				udccsr);
-			udc_ep_writel(ep, UDCCSR, UDCCSR_PC);
+			ep_write_UDCCSR(ep, UDCCSR_PC);
 		}
 		if (udccsr & UDCCSR_TRN) {
 			ep_vdbg(ep, "Clearing Underrun on, udccsr=%x\n",
 				udccsr);
-			udc_ep_writel(ep, UDCCSR, UDCCSR_TRN);
+			ep_write_UDCCSR(ep, UDCCSR_TRN);
 		}
 
 		count = write_packet(ep, req, max);
@@ -996,7 +1012,7 @@
 		}
 
 		if (is_short)
-			udc_ep_writel(ep, UDCCSR, UDCCSR_SP);
+			ep_write_UDCCSR(ep, UDCCSR_SP);
 
 		/* requests complete when all IN data is in the FIFO */
 		if (is_last) {
@@ -1029,7 +1045,7 @@
 
 	while (epout_has_pkt(ep)) {
 		count = read_packet(ep, req);
-		udc_ep_writel(ep, UDCCSR, UDCCSR0_OPC);
+		ep_write_UDCCSR(ep, UDCCSR0_OPC);
 		inc_ep_stats_bytes(ep, count, !USB_DIR_IN);
 
 		is_short = (count < ep->fifo_size);
@@ -1074,7 +1090,7 @@
 
 	/* Sends either a short packet or a 0 length packet */
 	if (unlikely(is_short))
-		udc_ep_writel(ep, UDCCSR, UDCCSR0_IPR);
+		ep_write_UDCCSR(ep, UDCCSR0_IPR);
 
 	ep_dbg(ep, "in %d bytes%s%s, %d left, req=%p, udccsr0=0x%03x\n",
 		count, is_short ? "/S" : "", is_last ? "/L" : "",
@@ -1277,7 +1293,7 @@
 
 	/* FST, FEF bits are the same for control and non control endpoints */
 	rc = 0;
-	udc_ep_writel(ep, UDCCSR, UDCCSR_FST | UDCCSR_FEF);
+	ep_write_UDCCSR(ep, UDCCSR_FST | UDCCSR_FEF);
 	if (is_ep0(ep))
 		set_ep0state(ep->dev, STALL);
 
@@ -1343,7 +1359,7 @@
 			udc_ep_readl(ep, UDCDR);
 	} else {
 		/* most IN status is the same, but ISO can't stall */
-		udc_ep_writel(ep, UDCCSR,
+		ep_write_UDCCSR(ep,
 				UDCCSR_PC | UDCCSR_FEF | UDCCSR_TRN
 				| (EPXFERTYPE_is_ISO(ep) ? 0 : UDCCSR_SST));
 	}
@@ -1728,6 +1744,7 @@
 	memset(&udc->stats, 0, sizeof(udc->stats));
 
 	udc_set_mask_UDCCR(udc, UDCCR_UDE);
+	ep_write_UDCCSR(&udc->pxa_ep[0], UDCCSR0_ACM);
 	udelay(2);
 	if (udc_readl(udc, UDCCR) & UDCCR_EMCE)
 		dev_err(udc->dev, "Configuration errors, udc disabled\n");
@@ -1893,6 +1910,15 @@
 
 	nuke(ep, -EPROTO);
 
+	/*
+	 * The PXA320 manual describes this situation in the section about
+	 * Back-to-Back setup packets.  The solution is to set OPC to get rid
+	 * of the status packet, and then continue with the setup packet.
+	 * Generalize to pxa27x CPUs.
+	 */
+	if (epout_has_pkt(ep) && (ep_count_bytes_remain(ep) == 0))
+		ep_write_UDCCSR(ep, UDCCSR0_OPC);
+
 	/* read SETUP packet */
 	for (i = 0; i < 2; i++) {
 		if (unlikely(ep_is_empty(ep)))
@@ -1919,7 +1945,7 @@
 		set_ep0state(udc, OUT_DATA_STAGE);
 
 	/* Tell UDC to enter Data Stage */
-	udc_ep_writel(ep, UDCCSR, UDCCSR0_SA | UDCCSR0_OPC);
+	ep_write_UDCCSR(ep, UDCCSR0_SA | UDCCSR0_OPC);
 
 	i = udc->driver->setup(&udc->gadget, &u.r);
 	if (i < 0)
@@ -1929,7 +1955,7 @@
 stall:
 	ep_dbg(ep, "protocol STALL, udccsr0=%03x err %d\n",
 		udc_ep_readl(ep, UDCCSR), i);
-	udc_ep_writel(ep, UDCCSR, UDCCSR0_FST | UDCCSR0_FTF);
+	ep_write_UDCCSR(ep, UDCCSR0_FST | UDCCSR0_FTF);
 	set_ep0state(udc, STALL);
 	goto out;
 }
@@ -1966,6 +1992,8 @@
  *     cleared by software.
  *   - clearing UDCCSR0_OPC always flushes ep0. If in setup stage, never do it
  *     before reading ep0.
+ *     This is true only for PXA27x. It no longer holds for the PXA3xx family
+ *     (see "Back-to-Back setup packets" in the developer's guide).
  *   - irq can be called on a "packet complete" event (opc_irq=1), while
  *     UDCCSR0_OPC is not yet raised (delta can be as big as 100ms
  *     from experimentation).
@@ -1998,7 +2026,7 @@
 	if (udccsr0 & UDCCSR0_SST) {
 		ep_dbg(ep, "clearing stall status\n");
 		nuke(ep, -EPIPE);
-		udc_ep_writel(ep, UDCCSR, UDCCSR0_SST);
+		ep_write_UDCCSR(ep, UDCCSR0_SST);
 		ep0_idle(udc);
 	}
 
@@ -2023,7 +2051,7 @@
 		break;
 	case IN_DATA_STAGE:			/* GET_DESCRIPTOR */
 		if (epout_has_pkt(ep))
-			udc_ep_writel(ep, UDCCSR, UDCCSR0_OPC);
+			ep_write_UDCCSR(ep, UDCCSR0_OPC);
 		if (req && !ep_is_full(ep))
 			completed = write_ep0_fifo(ep, req);
 		if (completed)
@@ -2036,7 +2064,7 @@
 			ep0_end_out_req(ep, req);
 		break;
 	case STALL:
-		udc_ep_writel(ep, UDCCSR, UDCCSR0_FST);
+		ep_write_UDCCSR(ep, UDCCSR0_FST);
 		break;
 	case IN_STATUS_STAGE:
 		/*
@@ -2131,6 +2159,7 @@
 
 	set_ep0state(udc, WAIT_ACK_SET_CONF_INTERF);
 	udc->driver->setup(&udc->gadget, &req);
+	ep_write_UDCCSR(&udc->pxa_ep[0], UDCCSR0_AREN);
 }
 
 /**
@@ -2159,6 +2188,7 @@
 
 	set_ep0state(udc, WAIT_ACK_SET_CONF_INTERF);
 	udc->driver->setup(&udc->gadget, &req);
+	ep_write_UDCCSR(&udc->pxa_ep[0], UDCCSR0_AREN);
 }
 
 /*
@@ -2280,7 +2310,7 @@
 	memset(&udc->stats, 0, sizeof udc->stats);
 
 	nuke(ep, -EPROTO);
-	udc_ep_writel(ep, UDCCSR, UDCCSR0_FTF | UDCCSR0_OPC);
+	ep_write_UDCCSR(ep, UDCCSR0_FTF | UDCCSR0_OPC);
 	ep0_idle(udc);
 }
 
@@ -2479,6 +2509,12 @@
 		udc_disable(udc);
 }
 
+#ifdef CONFIG_CPU_PXA27x
+extern void pxa27x_clear_otgph(void);
+#else
+#define pxa27x_clear_otgph()   do {} while (0)
+#endif
+
 #ifdef CONFIG_PM
 /**
  * pxa_udc_suspend - Suspend udc device
@@ -2546,8 +2582,7 @@
 	 * Software must configure the USB OTG pad, UDC, and UHC
 	 * to the state they were in before entering sleep mode.
 	 */
-	if (cpu_is_pxa27x())
-		PSSR |= PSSR_OTGPH;
+	pxa27x_clear_otgph();
 
 	return 0;
 }
@@ -2571,7 +2606,7 @@
 
 static int __init udc_init(void)
 {
-	if (!cpu_is_pxa27x())
+	if (!cpu_is_pxa27x() && !cpu_is_pxa3xx())
 		return -ENODEV;
 
 	printk(KERN_INFO "%s: version %s\n", driver_name, DRIVER_VERSION);
diff --git a/drivers/usb/gadget/pxa27x_udc.h b/drivers/usb/gadget/pxa27x_udc.h
index db58125..e25225e 100644
--- a/drivers/usb/gadget/pxa27x_udc.h
+++ b/drivers/usb/gadget/pxa27x_udc.h
@@ -130,6 +130,8 @@
 #define UP2OCR_HXOE	(1 << 17)	/* Transceiver Output Enable */
 #define UP2OCR_SEOS	(1 << 24)	/* Single-Ended Output Select */
 
+#define UDCCSR0_ACM	(1 << 9)	/* Ack Control Mode */
+#define UDCCSR0_AREN	(1 << 8)	/* Ack Response Enable */
 #define UDCCSR0_SA	(1 << 7)	/* Setup Active */
 #define UDCCSR0_RNE	(1 << 6)	/* Receive FIFO Not Empty */
 #define UDCCSR0_FST	(1 << 5)	/* Force Stall */
diff --git a/drivers/usb/gadget/s3c-hsotg.c b/drivers/usb/gadget/s3c-hsotg.c
new file mode 100644
index 0000000..50c71aa
--- /dev/null
+++ b/drivers/usb/gadget/s3c-hsotg.c
@@ -0,0 +1,3269 @@
+/* linux/drivers/usb/gadget/s3c-hsotg.c
+ *
+ * Copyright 2008 Openmoko, Inc.
+ * Copyright 2008 Simtec Electronics
+ *      Ben Dooks <ben@simtec.co.uk>
+ *      http://armlinux.simtec.co.uk/
+ *
+ * S3C USB2.0 High-speed / OtG driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/spinlock.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+
+#include <linux/usb/ch9.h>
+#include <linux/usb/gadget.h>
+
+#include <mach/map.h>
+
+#include <plat/regs-usb-hsotg-phy.h>
+#include <plat/regs-usb-hsotg.h>
+#include <plat/regs-sys.h>
+#include <plat/udc-hs.h>
+
+#define DMA_ADDR_INVALID (~((dma_addr_t)0))
+
+/* EP0_MPS_LIMIT
+ *
+ * Unfortunately there seems to be a limit on the amount of data that can
+ * be transferred by IN transactions on EP0. This is either 127 bytes or 3
+ * packets (which practically means 1 packet and 63 bytes of data) when the
+ * MPS is set to 64.
+ *
+ * This means that if we want to move more than 127 bytes of data, we need
+ * to split the transactions up; just doing one packet at a time does not
+ * work (possibly due to an implicit DATA0 PID on the first packet of the
+ * transaction) and doing 2 packets is outside the controller's limits.
+ *
+ * If we try to lower the MPS size for EP0, then no transfers work properly
+ * for EP0, and the system will fail basic enumeration. As no cause for this
+ * has currently been found, we cannot support any large IN transfers for
+ * EP0.
+ */
+#define EP0_MPS_LIMIT	64
+
+struct s3c_hsotg;
+struct s3c_hsotg_req;
+
+/**
+ * struct s3c_hsotg_ep - driver endpoint definition.
+ * @ep: The gadget layer representation of the endpoint.
+ * @name: The driver-generated name array passed to the USB core.
+ * @queue: Queue of requests for this endpoint.
+ * @parent: Reference back to the parent device structure.
+ * @req: The current request that the endpoint is processing. This is
+ *       used to indicate that a request has been loaded onto the endpoint
+ *       and has yet to be completed (maybe due to data move, or simply
+ *       awaiting an ack from the core that all the data has been completed).
+ * @debugfs: File entry for debugfs file for this endpoint.
+ * @lock: State lock to protect contents of endpoint.
+ * @dir_in: Set to true if this endpoint is of the IN direction, which
+ *	    means that it is sending data to the Host.
+ * @index: The index for the endpoint registers.
+ * @halted: Set if the endpoint has been halted.
+ * @periodic: Set if this is a periodic ep, such as Interrupt
+ * @sent_zlp: Set if we've sent a zero-length packet.
+ * @total_data: The total number of data bytes done.
+ * @fifo_size: The size of the FIFO (for periodic IN endpoints)
+ * @fifo_load: The amount of data loaded into the FIFO (periodic IN)
+ * @last_load: The offset of data for the last start of request.
+ * @size_loaded: The last loaded size for DxEPTSIZE for periodic IN
+ *
+ * This is the driver's state for each registered endpoint, allowing it
+ * to keep track of transactions that need doing. Each endpoint has a
+ * lock to protect the state, to try and avoid using an overall lock
+ * for the host controller as much as possible.
+ *
+ * For periodic IN endpoints, we have fifo_size and fifo_load to try
+ * and keep track of the amount of data in the periodic FIFO for each
+ * of these as we don't have a status register that tells us how much
+ * is in each of them.
+ */
+struct s3c_hsotg_ep {
+	struct usb_ep		ep;
+	struct list_head	queue;
+	struct s3c_hsotg	*parent;
+	struct s3c_hsotg_req	*req;
+	struct dentry		*debugfs;
+
+	spinlock_t		lock;
+
+	unsigned long		total_data;
+	unsigned int		size_loaded;
+	unsigned int		last_load;
+	unsigned int		fifo_load;
+	unsigned short		fifo_size;
+
+	unsigned char		dir_in;
+	unsigned char		index;
+
+	unsigned int		halted:1;
+	unsigned int		periodic:1;
+	unsigned int		sent_zlp:1;
+
+	char			name[10];
+};
+
+#define S3C_HSOTG_EPS	(8+1)	/* limit to 9 for the moment */
+
+/**
+ * struct s3c_hsotg - driver state.
+ * @dev: The parent device supplied to the probe function
+ * @driver: USB gadget driver
+ * @plat: The platform specific configuration data.
+ * @regs: The memory area mapped for accessing registers.
+ * @regs_res: The resource that was allocated when claiming register space.
+ * @irq: The IRQ number we are using
+ * @debug_root: root directory for debugfs.
+ * @debug_file: main status file for debugfs.
+ * @debug_fifo: FIFO status file for debugfs.
+ * @ep0_reply: Request used for ep0 reply.
+ * @ep0_buff: Buffer for EP0 reply data, if needed.
+ * @ctrl_buff: Buffer for EP0 control requests.
+ * @ctrl_req: Request for EP0 control packets.
+ * @eps: The endpoints being supplied to the gadget framework
+ */
+struct s3c_hsotg {
+	struct device		 *dev;
+	struct usb_gadget_driver *driver;
+	struct s3c_hsotg_plat	 *plat;
+
+	void __iomem		*regs;
+	struct resource		*regs_res;
+	int			irq;
+
+	struct dentry		*debug_root;
+	struct dentry		*debug_file;
+	struct dentry		*debug_fifo;
+
+	struct usb_request	*ep0_reply;
+	struct usb_request	*ctrl_req;
+	u8			ep0_buff[8];
+	u8			ctrl_buff[8];
+
+	struct usb_gadget	gadget;
+	struct s3c_hsotg_ep	eps[];
+};
+
+/**
+ * struct s3c_hsotg_req - data transfer request
+ * @req: The USB gadget request
+ * @queue: The list of requests for the endpoint this is queued for.
+ * @in_progress: Has already had size/packets written to core
+ * @mapped: DMA buffer for this request has been mapped via dma_map_single().
+ */
+struct s3c_hsotg_req {
+	struct usb_request	req;
+	struct list_head	queue;
+	unsigned char		in_progress;
+	unsigned char		mapped;
+};
+
+/* conversion functions */
+static inline struct s3c_hsotg_req *our_req(struct usb_request *req)
+{
+	return container_of(req, struct s3c_hsotg_req, req);
+}
+
+static inline struct s3c_hsotg_ep *our_ep(struct usb_ep *ep)
+{
+	return container_of(ep, struct s3c_hsotg_ep, ep);
+}
+
+static inline struct s3c_hsotg *to_hsotg(struct usb_gadget *gadget)
+{
+	return container_of(gadget, struct s3c_hsotg, gadget);
+}
+
+static inline void __orr32(void __iomem *ptr, u32 val)
+{
+	writel(readl(ptr) | val, ptr);
+}
+
+static inline void __bic32(void __iomem *ptr, u32 val)
+{
+	writel(readl(ptr) & ~val, ptr);
+}
+
+/* forward declaration of functions */
+static void s3c_hsotg_dump(struct s3c_hsotg *hsotg);
+
+/**
+ * using_dma - return the DMA status of the driver.
+ * @hsotg: The driver state.
+ *
+ * Return true if we're using DMA.
+ *
+ * Currently, we have the DMA support code worked into everywhere
+ * that needs it, but the AMBA DMA implementation in the hardware can
+ * only DMA from 32bit aligned addresses. This means that gadgets such
+ * as the CDC Ethernet cannot work as they often pass packets which are
+ * not 32bit aligned.
+ *
+ * Unfortunately the choice to use DMA or not is global to the controller
+ * and seems to be only settable when the controller is being put through
+ * a core reset. This means we either need to fix the gadgets to take
+ * account of DMA alignment, or add bounce buffers (yuerk).
+ *
+ * Until this issue is sorted out, we always return 'false'.
+ */
+static inline bool using_dma(struct s3c_hsotg *hsotg)
+{
+	return false;	/* support is not complete */
+}
+
+/**
+ * s3c_hsotg_en_gsint - enable one or more of the general interrupt
+ * @hsotg: The device state
+ * @ints: A bitmask of the interrupts to enable
+ */
+static void s3c_hsotg_en_gsint(struct s3c_hsotg *hsotg, u32 ints)
+{
+	u32 gsintmsk = readl(hsotg->regs + S3C_GINTMSK);
+	u32 new_gsintmsk;
+
+	new_gsintmsk = gsintmsk | ints;
+
+	if (new_gsintmsk != gsintmsk) {
+		dev_dbg(hsotg->dev, "gsintmsk now 0x%08x\n", new_gsintmsk);
+		writel(new_gsintmsk, hsotg->regs + S3C_GINTMSK);
+	}
+}
+
+/**
+ * s3c_hsotg_disable_gsint - disable one or more of the general interrupt
+ * @hsotg: The device state
+ * @ints: A bitmask of the interrupts to enable
+ */
+static void s3c_hsotg_disable_gsint(struct s3c_hsotg *hsotg, u32 ints)
+{
+	u32 gsintmsk = readl(hsotg->regs + S3C_GINTMSK);
+	u32 new_gsintmsk;
+
+	new_gsintmsk = gsintmsk & ~ints;
+
+	if (new_gsintmsk != gsintmsk)
+		writel(new_gsintmsk, hsotg->regs + S3C_GINTMSK);
+}
+
+/**
+ * s3c_hsotg_ctrl_epint - enable/disable an endpoint irq
+ * @hsotg: The device state
+ * @ep: The endpoint index
+ * @dir_in: True if direction is in.
+ * @en: The enable value, true to enable
+ *
+ * Set or clear the mask for an individual endpoint's interrupt
+ * request.
+ */
+static void s3c_hsotg_ctrl_epint(struct s3c_hsotg *hsotg,
+				 unsigned int ep, unsigned int dir_in,
+				 unsigned int en)
+{
+	unsigned long flags;
+	u32 bit = 1 << ep;
+	u32 daint;
+
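+	/* DAINTMSK: IN endpoint bits occupy 15:0, OUT endpoint bits 31:16 */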
+	if (!dir_in)
+		bit <<= 16;
+
+	local_irq_save(flags);
+	daint = readl(hsotg->regs + S3C_DAINTMSK);
+	if (en)
+		daint |= bit;
+	else
+		daint &= ~bit;
+	writel(daint, hsotg->regs + S3C_DAINTMSK);
+	local_irq_restore(flags);
+}
+
+/**
+ * s3c_hsotg_init_fifo - initialise non-periodic FIFOs
+ * @hsotg: The device instance.
+ */
+static void s3c_hsotg_init_fifo(struct s3c_hsotg *hsotg)
+{
+	/* the ryu 2.6.24 release has
+	   writel(0x1C0, hsotg->regs + S3C_GRXFSIZ);
+	   writel(S3C_GNPTXFSIZ_NPTxFStAddr(0x200) |
+		S3C_GNPTXFSIZ_NPTxFDep(0x1C0),
+		hsotg->regs + S3C_GNPTXFSIZ);
+	*/
+
+	/* set FIFO sizes to 2048/0x1C0 */
+
+	writel(2048, hsotg->regs + S3C_GRXFSIZ);
+	writel(S3C_GNPTXFSIZ_NPTxFStAddr(2048) |
+	       S3C_GNPTXFSIZ_NPTxFDep(0x1C0),
+	       hsotg->regs + S3C_GNPTXFSIZ);
+}
+
+/**
+ * s3c_hsotg_ep_alloc_request - allocate a request for an endpoint
+ * @ep: USB endpoint to allocate request for.
+ * @flags: Allocation flags
+ *
+ * Allocate a new USB request structure appropriate for the specified endpoint
+ */
+struct usb_request *s3c_hsotg_ep_alloc_request(struct usb_ep *ep, gfp_t flags)
+{
+	struct s3c_hsotg_req *req;
+
+	req = kzalloc(sizeof(struct s3c_hsotg_req), flags);
+	if (!req)
+		return NULL;
+
+	INIT_LIST_HEAD(&req->queue);
+
+	req->req.dma = DMA_ADDR_INVALID;
+	return &req->req;
+}
+
+/**
+ * is_ep_periodic - return true if the endpoint is in periodic mode.
+ * @hs_ep: The endpoint to query.
+ *
+ * Returns true if the endpoint is in periodic mode, meaning it is being
+ * used for an Interrupt or ISO transfer.
+ */
+static inline int is_ep_periodic(struct s3c_hsotg_ep *hs_ep)
+{
+	return hs_ep->periodic;
+}
+
+/**
+ * s3c_hsotg_unmap_dma - unmap the DMA memory being used for the request
+ * @hsotg: The device state.
+ * @hs_ep: The endpoint for the request
+ * @hs_req: The request being processed.
+ *
+ * This is the reverse of s3c_hsotg_map_dma(), called for the completion
+ * of a request to ensure the buffer is ready for access by the caller.
+*/
+static void s3c_hsotg_unmap_dma(struct s3c_hsotg *hsotg,
+				struct s3c_hsotg_ep *hs_ep,
+				struct s3c_hsotg_req *hs_req)
+{
+	struct usb_request *req = &hs_req->req;
+	enum dma_data_direction dir;
+
+	dir = hs_ep->dir_in ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
+
+	/* ignore this if we're not moving any data */
+	if (hs_req->req.length == 0)
+		return;
+
+	if (hs_req->mapped) {
+		/* we mapped this, so unmap and remove the dma */
+
+		dma_unmap_single(hsotg->dev, req->dma, req->length, dir);
+
+		req->dma = DMA_ADDR_INVALID;
+		hs_req->mapped = 0;
+	} else {
+		dma_sync_single(hsotg->dev, req->dma, req->length, dir);
+	}
+}
+
+/**
+ * s3c_hsotg_write_fifo - write packet Data to the TxFIFO
+ * @hsotg: The controller state.
+ * @hs_ep: The endpoint we're going to write for.
+ * @hs_req: The request to write data for.
+ *
+ * This is called when the TxFIFO has some space in it to hold a new
+ * transmission and we have something to give it. The actual setup of
+ * the data size is done elsewhere, so all we have to do is to actually
+ * write the data.
+ *
+ * The return value is zero if there is more space (or nothing was done)
+ * otherwise -ENOSPC is returned if the FIFO space was used up.
+ *
+ * This routine is only needed for PIO
+*/
+static int s3c_hsotg_write_fifo(struct s3c_hsotg *hsotg,
+				struct s3c_hsotg_ep *hs_ep,
+				struct s3c_hsotg_req *hs_req)
+{
+	bool periodic = is_ep_periodic(hs_ep);
+	u32 gnptxsts = readl(hsotg->regs + S3C_GNPTXSTS);
+	int buf_pos = hs_req->req.actual;
+	int to_write = hs_ep->size_loaded;
+	void *data;
+	int can_write;
+	int pkt_round;
+
+	to_write -= (buf_pos - hs_ep->last_load);
+
+	/* if there's nothing to write, get out early */
+	if (to_write == 0)
+		return 0;
+
+	if (periodic) {
+		u32 epsize = readl(hsotg->regs + S3C_DIEPTSIZ(hs_ep->index));
+		int size_left;
+		int size_done;
+
+		/* work out how much data was loaded so we can calculate
+		 * how much data is left in the fifo. */
+
+		size_left = S3C_DxEPTSIZ_XferSize_GET(epsize);
+
+		dev_dbg(hsotg->dev, "%s: left=%d, load=%d, fifo=%d, size %d\n",
+			__func__, size_left,
+			hs_ep->size_loaded, hs_ep->fifo_load, hs_ep->fifo_size);
+
+		/* how much of the data has moved */
+		size_done = hs_ep->size_loaded - size_left;
+
+		/* how much data is left in the fifo */
+		can_write = hs_ep->fifo_load - size_done;
+		dev_dbg(hsotg->dev, "%s: => can_write1=%d\n",
+			__func__, can_write);
+
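+		/* can_write currently holds the number of bytes still
+		 * resident in the FIFO, so the free space is the FIFO
+		 * size minus that. */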
+		can_write = hs_ep->fifo_size - can_write;
+		dev_dbg(hsotg->dev, "%s: => can_write2=%d\n",
+			__func__, can_write);
+
+		if (can_write <= 0) {
+			s3c_hsotg_en_gsint(hsotg, S3C_GINTSTS_PTxFEmp);
+			return -ENOSPC;
+		}
+	} else {
+		if (S3C_GNPTXSTS_NPTxQSpcAvail_GET(gnptxsts) == 0) {
+			dev_dbg(hsotg->dev,
+				"%s: no queue slots available (0x%08x)\n",
+				__func__, gnptxsts);
+
+			s3c_hsotg_en_gsint(hsotg, S3C_GINTSTS_NPTxFEmp);
+			return -ENOSPC;
+		}
+
+		can_write = S3C_GNPTXSTS_NPTxFSpcAvail_GET(gnptxsts);
+	}
+
+	dev_dbg(hsotg->dev, "%s: GNPTXSTS=%08x, can=%d, to=%d, mps %d\n",
+		 __func__, gnptxsts, can_write, to_write, hs_ep->ep.maxpacket);
+
+	/* limit to 512 bytes of data; it seems that, at least on the
+	 * non-periodic FIFO, requests of more than 512 bytes cause the
+	 * endpoint to get stuck with a fragment of the end of the
+	 * transfer in it.
+	 */
+	if (can_write > 512)
+		can_write = 512;
+
+	/* see if we can write data */
+
+	if (to_write > can_write) {
+		to_write = can_write;
+		pkt_round = to_write % hs_ep->ep.maxpacket;
+
+		/* Not sure, but we probably shouldn't be writing partial
+		 * packets into the FIFO, so round the write down to an
+		 * exact number of packets.
+		 *
+		 * Note, we do not currently check to see if we can ever
+		 * write a full packet or not to the FIFO.
+		 */
+
+		if (pkt_round)
+			to_write -= pkt_round;
+
+		/* enable correct FIFO interrupt to alert us when there
+		 * is more room left. */
+
+		s3c_hsotg_en_gsint(hsotg,
+				   periodic ? S3C_GINTSTS_PTxFEmp :
+				   S3C_GINTSTS_NPTxFEmp);
+	}
+
+	dev_dbg(hsotg->dev, "write %d/%d, can_write %d, done %d\n",
+		 to_write, hs_req->req.length, can_write, buf_pos);
+
+	if (to_write <= 0)
+		return -ENOSPC;
+
+	hs_req->req.actual = buf_pos + to_write;
+	hs_ep->total_data += to_write;
+
+	if (periodic)
+		hs_ep->fifo_load += to_write;
+
+	to_write = DIV_ROUND_UP(to_write, 4);
+	data = hs_req->req.buf + buf_pos;
+
+	writesl(hsotg->regs + S3C_EPFIFO(hs_ep->index), data, to_write);
+
+	return (to_write >= can_write) ? -ENOSPC : 0;
+}
+
+/**
+ * get_ep_limit - get the maximum data length for this endpoint
+ * @hs_ep: The endpoint
+ *
+ * Return the maximum data that can be queued in one go on a given endpoint
+ * so that transfers that are too long can be split.
+ */
+static unsigned get_ep_limit(struct s3c_hsotg_ep *hs_ep)
+{
+	int index = hs_ep->index;
+	unsigned maxsize;
+	unsigned maxpkt;
+
+	if (index != 0) {
+		maxsize = S3C_DxEPTSIZ_XferSize_LIMIT + 1;
+		maxpkt = S3C_DxEPTSIZ_PktCnt_LIMIT + 1;
+	} else {
+		if (hs_ep->dir_in) {
+			/* maxsize = S3C_DIEPTSIZ0_XferSize_LIMIT + 1; */
+			maxsize = 64+64+1;
+			maxpkt = S3C_DIEPTSIZ0_PktCnt_LIMIT + 1;
+		} else {
+			maxsize = 0x3f;
+			maxpkt = 2;
+		}
+	}
+
+	/* we made the constant loading easier above by using +1 */
+	maxpkt--;
+	maxsize--;
+
+	/* constrain by packet count if maxpkts*pktsize is greater
+	 * than the length register size. */
+
+	if ((maxpkt * hs_ep->ep.maxpacket) < maxsize)
+		maxsize = maxpkt * hs_ep->ep.maxpacket;
+
+	return maxsize;
+}
+
+/**
+ * s3c_hsotg_start_req - start a USB request from an endpoint's queue
+ * @hsotg: The controller state.
+ * @hs_ep: The endpoint to process a request for
+ * @hs_req: The request to start.
+ * @continuing: True if we are doing more for the current request.
+ *
+ * Start the given request running by setting the endpoint registers
+ * appropriately, and writing any data to the FIFOs.
+ */
+static void s3c_hsotg_start_req(struct s3c_hsotg *hsotg,
+				struct s3c_hsotg_ep *hs_ep,
+				struct s3c_hsotg_req *hs_req,
+				bool continuing)
+{
+	struct usb_request *ureq = &hs_req->req;
+	int index = hs_ep->index;
+	int dir_in = hs_ep->dir_in;
+	u32 epctrl_reg;
+	u32 epsize_reg;
+	u32 epsize;
+	u32 ctrl;
+	unsigned length;
+	unsigned packets;
+	unsigned maxreq;
+
+	if (index != 0) {
+		if (hs_ep->req && !continuing) {
+			dev_err(hsotg->dev, "%s: active request\n", __func__);
+			WARN_ON(1);
+			return;
+		} else if (hs_ep->req != hs_req && continuing) {
+			dev_err(hsotg->dev,
+				"%s: continue different req\n", __func__);
+			WARN_ON(1);
+			return;
+		}
+	}
+
+	epctrl_reg = dir_in ? S3C_DIEPCTL(index) : S3C_DOEPCTL(index);
+	epsize_reg = dir_in ? S3C_DIEPTSIZ(index) : S3C_DOEPTSIZ(index);
+
+	dev_dbg(hsotg->dev, "%s: DxEPCTL=0x%08x, ep %d, dir %s\n",
+		__func__, readl(hsotg->regs + epctrl_reg), index,
+		hs_ep->dir_in ? "in" : "out");
+
+	length = ureq->length - ureq->actual;
+
+	if (0)
+		dev_dbg(hsotg->dev,
+			"REQ buf %p len %d dma 0x%08x noi=%d zp=%d snok=%d\n",
+			ureq->buf, length, ureq->dma,
+			ureq->no_interrupt, ureq->zero, ureq->short_not_ok);
+
+	maxreq = get_ep_limit(hs_ep);
+	if (length > maxreq) {
+		int round = maxreq % hs_ep->ep.maxpacket;
+
+		dev_dbg(hsotg->dev, "%s: length %d, max-req %d, r %d\n",
+			__func__, length, maxreq, round);
+
+		/* round down to multiple of packets */
+		if (round)
+			maxreq -= round;
+
+		length = maxreq;
+	}
+
+	if (length)
+		packets = DIV_ROUND_UP(length, hs_ep->ep.maxpacket);
+	else
+		packets = 1;	/* send one packet if length is zero. */
+
+	if (dir_in && index != 0)
+		epsize = S3C_DxEPTSIZ_MC(1);
+	else
+		epsize = 0;
+
+	if (index != 0 && ureq->zero) {
+		/* if the transfer is an exact multiple of the packet size
+		 * then add an extra (zero length) packet so that the host
+		 * can tell the transfer has finished. */
+
+		if (length == (packets * hs_ep->ep.maxpacket))
+			packets++;
+	}
+
+	epsize |= S3C_DxEPTSIZ_PktCnt(packets);
+	epsize |= S3C_DxEPTSIZ_XferSize(length);
+
+	dev_dbg(hsotg->dev, "%s: %d@%d/%d, 0x%08x => 0x%08x\n",
+		__func__, packets, length, ureq->length, epsize, epsize_reg);
+
+	/* store the request as the current one we're doing */
+	hs_ep->req = hs_req;
+
+	/* write size / packets */
+	writel(epsize, hsotg->regs + epsize_reg);
+
+	ctrl = readl(hsotg->regs + epctrl_reg);
+
+	if (ctrl & S3C_DxEPCTL_Stall) {
+		dev_warn(hsotg->dev, "%s: ep%d is stalled\n", __func__, index);
+
+		/* not sure what we can do here, if it is EP0 then we should
+		 * get this cleared once the endpoint has transmitted the
+		 * STALL packet, otherwise it needs to be cleared by the
+		 * host.
+		 */
+	}
+
+	if (using_dma(hsotg)) {
+		unsigned int dma_reg;
+
+		/* write DMA address to control register, buffer already
+		 * synced by s3c_hsotg_ep_queue().  */
+
+		dma_reg = dir_in ? S3C_DIEPDMA(index) : S3C_DOEPDMA(index);
+		writel(ureq->dma, hsotg->regs + dma_reg);
+
+		dev_dbg(hsotg->dev, "%s: 0x%08x => 0x%08x\n",
+			__func__, ureq->dma, dma_reg);
+	}
+
+	ctrl |= S3C_DxEPCTL_EPEna;	/* ensure ep enabled */
+	ctrl |= S3C_DxEPCTL_USBActEp;
+	ctrl |= S3C_DxEPCTL_CNAK;	/* clear NAK set by core */
+
+	dev_dbg(hsotg->dev, "%s: DxEPCTL=0x%08x\n", __func__, ctrl);
+	writel(ctrl, hsotg->regs + epctrl_reg);
+
+	/* set these, it seems that DMA support increments past the end
+	 * of the packet buffer so we need to calculate the length from
+	 * this information. */
+	hs_ep->size_loaded = length;
+	hs_ep->last_load = ureq->actual;
+
+	if (dir_in && !using_dma(hsotg)) {
+		/* set these anyway, we may need them for non-periodic in */
+		hs_ep->fifo_load = 0;
+
+		s3c_hsotg_write_fifo(hsotg, hs_ep, hs_req);
+	}
+
+	/* clear the INTknTXFEmpMsk when we start a request, more as an aid
+	 * to debugging to see what is going on. */
+	if (dir_in)
+		writel(S3C_DIEPMSK_INTknTXFEmpMsk,
+		       hsotg->regs + S3C_DIEPINT(index));
+
+	/* Note, trying to clear the NAK here causes problems with transmit
+	 * on the S3C6400 ending up with the TXFIFO becoming full. */
+
+	/* check ep is enabled */
+	if (!(readl(hsotg->regs + epctrl_reg) & S3C_DxEPCTL_EPEna))
+		dev_warn(hsotg->dev,
+			 "ep%d: failed to become enabled (DxEPCTL=0x%08x)?\n",
+			 index, readl(hsotg->regs + epctrl_reg));
+
+	dev_dbg(hsotg->dev, "%s: DxEPCTL=0x%08x\n",
+		__func__, readl(hsotg->regs + epctrl_reg));
+}
+
+/**
+ * s3c_hsotg_map_dma - map the DMA memory being used for the request
+ * @hsotg: The device state.
+ * @hs_ep: The endpoint the request is on.
+ * @req: The request being processed.
+ *
+ * We've been asked to queue a request, so ensure that the memory buffer
+ * is correctly setup for DMA. If we've been passed an extant DMA address
+ * then ensure the buffer has been synced to memory. If our buffer has no
+ * DMA memory, then we map the memory and mark our request to allow us to
+ * cleanup on completion.
+*/
+static int s3c_hsotg_map_dma(struct s3c_hsotg *hsotg,
+			     struct s3c_hsotg_ep *hs_ep,
+			     struct usb_request *req)
+{
+	enum dma_data_direction dir;
+	struct s3c_hsotg_req *hs_req = our_req(req);
+
+	dir = hs_ep->dir_in ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
+
+	/* if the length is zero, ignore the DMA data */
+	if (hs_req->req.length == 0)
+		return 0;
+
+	if (req->dma == DMA_ADDR_INVALID) {
+		dma_addr_t dma;
+
+		dma = dma_map_single(hsotg->dev, req->buf, req->length, dir);
+
+		if (unlikely(dma_mapping_error(hsotg->dev, dma)))
+			goto dma_error;
+
+		if (dma & 3) {
+			dev_err(hsotg->dev, "%s: unaligned dma buffer\n",
+				__func__);
+
+			dma_unmap_single(hsotg->dev, dma, req->length, dir);
+			return -EINVAL;
+		}
+
+		hs_req->mapped = 1;
+		req->dma = dma;
+	} else {
+		dma_sync_single(hsotg->dev, req->dma, req->length, dir);
+		hs_req->mapped = 0;
+	}
+
+	return 0;
+
+dma_error:
+	dev_err(hsotg->dev, "%s: failed to map buffer %p, %d bytes\n",
+		__func__, req->buf, req->length);
+
+	return -EIO;
+}
+
+static int s3c_hsotg_ep_queue(struct usb_ep *ep, struct usb_request *req,
+			      gfp_t gfp_flags)
+{
+	struct s3c_hsotg_req *hs_req = our_req(req);
+	struct s3c_hsotg_ep *hs_ep = our_ep(ep);
+	struct s3c_hsotg *hs = hs_ep->parent;
+	unsigned long irqflags;
+	bool first;
+
+	dev_dbg(hs->dev, "%s: req %p: %d@%p, noi=%d, zero=%d, snok=%d\n",
+		ep->name, req, req->length, req->buf, req->no_interrupt,
+		req->zero, req->short_not_ok);
+
+	/* initialise status of the request */
+	INIT_LIST_HEAD(&hs_req->queue);
+	req->actual = 0;
+	req->status = -EINPROGRESS;
+
+	/* if we're using DMA, sync the buffers as necessary */
+	if (using_dma(hs)) {
+		int ret = s3c_hsotg_map_dma(hs, hs_ep, req);
+		if (ret)
+			return ret;
+	}
+
+	spin_lock_irqsave(&hs_ep->lock, irqflags);
+
+	first = list_empty(&hs_ep->queue);
+	list_add_tail(&hs_req->queue, &hs_ep->queue);
+
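+	/* if the endpoint queue was empty then nothing is currently in
+	 * flight, so start this request now; otherwise it will be picked
+	 * up when the request in front of it completes. */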
+	if (first)
+		s3c_hsotg_start_req(hs, hs_ep, hs_req, false);
+
+	spin_unlock_irqrestore(&hs_ep->lock, irqflags);
+
+	return 0;
+}
+
+static void s3c_hsotg_ep_free_request(struct usb_ep *ep,
+				      struct usb_request *req)
+{
+	struct s3c_hsotg_req *hs_req = our_req(req);
+
+	kfree(hs_req);
+}
+
+/**
+ * s3c_hsotg_complete_oursetup - setup completion callback
+ * @ep: The endpoint the request was on.
+ * @req: The request completed.
+ *
+ * Called on completion of any requests the driver itself
+ * submitted that need cleaning up.
+ */
+static void s3c_hsotg_complete_oursetup(struct usb_ep *ep,
+					struct usb_request *req)
+{
+	struct s3c_hsotg_ep *hs_ep = our_ep(ep);
+	struct s3c_hsotg *hsotg = hs_ep->parent;
+
+	dev_dbg(hsotg->dev, "%s: ep %p, req %p\n", __func__, ep, req);
+
+	s3c_hsotg_ep_free_request(ep, req);
+}
+
+/**
+ * ep_from_windex - convert control wIndex value to endpoint
+ * @hsotg: The driver state.
+ * @windex: The control request wIndex field (in host order).
+ *
+ * Convert the given wIndex into a pointer to a driver endpoint
+ * structure, or return NULL if it is not a valid endpoint.
+*/
+static struct s3c_hsotg_ep *ep_from_windex(struct s3c_hsotg *hsotg,
+					   u32 windex)
+{
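+	/* for an endpoint recipient, wIndex carries the endpoint number
+	 * in bits 6:0 and the direction in bit 7 (USB_DIR_IN). */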
+	struct s3c_hsotg_ep *ep = &hsotg->eps[windex & 0x7F];
+	int dir = (windex & USB_DIR_IN) ? 1 : 0;
+	int idx = windex & 0x7F;
+
+	if (windex >= 0x100)
+		return NULL;
+
+	if (idx > S3C_HSOTG_EPS)
+		return NULL;
+
+	if (idx && ep->dir_in != dir)
+		return NULL;
+
+	return ep;
+}
+
+/**
+ * s3c_hsotg_send_reply - send reply to control request
+ * @hsotg: The device state
+ * @ep: Endpoint 0
+ * @buff: Buffer for request
+ * @length: Length of reply.
+ *
+ * Create a request and queue it on the given endpoint. This is useful as
+ * an internal method of sending replies to certain control requests, etc.
+ */
+static int s3c_hsotg_send_reply(struct s3c_hsotg *hsotg,
+				struct s3c_hsotg_ep *ep,
+				void *buff,
+				int length)
+{
+	struct usb_request *req;
+	int ret;
+
+	dev_dbg(hsotg->dev, "%s: buff %p, len %d\n", __func__, buff, length);
+
+	req = s3c_hsotg_ep_alloc_request(&ep->ep, GFP_ATOMIC);
+	if (!req) {
+		dev_warn(hsotg->dev, "%s: cannot alloc req\n", __func__);
+		return -ENOMEM;
+	}
+
+	hsotg->ep0_reply = req;
+
+	req->buf = hsotg->ep0_buff;
+	req->length = length;
+	req->zero = 1; /* always do zero-length final transfer */
+	req->complete = s3c_hsotg_complete_oursetup;
+
+	if (length)
+		memcpy(req->buf, buff, length);
+	else
+		ep->sent_zlp = 1;
+
+	ret = s3c_hsotg_ep_queue(&ep->ep, req, GFP_ATOMIC);
+	if (ret) {
+		dev_warn(hsotg->dev, "%s: cannot queue req\n", __func__);
+		return ret;
+	}
+
+	return 0;
+}
+
+/**
+ * s3c_hsotg_process_req_status - process request GET_STATUS
+ * @hsotg: The device state
+ * @ctrl: USB control request
+ */
+static int s3c_hsotg_process_req_status(struct s3c_hsotg *hsotg,
+					struct usb_ctrlrequest *ctrl)
+{
+	struct s3c_hsotg_ep *ep0 = &hsotg->eps[0];
+	struct s3c_hsotg_ep *ep;
+	__le16 reply;
+	int ret;
+
+	dev_dbg(hsotg->dev, "%s: USB_REQ_GET_STATUS\n", __func__);
+
+	if (!ep0->dir_in) {
+		dev_warn(hsotg->dev, "%s: direction out?\n", __func__);
+		return -EINVAL;
+	}
+
+	switch (ctrl->bRequestType & USB_RECIP_MASK) {
+	case USB_RECIP_DEVICE:
+		reply = cpu_to_le16(0); /* bit 0 => self powered,
+					 * bit 1 => remote wakeup */
+		break;
+
+	case USB_RECIP_INTERFACE:
+		/* currently, the data result should be zero */
+		reply = cpu_to_le16(0);
+		break;
+
+	case USB_RECIP_ENDPOINT:
+		ep = ep_from_windex(hsotg, le16_to_cpu(ctrl->wIndex));
+		if (!ep)
+			return -ENOENT;
+
+		reply = cpu_to_le16(ep->halted ? 1 : 0);
+		break;
+
+	default:
+		return 0;
+	}
+
+	if (le16_to_cpu(ctrl->wLength) != 2)
+		return -EINVAL;
+
+	ret = s3c_hsotg_send_reply(hsotg, ep0, &reply, 2);
+	if (ret) {
+		dev_err(hsotg->dev, "%s: failed to send reply\n", __func__);
+		return ret;
+	}
+
+	return 1;
+}
+
+static int s3c_hsotg_ep_sethalt(struct usb_ep *ep, int value);
+
+/**
+ * s3c_hsotg_process_req_feature - process request {SET,CLEAR}_FEATURE
+ * @hsotg: The device state
+ * @ctrl: USB control request
+ */
+static int s3c_hsotg_process_req_feature(struct s3c_hsotg *hsotg,
+					 struct usb_ctrlrequest *ctrl)
+{
+	bool set = (ctrl->bRequest == USB_REQ_SET_FEATURE);
+	struct s3c_hsotg_ep *ep;
+
+	dev_dbg(hsotg->dev, "%s: %s_FEATURE\n",
+		__func__, set ? "SET" : "CLEAR");
+
+	if (ctrl->bRequestType == USB_RECIP_ENDPOINT) {
+		ep = ep_from_windex(hsotg, le16_to_cpu(ctrl->wIndex));
+		if (!ep) {
+			dev_dbg(hsotg->dev, "%s: no endpoint for 0x%04x\n",
+				__func__, le16_to_cpu(ctrl->wIndex));
+			return -ENOENT;
+		}
+
+		switch (le16_to_cpu(ctrl->wValue)) {
+		case USB_ENDPOINT_HALT:
+			s3c_hsotg_ep_sethalt(&ep->ep, set);
+			break;
+
+		default:
+			return -ENOENT;
+		}
+	} else {
+		/* currently we only deal with endpoint recipients */
+		return -ENOENT;
+	}
+
+	return 1;
+}
+
+/**
+ * s3c_hsotg_process_control - process a control request
+ * @hsotg: The device state
+ * @ctrl: The control request received
+ *
+ * The controller has received the SETUP phase of a control request, and
+ * needs to work out what to do next (and whether to pass it on to the
+ * gadget driver).
+ */
+static void s3c_hsotg_process_control(struct s3c_hsotg *hsotg,
+				      struct usb_ctrlrequest *ctrl)
+{
+	struct s3c_hsotg_ep *ep0 = &hsotg->eps[0];
+	int ret = 0;
+	u32 dcfg;
+
+	ep0->sent_zlp = 0;
+
+	dev_dbg(hsotg->dev, "ctrl Req=%02x, Type=%02x, V=%04x, L=%04x\n",
+		 ctrl->bRequest, ctrl->bRequestType,
+		 ctrl->wValue, ctrl->wLength);
+
+	/* record the direction of the request, for later use when enqueuing
+	 * packets onto EP0. */
+
+	ep0->dir_in = (ctrl->bRequestType & USB_DIR_IN) ? 1 : 0;
+	dev_dbg(hsotg->dev, "ctrl: dir_in=%d\n", ep0->dir_in);
+
+	/* if we've no data with this request, then the last part of the
+	 * transaction is going to implicitly be IN. */
+	if (ctrl->wLength == 0)
+		ep0->dir_in = 1;
+
+	if ((ctrl->bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD) {
+		switch (ctrl->bRequest) {
+		case USB_REQ_SET_ADDRESS:
+			dcfg = readl(hsotg->regs + S3C_DCFG);
+			dcfg &= ~S3C_DCFG_DevAddr_MASK;
+			dcfg |= ctrl->wValue << S3C_DCFG_DevAddr_SHIFT;
+			writel(dcfg, hsotg->regs + S3C_DCFG);
+
+			dev_info(hsotg->dev, "new address %d\n", ctrl->wValue);
+
+			ret = s3c_hsotg_send_reply(hsotg, ep0, NULL, 0);
+			return;
+
+		case USB_REQ_GET_STATUS:
+			ret = s3c_hsotg_process_req_status(hsotg, ctrl);
+			break;
+
+		case USB_REQ_CLEAR_FEATURE:
+		case USB_REQ_SET_FEATURE:
+			ret = s3c_hsotg_process_req_feature(hsotg, ctrl);
+			break;
+		}
+	}
+
+	/* as a fallback, try delivering it to the driver to deal with */
+
+	if (ret == 0 && hsotg->driver) {
+		ret = hsotg->driver->setup(&hsotg->gadget, ctrl);
+		if (ret < 0)
+			dev_dbg(hsotg->dev, "driver->setup() ret %d\n", ret);
+	}
+
+	if (ret > 0) {
+		if (!ep0->dir_in) {
+			/* need to generate zlp in reply or take data */
+			/* todo - deal with any data we might be sent? */
+			ret = s3c_hsotg_send_reply(hsotg, ep0, NULL, 0);
+		}
+	}
+
+	/* the request is either not handleable, or is not formatted correctly
+	 * so respond with a STALL for the status stage to indicate failure.
+	 */
+
+	if (ret < 0) {
+		u32 reg;
+		u32 ctrl;
+
+		dev_dbg(hsotg->dev, "ep0 stall (dir=%d)\n", ep0->dir_in);
+		reg = (ep0->dir_in) ? S3C_DIEPCTL0 : S3C_DOEPCTL0;
+
+		/* S3C_DxEPCTL_Stall will be cleared by EP once it has
+		 * taken effect, so no need to clear later. */
+
+		ctrl = readl(hsotg->regs + reg);
+		ctrl |= S3C_DxEPCTL_Stall;
+		ctrl |= S3C_DxEPCTL_CNAK;
+		writel(ctrl, hsotg->regs + reg);
+
+		dev_dbg(hsotg->dev,
+			"written DxEPCTL=0x%08x to %08x (DxEPCTL=0x%08x)\n",
+			ctrl, reg, readl(hsotg->regs + reg));
+
+		/* don't believe we need to do anything more to get the EP
+		 * to reply with a STALL packet */
+	}
+}
+
+static void s3c_hsotg_enqueue_setup(struct s3c_hsotg *hsotg);
+
+/**
+ * s3c_hsotg_complete_setup - completion of a setup transfer
+ * @ep: The endpoint the request was on.
+ * @req: The request completed.
+ *
+ * Called on completion of any requests the driver itself submitted for
+ * EP0 setup packets
+ */
+static void s3c_hsotg_complete_setup(struct usb_ep *ep,
+				     struct usb_request *req)
+{
+	struct s3c_hsotg_ep *hs_ep = our_ep(ep);
+	struct s3c_hsotg *hsotg = hs_ep->parent;
+
+	if (req->status < 0) {
+		dev_dbg(hsotg->dev, "%s: failed %d\n", __func__, req->status);
+		return;
+	}
+
+	if (req->actual == 0)
+		s3c_hsotg_enqueue_setup(hsotg);
+	else
+		s3c_hsotg_process_control(hsotg, req->buf);
+}
+
+/**
+ * s3c_hsotg_enqueue_setup - start a request for EP0 packets
+ * @hsotg: The device state.
+ *
+ * Enqueue a request on EP0 if necessary to receive any SETUP packets
+ * sent from the host.
+ */
+static void s3c_hsotg_enqueue_setup(struct s3c_hsotg *hsotg)
+{
+	struct usb_request *req = hsotg->ctrl_req;
+	struct s3c_hsotg_req *hs_req = our_req(req);
+	int ret;
+
+	dev_dbg(hsotg->dev, "%s: queueing setup request\n", __func__);
+
+	req->zero = 0;
+	req->length = 8;
+	req->buf = hsotg->ctrl_buff;
+	req->complete = s3c_hsotg_complete_setup;
+
+	if (!list_empty(&hs_req->queue)) {
+		dev_dbg(hsotg->dev, "%s already queued???\n", __func__);
+		return;
+	}
+
+	hsotg->eps[0].dir_in = 0;
+
+	ret = s3c_hsotg_ep_queue(&hsotg->eps[0].ep, req, GFP_ATOMIC);
+	if (ret < 0) {
+		dev_err(hsotg->dev, "%s: failed queue (%d)\n", __func__, ret);
+		/* Don't think there's much we can do other than watch the
+		 * driver fail. */
+	}
+}
+
+/**
+ * get_ep_head - return the first request on the endpoint
+ * @hs_ep: The controller endpoint to get
+ *
+ * Get the first request on the endpoint.
+*/
+static struct s3c_hsotg_req *get_ep_head(struct s3c_hsotg_ep *hs_ep)
+{
+	if (list_empty(&hs_ep->queue))
+		return NULL;
+
+	return list_first_entry(&hs_ep->queue, struct s3c_hsotg_req, queue);
+}
+
+/**
+ * s3c_hsotg_complete_request - complete a request given to us
+ * @hsotg: The device state.
+ * @hs_ep: The endpoint the request was on.
+ * @hs_req: The request to complete.
+ * @result: The result code (0 => Ok, otherwise errno)
+ *
+ * The given request has finished, so call the necessary completion
+ * if it has one and then look to see if we can start a new request
+ * on the endpoint.
+ *
+ * Note, expects the ep to already be locked as appropriate.
+*/
+static void s3c_hsotg_complete_request(struct s3c_hsotg *hsotg,
+				       struct s3c_hsotg_ep *hs_ep,
+				       struct s3c_hsotg_req *hs_req,
+				       int result)
+{
+	bool restart;
+
+	if (!hs_req) {
+		dev_dbg(hsotg->dev, "%s: nothing to complete?\n", __func__);
+		return;
+	}
+
+	dev_dbg(hsotg->dev, "complete: ep %p %s, req %p, %d => %p\n",
+		hs_ep, hs_ep->ep.name, hs_req, result, hs_req->req.complete);
+
+	/* only replace the status if we've not already set an error
+	 * from a previous transaction */
+
+	if (hs_req->req.status == -EINPROGRESS)
+		hs_req->req.status = result;
+
+	hs_ep->req = NULL;
+	list_del_init(&hs_req->queue);
+
+	if (using_dma(hsotg))
+		s3c_hsotg_unmap_dma(hsotg, hs_ep, hs_req);
+
+	/* call the complete request with the locks off, just in case the
+	 * request tries to queue more work for this endpoint. */
+
+	if (hs_req->req.complete) {
+		spin_unlock(&hs_ep->lock);
+		hs_req->req.complete(&hs_ep->ep, &hs_req->req);
+		spin_lock(&hs_ep->lock);
+	}
+
+	/* Look to see if there is anything else to do. Note, the completion
+	 * of the previous request may have caused a new request to be started
+	 * so be careful when doing this. */
+
+	if (!hs_ep->req && result >= 0) {
+		restart = !list_empty(&hs_ep->queue);
+		if (restart) {
+			hs_req = get_ep_head(hs_ep);
+			s3c_hsotg_start_req(hsotg, hs_ep, hs_req, false);
+		}
+	}
+}
+
+/**
+ * s3c_hsotg_complete_request_lock - complete a request given to us (locked)
+ * @hsotg: The device state.
+ * @hs_ep: The endpoint the request was on.
+ * @hs_req: The request to complete.
+ * @result: The result code (0 => Ok, otherwise errno)
+ *
+ * See s3c_hsotg_complete_request(), but called with the endpoint's
+ * lock held.
+*/
+static void s3c_hsotg_complete_request_lock(struct s3c_hsotg *hsotg,
+					    struct s3c_hsotg_ep *hs_ep,
+					    struct s3c_hsotg_req *hs_req,
+					    int result)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&hs_ep->lock, flags);
+	s3c_hsotg_complete_request(hsotg, hs_ep, hs_req, result);
+	spin_unlock_irqrestore(&hs_ep->lock, flags);
+}
+
+/**
+ * s3c_hsotg_rx_data - receive data from the FIFO for an endpoint
+ * @hsotg: The device state.
+ * @ep_idx: The endpoint index for the data
+ * @size: The size of data in the fifo, in bytes
+ *
+ * The FIFO status shows there is data to read from the FIFO for a given
+ * endpoint, so sort out whether we need to read the data into a request
+ * that has been made for that endpoint.
+ */
+static void s3c_hsotg_rx_data(struct s3c_hsotg *hsotg, int ep_idx, int size)
+{
+	struct s3c_hsotg_ep *hs_ep = &hsotg->eps[ep_idx];
+	struct s3c_hsotg_req *hs_req = hs_ep->req;
+	void __iomem *fifo = hsotg->regs + S3C_EPFIFO(ep_idx);
+	int to_read;
+	int max_req;
+	int read_ptr;
+
+	if (!hs_req) {
+		u32 epctl = readl(hsotg->regs + S3C_DOEPCTL(ep_idx));
+		int ptr;
+
+		dev_warn(hsotg->dev,
+			 "%s: FIFO %d bytes on ep%d but no req (DxEPCTL=0x%08x)\n",
+			 __func__, size, ep_idx, epctl);
+
+		/* dump the data from the FIFO, we've nothing we can do */
+		for (ptr = 0; ptr < size; ptr += 4)
+			(void)readl(fifo);
+
+		return;
+	}
+
+	spin_lock(&hs_ep->lock);
+
+	to_read = size;
+	read_ptr = hs_req->req.actual;
+	max_req = hs_req->req.length - read_ptr;
+
+	if (to_read > max_req) {
+		/* more data appeared than we were willing
+		 * to deal with in this request.
+		 */
+
+		/* currently we don't deal with this */
+		WARN_ON_ONCE(1);
+	}
+
+	dev_dbg(hsotg->dev, "%s: read %d/%d, done %d/%d\n",
+		__func__, to_read, max_req, read_ptr, hs_req->req.length);
+
+	hs_ep->total_data += to_read;
+	hs_req->req.actual += to_read;
+	to_read = DIV_ROUND_UP(to_read, 4);
+
+	/* note, we might over-write the buffer end by 3 bytes depending on
+	 * alignment of the data. */
+	readsl(fifo, hs_req->req.buf + read_ptr, to_read);
+
+	spin_unlock(&hs_ep->lock);
+}
+
+/**
+ * s3c_hsotg_send_zlp - send zero-length packet on control endpoint
+ * @hsotg: The device instance
+ * @req: The request currently on this endpoint
+ *
+ * Generate a zero-length IN packet request for terminating a SETUP
+ * transaction.
+ *
+ * Note, since we don't write any data to the TxFIFO, then it is
+ * currently believed that we do not need to wait for any space in
+ * the TxFIFO.
+ */
+static void s3c_hsotg_send_zlp(struct s3c_hsotg *hsotg,
+			       struct s3c_hsotg_req *req)
+{
+	u32 ctrl;
+
+	if (!req) {
+		dev_warn(hsotg->dev, "%s: no request?\n", __func__);
+		return;
+	}
+
+	if (req->req.length == 0) {
+		hsotg->eps[0].sent_zlp = 1;
+		s3c_hsotg_enqueue_setup(hsotg);
+		return;
+	}
+
+	hsotg->eps[0].dir_in = 1;
+	hsotg->eps[0].sent_zlp = 1;
+
+	dev_dbg(hsotg->dev, "sending zero-length packet\n");
+
+	/* issue a zero-sized packet to terminate this */
+	writel(S3C_DxEPTSIZ_MC(1) | S3C_DxEPTSIZ_PktCnt(1) |
+	       S3C_DxEPTSIZ_XferSize(0), hsotg->regs + S3C_DIEPTSIZ(0));
+
+	ctrl = readl(hsotg->regs + S3C_DIEPCTL0);
+	ctrl |= S3C_DxEPCTL_CNAK;  /* clear NAK set by core */
+	ctrl |= S3C_DxEPCTL_EPEna; /* ensure ep enabled */
+	ctrl |= S3C_DxEPCTL_USBActEp;
+	writel(ctrl, hsotg->regs + S3C_DIEPCTL0);
+}
+
+/**
+ * s3c_hsotg_handle_outdone - handle receiving OutDone/SetupDone from RXFIFO
+ * @hsotg: The device instance
+ * @epnum: The endpoint received from
+ * @was_setup: Set if processing a SetupDone event.
+ *
+ * The RXFIFO has delivered an OutDone event, which means that the data
+ * transfer for an OUT endpoint has been completed, either by a short
+ * packet or by the finish of a transfer.
+*/
+static void s3c_hsotg_handle_outdone(struct s3c_hsotg *hsotg,
+				     int epnum, bool was_setup)
+{
+	struct s3c_hsotg_ep *hs_ep = &hsotg->eps[epnum];
+	struct s3c_hsotg_req *hs_req = hs_ep->req;
+	struct usb_request *req = &hs_req->req;
+	int result = 0;
+
+	if (!hs_req) {
+		dev_dbg(hsotg->dev, "%s: no request active\n", __func__);
+		return;
+	}
+
+	if (using_dma(hsotg)) {
+		u32 epsize = readl(hsotg->regs + S3C_DOEPTSIZ(epnum));
+		unsigned size_done;
+		unsigned size_left;
+
+		/* Calculate the size of the transfer by checking how much
+		 * is left in the endpoint size register and then working it
+		 * out from the amount we loaded for the transfer.
+		 *
+		 * We need to do this as DMA pointers are always 32bit aligned
+		 * so may overshoot/undershoot the transfer.
+		 */
+
+		size_left = S3C_DxEPTSIZ_XferSize_GET(epsize);
+
+		size_done = hs_ep->size_loaded - size_left;
+		size_done += hs_ep->last_load;
+
+		req->actual = size_done;
+	}
+
+	if (req->actual < req->length && req->short_not_ok) {
+		dev_dbg(hsotg->dev, "%s: got %d/%d (short not ok) => error\n",
+			__func__, req->actual, req->length);
+
+		/* todo - what should we return here? there's no one else
+		 * even bothering to check the status. */
+	}
+
+	if (epnum == 0) {
+		if (!was_setup && req->complete != s3c_hsotg_complete_setup)
+			s3c_hsotg_send_zlp(hsotg, hs_req);
+	}
+
+	s3c_hsotg_complete_request_lock(hsotg, hs_ep, hs_req, result);
+}
+
+/**
+ * s3c_hsotg_read_frameno - read current frame number
+ * @hsotg: The device instance
+ *
+ * Return the current frame number
+*/
+static u32 s3c_hsotg_read_frameno(struct s3c_hsotg *hsotg)
+{
+	u32 dsts;
+
+	dsts = readl(hsotg->regs + S3C_DSTS);
+	dsts &= S3C_DSTS_SOFFN_MASK;
+	dsts >>= S3C_DSTS_SOFFN_SHIFT;
+
+	return dsts;
+}
+
+/**
+ * s3c_hsotg_handle_rx - RX FIFO has data
+ * @hsotg: The device instance
+ *
+ * The IRQ handler has detected that the RX FIFO has some data in it
+ * that requires processing, so find out what is in there and do the
+ * appropriate read.
+ *
+ * The RXFIFO is a true FIFO, the packets coming out are still in packet
+ * chunks, so if you have x packets received on an endpoint you'll get x
+ * FIFO events delivered, each with a packet's worth of data in it.
+ *
+ * When using DMA, we should not be processing events from the RXFIFO
+ * as the actual data should be sent to the memory directly and we turn
+ * on the completion interrupts to get notifications of transfer completion.
+ */
+void s3c_hsotg_handle_rx(struct s3c_hsotg *hsotg)
+{
+	u32 grxstsr = readl(hsotg->regs + S3C_GRXSTSP);
+	u32 epnum, status, size;
+
+	WARN_ON(using_dma(hsotg));
+
+	epnum = grxstsr & S3C_GRXSTS_EPNum_MASK;
+	status = grxstsr & S3C_GRXSTS_PktSts_MASK;
+
+	size = grxstsr & S3C_GRXSTS_ByteCnt_MASK;
+	size >>= S3C_GRXSTS_ByteCnt_SHIFT;
+
+	if (1)
+		dev_dbg(hsotg->dev, "%s: GRXSTSP=0x%08x (%d@%d)\n",
+			__func__, grxstsr, size, epnum);
+
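+	/* the PktSts constants include the field shift, so shift them down
+	 * in the same way as the status value before comparing. */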
+#define __status(x) ((x) >> S3C_GRXSTS_PktSts_SHIFT)
+
+	switch (status >> S3C_GRXSTS_PktSts_SHIFT) {
+	case __status(S3C_GRXSTS_PktSts_GlobalOutNAK):
+		dev_dbg(hsotg->dev, "GlobalOutNAK\n");
+		break;
+
+	case __status(S3C_GRXSTS_PktSts_OutDone):
+		dev_dbg(hsotg->dev, "OutDone (Frame=0x%08x)\n",
+			s3c_hsotg_read_frameno(hsotg));
+
+		if (!using_dma(hsotg))
+			s3c_hsotg_handle_outdone(hsotg, epnum, false);
+		break;
+
+	case __status(S3C_GRXSTS_PktSts_SetupDone):
+		dev_dbg(hsotg->dev,
+			"SetupDone (Frame=0x%08x, DOPEPCTL=0x%08x)\n",
+			s3c_hsotg_read_frameno(hsotg),
+			readl(hsotg->regs + S3C_DOEPCTL(0)));
+
+		s3c_hsotg_handle_outdone(hsotg, epnum, true);
+		break;
+
+	case __status(S3C_GRXSTS_PktSts_OutRX):
+		s3c_hsotg_rx_data(hsotg, epnum, size);
+		break;
+
+	case __status(S3C_GRXSTS_PktSts_SetupRX):
+		dev_dbg(hsotg->dev,
+			"SetupRX (Frame=0x%08x, DOPEPCTL=0x%08x)\n",
+			s3c_hsotg_read_frameno(hsotg),
+			readl(hsotg->regs + S3C_DOEPCTL(0)));
+
+		s3c_hsotg_rx_data(hsotg, epnum, size);
+		break;
+
+	default:
+		dev_warn(hsotg->dev, "%s: unknown status %08x\n",
+			 __func__, grxstsr);
+
+		s3c_hsotg_dump(hsotg);
+		break;
+	}
+}
+
+/**
+ * s3c_hsotg_ep0_mps - turn max packet size into register setting
+ * @mps: The maximum packet size in bytes.
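+ *
+ * Note: EP0's control register appears to use a two-bit encoding for the
+ * maximum packet size (64, 32, 16 or 8 bytes) rather than storing the raw
+ * value as the other endpoints do, hence the translation here.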
+*/
+static u32 s3c_hsotg_ep0_mps(unsigned int mps)
+{
+	switch (mps) {
+	case 64:
+		return S3C_D0EPCTL_MPS_64;
+	case 32:
+		return S3C_D0EPCTL_MPS_32;
+	case 16:
+		return S3C_D0EPCTL_MPS_16;
+	case 8:
+		return S3C_D0EPCTL_MPS_8;
+	}
+
+	/* bad max packet size, warn and return invalid result */
+	WARN_ON(1);
+	return (u32)-1;
+}
+
+/**
+ * s3c_hsotg_set_ep_maxpacket - set endpoint's max-packet field
+ * @hsotg: The driver state.
+ * @ep: The index number of the endpoint
+ * @mps: The maximum packet size in bytes
+ *
+ * Configure the maximum packet size for the given endpoint, updating
+ * the hardware control registers to reflect this.
+ */
+static void s3c_hsotg_set_ep_maxpacket(struct s3c_hsotg *hsotg,
+				       unsigned int ep, unsigned int mps)
+{
+	struct s3c_hsotg_ep *hs_ep = &hsotg->eps[ep];
+	void __iomem *regs = hsotg->regs;
+	u32 mpsval;
+	u32 reg;
+
+	if (ep == 0) {
+		/* EP0 is a special case */
+		mpsval = s3c_hsotg_ep0_mps(mps);
+		if (mpsval > 3)
+			goto bad_mps;
+	} else {
+		if (mps >= S3C_DxEPCTL_MPS_LIMIT+1)
+			goto bad_mps;
+
+		mpsval = mps;
+	}
+
+	hs_ep->ep.maxpacket = mps;
+
+	/* update both the in and out endpoint controldir_ registers, even
+	 * if one of the directions may not be in use. */
+
+	reg = readl(regs + S3C_DIEPCTL(ep));
+	reg &= ~S3C_DxEPCTL_MPS_MASK;
+	reg |= mpsval;
+	writel(reg, regs + S3C_DIEPCTL(ep));
+
+	reg = readl(regs + S3C_DOEPCTL(ep));
+	reg &= ~S3C_DxEPCTL_MPS_MASK;
+	reg |= mpsval;
+	writel(reg, regs + S3C_DOEPCTL(ep));
+
+	return;
+
+bad_mps:
+	dev_err(hsotg->dev, "ep%d: bad mps of %d\n", ep, mps);
+}
+
+
+/**
+ * s3c_hsotg_trytx - check to see if anything needs transmitting
+ * @hsotg: The driver state
+ * @hs_ep: The driver endpoint to check.
+ *
+ * Check to see if there is a request that has data to send, and if so
+ * make an attempt to write data into the FIFO.
+ */
+static int s3c_hsotg_trytx(struct s3c_hsotg *hsotg,
+			   struct s3c_hsotg_ep *hs_ep)
+{
+	struct s3c_hsotg_req *hs_req = hs_ep->req;
+
+	if (!hs_ep->dir_in || !hs_req)
+		return 0;
+
+	if (hs_req->req.actual < hs_req->req.length) {
+		dev_dbg(hsotg->dev, "trying to write more for ep%d\n",
+			hs_ep->index);
+		return s3c_hsotg_write_fifo(hsotg, hs_ep, hs_req);
+	}
+
+	return 0;
+}
+
+/**
+ * s3c_hsotg_complete_in - complete IN transfer
+ * @hsotg: The device state.
+ * @hs_ep: The endpoint that has just completed.
+ *
+ * An IN transfer has been completed, update the transfer's state and then
+ * call the relevant completion routines.
+ */
+static void s3c_hsotg_complete_in(struct s3c_hsotg *hsotg,
+				  struct s3c_hsotg_ep *hs_ep)
+{
+	struct s3c_hsotg_req *hs_req = hs_ep->req;
+	u32 epsize = readl(hsotg->regs + S3C_DIEPTSIZ(hs_ep->index));
+	int size_left, size_done;
+
+	if (!hs_req) {
+		dev_dbg(hsotg->dev, "XferCompl but no req\n");
+		return;
+	}
+
+	/* Calculate the size of the transfer by checking how much is left
+	 * in the endpoint size register and then working it out from
+	 * the amount we loaded for the transfer.
+	 *
+	 * We do this even for DMA, as the transfer may have incremented
+	 * past the end of the buffer (DMA transfers are always 32bit
+	 * aligned).
+	 */
+
+	size_left = S3C_DxEPTSIZ_XferSize_GET(epsize);
+
+	size_done = hs_ep->size_loaded - size_left;
+	size_done += hs_ep->last_load;
+
+	if (hs_req->req.actual != size_done)
+		dev_dbg(hsotg->dev, "%s: adjusting size done %d => %d\n",
+			__func__, hs_req->req.actual, size_done);
+
+	hs_req->req.actual = size_done;
+
+	/* if we did all of the transfer, and there is more data left
+	 * around, then try restarting the rest of the request */
+
+	if (!size_left && hs_req->req.actual < hs_req->req.length) {
+		dev_dbg(hsotg->dev, "%s trying more for req...\n", __func__);
+		s3c_hsotg_start_req(hsotg, hs_ep, hs_req, true);
+	} else
+		s3c_hsotg_complete_request_lock(hsotg, hs_ep, hs_req, 0);
+}
+
+/**
+ * s3c_hsotg_epint - handle an in/out endpoint interrupt
+ * @hsotg: The driver state
+ * @idx: The index for the endpoint (0..15)
+ * @dir_in: Set if this is an IN endpoint
+ *
+ * Process and clear any interrupt pending for an individual endpoint
+*/
+static void s3c_hsotg_epint(struct s3c_hsotg *hsotg, unsigned int idx,
+			    int dir_in)
+{
+	struct s3c_hsotg_ep *hs_ep = &hsotg->eps[idx];
+	u32 epint_reg = dir_in ? S3C_DIEPINT(idx) : S3C_DOEPINT(idx);
+	u32 epctl_reg = dir_in ? S3C_DIEPCTL(idx) : S3C_DOEPCTL(idx);
+	u32 epsiz_reg = dir_in ? S3C_DIEPTSIZ(idx) : S3C_DOEPTSIZ(idx);
+	u32 ints;
+	u32 clear = 0;
+
+	ints = readl(hsotg->regs + epint_reg);
+
+	dev_dbg(hsotg->dev, "%s: ep%d(%s) DxEPINT=0x%08x\n",
+		__func__, idx, dir_in ? "in" : "out", ints);
+
+	if (ints & S3C_DxEPINT_XferCompl) {
+		dev_dbg(hsotg->dev,
+			"%s: XferCompl: DxEPCTL=0x%08x, DxEPTSIZ=%08x\n",
+			__func__, readl(hsotg->regs + epctl_reg),
+			readl(hsotg->regs + epsiz_reg));
+
+		/* we get OutDone from the FIFO, so we only need to look
+		 * at completing IN requests here */
+		if (dir_in) {
+			s3c_hsotg_complete_in(hsotg, hs_ep);
+
+			if (idx == 0)
+				s3c_hsotg_enqueue_setup(hsotg);
+		} else if (using_dma(hsotg)) {
+			/* We're using DMA, we need to fire an OutDone here
+			 * as we ignore the RXFIFO. */
+
+			s3c_hsotg_handle_outdone(hsotg, idx, false);
+		}
+
+		clear |= S3C_DxEPINT_XferCompl;
+	}
+
+	if (ints & S3C_DxEPINT_EPDisbld) {
+		dev_dbg(hsotg->dev, "%s: EPDisbld\n", __func__);
+		clear |= S3C_DxEPINT_EPDisbld;
+	}
+
+	if (ints & S3C_DxEPINT_AHBErr) {
+		dev_dbg(hsotg->dev, "%s: AHBErr\n", __func__);
+		clear |= S3C_DxEPINT_AHBErr;
+	}
+
+	if (ints & S3C_DxEPINT_Setup) {  /* Setup or Timeout */
+		dev_dbg(hsotg->dev, "%s: Setup/Timeout\n",  __func__);
+
+		if (using_dma(hsotg) && idx == 0) {
+			/* this is the notification that we've received a
+			 * setup packet. In non-DMA mode we'd get this
+			 * from the RXFIFO, instead we need to process
+			 * the setup here. */
+
+			if (dir_in)
+				WARN_ON_ONCE(1);
+			else
+				s3c_hsotg_handle_outdone(hsotg, 0, true);
+		}
+
+		clear |= S3C_DxEPINT_Setup;
+	}
+
+	if (ints & S3C_DxEPINT_Back2BackSetup) {
+		dev_dbg(hsotg->dev, "%s: B2BSetup/INEPNakEff\n", __func__);
+		clear |= S3C_DxEPINT_Back2BackSetup;
+	}
+
+	if (dir_in) {
+		/* not sure if this is important, but we'll clear it anyway
+		 */
+		if (ints & S3C_DIEPMSK_INTknTXFEmpMsk) {
+			dev_dbg(hsotg->dev, "%s: ep%d: INTknTXFEmpMsk\n",
+				__func__, idx);
+			clear |= S3C_DIEPMSK_INTknTXFEmpMsk;
+		}
+
+		/* this probably means something bad is happening */
+		if (ints & S3C_DIEPMSK_INTknEPMisMsk) {
+			dev_warn(hsotg->dev, "%s: ep%d: INTknEP\n",
+				 __func__, idx);
+			clear |= S3C_DIEPMSK_INTknEPMisMsk;
+		}
+	}
+
+	writel(clear, hsotg->regs + epint_reg);
+}
+
+/**
+ * s3c_hsotg_irq_enumdone - Handle EnumDone interrupt (enumeration done)
+ * @hsotg: The device state.
+ *
+ * Handle updating the device settings after the enumeration phase has
+ * been completed.
+*/
+static void s3c_hsotg_irq_enumdone(struct s3c_hsotg *hsotg)
+{
+	u32 dsts = readl(hsotg->regs + S3C_DSTS);
+	int ep0_mps = 0, ep_mps;
+
+	/* This should signal the finish of the enumeration phase
+	 * of the USB handshaking, so we should now know what rate
+	 * we connected at. */
+
+	dev_dbg(hsotg->dev, "EnumDone (DSTS=0x%08x)\n", dsts);
+
+	/* note, since we're limited by the size of transfer on EP0, and
+	 * it seems IN transfers must be an even number of packets, we do
+	 * not advertise a 64-byte MPS on EP0. */
+
+	/* catch both EnumSpd_FS and EnumSpd_FS48 */
+	switch (dsts & S3C_DSTS_EnumSpd_MASK) {
+	case S3C_DSTS_EnumSpd_FS:
+	case S3C_DSTS_EnumSpd_FS48:
+		hsotg->gadget.speed = USB_SPEED_FULL;
+		dev_info(hsotg->dev, "new device is full-speed\n");
+
+		ep0_mps = EP0_MPS_LIMIT;
+		ep_mps = 64;
+		break;
+
+	case S3C_DSTS_EnumSpd_HS:
+		dev_info(hsotg->dev, "new device is high-speed\n");
+		hsotg->gadget.speed = USB_SPEED_HIGH;
+
+		ep0_mps = EP0_MPS_LIMIT;
+		ep_mps = 512;
+		break;
+
+	case S3C_DSTS_EnumSpd_LS:
+		hsotg->gadget.speed = USB_SPEED_LOW;
+		dev_info(hsotg->dev, "new device is low-speed\n");
+
+		/* note, we don't actually support LS in this driver at the
+		 * moment, and the documentation seems to imply that it isn't
+		 * supported by the PHYs on some of the devices.
+		 */
+		break;
+	}
+
+	/* we should now know the maximum packet size for an
+	 * endpoint, so set the endpoints to a default value. */
+
+	if (ep0_mps) {
+		int i;
+		s3c_hsotg_set_ep_maxpacket(hsotg, 0, ep0_mps);
+		for (i = 1; i < S3C_HSOTG_EPS; i++)
+			s3c_hsotg_set_ep_maxpacket(hsotg, i, ep_mps);
+	}
+
+	/* ensure after enumeration our EP0 is active */
+
+	s3c_hsotg_enqueue_setup(hsotg);
+
+	dev_dbg(hsotg->dev, "EP0: DIEPCTL0=0x%08x, DOEPCTL0=0x%08x\n",
+		readl(hsotg->regs + S3C_DIEPCTL0),
+		readl(hsotg->regs + S3C_DOEPCTL0));
+}
+
+/**
+ * kill_all_requests - remove all requests from the endpoint's queue
+ * @hsotg: The device state.
+ * @ep: The endpoint the requests may be on.
+ * @result: The result code to use.
+ * @force: Force removal of any current requests
+ *
+ * Go through the requests on the given endpoint and mark them
+ * completed with the given result code.
+ */
+static void kill_all_requests(struct s3c_hsotg *hsotg,
+			      struct s3c_hsotg_ep *ep,
+			      int result, bool force)
+{
+	struct s3c_hsotg_req *req, *treq;
+	unsigned long flags;
+
+	spin_lock_irqsave(&ep->lock, flags);
+
+	list_for_each_entry_safe(req, treq, &ep->queue, queue) {
+		/* currently, we can't do much about an already
+		 * running request on an in endpoint */
+
+		if (ep->req == req && ep->dir_in && !force)
+			continue;
+
+		s3c_hsotg_complete_request(hsotg, ep, req,
+					   result);
+	}
+
+	spin_unlock_irqrestore(&ep->lock, flags);
+}
+
+#define call_gadget(_hs, _entry) \
+	do { \
+		if ((_hs)->gadget.speed != USB_SPEED_UNKNOWN &&	\
+		    (_hs)->driver && (_hs)->driver->_entry)	\
+			(_hs)->driver->_entry(&(_hs)->gadget); \
+	} while (0)
+
+/**
+ * s3c_hsotg_disconnect_irq - disconnect irq service
+ * @hsotg: The device state.
+ *
+ * A disconnect IRQ has been received, meaning that the host has
+ * lost contact with the bus. Remove all current transactions
+ * and signal the gadget driver that this has happened.
+*/
+static void s3c_hsotg_disconnect_irq(struct s3c_hsotg *hsotg)
+{
+	unsigned ep;
+
+	for (ep = 0; ep < S3C_HSOTG_EPS; ep++)
+		kill_all_requests(hsotg, &hsotg->eps[ep], -ESHUTDOWN, true);
+
+	call_gadget(hsotg, disconnect);
+}
+
+/**
+ * s3c_hsotg_irq_fifoempty - TX FIFO empty interrupt handler
+ * @hsotg: The device state:
+ * @periodic: True if this is a periodic FIFO interrupt
+ */
+static void s3c_hsotg_irq_fifoempty(struct s3c_hsotg *hsotg, bool periodic)
+{
+	struct s3c_hsotg_ep *ep;
+	int epno, ret;
+
+	/* look through for any more data to transmit */
+
+	for (epno = 0; epno < S3C_HSOTG_EPS; epno++) {
+		ep = &hsotg->eps[epno];
+
+		if (!ep->dir_in)
+			continue;
+
+		if ((periodic && !ep->periodic) ||
+		    (!periodic && ep->periodic))
+			continue;
+
+		ret = s3c_hsotg_trytx(hsotg, ep);
+		if (ret < 0)
+			break;
+	}
+}
+
+static struct s3c_hsotg *our_hsotg;
+
+/* IRQ flags which will trigger a retry around the IRQ loop */
+#define IRQ_RETRY_MASK (S3C_GINTSTS_NPTxFEmp | \
+			S3C_GINTSTS_PTxFEmp |  \
+			S3C_GINTSTS_RxFLvl)
+
+/**
+ * s3c_hsotg_irq - handle device interrupt
+ * @irq: The IRQ number triggered
+ * @pw: The pw value when registered the handler.
+ */
+static irqreturn_t s3c_hsotg_irq(int irq, void *pw)
+{
+	struct s3c_hsotg *hsotg = pw;
+	int retry_count = 8;
+	u32 gintsts;
+	u32 gintmsk;
+
+irq_retry:
+	gintsts = readl(hsotg->regs + S3C_GINTSTS);
+	gintmsk = readl(hsotg->regs + S3C_GINTMSK);
+
+	dev_dbg(hsotg->dev, "%s: %08x %08x (%08x) retry %d\n",
+		__func__, gintsts, gintsts & gintmsk, gintmsk, retry_count);
+
+	gintsts &= gintmsk;
+
+	if (gintsts & S3C_GINTSTS_OTGInt) {
+		u32 otgint = readl(hsotg->regs + S3C_GOTGINT);
+
+		dev_info(hsotg->dev, "OTGInt: %08x\n", otgint);
+
+		writel(otgint, hsotg->regs + S3C_GOTGINT);
+		writel(S3C_GINTSTS_OTGInt, hsotg->regs + S3C_GINTSTS);
+	}
+
+	if (gintsts & S3C_GINTSTS_DisconnInt) {
+		dev_dbg(hsotg->dev, "%s: DisconnInt\n", __func__);
+		writel(S3C_GINTSTS_DisconnInt, hsotg->regs + S3C_GINTSTS);
+
+		s3c_hsotg_disconnect_irq(hsotg);
+	}
+
+	if (gintsts & S3C_GINTSTS_SessReqInt) {
+		dev_dbg(hsotg->dev, "%s: SessReqInt\n", __func__);
+		writel(S3C_GINTSTS_SessReqInt, hsotg->regs + S3C_GINTSTS);
+	}
+
+	if (gintsts & S3C_GINTSTS_EnumDone) {
+		s3c_hsotg_irq_enumdone(hsotg);
+		writel(S3C_GINTSTS_EnumDone, hsotg->regs + S3C_GINTSTS);
+	}
+
+	if (gintsts & S3C_GINTSTS_ConIDStsChng) {
+		dev_dbg(hsotg->dev, "ConIDStsChg (DSTS=0x%08x, GOTCTL=%08x)\n",
+			readl(hsotg->regs + S3C_DSTS),
+			readl(hsotg->regs + S3C_GOTGCTL));
+
+		writel(S3C_GINTSTS_ConIDStsChng, hsotg->regs + S3C_GINTSTS);
+	}
+
+	if (gintsts & (S3C_GINTSTS_OEPInt | S3C_GINTSTS_IEPInt)) {
+		u32 daint = readl(hsotg->regs + S3C_DAINT);
+		u32 daint_out = daint >> S3C_DAINT_OutEP_SHIFT;
+		u32 daint_in = daint & ~(daint_out << S3C_DAINT_OutEP_SHIFT);
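+		/* DAINT holds the IN endpoint interrupt bits below
+		 * S3C_DAINT_OutEP_SHIFT and the OUT bits above it, so split
+		 * the word before walking each half below. */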
+		int ep;
+
+		dev_dbg(hsotg->dev, "%s: daint=%08x\n", __func__, daint);
+
+		for (ep = 0; ep < 15 && daint_out; ep++, daint_out >>= 1) {
+			if (daint_out & 1)
+				s3c_hsotg_epint(hsotg, ep, 0);
+		}
+
+		for (ep = 0; ep < 15 && daint_in; ep++, daint_in >>= 1) {
+			if (daint_in & 1)
+				s3c_hsotg_epint(hsotg, ep, 1);
+		}
+
+		writel(daint, hsotg->regs + S3C_DAINT);
+		writel(gintsts & (S3C_GINTSTS_OEPInt | S3C_GINTSTS_IEPInt),
+		       hsotg->regs + S3C_GINTSTS);
+	}
+
+	if (gintsts & S3C_GINTSTS_USBRst) {
+		dev_info(hsotg->dev, "%s: USBRst\n", __func__);
+		dev_dbg(hsotg->dev, "GNPTXSTS=%08x\n",
+			readl(hsotg->regs + S3C_GNPTXSTS));
+
+		kill_all_requests(hsotg, &hsotg->eps[0], -ECONNRESET, true);
+
+		/* it seems after a reset we can end up with a situation
+		 * where the TXFIFO still has data in it... try flushing
+		 * it to remove anything that may still be in it.
+		 */
+
+		if (1) {
+			writel(S3C_GRSTCTL_TxFNum(0) | S3C_GRSTCTL_TxFFlsh,
+			       hsotg->regs + S3C_GRSTCTL);
+
+			dev_info(hsotg->dev, "GNPTXSTS=%08x\n",
+				 readl(hsotg->regs + S3C_GNPTXSTS));
+		}
+
+		s3c_hsotg_enqueue_setup(hsotg);
+
+		writel(S3C_GINTSTS_USBRst, hsotg->regs + S3C_GINTSTS);
+	}
+
+	/* check both FIFOs */
+
+	if (gintsts & S3C_GINTSTS_NPTxFEmp) {
+		dev_dbg(hsotg->dev, "NPTxFEmp\n");
+
+		/* Disable the interrupt to stop it happening again
+		 * unless one of these endpoint routines decides that
+		 * it needs re-enabling */
+
+		s3c_hsotg_disable_gsint(hsotg, S3C_GINTSTS_NPTxFEmp);
+		s3c_hsotg_irq_fifoempty(hsotg, false);
+
+		writel(S3C_GINTSTS_NPTxFEmp, hsotg->regs + S3C_GINTSTS);
+	}
+
+	if (gintsts & S3C_GINTSTS_PTxFEmp) {
+		dev_dbg(hsotg->dev, "PTxFEmp\n");
+
+		/* See note in S3C_GINTSTS_NPTxFEmp */
+
+		s3c_hsotg_disable_gsint(hsotg, S3C_GINTSTS_PTxFEmp);
+		s3c_hsotg_irq_fifoempty(hsotg, true);
+
+		writel(S3C_GINTSTS_PTxFEmp, hsotg->regs + S3C_GINTSTS);
+	}
+
+	if (gintsts & S3C_GINTSTS_RxFLvl) {
+		/* note, since GINTSTS_RxFLvl doubles as FIFO-not-empty,
+		 * we need to retry s3c_hsotg_handle_rx if this is still
+		 * set. */
+
+		s3c_hsotg_handle_rx(hsotg);
+		writel(S3C_GINTSTS_RxFLvl, hsotg->regs + S3C_GINTSTS);
+	}
+
+	if (gintsts & S3C_GINTSTS_ModeMis) {
+		dev_warn(hsotg->dev, "warning, mode mismatch triggered\n");
+		writel(S3C_GINTSTS_ModeMis, hsotg->regs + S3C_GINTSTS);
+	}
+
+	if (gintsts & S3C_GINTSTS_USBSusp) {
+		dev_info(hsotg->dev, "S3C_GINTSTS_USBSusp\n");
+		writel(S3C_GINTSTS_USBSusp, hsotg->regs + S3C_GINTSTS);
+
+		call_gadget(hsotg, suspend);
+	}
+
+	if (gintsts & S3C_GINTSTS_WkUpInt) {
+		dev_info(hsotg->dev, "S3C_GINTSTS_WkUpIn\n");
+		writel(S3C_GINTSTS_WkUpInt, hsotg->regs + S3C_GINTSTS);
+
+		call_gadget(hsotg, resume);
+	}
+
+	if (gintsts & S3C_GINTSTS_ErlySusp) {
+		dev_dbg(hsotg->dev, "S3C_GINTSTS_ErlySusp\n");
+		writel(S3C_GINTSTS_ErlySusp, hsotg->regs + S3C_GINTSTS);
+	}
+
+	/* these next two seem to crop up occasionally causing the core
+	 * to shut down the USB transfer, so try clearing them and logging
+	 * the occurrence. */
+
+	if (gintsts & S3C_GINTSTS_GOUTNakEff) {
+		dev_info(hsotg->dev, "GOUTNakEff triggered\n");
+
+		s3c_hsotg_dump(hsotg);
+
+		writel(S3C_DCTL_CGOUTNak, hsotg->regs + S3C_DCTL);
+		writel(S3C_GINTSTS_GOUTNakEff, hsotg->regs + S3C_GINTSTS);
+	}
+
+	if (gintsts & S3C_GINTSTS_GINNakEff) {
+		dev_info(hsotg->dev, "GINNakEff triggered\n");
+
+		s3c_hsotg_dump(hsotg);
+
+		writel(S3C_DCTL_CGNPInNAK, hsotg->regs + S3C_DCTL);
+		writel(S3C_GINTSTS_GINNakEff, hsotg->regs + S3C_GINTSTS);
+	}
+
+	/* if we've had fifo events, we should try and go around the
+	 * loop again to see if there's any point in returning yet. */
+
+	if ((gintsts & IRQ_RETRY_MASK) && --retry_count > 0)
+		goto irq_retry;
+
+	return IRQ_HANDLED;
+}
+
+/**
+ * s3c_hsotg_ep_enable - enable the given endpoint
+ * @ep: The USB endpoint to configure
+ * @desc: The USB endpoint descriptor to configure with.
+ *
+ * This is called from the USB gadget code's usb_ep_enable().
+*/
+static int s3c_hsotg_ep_enable(struct usb_ep *ep,
+			       const struct usb_endpoint_descriptor *desc)
+{
+	struct s3c_hsotg_ep *hs_ep = our_ep(ep);
+	struct s3c_hsotg *hsotg = hs_ep->parent;
+	unsigned long flags;
+	int index = hs_ep->index;
+	u32 epctrl_reg;
+	u32 epctrl;
+	u32 mps;
+	int dir_in;
+
+	dev_dbg(hsotg->dev,
+		"%s: ep %s: a 0x%02x, attr 0x%02x, mps 0x%04x, intr %d\n",
+		__func__, ep->name, desc->bEndpointAddress, desc->bmAttributes,
+		desc->wMaxPacketSize, desc->bInterval);
+
+	/* not to be called for EP0 */
+	WARN_ON(index == 0);
+
+	dir_in = (desc->bEndpointAddress & USB_ENDPOINT_DIR_MASK) ? 1 : 0;
+	if (dir_in != hs_ep->dir_in) {
+		dev_err(hsotg->dev, "%s: direction mismatch!\n", __func__);
+		return -EINVAL;
+	}
+
+	mps = le16_to_cpu(desc->wMaxPacketSize);
+
+	/* note, we handle this here instead of s3c_hsotg_set_ep_maxpacket */
+
+	epctrl_reg = dir_in ? S3C_DIEPCTL(index) : S3C_DOEPCTL(index);
+	epctrl = readl(hsotg->regs + epctrl_reg);
+
+	dev_dbg(hsotg->dev, "%s: read DxEPCTL=0x%08x from 0x%08x\n",
+		__func__, epctrl, epctrl_reg);
+
+	spin_lock_irqsave(&hs_ep->lock, flags);
+
+	epctrl &= ~(S3C_DxEPCTL_EPType_MASK | S3C_DxEPCTL_MPS_MASK);
+	epctrl |= S3C_DxEPCTL_MPS(mps);
+
+	/* mark the endpoint as active, otherwise the core may ignore
+	 * transactions entirely for this endpoint */
+	epctrl |= S3C_DxEPCTL_USBActEp;
+
+	/* set the NAK status on the endpoint, otherwise we might try and
+	 * do something with data for which we have not yet got a request,
+	 * since the RXFIFO will take data for an endpoint even if the
+	 * size register hasn't been set.
+	 */
+
+	epctrl |= S3C_DxEPCTL_SNAK;
+
+	/* update the endpoint state */
+	hs_ep->ep.maxpacket = mps;
+
+	/* default, set to non-periodic */
+	hs_ep->periodic = 0;
+
+	switch (desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) {
+	case USB_ENDPOINT_XFER_ISOC:
+		dev_err(hsotg->dev, "no current ISOC support\n");
+		return -EINVAL;
+
+	case USB_ENDPOINT_XFER_BULK:
+		epctrl |= S3C_DxEPCTL_EPType_Bulk;
+		break;
+
+	case USB_ENDPOINT_XFER_INT:
+		if (dir_in) {
+			/* Allocate our TxFNum by simply using the index
+			 * of the endpoint for the moment. We could do
+			 * something better if the host indicates how
+			 * many FIFOs we are expecting to use. */
+
+			hs_ep->periodic = 1;
+			epctrl |= S3C_DxEPCTL_TxFNum(index);
+		}
+
+		epctrl |= S3C_DxEPCTL_EPType_Intterupt;
+		break;
+
+	case USB_ENDPOINT_XFER_CONTROL:
+		epctrl |= S3C_DxEPCTL_EPType_Control;
+		break;
+	}
+
+	/* for non control endpoints, set PID to D0 */
+	if (index)
+		epctrl |= S3C_DxEPCTL_SetD0PID;
+
+	dev_dbg(hsotg->dev, "%s: write DxEPCTL=0x%08x\n",
+		__func__, epctrl);
+
+	writel(epctrl, hsotg->regs + epctrl_reg);
+	dev_dbg(hsotg->dev, "%s: read DxEPCTL=0x%08x\n",
+		__func__, readl(hsotg->regs + epctrl_reg));
+
+	/* enable the endpoint interrupt */
+	s3c_hsotg_ctrl_epint(hsotg, index, dir_in, 1);
+
+	spin_unlock_irqrestore(&hs_ep->lock, flags);
+	return 0;
+}
+
+static int s3c_hsotg_ep_disable(struct usb_ep *ep)
+{
+	struct s3c_hsotg_ep *hs_ep = our_ep(ep);
+	struct s3c_hsotg *hsotg = hs_ep->parent;
+	int dir_in = hs_ep->dir_in;
+	int index = hs_ep->index;
+	unsigned long flags;
+	u32 epctrl_reg;
+	u32 ctrl;
+
+	dev_info(hsotg->dev, "%s(ep %p)\n", __func__, ep);
+
+	if (ep == &hsotg->eps[0].ep) {
+		dev_err(hsotg->dev, "%s: called for ep0\n", __func__);
+		return -EINVAL;
+	}
+
+	epctrl_reg = dir_in ? S3C_DIEPCTL(index) : S3C_DOEPCTL(index);
+
+	/* terminate all requests with shutdown */
+	kill_all_requests(hsotg, hs_ep, -ESHUTDOWN, false);
+
+	spin_lock_irqsave(&hs_ep->lock, flags);
+
+	ctrl = readl(hsotg->regs + epctrl_reg);
+	ctrl &= ~S3C_DxEPCTL_EPEna;
+	ctrl &= ~S3C_DxEPCTL_USBActEp;
+	ctrl |= S3C_DxEPCTL_SNAK;
+
+	dev_dbg(hsotg->dev, "%s: DxEPCTL=0x%08x\n", __func__, ctrl);
+	writel(ctrl, hsotg->regs + epctrl_reg);
+
+	/* disable endpoint interrupts */
+	s3c_hsotg_ctrl_epint(hsotg, hs_ep->index, hs_ep->dir_in, 0);
+
+	spin_unlock_irqrestore(&hs_ep->lock, flags);
+	return 0;
+}
+
+/**
+ * on_list - check request is on the given endpoint
+ * @ep: The endpoint to check.
+ * @test: The request to test if it is on the endpoint.
+*/
+static bool on_list(struct s3c_hsotg_ep *ep, struct s3c_hsotg_req *test)
+{
+	struct s3c_hsotg_req *req, *treq;
+
+	list_for_each_entry_safe(req, treq, &ep->queue, queue) {
+		if (req == test)
+			return true;
+	}
+
+	return false;
+}
+
+static int s3c_hsotg_ep_dequeue(struct usb_ep *ep, struct usb_request *req)
+{
+	struct s3c_hsotg_req *hs_req = our_req(req);
+	struct s3c_hsotg_ep *hs_ep = our_ep(ep);
+	struct s3c_hsotg *hs = hs_ep->parent;
+	unsigned long flags;
+
+	dev_info(hs->dev, "ep_dequeue(%p,%p)\n", ep, req);
+
+	if (hs_req == hs_ep->req) {
+		dev_dbg(hs->dev, "%s: already in progress\n", __func__);
+		return -EINPROGRESS;
+	}
+
+	spin_lock_irqsave(&hs_ep->lock, flags);
+
+	if (!on_list(hs_ep, hs_req)) {
+		spin_unlock_irqrestore(&hs_ep->lock, flags);
+		return -EINVAL;
+	}
+
+	s3c_hsotg_complete_request(hs, hs_ep, hs_req, -ECONNRESET);
+	spin_unlock_irqrestore(&hs_ep->lock, flags);
+
+	return 0;
+}
+
+static int s3c_hsotg_ep_sethalt(struct usb_ep *ep, int value)
+{
+	struct s3c_hsotg_ep *hs_ep = our_ep(ep);
+	struct s3c_hsotg *hs = hs_ep->parent;
+	int index = hs_ep->index;
+	unsigned long irqflags;
+	u32 epreg;
+	u32 epctl;
+
+	dev_info(hs->dev, "%s(ep %p %s, %d)\n", __func__, ep, ep->name, value);
+
+	spin_lock_irqsave(&hs_ep->lock, irqflags);
+
+	/* write both IN and OUT control registers */
+
+	epreg = S3C_DIEPCTL(index);
+	epctl = readl(hs->regs + epreg);
+
+	if (value)
+		epctl |= S3C_DxEPCTL_Stall;
+	else
+		epctl &= ~S3C_DxEPCTL_Stall;
+
+	writel(epctl, hs->regs + epreg);
+
+	epreg = S3C_DOEPCTL(index);
+	epctl = readl(hs->regs + epreg);
+
+	if (value)
+		epctl |= S3C_DxEPCTL_Stall;
+	else
+		epctl &= ~S3C_DxEPCTL_Stall;
+
+	writel(epctl, hs->regs + epreg);
+
+	spin_unlock_irqrestore(&hs_ep->lock, irqflags);
+
+	return 0;
+}
+
+static struct usb_ep_ops s3c_hsotg_ep_ops = {
+	.enable		= s3c_hsotg_ep_enable,
+	.disable	= s3c_hsotg_ep_disable,
+	.alloc_request	= s3c_hsotg_ep_alloc_request,
+	.free_request	= s3c_hsotg_ep_free_request,
+	.queue		= s3c_hsotg_ep_queue,
+	.dequeue	= s3c_hsotg_ep_dequeue,
+	.set_halt	= s3c_hsotg_ep_sethalt,
+	/* note, don't believe we have any call for the FIFO routines */
+};
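+
+/* Function drivers reach these ops through the standard gadget API; a
+ * rough (illustrative, not driver-specific) sketch of the usual flow is:
+ *
+ *	req = usb_ep_alloc_request(ep, GFP_KERNEL);
+ *	req->buf = buf;
+ *	req->length = len;
+ *	req->complete = my_complete;
+ *	usb_ep_queue(ep, req, GFP_ATOMIC);
+ *
+ * which ends up in s3c_hsotg_ep_alloc_request() and s3c_hsotg_ep_queue()
+ * above via the ops table.
+ */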
+
+/**
+ * s3c_hsotg_corereset - issue softreset to the core
+ * @hsotg: The device state
+ *
+ * Issue a soft reset to the core, and await the core finishing it.
+*/
+static int s3c_hsotg_corereset(struct s3c_hsotg *hsotg)
+{
+	int timeout;
+	u32 grstctl;
+
+	dev_dbg(hsotg->dev, "resetting core\n");
+
+	/* issue soft reset */
+	writel(S3C_GRSTCTL_CSftRst, hsotg->regs + S3C_GRSTCTL);
+
+	timeout = 1000;
+	do {
+		grstctl = readl(hsotg->regs + S3C_GRSTCTL);
+	} while (!(grstctl & S3C_GRSTCTL_CSftRst) && timeout-- > 0);
+
+	if (!(grstctl & S3C_GRSTCTL_CSftRst)) {
+		dev_err(hsotg->dev, "Failed to get CSftRst asserted\n");
+		return -EINVAL;
+	}
+
+	timeout = 1000;
+
+	while (1) {
+		u32 grstctl = readl(hsotg->regs + S3C_GRSTCTL);
+
+		if (timeout-- < 0) {
+			dev_info(hsotg->dev,
+				 "%s: reset failed, GRSTCTL=%08x\n",
+				 __func__, grstctl);
+			return -ETIMEDOUT;
+		}
+
+		if (grstctl & S3C_GRSTCTL_CSftRst)
+			continue;
+
+		if (!(grstctl & S3C_GRSTCTL_AHBIdle))
+			continue;
+
+		break; 		/* reset done */
+	}
+
+	dev_dbg(hsotg->dev, "reset successful\n");
+	return 0;
+}
+
+int usb_gadget_register_driver(struct usb_gadget_driver *driver)
+{
+	struct s3c_hsotg *hsotg = our_hsotg;
+	int ret;
+
+	if (!hsotg) {
+		printk(KERN_ERR "%s: called with no device\n", __func__);
+		return -ENODEV;
+	}
+
+	if (!driver) {
+		dev_err(hsotg->dev, "%s: no driver\n", __func__);
+		return -EINVAL;
+	}
+
+	if (driver->speed != USB_SPEED_HIGH &&
+	    driver->speed != USB_SPEED_FULL) {
+		dev_err(hsotg->dev, "%s: bad speed\n", __func__);
+		return -EINVAL;
+	}
+
+	if (!driver->bind || !driver->setup) {
+		dev_err(hsotg->dev, "%s: missing entry points\n", __func__);
+		return -EINVAL;
+	}
+
+	WARN_ON(hsotg->driver);
+
+	driver->driver.bus = NULL;
+	hsotg->driver = driver;
+	hsotg->gadget.dev.driver = &driver->driver;
+	hsotg->gadget.dev.dma_mask = hsotg->dev->dma_mask;
+	hsotg->gadget.speed = USB_SPEED_UNKNOWN;
+
+	ret = device_add(&hsotg->gadget.dev);
+	if (ret) {
+		dev_err(hsotg->dev, "failed to register gadget device\n");
+		goto err;
+	}
+
+	ret = driver->bind(&hsotg->gadget);
+	if (ret) {
+		dev_err(hsotg->dev, "failed bind %s\n", driver->driver.name);
+
+		/* undo the device_add() above before bailing out */
+		device_del(&hsotg->gadget.dev);
+		hsotg->gadget.dev.driver = NULL;
+		hsotg->driver = NULL;
+		goto err;
+	}
+
+	/* we must now enable ep0 ready for host detection and then
+	 * set configuration. */
+
+	s3c_hsotg_corereset(hsotg);
+
+	/* set the PLL on, remove the HNP/SRP and set the PHY */
+	writel(S3C_GUSBCFG_PHYIf16 | S3C_GUSBCFG_TOutCal(7) |
+	       (0x5 << 10), hsotg->regs + S3C_GUSBCFG);
+
+	/* looks like soft-reset changes state of FIFOs */
+	s3c_hsotg_init_fifo(hsotg);
+
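+	/* keep the device soft-disconnected until the core setup is done;
+	 * the disconnect is removed at the end of this function */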
+	__orr32(hsotg->regs + S3C_DCTL, S3C_DCTL_SftDiscon);
+
+	writel(1 << 18 | S3C_DCFG_DevSpd_HS,  hsotg->regs + S3C_DCFG);
+
+	writel(S3C_GINTSTS_DisconnInt | S3C_GINTSTS_SessReqInt |
+	       S3C_GINTSTS_ConIDStsChng | S3C_GINTSTS_USBRst |
+	       S3C_GINTSTS_EnumDone | S3C_GINTSTS_OTGInt |
+	       S3C_GINTSTS_USBSusp | S3C_GINTSTS_WkUpInt |
+	       S3C_GINTSTS_GOUTNakEff | S3C_GINTSTS_GINNakEff |
+	       S3C_GINTSTS_ErlySusp,
+	       hsotg->regs + S3C_GINTMSK);
+
+	if (using_dma(hsotg))
+		writel(S3C_GAHBCFG_GlblIntrEn | S3C_GAHBCFG_DMAEn |
+		       S3C_GAHBCFG_HBstLen_Incr4,
+		       hsotg->regs + S3C_GAHBCFG);
+	else
+		writel(S3C_GAHBCFG_GlblIntrEn, hsotg->regs + S3C_GAHBCFG);
+
+	/* Enabling INTknTXFEmpMsk here seems to be a big mistake, we end
+	 * up being flooded with interrupts if the host is polling the
+	 * endpoint to try and read data. */
+
+	writel(S3C_DIEPMSK_TimeOUTMsk | S3C_DIEPMSK_AHBErrMsk |
+	       S3C_DIEPMSK_INTknEPMisMsk |
+	       S3C_DIEPMSK_EPDisbldMsk | S3C_DIEPMSK_XferComplMsk,
+	       hsotg->regs + S3C_DIEPMSK);
+
+	/* don't need XferCompl, we get that from RXFIFO in slave mode. In
+	 * DMA mode we may need this. */
+	writel(S3C_DOEPMSK_SetupMsk | S3C_DOEPMSK_AHBErrMsk |
+	       S3C_DOEPMSK_EPDisbldMsk |
+	       (using_dma(hsotg) ? (S3C_DIEPMSK_XferComplMsk |
+				    S3C_DIEPMSK_TimeOUTMsk) : 0),
+	       hsotg->regs + S3C_DOEPMSK);
+
+	writel(0, hsotg->regs + S3C_DAINTMSK);
+
+	dev_info(hsotg->dev, "EP0: DIEPCTL0=0x%08x, DOEPCTL0=0x%08x\n",
+		 readl(hsotg->regs + S3C_DIEPCTL0),
+		 readl(hsotg->regs + S3C_DOEPCTL0));
+
+	/* enable in and out endpoint interrupts */
+	s3c_hsotg_en_gsint(hsotg, S3C_GINTSTS_OEPInt | S3C_GINTSTS_IEPInt);
+
+	/* Enable the RXFIFO when in slave mode, as this is how we collect
+	 * the data. In DMA mode, we get events from the FIFO but also
+	 * things we cannot process, so do not use it. */
+	if (!using_dma(hsotg))
+		s3c_hsotg_en_gsint(hsotg, S3C_GINTSTS_RxFLvl);
+
+	/* Enable interrupts for EP0 in and out */
+	s3c_hsotg_ctrl_epint(hsotg, 0, 0, 1);
+	s3c_hsotg_ctrl_epint(hsotg, 0, 1, 1);
+
+	__orr32(hsotg->regs + S3C_DCTL, S3C_DCTL_PWROnPrgDone);
+	udelay(10);  /* see openiboot */
+	__bic32(hsotg->regs + S3C_DCTL, S3C_DCTL_PWROnPrgDone);
+
+	dev_info(hsotg->dev, "DCTL=0x%08x\n", readl(hsotg->regs + S3C_DCTL));
+
+	/* S3C_DxEPCTL_USBActEp says RO in manual, but seems to be set by
+	   writing to the EPCTL register.. */
+
+	/* set to read 1 8byte packet */
+	writel(S3C_DxEPTSIZ_MC(1) | S3C_DxEPTSIZ_PktCnt(1) |
+	       S3C_DxEPTSIZ_XferSize(8), hsotg->regs + DOEPTSIZ0);
+
+	writel(s3c_hsotg_ep0_mps(hsotg->eps[0].ep.maxpacket) |
+	       S3C_DxEPCTL_CNAK | S3C_DxEPCTL_EPEna |
+	       S3C_DxEPCTL_USBActEp,
+	       hsotg->regs + S3C_DOEPCTL0);
+
+	/* enable, but don't activate EP0in */
+	writel(s3c_hsotg_ep0_mps(hsotg->eps[0].ep.maxpacket) |
+	       S3C_DxEPCTL_USBActEp, hsotg->regs + S3C_DIEPCTL0);
+
+	s3c_hsotg_enqueue_setup(hsotg);
+
+	dev_info(hsotg->dev, "EP0: DIEPCTL0=0x%08x, DOEPCTL0=0x%08x\n",
+		 readl(hsotg->regs + S3C_DIEPCTL0),
+		 readl(hsotg->regs + S3C_DOEPCTL0));
+
+	/* clear global NAKs */
+	writel(S3C_DCTL_CGOUTNak | S3C_DCTL_CGNPInNAK,
+	       hsotg->regs + S3C_DCTL);
+
+	/* remove the soft-disconnect and let's go */
+	__bic32(hsotg->regs + S3C_DCTL, S3C_DCTL_SftDiscon);
+
+	/* report to the user, and return */
+
+	dev_info(hsotg->dev, "bound driver %s\n", driver->driver.name);
+	return 0;
+
+err:
+	hsotg->driver = NULL;
+	hsotg->gadget.dev.driver = NULL;
+	return ret;
+}
+EXPORT_SYMBOL(usb_gadget_register_driver);
+
+int usb_gadget_unregister_driver(struct usb_gadget_driver *driver)
+{
+	struct s3c_hsotg *hsotg = our_hsotg;
+	int ep;
+
+	if (!hsotg)
+		return -ENODEV;
+
+	if (!driver || driver != hsotg->driver || !driver->unbind)
+		return -EINVAL;
+
+	/* all endpoints should be shutdown */
+	for (ep = 0; ep < S3C_HSOTG_EPS; ep++)
+		s3c_hsotg_ep_disable(&hsotg->eps[ep].ep);
+
+	call_gadget(hsotg, disconnect);
+
+	driver->unbind(&hsotg->gadget);
+	hsotg->driver = NULL;
+	hsotg->gadget.speed = USB_SPEED_UNKNOWN;
+
+	device_del(&hsotg->gadget.dev);
+
+	dev_info(hsotg->dev, "unregistered gadget driver '%s'\n",
+		 driver->driver.name);
+
+	return 0;
+}
+EXPORT_SYMBOL(usb_gadget_unregister_driver);
+
+static int s3c_hsotg_gadget_getframe(struct usb_gadget *gadget)
+{
+	return s3c_hsotg_read_frameno(to_hsotg(gadget));
+}
+
+static struct usb_gadget_ops s3c_hsotg_gadget_ops = {
+	.get_frame	= s3c_hsotg_gadget_getframe,
+};
+
+/**
+ * s3c_hsotg_initep - initialise a single endpoint
+ * @hsotg: The device state.
+ * @hs_ep: The endpoint to be initialised.
+ * @epnum: The endpoint number
+ *
+ * Initialise the given endpoint (as part of the probe and device state
+ * creation) to give to the gadget driver. Set up the endpoint name, any
+ * direction information and other state that may be required.
+ */
+static void __devinit s3c_hsotg_initep(struct s3c_hsotg *hsotg,
+				       struct s3c_hsotg_ep *hs_ep,
+				       int epnum)
+{
+	u32 ptxfifo;
+	char *dir;
+
+	if (epnum == 0)
+		dir = "";
+	else if ((epnum % 2) == 0) {
+		dir = "out";
+	} else {
+		dir = "in";
+		hs_ep->dir_in = 1;
+	}
+
+	hs_ep->index = epnum;
+
+	snprintf(hs_ep->name, sizeof(hs_ep->name), "ep%d%s", epnum, dir);
+
+	INIT_LIST_HEAD(&hs_ep->queue);
+	INIT_LIST_HEAD(&hs_ep->ep.ep_list);
+
+	spin_lock_init(&hs_ep->lock);
+
+	/* add to the list of endpoints known by the gadget driver */
+	if (epnum)
+		list_add_tail(&hs_ep->ep.ep_list, &hsotg->gadget.ep_list);
+
+	hs_ep->parent = hsotg;
+	hs_ep->ep.name = hs_ep->name;
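+	/* default to 512 bytes (the high-speed bulk maximum) for non-ep0 */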
+	hs_ep->ep.maxpacket = epnum ? 512 : EP0_MPS_LIMIT;
+	hs_ep->ep.ops = &s3c_hsotg_ep_ops;
+
+	/* Read the FIFO size for the Periodic TX FIFO, even if we're
+	 * an OUT endpoint, we may as well do this if in future the
+	 * code is changed to make each endpoint's direction changeable.
+	 */
+
+	ptxfifo = readl(hsotg->regs + S3C_DPTXFSIZn(epnum));
+	hs_ep->fifo_size = S3C_DPTXFSIZn_DPTxFSize_GET(ptxfifo);
+
+	/* if we're using dma, we need to set the next-endpoint pointer
+	 * to be something valid.
+	 */
+
+	if (using_dma(hsotg)) {
+		u32 next = S3C_DxEPCTL_NextEp((epnum + 1) % 15);
+		writel(next, hsotg->regs + S3C_DIEPCTL(epnum));
+		writel(next, hsotg->regs + S3C_DOEPCTL(epnum));
+	}
+}
+
+/**
+ * s3c_hsotg_otgreset - reset the OtG phy block
+ * @hsotg: The device state.
+ *
+ * Power up the PHY, set the basic configuration and start the PHY.
+ */
+static void s3c_hsotg_otgreset(struct s3c_hsotg *hsotg)
+{
+	u32 osc;
+
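+	/* power up the PHY by clearing the power-down controls */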
+	writel(0, S3C_PHYPWR);
+	mdelay(1);
+
+	osc = hsotg->plat->is_osc ? S3C_PHYCLK_EXT_OSC : 0;
+
+	writel(osc | 0x10, S3C_PHYCLK);
+
+	/* issue a full set of resets to the otg and core */
+
+	writel(S3C_RSTCON_PHY, S3C_RSTCON);
+	udelay(20);	/* at-least 10uS */
+	writel(0, S3C_RSTCON);
+}
+
+
+static void s3c_hsotg_init(struct s3c_hsotg *hsotg)
+{
+	/* unmask subset of endpoint interrupts */
+
+	writel(S3C_DIEPMSK_TimeOUTMsk | S3C_DIEPMSK_AHBErrMsk |
+	       S3C_DIEPMSK_EPDisbldMsk | S3C_DIEPMSK_XferComplMsk,
+	       hsotg->regs + S3C_DIEPMSK);
+
+	writel(S3C_DOEPMSK_SetupMsk | S3C_DOEPMSK_AHBErrMsk |
+	       S3C_DOEPMSK_EPDisbldMsk | S3C_DOEPMSK_XferComplMsk,
+	       hsotg->regs + S3C_DOEPMSK);
+
+	writel(0, hsotg->regs + S3C_DAINTMSK);
+
+	if (0) {
+		/* post global nak until we're ready */
+		writel(S3C_DCTL_SGNPInNAK | S3C_DCTL_SGOUTNak,
+		       hsotg->regs + S3C_DCTL);
+	}
+
+	/* setup fifos */
+
+	dev_info(hsotg->dev, "GRXFSIZ=0x%08x, GNPTXFSIZ=0x%08x\n",
+		 readl(hsotg->regs + S3C_GRXFSIZ),
+		 readl(hsotg->regs + S3C_GNPTXFSIZ));
+
+	s3c_hsotg_init_fifo(hsotg);
+
+	/* set the PLL on, remove the HNP/SRP and set the PHY */
+	writel(S3C_GUSBCFG_PHYIf16 | S3C_GUSBCFG_TOutCal(7) | (0x5 << 10),
+	       hsotg->regs + S3C_GUSBCFG);
+
+	writel(using_dma(hsotg) ? S3C_GAHBCFG_DMAEn : 0x0,
+	       hsotg->regs + S3C_GAHBCFG);
+}
+
+static void s3c_hsotg_dump(struct s3c_hsotg *hsotg)
+{
+	struct device *dev = hsotg->dev;
+	void __iomem *regs = hsotg->regs;
+	u32 val;
+	int idx;
+
+	dev_info(dev, "DCFG=0x%08x, DCTL=0x%08x, DIEPMSK=%08x\n",
+		 readl(regs + S3C_DCFG), readl(regs + S3C_DCTL),
+		 readl(regs + S3C_DIEPMSK));
+
+	dev_info(dev, "GAHBCFG=0x%08x, 0x44=0x%08x\n",
+		 readl(regs + S3C_GAHBCFG), readl(regs + 0x44));
+
+	dev_info(dev, "GRXFSIZ=0x%08x, GNPTXFSIZ=0x%08x\n",
+		 readl(regs + S3C_GRXFSIZ), readl(regs + S3C_GNPTXFSIZ));
+
+	/* show periodic fifo settings */
+
+	for (idx = 1; idx <= 15; idx++) {
+		val = readl(regs + S3C_DPTXFSIZn(idx));
+		dev_info(dev, "DPTx[%d] FSize=%d, StAddr=0x%08x\n", idx,
+			 val >> S3C_DPTXFSIZn_DPTxFSize_SHIFT,
+			 val & S3C_DPTXFSIZn_DPTxFStAddr_MASK);
+	}
+
+	for (idx = 0; idx < 15; idx++) {
+		dev_info(dev,
+			 "ep%d-in: EPCTL=0x%08x, SIZ=0x%08x, DMA=0x%08x\n", idx,
+			 readl(regs + S3C_DIEPCTL(idx)),
+			 readl(regs + S3C_DIEPTSIZ(idx)),
+			 readl(regs + S3C_DIEPDMA(idx)));
+
+		val = readl(regs + S3C_DOEPCTL(idx));
+		dev_info(dev,
+			 "ep%d-out: EPCTL=0x%08x, SIZ=0x%08x, DMA=0x%08x\n",
+			 idx, readl(regs + S3C_DOEPCTL(idx)),
+			 readl(regs + S3C_DOEPTSIZ(idx)),
+			 readl(regs + S3C_DOEPDMA(idx)));
+
+	}
+
+	dev_info(dev, "DVBUSDIS=0x%08x, DVBUSPULSE=%08x\n",
+		 readl(regs + S3C_DVBUSDIS), readl(regs + S3C_DVBUSPULSE));
+}
+
+
+/**
+ * state_show - debugfs: show overall driver and device state.
+ * @seq: The seq file to write to.
+ * @v: Unused parameter.
+ *
+ * This debugfs entry shows the overall state of the hardware and
+ * some general information about each of the endpoints available
+ * to the system.
+ */
+static int state_show(struct seq_file *seq, void *v)
+{
+	struct s3c_hsotg *hsotg = seq->private;
+	void __iomem *regs = hsotg->regs;
+	int idx;
+
+	seq_printf(seq, "DCFG=0x%08x, DCTL=0x%08x, DSTS=0x%08x\n",
+		 readl(regs + S3C_DCFG),
+		 readl(regs + S3C_DCTL),
+		 readl(regs + S3C_DSTS));
+
+	seq_printf(seq, "DIEPMSK=0x%08x, DOEPMASK=0x%08x\n",
+		   readl(regs + S3C_DIEPMSK), readl(regs + S3C_DOEPMSK));
+
+	seq_printf(seq, "GINTMSK=0x%08x, GINTSTS=0x%08x\n",
+		   readl(regs + S3C_GINTMSK),
+		   readl(regs + S3C_GINTSTS));
+
+	seq_printf(seq, "DAINTMSK=0x%08x, DAINT=0x%08x\n",
+		   readl(regs + S3C_DAINTMSK),
+		   readl(regs + S3C_DAINT));
+
+	seq_printf(seq, "GNPTXSTS=0x%08x, GRXSTSR=%08x\n",
+		   readl(regs + S3C_GNPTXSTS),
+		   readl(regs + S3C_GRXSTSR));
+
+	seq_printf(seq, "\nEndpoint status:\n");
+
+	for (idx = 0; idx < 15; idx++) {
+		u32 in, out;
+
+		in = readl(regs + S3C_DIEPCTL(idx));
+		out = readl(regs + S3C_DOEPCTL(idx));
+
+		seq_printf(seq, "ep%d: DIEPCTL=0x%08x, DOEPCTL=0x%08x",
+			   idx, in, out);
+
+		in = readl(regs + S3C_DIEPTSIZ(idx));
+		out = readl(regs + S3C_DOEPTSIZ(idx));
+
+		seq_printf(seq, ", DIEPTSIZ=0x%08x, DOEPTSIZ=0x%08x",
+			   in, out);
+
+		seq_printf(seq, "\n");
+	}
+
+	return 0;
+}
+
+static int state_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, state_show, inode->i_private);
+}
+
+static const struct file_operations state_fops = {
+	.owner		= THIS_MODULE,
+	.open		= state_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+
+/**
+ * fifo_show - debugfs: show the fifo information
+ * @seq: The seq_file to write data to.
+ * @v: Unused parameter.
+ *
+ * Show the FIFO information for the overall fifo and all the
+ * periodic transmission FIFOs.
+*/
+static int fifo_show(struct seq_file *seq, void *v)
+{
+	struct s3c_hsotg *hsotg = seq->private;
+	void __iomem *regs = hsotg->regs;
+	u32 val;
+	int idx;
+
+	seq_printf(seq, "Non-periodic FIFOs:\n");
+	seq_printf(seq, "RXFIFO: Size %d\n", readl(regs + S3C_GRXFSIZ));
+
+	val = readl(regs + S3C_GNPTXFSIZ);
+	seq_printf(seq, "NPTXFIFO: Size %d, Start 0x%08x\n",
+		   val >> S3C_GNPTXFSIZ_NPTxFDep_SHIFT,
+		   val & S3C_GNPTXFSIZ_NPTxFStAddr_MASK);
+
+	seq_printf(seq, "\nPeriodic TXFIFOs:\n");
+
+	for (idx = 1; idx <= 15; idx++) {
+		val = readl(regs + S3C_DPTXFSIZn(idx));
+
+		seq_printf(seq, "\tDPTXFIFO%2d: Size %d, Start 0x%08x\n", idx,
+			   val >> S3C_DPTXFSIZn_DPTxFSize_SHIFT,
+			   val & S3C_DPTXFSIZn_DPTxFStAddr_MASK);
+	}
+
+	return 0;
+}
+
+static int fifo_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, fifo_show, inode->i_private);
+}
+
+static const struct file_operations fifo_fops = {
+	.owner		= THIS_MODULE,
+	.open		= fifo_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+
+
+static const char *decode_direction(int is_in)
+{
+	return is_in ? "in" : "out";
+}
+
+/**
+ * ep_show - debugfs: show the state of an endpoint.
+ * @seq: The seq_file to write data to.
+ * @v: Unused parameter.
+ *
+ * This debugfs entry shows the state of the given endpoint (one file is
+ * registered for each endpoint available).
+*/
+static int ep_show(struct seq_file *seq, void *v)
+{
+	struct s3c_hsotg_ep *ep = seq->private;
+	struct s3c_hsotg *hsotg = ep->parent;
+	struct s3c_hsotg_req *req;
+	void __iomem *regs = hsotg->regs;
+	int index = ep->index;
+	int show_limit = 15;
+	unsigned long flags;
+
+	seq_printf(seq, "Endpoint index %d, named %s,  dir %s:\n",
+		   ep->index, ep->ep.name, decode_direction(ep->dir_in));
+
+	/* first show the register state */
+
+	seq_printf(seq, "\tDIEPCTL=0x%08x, DOEPCTL=0x%08x\n",
+		   readl(regs + S3C_DIEPCTL(index)),
+		   readl(regs + S3C_DOEPCTL(index)));
+
+	seq_printf(seq, "\tDIEPDMA=0x%08x, DOEPDMA=0x%08x\n",
+		   readl(regs + S3C_DIEPDMA(index)),
+		   readl(regs + S3C_DOEPDMA(index)));
+
+	seq_printf(seq, "\tDIEPINT=0x%08x, DOEPINT=0x%08x\n",
+		   readl(regs + S3C_DIEPINT(index)),
+		   readl(regs + S3C_DOEPINT(index)));
+
+	seq_printf(seq, "\tDIEPTSIZ=0x%08x, DOEPTSIZ=0x%08x\n",
+		   readl(regs + S3C_DIEPTSIZ(index)),
+		   readl(regs + S3C_DOEPTSIZ(index)));
+
+	seq_printf(seq, "\n");
+	seq_printf(seq, "mps %d\n", ep->ep.maxpacket);
+	seq_printf(seq, "total_data=%ld\n", ep->total_data);
+
+	seq_printf(seq, "request list (%p,%p):\n",
+		   ep->queue.next, ep->queue.prev);
+
+	spin_lock_irqsave(&ep->lock, flags);
+
+	list_for_each_entry(req, &ep->queue, queue) {
+		if (--show_limit < 0) {
+			seq_printf(seq, "not showing more requests...\n");
+			break;
+		}
+
+		seq_printf(seq, "%c req %p: %d bytes @%p, ",
+			   req == ep->req ? '*' : ' ',
+			   req, req->req.length, req->req.buf);
+		seq_printf(seq, "%d done, res %d\n",
+			   req->req.actual, req->req.status);
+	}
+
+	spin_unlock_irqrestore(&ep->lock, flags);
+
+	return 0;
+}
+
+static int ep_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, ep_show, inode->i_private);
+}
+
+static const struct file_operations ep_fops = {
+	.owner		= THIS_MODULE,
+	.open		= ep_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+
+/**
+ * s3c_hsotg_create_debug - create debugfs directory and files
+ * @hsotg: The driver state
+ *
+ * Create the debugfs files to allow the user to get information
+ * about the state of the system. The directory is given the same
+ * name as the device itself, in case we end up with multiple blocks
+ * in future systems.
+*/
+static void __devinit s3c_hsotg_create_debug(struct s3c_hsotg *hsotg)
+{
+	struct dentry *root;
+	unsigned epidx;
+
+	root = debugfs_create_dir(dev_name(hsotg->dev), NULL);
+	hsotg->debug_root = root;
+	if (IS_ERR(root)) {
+		dev_err(hsotg->dev, "cannot create debug root\n");
+		return;
+	}
+
+	/* create general state file */
+
+	hsotg->debug_file = debugfs_create_file("state", 0444, root,
+						hsotg, &state_fops);
+
+	if (IS_ERR(hsotg->debug_file))
+		dev_err(hsotg->dev, "%s: failed to create state\n", __func__);
+
+	hsotg->debug_fifo = debugfs_create_file("fifo", 0444, root,
+						hsotg, &fifo_fops);
+
+	if (IS_ERR(hsotg->debug_fifo))
+		dev_err(hsotg->dev, "%s: failed to create fifo\n", __func__);
+
+	/* create one file for each endpoint */
+
+	for (epidx = 0; epidx < S3C_HSOTG_EPS; epidx++) {
+		struct s3c_hsotg_ep *ep = &hsotg->eps[epidx];
+
+		ep->debugfs = debugfs_create_file(ep->name, 0444,
+						  root, ep, &ep_fops);
+
+		if (IS_ERR(ep->debugfs))
+			dev_err(hsotg->dev, "failed to create %s debug file\n",
+				ep->name);
+	}
+}
+
+/**
+ * s3c_hsotg_delete_debug - cleanup debugfs entries
+ * @hsotg: The driver state
+ *
+ * Cleanup (remove) the debugfs files for use on module exit.
+*/
+static void __devexit s3c_hsotg_delete_debug(struct s3c_hsotg *hsotg)
+{
+	unsigned epidx;
+
+	for (epidx = 0; epidx < S3C_HSOTG_EPS; epidx++) {
+		struct s3c_hsotg_ep *ep = &hsotg->eps[epidx];
+		debugfs_remove(ep->debugfs);
+	}
+
+	debugfs_remove(hsotg->debug_file);
+	debugfs_remove(hsotg->debug_fifo);
+	debugfs_remove(hsotg->debug_root);
+}
+
+/**
+ * s3c_hsotg_gate - set the hardware gate for the block
+ * @pdev: The device we bound to
+ * @on: On or off.
+ *
+ * Set the hardware gate setting into the block. If we end up on
+ * something other than an S3C64XX, then we might need to change this
+ * to using a platform data callback, or some other mechanism.
+ */
+static void s3c_hsotg_gate(struct platform_device *pdev, bool on)
+{
+	unsigned long flags;
+	u32 others;
+
+	local_irq_save(flags);
+
+	others = __raw_readl(S3C64XX_OTHERS);
+	if (on)
+		others |= S3C64XX_OTHERS_USBMASK;
+	else
+		others &= ~S3C64XX_OTHERS_USBMASK;
+	__raw_writel(others, S3C64XX_OTHERS);
+
+	local_irq_restore(flags);
+}
+
+struct s3c_hsotg_plat s3c_hsotg_default_pdata;
+
+static int __devinit s3c_hsotg_probe(struct platform_device *pdev)
+{
+	struct s3c_hsotg_plat *plat = pdev->dev.platform_data;
+	struct device *dev = &pdev->dev;
+	struct s3c_hsotg *hsotg;
+	struct resource *res;
+	int epnum;
+	int ret;
+
+	if (!plat)
+		plat = &s3c_hsotg_default_pdata;
+
+	hsotg = kzalloc(sizeof(struct s3c_hsotg) +
+			sizeof(struct s3c_hsotg_ep) * S3C_HSOTG_EPS,
+			GFP_KERNEL);
+	if (!hsotg) {
+		dev_err(dev, "cannot get memory\n");
+		return -ENOMEM;
+	}
+
+	hsotg->dev = dev;
+	hsotg->plat = plat;
+
+	platform_set_drvdata(pdev, hsotg);
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!res) {
+		dev_err(dev, "cannot find register resource 0\n");
+		ret = -EINVAL;
+		goto err_mem;
+	}
+
+	hsotg->regs_res = request_mem_region(res->start, resource_size(res),
+					     dev_name(dev));
+	if (!hsotg->regs_res) {
+		dev_err(dev, "cannot reserve registers\n");
+		ret = -ENOENT;
+		goto err_mem;
+	}
+
+	hsotg->regs = ioremap(res->start, resource_size(res));
+	if (!hsotg->regs) {
+		dev_err(dev, "cannot map registers\n");
+		ret = -ENXIO;
+		goto err_regs_res;
+	}
+
+	ret = platform_get_irq(pdev, 0);
+	if (ret < 0) {
+		dev_err(dev, "cannot find IRQ\n");
+		goto err_regs;
+	}
+
+	hsotg->irq = ret;
+
+	ret = request_irq(ret, s3c_hsotg_irq, 0, dev_name(dev), hsotg);
+	if (ret < 0) {
+		dev_err(dev, "cannot claim IRQ\n");
+		goto err_regs;
+	}
+
+	dev_info(dev, "regs %p, irq %d\n", hsotg->regs, hsotg->irq);
+
+	device_initialize(&hsotg->gadget.dev);
+
+	dev_set_name(&hsotg->gadget.dev, "gadget");
+
+	hsotg->gadget.is_dualspeed = 1;
+	hsotg->gadget.ops = &s3c_hsotg_gadget_ops;
+	hsotg->gadget.name = dev_name(dev);
+
+	hsotg->gadget.dev.parent = dev;
+	hsotg->gadget.dev.dma_mask = dev->dma_mask;
+
+	/* setup endpoint information */
+
+	INIT_LIST_HEAD(&hsotg->gadget.ep_list);
+	hsotg->gadget.ep0 = &hsotg->eps[0].ep;
+
+	/* allocate EP0 request */
+
+	hsotg->ctrl_req = s3c_hsotg_ep_alloc_request(&hsotg->eps[0].ep,
+						     GFP_KERNEL);
+	if (!hsotg->ctrl_req) {
+		dev_err(dev, "failed to allocate ctrl req\n");
+		ret = -ENOMEM;
+		goto err_irq;
+	}
+
+	/* reset the system */
+
+	s3c_hsotg_gate(pdev, true);
+
+	s3c_hsotg_otgreset(hsotg);
+	s3c_hsotg_corereset(hsotg);
+	s3c_hsotg_init(hsotg);
+
+	/* initialise the endpoints now the core has been initialised */
+	for (epnum = 0; epnum < S3C_HSOTG_EPS; epnum++)
+		s3c_hsotg_initep(hsotg, &hsotg->eps[epnum], epnum);
+
+	s3c_hsotg_create_debug(hsotg);
+
+	s3c_hsotg_dump(hsotg);
+
+	our_hsotg = hsotg;
+	return 0;
+
+err_irq:
+	free_irq(hsotg->irq, hsotg);
+
+err_regs:
+	iounmap(hsotg->regs);
+
+err_regs_res:
+	release_resource(hsotg->regs_res);
+	kfree(hsotg->regs_res);
+
+err_mem:
+	kfree(hsotg);
+	return ret;
+}
+
+static int __devexit s3c_hsotg_remove(struct platform_device *pdev)
+{
+	struct s3c_hsotg *hsotg = platform_get_drvdata(pdev);
+
+	s3c_hsotg_delete_debug(hsotg);
+
+	usb_gadget_unregister_driver(hsotg->driver);
+
+	free_irq(hsotg->irq, hsotg);
+	iounmap(hsotg->regs);
+
+	release_resource(hsotg->regs_res);
+	kfree(hsotg->regs_res);
+
+	s3c_hsotg_gate(pdev, false);
+
+	kfree(hsotg);
+	return 0;
+}
+
+/* no suspend/resume support yet */
+#define s3c_hsotg_suspend NULL
+#define s3c_hsotg_resume NULL
+
+static struct platform_driver s3c_hsotg_driver = {
+	.driver		= {
+		.name	= "s3c-hsotg",
+		.owner	= THIS_MODULE,
+	},
+	.probe		= s3c_hsotg_probe,
+	.remove		= __devexit_p(s3c_hsotg_remove),
+	.suspend	= s3c_hsotg_suspend,
+	.resume		= s3c_hsotg_resume,
+};
+
+static int __init s3c_hsotg_modinit(void)
+{
+	return platform_driver_register(&s3c_hsotg_driver);
+}
+
+static void __exit s3c_hsotg_modexit(void)
+{
+	platform_driver_unregister(&s3c_hsotg_driver);
+}
+
+module_init(s3c_hsotg_modinit);
+module_exit(s3c_hsotg_modexit);
+
+MODULE_DESCRIPTION("Samsung S3C USB High-speed/OtG device");
+MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:s3c-hsotg");
diff --git a/drivers/usb/gadget/u_audio.c b/drivers/usb/gadget/u_audio.c
new file mode 100644
index 0000000..0f3d22f
--- /dev/null
+++ b/drivers/usb/gadget/u_audio.c
@@ -0,0 +1,319 @@
+/*
+ * u_audio.c -- ALSA audio utilities for Gadget stack
+ *
+ * Copyright (C) 2008 Bryan Wu <cooloney@kernel.org>
+ * Copyright (C) 2008 Analog Devices, Inc
+ *
+ * Enter bugs at http://blackfin.uclinux.org/
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#include <linux/kernel.h>
+#include <linux/utsname.h>
+#include <linux/device.h>
+#include <linux/delay.h>
+#include <linux/ctype.h>
+#include <linux/random.h>
+#include <linux/syscalls.h>
+
+#include "u_audio.h"
+
+/*
+ * This component encapsulates the ALSA devices for USB audio gadget
+ */
+
+#define FILE_PCM_PLAYBACK	"/dev/snd/pcmC0D0p"
+#define FILE_PCM_CAPTURE	"/dev/snd/pcmC0D0c"
+#define FILE_CONTROL		"/dev/snd/controlC0"
+
+static char *fn_play = FILE_PCM_PLAYBACK;
+module_param(fn_play, charp, S_IRUGO);
+MODULE_PARM_DESC(fn_play, "Playback PCM device file name");
+
+static char *fn_cap = FILE_PCM_CAPTURE;
+module_param(fn_cap, charp, S_IRUGO);
+MODULE_PARM_DESC(fn_cap, "Capture PCM device file name");
+
+static char *fn_cntl = FILE_CONTROL;
+module_param(fn_cntl, charp, S_IRUGO);
+MODULE_PARM_DESC(fn_cntl, "Control device file name");
+
+/*-------------------------------------------------------------------------*/
+
+/**
+ * Some ALSA internal helper functions
+ */
+static int snd_interval_refine_set(struct snd_interval *i, unsigned int val)
+{
+	struct snd_interval t;
+	t.empty = 0;
+	t.min = t.max = val;
+	t.openmin = t.openmax = 0;
+	t.integer = 1;
+	return snd_interval_refine(i, &t);
+}
+
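+/* refine a single hw_params field (mask or interval) to the given value */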
+static int _snd_pcm_hw_param_set(struct snd_pcm_hw_params *params,
+				 snd_pcm_hw_param_t var, unsigned int val,
+				 int dir)
+{
+	int changed;
+	if (hw_is_mask(var)) {
+		struct snd_mask *m = hw_param_mask(params, var);
+		if (val == 0 && dir < 0) {
+			changed = -EINVAL;
+			snd_mask_none(m);
+		} else {
+			if (dir > 0)
+				val++;
+			else if (dir < 0)
+				val--;
+			changed = snd_mask_refine_set(
+					hw_param_mask(params, var), val);
+		}
+	} else if (hw_is_interval(var)) {
+		struct snd_interval *i = hw_param_interval(params, var);
+		if (val == 0 && dir < 0) {
+			changed = -EINVAL;
+			snd_interval_none(i);
+		} else if (dir == 0)
+			changed = snd_interval_refine_set(i, val);
+		else {
+			struct snd_interval t;
+			t.openmin = 1;
+			t.openmax = 1;
+			t.empty = 0;
+			t.integer = 0;
+			if (dir < 0) {
+				t.min = val - 1;
+				t.max = val;
+			} else {
+				t.min = val;
+				t.max = val+1;
+			}
+			changed = snd_interval_refine(i, &t);
+		}
+	} else
+		return -EINVAL;
+	if (changed) {
+		params->cmask |= 1 << var;
+		params->rmask |= 1 << var;
+	}
+	return changed;
+}
+/*-------------------------------------------------------------------------*/
+
+/**
+ * Set default hardware params
+ */
+static int playback_default_hw_params(struct gaudio_snd_dev *snd)
+{
+	struct snd_pcm_substream *substream = snd->substream;
+	struct snd_pcm_hw_params *params;
+	snd_pcm_sframes_t result;
+
+       /*
+	* SNDRV_PCM_ACCESS_RW_INTERLEAVED,
+	* SNDRV_PCM_FORMAT_S16_LE
+	* CHANNELS: 2
+	* RATE: 48000
+	*/
+	snd->access = SNDRV_PCM_ACCESS_RW_INTERLEAVED;
+	snd->format = SNDRV_PCM_FORMAT_S16_LE;
+	snd->channels = 2;
+	snd->rate = 48000;
+
+	params = kzalloc(sizeof(*params), GFP_KERNEL);
+	if (!params)
+		return -ENOMEM;
+
+	_snd_pcm_hw_params_any(params);
+	_snd_pcm_hw_param_set(params, SNDRV_PCM_HW_PARAM_ACCESS,
+			snd->access, 0);
+	_snd_pcm_hw_param_set(params, SNDRV_PCM_HW_PARAM_FORMAT,
+			snd->format, 0);
+	_snd_pcm_hw_param_set(params, SNDRV_PCM_HW_PARAM_CHANNELS,
+			snd->channels, 0);
+	_snd_pcm_hw_param_set(params, SNDRV_PCM_HW_PARAM_RATE,
+			snd->rate, 0);
+
+	snd_pcm_kernel_ioctl(substream, SNDRV_PCM_IOCTL_DROP, NULL);
+	snd_pcm_kernel_ioctl(substream, SNDRV_PCM_IOCTL_HW_PARAMS, params);
+
+	result = snd_pcm_kernel_ioctl(substream, SNDRV_PCM_IOCTL_PREPARE, NULL);
+	if (result < 0) {
+		ERROR(snd->card,
+			"Preparing sound card failed: %d\n", (int)result);
+		kfree(params);
+		return result;
+	}
+
+	/* Store the hardware parameters */
+	snd->access = params_access(params);
+	snd->format = params_format(params);
+	snd->channels = params_channels(params);
+	snd->rate = params_rate(params);
+
+	kfree(params);
+
+	INFO(snd->card,
+		"Hardware params: access %x, format %x, channels %d, rate %d\n",
+		snd->access, snd->format, snd->channels, snd->rate);
+
+	return 0;
+}
+
+/**
+ * Playback audio buffer data by ALSA PCM device
+ */
+static size_t u_audio_playback(struct gaudio *card, void *buf, size_t count)
+{
+	struct gaudio_snd_dev	*snd = &card->playback;
+	struct snd_pcm_substream *substream = snd->substream;
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	mm_segment_t old_fs;
+	ssize_t result;
+	snd_pcm_sframes_t frames;
+
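+	/* re-prepare the substream if playback stopped due to underrun/suspend */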
+try_again:
+	if (runtime->status->state == SNDRV_PCM_STATE_XRUN ||
+		runtime->status->state == SNDRV_PCM_STATE_SUSPENDED) {
+		result = snd_pcm_kernel_ioctl(substream,
+				SNDRV_PCM_IOCTL_PREPARE, NULL);
+		if (result < 0) {
+			ERROR(card, "Preparing sound card failed: %d\n",
+					(int)result);
+			return result;
+		}
+	}
+
+	frames = bytes_to_frames(runtime, count);
+	old_fs = get_fs();
+	set_fs(KERNEL_DS);
+	result = snd_pcm_lib_write(snd->substream, buf, frames);
+	if (result != frames) {
+		ERROR(card, "Playback error: %d\n", (int)result);
+		set_fs(old_fs);
+		goto try_again;
+	}
+	set_fs(old_fs);
+
+	return 0;
+}
+
+static int u_audio_get_playback_channels(struct gaudio *card)
+{
+	return card->playback.channels;
+}
+
+static int u_audio_get_playback_rate(struct gaudio *card)
+{
+	return card->playback.rate;
+}
+
+/**
+ * Open the ALSA PCM and control device files
+ * and initialise the PCM and control devices
+ */
+static int gaudio_open_snd_dev(struct gaudio *card)
+{
+	struct snd_pcm_file *pcm_file;
+	struct gaudio_snd_dev *snd;
+
+	if (!card)
+		return -ENODEV;
+
+	/* Open control device */
+	snd = &card->control;
+	snd->filp = filp_open(fn_cntl, O_RDWR, 0);
+	if (IS_ERR(snd->filp)) {
+		int ret = PTR_ERR(snd->filp);
+		ERROR(card, "unable to open sound control device file: %s\n",
+				fn_cntl);
+		snd->filp = NULL;
+		return ret;
+	}
+	snd->card = card;
+
+	/* Open PCM playback device and setup substream */
+	snd = &card->playback;
+	snd->filp = filp_open(fn_play, O_WRONLY, 0);
+	if (IS_ERR(snd->filp)) {
+		ERROR(card, "No such PCM playback device: %s\n", fn_play);
+		snd->filp = NULL;
+	} else {
+		/* only set up the substream if the device really opened */
+		pcm_file = snd->filp->private_data;
+		snd->substream = pcm_file->substream;
+		snd->card = card;
+		playback_default_hw_params(snd);
+	}
+
+	/* Open PCM capture device and setup substream */
+	snd = &card->capture;
+	snd->filp = filp_open(fn_cap, O_RDONLY, 0);
+	if (IS_ERR(snd->filp)) {
+		ERROR(card, "No such PCM capture device: %s\n", fn_cap);
+		snd->filp = NULL;
+	} else {
+		/* only set up the substream if the device really opened */
+		pcm_file = snd->filp->private_data;
+		snd->substream = pcm_file->substream;
+		snd->card = card;
+	}
+	snd->card = card;
+
+	return 0;
+}
+
+/**
+ * Close ALSA PCM and control device files
+ */
+static int gaudio_close_snd_dev(struct gaudio *gau)
+{
+	struct gaudio_snd_dev	*snd;
+
+	/* Close control device */
+	snd = &gau->control;
+	if (!IS_ERR(snd->filp))
+		filp_close(snd->filp, current->files);
+
+	/* Close PCM playback device */
+	snd = &gau->playback;
+	if (!IS_ERR(snd->filp))
+		filp_close(snd->filp, current->files);
+
+	/* Close PCM capture device */
+	snd = &gau->capture;
+	if (!IS_ERR(snd->filp))
+		filp_close(snd->filp, current->files);
+
+	return 0;
+}
+
+/**
+ * gaudio_setup - set up the ALSA interface and prepare for USB transfer
+ *
+ * This sets up the PCM, mixer or MIDI ALSA devices for use by the USB gadget.
+ *
+ * Returns negative errno, or zero on success
+ */
+int __init gaudio_setup(struct gaudio *card)
+{
+	int	ret;
+
+	ret = gaudio_open_snd_dev(card);
+	if (ret)
+		ERROR(card, "we need at least one control device\n");
+
+	return ret;
+}
+
+/**
+ * gaudio_cleanup - remove ALSA device interface
+ *
+ * This is called to free all resources allocated by @gaudio_setup().
+ */
+void gaudio_cleanup(struct gaudio *card)
+{
+	if (card)
+		gaudio_close_snd_dev(card);
+}
+
diff --git a/drivers/usb/gadget/u_audio.h b/drivers/usb/gadget/u_audio.h
new file mode 100644
index 0000000..cc8d159
--- /dev/null
+++ b/drivers/usb/gadget/u_audio.h
@@ -0,0 +1,56 @@
+/*
+ * u_audio.h -- interface to USB gadget "ALSA AUDIO" utilities
+ *
+ * Copyright (C) 2008 Bryan Wu <cooloney@kernel.org>
+ * Copyright (C) 2008 Analog Devices, Inc
+ *
+ * Enter bugs at http://blackfin.uclinux.org/
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#ifndef __U_AUDIO_H
+#define __U_AUDIO_H
+
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/usb/audio.h>
+#include <linux/usb/composite.h>
+
+#include <sound/core.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+
+#include "gadget_chips.h"
+
+/*
+ * This represents the USB side of an audio card device, managed by a USB
+ * function which provides control and stream interfaces.
+ */
+
+struct gaudio_snd_dev {
+	struct gaudio			*card;
+	struct file			*filp;
+	struct snd_pcm_substream	*substream;
+	int				access;
+	int				format;
+	int				channels;
+	int				rate;
+};
+
+struct gaudio {
+	struct usb_function		func;
+	struct usb_gadget		*gadget;
+
+	/* ALSA sound device interfaces */
+	struct gaudio_snd_dev		control;
+	struct gaudio_snd_dev		playback;
+	struct gaudio_snd_dev		capture;
+
+	/* TODO */
+};
+
+int gaudio_setup(struct gaudio *card);
+void gaudio_cleanup(struct gaudio *card);
+
+#endif /* __U_AUDIO_H */
diff --git a/drivers/usb/gadget/u_serial.c b/drivers/usb/gadget/u_serial.c
index 0a4d99a..fc6e709 100644
--- a/drivers/usb/gadget/u_serial.c
+++ b/drivers/usb/gadget/u_serial.c
@@ -371,6 +371,7 @@
 
 		req->length = len;
 		list_del(&req->list);
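+		/* if the write buffer is now empty, ask for a zero length
+		 * packet so the host sees the end of this transfer */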
+		req->zero = (gs_buf_data_avail(&port->port_write_buf) == 0);
 
 		pr_vdebug(PREFIX "%d: tx len=%d, 0x%02x 0x%02x 0x%02x ...\n",
 				port->port_num, len, *((u8 *)req->buf),
diff --git a/drivers/usb/host/Kconfig b/drivers/usb/host/Kconfig
index 845479f..1576a05 100644
--- a/drivers/usb/host/Kconfig
+++ b/drivers/usb/host/Kconfig
@@ -17,6 +17,26 @@
 	  To compile this driver as a module, choose M here: the
 	  module will be called c67x00.
 
+config USB_XHCI_HCD
+	tristate "xHCI HCD (USB 3.0) support (EXPERIMENTAL)"
+	depends on USB && PCI && EXPERIMENTAL
+	---help---
+	  The eXtensible Host Controller Interface (xHCI) is the standard for
+	  USB 3.0 "SuperSpeed" host controller hardware.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called xhci-hcd.
+
+config USB_XHCI_HCD_DEBUGGING
+	bool "Debugging for the xHCI host controller"
+	depends on USB_XHCI_HCD
+	---help---
+	  Say 'Y' to turn on debugging for the xHCI host controller driver.
+	  This will spew debugging output, even in interrupt context.
+	  This should only be used for debugging xHCI driver bugs.
+
+	  If unsure, say N.
+
 config USB_EHCI_HCD
 	tristate "EHCI HCD (USB 2.0) support"
 	depends on USB && USB_ARCH_HAS_EHCI
diff --git a/drivers/usb/host/Makefile b/drivers/usb/host/Makefile
index f163571..289d748 100644
--- a/drivers/usb/host/Makefile
+++ b/drivers/usb/host/Makefile
@@ -12,6 +12,7 @@
 ifeq ($(CONFIG_FHCI_DEBUG),y)
 fhci-objs += fhci-dbg.o
 endif
+xhci-objs := xhci-hcd.o xhci-mem.o xhci-pci.o xhci-ring.o xhci-hub.o xhci-dbg.o
 
 obj-$(CONFIG_USB_WHCI_HCD)	+= whci/
 
@@ -23,6 +24,7 @@
 obj-$(CONFIG_USB_OHCI_HCD)	+= ohci-hcd.o
 obj-$(CONFIG_USB_UHCI_HCD)	+= uhci-hcd.o
 obj-$(CONFIG_USB_FHCI_HCD)	+= fhci.o
+obj-$(CONFIG_USB_XHCI_HCD)	+= xhci.o
 obj-$(CONFIG_USB_SL811_HCD)	+= sl811-hcd.o
 obj-$(CONFIG_USB_SL811_CS)	+= sl811_cs.o
 obj-$(CONFIG_USB_U132_HCD)	+= u132-hcd.o
diff --git a/drivers/usb/host/ehci-au1xxx.c b/drivers/usb/host/ehci-au1xxx.c
index bf69f47..c3a778b 100644
--- a/drivers/usb/host/ehci-au1xxx.c
+++ b/drivers/usb/host/ehci-au1xxx.c
@@ -97,6 +97,7 @@
 	.urb_enqueue		= ehci_urb_enqueue,
 	.urb_dequeue		= ehci_urb_dequeue,
 	.endpoint_disable	= ehci_endpoint_disable,
+	.endpoint_reset		= ehci_endpoint_reset,
 
 	/*
 	 * scheduling support
diff --git a/drivers/usb/host/ehci-fsl.c b/drivers/usb/host/ehci-fsl.c
index 01c3da3..bf86809 100644
--- a/drivers/usb/host/ehci-fsl.c
+++ b/drivers/usb/host/ehci-fsl.c
@@ -309,6 +309,7 @@
 	.urb_enqueue = ehci_urb_enqueue,
 	.urb_dequeue = ehci_urb_dequeue,
 	.endpoint_disable = ehci_endpoint_disable,
+	.endpoint_reset = ehci_endpoint_reset,
 
 	/*
 	 * scheduling support
diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c
index c637207..2b72473 100644
--- a/drivers/usb/host/ehci-hcd.c
+++ b/drivers/usb/host/ehci-hcd.c
@@ -1024,6 +1024,51 @@
 	return;
 }
 
+static void
+ehci_endpoint_reset(struct usb_hcd *hcd, struct usb_host_endpoint *ep)
+{
+	struct ehci_hcd		*ehci = hcd_to_ehci(hcd);
+	struct ehci_qh		*qh;
+	int			eptype = usb_endpoint_type(&ep->desc);
+
+	if (eptype != USB_ENDPOINT_XFER_BULK && eptype != USB_ENDPOINT_XFER_INT)
+		return;
+
+ rescan:
+	spin_lock_irq(&ehci->lock);
+	qh = ep->hcpriv;
+
+	/* For Bulk and Interrupt endpoints we maintain the toggle state
+	 * in the hardware; the toggle bits in udev aren't used at all.
+	 * When an endpoint is reset by usb_clear_halt() we must reset
+	 * the toggle bit in the QH.
+	 */
+	if (qh) {
+		if (!list_empty(&qh->qtd_list)) {
+			WARN_ONCE(1, "clear_halt for a busy endpoint\n");
+		} else if (qh->qh_state == QH_STATE_IDLE) {
+			qh->hw_token &= ~cpu_to_hc32(ehci, QTD_TOGGLE);
+		} else {
+			/* It's not safe to write into the overlay area
+			 * while the QH is active.  Unlink it first and
+			 * wait for the unlink to complete.
+			 */
+			if (qh->qh_state == QH_STATE_LINKED) {
+				if (eptype == USB_ENDPOINT_XFER_BULK) {
+					unlink_async(ehci, qh);
+				} else {
+					intr_deschedule(ehci, qh);
+					(void) qh_schedule(ehci, qh);
+				}
+			}
+			spin_unlock_irq(&ehci->lock);
+			schedule_timeout_uninterruptible(1);
+			goto rescan;
+		}
+	}
+	spin_unlock_irq(&ehci->lock);
+}
+
 static int ehci_get_frame (struct usb_hcd *hcd)
 {
 	struct ehci_hcd		*ehci = hcd_to_ehci (hcd);
@@ -1097,7 +1142,7 @@
 		 sizeof(struct ehci_itd), sizeof(struct ehci_sitd));
 
 #ifdef DEBUG
-	ehci_debug_root = debugfs_create_dir("ehci", NULL);
+	ehci_debug_root = debugfs_create_dir("ehci", usb_debug_root);
 	if (!ehci_debug_root) {
 		retval = -ENOENT;
 		goto err_debug;
diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c
index 97a53a4..f46ad27 100644
--- a/drivers/usb/host/ehci-hub.c
+++ b/drivers/usb/host/ehci-hub.c
@@ -391,7 +391,7 @@
 
 	/* with integrated TT there is no companion! */
 	if (!ehci_is_TDI(ehci))
-		i = device_create_file(ehci_to_hcd(ehci)->self.dev,
+		i = device_create_file(ehci_to_hcd(ehci)->self.controller,
 				       &dev_attr_companion);
 }
 
@@ -399,7 +399,7 @@
 {
 	/* with integrated TT there is no companion! */
 	if (!ehci_is_TDI(ehci))
-		device_remove_file(ehci_to_hcd(ehci)->self.dev,
+		device_remove_file(ehci_to_hcd(ehci)->self.controller,
 				   &dev_attr_companion);
 }
 
diff --git a/drivers/usb/host/ehci-ixp4xx.c b/drivers/usb/host/ehci-ixp4xx.c
index 9c32063..a44bb4a 100644
--- a/drivers/usb/host/ehci-ixp4xx.c
+++ b/drivers/usb/host/ehci-ixp4xx.c
@@ -51,6 +51,7 @@
 	.urb_enqueue		= ehci_urb_enqueue,
 	.urb_dequeue		= ehci_urb_dequeue,
 	.endpoint_disable	= ehci_endpoint_disable,
+	.endpoint_reset		= ehci_endpoint_reset,
 	.get_frame_number	= ehci_get_frame,
 	.hub_status_data	= ehci_hub_status_data,
 	.hub_control		= ehci_hub_control,
diff --git a/drivers/usb/host/ehci-orion.c b/drivers/usb/host/ehci-orion.c
index 9d48790..770dd9a 100644
--- a/drivers/usb/host/ehci-orion.c
+++ b/drivers/usb/host/ehci-orion.c
@@ -149,6 +149,7 @@
 	.urb_enqueue = ehci_urb_enqueue,
 	.urb_dequeue = ehci_urb_dequeue,
 	.endpoint_disable = ehci_endpoint_disable,
+	.endpoint_reset = ehci_endpoint_reset,
 
 	/*
 	 * scheduling support
@@ -187,7 +188,7 @@
 	}
 }
 
-static int __init ehci_orion_drv_probe(struct platform_device *pdev)
+static int __devinit ehci_orion_drv_probe(struct platform_device *pdev)
 {
 	struct orion_ehci_data *pd = pdev->dev.platform_data;
 	struct resource *res;
diff --git a/drivers/usb/host/ehci-pci.c b/drivers/usb/host/ehci-pci.c
index 5aa8bce..f3683e1 100644
--- a/drivers/usb/host/ehci-pci.c
+++ b/drivers/usb/host/ehci-pci.c
@@ -268,7 +268,7 @@
  * Also they depend on separate root hub suspend/resume.
  */
 
-static int ehci_pci_suspend(struct usb_hcd *hcd, pm_message_t message)
+static int ehci_pci_suspend(struct usb_hcd *hcd)
 {
 	struct ehci_hcd		*ehci = hcd_to_ehci(hcd);
 	unsigned long		flags;
@@ -293,12 +293,6 @@
 	ehci_writel(ehci, 0, &ehci->regs->intr_enable);
 	(void)ehci_readl(ehci, &ehci->regs->intr_enable);
 
-	/* make sure snapshot being resumed re-enumerates everything */
-	if (message.event == PM_EVENT_PRETHAW) {
-		ehci_halt(ehci);
-		ehci_reset(ehci);
-	}
-
 	clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
  bail:
 	spin_unlock_irqrestore (&ehci->lock, flags);
@@ -309,7 +303,7 @@
 	return rc;
 }
 
-static int ehci_pci_resume(struct usb_hcd *hcd)
+static int ehci_pci_resume(struct usb_hcd *hcd, bool hibernated)
 {
 	struct ehci_hcd		*ehci = hcd_to_ehci(hcd);
 	struct pci_dev		*pdev = to_pci_dev(hcd->self.controller);
@@ -322,10 +316,12 @@
 	/* Mark hardware accessible again as we are out of D3 state by now */
 	set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
 
-	/* If CF is still set, we maintained PCI Vaux power.
+	/* If CF is still set and we aren't resuming from hibernation
+	 * then we maintained PCI Vaux power.
 	 * Just undo the effect of ehci_pci_suspend().
 	 */
-	if (ehci_readl(ehci, &ehci->regs->configured_flag) == FLAG_CF) {
+	if (ehci_readl(ehci, &ehci->regs->configured_flag) == FLAG_CF &&
+				!hibernated) {
 		int	mask = INTR_MASK;
 
 		if (!hcd->self.root_hub->do_remote_wakeup)
@@ -335,7 +331,6 @@
 		return 0;
 	}
 
-	ehci_dbg(ehci, "lost power, restarting\n");
 	usb_root_hub_lost_power(hcd->self.root_hub);
 
 	/* Else reset, to cope with power loss or flush-to-storage
@@ -393,6 +388,7 @@
 	.urb_enqueue =		ehci_urb_enqueue,
 	.urb_dequeue =		ehci_urb_dequeue,
 	.endpoint_disable =	ehci_endpoint_disable,
+	.endpoint_reset =	ehci_endpoint_reset,
 
 	/*
 	 * scheduling support
@@ -429,10 +425,11 @@
 
 	.probe =	usb_hcd_pci_probe,
 	.remove =	usb_hcd_pci_remove,
-
-#ifdef	CONFIG_PM
-	.suspend =	usb_hcd_pci_suspend,
-	.resume =	usb_hcd_pci_resume,
-#endif
 	.shutdown = 	usb_hcd_pci_shutdown,
+
+#ifdef CONFIG_PM_SLEEP
+	.driver =	{
+		.pm =	&usb_hcd_pci_pm_ops
+	},
+#endif
 };
diff --git a/drivers/usb/host/ehci-ppc-of.c b/drivers/usb/host/ehci-ppc-of.c
index ef732b7..fbd27228 100644
--- a/drivers/usb/host/ehci-ppc-of.c
+++ b/drivers/usb/host/ehci-ppc-of.c
@@ -61,6 +61,7 @@
 	.urb_enqueue		= ehci_urb_enqueue,
 	.urb_dequeue		= ehci_urb_dequeue,
 	.endpoint_disable	= ehci_endpoint_disable,
+	.endpoint_reset		= ehci_endpoint_reset,
 
 	/*
 	 * scheduling support
diff --git a/drivers/usb/host/ehci-ps3.c b/drivers/usb/host/ehci-ps3.c
index 1ba9f9a..eecd2a0 100644
--- a/drivers/usb/host/ehci-ps3.c
+++ b/drivers/usb/host/ehci-ps3.c
@@ -65,6 +65,7 @@
 	.urb_enqueue		= ehci_urb_enqueue,
 	.urb_dequeue		= ehci_urb_dequeue,
 	.endpoint_disable	= ehci_endpoint_disable,
+	.endpoint_reset		= ehci_endpoint_reset,
 	.get_frame_number	= ehci_get_frame,
 	.hub_status_data	= ehci_hub_status_data,
 	.hub_control		= ehci_hub_control,
@@ -162,7 +163,7 @@
 	dev_dbg(&dev->core, "%s:%d: virq            %lu\n", __func__, __LINE__,
 		(unsigned long)virq);
 
-	ps3_system_bus_set_driver_data(dev, hcd);
+	ps3_system_bus_set_drvdata(dev, hcd);
 
 	result = usb_add_hcd(hcd, virq, IRQF_DISABLED);
 
@@ -195,8 +196,7 @@
 static int ps3_ehci_remove(struct ps3_system_bus_device *dev)
 {
 	unsigned int tmp;
-	struct usb_hcd *hcd =
-		(struct usb_hcd *)ps3_system_bus_get_driver_data(dev);
+	struct usb_hcd *hcd = ps3_system_bus_get_drvdata(dev);
 
 	BUG_ON(!hcd);
 
@@ -208,7 +208,7 @@
 	ehci_shutdown(hcd);
 	usb_remove_hcd(hcd);
 
-	ps3_system_bus_set_driver_data(dev, NULL);
+	ps3_system_bus_set_drvdata(dev, NULL);
 
 	BUG_ON(!hcd->regs);
 	iounmap(hcd->regs);
diff --git a/drivers/usb/host/ehci-q.c b/drivers/usb/host/ehci-q.c
index 1976b1b..3192f68 100644
--- a/drivers/usb/host/ehci-q.c
+++ b/drivers/usb/host/ehci-q.c
@@ -93,22 +93,6 @@
 	qh->hw_qtd_next = QTD_NEXT(ehci, qtd->qtd_dma);
 	qh->hw_alt_next = EHCI_LIST_END(ehci);
 
-	/* Except for control endpoints, we make hardware maintain data
-	 * toggle (like OHCI) ... here (re)initialize the toggle in the QH,
-	 * and set the pseudo-toggle in udev. Only usb_clear_halt() will
-	 * ever clear it.
-	 */
-	if (!(qh->hw_info1 & cpu_to_hc32(ehci, 1 << 14))) {
-		unsigned	is_out, epnum;
-
-		is_out = !(qtd->hw_token & cpu_to_hc32(ehci, 1 << 8));
-		epnum = (hc32_to_cpup(ehci, &qh->hw_info1) >> 8) & 0x0f;
-		if (unlikely (!usb_gettoggle (qh->dev, epnum, is_out))) {
-			qh->hw_token &= ~cpu_to_hc32(ehci, QTD_TOGGLE);
-			usb_settoggle (qh->dev, epnum, is_out, 1);
-		}
-	}
-
 	/* HC must see latest qtd and qh data before we clear ACTIVE+HALT */
 	wmb ();
 	qh->hw_token &= cpu_to_hc32(ehci, QTD_TOGGLE | QTD_STS_PING);
@@ -850,7 +834,6 @@
 	qh->qh_state = QH_STATE_IDLE;
 	qh->hw_info1 = cpu_to_hc32(ehci, info1);
 	qh->hw_info2 = cpu_to_hc32(ehci, info2);
-	usb_settoggle (urb->dev, usb_pipeendpoint (urb->pipe), !is_input, 1);
 	qh_refresh (ehci, qh);
 	return qh;
 }
@@ -881,7 +864,7 @@
 		}
 	}
 
-	/* clear halt and/or toggle; and maybe recover from silicon quirk */
+	/* clear halt and maybe recover from silicon quirk */
 	if (qh->qh_state == QH_STATE_IDLE)
 		qh_refresh (ehci, qh);
 
diff --git a/drivers/usb/host/ehci-sched.c b/drivers/usb/host/ehci-sched.c
index 556d0ec..9d1babc 100644
--- a/drivers/usb/host/ehci-sched.c
+++ b/drivers/usb/host/ehci-sched.c
@@ -760,8 +760,10 @@
 	if (status) {
 		/* "normal" case, uframing flexible except with splits */
 		if (qh->period) {
-			frame = qh->period - 1;
-			do {
+			int		i;
+
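+			/* start at a different frame each time so periodic
+			 * transfers get spread across the schedule */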
+			for (i = qh->period; status && i > 0; --i) {
+				frame = ++ehci->random_frame % qh->period;
 				for (uframe = 0; uframe < 8; uframe++) {
 					status = check_intr_schedule (ehci,
 							frame, uframe, qh,
@@ -769,7 +771,7 @@
 					if (status == 0)
 						break;
 				}
-			} while (status && frame--);
+			}
 
 		/* qh->period == 0 means every uframe */
 		} else {
diff --git a/drivers/usb/host/ehci.h b/drivers/usb/host/ehci.h
index 6cff195..90ad339 100644
--- a/drivers/usb/host/ehci.h
+++ b/drivers/usb/host/ehci.h
@@ -116,6 +116,7 @@
 	struct timer_list	watchdog;
 	unsigned long		actions;
 	unsigned		stamp;
+	unsigned		random_frame;
 	unsigned long		next_statechange;
 	u32			command;
 
diff --git a/drivers/usb/host/fhci-dbg.c b/drivers/usb/host/fhci-dbg.c
index ea8a425..e799f86 100644
--- a/drivers/usb/host/fhci-dbg.c
+++ b/drivers/usb/host/fhci-dbg.c
@@ -108,7 +108,7 @@
 {
 	struct device *dev = fhci_to_hcd(fhci)->self.controller;
 
-	fhci->dfs_root = debugfs_create_dir(dev_name(dev), NULL);
+	fhci->dfs_root = debugfs_create_dir(dev_name(dev), usb_debug_root);
 	if (!fhci->dfs_root) {
 		WARN_ON(1);
 		return;
diff --git a/drivers/usb/host/hwa-hc.c b/drivers/usb/host/hwa-hc.c
index cbf30e5..88b0321 100644
--- a/drivers/usb/host/hwa-hc.c
+++ b/drivers/usb/host/hwa-hc.c
@@ -172,25 +172,6 @@
 
 }
 
-static int hwahc_op_suspend(struct usb_hcd *usb_hcd, pm_message_t msg)
-{
-	struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd);
-	struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc);
-	dev_err(wusbhc->dev, "%s (%p [%p], 0x%lx) UNIMPLEMENTED\n", __func__,
-		usb_hcd, hwahc, *(unsigned long *) &msg);
-	return -ENOSYS;
-}
-
-static int hwahc_op_resume(struct usb_hcd *usb_hcd)
-{
-	struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd);
-	struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc);
-
-	dev_err(wusbhc->dev, "%s (%p [%p]) UNIMPLEMENTED\n", __func__,
-		usb_hcd, hwahc);
-	return -ENOSYS;
-}
-
 /*
  * No need to abort pipes, as when this is called, all the children
  * has been disconnected and that has done it [through
@@ -598,8 +579,6 @@
 	.flags = HCD_USB2,		/* FIXME */
 	.reset = hwahc_op_reset,
 	.start = hwahc_op_start,
-	.pci_suspend = hwahc_op_suspend,
-	.pci_resume = hwahc_op_resume,
 	.stop = hwahc_op_stop,
 	.get_frame_number = hwahc_op_get_frame_number,
 	.urb_enqueue = hwahc_op_urb_enqueue,
diff --git a/drivers/usb/host/ohci-dbg.c b/drivers/usb/host/ohci-dbg.c
index d3269656..811f5dfd 100644
--- a/drivers/usb/host/ohci-dbg.c
+++ b/drivers/usb/host/ohci-dbg.c
@@ -431,7 +431,7 @@
 
 struct debug_buffer {
 	ssize_t (*fill_func)(struct debug_buffer *);	/* fill method */
-	struct device *dev;
+	struct ohci_hcd *ohci;
 	struct mutex mutex;	/* protect filling of buffer */
 	size_t count;		/* number of characters filled into buffer */
 	char *page;
@@ -505,15 +505,11 @@
 
 static ssize_t fill_async_buffer(struct debug_buffer *buf)
 {
-	struct usb_bus		*bus;
-	struct usb_hcd		*hcd;
 	struct ohci_hcd		*ohci;
 	size_t			temp;
 	unsigned long		flags;
 
-	bus = dev_get_drvdata(buf->dev);
-	hcd = bus_to_hcd(bus);
-	ohci = hcd_to_ohci(hcd);
+	ohci = buf->ohci;
 
 	/* display control and bulk lists together, for simplicity */
 	spin_lock_irqsave (&ohci->lock, flags);
@@ -529,8 +525,6 @@
 
 static ssize_t fill_periodic_buffer(struct debug_buffer *buf)
 {
-	struct usb_bus		*bus;
-	struct usb_hcd		*hcd;
 	struct ohci_hcd		*ohci;
 	struct ed		**seen, *ed;
 	unsigned long		flags;
@@ -542,9 +536,7 @@
 		return 0;
 	seen_count = 0;
 
-	bus = (struct usb_bus *)dev_get_drvdata(buf->dev);
-	hcd = bus_to_hcd(bus);
-	ohci = hcd_to_ohci(hcd);
+	ohci = buf->ohci;
 	next = buf->page;
 	size = PAGE_SIZE;
 
@@ -626,7 +618,6 @@
 
 static ssize_t fill_registers_buffer(struct debug_buffer *buf)
 {
-	struct usb_bus		*bus;
 	struct usb_hcd		*hcd;
 	struct ohci_hcd		*ohci;
 	struct ohci_regs __iomem *regs;
@@ -635,9 +626,8 @@
 	char			*next;
 	u32			rdata;
 
-	bus = (struct usb_bus *)dev_get_drvdata(buf->dev);
-	hcd = bus_to_hcd(bus);
-	ohci = hcd_to_ohci(hcd);
+	ohci = buf->ohci;
+	hcd = ohci_to_hcd(ohci);
 	regs = ohci->regs;
 	next = buf->page;
 	size = PAGE_SIZE;
@@ -710,7 +700,7 @@
 	return PAGE_SIZE - size;
 }
 
-static struct debug_buffer *alloc_buffer(struct device *dev,
+static struct debug_buffer *alloc_buffer(struct ohci_hcd *ohci,
 				ssize_t (*fill_func)(struct debug_buffer *))
 {
 	struct debug_buffer *buf;
@@ -718,7 +708,7 @@
 	buf = kzalloc(sizeof(struct debug_buffer), GFP_KERNEL);
 
 	if (buf) {
-		buf->dev = dev;
+		buf->ohci = ohci;
 		buf->fill_func = fill_func;
 		mutex_init(&buf->mutex);
 	}
@@ -810,26 +800,25 @@
 static inline void create_debug_files (struct ohci_hcd *ohci)
 {
 	struct usb_bus *bus = &ohci_to_hcd(ohci)->self;
-	struct device *dev = bus->dev;
 
 	ohci->debug_dir = debugfs_create_dir(bus->bus_name, ohci_debug_root);
 	if (!ohci->debug_dir)
 		goto dir_error;
 
 	ohci->debug_async = debugfs_create_file("async", S_IRUGO,
-						ohci->debug_dir, dev,
+						ohci->debug_dir, ohci,
 						&debug_async_fops);
 	if (!ohci->debug_async)
 		goto async_error;
 
 	ohci->debug_periodic = debugfs_create_file("periodic", S_IRUGO,
-						   ohci->debug_dir, dev,
+						   ohci->debug_dir, ohci,
 						   &debug_periodic_fops);
 	if (!ohci->debug_periodic)
 		goto periodic_error;
 
 	ohci->debug_registers = debugfs_create_file("registers", S_IRUGO,
-						    ohci->debug_dir, dev,
+						    ohci->debug_dir, ohci,
 						    &debug_registers_fops);
 	if (!ohci->debug_registers)
 		goto registers_error;
diff --git a/drivers/usb/host/ohci-hcd.c b/drivers/usb/host/ohci-hcd.c
index 25db704..5815168 100644
--- a/drivers/usb/host/ohci-hcd.c
+++ b/drivers/usb/host/ohci-hcd.c
@@ -571,7 +571,7 @@
  */
 static int ohci_run (struct ohci_hcd *ohci)
 {
-	u32			mask, temp;
+	u32			mask, val;
 	int			first = ohci->fminterval == 0;
 	struct usb_hcd		*hcd = ohci_to_hcd(ohci);
 
@@ -580,8 +580,8 @@
 	/* boot firmware should have set this up (5.1.1.3.1) */
 	if (first) {
 
-		temp = ohci_readl (ohci, &ohci->regs->fminterval);
-		ohci->fminterval = temp & 0x3fff;
+		val = ohci_readl (ohci, &ohci->regs->fminterval);
+		ohci->fminterval = val & 0x3fff;
 		if (ohci->fminterval != FI)
 			ohci_dbg (ohci, "fminterval delta %d\n",
 				ohci->fminterval - FI);
@@ -600,25 +600,25 @@
 
 	switch (ohci->hc_control & OHCI_CTRL_HCFS) {
 	case OHCI_USB_OPER:
-		temp = 0;
+		val = 0;
 		break;
 	case OHCI_USB_SUSPEND:
 	case OHCI_USB_RESUME:
 		ohci->hc_control &= OHCI_CTRL_RWC;
 		ohci->hc_control |= OHCI_USB_RESUME;
-		temp = 10 /* msec wait */;
+		val = 10 /* msec wait */;
 		break;
 	// case OHCI_USB_RESET:
 	default:
 		ohci->hc_control &= OHCI_CTRL_RWC;
 		ohci->hc_control |= OHCI_USB_RESET;
-		temp = 50 /* msec wait */;
+		val = 50 /* msec wait */;
 		break;
 	}
 	ohci_writel (ohci, ohci->hc_control, &ohci->regs->control);
 	// flush the writes
 	(void) ohci_readl (ohci, &ohci->regs->control);
-	msleep(temp);
+	msleep(val);
 
 	memset (ohci->hcca, 0, sizeof (struct ohci_hcca));
 
@@ -628,9 +628,9 @@
 retry:
 	/* HC Reset requires max 10 us delay */
 	ohci_writel (ohci, OHCI_HCR,  &ohci->regs->cmdstatus);
-	temp = 30;	/* ... allow extra time */
+	val = 30;	/* ... allow extra time */
 	while ((ohci_readl (ohci, &ohci->regs->cmdstatus) & OHCI_HCR) != 0) {
-		if (--temp == 0) {
+		if (--val == 0) {
 			spin_unlock_irq (&ohci->lock);
 			ohci_err (ohci, "USB HC reset timed out!\n");
 			return -1;
@@ -699,23 +699,23 @@
 	ohci_writel (ohci, mask, &ohci->regs->intrenable);
 
 	/* handle root hub init quirks ... */
-	temp = roothub_a (ohci);
-	temp &= ~(RH_A_PSM | RH_A_OCPM);
+	val = roothub_a (ohci);
+	val &= ~(RH_A_PSM | RH_A_OCPM);
 	if (ohci->flags & OHCI_QUIRK_SUPERIO) {
 		/* NSC 87560 and maybe others */
-		temp |= RH_A_NOCP;
-		temp &= ~(RH_A_POTPGT | RH_A_NPS);
-		ohci_writel (ohci, temp, &ohci->regs->roothub.a);
+		val |= RH_A_NOCP;
+		val &= ~(RH_A_POTPGT | RH_A_NPS);
+		ohci_writel (ohci, val, &ohci->regs->roothub.a);
 	} else if ((ohci->flags & OHCI_QUIRK_AMD756) ||
 			(ohci->flags & OHCI_QUIRK_HUB_POWER)) {
 		/* hub power always on; required for AMD-756 and some
 		 * Mac platforms.  ganged overcurrent reporting, if any.
 		 */
-		temp |= RH_A_NPS;
-		ohci_writel (ohci, temp, &ohci->regs->roothub.a);
+		val |= RH_A_NPS;
+		ohci_writel (ohci, val, &ohci->regs->roothub.a);
 	}
 	ohci_writel (ohci, RH_HS_LPSC, &ohci->regs->roothub.status);
-	ohci_writel (ohci, (temp & RH_A_NPS) ? 0 : RH_B_PPCM,
+	ohci_writel (ohci, (val & RH_A_NPS) ? 0 : RH_B_PPCM,
 						&ohci->regs->roothub.b);
 	// flush those writes
 	(void) ohci_readl (ohci, &ohci->regs->control);
@@ -724,7 +724,7 @@
 	spin_unlock_irq (&ohci->lock);
 
 	// POTPGT delay is bits 24-31, in 2 ms units.
-	mdelay ((temp >> 23) & 0x1fe);
+	mdelay ((val >> 23) & 0x1fe);
 	hcd->state = HC_STATE_RUNNING;
 
 	if (quirk_zfmicro(ohci)) {
@@ -1105,7 +1105,7 @@
 	set_bit(USB_OHCI_LOADED, &usb_hcds_loaded);
 
 #ifdef DEBUG
-	ohci_debug_root = debugfs_create_dir("ohci", NULL);
+	ohci_debug_root = debugfs_create_dir("ohci", usb_debug_root);
 	if (!ohci_debug_root) {
 		retval = -ENOENT;
 		goto error_debug;
diff --git a/drivers/usb/host/ohci-pci.c b/drivers/usb/host/ohci-pci.c
index f9961b4..d2ba04d 100644
--- a/drivers/usb/host/ohci-pci.c
+++ b/drivers/usb/host/ohci-pci.c
@@ -372,7 +372,7 @@
 
 #ifdef	CONFIG_PM
 
-static int ohci_pci_suspend (struct usb_hcd *hcd, pm_message_t message)
+static int ohci_pci_suspend(struct usb_hcd *hcd)
 {
 	struct ohci_hcd	*ohci = hcd_to_ohci (hcd);
 	unsigned long	flags;
@@ -394,10 +394,6 @@
 	ohci_writel(ohci, OHCI_INTR_MIE, &ohci->regs->intrdisable);
 	(void)ohci_readl(ohci, &ohci->regs->intrdisable);
 
-	/* make sure snapshot being resumed re-enumerates everything */
-	if (message.event == PM_EVENT_PRETHAW)
-		ohci_usb_reset(ohci);
-
 	clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
  bail:
 	spin_unlock_irqrestore (&ohci->lock, flags);
@@ -406,9 +402,14 @@
 }
 
 
-static int ohci_pci_resume (struct usb_hcd *hcd)
+static int ohci_pci_resume(struct usb_hcd *hcd, bool hibernated)
 {
 	set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
+
+	/* Make sure resume from hibernation re-enumerates everything */
+	if (hibernated)
+		ohci_usb_reset(hcd_to_ohci(hcd));
+
 	ohci_finish_controller_resume(hcd);
 	return 0;
 }
@@ -484,12 +485,11 @@
 
 	.probe =	usb_hcd_pci_probe,
 	.remove =	usb_hcd_pci_remove,
-
-#ifdef	CONFIG_PM
-	.suspend =	usb_hcd_pci_suspend,
-	.resume =	usb_hcd_pci_resume,
-#endif
-
 	.shutdown =	usb_hcd_pci_shutdown,
-};
 
+#ifdef CONFIG_PM_SLEEP
+	.driver =	{
+		.pm =	&usb_hcd_pci_pm_ops
+	},
+#endif
+};
diff --git a/drivers/usb/host/ohci-ps3.c b/drivers/usb/host/ohci-ps3.c
index 3d19103..1d56259 100644
--- a/drivers/usb/host/ohci-ps3.c
+++ b/drivers/usb/host/ohci-ps3.c
@@ -162,7 +162,7 @@
 	dev_dbg(&dev->core, "%s:%d: virq            %lu\n", __func__, __LINE__,
 		(unsigned long)virq);
 
-	ps3_system_bus_set_driver_data(dev, hcd);
+	ps3_system_bus_set_drvdata(dev, hcd);
 
 	result = usb_add_hcd(hcd, virq, IRQF_DISABLED);
 
@@ -195,8 +195,7 @@
 static int ps3_ohci_remove(struct ps3_system_bus_device *dev)
 {
 	unsigned int tmp;
-	struct usb_hcd *hcd =
-		(struct usb_hcd *)ps3_system_bus_get_driver_data(dev);
+	struct usb_hcd *hcd = ps3_system_bus_get_drvdata(dev);
 
 	BUG_ON(!hcd);
 
@@ -208,7 +207,7 @@
 	ohci_shutdown(hcd);
 	usb_remove_hcd(hcd);
 
-	ps3_system_bus_set_driver_data(dev, NULL);
+	ps3_system_bus_set_drvdata(dev, NULL);
 
 	BUG_ON(!hcd->regs);
 	iounmap(hcd->regs);
diff --git a/drivers/usb/host/pci-quirks.c b/drivers/usb/host/pci-quirks.c
index 033c284..83b5f9c 100644
--- a/drivers/usb/host/pci-quirks.c
+++ b/drivers/usb/host/pci-quirks.c
@@ -15,6 +15,7 @@
 #include <linux/delay.h>
 #include <linux/acpi.h>
 #include "pci-quirks.h"
+#include "xhci-ext-caps.h"
 
 
 #define UHCI_USBLEGSUP		0xc0		/* legacy support */
@@ -341,7 +342,127 @@
 	return;
 }
 
+/*
+ * handshake - spin reading a register until handshake completes
+ * @ptr: address of hc register to be read
+ * @mask: bits to look at in result of read
+ * @done: value of those bits when handshake succeeds
+ * @wait_usec: timeout in microseconds
+ * @delay_usec: delay in microseconds to wait between polling
+ *
+ * Polls a register every delay_usec microseconds.
+ * Returns 0 when the mask bits have the value done.
+ * Returns -ETIMEDOUT if this condition is not true after
+ * wait_usec microseconds have passed.
+ */
+static int handshake(void __iomem *ptr, u32 mask, u32 done,
+		int wait_usec, int delay_usec)
+{
+	u32	result;
 
+	do {
+		result = readl(ptr);
+		result &= mask;
+		if (result == done)
+			return 0;
+		udelay(delay_usec);
+		wait_usec -= delay_usec;
+	} while (wait_usec > 0);
+	return -ETIMEDOUT;
+}
+
+/**
+ * PCI Quirks for xHCI.
+ *
+ * Takes care of the handoff between the Pre-OS (i.e. BIOS) and the OS.
+ * It signals to the BIOS that the OS wants control of the host controller,
+ * and then waits 5 seconds for the BIOS to hand over control.
+ * If we timeout, assume the BIOS is broken and take control anyway.
+ */
+static void __devinit quirk_usb_handoff_xhci(struct pci_dev *pdev)
+{
+	void __iomem *base;
+	int ext_cap_offset;
+	void __iomem *op_reg_base;
+	u32 val;
+	int timeout;
+
+	if (!mmio_resource_enabled(pdev, 0))
+		return;
+
+	base = ioremap_nocache(pci_resource_start(pdev, 0),
+				pci_resource_len(pdev, 0));
+	if (base == NULL)
+		return;
+
+	/*
+	 * Find the Legacy Support Capability register -
+	 * this is optional for xHCI host controllers.
+	 */
+	ext_cap_offset = xhci_find_next_cap_offset(base, XHCI_HCC_PARAMS_OFFSET);
+	do {
+		if (!ext_cap_offset)
+			/* We've reached the end of the extended capabilities */
+			goto hc_init;
+		val = readl(base + ext_cap_offset);
+		if (XHCI_EXT_CAPS_ID(val) == XHCI_EXT_CAPS_LEGACY)
+			break;
+		ext_cap_offset = xhci_find_next_cap_offset(base, ext_cap_offset);
+	} while (1);
+
+	/* If the BIOS owns the HC, signal that the OS wants it, and wait */
+	if (val & XHCI_HC_BIOS_OWNED) {
+		writel(val | XHCI_HC_OS_OWNED, base + ext_cap_offset);
+
+		/* Wait for 5 seconds with 10 microsecond polling interval */
+		timeout = handshake(base + ext_cap_offset, XHCI_HC_BIOS_OWNED,
+				0, 5000, 10);
+
+		/* Assume a buggy BIOS and take HC ownership anyway */
+		if (timeout) {
+			dev_warn(&pdev->dev, "xHCI BIOS handoff failed"
+					" (BIOS bug ?) %08x\n", val);
+			writel(val & ~XHCI_HC_BIOS_OWNED, base + ext_cap_offset);
+		}
+	}
+
+	/* Disable any BIOS SMIs */
+	writel(XHCI_LEGACY_DISABLE_SMI,
+			base + ext_cap_offset + XHCI_LEGACY_CONTROL_OFFSET);
+
+hc_init:
+	op_reg_base = base + XHCI_HC_LENGTH(readl(base));
+
+	/* Wait for the host controller to be ready before writing any
+	 * operational or runtime registers.  Wait 5 seconds and no more.
+	 */
+	timeout = handshake(op_reg_base + XHCI_STS_OFFSET, XHCI_STS_CNR, 0,
+			5000, 10);
+	/* Assume a buggy HC and start HC initialization anyway */
+	if (timeout) {
+		val = readl(op_reg_base + XHCI_STS_OFFSET);
+		dev_warn(&pdev->dev,
+				"xHCI HW not ready after 5 sec (HC bug?) "
+				"status = 0x%x\n", val);
+	}
+
+	/* Send the halt and disable interrupts command */
+	val = readl(op_reg_base + XHCI_CMD_OFFSET);
+	val &= ~(XHCI_CMD_RUN | XHCI_IRQS);
+	writel(val, op_reg_base + XHCI_CMD_OFFSET);
+
+	/* Wait for the HC to halt - poll every 125 usec (one microframe). */
+	timeout = handshake(op_reg_base + XHCI_STS_OFFSET, XHCI_STS_HALT, 1,
+			XHCI_MAX_HALT_USEC, 125);
+	if (timeout) {
+		val = readl(op_reg_base + XHCI_STS_OFFSET);
+		dev_warn(&pdev->dev,
+				"xHCI HW did not halt within %d usec "
+				"status = 0x%x\n", XHCI_MAX_HALT_USEC, val);
+	}
+
+	iounmap(base);
+}
 
 static void __devinit quirk_usb_early_handoff(struct pci_dev *pdev)
 {
@@ -351,5 +472,7 @@
 		quirk_usb_handoff_ohci(pdev);
 	else if (pdev->class == PCI_CLASS_SERIAL_USB_EHCI)
 		quirk_usb_disable_ehci(pdev);
+	else if (pdev->class == PCI_CLASS_SERIAL_USB_XHCI)
+		quirk_usb_handoff_xhci(pdev);
 }
 DECLARE_PCI_FIXUP_FINAL(PCI_ANY_ID, PCI_ANY_ID, quirk_usb_early_handoff);
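
The handoff quirk above boils down to one polling pattern: set the "OS owned" semaphore bit, then spin on the legacy-support register until the "BIOS owned" bit clears or the timeout expires.  A minimal stand-alone sketch of that pattern, not part of the patch (plain C; a memory word stands in for the memory-mapped register, the udelay() between polls is elided, and the bit positions mirror the XHCI_HC_*_OWNED definitions added in xhci-ext-caps.h below):

    #include <stdint.h>
    #include <stdio.h>
    #include <errno.h>

    #define HC_BIOS_OWNED (1u << 16)     /* mirrors XHCI_HC_BIOS_OWNED */
    #define HC_OS_OWNED   (1u << 24)     /* mirrors XHCI_HC_OS_OWNED   */

    /* Poll *reg until (value & mask) == done, or until wait_usec expires. */
    static int poll_handshake(volatile uint32_t *reg, uint32_t mask,
                              uint32_t done, int wait_usec, int delay_usec)
    {
        do {
            if ((*reg & mask) == done)
                return 0;
            /* udelay(delay_usec) in the kernel version */
            wait_usec -= delay_usec;
        } while (wait_usec > 0);
        return -ETIMEDOUT;
    }

    int main(void)
    {
        volatile uint32_t legsup = HC_BIOS_OWNED;  /* BIOS currently owns the HC */

        legsup |= HC_OS_OWNED;        /* OS requests ownership              */
        legsup &= ~HC_BIOS_OWNED;     /* pretend the BIOS responded in time */

        if (poll_handshake(&legsup, HC_BIOS_OWNED, 0, 5000, 10) == 0)
            printf("BIOS handed over the controller\n");
        else
            printf("handoff timed out, claiming the HC anyway\n");
        return 0;
    }
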
diff --git a/drivers/usb/host/r8a66597-hcd.c b/drivers/usb/host/r8a66597-hcd.c
index f1626e5..56976cc 100644
--- a/drivers/usb/host/r8a66597-hcd.c
+++ b/drivers/usb/host/r8a66597-hcd.c
@@ -46,31 +46,10 @@
 MODULE_AUTHOR("Yoshihiro Shimoda");
 MODULE_ALIAS("platform:r8a66597_hcd");
 
-#define DRIVER_VERSION	"10 Apr 2008"
+#define DRIVER_VERSION	"2009-05-26"
 
 static const char hcd_name[] = "r8a66597_hcd";
 
-/* module parameters */
-#if !defined(CONFIG_SUPERH_ON_CHIP_R8A66597)
-static unsigned short clock = XTAL12;
-module_param(clock, ushort, 0644);
-MODULE_PARM_DESC(clock, "input clock: 48MHz=32768, 24MHz=16384, 12MHz=0 "
-		"(default=0)");
-#endif
-
-static unsigned short vif = LDRV;
-module_param(vif, ushort, 0644);
-MODULE_PARM_DESC(vif, "input VIF: 3.3V=32768, 1.5V=0(default=32768)");
-
-static unsigned short endian;
-module_param(endian, ushort, 0644);
-MODULE_PARM_DESC(endian, "data endian: big=256, little=0 (default=0)");
-
-static unsigned short irq_sense = 0xff;
-module_param(irq_sense, ushort, 0644);
-MODULE_PARM_DESC(irq_sense, "IRQ sense: low level=32, falling edge=0 "
-		"(default=32)");
-
 static void packet_write(struct r8a66597 *r8a66597, u16 pipenum);
 static int r8a66597_get_frame(struct usb_hcd *hcd);
 
@@ -136,7 +115,8 @@
 		}
 	} while ((tmp & USBE) != USBE);
 	r8a66597_bclr(r8a66597, USBE, SYSCFG0);
-	r8a66597_mdfy(r8a66597, clock, XTAL, SYSCFG0);
+	r8a66597_mdfy(r8a66597, get_xtal_from_pdata(r8a66597->pdata), XTAL,
+			SYSCFG0);
 
 	i = 0;
 	r8a66597_bset(r8a66597, XCKE, SYSCFG0);
@@ -203,6 +183,9 @@
 static int enable_controller(struct r8a66597 *r8a66597)
 {
 	int ret, port;
+	u16 vif = r8a66597->pdata->vif ? LDRV : 0;
+	u16 irq_sense = r8a66597->irq_sense_low ? INTL : 0;
+	u16 endian = r8a66597->pdata->endian ? BIGEND : 0;
 
 	ret = r8a66597_clock_enable(r8a66597);
 	if (ret < 0)
@@ -2373,7 +2356,7 @@
 	return 0;
 }
 
-static int __init r8a66597_probe(struct platform_device *pdev)
+static int __devinit r8a66597_probe(struct platform_device *pdev)
 {
 #if defined(CONFIG_SUPERH_ON_CHIP_R8A66597) && defined(CONFIG_HAVE_CLK)
 	char clk_name[8];
@@ -2418,6 +2401,12 @@
 		goto clean_up;
 	}
 
+	if (pdev->dev.platform_data == NULL) {
+		dev_err(&pdev->dev, "no platform data\n");
+		ret = -ENODEV;
+		goto clean_up;
+	}
+
 	/* initialize hcd */
 	hcd = usb_create_hcd(&r8a66597_hc_driver, &pdev->dev, (char *)hcd_name);
 	if (!hcd) {
@@ -2428,6 +2417,8 @@
 	r8a66597 = hcd_to_r8a66597(hcd);
 	memset(r8a66597, 0, sizeof(struct r8a66597));
 	dev_set_drvdata(&pdev->dev, r8a66597);
+	r8a66597->pdata = pdev->dev.platform_data;
+	r8a66597->irq_sense_low = irq_trigger == IRQF_TRIGGER_LOW;
 
 #if defined(CONFIG_SUPERH_ON_CHIP_R8A66597) && defined(CONFIG_HAVE_CLK)
 	snprintf(clk_name, sizeof(clk_name), "usb%d", pdev->id);
@@ -2458,29 +2449,6 @@
 
 	hcd->rsrc_start = res->start;
 
-	/* irq_sense setting on cmdline takes precedence over resource
-	 * settings, so the introduction of irqflags in IRQ resourse
-	 * won't disturb existing setups */
-	switch (irq_sense) {
-		case INTL:
-			irq_trigger = IRQF_TRIGGER_LOW;
-			break;
-		case 0:
-			irq_trigger = IRQF_TRIGGER_FALLING;
-			break;
-		case 0xff:
-			if (irq_trigger)
-				irq_sense = (irq_trigger & IRQF_TRIGGER_LOW) ?
-					    INTL : 0;
-			else {
-				irq_sense = INTL;
-				irq_trigger = IRQF_TRIGGER_LOW;
-			}
-			break;
-		default:
-			dev_err(&pdev->dev, "Unknown irq_sense value.\n");
-	}
-
 	ret = usb_add_hcd(hcd, irq, IRQF_DISABLED | irq_trigger);
 	if (ret != 0) {
 		dev_err(&pdev->dev, "Failed to add hcd\n");
diff --git a/drivers/usb/host/r8a66597.h b/drivers/usb/host/r8a66597.h
index f49208f..d72680b4 100644
--- a/drivers/usb/host/r8a66597.h
+++ b/drivers/usb/host/r8a66597.h
@@ -30,6 +30,8 @@
 #include <linux/clk.h>
 #endif
 
+#include <linux/usb/r8a66597.h>
+
 #define SYSCFG0		0x00
 #define SYSCFG1		0x02
 #define SYSSTS0		0x04
@@ -488,6 +490,7 @@
 #if defined(CONFIG_SUPERH_ON_CHIP_R8A66597) && defined(CONFIG_HAVE_CLK)
 	struct clk *clk;
 #endif
+	struct r8a66597_platdata	*pdata;
 	struct r8a66597_device		device0;
 	struct r8a66597_root_hub	root_hub[R8A66597_MAX_ROOT_HUB];
 	struct list_head		pipe_queue[R8A66597_MAX_NUM_PIPE];
@@ -506,6 +509,7 @@
 	unsigned long child_connect_map[4];
 
 	unsigned bus_suspended:1;
+	unsigned irq_sense_low:1;
 };
 
 static inline struct r8a66597 *hcd_to_r8a66597(struct usb_hcd *hcd)
@@ -660,10 +664,36 @@
 {
 	unsigned long dvstctr_reg = get_dvstctr_reg(port);
 
-	if (power)
-		r8a66597_bset(r8a66597, VBOUT, dvstctr_reg);
-	else
-		r8a66597_bclr(r8a66597, VBOUT, dvstctr_reg);
+	if (r8a66597->pdata->port_power) {
+		r8a66597->pdata->port_power(port, power);
+	} else {
+		if (power)
+			r8a66597_bset(r8a66597, VBOUT, dvstctr_reg);
+		else
+			r8a66597_bclr(r8a66597, VBOUT, dvstctr_reg);
+	}
+}
+
+static inline u16 get_xtal_from_pdata(struct r8a66597_platdata *pdata)
+{
+	u16 clock = 0;
+
+	switch (pdata->xtal) {
+	case R8A66597_PLATDATA_XTAL_12MHZ:
+		clock = XTAL12;
+		break;
+	case R8A66597_PLATDATA_XTAL_24MHZ:
+		clock = XTAL24;
+		break;
+	case R8A66597_PLATDATA_XTAL_48MHZ:
+		clock = XTAL48;
+		break;
+	default:
+		printk(KERN_ERR "r8a66597: platdata clock is wrong.\n");
+		break;
+	}
+
+	return clock;
 }
 
 #define get_pipectr_addr(pipenum)	(PIPE1CTR + (pipenum - 1) * 2)
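
With the module parameters removed above, the clock, VIF, endian and port-power settings now have to come from board code via struct r8a66597_platdata.  A hypothetical board-file sketch (field and device names are taken from the driver changes above and the <linux/usb/r8a66597.h> header this series relies on; the iomem/IRQ resources are left out):

    #include <linux/platform_device.h>
    #include <linux/usb/r8a66597.h>

    /* Signature assumed from the pdata->port_power(port, power) call site. */
    static void board_usb_port_power(int port, int power)
    {
        /* e.g. drive a GPIO that switches VBUS for this port */
    }

    static struct r8a66597_platdata board_usb_pdata = {
        .xtal       = R8A66597_PLATDATA_XTAL_12MHZ, /* maps to XTAL12    */
        .vif        = 1,                            /* 3.3V I/O -> LDRV  */
        .endian     = 0,                            /* little-endian bus */
        .port_power = board_usb_port_power,
    };

    static struct platform_device board_usb_device = {
        .name = "r8a66597_hcd",
        .id   = 0,
        .dev  = {
            .platform_data = &board_usb_pdata,
        },
        /* .resource / .num_resources omitted in this sketch */
    };

Note that the probe() change above now rejects devices registered without such platform data (-ENODEV), so board files need an update like this alongside the driver change.
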
diff --git a/drivers/usb/host/uhci-hcd.c b/drivers/usb/host/uhci-hcd.c
index cf5e4cf..274751b 100644
--- a/drivers/usb/host/uhci-hcd.c
+++ b/drivers/usb/host/uhci-hcd.c
@@ -769,7 +769,7 @@
 	return rc;
 }
 
-static int uhci_pci_suspend(struct usb_hcd *hcd, pm_message_t message)
+static int uhci_pci_suspend(struct usb_hcd *hcd)
 {
 	struct uhci_hcd *uhci = hcd_to_uhci(hcd);
 	int rc = 0;
@@ -795,10 +795,6 @@
 
 	/* FIXME: Enable non-PME# remote wakeup? */
 
-	/* make sure snapshot being resumed re-enumerates everything */
-	if (message.event == PM_EVENT_PRETHAW)
-		uhci_hc_died(uhci);
-
 done_okay:
 	clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
 done:
@@ -806,7 +802,7 @@
 	return rc;
 }
 
-static int uhci_pci_resume(struct usb_hcd *hcd)
+static int uhci_pci_resume(struct usb_hcd *hcd, bool hibernated)
 {
 	struct uhci_hcd *uhci = hcd_to_uhci(hcd);
 
@@ -820,6 +816,10 @@
 
 	spin_lock_irq(&uhci->lock);
 
+	/* Make sure resume from hibernation re-enumerates everything */
+	if (hibernated)
+		uhci_hc_died(uhci);
+
 	/* FIXME: Disable non-PME# remote wakeup? */
 
 	/* The firmware or a boot kernel may have changed the controller
@@ -940,10 +940,11 @@
 	.remove =	usb_hcd_pci_remove,
 	.shutdown =	uhci_shutdown,
 
-#ifdef	CONFIG_PM
-	.suspend =	usb_hcd_pci_suspend,
-	.resume =	usb_hcd_pci_resume,
-#endif	/* PM */
+#ifdef CONFIG_PM_SLEEP
+	.driver =	{
+		.pm =	&usb_hcd_pci_pm_ops
+	},
+#endif
 };
  
 static int __init uhci_hcd_init(void)
@@ -961,7 +962,7 @@
 		errbuf = kmalloc(ERRBUF_LEN, GFP_KERNEL);
 		if (!errbuf)
 			goto errbuf_failed;
-		uhci_debugfs_root = debugfs_create_dir("uhci", NULL);
+		uhci_debugfs_root = debugfs_create_dir("uhci", usb_debug_root);
 		if (!uhci_debugfs_root)
 			goto debug_failed;
 	}
diff --git a/drivers/usb/host/uhci-q.c b/drivers/usb/host/uhci-q.c
index 3e5807d..64e57bf 100644
--- a/drivers/usb/host/uhci-q.c
+++ b/drivers/usb/host/uhci-q.c
@@ -260,7 +260,7 @@
 	INIT_LIST_HEAD(&qh->node);
 
 	if (udev) {		/* Normal QH */
-		qh->type = hep->desc.bmAttributes & USB_ENDPOINT_XFERTYPE_MASK;
+		qh->type = usb_endpoint_type(&hep->desc);
 		if (qh->type != USB_ENDPOINT_XFER_ISOC) {
 			qh->dummy_td = uhci_alloc_td(uhci);
 			if (!qh->dummy_td) {
diff --git a/drivers/usb/host/xhci-dbg.c b/drivers/usb/host/xhci-dbg.c
new file mode 100644
index 0000000..2501c57
--- /dev/null
+++ b/drivers/usb/host/xhci-dbg.c
@@ -0,0 +1,485 @@
+/*
+ * xHCI host controller driver
+ *
+ * Copyright (C) 2008 Intel Corp.
+ *
+ * Author: Sarah Sharp
+ * Some code borrowed from the Linux EHCI driver.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include "xhci.h"
+
+#define XHCI_INIT_VALUE 0x0
+
+/* Add verbose debugging later, just print everything for now */
+
+void xhci_dbg_regs(struct xhci_hcd *xhci)
+{
+	u32 temp;
+
+	xhci_dbg(xhci, "// xHCI capability registers at %p:\n",
+			xhci->cap_regs);
+	temp = xhci_readl(xhci, &xhci->cap_regs->hc_capbase);
+	xhci_dbg(xhci, "// @%p = 0x%x (CAPLENGTH AND HCIVERSION)\n",
+			&xhci->cap_regs->hc_capbase, temp);
+	xhci_dbg(xhci, "//   CAPLENGTH: 0x%x\n",
+			(unsigned int) HC_LENGTH(temp));
+#if 0
+	xhci_dbg(xhci, "//   HCIVERSION: 0x%x\n",
+			(unsigned int) HC_VERSION(temp));
+#endif
+
+	xhci_dbg(xhci, "// xHCI operational registers at %p:\n", xhci->op_regs);
+
+	temp = xhci_readl(xhci, &xhci->cap_regs->run_regs_off);
+	xhci_dbg(xhci, "// @%p = 0x%x RTSOFF\n",
+			&xhci->cap_regs->run_regs_off,
+			(unsigned int) temp & RTSOFF_MASK);
+	xhci_dbg(xhci, "// xHCI runtime registers at %p:\n", xhci->run_regs);
+
+	temp = xhci_readl(xhci, &xhci->cap_regs->db_off);
+	xhci_dbg(xhci, "// @%p = 0x%x DBOFF\n", &xhci->cap_regs->db_off, temp);
+	xhci_dbg(xhci, "// Doorbell array at %p:\n", xhci->dba);
+}
+
+static void xhci_print_cap_regs(struct xhci_hcd *xhci)
+{
+	u32 temp;
+
+	xhci_dbg(xhci, "xHCI capability registers at %p:\n", xhci->cap_regs);
+
+	temp = xhci_readl(xhci, &xhci->cap_regs->hc_capbase);
+	xhci_dbg(xhci, "CAPLENGTH AND HCIVERSION 0x%x:\n",
+			(unsigned int) temp);
+	xhci_dbg(xhci, "CAPLENGTH: 0x%x\n",
+			(unsigned int) HC_LENGTH(temp));
+	xhci_dbg(xhci, "HCIVERSION: 0x%x\n",
+			(unsigned int) HC_VERSION(temp));
+
+	temp = xhci_readl(xhci, &xhci->cap_regs->hcs_params1);
+	xhci_dbg(xhci, "HCSPARAMS 1: 0x%x\n",
+			(unsigned int) temp);
+	xhci_dbg(xhci, "  Max device slots: %u\n",
+			(unsigned int) HCS_MAX_SLOTS(temp));
+	xhci_dbg(xhci, "  Max interrupters: %u\n",
+			(unsigned int) HCS_MAX_INTRS(temp));
+	xhci_dbg(xhci, "  Max ports: %u\n",
+			(unsigned int) HCS_MAX_PORTS(temp));
+
+	temp = xhci_readl(xhci, &xhci->cap_regs->hcs_params2);
+	xhci_dbg(xhci, "HCSPARAMS 2: 0x%x\n",
+			(unsigned int) temp);
+	xhci_dbg(xhci, "  Isoc scheduling threshold: %u\n",
+			(unsigned int) HCS_IST(temp));
+	xhci_dbg(xhci, "  Maximum allowed segments in event ring: %u\n",
+			(unsigned int) HCS_ERST_MAX(temp));
+
+	temp = xhci_readl(xhci, &xhci->cap_regs->hcs_params3);
+	xhci_dbg(xhci, "HCSPARAMS 3 0x%x:\n",
+			(unsigned int) temp);
+	xhci_dbg(xhci, "  Worst case U1 device exit latency: %u\n",
+			(unsigned int) HCS_U1_LATENCY(temp));
+	xhci_dbg(xhci, "  Worst case U2 device exit latency: %u\n",
+			(unsigned int) HCS_U2_LATENCY(temp));
+
+	temp = xhci_readl(xhci, &xhci->cap_regs->hcc_params);
+	xhci_dbg(xhci, "HCC PARAMS 0x%x:\n", (unsigned int) temp);
+	xhci_dbg(xhci, "  HC generates %s bit addresses\n",
+			HCC_64BIT_ADDR(temp) ? "64" : "32");
+	/* FIXME */
+	xhci_dbg(xhci, "  FIXME: more HCCPARAMS debugging\n");
+
+	temp = xhci_readl(xhci, &xhci->cap_regs->run_regs_off);
+	xhci_dbg(xhci, "RTSOFF 0x%x:\n", temp & RTSOFF_MASK);
+}
+
+static void xhci_print_command_reg(struct xhci_hcd *xhci)
+{
+	u32 temp;
+
+	temp = xhci_readl(xhci, &xhci->op_regs->command);
+	xhci_dbg(xhci, "USBCMD 0x%x:\n", temp);
+	xhci_dbg(xhci, "  HC is %s\n",
+			(temp & CMD_RUN) ? "running" : "being stopped");
+	xhci_dbg(xhci, "  HC has %sfinished hard reset\n",
+			(temp & CMD_RESET) ? "not " : "");
+	xhci_dbg(xhci, "  Event Interrupts %s\n",
+			(temp & CMD_EIE) ? "enabled " : "disabled");
+	xhci_dbg(xhci, "  Host System Error Interrupts %s\n",
+			(temp & CMD_HSEIE) ? "enabled " : "disabled");
+	xhci_dbg(xhci, "  HC has %sfinished light reset\n",
+			(temp & CMD_LRESET) ? "not " : "");
+}
+
+static void xhci_print_status(struct xhci_hcd *xhci)
+{
+	u32 temp;
+
+	temp = xhci_readl(xhci, &xhci->op_regs->status);
+	xhci_dbg(xhci, "USBSTS 0x%x:\n", temp);
+	xhci_dbg(xhci, "  Event ring is %sempty\n",
+			(temp & STS_EINT) ? "not " : "");
+	xhci_dbg(xhci, "  %sHost System Error\n",
+			(temp & STS_FATAL) ? "WARNING: " : "No ");
+	xhci_dbg(xhci, "  HC is %s\n",
+			(temp & STS_HALT) ? "halted" : "running");
+}
+
+static void xhci_print_op_regs(struct xhci_hcd *xhci)
+{
+	xhci_dbg(xhci, "xHCI operational registers at %p:\n", xhci->op_regs);
+	xhci_print_command_reg(xhci);
+	xhci_print_status(xhci);
+}
+
+static void xhci_print_ports(struct xhci_hcd *xhci)
+{
+	u32 __iomem *addr;
+	int i, j;
+	int ports;
+	char *names[NUM_PORT_REGS] = {
+		"status",
+		"power",
+		"link",
+		"reserved",
+	};
+
+	ports = HCS_MAX_PORTS(xhci->hcs_params1);
+	addr = &xhci->op_regs->port_status_base;
+	for (i = 0; i < ports; i++) {
+		for (j = 0; j < NUM_PORT_REGS; ++j) {
+			xhci_dbg(xhci, "%p port %s reg = 0x%x\n",
+					addr, names[j],
+					(unsigned int) xhci_readl(xhci, addr));
+			addr++;
+		}
+	}
+}
+
+void xhci_print_ir_set(struct xhci_hcd *xhci, struct xhci_intr_reg *ir_set, int set_num)
+{
+	void *addr;
+	u32 temp;
+
+	addr = &ir_set->irq_pending;
+	temp = xhci_readl(xhci, addr);
+	if (temp == XHCI_INIT_VALUE)
+		return;
+
+	xhci_dbg(xhci, "  %p: ir_set[%i]\n", ir_set, set_num);
+
+	xhci_dbg(xhci, "  %p: ir_set.pending = 0x%x\n", addr,
+			(unsigned int)temp);
+
+	addr = &ir_set->irq_control;
+	temp = xhci_readl(xhci, addr);
+	xhci_dbg(xhci, "  %p: ir_set.control = 0x%x\n", addr,
+			(unsigned int)temp);
+
+	addr = &ir_set->erst_size;
+	temp = xhci_readl(xhci, addr);
+	xhci_dbg(xhci, "  %p: ir_set.erst_size = 0x%x\n", addr,
+			(unsigned int)temp);
+
+	addr = &ir_set->rsvd;
+	temp = xhci_readl(xhci, addr);
+	if (temp != XHCI_INIT_VALUE)
+		xhci_dbg(xhci, "  WARN: %p: ir_set.rsvd = 0x%x\n",
+				addr, (unsigned int)temp);
+
+	addr = &ir_set->erst_base[0];
+	temp = xhci_readl(xhci, addr);
+	xhci_dbg(xhci, "  %p: ir_set.erst_base[0] = 0x%x\n",
+			addr, (unsigned int) temp);
+
+	addr = &ir_set->erst_base[1];
+	temp = xhci_readl(xhci, addr);
+	xhci_dbg(xhci, "  %p: ir_set.erst_base[1] = 0x%x\n",
+			addr, (unsigned int) temp);
+
+	addr = &ir_set->erst_dequeue[0];
+	temp = xhci_readl(xhci, addr);
+	xhci_dbg(xhci, "  %p: ir_set.erst_dequeue[0] = 0x%x\n",
+			addr, (unsigned int) temp);
+
+	addr = &ir_set->erst_dequeue[1];
+	temp = xhci_readl(xhci, addr);
+	xhci_dbg(xhci, "  %p: ir_set.erst_dequeue[1] = 0x%x\n",
+			addr, (unsigned int) temp);
+}
+
+void xhci_print_run_regs(struct xhci_hcd *xhci)
+{
+	u32 temp;
+	int i;
+
+	xhci_dbg(xhci, "xHCI runtime registers at %p:\n", xhci->run_regs);
+	temp = xhci_readl(xhci, &xhci->run_regs->microframe_index);
+	xhci_dbg(xhci, "  %p: Microframe index = 0x%x\n",
+			&xhci->run_regs->microframe_index,
+			(unsigned int) temp);
+	for (i = 0; i < 7; ++i) {
+		temp = xhci_readl(xhci, &xhci->run_regs->rsvd[i]);
+		if (temp != XHCI_INIT_VALUE)
+			xhci_dbg(xhci, "  WARN: %p: Rsvd[%i] = 0x%x\n",
+					&xhci->run_regs->rsvd[i],
+					i, (unsigned int) temp);
+	}
+}
+
+void xhci_print_registers(struct xhci_hcd *xhci)
+{
+	xhci_print_cap_regs(xhci);
+	xhci_print_op_regs(xhci);
+	xhci_print_ports(xhci);
+}
+
+void xhci_print_trb_offsets(struct xhci_hcd *xhci, union xhci_trb *trb)
+{
+	int i;
+	for (i = 0; i < 4; ++i)
+		xhci_dbg(xhci, "Offset 0x%x = 0x%x\n",
+				i*4, trb->generic.field[i]);
+}
+
+/**
+ * Debug a transfer request block (TRB).
+ */
+void xhci_debug_trb(struct xhci_hcd *xhci, union xhci_trb *trb)
+{
+	u64	address;
+	u32	type = xhci_readl(xhci, &trb->link.control) & TRB_TYPE_BITMASK;
+
+	switch (type) {
+	case TRB_TYPE(TRB_LINK):
+		xhci_dbg(xhci, "Link TRB:\n");
+		xhci_print_trb_offsets(xhci, trb);
+
+		address = trb->link.segment_ptr[0] +
+			(((u64) trb->link.segment_ptr[1]) << 32);
+		xhci_dbg(xhci, "Next ring segment DMA address = 0x%llx\n", address);
+
+		xhci_dbg(xhci, "Interrupter target = 0x%x\n",
+				GET_INTR_TARGET(trb->link.intr_target));
+		xhci_dbg(xhci, "Cycle bit = %u\n",
+				(unsigned int) (trb->link.control & TRB_CYCLE));
+		xhci_dbg(xhci, "Toggle cycle bit = %u\n",
+				(unsigned int) (trb->link.control & LINK_TOGGLE));
+		xhci_dbg(xhci, "No Snoop bit = %u\n",
+				(unsigned int) (trb->link.control & TRB_NO_SNOOP));
+		break;
+	case TRB_TYPE(TRB_TRANSFER):
+		address = trb->trans_event.buffer[0] +
+			(((u64) trb->trans_event.buffer[1]) << 32);
+		/*
+		 * FIXME: look at flags to figure out if it's an address or if
+		 * the data is directly in the buffer field.
+		 */
+		xhci_dbg(xhci, "DMA address or buffer contents = %llu\n", address);
+		break;
+	case TRB_TYPE(TRB_COMPLETION):
+		address = trb->event_cmd.cmd_trb[0] +
+			(((u64) trb->event_cmd.cmd_trb[1]) << 32);
+		xhci_dbg(xhci, "Command TRB pointer = %llu\n", address);
+		xhci_dbg(xhci, "Completion status = %u\n",
+				(unsigned int) GET_COMP_CODE(trb->event_cmd.status));
+		xhci_dbg(xhci, "Flags = 0x%x\n", (unsigned int) trb->event_cmd.flags);
+		break;
+	default:
+		xhci_dbg(xhci, "Unknown TRB with TRB type ID %u\n",
+				(unsigned int) type>>10);
+		xhci_print_trb_offsets(xhci, trb);
+		break;
+	}
+}
+
+/**
+ * Debug a segment within an xHCI ring.
+ *
+ * @return The Link TRB of the segment, or NULL if there is no Link TRB
+ * (which is a bug, since all segments must have a Link TRB).
+ *
+ * Prints out all TRBs in the segment, even those after the Link TRB.
+ *
+ * XXX: should we print out TRBs that the HC owns?  As long as we don't
+ * write, that should be fine...  We shouldn't expect that the memory pointed to
+ * by the TRB is valid at all.  Do we care about ones the HC owns?  Probably,
+ * for HC debugging.
+ */
+void xhci_debug_segment(struct xhci_hcd *xhci, struct xhci_segment *seg)
+{
+	int i;
+	u32 addr = (u32) seg->dma;
+	union xhci_trb *trb = seg->trbs;
+
+	for (i = 0; i < TRBS_PER_SEGMENT; ++i) {
+		trb = &seg->trbs[i];
+		xhci_dbg(xhci, "@%08x %08x %08x %08x %08x\n", addr,
+				(unsigned int) trb->link.segment_ptr[0],
+				(unsigned int) trb->link.segment_ptr[1],
+				(unsigned int) trb->link.intr_target,
+				(unsigned int) trb->link.control);
+		addr += sizeof(*trb);
+	}
+}
+
+void xhci_dbg_ring_ptrs(struct xhci_hcd *xhci, struct xhci_ring *ring)
+{
+	xhci_dbg(xhci, "Ring deq = %p (virt), 0x%llx (dma)\n",
+			ring->dequeue,
+			(unsigned long long)xhci_trb_virt_to_dma(ring->deq_seg,
+							    ring->dequeue));
+	xhci_dbg(xhci, "Ring deq updated %u times\n",
+			ring->deq_updates);
+	xhci_dbg(xhci, "Ring enq = %p (virt), 0x%llx (dma)\n",
+			ring->enqueue,
+			(unsigned long long)xhci_trb_virt_to_dma(ring->enq_seg,
+							    ring->enqueue));
+	xhci_dbg(xhci, "Ring enq updated %u times\n",
+			ring->enq_updates);
+}
+
+/**
+ * Debugging for an xHCI ring, which is a queue broken into multiple segments.
+ *
+ * Print out each segment in the ring.  Check that the DMA address in
+ * each link segment actually matches the segment's stored DMA address.
+ * Check that the link end bit is only set at the end of the ring.
+ * Check that the dequeue and enqueue pointers point to real data in this ring
+ * (not some other ring).
+ */
+void xhci_debug_ring(struct xhci_hcd *xhci, struct xhci_ring *ring)
+{
+	/* FIXME: Throw an error if any segment doesn't have a Link TRB */
+	struct xhci_segment *seg;
+	struct xhci_segment *first_seg = ring->first_seg;
+	xhci_debug_segment(xhci, first_seg);
+
+	if (!ring->enq_updates && !ring->deq_updates) {
+		xhci_dbg(xhci, "  Ring has not been updated\n");
+		return;
+	}
+	for (seg = first_seg->next; seg != first_seg; seg = seg->next)
+		xhci_debug_segment(xhci, seg);
+}
+
+void xhci_dbg_erst(struct xhci_hcd *xhci, struct xhci_erst *erst)
+{
+	u32 addr = (u32) erst->erst_dma_addr;
+	int i;
+	struct xhci_erst_entry *entry;
+
+	for (i = 0; i < erst->num_entries; ++i) {
+		entry = &erst->entries[i];
+		xhci_dbg(xhci, "@%08x %08x %08x %08x %08x\n",
+				(unsigned int) addr,
+				(unsigned int) entry->seg_addr[0],
+				(unsigned int) entry->seg_addr[1],
+				(unsigned int) entry->seg_size,
+				(unsigned int) entry->rsvd);
+		addr += sizeof(*entry);
+	}
+}
+
+void xhci_dbg_cmd_ptrs(struct xhci_hcd *xhci)
+{
+	u32 val;
+
+	val = xhci_readl(xhci, &xhci->op_regs->cmd_ring[0]);
+	xhci_dbg(xhci, "// xHC command ring deq ptr low bits + flags = 0x%x\n", val);
+	val = xhci_readl(xhci, &xhci->op_regs->cmd_ring[1]);
+	xhci_dbg(xhci, "// xHC command ring deq ptr high bits = 0x%x\n", val);
+}
+
+void xhci_dbg_ctx(struct xhci_hcd *xhci, struct xhci_device_control *ctx, dma_addr_t dma, unsigned int last_ep)
+{
+	int i, j;
+	int last_ep_ctx = 31;
+	/* Fields are 32 bits wide, DMA addresses are in bytes */
+	int field_size = 32 / 8;
+
+	xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - drop flags\n",
+			&ctx->drop_flags, (unsigned long long)dma,
+			ctx->drop_flags);
+	dma += field_size;
+	xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - add flags\n",
+			&ctx->add_flags, (unsigned long long)dma,
+			ctx->add_flags);
+	dma += field_size;
+	for (i = 0; i < 6; ++i) {
+		xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - rsvd[%d]\n",
+				&ctx->rsvd[i], (unsigned long long)dma,
+				ctx->rsvd[i], i);
+		dma += field_size;
+	}
+
+	xhci_dbg(xhci, "Slot Context:\n");
+	xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - dev_info\n",
+			&ctx->slot.dev_info,
+			(unsigned long long)dma, ctx->slot.dev_info);
+	dma += field_size;
+	xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - dev_info2\n",
+			&ctx->slot.dev_info2,
+			(unsigned long long)dma, ctx->slot.dev_info2);
+	dma += field_size;
+	xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - tt_info\n",
+			&ctx->slot.tt_info,
+			(unsigned long long)dma, ctx->slot.tt_info);
+	dma += field_size;
+	xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - dev_state\n",
+			&ctx->slot.dev_state,
+			(unsigned long long)dma, ctx->slot.dev_state);
+	dma += field_size;
+	for (i = 0; i < 4; ++i) {
+		xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - rsvd[%d]\n",
+				&ctx->slot.reserved[i], (unsigned long long)dma,
+				ctx->slot.reserved[i], i);
+		dma += field_size;
+	}
+
+	if (last_ep < 31)
+		last_ep_ctx = last_ep + 1;
+	for (i = 0; i < last_ep_ctx; ++i) {
+		xhci_dbg(xhci, "Endpoint %02d Context:\n", i);
+		xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - ep_info\n",
+				&ctx->ep[i].ep_info,
+				(unsigned long long)dma, ctx->ep[i].ep_info);
+		dma += field_size;
+		xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - ep_info2\n",
+				&ctx->ep[i].ep_info2,
+				(unsigned long long)dma, ctx->ep[i].ep_info2);
+		dma += field_size;
+		xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - deq[0]\n",
+				&ctx->ep[i].deq[0],
+				(unsigned long long)dma, ctx->ep[i].deq[0]);
+		dma += field_size;
+		xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - deq[1]\n",
+				&ctx->ep[i].deq[1],
+				(unsigned long long)dma, ctx->ep[i].deq[1]);
+		dma += field_size;
+		xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - tx_info\n",
+				&ctx->ep[i].tx_info,
+				(unsigned long long)dma, ctx->ep[i].tx_info);
+		dma += field_size;
+		for (j = 0; j < 3; ++j) {
+			xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - rsvd[%d]\n",
+					&ctx->ep[i].reserved[j],
+					(unsigned long long)dma,
+					ctx->ep[i].reserved[j], j);
+			dma += field_size;
+		}
+	}
+}
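
Several of the dump helpers above rebuild a 64-bit DMA address from a pair of 32-bit fields (segment_ptr[0]/[1], buffer[0]/[1], cmd_trb[0]/[1]).  A tiny stand-alone sketch of that low/high reassembly, with made-up register values:

    #include <stdint.h>
    #include <stdio.h>

    /* Same arithmetic as xhci_debug_trb(): low word plus high word shifted up. */
    static uint64_t combine_64(uint32_t lo, uint32_t hi)
    {
        return (uint64_t)lo + ((uint64_t)hi << 32);
    }

    int main(void)
    {
        uint32_t segment_ptr[2] = { 0xfffff000u, 0x00000001u };  /* made-up values */

        printf("Next ring segment DMA address = 0x%llx\n",
               (unsigned long long)combine_64(segment_ptr[0], segment_ptr[1]));
        return 0;
    }
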
diff --git a/drivers/usb/host/xhci-ext-caps.h b/drivers/usb/host/xhci-ext-caps.h
new file mode 100644
index 0000000..ecc131c
--- /dev/null
+++ b/drivers/usb/host/xhci-ext-caps.h
@@ -0,0 +1,145 @@
+/*
+ * xHCI host controller driver
+ *
+ * Copyright (C) 2008 Intel Corp.
+ *
+ * Author: Sarah Sharp
+ * Some code borrowed from the Linux EHCI driver.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+/* Up to 16 microframes to halt an HC - one microframe is 125 microseconds */
+#define XHCI_MAX_HALT_USEC	(16*125)
+/* HC not running - set to 1 when run/stop bit is cleared. */
+#define XHCI_STS_HALT		(1<<0)
+
+/* HCCPARAMS offset from PCI base address */
+#define XHCI_HCC_PARAMS_OFFSET	0x10
+/* HCCPARAMS contains the first extended capability pointer */
+#define XHCI_HCC_EXT_CAPS(p)	(((p)>>16)&0xffff)
+
+/* Command and Status registers offset from the Operational Registers address */
+#define XHCI_CMD_OFFSET		0x00
+#define XHCI_STS_OFFSET		0x04
+
+#define XHCI_MAX_EXT_CAPS		50
+
+/* Capability Register */
+/* bits 7:0 - how long is the Capabilities register */
+#define XHCI_HC_LENGTH(p)	(((p)>>00)&0x00ff)
+
+/* Extended capability register fields */
+#define XHCI_EXT_CAPS_ID(p)	(((p)>>0)&0xff)
+#define XHCI_EXT_CAPS_NEXT(p)	(((p)>>8)&0xff)
+#define	XHCI_EXT_CAPS_VAL(p)	((p)>>16)
+/* Extended capability IDs - ID 0 reserved */
+#define XHCI_EXT_CAPS_LEGACY	1
+#define XHCI_EXT_CAPS_PROTOCOL	2
+#define XHCI_EXT_CAPS_PM	3
+#define XHCI_EXT_CAPS_VIRT	4
+#define XHCI_EXT_CAPS_ROUTE	5
+/* IDs 6-9 reserved */
+#define XHCI_EXT_CAPS_DEBUG	10
+/* USB Legacy Support Capability - section 7.1.1 */
+#define XHCI_HC_BIOS_OWNED	(1 << 16)
+#define XHCI_HC_OS_OWNED	(1 << 24)
+
+/* USB Legacy Support Capability - section 7.1.1 */
+/* Add this offset, plus the value of xECP in HCCPARAMS to the base address */
+#define XHCI_LEGACY_SUPPORT_OFFSET	(0x00)
+
+/* USB Legacy Support Control and Status Register  - section 7.1.2 */
+/* Add this offset, plus the value of xECP in HCCPARAMS to the base address */
+#define XHCI_LEGACY_CONTROL_OFFSET	(0x04)
+/* bits 1:2, 5:12, and 17:19 need to be preserved; bits 21:28 should be zero */
+#define	XHCI_LEGACY_DISABLE_SMI		((0x3 << 1) + (0xff << 5) + (0x7 << 17))
+
+/* command register values to disable interrupts and halt the HC */
+/* start/stop HC execution - do not write unless HC is halted*/
+#define XHCI_CMD_RUN		(1 << 0)
+/* Event Interrupt Enable - get irq when EINT bit is set in USBSTS register */
+#define XHCI_CMD_EIE		(1 << 2)
+/* Host System Error Interrupt Enable - get irq when HSEIE bit set in USBSTS */
+#define XHCI_CMD_HSEIE		(1 << 3)
+/* Enable Wrap Event - '1' means xHC generates an event when MFINDEX wraps. */
+#define XHCI_CMD_EWE		(1 << 10)
+
+#define XHCI_IRQS		(XHCI_CMD_EIE | XHCI_CMD_HSEIE | XHCI_CMD_EWE)
+
+/* true: Controller Not Ready to accept doorbell or op reg writes after reset */
+#define XHCI_STS_CNR		(1 << 11)
+
+#include <linux/io.h>
+
+/**
+ * Return the next extended capability pointer register.
+ *
+ * @base	PCI register base address.
+ *
+ * @ext_offset	Offset of the 32-bit register that contains the extended
+ * capabilities pointer.  If searching for the first extended capability, pass
+ * in XHCI_HCC_PARAMS_OFFSET.  If searching for the next extended capability,
+ * pass in the offset of the current extended capability register.
+ *
+ * Returns 0 if there is no next extended capability register or returns the register offset
+ * from the PCI registers base address.
+ */
+static inline int xhci_find_next_cap_offset(void __iomem *base, int ext_offset)
+{
+	u32 next;
+
+	next = readl(base + ext_offset);
+
+	if (ext_offset == XHCI_HCC_PARAMS_OFFSET)
+		/* Find the first extended capability */
+		next = XHCI_HCC_EXT_CAPS(next);
+	else
+		/* Find the next extended capability */
+		next = XHCI_EXT_CAPS_NEXT(next);
+	if (!next)
+		return 0;
+	/*
+	 * Address calculation from offset of extended capabilities
+	 * (or HCCPARAMS) register - see section 5.3.6 and section 7.
+	 */
+	return ext_offset + (next << 2);
+}
+
+/**
+ * Find the offset of the extended capabilities with capability ID id.
+ *
+ * @base PCI MMIO registers base address.
+ * @ext_offset Offset from base of the first extended capability to look at,
+ * 		or the address of HCCPARAMS.
+ * @id Extended capability ID to search for.
+ *
+ * This uses an arbitrary limit of XHCI_MAX_EXT_CAPS extended capabilities
+ * to make sure that the list doesn't contain a loop.
+ */
+static inline int xhci_find_ext_cap_by_id(void __iomem *base, int ext_offset, int id)
+{
+	u32 val;
+	int limit = XHCI_MAX_EXT_CAPS;
+
+	while (ext_offset && limit > 0) {
+		val = readl(base + ext_offset);
+		if (XHCI_EXT_CAPS_ID(val) == id)
+			break;
+		ext_offset = xhci_find_next_cap_offset(base, ext_offset);
+		limit--;
+	}
+	if (limit > 0)
+		return ext_offset;
+	return 0;
+}
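
The two helpers above are meant to be used together: start from XHCI_HCC_PARAMS_OFFSET and either walk the list one capability at a time (as the pci-quirks handoff code does) or jump straight to a capability ID.  A sketch of the ID-based variant, not part of the patch (kernel C; base is assumed to be an already-ioremap()ed pointer to the capability registers):

    #include <linux/errno.h>
    #include <linux/io.h>

    #include "xhci-ext-caps.h"

    /* Return the offset of the USB Legacy Support capability, or -ENODEV. */
    static int find_legacy_cap(void __iomem *base)
    {
        int offset;

        offset = xhci_find_ext_cap_by_id(base, XHCI_HCC_PARAMS_OFFSET,
                                         XHCI_EXT_CAPS_LEGACY);
        if (!offset)
            return -ENODEV;

        /* The capability register itself would be readl(base + offset). */
        return offset;
    }
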
diff --git a/drivers/usb/host/xhci-hcd.c b/drivers/usb/host/xhci-hcd.c
new file mode 100644
index 0000000..dba3e07
--- /dev/null
+++ b/drivers/usb/host/xhci-hcd.c
@@ -0,0 +1,1274 @@
+/*
+ * xHCI host controller driver
+ *
+ * Copyright (C) 2008 Intel Corp.
+ *
+ * Author: Sarah Sharp
+ * Some code borrowed from the Linux EHCI driver.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/irq.h>
+#include <linux/module.h>
+
+#include "xhci.h"
+
+#define DRIVER_AUTHOR "Sarah Sharp"
+#define DRIVER_DESC "'eXtensible' Host Controller (xHC) Driver"
+
+/* TODO: copied from ehci-hcd.c - can this be refactored? */
+/*
+ * handshake - spin reading hc until handshake completes or fails
+ * @ptr: address of hc register to be read
+ * @mask: bits to look at in result of read
+ * @done: value of those bits when handshake succeeds
+ * @usec: timeout in microseconds
+ *
+ * Returns negative errno, or zero on success
+ *
+ * Success happens when the "mask" bits have the specified value (hardware
+ * handshake done).  There are two failure modes: "usec" microseconds have
+ * passed (major hardware flakeout), or the register reads as all-ones
+ * (hardware removed).
+ */
+static int handshake(struct xhci_hcd *xhci, void __iomem *ptr,
+		      u32 mask, u32 done, int usec)
+{
+	u32	result;
+
+	do {
+		result = xhci_readl(xhci, ptr);
+		if (result == ~(u32)0)		/* card removed */
+			return -ENODEV;
+		result &= mask;
+		if (result == done)
+			return 0;
+		udelay(1);
+		usec--;
+	} while (usec > 0);
+	return -ETIMEDOUT;
+}
+
+/*
+ * Force HC into halt state.
+ *
+ * Disable any IRQs and clear the run/stop bit.
+ * HC will complete any current and actively pipelined transactions, and
+ * should halt within 16 microframes of the run/stop bit being cleared.
+ * Read HC Halted bit in the status register to see when the HC is finished.
+ * XXX: shouldn't we set HC_STATE_HALT here somewhere?
+ */
+int xhci_halt(struct xhci_hcd *xhci)
+{
+	u32 halted;
+	u32 cmd;
+	u32 mask;
+
+	xhci_dbg(xhci, "// Halt the HC\n");
+	/* Disable all interrupts from the host controller */
+	mask = ~(XHCI_IRQS);
+	halted = xhci_readl(xhci, &xhci->op_regs->status) & STS_HALT;
+	if (!halted)
+		mask &= ~CMD_RUN;
+
+	cmd = xhci_readl(xhci, &xhci->op_regs->command);
+	cmd &= mask;
+	xhci_writel(xhci, cmd, &xhci->op_regs->command);
+
+	return handshake(xhci, &xhci->op_regs->status,
+			STS_HALT, STS_HALT, XHCI_MAX_HALT_USEC);
+}
+
+/*
+ * Reset a halted HC, and set the internal HC state to HC_STATE_HALT.
+ *
+ * This resets pipelines, timers, counters, state machines, etc.
+ * Transactions will be terminated immediately, and operational registers
+ * will be set to their defaults.
+ */
+int xhci_reset(struct xhci_hcd *xhci)
+{
+	u32 command;
+	u32 state;
+
+	state = xhci_readl(xhci, &xhci->op_regs->status);
+	BUG_ON((state & STS_HALT) == 0);
+
+	xhci_dbg(xhci, "// Reset the HC\n");
+	command = xhci_readl(xhci, &xhci->op_regs->command);
+	command |= CMD_RESET;
+	xhci_writel(xhci, command, &xhci->op_regs->command);
+	/* XXX: Why does EHCI set this here?  Shouldn't other code do this? */
+	xhci_to_hcd(xhci)->state = HC_STATE_HALT;
+
+	return handshake(xhci, &xhci->op_regs->command, CMD_RESET, 0, 250 * 1000);
+}
+
+/*
+ * Stop the HC from processing the endpoint queues.
+ */
+static void xhci_quiesce(struct xhci_hcd *xhci)
+{
+	/*
+	 * Queues are per endpoint, so we need to disable an endpoint or slot.
+	 *
+	 * To disable a slot, we need to insert a disable slot command on the
+	 * command ring and ring the doorbell.  This will also free any internal
+	 * resources associated with the slot (which might not be what we want).
+	 *
+	 * A Release Endpoint command sounds better - doesn't free internal HC
+	 * memory, but removes the endpoints from the schedule and releases the
+	 * bandwidth, disables the doorbells, and clears the endpoint enable
+	 * flag.  Usually used prior to a set interface command.
+	 *
+	 * TODO: Implement after command ring code is done.
+	 */
+	BUG_ON(!HC_IS_RUNNING(xhci_to_hcd(xhci)->state));
+	xhci_dbg(xhci, "Finished quiescing -- code not written yet\n");
+}
+
+#if 0
+/* Set up MSI-X table for entry 0 (may claim other entries later) */
+static int xhci_setup_msix(struct xhci_hcd *xhci)
+{
+	int ret;
+	struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
+
+	xhci->msix_count = 0;
+	/* XXX: did I do this right?  ixgbe does kcalloc for more than one */
+	xhci->msix_entries = kmalloc(sizeof(struct msix_entry), GFP_KERNEL);
+	if (!xhci->msix_entries) {
+		xhci_err(xhci, "Failed to allocate MSI-X entries\n");
+		return -ENOMEM;
+	}
+	xhci->msix_entries[0].entry = 0;
+
+	ret = pci_enable_msix(pdev, xhci->msix_entries, xhci->msix_count);
+	if (ret) {
+		xhci_err(xhci, "Failed to enable MSI-X\n");
+		goto free_entries;
+	}
+
+	/*
+	 * Pass the xhci pointer value as the request_irq "cookie".
+	 * If more irqs are added, this will need to be unique for each one.
+	 */
+	ret = request_irq(xhci->msix_entries[0].vector, &xhci_irq, 0,
+			"xHCI", xhci_to_hcd(xhci));
+	if (ret) {
+		xhci_err(xhci, "Failed to allocate MSI-X interrupt\n");
+		goto disable_msix;
+	}
+	xhci_dbg(xhci, "Finished setting up MSI-X\n");
+	return 0;
+
+disable_msix:
+	pci_disable_msix(pdev);
+free_entries:
+	kfree(xhci->msix_entries);
+	xhci->msix_entries = NULL;
+	return ret;
+}
+
+/* XXX: code duplication; can xhci_setup_msix call this? */
+/* Free any IRQs and disable MSI-X */
+static void xhci_cleanup_msix(struct xhci_hcd *xhci)
+{
+	struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
+	if (!xhci->msix_entries)
+		return;
+
+	free_irq(xhci->msix_entries[0].vector, xhci);
+	pci_disable_msix(pdev);
+	kfree(xhci->msix_entries);
+	xhci->msix_entries = NULL;
+	xhci_dbg(xhci, "Finished cleaning up MSI-X\n");
+}
+#endif
+
+/*
+ * Initialize memory for HCD and xHC (one-time init).
+ *
+ * Program the PAGESIZE register, initialize the device context array, create
+ * device contexts (?), set up a command ring segment (or two?), create event
+ * ring (one for now).
+ */
+int xhci_init(struct usb_hcd *hcd)
+{
+	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+	int retval = 0;
+
+	xhci_dbg(xhci, "xhci_init\n");
+	spin_lock_init(&xhci->lock);
+	retval = xhci_mem_init(xhci, GFP_KERNEL);
+	xhci_dbg(xhci, "Finished xhci_init\n");
+
+	return retval;
+}
+
+/*
+ * Called in interrupt context when there might be work
+ * queued on the event ring
+ *
+ * xhci->lock must be held by caller.
+ */
+static void xhci_work(struct xhci_hcd *xhci)
+{
+	u32 temp;
+
+	/*
+	 * Clear the op reg interrupt status first,
+	 * so we can receive interrupts from other MSI-X interrupters.
+	 * Write 1 to clear the interrupt status.
+	 */
+	temp = xhci_readl(xhci, &xhci->op_regs->status);
+	temp |= STS_EINT;
+	xhci_writel(xhci, temp, &xhci->op_regs->status);
+	/* FIXME when MSI-X is supported and there are multiple vectors */
+	/* Clear the MSI-X event interrupt status */
+
+	/* Acknowledge the interrupt */
+	temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
+	temp |= 0x3;
+	xhci_writel(xhci, temp, &xhci->ir_set->irq_pending);
+	/* Flush posted writes */
+	xhci_readl(xhci, &xhci->ir_set->irq_pending);
+
+	/* FIXME this should be a delayed service routine that clears the EHB */
+	xhci_handle_event(xhci);
+
+	/* Clear the event handler busy flag; the event ring should be empty. */
+	temp = xhci_readl(xhci, &xhci->ir_set->erst_dequeue[0]);
+	xhci_writel(xhci, temp & ~ERST_EHB, &xhci->ir_set->erst_dequeue[0]);
+	/* Flush posted writes -- FIXME is this necessary? */
+	xhci_readl(xhci, &xhci->ir_set->irq_pending);
+}
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * xHCI spec says we can get an interrupt, and if the HC has an error condition,
+ * we might get bad data out of the event ring.  Section 4.10.2.7 has a list of
+ * indicators of an event TRB error, but we check the status *first* to be safe.
+ */
+irqreturn_t xhci_irq(struct usb_hcd *hcd)
+{
+	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+	u32 temp, temp2;
+
+	spin_lock(&xhci->lock);
+	/* Check if the xHC generated the interrupt, or the irq is shared */
+	temp = xhci_readl(xhci, &xhci->op_regs->status);
+	temp2 = xhci_readl(xhci, &xhci->ir_set->irq_pending);
+	if (!(temp & STS_EINT) && !ER_IRQ_PENDING(temp2)) {
+		spin_unlock(&xhci->lock);
+		return IRQ_NONE;
+	}
+
+	if (temp & STS_FATAL) {
+		xhci_warn(xhci, "WARNING: Host System Error\n");
+		xhci_halt(xhci);
+		xhci_to_hcd(xhci)->state = HC_STATE_HALT;
+		spin_unlock(&xhci->lock);
+		return -ESHUTDOWN;
+	}
+
+	xhci_work(xhci);
+	spin_unlock(&xhci->lock);
+
+	return IRQ_HANDLED;
+}
+
+#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
+void xhci_event_ring_work(unsigned long arg)
+{
+	unsigned long flags;
+	int temp;
+	struct xhci_hcd *xhci = (struct xhci_hcd *) arg;
+	int i, j;
+
+	xhci_dbg(xhci, "Poll event ring: %lu\n", jiffies);
+
+	spin_lock_irqsave(&xhci->lock, flags);
+	temp = xhci_readl(xhci, &xhci->op_regs->status);
+	xhci_dbg(xhci, "op reg status = 0x%x\n", temp);
+	temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
+	xhci_dbg(xhci, "ir_set 0 pending = 0x%x\n", temp);
+	xhci_dbg(xhci, "No-op commands handled = %d\n", xhci->noops_handled);
+	xhci_dbg(xhci, "HC error bitmask = 0x%x\n", xhci->error_bitmask);
+	xhci->error_bitmask = 0;
+	xhci_dbg(xhci, "Event ring:\n");
+	xhci_debug_segment(xhci, xhci->event_ring->deq_seg);
+	xhci_dbg_ring_ptrs(xhci, xhci->event_ring);
+	temp = xhci_readl(xhci, &xhci->ir_set->erst_dequeue[0]);
+	temp &= ERST_PTR_MASK;
+	xhci_dbg(xhci, "ERST deq = 0x%x\n", temp);
+	xhci_dbg(xhci, "Command ring:\n");
+	xhci_debug_segment(xhci, xhci->cmd_ring->deq_seg);
+	xhci_dbg_ring_ptrs(xhci, xhci->cmd_ring);
+	xhci_dbg_cmd_ptrs(xhci);
+	for (i = 0; i < MAX_HC_SLOTS; ++i) {
+		if (xhci->devs[i]) {
+			for (j = 0; j < 31; ++j) {
+				if (xhci->devs[i]->ep_rings[j]) {
+					xhci_dbg(xhci, "Dev %d endpoint ring %d:\n", i, j);
+					xhci_debug_segment(xhci, xhci->devs[i]->ep_rings[j]->deq_seg);
+				}
+			}
+		}
+	}
+
+	if (xhci->noops_submitted != NUM_TEST_NOOPS)
+		if (xhci_setup_one_noop(xhci))
+			xhci_ring_cmd_db(xhci);
+	spin_unlock_irqrestore(&xhci->lock, flags);
+
+	if (!xhci->zombie)
+		mod_timer(&xhci->event_ring_timer, jiffies + POLL_TIMEOUT * HZ);
+	else
+		xhci_dbg(xhci, "Quit polling the event ring.\n");
+}
+#endif
+
+/*
+ * Start the HC after it was halted.
+ *
+ * This function is called by the USB core when the HC driver is added.
+ * Its opposite is xhci_stop().
+ *
+ * xhci_init() must be called once before this function can be called.
+ * Reset the HC, enable device slot contexts, program DCBAAP, and
+ * set command ring pointer and event ring pointer.
+ *
+ * Setup MSI-X vectors and enable interrupts.
+ */
+int xhci_run(struct usb_hcd *hcd)
+{
+	u32 temp;
+	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+	void (*doorbell)(struct xhci_hcd *) = NULL;
+
+	hcd->uses_new_polling = 1;
+	hcd->poll_rh = 0;
+
+	xhci_dbg(xhci, "xhci_run\n");
+#if 0	/* FIXME: MSI not setup yet */
+	/* Do this at the very last minute */
+	ret = xhci_setup_msix(xhci);
+	if (!ret)
+		return ret;
+
+	return -ENOSYS;
+#endif
+#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
+	init_timer(&xhci->event_ring_timer);
+	xhci->event_ring_timer.data = (unsigned long) xhci;
+	xhci->event_ring_timer.function = xhci_event_ring_work;
+	/* Poll the event ring */
+	xhci->event_ring_timer.expires = jiffies + POLL_TIMEOUT * HZ;
+	xhci->zombie = 0;
+	xhci_dbg(xhci, "Setting event ring polling timer\n");
+	add_timer(&xhci->event_ring_timer);
+#endif
+
+	xhci_dbg(xhci, "// Set the interrupt modulation register\n");
+	temp = xhci_readl(xhci, &xhci->ir_set->irq_control);
+	temp &= ~ER_IRQ_INTERVAL_MASK;
+	temp |= (u32) 160;
+	xhci_writel(xhci, temp, &xhci->ir_set->irq_control);
+
+	/* Set the HCD state before we enable the irqs */
+	hcd->state = HC_STATE_RUNNING;
+	temp = xhci_readl(xhci, &xhci->op_regs->command);
+	temp |= (CMD_EIE);
+	xhci_dbg(xhci, "// Enable interrupts, cmd = 0x%x.\n",
+			temp);
+	xhci_writel(xhci, temp, &xhci->op_regs->command);
+
+	temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
+	xhci_dbg(xhci, "// Enabling event ring interrupter %p by writing 0x%x to irq_pending\n",
+			xhci->ir_set, (unsigned int) ER_IRQ_ENABLE(temp));
+	xhci_writel(xhci, ER_IRQ_ENABLE(temp),
+			&xhci->ir_set->irq_pending);
+	xhci_print_ir_set(xhci, xhci->ir_set, 0);
+
+	if (NUM_TEST_NOOPS > 0)
+		doorbell = xhci_setup_one_noop(xhci);
+
+	xhci_dbg(xhci, "Command ring memory map follows:\n");
+	xhci_debug_ring(xhci, xhci->cmd_ring);
+	xhci_dbg_ring_ptrs(xhci, xhci->cmd_ring);
+	xhci_dbg_cmd_ptrs(xhci);
+
+	xhci_dbg(xhci, "ERST memory map follows:\n");
+	xhci_dbg_erst(xhci, &xhci->erst);
+	xhci_dbg(xhci, "Event ring:\n");
+	xhci_debug_ring(xhci, xhci->event_ring);
+	xhci_dbg_ring_ptrs(xhci, xhci->event_ring);
+	temp = xhci_readl(xhci, &xhci->ir_set->erst_dequeue[0]);
+	temp &= ERST_PTR_MASK;
+	xhci_dbg(xhci, "ERST deq = 0x%x\n", temp);
+	temp = xhci_readl(xhci, &xhci->ir_set->erst_dequeue[1]);
+	xhci_dbg(xhci, "ERST deq upper = 0x%x\n", temp);
+
+	temp = xhci_readl(xhci, &xhci->op_regs->command);
+	temp |= (CMD_RUN);
+	xhci_dbg(xhci, "// Turn on HC, cmd = 0x%x.\n",
+			temp);
+	xhci_writel(xhci, temp, &xhci->op_regs->command);
+	/* Flush PCI posted writes */
+	temp = xhci_readl(xhci, &xhci->op_regs->command);
+	xhci_dbg(xhci, "// @%p = 0x%x\n", &xhci->op_regs->command, temp);
+	if (doorbell)
+		(*doorbell)(xhci);
+
+	xhci_dbg(xhci, "Finished xhci_run\n");
+	return 0;
+}
+
+/*
+ * Stop xHCI driver.
+ *
+ * This function is called by the USB core when the HC driver is removed.
+ * Its opposite is xhci_run().
+ *
+ * Disable device contexts, disable IRQs, and quiesce the HC.
+ * Reset the HC, finish any completed transactions, and cleanup memory.
+ */
+void xhci_stop(struct usb_hcd *hcd)
+{
+	u32 temp;
+	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+
+	spin_lock_irq(&xhci->lock);
+	if (HC_IS_RUNNING(hcd->state))
+		xhci_quiesce(xhci);
+	xhci_halt(xhci);
+	xhci_reset(xhci);
+	spin_unlock_irq(&xhci->lock);
+
+#if 0	/* No MSI yet */
+	xhci_cleanup_msix(xhci);
+#endif
+#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
+	/* Tell the event ring poll function not to reschedule */
+	xhci->zombie = 1;
+	del_timer_sync(&xhci->event_ring_timer);
+#endif
+
+	xhci_dbg(xhci, "// Disabling event ring interrupts\n");
+	temp = xhci_readl(xhci, &xhci->op_regs->status);
+	xhci_writel(xhci, temp & ~STS_EINT, &xhci->op_regs->status);
+	temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
+	xhci_writel(xhci, ER_IRQ_DISABLE(temp),
+			&xhci->ir_set->irq_pending);
+	xhci_print_ir_set(xhci, xhci->ir_set, 0);
+
+	xhci_dbg(xhci, "cleaning up memory\n");
+	xhci_mem_cleanup(xhci);
+	xhci_dbg(xhci, "xhci_stop completed - status = %x\n",
+		    xhci_readl(xhci, &xhci->op_regs->status));
+}
+
+/*
+ * Shutdown HC (not bus-specific)
+ *
+ * This is called when the machine is rebooting or halting.  We assume that the
+ * machine will be powered off, and the HC's internal state will be reset.
+ * Don't bother to free memory.
+ */
+void xhci_shutdown(struct usb_hcd *hcd)
+{
+	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+
+	spin_lock_irq(&xhci->lock);
+	xhci_halt(xhci);
+	spin_unlock_irq(&xhci->lock);
+
+#if 0
+	xhci_cleanup_msix(xhci);
+#endif
+
+	xhci_dbg(xhci, "xhci_shutdown completed - status = %x\n",
+		    xhci_readl(xhci, &xhci->op_regs->status));
+}
+
+/*-------------------------------------------------------------------------*/
+
+/**
+ * xhci_get_endpoint_index - Used for passing endpoint bitmasks between the core and
+ * HCDs.  Find the index for an endpoint given its descriptor.  Use the return
+ * value to right shift 1 for the bitmask.
+ *
+ * Index  = (epnum * 2) + direction - 1,
+ * where direction = 0 for OUT, 1 for IN.
+ * For control endpoints, the IN index is used (OUT index is unused), so
+ * index = (epnum * 2) + direction - 1 = (epnum * 2) + 1 - 1 = (epnum * 2)
+ */
+unsigned int xhci_get_endpoint_index(struct usb_endpoint_descriptor *desc)
+{
+	unsigned int index;
+	if (usb_endpoint_xfer_control(desc))
+		index = (unsigned int) (usb_endpoint_num(desc)*2);
+	else
+		index = (unsigned int) (usb_endpoint_num(desc)*2) +
+			(usb_endpoint_dir_in(desc) ? 1 : 0) - 1;
+	return index;
+}
+
+/* Find the flag for this endpoint (for use in the control context).  Use the
+ * endpoint index to create a bitmask.  The slot context is bit 0, endpoint 0 is
+ * bit 1, etc.
+ */
+unsigned int xhci_get_endpoint_flag(struct usb_endpoint_descriptor *desc)
+{
+	return 1 << (xhci_get_endpoint_index(desc) + 1);
+}
+
+/* Compute the last valid endpoint context index.  Basically, this is the
+ * endpoint index plus one.  For slot contexts with more than one valid endpoint,
+ * we find the most significant bit set in the added contexts flags.
+ * e.g. ep 1 IN (with epnum 0x81) => added_ctxs = 0b1000
+ * fls(0b1000) = 4, but the endpoint context index is 3, so subtract one.
+ */
+static inline unsigned int xhci_last_valid_endpoint(u32 added_ctxs)
+{
+	return fls(added_ctxs) - 1;
+}
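
A quick sanity check of the index/flag math above, using only the formulas just given: ep 0x81 (ep 1 IN) yields index (1 * 2) + 1 - 1 = 2 and flag 1 << 3 = 0b1000, matching the 0b1000 example in the comment; ep 0x02 (ep 2 OUT) yields index 3 and flag 1 << 4; control endpoint 0 yields index 0 and flag bit 1 (EP0_FLAG), with the slot context at bit 0 (SLOT_FLAG).  A stand-alone check of the same arithmetic, independent of the USB core:

    #include <assert.h>

    /* Same formula as xhci_get_endpoint_index(), with epnum/direction
     * supplied directly instead of via a usb_endpoint_descriptor. */
    static unsigned int ep_index(unsigned int epnum, int is_in, int is_control)
    {
        if (is_control)
            return epnum * 2;
        return epnum * 2 + (is_in ? 1 : 0) - 1;
    }

    int main(void)
    {
        assert(ep_index(1, 1, 0) == 2);                  /* ep 0x81 -> index 2 */
        assert((1u << (ep_index(1, 1, 0) + 1)) == 0x8);  /* flag 0b1000        */
        assert(ep_index(2, 0, 0) == 3);                  /* ep 0x02 -> index 3 */
        assert(ep_index(0, 0, 1) == 0);                  /* ep0 -> index 0     */
        return 0;
    }
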
+
+/* Returns 1 if the arguments are OK;
+ * returns 0 if this is a root hub; returns -EINVAL for NULL pointers.
+ */
+int xhci_check_args(struct usb_hcd *hcd, struct usb_device *udev,
+		struct usb_host_endpoint *ep, int check_ep, const char *func) {
+	if (!hcd || (check_ep && !ep) || !udev) {
+		printk(KERN_DEBUG "xHCI %s called with invalid args\n",
+				func);
+		return -EINVAL;
+	}
+	if (!udev->parent) {
+		printk(KERN_DEBUG "xHCI %s called for root hub\n",
+				func);
+		return 0;
+	}
+	if (!udev->slot_id) {
+		printk(KERN_DEBUG "xHCI %s called with unaddressed device\n",
+				func);
+		return -EINVAL;
+	}
+	return 1;
+}
+
+/*
+ * non-error returns are a promise to giveback() the urb later
+ * we drop ownership so next owner (or urb unlink) can get it
+ */
+int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)
+{
+	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+	unsigned long flags;
+	int ret = 0;
+	unsigned int slot_id, ep_index;
+
+	if (!urb || xhci_check_args(hcd, urb->dev, urb->ep, true, __func__) <= 0)
+		return -EINVAL;
+
+	slot_id = urb->dev->slot_id;
+	ep_index = xhci_get_endpoint_index(&urb->ep->desc);
+
+	spin_lock_irqsave(&xhci->lock, flags);
+	if (!xhci->devs || !xhci->devs[slot_id]) {
+		if (!in_interrupt())
+			dev_warn(&urb->dev->dev, "WARN: urb submitted for dev with no Slot ID\n");
+		ret = -EINVAL;
+		goto exit;
+	}
+	if (!test_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags)) {
+		if (!in_interrupt())
+			xhci_dbg(xhci, "urb submitted during PCI suspend\n");
+		ret = -ESHUTDOWN;
+		goto exit;
+	}
+	if (usb_endpoint_xfer_control(&urb->ep->desc))
+		ret = xhci_queue_ctrl_tx(xhci, mem_flags, urb,
+				slot_id, ep_index);
+	else if (usb_endpoint_xfer_bulk(&urb->ep->desc))
+		ret = xhci_queue_bulk_tx(xhci, mem_flags, urb,
+				slot_id, ep_index);
+	else
+		ret = -EINVAL;
+exit:
+	spin_unlock_irqrestore(&xhci->lock, flags);
+	return ret;
+}
+
+/*
+ * Remove the URB's TD from the endpoint ring.  This may cause the HC to stop
+ * USB transfers, potentially stopping in the middle of a TRB buffer.  The HC
+ * should pick up where it left off in the TD, unless a Set Transfer Ring
+ * Dequeue Pointer is issued.
+ *
+ * The TRBs that make up the buffers for the canceled URB will be "removed" from
+ * the ring.  Since the ring is a contiguous structure, they can't be physically
+ * removed.  Instead, there are two options:
+ *
+ *  1) If the HC is in the middle of processing the URB to be canceled, we
+ *     simply move the ring's dequeue pointer past those TRBs using the Set
+ *     Transfer Ring Dequeue Pointer command.  This will be the common case,
+ *     when drivers timeout on the last submitted URB and attempt to cancel.
+ *
+ *  2) If the HC is in the middle of a different TD, we turn the TRBs into a
+ *     series of 1-TRB transfer no-op TDs.  (No-ops shouldn't be chained.)  The
+ *     HC will need to invalidate any TRBs it has cached after the stop
+ *     endpoint command, as noted in the xHCI 0.95 errata.
+ *
+ *  3) The TD may have completed by the time the Stop Endpoint Command
+ *     completes, so software needs to handle that case too.
+ *
+ * This function should protect against the TD enqueueing code ringing the
+ * doorbell while this code is waiting for a Stop Endpoint command to complete.
+ * It also needs to account for multiple cancellations happening at the same
+ * time for the same endpoint.
+ *
+ * Note that this function can be called in any context, or so says
+ * usb_hcd_unlink_urb()
+ */
+int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
+{
+	unsigned long flags;
+	int ret;
+	struct xhci_hcd *xhci;
+	struct xhci_td *td;
+	unsigned int ep_index;
+	struct xhci_ring *ep_ring;
+
+	xhci = hcd_to_xhci(hcd);
+	spin_lock_irqsave(&xhci->lock, flags);
+	/* Make sure the URB hasn't completed or been unlinked already */
+	ret = usb_hcd_check_unlink_urb(hcd, urb, status);
+	if (ret || !urb->hcpriv)
+		goto done;
+
+	xhci_dbg(xhci, "Cancel URB %p\n", urb);
+	ep_index = xhci_get_endpoint_index(&urb->ep->desc);
+	ep_ring = xhci->devs[urb->dev->slot_id]->ep_rings[ep_index];
+	td = (struct xhci_td *) urb->hcpriv;
+
+	ep_ring->cancels_pending++;
+	list_add_tail(&td->cancelled_td_list, &ep_ring->cancelled_td_list);
+	/* Queue a stop endpoint command, but only if this is
+	 * the first cancellation to be handled.
+	 */
+	if (ep_ring->cancels_pending == 1) {
+		xhci_queue_stop_endpoint(xhci, urb->dev->slot_id, ep_index);
+		xhci_ring_cmd_db(xhci);
+	}
+done:
+	spin_unlock_irqrestore(&xhci->lock, flags);
+	return ret;
+}
+
+/* Drop an endpoint from a new bandwidth configuration for this device.
+ * Only one call to this function is allowed per endpoint before
+ * check_bandwidth() or reset_bandwidth() must be called.
+ * A call to xhci_drop_endpoint() followed by a call to xhci_add_endpoint() will
+ * add the endpoint to the schedule with possibly new parameters denoted by a
+ * different endpoint descriptor in usb_host_endpoint.
+ * A call to xhci_add_endpoint() followed by a call to xhci_drop_endpoint() is
+ * not allowed.
+ *
+ * The USB core will not allow URBs to be queued to an endpoint that is being
+ * disabled, so there's no need for mutual exclusion to protect
+ * the xhci->devs[slot_id] structure.
+ */
+int xhci_drop_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
+		struct usb_host_endpoint *ep)
+{
+	struct xhci_hcd *xhci;
+	struct xhci_device_control *in_ctx;
+	unsigned int last_ctx;
+	unsigned int ep_index;
+	struct xhci_ep_ctx *ep_ctx;
+	u32 drop_flag;
+	u32 new_add_flags, new_drop_flags, new_slot_info;
+	int ret;
+
+	ret = xhci_check_args(hcd, udev, ep, 1, __func__);
+	if (ret <= 0)
+		return ret;
+	xhci = hcd_to_xhci(hcd);
+	xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
+
+	drop_flag = xhci_get_endpoint_flag(&ep->desc);
+	if (drop_flag == SLOT_FLAG || drop_flag == EP0_FLAG) {
+		xhci_dbg(xhci, "xHCI %s - can't drop slot or ep 0 %#x\n",
+				__func__, drop_flag);
+		return 0;
+	}
+
+	if (!xhci->devs || !xhci->devs[udev->slot_id]) {
+		xhci_warn(xhci, "xHCI %s called with unaddressed device\n",
+				__func__);
+		return -EINVAL;
+	}
+
+	in_ctx = xhci->devs[udev->slot_id]->in_ctx;
+	ep_index = xhci_get_endpoint_index(&ep->desc);
+	ep_ctx = &xhci->devs[udev->slot_id]->out_ctx->ep[ep_index];
+	/* If the HC already knows the endpoint is disabled,
+	 * or the HCD has noted it is disabled, ignore this request
+	 */
+	if ((ep_ctx->ep_info & EP_STATE_MASK) == EP_STATE_DISABLED ||
+			in_ctx->drop_flags & xhci_get_endpoint_flag(&ep->desc)) {
+		xhci_warn(xhci, "xHCI %s called with disabled ep %p\n",
+				__func__, ep);
+		return 0;
+	}
+
+	in_ctx->drop_flags |= drop_flag;
+	new_drop_flags = in_ctx->drop_flags;
+
+	in_ctx->add_flags &= ~drop_flag;
+	new_add_flags = in_ctx->add_flags;
+
+	last_ctx = xhci_last_valid_endpoint(in_ctx->add_flags);
+	/* Update the last valid endpoint context, if we deleted the last one */
+	if ((in_ctx->slot.dev_info & LAST_CTX_MASK) > LAST_CTX(last_ctx)) {
+		in_ctx->slot.dev_info &= ~LAST_CTX_MASK;
+		in_ctx->slot.dev_info |= LAST_CTX(last_ctx);
+	}
+	new_slot_info = in_ctx->slot.dev_info;
+
+	xhci_endpoint_zero(xhci, xhci->devs[udev->slot_id], ep);
+
+	xhci_dbg(xhci, "drop ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x, new slot info = %#x\n",
+			(unsigned int) ep->desc.bEndpointAddress,
+			udev->slot_id,
+			(unsigned int) new_drop_flags,
+			(unsigned int) new_add_flags,
+			(unsigned int) new_slot_info);
+	return 0;
+}
+
+/* Add an endpoint to a new possible bandwidth configuration for this device.
+ * Only one call to this function is allowed per endpoint before
+ * check_bandwidth() or reset_bandwidth() must be called.
+ * A call to xhci_drop_endpoint() followed by a call to xhci_add_endpoint() will
+ * add the endpoint to the schedule with possibly new parameters denoted by a
+ * different endpoint descriptor in usb_host_endpoint.
+ * A call to xhci_add_endpoint() followed by a call to xhci_drop_endpoint() is
+ * not allowed.
+ *
+ * The USB core will not allow URBs to be queued to an endpoint until the
+ * configuration or alt setting is installed in the device, so there's no need
+ * for mutual exclusion to protect the xhci->devs[slot_id] structure.
+ */
+int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
+		struct usb_host_endpoint *ep)
+{
+	struct xhci_hcd *xhci;
+	struct xhci_device_control *in_ctx;
+	unsigned int ep_index;
+	struct xhci_ep_ctx *ep_ctx;
+	u32 added_ctxs;
+	unsigned int last_ctx;
+	u32 new_add_flags, new_drop_flags, new_slot_info;
+	int ret = 0;
+
+	ret = xhci_check_args(hcd, udev, ep, 1, __func__);
+	if (ret <= 0)
+		return ret;
+	xhci = hcd_to_xhci(hcd);
+
+	added_ctxs = xhci_get_endpoint_flag(&ep->desc);
+	last_ctx = xhci_last_valid_endpoint(added_ctxs);
+	if (added_ctxs == SLOT_FLAG || added_ctxs == EP0_FLAG) {
+		/* FIXME when we have to issue an evaluate context command to
+		 * deal with ep0 max packet size changing once we get the
+		 * descriptors
+		 */
+		xhci_dbg(xhci, "xHCI %s - can't add slot or ep 0 %#x\n",
+				__func__, added_ctxs);
+		return 0;
+	}
+
+	if (!xhci->devs || !xhci->devs[udev->slot_id]) {
+		xhci_warn(xhci, "xHCI %s called with unaddressed device\n",
+				__func__);
+		return -EINVAL;
+	}
+
+	in_ctx = xhci->devs[udev->slot_id]->in_ctx;
+	ep_index = xhci_get_endpoint_index(&ep->desc);
+	ep_ctx = &xhci->devs[udev->slot_id]->out_ctx->ep[ep_index];
+	/* If the HCD has already noted the endpoint is enabled,
+	 * ignore this request.
+	 */
+	if (in_ctx->add_flags & xhci_get_endpoint_flag(&ep->desc)) {
+		xhci_warn(xhci, "xHCI %s called with enabled ep %p\n",
+				__func__, ep);
+		return 0;
+	}
+
+	/*
+	 * Configuration and alternate setting changes must be done in
+	 * process context, not interrupt context (or so the documentation
+	 * for usb_set_interface() and usb_set_configuration() claims).
+	 */
+	if (xhci_endpoint_init(xhci, xhci->devs[udev->slot_id],
+				udev, ep, GFP_KERNEL) < 0) {
+		dev_dbg(&udev->dev, "%s - could not initialize ep %#x\n",
+				__func__, ep->desc.bEndpointAddress);
+		return -ENOMEM;
+	}
+
+	in_ctx->add_flags |= added_ctxs;
+	new_add_flags = in_ctx->add_flags;
+
+	/* If xhci_endpoint_disable() was called for this endpoint, but the
+	 * xHC hasn't been notified yet through the check_bandwidth() call,
+	 * this re-adds a new state for the endpoint from the new endpoint
+	 * descriptors.  We must drop and re-add this endpoint, so we leave the
+	 * drop flags alone.
+	 */
+	new_drop_flags = in_ctx->drop_flags;
+
+	/* Update the last valid endpoint context, if we just added one past it */
+	if ((in_ctx->slot.dev_info & LAST_CTX_MASK) < LAST_CTX(last_ctx)) {
+		in_ctx->slot.dev_info &= ~LAST_CTX_MASK;
+		in_ctx->slot.dev_info |= LAST_CTX(last_ctx);
+	}
+	new_slot_info = in_ctx->slot.dev_info;
+
+	xhci_dbg(xhci, "add ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x, new slot info = %#x\n",
+			(unsigned int) ep->desc.bEndpointAddress,
+			udev->slot_id,
+			(unsigned int) new_drop_flags,
+			(unsigned int) new_add_flags,
+			(unsigned int) new_slot_info);
+	return 0;
+}
+
+static void xhci_zero_in_ctx(struct xhci_virt_device *virt_dev)
+{
+	struct xhci_ep_ctx *ep_ctx;
+	int i;
+
+	/* When a device's add flag and drop flag are zero, any subsequent
+	 * configure endpoint command will leave that endpoint's state
+	 * untouched.  Make sure we don't leave any old state in the input
+	 * endpoint contexts.
+	 */
+	virt_dev->in_ctx->drop_flags = 0;
+	virt_dev->in_ctx->add_flags = 0;
+	virt_dev->in_ctx->slot.dev_info &= ~LAST_CTX_MASK;
+	/* Endpoint 0 is always valid */
+	virt_dev->in_ctx->slot.dev_info |= LAST_CTX(1);
+	for (i = 1; i < 31; ++i) {
+		ep_ctx = &virt_dev->in_ctx->ep[i];
+		ep_ctx->ep_info = 0;
+		ep_ctx->ep_info2 = 0;
+		ep_ctx->deq[0] = 0;
+		ep_ctx->deq[1] = 0;
+		ep_ctx->tx_info = 0;
+	}
+}
+
+/* Called after one or more calls to xhci_add_endpoint() or
+ * xhci_drop_endpoint().  If this call fails, the USB core is expected
+ * to call xhci_reset_bandwidth().
+ *
+ * Since we are in the middle of changing either configuration or
+ * installing a new alt setting, the USB core won't allow URBs to be
+ * enqueued for any endpoint on the old config or interface.  Nothing
+ * else should be touching the xhci->devs[slot_id] structure, so we
+ * don't need to take the xhci->lock for manipulating that.
+ */
+int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
+{
+	int i;
+	int ret = 0;
+	int timeleft;
+	unsigned long flags;
+	struct xhci_hcd *xhci;
+	struct xhci_virt_device	*virt_dev;
+
+	ret = xhci_check_args(hcd, udev, NULL, 0, __func__);
+	if (ret <= 0)
+		return ret;
+	xhci = hcd_to_xhci(hcd);
+
+	if (!udev->slot_id || !xhci->devs || !xhci->devs[udev->slot_id]) {
+		xhci_warn(xhci, "xHCI %s called with unaddressed device\n",
+				__func__);
+		return -EINVAL;
+	}
+	xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
+	virt_dev = xhci->devs[udev->slot_id];
+
+	/* See section 4.6.6 - A0 = 1; A1 = D0 = D1 = 0 */
+	virt_dev->in_ctx->add_flags |= SLOT_FLAG;
+	virt_dev->in_ctx->add_flags &= ~EP0_FLAG;
+	virt_dev->in_ctx->drop_flags &= ~SLOT_FLAG;
+	virt_dev->in_ctx->drop_flags &= ~EP0_FLAG;
+	xhci_dbg(xhci, "New Input Control Context:\n");
+	xhci_dbg_ctx(xhci, virt_dev->in_ctx, virt_dev->in_ctx_dma,
+			LAST_CTX_TO_EP_NUM(virt_dev->in_ctx->slot.dev_info));
+
+	spin_lock_irqsave(&xhci->lock, flags);
+	ret = xhci_queue_configure_endpoint(xhci, virt_dev->in_ctx_dma,
+			udev->slot_id);
+	if (ret < 0) {
+		spin_unlock_irqrestore(&xhci->lock, flags);
+		xhci_dbg(xhci, "FIXME allocate a new ring segment\n");
+		return -ENOMEM;
+	}
+	xhci_ring_cmd_db(xhci);
+	spin_unlock_irqrestore(&xhci->lock, flags);
+
+	/* Wait for the configure endpoint command to complete */
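+	/* The command completion handler is expected to store the completion
+	 * code in virt_dev->cmd_status and signal virt_dev->cmd_completion.
+	 */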
+	timeleft = wait_for_completion_interruptible_timeout(
+			&virt_dev->cmd_completion,
+			USB_CTRL_SET_TIMEOUT);
+	if (timeleft <= 0) {
+		xhci_warn(xhci, "%s while waiting for configure endpoint command\n",
+				timeleft == 0 ? "Timeout" : "Signal");
+		/* FIXME cancel the configure endpoint command */
+		return -ETIME;
+	}
+
+	switch (virt_dev->cmd_status) {
+	case COMP_ENOMEM:
+		dev_warn(&udev->dev, "Not enough host controller resources "
+				"for new device state.\n");
+		ret = -ENOMEM;
+		/* FIXME: can we allocate more resources for the HC? */
+		break;
+	case COMP_BW_ERR:
+		dev_warn(&udev->dev, "Not enough bandwidth "
+				"for new device state.\n");
+		ret = -ENOSPC;
+		/* FIXME: can we go back to the old state? */
+		break;
+	case COMP_TRB_ERR:
+		/* the HCD set up something wrong */
+		dev_warn(&udev->dev, "ERROR: Endpoint drop flag = 0, add flag = 1, "
+				"and endpoint is not disabled.\n");
+		ret = -EINVAL;
+		break;
+	case COMP_SUCCESS:
+		dev_dbg(&udev->dev, "Successful Endpoint Configure command\n");
+		break;
+	default:
+		xhci_err(xhci, "ERROR: unexpected command completion "
+				"code 0x%x.\n", virt_dev->cmd_status);
+		ret = -EINVAL;
+		break;
+	}
+	if (ret) {
+		/* Caller should call reset_bandwidth() */
+		return ret;
+	}
+
+	xhci_dbg(xhci, "Output context after successful config ep cmd:\n");
+	xhci_dbg_ctx(xhci, virt_dev->out_ctx, virt_dev->out_ctx_dma,
+			LAST_CTX_TO_EP_NUM(virt_dev->in_ctx->slot.dev_info));
+
+	xhci_zero_in_ctx(virt_dev);
+	/* Free any old rings */
+	for (i = 1; i < 31; ++i) {
+		if (virt_dev->new_ep_rings[i]) {
+			xhci_ring_free(xhci, virt_dev->ep_rings[i]);
+			virt_dev->ep_rings[i] = virt_dev->new_ep_rings[i];
+			virt_dev->new_ep_rings[i] = NULL;
+		}
+	}
+
+	return ret;
+}
+
+void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
+{
+	struct xhci_hcd *xhci;
+	struct xhci_virt_device	*virt_dev;
+	int i, ret;
+
+	ret = xhci_check_args(hcd, udev, NULL, 0, __func__);
+	if (ret <= 0)
+		return;
+	xhci = hcd_to_xhci(hcd);
+
+	if (!xhci->devs || !xhci->devs[udev->slot_id]) {
+		xhci_warn(xhci, "xHCI %s called with unaddressed device\n",
+				__func__);
+		return;
+	}
+	xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
+	virt_dev = xhci->devs[udev->slot_id];
+	/* Free any rings allocated for added endpoints */
+	for (i = 0; i < 31; ++i) {
+		if (virt_dev->new_ep_rings[i]) {
+			xhci_ring_free(xhci, virt_dev->new_ep_rings[i]);
+			virt_dev->new_ep_rings[i] = NULL;
+		}
+	}
+	xhci_zero_in_ctx(virt_dev);
+}
+
+/*
+ * At this point, the struct usb_device is about to go away, the device has
+ * disconnected, and all traffic has been stopped and the endpoints have been
+ * disabled.  Free any HC data structures associated with that device.
+ */
+void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev)
+{
+	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+	unsigned long flags;
+
+	if (udev->slot_id == 0)
+		return;
+
+	spin_lock_irqsave(&xhci->lock, flags);
+	if (xhci_queue_slot_control(xhci, TRB_DISABLE_SLOT, udev->slot_id)) {
+		spin_unlock_irqrestore(&xhci->lock, flags);
+		xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
+		return;
+	}
+	xhci_ring_cmd_db(xhci);
+	spin_unlock_irqrestore(&xhci->lock, flags);
+	/*
+	 * Event command completion handler will free any data structures
+	 * associated with the slot.  XXX Can the freeing code sleep?
+	 */
+}
+
+/*
+ * Returns 0 if the xHC ran out of device slots, the Enable Slot command
+ * timed out, or allocating memory failed.  Returns 1 on success.
+ */
+int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev)
+{
+	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+	unsigned long flags;
+	int timeleft;
+	int ret;
+
+	spin_lock_irqsave(&xhci->lock, flags);
+	ret = xhci_queue_slot_control(xhci, TRB_ENABLE_SLOT, 0);
+	if (ret) {
+		spin_unlock_irqrestore(&xhci->lock, flags);
+		xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
+		return 0;
+	}
+	xhci_ring_cmd_db(xhci);
+	spin_unlock_irqrestore(&xhci->lock, flags);
+
+	/* XXX: how much time for xHC slot assignment? */
+	timeleft = wait_for_completion_interruptible_timeout(&xhci->addr_dev,
+			USB_CTRL_SET_TIMEOUT);
+	if (timeleft <= 0) {
+		xhci_warn(xhci, "%s while waiting for a slot\n",
+				timeleft == 0 ? "Timeout" : "Signal");
+		/* FIXME cancel the enable slot request */
+		return 0;
+	}
+
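+	/* The Enable Slot command completion handler is expected to store the
+	 * newly assigned slot ID in xhci->slot_id (0 means no slot was free).
+	 */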
+	if (!xhci->slot_id) {
+		xhci_err(xhci, "Error while assigning device slot ID\n");
+		return 0;
+	}
+	/* xhci_alloc_virt_device() does not touch rings; no need to lock */
+	if (!xhci_alloc_virt_device(xhci, xhci->slot_id, udev, GFP_KERNEL)) {
+		/* Disable slot, if we can do it without mem alloc */
+		xhci_warn(xhci, "Could not allocate xHCI USB device data structures\n");
+		spin_lock_irqsave(&xhci->lock, flags);
+		if (!xhci_queue_slot_control(xhci, TRB_DISABLE_SLOT, udev->slot_id))
+			xhci_ring_cmd_db(xhci);
+		spin_unlock_irqrestore(&xhci->lock, flags);
+		return 0;
+	}
+	udev->slot_id = xhci->slot_id;
+	/* Is this a LS or FS device under a HS hub? */
+	/* Hub or peripheral? */
+	return 1;
+}
+
+/*
+ * Issue an Address Device command (which will issue a SetAddress request to
+ * the device).
+ * We should be protected by the usb_address0_mutex in khubd's hub_port_init, so
+ * we should only issue and wait on one address command at the same time.
+ *
+ * We add one to the device address issued by the hardware because the USB core
+ * uses address 1 for the root hubs (even though they're not really devices).
+ */
+int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
+{
+	unsigned long flags;
+	int timeleft;
+	struct xhci_virt_device *virt_dev;
+	int ret = 0;
+	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+	u32 temp;
+
+	if (!udev->slot_id) {
+		xhci_dbg(xhci, "Bad Slot ID %d\n", udev->slot_id);
+		return -EINVAL;
+	}
+
+	virt_dev = xhci->devs[udev->slot_id];
+
+	/* If this is a Set Address to an unconfigured device, setup ep 0 */
+	if (!udev->config)
+		xhci_setup_addressable_virt_dev(xhci, udev);
+	/* Otherwise, assume the core has the device configured how it wants */
+
+	spin_lock_irqsave(&xhci->lock, flags);
+	ret = xhci_queue_address_device(xhci, virt_dev->in_ctx_dma,
+			udev->slot_id);
+	if (ret) {
+		spin_unlock_irqrestore(&xhci->lock, flags);
+		xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
+		return ret;
+	}
+	xhci_ring_cmd_db(xhci);
+	spin_unlock_irqrestore(&xhci->lock, flags);
+
+	/* ctrl tx can take up to 5 sec; XXX: need more time for xHC? */
+	timeleft = wait_for_completion_interruptible_timeout(&xhci->addr_dev,
+			USB_CTRL_SET_TIMEOUT);
+	/* FIXME: From section 4.3.4: "Software shall be responsible for timing
+	 * the SetAddress() "recovery interval" required by USB and aborting the
+	 * command on a timeout."
+	 */
+	if (timeleft <= 0) {
+		xhci_warn(xhci, "%s while waiting for a slot\n",
+				timeleft == 0 ? "Timeout" : "Signal");
+		/* FIXME cancel the address device command */
+		return -ETIME;
+	}
+
+	switch (virt_dev->cmd_status) {
+	case COMP_CTX_STATE:
+	case COMP_EBADSLT:
+		xhci_err(xhci, "Setup ERROR: address device command for slot %d.\n",
+				udev->slot_id);
+		ret = -EINVAL;
+		break;
+	case COMP_TX_ERR:
+		dev_warn(&udev->dev, "Device not responding to set address.\n");
+		ret = -EPROTO;
+		break;
+	case COMP_SUCCESS:
+		xhci_dbg(xhci, "Successful Address Device command\n");
+		break;
+	default:
+		xhci_err(xhci, "ERROR: unexpected command completion "
+				"code 0x%x.\n", virt_dev->cmd_status);
+		ret = -EINVAL;
+		break;
+	}
+	if (ret) {
+		return ret;
+	}
+	temp = xhci_readl(xhci, &xhci->op_regs->dcbaa_ptr[0]);
+	xhci_dbg(xhci, "Op regs DCBAA ptr[0] = %#08x\n", temp);
+	temp = xhci_readl(xhci, &xhci->op_regs->dcbaa_ptr[1]);
+	xhci_dbg(xhci, "Op regs DCBAA ptr[1] = %#08x\n", temp);
+	xhci_dbg(xhci, "Slot ID %d dcbaa entry[0] @%p = %#08x\n",
+			udev->slot_id,
+			&xhci->dcbaa->dev_context_ptrs[2*udev->slot_id],
+			xhci->dcbaa->dev_context_ptrs[2*udev->slot_id]);
+	xhci_dbg(xhci, "Slot ID %d dcbaa entry[1] @%p = %#08x\n",
+			udev->slot_id,
+			&xhci->dcbaa->dev_context_ptrs[2*udev->slot_id+1],
+			xhci->dcbaa->dev_context_ptrs[2*udev->slot_id+1]);
+	xhci_dbg(xhci, "Output Context DMA address = %#08llx\n",
+			(unsigned long long)virt_dev->out_ctx_dma);
+	xhci_dbg(xhci, "Slot ID %d Input Context:\n", udev->slot_id);
+	xhci_dbg_ctx(xhci, virt_dev->in_ctx, virt_dev->in_ctx_dma, 2);
+	xhci_dbg(xhci, "Slot ID %d Output Context:\n", udev->slot_id);
+	xhci_dbg_ctx(xhci, virt_dev->out_ctx, virt_dev->out_ctx_dma, 2);
+	/*
+	 * USB core uses address 1 for the roothubs, so we add one to the
+	 * address given back to us by the HC.
+	 */
+	udev->devnum = (virt_dev->out_ctx->slot.dev_state & DEV_ADDR_MASK) + 1;
+	/* Zero the input context control for later use */
+	virt_dev->in_ctx->add_flags = 0;
+	virt_dev->in_ctx->drop_flags = 0;
+	/* Mirror flags in the output context for future ep enable/disable */
+	virt_dev->out_ctx->add_flags = SLOT_FLAG | EP0_FLAG;
+	virt_dev->out_ctx->drop_flags = 0;
+
+	xhci_dbg(xhci, "Device address = %d\n", udev->devnum);
+	/* XXX Meh, not sure if anyone else but choose_address uses this. */
+	set_bit(udev->devnum, udev->bus->devmap.devicemap);
+
+	return 0;
+}
+
+int xhci_get_frame(struct usb_hcd *hcd)
+{
+	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+	/* EHCI mods by the periodic size.  Why? */
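+	/* MFINDEX counts 125us microframes; shift right by 3 for 1ms frames. */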
+	return xhci_readl(xhci, &xhci->run_regs->microframe_index) >> 3;
+}
+
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_AUTHOR(DRIVER_AUTHOR);
+MODULE_LICENSE("GPL");
+
+static int __init xhci_hcd_init(void)
+{
+#ifdef CONFIG_PCI
+	int retval = 0;
+
+	retval = xhci_register_pci();
+
+	if (retval < 0) {
+		printk(KERN_DEBUG "Problem registering PCI driver.\n");
+		return retval;
+	}
+#endif
+	/*
+	 * Check the compiler generated sizes of structures that must be laid
+	 * out in specific ways for hardware access.
+	 */
+	BUILD_BUG_ON(sizeof(struct xhci_doorbell_array) != 256*32/8);
+	BUILD_BUG_ON(sizeof(struct xhci_slot_ctx) != 8*32/8);
+	BUILD_BUG_ON(sizeof(struct xhci_ep_ctx) != 8*32/8);
+	/* xhci_device_control has eight fields, and also
+	 * embeds one xhci_slot_ctx and 31 xhci_ep_ctx
+	 */
+	BUILD_BUG_ON(sizeof(struct xhci_device_control) != (8+8+8*31)*32/8);
+	BUILD_BUG_ON(sizeof(struct xhci_stream_ctx) != 4*32/8);
+	BUILD_BUG_ON(sizeof(union xhci_trb) != 4*32/8);
+	BUILD_BUG_ON(sizeof(struct xhci_erst_entry) != 4*32/8);
+	BUILD_BUG_ON(sizeof(struct xhci_cap_regs) != 7*32/8);
+	BUILD_BUG_ON(sizeof(struct xhci_intr_reg) != 8*32/8);
+	/* xhci_run_regs has eight fields and embeds 128 xhci_intr_regs */
+	BUILD_BUG_ON(sizeof(struct xhci_run_regs) != (8+8*128)*32/8);
+	return 0;
+}
+module_init(xhci_hcd_init);
+
+static void __exit xhci_hcd_cleanup(void)
+{
+#ifdef CONFIG_PCI
+	xhci_unregister_pci();
+#endif
+}
+module_exit(xhci_hcd_cleanup);
diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
new file mode 100644
index 0000000..eac5b53
--- /dev/null
+++ b/drivers/usb/host/xhci-hub.c
@@ -0,0 +1,308 @@
+/*
+ * xHCI host controller driver
+ *
+ * Copyright (C) 2008 Intel Corp.
+ *
+ * Author: Sarah Sharp
+ * Some code borrowed from the Linux EHCI driver.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <asm/unaligned.h>
+
+#include "xhci.h"
+
+static void xhci_hub_descriptor(struct xhci_hcd *xhci,
+		struct usb_hub_descriptor *desc)
+{
+	int ports;
+	u16 temp;
+
+	ports = HCS_MAX_PORTS(xhci->hcs_params1);
+
+	/* USB 3.0 hubs have a different descriptor, but we fake this for now */
+	desc->bDescriptorType = 0x29;
+	desc->bPwrOn2PwrGood = 10;	/* xhci section 5.4.9 says 20ms max */
+	desc->bHubContrCurrent = 0;
+
+	desc->bNbrPorts = ports;
+	temp = 1 + (ports / 8);
+	desc->bDescLength = 7 + 2 * temp;
+
+	/* Why does core/hcd.h define bitmap?  It's just confusing. */
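+	/* First 'temp' bytes: DeviceRemovable bitmap (all ports removable);
+	 * next 'temp' bytes: PortPwrCtrlMask, all ones per USB 2.0 11.23.2.1.
+	 */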
+	memset(&desc->DeviceRemovable[0], 0, temp);
+	memset(&desc->DeviceRemovable[temp], 0xff, temp);
+
+	/* Ugh, these should be #defines, FIXME */
+	/* Using table 11-13 in USB 2.0 spec. */
+	temp = 0;
+	/* Bits 1:0 - support port power switching, or power always on */
+	if (HCC_PPC(xhci->hcc_params))
+		temp |= 0x0001;
+	else
+		temp |= 0x0002;
+	/* Bit  2 - root hubs are not part of a compound device */
+	/* Bits 4:3 - individual port over current protection */
+	temp |= 0x0008;
+	/* Bits 6:5 - no TTs in root ports */
+	/* Bit  7 - no port indicators */
+	desc->wHubCharacteristics = (__force __u16) cpu_to_le16(temp);
+}
+
+static unsigned int xhci_port_speed(unsigned int port_status)
+{
+	if (DEV_LOWSPEED(port_status))
+		return 1 << USB_PORT_FEAT_LOWSPEED;
+	if (DEV_HIGHSPEED(port_status))
+		return 1 << USB_PORT_FEAT_HIGHSPEED;
+	if (DEV_SUPERSPEED(port_status))
+		return 1 << USB_PORT_FEAT_SUPERSPEED;
+	/*
+	 * FIXME: Yes, we should check for full speed, but the core uses that as
+	 * a default in portspeed() in usb/core/hub.c (which is the only place
+	 * USB_PORT_FEAT_*SPEED is used).
+	 */
+	return 0;
+}
+
+/*
+ * These bits are Read Only (RO) and should be saved and written to the
+ * registers: 0, 3, 10:13, 30
+ * connect status, over-current status, port speed, and device removable.
+ * connect status and port speed are also sticky - meaning they're in
+ * the AUX well and they aren't changed by a hot, warm, or cold reset.
+ */
+#define	XHCI_PORT_RO	((1<<0) | (1<<3) | (0xf<<10) | (1<<30))
+/*
+ * These bits are RW; writing a 0 clears the bit, writing a 1 sets the bit:
+ * bits 5:8, 9, 14:15, 25:27
+ * link state, port power, port indicator state, "wake on" enable state
+ */
+#define XHCI_PORT_RWS	((0xf<<5) | (1<<9) | (0x3<<14) | (0x7<<25))
+/*
+ * These bits are RW; writing a 1 sets the bit, writing a 0 has no effect:
+ * bit 4 (port reset)
+ */
+#define	XHCI_PORT_RW1S	((1<<4))
+/*
+ * These bits are RW; writing a 1 clears the bit, writing a 0 has no effect:
+ * bits 1, 17, 18, 19, 20, 21, 22, 23
+ * port enable/disable, and
+ * change bits: connect, PED, warm port reset changed (reserved zero for USB 2.0 ports),
+ * over-current, reset, link state, and L1 change
+ */
+#define XHCI_PORT_RW1CS	((1<<1) | (0x7f<<17))
+/*
+ * Bit 16 is RW, and writing a '1' to it causes the link state control to be
+ * latched in
+ */
+#define	XHCI_PORT_RW	((1<<16))
+/*
+ * These bits are Reserved Zero (RsvdZ) and zero should be written to them:
+ * bits 2, 24, 28:31
+ */
+#define	XHCI_PORT_RZ	((1<<2) | (1<<24) | (0xf<<28))
+
+/*
+ * Given a port state, this function returns a value that would result in the
+ * port being in the same state, if the value was written to the port status
+ * control register.
+ * Save Read Only (RO) bits and save read/write bits where
+ * writing a 0 clears the bit and writing a 1 sets the bit (RWS).
+ * For all other types (RW1S, RW1CS, RW, and RZ), writing a '0' has no effect.
+ */
+static u32 xhci_port_state_to_neutral(u32 state)
+{
+	/* Save read-only status and port state */
+	return (state & XHCI_PORT_RO) | (state & XHCI_PORT_RWS);
+}
+
+int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
+		u16 wIndex, char *buf, u16 wLength)
+{
+	struct xhci_hcd	*xhci = hcd_to_xhci(hcd);
+	int ports;
+	unsigned long flags;
+	u32 temp, status;
+	int retval = 0;
+	u32 __iomem *addr;
+	char *port_change_bit;
+
+	ports = HCS_MAX_PORTS(xhci->hcs_params1);
+
+	spin_lock_irqsave(&xhci->lock, flags);
+	switch (typeReq) {
+	case GetHubStatus:
+		/* No power source, over-current reported per port */
+		memset(buf, 0, 4);
+		break;
+	case GetHubDescriptor:
+		xhci_hub_descriptor(xhci, (struct usb_hub_descriptor *) buf);
+		break;
+	case GetPortStatus:
+		if (!wIndex || wIndex > ports)
+			goto error;
+		wIndex--;
+		status = 0;
+		addr = &xhci->op_regs->port_status_base + NUM_PORT_REGS*(wIndex & 0xff);
+		temp = xhci_readl(xhci, addr);
+		xhci_dbg(xhci, "get port status, actual port %d status  = 0x%x\n", wIndex, temp);
+
+		/* wPortChange bits */
+		if (temp & PORT_CSC)
+			status |= 1 << USB_PORT_FEAT_C_CONNECTION;
+		if (temp & PORT_PEC)
+			status |= 1 << USB_PORT_FEAT_C_ENABLE;
+		if ((temp & PORT_OCC))
+			status |= 1 << USB_PORT_FEAT_C_OVER_CURRENT;
+		/*
+		 * FIXME ignoring suspend, reset, and USB 2.1/3.0 specific
+		 * changes
+		 */
+		if (temp & PORT_CONNECT) {
+			status |= 1 << USB_PORT_FEAT_CONNECTION;
+			status |= xhci_port_speed(temp);
+		}
+		if (temp & PORT_PE)
+			status |= 1 << USB_PORT_FEAT_ENABLE;
+		if (temp & PORT_OC)
+			status |= 1 << USB_PORT_FEAT_OVER_CURRENT;
+		if (temp & PORT_RESET)
+			status |= 1 << USB_PORT_FEAT_RESET;
+		if (temp & PORT_POWER)
+			status |= 1 << USB_PORT_FEAT_POWER;
+		xhci_dbg(xhci, "Get port status returned 0x%x\n", status);
+		put_unaligned(cpu_to_le32(status), (__le32 *) buf);
+		break;
+	case SetPortFeature:
+		wIndex &= 0xff;
+		if (!wIndex || wIndex > ports)
+			goto error;
+		wIndex--;
+		addr = &xhci->op_regs->port_status_base + NUM_PORT_REGS*(wIndex & 0xff);
+		temp = xhci_readl(xhci, addr);
+		temp = xhci_port_state_to_neutral(temp);
+		switch (wValue) {
+		case USB_PORT_FEAT_POWER:
+			/*
+			 * Turn on ports, even if there isn't per-port switching.
+			 * HC will report connect events even before this is set.
+			 * However, khubd will ignore the roothub events until
+			 * the roothub is registered.
+			 */
+			xhci_writel(xhci, temp | PORT_POWER, addr);
+
+			temp = xhci_readl(xhci, addr);
+			xhci_dbg(xhci, "set port power, actual port %d status  = 0x%x\n", wIndex, temp);
+			break;
+		case USB_PORT_FEAT_RESET:
+			temp = (temp | PORT_RESET);
+			xhci_writel(xhci, temp, addr);
+
+			temp = xhci_readl(xhci, addr);
+			xhci_dbg(xhci, "set port reset, actual port %d status  = 0x%x\n", wIndex, temp);
+			break;
+		default:
+			goto error;
+		}
+		temp = xhci_readl(xhci, addr); /* unblock any posted writes */
+		break;
+	case ClearPortFeature:
+		if (!wIndex || wIndex > ports)
+			goto error;
+		wIndex--;
+		addr = &xhci->op_regs->port_status_base +
+			NUM_PORT_REGS*(wIndex & 0xff);
+		temp = xhci_readl(xhci, addr);
+		temp = xhci_port_state_to_neutral(temp);
+		switch (wValue) {
+		case USB_PORT_FEAT_C_RESET:
+			status = PORT_RC;
+			port_change_bit = "reset";
+			break;
+		case USB_PORT_FEAT_C_CONNECTION:
+			status = PORT_CSC;
+			port_change_bit = "connect";
+			break;
+		case USB_PORT_FEAT_C_OVER_CURRENT:
+			status = PORT_OCC;
+			port_change_bit = "over-current";
+			break;
+		default:
+			goto error;
+		}
+		/* Change bits are all write 1 to clear */
+		xhci_writel(xhci, temp | status, addr);
+		temp = xhci_readl(xhci, addr);
+		xhci_dbg(xhci, "clear port %s change, actual port %d status  = 0x%x\n",
+				port_change_bit, wIndex, temp);
+		temp = xhci_readl(xhci, addr); /* unblock any posted writes */
+		break;
+	default:
+error:
+		/* "stall" on error */
+		retval = -EPIPE;
+	}
+	spin_unlock_irqrestore(&xhci->lock, flags);
+	return retval;
+}
+
+/*
+ * Returns 0 if the status hasn't changed, or the number of bytes in buf.
+ * Ports are 0-indexed from the HCD point of view,
+ * and 1-indexed from the USB core point of view.
+ * xHCI instances can have up to 127 ports, so FIXME if you see more than 15.
+ *
+ * Note that the status change bits will be cleared as soon as a port status
+ * change event is generated, so we use the saved status from that event.
+ */
+int xhci_hub_status_data(struct usb_hcd *hcd, char *buf)
+{
+	unsigned long flags;
+	u32 temp, status;
+	int i, retval;
+	struct xhci_hcd	*xhci = hcd_to_xhci(hcd);
+	int ports;
+	u32 __iomem *addr;
+
+	ports = HCS_MAX_PORTS(xhci->hcs_params1);
+
+	/* Initial status is no changes */
+	buf[0] = 0;
+	status = 0;
+	if (ports > 7) {
+		buf[1] = 0;
+		retval = 2;
+	} else {
+		retval = 1;
+	}
+
+	spin_lock_irqsave(&xhci->lock, flags);
+	/* For each port, did anything change?  If so, set that bit in buf. */
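+	/* Bit 0 of the bitmap is reserved for the hub itself; port N reports
+	 * its change in bit N, hence the i + 1 below.
+	 */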
+	for (i = 0; i < ports; i++) {
+		addr = &xhci->op_regs->port_status_base +
+			NUM_PORT_REGS*i;
+		temp = xhci_readl(xhci, addr);
+		if (temp & (PORT_CSC | PORT_PEC | PORT_OCC)) {
+			if (i < 7)
+				buf[0] |= 1 << (i + 1);
+			else
+				buf[1] |= 1 << (i - 7);
+			status = 1;
+		}
+	}
+	spin_unlock_irqrestore(&xhci->lock, flags);
+	return status ? retval : 0;
+}
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
new file mode 100644
index 0000000..c8a72de
--- /dev/null
+++ b/drivers/usb/host/xhci-mem.c
@@ -0,0 +1,769 @@
+/*
+ * xHCI host controller driver
+ *
+ * Copyright (C) 2008 Intel Corp.
+ *
+ * Author: Sarah Sharp
+ * Some code borrowed from the Linux EHCI driver.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/usb.h>
+#include <linux/pci.h>
+#include <linux/dmapool.h>
+
+#include "xhci.h"
+
+/*
+ * Allocates a generic ring segment from the ring pool, sets the dma address,
+ * initializes the segment to zero, and sets the private next pointer to NULL.
+ *
+ * Section 4.11.1.1:
+ * "All components of all Command and Transfer TRBs shall be initialized to '0'"
+ */
+static struct xhci_segment *xhci_segment_alloc(struct xhci_hcd *xhci, gfp_t flags)
+{
+	struct xhci_segment *seg;
+	dma_addr_t	dma;
+
+	seg = kzalloc(sizeof *seg, flags);
+	if (!seg)
+		return 0;
+	xhci_dbg(xhci, "Allocating priv segment structure at %p\n", seg);
+
+	seg->trbs = dma_pool_alloc(xhci->segment_pool, flags, &dma);
+	if (!seg->trbs) {
+		kfree(seg);
+		return 0;
+	}
+	xhci_dbg(xhci, "// Allocating segment at %p (virtual) 0x%llx (DMA)\n",
+			seg->trbs, (unsigned long long)dma);
+
+	memset(seg->trbs, 0, SEGMENT_SIZE);
+	seg->dma = dma;
+	seg->next = NULL;
+
+	return seg;
+}
+
+static void xhci_segment_free(struct xhci_hcd *xhci, struct xhci_segment *seg)
+{
+	if (!seg)
+		return;
+	if (seg->trbs) {
+		xhci_dbg(xhci, "Freeing DMA segment at %p (virtual) 0x%llx (DMA)\n",
+				seg->trbs, (unsigned long long)seg->dma);
+		dma_pool_free(xhci->segment_pool, seg->trbs, seg->dma);
+		seg->trbs = NULL;
+	}
+	xhci_dbg(xhci, "Freeing priv segment structure at %p\n", seg);
+	kfree(seg);
+}
+
+/*
+ * Make the prev segment point to the next segment.
+ *
+ * Change the last TRB in the prev segment to be a Link TRB which points to the
+ * DMA address of the next segment.  The caller needs to set any Link TRB
+ * related flags, such as End TRB, Toggle Cycle, and no snoop.
+ */
+static void xhci_link_segments(struct xhci_hcd *xhci, struct xhci_segment *prev,
+		struct xhci_segment *next, bool link_trbs)
+{
+	u32 val;
+
+	if (!prev || !next)
+		return;
+	prev->next = next;
+	if (link_trbs) {
+		prev->trbs[TRBS_PER_SEGMENT-1].link.segment_ptr[0] = next->dma;
+
+		/* Set the last TRB in the segment to have a TRB type ID of Link TRB */
+		val = prev->trbs[TRBS_PER_SEGMENT-1].link.control;
+		val &= ~TRB_TYPE_BITMASK;
+		val |= TRB_TYPE(TRB_LINK);
+		prev->trbs[TRBS_PER_SEGMENT-1].link.control = val;
+	}
+	xhci_dbg(xhci, "Linking segment 0x%llx to segment 0x%llx (DMA)\n",
+			(unsigned long long)prev->dma,
+			(unsigned long long)next->dma);
+}
+
+/* XXX: Do we need the hcd structure in all these functions? */
+void xhci_ring_free(struct xhci_hcd *xhci, struct xhci_ring *ring)
+{
+	struct xhci_segment *seg;
+	struct xhci_segment *first_seg;
+
+	if (!ring || !ring->first_seg)
+		return;
+	first_seg = ring->first_seg;
+	seg = first_seg->next;
+	xhci_dbg(xhci, "Freeing ring at %p\n", ring);
+	while (seg != first_seg) {
+		struct xhci_segment *next = seg->next;
+		xhci_segment_free(xhci, seg);
+		seg = next;
+	}
+	xhci_segment_free(xhci, first_seg);
+	ring->first_seg = NULL;
+	kfree(ring);
+}
+
+/**
+ * Create a new ring with zero or more segments.
+ *
+ * Link each segment together into a ring.
+ * Set the end flag and the cycle toggle bit on the last segment.
+ * See section 4.9.1 and figures 15 and 16.
+ */
+static struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci,
+		unsigned int num_segs, bool link_trbs, gfp_t flags)
+{
+	struct xhci_ring	*ring;
+	struct xhci_segment	*prev;
+
+	ring = kzalloc(sizeof *(ring), flags);
+	xhci_dbg(xhci, "Allocating ring at %p\n", ring);
+	if (!ring)
+		return 0;
+
+	INIT_LIST_HEAD(&ring->td_list);
+	INIT_LIST_HEAD(&ring->cancelled_td_list);
+	if (num_segs == 0)
+		return ring;
+
+	ring->first_seg = xhci_segment_alloc(xhci, flags);
+	if (!ring->first_seg)
+		goto fail;
+	num_segs--;
+
+	prev = ring->first_seg;
+	while (num_segs > 0) {
+		struct xhci_segment	*next;
+
+		next = xhci_segment_alloc(xhci, flags);
+		if (!next)
+			goto fail;
+		xhci_link_segments(xhci, prev, next, link_trbs);
+
+		prev = next;
+		num_segs--;
+	}
+	xhci_link_segments(xhci, prev, ring->first_seg, link_trbs);
+
+	if (link_trbs) {
+		/* See section 4.9.2.1 and 6.4.4.1 */
+		prev->trbs[TRBS_PER_SEGMENT-1].link.control |= (LINK_TOGGLE);
+		xhci_dbg(xhci, "Wrote link toggle flag to"
+				" segment %p (virtual), 0x%llx (DMA)\n",
+				prev, (unsigned long long)prev->dma);
+	}
+	/* The ring is empty, so the enqueue pointer == dequeue pointer */
+	ring->enqueue = ring->first_seg->trbs;
+	ring->enq_seg = ring->first_seg;
+	ring->dequeue = ring->enqueue;
+	ring->deq_seg = ring->first_seg;
+	/* The ring is initialized to 0. The producer must write 1 to the cycle
+	 * bit to handover ownership of the TRB, so PCS = 1.  The consumer must
+	 * compare CCS to the cycle bit to check ownership, so CCS = 1.
+	 */
+	ring->cycle_state = 1;
+
+	return ring;
+
+fail:
+	xhci_ring_free(xhci, ring);
+	return 0;
+}
+
+/* All the xhci_tds in the ring's TD list should be freed at this point */
+void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id)
+{
+	struct xhci_virt_device *dev;
+	int i;
+
+	/* Slot ID 0 is reserved */
+	if (slot_id == 0 || !xhci->devs[slot_id])
+		return;
+
+	dev = xhci->devs[slot_id];
+	xhci->dcbaa->dev_context_ptrs[2*slot_id] = 0;
+	xhci->dcbaa->dev_context_ptrs[2*slot_id + 1] = 0;
+	if (!dev)
+		return;
+
+	for (i = 0; i < 31; ++i)
+		if (dev->ep_rings[i])
+			xhci_ring_free(xhci, dev->ep_rings[i]);
+
+	if (dev->in_ctx)
+		dma_pool_free(xhci->device_pool,
+				dev->in_ctx, dev->in_ctx_dma);
+	if (dev->out_ctx)
+		dma_pool_free(xhci->device_pool,
+				dev->out_ctx, dev->out_ctx_dma);
+	kfree(xhci->devs[slot_id]);
+	xhci->devs[slot_id] = 0;
+}
+
+int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id,
+		struct usb_device *udev, gfp_t flags)
+{
+	dma_addr_t	dma;
+	struct xhci_virt_device *dev;
+
+	/* Slot ID 0 is reserved */
+	if (slot_id == 0 || xhci->devs[slot_id]) {
+		xhci_warn(xhci, "Bad Slot ID %d\n", slot_id);
+		return 0;
+	}
+
+	xhci->devs[slot_id] = kzalloc(sizeof(*xhci->devs[slot_id]), flags);
+	if (!xhci->devs[slot_id])
+		return 0;
+	dev = xhci->devs[slot_id];
+
+	/* Allocate the (output) device context that will be used in the HC */
+	dev->out_ctx = dma_pool_alloc(xhci->device_pool, flags, &dma);
+	if (!dev->out_ctx)
+		goto fail;
+	dev->out_ctx_dma = dma;
+	xhci_dbg(xhci, "Slot %d output ctx = 0x%llx (dma)\n", slot_id,
+			(unsigned long long)dma);
+	memset(dev->out_ctx, 0, sizeof(*dev->out_ctx));
+
+	/* Allocate the (input) device context for address device command */
+	dev->in_ctx = dma_pool_alloc(xhci->device_pool, flags, &dma);
+	if (!dev->in_ctx)
+		goto fail;
+	dev->in_ctx_dma = dma;
+	xhci_dbg(xhci, "Slot %d input ctx = 0x%llx (dma)\n", slot_id,
+			(unsigned long long)dma);
+	memset(dev->in_ctx, 0, sizeof(*dev->in_ctx));
+
+	/* Allocate endpoint 0 ring */
+	dev->ep_rings[0] = xhci_ring_alloc(xhci, 1, true, flags);
+	if (!dev->ep_rings[0])
+		goto fail;
+
+	init_completion(&dev->cmd_completion);
+
+	/*
+	 * Point to output device context in dcbaa; skip the output control
+	 * context, which is eight 32 bit fields (or 32 bytes long)
+	 */
+	xhci->dcbaa->dev_context_ptrs[2*slot_id] =
+		(u32) dev->out_ctx_dma + (32);
+	xhci_dbg(xhci, "Set slot id %d dcbaa entry %p to 0x%llx\n",
+			slot_id,
+			&xhci->dcbaa->dev_context_ptrs[2*slot_id],
+			(unsigned long long)dev->out_ctx_dma);
+	xhci->dcbaa->dev_context_ptrs[2*slot_id + 1] = 0;
+
+	return 1;
+fail:
+	xhci_free_virt_device(xhci, slot_id);
+	return 0;
+}
+
+/* Setup an xHCI virtual device for a Set Address command */
+int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *udev)
+{
+	struct xhci_virt_device *dev;
+	struct xhci_ep_ctx	*ep0_ctx;
+	struct usb_device	*top_dev;
+
+	dev = xhci->devs[udev->slot_id];
+	/* Slot ID 0 is reserved */
+	if (udev->slot_id == 0 || !dev) {
+		xhci_warn(xhci, "Slot ID %d is not assigned to this device\n",
+				udev->slot_id);
+		return -EINVAL;
+	}
+	ep0_ctx = &dev->in_ctx->ep[0];
+
+	/* 2) New slot context and endpoint 0 context are valid */
+	dev->in_ctx->add_flags = SLOT_FLAG | EP0_FLAG;
+
+	/* 3) Only the control endpoint is valid - one endpoint context */
+	dev->in_ctx->slot.dev_info |= LAST_CTX(1);
+
+	switch (udev->speed) {
+	case USB_SPEED_SUPER:
+		dev->in_ctx->slot.dev_info |= (u32) udev->route;
+		dev->in_ctx->slot.dev_info |= (u32) SLOT_SPEED_SS;
+		break;
+	case USB_SPEED_HIGH:
+		dev->in_ctx->slot.dev_info |= (u32) SLOT_SPEED_HS;
+		break;
+	case USB_SPEED_FULL:
+		dev->in_ctx->slot.dev_info |= (u32) SLOT_SPEED_FS;
+		break;
+	case USB_SPEED_LOW:
+		dev->in_ctx->slot.dev_info |= (u32) SLOT_SPEED_LS;
+		break;
+	case USB_SPEED_VARIABLE:
+		xhci_dbg(xhci, "FIXME xHCI doesn't support wireless speeds\n");
+		return -EINVAL;
+		break;
+	default:
+		/* Speed was set earlier, this shouldn't happen. */
+		BUG();
+	}
+	/* Find the root hub port this device is under */
+	for (top_dev = udev; top_dev->parent && top_dev->parent->parent;
+			top_dev = top_dev->parent)
+		/* Found device below root hub */;
+	dev->in_ctx->slot.dev_info2 |= (u32) ROOT_HUB_PORT(top_dev->portnum);
+	xhci_dbg(xhci, "Set root hub portnum to %d\n", top_dev->portnum);
+
+	/* Is this a LS/FS device under a HS hub? */
+	/*
+	 * FIXME: I don't think this is right, where does the TT info for the
+	 * roothub or parent hub come from?
+	 */
+	if ((udev->speed == USB_SPEED_LOW || udev->speed == USB_SPEED_FULL) &&
+			udev->tt) {
+		dev->in_ctx->slot.tt_info = udev->tt->hub->slot_id;
+		dev->in_ctx->slot.tt_info |= udev->ttport << 8;
+	}
+	xhci_dbg(xhci, "udev->tt = %p\n", udev->tt);
+	xhci_dbg(xhci, "udev->ttport = 0x%x\n", udev->ttport);
+
+	/* Step 4 - ring already allocated */
+	/* Step 5 */
+	ep0_ctx->ep_info2 = EP_TYPE(CTRL_EP);
+	/*
+	 * See section 4.3 bullet 6:
+	 * The default Max Packet size for ep0 is "8 bytes for a USB2
+	 * LS/FS/HS device or 512 bytes for a USB3 SS device"
+	 * XXX: Not sure about wireless USB devices.
+	 */
+	if (udev->speed == USB_SPEED_SUPER)
+		ep0_ctx->ep_info2 |= MAX_PACKET(512);
+	else
+		ep0_ctx->ep_info2 |= MAX_PACKET(8);
+	/* EP 0 can handle "burst" sizes of 1, so Max Burst Size field is 0 */
+	ep0_ctx->ep_info2 |= MAX_BURST(0);
+	ep0_ctx->ep_info2 |= ERROR_COUNT(3);
+
+	ep0_ctx->deq[0] =
+		dev->ep_rings[0]->first_seg->dma;
+	ep0_ctx->deq[0] |= dev->ep_rings[0]->cycle_state;
+	ep0_ctx->deq[1] = 0;
+
+	/* Steps 7 and 8 were done in xhci_alloc_virt_device() */
+
+	return 0;
+}
+
+/* Return the polling or NAK interval.
+ *
+ * The polling interval is expressed in "microframes".  If xHCI's Interval field
+ * is set to N, it will service the endpoint every 2^(Interval)*125us.
+ *
+ * The NAK interval is one NAK per 1 to 255 microframes, or no NAKs if interval
+ * is set to 0.
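+ *
+ * For example, a high-speed interrupt endpoint with bInterval = 4 is polled
+ * every 2^(4-1) = 8 microframes (1ms), so the Interval field is set to 3
+ * (2^3 * 125us = 1ms).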
+ */
+static inline unsigned int xhci_get_endpoint_interval(struct usb_device *udev,
+		struct usb_host_endpoint *ep)
+{
+	unsigned int interval = 0;
+
+	switch (udev->speed) {
+	case USB_SPEED_HIGH:
+		/* Max NAK rate */
+		if (usb_endpoint_xfer_control(&ep->desc) ||
+				usb_endpoint_xfer_bulk(&ep->desc))
+			interval = ep->desc.bInterval;
+		/* Fall through - SS and HS isoc/int have same decoding */
+	case USB_SPEED_SUPER:
+		if (usb_endpoint_xfer_int(&ep->desc) ||
+				usb_endpoint_xfer_isoc(&ep->desc)) {
+			if (ep->desc.bInterval == 0)
+				interval = 0;
+			else
+				interval = ep->desc.bInterval - 1;
+			if (interval > 15)
+				interval = 15;
+			if (interval != ep->desc.bInterval - 1)
+				dev_warn(&udev->dev, "ep %#x - rounding interval to %d microframes\n",
+						ep->desc.bEndpointAddress, 1 << interval);
+		}
+		break;
+	/* Convert bInterval (in 1-255 frames) to microframes and round down to
+	 * nearest power of 2.
+	 */
+	case USB_SPEED_FULL:
+	case USB_SPEED_LOW:
+		if (usb_endpoint_xfer_int(&ep->desc) ||
+				usb_endpoint_xfer_isoc(&ep->desc)) {
+			interval = fls(8*ep->desc.bInterval) - 1;
+			if (interval > 10)
+				interval = 10;
+			if (interval < 3)
+				interval = 3;
+			if ((1 << interval) != 8*ep->desc.bInterval)
+				dev_warn(&udev->dev, "ep %#x - rounding interval to %d microframes\n",
+						ep->desc.bEndpointAddress, 1 << interval);
+		}
+		break;
+	default:
+		BUG();
+	}
+	return EP_INTERVAL(interval);
+}
+
+static inline u32 xhci_get_endpoint_type(struct usb_device *udev,
+		struct usb_host_endpoint *ep)
+{
+	int in;
+	u32 type;
+
+	in = usb_endpoint_dir_in(&ep->desc);
+	if (usb_endpoint_xfer_control(&ep->desc)) {
+		type = EP_TYPE(CTRL_EP);
+	} else if (usb_endpoint_xfer_bulk(&ep->desc)) {
+		if (in)
+			type = EP_TYPE(BULK_IN_EP);
+		else
+			type = EP_TYPE(BULK_OUT_EP);
+	} else if (usb_endpoint_xfer_isoc(&ep->desc)) {
+		if (in)
+			type = EP_TYPE(ISOC_IN_EP);
+		else
+			type = EP_TYPE(ISOC_OUT_EP);
+	} else if (usb_endpoint_xfer_int(&ep->desc)) {
+		if (in)
+			type = EP_TYPE(INT_IN_EP);
+		else
+			type = EP_TYPE(INT_OUT_EP);
+	} else {
+		BUG();
+	}
+	return type;
+}
+
+int xhci_endpoint_init(struct xhci_hcd *xhci,
+		struct xhci_virt_device *virt_dev,
+		struct usb_device *udev,
+		struct usb_host_endpoint *ep,
+		gfp_t mem_flags)
+{
+	unsigned int ep_index;
+	struct xhci_ep_ctx *ep_ctx;
+	struct xhci_ring *ep_ring;
+	unsigned int max_packet;
+	unsigned int max_burst;
+
+	ep_index = xhci_get_endpoint_index(&ep->desc);
+	ep_ctx = &virt_dev->in_ctx->ep[ep_index];
+
+	/* Set up the endpoint ring */
+	virt_dev->new_ep_rings[ep_index] = xhci_ring_alloc(xhci, 1, true, mem_flags);
+	if (!virt_dev->new_ep_rings[ep_index])
+		return -ENOMEM;
+	ep_ring = virt_dev->new_ep_rings[ep_index];
+	ep_ctx->deq[0] = ep_ring->first_seg->dma | ep_ring->cycle_state;
+	ep_ctx->deq[1] = 0;
+
+	ep_ctx->ep_info = xhci_get_endpoint_interval(udev, ep);
+
+	/* FIXME dig Mult and streams info out of ep companion desc */
+
+	/* Allow 3 retries for everything but isoc */
+	if (!usb_endpoint_xfer_isoc(&ep->desc))
+		ep_ctx->ep_info2 = ERROR_COUNT(3);
+	else
+		ep_ctx->ep_info2 = ERROR_COUNT(0);
+
+	ep_ctx->ep_info2 |= xhci_get_endpoint_type(udev, ep);
+
+	/* Set the max packet size and max burst */
+	switch (udev->speed) {
+	case USB_SPEED_SUPER:
+		max_packet = ep->desc.wMaxPacketSize;
+		ep_ctx->ep_info2 |= MAX_PACKET(max_packet);
+		/* dig out max burst from ep companion desc */
+		max_packet = ep->ss_ep_comp->desc.bMaxBurst;
+		ep_ctx->ep_info2 |= MAX_BURST(max_packet);
+		break;
+	case USB_SPEED_HIGH:
+		/* bits 11:12 specify the number of additional transaction
+		 * opportunities per microframe (USB 2.0, section 9.6.6)
+		 */
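+		/* For example, wMaxPacketSize = 0x1400 encodes 2 additional
+		 * transactions per microframe, so Max Burst Size becomes 2.
+		 */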
+		if (usb_endpoint_xfer_isoc(&ep->desc) ||
+				usb_endpoint_xfer_int(&ep->desc)) {
+			max_burst = (ep->desc.wMaxPacketSize & 0x1800) >> 11;
+			ep_ctx->ep_info2 |= MAX_BURST(max_burst);
+		}
+		/* Fall through */
+	case USB_SPEED_FULL:
+	case USB_SPEED_LOW:
+		max_packet = ep->desc.wMaxPacketSize & 0x3ff;
+		ep_ctx->ep_info2 |= MAX_PACKET(max_packet);
+		break;
+	default:
+		BUG();
+	}
+	/* FIXME Debug endpoint context */
+	return 0;
+}
+
+void xhci_endpoint_zero(struct xhci_hcd *xhci,
+		struct xhci_virt_device *virt_dev,
+		struct usb_host_endpoint *ep)
+{
+	unsigned int ep_index;
+	struct xhci_ep_ctx *ep_ctx;
+
+	ep_index = xhci_get_endpoint_index(&ep->desc);
+	ep_ctx = &virt_dev->in_ctx->ep[ep_index];
+
+	ep_ctx->ep_info = 0;
+	ep_ctx->ep_info2 = 0;
+	ep_ctx->deq[0] = 0;
+	ep_ctx->deq[1] = 0;
+	ep_ctx->tx_info = 0;
+	/* Don't free the endpoint ring until the set interface or configuration
+	 * request succeeds.
+	 */
+}
+
+void xhci_mem_cleanup(struct xhci_hcd *xhci)
+{
+	struct pci_dev	*pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
+	int size;
+	int i;
+
+	/* Free the Event Ring Segment Table and the actual Event Ring */
+	xhci_writel(xhci, 0, &xhci->ir_set->erst_size);
+	xhci_writel(xhci, 0, &xhci->ir_set->erst_base[0]);
+	xhci_writel(xhci, 0, &xhci->ir_set->erst_base[1]);
+	xhci_writel(xhci, 0, &xhci->ir_set->erst_dequeue[0]);
+	xhci_writel(xhci, 0, &xhci->ir_set->erst_dequeue[1]);
+	size = sizeof(struct xhci_erst_entry)*(xhci->erst.num_entries);
+	if (xhci->erst.entries)
+		pci_free_consistent(pdev, size,
+				xhci->erst.entries, xhci->erst.erst_dma_addr);
+	xhci->erst.entries = NULL;
+	xhci_dbg(xhci, "Freed ERST\n");
+	if (xhci->event_ring)
+		xhci_ring_free(xhci, xhci->event_ring);
+	xhci->event_ring = NULL;
+	xhci_dbg(xhci, "Freed event ring\n");
+
+	xhci_writel(xhci, 0, &xhci->op_regs->cmd_ring[0]);
+	xhci_writel(xhci, 0, &xhci->op_regs->cmd_ring[1]);
+	if (xhci->cmd_ring)
+		xhci_ring_free(xhci, xhci->cmd_ring);
+	xhci->cmd_ring = NULL;
+	xhci_dbg(xhci, "Freed command ring\n");
+
+	for (i = 1; i < MAX_HC_SLOTS; ++i)
+		xhci_free_virt_device(xhci, i);
+
+	if (xhci->segment_pool)
+		dma_pool_destroy(xhci->segment_pool);
+	xhci->segment_pool = NULL;
+	xhci_dbg(xhci, "Freed segment pool\n");
+
+	if (xhci->device_pool)
+		dma_pool_destroy(xhci->device_pool);
+	xhci->device_pool = NULL;
+	xhci_dbg(xhci, "Freed device context pool\n");
+
+	xhci_writel(xhci, 0, &xhci->op_regs->dcbaa_ptr[0]);
+	xhci_writel(xhci, 0, &xhci->op_regs->dcbaa_ptr[1]);
+	if (xhci->dcbaa)
+		pci_free_consistent(pdev, sizeof(*xhci->dcbaa),
+				xhci->dcbaa, xhci->dcbaa->dma);
+	xhci->dcbaa = NULL;
+
+	xhci->page_size = 0;
+	xhci->page_shift = 0;
+}
+
+int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
+{
+	dma_addr_t	dma;
+	struct device	*dev = xhci_to_hcd(xhci)->self.controller;
+	unsigned int	val, val2;
+	struct xhci_segment	*seg;
+	u32 page_size;
+	int i;
+
+	page_size = xhci_readl(xhci, &xhci->op_regs->page_size);
+	xhci_dbg(xhci, "Supported page size register = 0x%x\n", page_size);
+	for (i = 0; i < 16; i++) {
+		if ((0x1 & page_size) != 0)
+			break;
+		page_size = page_size >> 1;
+	}
+	if (i < 16)
+		xhci_dbg(xhci, "Supported page size of %iK\n", (1 << (i+12)) / 1024);
+	else
+		xhci_warn(xhci, "WARN: no supported page size\n");
+	/* Use 4K pages, since that's common and the minimum the HC supports */
+	xhci->page_shift = 12;
+	xhci->page_size = 1 << xhci->page_shift;
+	xhci_dbg(xhci, "HCD page size set to %iK\n", xhci->page_size / 1024);
+
+	/*
+	 * Program the Number of Device Slots Enabled field in the CONFIG
+	 * register with the max value of slots the HC can handle.
+	 */
+	val = HCS_MAX_SLOTS(xhci_readl(xhci, &xhci->cap_regs->hcs_params1));
+	xhci_dbg(xhci, "// xHC can handle at most %d device slots.\n",
+			(unsigned int) val);
+	val2 = xhci_readl(xhci, &xhci->op_regs->config_reg);
+	val |= (val2 & ~HCS_SLOTS_MASK);
+	xhci_dbg(xhci, "// Setting Max device slots reg = 0x%x.\n",
+			(unsigned int) val);
+	xhci_writel(xhci, val, &xhci->op_regs->config_reg);
+
+	/*
+	 * Section 5.4.8 - doorbell array must be
+	 * "physically contiguous and 64-byte (cache line) aligned".
+	 */
+	xhci->dcbaa = pci_alloc_consistent(to_pci_dev(dev),
+			sizeof(*xhci->dcbaa), &dma);
+	if (!xhci->dcbaa)
+		goto fail;
+	memset(xhci->dcbaa, 0, sizeof *(xhci->dcbaa));
+	xhci->dcbaa->dma = dma;
+	xhci_dbg(xhci, "// Device context base array address = 0x%llx (DMA), %p (virt)\n",
+			(unsigned long long)xhci->dcbaa->dma, xhci->dcbaa);
+	xhci_writel(xhci, dma, &xhci->op_regs->dcbaa_ptr[0]);
+	xhci_writel(xhci, (u32) 0, &xhci->op_regs->dcbaa_ptr[1]);
+
+	/*
+	 * Initialize the ring segment pool.  The ring must be a contiguous
+	 * structure comprised of TRBs.  The TRBs must be 16 byte aligned,
+	 * however, the command ring segment needs 64-byte aligned segments,
+	 * so we pick the greater alignment need.
+	 */
+	xhci->segment_pool = dma_pool_create("xHCI ring segments", dev,
+			SEGMENT_SIZE, 64, xhci->page_size);
+	/* See Table 46 and Note on Figure 55 */
+	/* FIXME support 64-byte contexts */
+	xhci->device_pool = dma_pool_create("xHCI input/output contexts", dev,
+			sizeof(struct xhci_device_control),
+			64, xhci->page_size);
+	if (!xhci->segment_pool || !xhci->device_pool)
+		goto fail;
+
+	/* Set up the command ring to have one segment for now. */
+	xhci->cmd_ring = xhci_ring_alloc(xhci, 1, true, flags);
+	if (!xhci->cmd_ring)
+		goto fail;
+	xhci_dbg(xhci, "Allocated command ring at %p\n", xhci->cmd_ring);
+	xhci_dbg(xhci, "First segment DMA is 0x%llx\n",
+			(unsigned long long)xhci->cmd_ring->first_seg->dma);
+
+	/* Set the address in the Command Ring Control register */
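+	/* The low bits of CRCR hold the ring cycle state and control flags;
+	 * the 64-byte-aligned segment address occupies the upper bits.
+	 */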
+	val = xhci_readl(xhci, &xhci->op_regs->cmd_ring[0]);
+	val = (val & ~CMD_RING_ADDR_MASK) |
+		(xhci->cmd_ring->first_seg->dma & CMD_RING_ADDR_MASK) |
+		xhci->cmd_ring->cycle_state;
+	xhci_dbg(xhci, "// Setting command ring address low bits to 0x%x\n", val);
+	xhci_writel(xhci, val, &xhci->op_regs->cmd_ring[0]);
+	xhci_dbg(xhci, "// Setting command ring address high bits to 0x0\n");
+	xhci_writel(xhci, (u32) 0, &xhci->op_regs->cmd_ring[1]);
+	xhci_dbg_cmd_ptrs(xhci);
+
+	val = xhci_readl(xhci, &xhci->cap_regs->db_off);
+	val &= DBOFF_MASK;
+	xhci_dbg(xhci, "// Doorbell array is located at offset 0x%x"
+			" from cap regs base addr\n", val);
+	xhci->dba = (void *) xhci->cap_regs + val;
+	xhci_dbg_regs(xhci);
+	xhci_print_run_regs(xhci);
+	/* Set ir_set to interrupt register set 0 */
+	xhci->ir_set = (void *) xhci->run_regs->ir_set;
+
+	/*
+	 * Event ring setup: Allocate a normal ring, but also setup
+	 * the event ring segment table (ERST).  Section 4.9.3.
+	 */
+	xhci_dbg(xhci, "// Allocating event ring\n");
+	xhci->event_ring = xhci_ring_alloc(xhci, ERST_NUM_SEGS, false, flags);
+	if (!xhci->event_ring)
+		goto fail;
+
+	xhci->erst.entries = pci_alloc_consistent(to_pci_dev(dev),
+			sizeof(struct xhci_erst_entry)*ERST_NUM_SEGS, &dma);
+	if (!xhci->erst.entries)
+		goto fail;
+	xhci_dbg(xhci, "// Allocated event ring segment table at 0x%llx\n",
+			(unsigned long long)dma);
+
+	memset(xhci->erst.entries, 0, sizeof(struct xhci_erst_entry)*ERST_NUM_SEGS);
+	xhci->erst.num_entries = ERST_NUM_SEGS;
+	xhci->erst.erst_dma_addr = dma;
+	xhci_dbg(xhci, "Set ERST to 0; private num segs = %i, virt addr = %p, dma addr = 0x%llx\n",
+			xhci->erst.num_entries,
+			xhci->erst.entries,
+			(unsigned long long)xhci->erst.erst_dma_addr);
+
+	/* set ring base address and size for each segment table entry */
+	for (val = 0, seg = xhci->event_ring->first_seg; val < ERST_NUM_SEGS; val++) {
+		struct xhci_erst_entry *entry = &xhci->erst.entries[val];
+		entry->seg_addr[0] = seg->dma;
+		entry->seg_addr[1] = 0;
+		entry->seg_size = TRBS_PER_SEGMENT;
+		entry->rsvd = 0;
+		seg = seg->next;
+	}
+
+	/* set ERST count with the number of entries in the segment table */
+	val = xhci_readl(xhci, &xhci->ir_set->erst_size);
+	val &= ERST_SIZE_MASK;
+	val |= ERST_NUM_SEGS;
+	xhci_dbg(xhci, "// Write ERST size = %i to ir_set 0 (some bits preserved)\n",
+			val);
+	xhci_writel(xhci, val, &xhci->ir_set->erst_size);
+
+	xhci_dbg(xhci, "// Set ERST entries to point to event ring.\n");
+	/* set the segment table base address */
+	xhci_dbg(xhci, "// Set ERST base address for ir_set 0 = 0x%llx\n",
+			(unsigned long long)xhci->erst.erst_dma_addr);
+	val = xhci_readl(xhci, &xhci->ir_set->erst_base[0]);
+	val &= ERST_PTR_MASK;
+	val |= (xhci->erst.erst_dma_addr & ~ERST_PTR_MASK);
+	xhci_writel(xhci, val, &xhci->ir_set->erst_base[0]);
+	xhci_writel(xhci, 0, &xhci->ir_set->erst_base[1]);
+
+	/* Set the event ring dequeue address */
+	xhci_set_hc_event_deq(xhci);
+	xhci_dbg(xhci, "Wrote ERST address to ir_set 0.\n");
+	xhci_print_ir_set(xhci, xhci->ir_set, 0);
+
+	/*
+	 * XXX: Might need to set the Interrupter Moderation Register to
+	 * something other than the default (~1ms minimum between interrupts).
+	 * See section 5.5.1.2.
+	 */
+	init_completion(&xhci->addr_dev);
+	for (i = 0; i < MAX_HC_SLOTS; ++i)
+		xhci->devs[i] = 0;
+
+	return 0;
+fail:
+	xhci_warn(xhci, "Couldn't initialize memory\n");
+	xhci_mem_cleanup(xhci);
+	return -ENOMEM;
+}
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
new file mode 100644
index 0000000..1462709
--- /dev/null
+++ b/drivers/usb/host/xhci-pci.c
@@ -0,0 +1,166 @@
+/*
+ * xHCI host controller driver PCI Bus Glue.
+ *
+ * Copyright (C) 2008 Intel Corp.
+ *
+ * Author: Sarah Sharp
+ * Some code borrowed from the Linux EHCI driver.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/pci.h>
+
+#include "xhci.h"
+
+static const char hcd_name[] = "xhci_hcd";
+
+/* called after powerup, by probe or system-pm "wakeup" */
+static int xhci_pci_reinit(struct xhci_hcd *xhci, struct pci_dev *pdev)
+{
+	/*
+	 * TODO: Implement finding debug ports later.
+	 * TODO: see if there are any quirks that need to be added to handle
+	 * new extended capabilities.
+	 */
+
+	/* PCI Memory-Write-Invalidate cycle support is optional (uncommon) */
+	if (!pci_set_mwi(pdev))
+		xhci_dbg(xhci, "MWI active\n");
+
+	xhci_dbg(xhci, "Finished xhci_pci_reinit\n");
+	return 0;
+}
+
+/* called during probe() after chip reset completes */
+static int xhci_pci_setup(struct usb_hcd *hcd)
+{
+	struct xhci_hcd		*xhci = hcd_to_xhci(hcd);
+	struct pci_dev		*pdev = to_pci_dev(hcd->self.controller);
+	int			retval;
+
+	xhci->cap_regs = hcd->regs;
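+	/* Capability registers sit at the start of the MMIO region; CAPLENGTH
+	 * gives the offset of the operational registers and RTSOFF gives the
+	 * offset of the runtime registers.
+	 */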
+	xhci->op_regs = hcd->regs +
+		HC_LENGTH(xhci_readl(xhci, &xhci->cap_regs->hc_capbase));
+	xhci->run_regs = hcd->regs +
+		(xhci_readl(xhci, &xhci->cap_regs->run_regs_off) & RTSOFF_MASK);
+	/* Cache read-only capability registers */
+	xhci->hcs_params1 = xhci_readl(xhci, &xhci->cap_regs->hcs_params1);
+	xhci->hcs_params2 = xhci_readl(xhci, &xhci->cap_regs->hcs_params2);
+	xhci->hcs_params3 = xhci_readl(xhci, &xhci->cap_regs->hcs_params3);
+	xhci->hcc_params = xhci_readl(xhci, &xhci->cap_regs->hcc_params);
+	xhci_print_registers(xhci);
+
+	/* Make sure the HC is halted. */
+	retval = xhci_halt(xhci);
+	if (retval)
+		return retval;
+
+	xhci_dbg(xhci, "Resetting HCD\n");
+	/* Reset the internal HC memory state and registers. */
+	retval = xhci_reset(xhci);
+	if (retval)
+		return retval;
+	xhci_dbg(xhci, "Reset complete\n");
+
+	xhci_dbg(xhci, "Calling HCD init\n");
+	/* Initialize HCD and host controller data structures. */
+	retval = xhci_init(hcd);
+	if (retval)
+		return retval;
+	xhci_dbg(xhci, "Called HCD init\n");
+
+	pci_read_config_byte(pdev, XHCI_SBRN_OFFSET, &xhci->sbrn);
+	xhci_dbg(xhci, "Got SBRN %u\n", (unsigned int) xhci->sbrn);
+
+	/* Find any debug ports */
+	return xhci_pci_reinit(xhci, pdev);
+}
+
+static const struct hc_driver xhci_pci_hc_driver = {
+	.description =		hcd_name,
+	.product_desc =		"xHCI Host Controller",
+	.hcd_priv_size =	sizeof(struct xhci_hcd),
+
+	/*
+	 * generic hardware linkage
+	 */
+	.irq =			xhci_irq,
+	.flags =		HCD_MEMORY | HCD_USB3,
+
+	/*
+	 * basic lifecycle operations
+	 */
+	.reset =		xhci_pci_setup,
+	.start =		xhci_run,
+	/* suspend and resume implemented later */
+	.stop =			xhci_stop,
+	.shutdown =		xhci_shutdown,
+
+	/*
+	 * managing i/o requests and associated device resources
+	 */
+	.urb_enqueue =		xhci_urb_enqueue,
+	.urb_dequeue =		xhci_urb_dequeue,
+	.alloc_dev =		xhci_alloc_dev,
+	.free_dev =		xhci_free_dev,
+	.add_endpoint =		xhci_add_endpoint,
+	.drop_endpoint =	xhci_drop_endpoint,
+	.check_bandwidth =	xhci_check_bandwidth,
+	.reset_bandwidth =	xhci_reset_bandwidth,
+	.address_device =	xhci_address_device,
+
+	/*
+	 * scheduling support
+	 */
+	.get_frame_number =	xhci_get_frame,
+
+	/* Root hub support */
+	.hub_control =		xhci_hub_control,
+	.hub_status_data =	xhci_hub_status_data,
+};
+
+/*-------------------------------------------------------------------------*/
+
+/* PCI driver selection metadata; PCI hotplugging uses this */
+static const struct pci_device_id pci_ids[] = { {
+	/* handle any USB 3.0 xHCI controller */
+	PCI_DEVICE_CLASS(PCI_CLASS_SERIAL_USB_XHCI, ~0),
+	.driver_data =	(unsigned long) &xhci_pci_hc_driver,
+	},
+	{ /* end: all zeroes */ }
+};
+MODULE_DEVICE_TABLE(pci, pci_ids);
+
+/* pci driver glue; this is a "new style" PCI driver module */
+static struct pci_driver xhci_pci_driver = {
+	.name =		(char *) hcd_name,
+	.id_table =	pci_ids,
+
+	.probe =	usb_hcd_pci_probe,
+	.remove =	usb_hcd_pci_remove,
+	/* suspend and resume implemented later */
+
+	.shutdown = 	usb_hcd_pci_shutdown,
+};
+
+int xhci_register_pci(void)
+{
+	return pci_register_driver(&xhci_pci_driver);
+}
+
+void xhci_unregister_pci(void)
+{
+	pci_unregister_driver(&xhci_pci_driver);
+}
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
new file mode 100644
index 0000000..02d8198
--- /dev/null
+++ b/drivers/usb/host/xhci-ring.c
@@ -0,0 +1,1648 @@
+/*
+ * xHCI host controller driver
+ *
+ * Copyright (C) 2008 Intel Corp.
+ *
+ * Author: Sarah Sharp
+ * Some code borrowed from the Linux EHCI driver.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+/*
+ * Ring initialization rules:
+ * 1. Each segment is initialized to zero, except for link TRBs.
+ * 2. Ring cycle state = 0.  This represents Producer Cycle State (PCS) or
+ *    Consumer Cycle State (CCS), depending on ring function.
+ * 3. Enqueue pointer = dequeue pointer = address of first TRB in the segment.
+ *
+ * Ring behavior rules:
+ * 1. A ring is empty if enqueue == dequeue.  This means there will always be at
+ *    least one free TRB in the ring.  This is useful if you want to turn that
+ *    into a link TRB and expand the ring.
+ * 2. When incrementing an enqueue or dequeue pointer, if the next TRB is a
+ *    link TRB, then load the pointer with the address in the link TRB.  If the
+ *    link TRB had its toggle bit set, you may need to update the ring cycle
+ *    state (see cycle bit rules).  You may have to do this multiple times
+ *    until you reach a non-link TRB.
+ * 3. A ring is full if enqueue++ (for the definition of increment above)
+ *    equals the dequeue pointer.
+ *
+ * Cycle bit rules:
+ * 1. When a consumer increments a dequeue pointer and encounters a toggle bit
+ *    in a link TRB, it must toggle the ring cycle state.
+ * 2. When a producer increments an enqueue pointer and encounters a toggle bit
+ *    in a link TRB, it must toggle the ring cycle state.
+ *
+ * Producer rules:
+ * 1. Check if ring is full before you enqueue.
+ * 2. Write the ring cycle state to the cycle bit in the TRB you're enqueuing.
+ *    Update enqueue pointer between each write (which may update the ring
+ *    cycle state).
+ * 3. Notify consumer.  If SW is producer, it rings the doorbell for command
+ *    and endpoint rings.  HC is the producer for the event ring, and it
+ *    generates an interrupt according to interrupt modulation rules.
+ *
+ * Consumer rules:
+ * 1. Check if TRB belongs to you.  If the cycle bit == your ring cycle state,
+ *    the TRB is owned by the consumer.
+ * 2. Update dequeue pointer (which may update the ring cycle state) and
+ *    continue processing TRBs until you reach a TRB which is not owned by you.
+ * 3. Notify the producer.  SW is the consumer for the event ring, and it
+ *    updates event ring dequeue pointer.  HC is the consumer for the command and
+ *    endpoint rings; it generates events on the event ring for these.
+ */
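+
+/*
+ * Illustrative sketch (comment only, not compiled) of the consumer ownership
+ * test implied by the cycle bit rules above; it mirrors the check made in
+ * xhci_handle_event() below:
+ *
+ *	if ((trb->event_cmd.flags & TRB_CYCLE) == ring->cycle_state)
+ *		process the TRB and advance the dequeue pointer;
+ *	else
+ *		stop - the hardware still owns this TRB;
+ */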
+
+#include <linux/scatterlist.h>
+#include "xhci.h"
+
+/*
+ * Returns zero if the TRB isn't in this segment, otherwise it returns the DMA
+ * address of the TRB.
+ */
+dma_addr_t xhci_trb_virt_to_dma(struct xhci_segment *seg,
+		union xhci_trb *trb)
+{
+	unsigned long segment_offset;
+
+	if (!seg || !trb || trb < seg->trbs)
+		return 0;
+	/* offset in TRBs */
+	segment_offset = trb - seg->trbs;
+	if (segment_offset >= TRBS_PER_SEGMENT)
+		return 0;
+	return seg->dma + (segment_offset * sizeof(*trb));
+}
+
+/* Does this link TRB point to the first segment in a ring,
+ * or was the previous TRB the last TRB on the last segment in the ERST?
+ */
+static inline bool last_trb_on_last_seg(struct xhci_hcd *xhci, struct xhci_ring *ring,
+		struct xhci_segment *seg, union xhci_trb *trb)
+{
+	if (ring == xhci->event_ring)
+		return (trb == &seg->trbs[TRBS_PER_SEGMENT]) &&
+			(seg->next == xhci->event_ring->first_seg);
+	else
+		return trb->link.control & LINK_TOGGLE;
+}
+
+/* Is this TRB a link TRB, or was the previous TRB the last TRB in this event
+ * ring segment?  I.e. would the updated event TRB pointer step off the end of
+ * the event seg?
+ */
+static inline int last_trb(struct xhci_hcd *xhci, struct xhci_ring *ring,
+		struct xhci_segment *seg, union xhci_trb *trb)
+{
+	if (ring == xhci->event_ring)
+		return trb == &seg->trbs[TRBS_PER_SEGMENT];
+	else
+		return (trb->link.control & TRB_TYPE_BITMASK) == TRB_TYPE(TRB_LINK);
+}
+
+/* Updates trb to point to the next TRB in the ring, and updates seg if the next
+ * TRB is in a new segment.  This does not skip over link TRBs, and it does not
+ * affect the ring dequeue or enqueue pointers.
+ */
+static void next_trb(struct xhci_hcd *xhci,
+		struct xhci_ring *ring,
+		struct xhci_segment **seg,
+		union xhci_trb **trb)
+{
+	if (last_trb(xhci, ring, *seg, *trb)) {
+		*seg = (*seg)->next;
+		*trb = ((*seg)->trbs);
+	} else {
+		(*trb)++;
+	}
+}
+
+/*
+ * See Cycle bit rules. SW is the consumer for the event ring only.
+ * Don't make a ring full of link TRBs.  That would be dumb and this would loop.
+ */
+static void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring, bool consumer)
+{
+	union xhci_trb *next = ++(ring->dequeue);
+
+	ring->deq_updates++;
+	/* Update the dequeue pointer further if that was a link TRB or we're at
+	 * the end of an event ring segment (which doesn't have link TRBs)
+	 */
+	while (last_trb(xhci, ring, ring->deq_seg, next)) {
+		if (consumer && last_trb_on_last_seg(xhci, ring, ring->deq_seg, next)) {
+			ring->cycle_state = (ring->cycle_state ? 0 : 1);
+			if (!in_interrupt())
+				xhci_dbg(xhci, "Toggle cycle state for ring %p = %i\n",
+						ring,
+						(unsigned int) ring->cycle_state);
+		}
+		ring->deq_seg = ring->deq_seg->next;
+		ring->dequeue = ring->deq_seg->trbs;
+		next = ring->dequeue;
+	}
+}
+
+/*
+ * See Cycle bit rules. SW is the consumer for the event ring only.
+ * Don't make a ring full of link TRBs.  That would be dumb and this would loop.
+ *
+ * If we've just enqueued a TRB that is in the middle of a TD (meaning the
+ * chain bit is set), then set the chain bit in all the following link TRBs.
+ * If we've enqueued the last TRB in a TD, make sure the following link TRBs
+ * have their chain bit cleared (so that each Link TRB is a separate TD).
+ *
+ * Section 6.4.4.1 of the 0.95 spec says link TRBs cannot have the chain bit
+ * set, but other sections talk about dealing with the chain bit set.
+ * Assume section 6.4.4.1 is wrong, and the chain bit can be set in a Link TRB.
+ */
+static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring, bool consumer)
+{
+	u32 chain;
+	union xhci_trb *next;
+
+	chain = ring->enqueue->generic.field[3] & TRB_CHAIN;
+	next = ++(ring->enqueue);
+
+	ring->enq_updates++;
+	/* Update the enqueue pointer further if that was a link TRB or we're at
+	 * the end of an event ring segment (which doesn't have link TRBs)
+	 */
+	while (last_trb(xhci, ring, ring->enq_seg, next)) {
+		if (!consumer) {
+			if (ring != xhci->event_ring) {
+				next->link.control &= ~TRB_CHAIN;
+				next->link.control |= chain;
+				/* Give this link TRB to the hardware */
+				wmb();
+				if (next->link.control & TRB_CYCLE)
+					next->link.control &= (u32) ~TRB_CYCLE;
+				else
+					next->link.control |= (u32) TRB_CYCLE;
+			}
+			/* Toggle the cycle bit after the last ring segment. */
+			if (last_trb_on_last_seg(xhci, ring, ring->enq_seg, next)) {
+				ring->cycle_state = (ring->cycle_state ? 0 : 1);
+				if (!in_interrupt())
+					xhci_dbg(xhci, "Toggle cycle state for ring %p = %i\n",
+							ring,
+							(unsigned int) ring->cycle_state);
+			}
+		}
+		ring->enq_seg = ring->enq_seg->next;
+		ring->enqueue = ring->enq_seg->trbs;
+		next = ring->enqueue;
+	}
+}
+
+/*
+ * Check to see if there's room to enqueue num_trbs on the ring.  See rules
+ * above.
+ * FIXME: this would be simpler and faster if we just kept track of the number
+ * of free TRBs in a ring.
+ */
+static int room_on_ring(struct xhci_hcd *xhci, struct xhci_ring *ring,
+		unsigned int num_trbs)
+{
+	int i;
+	union xhci_trb *enq = ring->enqueue;
+	struct xhci_segment *enq_seg = ring->enq_seg;
+
+	/* Check if ring is empty */
+	if (enq == ring->dequeue)
+		return 1;
+	/* Make sure there's an extra empty TRB available */
+	for (i = 0; i <= num_trbs; ++i) {
+		if (enq == ring->dequeue)
+			return 0;
+		enq++;
+		while (last_trb(xhci, ring, enq_seg, enq)) {
+			enq_seg = enq_seg->next;
+			enq = enq_seg->trbs;
+		}
+	}
+	return 1;
+}
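+
+/*
+ * Hedged sketch of the simpler bookkeeping the FIXME above suggests: if
+ * struct xhci_ring carried a hypothetical num_trbs_free counter (it does not
+ * here), decremented in inc_enq() and incremented in inc_deq(), the check
+ * would reduce to
+ *
+ *	return ring->num_trbs_free > num_trbs;
+ *
+ * keeping the one spare TRB that the ring rules require.
+ */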
+
+void xhci_set_hc_event_deq(struct xhci_hcd *xhci)
+{
+	u32 temp;
+	dma_addr_t deq;
+
+	deq = xhci_trb_virt_to_dma(xhci->event_ring->deq_seg,
+			xhci->event_ring->dequeue);
+	if (deq == 0 && !in_interrupt())
+		xhci_warn(xhci, "WARN something wrong with SW event ring "
+				"dequeue ptr.\n");
+	/* Update HC event ring dequeue pointer */
+	temp = xhci_readl(xhci, &xhci->ir_set->erst_dequeue[0]);
+	temp &= ERST_PTR_MASK;
+	if (!in_interrupt())
+		xhci_dbg(xhci, "// Write event ring dequeue pointer\n");
+	xhci_writel(xhci, 0, &xhci->ir_set->erst_dequeue[1]);
+	xhci_writel(xhci, (deq & ~ERST_PTR_MASK) | temp,
+			&xhci->ir_set->erst_dequeue[0]);
+}
+
+/* Ring the host controller doorbell after placing a command on the ring */
+void xhci_ring_cmd_db(struct xhci_hcd *xhci)
+{
+	u32 temp;
+
+	xhci_dbg(xhci, "// Ding dong!\n");
+	temp = xhci_readl(xhci, &xhci->dba->doorbell[0]) & DB_MASK;
+	xhci_writel(xhci, temp | DB_TARGET_HOST, &xhci->dba->doorbell[0]);
+	/* Flush PCI posted writes */
+	xhci_readl(xhci, &xhci->dba->doorbell[0]);
+}
+
+static void ring_ep_doorbell(struct xhci_hcd *xhci,
+		unsigned int slot_id,
+		unsigned int ep_index)
+{
+	struct xhci_ring *ep_ring;
+	u32 field;
+	__u32 __iomem *db_addr = &xhci->dba->doorbell[slot_id];
+
+	ep_ring = xhci->devs[slot_id]->ep_rings[ep_index];
+	/* Don't ring the doorbell for this endpoint if there are pending
+	 * cancellations because we don't want to interrupt processing.
+	 */
+	if (!ep_ring->cancels_pending && !(ep_ring->state & SET_DEQ_PENDING)) {
+		field = xhci_readl(xhci, db_addr) & DB_MASK;
+		xhci_writel(xhci, field | EPI_TO_DB(ep_index), db_addr);
+		/* Flush PCI posted writes - FIXME Matthew Wilcox says this
+		 * isn't time-critical and we shouldn't make the CPU wait for
+		 * the flush.
+		 */
+		xhci_readl(xhci, db_addr);
+	}
+}
+
+/*
+ * Find the segment that trb is in.  Start searching in start_seg.
+ * If we must move past a segment that has a link TRB with a toggle cycle state
+ * bit set, then we will toggle the value pointed at by cycle_state.
+ */
+static struct xhci_segment *find_trb_seg(
+		struct xhci_segment *start_seg,
+		union xhci_trb	*trb, int *cycle_state)
+{
+	struct xhci_segment *cur_seg = start_seg;
+	struct xhci_generic_trb *generic_trb;
+
+	while (cur_seg->trbs > trb ||
+			&cur_seg->trbs[TRBS_PER_SEGMENT - 1] < trb) {
+		generic_trb = &cur_seg->trbs[TRBS_PER_SEGMENT - 1].generic;
+		if (TRB_TYPE(generic_trb->field[3]) == TRB_LINK &&
+				(generic_trb->field[3] & LINK_TOGGLE))
+			*cycle_state = ~(*cycle_state) & 0x1;
+		cur_seg = cur_seg->next;
+		if (cur_seg == start_seg)
+			/* Looped over the entire list.  Oops! */
+			return NULL;
+	}
+	return cur_seg;
+}
+
+struct dequeue_state {
+	struct xhci_segment *new_deq_seg;
+	union xhci_trb *new_deq_ptr;
+	int new_cycle_state;
+};
+
+/*
+ * Move the xHC's endpoint ring dequeue pointer past cur_td.
+ * Record the new state of the xHC's endpoint ring dequeue segment,
+ * dequeue pointer, and new consumer cycle state in state.
+ * Update our internal representation of the ring's dequeue pointer.
+ *
+ * We do this in three jumps:
+ *  - First we update our new ring state to be the same as when the xHC stopped.
+ *  - Then we traverse the ring to find the segment that contains
+ *    the last TRB in the TD.  We toggle the xHC's new cycle state when we pass
+ *    any link TRBs with the toggle cycle bit set.
+ *  - Finally we move the dequeue state one TRB further, toggling the cycle bit
+ *    if we've moved it past a link TRB with the toggle cycle bit set.
+ */
+static void find_new_dequeue_state(struct xhci_hcd *xhci,
+		unsigned int slot_id, unsigned int ep_index,
+		struct xhci_td *cur_td, struct dequeue_state *state)
+{
+	struct xhci_virt_device *dev = xhci->devs[slot_id];
+	struct xhci_ring *ep_ring = dev->ep_rings[ep_index];
+	struct xhci_generic_trb *trb;
+
+	state->new_cycle_state = 0;
+	state->new_deq_seg = find_trb_seg(cur_td->start_seg,
+			ep_ring->stopped_trb,
+			&state->new_cycle_state);
+	if (!state->new_deq_seg)
+		BUG();
+	/* Dig out the cycle state saved by the xHC during the stop ep cmd */
+	state->new_cycle_state = 0x1 & dev->out_ctx->ep[ep_index].deq[0];
+
+	state->new_deq_ptr = cur_td->last_trb;
+	state->new_deq_seg = find_trb_seg(state->new_deq_seg,
+			state->new_deq_ptr,
+			&state->new_cycle_state);
+	if (!state->new_deq_seg)
+		BUG();
+
+	trb = &state->new_deq_ptr->generic;
+	if (TRB_TYPE(trb->field[3]) == TRB_LINK &&
+				(trb->field[3] & LINK_TOGGLE))
+		state->new_cycle_state = ~(state->new_cycle_state) & 0x1;
+	next_trb(xhci, ep_ring, &state->new_deq_seg, &state->new_deq_ptr);
+
+	/* Don't update the ring cycle state for the producer (us). */
+	ep_ring->dequeue = state->new_deq_ptr;
+	ep_ring->deq_seg = state->new_deq_seg;
+}
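+
+/*
+ * Worked example of the cycle math above (illustrative values only): if the
+ * xHC saved a cycle state of 1 in the output context and the walk from the
+ * stopped TRB to the TD's last TRB crosses exactly one link TRB with
+ * LINK_TOGGLE set, new_cycle_state is toggled once, from 1 to 0, and that is
+ * the value handed to the Set TR Dequeue Pointer command.
+ */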
+
+static void td_to_noop(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
+		struct xhci_td *cur_td)
+{
+	struct xhci_segment *cur_seg;
+	union xhci_trb *cur_trb;
+
+	for (cur_seg = cur_td->start_seg, cur_trb = cur_td->first_trb;
+			true;
+			next_trb(xhci, ep_ring, &cur_seg, &cur_trb)) {
+		if ((cur_trb->generic.field[3] & TRB_TYPE_BITMASK) ==
+				TRB_TYPE(TRB_LINK)) {
+			/* Unchain any chained Link TRBs, but
+			 * leave the pointers intact.
+			 */
+			cur_trb->generic.field[3] &= ~TRB_CHAIN;
+			xhci_dbg(xhci, "Cancel (unchain) link TRB\n");
+			xhci_dbg(xhci, "Address = %p (0x%llx dma); "
+					"in seg %p (0x%llx dma)\n",
+					cur_trb,
+					(unsigned long long)xhci_trb_virt_to_dma(cur_seg, cur_trb),
+					cur_seg,
+					(unsigned long long)cur_seg->dma);
+		} else {
+			cur_trb->generic.field[0] = 0;
+			cur_trb->generic.field[1] = 0;
+			cur_trb->generic.field[2] = 0;
+			/* Preserve only the cycle bit of this TRB */
+			cur_trb->generic.field[3] &= TRB_CYCLE;
+			cur_trb->generic.field[3] |= TRB_TYPE(TRB_TR_NOOP);
+			xhci_dbg(xhci, "Cancel TRB %p (0x%llx dma) "
+					"in seg %p (0x%llx dma)\n",
+					cur_trb,
+					(unsigned long long)xhci_trb_virt_to_dma(cur_seg, cur_trb),
+					cur_seg,
+					(unsigned long long)cur_seg->dma);
+		}
+		if (cur_trb == cur_td->last_trb)
+			break;
+	}
+}
+
+static int queue_set_tr_deq(struct xhci_hcd *xhci, int slot_id,
+		unsigned int ep_index, struct xhci_segment *deq_seg,
+		union xhci_trb *deq_ptr, u32 cycle_state);
+
+/*
+ * When we get a command completion for a Stop Endpoint Command, we need to
+ * unlink any cancelled TDs from the ring.  There are two ways to do that:
+ *
+ *  1. If the HW was in the middle of processing the TD that needs to be
+ *     cancelled, then we must move the ring's dequeue pointer past the last TRB
+ *     in the TD with a Set Dequeue Pointer Command.
+ *  2. Otherwise, we turn all the TRBs in the TD into No-op TRBs (with the chain
+ *     bit cleared) so that the HW will skip over them.
+ */
+static void handle_stopped_endpoint(struct xhci_hcd *xhci,
+		union xhci_trb *trb)
+{
+	unsigned int slot_id;
+	unsigned int ep_index;
+	struct xhci_ring *ep_ring;
+	struct list_head *entry;
+	struct xhci_td *cur_td = NULL;
+	struct xhci_td *last_unlinked_td;
+
+	struct dequeue_state deq_state;
+#ifdef CONFIG_USB_HCD_STAT
+	ktime_t stop_time = ktime_get();
+#endif
+
+	memset(&deq_state, 0, sizeof(deq_state));
+	slot_id = TRB_TO_SLOT_ID(trb->generic.field[3]);
+	ep_index = TRB_TO_EP_INDEX(trb->generic.field[3]);
+	ep_ring = xhci->devs[slot_id]->ep_rings[ep_index];
+
+	if (list_empty(&ep_ring->cancelled_td_list))
+		return;
+
+	/* Fix up the ep ring first, so HW stops executing cancelled TDs.
+	 * We have the xHCI lock, so nothing can modify this list until we drop
+	 * it.  We're also in the event handler, so we can't get re-interrupted
+	 * if another Stop Endpoint command completes
+	 */
+	list_for_each(entry, &ep_ring->cancelled_td_list) {
+		cur_td = list_entry(entry, struct xhci_td, cancelled_td_list);
+		xhci_dbg(xhci, "Cancelling TD starting at %p, 0x%llx (dma).\n",
+				cur_td->first_trb,
+				(unsigned long long)xhci_trb_virt_to_dma(cur_td->start_seg, cur_td->first_trb));
+		/*
+		 * If we stopped on the TD we need to cancel, then we have to
+		 * move the xHC endpoint ring dequeue pointer past this TD.
+		 */
+		if (cur_td == ep_ring->stopped_td)
+			find_new_dequeue_state(xhci, slot_id, ep_index, cur_td,
+					&deq_state);
+		else
+			td_to_noop(xhci, ep_ring, cur_td);
+		/*
+		 * The event handler won't see a completion for this TD anymore,
+		 * so remove it from the endpoint ring's TD list.  Keep it in
+		 * the cancelled TD list for URB completion later.
+		 */
+		list_del(&cur_td->td_list);
+		ep_ring->cancels_pending--;
+	}
+	last_unlinked_td = cur_td;
+
+	/* If necessary, queue a Set Transfer Ring Dequeue Pointer command */
+	if (deq_state.new_deq_ptr && deq_state.new_deq_seg) {
+		xhci_dbg(xhci, "Set TR Deq Ptr cmd, new deq seg = %p (0x%llx dma), "
+				"new deq ptr = %p (0x%llx dma), new cycle = %u\n",
+				deq_state.new_deq_seg,
+				(unsigned long long)deq_state.new_deq_seg->dma,
+				deq_state.new_deq_ptr,
+				(unsigned long long)xhci_trb_virt_to_dma(deq_state.new_deq_seg, deq_state.new_deq_ptr),
+				deq_state.new_cycle_state);
+		queue_set_tr_deq(xhci, slot_id, ep_index,
+				deq_state.new_deq_seg,
+				deq_state.new_deq_ptr,
+				(u32) deq_state.new_cycle_state);
+		/* Stop the TD queueing code from ringing the doorbell until
+		 * this command completes.  The HC won't set the dequeue pointer
+		 * if the ring is running, and ringing the doorbell starts the
+		 * ring running.
+		 */
+		ep_ring->state |= SET_DEQ_PENDING;
+		xhci_ring_cmd_db(xhci);
+	} else {
+		/* Otherwise just ring the doorbell to restart the ring */
+		ring_ep_doorbell(xhci, slot_id, ep_index);
+	}
+
+	/*
+	 * Drop the lock and complete the URBs in the cancelled TD list.
+	 * New TDs to be cancelled might be added to the end of the list before
+	 * we can complete all the URBs for the TDs we already unlinked.
+	 * So stop when we've completed the URB for the last TD we unlinked.
+	 */
+	do {
+		cur_td = list_entry(ep_ring->cancelled_td_list.next,
+				struct xhci_td, cancelled_td_list);
+		list_del(&cur_td->cancelled_td_list);
+
+		/* Clean up the cancelled URB */
+#ifdef CONFIG_USB_HCD_STAT
+		hcd_stat_update(xhci->tp_stat, cur_td->urb->actual_length,
+				ktime_sub(stop_time, cur_td->start_time));
+#endif
+		cur_td->urb->hcpriv = NULL;
+		usb_hcd_unlink_urb_from_ep(xhci_to_hcd(xhci), cur_td->urb);
+
+		xhci_dbg(xhci, "Giveback cancelled URB %p\n", cur_td->urb);
+		spin_unlock(&xhci->lock);
+		/* Doesn't matter what we pass for status, since the core will
+		 * just overwrite it (because the URB has been unlinked).
+		 */
+		usb_hcd_giveback_urb(xhci_to_hcd(xhci), cur_td->urb, 0);
+		kfree(cur_td);
+
+		spin_lock(&xhci->lock);
+	} while (cur_td != last_unlinked_td);
+
+	/* Return to the event handler with xhci->lock re-acquired */
+}
+
+/*
+ * When we get a completion for a Set Transfer Ring Dequeue Pointer command,
+ * we need to clear the set deq pending flag in the endpoint ring state, so that
+ * the TD queueing code can ring the doorbell again.  We also need to ring the
+ * endpoint doorbell to restart the ring, but only if there aren't more
+ * cancellations pending.
+ */
+static void handle_set_deq_completion(struct xhci_hcd *xhci,
+		struct xhci_event_cmd *event,
+		union xhci_trb *trb)
+{
+	unsigned int slot_id;
+	unsigned int ep_index;
+	struct xhci_ring *ep_ring;
+	struct xhci_virt_device *dev;
+
+	slot_id = TRB_TO_SLOT_ID(trb->generic.field[3]);
+	ep_index = TRB_TO_EP_INDEX(trb->generic.field[3]);
+	dev = xhci->devs[slot_id];
+	ep_ring = dev->ep_rings[ep_index];
+
+	if (GET_COMP_CODE(event->status) != COMP_SUCCESS) {
+		unsigned int ep_state;
+		unsigned int slot_state;
+
+		switch (GET_COMP_CODE(event->status)) {
+		case COMP_TRB_ERR:
+			xhci_warn(xhci, "WARN Set TR Deq Ptr cmd invalid because "
+					"of stream ID configuration\n");
+			break;
+		case COMP_CTX_STATE:
+			xhci_warn(xhci, "WARN Set TR Deq Ptr cmd failed due "
+					"to incorrect slot or ep state.\n");
+			ep_state = dev->out_ctx->ep[ep_index].ep_info;
+			ep_state &= EP_STATE_MASK;
+			slot_state = dev->out_ctx->slot.dev_state;
+			slot_state = GET_SLOT_STATE(slot_state);
+			xhci_dbg(xhci, "Slot state = %u, EP state = %u\n",
+					slot_state, ep_state);
+			break;
+		case COMP_EBADSLT:
+			xhci_warn(xhci, "WARN Set TR Deq Ptr cmd failed because "
+					"slot %u was not enabled.\n", slot_id);
+			break;
+		default:
+			xhci_warn(xhci, "WARN Set TR Deq Ptr cmd with unknown "
+					"completion code of %u.\n",
+					GET_COMP_CODE(event->status));
+			break;
+		}
+		/* OK what do we do now?  The endpoint state is hosed, and we
+		 * should never get to this point if the synchronization between
+		 * queueing and endpoint state is correct.  This might happen
+		 * if the device gets disconnected after we've finished
+		 * cancelling URBs, which might not be an error...
+		 */
+	} else {
+		xhci_dbg(xhci, "Successful Set TR Deq Ptr cmd, deq[0] = 0x%x, "
+				"deq[1] = 0x%x.\n",
+				dev->out_ctx->ep[ep_index].deq[0],
+				dev->out_ctx->ep[ep_index].deq[1]);
+	}
+
+	ep_ring->state &= ~SET_DEQ_PENDING;
+	ring_ep_doorbell(xhci, slot_id, ep_index);
+}
+
+
+static void handle_cmd_completion(struct xhci_hcd *xhci,
+		struct xhci_event_cmd *event)
+{
+	int slot_id = TRB_TO_SLOT_ID(event->flags);
+	u64 cmd_dma;
+	dma_addr_t cmd_dequeue_dma;
+
+	cmd_dma = (((u64) event->cmd_trb[1]) << 32) + event->cmd_trb[0];
+	cmd_dequeue_dma = xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg,
+			xhci->cmd_ring->dequeue);
+	/* Is the command ring deq ptr out of sync with the deq seg ptr? */
+	if (cmd_dequeue_dma == 0) {
+		xhci->error_bitmask |= 1 << 4;
+		return;
+	}
+	/* Does the DMA address match our internal dequeue pointer address? */
+	if (cmd_dma != (u64) cmd_dequeue_dma) {
+		xhci->error_bitmask |= 1 << 5;
+		return;
+	}
+	switch (xhci->cmd_ring->dequeue->generic.field[3] & TRB_TYPE_BITMASK) {
+	case TRB_TYPE(TRB_ENABLE_SLOT):
+		if (GET_COMP_CODE(event->status) == COMP_SUCCESS)
+			xhci->slot_id = slot_id;
+		else
+			xhci->slot_id = 0;
+		complete(&xhci->addr_dev);
+		break;
+	case TRB_TYPE(TRB_DISABLE_SLOT):
+		if (xhci->devs[slot_id])
+			xhci_free_virt_device(xhci, slot_id);
+		break;
+	case TRB_TYPE(TRB_CONFIG_EP):
+		xhci->devs[slot_id]->cmd_status = GET_COMP_CODE(event->status);
+		complete(&xhci->devs[slot_id]->cmd_completion);
+		break;
+	case TRB_TYPE(TRB_ADDR_DEV):
+		xhci->devs[slot_id]->cmd_status = GET_COMP_CODE(event->status);
+		complete(&xhci->addr_dev);
+		break;
+	case TRB_TYPE(TRB_STOP_RING):
+		handle_stopped_endpoint(xhci, xhci->cmd_ring->dequeue);
+		break;
+	case TRB_TYPE(TRB_SET_DEQ):
+		handle_set_deq_completion(xhci, event, xhci->cmd_ring->dequeue);
+		break;
+	case TRB_TYPE(TRB_CMD_NOOP):
+		++xhci->noops_handled;
+		break;
+	default:
+		/* Skip over unknown commands on the event ring */
+		xhci->error_bitmask |= 1 << 6;
+		break;
+	}
+	inc_deq(xhci, xhci->cmd_ring, false);
+}
+
+static void handle_port_status(struct xhci_hcd *xhci,
+		union xhci_trb *event)
+{
+	u32 port_id;
+
+	/* Port status change events always have a successful completion code */
+	if (GET_COMP_CODE(event->generic.field[2]) != COMP_SUCCESS) {
+		xhci_warn(xhci, "WARN: xHC returned failed port status event\n");
+		xhci->error_bitmask |= 1 << 8;
+	}
+	/* FIXME: core doesn't care about all port link state changes yet */
+	port_id = GET_PORT_ID(event->generic.field[0]);
+	xhci_dbg(xhci, "Port Status Change Event for port %d\n", port_id);
+
+	/* Update event ring dequeue pointer before dropping the lock */
+	inc_deq(xhci, xhci->event_ring, true);
+	xhci_set_hc_event_deq(xhci);
+
+	spin_unlock(&xhci->lock);
+	/* Pass this up to the core */
+	usb_hcd_poll_rh_status(xhci_to_hcd(xhci));
+	spin_lock(&xhci->lock);
+}
+
+/*
+ * This TD is defined by the TRBs starting at start_trb in start_seg and ending
+ * at end_trb, which may be in another segment.  If the suspect DMA address is a
+ * TRB in this TD, this function returns that TRB's segment.  Otherwise it
+ * returns 0.
+ */
+static struct xhci_segment *trb_in_td(
+		struct xhci_segment *start_seg,
+		union xhci_trb	*start_trb,
+		union xhci_trb	*end_trb,
+		dma_addr_t	suspect_dma)
+{
+	dma_addr_t start_dma;
+	dma_addr_t end_seg_dma;
+	dma_addr_t end_trb_dma;
+	struct xhci_segment *cur_seg;
+
+	start_dma = xhci_trb_virt_to_dma(start_seg, start_trb);
+	cur_seg = start_seg;
+
+	do {
+		/* We may get an event for a Link TRB in the middle of a TD */
+		end_seg_dma = xhci_trb_virt_to_dma(cur_seg,
+				&cur_seg->trbs[TRBS_PER_SEGMENT - 1]);
+		/* If the end TRB isn't in this segment, this is set to 0 */
+		end_trb_dma = xhci_trb_virt_to_dma(cur_seg, end_trb);
+
+		if (end_trb_dma > 0) {
+			/* The end TRB is in this segment, so suspect should be here */
+			if (start_dma <= end_trb_dma) {
+				if (suspect_dma >= start_dma && suspect_dma <= end_trb_dma)
+					return cur_seg;
+			} else {
+				/* Case for one segment with
+				 * a TD wrapped around to the top
+				 */
+				if ((suspect_dma >= start_dma &&
+							suspect_dma <= end_seg_dma) ||
+						(suspect_dma >= cur_seg->dma &&
+						 suspect_dma <= end_trb_dma))
+					return cur_seg;
+			}
+			return NULL;
+		} else {
+			/* Might still be somewhere in this segment */
+			if (suspect_dma >= start_dma && suspect_dma <= end_seg_dma)
+				return cur_seg;
+		}
+		cur_seg = cur_seg->next;
+		start_dma = xhci_trb_virt_to_dma(cur_seg, &cur_seg->trbs[0]);
+	} while (1);
+
+}
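+
+/*
+ * Worked example for the wrap-around case above (illustrative numbers only):
+ * with a single-segment ring at DMA 0x1000 holding 64 16-byte TRBs, a TD
+ * that starts at 0x13c0 and ends at 0x1080 occupies [0x13c0, 0x13f0] and
+ * [0x1000, 0x1080].  A suspect DMA of 0x13e0 or 0x1040 is part of the TD;
+ * 0x1200 is not.
+ */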
+
+/*
+ * If this function returns an error condition, it means it got a Transfer
+ * event with a corrupted Slot ID, Endpoint ID, or TRB DMA address.
+ * At this point, the host controller is probably hosed and should be reset.
+ */
+static int handle_tx_event(struct xhci_hcd *xhci,
+		struct xhci_transfer_event *event)
+{
+	struct xhci_virt_device *xdev;
+	struct xhci_ring *ep_ring;
+	int ep_index;
+	struct xhci_td *td = NULL;
+	dma_addr_t event_dma;
+	struct xhci_segment *event_seg;
+	union xhci_trb *event_trb;
+	struct urb *urb = NULL;
+	int status = -EINPROGRESS;
+
+	xdev = xhci->devs[TRB_TO_SLOT_ID(event->flags)];
+	if (!xdev) {
+		xhci_err(xhci, "ERROR Transfer event pointed to bad slot\n");
+		return -ENODEV;
+	}
+
+	/* Endpoint ID is 1 based, our index is zero based */
+	ep_index = TRB_TO_EP_ID(event->flags) - 1;
+	ep_ring = xdev->ep_rings[ep_index];
+	if (!ep_ring || (xdev->out_ctx->ep[ep_index].ep_info & EP_STATE_MASK) == EP_STATE_DISABLED) {
+		xhci_err(xhci, "ERROR Transfer event pointed to disabled endpoint\n");
+		return -ENODEV;
+	}
+
+	event_dma = event->buffer[0];
+	if (event->buffer[1] != 0)
+		xhci_warn(xhci, "WARN ignoring upper 32-bits of 64-bit TRB dma address\n");
+
+	/* This TRB should be in the TD at the head of this ring's TD list */
+	if (list_empty(&ep_ring->td_list)) {
+		xhci_warn(xhci, "WARN Event TRB for slot %d ep %d with no TDs queued?\n",
+				TRB_TO_SLOT_ID(event->flags), ep_index);
+		xhci_dbg(xhci, "Event TRB with TRB type ID %u\n",
+				(unsigned int) (event->flags & TRB_TYPE_BITMASK)>>10);
+		xhci_print_trb_offsets(xhci, (union xhci_trb *) event);
+		urb = NULL;
+		goto cleanup;
+	}
+	td = list_entry(ep_ring->td_list.next, struct xhci_td, td_list);
+
+	/* Is this a TRB in the currently executing TD? */
+	event_seg = trb_in_td(ep_ring->deq_seg, ep_ring->dequeue,
+			td->last_trb, event_dma);
+	if (!event_seg) {
+		/* HC is busted, give up! */
+		xhci_err(xhci, "ERROR Transfer event TRB DMA ptr not part of current TD\n");
+		return -ESHUTDOWN;
+	}
+	event_trb = &event_seg->trbs[(event_dma - event_seg->dma) / sizeof(*event_trb)];
+	xhci_dbg(xhci, "Event TRB with TRB type ID %u\n",
+			(unsigned int) (event->flags & TRB_TYPE_BITMASK)>>10);
+	xhci_dbg(xhci, "Offset 0x00 (buffer[0]) = 0x%x\n",
+			(unsigned int) event->buffer[0]);
+	xhci_dbg(xhci, "Offset 0x04 (buffer[1]) = 0x%x\n",
+			(unsigned int) event->buffer[1]);
+	xhci_dbg(xhci, "Offset 0x08 (transfer length) = 0x%x\n",
+			(unsigned int) event->transfer_len);
+	xhci_dbg(xhci, "Offset 0x0C (flags) = 0x%x\n",
+			(unsigned int) event->flags);
+
+	/* Look for common error cases */
+	switch (GET_COMP_CODE(event->transfer_len)) {
+	/* Skip codes that require special handling depending on
+	 * transfer type
+	 */
+	case COMP_SUCCESS:
+	case COMP_SHORT_TX:
+		break;
+	case COMP_STOP:
+		xhci_dbg(xhci, "Stopped on Transfer TRB\n");
+		break;
+	case COMP_STOP_INVAL:
+		xhci_dbg(xhci, "Stopped on No-op or Link TRB\n");
+		break;
+	case COMP_STALL:
+		xhci_warn(xhci, "WARN: Stalled endpoint\n");
+		status = -EPIPE;
+		break;
+	case COMP_TRB_ERR:
+		xhci_warn(xhci, "WARN: TRB error on endpoint\n");
+		status = -EILSEQ;
+		break;
+	case COMP_TX_ERR:
+		xhci_warn(xhci, "WARN: transfer error on endpoint\n");
+		status = -EPROTO;
+		break;
+	case COMP_DB_ERR:
+		xhci_warn(xhci, "WARN: HC couldn't access mem fast enough\n");
+		status = -ENOSR;
+		break;
+	default:
+		xhci_warn(xhci, "ERROR Unknown event condition, HC probably busted\n");
+		urb = NULL;
+		goto cleanup;
+	}
+	/* Now update the urb's actual_length and give back to the core */
+	/* Was this a control transfer? */
+	if (usb_endpoint_xfer_control(&td->urb->ep->desc)) {
+		xhci_debug_trb(xhci, xhci->event_ring->dequeue);
+		switch (GET_COMP_CODE(event->transfer_len)) {
+		case COMP_SUCCESS:
+			if (event_trb == ep_ring->dequeue) {
+				xhci_warn(xhci, "WARN: Success on ctrl setup TRB without IOC set??\n");
+				status = -ESHUTDOWN;
+			} else if (event_trb != td->last_trb) {
+				xhci_warn(xhci, "WARN: Success on ctrl data TRB without IOC set??\n");
+				status = -ESHUTDOWN;
+			} else {
+				xhci_dbg(xhci, "Successful control transfer!\n");
+				status = 0;
+			}
+			break;
+		case COMP_SHORT_TX:
+			xhci_warn(xhci, "WARN: short transfer on control ep\n");
+			status = -EREMOTEIO;
+			break;
+		default:
+			/* Others already handled above */
+			break;
+		}
+		/*
+		 * Did we transfer any data, despite the errors that might have
+		 * happened?  I.e. did we get past the setup stage?
+		 */
+		if (event_trb != ep_ring->dequeue) {
+			/* The event was for the status stage */
+			if (event_trb == td->last_trb) {
+				td->urb->actual_length =
+					td->urb->transfer_buffer_length;
+			} else {
+				/* Maybe the event was for the data stage? */
+				if (GET_COMP_CODE(event->transfer_len) != COMP_STOP_INVAL)
+					/* We didn't stop on a link TRB in the middle */
+					td->urb->actual_length =
+						td->urb->transfer_buffer_length -
+						TRB_LEN(event->transfer_len);
+			}
+		}
+	} else {
+		switch (GET_COMP_CODE(event->transfer_len)) {
+		case COMP_SUCCESS:
+			/* Double check that the HW transferred everything. */
+			if (event_trb != td->last_trb) {
+				xhci_warn(xhci, "WARN Successful completion "
+						"on short TX\n");
+				if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
+					status = -EREMOTEIO;
+				else
+					status = 0;
+			} else {
+				xhci_dbg(xhci, "Successful bulk transfer!\n");
+				status = 0;
+			}
+			break;
+		case COMP_SHORT_TX:
+			if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
+				status = -EREMOTEIO;
+			else
+				status = 0;
+			break;
+		default:
+			/* Others already handled above */
+			break;
+		}
+		dev_dbg(&td->urb->dev->dev,
+				"ep %#x - asked for %d bytes, "
+				"%d bytes untransferred\n",
+				td->urb->ep->desc.bEndpointAddress,
+				td->urb->transfer_buffer_length,
+				TRB_LEN(event->transfer_len));
+		/* Fast path - was this the last TRB in the TD for this URB? */
+		if (event_trb == td->last_trb) {
+			if (TRB_LEN(event->transfer_len) != 0) {
+				/* urb->actual_length is unsigned, so check the
+				 * reported residue before subtracting.
+				 */
+				if (TRB_LEN(event->transfer_len) >
+						td->urb->transfer_buffer_length) {
+					xhci_warn(xhci, "HC gave bad length "
+							"of %d bytes left\n",
+							TRB_LEN(event->transfer_len));
+					td->urb->actual_length = 0;
+				} else {
+					td->urb->actual_length =
+						td->urb->transfer_buffer_length -
+						TRB_LEN(event->transfer_len);
+				}
+				if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
+					status = -EREMOTEIO;
+				else
+					status = 0;
+			} else {
+				td->urb->actual_length = td->urb->transfer_buffer_length;
+				/* Ignore a short packet completion if the
+				 * untransferred length was zero.
+				 */
+				status = 0;
+			}
+		} else {
+			/* Slow path - walk the list, starting from the dequeue
+			 * pointer, to get the actual length transferred.
+			 */
+			union xhci_trb *cur_trb;
+			struct xhci_segment *cur_seg;
+
+			td->urb->actual_length = 0;
+			for (cur_trb = ep_ring->dequeue, cur_seg = ep_ring->deq_seg;
+					cur_trb != event_trb;
+					next_trb(xhci, ep_ring, &cur_seg, &cur_trb)) {
+				if (TRB_TYPE(cur_trb->generic.field[3]) != TRB_TR_NOOP &&
+						TRB_TYPE(cur_trb->generic.field[3]) != TRB_LINK)
+					td->urb->actual_length +=
+						TRB_LEN(cur_trb->generic.field[2]);
+			}
+			/* If the ring didn't stop on a Link or No-op TRB, add
+			 * in the actual bytes transferred from the Normal TRB
+			 */
+			if (GET_COMP_CODE(event->transfer_len) != COMP_STOP_INVAL)
+				td->urb->actual_length +=
+					TRB_LEN(cur_trb->generic.field[2]) -
+					TRB_LEN(event->transfer_len);
+		}
+	}
+	/* The Endpoint Stop Command completion will take care of
+	 * any stopped TDs.  A stopped TD may be restarted, so don't update the
+	 * ring dequeue pointer or take this TD off any lists yet.
+	 */
+	if (GET_COMP_CODE(event->transfer_len) == COMP_STOP_INVAL ||
+			GET_COMP_CODE(event->transfer_len) == COMP_STOP) {
+		ep_ring->stopped_td = td;
+		ep_ring->stopped_trb = event_trb;
+	} else {
+		/* Update ring dequeue pointer */
+		while (ep_ring->dequeue != td->last_trb)
+			inc_deq(xhci, ep_ring, false);
+		inc_deq(xhci, ep_ring, false);
+
+		/* Clean up the endpoint's TD list */
+		urb = td->urb;
+		list_del(&td->td_list);
+		/* Was this TD slated to be cancelled but completed anyway? */
+		if (!list_empty(&td->cancelled_td_list)) {
+			list_del(&td->cancelled_td_list);
+			ep_ring->cancels_pending--;
+		}
+		kfree(td);
+		urb->hcpriv = NULL;
+	}
+cleanup:
+	inc_deq(xhci, xhci->event_ring, true);
+	xhci_set_hc_event_deq(xhci);
+
+	/* FIXME for multi-TD URBs (which have buffers bigger than 64MB) */
+	if (urb) {
+		usb_hcd_unlink_urb_from_ep(xhci_to_hcd(xhci), urb);
+		spin_unlock(&xhci->lock);
+		usb_hcd_giveback_urb(xhci_to_hcd(xhci), urb, status);
+		spin_lock(&xhci->lock);
+	}
+	return 0;
+}
+
+/*
+ * This function handles all OS-owned events on the event ring.  It may drop
+ * xhci->lock between event processing (e.g. to pass up port status changes).
+ */
+void xhci_handle_event(struct xhci_hcd *xhci)
+{
+	union xhci_trb *event;
+	int update_ptrs = 1;
+	int ret;
+
+	if (!xhci->event_ring || !xhci->event_ring->dequeue) {
+		xhci->error_bitmask |= 1 << 1;
+		return;
+	}
+
+	event = xhci->event_ring->dequeue;
+	/* Does the HC or OS own the TRB? */
+	if ((event->event_cmd.flags & TRB_CYCLE) !=
+			xhci->event_ring->cycle_state) {
+		xhci->error_bitmask |= 1 << 2;
+		return;
+	}
+
+	/* FIXME: Handle more event types. */
+	switch ((event->event_cmd.flags & TRB_TYPE_BITMASK)) {
+	case TRB_TYPE(TRB_COMPLETION):
+		handle_cmd_completion(xhci, &event->event_cmd);
+		break;
+	case TRB_TYPE(TRB_PORT_STATUS):
+		handle_port_status(xhci, event);
+		update_ptrs = 0;
+		break;
+	case TRB_TYPE(TRB_TRANSFER):
+		ret = handle_tx_event(xhci, &event->trans_event);
+		if (ret < 0)
+			xhci->error_bitmask |= 1 << 9;
+		else
+			update_ptrs = 0;
+		break;
+	default:
+		xhci->error_bitmask |= 1 << 3;
+	}
+
+	if (update_ptrs) {
+		/* Update SW and HC event ring dequeue pointer */
+		inc_deq(xhci, xhci->event_ring, true);
+		xhci_set_hc_event_deq(xhci);
+	}
+	/* Are there more items on the event ring? */
+	xhci_handle_event(xhci);
+}
+
+/****		Endpoint Ring Operations	****/
+
+/*
+ * Generic function for queueing a TRB on a ring.
+ * The caller must have checked to make sure there's room on the ring.
+ */
+static void queue_trb(struct xhci_hcd *xhci, struct xhci_ring *ring,
+		bool consumer,
+		u32 field1, u32 field2, u32 field3, u32 field4)
+{
+	struct xhci_generic_trb *trb;
+
+	trb = &ring->enqueue->generic;
+	trb->field[0] = field1;
+	trb->field[1] = field2;
+	trb->field[2] = field3;
+	trb->field[3] = field4;
+	inc_enq(xhci, ring, consumer);
+}
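+
+/*
+ * Hedged usage sketch (not taken from a caller in this file): once room has
+ * been checked with prepare_ring() or room_on_ring(), a transfer no-op TRB
+ * could be queued with
+ *
+ *	queue_trb(xhci, ep_ring, false, 0, 0, 0,
+ *			TRB_TYPE(TRB_TR_NOOP) | ep_ring->cycle_state);
+ */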
+
+/*
+ * Does various checks on the endpoint ring, and makes it ready to queue num_trbs.
+ * FIXME allocate segments if the ring is full.
+ */
+static int prepare_ring(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
+		u32 ep_state, unsigned int num_trbs, gfp_t mem_flags)
+{
+	/* Make sure the endpoint has been added to xHC schedule */
+	xhci_dbg(xhci, "Endpoint state = 0x%x\n", ep_state);
+	switch (ep_state) {
+	case EP_STATE_DISABLED:
+		/*
+		 * USB core changed config/interfaces without notifying us,
+		 * or hardware is reporting the wrong state.
+		 */
+		xhci_warn(xhci, "WARN urb submitted to disabled ep\n");
+		return -ENOENT;
+	case EP_STATE_HALTED:
+	case EP_STATE_ERROR:
+		xhci_warn(xhci, "WARN waiting for halt or error on ep "
+				"to be cleared\n");
+		/* FIXME event handling code for error needs to clear it */
+		/* XXX not sure if this should be -ENOENT or not */
+		return -EINVAL;
+	case EP_STATE_STOPPED:
+	case EP_STATE_RUNNING:
+		break;
+	default:
+		xhci_err(xhci, "ERROR unknown endpoint state for ep\n");
+		/*
+		 * FIXME issue Configure Endpoint command to try to get the HC
+		 * back into a known state.
+		 */
+		return -EINVAL;
+	}
+	if (!room_on_ring(xhci, ep_ring, num_trbs)) {
+		/* FIXME allocate more room */
+		xhci_err(xhci, "ERROR no room on ep ring\n");
+		return -ENOMEM;
+	}
+	return 0;
+}
+
+static int prepare_transfer(struct xhci_hcd *xhci,
+		struct xhci_virt_device *xdev,
+		unsigned int ep_index,
+		unsigned int num_trbs,
+		struct urb *urb,
+		struct xhci_td **td,
+		gfp_t mem_flags)
+{
+	int ret;
+
+	ret = prepare_ring(xhci, xdev->ep_rings[ep_index],
+			xdev->out_ctx->ep[ep_index].ep_info & EP_STATE_MASK,
+			num_trbs, mem_flags);
+	if (ret)
+		return ret;
+	*td = kzalloc(sizeof(struct xhci_td), mem_flags);
+	if (!*td)
+		return -ENOMEM;
+	INIT_LIST_HEAD(&(*td)->td_list);
+	INIT_LIST_HEAD(&(*td)->cancelled_td_list);
+
+	ret = usb_hcd_link_urb_to_ep(xhci_to_hcd(xhci), urb);
+	if (unlikely(ret)) {
+		kfree(*td);
+		return ret;
+	}
+
+	(*td)->urb = urb;
+	urb->hcpriv = (void *) (*td);
+	/* Add this TD to the tail of the endpoint ring's TD list */
+	list_add_tail(&(*td)->td_list, &xdev->ep_rings[ep_index]->td_list);
+	(*td)->start_seg = xdev->ep_rings[ep_index]->enq_seg;
+	(*td)->first_trb = xdev->ep_rings[ep_index]->enqueue;
+
+	return 0;
+}
+
+static unsigned int count_sg_trbs_needed(struct xhci_hcd *xhci, struct urb *urb)
+{
+	int num_sgs, num_trbs, running_total, temp, i;
+	struct scatterlist *sg;
+
+	sg = NULL;
+	num_sgs = urb->num_sgs;
+	temp = urb->transfer_buffer_length;
+
+	xhci_dbg(xhci, "count sg list trbs: \n");
+	num_trbs = 0;
+	for_each_sg(urb->sg->sg, sg, num_sgs, i) {
+		unsigned int previous_total_trbs = num_trbs;
+		unsigned int len = sg_dma_len(sg);
+
+		/* Scatter gather list entries may cross 64KB boundaries */
+		running_total = TRB_MAX_BUFF_SIZE -
+			(sg_dma_address(sg) & ((1 << TRB_MAX_BUFF_SHIFT) - 1));
+		if (running_total != 0)
+			num_trbs++;
+
+		/* How many more 64KB chunks to transfer, how many more TRBs? */
+		while (running_total < sg_dma_len(sg)) {
+			num_trbs++;
+			running_total += TRB_MAX_BUFF_SIZE;
+		}
+		xhci_dbg(xhci, " sg #%d: dma = %#llx, len = %#x (%d), num_trbs = %d\n",
+				i, (unsigned long long)sg_dma_address(sg),
+				len, len, num_trbs - previous_total_trbs);
+
+		len = min_t(int, len, temp);
+		temp -= len;
+		if (temp == 0)
+			break;
+	}
+	xhci_dbg(xhci, "\n");
+	if (!in_interrupt())
+		dev_dbg(&urb->dev->dev, "ep %#x - urb len = %d, sglist used, num_trbs = %d\n",
+				urb->ep->desc.bEndpointAddress,
+				urb->transfer_buffer_length,
+				num_trbs);
+	return num_trbs;
+}
+
+static void check_trb_math(struct urb *urb, int num_trbs, int running_total)
+{
+	if (num_trbs != 0)
+		dev_dbg(&urb->dev->dev, "%s - ep %#x - Miscalculated number of "
+				"TRBs, %d left\n", __func__,
+				urb->ep->desc.bEndpointAddress, num_trbs);
+	if (running_total != urb->transfer_buffer_length)
+		dev_dbg(&urb->dev->dev, "%s - ep %#x - Miscalculated tx length, "
+				"queued %#x (%d), asked for %#x (%d)\n",
+				__func__,
+				urb->ep->desc.bEndpointAddress,
+				running_total, running_total,
+				urb->transfer_buffer_length,
+				urb->transfer_buffer_length);
+}
+
+static void giveback_first_trb(struct xhci_hcd *xhci, int slot_id,
+		unsigned int ep_index, int start_cycle,
+		struct xhci_generic_trb *start_trb, struct xhci_td *td)
+{
+	/*
+	 * Pass all the TRBs to the hardware at once and make sure this write
+	 * isn't reordered.
+	 */
+	wmb();
+	start_trb->field[3] |= start_cycle;
+	ring_ep_doorbell(xhci, slot_id, ep_index);
+}
+
+static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
+		struct urb *urb, int slot_id, unsigned int ep_index)
+{
+	struct xhci_ring *ep_ring;
+	unsigned int num_trbs;
+	struct xhci_td *td;
+	struct scatterlist *sg;
+	int num_sgs;
+	int trb_buff_len, this_sg_len, running_total;
+	bool first_trb;
+	u64 addr;
+
+	struct xhci_generic_trb *start_trb;
+	int start_cycle;
+
+	ep_ring = xhci->devs[slot_id]->ep_rings[ep_index];
+	num_trbs = count_sg_trbs_needed(xhci, urb);
+	num_sgs = urb->num_sgs;
+
+	trb_buff_len = prepare_transfer(xhci, xhci->devs[slot_id],
+			ep_index, num_trbs, urb, &td, mem_flags);
+	if (trb_buff_len < 0)
+		return trb_buff_len;
+	/*
+	 * Don't give the first TRB to the hardware (by toggling the cycle bit)
+	 * until we've finished creating all the other TRBs.  The ring's cycle
+	 * state may change as we enqueue the other TRBs, so save it too.
+	 */
+	start_trb = &ep_ring->enqueue->generic;
+	start_cycle = ep_ring->cycle_state;
+
+	running_total = 0;
+	/*
+	 * How much data is in the first TRB?
+	 *
+	 * There are three forces at work for TRB buffer pointers and lengths:
+	 * 1. We don't want to walk off the end of this sg-list entry buffer.
+	 * 2. The transfer length that the driver requested may be smaller than
+	 *    the amount of memory allocated for this scatter-gather list.
+	 * 3. TRB buffers can't cross 64KB boundaries.
+	 */
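+	/*
+	 * Worked example (illustrative numbers only): an sg entry of 0x11000
+	 * bytes at DMA address 0x2000f000 crosses a 64KB boundary at
+	 * 0x20010000, so the first TRB can carry at most 0x1000 bytes and
+	 * the remaining 0x10000 bytes need one more TRB.  If the URB only
+	 * asked for 0x800 bytes, trb_buff_len is clamped to 0x800 instead.
+	 */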
+	sg = urb->sg->sg;
+	addr = (u64) sg_dma_address(sg);
+	this_sg_len = sg_dma_len(sg);
+	trb_buff_len = TRB_MAX_BUFF_SIZE -
+		(addr & ((1 << TRB_MAX_BUFF_SHIFT) - 1));
+	trb_buff_len = min_t(int, trb_buff_len, this_sg_len);
+	if (trb_buff_len > urb->transfer_buffer_length)
+		trb_buff_len = urb->transfer_buffer_length;
+	xhci_dbg(xhci, "First length to xfer from 1st sglist entry = %u\n",
+			trb_buff_len);
+
+	first_trb = true;
+	/* Queue the first TRB, even if it's zero-length */
+	do {
+		u32 field = 0;
+
+		/* Don't change the cycle bit of the first TRB until later */
+		if (first_trb)
+			first_trb = false;
+		else
+			field |= ep_ring->cycle_state;
+
+		/* Chain all the TRBs together; clear the chain bit in the last
+		 * TRB to indicate it's the last TRB in the chain.
+		 */
+		if (num_trbs > 1) {
+			field |= TRB_CHAIN;
+		} else {
+			/* FIXME - add check for ZERO_PACKET flag before this */
+			td->last_trb = ep_ring->enqueue;
+			field |= TRB_IOC;
+		}
+		xhci_dbg(xhci, " sg entry: dma = %#x, len = %#x (%d), "
+				"64KB boundary at %#x, end dma = %#x\n",
+				(unsigned int) addr, trb_buff_len, trb_buff_len,
+				(unsigned int) (addr + TRB_MAX_BUFF_SIZE) & ~(TRB_MAX_BUFF_SIZE - 1),
+				(unsigned int) addr + trb_buff_len);
+		if (TRB_MAX_BUFF_SIZE -
+				(addr & ((1 << TRB_MAX_BUFF_SHIFT) - 1)) < trb_buff_len) {
+			xhci_warn(xhci, "WARN: sg dma xfer crosses 64KB boundaries!\n");
+			xhci_dbg(xhci, "Next boundary at %#x, end dma = %#x\n",
+					(unsigned int) (addr + TRB_MAX_BUFF_SIZE) & ~(TRB_MAX_BUFF_SIZE - 1),
+					(unsigned int) addr + trb_buff_len);
+		}
+		queue_trb(xhci, ep_ring, false,
+				(u32) addr,
+				(u32) ((u64) addr >> 32),
+				TRB_LEN(trb_buff_len) | TRB_INTR_TARGET(0),
+				/* We always want to know if the TRB was short,
+				 * or we won't get an event when it completes.
+				 * (Unless we use event data TRBs, which are a
+				 * waste of space and HC resources.)
+				 */
+				field | TRB_ISP | TRB_TYPE(TRB_NORMAL));
+		--num_trbs;
+		running_total += trb_buff_len;
+
+		/* Calculate length for next transfer --
+		 * Are we done queueing all the TRBs for this sg entry?
+		 */
+		this_sg_len -= trb_buff_len;
+		if (this_sg_len == 0) {
+			--num_sgs;
+			if (num_sgs == 0)
+				break;
+			sg = sg_next(sg);
+			addr = (u64) sg_dma_address(sg);
+			this_sg_len = sg_dma_len(sg);
+		} else {
+			addr += trb_buff_len;
+		}
+
+		trb_buff_len = TRB_MAX_BUFF_SIZE -
+			(addr & ((1 << TRB_MAX_BUFF_SHIFT) - 1));
+		trb_buff_len = min_t(int, trb_buff_len, this_sg_len);
+		if (running_total + trb_buff_len > urb->transfer_buffer_length)
+			trb_buff_len =
+				urb->transfer_buffer_length - running_total;
+	} while (running_total < urb->transfer_buffer_length);
+
+	check_trb_math(urb, num_trbs, running_total);
+	giveback_first_trb(xhci, slot_id, ep_index, start_cycle, start_trb, td);
+	return 0;
+}
+
+/* This is very similar to what ehci-q.c qtd_fill() does */
+int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
+		struct urb *urb, int slot_id, unsigned int ep_index)
+{
+	struct xhci_ring *ep_ring;
+	struct xhci_td *td;
+	int num_trbs;
+	struct xhci_generic_trb *start_trb;
+	bool first_trb;
+	int start_cycle;
+	u32 field;
+
+	int running_total, trb_buff_len, ret;
+	u64 addr;
+
+	if (urb->sg)
+		return queue_bulk_sg_tx(xhci, mem_flags, urb, slot_id, ep_index);
+
+	ep_ring = xhci->devs[slot_id]->ep_rings[ep_index];
+
+	num_trbs = 0;
+	/* How much data is (potentially) left before the 64KB boundary? */
+	running_total = TRB_MAX_BUFF_SIZE -
+		(urb->transfer_dma & ((1 << TRB_MAX_BUFF_SHIFT) - 1));
+
+	/* If there's some data on this 64KB chunk, or we have to send a
+	 * zero-length transfer, we need at least one TRB
+	 */
+	if (running_total != 0 || urb->transfer_buffer_length == 0)
+		num_trbs++;
+	/* How many more 64KB chunks to transfer, how many more TRBs? */
+	while (running_total < urb->transfer_buffer_length) {
+		num_trbs++;
+		running_total += TRB_MAX_BUFF_SIZE;
+	}
+	/* FIXME: this doesn't deal with URB_ZERO_PACKET - need one more */
+
+	if (!in_interrupt())
+		dev_dbg(&urb->dev->dev, "ep %#x - urb len = %#x (%d), addr = %#llx, num_trbs = %d\n",
+				urb->ep->desc.bEndpointAddress,
+				urb->transfer_buffer_length,
+				urb->transfer_buffer_length,
+				(unsigned long long)urb->transfer_dma,
+				num_trbs);
+
+	ret = prepare_transfer(xhci, xhci->devs[slot_id], ep_index,
+			num_trbs, urb, &td, mem_flags);
+	if (ret < 0)
+		return ret;
+
+	/*
+	 * Don't give the first TRB to the hardware (by toggling the cycle bit)
+	 * until we've finished creating all the other TRBs.  The ring's cycle
+	 * state may change as we enqueue the other TRBs, so save it too.
+	 */
+	start_trb = &ep_ring->enqueue->generic;
+	start_cycle = ep_ring->cycle_state;
+
+	running_total = 0;
+	/* How much data is in the first TRB? */
+	addr = (u64) urb->transfer_dma;
+	trb_buff_len = TRB_MAX_BUFF_SIZE -
+		(urb->transfer_dma & ((1 << TRB_MAX_BUFF_SHIFT) - 1));
+	if (urb->transfer_buffer_length < trb_buff_len)
+		trb_buff_len = urb->transfer_buffer_length;
+
+	first_trb = true;
+
+	/* Queue the first TRB, even if it's zero-length */
+	do {
+		field = 0;
+
+		/* Don't change the cycle bit of the first TRB until later */
+		if (first_trb)
+			first_trb = false;
+		else
+			field |= ep_ring->cycle_state;
+
+		/* Chain all the TRBs together; clear the chain bit in the last
+		 * TRB to indicate it's the last TRB in the chain.
+		 */
+		if (num_trbs > 1) {
+			field |= TRB_CHAIN;
+		} else {
+			/* FIXME - add check for ZERO_PACKET flag before this */
+			td->last_trb = ep_ring->enqueue;
+			field |= TRB_IOC;
+		}
+		queue_trb(xhci, ep_ring, false,
+				(u32) addr,
+				(u32) ((u64) addr >> 32),
+				TRB_LEN(trb_buff_len) | TRB_INTR_TARGET(0),
+				/* We always want to know if the TRB was short,
+				 * or we won't get an event when it completes.
+				 * (Unless we use event data TRBs, which are a
+				 * waste of space and HC resources.)
+				 */
+				field | TRB_ISP | TRB_TYPE(TRB_NORMAL));
+		--num_trbs;
+		running_total += trb_buff_len;
+
+		/* Calculate length for next transfer */
+		addr += trb_buff_len;
+		trb_buff_len = urb->transfer_buffer_length - running_total;
+		if (trb_buff_len > TRB_MAX_BUFF_SIZE)
+			trb_buff_len = TRB_MAX_BUFF_SIZE;
+	} while (running_total < urb->transfer_buffer_length);
+
+	check_trb_math(urb, num_trbs, running_total);
+	giveback_first_trb(xhci, slot_id, ep_index, start_cycle, start_trb, td);
+	return 0;
+}
+
+/* Caller must have locked xhci->lock */
+int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
+		struct urb *urb, int slot_id, unsigned int ep_index)
+{
+	struct xhci_ring *ep_ring;
+	int num_trbs;
+	int ret;
+	struct usb_ctrlrequest *setup;
+	struct xhci_generic_trb *start_trb;
+	int start_cycle;
+	u32 field;
+	struct xhci_td *td;
+
+	ep_ring = xhci->devs[slot_id]->ep_rings[ep_index];
+
+	/*
+	 * Need to copy setup packet into setup TRB, so we can't use the setup
+	 * DMA address.
+	 */
+	if (!urb->setup_packet)
+		return -EINVAL;
+
+	if (!in_interrupt())
+		xhci_dbg(xhci, "Queueing ctrl tx for slot id %d, ep %d\n",
+				slot_id, ep_index);
+	/* 1 TRB for setup, 1 for status */
+	num_trbs = 2;
+	/*
+	 * Don't need to check if we need additional event data and normal TRBs,
+	 * since data in control transfers will never get bigger than 16MB
+	 * XXX: can we get a buffer that crosses 64KB boundaries?
+	 */
+	if (urb->transfer_buffer_length > 0)
+		num_trbs++;
+	ret = prepare_transfer(xhci, xhci->devs[slot_id], ep_index, num_trbs,
+			urb, &td, mem_flags);
+	if (ret < 0)
+		return ret;
+
+	/*
+	 * Don't give the first TRB to the hardware (by toggling the cycle bit)
+	 * until we've finished creating all the other TRBs.  The ring's cycle
+	 * state may change as we enqueue the other TRBs, so save it too.
+	 */
+	start_trb = &ep_ring->enqueue->generic;
+	start_cycle = ep_ring->cycle_state;
+
+	/* Queue setup TRB - see section 6.4.1.2.1 */
+	/* FIXME better way to translate setup_packet into two u32 fields? */
+	setup = (struct usb_ctrlrequest *) urb->setup_packet;
+	queue_trb(xhci, ep_ring, false,
+			/* FIXME endianness is probably going to bite my ass here. */
+			setup->bRequestType | setup->bRequest << 8 | setup->wValue << 16,
+			setup->wIndex | setup->wLength << 16,
+			TRB_LEN(8) | TRB_INTR_TARGET(0),
+			/* Immediate data in pointer */
+			TRB_IDT | TRB_TYPE(TRB_SETUP));
+
+	/* If there's data, queue data TRBs */
+	field = 0;
+	if (urb->transfer_buffer_length > 0) {
+		if (setup->bRequestType & USB_DIR_IN)
+			field |= TRB_DIR_IN;
+		queue_trb(xhci, ep_ring, false,
+				lower_32_bits(urb->transfer_dma),
+				upper_32_bits(urb->transfer_dma),
+				TRB_LEN(urb->transfer_buffer_length) | TRB_INTR_TARGET(0),
+				/* Event on short tx */
+				field | TRB_ISP | TRB_TYPE(TRB_DATA) | ep_ring->cycle_state);
+	}
+
+	/* Save the pointer to the status TRB, which is the last TRB in this TD */
+	td->last_trb = ep_ring->enqueue;
+
+	/* Queue status TRB - see Table 7 and sections 4.11.2.2 and 6.4.1.2.3 */
+	/* If the device sent data, the status stage is an OUT transfer */
+	if (urb->transfer_buffer_length > 0 && setup->bRequestType & USB_DIR_IN)
+		field = 0;
+	else
+		field = TRB_DIR_IN;
+	queue_trb(xhci, ep_ring, false,
+			0,
+			0,
+			TRB_INTR_TARGET(0),
+			/* Event on completion */
+			field | TRB_IOC | TRB_TYPE(TRB_STATUS) | ep_ring->cycle_state);
+
+	giveback_first_trb(xhci, slot_id, ep_index, start_cycle, start_trb, td);
+	return 0;
+}
+
+/****		Command Ring Operations		****/
+
+/* Generic function for queueing a command TRB on the command ring */
+static int queue_command(struct xhci_hcd *xhci, u32 field1, u32 field2, u32 field3, u32 field4)
+{
+	if (!room_on_ring(xhci, xhci->cmd_ring, 1)) {
+		if (!in_interrupt())
+			xhci_err(xhci, "ERR: No room for command on command ring\n");
+		return -ENOMEM;
+	}
+	queue_trb(xhci, xhci->cmd_ring, false, field1, field2, field3,
+			field4 | xhci->cmd_ring->cycle_state);
+	return 0;
+}
+
+/* Queue a no-op command on the command ring */
+static int queue_cmd_noop(struct xhci_hcd *xhci)
+{
+	return queue_command(xhci, 0, 0, 0, TRB_TYPE(TRB_CMD_NOOP));
+}
+
+/*
+ * Place a no-op command on the command ring to test the command and
+ * event ring.
+ */
+void *xhci_setup_one_noop(struct xhci_hcd *xhci)
+{
+	if (queue_cmd_noop(xhci) < 0)
+		return NULL;
+	xhci->noops_submitted++;
+	return xhci_ring_cmd_db;
+}
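+
+/*
+ * Hedged usage sketch (the real caller lives outside this file and is not
+ * shown here): the pointer returned above is meant to be invoked once the
+ * caller is ready to start command processing, e.g.
+ *
+ *	void (*doorbell)(struct xhci_hcd *) = xhci_setup_one_noop(xhci);
+ *	...
+ *	if (doorbell)
+ *		(*doorbell)(xhci);
+ */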
+
+/* Queue a slot enable or disable request on the command ring */
+int xhci_queue_slot_control(struct xhci_hcd *xhci, u32 trb_type, u32 slot_id)
+{
+	return queue_command(xhci, 0, 0, 0,
+			TRB_TYPE(trb_type) | SLOT_ID_FOR_TRB(slot_id));
+}
+
+/* Queue an address device command TRB */
+int xhci_queue_address_device(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
+		u32 slot_id)
+{
+	return queue_command(xhci, in_ctx_ptr, 0, 0,
+			TRB_TYPE(TRB_ADDR_DEV) | SLOT_ID_FOR_TRB(slot_id));
+}
+
+/* Queue a configure endpoint command TRB */
+int xhci_queue_configure_endpoint(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
+		u32 slot_id)
+{
+	return queue_command(xhci, in_ctx_ptr, 0, 0,
+			TRB_TYPE(TRB_CONFIG_EP) | SLOT_ID_FOR_TRB(slot_id));
+}
+
+int xhci_queue_stop_endpoint(struct xhci_hcd *xhci, int slot_id,
+		unsigned int ep_index)
+{
+	u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id);
+	u32 trb_ep_index = EP_ID_FOR_TRB(ep_index);
+	u32 type = TRB_TYPE(TRB_STOP_RING);
+
+	return queue_command(xhci, 0, 0, 0,
+			trb_slot_id | trb_ep_index | type);
+}
+
+/* Set Transfer Ring Dequeue Pointer command.
+ * This should not be used for endpoints that have streams enabled.
+ */
+static int queue_set_tr_deq(struct xhci_hcd *xhci, int slot_id,
+		unsigned int ep_index, struct xhci_segment *deq_seg,
+		union xhci_trb *deq_ptr, u32 cycle_state)
+{
+	dma_addr_t addr;
+	u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id);
+	u32 trb_ep_index = EP_ID_FOR_TRB(ep_index);
+	u32 type = TRB_TYPE(TRB_SET_DEQ);
+
+	addr = xhci_trb_virt_to_dma(deq_seg, deq_ptr);
+	if (addr == 0) {
+		xhci_warn(xhci, "WARN Cannot submit Set TR Deq Ptr\n");
+		xhci_warn(xhci, "WARN deq seg = %p, deq pt = %p\n",
+				deq_seg, deq_ptr);
+	}
+	return queue_command(xhci, (u32) addr | cycle_state, 0, 0,
+			trb_slot_id | trb_ep_index | type);
+}
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
new file mode 100644
index 0000000..8936eeb
--- /dev/null
+++ b/drivers/usb/host/xhci.h
@@ -0,0 +1,1157 @@
+/*
+ * xHCI host controller driver
+ *
+ * Copyright (C) 2008 Intel Corp.
+ *
+ * Author: Sarah Sharp
+ * Some code borrowed from the Linux EHCI driver.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef __LINUX_XHCI_HCD_H
+#define __LINUX_XHCI_HCD_H
+
+#include <linux/usb.h>
+#include <linux/timer.h>
+
+#include "../core/hcd.h"
+/* Code sharing between pci-quirks and xhci hcd */
+#include	"xhci-ext-caps.h"
+
+/* xHCI PCI Configuration Registers */
+#define XHCI_SBRN_OFFSET	(0x60)
+
+/* Max number of USB devices for any host controller - limit in section 6.1 */
+#define MAX_HC_SLOTS		256
+/* Section 5.3.3 - MaxPorts */
+#define MAX_HC_PORTS		127
+
+/*
+ * xHCI register interface.
+ * This corresponds to the eXtensible Host Controller Interface (xHCI)
+ * Revision 0.95 specification
+ *
+ * Registers should always be accessed with double word or quad word accesses.
+ *
+ * Some xHCI implementations may support 64-bit address pointers.  Registers
+ * with 64-bit address pointers should be written to with dword accesses by
+ * writing the low dword first (ptr[0]), then the high dword (ptr[1]) second.
+ * xHCI implementations that do not support 64-bit address pointers will ignore
+ * the high dword, and write order is irrelevant.
+ */
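+
+/*
+ * Illustrative sketch only, not driver code: writing such a 64-bit register
+ * (e.g. the command ring pointer) with two 32-bit accesses, low dword first.
+ * lower_32_bits()/upper_32_bits() are the generic helpers from
+ * <linux/kernel.h>; "ptr" is assumed to point at the register pair.
+ *
+ *	writel(lower_32_bits(val), &ptr[0]);
+ *	writel(upper_32_bits(val), &ptr[1]);
+ */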
+
+/**
+ * struct xhci_cap_regs - xHCI Host Controller Capability Registers.
+ * @hc_capbase:		length of the capabilities register and HC version number
+ * @hcs_params1:	HCSPARAMS1 - Structural Parameters 1
+ * @hcs_params2:	HCSPARAMS2 - Structural Parameters 2
+ * @hcs_params3:	HCSPARAMS3 - Structural Parameters 3
+ * @hcc_params:		HCCPARAMS - Capability Parameters
+ * @db_off:		DBOFF - Doorbell array offset
+ * @run_regs_off:	RTSOFF - Runtime register space offset
+ */
+struct xhci_cap_regs {
+	u32	hc_capbase;
+	u32	hcs_params1;
+	u32	hcs_params2;
+	u32	hcs_params3;
+	u32	hcc_params;
+	u32	db_off;
+	u32	run_regs_off;
+	/* Reserved up to (CAPLENGTH - 0x1C) */
+};
+
+/* hc_capbase bitmasks */
+/* bits 7:0 - how long is the Capabilities register */
+#define HC_LENGTH(p)		XHCI_HC_LENGTH(p)
+/* bits 31:16	*/
+#define HC_VERSION(p)		(((p) >> 16) & 0xffff)
+
+/* HCSPARAMS1 - hcs_params1 - bitmasks */
+/* bits 0:7, Max Device Slots */
+#define HCS_MAX_SLOTS(p)	(((p) >> 0) & 0xff)
+#define HCS_SLOTS_MASK		0xff
+/* bits 8:18, Max Interrupters */
+#define HCS_MAX_INTRS(p)	(((p) >> 8) & 0x7ff)
+/* bits 24:31, Max Ports - max value is 0x7F = 127 ports */
+#define HCS_MAX_PORTS(p)	(((p) >> 24) & 0x7f)
+
+/* HCSPARAMS2 - hcs_params2 - bitmasks */
+/* bits 0:3, frames or uframes that SW needs to queue transactions
+ * ahead of the HW to meet periodic deadlines */
+#define HCS_IST(p)		(((p) >> 0) & 0xf)
+/* bits 4:7, max number of Event Ring segments */
+#define HCS_ERST_MAX(p)		(((p) >> 4) & 0xf)
+/* bit 26 Scratchpad restore - for save/restore HW state - not used yet */
+/* bits 27:31 number of Scratchpad buffers SW must allocate for the HW */
+
+/* HCSPARAMS3 - hcs_params3 - bitmasks */
+/* bits 0:7, Max U1 to U0 latency for the roothub ports */
+#define HCS_U1_LATENCY(p)	(((p) >> 0) & 0xff)
+/* bits 16:31, Max U2 to U0 latency for the roothub ports */
+#define HCS_U2_LATENCY(p)	(((p) >> 16) & 0xffff)
+
+/* HCCPARAMS - hcc_params - bitmasks */
+/* true: HC can use 64-bit address pointers */
+#define HCC_64BIT_ADDR(p)	((p) & (1 << 0))
+/* true: HC can do bandwidth negotiation */
+#define HCC_BANDWIDTH_NEG(p)	((p) & (1 << 1))
+/* true: HC uses 64-byte Device Context structures
+ * FIXME 64-byte context structures aren't supported yet.
+ */
+#define HCC_64BYTE_CONTEXT(p)	((p) & (1 << 2))
+/* true: HC has port power switches */
+#define HCC_PPC(p)		((p) & (1 << 3))
+/* true: HC has port indicators */
+#define HCS_INDICATOR(p)	((p) & (1 << 4))
+/* true: HC has Light HC Reset Capability */
+#define HCC_LIGHT_RESET(p)	((p) & (1 << 5))
+/* true: HC supports latency tolerance messaging */
+#define HCC_LTC(p)		((p) & (1 << 6))
+/* true: no secondary Stream ID Support */
+#define HCC_NSS(p)		((p) & (1 << 7))
+/* Max size for Primary Stream Arrays - 2^(n+1), where n is bits 12:15 */
+#define HCC_MAX_PSA(p)		(1 << ((((p) >> 12) & 0xf) + 1))
+/* Extended Capabilities pointer from PCI base - section 5.3.6 */
+#define HCC_EXT_CAPS(p)		XHCI_HCC_EXT_CAPS(p)
+
+/* db_off bitmask - bits 0:1 reserved */
+#define	DBOFF_MASK	(~0x3)
+
+/* run_regs_off bitmask - bits 0:4 reserved */
+#define	RTSOFF_MASK	(~0x1f)
+
+
+/* Number of registers per port */
+#define	NUM_PORT_REGS	4
+
+/**
+ * struct xhci_op_regs - xHCI Host Controller Operational Registers.
+ * @command:		USBCMD - xHC command register
+ * @status:		USBSTS - xHC status register
+ * @page_size:		This indicates the page size that the host controller
+ * 			supports.  If bit n is set, the HC supports a page size
+ * 			of 2^(n+12), up to a 128MB page size.
+ * 			4K is the minimum page size.
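+ * @dev_notification:	DNCTRL - Device Notification Control register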
+ * @cmd_ring:		CRP - 64-bit Command Ring Pointer
+ * @dcbaa_ptr:		DCBAAP - 64-bit Device Context Base Address Array Pointer
+ * @config_reg:		CONFIG - Configure Register
+ * @port_status_base:	PORTSCn - base address for Port Status and Control
+ * 			Each port has a Port Status and Control register,
+ * 			followed by a Port Power Management Status and Control
+ * 			register, a Port Link Info register, and a reserved
+ * 			register.
+ * @port_power_base:	PORTPMSCn - base address for
+ * 			Port Power Management Status and Control
+ * @port_link_base:	PORTLIn - base address for Port Link Info (current
+ * 			Link PM state and control) for USB 2.1 and USB 3.0
+ * 			devices.
+ */
+struct xhci_op_regs {
+	u32	command;
+	u32	status;
+	u32	page_size;
+	u32	reserved1;
+	u32	reserved2;
+	u32	dev_notification;
+	u32	cmd_ring[2];
+	/* rsvd: offset 0x20-2F */
+	u32	reserved3[4];
+	u32	dcbaa_ptr[2];
+	u32	config_reg;
+	/* rsvd: offset 0x3C-3FF */
+	u32	reserved4[241];
+	/* port 1 registers, which serve as a base address for other ports */
+	u32	port_status_base;
+	u32	port_power_base;
+	u32	port_link_base;
+	u32	reserved5;
+	/* registers for ports 2-255 */
+	u32	reserved6[NUM_PORT_REGS*254];
+};
+
+/* USBCMD - USB command - command bitmasks */
+/* start/stop HC execution - do not write unless HC is halted*/
+#define CMD_RUN		XHCI_CMD_RUN
+/* Reset HC - resets internal HC state machine and all registers (except
+ * PCI config regs).  HC does NOT drive a USB reset on the downstream ports.
+ * The xHCI driver must reinitialize the xHC after setting this bit.
+ */
+#define CMD_RESET	(1 << 1)
+/* Event Interrupt Enable - a '1' allows interrupts from the host controller */
+#define CMD_EIE		XHCI_CMD_EIE
+/* Host System Error Interrupt Enable - get out-of-band signal for HC errors */
+#define CMD_HSEIE	XHCI_CMD_HSEIE
+/* bits 4:6 are reserved (and should be preserved on writes). */
+/* light reset (port status stays unchanged) - reset completed when this is 0 */
+#define CMD_LRESET	(1 << 7)
+/* FIXME: ignoring host controller save/restore state for now. */
+#define CMD_CSS		(1 << 8)
+#define CMD_CRS		(1 << 9)
+/* Enable Wrap Event - '1' means xHC generates an event when MFINDEX wraps. */
+#define CMD_EWE		XHCI_CMD_EWE
+/* MFINDEX power management - '1' means xHC can stop MFINDEX counter if all root
+ * hubs are in U3 (selective suspend), disconnect, disabled, or powered-off.
+ * '0' means the xHC can power it off if all ports are in the disconnect,
+ * disabled, or powered-off state.
+ */
+#define CMD_PM_INDEX	(1 << 11)
+/* bits 12:31 are reserved (and should be preserved on writes). */
+
+/* USBSTS - USB status - status bitmasks */
+/* HC not running - set to 1 when run/stop bit is cleared. */
+#define STS_HALT	XHCI_STS_HALT
+/* serious error, e.g. PCI parity error.  The HC will clear the run/stop bit. */
+#define STS_FATAL	(1 << 2)
+/* event interrupt - clear this prior to clearing any IP flags in IR set*/
+#define STS_EINT	(1 << 3)
+/* port change detect */
+#define STS_PORT	(1 << 4)
+/* bits 5:7 reserved and zeroed */
+/* save state status - '1' means xHC is saving state */
+#define STS_SAVE	(1 << 8)
+/* restore state status - '1' means xHC is restoring state */
+#define STS_RESTORE	(1 << 9)
+/* true: save or restore error */
+#define STS_SRE		(1 << 10)
+/* true: Controller Not Ready to accept doorbell or op reg writes after reset */
+#define STS_CNR		XHCI_STS_CNR
+/* true: internal Host Controller Error - SW needs to reset and reinitialize */
+#define STS_HCE		(1 << 12)
+/* bits 13:31 reserved and should be preserved */
+
+/*
+ * DNCTRL - Device Notification Control Register - dev_notification bitmasks
+ * Generate a device notification event when the HC sees a transaction with a
+ * notification type that matches a bit set in this bit field.
+ */
+#define	DEV_NOTE_MASK		(0xffff)
+#define ENABLE_DEV_NOTE(x)	(1 << (x))
+/* Most of the device notification types should only be used for debug.
+ * SW does need to pay attention to function wake notifications.
+ */
+#define	DEV_NOTE_FWAKE		ENABLE_DEV_NOTE(1)
+
+/* CRCR - Command Ring Control Register - cmd_ring bitmasks */
+/* bit 0 is the command ring cycle state */
+/* stop ring operation after completion of the currently executing command */
+#define CMD_RING_PAUSE		(1 << 1)
+/* stop ring immediately - abort the currently executing command */
+#define CMD_RING_ABORT		(1 << 2)
+/* true: command ring is running */
+#define CMD_RING_RUNNING	(1 << 3)
+/* bits 4:5 reserved and should be preserved */
+/* Command Ring pointer - bit mask for the lower 32 bits. */
+#define CMD_RING_ADDR_MASK	(0xffffffc0)
+
+/* CONFIG - Configure Register - config_reg bitmasks */
+/* bits 0:7 - maximum number of device slots enabled (NumSlotsEn) */
+#define MAX_DEVS(p)	((p) & 0xff)
+/* bits 8:31 - reserved and should be preserved */
+
+/* PORTSC - Port Status and Control Register - port_status_base bitmasks */
+/* true: device connected */
+#define PORT_CONNECT	(1 << 0)
+/* true: port enabled */
+#define PORT_PE		(1 << 1)
+/* bit 2 reserved and zeroed */
+/* true: port has an over-current condition */
+#define PORT_OC		(1 << 3)
+/* true: port reset signaling asserted */
+#define PORT_RESET	(1 << 4)
+/* Port Link State - bits 5:8
+ * A read gives the current link PM state of the port,
+ * a write with Link State Write Strobe set sets the link state.
+ */
+/* true: port has power (see HCC_PPC) */
+#define PORT_POWER	(1 << 9)
+/* bits 10:13 indicate device speed:
+ * 0 - undefined speed - port hasn't been initialized by a reset yet
+ * 1 - full speed
+ * 2 - low speed
+ * 3 - high speed
+ * 4 - super speed
+ * 5-15 reserved
+ */
+#define DEV_SPEED_MASK		(0xf << 10)
+#define	XDEV_FS			(0x1 << 10)
+#define	XDEV_LS			(0x2 << 10)
+#define	XDEV_HS			(0x3 << 10)
+#define	XDEV_SS			(0x4 << 10)
+#define DEV_UNDEFSPEED(p)	(((p) & DEV_SPEED_MASK) == (0x0<<10))
+#define DEV_FULLSPEED(p)	(((p) & DEV_SPEED_MASK) == XDEV_FS)
+#define DEV_LOWSPEED(p)		(((p) & DEV_SPEED_MASK) == XDEV_LS)
+#define DEV_HIGHSPEED(p)	(((p) & DEV_SPEED_MASK) == XDEV_HS)
+#define DEV_SUPERSPEED(p)	(((p) & DEV_SPEED_MASK) == XDEV_SS)
+/* Bits 20:23 in the Slot Context are the speed for the device */
+#define	SLOT_SPEED_FS		(XDEV_FS << 10)
+#define	SLOT_SPEED_LS		(XDEV_LS << 10)
+#define	SLOT_SPEED_HS		(XDEV_HS << 10)
+#define	SLOT_SPEED_SS		(XDEV_SS << 10)
+/* Port Indicator Control */
+#define PORT_LED_OFF	(0 << 14)
+#define PORT_LED_AMBER	(1 << 14)
+#define PORT_LED_GREEN	(2 << 14)
+#define PORT_LED_MASK	(3 << 14)
+/* Port Link State Write Strobe - set this when changing link state */
+#define PORT_LINK_STROBE	(1 << 16)
+/* true: connect status change */
+#define PORT_CSC	(1 << 17)
+/* true: port enable change */
+#define PORT_PEC	(1 << 18)
+/* true: warm reset for a USB 3.0 device is done.  A "hot" reset puts the port
+ * into an enabled state, and the device into the default state.  A "warm" reset
+ * also resets the link, forcing the device through the link training sequence.
+ * SW can also look at the Port Reset register to see when warm reset is done.
+ */
+#define PORT_WRC	(1 << 19)
+/* true: over-current change */
+#define PORT_OCC	(1 << 20)
+/* true: reset change - 1 to 0 transition of PORT_RESET */
+#define PORT_RC		(1 << 21)
+/* port link status change - set on some port link state transitions:
+ *  Transition				Reason
+ *  ------------------------------------------------------------------------------
+ *  - U3 to Resume			Wakeup signaling from a device
+ *  - Resume to Recovery to U0		USB 3.0 device resume
+ *  - Resume to U0			USB 2.0 device resume
+ *  - U3 to Recovery to U0		Software resume of USB 3.0 device complete
+ *  - U3 to U0				Software resume of USB 2.0 device complete
+ *  - U2 to U0				L1 resume of USB 2.1 device complete
+ *  - U0 to U0 (???)			L1 entry rejection by USB 2.1 device
+ *  - U0 to disabled			L1 entry error with USB 2.1 device
+ *  - Any state to inactive		Error on USB 3.0 port
+ */
+#define PORT_PLC	(1 << 22)
+/* port configure error change - port failed to configure its link partner */
+#define PORT_CEC	(1 << 23)
+/* bit 24 reserved */
+/* wake on connect (enable) */
+#define PORT_WKCONN_E	(1 << 25)
+/* wake on disconnect (enable) */
+#define PORT_WKDISC_E	(1 << 26)
+/* wake on over-current (enable) */
+#define PORT_WKOC_E	(1 << 27)
+/* bits 28:29 reserved */
+/* true: device is removable - for USB 3.0 roothub emulation */
+#define PORT_DEV_REMOVE	(1 << 30)
+/* Initiate a warm port reset - complete when PORT_WRC is '1' */
+#define PORT_WR		(1 << 31)
+
+/* Port Power Management Status and Control - port_power_base bitmasks */
+/* Inactivity timer value for transitions into U1, in microseconds.
+ * Timeout can be up to 127us.  0xFF means an infinite timeout.
+ */
+#define PORT_U1_TIMEOUT(p)	((p) & 0xff)
+/* Inactivity timer value for transitions into U2 */
+#define PORT_U2_TIMEOUT(p)	(((p) & 0xff) << 8)
+/* Bits 24:31 for port testing */
+
+
+/**
+ * struct xhci_intr_reg - Interrupt Register Set
+ * @irq_pending:	IMAN - Interrupt Management Register.  Used to enable
+ *			interrupts and check for pending interrupts.
+ * @irq_control:	IMOD - Interrupt Moderation Register.
+ * 			Used to throttle interrupts.
+ * @erst_size:		Number of segments in the Event Ring Segment Table (ERST).
+ * @erst_base:		ERST base address.
+ * @erst_dequeue:	Event ring dequeue pointer.
+ *
+ * Each interrupter (defined by a MSI-X vector) has an event ring and an Event
+ * Ring Segment Table (ERST) associated with it.  The event ring is comprised of
+ * multiple segments of the same size.  The HC places events on the ring and
+ * "updates the Cycle bit in the TRBs to indicate to software the current
+ * position of the Enqueue Pointer." The HCD (Linux) processes those events and
+ * updates the dequeue pointer.
+ */
+struct xhci_intr_reg {
+	u32	irq_pending;
+	u32	irq_control;
+	u32	erst_size;
+	u32	rsvd;
+	u32	erst_base[2];
+	u32	erst_dequeue[2];
+};
+
+/* irq_pending bitmasks */
+#define	ER_IRQ_PENDING(p)	((p) & 0x1)
+/* bits 2:31 need to be preserved */
+/* THIS IS BUGGY - FIXME - IP IS WRITE 1 TO CLEAR */
+#define	ER_IRQ_CLEAR(p)		((p) & 0xfffffffe)
+#define	ER_IRQ_ENABLE(p)	((ER_IRQ_CLEAR(p)) | 0x2)
+#define	ER_IRQ_DISABLE(p)	((ER_IRQ_CLEAR(p)) & ~(0x2))
+
+/* irq_control bitmasks */
+/* Minimum interval between interrupts (in 250ns intervals).  The interval
+ * between interrupts will be longer if there are no events on the event ring.
+ * Default is 4000 (1 ms).
+ */
+#define ER_IRQ_INTERVAL_MASK	(0xffff)
+/* Counter used to count down the time to the next interrupt - HW use only */
+#define ER_IRQ_COUNTER_MASK	(0xffff << 16)
+
+/* erst_size bitmasks */
+/* Preserve bits 16:31 of erst_size */
+#define	ERST_SIZE_MASK		(0xffff << 16)
+
+/* erst_dequeue bitmasks */
+/* Dequeue ERST Segment Index (DESI) - Segment number (or alias)
+ * where the current dequeue pointer lies.  This is an optional HW hint.
+ */
+#define ERST_DESI_MASK		(0x7)
+/* Event Handler Busy (EHB) - is the event ring scheduled to be serviced by
+ * a work queue (or delayed service routine)?
+ */
+#define ERST_EHB		(1 << 3)
+#define ERST_PTR_MASK		(0xf)
+
+/**
+ * struct xhci_run_regs
+ * @microframe_index:
+ * 		MFINDEX - current microframe number
+ *
+ * Section 5.5 Host Controller Runtime Registers:
+ * "Software should read and write these registers using only Dword (32 bit)
+ * or larger accesses"
+ */
+struct xhci_run_regs {
+	u32			microframe_index;
+	u32			rsvd[7];
+	struct xhci_intr_reg	ir_set[128];
+};
+
+/**
+ * struct xhci_doorbell_array
+ *
+ * Section 5.6
+ */
+struct xhci_doorbell_array {
+	u32	doorbell[256];
+};
+
+#define	DB_TARGET_MASK		0xFFFFFF00
+#define	DB_STREAM_ID_MASK	0x0000FFFF
+#define	DB_TARGET_HOST		0x0
+#define	DB_STREAM_ID_HOST	0x0
+#define	DB_MASK			(0xff << 8)
+
+/* Endpoint Target - bits 0:7 */
+#define EPI_TO_DB(p)		(((p) + 1) & 0xff)
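+/*
+ * Sketch only, not driver code: ringing the doorbell for one endpoint of a
+ * device slot using the definitions above ("xhci", "slot_id" and "ep_index"
+ * are assumed to be in scope):
+ *
+ *	u32 field = xhci_readl(xhci, &xhci->dba->doorbell[slot_id]) & DB_MASK;
+ *	xhci_writel(xhci, field | EPI_TO_DB(ep_index),
+ *			&xhci->dba->doorbell[slot_id]);
+ */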
+
+
+/**
+ * struct xhci_slot_ctx
+ * @dev_info:	Route string, device speed, hub info, and last valid endpoint
+ * @dev_info2:	Max exit latency for device number, root hub port number
+ * @tt_info:	tt_info is used to construct split transaction tokens
+ * @dev_state:	slot state and device address
+ *
+ * Slot Context - section 6.2.1.1.  This assumes the HC uses 32-byte context
+ * structures.  If the HC uses 64-byte contexts, there is an additional 32 bytes
+ * reserved at the end of the slot context for HC internal use.
+ */
+struct xhci_slot_ctx {
+	u32	dev_info;
+	u32	dev_info2;
+	u32	tt_info;
+	u32	dev_state;
+	/* offset 0x10 to 0x1f reserved for HC internal use */
+	u32	reserved[4];
+};
+
+/* dev_info bitmasks */
+/* Route String - 0:19 */
+#define ROUTE_STRING_MASK	(0xfffff)
+/* Device speed - values defined by PORTSC Device Speed field - 20:23 */
+#define DEV_SPEED	(0xf << 20)
+/* bit 24 reserved */
+/* Is this LS/FS device connected through a HS hub? - bit 25 */
+#define DEV_MTT		(0x1 << 25)
+/* Set if the device is a hub - bit 26 */
+#define DEV_HUB		(0x1 << 26)
+/* Index of the last valid endpoint context in this device context - 27:31 */
+#define LAST_CTX_MASK	(0x1f << 27)
+#define LAST_CTX(p)	((p) << 27)
+#define LAST_CTX_TO_EP_NUM(p)	(((p) >> 27) - 1)
+#define SLOT_FLAG	(1 << 0)
+#define EP0_FLAG	(1 << 1)
+
+/* dev_info2 bitmasks */
+/* Max Exit Latency (ms) - worst case time to wake up all links in dev path */
+#define MAX_EXIT	(0xffff)
+/* Root hub port number that is needed to access the USB device */
+#define ROOT_HUB_PORT(p)	(((p) & 0xff) << 16)
+
+/* tt_info bitmasks */
+/*
+ * TT Hub Slot ID - for low or full speed devices attached to a high-speed hub
+ * The Slot ID of the hub that isolates the high speed signaling from
+ * this low or full-speed device.  '0' if attached to root hub port.
+ */
+#define TT_SLOT		(0xff)
+/*
+ * The number of the downstream facing port of the high-speed hub
+ * '0' if the device is not low or full speed.
+ */
+#define TT_PORT		(0xff << 8)
+
+/* dev_state bitmasks */
+/* USB device address - assigned by the HC */
+#define DEV_ADDR_MASK	(0xff)
+/* bits 8:26 reserved */
+/* Slot state */
+#define SLOT_STATE	(0x1f << 27)
+#define GET_SLOT_STATE(p)	(((p) & (0x1f << 27)) >> 27)
+
+
+/**
+ * struct xhci_ep_ctx
+ * @ep_info:	endpoint state, streams, mult, and interval information.
+ * @ep_info2:	information on endpoint type, max packet size, max burst size,
+ * 		error count, and whether the HC will force an event for all
+ * 		transactions.
+ * @deq:	64-bit ring dequeue pointer address.  If the endpoint only
+ * 		defines one stream, this points to the endpoint transfer ring.
+ * 		Otherwise, it points to a stream context array, which has a
+ * 		ring pointer for each flow.
+ * @tx_info:
+ * 		Average TRB lengths for the endpoint ring and
+ * 		max payload within an Endpoint Service Interval Time (ESIT).
+ *
+ * Endpoint Context - section 6.2.1.2.  This assumes the HC uses 32-byte context
+ * structures.  If the HC uses 64-byte contexts, there is an additional 32 bytes
+ * reserved at the end of the endpoint context for HC internal use.
+ */
+struct xhci_ep_ctx {
+	u32	ep_info;
+	u32	ep_info2;
+	u32	deq[2];
+	u32	tx_info;
+	/* offset 0x14 - 0x1f reserved for HC internal use */
+	u32	reserved[3];
+};
+
+/* ep_info bitmasks */
+/*
+ * Endpoint State - bits 0:2
+ * 0 - disabled
+ * 1 - running
+ * 2 - halted due to halt condition - ok to manipulate endpoint ring
+ * 3 - stopped
+ * 4 - TRB error
+ * 5-7 - reserved
+ */
+#define EP_STATE_MASK		(0xf)
+#define EP_STATE_DISABLED	0
+#define EP_STATE_RUNNING	1
+#define EP_STATE_HALTED		2
+#define EP_STATE_STOPPED	3
+#define EP_STATE_ERROR		4
+/* Mult - Max number of bursts within an interval, in EP companion desc. */
+#define EP_MULT(p)		(((p) & 0x3) << 8)
+/* bits 10:14 are Max Primary Streams */
+/* bit 15 is Linear Stream Array */
+/* Interval - period between requests to an endpoint - 125us increments. */
+#define EP_INTERVAL(p)		(((p) & 0xff) << 16)
+
+/* ep_info2 bitmasks */
+/*
+ * Force Event - generate transfer events for all TRBs for this endpoint
+ * This will tell the HC to ignore the IOC and ISP flags (for debugging only).
+ */
+#define	FORCE_EVENT	(0x1)
+#define ERROR_COUNT(p)	(((p) & 0x3) << 1)
+#define EP_TYPE(p)	((p) << 3)
+#define ISOC_OUT_EP	1
+#define BULK_OUT_EP	2
+#define INT_OUT_EP	3
+#define CTRL_EP		4
+#define ISOC_IN_EP	5
+#define BULK_IN_EP	6
+#define INT_IN_EP	7
+/* bit 6 reserved */
+/* bit 7 is Host Initiate Disable - for disabling stream selection */
+#define MAX_BURST(p)	(((p)&0xff) << 8)
+#define MAX_PACKET(p)	(((p)&0xffff) << 16)
+
+
+/**
+ * struct xhci_device_control
+ * Input/Output context; see section 6.2.5.
+ *
+ * @drop_flags:	set the bit of the endpoint context you want to disable
+ * @add_flags:	set the bit of the endpoint context you want to enable
+ */
+struct xhci_device_control {
+	u32	drop_flags;
+	u32	add_flags;
+	u32	rsvd[6];
+	struct xhci_slot_ctx	slot;
+	struct xhci_ep_ctx	ep[31];
+};
+
+/* drop context bitmasks */
+#define	DROP_EP(x)	(0x1 << (x))
+/* add context bitmasks */
+#define	ADD_EP(x)	(0x1 << (x))
+
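+/*
+ * Sketch only, not driver code: one possible way to fill in the input
+ * context flags for a Configure Endpoint command, adding endpoint context 3
+ * and dropping endpoint context 2 ("in_ctx" is assumed to point at a
+ * struct xhci_device_control):
+ *
+ *	in_ctx->add_flags = SLOT_FLAG | ADD_EP(3);
+ *	in_ctx->drop_flags = DROP_EP(2);
+ */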
+
+struct xhci_virt_device {
+	/*
+	 * Commands to the hardware are passed an "input context" that
+	 * tells the hardware what to change in its data structures.
+	 * The hardware will return changes in an "output context" that
+	 * software must allocate for the hardware.  We need to keep
+	 * track of input and output contexts separately because
+	 * these commands might fail and we don't trust the hardware.
+	 */
+	struct xhci_device_control	*out_ctx;
+	dma_addr_t			out_ctx_dma;
+	/* Used for addressing devices and configuration changes */
+	struct xhci_device_control	*in_ctx;
+	dma_addr_t			in_ctx_dma;
+	/* FIXME when stream support is added */
+	struct xhci_ring		*ep_rings[31];
+	/* Temporary storage in case the configure endpoint command fails and we
+	 * have to restore the device state to the previous state
+	 */
+	struct xhci_ring		*new_ep_rings[31];
+	struct completion		cmd_completion;
+	/* Status of the last command issued for this device */
+	u32				cmd_status;
+};
+
+
+/**
+ * struct xhci_device_context_array
+ * @dev_context_ptrs:	array of 64-bit DMA addresses for device contexts
+ */
+struct xhci_device_context_array {
+	/* 64-bit device addresses; we only write 32-bit addresses */
+	u32			dev_context_ptrs[2*MAX_HC_SLOTS];
+	/* private xHCD pointers */
+	dma_addr_t	dma;
+};
+/* TODO: write function to set the 64-bit device DMA address */
+/*
+ * TODO: change this to be dynamically sized at HC mem init time since the HC
+ * might not be able to handle the maximum number of devices possible.
+ */
+
+
+struct xhci_stream_ctx {
+	/* 64-bit stream ring address, cycle state, and stream type */
+	u32	stream_ring[2];
+	/* offset 0x8 - 0xf reserved for HC internal use */
+	u32	reserved[2];
+};
+
+
+struct xhci_transfer_event {
+	/* 64-bit buffer address, or immediate data */
+	u32	buffer[2];
+	u32	transfer_len;
+	/* This field is interpreted differently based on the type of TRB */
+	u32	flags;
+};
+
+/** Transfer Event bit fields **/
+#define	TRB_TO_EP_ID(p)	(((p) >> 16) & 0x1f)
+
+/* Completion Code - only applicable for some types of TRBs */
+#define	COMP_CODE_MASK		(0xff << 24)
+#define GET_COMP_CODE(p)	(((p) & COMP_CODE_MASK) >> 24)
+#define COMP_SUCCESS	1
+/* Data Buffer Error */
+#define COMP_DB_ERR	2
+/* Babble Detected Error */
+#define COMP_BABBLE	3
+/* USB Transaction Error */
+#define COMP_TX_ERR	4
+/* TRB Error - some TRB field is invalid */
+#define COMP_TRB_ERR	5
+/* Stall Error - USB device is stalled */
+#define COMP_STALL	6
+/* Resource Error - HC doesn't have memory for that device configuration */
+#define COMP_ENOMEM	7
+/* Bandwidth Error - not enough room in schedule for this dev config */
+#define COMP_BW_ERR	8
+/* No Slots Available Error - HC ran out of device slots */
+#define COMP_ENOSLOTS	9
+/* Invalid Stream Type Error */
+#define COMP_STREAM_ERR	10
+/* Slot Not Enabled Error - doorbell rung for disabled device slot */
+#define COMP_EBADSLT	11
+/* Endpoint Not Enabled Error */
+#define COMP_EBADEP	12
+/* Short Packet */
+#define COMP_SHORT_TX	13
+/* Ring Underrun - doorbell rung for an empty isoc OUT ep ring */
+#define COMP_UNDERRUN	14
+/* Ring Overrun - isoc IN ep ring is empty when ep is scheduled to RX */
+#define COMP_OVERRUN	15
+/* Virtual Function Event Ring Full Error */
+#define COMP_VF_FULL	16
+/* Parameter Error - Context parameter is invalid */
+#define COMP_EINVAL	17
+/* Bandwidth Overrun Error - isoc ep exceeded its allocated bandwidth */
+#define COMP_BW_OVER	18
+/* Context State Error - illegal context state transition requested */
+#define COMP_CTX_STATE	19
+/* No Ping Response Error - HC didn't get PING_RESPONSE in time to TX */
+#define COMP_PING_ERR	20
+/* Event Ring is full */
+#define COMP_ER_FULL	21
+/* Missed Service Error - HC couldn't service an isoc ep within interval */
+#define COMP_MISSED_INT	23
+/* Successfully stopped command ring */
+#define COMP_CMD_STOP	24
+/* Successfully aborted current command and stopped command ring */
+#define COMP_CMD_ABORT	25
+/* Stopped - transfer was terminated by a stop endpoint command */
+#define COMP_STOP	26
+/* Same as COMP_STOP, but the transferred length in the event is invalid */
+#define COMP_STOP_INVAL	27
+/* Control Abort Error - Debug Capability - control pipe aborted */
+#define COMP_DBG_ABORT	28
+/* TRB type 29 and 30 reserved */
+/* Isoc Buffer Overrun - an isoc IN ep sent more data than could fit in TD */
+#define COMP_BUFF_OVER	31
+/* Event Lost Error - xHC has an "internal event overrun condition" */
+#define COMP_ISSUES	32
+/* Undefined Error - reported when other error codes don't apply */
+#define COMP_UNKNOWN	33
+/* Invalid Stream ID Error */
+#define COMP_STRID_ERR	34
+/* Secondary Bandwidth Error - may be returned by a Configure Endpoint cmd */
+/* FIXME - check for this */
+#define COMP_2ND_BW_ERR	35
+/* Split Transaction Error */
+#define	COMP_SPLIT_ERR	36
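+
+/*
+ * Sketch only, not driver code: extracting the completion code from a
+ * transfer event ("event" is assumed to point at a filled-in
+ * struct xhci_transfer_event):
+ *
+ *	u32 code = GET_COMP_CODE(event->transfer_len);
+ *	if (code != COMP_SUCCESS && code != COMP_SHORT_TX)
+ *		xhci_warn(xhci, "Transfer failed, completion code %u\n", code);
+ */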
+
+struct xhci_link_trb {
+	/* 64-bit segment pointer*/
+	u32 segment_ptr[2];
+	u32 intr_target;
+	u32 control;
+};
+
+/* control bitfields */
+#define LINK_TOGGLE	(0x1<<1)
+
+/* Command completion event TRB */
+struct xhci_event_cmd {
+	/* Pointer to command TRB, or the value passed by the event data trb */
+	u32 cmd_trb[2];
+	u32 status;
+	u32 flags;
+};
+
+/* flags bitmasks */
+/* bits 16:23 are the virtual function ID */
+/* bits 24:31 are the slot ID */
+#define TRB_TO_SLOT_ID(p)	(((p) & (0xff<<24)) >> 24)
+#define SLOT_ID_FOR_TRB(p)	(((p) & 0xff) << 24)
+
+/* Stop Endpoint TRB - ep_index to endpoint ID for this TRB */
+#define TRB_TO_EP_INDEX(p)		((((p) & (0x1f << 16)) >> 16) - 1)
+#define	EP_ID_FOR_TRB(p)		((((p) + 1) & 0x1f) << 16)
+
+
+/* Port Status Change Event TRB fields */
+/* Port ID - bits 31:24 */
+#define GET_PORT_ID(p)		(((p) & (0xff << 24)) >> 24)
+
+/* Normal TRB fields */
+/* transfer_len bitmasks - bits 0:16 */
+#define	TRB_LEN(p)		((p) & 0x1ffff)
+/* TD size - number of bytes remaining in the TD (including this TRB):
+ * bits 17 - 21.  Shift the number of bytes by 10. */
+#define TD_REMAINDER(p)		((((p) >> 10) & 0x1f) << 17)
+/* Interrupter Target - which MSI-X vector to target the completion event at */
+#define TRB_INTR_TARGET(p)	(((p) & 0x3ff) << 22)
+#define GET_INTR_TARGET(p)	(((p) >> 22) & 0x3ff)
+
+/* Cycle bit - indicates TRB ownership by HC or HCD */
+#define TRB_CYCLE		(1<<0)
+/*
+ * Force next event data TRB to be evaluated before task switch.
+ * Used to pass OS data back after a TD completes.
+ */
+#define TRB_ENT			(1<<1)
+/* Interrupt on short packet */
+#define TRB_ISP			(1<<2)
+/* Set PCIe no snoop attribute */
+#define TRB_NO_SNOOP		(1<<3)
+/* Chain multiple TRBs into a TD */
+#define TRB_CHAIN		(1<<4)
+/* Interrupt on completion */
+#define TRB_IOC			(1<<5)
+/* The buffer pointer contains immediate data */
+#define TRB_IDT			(1<<6)
+
+
+/* Control transfer TRB specific fields */
+#define TRB_DIR_IN		(1<<16)
+
+struct xhci_generic_trb {
+	u32 field[4];
+};
+
+union xhci_trb {
+	struct xhci_link_trb		link;
+	struct xhci_transfer_event	trans_event;
+	struct xhci_event_cmd		event_cmd;
+	struct xhci_generic_trb		generic;
+};
+
+/* TRB bit mask */
+#define	TRB_TYPE_BITMASK	(0xfc00)
+#define TRB_TYPE(p)		((p) << 10)
+/* TRB type IDs */
+/* bulk, interrupt, isoc scatter/gather, and control data stage */
+#define TRB_NORMAL		1
+/* setup stage for control transfers */
+#define TRB_SETUP		2
+/* data stage for control transfers */
+#define TRB_DATA		3
+/* status stage for control transfers */
+#define TRB_STATUS		4
+/* isoc transfers */
+#define TRB_ISOC		5
+/* TRB for linking ring segments */
+#define TRB_LINK		6
+#define TRB_EVENT_DATA		7
+/* Transfer Ring No-op (not for the command ring) */
+#define TRB_TR_NOOP		8
+/* Command TRBs */
+/* Enable Slot Command */
+#define TRB_ENABLE_SLOT		9
+/* Disable Slot Command */
+#define TRB_DISABLE_SLOT	10
+/* Address Device Command */
+#define TRB_ADDR_DEV		11
+/* Configure Endpoint Command */
+#define TRB_CONFIG_EP		12
+/* Evaluate Context Command */
+#define TRB_EVAL_CONTEXT	13
+/* Reset Transfer Ring Command */
+#define TRB_RESET_RING		14
+/* Stop Transfer Ring Command */
+#define TRB_STOP_RING		15
+/* Set Transfer Ring Dequeue Pointer Command */
+#define TRB_SET_DEQ		16
+/* Reset Device Command */
+#define TRB_RESET_DEV		17
+/* Force Event Command (opt) */
+#define TRB_FORCE_EVENT		18
+/* Negotiate Bandwidth Command (opt) */
+#define TRB_NEG_BANDWIDTH	19
+/* Set Latency Tolerance Value Command (opt) */
+#define TRB_SET_LT		20
+/* Get port bandwidth Command */
+#define TRB_GET_BW		21
+/* Force Header Command - generate a transaction or link management packet */
+#define TRB_FORCE_HEADER	22
+/* No-op Command - not for transfer rings */
+#define TRB_CMD_NOOP		23
+/* TRB IDs 24-31 reserved */
+/* Event TRBS */
+/* Transfer Event */
+#define TRB_TRANSFER		32
+/* Command Completion Event */
+#define TRB_COMPLETION		33
+/* Port Status Change Event */
+#define TRB_PORT_STATUS		34
+/* Bandwidth Request Event (opt) */
+#define TRB_BANDWIDTH_EVENT	35
+/* Doorbell Event (opt) */
+#define TRB_DOORBELL		36
+/* Host Controller Event */
+#define TRB_HC_EVENT		37
+/* Device Notification Event - device sent function wake notification */
+#define TRB_DEV_NOTE		38
+/* MFINDEX Wrap Event - microframe counter wrapped */
+#define TRB_MFINDEX_WRAP	39
+/* TRB IDs 40-47 reserved, 48-63 is vendor-defined */
+
+/*
+ * TRBS_PER_SEGMENT must be a multiple of 4,
+ * since the command ring is 64-byte aligned.
+ * It must also be greater than 16.
+ */
+#define TRBS_PER_SEGMENT	64
+#define SEGMENT_SIZE		(TRBS_PER_SEGMENT*16)
+/* TRB buffer pointers can't cross 64KB boundaries */
+#define TRB_MAX_BUFF_SHIFT		16
+#define TRB_MAX_BUFF_SIZE	(1 << TRB_MAX_BUFF_SHIFT)
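+
+/*
+ * Sketch only, not driver code: since a single TRB's buffer may not cross a
+ * 64KB boundary, a transfer of "len" bytes starting at DMA address "addr"
+ * needs roughly this many TRBs (DIV_ROUND_UP() is from <linux/kernel.h>):
+ *
+ *	num_trbs = DIV_ROUND_UP((addr & (TRB_MAX_BUFF_SIZE - 1)) + len,
+ *			TRB_MAX_BUFF_SIZE);
+ */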
+
+struct xhci_segment {
+	union xhci_trb		*trbs;
+	/* private to HCD */
+	struct xhci_segment	*next;
+	dma_addr_t		dma;
+};
+
+struct xhci_td {
+	struct list_head	td_list;
+	struct list_head	cancelled_td_list;
+	struct urb		*urb;
+	struct xhci_segment	*start_seg;
+	union xhci_trb		*first_trb;
+	union xhci_trb		*last_trb;
+};
+
+struct xhci_ring {
+	struct xhci_segment	*first_seg;
+	union  xhci_trb		*enqueue;
+	struct xhci_segment	*enq_seg;
+	unsigned int		enq_updates;
+	union  xhci_trb		*dequeue;
+	struct xhci_segment	*deq_seg;
+	unsigned int		deq_updates;
+	struct list_head	td_list;
+	/* ----  Related to URB cancellation ---- */
+	struct list_head	cancelled_td_list;
+	unsigned int		cancels_pending;
+	unsigned int		state;
+#define SET_DEQ_PENDING		(1 << 0)
+	/* The TRB that was last reported in a stopped endpoint ring */
+	union xhci_trb		*stopped_trb;
+	struct xhci_td		*stopped_td;
+	/*
+	 * Write the cycle state into the TRB cycle field to give ownership of
+	 * the TRB to the host controller (if we are the producer), or to check
+	 * if we own the TRB (if we are the consumer).  See section 4.9.1.
+	 */
+	u32			cycle_state;
+};
+
+struct xhci_erst_entry {
+	/* 64-bit event ring segment address */
+	u32	seg_addr[2];
+	u32	seg_size;
+	/* Set to zero */
+	u32	rsvd;
+};
+
+struct xhci_erst {
+	struct xhci_erst_entry	*entries;
+	unsigned int		num_entries;
+	/* xhci->event_ring keeps track of segment dma addresses */
+	dma_addr_t		erst_dma_addr;
+	/* Num entries the ERST can contain */
+	unsigned int		erst_size;
+};
+
+/*
+ * Each segment table entry is 4*32bits long.  1K seems like an ok size:
+ * (1K bytes * 8 bits/byte) / (4*32 bits) = 64 segment entries in the table,
+ * meaning 64 ring segments.
+ */
+/* Initial number of event ring segments to allocate */
+#define	ERST_NUM_SEGS	1
+/* Initial allocated size of the ERST, in number of entries */
+#define	ERST_SIZE	64
+/* Initial number of event segment rings allocated */
+#define	ERST_ENTRIES	1
+/* Poll every 60 seconds */
+#define	POLL_TIMEOUT	60
+/* XXX: Make these module parameters */
+
+
+/* There is one xhci_hcd structure per controller */
+struct xhci_hcd {
+	/* glue to PCI and HCD framework */
+	struct xhci_cap_regs __iomem *cap_regs;
+	struct xhci_op_regs __iomem *op_regs;
+	struct xhci_run_regs __iomem *run_regs;
+	struct xhci_doorbell_array __iomem *dba;
+	/* Our HCD's current interrupter register set */
+	struct	xhci_intr_reg __iomem *ir_set;
+
+	/* Cached register copies of read-only HC data */
+	__u32		hcs_params1;
+	__u32		hcs_params2;
+	__u32		hcs_params3;
+	__u32		hcc_params;
+
+	spinlock_t	lock;
+
+	/* packed release number */
+	u8		sbrn;
+	u16		hci_version;
+	u8		max_slots;
+	u8		max_interrupters;
+	u8		max_ports;
+	u8		isoc_threshold;
+	int		event_ring_max;
+	int		addr_64;
+	/* 4KB min, 128MB max */
+	int		page_size;
+	/* Valid values are 12 to 20, inclusive */
+	int		page_shift;
+	/* only one MSI vector for now, but might need more later */
+	int		msix_count;
+	struct msix_entry	*msix_entries;
+	/* data structures */
+	struct xhci_device_context_array *dcbaa;
+	struct xhci_ring	*cmd_ring;
+	struct xhci_ring	*event_ring;
+	struct xhci_erst	erst;
+	/* slot enabling and address device helpers */
+	struct completion	addr_dev;
+	int slot_id;
+	/* Internal mirror of the HW's dcbaa */
+	struct xhci_virt_device	*devs[MAX_HC_SLOTS];
+
+	/* DMA pools */
+	struct dma_pool	*device_pool;
+	struct dma_pool	*segment_pool;
+
+#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
+	/* Poll the rings - for debugging */
+	struct timer_list	event_ring_timer;
+	int			zombie;
+#endif
+	/* Statistics */
+	int			noops_submitted;
+	int			noops_handled;
+	int			error_bitmask;
+};
+
+/* For testing purposes */
+#define NUM_TEST_NOOPS	0
+
+/* convert between an HCD pointer and the corresponding xhci_hcd */
+static inline struct xhci_hcd *hcd_to_xhci(struct usb_hcd *hcd)
+{
+	return (struct xhci_hcd *) (hcd->hcd_priv);
+}
+
+static inline struct usb_hcd *xhci_to_hcd(struct xhci_hcd *xhci)
+{
+	return container_of((void *) xhci, struct usb_hcd, hcd_priv);
+}
+
+#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
+#define XHCI_DEBUG	1
+#else
+#define XHCI_DEBUG	0
+#endif
+
+#define xhci_dbg(xhci, fmt, args...) \
+	do { if (XHCI_DEBUG) dev_dbg(xhci_to_hcd(xhci)->self.controller , fmt , ## args); } while (0)
+#define xhci_info(xhci, fmt, args...) \
+	do { if (XHCI_DEBUG) dev_info(xhci_to_hcd(xhci)->self.controller , fmt , ## args); } while (0)
+#define xhci_err(xhci, fmt, args...) \
+	dev_err(xhci_to_hcd(xhci)->self.controller , fmt , ## args)
+#define xhci_warn(xhci, fmt, args...) \
+	dev_warn(xhci_to_hcd(xhci)->self.controller , fmt , ## args)
+
+/* TODO: copied from ehci.h - can be refactored? */
+/* xHCI spec says all registers are little endian */
+static inline unsigned int xhci_readl(const struct xhci_hcd *xhci,
+		__u32 __iomem *regs)
+{
+	return readl(regs);
+}
+static inline void xhci_writel(struct xhci_hcd *xhci,
+		const unsigned int val, __u32 __iomem *regs)
+{
+	if (!in_interrupt())
+		xhci_dbg(xhci,
+			 "`MEM_WRITE_DWORD(3'b000, 32'h%p, 32'h%0x, 4'hf);\n",
+			 regs, val);
+	writel(val, regs);
+}
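+
+/*
+ * Sketch only, not driver code: a typical read-modify-write of an
+ * operational register with the helpers above, e.g. setting the run bit:
+ *
+ *	u32 cmd = xhci_readl(xhci, &xhci->op_regs->command);
+ *	xhci_writel(xhci, cmd | CMD_RUN, &xhci->op_regs->command);
+ */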
+
+/* xHCI debugging */
+void xhci_print_ir_set(struct xhci_hcd *xhci, struct xhci_intr_reg *ir_set, int set_num);
+void xhci_print_registers(struct xhci_hcd *xhci);
+void xhci_dbg_regs(struct xhci_hcd *xhci);
+void xhci_print_run_regs(struct xhci_hcd *xhci);
+void xhci_print_trb_offsets(struct xhci_hcd *xhci, union xhci_trb *trb);
+void xhci_debug_trb(struct xhci_hcd *xhci, union xhci_trb *trb);
+void xhci_debug_segment(struct xhci_hcd *xhci, struct xhci_segment *seg);
+void xhci_debug_ring(struct xhci_hcd *xhci, struct xhci_ring *ring);
+void xhci_dbg_erst(struct xhci_hcd *xhci, struct xhci_erst *erst);
+void xhci_dbg_cmd_ptrs(struct xhci_hcd *xhci);
+void xhci_dbg_ring_ptrs(struct xhci_hcd *xhci, struct xhci_ring *ring);
+void xhci_dbg_ctx(struct xhci_hcd *xhci, struct xhci_device_control *ctx, dma_addr_t dma, unsigned int last_ep);
+
+/* xHCI memory management */
+void xhci_mem_cleanup(struct xhci_hcd *xhci);
+int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags);
+void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id);
+int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id, struct usb_device *udev, gfp_t flags);
+int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *udev);
+unsigned int xhci_get_endpoint_index(struct usb_endpoint_descriptor *desc);
+unsigned int xhci_get_endpoint_flag(struct usb_endpoint_descriptor *desc);
+void xhci_endpoint_zero(struct xhci_hcd *xhci, struct xhci_virt_device *virt_dev, struct usb_host_endpoint *ep);
+int xhci_endpoint_init(struct xhci_hcd *xhci, struct xhci_virt_device *virt_dev,
+		struct usb_device *udev, struct usb_host_endpoint *ep,
+		gfp_t mem_flags);
+void xhci_ring_free(struct xhci_hcd *xhci, struct xhci_ring *ring);
+
+#ifdef CONFIG_PCI
+/* xHCI PCI glue */
+int xhci_register_pci(void);
+void xhci_unregister_pci(void);
+#endif
+
+/* xHCI host controller glue */
+int xhci_halt(struct xhci_hcd *xhci);
+int xhci_reset(struct xhci_hcd *xhci);
+int xhci_init(struct usb_hcd *hcd);
+int xhci_run(struct usb_hcd *hcd);
+void xhci_stop(struct usb_hcd *hcd);
+void xhci_shutdown(struct usb_hcd *hcd);
+int xhci_get_frame(struct usb_hcd *hcd);
+irqreturn_t xhci_irq(struct usb_hcd *hcd);
+int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev);
+void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev);
+int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev);
+int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags);
+int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status);
+int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev, struct usb_host_endpoint *ep);
+int xhci_drop_endpoint(struct usb_hcd *hcd, struct usb_device *udev, struct usb_host_endpoint *ep);
+int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev);
+void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev);
+
+/* xHCI ring, segment, TRB, and TD functions */
+dma_addr_t xhci_trb_virt_to_dma(struct xhci_segment *seg, union xhci_trb *trb);
+void xhci_ring_cmd_db(struct xhci_hcd *xhci);
+void *xhci_setup_one_noop(struct xhci_hcd *xhci);
+void xhci_handle_event(struct xhci_hcd *xhci);
+void xhci_set_hc_event_deq(struct xhci_hcd *xhci);
+int xhci_queue_slot_control(struct xhci_hcd *xhci, u32 trb_type, u32 slot_id);
+int xhci_queue_address_device(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
+		u32 slot_id);
+int xhci_queue_stop_endpoint(struct xhci_hcd *xhci, int slot_id,
+		unsigned int ep_index);
+int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags, struct urb *urb,
+		int slot_id, unsigned int ep_index);
+int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags, struct urb *urb,
+		int slot_id, unsigned int ep_index);
+int xhci_queue_configure_endpoint(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
+		u32 slot_id);
+
+/* xHCI roothub code */
+int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, u16 wIndex,
+		char *buf, u16 wLength);
+int xhci_hub_status_data(struct usb_hcd *hcd, char *buf);
+
+#endif /* __LINUX_XHCI_HCD_H */
diff --git a/drivers/usb/misc/iowarrior.c b/drivers/usb/misc/iowarrior.c
index a4ef77e..3c5fe5c 100644
--- a/drivers/usb/misc/iowarrior.c
+++ b/drivers/usb/misc/iowarrior.c
@@ -726,12 +726,18 @@
 	.poll = iowarrior_poll,
 };
 
+static char *iowarrior_nodename(struct device *dev)
+{
+	return kasprintf(GFP_KERNEL, "usb/%s", dev_name(dev));
+}
+
 /*
  * usb class driver info in order to get a minor number from the usb core,
  * and to have the device registered with devfs and the driver core
  */
 static struct usb_class_driver iowarrior_class = {
 	.name = "iowarrior%d",
+	.nodename = iowarrior_nodename,
 	.fops = &iowarrior_fops,
 	.minor_base = IOWARRIOR_MINOR_BASE,
 };
diff --git a/drivers/usb/misc/legousbtower.c b/drivers/usb/misc/legousbtower.c
index ab0f322..c1e2433 100644
--- a/drivers/usb/misc/legousbtower.c
+++ b/drivers/usb/misc/legousbtower.c
@@ -266,12 +266,18 @@
 	.llseek =	tower_llseek,
 };
 
+static char *legousbtower_nodename(struct device *dev)
+{
+	return kasprintf(GFP_KERNEL, "usb/%s", dev_name(dev));
+}
+
 /*
  * usb class driver info in order to get a minor number from the usb core,
  * and to have the device registered with the driver core
  */
 static struct usb_class_driver tower_class = {
 	.name =		"legousbtower%d",
+	.nodename = 	legousbtower_nodename,
 	.fops =		&tower_fops,
 	.minor_base =	LEGO_USB_TOWER_MINOR_BASE,
 };
diff --git a/drivers/usb/misc/sisusbvga/Kconfig b/drivers/usb/misc/sisusbvga/Kconfig
index 7603cbe..30ea7ca 100644
--- a/drivers/usb/misc/sisusbvga/Kconfig
+++ b/drivers/usb/misc/sisusbvga/Kconfig
@@ -1,7 +1,7 @@
 
 config USB_SISUSBVGA
 	tristate "USB 2.0 SVGA dongle support (Net2280/SiS315)"
-	depends on USB && USB_EHCI_HCD
+	depends on USB && (USB_MUSB_HDRC || USB_EHCI_HCD)
         ---help---
 	  Say Y here if you intend to attach a USB2VGA dongle based on a
 	  Net2280 and a SiS315 chip.
diff --git a/drivers/usb/misc/usbtest.c b/drivers/usb/misc/usbtest.c
index 5f1a19d..a9f06d7 100644
--- a/drivers/usb/misc/usbtest.c
+++ b/drivers/usb/misc/usbtest.c
@@ -1072,23 +1072,34 @@
 	 */
 	msleep (jiffies % (2 * INTERRUPT_RATE));
 	if (async) {
-retry:
-		retval = usb_unlink_urb (urb);
-		if (retval == -EBUSY || retval == -EIDRM) {
-			/* we can't unlink urbs while they're completing.
-			 * or if they've completed, and we haven't resubmitted.
-			 * "normal" drivers would prevent resubmission, but
-			 * since we're testing unlink paths, we can't.
-			 */
-			ERROR(dev,  "unlink retry\n");
-			goto retry;
+		while (!completion_done(&completion)) {
+			retval = usb_unlink_urb(urb);
+
+			switch (retval) {
+			case -EBUSY:
+			case -EIDRM:
+				/* we can't unlink urbs while they're completing
+				 * or if they've completed, and we haven't
+				 * resubmitted. "normal" drivers would prevent
+				 * resubmission, but since we're testing unlink
+				 * paths, we can't.
+				 */
+				ERROR(dev, "unlink retry\n");
+				continue;
+			case 0:
+			case -EINPROGRESS:
+				break;
+
+			default:
+				dev_err(&dev->intf->dev,
+					"unlink fail %d\n", retval);
+				return retval;
+			}
+
+			break;
 		}
 	} else
 		usb_kill_urb (urb);
-	if (!(retval == 0 || retval == -EINPROGRESS)) {
-		dev_err(&dev->intf->dev, "unlink fail %d\n", retval);
-		return retval;
-	}
 
 	wait_for_completion (&completion);
 	retval = urb->status;
diff --git a/drivers/usb/mon/mon_text.c b/drivers/usb/mon/mon_text.c
index 1f71543..a7eb4c9 100644
--- a/drivers/usb/mon/mon_text.c
+++ b/drivers/usb/mon/mon_text.c
@@ -733,7 +733,7 @@
 {
 	struct dentry *mondir;
 
-	mondir = debugfs_create_dir("usbmon", NULL);
+	mondir = debugfs_create_dir("usbmon", usb_debug_root);
 	if (IS_ERR(mondir)) {
 		printk(KERN_NOTICE TAG ": debugfs is not available\n");
 		return -ENODEV;
diff --git a/drivers/usb/musb/Kconfig b/drivers/usb/musb/Kconfig
index b66e854..70073b1 100644
--- a/drivers/usb/musb/Kconfig
+++ b/drivers/usb/musb/Kconfig
@@ -10,6 +10,7 @@
 config USB_MUSB_HDRC
 	depends on (USB || USB_GADGET) && HAVE_CLK
 	depends on !SUPERH
+	select NOP_USB_XCEIV if ARCH_DAVINCI
 	select TWL4030_USB if MACH_OMAP_3430SDP
 	select USB_OTG_UTILS
 	tristate 'Inventra Highspeed Dual Role Controller (TI, ADI, ...)'
@@ -55,6 +56,7 @@
 config USB_TUSB6010
 	boolean "TUSB 6010 support"
 	depends on USB_MUSB_HDRC && !USB_MUSB_SOC
+	select NOP_USB_XCEIV
 	default y
 	help
 	  The TUSB 6010 chip, from Texas Instruments, connects a discrete
diff --git a/drivers/usb/musb/blackfin.c b/drivers/usb/musb/blackfin.c
index 7861348..f2f66eb 100644
--- a/drivers/usb/musb/blackfin.c
+++ b/drivers/usb/musb/blackfin.c
@@ -143,7 +143,7 @@
 	u16 val;
 
 	spin_lock_irqsave(&musb->lock, flags);
-	switch (musb->xceiv.state) {
+	switch (musb->xceiv->state) {
 	case OTG_STATE_A_IDLE:
 	case OTG_STATE_A_WAIT_BCON:
 		/* Start a new session */
@@ -154,7 +154,7 @@
 		val = musb_readw(musb->mregs, MUSB_DEVCTL);
 		if (!(val & MUSB_DEVCTL_BDEVICE)) {
 			gpio_set_value(musb->config->gpio_vrsel, 1);
-			musb->xceiv.state = OTG_STATE_A_WAIT_BCON;
+			musb->xceiv->state = OTG_STATE_A_WAIT_BCON;
 		} else {
 			gpio_set_value(musb->config->gpio_vrsel, 0);
 
@@ -247,6 +247,11 @@
 	}
 	gpio_direction_output(musb->config->gpio_vrsel, 0);
 
+	usb_nop_xceiv_register();
+	musb->xceiv = otg_get_transceiver();
+	if (!musb->xceiv)
+		return -ENODEV;
+
 	if (ANOMALY_05000346) {
 		bfin_write_USB_APHY_CALIB(ANOMALY_05000346_value);
 		SSYNC();
@@ -291,7 +296,7 @@
 			musb_conn_timer_handler, (unsigned long) musb);
 	}
 	if (is_peripheral_enabled(musb))
-		musb->xceiv.set_power = bfin_set_power;
+		musb->xceiv->set_power = bfin_set_power;
 
 	musb->isr = blackfin_interrupt;
 
diff --git a/drivers/usb/musb/cppi_dma.c b/drivers/usb/musb/cppi_dma.c
index 1976e9b..c3577bb 100644
--- a/drivers/usb/musb/cppi_dma.c
+++ b/drivers/usb/musb/cppi_dma.c
@@ -6,6 +6,7 @@
  * The TUSB6020, using VLYNQ, has CPPI that looks much like DaVinci.
  */
 
+#include <linux/platform_device.h>
 #include <linux/usb.h>
 
 #include "musb_core.h"
@@ -1145,17 +1146,27 @@
 	return completed;
 }
 
-void cppi_completion(struct musb *musb, u32 rx, u32 tx)
+irqreturn_t cppi_interrupt(int irq, void *dev_id)
 {
-	void __iomem		*tibase;
-	int			i, index;
+	struct musb		*musb = dev_id;
 	struct cppi		*cppi;
+	void __iomem		*tibase;
 	struct musb_hw_ep	*hw_ep = NULL;
+	u32			rx, tx;
+	int			i, index;
 
 	cppi = container_of(musb->dma_controller, struct cppi, controller);
 
 	tibase = musb->ctrl_base;
 
+	tx = musb_readl(tibase, DAVINCI_TXCPPI_MASKED_REG);
+	rx = musb_readl(tibase, DAVINCI_RXCPPI_MASKED_REG);
+
+	if (!tx && !rx)
+		return IRQ_NONE;
+
+	DBG(4, "CPPI IRQ Tx%x Rx%x\n", tx, rx);
+
 	/* process TX channels */
 	for (index = 0; tx; tx = tx >> 1, index++) {
 		struct cppi_channel		*tx_ch;
@@ -1273,6 +1284,8 @@
 
 	/* write to CPPI EOI register to re-enable interrupts */
 	musb_writel(tibase, DAVINCI_CPPI_EOI_REG, 0);
+
+	return IRQ_HANDLED;
 }
 
 /* Instantiate a software object representing a DMA controller. */
@@ -1280,6 +1293,9 @@
 dma_controller_create(struct musb *musb, void __iomem *mregs)
 {
 	struct cppi		*controller;
+	struct device		*dev = musb->controller;
+	struct platform_device	*pdev = to_platform_device(dev);
+	int			irq = platform_get_irq(pdev, 1);
 
 	controller = kzalloc(sizeof *controller, GFP_KERNEL);
 	if (!controller)
@@ -1310,6 +1326,15 @@
 		return NULL;
 	}
 
+	if (irq > 0) {
+		if (request_irq(irq, cppi_interrupt, 0, "cppi-dma", musb)) {
+			dev_err(dev, "request_irq %d failed!\n", irq);
+			dma_controller_destroy(&controller->controller);
+			return NULL;
+		}
+		controller->irq = irq;
+	}
+
 	return &controller->controller;
 }
 
@@ -1322,6 +1347,9 @@
 
 	cppi = container_of(c, struct cppi, controller);
 
+	if (cppi->irq)
+		free_irq(cppi->irq, cppi->musb);
+
 	/* assert:  caller stopped the controller first */
 	dma_pool_destroy(cppi->pool);
 
diff --git a/drivers/usb/musb/cppi_dma.h b/drivers/usb/musb/cppi_dma.h
index 729b407..8a39de3 100644
--- a/drivers/usb/musb/cppi_dma.h
+++ b/drivers/usb/musb/cppi_dma.h
@@ -119,6 +119,8 @@
 	void __iomem			*mregs;		/* Mentor regs */
 	void __iomem			*tibase;	/* TI/CPPI regs */
 
+	int				irq;
+
 	struct cppi_channel		tx[4];
 	struct cppi_channel		rx[4];
 
@@ -127,7 +129,7 @@
 	struct list_head		tx_complete;
 };
 
-/* irq handling hook */
-extern void cppi_completion(struct musb *, u32 rx, u32 tx);
+/* CPPI IRQ handler */
+extern irqreturn_t cppi_interrupt(int, void *);
 
 #endif				/* end of ifndef _CPPI_DMA_H_ */
diff --git a/drivers/usb/musb/davinci.c b/drivers/usb/musb/davinci.c
index 10d11ab..180d7da 100644
--- a/drivers/usb/musb/davinci.c
+++ b/drivers/usb/musb/davinci.c
@@ -215,7 +215,7 @@
 	DBG(7, "poll devctl %02x (%s)\n", devctl, otg_state_string(musb));
 
 	spin_lock_irqsave(&musb->lock, flags);
-	switch (musb->xceiv.state) {
+	switch (musb->xceiv->state) {
 	case OTG_STATE_A_WAIT_VFALL:
 		/* Wait till VBUS falls below SessionEnd (~0.2V); the 1.3 RTL
 		 * seems to mis-handle session "start" otherwise (or in our
@@ -226,7 +226,7 @@
 			mod_timer(&otg_workaround, jiffies + POLL_SECONDS * HZ);
 			break;
 		}
-		musb->xceiv.state = OTG_STATE_A_WAIT_VRISE;
+		musb->xceiv->state = OTG_STATE_A_WAIT_VRISE;
 		musb_writel(musb->ctrl_base, DAVINCI_USB_INT_SET_REG,
 			MUSB_INTR_VBUSERROR << DAVINCI_USB_USBINT_SHIFT);
 		break;
@@ -251,7 +251,7 @@
 		if (devctl & MUSB_DEVCTL_BDEVICE)
 			mod_timer(&otg_workaround, jiffies + POLL_SECONDS * HZ);
 		else
-			musb->xceiv.state = OTG_STATE_A_IDLE;
+			musb->xceiv->state = OTG_STATE_A_IDLE;
 		break;
 	default:
 		break;
@@ -265,6 +265,7 @@
 	irqreturn_t	retval = IRQ_NONE;
 	struct musb	*musb = __hci;
 	void __iomem	*tibase = musb->ctrl_base;
+	struct cppi	*cppi;
 	u32		tmp;
 
 	spin_lock_irqsave(&musb->lock, flags);
@@ -281,16 +282,9 @@
 	/* CPPI interrupts share the same IRQ line, but have their own
 	 * mask, state, "vector", and EOI registers.
 	 */
-	if (is_cppi_enabled()) {
-		u32 cppi_tx = musb_readl(tibase, DAVINCI_TXCPPI_MASKED_REG);
-		u32 cppi_rx = musb_readl(tibase, DAVINCI_RXCPPI_MASKED_REG);
-
-		if (cppi_tx || cppi_rx) {
-			DBG(4, "CPPI IRQ t%x r%x\n", cppi_tx, cppi_rx);
-			cppi_completion(musb, cppi_rx, cppi_tx);
-			retval = IRQ_HANDLED;
-		}
-	}
+	cppi = container_of(musb->dma_controller, struct cppi, controller);
+	if (is_cppi_enabled() && musb->dma_controller && !cppi->irq)
+		retval = cppi_interrupt(irq, __hci);
 
 	/* ack and handle non-CPPI interrupts */
 	tmp = musb_readl(tibase, DAVINCI_USB_INT_SRC_MASKED_REG);
@@ -331,21 +325,21 @@
 			 * to stop registering in devctl.
 			 */
 			musb->int_usb &= ~MUSB_INTR_VBUSERROR;
-			musb->xceiv.state = OTG_STATE_A_WAIT_VFALL;
+			musb->xceiv->state = OTG_STATE_A_WAIT_VFALL;
 			mod_timer(&otg_workaround, jiffies + POLL_SECONDS * HZ);
 			WARNING("VBUS error workaround (delay coming)\n");
 		} else if (is_host_enabled(musb) && drvvbus) {
 			musb->is_active = 1;
 			MUSB_HST_MODE(musb);
-			musb->xceiv.default_a = 1;
-			musb->xceiv.state = OTG_STATE_A_WAIT_VRISE;
+			musb->xceiv->default_a = 1;
+			musb->xceiv->state = OTG_STATE_A_WAIT_VRISE;
 			portstate(musb->port1_status |= USB_PORT_STAT_POWER);
 			del_timer(&otg_workaround);
 		} else {
 			musb->is_active = 0;
 			MUSB_DEV_MODE(musb);
-			musb->xceiv.default_a = 0;
-			musb->xceiv.state = OTG_STATE_B_IDLE;
+			musb->xceiv->default_a = 0;
+			musb->xceiv->state = OTG_STATE_B_IDLE;
 			portstate(musb->port1_status &= ~USB_PORT_STAT_POWER);
 		}
 
@@ -367,17 +361,12 @@
 
 	/* poll for ID change */
 	if (is_otg_enabled(musb)
-			&& musb->xceiv.state == OTG_STATE_B_IDLE)
+			&& musb->xceiv->state == OTG_STATE_B_IDLE)
 		mod_timer(&otg_workaround, jiffies + POLL_SECONDS * HZ);
 
 	spin_unlock_irqrestore(&musb->lock, flags);
 
-	/* REVISIT we sometimes get unhandled IRQs
-	 * (e.g. ep0).  not clear why...
-	 */
-	if (retval != IRQ_HANDLED)
-		DBG(5, "unhandled? %08x\n", tmp);
-	return IRQ_HANDLED;
+	return retval;
 }
 
 int musb_platform_set_mode(struct musb *musb, u8 mode)
@@ -391,6 +380,11 @@
 	void __iomem	*tibase = musb->ctrl_base;
 	u32		revision;
 
+	usb_nop_xceiv_register();
+	musb->xceiv = otg_get_transceiver();
+	if (!musb->xceiv)
+		return -ENODEV;
+
 	musb->mregs += DAVINCI_BASE_OFFSET;
 
 	clk_enable(musb->clock);
@@ -398,7 +392,7 @@
 	/* returns zero if e.g. not clocked */
 	revision = musb_readl(tibase, DAVINCI_USB_VERSION_REG);
 	if (revision == 0)
-		return -ENODEV;
+		goto fail;
 
 	if (is_host_enabled(musb))
 		setup_timer(&otg_workaround, otg_timer, (unsigned long) musb);
@@ -432,6 +426,10 @@
 
 	musb->isr = davinci_interrupt;
 	return 0;
+
+fail:
+	usb_nop_xceiv_unregister();
+	return -ENODEV;
 }
 
 int musb_platform_exit(struct musb *musb)
@@ -442,7 +440,7 @@
 	davinci_source_power(musb, 0 /*off*/, 1);
 
 	/* delay, to avoid problems with module reload */
-	if (is_host_enabled(musb) && musb->xceiv.default_a) {
+	if (is_host_enabled(musb) && musb->xceiv->default_a) {
 		int	maxdelay = 30;
 		u8	devctl, warn = 0;
 
@@ -471,5 +469,7 @@
 
 	clk_disable(musb->clock);
 
+	usb_nop_xceiv_unregister();
+
 	return 0;
 }
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
index 4000cf6..554a414 100644
--- a/drivers/usb/musb/musb_core.c
+++ b/drivers/usb/musb/musb_core.c
@@ -112,6 +112,7 @@
 #include "davinci.h"
 #endif
 
+#define TA_WAIT_BCON(m) max_t(int, (m)->a_wait_bcon, OTG_TIME_A_WAIT_BCON)
 
 
 unsigned musb_debug;
@@ -267,7 +268,7 @@
 
 const char *otg_state_string(struct musb *musb)
 {
-	switch (musb->xceiv.state) {
+	switch (musb->xceiv->state) {
 	case OTG_STATE_A_IDLE:		return "a_idle";
 	case OTG_STATE_A_WAIT_VRISE:	return "a_wait_vrise";
 	case OTG_STATE_A_WAIT_BCON:	return "a_wait_bcon";
@@ -288,12 +289,6 @@
 #ifdef	CONFIG_USB_MUSB_OTG
 
 /*
- * See also USB_OTG_1-3.pdf 6.6.5 Timers
- * REVISIT: Are the other timers done in the hardware?
- */
-#define TB_ASE0_BRST		100	/* Min 3.125 ms */
-
-/*
  * Handles OTG hnp timeouts, such as b_ase0_brst
  */
 void musb_otg_timer_func(unsigned long data)
@@ -302,16 +297,18 @@
 	unsigned long	flags;
 
 	spin_lock_irqsave(&musb->lock, flags);
-	switch (musb->xceiv.state) {
+	switch (musb->xceiv->state) {
 	case OTG_STATE_B_WAIT_ACON:
 		DBG(1, "HNP: b_wait_acon timeout; back to b_peripheral\n");
 		musb_g_disconnect(musb);
-		musb->xceiv.state = OTG_STATE_B_PERIPHERAL;
+		musb->xceiv->state = OTG_STATE_B_PERIPHERAL;
 		musb->is_active = 0;
 		break;
+	case OTG_STATE_A_SUSPEND:
 	case OTG_STATE_A_WAIT_BCON:
-		DBG(1, "HNP: a_wait_bcon timeout; back to a_host\n");
-		musb_hnp_stop(musb);
+		DBG(1, "HNP: %s timeout\n", otg_state_string(musb));
+		musb_set_vbus(musb, 0);
+		musb->xceiv->state = OTG_STATE_A_WAIT_VFALL;
 		break;
 	default:
 		DBG(1, "HNP: Unhandled mode %s\n", otg_state_string(musb));
@@ -320,10 +317,8 @@
 	spin_unlock_irqrestore(&musb->lock, flags);
 }
 
-static DEFINE_TIMER(musb_otg_timer, musb_otg_timer_func, 0, 0);
-
 /*
- * Stops the B-device HNP state. Caller must take care of locking.
+ * Stops the HNP transition. Caller must take care of locking.
  */
 void musb_hnp_stop(struct musb *musb)
 {
@@ -331,20 +326,17 @@
 	void __iomem	*mbase = musb->mregs;
 	u8	reg;
 
-	switch (musb->xceiv.state) {
+	DBG(1, "HNP: stop from %s\n", otg_state_string(musb));
+
+	switch (musb->xceiv->state) {
 	case OTG_STATE_A_PERIPHERAL:
-	case OTG_STATE_A_WAIT_VFALL:
-	case OTG_STATE_A_WAIT_BCON:
-		DBG(1, "HNP: Switching back to A-host\n");
 		musb_g_disconnect(musb);
-		musb->xceiv.state = OTG_STATE_A_IDLE;
-		MUSB_HST_MODE(musb);
-		musb->is_active = 0;
+		DBG(1, "HNP: back to %s\n", otg_state_string(musb));
 		break;
 	case OTG_STATE_B_HOST:
 		DBG(1, "HNP: Disabling HR\n");
 		hcd->self.is_b_host = 0;
-		musb->xceiv.state = OTG_STATE_B_PERIPHERAL;
+		musb->xceiv->state = OTG_STATE_B_PERIPHERAL;
 		MUSB_DEV_MODE(musb);
 		reg = musb_readb(mbase, MUSB_POWER);
 		reg |= MUSB_POWER_SUSPENDM;
@@ -402,7 +394,7 @@
 
 		if (devctl & MUSB_DEVCTL_HM) {
 #ifdef CONFIG_USB_MUSB_HDRC_HCD
-			switch (musb->xceiv.state) {
+			switch (musb->xceiv->state) {
 			case OTG_STATE_A_SUSPEND:
 				/* remote wakeup?  later, GetPortStatus
 				 * will stop RESUME signaling
@@ -425,12 +417,12 @@
 				musb->rh_timer = jiffies
 						+ msecs_to_jiffies(20);
 
-				musb->xceiv.state = OTG_STATE_A_HOST;
+				musb->xceiv->state = OTG_STATE_A_HOST;
 				musb->is_active = 1;
 				usb_hcd_resume_root_hub(musb_to_hcd(musb));
 				break;
 			case OTG_STATE_B_WAIT_ACON:
-				musb->xceiv.state = OTG_STATE_B_PERIPHERAL;
+				musb->xceiv->state = OTG_STATE_B_PERIPHERAL;
 				musb->is_active = 1;
 				MUSB_DEV_MODE(musb);
 				break;
@@ -441,11 +433,11 @@
 			}
 #endif
 		} else {
-			switch (musb->xceiv.state) {
+			switch (musb->xceiv->state) {
 #ifdef CONFIG_USB_MUSB_HDRC_HCD
 			case OTG_STATE_A_SUSPEND:
 				/* possibly DISCONNECT is upcoming */
-				musb->xceiv.state = OTG_STATE_A_HOST;
+				musb->xceiv->state = OTG_STATE_A_HOST;
 				usb_hcd_resume_root_hub(musb_to_hcd(musb));
 				break;
 #endif
@@ -490,7 +482,7 @@
 		 */
 		musb_writeb(mbase, MUSB_DEVCTL, MUSB_DEVCTL_SESSION);
 		musb->ep0_stage = MUSB_EP0_START;
-		musb->xceiv.state = OTG_STATE_A_IDLE;
+		musb->xceiv->state = OTG_STATE_A_IDLE;
 		MUSB_HST_MODE(musb);
 		musb_set_vbus(musb, 1);
 
@@ -516,7 +508,7 @@
 		 * REVISIT:  do delays from lots of DEBUG_KERNEL checks
 		 * make trouble here, keeping VBUS < 4.4V ?
 		 */
-		switch (musb->xceiv.state) {
+		switch (musb->xceiv->state) {
 		case OTG_STATE_A_HOST:
 			/* recovery is dicey once we've gotten past the
 			 * initial stages of enumeration, but if VBUS
@@ -594,37 +586,40 @@
 		if (devctl & MUSB_DEVCTL_LSDEV)
 			musb->port1_status |= USB_PORT_STAT_LOW_SPEED;
 
+		/* indicate new connection to OTG machine */
+		switch (musb->xceiv->state) {
+		case OTG_STATE_B_PERIPHERAL:
+			if (int_usb & MUSB_INTR_SUSPEND) {
+				DBG(1, "HNP: SUSPEND+CONNECT, now b_host\n");
+				int_usb &= ~MUSB_INTR_SUSPEND;
+				goto b_host;
+			} else
+				DBG(1, "CONNECT as b_peripheral???\n");
+			break;
+		case OTG_STATE_B_WAIT_ACON:
+			DBG(1, "HNP: CONNECT, now b_host\n");
+b_host:
+			musb->xceiv->state = OTG_STATE_B_HOST;
+			hcd->self.is_b_host = 1;
+			musb->ignore_disconnect = 0;
+			del_timer(&musb->otg_timer);
+			break;
+		default:
+			if ((devctl & MUSB_DEVCTL_VBUS)
+					== (3 << MUSB_DEVCTL_VBUS_SHIFT)) {
+				musb->xceiv->state = OTG_STATE_A_HOST;
+				hcd->self.is_b_host = 0;
+			}
+			break;
+		}
+
+		/* poke the root hub */
+		MUSB_HST_MODE(musb);
 		if (hcd->status_urb)
 			usb_hcd_poll_rh_status(hcd);
 		else
 			usb_hcd_resume_root_hub(hcd);
 
-		MUSB_HST_MODE(musb);
-
-		/* indicate new connection to OTG machine */
-		switch (musb->xceiv.state) {
-		case OTG_STATE_B_PERIPHERAL:
-			if (int_usb & MUSB_INTR_SUSPEND) {
-				DBG(1, "HNP: SUSPEND+CONNECT, now b_host\n");
-				musb->xceiv.state = OTG_STATE_B_HOST;
-				hcd->self.is_b_host = 1;
-				int_usb &= ~MUSB_INTR_SUSPEND;
-			} else
-				DBG(1, "CONNECT as b_peripheral???\n");
-			break;
-		case OTG_STATE_B_WAIT_ACON:
-			DBG(1, "HNP: Waiting to switch to b_host state\n");
-			musb->xceiv.state = OTG_STATE_B_HOST;
-			hcd->self.is_b_host = 1;
-			break;
-		default:
-			if ((devctl & MUSB_DEVCTL_VBUS)
-					== (3 << MUSB_DEVCTL_VBUS_SHIFT)) {
-				musb->xceiv.state = OTG_STATE_A_HOST;
-				hcd->self.is_b_host = 0;
-			}
-			break;
-		}
 		DBG(1, "CONNECT (%s) devctl %02x\n",
 				otg_state_string(musb), devctl);
 	}
@@ -650,7 +645,7 @@
 			}
 		} else if (is_peripheral_capable()) {
 			DBG(1, "BUS RESET as %s\n", otg_state_string(musb));
-			switch (musb->xceiv.state) {
+			switch (musb->xceiv->state) {
 #ifdef CONFIG_USB_OTG
 			case OTG_STATE_A_SUSPEND:
 				/* We need to ignore disconnect on suspend
@@ -661,24 +656,27 @@
 				musb_g_reset(musb);
 				/* FALLTHROUGH */
 			case OTG_STATE_A_WAIT_BCON:	/* OPT TD.4.7-900ms */
-				DBG(1, "HNP: Setting timer as %s\n",
-						otg_state_string(musb));
-				musb_otg_timer.data = (unsigned long)musb;
-				mod_timer(&musb_otg_timer, jiffies
-					+ msecs_to_jiffies(100));
+				/* never use invalid T(a_wait_bcon) */
+				DBG(1, "HNP: in %s, %d msec timeout\n",
+						otg_state_string(musb),
+						TA_WAIT_BCON(musb));
+				mod_timer(&musb->otg_timer, jiffies
+					+ msecs_to_jiffies(TA_WAIT_BCON(musb)));
 				break;
 			case OTG_STATE_A_PERIPHERAL:
-				musb_hnp_stop(musb);
+				musb->ignore_disconnect = 0;
+				del_timer(&musb->otg_timer);
+				musb_g_reset(musb);
 				break;
 			case OTG_STATE_B_WAIT_ACON:
 				DBG(1, "HNP: RESET (%s), to b_peripheral\n",
 					otg_state_string(musb));
-				musb->xceiv.state = OTG_STATE_B_PERIPHERAL;
+				musb->xceiv->state = OTG_STATE_B_PERIPHERAL;
 				musb_g_reset(musb);
 				break;
 #endif
 			case OTG_STATE_B_IDLE:
-				musb->xceiv.state = OTG_STATE_B_PERIPHERAL;
+				musb->xceiv->state = OTG_STATE_B_PERIPHERAL;
 				/* FALLTHROUGH */
 			case OTG_STATE_B_PERIPHERAL:
 				musb_g_reset(musb);
@@ -763,7 +761,7 @@
 				MUSB_MODE(musb), devctl);
 		handled = IRQ_HANDLED;
 
-		switch (musb->xceiv.state) {
+		switch (musb->xceiv->state) {
 #ifdef CONFIG_USB_MUSB_HDRC_HCD
 		case OTG_STATE_A_HOST:
 		case OTG_STATE_A_SUSPEND:
@@ -776,7 +774,16 @@
 #endif	/* HOST */
 #ifdef CONFIG_USB_MUSB_OTG
 		case OTG_STATE_B_HOST:
-			musb_hnp_stop(musb);
+			/* REVISIT this behaves for "real disconnect"
+			 * cases; make sure the other transitions
+			 * from B_HOST act right too.  The B_HOST code
+			 * in hnp_stop() is currently not used...
+			 */
+			musb_root_disconnect(musb);
+			musb_to_hcd(musb)->self.is_b_host = 0;
+			musb->xceiv->state = OTG_STATE_B_PERIPHERAL;
+			MUSB_DEV_MODE(musb);
+			musb_g_disconnect(musb);
 			break;
 		case OTG_STATE_A_PERIPHERAL:
 			musb_hnp_stop(musb);
@@ -805,26 +812,35 @@
 				otg_state_string(musb), devctl, power);
 		handled = IRQ_HANDLED;
 
-		switch (musb->xceiv.state) {
+		switch (musb->xceiv->state) {
 #ifdef	CONFIG_USB_MUSB_OTG
 		case OTG_STATE_A_PERIPHERAL:
-			/*
-			 * We cannot stop HNP here, devctl BDEVICE might be
-			 * still set.
+			/* We also come here if the cable is removed, since
+			 * this silicon doesn't report ID-no-longer-grounded.
+			 *
+			 * We depend on T(a_wait_bcon) to shut us down, and
+			 * hope users don't do anything dicey during this
+			 * undesired detour through A_WAIT_BCON.
 			 */
+			musb_hnp_stop(musb);
+			usb_hcd_resume_root_hub(musb_to_hcd(musb));
+			musb_root_disconnect(musb);
+			musb_platform_try_idle(musb, jiffies
+					+ msecs_to_jiffies(musb->a_wait_bcon
+						? : OTG_TIME_A_WAIT_BCON));
 			break;
 #endif
 		case OTG_STATE_B_PERIPHERAL:
 			musb_g_suspend(musb);
 			musb->is_active = is_otg_enabled(musb)
-					&& musb->xceiv.gadget->b_hnp_enable;
+					&& musb->xceiv->gadget->b_hnp_enable;
 			if (musb->is_active) {
 #ifdef	CONFIG_USB_MUSB_OTG
-				musb->xceiv.state = OTG_STATE_B_WAIT_ACON;
+				musb->xceiv->state = OTG_STATE_B_WAIT_ACON;
 				DBG(1, "HNP: Setting timer for b_ase0_brst\n");
-				musb_otg_timer.data = (unsigned long)musb;
-				mod_timer(&musb_otg_timer, jiffies
-					+ msecs_to_jiffies(TB_ASE0_BRST));
+				mod_timer(&musb->otg_timer, jiffies
+					+ msecs_to_jiffies(
+							OTG_TIME_B_ASE0_BRST));
 #endif
 			}
 			break;
@@ -834,9 +850,9 @@
 					+ msecs_to_jiffies(musb->a_wait_bcon));
 			break;
 		case OTG_STATE_A_HOST:
-			musb->xceiv.state = OTG_STATE_A_SUSPEND;
+			musb->xceiv->state = OTG_STATE_A_SUSPEND;
 			musb->is_active = is_otg_enabled(musb)
-					&& musb->xceiv.host->b_hnp_enable;
+					&& musb->xceiv->host->b_hnp_enable;
 			break;
 		case OTG_STATE_B_HOST:
 			/* Transition to B_PERIPHERAL, see 6.8.2.6 p 44 */
@@ -1068,14 +1084,13 @@
 { .hw_ep_num =  8, .style = FIFO_RX,   .maxpacket = 512, },
 { .hw_ep_num =  9, .style = FIFO_TX,   .maxpacket = 512, },
 { .hw_ep_num =  9, .style = FIFO_RX,   .maxpacket = 512, },
-{ .hw_ep_num = 10, .style = FIFO_TX,   .maxpacket = 512, },
-{ .hw_ep_num = 10, .style = FIFO_RX,   .maxpacket = 512, },
-{ .hw_ep_num = 11, .style = FIFO_TX,   .maxpacket = 512, },
-{ .hw_ep_num = 11, .style = FIFO_RX,   .maxpacket = 512, },
-{ .hw_ep_num = 12, .style = FIFO_TX,   .maxpacket = 512, },
-{ .hw_ep_num = 12, .style = FIFO_RX,   .maxpacket = 512, },
-{ .hw_ep_num = 13, .style = FIFO_TX,   .maxpacket = 512, },
-{ .hw_ep_num = 13, .style = FIFO_RX,   .maxpacket = 512, },
+{ .hw_ep_num = 10, .style = FIFO_TX,   .maxpacket = 256, },
+{ .hw_ep_num = 10, .style = FIFO_RX,   .maxpacket = 64, },
+{ .hw_ep_num = 11, .style = FIFO_TX,   .maxpacket = 256, },
+{ .hw_ep_num = 11, .style = FIFO_RX,   .maxpacket = 64, },
+{ .hw_ep_num = 12, .style = FIFO_TX,   .maxpacket = 256, },
+{ .hw_ep_num = 12, .style = FIFO_RX,   .maxpacket = 64, },
+{ .hw_ep_num = 13, .style = FIFO_RXTX, .maxpacket = 4096, },
 { .hw_ep_num = 14, .style = FIFO_RXTX, .maxpacket = 1024, },
 { .hw_ep_num = 15, .style = FIFO_RXTX, .maxpacket = 1024, },
 };
@@ -1335,11 +1350,11 @@
 	}
 	if (reg & MUSB_CONFIGDATA_HBRXE) {
 		strcat(aInfo, ", HB-ISO Rx");
-		strcat(aInfo, " (X)");		/* no driver support */
+		musb->hb_iso_rx = true;
 	}
 	if (reg & MUSB_CONFIGDATA_HBTXE) {
 		strcat(aInfo, ", HB-ISO Tx");
-		strcat(aInfo, " (X)");		/* no driver support */
+		musb->hb_iso_tx = true;
 	}
 	if (reg & MUSB_CONFIGDATA_SOFTCONE)
 		strcat(aInfo, ", SoftConn");
@@ -1481,13 +1496,7 @@
 
 	spin_unlock_irqrestore(&musb->lock, flags);
 
-	/* REVISIT we sometimes get spurious IRQs on g_ep0
-	 * not clear why...
-	 */
-	if (retval != IRQ_HANDLED)
-		DBG(5, "spurious?\n");
-
-	return IRQ_HANDLED;
+	return retval;
 }
 
 #else
@@ -1687,8 +1696,9 @@
 	}
 
 	spin_lock_irqsave(&musb->lock, flags);
-	musb->a_wait_bcon = val;
-	if (musb->xceiv.state == OTG_STATE_A_WAIT_BCON)
+	/* force T(a_wait_bcon) to be zero/unlimited *OR* valid */
+	musb->a_wait_bcon = val ? max_t(int, val, OTG_TIME_A_WAIT_BCON) : 0;
+	if (musb->xceiv->state == OTG_STATE_A_WAIT_BCON)
 		musb->is_active = 0;
 	musb_platform_try_idle(musb, jiffies + msecs_to_jiffies(val));
 	spin_unlock_irqrestore(&musb->lock, flags);
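
The T(a_wait_bcon) clamp added just above keeps a user-supplied VBUS timeout either at zero (meaning unlimited) or no smaller than the OTG minimum. A minimal stand-alone sketch of that rule, using the OTG_TIME_A_WAIT_BCON value (1100 msec) introduced in the musb_core.h hunk further down; clamp_a_wait_bcon() is an illustrative name only, not a function in the driver:

#include <stdio.h>

#define OTG_TIME_A_WAIT_BCON	1100	/* msec, minimum used by the driver */

/* 0 stays 0 ("unlimited"); any other value is raised to the minimum */
static unsigned long clamp_a_wait_bcon(unsigned long val)
{
	if (!val)
		return 0;
	return val < OTG_TIME_A_WAIT_BCON ? OTG_TIME_A_WAIT_BCON : val;
}

int main(void)
{
	printf("%lu %lu %lu\n", clamp_a_wait_bcon(0),
	       clamp_a_wait_bcon(200), clamp_a_wait_bcon(5000));
	return 0;	/* prints: 0 1100 5000 */
}
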
@@ -1706,10 +1716,13 @@
 
 	spin_lock_irqsave(&musb->lock, flags);
 	val = musb->a_wait_bcon;
+	/* FIXME get_vbus_status() is normally #defined as false...
+	 * and is effectively TUSB-specific.
+	 */
 	vbus = musb_platform_get_vbus_status(musb);
 	spin_unlock_irqrestore(&musb->lock, flags);
 
-	return sprintf(buf, "Vbus %s, timeout %lu\n",
+	return sprintf(buf, "Vbus %s, timeout %lu msec\n",
 			vbus ? "on" : "off", val);
 }
 static DEVICE_ATTR(vbus, 0644, musb_vbus_show, musb_vbus_store);
@@ -1749,8 +1762,8 @@
 	struct musb *musb = container_of(data, struct musb, irq_work);
 	static int old_state;
 
-	if (musb->xceiv.state != old_state) {
-		old_state = musb->xceiv.state;
+	if (musb->xceiv->state != old_state) {
+		old_state = musb->xceiv->state;
 		sysfs_notify(&musb->controller->kobj, NULL, "mode");
 	}
 }
@@ -1782,6 +1795,7 @@
 	hcd->uses_new_polling = 1;
 
 	musb->vbuserr_retry = VBUSERR_RETRY_COUNT;
+	musb->a_wait_bcon = OTG_TIME_A_WAIT_BCON;
 #else
 	musb = kzalloc(sizeof *musb, GFP_KERNEL);
 	if (!musb)
@@ -1847,7 +1861,7 @@
 	}
 
 #ifdef CONFIG_USB_MUSB_OTG
-	put_device(musb->xceiv.dev);
+	put_device(musb->xceiv->dev);
 #endif
 
 #ifdef CONFIG_USB_MUSB_HDRC_HCD
@@ -1928,10 +1942,18 @@
 		}
 	}
 
-	/* assume vbus is off */
-
-	/* platform adjusts musb->mregs and musb->isr if needed,
-	 * and activates clocks
+	/* The musb_platform_init() call:
+	 *   - adjusts musb->mregs and musb->isr if needed,
+	 *   - may initialize an integrated transceiver
+	 *   - initializes musb->xceiv, usually by otg_get_transceiver()
+	 *   - activates clocks.
+	 *   - stops powering VBUS
+	 *   - assigns musb->board_set_vbus if host mode is enabled
+	 *
+	 * There are various transceiver configurations.  Blackfin,
+	 * DaVinci, TUSB60x0, and others integrate them.  OMAP3 uses
+	 * external/discrete ones in various flavors (twl4030 family,
+	 * isp1504, non-OTG, etc) mostly hooking up through ULPI.
 	 */
 	musb->isr = generic_interrupt;
 	status = musb_platform_init(musb);
@@ -1968,6 +1990,10 @@
 	if (status < 0)
 		goto fail2;
 
+#ifdef CONFIG_USB_OTG
+	setup_timer(&musb->otg_timer, musb_otg_timer_func, (unsigned long) musb);
+#endif
+
 	/* Init IRQ workqueue before request_irq */
 	INIT_WORK(&musb->irq_work, musb_irq_work);
 
@@ -1999,17 +2025,17 @@
 				? "DMA" : "PIO",
 			musb->nIrq);
 
-#ifdef CONFIG_USB_MUSB_HDRC_HCD
-	/* host side needs more setup, except for no-host modes */
-	if (musb->board_mode != MUSB_PERIPHERAL) {
+	/* host side needs more setup */
+	if (is_host_enabled(musb)) {
 		struct usb_hcd	*hcd = musb_to_hcd(musb);
 
-		if (musb->board_mode == MUSB_OTG)
+		otg_set_host(musb->xceiv, &hcd->self);
+
+		if (is_otg_enabled(musb))
 			hcd->self.otg_port = 1;
-		musb->xceiv.host = &hcd->self;
+		musb->xceiv->host = &hcd->self;
 		hcd->power_budget = 2 * (plat->power ? : 250);
 	}
-#endif				/* CONFIG_USB_MUSB_HDRC_HCD */
 
 	/* For the host-only role, we can activate right away.
 	 * (We expect the ID pin to be forcibly grounded!!)
@@ -2017,8 +2043,8 @@
 	 */
 	if (!is_otg_enabled(musb) && is_host_enabled(musb)) {
 		MUSB_HST_MODE(musb);
-		musb->xceiv.default_a = 1;
-		musb->xceiv.state = OTG_STATE_A_IDLE;
+		musb->xceiv->default_a = 1;
+		musb->xceiv->state = OTG_STATE_A_IDLE;
 
 		status = usb_add_hcd(musb_to_hcd(musb), -1, 0);
 		if (status)
@@ -2033,8 +2059,8 @@
 
 	} else /* peripheral is enabled */ {
 		MUSB_DEV_MODE(musb);
-		musb->xceiv.default_a = 0;
-		musb->xceiv.state = OTG_STATE_B_IDLE;
+		musb->xceiv->default_a = 0;
+		musb->xceiv->state = OTG_STATE_B_IDLE;
 
 		status = musb_gadget_setup(musb);
 		if (status)
diff --git a/drivers/usb/musb/musb_core.h b/drivers/usb/musb/musb_core.h
index efb39b5..f3772ca 100644
--- a/drivers/usb/musb/musb_core.h
+++ b/drivers/usb/musb/musb_core.h
@@ -40,6 +40,7 @@
 #include <linux/interrupt.h>
 #include <linux/smp_lock.h>
 #include <linux/errno.h>
+#include <linux/timer.h>
 #include <linux/clk.h>
 #include <linux/device.h>
 #include <linux/usb/ch9.h>
@@ -171,7 +172,8 @@
 
 /* peripheral side ep0 states */
 enum musb_g_ep0_state {
-	MUSB_EP0_STAGE_SETUP,		/* idle, waiting for setup */
+	MUSB_EP0_STAGE_IDLE,		/* idle, waiting for SETUP */
+	MUSB_EP0_STAGE_SETUP,		/* received SETUP */
 	MUSB_EP0_STAGE_TX,		/* IN data */
 	MUSB_EP0_STAGE_RX,		/* OUT data */
 	MUSB_EP0_STAGE_STATUSIN,	/* (after OUT data) */
@@ -179,10 +181,15 @@
 	MUSB_EP0_STAGE_ACKWAIT,		/* after zlp, before statusin */
 } __attribute__ ((packed));
 
-/* OTG protocol constants */
+/*
+ * OTG protocol constants.  See USB OTG 1.3 spec,
+ * sections 5.5 "Device Timings" and 6.6.5 "Timers".
+ */
 #define OTG_TIME_A_WAIT_VRISE	100		/* msec (max) */
-#define OTG_TIME_A_WAIT_BCON	0		/* 0=infinite; min 1000 msec */
-#define OTG_TIME_A_IDLE_BDIS	200		/* msec (min) */
+#define OTG_TIME_A_WAIT_BCON	1100		/* min 1 second */
+#define OTG_TIME_A_AIDL_BDIS	200		/* min 200 msec */
+#define OTG_TIME_B_ASE0_BRST	100		/* min 3.125 ms */
+
 
 /*************************** REGISTER ACCESS ********************************/
 
@@ -331,6 +338,8 @@
 	struct list_head	control;	/* of musb_qh */
 	struct list_head	in_bulk;	/* of musb_qh */
 	struct list_head	out_bulk;	/* of musb_qh */
+
+	struct timer_list	otg_timer;
 #endif
 
 	/* called with IRQs blocked; ON/nonzero implies starting a session,
@@ -355,7 +364,7 @@
 	u16			int_rx;
 	u16			int_tx;
 
-	struct otg_transceiver	xceiv;
+	struct otg_transceiver	*xceiv;
 
 	int nIrq;
 	unsigned		irq_wake:1;
@@ -386,6 +395,9 @@
 	unsigned is_multipoint:1;
 	unsigned ignore_disconnect:1;	/* during bus resets */
 
+	unsigned		hb_iso_rx:1;	/* high bandwidth iso rx? */
+	unsigned		hb_iso_tx:1;	/* high bandwidth iso tx? */
+
 #ifdef C_MP_TX
 	unsigned bulk_split:1;
 #define	can_bulk_split(musb,type) \
diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c
index f79440c..8b3c4e2 100644
--- a/drivers/usb/musb/musb_gadget.c
+++ b/drivers/usb/musb/musb_gadget.c
@@ -310,7 +310,7 @@
 			/* setup DMA, then program endpoint CSR */
 			request_size = min(request->length,
 						musb_ep->dma->max_len);
-			if (request_size <= musb_ep->packet_sz)
+			if (request_size < musb_ep->packet_sz)
 				musb_ep->dma->desired_mode = 0;
 			else
 				musb_ep->dma->desired_mode = 1;
@@ -349,7 +349,8 @@
 #elif defined(CONFIG_USB_TI_CPPI_DMA)
 		/* program endpoint CSR first, then setup DMA */
 		csr &= ~(MUSB_TXCSR_P_UNDERRUN | MUSB_TXCSR_TXPKTRDY);
-		csr |= MUSB_TXCSR_MODE | MUSB_TXCSR_DMAENAB;
+		csr |= MUSB_TXCSR_DMAENAB | MUSB_TXCSR_DMAMODE |
+		       MUSB_TXCSR_MODE;
 		musb_writew(epio, MUSB_TXCSR,
 			(MUSB_TXCSR_P_WZC_BITS & ~MUSB_TXCSR_P_UNDERRUN)
 				| csr);
@@ -1405,7 +1406,7 @@
 
 	spin_lock_irqsave(&musb->lock, flags);
 
-	switch (musb->xceiv.state) {
+	switch (musb->xceiv->state) {
 	case OTG_STATE_B_PERIPHERAL:
 		/* NOTE:  OTG state machine doesn't include B_SUSPENDED;
 		 * that's part of the standard usb 1.1 state machine, and
@@ -1507,9 +1508,9 @@
 {
 	struct musb	*musb = gadget_to_musb(gadget);
 
-	if (!musb->xceiv.set_power)
+	if (!musb->xceiv->set_power)
 		return -EOPNOTSUPP;
-	return otg_set_power(&musb->xceiv, mA);
+	return otg_set_power(musb->xceiv, mA);
 }
 
 static int musb_gadget_pullup(struct usb_gadget *gadget, int is_on)
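
musb_gadget_vbus_draw() above now returns -EOPNOTSUPP when the transceiver provides no set_power hook, instead of poking transceiver fields directly. A small stand-alone sketch of that optional-callback pattern; struct xceiv, vbus_draw() and demo_set_power() are stand-ins invented for the example, not the otg_transceiver API:

#include <stdio.h>
#include <errno.h>

struct xceiv {
	int (*set_power)(struct xceiv *x, unsigned mA);	/* optional hook */
};

/* Report "not supported" when the hook is absent, else delegate to it */
static int vbus_draw(struct xceiv *x, unsigned mA)
{
	if (!x->set_power)
		return -EOPNOTSUPP;
	return x->set_power(x, mA);
}

static int demo_set_power(struct xceiv *x, unsigned mA)
{
	printf("drawing %u mA\n", mA);
	return 0;
}

int main(void)
{
	struct xceiv none = { .set_power = NULL };
	struct xceiv real = { .set_power = demo_set_power };

	printf("%d\n", vbus_draw(&none, 100));	/* -EOPNOTSUPP */
	return vbus_draw(&real, 100);		/* 0 */
}
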
@@ -1732,11 +1733,7 @@
 
 		spin_lock_irqsave(&musb->lock, flags);
 
-		/* REVISIT always use otg_set_peripheral(), handling
-		 * issues including the root hub one below ...
-		 */
-		musb->xceiv.gadget = &musb->g;
-		musb->xceiv.state = OTG_STATE_B_IDLE;
+		otg_set_peripheral(musb->xceiv, &musb->g);
 		musb->is_active = 1;
 
 		/* FIXME this ignores the softconnect flag.  Drivers are
@@ -1748,6 +1745,8 @@
 		if (!is_otg_enabled(musb))
 			musb_start(musb);
 
+		otg_set_peripheral(musb->xceiv, &musb->g);
+
 		spin_unlock_irqrestore(&musb->lock, flags);
 
 		if (is_otg_enabled(musb)) {
@@ -1761,8 +1760,7 @@
 			if (retval < 0) {
 				DBG(1, "add_hcd failed, %d\n", retval);
 				spin_lock_irqsave(&musb->lock, flags);
-				musb->xceiv.gadget = NULL;
-				musb->xceiv.state = OTG_STATE_UNDEFINED;
+				otg_set_peripheral(musb->xceiv, NULL);
 				musb->gadget_driver = NULL;
 				musb->g.dev.driver = NULL;
 				spin_unlock_irqrestore(&musb->lock, flags);
@@ -1845,8 +1843,9 @@
 
 		(void) musb_gadget_vbus_draw(&musb->g, 0);
 
-		musb->xceiv.state = OTG_STATE_UNDEFINED;
+		musb->xceiv->state = OTG_STATE_UNDEFINED;
 		stop_activity(musb, driver);
+		otg_set_peripheral(musb->xceiv, NULL);
 
 		DBG(3, "unregistering driver %s\n", driver->function);
 		spin_unlock_irqrestore(&musb->lock, flags);
@@ -1882,7 +1881,7 @@
 void musb_g_resume(struct musb *musb)
 {
 	musb->is_suspended = 0;
-	switch (musb->xceiv.state) {
+	switch (musb->xceiv->state) {
 	case OTG_STATE_B_IDLE:
 		break;
 	case OTG_STATE_B_WAIT_ACON:
@@ -1908,10 +1907,10 @@
 	devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
 	DBG(3, "devctl %02x\n", devctl);
 
-	switch (musb->xceiv.state) {
+	switch (musb->xceiv->state) {
 	case OTG_STATE_B_IDLE:
 		if ((devctl & MUSB_DEVCTL_VBUS) == MUSB_DEVCTL_VBUS)
-			musb->xceiv.state = OTG_STATE_B_PERIPHERAL;
+			musb->xceiv->state = OTG_STATE_B_PERIPHERAL;
 		break;
 	case OTG_STATE_B_PERIPHERAL:
 		musb->is_suspended = 1;
@@ -1957,22 +1956,24 @@
 		spin_lock(&musb->lock);
 	}
 
-	switch (musb->xceiv.state) {
+	switch (musb->xceiv->state) {
 	default:
 #ifdef	CONFIG_USB_MUSB_OTG
 		DBG(2, "Unhandled disconnect %s, setting a_idle\n",
 			otg_state_string(musb));
-		musb->xceiv.state = OTG_STATE_A_IDLE;
+		musb->xceiv->state = OTG_STATE_A_IDLE;
+		MUSB_HST_MODE(musb);
 		break;
 	case OTG_STATE_A_PERIPHERAL:
-		musb->xceiv.state = OTG_STATE_A_WAIT_VFALL;
+		musb->xceiv->state = OTG_STATE_A_WAIT_BCON;
+		MUSB_HST_MODE(musb);
 		break;
 	case OTG_STATE_B_WAIT_ACON:
 	case OTG_STATE_B_HOST:
 #endif
 	case OTG_STATE_B_PERIPHERAL:
 	case OTG_STATE_B_IDLE:
-		musb->xceiv.state = OTG_STATE_B_IDLE;
+		musb->xceiv->state = OTG_STATE_B_IDLE;
 		break;
 	case OTG_STATE_B_SRP_INIT:
 		break;
@@ -2028,10 +2029,10 @@
 	 * or else after HNP, as A-Device
 	 */
 	if (devctl & MUSB_DEVCTL_BDEVICE) {
-		musb->xceiv.state = OTG_STATE_B_PERIPHERAL;
+		musb->xceiv->state = OTG_STATE_B_PERIPHERAL;
 		musb->g.is_a_peripheral = 0;
 	} else if (is_otg_enabled(musb)) {
-		musb->xceiv.state = OTG_STATE_A_PERIPHERAL;
+		musb->xceiv->state = OTG_STATE_A_PERIPHERAL;
 		musb->g.is_a_peripheral = 1;
 	} else
 		WARN_ON(1);
diff --git a/drivers/usb/musb/musb_gadget_ep0.c b/drivers/usb/musb/musb_gadget_ep0.c
index 3f5e30d..40ed50e 100644
--- a/drivers/usb/musb/musb_gadget_ep0.c
+++ b/drivers/usb/musb/musb_gadget_ep0.c
@@ -4,6 +4,7 @@
  * Copyright 2005 Mentor Graphics Corporation
  * Copyright (C) 2005-2006 by Texas Instruments
  * Copyright (C) 2006-2007 Nokia Corporation
+ * Copyright (C) 2008-2009 MontaVista Software, Inc. <source@mvista.com>
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License
@@ -58,7 +59,8 @@
 static char *decode_ep0stage(u8 stage)
 {
 	switch (stage) {
-	case MUSB_EP0_STAGE_SETUP:	return "idle";
+	case MUSB_EP0_STAGE_IDLE:	return "idle";
+	case MUSB_EP0_STAGE_SETUP:	return "setup";
 	case MUSB_EP0_STAGE_TX:		return "in";
 	case MUSB_EP0_STAGE_RX:		return "out";
 	case MUSB_EP0_STAGE_ACKWAIT:	return "wait";
@@ -628,7 +630,7 @@
 		musb_writew(regs, MUSB_CSR0,
 				csr & ~MUSB_CSR0_P_SENTSTALL);
 		retval = IRQ_HANDLED;
-		musb->ep0_state = MUSB_EP0_STAGE_SETUP;
+		musb->ep0_state = MUSB_EP0_STAGE_IDLE;
 		csr = musb_readw(regs, MUSB_CSR0);
 	}
 
@@ -636,7 +638,18 @@
 	if (csr & MUSB_CSR0_P_SETUPEND) {
 		musb_writew(regs, MUSB_CSR0, MUSB_CSR0_P_SVDSETUPEND);
 		retval = IRQ_HANDLED;
-		musb->ep0_state = MUSB_EP0_STAGE_SETUP;
+		/* Transition into the early status phase */
+		switch (musb->ep0_state) {
+		case MUSB_EP0_STAGE_TX:
+			musb->ep0_state = MUSB_EP0_STAGE_STATUSOUT;
+			break;
+		case MUSB_EP0_STAGE_RX:
+			musb->ep0_state = MUSB_EP0_STAGE_STATUSIN;
+			break;
+		default:
+			ERR("SetupEnd came in a wrong ep0stage %s",
+			    decode_ep0stage(musb->ep0_state));
+		}
 		csr = musb_readw(regs, MUSB_CSR0);
 		/* NOTE:  request may need completion */
 	}
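
The SetupEnd handling above no longer drops straight back to the setup stage; it jumps to the status stage that matches the direction of the aborted data stage. A stand-alone sketch of that transition, using a local enum that mirrors (but is not) the driver's enum musb_g_ep0_state:

#include <stdio.h>

enum ep0_stage { IDLE, SETUP, TX, RX, STATUSIN, STATUSOUT };

/* SetupEnd during a data stage means the host has already moved on to
 * the status stage, so enter the matching status state directly. */
static enum ep0_stage on_setupend(enum ep0_stage cur)
{
	switch (cur) {
	case TX:		/* IN data was in flight */
		return STATUSOUT;
	case RX:		/* OUT data was in flight */
		return STATUSIN;
	default:		/* unexpected; the driver logs an error */
		return cur;
	}
}

int main(void)
{
	printf("%d %d\n", on_setupend(TX), on_setupend(RX));
	return 0;
}
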
@@ -697,11 +710,31 @@
 			if (req)
 				musb_g_ep0_giveback(musb, req);
 		}
+
+		/*
+		 * In case several interrupts got coalesced,
+		 * check to see if we've already received a SETUP packet...
+		 */
+		if (csr & MUSB_CSR0_RXPKTRDY)
+			goto setup;
+
+		retval = IRQ_HANDLED;
+		musb->ep0_state = MUSB_EP0_STAGE_IDLE;
+		break;
+
+	case MUSB_EP0_STAGE_IDLE:
+		/*
+		 * This state is typically (but not always) indiscernible
+		 * from the status states since the corresponding interrupts
+		 * tend to happen within too short a period of time (with only
+		 * a zero-length packet in between) and so get coalesced...
+		 */
 		retval = IRQ_HANDLED;
 		musb->ep0_state = MUSB_EP0_STAGE_SETUP;
 		/* FALLTHROUGH */
 
 	case MUSB_EP0_STAGE_SETUP:
+setup:
 		if (csr & MUSB_CSR0_RXPKTRDY) {
 			struct usb_ctrlrequest	setup;
 			int			handled = 0;
@@ -783,7 +816,7 @@
 stall:
 				DBG(3, "stall (%d)\n", handled);
 				musb->ackpend |= MUSB_CSR0_P_SENDSTALL;
-				musb->ep0_state = MUSB_EP0_STAGE_SETUP;
+				musb->ep0_state = MUSB_EP0_STAGE_IDLE;
 finish:
 				musb_writew(regs, MUSB_CSR0,
 						musb->ackpend);
@@ -803,7 +836,7 @@
 		/* "can't happen" */
 		WARN_ON(1);
 		musb_writew(regs, MUSB_CSR0, MUSB_CSR0_P_SENDSTALL);
-		musb->ep0_state = MUSB_EP0_STAGE_SETUP;
+		musb->ep0_state = MUSB_EP0_STAGE_IDLE;
 		break;
 	}
 
@@ -959,7 +992,7 @@
 
 		csr |= MUSB_CSR0_P_SENDSTALL;
 		musb_writew(regs, MUSB_CSR0, csr);
-		musb->ep0_state = MUSB_EP0_STAGE_SETUP;
+		musb->ep0_state = MUSB_EP0_STAGE_IDLE;
 		musb->ackpend = 0;
 		break;
 	default:
diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c
index db1b574..94a2a35 100644
--- a/drivers/usb/musb/musb_host.c
+++ b/drivers/usb/musb/musb_host.c
@@ -181,6 +181,19 @@
 	musb_writew(ep->regs, MUSB_TXCSR, txcsr);
 }
 
+static void musb_ep_set_qh(struct musb_hw_ep *ep, int is_in, struct musb_qh *qh)
+{
+	if (is_in != 0 || ep->is_shared_fifo)
+		ep->in_qh  = qh;
+	if (is_in == 0 || ep->is_shared_fifo)
+		ep->out_qh = qh;
+}
+
+static struct musb_qh *musb_ep_get_qh(struct musb_hw_ep *ep, int is_in)
+{
+	return is_in ? ep->in_qh : ep->out_qh;
+}
+
 /*
  * Start the URB at the front of an endpoint's queue
  * end must be claimed from the caller.
@@ -210,7 +223,6 @@
 	case USB_ENDPOINT_XFER_CONTROL:
 		/* control transfers always start with SETUP */
 		is_in = 0;
-		hw_ep->out_qh = qh;
 		musb->ep0_stage = MUSB_EP0_START;
 		buf = urb->setup_packet;
 		len = 8;
@@ -239,10 +251,7 @@
 			epnum, buf + offset, len);
 
 	/* Configure endpoint */
-	if (is_in || hw_ep->is_shared_fifo)
-		hw_ep->in_qh = qh;
-	else
-		hw_ep->out_qh = qh;
+	musb_ep_set_qh(hw_ep, is_in, qh);
 	musb_ep_program(musb, epnum, urb, !is_in, buf, offset, len);
 
 	/* transmit may have more work: start it when it is time */
@@ -286,9 +295,8 @@
 	}
 }
 
-/* caller owns controller lock, irqs are blocked */
-static void
-__musb_giveback(struct musb *musb, struct urb *urb, int status)
+/* Context: caller owns controller lock, IRQs are blocked */
+static void musb_giveback(struct musb *musb, struct urb *urb, int status)
 __releases(musb->lock)
 __acquires(musb->lock)
 {
@@ -321,60 +329,57 @@
 	spin_lock(&musb->lock);
 }
 
-/* for bulk/interrupt endpoints only */
-static inline void
-musb_save_toggle(struct musb_hw_ep *ep, int is_in, struct urb *urb)
+/* For bulk/interrupt endpoints only */
+static inline void musb_save_toggle(struct musb_qh *qh, int is_in,
+				    struct urb *urb)
 {
-	struct usb_device	*udev = urb->dev;
+	void __iomem		*epio = qh->hw_ep->regs;
 	u16			csr;
-	void __iomem		*epio = ep->regs;
-	struct musb_qh		*qh;
 
-	/* FIXME:  the current Mentor DMA code seems to have
+	/*
+	 * FIXME: the current Mentor DMA code seems to have
 	 * problems getting toggle correct.
 	 */
 
-	if (is_in || ep->is_shared_fifo)
-		qh = ep->in_qh;
+	if (is_in)
+		csr = musb_readw(epio, MUSB_RXCSR) & MUSB_RXCSR_H_DATATOGGLE;
 	else
-		qh = ep->out_qh;
+		csr = musb_readw(epio, MUSB_TXCSR) & MUSB_TXCSR_H_DATATOGGLE;
 
-	if (!is_in) {
-		csr = musb_readw(epio, MUSB_TXCSR);
-		usb_settoggle(udev, qh->epnum, 1,
-			(csr & MUSB_TXCSR_H_DATATOGGLE)
-				? 1 : 0);
-	} else {
-		csr = musb_readw(epio, MUSB_RXCSR);
-		usb_settoggle(udev, qh->epnum, 0,
-			(csr & MUSB_RXCSR_H_DATATOGGLE)
-				? 1 : 0);
-	}
+	usb_settoggle(urb->dev, qh->epnum, !is_in, csr ? 1 : 0);
 }
 
-/* caller owns controller lock, irqs are blocked */
-static struct musb_qh *
-musb_giveback(struct musb_qh *qh, struct urb *urb, int status)
+/*
+ * Advance this hardware endpoint's queue, completing the specified URB and
+ * advancing to either the next URB queued to that qh, or else invalidating
+ * that qh and advancing to the next qh scheduled after the current one.
+ *
+ * Context: caller owns controller lock, IRQs are blocked
+ */
+static void musb_advance_schedule(struct musb *musb, struct urb *urb,
+				  struct musb_hw_ep *hw_ep, int is_in)
 {
+	struct musb_qh		*qh = musb_ep_get_qh(hw_ep, is_in);
 	struct musb_hw_ep	*ep = qh->hw_ep;
-	struct musb		*musb = ep->musb;
-	int			is_in = usb_pipein(urb->pipe);
 	int			ready = qh->is_ready;
+	int			status;
+
+	status = (urb->status == -EINPROGRESS) ? 0 : urb->status;
 
 	/* save toggle eagerly, for paranoia */
 	switch (qh->type) {
 	case USB_ENDPOINT_XFER_BULK:
 	case USB_ENDPOINT_XFER_INT:
-		musb_save_toggle(ep, is_in, urb);
+		musb_save_toggle(qh, is_in, urb);
 		break;
 	case USB_ENDPOINT_XFER_ISOC:
-		if (status == 0 && urb->error_count)
+		if (urb->error_count)
 			status = -EXDEV;
 		break;
 	}
 
 	qh->is_ready = 0;
-	__musb_giveback(musb, urb, status);
+	musb_giveback(musb, urb, status);
 	qh->is_ready = ready;
 
 	/* reclaim resources (and bandwidth) ASAP; deschedule it, and
@@ -388,11 +393,8 @@
 		else
 			ep->tx_reinit = 1;
 
-		/* clobber old pointers to this qh */
-		if (is_in || ep->is_shared_fifo)
-			ep->in_qh = NULL;
-		else
-			ep->out_qh = NULL;
+		/* Clobber old pointers to this qh */
+		musb_ep_set_qh(ep, is_in, NULL);
 		qh->hep->hcpriv = NULL;
 
 		switch (qh->type) {
@@ -421,36 +423,10 @@
 			break;
 		}
 	}
-	return qh;
-}
-
-/*
- * Advance this hardware endpoint's queue, completing the specified urb and
- * advancing to either the next urb queued to that qh, or else invalidating
- * that qh and advancing to the next qh scheduled after the current one.
- *
- * Context: caller owns controller lock, irqs are blocked
- */
-static void
-musb_advance_schedule(struct musb *musb, struct urb *urb,
-		struct musb_hw_ep *hw_ep, int is_in)
-{
-	struct musb_qh	*qh;
-
-	if (is_in || hw_ep->is_shared_fifo)
-		qh = hw_ep->in_qh;
-	else
-		qh = hw_ep->out_qh;
-
-	if (urb->status == -EINPROGRESS)
-		qh = musb_giveback(qh, urb, 0);
-	else
-		qh = musb_giveback(qh, urb, urb->status);
 
 	if (qh != NULL && qh->is_ready) {
 		DBG(4, "... next ep%d %cX urb %p\n",
-				hw_ep->epnum, is_in ? 'R' : 'T',
-				next_urb(qh));
+		    hw_ep->epnum, is_in ? 'R' : 'T', next_urb(qh));
 		musb_start_urb(musb, is_in, qh);
 	}
 }
@@ -629,7 +605,8 @@
 	musb_writeb(ep->regs, MUSB_RXTYPE, qh->type_reg);
 	musb_writeb(ep->regs, MUSB_RXINTERVAL, qh->intv_reg);
 	/* NOTE: bulk combining rewrites high bits of maxpacket */
-	musb_writew(ep->regs, MUSB_RXMAXP, qh->maxpacket);
+	musb_writew(ep->regs, MUSB_RXMAXP,
+			qh->maxpacket | ((qh->hb_mult - 1) << 11));
 
 	ep->rx_reinit = 0;
 }
@@ -651,9 +628,10 @@
 	csr = musb_readw(epio, MUSB_TXCSR);
 	if (length > pkt_size) {
 		mode = 1;
-		csr |= MUSB_TXCSR_AUTOSET
-			| MUSB_TXCSR_DMAMODE
-			| MUSB_TXCSR_DMAENAB;
+		csr |= MUSB_TXCSR_DMAMODE | MUSB_TXCSR_DMAENAB;
+		/* autoset shouldn't be set in high bandwidth */
+		if (qh->hb_mult == 1)
+			csr |= MUSB_TXCSR_AUTOSET;
 	} else {
 		mode = 0;
 		csr &= ~(MUSB_TXCSR_AUTOSET | MUSB_TXCSR_DMAMODE);
@@ -703,15 +681,8 @@
 	void __iomem		*mbase = musb->mregs;
 	struct musb_hw_ep	*hw_ep = musb->endpoints + epnum;
 	void __iomem		*epio = hw_ep->regs;
-	struct musb_qh		*qh;
-	u16			packet_sz;
-
-	if (!is_out || hw_ep->is_shared_fifo)
-		qh = hw_ep->in_qh;
-	else
-		qh = hw_ep->out_qh;
-
-	packet_sz = qh->maxpacket;
+	struct musb_qh		*qh = musb_ep_get_qh(hw_ep, !is_out);
+	u16			packet_sz = qh->maxpacket;
 
 	DBG(3, "%s hw%d urb %p spd%d dev%d ep%d%s "
 				"h_addr%02x h_port%02x bytes %d\n",
@@ -1129,17 +1100,14 @@
 	u16			tx_csr;
 	size_t			length = 0;
 	size_t			offset = 0;
-	struct urb		*urb;
 	struct musb_hw_ep	*hw_ep = musb->endpoints + epnum;
 	void __iomem		*epio = hw_ep->regs;
-	struct musb_qh		*qh = hw_ep->is_shared_fifo ? hw_ep->in_qh
-							    : hw_ep->out_qh;
+	struct musb_qh		*qh = hw_ep->out_qh;
+	struct urb		*urb = next_urb(qh);
 	u32			status = 0;
 	void __iomem		*mbase = musb->mregs;
 	struct dma_channel	*dma;
 
-	urb = next_urb(qh);
-
 	musb_ep_select(mbase, epnum);
 	tx_csr = musb_readw(epio, MUSB_TXCSR);
 
@@ -1427,7 +1395,7 @@
 			urb->actual_length += dma->actual_len;
 			dma->actual_len = 0L;
 		}
-		musb_save_toggle(ep, 1, urb);
+		musb_save_toggle(cur_qh, 1, urb);
 
 		/* move cur_qh to end of queue */
 		list_move_tail(&cur_qh->ring, &musb->in_bulk);
@@ -1531,6 +1499,10 @@
 			/* packet error reported later */
 			iso_err = true;
 		}
+	} else if (rx_csr & MUSB_RXCSR_INCOMPRX) {
+		DBG(3, "end %d high bandwidth incomplete ISO packet RX\n",
+				epnum);
+		status = -EPROTO;
 	}
 
 	/* faults abort the transfer */
@@ -1738,7 +1710,11 @@
 				val &= ~MUSB_RXCSR_H_AUTOREQ;
 			else
 				val |= MUSB_RXCSR_H_AUTOREQ;
-			val |= MUSB_RXCSR_AUTOCLEAR | MUSB_RXCSR_DMAENAB;
+			val |= MUSB_RXCSR_DMAENAB;
+
+			/* autoclear shouldn't be set in high bandwidth */
+			if (qh->hb_mult == 1)
+				val |= MUSB_RXCSR_AUTOCLEAR;
 
 			musb_writew(epio, MUSB_RXCSR,
 				MUSB_RXCSR_H_WZC_BITS | val);
@@ -1817,19 +1793,17 @@
 			epnum++, hw_ep++) {
 		int	diff;
 
-		if (is_in || hw_ep->is_shared_fifo) {
-			if (hw_ep->in_qh  != NULL)
-				continue;
-		} else	if (hw_ep->out_qh != NULL)
+		if (musb_ep_get_qh(hw_ep, is_in) != NULL)
 			continue;
 
 		if (hw_ep == musb->bulk_ep)
 			continue;
 
 		if (is_in)
-			diff = hw_ep->max_packet_sz_rx - qh->maxpacket;
+			diff = hw_ep->max_packet_sz_rx;
 		else
-			diff = hw_ep->max_packet_sz_tx - qh->maxpacket;
+			diff = hw_ep->max_packet_sz_tx;
+		diff -= (qh->maxpacket * qh->hb_mult);
 
 		if (diff >= 0 && best_diff > diff) {
 			best_diff = diff;
@@ -1932,15 +1906,27 @@
 	qh->is_ready = 1;
 
 	qh->maxpacket = le16_to_cpu(epd->wMaxPacketSize);
+	qh->type = usb_endpoint_type(epd);
 
-	/* no high bandwidth support yet */
-	if (qh->maxpacket & ~0x7ff) {
-		ret = -EMSGSIZE;
-		goto done;
+	/* Bits 11 & 12 of wMaxPacketSize encode high bandwidth multiplier.
+	 * Some musb cores don't support high bandwidth ISO transfers; and
+	 * we don't (yet!) support high bandwidth interrupt transfers.
+	 */
+	qh->hb_mult = 1 + ((qh->maxpacket >> 11) & 0x03);
+	if (qh->hb_mult > 1) {
+		int ok = (qh->type == USB_ENDPOINT_XFER_ISOC);
+
+		if (ok)
+			ok = (usb_pipein(urb->pipe) && musb->hb_iso_rx)
+				|| (usb_pipeout(urb->pipe) && musb->hb_iso_tx);
+		if (!ok) {
+			ret = -EMSGSIZE;
+			goto done;
+		}
+		qh->maxpacket &= 0x7ff;
 	}
 
 	qh->epnum = usb_endpoint_num(epd);
-	qh->type = usb_endpoint_type(epd);
 
 	/* NOTE: urb->dev->devnum is wrong during SET_ADDRESS */
 	qh->addr_reg = (u8) usb_pipedevice(urb->pipe);
@@ -2052,14 +2038,15 @@
  * called with controller locked, irqs blocked
  * that hardware queue advances to the next transfer, unless prevented
  */
-static int musb_cleanup_urb(struct urb *urb, struct musb_qh *qh, int is_in)
+static int musb_cleanup_urb(struct urb *urb, struct musb_qh *qh)
 {
 	struct musb_hw_ep	*ep = qh->hw_ep;
 	void __iomem		*epio = ep->regs;
 	unsigned		hw_end = ep->epnum;
 	void __iomem		*regs = ep->musb->mregs;
-	u16			csr;
+	int			is_in = usb_pipein(urb->pipe);
 	int			status = 0;
+	u16			csr;
 
 	musb_ep_select(regs, hw_end);
 
@@ -2112,14 +2099,14 @@
 {
 	struct musb		*musb = hcd_to_musb(hcd);
 	struct musb_qh		*qh;
-	struct list_head	*sched;
 	unsigned long		flags;
+	int			is_in  = usb_pipein(urb->pipe);
 	int			ret;
 
 	DBG(4, "urb=%p, dev%d ep%d%s\n", urb,
 			usb_pipedevice(urb->pipe),
 			usb_pipeendpoint(urb->pipe),
-			usb_pipein(urb->pipe) ? "in" : "out");
+			is_in ? "in" : "out");
 
 	spin_lock_irqsave(&musb->lock, flags);
 	ret = usb_hcd_check_unlink_urb(hcd, urb, status);
@@ -2130,47 +2117,25 @@
 	if (!qh)
 		goto done;
 
-	/* Any URB not actively programmed into endpoint hardware can be
+	/*
+	 * Any URB not actively programmed into endpoint hardware can be
 	 * immediately given back; that's any URB not at the head of an
 	 * endpoint queue, unless someday we get real DMA queues.  And even
 	 * if it's at the head, it might not be known to the hardware...
 	 *
-	 * Otherwise abort current transfer, pending dma, etc.; urb->status
+	 * Otherwise abort current transfer, pending DMA, etc.; urb->status
 	 * has already been updated.  This is a synchronous abort; it'd be
 	 * OK to hold off until after some IRQ, though.
+	 *
+	 * NOTE: qh is invalid unless !list_empty(&hep->urb_list)
 	 */
-	if (!qh->is_ready || urb->urb_list.prev != &qh->hep->urb_list)
-		ret = -EINPROGRESS;
-	else {
-		switch (qh->type) {
-		case USB_ENDPOINT_XFER_CONTROL:
-			sched = &musb->control;
-			break;
-		case USB_ENDPOINT_XFER_BULK:
-			if (qh->mux == 1) {
-				if (usb_pipein(urb->pipe))
-					sched = &musb->in_bulk;
-				else
-					sched = &musb->out_bulk;
-				break;
-			}
-		default:
-			/* REVISIT when we get a schedule tree, periodic
-			 * transfers won't always be at the head of a
-			 * singleton queue...
-			 */
-			sched = NULL;
-			break;
-		}
-	}
-
-	/* NOTE:  qh is invalid unless !list_empty(&hep->urb_list) */
-	if (ret < 0 || (sched && qh != first_qh(sched))) {
+	if (!qh->is_ready
+			|| urb->urb_list.prev != &qh->hep->urb_list
+			|| musb_ep_get_qh(qh->hw_ep, is_in) != qh) {
 		int	ready = qh->is_ready;
 
-		ret = 0;
 		qh->is_ready = 0;
-		__musb_giveback(musb, urb, 0);
+		musb_giveback(musb, urb, 0);
 		qh->is_ready = ready;
 
 		/* If nothing else (usually musb_giveback) is using it
@@ -2182,7 +2147,7 @@
 			kfree(qh);
 		}
 	} else
-		ret = musb_cleanup_urb(urb, qh, urb->pipe & USB_DIR_IN);
+		ret = musb_cleanup_urb(urb, qh);
 done:
 	spin_unlock_irqrestore(&musb->lock, flags);
 	return ret;
@@ -2192,13 +2157,11 @@
 static void
 musb_h_disable(struct usb_hcd *hcd, struct usb_host_endpoint *hep)
 {
-	u8			epnum = hep->desc.bEndpointAddress;
+	u8			is_in = hep->desc.bEndpointAddress & USB_DIR_IN;
 	unsigned long		flags;
 	struct musb		*musb = hcd_to_musb(hcd);
-	u8			is_in = epnum & USB_DIR_IN;
 	struct musb_qh		*qh;
 	struct urb		*urb;
-	struct list_head	*sched;
 
 	spin_lock_irqsave(&musb->lock, flags);
 
@@ -2206,31 +2169,11 @@
 	if (qh == NULL)
 		goto exit;
 
-	switch (qh->type) {
-	case USB_ENDPOINT_XFER_CONTROL:
-		sched = &musb->control;
-		break;
-	case USB_ENDPOINT_XFER_BULK:
-		if (qh->mux == 1) {
-			if (is_in)
-				sched = &musb->in_bulk;
-			else
-				sched = &musb->out_bulk;
-			break;
-		}
-	default:
-		/* REVISIT when we get a schedule tree, periodic transfers
-		 * won't always be at the head of a singleton queue...
-		 */
-		sched = NULL;
-		break;
-	}
+	/* NOTE: qh is invalid unless !list_empty(&hep->urb_list) */
 
-	/* NOTE:  qh is invalid unless !list_empty(&hep->urb_list) */
-
-	/* kick first urb off the hardware, if needed */
+	/* Kick the first URB off the hardware, if needed */
 	qh->is_ready = 0;
-	if (!sched || qh == first_qh(sched)) {
+	if (musb_ep_get_qh(qh->hw_ep, is_in) == qh) {
 		urb = next_urb(qh);
 
 		/* make software (then hardware) stop ASAP */
@@ -2238,7 +2181,7 @@
 			urb->status = -ESHUTDOWN;
 
 		/* cleanup */
-		musb_cleanup_urb(urb, qh, urb->pipe & USB_DIR_IN);
+		musb_cleanup_urb(urb, qh);
 
 		/* Then nuke all the others ... and advance the
 		 * queue on hw_ep (e.g. bulk ring) when we're done.
@@ -2254,7 +2197,7 @@
 		 * will activate any of these as it advances.
 		 */
 		while (!list_empty(&hep->urb_list))
-			__musb_giveback(musb, next_urb(qh), -ESHUTDOWN);
+			musb_giveback(musb, next_urb(qh), -ESHUTDOWN);
 
 		hep->hcpriv = NULL;
 		list_del(&qh->ring);
@@ -2293,7 +2236,7 @@
 {
 	struct musb	*musb = hcd_to_musb(hcd);
 
-	if (musb->xceiv.state == OTG_STATE_A_SUSPEND)
+	if (musb->xceiv->state == OTG_STATE_A_SUSPEND)
 		return 0;
 
 	if (is_host_active(musb) && musb->is_active) {
diff --git a/drivers/usb/musb/musb_host.h b/drivers/usb/musb/musb_host.h
index 0b7fbcd..14b0077 100644
--- a/drivers/usb/musb/musb_host.h
+++ b/drivers/usb/musb/musb_host.h
@@ -67,6 +67,7 @@
 	u8			is_ready;	/* safe to modify hw_ep */
 	u8			type;		/* XFERTYPE_* */
 	u8			epnum;
+	u8			hb_mult;	/* high bandwidth pkts per uf */
 	u16			maxpacket;
 	u16			frame;		/* for periodic schedule */
 	unsigned		iso_idx;	/* in urb->iso_frame_desc[] */
diff --git a/drivers/usb/musb/musb_virthub.c b/drivers/usb/musb/musb_virthub.c
index bf677ac..bfe5fe4 100644
--- a/drivers/usb/musb/musb_virthub.c
+++ b/drivers/usb/musb/musb_virthub.c
@@ -78,18 +78,22 @@
 		DBG(3, "Root port suspended, power %02x\n", power);
 
 		musb->port1_status |= USB_PORT_STAT_SUSPEND;
-		switch (musb->xceiv.state) {
+		switch (musb->xceiv->state) {
 		case OTG_STATE_A_HOST:
-			musb->xceiv.state = OTG_STATE_A_SUSPEND;
+			musb->xceiv->state = OTG_STATE_A_SUSPEND;
 			musb->is_active = is_otg_enabled(musb)
-					&& musb->xceiv.host->b_hnp_enable;
+					&& musb->xceiv->host->b_hnp_enable;
+			if (musb->is_active)
+				mod_timer(&musb->otg_timer, jiffies
+					+ msecs_to_jiffies(
+						OTG_TIME_A_AIDL_BDIS));
 			musb_platform_try_idle(musb, 0);
 			break;
 #ifdef	CONFIG_USB_MUSB_OTG
 		case OTG_STATE_B_HOST:
-			musb->xceiv.state = OTG_STATE_B_WAIT_ACON;
+			musb->xceiv->state = OTG_STATE_B_WAIT_ACON;
 			musb->is_active = is_otg_enabled(musb)
-					&& musb->xceiv.host->b_hnp_enable;
+					&& musb->xceiv->host->b_hnp_enable;
 			musb_platform_try_idle(musb, 0);
 			break;
 #endif
@@ -116,7 +120,7 @@
 	void __iomem	*mbase = musb->mregs;
 
 #ifdef CONFIG_USB_MUSB_OTG
-	if (musb->xceiv.state == OTG_STATE_B_IDLE) {
+	if (musb->xceiv->state == OTG_STATE_B_IDLE) {
 		DBG(2, "HNP: Returning from HNP; no hub reset from b_idle\n");
 		musb->port1_status &= ~USB_PORT_STAT_RESET;
 		return;
@@ -186,14 +190,23 @@
 	usb_hcd_poll_rh_status(musb_to_hcd(musb));
 	musb->is_active = 0;
 
-	switch (musb->xceiv.state) {
-	case OTG_STATE_A_HOST:
+	switch (musb->xceiv->state) {
 	case OTG_STATE_A_SUSPEND:
-		musb->xceiv.state = OTG_STATE_A_WAIT_BCON;
+#ifdef	CONFIG_USB_MUSB_OTG
+		if (is_otg_enabled(musb)
+				&& musb->xceiv->host->b_hnp_enable) {
+			musb->xceiv->state = OTG_STATE_A_PERIPHERAL;
+			musb->g.is_a_peripheral = 1;
+			break;
+		}
+#endif
+		/* FALLTHROUGH */
+	case OTG_STATE_A_HOST:
+		musb->xceiv->state = OTG_STATE_A_WAIT_BCON;
 		musb->is_active = 0;
 		break;
 	case OTG_STATE_A_WAIT_VFALL:
-		musb->xceiv.state = OTG_STATE_B_IDLE;
+		musb->xceiv->state = OTG_STATE_B_IDLE;
 		break;
 	default:
 		DBG(1, "host disconnect (%s)\n", otg_state_string(musb));
@@ -332,7 +345,7 @@
 			musb->port1_status |= USB_PORT_STAT_C_SUSPEND << 16;
 			usb_hcd_poll_rh_status(musb_to_hcd(musb));
 			/* NOTE: it might really be A_WAIT_BCON ... */
-			musb->xceiv.state = OTG_STATE_A_HOST;
+			musb->xceiv->state = OTG_STATE_A_HOST;
 		}
 
 		put_unaligned(cpu_to_le32(musb->port1_status
diff --git a/drivers/usb/musb/omap2430.c b/drivers/usb/musb/omap2430.c
index 60924ce..3487520 100644
--- a/drivers/usb/musb/omap2430.c
+++ b/drivers/usb/musb/omap2430.c
@@ -44,7 +44,6 @@
 #define	get_cpu_rev()	2
 #endif
 
-#define MUSB_TIMEOUT_A_WAIT_BCON	1100
 
 static struct timer_list musb_idle_timer;
 
@@ -61,17 +60,17 @@
 
 	devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
 
-	switch (musb->xceiv.state) {
+	switch (musb->xceiv->state) {
 	case OTG_STATE_A_WAIT_BCON:
 		devctl &= ~MUSB_DEVCTL_SESSION;
 		musb_writeb(musb->mregs, MUSB_DEVCTL, devctl);
 
 		devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
 		if (devctl & MUSB_DEVCTL_BDEVICE) {
-			musb->xceiv.state = OTG_STATE_B_IDLE;
+			musb->xceiv->state = OTG_STATE_B_IDLE;
 			MUSB_DEV_MODE(musb);
 		} else {
-			musb->xceiv.state = OTG_STATE_A_IDLE;
+			musb->xceiv->state = OTG_STATE_A_IDLE;
 			MUSB_HST_MODE(musb);
 		}
 		break;
@@ -89,7 +88,7 @@
 			musb->port1_status |= USB_PORT_STAT_C_SUSPEND << 16;
 			usb_hcd_poll_rh_status(musb_to_hcd(musb));
 			/* NOTE: it might really be A_WAIT_BCON ... */
-			musb->xceiv.state = OTG_STATE_A_HOST;
+			musb->xceiv->state = OTG_STATE_A_HOST;
 		}
 		break;
 #endif
@@ -97,9 +96,9 @@
 	case OTG_STATE_A_HOST:
 		devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
 		if (devctl &  MUSB_DEVCTL_BDEVICE)
-			musb->xceiv.state = OTG_STATE_B_IDLE;
+			musb->xceiv->state = OTG_STATE_B_IDLE;
 		else
-			musb->xceiv.state = OTG_STATE_A_WAIT_BCON;
+			musb->xceiv->state = OTG_STATE_A_WAIT_BCON;
 #endif
 	default:
 		break;
@@ -118,7 +117,7 @@
 
 	/* Never idle if active, or when VBUS timeout is not set as host */
 	if (musb->is_active || ((musb->a_wait_bcon == 0)
-			&& (musb->xceiv.state == OTG_STATE_A_WAIT_BCON))) {
+			&& (musb->xceiv->state == OTG_STATE_A_WAIT_BCON))) {
 		DBG(4, "%s active, deleting timer\n", otg_state_string(musb));
 		del_timer(&musb_idle_timer);
 		last_timer = jiffies;
@@ -163,8 +162,8 @@
 
 	if (is_on) {
 		musb->is_active = 1;
-		musb->xceiv.default_a = 1;
-		musb->xceiv.state = OTG_STATE_A_WAIT_VRISE;
+		musb->xceiv->default_a = 1;
+		musb->xceiv->state = OTG_STATE_A_WAIT_VRISE;
 		devctl |= MUSB_DEVCTL_SESSION;
 
 		MUSB_HST_MODE(musb);
@@ -175,8 +174,8 @@
 		 * jumping right to B_IDLE...
 		 */
 
-		musb->xceiv.default_a = 0;
-		musb->xceiv.state = OTG_STATE_B_IDLE;
+		musb->xceiv->default_a = 0;
+		musb->xceiv->state = OTG_STATE_B_IDLE;
 		devctl &= ~MUSB_DEVCTL_SESSION;
 
 		MUSB_DEV_MODE(musb);
@@ -188,10 +187,6 @@
 		otg_state_string(musb),
 		musb_readb(musb->mregs, MUSB_DEVCTL));
 }
-static int omap_set_power(struct otg_transceiver *x, unsigned mA)
-{
-	return 0;
-}
 
 static int musb_platform_resume(struct musb *musb);
 
@@ -202,24 +197,6 @@
 	devctl |= MUSB_DEVCTL_SESSION;
 	musb_writeb(musb->mregs, MUSB_DEVCTL, devctl);
 
-	switch (musb_mode) {
-#ifdef CONFIG_USB_MUSB_HDRC_HCD
-	case MUSB_HOST:
-		otg_set_host(&musb->xceiv, musb->xceiv.host);
-		break;
-#endif
-#ifdef CONFIG_USB_GADGET_MUSB_HDRC
-	case MUSB_PERIPHERAL:
-		otg_set_peripheral(&musb->xceiv, musb->xceiv.gadget);
-		break;
-#endif
-#ifdef CONFIG_USB_MUSB_OTG
-	case MUSB_OTG:
-		break;
-#endif
-	default:
-		return -EINVAL;
-	}
 	return 0;
 }
 
@@ -231,6 +208,16 @@
 	omap_cfg_reg(AE5_2430_USB0HS_STP);
 #endif
 
+	/* We require some kind of external transceiver, hooked
+	 * up through ULPI.  TWL4030-family PMICs include one,
+	 * which needs a driver; drivers aren't always needed.
+	 */
+	musb->xceiv = otg_get_transceiver();
+	if (!musb->xceiv) {
+		pr_err("HS USB OTG: no transceiver configured\n");
+		return -ENODEV;
+	}
+
 	musb_platform_resume(musb);
 
 	l = omap_readl(OTG_SYSCONFIG);
@@ -240,7 +227,12 @@
 	l &= ~AUTOIDLE;		/* disable auto idle */
 	l &= ~NOIDLE;		/* remove possible noidle */
 	l |= SMARTIDLE;		/* enable smart idle */
-	l |= AUTOIDLE;		/* enable auto idle */
+	/*
+	 * MUSB AUTOIDLE doesn't work on the 3430.
+	 * Workaround by Richard Woodruff/TI
+	 */
+	if (!cpu_is_omap3430())
+		l |= AUTOIDLE;		/* enable auto idle */
 	omap_writel(l, OTG_SYSCONFIG);
 
 	l = omap_readl(OTG_INTERFSEL);
@@ -257,9 +249,6 @@
 
 	if (is_host_enabled(musb))
 		musb->board_set_vbus = omap_set_vbus;
-	if (is_peripheral_enabled(musb))
-		musb->xceiv.set_power = omap_set_power;
-	musb->a_wait_bcon = MUSB_TIMEOUT_A_WAIT_BCON;
 
 	setup_timer(&musb_idle_timer, musb_do_idle, (unsigned long) musb);
 
@@ -282,8 +271,7 @@
 	l |= ENABLEWAKEUP;	/* enable wakeup */
 	omap_writel(l, OTG_SYSCONFIG);
 
-	if (musb->xceiv.set_suspend)
-		musb->xceiv.set_suspend(&musb->xceiv, 1);
+	otg_set_suspend(musb->xceiv, 1);
 
 	if (musb->set_clock)
 		musb->set_clock(musb->clock, 0);
@@ -300,8 +288,7 @@
 	if (!musb->clock)
 		return 0;
 
-	if (musb->xceiv.set_suspend)
-		musb->xceiv.set_suspend(&musb->xceiv, 0);
+	otg_set_suspend(musb->xceiv, 0);
 
 	if (musb->set_clock)
 		musb->set_clock(musb->clock, 1);
diff --git a/drivers/usb/musb/tusb6010.c b/drivers/usb/musb/tusb6010.c
index 4ac1477..88b587c 100644
--- a/drivers/usb/musb/tusb6010.c
+++ b/drivers/usb/musb/tusb6010.c
@@ -259,6 +259,8 @@
 		tusb_fifo_read_unaligned(fifo, buf, len);
 }
 
+static struct musb *the_musb;
+
 #ifdef CONFIG_USB_GADGET_MUSB_HDRC
 
 /* This is used by gadget drivers, and OTG transceiver logic, allowing
@@ -269,7 +271,7 @@
  */
 static int tusb_draw_power(struct otg_transceiver *x, unsigned mA)
 {
-	struct musb	*musb = container_of(x, struct musb, xceiv);
+	struct musb	*musb = the_musb;
 	void __iomem	*tbase = musb->ctrl_base;
 	u32		reg;
 
@@ -419,7 +421,7 @@
 
 	spin_lock_irqsave(&musb->lock, flags);
 
-	switch (musb->xceiv.state) {
+	switch (musb->xceiv->state) {
 	case OTG_STATE_A_WAIT_BCON:
 		if ((musb->a_wait_bcon != 0)
 			&& (musb->idle_timeout == 0
@@ -483,7 +485,7 @@
 
 	/* Never idle if active, or when VBUS timeout is not set as host */
 	if (musb->is_active || ((musb->a_wait_bcon == 0)
-			&& (musb->xceiv.state == OTG_STATE_A_WAIT_BCON))) {
+			&& (musb->xceiv->state == OTG_STATE_A_WAIT_BCON))) {
 		DBG(4, "%s active, deleting timer\n", otg_state_string(musb));
 		del_timer(&musb_idle_timer);
 		last_timer = jiffies;
@@ -532,8 +534,8 @@
 		if (musb->set_clock)
 			musb->set_clock(musb->clock, 1);
 		timer = OTG_TIMER_MS(OTG_TIME_A_WAIT_VRISE);
-		musb->xceiv.default_a = 1;
-		musb->xceiv.state = OTG_STATE_A_WAIT_VRISE;
+		musb->xceiv->default_a = 1;
+		musb->xceiv->state = OTG_STATE_A_WAIT_VRISE;
 		devctl |= MUSB_DEVCTL_SESSION;
 
 		conf |= TUSB_DEV_CONF_USB_HOST_MODE;
@@ -546,24 +548,24 @@
 		/* If ID pin is grounded, we want to be a_idle */
 		otg_stat = musb_readl(tbase, TUSB_DEV_OTG_STAT);
 		if (!(otg_stat & TUSB_DEV_OTG_STAT_ID_STATUS)) {
-			switch (musb->xceiv.state) {
+			switch (musb->xceiv->state) {
 			case OTG_STATE_A_WAIT_VRISE:
 			case OTG_STATE_A_WAIT_BCON:
-				musb->xceiv.state = OTG_STATE_A_WAIT_VFALL;
+				musb->xceiv->state = OTG_STATE_A_WAIT_VFALL;
 				break;
 			case OTG_STATE_A_WAIT_VFALL:
-				musb->xceiv.state = OTG_STATE_A_IDLE;
+				musb->xceiv->state = OTG_STATE_A_IDLE;
 				break;
 			default:
-				musb->xceiv.state = OTG_STATE_A_IDLE;
+				musb->xceiv->state = OTG_STATE_A_IDLE;
 			}
 			musb->is_active = 0;
-			musb->xceiv.default_a = 1;
+			musb->xceiv->default_a = 1;
 			MUSB_HST_MODE(musb);
 		} else {
 			musb->is_active = 0;
-			musb->xceiv.default_a = 0;
-			musb->xceiv.state = OTG_STATE_B_IDLE;
+			musb->xceiv->default_a = 0;
+			musb->xceiv->state = OTG_STATE_B_IDLE;
 			MUSB_DEV_MODE(musb);
 		}
 
@@ -674,7 +676,7 @@
 		else
 			default_a = is_host_enabled(musb);
 		DBG(2, "Default-%c\n", default_a ? 'A' : 'B');
-		musb->xceiv.default_a = default_a;
+		musb->xceiv->default_a = default_a;
 		tusb_source_power(musb, default_a);
 
 		/* Don't allow idling immediately */
@@ -686,7 +688,7 @@
 	if (int_src & TUSB_INT_SRC_VBUS_SENSE_CHNG) {
 
 		/* B-dev state machine:  no vbus ~= disconnect */
-		if ((is_otg_enabled(musb) && !musb->xceiv.default_a)
+		if ((is_otg_enabled(musb) && !musb->xceiv->default_a)
 				|| !is_host_enabled(musb)) {
 #ifdef CONFIG_USB_MUSB_HDRC_HCD
 			/* ? musb_root_disconnect(musb); */
@@ -701,9 +703,9 @@
 
 			if (otg_stat & TUSB_DEV_OTG_STAT_SESS_END) {
 				DBG(1, "Forcing disconnect (no interrupt)\n");
-				if (musb->xceiv.state != OTG_STATE_B_IDLE) {
+				if (musb->xceiv->state != OTG_STATE_B_IDLE) {
 					/* INTR_DISCONNECT can hide... */
-					musb->xceiv.state = OTG_STATE_B_IDLE;
+					musb->xceiv->state = OTG_STATE_B_IDLE;
 					musb->int_usb |= MUSB_INTR_DISCONNECT;
 				}
 				musb->is_active = 0;
@@ -717,7 +719,7 @@
 			DBG(2, "vbus change, %s, otg %03x\n",
 				otg_state_string(musb), otg_stat);
 
-			switch (musb->xceiv.state) {
+			switch (musb->xceiv->state) {
 			case OTG_STATE_A_IDLE:
 				DBG(2, "Got SRP, turning on VBUS\n");
 				musb_set_vbus(musb, 1);
@@ -765,7 +767,7 @@
 
 		DBG(4, "%s timer, %03x\n", otg_state_string(musb), otg_stat);
 
-		switch (musb->xceiv.state) {
+		switch (musb->xceiv->state) {
 		case OTG_STATE_A_WAIT_VRISE:
 			/* VBUS has probably been valid for a while now,
 			 * but may well have bounced out of range a bit
@@ -777,7 +779,7 @@
 					DBG(2, "devctl %02x\n", devctl);
 					break;
 				}
-				musb->xceiv.state = OTG_STATE_A_WAIT_BCON;
+				musb->xceiv->state = OTG_STATE_A_WAIT_BCON;
 				musb->is_active = 0;
 				idle_timeout = jiffies
 					+ msecs_to_jiffies(musb->a_wait_bcon);
@@ -1093,9 +1095,14 @@
 {
 	struct platform_device	*pdev;
 	struct resource		*mem;
-	void __iomem		*sync;
+	void __iomem		*sync = NULL;
 	int			ret;
 
+	usb_nop_xceiv_register();
+	musb->xceiv = otg_get_transceiver();
+	if (!musb->xceiv)
+		return -ENODEV;
+
 	pdev = to_platform_device(musb->controller);
 
 	/* dma address for async dma */
@@ -1106,14 +1113,16 @@
 	mem = platform_get_resource(pdev, IORESOURCE_MEM, 1);
 	if (!mem) {
 		pr_debug("no sync dma resource?\n");
-		return -ENODEV;
+		ret = -ENODEV;
+		goto done;
 	}
 	musb->sync = mem->start;
 
 	sync = ioremap(mem->start, mem->end - mem->start + 1);
 	if (!sync) {
 		pr_debug("ioremap for sync failed\n");
-		return -ENOMEM;
+		ret = -ENOMEM;
+		goto done;
 	}
 	musb->sync_va = sync;
 
@@ -1126,28 +1135,37 @@
 	if (ret) {
 		printk(KERN_ERR "Could not start tusb6010 (%d)\n",
 				ret);
-		return -ENODEV;
+		goto done;
 	}
 	musb->isr = tusb_interrupt;
 
 	if (is_host_enabled(musb))
 		musb->board_set_vbus = tusb_source_power;
-	if (is_peripheral_enabled(musb))
-		musb->xceiv.set_power = tusb_draw_power;
+	if (is_peripheral_enabled(musb)) {
+		musb->xceiv->set_power = tusb_draw_power;
+		the_musb = musb;
+	}
 
 	setup_timer(&musb_idle_timer, musb_do_idle, (unsigned long) musb);
 
+done:
+	if (ret < 0) {
+		if (sync)
+			iounmap(sync);
+		usb_nop_xceiv_unregister();
+	}
 	return ret;
 }
 
 int musb_platform_exit(struct musb *musb)
 {
 	del_timer_sync(&musb_idle_timer);
+	the_musb = NULL;
 
 	if (musb->board_set_power)
 		musb->board_set_power(0);
 
 	iounmap(musb->sync_va);
-
+	usb_nop_xceiv_unregister();
 	return 0;
 }
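
tusb_platform_init() above now funnels its error paths through a single "done:" exit so the ioremap()ed sync window and the nop transceiver registration are undone whenever a later step fails. A compact user-space sketch of the same unwinding shape; step_a()/step_b() and platform_init() are invented stand-ins, not driver functions:

#include <stdio.h>
#include <stdlib.h>

static void *step_a(void)     { return malloc(16); }	/* like otg_get_transceiver() */
static void *step_b(int fail) { return fail ? NULL : malloc(16); }	/* like ioremap() */

static int platform_init(int fail_b)
{
	void *a, *b = NULL;
	int ret = 0;

	a = step_a();
	if (!a)
		return -1;	/* nothing acquired yet, return directly */

	b = step_b(fail_b);
	if (!b) {
		ret = -1;
		goto done;
	}

	printf("init ok\n");	/* success: resources stay held */
done:
	if (ret < 0) {		/* unwind only on failure, in reverse order */
		if (b)
			free(b);
		free(a);
	}
	return ret;
}

int main(void)
{
	return (platform_init(1) == -1 && platform_init(0) == 0) ? 0 : 1;
}
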
diff --git a/drivers/usb/otg/Kconfig b/drivers/usb/otg/Kconfig
index aa884d0..69feeec 100644
--- a/drivers/usb/otg/Kconfig
+++ b/drivers/usb/otg/Kconfig
@@ -59,4 +59,18 @@
 	 built-in with usb ip or which are autonomous and doesn't require any
 	 phy programming such as ISP1x04 etc.
 
+config USB_LANGWELL_OTG
+	tristate "Intel Langwell USB OTG dual-role support"
+	depends on USB && MRST
+	select USB_OTG
+	select USB_OTG_UTILS
+	help
+	  Say Y here if you want to build Intel Langwell USB OTG
+	  transceiver driver into the kernel. This driver implements role
+	  switching between the EHCI host driver and the Langwell USB OTG
+	  client driver.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called langwell_otg.
+
 endif # USB || OTG
diff --git a/drivers/usb/otg/Makefile b/drivers/usb/otg/Makefile
index 2081678..6d1abdd 100644
--- a/drivers/usb/otg/Makefile
+++ b/drivers/usb/otg/Makefile
@@ -9,6 +9,7 @@
 obj-$(CONFIG_USB_GPIO_VBUS)	+= gpio_vbus.o
 obj-$(CONFIG_ISP1301_OMAP)	+= isp1301_omap.o
 obj-$(CONFIG_TWL4030_USB)	+= twl4030-usb.o
+obj-$(CONFIG_USB_LANGWELL_OTG)	+= langwell_otg.o
 obj-$(CONFIG_NOP_USB_XCEIV)	+= nop-usb-xceiv.o
 
 ccflags-$(CONFIG_USB_DEBUG)	+= -DDEBUG
diff --git a/drivers/usb/otg/langwell_otg.c b/drivers/usb/otg/langwell_otg.c
new file mode 100644
index 0000000..6f628d0
--- /dev/null
+++ b/drivers/usb/otg/langwell_otg.c
@@ -0,0 +1,1915 @@
+/*
+ * Intel Langwell USB OTG transceiver driver
+ * Copyright (C) 2008 - 2009, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ */
+/* This driver helps to switch the Langwell OTG controller between host
+ * and peripheral roles. It works together with the EHCI driver and the
+ * Langwell client controller driver.
+ */
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/moduleparam.h>
+#include <linux/usb/ch9.h>
+#include <linux/usb/gadget.h>
+#include <linux/usb.h>
+#include <linux/usb/otg.h>
+#include <linux/notifier.h>
+#include <asm/ipc_defs.h>
+#include <linux/delay.h>
+#include "../core/hcd.h"
+
+#include <linux/usb/langwell_otg.h>
+
+#define	DRIVER_DESC		"Intel Langwell USB OTG transceiver driver"
+#define	DRIVER_VERSION		"3.0.0.32L.0002"
+
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_AUTHOR("Henry Yuan <hang.yuan@intel.com>, Hao Wu <hao.wu@intel.com>");
+MODULE_VERSION(DRIVER_VERSION);
+MODULE_LICENSE("GPL");
+
+static const char driver_name[] = "langwell_otg";
+
+static int langwell_otg_probe(struct pci_dev *pdev,
+			const struct pci_device_id *id);
+static void langwell_otg_remove(struct pci_dev *pdev);
+static int langwell_otg_suspend(struct pci_dev *pdev, pm_message_t message);
+static int langwell_otg_resume(struct pci_dev *pdev);
+
+static int langwell_otg_set_host(struct otg_transceiver *otg,
+				struct usb_bus *host);
+static int langwell_otg_set_peripheral(struct otg_transceiver *otg,
+				struct usb_gadget *gadget);
+static int langwell_otg_start_srp(struct otg_transceiver *otg);
+
+static const struct pci_device_id pci_ids[] = {{
+	.class =        ((PCI_CLASS_SERIAL_USB << 8) | 0xfe),
+	.class_mask =   ~0,
+	.vendor =	0x8086,
+	.device =	0x0811,
+	.subvendor =	PCI_ANY_ID,
+	.subdevice =	PCI_ANY_ID,
+}, { /* end: all zeroes */ }
+};
+
+static struct pci_driver otg_pci_driver = {
+	.name =		(char *) driver_name,
+	.id_table =	pci_ids,
+
+	.probe =	langwell_otg_probe,
+	.remove =	langwell_otg_remove,
+
+	.suspend =	langwell_otg_suspend,
+	.resume =	langwell_otg_resume,
+};
+
+static const char *state_string(enum usb_otg_state state)
+{
+	switch (state) {
+	case OTG_STATE_A_IDLE:
+		return "a_idle";
+	case OTG_STATE_A_WAIT_VRISE:
+		return "a_wait_vrise";
+	case OTG_STATE_A_WAIT_BCON:
+		return "a_wait_bcon";
+	case OTG_STATE_A_HOST:
+		return "a_host";
+	case OTG_STATE_A_SUSPEND:
+		return "a_suspend";
+	case OTG_STATE_A_PERIPHERAL:
+		return "a_peripheral";
+	case OTG_STATE_A_WAIT_VFALL:
+		return "a_wait_vfall";
+	case OTG_STATE_A_VBUS_ERR:
+		return "a_vbus_err";
+	case OTG_STATE_B_IDLE:
+		return "b_idle";
+	case OTG_STATE_B_SRP_INIT:
+		return "b_srp_init";
+	case OTG_STATE_B_PERIPHERAL:
+		return "b_peripheral";
+	case OTG_STATE_B_WAIT_ACON:
+		return "b_wait_acon";
+	case OTG_STATE_B_HOST:
+		return "b_host";
+	default:
+		return "UNDEFINED";
+	}
+}
+
+/* HSM timers */
+static inline struct langwell_otg_timer *otg_timer_initializer
+(void (*function)(unsigned long), unsigned long expires, unsigned long data)
+{
+	struct langwell_otg_timer *timer;
+	timer = kmalloc(sizeof(struct langwell_otg_timer), GFP_KERNEL);
+	timer->function = function;
+	timer->expires = expires;
+	timer->data = data;
+	return timer;
+}
+
+static struct langwell_otg_timer *a_wait_vrise_tmr, *a_wait_bcon_tmr,
+	*a_aidl_bdis_tmr, *b_ase0_brst_tmr, *b_se0_srp_tmr, *b_srp_res_tmr,
+	*b_bus_suspend_tmr;
+
+static struct list_head active_timers;
+
+static struct langwell_otg *the_transceiver;
+
+/* host/client notify transceiver when event affects HNP state */
+void langwell_update_transceiver(void)
+{
+	otg_dbg("transceiver driver is notified\n");
+	queue_work(the_transceiver->qwork, &the_transceiver->work);
+}
+EXPORT_SYMBOL(langwell_update_transceiver);
+
+static int langwell_otg_set_host(struct otg_transceiver *otg,
+					struct usb_bus *host)
+{
+	otg->host = host;
+
+	return 0;
+}
+
+static int langwell_otg_set_peripheral(struct otg_transceiver *otg,
+					struct usb_gadget *gadget)
+{
+	otg->gadget = gadget;
+
+	return 0;
+}
+
+static int langwell_otg_set_power(struct otg_transceiver *otg,
+				unsigned mA)
+{
+	return 0;
+}
+
+/* A-device drives vbus, controlled through the PMIC CHRGCNTL register */
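+/*
+ * The PMIC register offsets (0xd2, 0xd4) and bit values used below appear to
+ * be platform-specific magic numbers, written through the PMIC IPC interface
+ * declared in <asm/ipc_defs.h>.
+ */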
+static void langwell_otg_drv_vbus(int on)
+{
+	struct ipc_pmic_reg_data	pmic_data = {0};
+	struct ipc_pmic_reg_data	battery_data;
+
+	/* Check if battery is attached or not */
+	battery_data.pmic_reg_data[0].register_address = 0xd2;
+	battery_data.ioc = 0;
+	battery_data.num_entries = 1;
+	if (ipc_pmic_register_read(&battery_data)) {
+		otg_dbg("Failed to read PMIC register 0xd2.\n");
+		return;
+	}
+
+	if ((battery_data.pmic_reg_data[0].value & 0x20) == 0) {
+		otg_dbg("no battery attached\n");
+		return;
+	}
+
+	/* Workaround for battery attachment issue */
+	if (battery_data.pmic_reg_data[0].value == 0x34) {
+		otg_dbg("battery \n");
+		return;
+	}
+
+	otg_dbg("battery attached\n");
+
+	pmic_data.ioc = 0;
+	pmic_data.pmic_reg_data[0].register_address = 0xD4;
+	pmic_data.num_entries = 1;
+	if (on)
+		pmic_data.pmic_reg_data[0].value = 0x20;
+	else
+		pmic_data.pmic_reg_data[0].value = 0xc0;
+
+	if (ipc_pmic_register_write(&pmic_data, TRUE))
+		otg_dbg("Failed to write PMIC.\n");
+
+}
+
+/* charge vbus or discharge vbus through a resistor to ground */
+static void langwell_otg_chrg_vbus(int on)
+{
+
+	u32	val;
+
+	val = readl(the_transceiver->regs + CI_OTGSC);
+
+	if (on)
+		writel((val & ~OTGSC_INTSTS_MASK) | OTGSC_VC,
+				the_transceiver->regs + CI_OTGSC);
+	else
+		writel((val & ~OTGSC_INTSTS_MASK) | OTGSC_VD,
+				the_transceiver->regs + CI_OTGSC);
+
+}
+
+/* Start SRP */
+static int langwell_otg_start_srp(struct otg_transceiver *otg)
+{
+	u32	val;
+
+	otg_dbg("Start SRP ->\n");
+
+	val = readl(the_transceiver->regs + CI_OTGSC);
+
+	writel((val & ~OTGSC_INTSTS_MASK) | OTGSC_HADP,
+		the_transceiver->regs + CI_OTGSC);
+
+	/* Check if the data pulse has finished or not */
+	msleep(8);
+	val = readl(the_transceiver->regs + CI_OTGSC);
+	if (val & (OTGSC_HADP | OTGSC_DP))
+		otg_dbg("DataLine SRP Error\n");
+
+	/* FIXME: VBus SRP */
+
+	return 0;
+}
+
+
+/* stop SOF via bus_suspend */
+static void langwell_otg_loc_sof(int on)
+{
+	struct usb_hcd	*hcd;
+	int		err;
+
+	otg_dbg("loc_sof -> %d\n", on);
+
+	hcd = bus_to_hcd(the_transceiver->otg.host);
+	if (on)
+		err = hcd->driver->bus_resume(hcd);
+	else
+		err = hcd->driver->bus_suspend(hcd);
+
+	if (err)
+		otg_dbg("Failed to resume/suspend bus - %d\n", err);
+}
+
+static void langwell_otg_phy_low_power(int on)
+{
+	u32	val;
+
+	otg_dbg("phy low power mode-> %d\n", on);
+
+	val = readl(the_transceiver->regs + CI_HOSTPC1);
+	if (on)
+		writel(val | HOSTPC1_PHCD, the_transceiver->regs + CI_HOSTPC1);
+	else
+		writel(val & ~HOSTPC1_PHCD, the_transceiver->regs + CI_HOSTPC1);
+}
+
+/* Enable/Disable OTG interrupt */
+static void langwell_otg_intr(int on)
+{
+	u32 val;
+
+	otg_dbg("interrupt -> %d\n", on);
+
+	val = readl(the_transceiver->regs + CI_OTGSC);
+	if (on) {
+		val = val | (OTGSC_INTEN_MASK | OTGSC_IDPU);
+		writel(val, the_transceiver->regs + CI_OTGSC);
+	} else {
+		val = val & ~(OTGSC_INTEN_MASK | OTGSC_IDPU);
+		writel(val, the_transceiver->regs + CI_OTGSC);
+	}
+}
+
+/* set HAAR: Hardware Assist Auto-Reset */
+static void langwell_otg_HAAR(int on)
+{
+	u32	val;
+
+	otg_dbg("HAAR -> %d\n", on);
+
+	val = readl(the_transceiver->regs + CI_OTGSC);
+	if (on)
+		writel((val & ~OTGSC_INTSTS_MASK) | OTGSC_HAAR,
+				the_transceiver->regs + CI_OTGSC);
+	else
+		writel((val & ~OTGSC_INTSTS_MASK) & ~OTGSC_HAAR,
+				the_transceiver->regs + CI_OTGSC);
+}
+
+/* set HABA: Hardware Assist B-Disconnect to A-Connect */
+static void langwell_otg_HABA(int on)
+{
+	u32	val;
+
+	otg_dbg("HABA -> %d\n", on);
+
+	val = readl(the_transceiver->regs + CI_OTGSC);
+	if (on)
+		writel((val & ~OTGSC_INTSTS_MASK) | OTGSC_HABA,
+				the_transceiver->regs + CI_OTGSC);
+	else
+		writel((val & ~OTGSC_INTSTS_MASK) & ~OTGSC_HABA,
+				the_transceiver->regs + CI_OTGSC);
+}
+
+static int langwell_otg_check_se0_srp(int on)
+{
+	u32 val;
+
+	int delay_time = TB_SE0_SRP * 10; /* step is 100us */
+
+	otg_dbg("check_se0_srp -> \n");
+
+	do {
+		udelay(100);
+		if (!delay_time--)
+			break;
+		val = readl(the_transceiver->regs + CI_PORTSC1);
+		val &= PORTSC_LS;
+	} while (!val);
+
+	otg_dbg("check_se0_srp <- \n");
+	return val;
+}
+
+/* The timeout callback function to set time out bit */
+static void set_tmout(unsigned long indicator)
+{
+	*(int *)indicator = 1;
+}
+
+void langwell_otg_nsf_msg(unsigned long indicator)
+{
+	switch (indicator) {
+	case 2:
+	case 4:
+	case 6:
+	case 7:
+		printk(KERN_ERR "OTG:NSF-%lu - deivce not responding\n",
+				indicator);
+		break;
+	case 3:
+		printk(KERN_ERR "OTG:NSF-%lu - deivce not supported\n",
+				indicator);
+		break;
+	default:
+		printk(KERN_ERR "Do not have this kind of NSF\n");
+		break;
+	}
+}
+
+/* Initialize timers */
+static void langwell_otg_init_timers(struct otg_hsm *hsm)
+{
+	/* HSM used timers */
+	a_wait_vrise_tmr = otg_timer_initializer(&set_tmout, TA_WAIT_VRISE,
+				(unsigned long)&hsm->a_wait_vrise_tmout);
+	a_wait_bcon_tmr = otg_timer_initializer(&set_tmout, TA_WAIT_BCON,
+				(unsigned long)&hsm->a_wait_bcon_tmout);
+	a_aidl_bdis_tmr = otg_timer_initializer(&set_tmout, TA_AIDL_BDIS,
+				(unsigned long)&hsm->a_aidl_bdis_tmout);
+	b_ase0_brst_tmr = otg_timer_initializer(&set_tmout, TB_ASE0_BRST,
+				(unsigned long)&hsm->b_ase0_brst_tmout);
+	b_se0_srp_tmr = otg_timer_initializer(&set_tmout, TB_SE0_SRP,
+				(unsigned long)&hsm->b_se0_srp);
+	b_srp_res_tmr = otg_timer_initializer(&set_tmout, TB_SRP_RES,
+				(unsigned long)&hsm->b_srp_res_tmout);
+	b_bus_suspend_tmr = otg_timer_initializer(&set_tmout, TB_BUS_SUSPEND,
+				(unsigned long)&hsm->b_bus_suspend_tmout);
+}
+
+/* Free timers */
+static void langwell_otg_free_timers(void)
+{
+	kfree(a_wait_vrise_tmr);
+	kfree(a_wait_bcon_tmr);
+	kfree(a_aidl_bdis_tmr);
+	kfree(b_ase0_brst_tmr);
+	kfree(b_se0_srp_tmr);
+	kfree(b_srp_res_tmr);
+	kfree(b_bus_suspend_tmr);
+}
+
+/* Add timer to timer list */
+static void langwell_otg_add_timer(void *gtimer)
+{
+	struct langwell_otg_timer *timer = (struct langwell_otg_timer *)gtimer;
+	struct langwell_otg_timer *tmp_timer;
+	u32	val32;
+
+	/* Check if the timer is already in the active list,
+	 * if so update timer count
+	 */
+	list_for_each_entry(tmp_timer, &active_timers, list)
+		if (tmp_timer == timer) {
+			timer->count = timer->expires;
+			return;
+		}
+	timer->count = timer->expires;
+
+	if (list_empty(&active_timers)) {
+		val32 = readl(the_transceiver->regs + CI_OTGSC);
+		writel(val32 | OTGSC_1MSE, the_transceiver->regs + CI_OTGSC);
+	}
+
+	list_add_tail(&timer->list, &active_timers);
+}
+
+/* Remove timer from the timer list; clear timeout status */
+static void langwell_otg_del_timer(void *gtimer)
+{
+	struct langwell_otg_timer *timer = (struct langwell_otg_timer *)gtimer;
+	struct langwell_otg_timer *tmp_timer, *del_tmp;
+	u32 val32;
+
+	list_for_each_entry_safe(tmp_timer, del_tmp, &active_timers, list)
+		if (tmp_timer == timer)
+			list_del(&timer->list);
+
+	if (list_empty(&active_timers)) {
+		val32 = readl(the_transceiver->regs + CI_OTGSC);
+		writel(val32 & ~OTGSC_1MSE, the_transceiver->regs + CI_OTGSC);
+	}
+}
+
+/* Reduce timer count by 1, and find timeout conditions.*/
+static int langwell_otg_tick_timer(u32 *int_sts)
+{
+	struct langwell_otg_timer *tmp_timer, *del_tmp;
+	int expired = 0;
+
+	list_for_each_entry_safe(tmp_timer, del_tmp, &active_timers, list) {
+		tmp_timer->count--;
+		/* check if timer expires */
+		if (!tmp_timer->count) {
+			list_del(&tmp_timer->list);
+			tmp_timer->function(tmp_timer->data);
+			expired = 1;
+		}
+	}
+
+	if (list_empty(&active_timers)) {
+		otg_dbg("tick timer: disable 1ms int\n");
+		*int_sts = *int_sts & ~OTGSC_1MSE;
+	}
+	return expired;
+}
+
+static void reset_otg(void)
+{
+	u32	val;
+	int	delay_time = 1000;
+
+	otg_dbg("reseting OTG controller ...\n");
+	val = readl(the_transceiver->regs + CI_USBCMD);
+	writel(val | USBCMD_RST, the_transceiver->regs + CI_USBCMD);
+	do {
+		udelay(100);
+		if (!delay_time--)
+			otg_dbg("reset timeout\n");
+		val = readl(the_transceiver->regs + CI_USBCMD);
+		val &= USBCMD_RST;
+	} while (val != 0);
+	otg_dbg("reset done.\n");
+}
+
+static void set_host_mode(void)
+{
+	u32 	val;
+
+	reset_otg();
+	val = readl(the_transceiver->regs + CI_USBMODE);
+	val = (val & (~USBMODE_CM)) | USBMODE_HOST;
+	writel(val, the_transceiver->regs + CI_USBMODE);
+}
+
+static void set_client_mode(void)
+{
+	u32 	val;
+
+	reset_otg();
+	val = readl(the_transceiver->regs + CI_USBMODE);
+	val = (val & (~USBMODE_CM)) | USBMODE_DEVICE;
+	writel(val, the_transceiver->regs + CI_USBMODE);
+}
+
+static void init_hsm(void)
+{
+	struct langwell_otg	*langwell = the_transceiver;
+	u32			val32;
+
+	/* read OTGSC after reset */
+	val32 = readl(langwell->regs + CI_OTGSC);
+	otg_dbg("%s: OTGSC init value = 0x%x\n", __func__, val32);
+
+	/* set init state */
+	if (val32 & OTGSC_ID) {
+		langwell->hsm.id = 1;
+		langwell->otg.default_a = 0;
+		set_client_mode();
+		langwell->otg.state = OTG_STATE_B_IDLE;
+		langwell_otg_drv_vbus(0);
+	} else {
+		langwell->hsm.id = 0;
+		langwell->otg.default_a = 1;
+		set_host_mode();
+		langwell->otg.state = OTG_STATE_A_IDLE;
+	}
+
+	/* set session indicator */
+	if (val32 & OTGSC_BSE)
+		langwell->hsm.b_sess_end = 1;
+	if (val32 & OTGSC_BSV)
+		langwell->hsm.b_sess_vld = 1;
+	if (val32 & OTGSC_ASV)
+		langwell->hsm.a_sess_vld = 1;
+	if (val32 & OTGSC_AVV)
+		langwell->hsm.a_vbus_vld = 1;
+
+	/* by default, power the bus */
+	langwell->hsm.a_bus_req = 1;
+	langwell->hsm.a_bus_drop = 0;
+	/* by default, don't request the bus as B-device */
+	langwell->hsm.b_bus_req = 0;
+	/* no system error */
+	langwell->hsm.a_clr_err = 0;
+}
+
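+/*
+ * Extra handler registered on the shared controller IRQ while hardware
+ * assist (HABA) is enabled: when it fires in device mode it treats the
+ * status as a B-disconnect (which the host driver cannot report at that
+ * point) and schedules the state-machine work.
+ */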
+static irqreturn_t otg_dummy_irq(int irq, void *_dev)
+{
+	void __iomem	*reg_base = _dev;
+	u32	val;
+	u32	int_mask = 0;
+
+	val = readl(reg_base + CI_USBMODE);
+	if ((val & USBMODE_CM) != USBMODE_DEVICE)
+		return IRQ_NONE;
+
+	val = readl(reg_base + CI_USBSTS);
+	int_mask = val & INTR_DUMMY_MASK;
+
+	if (int_mask == 0)
+		return IRQ_NONE;
+
+	/* Clear hsm.b_conn here since the host driver can't detect it;
+	 * otg_dummy_irq being called means a B-disconnect happened.
+	 */
+	if (the_transceiver->hsm.b_conn) {
+		the_transceiver->hsm.b_conn = 0;
+		if (spin_trylock(&the_transceiver->wq_lock)) {
+			queue_work(the_transceiver->qwork,
+				&the_transceiver->work);
+			spin_unlock(&the_transceiver->wq_lock);
+		}
+	}
+	/* Clear interrupts */
+	writel(int_mask, reg_base + CI_USBSTS);
+	return IRQ_HANDLED;
+}
+
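+/*
+ * Main OTGSC interrupt handler: latch ID, session and VBUS status changes
+ * into the HSM inputs, tick the software timers on the 1ms interrupt, and
+ * schedule the state-machine work when anything changed.
+ */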
+static irqreturn_t otg_irq(int irq, void *_dev)
+{
+	struct	langwell_otg *langwell = _dev;
+	u32	int_sts, int_en;
+	u32	int_mask = 0;
+	int	flag = 0;
+
+	int_sts = readl(langwell->regs + CI_OTGSC);
+	int_en = (int_sts & OTGSC_INTEN_MASK) >> 8;
+	int_mask = int_sts & int_en;
+	if (int_mask == 0)
+		return IRQ_NONE;
+
+	if (int_mask & OTGSC_IDIS) {
+		otg_dbg("%s: id change int\n", __func__);
+		langwell->hsm.id = (int_sts & OTGSC_ID) ? 1 : 0;
+		flag = 1;
+	}
+	if (int_mask & OTGSC_DPIS) {
+		otg_dbg("%s: data pulse int\n", __func__);
+		langwell->hsm.a_srp_det = (int_sts & OTGSC_DPS) ? 1 : 0;
+		flag = 1;
+	}
+	if (int_mask & OTGSC_BSEIS) {
+		otg_dbg("%s: b session end int\n", __func__);
+		langwell->hsm.b_sess_end = (int_sts & OTGSC_BSE) ? 1 : 0;
+		flag = 1;
+	}
+	if (int_mask & OTGSC_BSVIS) {
+		otg_dbg("%s: b session valid int\n", __func__);
+		langwell->hsm.b_sess_vld = (int_sts & OTGSC_BSV) ? 1 : 0;
+		flag = 1;
+	}
+	if (int_mask & OTGSC_ASVIS) {
+		otg_dbg("%s: a session valid int\n", __func__);
+		langwell->hsm.a_sess_vld = (int_sts & OTGSC_ASV) ? 1 : 0;
+		flag = 1;
+	}
+	if (int_mask & OTGSC_AVVIS) {
+		otg_dbg("%s: a vbus valid int\n", __func__);
+		langwell->hsm.a_vbus_vld = (int_sts & OTGSC_AVV) ? 1 : 0;
+		flag = 1;
+	}
+
+	if (int_mask & OTGSC_1MSS) {
+		/* need to schedule otg_work if any timer is expired */
+		if (langwell_otg_tick_timer(&int_sts))
+			flag = 1;
+	}
+
+	writel((int_sts & ~OTGSC_INTSTS_MASK) | int_mask,
+			langwell->regs + CI_OTGSC);
+	if (flag)
+		queue_work(langwell->qwork, &langwell->work);
+
+	return IRQ_HANDLED;
+}
+
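+/*
+ * OTG state machine (HSM), run from the workqueue: each case handles the
+ * transitions out of one usb_otg_state based on the HSM inputs updated by
+ * the interrupt handlers and the sysfs "inputs" attributes.
+ */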
+static void langwell_otg_work(struct work_struct *work)
+{
+	struct langwell_otg *langwell = container_of(work,
+					struct langwell_otg, work);
+	int	retval;
+
+	otg_dbg("%s: old state = %s\n", __func__,
+			state_string(langwell->otg.state));
+
+	switch (langwell->otg.state) {
+	case OTG_STATE_UNDEFINED:
+	case OTG_STATE_B_IDLE:
+		if (!langwell->hsm.id) {
+			langwell_otg_del_timer(b_srp_res_tmr);
+			langwell->otg.default_a = 1;
+			langwell->hsm.a_srp_det = 0;
+
+			langwell_otg_chrg_vbus(0);
+			langwell_otg_drv_vbus(0);
+
+			set_host_mode();
+			langwell->otg.state = OTG_STATE_A_IDLE;
+			queue_work(langwell->qwork, &langwell->work);
+		} else if (langwell->hsm.b_srp_res_tmout) {
+			langwell->hsm.b_srp_res_tmout = 0;
+			langwell->hsm.b_bus_req = 0;
+			langwell_otg_nsf_msg(6);
+		} else if (langwell->hsm.b_sess_vld) {
+			langwell_otg_del_timer(b_srp_res_tmr);
+			langwell->hsm.b_sess_end = 0;
+			langwell->hsm.a_bus_suspend = 0;
+
+			langwell_otg_chrg_vbus(0);
+			if (langwell->client_ops) {
+				langwell->client_ops->resume(langwell->pdev);
+				langwell->otg.state = OTG_STATE_B_PERIPHERAL;
+			} else
+				otg_dbg("client driver not loaded.\n");
+
+		} else if (langwell->hsm.b_bus_req &&
+				(langwell->hsm.b_sess_end)) {
+			/* workaround for b_se0_srp detection */
+			retval = langwell_otg_check_se0_srp(0);
+			if (retval) {
+				langwell->hsm.b_bus_req = 0;
+				otg_dbg("LS is not SE0, try again later\n");
+			} else {
+				/* Start SRP */
+				langwell_otg_start_srp(&langwell->otg);
+				langwell_otg_add_timer(b_srp_res_tmr);
+			}
+		}
+		break;
+	case OTG_STATE_B_SRP_INIT:
+		if (!langwell->hsm.id) {
+			langwell->otg.default_a = 1;
+			langwell->hsm.a_srp_det = 0;
+
+			langwell_otg_drv_vbus(0);
+			langwell_otg_chrg_vbus(0);
+
+			langwell->otg.state = OTG_STATE_A_IDLE;
+			queue_work(langwell->qwork, &langwell->work);
+		} else if (langwell->hsm.b_sess_vld) {
+			langwell_otg_chrg_vbus(0);
+			if (langwell->client_ops) {
+				langwell->client_ops->resume(langwell->pdev);
+				langwell->otg.state = OTG_STATE_B_PERIPHERAL;
+			} else
+				otg_dbg("client driver not loaded.\n");
+		}
+		break;
+	case OTG_STATE_B_PERIPHERAL:
+		if (!langwell->hsm.id) {
+			langwell->otg.default_a = 1;
+			langwell->hsm.a_srp_det = 0;
+
+			langwell_otg_drv_vbus(0);
+			langwell_otg_chrg_vbus(0);
+			set_host_mode();
+
+			if (langwell->client_ops) {
+				langwell->client_ops->suspend(langwell->pdev,
+					PMSG_FREEZE);
+			} else
+				otg_dbg("client driver has been removed.\n");
+
+			langwell->otg.state = OTG_STATE_A_IDLE;
+			queue_work(langwell->qwork, &langwell->work);
+		} else if (!langwell->hsm.b_sess_vld) {
+			langwell->hsm.b_hnp_enable = 0;
+
+			if (langwell->client_ops) {
+				langwell->client_ops->suspend(langwell->pdev,
+					PMSG_FREEZE);
+			} else
+				otg_dbg("client driver has been removed.\n");
+
+			langwell->otg.state = OTG_STATE_B_IDLE;
+		} else if (langwell->hsm.b_bus_req && langwell->hsm.b_hnp_enable
+			&& langwell->hsm.a_bus_suspend) {
+
+			if (langwell->client_ops) {
+				langwell->client_ops->suspend(langwell->pdev,
+					PMSG_FREEZE);
+			} else
+				otg_dbg("client driver has been removed.\n");
+
+			langwell_otg_HAAR(1);
+			langwell->hsm.a_conn = 0;
+
+			if (langwell->host_ops) {
+				langwell->host_ops->probe(langwell->pdev,
+					langwell->host_ops->id_table);
+				langwell->otg.state = OTG_STATE_B_WAIT_ACON;
+			} else
+				otg_dbg("host driver not loaded.\n");
+
+			langwell->hsm.a_bus_resume = 0;
+			langwell->hsm.b_ase0_brst_tmout = 0;
+			langwell_otg_add_timer(b_ase0_brst_tmr);
+		}
+		break;
+
+	case OTG_STATE_B_WAIT_ACON:
+		if (!langwell->hsm.id) {
+			langwell_otg_del_timer(b_ase0_brst_tmr);
+			langwell->otg.default_a = 1;
+			langwell->hsm.a_srp_det = 0;
+
+			langwell_otg_drv_vbus(0);
+			langwell_otg_chrg_vbus(0);
+			set_host_mode();
+
+			langwell_otg_HAAR(0);
+			if (langwell->host_ops)
+				langwell->host_ops->remove(langwell->pdev);
+			else
+				otg_dbg("host driver has been removed.\n");
+			langwell->otg.state = OTG_STATE_A_IDLE;
+			queue_work(langwell->qwork, &langwell->work);
+		} else if (!langwell->hsm.b_sess_vld) {
+			langwell_otg_del_timer(b_ase0_brst_tmr);
+			langwell->hsm.b_hnp_enable = 0;
+			langwell->hsm.b_bus_req = 0;
+			langwell_otg_chrg_vbus(0);
+			langwell_otg_HAAR(0);
+
+			if (langwell->host_ops)
+				langwell->host_ops->remove(langwell->pdev);
+			else
+				otg_dbg("host driver has been removed.\n");
+			langwell->otg.state = OTG_STATE_B_IDLE;
+		} else if (langwell->hsm.a_conn) {
+			langwell_otg_del_timer(b_ase0_brst_tmr);
+			langwell_otg_HAAR(0);
+			langwell->otg.state = OTG_STATE_B_HOST;
+			queue_work(langwell->qwork, &langwell->work);
+		} else if (langwell->hsm.a_bus_resume ||
+				langwell->hsm.b_ase0_brst_tmout) {
+			langwell_otg_del_timer(b_ase0_brst_tmr);
+			langwell_otg_HAAR(0);
+			langwell_otg_nsf_msg(7);
+
+			if (langwell->host_ops)
+				langwell->host_ops->remove(langwell->pdev);
+			else
+				otg_dbg("host driver has been removed.\n");
+
+			langwell->hsm.a_bus_suspend = 0;
+			langwell->hsm.b_bus_req = 0;
+
+			if (langwell->client_ops)
+				langwell->client_ops->resume(langwell->pdev);
+			else
+				otg_dbg("client driver not loaded.\n");
+
+			langwell->otg.state = OTG_STATE_B_PERIPHERAL;
+		}
+		break;
+
+	case OTG_STATE_B_HOST:
+		if (!langwell->hsm.id) {
+			langwell->otg.default_a = 1;
+			langwell->hsm.a_srp_det = 0;
+
+			langwell_otg_drv_vbus(0);
+			langwell_otg_chrg_vbus(0);
+			set_host_mode();
+			if (langwell->host_ops)
+				langwell->host_ops->remove(langwell->pdev);
+			else
+				otg_dbg("host driver has been removed.\n");
+			langwell->otg.state = OTG_STATE_A_IDLE;
+			queue_work(langwell->qwork, &langwell->work);
+		} else if (!langwell->hsm.b_sess_vld) {
+			langwell->hsm.b_hnp_enable = 0;
+			langwell->hsm.b_bus_req = 0;
+			langwell_otg_chrg_vbus(0);
+			if (langwell->host_ops)
+				langwell->host_ops->remove(langwell->pdev);
+			else
+				otg_dbg("host driver has been removed.\n");
+			langwell->otg.state = OTG_STATE_B_IDLE;
+		} else if ((!langwell->hsm.b_bus_req) ||
+				(!langwell->hsm.a_conn)) {
+			langwell->hsm.b_bus_req = 0;
+			langwell_otg_loc_sof(0);
+			if (langwell->host_ops)
+				langwell->host_ops->remove(langwell->pdev);
+			else
+				otg_dbg("host driver has been removed.\n");
+
+			langwell->hsm.a_bus_suspend = 0;
+
+			if (langwell->client_ops)
+				langwell->client_ops->resume(langwell->pdev);
+			else
+				otg_dbg("client driver not loaded.\n");
+
+			langwell->otg.state = OTG_STATE_B_PERIPHERAL;
+		}
+		break;
+
+	case OTG_STATE_A_IDLE:
+		langwell->otg.default_a = 1;
+		if (langwell->hsm.id) {
+			langwell->otg.default_a = 0;
+			langwell->hsm.b_bus_req = 0;
+			langwell_otg_drv_vbus(0);
+			langwell_otg_chrg_vbus(0);
+
+			langwell->otg.state = OTG_STATE_B_IDLE;
+			queue_work(langwell->qwork, &langwell->work);
+		} else if (langwell->hsm.a_sess_vld) {
+			langwell_otg_drv_vbus(1);
+			langwell->hsm.a_srp_det = 1;
+			langwell->hsm.a_wait_vrise_tmout = 0;
+			langwell_otg_add_timer(a_wait_vrise_tmr);
+			langwell->otg.state = OTG_STATE_A_WAIT_VRISE;
+			queue_work(langwell->qwork, &langwell->work);
+		} else if (!langwell->hsm.a_bus_drop &&
+			(langwell->hsm.a_srp_det || langwell->hsm.a_bus_req)) {
+			langwell_otg_drv_vbus(1);
+			langwell->hsm.a_wait_vrise_tmout = 0;
+			langwell_otg_add_timer(a_wait_vrise_tmr);
+			langwell->otg.state = OTG_STATE_A_WAIT_VRISE;
+			queue_work(langwell->qwork, &langwell->work);
+		}
+		break;
+	case OTG_STATE_A_WAIT_VRISE:
+		if (langwell->hsm.id) {
+			langwell_otg_del_timer(a_wait_vrise_tmr);
+			langwell->hsm.b_bus_req = 0;
+			langwell->otg.default_a = 0;
+			langwell_otg_drv_vbus(0);
+			langwell->otg.state = OTG_STATE_B_IDLE;
+		} else if (langwell->hsm.a_vbus_vld) {
+			langwell_otg_del_timer(a_wait_vrise_tmr);
+			if (langwell->host_ops)
+				langwell->host_ops->probe(langwell->pdev,
+						langwell->host_ops->id_table);
+			else
+				otg_dbg("host driver not loaded.\n");
+			langwell->hsm.b_conn = 0;
+			langwell->hsm.a_set_b_hnp_en = 0;
+			langwell->hsm.a_wait_bcon_tmout = 0;
+			langwell_otg_add_timer(a_wait_bcon_tmr);
+			langwell->otg.state = OTG_STATE_A_WAIT_BCON;
+		} else if (langwell->hsm.a_wait_vrise_tmout) {
+			if (langwell->hsm.a_vbus_vld) {
+				if (langwell->host_ops)
+					langwell->host_ops->probe(
+						langwell->pdev,
+						langwell->host_ops->id_table);
+				else
+					otg_dbg("host driver not loaded.\n");
+				langwell->hsm.b_conn = 0;
+				langwell->hsm.a_set_b_hnp_en = 0;
+				langwell->hsm.a_wait_bcon_tmout = 0;
+				langwell_otg_add_timer(a_wait_bcon_tmr);
+				langwell->otg.state = OTG_STATE_A_WAIT_BCON;
+			} else {
+				langwell_otg_drv_vbus(0);
+				langwell->otg.state = OTG_STATE_A_VBUS_ERR;
+			}
+		}
+		break;
+	case OTG_STATE_A_WAIT_BCON:
+		if (langwell->hsm.id) {
+			langwell_otg_del_timer(a_wait_bcon_tmr);
+
+			langwell->otg.default_a = 0;
+			langwell->hsm.b_bus_req = 0;
+			if (langwell->host_ops)
+				langwell->host_ops->remove(langwell->pdev);
+			else
+				otg_dbg("host driver has been removed.\n");
+			langwell_otg_drv_vbus(0);
+			langwell->otg.state = OTG_STATE_B_IDLE;
+			queue_work(langwell->qwork, &langwell->work);
+		} else if (!langwell->hsm.a_vbus_vld) {
+			langwell_otg_del_timer(a_wait_bcon_tmr);
+
+			if (langwell->host_ops)
+				langwell->host_ops->remove(langwell->pdev);
+			else
+				otg_dbg("host driver has been removed.\n");
+			langwell_otg_drv_vbus(0);
+			langwell->otg.state = OTG_STATE_A_VBUS_ERR;
+		} else if (langwell->hsm.a_bus_drop ||
+				(langwell->hsm.a_wait_bcon_tmout &&
+				!langwell->hsm.a_bus_req)) {
+			langwell_otg_del_timer(a_wait_bcon_tmr);
+
+			if (langwell->host_ops)
+				langwell->host_ops->remove(langwell->pdev);
+			else
+				otg_dbg("host driver has been removed.\n");
+			langwell_otg_drv_vbus(0);
+			langwell->otg.state = OTG_STATE_A_WAIT_VFALL;
+		} else if (langwell->hsm.b_conn) {
+			langwell_otg_del_timer(a_wait_bcon_tmr);
+
+			langwell->hsm.a_suspend_req = 0;
+			langwell->otg.state = OTG_STATE_A_HOST;
+			if (!langwell->hsm.a_bus_req &&
+				langwell->hsm.a_set_b_hnp_en) {
+				/* It is not safe enough to do a fast
+				 * transition from A_WAIT_BCON to
+				 * A_SUSPEND.
+				 */
+				msleep(10000);
+				if (langwell->hsm.a_bus_req)
+					break;
+
+				if (request_irq(langwell->pdev->irq,
+					otg_dummy_irq, IRQF_SHARED,
+					driver_name, langwell->regs) != 0) {
+					otg_dbg("request interrupt %d fail\n",
+					langwell->pdev->irq);
+				}
+
+				langwell_otg_HABA(1);
+				langwell->hsm.b_bus_resume = 0;
+				langwell->hsm.a_aidl_bdis_tmout = 0;
+				langwell_otg_add_timer(a_aidl_bdis_tmr);
+
+				langwell_otg_loc_sof(0);
+				langwell->otg.state = OTG_STATE_A_SUSPEND;
+			} else if (!langwell->hsm.a_bus_req &&
+				!langwell->hsm.a_set_b_hnp_en) {
+				struct pci_dev *pdev = langwell->pdev;
+				if (langwell->host_ops)
+					langwell->host_ops->remove(pdev);
+				else
+					otg_dbg("host driver removed.\n");
+				langwell_otg_drv_vbus(0);
+				langwell->otg.state = OTG_STATE_A_WAIT_VFALL;
+			}
+		}
+		break;
+	case OTG_STATE_A_HOST:
+		if (langwell->hsm.id) {
+			langwell->otg.default_a = 0;
+			langwell->hsm.b_bus_req = 0;
+			if (langwell->host_ops)
+				langwell->host_ops->remove(langwell->pdev);
+			else
+				otg_dbg("host driver has been removed.\n");
+			langwell_otg_drv_vbus(0);
+			langwell->otg.state = OTG_STATE_B_IDLE;
+			queue_work(langwell->qwork, &langwell->work);
+		} else if (langwell->hsm.a_bus_drop ||
+		(!langwell->hsm.a_set_b_hnp_en && !langwell->hsm.a_bus_req)) {
+			if (langwell->host_ops)
+				langwell->host_ops->remove(langwell->pdev);
+			else
+				otg_dbg("host driver has been removed.\n");
+			langwell_otg_drv_vbus(0);
+			langwell->otg.state = OTG_STATE_A_WAIT_VFALL;
+		} else if (!langwell->hsm.a_vbus_vld) {
+			if (langwell->host_ops)
+				langwell->host_ops->remove(langwell->pdev);
+			else
+				otg_dbg("host driver has been removed.\n");
+			langwell_otg_drv_vbus(0);
+			langwell->otg.state = OTG_STATE_A_VBUS_ERR;
+		} else if (langwell->hsm.a_set_b_hnp_en
+				&& !langwell->hsm.a_bus_req) {
+			/* Set HABA to enable hardware assistance to signal
+			 * A-connect after receiving B-disconnect. Hardware
+			 * will then switch to client mode and enable URE, SLE
+			 * and PCE after the assistance. otg_dummy_irq is used
+			 * to clear these interrupts when the client driver is
+			 * not resumed.
+			 */
+			if (request_irq(langwell->pdev->irq,
+				otg_dummy_irq, IRQF_SHARED, driver_name,
+				langwell->regs) != 0) {
+				otg_dbg("request interrupt %d failed\n",
+						langwell->pdev->irq);
+			}
+
+			/* set HABA */
+			langwell_otg_HABA(1);
+			langwell->hsm.b_bus_resume = 0;
+			langwell->hsm.a_aidl_bdis_tmout = 0;
+			langwell_otg_add_timer(a_aidl_bdis_tmr);
+			langwell_otg_loc_sof(0);
+			langwell->otg.state = OTG_STATE_A_SUSPEND;
+		} else if (!langwell->hsm.b_conn || !langwell->hsm.a_bus_req) {
+			langwell->hsm.a_wait_bcon_tmout = 0;
+			langwell->hsm.a_set_b_hnp_en = 0;
+			langwell_otg_add_timer(a_wait_bcon_tmr);
+			langwell->otg.state = OTG_STATE_A_WAIT_BCON;
+		}
+		break;
+	case OTG_STATE_A_SUSPEND:
+		if (langwell->hsm.id) {
+			langwell_otg_del_timer(a_aidl_bdis_tmr);
+			langwell_otg_HABA(0);
+			free_irq(langwell->pdev->irq, langwell->regs);
+			langwell->otg.default_a = 0;
+			langwell->hsm.b_bus_req = 0;
+			if (langwell->host_ops)
+				langwell->host_ops->remove(langwell->pdev);
+			else
+				otg_dbg("host driver has been removed.\n");
+			langwell_otg_drv_vbus(0);
+			langwell->otg.state = OTG_STATE_B_IDLE;
+			queue_work(langwell->qwork, &langwell->work);
+		} else if (langwell->hsm.a_bus_req ||
+				langwell->hsm.b_bus_resume) {
+			langwell_otg_del_timer(a_aidl_bdis_tmr);
+			langwell_otg_HABA(0);
+			free_irq(langwell->pdev->irq, langwell->regs);
+			langwell->hsm.a_suspend_req = 0;
+			langwell_otg_loc_sof(1);
+			langwell->otg.state = OTG_STATE_A_HOST;
+		} else if (langwell->hsm.a_aidl_bdis_tmout ||
+				langwell->hsm.a_bus_drop) {
+			langwell_otg_del_timer(a_aidl_bdis_tmr);
+			langwell_otg_HABA(0);
+			free_irq(langwell->pdev->irq, langwell->regs);
+			if (langwell->host_ops)
+				langwell->host_ops->remove(langwell->pdev);
+			else
+				otg_dbg("host driver has been removed.\n");
+			langwell_otg_drv_vbus(0);
+			langwell->otg.state = OTG_STATE_A_WAIT_VFALL;
+		} else if (!langwell->hsm.b_conn &&
+				langwell->hsm.a_set_b_hnp_en) {
+			langwell_otg_del_timer(a_aidl_bdis_tmr);
+			langwell_otg_HABA(0);
+			free_irq(langwell->pdev->irq, langwell->regs);
+
+			if (langwell->host_ops)
+				langwell->host_ops->remove(langwell->pdev);
+			else
+				otg_dbg("host driver has been removed.\n");
+
+			langwell->hsm.b_bus_suspend = 0;
+			langwell->hsm.b_bus_suspend_vld = 0;
+			langwell->hsm.b_bus_suspend_tmout = 0;
+
+			/* msleep(200); */
+			if (langwell->client_ops)
+				langwell->client_ops->resume(langwell->pdev);
+			else
+				otg_dbg("client driver not loaded.\n");
+
+			langwell_otg_add_timer(b_bus_suspend_tmr);
+			langwell->otg.state = OTG_STATE_A_PERIPHERAL;
+			break;
+		} else if (!langwell->hsm.a_vbus_vld) {
+			langwell_otg_del_timer(a_aidl_bdis_tmr);
+			langwell_otg_HABA(0);
+			free_irq(langwell->pdev->irq, langwell->regs);
+			if (langwell->host_ops)
+				langwell->host_ops->remove(langwell->pdev);
+			else
+				otg_dbg("host driver has been removed.\n");
+			langwell_otg_drv_vbus(0);
+			langwell->otg.state = OTG_STATE_A_VBUS_ERR;
+		}
+		break;
+	case OTG_STATE_A_PERIPHERAL:
+		if (langwell->hsm.id) {
+			langwell_otg_del_timer(b_bus_suspend_tmr);
+			langwell->otg.default_a = 0;
+			langwell->hsm.b_bus_req = 0;
+			if (langwell->client_ops)
+				langwell->client_ops->suspend(langwell->pdev,
+					PMSG_FREEZE);
+			else
+				otg_dbg("client driver has been removed.\n");
+			langwell_otg_drv_vbus(0);
+			langwell->otg.state = OTG_STATE_B_IDLE;
+			queue_work(langwell->qwork, &langwell->work);
+		} else if (!langwell->hsm.a_vbus_vld) {
+			langwell_otg_del_timer(b_bus_suspend_tmr);
+			if (langwell->client_ops)
+				langwell->client_ops->suspend(langwell->pdev,
+					PMSG_FREEZE);
+			else
+				otg_dbg("client driver has been removed.\n");
+			langwell_otg_drv_vbus(0);
+			langwell->otg.state = OTG_STATE_A_VBUS_ERR;
+		} else if (langwell->hsm.a_bus_drop) {
+			langwell_otg_del_timer(b_bus_suspend_tmr);
+			if (langwell->client_ops)
+				langwell->client_ops->suspend(langwell->pdev,
+					PMSG_FREEZE);
+			else
+				otg_dbg("client driver has been removed.\n");
+			langwell_otg_drv_vbus(0);
+			langwell->otg.state = OTG_STATE_A_WAIT_VFALL;
+		} else if (langwell->hsm.b_bus_suspend) {
+			langwell_otg_del_timer(b_bus_suspend_tmr);
+			if (langwell->client_ops)
+				langwell->client_ops->suspend(langwell->pdev,
+					PMSG_FREEZE);
+			else
+				otg_dbg("client driver has been removed.\n");
+
+			if (langwell->host_ops)
+				langwell->host_ops->probe(langwell->pdev,
+						langwell->host_ops->id_table);
+			else
+				otg_dbg("host driver not loaded.\n");
+			langwell->hsm.a_set_b_hnp_en = 0;
+			langwell->hsm.a_wait_bcon_tmout = 0;
+			langwell_otg_add_timer(a_wait_bcon_tmr);
+			langwell->otg.state = OTG_STATE_A_WAIT_BCON;
+		} else if (langwell->hsm.b_bus_suspend_tmout) {
+			u32	val;
+			val = readl(langwell->regs + CI_PORTSC1);
+			if (!(val & PORTSC_SUSP))
+				break;
+			if (langwell->client_ops)
+				langwell->client_ops->suspend(langwell->pdev,
+						PMSG_FREEZE);
+			else
+				otg_dbg("client driver has been removed.\n");
+			if (langwell->host_ops)
+				langwell->host_ops->probe(langwell->pdev,
+						langwell->host_ops->id_table);
+			else
+				otg_dbg("host driver not loaded.\n");
+			langwell->hsm.a_set_b_hnp_en = 0;
+			langwell->hsm.a_wait_bcon_tmout = 0;
+			langwell_otg_add_timer(a_wait_bcon_tmr);
+			langwell->otg.state = OTG_STATE_A_WAIT_BCON;
+		}
+		break;
+	case OTG_STATE_A_VBUS_ERR:
+		if (langwell->hsm.id) {
+			langwell->otg.default_a = 0;
+			langwell->hsm.a_clr_err = 0;
+			langwell->hsm.a_srp_det = 0;
+			langwell->otg.state = OTG_STATE_B_IDLE;
+			queue_work(langwell->qwork, &langwell->work);
+		} else if (langwell->hsm.a_clr_err) {
+			langwell->hsm.a_clr_err = 0;
+			langwell->hsm.a_srp_det = 0;
+			reset_otg();
+			init_hsm();
+			if (langwell->otg.state == OTG_STATE_A_IDLE)
+				queue_work(langwell->qwork, &langwell->work);
+		}
+		break;
+	case OTG_STATE_A_WAIT_VFALL:
+		if (langwell->hsm.id) {
+			langwell->otg.default_a = 0;
+			langwell->otg.state = OTG_STATE_B_IDLE;
+			queue_work(langwell->qwork, &langwell->work);
+		} else if (langwell->hsm.a_bus_req) {
+			langwell_otg_drv_vbus(1);
+			langwell->hsm.a_wait_vrise_tmout = 0;
+			langwell_otg_add_timer(a_wait_vrise_tmr);
+			langwell->otg.state = OTG_STATE_A_WAIT_VRISE;
+		} else if (!langwell->hsm.a_sess_vld) {
+			langwell->hsm.a_srp_det = 0;
+			langwell_otg_drv_vbus(0);
+			set_host_mode();
+			langwell->otg.state = OTG_STATE_A_IDLE;
+		}
+		break;
+	default:
+		break;
+	}
+
+	otg_dbg("%s: new state = %s\n", __func__,
+			state_string(langwell->otg.state));
+}
+
+static ssize_t
+show_registers(struct device *_dev, struct device_attribute *attr, char *buf)
+{
+	struct langwell_otg *langwell;
+	char *next;
+	unsigned size;
+	unsigned t;
+
+	langwell = the_transceiver;
+	next = buf;
+	size = PAGE_SIZE;
+
+	t = scnprintf(next, size,
+		"\n"
+		"USBCMD = 0x%08x \n"
+		"USBSTS = 0x%08x \n"
+		"USBINTR = 0x%08x \n"
+		"ASYNCLISTADDR = 0x%08x \n"
+		"PORTSC1 = 0x%08x \n"
+		"HOSTPC1 = 0x%08x \n"
+		"OTGSC = 0x%08x \n"
+		"USBMODE = 0x%08x \n",
+		readl(langwell->regs + 0x30),
+		readl(langwell->regs + 0x34),
+		readl(langwell->regs + 0x38),
+		readl(langwell->regs + 0x48),
+		readl(langwell->regs + 0x74),
+		readl(langwell->regs + 0xb4),
+		readl(langwell->regs + 0xf4),
+		readl(langwell->regs + 0xf8)
+		);
+	size -= t;
+	next += t;
+
+	return PAGE_SIZE - size;
+}
+static DEVICE_ATTR(registers, S_IRUGO, show_registers, NULL);
+
+static ssize_t
+show_hsm(struct device *_dev, struct device_attribute *attr, char *buf)
+{
+	struct langwell_otg *langwell;
+	char *next;
+	unsigned size;
+	unsigned t;
+
+	langwell = the_transceiver;
+	next = buf;
+	size = PAGE_SIZE;
+
+	t = scnprintf(next, size,
+		"\n"
+		"current state = %s\n"
+		"a_bus_resume = \t%d\n"
+		"a_bus_suspend = \t%d\n"
+		"a_conn = \t%d\n"
+		"a_sess_vld = \t%d\n"
+		"a_srp_det = \t%d\n"
+		"a_vbus_vld = \t%d\n"
+		"b_bus_resume = \t%d\n"
+		"b_bus_suspend = \t%d\n"
+		"b_conn = \t%d\n"
+		"b_se0_srp = \t%d\n"
+		"b_sess_end = \t%d\n"
+		"b_sess_vld = \t%d\n"
+		"id = \t%d\n"
+		"a_set_b_hnp_en = \t%d\n"
+		"b_srp_done = \t%d\n"
+		"b_hnp_enable = \t%d\n"
+		"a_wait_vrise_tmout = \t%d\n"
+		"a_wait_bcon_tmout = \t%d\n"
+		"a_aidl_bdis_tmout = \t%d\n"
+		"b_ase0_brst_tmout = \t%d\n"
+		"a_bus_drop = \t%d\n"
+		"a_bus_req = \t%d\n"
+		"a_clr_err = \t%d\n"
+		"a_suspend_req = \t%d\n"
+		"b_bus_req = \t%d\n"
+		"b_bus_suspend_tmout = \t%d\n"
+		"b_bus_suspend_vld = \t%d\n",
+		state_string(langwell->otg.state),
+		langwell->hsm.a_bus_resume,
+		langwell->hsm.a_bus_suspend,
+		langwell->hsm.a_conn,
+		langwell->hsm.a_sess_vld,
+		langwell->hsm.a_srp_det,
+		langwell->hsm.a_vbus_vld,
+		langwell->hsm.b_bus_resume,
+		langwell->hsm.b_bus_suspend,
+		langwell->hsm.b_conn,
+		langwell->hsm.b_se0_srp,
+		langwell->hsm.b_sess_end,
+		langwell->hsm.b_sess_vld,
+		langwell->hsm.id,
+		langwell->hsm.a_set_b_hnp_en,
+		langwell->hsm.b_srp_done,
+		langwell->hsm.b_hnp_enable,
+		langwell->hsm.a_wait_vrise_tmout,
+		langwell->hsm.a_wait_bcon_tmout,
+		langwell->hsm.a_aidl_bdis_tmout,
+		langwell->hsm.b_ase0_brst_tmout,
+		langwell->hsm.a_bus_drop,
+		langwell->hsm.a_bus_req,
+		langwell->hsm.a_clr_err,
+		langwell->hsm.a_suspend_req,
+		langwell->hsm.b_bus_req,
+		langwell->hsm.b_bus_suspend_tmout,
+		langwell->hsm.b_bus_suspend_vld
+		);
+	size -= t;
+	next += t;
+
+	return PAGE_SIZE - size;
+}
+static DEVICE_ATTR(hsm, S_IRUGO, show_hsm, NULL);
+
+static ssize_t
+get_a_bus_req(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	struct langwell_otg *langwell;
+	char *next;
+	unsigned size;
+	unsigned t;
+
+	langwell =  the_transceiver;
+	next = buf;
+	size = PAGE_SIZE;
+
+	t = scnprintf(next, size, "%d", langwell->hsm.a_bus_req);
+	size -= t;
+	next += t;
+
+	return PAGE_SIZE - size;
+}
+
+static ssize_t
+set_a_bus_req(struct device *dev, struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	struct langwell_otg *langwell;
+	langwell = the_transceiver;
+	if (!langwell->otg.default_a)
+		return -1;
+	if (count > 2)
+		return -1;
+
+	if (buf[0] == '0') {
+		langwell->hsm.a_bus_req = 0;
+		otg_dbg("a_bus_req = 0\n");
+	} else if (buf[0] == '1') {
+		/* If a_bus_drop is TRUE, a_bus_req can't be set */
+		if (langwell->hsm.a_bus_drop)
+			return -1;
+		langwell->hsm.a_bus_req = 1;
+		otg_dbg("a_bus_req = 1\n");
+	}
+	if (spin_trylock(&langwell->wq_lock)) {
+		queue_work(langwell->qwork, &langwell->work);
+		spin_unlock(&langwell->wq_lock);
+	}
+	return count;
+}
+static DEVICE_ATTR(a_bus_req, S_IRUGO | S_IWUGO, get_a_bus_req, set_a_bus_req);
+
+static ssize_t
+get_a_bus_drop(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	struct langwell_otg *langwell;
+	char *next;
+	unsigned size;
+	unsigned t;
+
+	langwell =  the_transceiver;
+	next = buf;
+	size = PAGE_SIZE;
+
+	t = scnprintf(next, size, "%d", langwell->hsm.a_bus_drop);
+	size -= t;
+	next += t;
+
+	return PAGE_SIZE - size;
+}
+
+static ssize_t
+set_a_bus_drop(struct device *dev, struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	struct langwell_otg *langwell;
+	langwell = the_transceiver;
+	if (!langwell->otg.default_a)
+		return -1;
+	if (count > 2)
+		return -1;
+
+	if (buf[0] == '0') {
+		langwell->hsm.a_bus_drop = 0;
+		otg_dbg("a_bus_drop = 0\n");
+	} else if (buf[0] == '1') {
+		langwell->hsm.a_bus_drop = 1;
+		langwell->hsm.a_bus_req = 0;
+		otg_dbg("a_bus_drop = 1, then a_bus_req = 0\n");
+	}
+	if (spin_trylock(&langwell->wq_lock)) {
+		queue_work(langwell->qwork, &langwell->work);
+		spin_unlock(&langwell->wq_lock);
+	}
+	return count;
+}
+static DEVICE_ATTR(a_bus_drop, S_IRUGO | S_IWUGO,
+	get_a_bus_drop, set_a_bus_drop);
+
+static ssize_t
+get_b_bus_req(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	struct langwell_otg *langwell;
+	char *next;
+	unsigned size;
+	unsigned t;
+
+	langwell =  the_transceiver;
+	next = buf;
+	size = PAGE_SIZE;
+
+	t = scnprintf(next, size, "%d", langwell->hsm.b_bus_req);
+	size -= t;
+	next += t;
+
+	return PAGE_SIZE - size;
+}
+
+static ssize_t
+set_b_bus_req(struct device *dev, struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	struct langwell_otg *langwell;
+	langwell = the_transceiver;
+
+	if (langwell->otg.default_a)
+		return -1;
+
+	if (count > 2)
+		return -1;
+
+	if (buf[0] == '0') {
+		langwell->hsm.b_bus_req = 0;
+		otg_dbg("b_bus_req = 0\n");
+	} else if (buf[0] == '1') {
+		langwell->hsm.b_bus_req = 1;
+		otg_dbg("b_bus_req = 1\n");
+	}
+	if (spin_trylock(&langwell->wq_lock)) {
+		queue_work(langwell->qwork, &langwell->work);
+		spin_unlock(&langwell->wq_lock);
+	}
+	return count;
+}
+static DEVICE_ATTR(b_bus_req, S_IRUGO | S_IWUGO, get_b_bus_req, set_b_bus_req);
+
+static ssize_t
+set_a_clr_err(struct device *dev, struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	struct langwell_otg *langwell;
+	langwell = the_transceiver;
+
+	if (!langwell->otg.default_a)
+		return -1;
+	if (count > 2)
+		return -1;
+
+	if (buf[0] == '1') {
+		langwell->hsm.a_clr_err = 1;
+		otg_dbg("a_clr_err = 1\n");
+	}
+	if (spin_trylock(&langwell->wq_lock)) {
+		queue_work(langwell->qwork, &langwell->work);
+		spin_unlock(&langwell->wq_lock);
+	}
+	return count;
+}
+static DEVICE_ATTR(a_clr_err, S_IWUGO, NULL, set_a_clr_err);
+
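+/*
+ * "inputs" sysfs group: user-settable HSM inputs (a_bus_req, a_bus_drop,
+ * b_bus_req, a_clr_err) used to drive the state machine from userspace.
+ */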
+static struct attribute *inputs_attrs[] = {
+	&dev_attr_a_bus_req.attr,
+	&dev_attr_a_bus_drop.attr,
+	&dev_attr_b_bus_req.attr,
+	&dev_attr_a_clr_err.attr,
+	NULL,
+};
+
+static struct attribute_group debug_dev_attr_group = {
+	.name = "inputs",
+	.attrs = inputs_attrs,
+};
+
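+/*
+ * The companion host and client (gadget) PCI drivers register their
+ * pci_driver ops here; the state machine then calls their probe/remove/
+ * suspend/resume hooks as the OTG role changes.
+ */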
+int langwell_register_host(struct pci_driver *host_driver)
+{
+	int	ret = 0;
+
+	the_transceiver->host_ops = host_driver;
+	queue_work(the_transceiver->qwork, &the_transceiver->work);
+	otg_dbg("host controller driver is registered\n");
+
+	return ret;
+}
+EXPORT_SYMBOL(langwell_register_host);
+
+void langwell_unregister_host(struct pci_driver *host_driver)
+{
+	if (the_transceiver->host_ops)
+		the_transceiver->host_ops->remove(the_transceiver->pdev);
+	the_transceiver->host_ops = NULL;
+	the_transceiver->hsm.a_bus_drop = 1;
+	queue_work(the_transceiver->qwork, &the_transceiver->work);
+	otg_dbg("host controller driver is unregistered\n");
+}
+EXPORT_SYMBOL(langwell_unregister_host);
+
+int langwell_register_peripheral(struct pci_driver *client_driver)
+{
+	int	ret = 0;
+
+	if (client_driver)
+		ret = client_driver->probe(the_transceiver->pdev,
+				client_driver->id_table);
+	if (!ret) {
+		the_transceiver->client_ops = client_driver;
+		queue_work(the_transceiver->qwork, &the_transceiver->work);
+		otg_dbg("client controller driver is registered\n");
+	}
+
+	return ret;
+}
+EXPORT_SYMBOL(langwell_register_peripheral);
+
+void langwell_unregister_peripheral(struct pci_driver *client_driver)
+{
+	if (the_transceiver->client_ops)
+		the_transceiver->client_ops->remove(the_transceiver->pdev);
+	the_transceiver->client_ops = NULL;
+	the_transceiver->hsm.b_bus_req = 0;
+	queue_work(the_transceiver->qwork, &the_transceiver->work);
+	otg_dbg("client controller driver is unregistered\n");
+}
+EXPORT_SYMBOL(langwell_unregister_peripheral);
+
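+/*
+ * PCI probe: map the controller registers, register as the system OTG
+ * transceiver, initialize the HSM and timers, request the IRQ, enable the
+ * OTGSC interrupts and expose the debug sysfs attributes.
+ */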
+static int langwell_otg_probe(struct pci_dev *pdev,
+		const struct pci_device_id *id)
+{
+	unsigned long		resource, len;
+	void __iomem 		*base = NULL;
+	int			retval;
+	u32			val32;
+	struct langwell_otg	*langwell;
+	char			qname[] = "langwell_otg_queue";
+
+	retval = 0;
+	otg_dbg("\notg controller is detected.\n");
+	if (pci_enable_device(pdev) < 0) {
+		retval = -ENODEV;
+		goto done;
+	}
+
+	langwell = kzalloc(sizeof *langwell, GFP_KERNEL);
+	if (langwell == NULL) {
+		retval = -ENOMEM;
+		goto done;
+	}
+	the_transceiver = langwell;
+
+	/* control register: BAR 0 */
+	resource = pci_resource_start(pdev, 0);
+	len = pci_resource_len(pdev, 0);
+	if (!request_mem_region(resource, len, driver_name)) {
+		retval = -EBUSY;
+		goto err;
+	}
+	langwell->region = 1;
+
+	base = ioremap_nocache(resource, len);
+	if (base == NULL) {
+		retval = -EFAULT;
+		goto err;
+	}
+	langwell->regs = base;
+
+	if (!pdev->irq) {
+		otg_dbg("No IRQ.\n");
+		retval = -ENODEV;
+		goto err;
+	}
+
+	langwell->qwork = create_workqueue(qname);
+	if (!langwell->qwork) {
+		otg_dbg("cannot create workqueue %s\n", qname);
+		retval = -ENOMEM;
+		goto err;
+	}
+	INIT_WORK(&langwell->work, langwell_otg_work);
+
+	/* OTG common part */
+	langwell->pdev = pdev;
+	langwell->otg.dev = &pdev->dev;
+	langwell->otg.label = driver_name;
+	langwell->otg.set_host = langwell_otg_set_host;
+	langwell->otg.set_peripheral = langwell_otg_set_peripheral;
+	langwell->otg.set_power = langwell_otg_set_power;
+	langwell->otg.start_srp = langwell_otg_start_srp;
+	langwell->otg.state = OTG_STATE_UNDEFINED;
+	if (otg_set_transceiver(&langwell->otg)) {
+		otg_dbg("can't set transceiver\n");
+		retval = -EBUSY;
+		goto err;
+	}
+
+	reset_otg();
+	init_hsm();
+
+	spin_lock_init(&langwell->lock);
+	spin_lock_init(&langwell->wq_lock);
+	INIT_LIST_HEAD(&active_timers);
+	langwell_otg_init_timers(&langwell->hsm);
+
+	if (request_irq(pdev->irq, otg_irq, IRQF_SHARED,
+				driver_name, langwell) != 0) {
+		otg_dbg("request interrupt %d failed\n", pdev->irq);
+		retval = -EBUSY;
+		goto err;
+	}
+
+	/* enable OTGSC int */
+	val32 = OTGSC_DPIE | OTGSC_BSEIE | OTGSC_BSVIE |
+		OTGSC_ASVIE | OTGSC_AVVIE | OTGSC_IDIE | OTGSC_IDPU;
+	writel(val32, langwell->regs + CI_OTGSC);
+
+	retval = device_create_file(&pdev->dev, &dev_attr_registers);
+	if (retval < 0) {
+		otg_dbg("Can't register sysfs attribute: %d\n", retval);
+		goto err;
+	}
+
+	retval = device_create_file(&pdev->dev, &dev_attr_hsm);
+	if (retval < 0) {
+		otg_dbg("Can't hsm sysfs attribute: %d\n", retval);
+		goto err;
+	}
+
+	retval = sysfs_create_group(&pdev->dev.kobj, &debug_dev_attr_group);
+	if (retval < 0) {
+		otg_dbg("Can't register sysfs attr group: %d\n", retval);
+		goto err;
+	}
+
+	if (langwell->otg.state == OTG_STATE_A_IDLE)
+		queue_work(langwell->qwork, &langwell->work);
+
+	return 0;
+
+err:
+	if (the_transceiver)
+		langwell_otg_remove(pdev);
+done:
+	return retval;
+}
+
+static void langwell_otg_remove(struct pci_dev *pdev)
+{
+	struct langwell_otg *langwell;
+
+	langwell = the_transceiver;
+
+	if (langwell->qwork) {
+		flush_workqueue(langwell->qwork);
+		destroy_workqueue(langwell->qwork);
+	}
+	langwell_otg_free_timers();
+
+	/* disable OTGSC interrupt as OTGSC doesn't change in reset */
+	writel(0, langwell->regs + CI_OTGSC);
+
+	if (pdev->irq)
+		free_irq(pdev->irq, langwell);
+	if (langwell->regs)
+		iounmap(langwell->regs);
+	if (langwell->region)
+		release_mem_region(pci_resource_start(pdev, 0),
+				pci_resource_len(pdev, 0));
+
+	otg_set_transceiver(NULL);
+	pci_disable_device(pdev);
+	sysfs_remove_group(&pdev->dev.kobj, &debug_dev_attr_group);
+	device_remove_file(&pdev->dev, &dev_attr_hsm);
+	device_remove_file(&pdev->dev, &dev_attr_registers);
+	kfree(langwell);
+	langwell = NULL;
+}
+
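+/* Helper for the PM suspend path: save PCI state, enter D3hot and put the
+ * PHY into low-power mode.
+ */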
+static void transceiver_suspend(struct pci_dev *pdev)
+{
+	pci_save_state(pdev);
+	pci_set_power_state(pdev, PCI_D3hot);
+	langwell_otg_phy_low_power(1);
+}
+
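+/*
+ * PM suspend entry point: disable OTG interrupts, stop the state-machine
+ * work, then quiesce per-state (cancel timers, drop VBUS, call the host or
+ * client driver's suspend) before powering the transceiver down.
+ */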
+static int langwell_otg_suspend(struct pci_dev *pdev, pm_message_t message)
+{
+	int 	ret = 0;
+	struct langwell_otg *langwell;
+
+	langwell = the_transceiver;
+
+	/* Disable OTG interrupts */
+	langwell_otg_intr(0);
+
+	if (pdev->irq)
+		free_irq(pdev->irq, langwell);
+
+	/* Prevent more otg_work */
+	flush_workqueue(langwell->qwork);
+	spin_lock(&langwell->wq_lock);
+
+	/* start actions */
+	switch (langwell->otg.state) {
+	case OTG_STATE_A_IDLE:
+	case OTG_STATE_B_IDLE:
+	case OTG_STATE_A_WAIT_VFALL:
+	case OTG_STATE_A_VBUS_ERR:
+		transceiver_suspend(pdev);
+		break;
+	case OTG_STATE_A_WAIT_VRISE:
+		langwell_otg_del_timer(a_wait_vrise_tmr);
+		langwell->hsm.a_srp_det = 0;
+		langwell_otg_drv_vbus(0);
+		langwell->otg.state = OTG_STATE_A_IDLE;
+		transceiver_suspend(pdev);
+		break;
+	case OTG_STATE_A_WAIT_BCON:
+		langwell_otg_del_timer(a_wait_bcon_tmr);
+		if (langwell->host_ops)
+			ret = langwell->host_ops->suspend(pdev, message);
+		langwell_otg_drv_vbus(0);
+		break;
+	case OTG_STATE_A_HOST:
+		if (langwell->host_ops)
+			ret = langwell->host_ops->suspend(pdev, message);
+		langwell_otg_drv_vbus(0);
+		langwell_otg_phy_low_power(1);
+		break;
+	case OTG_STATE_A_SUSPEND:
+		langwell_otg_del_timer(a_aidl_bdis_tmr);
+		langwell_otg_HABA(0);
+		if (langwell->host_ops)
+			langwell->host_ops->remove(pdev);
+		else
+			otg_dbg("host driver has been removed.\n");
+		langwell_otg_drv_vbus(0);
+		transceiver_suspend(pdev);
+		langwell->otg.state = OTG_STATE_A_WAIT_VFALL;
+		break;
+	case OTG_STATE_A_PERIPHERAL:
+		if (langwell->client_ops)
+			ret = langwell->client_ops->suspend(pdev, message);
+		else
+			otg_dbg("client driver has been removed.\n");
+		langwell_otg_drv_vbus(0);
+		transceiver_suspend(pdev);
+		langwell->otg.state = OTG_STATE_A_WAIT_VFALL;
+		break;
+	case OTG_STATE_B_HOST:
+		if (langwell->host_ops)
+			langwell->host_ops->remove(pdev);
+		else
+			otg_dbg("host driver has been removed.\n");
+		langwell->hsm.b_bus_req = 0;
+		transceiver_suspend(pdev);
+		langwell->otg.state = OTG_STATE_B_IDLE;
+		break;
+	case OTG_STATE_B_PERIPHERAL:
+		if (langwell->client_ops)
+			ret = langwell->client_ops->suspend(pdev, message);
+		else
+			otg_dbg("client driver has been removed.\n");
+		break;
+	case OTG_STATE_B_WAIT_ACON:
+		langwell_otg_del_timer(b_ase0_brst_tmr);
+		langwell_otg_HAAR(0);
+		if (langwell->host_ops)
+			langwell->host_ops->remove(pdev);
+		else
+			otg_dbg("host driver has been removed.\n");
+		langwell->hsm.b_bus_req = 0;
+		langwell->otg.state = OTG_STATE_B_IDLE;
+		transceiver_suspend(pdev);
+		break;
+	default:
+		otg_dbg("error state before suspend\n ");
+		break;
+	}
+	spin_unlock(&langwell->wq_lock);
+
+	return ret;
+}
+
+static void transceiver_resume(struct pci_dev *pdev)
+{
+	pci_restore_state(pdev);
+	pci_set_power_state(pdev, PCI_D0);
+	langwell_otg_phy_low_power(0);
+}
+
+static int langwell_otg_resume(struct pci_dev *pdev)
+{
+	int 	ret = 0;
+	struct langwell_otg *langwell;
+
+	langwell = the_transceiver;
+
+	spin_lock(&langwell->wq_lock);
+
+	switch (langwell->otg.state) {
+	case OTG_STATE_A_IDLE:
+	case OTG_STATE_B_IDLE:
+	case OTG_STATE_A_WAIT_VFALL:
+	case OTG_STATE_A_VBUS_ERR:
+		transceiver_resume(pdev);
+		break;
+	case OTG_STATE_A_WAIT_BCON:
+		langwell_otg_add_timer(a_wait_bcon_tmr);
+		langwell_otg_drv_vbus(1);
+		if (langwell->host_ops)
+			ret = langwell->host_ops->resume(pdev);
+		break;
+	case OTG_STATE_A_HOST:
+		langwell_otg_drv_vbus(1);
+		langwell_otg_phy_low_power(0);
+		if (langwell->host_ops)
+			ret = langwell->host_ops->resume(pdev);
+		break;
+	case OTG_STATE_B_PERIPHERAL:
+		if (langwell->client_ops)
+			ret = langwell->client_ops->resume(pdev);
+		else
+			otg_dbg("client driver not loaded.\n");
+		break;
+	default:
+		otg_dbg("error state before suspend\n ");
+		break;
+	}
+
+	if (request_irq(pdev->irq, otg_irq, IRQF_SHARED,
+				driver_name, the_transceiver) != 0) {
+		otg_dbg("request interrupt %d failed\n", pdev->irq);
+		ret = -EBUSY;
+	}
+
+	/* enable OTG interrupts */
+	langwell_otg_intr(1);
+
+	spin_unlock(&langwell->wq_lock);
+
+	queue_work(langwell->qwork, &langwell->work);
+
+	return ret;
+}
+
+static int __init langwell_otg_init(void)
+{
+	return pci_register_driver(&otg_pci_driver);
+}
+module_init(langwell_otg_init);
+
+static void __exit langwell_otg_cleanup(void)
+{
+	pci_unregister_driver(&otg_pci_driver);
+}
+module_exit(langwell_otg_cleanup);
diff --git a/drivers/usb/otg/nop-usb-xceiv.c b/drivers/usb/otg/nop-usb-xceiv.c
index c567168..9ed5ea5 100644
--- a/drivers/usb/otg/nop-usb-xceiv.c
+++ b/drivers/usb/otg/nop-usb-xceiv.c
@@ -22,8 +22,8 @@
  * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  *
  * Current status:
- * 	this is to add "nop" transceiver for all those phy which is
- * 	autonomous such as isp1504 etc.
+ *	This provides a "nop" transceiver for PHYs which are
+ *	autonomous such as isp1504, isp1707, etc.
  */
 
 #include <linux/module.h>
@@ -36,30 +36,25 @@
 	struct device		*dev;
 };
 
-static u64 nop_xceiv_dmamask = DMA_BIT_MASK(32);
-
-static struct platform_device nop_xceiv_device = {
-	.name           = "nop_usb_xceiv",
-	.id             = -1,
-	.dev = {
-		.dma_mask               = &nop_xceiv_dmamask,
-		.coherent_dma_mask      = DMA_BIT_MASK(32),
-		.platform_data          = NULL,
-	},
-};
+static struct platform_device *pd;
 
 void usb_nop_xceiv_register(void)
 {
-	if (platform_device_register(&nop_xceiv_device) < 0) {
+	if (pd)
+		return;
+	pd = platform_device_register_simple("nop_usb_xceiv", -1, NULL, 0);
+	if (!pd) {
 		printk(KERN_ERR "Unable to register usb nop transceiver\n");
 		return;
 	}
 }
+EXPORT_SYMBOL(usb_nop_xceiv_register);
 
 void usb_nop_xceiv_unregister(void)
 {
-	platform_device_unregister(&nop_xceiv_device);
+	platform_device_unregister(pd);
 }
+EXPORT_SYMBOL(usb_nop_xceiv_unregister);
 
 static inline struct nop_usb_xceiv *xceiv_to_nop(struct otg_transceiver *x)
 {
diff --git a/drivers/usb/otg/twl4030-usb.c b/drivers/usb/otg/twl4030-usb.c
index d9478d0..9e3e7a5 100644
--- a/drivers/usb/otg/twl4030-usb.c
+++ b/drivers/usb/otg/twl4030-usb.c
@@ -217,6 +217,7 @@
 
 /* In module TWL4030_MODULE_PM_MASTER */
 #define PROTECT_KEY			0x0E
+#define STS_HW_CONDITIONS		0x0F
 
 /* In module TWL4030_MODULE_PM_RECEIVER */
 #define VUSB_DEDICATED1			0x7D
@@ -351,15 +352,26 @@
 	int	status;
 	int	linkstat = USB_LINK_UNKNOWN;
 
-	/* STS_HW_CONDITIONS */
-	status = twl4030_readb(twl, TWL4030_MODULE_PM_MASTER, 0x0f);
+	/*
+	 * For ID/VBUS sensing, see manual section 15.4.8 ...
+	 * except when using only battery backup power, two
+	 * comparators produce VBUS_PRES and ID_PRES signals,
+	 * which don't match docs elsewhere.  But ... BIT(7)
+	 * and BIT(2) of STS_HW_CONDITIONS, respectively, do
+	 * seem to match up.  If either is true the USB_PRES
+	 * signal is active, the OTG module is activated, and
+	 * its interrupt may be raised (may wake the system).
+	 */
+	status = twl4030_readb(twl, TWL4030_MODULE_PM_MASTER,
+			STS_HW_CONDITIONS);
 	if (status < 0)
 		dev_err(twl->dev, "USB link status err %d\n", status);
-	else if (status & BIT(7))
-		linkstat = USB_LINK_VBUS;
-	else if (status & BIT(2))
-		linkstat = USB_LINK_ID;
-	else
+	else if (status & (BIT(7) | BIT(2))) {
+		if (status & BIT(2))
+			linkstat = USB_LINK_ID;
+		else
+			linkstat = USB_LINK_VBUS;
+	} else
 		linkstat = USB_LINK_NONE;
 
 	dev_dbg(twl->dev, "HW_CONDITIONS 0x%02x/%d; link %d\n",
@@ -641,7 +653,7 @@
 	return 0;
 }
 
-static int __init twl4030_usb_probe(struct platform_device *pdev)
+static int __devinit twl4030_usb_probe(struct platform_device *pdev)
 {
 	struct twl4030_usb_data *pdata = pdev->dev.platform_data;
 	struct twl4030_usb	*twl;
diff --git a/drivers/usb/serial/aircable.c b/drivers/usb/serial/aircable.c
index 6d106e7..2cbfab3 100644
--- a/drivers/usb/serial/aircable.c
+++ b/drivers/usb/serial/aircable.c
@@ -364,7 +364,7 @@
 	return 0;
 }
 
-static void aircable_shutdown(struct usb_serial *serial)
+static void aircable_release(struct usb_serial *serial)
 {
 
 	struct usb_serial_port *port = serial->port[0];
@@ -375,7 +375,6 @@
 	if (priv) {
 		serial_buf_free(priv->tx_buf);
 		serial_buf_free(priv->rx_buf);
-		usb_set_serial_port_data(port, NULL);
 		kfree(priv);
 	}
 }
@@ -601,7 +600,7 @@
 	.num_ports =		1,
 	.attach =		aircable_attach,
 	.probe =		aircable_probe,
-	.shutdown =		aircable_shutdown,
+	.release =		aircable_release,
 	.write =		aircable_write,
 	.write_room =		aircable_write_room,
 	.write_bulk_callback =	aircable_write_bulk_callback,
diff --git a/drivers/usb/serial/belkin_sa.c b/drivers/usb/serial/belkin_sa.c
index 2bfd6dd..7033b03 100644
--- a/drivers/usb/serial/belkin_sa.c
+++ b/drivers/usb/serial/belkin_sa.c
@@ -90,7 +90,7 @@
 
 /* function prototypes for a Belkin USB Serial Adapter F5U103 */
 static int  belkin_sa_startup(struct usb_serial *serial);
-static void belkin_sa_shutdown(struct usb_serial *serial);
+static void belkin_sa_release(struct usb_serial *serial);
 static int  belkin_sa_open(struct tty_struct *tty,
 			struct usb_serial_port *port, struct file *filp);
 static void belkin_sa_close(struct usb_serial_port *port);
@@ -142,7 +142,7 @@
 	.tiocmget =		belkin_sa_tiocmget,
 	.tiocmset =		belkin_sa_tiocmset,
 	.attach =		belkin_sa_startup,
-	.shutdown =		belkin_sa_shutdown,
+	.release =		belkin_sa_release,
 };
 
 
@@ -197,14 +197,13 @@
 }
 
 
-static void belkin_sa_shutdown(struct usb_serial *serial)
+static void belkin_sa_release(struct usb_serial *serial)
 {
 	struct belkin_sa_private *priv;
 	int i;
 
 	dbg("%s", __func__);
 
-	/* stop reads and writes on all ports */
 	for (i = 0; i < serial->num_ports; ++i) {
 		/* My special items, the standard routines free my urbs */
 		priv = usb_get_serial_port_data(serial->port[i]);
diff --git a/drivers/usb/serial/bus.c b/drivers/usb/serial/bus.c
index 83bbb5b..ba555c5 100644
--- a/drivers/usb/serial/bus.c
+++ b/drivers/usb/serial/bus.c
@@ -59,23 +59,22 @@
 		retval = -ENODEV;
 		goto exit;
 	}
+	if (port->dev_state != PORT_REGISTERING)
+		goto exit;
 
 	driver = port->serial->type;
 	if (driver->port_probe) {
-		if (!try_module_get(driver->driver.owner)) {
-			dev_err(dev, "module get failed, exiting\n");
-			retval = -EIO;
-			goto exit;
-		}
 		retval = driver->port_probe(port);
-		module_put(driver->driver.owner);
 		if (retval)
 			goto exit;
 	}
 
 	retval = device_create_file(dev, &dev_attr_port_number);
-	if (retval)
+	if (retval) {
+		if (driver->port_remove)
+			retval = driver->port_remove(port);
 		goto exit;
+	}
 
 	minor = port->number;
 	tty_register_device(usb_serial_tty_driver, minor, dev);
@@ -98,19 +97,15 @@
 	if (!port)
 		return -ENODEV;
 
+	if (port->dev_state != PORT_UNREGISTERING)
+		return retval;
+
 	device_remove_file(&port->dev, &dev_attr_port_number);
 
 	driver = port->serial->type;
-	if (driver->port_remove) {
-		if (!try_module_get(driver->driver.owner)) {
-			dev_err(dev, "module get failed, exiting\n");
-			retval = -EIO;
-			goto exit;
-		}
+	if (driver->port_remove)
 		retval = driver->port_remove(port);
-		module_put(driver->driver.owner);
-	}
-exit:
+
 	minor = port->number;
 	tty_unregister_device(usb_serial_tty_driver, minor);
 	dev_info(dev, "%s converter now disconnected from ttyUSB%d\n",
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
index 16a154d..2b9eeda 100644
--- a/drivers/usb/serial/cp210x.c
+++ b/drivers/usb/serial/cp210x.c
@@ -50,7 +50,7 @@
 		unsigned int, unsigned int);
 static void cp210x_break_ctl(struct tty_struct *, int);
 static int cp210x_startup(struct usb_serial *);
-static void cp210x_shutdown(struct usb_serial *);
+static void cp210x_disconnect(struct usb_serial *);
 
 static int debug;
 
@@ -137,7 +137,7 @@
 	.tiocmget 		= cp210x_tiocmget,
 	.tiocmset		= cp210x_tiocmset,
 	.attach			= cp210x_startup,
-	.shutdown		= cp210x_shutdown,
+	.disconnect		= cp210x_disconnect,
 };
 
 /* Config request types */
@@ -792,7 +792,7 @@
 	return 0;
 }
 
-static void cp210x_shutdown(struct usb_serial *serial)
+static void cp210x_disconnect(struct usb_serial *serial)
 {
 	int i;
 
diff --git a/drivers/usb/serial/cyberjack.c b/drivers/usb/serial/cyberjack.c
index 933ba91..336523f 100644
--- a/drivers/usb/serial/cyberjack.c
+++ b/drivers/usb/serial/cyberjack.c
@@ -58,7 +58,8 @@
 
 /* Function prototypes */
 static int cyberjack_startup(struct usb_serial *serial);
-static void cyberjack_shutdown(struct usb_serial *serial);
+static void cyberjack_disconnect(struct usb_serial *serial);
+static void cyberjack_release(struct usb_serial *serial);
 static int  cyberjack_open(struct tty_struct *tty,
 			struct usb_serial_port *port, struct file *filp);
 static void cyberjack_close(struct usb_serial_port *port);
@@ -94,7 +95,8 @@
 	.id_table =		id_table,
 	.num_ports =		1,
 	.attach =		cyberjack_startup,
-	.shutdown =		cyberjack_shutdown,
+	.disconnect =		cyberjack_disconnect,
+	.release =		cyberjack_release,
 	.open =			cyberjack_open,
 	.close =		cyberjack_close,
 	.write =		cyberjack_write,
@@ -148,17 +150,25 @@
 	return 0;
 }
 
-static void cyberjack_shutdown(struct usb_serial *serial)
+static void cyberjack_disconnect(struct usb_serial *serial)
+{
+	int i;
+
+	dbg("%s", __func__);
+
+	for (i = 0; i < serial->num_ports; ++i)
+		usb_kill_urb(serial->port[i]->interrupt_in_urb);
+}
+
+static void cyberjack_release(struct usb_serial *serial)
 {
 	int i;
 
 	dbg("%s", __func__);
 
 	for (i = 0; i < serial->num_ports; ++i) {
-		usb_kill_urb(serial->port[i]->interrupt_in_urb);
 		/* My special items, the standard routines free my urbs */
 		kfree(usb_get_serial_port_data(serial->port[i]));
-		usb_set_serial_port_data(serial->port[i], NULL);
 	}
 }
 
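The cyberjack change above is the simplest example of the API conversion that runs through the rest of this series: the old .shutdown callback is split into .disconnect, called when the USB device goes away and used to kill any outstanding URBs, and .release, called when the usb_serial structure is finally freed and used to release driver-private memory. Below is a minimal sketch of a driver wired up to the new pair; the my_* names and the placeholder VID/PID are invented for illustration only and do not correspond to any driver touched here.

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/usb.h>
#include <linux/usb/serial.h>

static const struct usb_device_id id_table[] = {
	{ USB_DEVICE(0x0000, 0x0000) },	/* placeholder VID/PID */
	{ }				/* terminating entry */
};

static void my_disconnect(struct usb_serial *serial)
{
	int i;

	/* device is going away: stop in-flight I/O, do not free memory yet */
	for (i = 0; i < serial->num_ports; ++i)
		usb_kill_urb(serial->port[i]->interrupt_in_urb);
}

static void my_release(struct usb_serial *serial)
{
	int i;

	/* last reference dropped: now free the per-port private data */
	for (i = 0; i < serial->num_ports; ++i)
		kfree(usb_get_serial_port_data(serial->port[i]));
}

static struct usb_serial_driver my_device = {
	.driver = {
		.owner =	THIS_MODULE,
		.name =		"my_driver",
	},
	.id_table =		id_table,
	.num_ports =		1,
	.disconnect =		my_disconnect,
	.release =		my_release,
};

Drivers whose shutdown callback was empty (empeg, ipaq, ftdi_sio) simply drop it rather than adding stubs for either new hook.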
diff --git a/drivers/usb/serial/cypress_m8.c b/drivers/usb/serial/cypress_m8.c
index 669f938..9734085 100644
--- a/drivers/usb/serial/cypress_m8.c
+++ b/drivers/usb/serial/cypress_m8.c
@@ -171,7 +171,7 @@
 static int  cypress_earthmate_startup(struct usb_serial *serial);
 static int  cypress_hidcom_startup(struct usb_serial *serial);
 static int  cypress_ca42v2_startup(struct usb_serial *serial);
-static void cypress_shutdown(struct usb_serial *serial);
+static void cypress_release(struct usb_serial *serial);
 static int  cypress_open(struct tty_struct *tty,
 			struct usb_serial_port *port, struct file *filp);
 static void cypress_close(struct usb_serial_port *port);
@@ -215,7 +215,7 @@
 	.id_table =			id_table_earthmate,
 	.num_ports =			1,
 	.attach =			cypress_earthmate_startup,
-	.shutdown =			cypress_shutdown,
+	.release =			cypress_release,
 	.open =				cypress_open,
 	.close =			cypress_close,
 	.dtr_rts =			cypress_dtr_rts,
@@ -242,7 +242,7 @@
 	.id_table =			id_table_cyphidcomrs232,
 	.num_ports =			1,
 	.attach =			cypress_hidcom_startup,
-	.shutdown =			cypress_shutdown,
+	.release =			cypress_release,
 	.open =				cypress_open,
 	.close =			cypress_close,
 	.dtr_rts =			cypress_dtr_rts,
@@ -269,7 +269,7 @@
 	.id_table =			id_table_nokiaca42v2,
 	.num_ports =			1,
 	.attach =			cypress_ca42v2_startup,
-	.shutdown =			cypress_shutdown,
+	.release =			cypress_release,
 	.open =				cypress_open,
 	.close =			cypress_close,
 	.dtr_rts =			cypress_dtr_rts,
@@ -616,7 +616,7 @@
 } /* cypress_ca42v2_startup */
 
 
-static void cypress_shutdown(struct usb_serial *serial)
+static void cypress_release(struct usb_serial *serial)
 {
 	struct cypress_private *priv;
 
@@ -629,7 +629,6 @@
 	if (priv) {
 		cypress_buf_free(priv->buf);
 		kfree(priv);
-		usb_set_serial_port_data(serial->port[0], NULL);
 	}
 }
 
diff --git a/drivers/usb/serial/digi_acceleport.c b/drivers/usb/serial/digi_acceleport.c
index 30f5140..f480809 100644
--- a/drivers/usb/serial/digi_acceleport.c
+++ b/drivers/usb/serial/digi_acceleport.c
@@ -460,7 +460,8 @@
 static void digi_dtr_rts(struct usb_serial_port *port, int on);
 static int digi_startup_device(struct usb_serial *serial);
 static int digi_startup(struct usb_serial *serial);
-static void digi_shutdown(struct usb_serial *serial);
+static void digi_disconnect(struct usb_serial *serial);
+static void digi_release(struct usb_serial *serial);
 static void digi_read_bulk_callback(struct urb *urb);
 static int digi_read_inb_callback(struct urb *urb);
 static int digi_read_oob_callback(struct urb *urb);
@@ -524,7 +525,8 @@
 	.tiocmget =			digi_tiocmget,
 	.tiocmset =			digi_tiocmset,
 	.attach =			digi_startup,
-	.shutdown =			digi_shutdown,
+	.disconnect =			digi_disconnect,
+	.release =			digi_release,
 };
 
 static struct usb_serial_driver digi_acceleport_4_device = {
@@ -550,7 +552,8 @@
 	.tiocmget =			digi_tiocmget,
 	.tiocmset =			digi_tiocmset,
 	.attach =			digi_startup,
-	.shutdown =			digi_shutdown,
+	.disconnect =			digi_disconnect,
+	.release =			digi_release,
 };
 
 
@@ -1556,16 +1559,23 @@
 }
 
 
-static void digi_shutdown(struct usb_serial *serial)
+static void digi_disconnect(struct usb_serial *serial)
 {
 	int i;
-	dbg("digi_shutdown: TOP, in_interrupt()=%ld", in_interrupt());
+	dbg("digi_disconnect: TOP, in_interrupt()=%ld", in_interrupt());
 
 	/* stop reads and writes on all ports */
 	for (i = 0; i < serial->type->num_ports + 1; i++) {
 		usb_kill_urb(serial->port[i]->read_urb);
 		usb_kill_urb(serial->port[i]->write_urb);
 	}
+}
+
+
+static void digi_release(struct usb_serial *serial)
+{
+	int i;
+	dbg("digi_release: TOP, in_interrupt()=%ld", in_interrupt());
 
 	/* free the private data structures for all ports */
 	/* number of regular ports + 1 for the out-of-band port */
diff --git a/drivers/usb/serial/empeg.c b/drivers/usb/serial/empeg.c
index 2b141cc..80cb347 100644
--- a/drivers/usb/serial/empeg.c
+++ b/drivers/usb/serial/empeg.c
@@ -90,7 +90,6 @@
 static void empeg_throttle(struct tty_struct *tty);
 static void empeg_unthrottle(struct tty_struct *tty);
 static int  empeg_startup(struct usb_serial *serial);
-static void empeg_shutdown(struct usb_serial *serial);
 static void empeg_set_termios(struct tty_struct *tty,
 		struct usb_serial_port *port, struct ktermios *old_termios);
 static void empeg_write_bulk_callback(struct urb *urb);
@@ -124,7 +123,6 @@
 	.throttle =		empeg_throttle,
 	.unthrottle =		empeg_unthrottle,
 	.attach =		empeg_startup,
-	.shutdown =		empeg_shutdown,
 	.set_termios =		empeg_set_termios,
 	.write =		empeg_write,
 	.write_room =		empeg_write_room,
@@ -427,12 +425,6 @@
 }
 
 
-static void empeg_shutdown(struct usb_serial *serial)
-{
-	dbg("%s", __func__);
-}
-
-
 static void empeg_set_termios(struct tty_struct *tty,
 		struct usb_serial_port *port, struct ktermios *old_termios)
 {
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index 683304d..3dc3768 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -47,7 +47,7 @@
 /*
  * Version Information
  */
-#define DRIVER_VERSION "v1.4.3"
+#define DRIVER_VERSION "v1.5.0"
 #define DRIVER_AUTHOR "Greg Kroah-Hartman <greg@kroah.com>, Bill Ryder <bryder@sgi.com>, Kuba Ober <kuba@mareimbrium.org>"
 #define DRIVER_DESC "USB FTDI Serial Converters Driver"
 
@@ -82,7 +82,8 @@
 	int rx_processed;
 	unsigned long rx_bytes;
 
-	__u16 interface;	/* FT2232C port interface (0 for FT232/245) */
+	__u16 interface;	/* FT2232C, FT2232H or FT4232H port interface
+				   (0 for FT232/245) */
 
 	speed_t force_baud;	/* if non-zero, force the baud rate to
 				   this value */
@@ -94,6 +95,7 @@
 	unsigned long tx_bytes;
 	unsigned long tx_outstanding_bytes;
 	unsigned long tx_outstanding_urbs;
+	unsigned short max_packet_size;
 };
 
 /* struct ftdi_sio_quirk is used by devices requiring special attention. */
@@ -164,6 +166,7 @@
 	{ USB_DEVICE(FTDI_VID, FTDI_8U232AM_ALT_PID) },
 	{ USB_DEVICE(FTDI_VID, FTDI_232RL_PID) },
 	{ USB_DEVICE(FTDI_VID, FTDI_8U2232C_PID) },
+	{ USB_DEVICE(FTDI_VID, FTDI_4232H_PID) },
 	{ USB_DEVICE(FTDI_VID, FTDI_MICRO_CHAMELEON_PID) },
 	{ USB_DEVICE(FTDI_VID, FTDI_RELAIS_PID) },
 	{ USB_DEVICE(FTDI_VID, FTDI_OPENDCC_PID) },
@@ -673,6 +676,7 @@
 	{ USB_DEVICE(JETI_VID, JETI_SPC1201_PID) },
 	{ USB_DEVICE(MARVELL_VID, MARVELL_SHEEVAPLUG_PID),
 		.driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
+	{ USB_DEVICE(LARSENBRUSGAARD_VID, LB_ALTITRACK_PID) },
 	{ },					/* Optional parameter entry */
 	{ }					/* Terminating entry */
 };
@@ -693,12 +697,13 @@
 	[FT232BM] = "FT232BM",
 	[FT2232C] = "FT2232C",
 	[FT232RL] = "FT232RL",
+	[FT2232H] = "FT2232H",
+	[FT4232H] = "FT4232H"
 };
 
 
 /* Constants for read urb and write urb */
 #define BUFSZ 512
-#define PKTSZ 64
 
 /* rx_flags */
 #define THROTTLED		0x01
@@ -715,7 +720,6 @@
 /* function prototypes for a FTDI serial converter */
 static int  ftdi_sio_probe(struct usb_serial *serial,
 					const struct usb_device_id *id);
-static void ftdi_shutdown(struct usb_serial *serial);
 static int  ftdi_sio_port_probe(struct usb_serial_port *port);
 static int  ftdi_sio_port_remove(struct usb_serial_port *port);
 static int  ftdi_open(struct tty_struct *tty,
@@ -744,6 +748,8 @@
 static unsigned short int ftdi_232am_baud_to_divisor(int baud);
 static __u32 ftdi_232bm_baud_base_to_divisor(int baud, int base);
 static __u32 ftdi_232bm_baud_to_divisor(int baud);
+static __u32 ftdi_2232h_baud_base_to_divisor(int baud, int base);
+static __u32 ftdi_2232h_baud_to_divisor(int baud);
 
 static struct usb_serial_driver ftdi_sio_device = {
 	.driver = {
@@ -772,7 +778,6 @@
 	.ioctl =		ftdi_ioctl,
 	.set_termios =		ftdi_set_termios,
 	.break_ctl =		ftdi_break_ctl,
-	.shutdown =		ftdi_shutdown,
 };
 
 
@@ -838,6 +843,36 @@
 	 return ftdi_232bm_baud_base_to_divisor(baud, 48000000);
 }
 
+static __u32 ftdi_2232h_baud_base_to_divisor(int baud, int base)
+{
+	static const unsigned char divfrac[8] = { 0, 3, 2, 4, 1, 5, 6, 7 };
+	__u32 divisor;
+	int divisor3;
+
+	/* hi-speed baud rate is 10-bit sampling instead of 16-bit */
+	divisor3 = (base / 10 / baud) * 8;
+
+	divisor = divisor3 >> 3;
+	divisor |= (__u32)divfrac[divisor3 & 0x7] << 14;
+	/* Deal with special cases for highest baud rates. */
+	if (divisor == 1)
+		divisor = 0;
+	else if (divisor == 0x4001)
+		divisor = 1;
+	/*
+	 * Set this bit to turn off a divide by 2.5 on baud rate generator
+	 * This enables baud rates up to 12Mbaud but cannot reach below 1200
+	 * baud with this bit set
+	 */
+	divisor |= 0x00020000;
+	return divisor;
+}
+
+static __u32 ftdi_2232h_baud_to_divisor(int baud)
+{
+	 return ftdi_2232h_baud_base_to_divisor(baud, 120000000);
+}
+
 #define set_mctrl(port, set)		update_mctrl((port), (set), 0)
 #define clear_mctrl(port, clear)	update_mctrl((port), 0, (clear))
 
@@ -996,6 +1031,19 @@
 			baud = 9600;
 		}
 		break;
+	case FT2232H: /* FT2232H chip */
+	case FT4232H: /* FT4232H chip */
+		if ((baud <= 12000000) && (baud >= 1200)) {
+			div_value = ftdi_2232h_baud_to_divisor(baud);
+		} else if (baud < 1200) {
+			div_value = ftdi_232bm_baud_to_divisor(baud);
+		} else {
+			dbg("%s - Baud rate too high!", __func__);
+			div_value = ftdi_232bm_baud_to_divisor(9600);
+			div_okay = 0;
+			baud = 9600;
+		}
+		break;
 	} /* priv->chip_type */
 
 	if (div_okay) {
@@ -1196,14 +1244,29 @@
 	if (interfaces > 1) {
 		int inter;
 
-		/* Multiple interfaces.  Assume FT2232C. */
-		priv->chip_type = FT2232C;
+		/* Multiple interfaces.*/
+		if (version == 0x0800) {
+			priv->chip_type = FT4232H;
+			/* Hi-speed - baud clock runs at 120MHz */
+			priv->baud_base = 120000000 / 2;
+		} else if (version == 0x0700) {
+			priv->chip_type = FT2232H;
+			/* Hi-speed - baud clock runs at 120MHz */
+			priv->baud_base = 120000000 / 2;
+		} else
+			priv->chip_type = FT2232C;
+
 		/* Determine interface code. */
 		inter = serial->interface->altsetting->desc.bInterfaceNumber;
-		if (inter == 0)
-			priv->interface = PIT_SIOA;
-		else
-			priv->interface = PIT_SIOB;
+		if (inter == 0) {
+			priv->interface = INTERFACE_A;
+		} else if (inter == 1) {
+			priv->interface = INTERFACE_B;
+		} else if (inter == 2) {
+			priv->interface = INTERFACE_C;
+		} else if (inter == 3) {
+			priv->interface = INTERFACE_D;
+		}
 		/* BM-type devices have a bug where bcdDevice gets set
 		 * to 0x200 when iSerialNumber is 0.  */
 		if (version < 0x500) {
@@ -1231,6 +1294,45 @@
 }
 
 
+/* Determine the maximum packet size for the device.  This depends on the chip
+ * type and the USB host capabilities.  The value should be obtained from the
+ * device descriptor, as the chip will use the appropriate value for the host. */
+static void ftdi_set_max_packet_size(struct usb_serial_port *port)
+{
+	struct ftdi_private *priv = usb_get_serial_port_data(port);
+	struct usb_serial *serial = port->serial;
+	struct usb_device *udev = serial->dev;
+
+	struct usb_interface *interface = serial->interface;
+	struct usb_endpoint_descriptor *ep_desc = &interface->cur_altsetting->endpoint[1].desc;
+
+	unsigned num_endpoints;
+	int i = 0;
+
+	num_endpoints = interface->cur_altsetting->desc.bNumEndpoints;
+	dev_info(&udev->dev, "Number of endpoints %d\n", num_endpoints);
+
+	/* NOTE: some customers have programmed FT232R/FT245R devices
+	 * with an endpoint size of 0 - not good.  In this case, we
+	 * want to override the endpoint descriptor setting and use a
+	 * value of 64 for wMaxPacketSize */
+	for (i = 0; i < num_endpoints; i++) {
+		dev_info(&udev->dev, "Endpoint %d MaxPacketSize %d\n", i+1,
+			interface->cur_altsetting->endpoint[i].desc.wMaxPacketSize);
+		ep_desc = &interface->cur_altsetting->endpoint[i].desc;
+		if (ep_desc->wMaxPacketSize == 0) {
+			ep_desc->wMaxPacketSize = cpu_to_le16(0x40);
+			dev_info(&udev->dev, "Overriding wMaxPacketSize on endpoint %d\n", i);
+		}
+	}
+
+	/* set max packet size based on descriptor */
+	priv->max_packet_size = ep_desc->wMaxPacketSize;
+
+	dev_info(&udev->dev, "Setting MaxPacketSize %d\n", priv->max_packet_size);
+}
+
+
 /*
  * ***************************************************************************
  * Sysfs Attribute
@@ -1314,7 +1416,9 @@
 		if ((!retval) &&
 		    (priv->chip_type == FT232BM ||
 		     priv->chip_type == FT2232C ||
-		     priv->chip_type == FT232RL)) {
+		     priv->chip_type == FT232RL ||
+		     priv->chip_type == FT2232H ||
+		     priv->chip_type == FT4232H)) {
 			retval = device_create_file(&port->dev,
 						    &dev_attr_latency_timer);
 		}
@@ -1333,7 +1437,9 @@
 		device_remove_file(&port->dev, &dev_attr_event_char);
 		if (priv->chip_type == FT232BM ||
 		    priv->chip_type == FT2232C ||
-		    priv->chip_type == FT232RL) {
+		    priv->chip_type == FT232RL ||
+		    priv->chip_type == FT2232H ||
+		    priv->chip_type == FT4232H) {
 			device_remove_file(&port->dev, &dev_attr_latency_timer);
 		}
 	}
@@ -1416,6 +1522,7 @@
 	usb_set_serial_port_data(port, priv);
 
 	ftdi_determine_type(port);
+	ftdi_set_max_packet_size(port);
 	read_latency_timer(port);
 	create_sysfs_attrs(port);
 	return 0;
@@ -1485,18 +1592,6 @@
 	return 0;
 }
 
-/* ftdi_shutdown is called from usbserial:usb_serial_disconnect
- *   it is called when the usb device is disconnected
- *
- *   usbserial:usb_serial_disconnect
- *      calls __serial_close for each open of the port
- *      shutdown is called then (ie ftdi_shutdown)
- */
-static void ftdi_shutdown(struct usb_serial *serial)
-{
-	dbg("%s", __func__);
-}
-
 static void ftdi_sio_priv_release(struct kref *k)
 {
 	struct ftdi_private *priv = container_of(k, struct ftdi_private, kref);
@@ -1671,8 +1766,8 @@
 	if (data_offset > 0) {
 		/* Original sio needs control bytes too... */
 		transfer_size += (data_offset *
-				((count + (PKTSZ - 1 - data_offset)) /
-				 (PKTSZ - data_offset)));
+				((count + (priv->max_packet_size - 1 - data_offset)) /
+				 (priv->max_packet_size - data_offset)));
 	}
 
 	buffer = kmalloc(transfer_size, GFP_ATOMIC);
@@ -1694,7 +1789,7 @@
 	if (data_offset > 0) {
 		/* Original sio requires control byte at start of
 		   each packet. */
-		int user_pktsz = PKTSZ - data_offset;
+		int user_pktsz = priv->max_packet_size - data_offset;
 		int todo = count;
 		unsigned char *first_byte = buffer;
 		const unsigned char *current_position = buf;
@@ -1775,11 +1870,6 @@
 
 	dbg("%s - port %d", __func__, port->number);
 
-	if (status) {
-		dbg("nonzero write bulk status received: %d", status);
-		return;
-	}
-
 	priv = usb_get_serial_port_data(port);
 	if (!priv) {
 		dbg("%s - bad port private data pointer - exiting", __func__);
@@ -1790,13 +1880,18 @@
 	data_offset = priv->write_offset;
 	if (data_offset > 0) {
 		/* Subtract the control bytes */
-		countback -= (data_offset * DIV_ROUND_UP(countback, PKTSZ));
+		countback -= (data_offset * DIV_ROUND_UP(countback, priv->max_packet_size));
 	}
 	spin_lock_irqsave(&priv->tx_lock, flags);
 	--priv->tx_outstanding_urbs;
 	priv->tx_outstanding_bytes -= countback;
 	spin_unlock_irqrestore(&priv->tx_lock, flags);
 
+	if (status) {
+		dbg("nonzero write bulk status received: %d", status);
+		return;
+	}
+
 	usb_serial_port_softint(port);
 } /* ftdi_write_bulk_callback */
 
@@ -1892,7 +1987,7 @@
 
 	/* count data bytes, but not status bytes */
 	countread = urb->actual_length;
-	countread -= 2 * DIV_ROUND_UP(countread, PKTSZ);
+	countread -= 2 * DIV_ROUND_UP(countread, priv->max_packet_size);
 	spin_lock_irqsave(&priv->rx_lock, flags);
 	priv->rx_bytes += countread;
 	spin_unlock_irqrestore(&priv->rx_lock, flags);
@@ -1965,7 +2060,7 @@
 
 	need_flip = 0;
 	for (packet_offset = priv->rx_processed;
-		packet_offset < urb->actual_length; packet_offset += PKTSZ) {
+		packet_offset < urb->actual_length; packet_offset += priv->max_packet_size) {
 		int length;
 
 		/* Compare new line status to the old one, signal if different/
@@ -1980,7 +2075,7 @@
 			priv->prev_status = new_status;
 		}
 
-		length = min_t(u32, PKTSZ, urb->actual_length-packet_offset)-2;
+		length = min_t(u32, priv->max_packet_size, urb->actual_length-packet_offset)-2;
 		if (length < 0) {
 			dev_err(&port->dev, "%s - bad packet length: %d\n",
 				__func__, length+2);
@@ -2011,6 +2106,7 @@
 		if (data[packet_offset+1] & FTDI_RS_BI) {
 			error_flag = TTY_BREAK;
 			dbg("BREAK received");
+			usb_serial_handle_break(port);
 		}
 		if (data[packet_offset+1] & FTDI_RS_PE) {
 			error_flag = TTY_PARITY;
@@ -2025,8 +2121,11 @@
 				/* Note that the error flag is duplicated for
 				   every character received since we don't know
 				   which character it applied to */
-				tty_insert_flip_char(tty,
-					data[packet_offset + i], error_flag);
+				if (!usb_serial_handle_sysrq_char(port,
+						data[packet_offset + i]))
+					tty_insert_flip_char(tty,
+						data[packet_offset + i],
+						error_flag);
 			}
 			need_flip = 1;
 		}
@@ -2332,6 +2431,8 @@
 	case FT232BM:
 	case FT2232C:
 	case FT232RL:
+	case FT2232H:
+	case FT4232H:
 		/* the 8U232AM returns a two byte value (the sio is a 1 byte
 		   value) - in the same format as the data returned from the in
 		   point */
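The new hi-speed divisor path above packs a 14-bit integer divisor into bits 0-13, a 3-bit fraction (encoded through the divfrac[] table) into bits 14-16, and then sets bit 17 (0x00020000) to bypass the divide-by-2.5 prescaler, which is what lets the FT2232H/FT4232H reach 12 MBaud but prevents rates below 1200 baud on that path, hence the fallback to the 232BM divisor for slower rates. The fragment below is a standalone restatement of ftdi_2232h_baud_base_to_divisor() with one worked value, so the encoding can be sanity-checked outside the kernel; it is an illustration, not a drop-in replacement.

#include <stdio.h>

static unsigned int ft2232h_divisor(int baud, int base)
{
	static const unsigned char divfrac[8] = { 0, 3, 2, 4, 1, 5, 6, 7 };
	unsigned int divisor;
	int divisor3;

	/* hi-speed parts sample at base/10 (10-bit) rather than base/16 */
	divisor3 = (base / 10 / baud) * 8;
	divisor  = divisor3 >> 3;				/* integer part */
	divisor |= (unsigned int)divfrac[divisor3 & 0x7] << 14;	/* fraction   */

	/* the two highest rates use special-case encodings */
	if (divisor == 1)
		divisor = 0;
	else if (divisor == 0x4001)
		divisor = 1;

	return divisor | 0x00020000;	/* disable the divide-by-2.5 prescaler */
}

int main(void)
{
	/*
	 * Worked example, 115200 baud with the 120 MHz clock:
	 *   divisor3 = (12000000 / 115200) * 8 = 104 * 8 = 832
	 *   integer part 832 >> 3 = 104 (0x68), fraction divfrac[832 & 7] = 0
	 *   result 0x00020068; actual rate 12000000 / 104 = ~115385 baud
	 */
	printf("115200 baud -> 0x%08x\n", ft2232h_divisor(115200, 120000000));
	return 0;
}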
diff --git a/drivers/usb/serial/ftdi_sio.h b/drivers/usb/serial/ftdi_sio.h
index 12330fa1..f1d440a 100644
--- a/drivers/usb/serial/ftdi_sio.h
+++ b/drivers/usb/serial/ftdi_sio.h
@@ -10,7 +10,7 @@
  * The device is based on the FTDI FT8U100AX chip. It has a DB25 on one side,
  * USB on the other.
  *
- * Thanx to FTDI (http://www.ftdi.co.uk) for so kindly providing details
+ * Thanx to FTDI (http://www.ftdichip.com) for so kindly providing details
  * of the protocol required to talk to the device and ongoing assistence
  * during development.
  *
@@ -28,11 +28,15 @@
 #define FTDI_8U232AM_ALT_PID 0x6006 /* FTDI's alternate PID for above */
 #define FTDI_8U2232C_PID 0x6010 /* Dual channel device */
 #define FTDI_232RL_PID  0xFBFA  /* Product ID for FT232RL */
+#define FTDI_4232H_PID 0x6011 /* Quad channel hi-speed device */
 #define FTDI_RELAIS_PID	0xFA10  /* Relais device from Rudolf Gugler */
 #define FTDI_NF_RIC_VID	0x0DCD	/* Vendor Id */
 #define FTDI_NF_RIC_PID	0x0001	/* Product Id */
 #define FTDI_USBX_707_PID 0xF857	/* ADSTech IR Blaster USBX-707 */
 
+/* Larsen and Brusgaard AltiTrack/USBtrack  */
+#define LARSENBRUSGAARD_VID		0x0FD8
+#define LB_ALTITRACK_PID		0x0001
 
 /* www.canusb.com Lawicel CANUSB device */
 #define FTDI_CANUSB_PID 0xFFA8 /* Product Id */
@@ -873,6 +877,11 @@
 #define FTDI_SIO_SET_LATENCY_TIMER	9 /* Set the latency timer */
 #define FTDI_SIO_GET_LATENCY_TIMER	10 /* Get the latency timer */
 
+/* Interface indices for FT2232, FT2232H and FT4232H devices */
+#define INTERFACE_A		1
+#define INTERFACE_B		2
+#define INTERFACE_C		3
+#define INTERFACE_D		4
 
 /*
  * FIC / OpenMoko, Inc. http://wiki.openmoko.org/wiki/Neo1973_Debug_Board_v3
@@ -1036,6 +1045,8 @@
 	FT232BM = 3,
 	FT2232C = 4,
 	FT232RL = 5,
+	FT2232H = 6,
+	FT4232H = 7
 } ftdi_chip_type_t;
 
 typedef enum {
diff --git a/drivers/usb/serial/garmin_gps.c b/drivers/usb/serial/garmin_gps.c
index ee25a3f..8839f1c 100644
--- a/drivers/usb/serial/garmin_gps.c
+++ b/drivers/usb/serial/garmin_gps.c
@@ -1,7 +1,7 @@
 /*
  * Garmin GPS driver
  *
- * Copyright (C) 2006,2007 Hermann Kneissel herkne@users.sourceforge.net
+ * Copyright (C) 2006-2009 Hermann Kneissel herkne@users.sourceforge.net
  *
  * The latest version of the driver can be found at
  * http://sourceforge.net/projects/garmin-gps/
@@ -51,7 +51,7 @@
  */
 
 #define VERSION_MAJOR	0
-#define VERSION_MINOR	31
+#define VERSION_MINOR	33
 
 #define _STR(s) #s
 #define _DRIVER_VERSION(a, b) "v" _STR(a) "." _STR(b)
@@ -129,7 +129,6 @@
 	__u8   state;
 	__u16  flags;
 	__u8   mode;
-	__u8   ignorePkts;
 	__u8   count;
 	__u8   pkt_id;
 	__u32  serial_num;
@@ -141,8 +140,6 @@
 	__u8   inbuffer [GPS_IN_BUFSIZ];  /* tty -> usb */
 	__u8   outbuffer[GPS_OUT_BUFSIZ]; /* usb -> tty */
 	__u8   privpkt[4*6];
-	atomic_t req_count;
-	atomic_t resp_count;
 	spinlock_t lock;
 	struct list_head pktlist;
 };
@@ -170,6 +167,8 @@
 #define FLAGS_BULK_IN_ACTIVE      0x0020
 #define FLAGS_BULK_IN_RESTART     0x0010
 #define FLAGS_THROTTLED           0x0008
+#define APP_REQ_SEEN              0x0004
+#define APP_RESP_SEEN             0x0002
 #define CLEAR_HALT_REQUIRED       0x0001
 
 #define FLAGS_QUEUING             0x0100
@@ -184,20 +183,16 @@
 
 
 /* function prototypes */
-static void gsp_next_packet(struct garmin_data *garmin_data_p);
-static int  garmin_write_bulk(struct usb_serial_port *port,
+static int gsp_next_packet(struct garmin_data *garmin_data_p);
+static int garmin_write_bulk(struct usb_serial_port *port,
 			     const unsigned char *buf, int count,
 			     int dismiss_ack);
 
 /* some special packets to be send or received */
 static unsigned char const GARMIN_START_SESSION_REQ[]
 	= { 0, 0, 0, 0,  5, 0, 0, 0, 0, 0, 0, 0 };
-static unsigned char const GARMIN_START_SESSION_REQ2[]
-	= { 0, 0, 0, 0, 16, 0, 0, 0, 0, 0, 0, 0 };
 static unsigned char const GARMIN_START_SESSION_REPLY[]
 	= { 0, 0, 0, 0,  6, 0, 0, 0, 4, 0, 0, 0 };
-static unsigned char const GARMIN_SESSION_ACTIVE_REPLY[]
-	= { 0, 0, 0, 0, 17, 0, 0, 0, 4, 0, 0, 0, 0, 16, 0, 0 };
 static unsigned char const GARMIN_BULK_IN_AVAIL_REPLY[]
 	= { 0, 0, 0, 0,  2, 0, 0, 0, 0, 0, 0, 0 };
 static unsigned char const GARMIN_APP_LAYER_REPLY[]
@@ -233,13 +228,6 @@
 };
 
 
-static inline int noResponseFromAppLayer(struct garmin_data *garmin_data_p)
-{
-	return atomic_read(&garmin_data_p->req_count) ==
-				atomic_read(&garmin_data_p->resp_count);
-}
-
-
 static inline int getLayerId(const __u8 *usbPacket)
 {
 	return __le32_to_cpup((__le32 *)(usbPacket));
@@ -325,8 +313,11 @@
 		state = garmin_data_p->state;
 		spin_unlock_irqrestore(&garmin_data_p->lock, flags);
 
+		dbg("%s - added: pkt: %d - %d bytes",
+			__func__, pkt->seq, data_length);
+
 		/* in serial mode, if someone is waiting for data from
-		   the device, iconvert and send the next packet to tty. */
+		   the device, convert and send the next packet to tty. */
 		if (result && (state == STATE_GSP_WAIT_DATA))
 			gsp_next_packet(garmin_data_p);
 	}
@@ -411,7 +402,7 @@
 /*
  * called for a complete packet received from tty layer
  *
- * the complete packet (pkzid ... cksum) is in garmin_data_p->inbuf starting
+ * the complete packet (pktid ... cksum) is in garmin_data_p->inbuf starting
  * at GSP_INITIAL_OFFSET.
  *
  * count - number of bytes in the input buffer including space reserved for
@@ -501,7 +492,6 @@
 	unsigned long flags;
 	int offs = 0;
 	int ack_or_nak_seen = 0;
-	int i = 0;
 	__u8 *dest;
 	int size;
 	/* dleSeen: set if last byte read was a DLE */
@@ -519,8 +509,8 @@
 	skip = garmin_data_p->flags & FLAGS_GSP_SKIP;
 	spin_unlock_irqrestore(&garmin_data_p->lock, flags);
 
-	dbg("%s - dle=%d skip=%d size=%d count=%d",
-		__func__, dleSeen, skip, size, count);
+	/* dbg("%s - dle=%d skip=%d size=%d count=%d",
+		__func__, dleSeen, skip, size, count); */
 
 	if (size == 0)
 		size = GSP_INITIAL_OFFSET;
@@ -568,7 +558,6 @@
 		} else if (!skip) {
 
 			if (dleSeen) {
-				dbg("non-masked DLE at %d - restarting", i);
 				size = GSP_INITIAL_OFFSET;
 				dleSeen = 0;
 			}
@@ -599,19 +588,19 @@
 	else
 		garmin_data_p->flags &= ~FLAGS_GSP_DLESEEN;
 
-	if (ack_or_nak_seen)
-		garmin_data_p->state = STATE_GSP_WAIT_DATA;
-
 	spin_unlock_irqrestore(&garmin_data_p->lock, flags);
 
-	if (ack_or_nak_seen)
-		gsp_next_packet(garmin_data_p);
+	if (ack_or_nak_seen) {
+		if (gsp_next_packet(garmin_data_p) > 0)
+			garmin_data_p->state = STATE_ACTIVE;
+		else
+			garmin_data_p->state = STATE_GSP_WAIT_DATA;
+	}
 	return count;
 }
 
 
 
-
 /*
  * Sends a usb packet to the tty
  *
@@ -733,29 +722,28 @@
 }
 
 
-
-
-
 /*
  * Process the next pending data packet - if there is one
  */
-static void gsp_next_packet(struct garmin_data *garmin_data_p)
+static int gsp_next_packet(struct garmin_data *garmin_data_p)
 {
+	int result = 0;
 	struct garmin_packet *pkt = NULL;
 
 	while ((pkt = pkt_pop(garmin_data_p)) != NULL) {
 		dbg("%s - next pkt: %d", __func__, pkt->seq);
-		if (gsp_send(garmin_data_p, pkt->data, pkt->size) > 0) {
+		result = gsp_send(garmin_data_p, pkt->data, pkt->size);
+		if (result > 0) {
 			kfree(pkt);
-			return;
+			return result;
 		}
 		kfree(pkt);
 	}
+	return result;
 }
 
 
 
-
 /******************************************************************************
  * garmin native mode
  ******************************************************************************/
@@ -888,14 +876,6 @@
 	unsigned long flags;
 	int status = 0;
 
-	struct usb_serial_port *port = garmin_data_p->port;
-
-	if (port != NULL && atomic_read(&garmin_data_p->resp_count)) {
-		/* send a terminate command */
-		status = garmin_write_bulk(port, GARMIN_STOP_TRANSFER_REQ,
-					sizeof(GARMIN_STOP_TRANSFER_REQ), 1);
-	}
-
 	/* flush all queued data */
 	pkt_clear(garmin_data_p);
 
@@ -908,16 +888,12 @@
 }
 
 
-
-
-
-
 static int garmin_init_session(struct usb_serial_port *port)
 {
-	unsigned long flags;
 	struct usb_serial *serial = port->serial;
 	struct garmin_data *garmin_data_p = usb_get_serial_port_data(port);
 	int status = 0;
+	int i = 0;
 
 	if (status == 0) {
 		usb_kill_urb(port->interrupt_in_urb);
@@ -931,30 +907,25 @@
 							__func__, status);
 	}
 
+	/*
+	 * using the initialization method from gpsbabel. See comments in
+	 * gpsbabel/jeeps/gpslibusb.c gusb_reset_toggles()
+	 */
 	if (status == 0) {
 		dbg("%s - starting session ...", __func__);
 		garmin_data_p->state = STATE_ACTIVE;
-		status = garmin_write_bulk(port, GARMIN_START_SESSION_REQ,
+
+		for (i = 0; i < 3; i++) {
+			status = garmin_write_bulk(port,
+					GARMIN_START_SESSION_REQ,
 					sizeof(GARMIN_START_SESSION_REQ), 0);
 
-		if (status >= 0) {
-
-			spin_lock_irqsave(&garmin_data_p->lock, flags);
-			garmin_data_p->ignorePkts++;
-			spin_unlock_irqrestore(&garmin_data_p->lock, flags);
-
-			/* not needed, but the win32 driver does it too ... */
-			status = garmin_write_bulk(port,
-					GARMIN_START_SESSION_REQ2,
-					sizeof(GARMIN_START_SESSION_REQ2), 0);
-			if (status >= 0) {
-				status = 0;
-				spin_lock_irqsave(&garmin_data_p->lock, flags);
-				garmin_data_p->ignorePkts++;
-				spin_unlock_irqrestore(&garmin_data_p->lock,
-									flags);
-			}
+			if (status < 0)
+				break;
 		}
+
+		if (status > 0)
+			status = 0;
 	}
 
 	return status;
@@ -962,8 +933,6 @@
 
 
 
-
-
 static int garmin_open(struct tty_struct *tty,
 			struct usb_serial_port *port, struct file *filp)
 {
@@ -977,8 +946,6 @@
 	garmin_data_p->mode  = initial_mode;
 	garmin_data_p->count = 0;
 	garmin_data_p->flags = 0;
-	atomic_set(&garmin_data_p->req_count, 0);
-	atomic_set(&garmin_data_p->resp_count, 0);
 	spin_unlock_irqrestore(&garmin_data_p->lock, flags);
 
 	/* shutdown any bulk reads that might be going on */
@@ -1006,6 +973,7 @@
 		return;
 
 	mutex_lock(&port->serial->disc_mutex);
+
 	if (!port->serial->disconnected)
 		garmin_clear(garmin_data_p);
 
@@ -1013,25 +981,17 @@
 	usb_kill_urb(port->read_urb);
 	usb_kill_urb(port->write_urb);
 
-	if (!port->serial->disconnected) {
-		if (noResponseFromAppLayer(garmin_data_p) ||
-		    ((garmin_data_p->flags & CLEAR_HALT_REQUIRED) != 0)) {
-			process_resetdev_request(port);
-			garmin_data_p->state = STATE_RESET;
-		} else {
-			garmin_data_p->state = STATE_DISCONNECTED;
-		}
-	} else {
+	/* keep reset state so we know that we must start a new session */
+	if (garmin_data_p->state != STATE_RESET)
 		garmin_data_p->state = STATE_DISCONNECTED;
-	}
+
 	mutex_unlock(&port->serial->disc_mutex);
 }
 
+
 static void garmin_write_bulk_callback(struct urb *urb)
 {
-	unsigned long flags;
 	struct usb_serial_port *port = urb->context;
-	int status = urb->status;
 
 	if (port) {
 		struct garmin_data *garmin_data_p =
@@ -1039,20 +999,13 @@
 
 		dbg("%s - port %d", __func__, port->number);
 
-		if (GARMIN_LAYERID_APPL == getLayerId(urb->transfer_buffer)
-		    && (garmin_data_p->mode == MODE_GARMIN_SERIAL))  {
-			gsp_send_ack(garmin_data_p,
+		if (GARMIN_LAYERID_APPL == getLayerId(urb->transfer_buffer)) {
+
+			if (garmin_data_p->mode == MODE_GARMIN_SERIAL) {
+				gsp_send_ack(garmin_data_p,
 					((__u8 *)urb->transfer_buffer)[4]);
+			}
 		}
-
-		if (status) {
-			dbg("%s - nonzero write bulk status received: %d",
-			    __func__, status);
-			spin_lock_irqsave(&garmin_data_p->lock, flags);
-			garmin_data_p->flags |= CLEAR_HALT_REQUIRED;
-			spin_unlock_irqrestore(&garmin_data_p->lock, flags);
-		}
-
 		usb_serial_port_softint(port);
 	}
 
@@ -1108,7 +1061,11 @@
 	urb->transfer_flags |= URB_ZERO_PACKET;
 
 	if (GARMIN_LAYERID_APPL == getLayerId(buffer)) {
-		atomic_inc(&garmin_data_p->req_count);
+
+		spin_lock_irqsave(&garmin_data_p->lock, flags);
+		garmin_data_p->flags |= APP_REQ_SEEN;
+		spin_unlock_irqrestore(&garmin_data_p->lock, flags);
+
 		if (garmin_data_p->mode == MODE_GARMIN_SERIAL)  {
 			pkt_clear(garmin_data_p);
 			garmin_data_p->state = STATE_GSP_WAIT_DATA;
@@ -1140,6 +1097,9 @@
 
 	usb_serial_debug_data(debug, &port->dev, __func__, count, buf);
 
+	if (garmin_data_p->state == STATE_RESET)
+		return -EIO;
+
 	/* check for our private packets */
 	if (count >= GARMIN_PKTHDR_LENGTH) {
 		len = PRIVPKTSIZ;
@@ -1184,7 +1144,7 @@
 				break;
 
 			case PRIV_PKTID_RESET_REQ:
-				atomic_inc(&garmin_data_p->req_count);
+				process_resetdev_request(port);
 				break;
 
 			case PRIV_PKTID_SET_DEF_MODE:
@@ -1200,8 +1160,6 @@
 		}
 	}
 
-	garmin_data_p->ignorePkts = 0;
-
 	if (garmin_data_p->mode == MODE_GARMIN_SERIAL) {
 		return gsp_receive(garmin_data_p, buf, count);
 	} else {	/* MODE_NATIVE */
@@ -1224,31 +1182,33 @@
 static void garmin_read_process(struct garmin_data *garmin_data_p,
 				 unsigned char *data, unsigned data_length)
 {
+	unsigned long flags;
+
 	if (garmin_data_p->flags & FLAGS_DROP_DATA) {
 		/* abort-transfer cmd is actice */
 		dbg("%s - pkt dropped", __func__);
 	} else if (garmin_data_p->state != STATE_DISCONNECTED &&
 		garmin_data_p->state != STATE_RESET) {
 
-		/* remember any appl.layer packets, so we know
-		   if a reset is required or not when closing
-		   the device */
-		if (0 == memcmp(data, GARMIN_APP_LAYER_REPLY,
-				sizeof(GARMIN_APP_LAYER_REPLY))) {
-			atomic_inc(&garmin_data_p->resp_count);
-		}
-
 		/* if throttling is active or postprecessing is required
 		   put the received data in the input queue, otherwise
 		   send it directly to the tty port */
 		if (garmin_data_p->flags & FLAGS_QUEUING) {
 			pkt_add(garmin_data_p, data, data_length);
-		} else if (garmin_data_p->mode == MODE_GARMIN_SERIAL) {
-			if (getLayerId(data) == GARMIN_LAYERID_APPL)
+		} else if (getLayerId(data) == GARMIN_LAYERID_APPL) {
+
+			spin_lock_irqsave(&garmin_data_p->lock, flags);
+			garmin_data_p->flags |= APP_RESP_SEEN;
+			spin_unlock_irqrestore(&garmin_data_p->lock, flags);
+
+			if (garmin_data_p->mode == MODE_GARMIN_SERIAL) {
 				pkt_add(garmin_data_p, data, data_length);
-		} else {
-			send_to_tty(garmin_data_p->port, data, data_length);
+			} else {
+				send_to_tty(garmin_data_p->port, data,
+						data_length);
+			}
 		}
+		/* ignore system layer packets ... */
 	}
 }
 
@@ -1363,8 +1323,6 @@
 			} else {
 				spin_lock_irqsave(&garmin_data_p->lock, flags);
 				garmin_data_p->flags |= FLAGS_BULK_IN_ACTIVE;
-				/* do not send this packet to the user */
-				garmin_data_p->ignorePkts = 1;
 				spin_unlock_irqrestore(&garmin_data_p->lock,
 									flags);
 			}
@@ -1391,17 +1349,7 @@
 			__func__, garmin_data_p->serial_num);
 	}
 
-	if (garmin_data_p->ignorePkts) {
-		/* this reply belongs to a request generated by the driver,
-		   ignore it. */
-		dbg("%s - pkt ignored (%d)",
-			__func__, garmin_data_p->ignorePkts);
-		spin_lock_irqsave(&garmin_data_p->lock, flags);
-		garmin_data_p->ignorePkts--;
-		spin_unlock_irqrestore(&garmin_data_p->lock, flags);
-	} else {
-		garmin_read_process(garmin_data_p, data, urb->actual_length);
-	}
+	garmin_read_process(garmin_data_p, data, urb->actual_length);
 
 	port->interrupt_in_urb->dev = port->serial->dev;
 	retval = usb_submit_urb(urb, GFP_ATOMIC);
@@ -1527,7 +1475,7 @@
 }
 
 
-static void garmin_shutdown(struct usb_serial *serial)
+static void garmin_disconnect(struct usb_serial *serial)
 {
 	struct usb_serial_port *port = serial->port[0];
 	struct garmin_data *garmin_data_p = usb_get_serial_port_data(port);
@@ -1536,8 +1484,17 @@
 
 	usb_kill_urb(port->interrupt_in_urb);
 	del_timer_sync(&garmin_data_p->timer);
+}
+
+
+static void garmin_release(struct usb_serial *serial)
+{
+	struct usb_serial_port *port = serial->port[0];
+	struct garmin_data *garmin_data_p = usb_get_serial_port_data(port);
+
+	dbg("%s", __func__);
+
 	kfree(garmin_data_p);
-	usb_set_serial_port_data(port, NULL);
 }
 
 
@@ -1556,7 +1513,8 @@
 	.throttle            = garmin_throttle,
 	.unthrottle          = garmin_unthrottle,
 	.attach              = garmin_attach,
-	.shutdown            = garmin_shutdown,
+	.disconnect          = garmin_disconnect,
+	.release             = garmin_release,
 	.write               = garmin_write,
 	.write_room          = garmin_write_room,
 	.write_bulk_callback = garmin_write_bulk_callback,
diff --git a/drivers/usb/serial/generic.c b/drivers/usb/serial/generic.c
index be82ea9..932d624 100644
--- a/drivers/usb/serial/generic.c
+++ b/drivers/usb/serial/generic.c
@@ -63,7 +63,8 @@
 	.id_table =		generic_device_ids,
 	.usb_driver = 		&generic_driver,
 	.num_ports =		1,
-	.shutdown =		usb_serial_generic_shutdown,
+	.disconnect =		usb_serial_generic_disconnect,
+	.release =		usb_serial_generic_release,
 	.throttle =		usb_serial_generic_throttle,
 	.unthrottle =		usb_serial_generic_unthrottle,
 	.resume =		usb_serial_generic_resume,
@@ -190,6 +191,88 @@
 	generic_cleanup(port);
 }
 
+static int usb_serial_multi_urb_write(struct tty_struct *tty,
+	struct usb_serial_port *port, const unsigned char *buf, int count)
+{
+	unsigned long flags;
+	struct urb *urb;
+	unsigned char *buffer;
+	int status;
+	int towrite;
+	int bwrite = 0;
+
+	dbg("%s - port %d", __func__, port->number);
+
+	if (count == 0)
+		dbg("%s - write request of 0 bytes", __func__);
+
+	while (count > 0) {
+		towrite = (count > port->bulk_out_size) ?
+			port->bulk_out_size : count;
+		spin_lock_irqsave(&port->lock, flags);
+		if (port->urbs_in_flight >
+		    port->serial->type->max_in_flight_urbs) {
+			spin_unlock_irqrestore(&port->lock, flags);
+			dbg("%s - write limit hit\n", __func__);
+			return bwrite;
+		}
+		port->tx_bytes_flight += towrite;
+		port->urbs_in_flight++;
+		spin_unlock_irqrestore(&port->lock, flags);
+
+		buffer = kmalloc(towrite, GFP_ATOMIC);
+		if (!buffer) {
+			dev_err(&port->dev,
+			"%s ran out of kernel memory for urb ...\n", __func__);
+			goto error_no_buffer;
+		}
+
+		urb = usb_alloc_urb(0, GFP_ATOMIC);
+		if (!urb) {
+			dev_err(&port->dev, "%s - no more free urbs\n",
+				__func__);
+			goto error_no_urb;
+		}
+
+		/* Copy data */
+		memcpy(buffer, buf + bwrite, towrite);
+		usb_serial_debug_data(debug, &port->dev, __func__,
+				      towrite, buffer);
+		/* fill the buffer and send it */
+		usb_fill_bulk_urb(urb, port->serial->dev,
+			usb_sndbulkpipe(port->serial->dev,
+					port->bulk_out_endpointAddress),
+			buffer, towrite,
+			usb_serial_generic_write_bulk_callback, port);
+
+		status = usb_submit_urb(urb, GFP_ATOMIC);
+		if (status) {
+			dev_err(&port->dev,
+				"%s - failed submitting write urb, error %d\n",
+				__func__, status);
+			goto error;
+		}
+
+		/* This urb is the responsibility of the host driver now */
+		usb_free_urb(urb);
+		dbg("%s write: %d", __func__, towrite);
+		count -= towrite;
+		bwrite += towrite;
+	}
+	return bwrite;
+
+error:
+	usb_free_urb(urb);
+error_no_urb:
+	kfree(buffer);
+error_no_buffer:
+	spin_lock_irqsave(&port->lock, flags);
+	port->urbs_in_flight--;
+	port->tx_bytes_flight -= towrite;
+	spin_unlock_irqrestore(&port->lock, flags);
+	return bwrite;
+}
+
 int usb_serial_generic_write(struct tty_struct *tty,
 	struct usb_serial_port *port, const unsigned char *buf, int count)
 {
@@ -207,6 +290,11 @@
 	/* only do something if we have a bulk out endpoint */
 	if (serial->num_bulk_out) {
 		unsigned long flags;
+
+		if (serial->type->max_in_flight_urbs)
+			return usb_serial_multi_urb_write(tty, port,
+							  buf, count);
+
 		spin_lock_irqsave(&port->lock, flags);
 		if (port->write_urb_busy) {
 			spin_unlock_irqrestore(&port->lock, flags);
@@ -252,20 +340,26 @@
 	/* no bulk out, so return 0 bytes written */
 	return 0;
 }
+EXPORT_SYMBOL_GPL(usb_serial_generic_write);
 
 int usb_serial_generic_write_room(struct tty_struct *tty)
 {
 	struct usb_serial_port *port = tty->driver_data;
 	struct usb_serial *serial = port->serial;
+	unsigned long flags;
 	int room = 0;
 
 	dbg("%s - port %d", __func__, port->number);
-
-	/* FIXME: Locking */
-	if (serial->num_bulk_out) {
-		if (!(port->write_urb_busy))
-			room = port->bulk_out_size;
+	spin_lock_irqsave(&port->lock, flags);
+	if (serial->type->max_in_flight_urbs) {
+		if (port->urbs_in_flight < serial->type->max_in_flight_urbs)
+			room = port->bulk_out_size *
+				(serial->type->max_in_flight_urbs -
+				 port->urbs_in_flight);
+	} else if (serial->num_bulk_out && !(port->write_urb_busy)) {
+		room = port->bulk_out_size;
 	}
+	spin_unlock_irqrestore(&port->lock, flags);
 
 	dbg("%s - returns %d", __func__, room);
 	return room;
@@ -276,11 +370,16 @@
 	struct usb_serial_port *port = tty->driver_data;
 	struct usb_serial *serial = port->serial;
 	int chars = 0;
+	unsigned long flags;
 
 	dbg("%s - port %d", __func__, port->number);
 
-	/* FIXME: Locking */
-	if (serial->num_bulk_out) {
+	if (serial->type->max_in_flight_urbs) {
+		spin_lock_irqsave(&port->lock, flags);
+		chars = port->tx_bytes_flight;
+		spin_unlock_irqrestore(&port->lock, flags);
+	} else if (serial->num_bulk_out) {
+		/* FIXME: Locking */
 		if (port->write_urb_busy)
 			chars = port->write_urb->transfer_buffer_length;
 	}
@@ -290,7 +389,8 @@
 }
 
 
-static void resubmit_read_urb(struct usb_serial_port *port, gfp_t mem_flags)
+void usb_serial_generic_resubmit_read_urb(struct usb_serial_port *port,
+			gfp_t mem_flags)
 {
 	struct urb *urb = port->read_urb;
 	struct usb_serial *serial = port->serial;
@@ -311,25 +411,28 @@
 			"%s - failed resubmitting read urb, error %d\n",
 							__func__, result);
 }
+EXPORT_SYMBOL_GPL(usb_serial_generic_resubmit_read_urb);
 
 /* Push data to tty layer and resubmit the bulk read URB */
 static void flush_and_resubmit_read_urb(struct usb_serial_port *port)
 {
 	struct urb *urb = port->read_urb;
 	struct tty_struct *tty = tty_port_tty_get(&port->port);
-	int room;
+	char *ch = (char *)urb->transfer_buffer;
+	int i;
+
+	if (!tty)
+		goto done;
 
 	/* Push data to tty */
-	if (tty && urb->actual_length) {
-		room = tty_buffer_request_room(tty, urb->actual_length);
-		if (room) {
-			tty_insert_flip_string(tty, urb->transfer_buffer, room);
-			tty_flip_buffer_push(tty);
-		}
+	for (i = 0; i < urb->actual_length; i++, ch++) {
+		if (!usb_serial_handle_sysrq_char(port, *ch))
+			tty_insert_flip_char(tty, *ch, TTY_NORMAL);
 	}
+	tty_flip_buffer_push(tty);
 	tty_kref_put(tty);
-
-	resubmit_read_urb(port, GFP_ATOMIC);
+done:
+	usb_serial_generic_resubmit_read_urb(port, GFP_ATOMIC);
 }
 
 void usb_serial_generic_read_bulk_callback(struct urb *urb)
@@ -363,12 +466,24 @@
 
 void usb_serial_generic_write_bulk_callback(struct urb *urb)
 {
+	unsigned long flags;
 	struct usb_serial_port *port = urb->context;
 	int status = urb->status;
 
 	dbg("%s - port %d", __func__, port->number);
 
-	port->write_urb_busy = 0;
+	if (port->serial->type->max_in_flight_urbs) {
+		spin_lock_irqsave(&port->lock, flags);
+		--port->urbs_in_flight;
+		port->tx_bytes_flight -= urb->transfer_buffer_length;
+		if (port->urbs_in_flight < 0)
+			port->urbs_in_flight = 0;
+		spin_unlock_irqrestore(&port->lock, flags);
+	} else {
+		/* Handle the case for single urb mode */
+		port->write_urb_busy = 0;
+	}
+
 	if (status) {
 		dbg("%s - nonzero write bulk status received: %d",
 		    __func__, status);
@@ -408,11 +523,36 @@
 
 	if (was_throttled) {
 		/* Resume reading from device */
-		resubmit_read_urb(port, GFP_KERNEL);
+		usb_serial_generic_resubmit_read_urb(port, GFP_KERNEL);
 	}
 }
 
-void usb_serial_generic_shutdown(struct usb_serial *serial)
+int usb_serial_handle_sysrq_char(struct usb_serial_port *port, unsigned int ch)
+{
+	if (port->sysrq && port->console) {
+		if (ch && time_before(jiffies, port->sysrq)) {
+			handle_sysrq(ch, tty_port_tty_get(&port->port));
+			port->sysrq = 0;
+			return 1;
+		}
+		port->sysrq = 0;
+	}
+	return 0;
+}
+EXPORT_SYMBOL_GPL(usb_serial_handle_sysrq_char);
+
+int usb_serial_handle_break(struct usb_serial_port *port)
+{
+	if (!port->sysrq) {
+		port->sysrq = jiffies + HZ*5;
+		return 1;
+	}
+	port->sysrq = 0;
+	return 0;
+}
+EXPORT_SYMBOL_GPL(usb_serial_handle_break);
+
+void usb_serial_generic_disconnect(struct usb_serial *serial)
 {
 	int i;
 
@@ -423,3 +563,7 @@
 		generic_cleanup(serial->port[i]);
 }
 
+void usb_serial_generic_release(struct usb_serial *serial)
+{
+	dbg("%s", __func__);
+}
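Two things are added to the generic driver above that other drivers in this series start relying on. First, a driver can opt into the new multi-URB write path by setting .max_in_flight_urbs in its usb_serial_driver; left at zero, the old single write_urb path is kept. Second, usb_serial_handle_break() and usb_serial_handle_sysrq_char() let a USB serial console carry SysRq: a received BREAK arms a five-second window, and any byte arriving on a console port inside that window is consumed as the SysRq command instead of being pushed to the tty. The ftdi_sio hunks earlier in this diff use the helpers exactly that way; the fragment below is a hedged sketch of such a receive path, with my_process_rx() and the saw_break flag made up for illustration.

#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/usb.h>
#include <linux/usb/serial.h>

static void my_process_rx(struct usb_serial_port *port,
			  const unsigned char *data, int len, int saw_break)
{
	struct tty_struct *tty = tty_port_tty_get(&port->port);
	int i;

	if (!tty)
		return;

	if (saw_break)
		usb_serial_handle_break(port);	/* arms the 5 second SysRq window */

	for (i = 0; i < len; i++) {
		/* on a console port, a byte inside the window is eaten as SysRq */
		if (!usb_serial_handle_sysrq_char(port, data[i]))
			tty_insert_flip_char(tty, data[i], TTY_NORMAL);
	}
	tty_flip_buffer_push(tty);
	tty_kref_put(tty);
}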
diff --git a/drivers/usb/serial/io_edgeport.c b/drivers/usb/serial/io_edgeport.c
index 53ef599..0191693 100644
--- a/drivers/usb/serial/io_edgeport.c
+++ b/drivers/usb/serial/io_edgeport.c
@@ -224,7 +224,8 @@
 static int  edge_tiocmset(struct tty_struct *tty, struct file *file,
 					unsigned int set, unsigned int clear);
 static int  edge_startup(struct usb_serial *serial);
-static void edge_shutdown(struct usb_serial *serial);
+static void edge_disconnect(struct usb_serial *serial);
+static void edge_release(struct usb_serial *serial);
 
 #include "io_tables.h"	/* all of the devices that this driver supports */
 
@@ -3193,21 +3194,16 @@
 
 
 /****************************************************************************
- * edge_shutdown
+ * edge_disconnect
  *	This function is called whenever the device is removed from the usb bus.
  ****************************************************************************/
-static void edge_shutdown(struct usb_serial *serial)
+static void edge_disconnect(struct usb_serial *serial)
 {
 	struct edgeport_serial *edge_serial = usb_get_serial_data(serial);
-	int i;
 
 	dbg("%s", __func__);
 
 	/* stop reads and writes on all ports */
-	for (i = 0; i < serial->num_ports; ++i) {
-		kfree(usb_get_serial_port_data(serial->port[i]));
-		usb_set_serial_port_data(serial->port[i],  NULL);
-	}
 	/* free up our endpoint stuff */
 	if (edge_serial->is_epic) {
 		usb_kill_urb(edge_serial->interrupt_read_urb);
@@ -3218,9 +3214,24 @@
 		usb_free_urb(edge_serial->read_urb);
 		kfree(edge_serial->bulk_in_buffer);
 	}
+}
+
+
+/****************************************************************************
+ * edge_release
+ *	This function is called when the device structure is deallocated.
+ ****************************************************************************/
+static void edge_release(struct usb_serial *serial)
+{
+	struct edgeport_serial *edge_serial = usb_get_serial_data(serial);
+	int i;
+
+	dbg("%s", __func__);
+
+	for (i = 0; i < serial->num_ports; ++i)
+		kfree(usb_get_serial_port_data(serial->port[i]));
 
 	kfree(edge_serial);
-	usb_set_serial_data(serial, NULL);
 }
 
 
diff --git a/drivers/usb/serial/io_tables.h b/drivers/usb/serial/io_tables.h
index 7eb9d67..9241d31 100644
--- a/drivers/usb/serial/io_tables.h
+++ b/drivers/usb/serial/io_tables.h
@@ -117,7 +117,8 @@
 	.throttle		= edge_throttle,
 	.unthrottle		= edge_unthrottle,
 	.attach			= edge_startup,
-	.shutdown		= edge_shutdown,
+	.disconnect		= edge_disconnect,
+	.release		= edge_release,
 	.ioctl			= edge_ioctl,
 	.set_termios		= edge_set_termios,
 	.tiocmget		= edge_tiocmget,
@@ -145,7 +146,8 @@
 	.throttle		= edge_throttle,
 	.unthrottle		= edge_unthrottle,
 	.attach			= edge_startup,
-	.shutdown		= edge_shutdown,
+	.disconnect		= edge_disconnect,
+	.release		= edge_release,
 	.ioctl			= edge_ioctl,
 	.set_termios		= edge_set_termios,
 	.tiocmget		= edge_tiocmget,
@@ -173,7 +175,8 @@
 	.throttle		= edge_throttle,
 	.unthrottle		= edge_unthrottle,
 	.attach			= edge_startup,
-	.shutdown		= edge_shutdown,
+	.disconnect		= edge_disconnect,
+	.release		= edge_release,
 	.ioctl			= edge_ioctl,
 	.set_termios		= edge_set_termios,
 	.tiocmget		= edge_tiocmget,
@@ -200,7 +203,8 @@
 	.throttle		= edge_throttle,
 	.unthrottle		= edge_unthrottle,
 	.attach			= edge_startup,
-	.shutdown		= edge_shutdown,
+	.disconnect		= edge_disconnect,
+	.release		= edge_release,
 	.ioctl			= edge_ioctl,
 	.set_termios		= edge_set_termios,
 	.tiocmget		= edge_tiocmget,
diff --git a/drivers/usb/serial/io_ti.c b/drivers/usb/serial/io_ti.c
index db964db..e8bc42f 100644
--- a/drivers/usb/serial/io_ti.c
+++ b/drivers/usb/serial/io_ti.c
@@ -2663,7 +2663,7 @@
 	return -ENOMEM;
 }
 
-static void edge_shutdown(struct usb_serial *serial)
+static void edge_disconnect(struct usb_serial *serial)
 {
 	int i;
 	struct edgeport_port *edge_port;
@@ -2673,12 +2673,22 @@
 	for (i = 0; i < serial->num_ports; ++i) {
 		edge_port = usb_get_serial_port_data(serial->port[i]);
 		edge_remove_sysfs_attrs(edge_port->port);
+	}
+}
+
+static void edge_release(struct usb_serial *serial)
+{
+	int i;
+	struct edgeport_port *edge_port;
+
+	dbg("%s", __func__);
+
+	for (i = 0; i < serial->num_ports; ++i) {
+		edge_port = usb_get_serial_port_data(serial->port[i]);
 		edge_buf_free(edge_port->ep_out_buf);
 		kfree(edge_port);
-		usb_set_serial_port_data(serial->port[i], NULL);
 	}
 	kfree(usb_get_serial_data(serial));
-	usb_set_serial_data(serial, NULL);
 }
 
 
@@ -2915,7 +2925,8 @@
 	.throttle		= edge_throttle,
 	.unthrottle		= edge_unthrottle,
 	.attach			= edge_startup,
-	.shutdown		= edge_shutdown,
+	.disconnect		= edge_disconnect,
+	.release		= edge_release,
 	.port_probe		= edge_create_sysfs_attrs,
 	.ioctl			= edge_ioctl,
 	.set_termios		= edge_set_termios,
@@ -2944,7 +2955,8 @@
 	.throttle		= edge_throttle,
 	.unthrottle		= edge_unthrottle,
 	.attach			= edge_startup,
-	.shutdown		= edge_shutdown,
+	.disconnect		= edge_disconnect,
+	.release		= edge_release,
 	.port_probe		= edge_create_sysfs_attrs,
 	.ioctl			= edge_ioctl,
 	.set_termios		= edge_set_termios,
diff --git a/drivers/usb/serial/ipaq.c b/drivers/usb/serial/ipaq.c
index c610a99..2545d45 100644
--- a/drivers/usb/serial/ipaq.c
+++ b/drivers/usb/serial/ipaq.c
@@ -79,7 +79,6 @@
 static void ipaq_close(struct usb_serial_port *port);
 static int  ipaq_calc_num_ports(struct usb_serial *serial);
 static int  ipaq_startup(struct usb_serial *serial);
-static void ipaq_shutdown(struct usb_serial *serial);
 static int ipaq_write(struct tty_struct *tty, struct usb_serial_port *port,
 			const unsigned char *buf, int count);
 static int ipaq_write_bulk(struct usb_serial_port *port,
@@ -576,7 +575,6 @@
 	.close =		ipaq_close,
 	.attach =		ipaq_startup,
 	.calc_num_ports =	ipaq_calc_num_ports,
-	.shutdown =		ipaq_shutdown,
 	.write =		ipaq_write,
 	.write_room =		ipaq_write_room,
 	.chars_in_buffer =	ipaq_chars_in_buffer,
@@ -990,11 +988,6 @@
 	return usb_reset_configuration(serial->dev);
 }
 
-static void ipaq_shutdown(struct usb_serial *serial)
-{
-	dbg("%s", __func__);
-}
-
 static int __init ipaq_init(void)
 {
 	int retval;
diff --git a/drivers/usb/serial/iuu_phoenix.c b/drivers/usb/serial/iuu_phoenix.c
index 76a3cc3..96873a7 100644
--- a/drivers/usb/serial/iuu_phoenix.c
+++ b/drivers/usb/serial/iuu_phoenix.c
@@ -121,8 +121,8 @@
 	return 0;
 }
 
-/* Shutdown function */
-static void iuu_shutdown(struct usb_serial *serial)
+/* Release function */
+static void iuu_release(struct usb_serial *serial)
 {
 	struct usb_serial_port *port = serial->port[0];
 	struct iuu_private *priv = usb_get_serial_port_data(port);
@@ -1202,7 +1202,7 @@
 	.tiocmset = iuu_tiocmset,
 	.set_termios = iuu_set_termios,
 	.attach = iuu_startup,
-	.shutdown = iuu_shutdown,
+	.release = iuu_release,
 };
 
 static int __init iuu_init(void)
diff --git a/drivers/usb/serial/keyspan.c b/drivers/usb/serial/keyspan.c
index f1195a9..2594b87 100644
--- a/drivers/usb/serial/keyspan.c
+++ b/drivers/usb/serial/keyspan.c
@@ -2689,7 +2689,7 @@
 	return 0;
 }
 
-static void keyspan_shutdown(struct usb_serial *serial)
+static void keyspan_disconnect(struct usb_serial *serial)
 {
 	int				i, j;
 	struct usb_serial_port		*port;
@@ -2729,6 +2729,17 @@
 			usb_free_urb(p_priv->out_urbs[j]);
 		}
 	}
+}
+
+static void keyspan_release(struct usb_serial *serial)
+{
+	int				i;
+	struct usb_serial_port		*port;
+	struct keyspan_serial_private 	*s_priv;
+
+	dbg("%s", __func__);
+
+	s_priv = usb_get_serial_data(serial);
 
 	/*  dbg("Freeing serial->private."); */
 	kfree(s_priv);
diff --git a/drivers/usb/serial/keyspan.h b/drivers/usb/serial/keyspan.h
index 0d4569b..3107ed1 100644
--- a/drivers/usb/serial/keyspan.h
+++ b/drivers/usb/serial/keyspan.h
@@ -41,7 +41,8 @@
 static void keyspan_close		(struct usb_serial_port *port);
 static void keyspan_dtr_rts		(struct usb_serial_port *port, int on);
 static int  keyspan_startup		(struct usb_serial *serial);
-static void keyspan_shutdown		(struct usb_serial *serial);
+static void keyspan_disconnect		(struct usb_serial *serial);
+static void keyspan_release		(struct usb_serial *serial);
 static int  keyspan_write_room		(struct tty_struct *tty);
 
 static int  keyspan_write		(struct tty_struct *tty,
@@ -569,7 +570,8 @@
 	.tiocmget		= keyspan_tiocmget,
 	.tiocmset		= keyspan_tiocmset,
 	.attach			= keyspan_startup,
-	.shutdown		= keyspan_shutdown,
+	.disconnect		= keyspan_disconnect,
+	.release		= keyspan_release,
 };
 
 static struct usb_serial_driver keyspan_2port_device = {
@@ -590,7 +592,8 @@
 	.tiocmget		= keyspan_tiocmget,
 	.tiocmset		= keyspan_tiocmset,
 	.attach			= keyspan_startup,
-	.shutdown		= keyspan_shutdown,
+	.disconnect		= keyspan_disconnect,
+	.release		= keyspan_release,
 };
 
 static struct usb_serial_driver keyspan_4port_device = {
@@ -611,7 +614,8 @@
 	.tiocmget		= keyspan_tiocmget,
 	.tiocmset		= keyspan_tiocmset,
 	.attach			= keyspan_startup,
-	.shutdown		= keyspan_shutdown,
+	.disconnect		= keyspan_disconnect,
+	.release		= keyspan_release,
 };
 
 #endif
diff --git a/drivers/usb/serial/keyspan_pda.c b/drivers/usb/serial/keyspan_pda.c
index ab769db..d0b12e4 100644
--- a/drivers/usb/serial/keyspan_pda.c
+++ b/drivers/usb/serial/keyspan_pda.c
@@ -809,7 +809,7 @@
 	return 0;
 }
 
-static void keyspan_pda_shutdown(struct usb_serial *serial)
+static void keyspan_pda_release(struct usb_serial *serial)
 {
 	dbg("%s", __func__);
 
@@ -869,7 +869,7 @@
 	.tiocmget =		keyspan_pda_tiocmget,
 	.tiocmset =		keyspan_pda_tiocmset,
 	.attach =		keyspan_pda_startup,
-	.shutdown =		keyspan_pda_shutdown,
+	.release =		keyspan_pda_release,
 };
 
 
diff --git a/drivers/usb/serial/kl5kusb105.c b/drivers/usb/serial/kl5kusb105.c
index fa817c6..0f44bb8 100644
--- a/drivers/usb/serial/kl5kusb105.c
+++ b/drivers/usb/serial/kl5kusb105.c
@@ -73,7 +73,8 @@
  * Function prototypes
  */
 static int  klsi_105_startup(struct usb_serial *serial);
-static void klsi_105_shutdown(struct usb_serial *serial);
+static void klsi_105_disconnect(struct usb_serial *serial);
+static void klsi_105_release(struct usb_serial *serial);
 static int  klsi_105_open(struct tty_struct *tty,
 			struct usb_serial_port *port, struct file *filp);
 static void klsi_105_close(struct usb_serial_port *port);
@@ -131,7 +132,8 @@
 	.tiocmget =          klsi_105_tiocmget,
 	.tiocmset =          klsi_105_tiocmset,
 	.attach =	     klsi_105_startup,
-	.shutdown =	     klsi_105_shutdown,
+	.disconnect =	     klsi_105_disconnect,
+	.release =	     klsi_105_release,
 	.throttle =	     klsi_105_throttle,
 	.unthrottle =	     klsi_105_unthrottle,
 };
@@ -315,7 +317,7 @@
 } /* klsi_105_startup */
 
 
-static void klsi_105_shutdown(struct usb_serial *serial)
+static void klsi_105_disconnect(struct usb_serial *serial)
 {
 	int i;
 
@@ -325,33 +327,36 @@
 	for (i = 0; i < serial->num_ports; ++i) {
 		struct klsi_105_private *priv =
 				usb_get_serial_port_data(serial->port[i]);
-		unsigned long flags;
 
 		if (priv) {
 			/* kill our write urb pool */
 			int j;
 			struct urb **write_urbs = priv->write_urb_pool;
-			spin_lock_irqsave(&priv->lock, flags);
 
 			for (j = 0; j < NUM_URBS; j++) {
 				if (write_urbs[j]) {
-					/* FIXME - uncomment the following
-					 * usb_kill_urb call when the host
-					 * controllers get fixed to set
-					 * urb->dev = NULL after the urb is
-					 * finished.  Otherwise this call
-					 * oopses. */
-					/* usb_kill_urb(write_urbs[j]); */
-					kfree(write_urbs[j]->transfer_buffer);
+					usb_kill_urb(write_urbs[j]);
 					usb_free_urb(write_urbs[j]);
 				}
 			}
-			spin_unlock_irqrestore(&priv->lock, flags);
-			kfree(priv);
-			usb_set_serial_port_data(serial->port[i], NULL);
 		}
 	}
-} /* klsi_105_shutdown */
+} /* klsi_105_disconnect */
+
+
+static void klsi_105_release(struct usb_serial *serial)
+{
+	int i;
+
+	dbg("%s", __func__);
+
+	for (i = 0; i < serial->num_ports; ++i) {
+		struct klsi_105_private *priv =
+				usb_get_serial_port_data(serial->port[i]);
+
+		kfree(priv);
+	}
+} /* klsi_105_release */
 
 static int  klsi_105_open(struct tty_struct *tty,
 			struct usb_serial_port *port, struct file *filp)
diff --git a/drivers/usb/serial/kobil_sct.c b/drivers/usb/serial/kobil_sct.c
index 6b57049..6db0e56 100644
--- a/drivers/usb/serial/kobil_sct.c
+++ b/drivers/usb/serial/kobil_sct.c
@@ -69,7 +69,7 @@
 
 /* Function prototypes */
 static int  kobil_startup(struct usb_serial *serial);
-static void kobil_shutdown(struct usb_serial *serial);
+static void kobil_release(struct usb_serial *serial);
 static int  kobil_open(struct tty_struct *tty,
 			struct usb_serial_port *port, struct file *filp);
 static void kobil_close(struct usb_serial_port *port);
@@ -117,7 +117,7 @@
 	.id_table =		id_table,
 	.num_ports =		1,
 	.attach =		kobil_startup,
-	.shutdown =		kobil_shutdown,
+	.release =		kobil_release,
 	.ioctl =		kobil_ioctl,
 	.set_termios =		kobil_set_termios,
 	.tiocmget =		kobil_tiocmget,
@@ -201,17 +201,13 @@
 }
 
 
-static void kobil_shutdown(struct usb_serial *serial)
+static void kobil_release(struct usb_serial *serial)
 {
 	int i;
 	dbg("%s - port %d", __func__, serial->port[0]->number);
 
-	for (i = 0; i < serial->num_ports; ++i) {
-		while (serial->port[i]->port.count > 0)
-			kobil_close(serial->port[i]);
+	for (i = 0; i < serial->num_ports; ++i)
 		kfree(usb_get_serial_port_data(serial->port[i]));
-		usb_set_serial_port_data(serial->port[i], NULL);
-	}
 }
 
 
diff --git a/drivers/usb/serial/mct_u232.c b/drivers/usb/serial/mct_u232.c
index 8737955..d8825e1 100644
--- a/drivers/usb/serial/mct_u232.c
+++ b/drivers/usb/serial/mct_u232.c
@@ -92,7 +92,7 @@
  * Function prototypes
  */
 static int  mct_u232_startup(struct usb_serial *serial);
-static void mct_u232_shutdown(struct usb_serial *serial);
+static void mct_u232_release(struct usb_serial *serial);
 static int  mct_u232_open(struct tty_struct *tty,
 			struct usb_serial_port *port, struct file *filp);
 static void mct_u232_close(struct usb_serial_port *port);
@@ -149,7 +149,7 @@
 	.tiocmget =	     mct_u232_tiocmget,
 	.tiocmset =	     mct_u232_tiocmset,
 	.attach =	     mct_u232_startup,
-	.shutdown =	     mct_u232_shutdown,
+	.release =	     mct_u232_release,
 };
 
 
@@ -407,7 +407,7 @@
 } /* mct_u232_startup */
 
 
-static void mct_u232_shutdown(struct usb_serial *serial)
+static void mct_u232_release(struct usb_serial *serial)
 {
 	struct mct_u232_private *priv;
 	int i;
@@ -417,12 +417,9 @@
 	for (i = 0; i < serial->num_ports; ++i) {
 		/* My special items, the standard routines free my urbs */
 		priv = usb_get_serial_port_data(serial->port[i]);
-		if (priv) {
-			usb_set_serial_port_data(serial->port[i], NULL);
-			kfree(priv);
-		}
+		kfree(priv);
 	}
-} /* mct_u232_shutdown */
+} /* mct_u232_release */
 
 static int  mct_u232_open(struct tty_struct *tty,
 			struct usb_serial_port *port, struct file *filp)
diff --git a/drivers/usb/serial/mos7720.c b/drivers/usb/serial/mos7720.c
index 9e1a013..bfc5ce0 100644
--- a/drivers/usb/serial/mos7720.c
+++ b/drivers/usb/serial/mos7720.c
@@ -1521,19 +1521,16 @@
 	return 0;
 }
 
-static void mos7720_shutdown(struct usb_serial *serial)
+static void mos7720_release(struct usb_serial *serial)
 {
 	int i;
 
 	/* free private structure allocated for serial port */
-	for (i = 0; i < serial->num_ports; ++i) {
+	for (i = 0; i < serial->num_ports; ++i)
 		kfree(usb_get_serial_port_data(serial->port[i]));
-		usb_set_serial_port_data(serial->port[i], NULL);
-	}
 
 	/* free private structure allocated for serial device */
 	kfree(usb_get_serial_data(serial));
-	usb_set_serial_data(serial, NULL);
 }
 
 static struct usb_driver usb_driver = {
@@ -1558,7 +1555,7 @@
 	.throttle		= mos7720_throttle,
 	.unthrottle		= mos7720_unthrottle,
 	.attach			= mos7720_startup,
-	.shutdown		= mos7720_shutdown,
+	.release		= mos7720_release,
 	.ioctl			= mos7720_ioctl,
 	.set_termios		= mos7720_set_termios,
 	.write			= mos7720_write,
diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c
index 10b78a3..c40f95c 100644
--- a/drivers/usb/serial/mos7840.c
+++ b/drivers/usb/serial/mos7840.c
@@ -238,7 +238,7 @@
 {
 	struct usb_device *dev = port->serial->dev;
 	val = val & 0x00ff;
-	dbg("mos7840_set_reg_sync offset is %x, value %x\n", reg, val);
+	dbg("mos7840_set_reg_sync offset is %x, value %x", reg, val);
 
 	return usb_control_msg(dev, usb_sndctrlpipe(dev, 0), MCS_WRREQ,
 			       MCS_WR_RTYPE, val, reg, NULL, 0,
@@ -260,7 +260,7 @@
 	ret = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), MCS_RDREQ,
 			      MCS_RD_RTYPE, 0, reg, val, VENDOR_READ_LENGTH,
 			      MOS_WDR_TIMEOUT);
-	dbg("mos7840_get_reg_sync offset is %x, return val %x\n", reg, *val);
+	dbg("mos7840_get_reg_sync offset is %x, return val %x", reg, *val);
 	*val = (*val) & 0x00ff;
 	return ret;
 }
@@ -282,18 +282,18 @@
 	if (port->serial->num_ports == 4) {
 		val |= (((__u16) port->number -
 				(__u16) (port->serial->minor)) + 1) << 8;
-		dbg("mos7840_set_uart_reg application number is %x\n", val);
+		dbg("mos7840_set_uart_reg application number is %x", val);
 	} else {
 		if (((__u16) port->number - (__u16) (port->serial->minor)) == 0) {
 			val |= (((__u16) port->number -
 			      (__u16) (port->serial->minor)) + 1) << 8;
-			dbg("mos7840_set_uart_reg application number is %x\n",
+			dbg("mos7840_set_uart_reg application number is %x",
 			    val);
 		} else {
 			val |=
 			    (((__u16) port->number -
 			      (__u16) (port->serial->minor)) + 2) << 8;
-			dbg("mos7840_set_uart_reg application number is %x\n",
+			dbg("mos7840_set_uart_reg application number is %x",
 			    val);
 		}
 	}
@@ -315,24 +315,24 @@
 	int ret = 0;
 	__u16 Wval;
 
-	/* dbg("application number is %4x \n",
+	/* dbg("application number is %4x",
 	    (((__u16)port->number - (__u16)(port->serial->minor))+1)<<8); */
 	/* Wval  is same as application number */
 	if (port->serial->num_ports == 4) {
 		Wval =
 		    (((__u16) port->number - (__u16) (port->serial->minor)) +
 		     1) << 8;
-		dbg("mos7840_get_uart_reg application number is %x\n", Wval);
+		dbg("mos7840_get_uart_reg application number is %x", Wval);
 	} else {
 		if (((__u16) port->number - (__u16) (port->serial->minor)) == 0) {
 			Wval = (((__u16) port->number -
 			      (__u16) (port->serial->minor)) + 1) << 8;
-			dbg("mos7840_get_uart_reg application number is %x\n",
+			dbg("mos7840_get_uart_reg application number is %x",
 			    Wval);
 		} else {
 			Wval = (((__u16) port->number -
 			      (__u16) (port->serial->minor)) + 2) << 8;
-			dbg("mos7840_get_uart_reg application number is %x\n",
+			dbg("mos7840_get_uart_reg application number is %x",
 			    Wval);
 		}
 	}
@@ -346,11 +346,11 @@
 static void mos7840_dump_serial_port(struct moschip_port *mos7840_port)
 {
 
-	dbg("***************************************\n");
-	dbg("SpRegOffset is %2x\n", mos7840_port->SpRegOffset);
-	dbg("ControlRegOffset is %2x \n", mos7840_port->ControlRegOffset);
-	dbg("DCRRegOffset is %2x \n", mos7840_port->DcrRegOffset);
-	dbg("***************************************\n");
+	dbg("***************************************");
+	dbg("SpRegOffset is %2x", mos7840_port->SpRegOffset);
+	dbg("ControlRegOffset is %2x", mos7840_port->ControlRegOffset);
+	dbg("DCRRegOffset is %2x", mos7840_port->DcrRegOffset);
+	dbg("***************************************");
 
 }
 
@@ -474,12 +474,12 @@
 		goto exit;
 	}
 
-	dbg("%s urb buffer size is %d\n", __func__, urb->actual_length);
-	dbg("%s mos7840_port->MsrLsr is %d port %d\n", __func__,
+	dbg("%s urb buffer size is %d", __func__, urb->actual_length);
+	dbg("%s mos7840_port->MsrLsr is %d port %d", __func__,
 	    mos7840_port->MsrLsr, mos7840_port->port_num);
 	data = urb->transfer_buffer;
 	regval = (__u8) data[0];
-	dbg("%s data is %x\n", __func__, regval);
+	dbg("%s data is %x", __func__, regval);
 	if (mos7840_port->MsrLsr == 0)
 		mos7840_handle_new_msr(mos7840_port, regval);
 	else if (mos7840_port->MsrLsr == 1)
@@ -538,7 +538,7 @@
 	__u16 wval, wreg = 0;
 	int status = urb->status;
 
-	dbg("%s", " : Entering\n");
+	dbg("%s", " : Entering");
 
 	switch (status) {
 	case 0:
@@ -570,7 +570,7 @@
 	 * Byte 5 FIFO status for both */
 
 	if (length && length > 5) {
-		dbg("%s \n", "Wrong data !!!");
+		dbg("%s", "Wrong data !!!");
 		return;
 	}
 
@@ -587,17 +587,17 @@
 		      (__u16) (serial->minor)) + 1) << 8;
 		if (mos7840_port->open) {
 			if (sp[i] & 0x01) {
-				dbg("SP%d No Interrupt !!!\n", i);
+				dbg("SP%d No Interrupt !!!", i);
 			} else {
 				switch (sp[i] & 0x0f) {
 				case SERIAL_IIR_RLS:
 					dbg("Serial Port %d: Receiver status error or ", i);
-					dbg("address bit detected in 9-bit mode\n");
+					dbg("address bit detected in 9-bit mode");
 					mos7840_port->MsrLsr = 1;
 					wreg = LINE_STATUS_REGISTER;
 					break;
 				case SERIAL_IIR_MS:
-					dbg("Serial Port %d: Modem status change\n", i);
+					dbg("Serial Port %d: Modem status change", i);
 					mos7840_port->MsrLsr = 0;
 					wreg = MODEM_STATUS_REGISTER;
 					break;
@@ -689,7 +689,7 @@
 
 	mos7840_port = urb->context;
 	if (!mos7840_port) {
-		dbg("%s", "NULL mos7840_port pointer \n");
+		dbg("%s", "NULL mos7840_port pointer");
 		mos7840_port->read_urb_busy = false;
 		return;
 	}
@@ -702,41 +702,41 @@
 
 	port = (struct usb_serial_port *)mos7840_port->port;
 	if (mos7840_port_paranoia_check(port, __func__)) {
-		dbg("%s", "Port Paranoia failed \n");
+		dbg("%s", "Port Paranoia failed");
 		mos7840_port->read_urb_busy = false;
 		return;
 	}
 
 	serial = mos7840_get_usb_serial(port, __func__);
 	if (!serial) {
-		dbg("%s\n", "Bad serial pointer ");
+		dbg("%s", "Bad serial pointer");
 		mos7840_port->read_urb_busy = false;
 		return;
 	}
 
-	dbg("%s\n", "Entering... \n");
+	dbg("%s", "Entering... ");
 
 	data = urb->transfer_buffer;
 
-	dbg("%s", "Entering ........... \n");
+	dbg("%s", "Entering ...........");
 
 	if (urb->actual_length) {
 		tty = tty_port_tty_get(&mos7840_port->port->port);
 		if (tty) {
 			tty_buffer_request_room(tty, urb->actual_length);
 			tty_insert_flip_string(tty, data, urb->actual_length);
-			dbg(" %s \n", data);
+			dbg(" %s ", data);
 			tty_flip_buffer_push(tty);
 			tty_kref_put(tty);
 		}
 		mos7840_port->icount.rx += urb->actual_length;
 		smp_wmb();
-		dbg("mos7840_port->icount.rx is %d:\n",
+		dbg("mos7840_port->icount.rx is %d:",
 		    mos7840_port->icount.rx);
 	}
 
 	if (!mos7840_port->read_urb) {
-		dbg("%s", "URB KILLED !!!\n");
+		dbg("%s", "URB KILLED !!!");
 		mos7840_port->read_urb_busy = false;
 		return;
 	}
@@ -777,16 +777,16 @@
 	spin_unlock(&mos7840_port->pool_lock);
 
 	if (status) {
-		dbg("nonzero write bulk status received:%d\n", status);
+		dbg("nonzero write bulk status received:%d", status);
 		return;
 	}
 
 	if (mos7840_port_paranoia_check(mos7840_port->port, __func__)) {
-		dbg("%s", "Port Paranoia failed \n");
+		dbg("%s", "Port Paranoia failed");
 		return;
 	}
 
-	dbg("%s \n", "Entering .........");
+	dbg("%s", "Entering .........");
 
 	tty = tty_port_tty_get(&mos7840_port->port->port);
 	if (tty && mos7840_port->open)
@@ -830,15 +830,17 @@
 	struct moschip_port *mos7840_port;
 	struct moschip_port *port0;
 
+	dbg("%s enter", __func__);
+
 	if (mos7840_port_paranoia_check(port, __func__)) {
-		dbg("%s", "Port Paranoia failed \n");
+		dbg("%s", "Port Paranoia failed");
 		return -ENODEV;
 	}
 
 	serial = port->serial;
 
 	if (mos7840_serial_paranoia_check(serial, __func__)) {
-		dbg("%s", "Serial Paranoia failed \n");
+		dbg("%s", "Serial Paranoia failed");
 		return -ENODEV;
 	}
 
@@ -891,20 +893,20 @@
 	Data = 0x0;
 	status = mos7840_get_reg_sync(port, mos7840_port->SpRegOffset, &Data);
 	if (status < 0) {
-		dbg("Reading Spreg failed\n");
+		dbg("Reading Spreg failed");
 		return -1;
 	}
 	Data |= 0x80;
 	status = mos7840_set_reg_sync(port, mos7840_port->SpRegOffset, Data);
 	if (status < 0) {
-		dbg("writing Spreg failed\n");
+		dbg("writing Spreg failed");
 		return -1;
 	}
 
 	Data &= ~0x80;
 	status = mos7840_set_reg_sync(port, mos7840_port->SpRegOffset, Data);
 	if (status < 0) {
-		dbg("writing Spreg failed\n");
+		dbg("writing Spreg failed");
 		return -1;
 	}
 	/* End of block to be checked */
@@ -913,7 +915,7 @@
 	status = mos7840_get_reg_sync(port, mos7840_port->ControlRegOffset,
 									&Data);
 	if (status < 0) {
-		dbg("Reading Controlreg failed\n");
+		dbg("Reading Controlreg failed");
 		return -1;
 	}
 	Data |= 0x08;		/* Driver done bit */
@@ -921,7 +923,7 @@
 	status = mos7840_set_reg_sync(port,
 				mos7840_port->ControlRegOffset, Data);
 	if (status < 0) {
-		dbg("writing Controlreg failed\n");
+		dbg("writing Controlreg failed");
 		return -1;
 	}
 	/* do register settings here */
@@ -932,21 +934,21 @@
 	Data = 0x00;
 	status = mos7840_set_uart_reg(port, INTERRUPT_ENABLE_REGISTER, Data);
 	if (status < 0) {
-		dbg("disableing interrupts failed\n");
+		dbg("disabling interrupts failed");
 		return -1;
 	}
 	/* Set FIFO_CONTROL_REGISTER to the default value */
 	Data = 0x00;
 	status = mos7840_set_uart_reg(port, FIFO_CONTROL_REGISTER, Data);
 	if (status < 0) {
-		dbg("Writing FIFO_CONTROL_REGISTER  failed\n");
+		dbg("Writing FIFO_CONTROL_REGISTER  failed");
 		return -1;
 	}
 
 	Data = 0xcf;
 	status = mos7840_set_uart_reg(port, FIFO_CONTROL_REGISTER, Data);
 	if (status < 0) {
-		dbg("Writing FIFO_CONTROL_REGISTER  failed\n");
+		dbg("Writing FIFO_CONTROL_REGISTER  failed");
 		return -1;
 	}
 
@@ -1043,12 +1045,12 @@
 	 * (can't set it up in mos7840_startup as the  *
 	 * structures were not set up at that time.)   */
 
-	dbg("port number is %d \n", port->number);
-	dbg("serial number is %d \n", port->serial->minor);
-	dbg("Bulkin endpoint is %d \n", port->bulk_in_endpointAddress);
-	dbg("BulkOut endpoint is %d \n", port->bulk_out_endpointAddress);
-	dbg("Interrupt endpoint is %d \n", port->interrupt_in_endpointAddress);
-	dbg("port's number in the device is %d\n", mos7840_port->port_num);
+	dbg("port number is %d", port->number);
+	dbg("serial number is %d", port->serial->minor);
+	dbg("Bulkin endpoint is %d", port->bulk_in_endpointAddress);
+	dbg("BulkOut endpoint is %d", port->bulk_out_endpointAddress);
+	dbg("Interrupt endpoint is %d", port->interrupt_in_endpointAddress);
+	dbg("port's number in the device is %d", mos7840_port->port_num);
 	mos7840_port->read_urb = port->read_urb;
 
 	/* set up our bulk in urb */
@@ -1061,7 +1063,7 @@
 			  mos7840_port->read_urb->transfer_buffer_length,
 			  mos7840_bulk_in_callback, mos7840_port);
 
-	dbg("mos7840_open: bulkin endpoint is %d\n",
+	dbg("mos7840_open: bulkin endpoint is %d",
 	    port->bulk_in_endpointAddress);
 	mos7840_port->read_urb_busy = true;
 	response = usb_submit_urb(mos7840_port->read_urb, GFP_KERNEL);
@@ -1087,9 +1089,11 @@
 	mos7840_port->icount.tx = 0;
 	mos7840_port->icount.rx = 0;
 
-	dbg("\n\nusb_serial serial:%p       mos7840_port:%p\n      usb_serial_port port:%p\n\n",
+	dbg("usb_serial serial:%p       mos7840_port:%p\n      usb_serial_port port:%p",
 				serial, mos7840_port, port);
 
+	dbg("%s leave", __func__);
+
 	return 0;
 
 }
@@ -1112,16 +1116,16 @@
 	unsigned long flags;
 	struct moschip_port *mos7840_port;
 
-	dbg("%s \n", " mos7840_chars_in_buffer:entering ...........");
+	dbg("%s", " mos7840_chars_in_buffer:entering ...........");
 
 	if (mos7840_port_paranoia_check(port, __func__)) {
-		dbg("%s", "Invalid port \n");
+		dbg("%s", "Invalid port");
 		return 0;
 	}
 
 	mos7840_port = mos7840_get_port_private(port);
 	if (mos7840_port == NULL) {
-		dbg("%s \n", "mos7840_break:leaving ...........");
+		dbg("%s", "mos7840_break:leaving ...........");
 		return 0;
 	}
 
@@ -1148,16 +1152,16 @@
 	int j;
 	__u16 Data;
 
-	dbg("%s\n", "mos7840_close:entering...");
+	dbg("%s", "mos7840_close:entering...");
 
 	if (mos7840_port_paranoia_check(port, __func__)) {
-		dbg("%s", "Port Paranoia failed \n");
+		dbg("%s", "Port Paranoia failed");
 		return;
 	}
 
 	serial = mos7840_get_usb_serial(port, __func__);
 	if (!serial) {
-		dbg("%s", "Serial Paranoia failed \n");
+		dbg("%s", "Serial Paranoia failed");
 		return;
 	}
 
@@ -1185,27 +1189,27 @@
 	 * and interrupt read if they exists                  */
 	if (serial->dev) {
 		if (mos7840_port->write_urb) {
-			dbg("%s", "Shutdown bulk write\n");
+			dbg("%s", "Shutdown bulk write");
 			usb_kill_urb(mos7840_port->write_urb);
 		}
 		if (mos7840_port->read_urb) {
-			dbg("%s", "Shutdown bulk read\n");
+			dbg("%s", "Shutdown bulk read");
 			usb_kill_urb(mos7840_port->read_urb);
 			mos7840_port->read_urb_busy = false;
 		}
 		if ((&mos7840_port->control_urb)) {
-			dbg("%s", "Shutdown control read\n");
+			dbg("%s", "Shutdown control read");
 			/*/      usb_kill_urb (mos7840_port->control_urb); */
 		}
 	}
 /*      if(mos7840_port->ctrl_buf != NULL) */
 /*              kfree(mos7840_port->ctrl_buf); */
 	port0->open_ports--;
-	dbg("mos7840_num_open_ports in close%d:in port%d\n",
+	dbg("mos7840_num_open_ports in close%d:in port%d",
 	    port0->open_ports, port->number);
 	if (port0->open_ports == 0) {
 		if (serial->port[0]->interrupt_in_urb) {
-			dbg("%s", "Shutdown interrupt_in_urb\n");
+			dbg("%s", "Shutdown interrupt_in_urb");
 			usb_kill_urb(serial->port[0]->interrupt_in_urb);
 		}
 	}
@@ -1225,7 +1229,7 @@
 
 	mos7840_port->open = 0;
 
-	dbg("%s \n", "Leaving ............");
+	dbg("%s", "Leaving ............");
 }
 
 /************************************************************************
@@ -1280,17 +1284,17 @@
 	struct usb_serial *serial;
 	struct moschip_port *mos7840_port;
 
-	dbg("%s \n", "Entering ...........");
-	dbg("mos7840_break: Start\n");
+	dbg("%s", "Entering ...........");
+	dbg("mos7840_break: Start");
 
 	if (mos7840_port_paranoia_check(port, __func__)) {
-		dbg("%s", "Port Paranoia failed \n");
+		dbg("%s", "Port Paranoia failed");
 		return;
 	}
 
 	serial = mos7840_get_usb_serial(port, __func__);
 	if (!serial) {
-		dbg("%s", "Serial Paranoia failed \n");
+		dbg("%s", "Serial Paranoia failed");
 		return;
 	}
 
@@ -1310,7 +1314,7 @@
 
 	/* FIXME: no locking on shadowLCR anywhere in driver */
 	mos7840_port->shadowLCR = data;
-	dbg("mcs7840_break mos7840_port->shadowLCR is %x\n",
+	dbg("mcs7840_break mos7840_port->shadowLCR is %x",
 	    mos7840_port->shadowLCR);
 	mos7840_set_uart_reg(port, LINE_CONTROL_REGISTER,
 			     mos7840_port->shadowLCR);
@@ -1334,17 +1338,17 @@
 	unsigned long flags;
 	struct moschip_port *mos7840_port;
 
-	dbg("%s \n", " mos7840_write_room:entering ...........");
+	dbg("%s", " mos7840_write_room:entering ...........");
 
 	if (mos7840_port_paranoia_check(port, __func__)) {
-		dbg("%s", "Invalid port \n");
-		dbg("%s \n", " mos7840_write_room:leaving ...........");
+		dbg("%s", "Invalid port");
+		dbg("%s", " mos7840_write_room:leaving ...........");
 		return -1;
 	}
 
 	mos7840_port = mos7840_get_port_private(port);
 	if (mos7840_port == NULL) {
-		dbg("%s \n", "mos7840_break:leaving ...........");
+		dbg("%s", "mos7840_break:leaving ...........");
 		return -1;
 	}
 
@@ -1384,16 +1388,16 @@
 	/* __u16 Data; */
 	const unsigned char *current_position = data;
 	unsigned char *data1;
-	dbg("%s \n", "entering ...........");
-	/* dbg("mos7840_write: mos7840_port->shadowLCR is %x\n",
+	dbg("%s", "entering ...........");
+	/* dbg("mos7840_write: mos7840_port->shadowLCR is %x",
 					mos7840_port->shadowLCR); */
 
 #ifdef NOTMOS7840
 	Data = 0x00;
 	status = mos7840_get_uart_reg(port, LINE_CONTROL_REGISTER, &Data);
 	mos7840_port->shadowLCR = Data;
-	dbg("mos7840_write: LINE_CONTROL_REGISTER is %x\n", Data);
-	dbg("mos7840_write: mos7840_port->shadowLCR is %x\n",
+	dbg("mos7840_write: LINE_CONTROL_REGISTER is %x", Data);
+	dbg("mos7840_write: mos7840_port->shadowLCR is %x",
 	    mos7840_port->shadowLCR);
 
 	/* Data = 0x03; */
@@ -1407,32 +1411,32 @@
 	/* status = mos7840_set_uart_reg(port,DIVISOR_LATCH_LSB,Data); */
 	Data = 0x00;
 	status = mos7840_get_uart_reg(port, DIVISOR_LATCH_LSB, &Data);
-	dbg("mos7840_write:DLL value is %x\n", Data);
+	dbg("mos7840_write:DLL value is %x", Data);
 
 	Data = 0x0;
 	status = mos7840_get_uart_reg(port, DIVISOR_LATCH_MSB, &Data);
-	dbg("mos7840_write:DLM value is %x\n", Data);
+	dbg("mos7840_write:DLM value is %x", Data);
 
 	Data = Data & ~SERIAL_LCR_DLAB;
-	dbg("mos7840_write: mos7840_port->shadowLCR is %x\n",
+	dbg("mos7840_write: mos7840_port->shadowLCR is %x",
 	    mos7840_port->shadowLCR);
 	status = mos7840_set_uart_reg(port, LINE_CONTROL_REGISTER, Data);
 #endif
 
 	if (mos7840_port_paranoia_check(port, __func__)) {
-		dbg("%s", "Port Paranoia failed \n");
+		dbg("%s", "Port Paranoia failed");
 		return -1;
 	}
 
 	serial = port->serial;
 	if (mos7840_serial_paranoia_check(serial, __func__)) {
-		dbg("%s", "Serial Paranoia failed \n");
+		dbg("%s", "Serial Paranoia failed");
 		return -1;
 	}
 
 	mos7840_port = mos7840_get_port_private(port);
 	if (mos7840_port == NULL) {
-		dbg("%s", "mos7840_port is NULL\n");
+		dbg("%s", "mos7840_port is NULL");
 		return -1;
 	}
 
@@ -1444,7 +1448,7 @@
 		if (!mos7840_port->busy[i]) {
 			mos7840_port->busy[i] = 1;
 			urb = mos7840_port->write_urb_pool[i];
-			dbg("\nURB:%d", i);
+			dbg("URB:%d", i);
 			break;
 		}
 	}
@@ -1479,7 +1483,7 @@
 			  mos7840_bulk_out_data_callback, mos7840_port);
 
 	data1 = urb->transfer_buffer;
-	dbg("\nbulkout endpoint is %d", port->bulk_out_endpointAddress);
+	dbg("bulkout endpoint is %d", port->bulk_out_endpointAddress);
 
 	/* send it down the pipe */
 	status = usb_submit_urb(urb, GFP_ATOMIC);
@@ -1494,7 +1498,7 @@
 	bytes_sent = transfer_size;
 	mos7840_port->icount.tx += transfer_size;
 	smp_wmb();
-	dbg("mos7840_port->icount.tx is %d:\n", mos7840_port->icount.tx);
+	dbg("mos7840_port->icount.tx is %d:", mos7840_port->icount.tx);
 exit:
 	return bytes_sent;
 
@@ -1513,11 +1517,11 @@
 	int status;
 
 	if (mos7840_port_paranoia_check(port, __func__)) {
-		dbg("%s", "Invalid port \n");
+		dbg("%s", "Invalid port");
 		return;
 	}
 
-	dbg("- port %d\n", port->number);
+	dbg("- port %d", port->number);
 
 	mos7840_port = mos7840_get_port_private(port);
 
@@ -1525,11 +1529,11 @@
 		return;
 
 	if (!mos7840_port->open) {
-		dbg("%s\n", "port not opened");
+		dbg("%s", "port not opened");
 		return;
 	}
 
-	dbg("%s", "Entering .......... \n");
+	dbg("%s", "Entering ..........");
 
 	/* if we are implementing XON/XOFF, send the stop character */
 	if (I_IXOFF(tty)) {
@@ -1563,7 +1567,7 @@
 	struct moschip_port *mos7840_port = mos7840_get_port_private(port);
 
 	if (mos7840_port_paranoia_check(port, __func__)) {
-		dbg("%s", "Invalid port \n");
+		dbg("%s", "Invalid port");
 		return;
 	}
 
@@ -1575,7 +1579,7 @@
 		return;
 	}
 
-	dbg("%s", "Entering .......... \n");
+	dbg("%s", "Entering ..........");
 
 	/* if we are implementing XON/XOFF, send the start character */
 	if (I_IXOFF(tty)) {
@@ -1660,7 +1664,7 @@
 
 	status = mos7840_set_uart_reg(port, MODEM_CONTROL_REGISTER, mcr);
 	if (status < 0) {
-		dbg("setting MODEM_CONTROL_REGISTER Failed\n");
+		dbg("setting MODEM_CONTROL_REGISTER Failed");
 		return status;
 	}
 
@@ -1729,11 +1733,11 @@
 			custom++;
 		*divisor = custom;
 
-		dbg(" Baud %d = %d\n", baudrate, custom);
+		dbg(" Baud %d = %d", baudrate, custom);
 		return 0;
 	}
 
-	dbg("%s\n", " Baud calculation Failed...");
+	dbg("%s", " Baud calculation Failed...");
 	return -1;
 #endif
 }
@@ -1759,16 +1763,16 @@
 
 	port = (struct usb_serial_port *)mos7840_port->port;
 	if (mos7840_port_paranoia_check(port, __func__)) {
-		dbg("%s", "Invalid port \n");
+		dbg("%s", "Invalid port");
 		return -1;
 	}
 
 	if (mos7840_serial_paranoia_check(port->serial, __func__)) {
-		dbg("%s", "Invalid Serial \n");
+		dbg("%s", "Invalid Serial");
 		return -1;
 	}
 
-	dbg("%s", "Entering .......... \n");
+	dbg("%s", "Entering ..........");
 
 	number = mos7840_port->port->number - mos7840_port->port->serial->minor;
 
@@ -1784,7 +1788,7 @@
 		status = mos7840_set_uart_reg(port, MODEM_CONTROL_REGISTER,
 									Data);
 		if (status < 0) {
-			dbg("Writing spreg failed in set_serial_baud\n");
+			dbg("Writing spreg failed in set_serial_baud");
 			return -1;
 		}
 #endif
@@ -1797,7 +1801,7 @@
 		status = mos7840_set_uart_reg(port, MODEM_CONTROL_REGISTER,
 									Data);
 		if (status < 0) {
-			dbg("Writing spreg failed in set_serial_baud\n");
+			dbg("Writing spreg failed in set_serial_baud");
 			return -1;
 		}
 #endif
@@ -1812,14 +1816,14 @@
 		status = mos7840_get_reg_sync(port, mos7840_port->SpRegOffset,
 								 &Data);
 		if (status < 0) {
-			dbg("reading spreg failed in set_serial_baud\n");
+			dbg("reading spreg failed in set_serial_baud");
 			return -1;
 		}
 		Data = (Data & 0x8f) | clk_sel_val;
 		status = mos7840_set_reg_sync(port, mos7840_port->SpRegOffset,
 								Data);
 		if (status < 0) {
-			dbg("Writing spreg failed in set_serial_baud\n");
+			dbg("Writing spreg failed in set_serial_baud");
 			return -1;
 		}
 		/* Calculate the Divisor */
@@ -1835,11 +1839,11 @@
 
 		/* Write the divisor */
 		Data = (unsigned char)(divisor & 0xff);
-		dbg("set_serial_baud Value to write DLL is %x\n", Data);
+		dbg("set_serial_baud Value to write DLL is %x", Data);
 		mos7840_set_uart_reg(port, DIVISOR_LATCH_LSB, Data);
 
 		Data = (unsigned char)((divisor & 0xff00) >> 8);
-		dbg("set_serial_baud Value to write DLM is %x\n", Data);
+		dbg("set_serial_baud Value to write DLM is %x", Data);
 		mos7840_set_uart_reg(port, DIVISOR_LATCH_MSB, Data);
 
 		/* Disable access to divisor latch */
@@ -1877,12 +1881,12 @@
 	port = (struct usb_serial_port *)mos7840_port->port;
 
 	if (mos7840_port_paranoia_check(port, __func__)) {
-		dbg("%s", "Invalid port \n");
+		dbg("%s", "Invalid port");
 		return;
 	}
 
 	if (mos7840_serial_paranoia_check(port->serial, __func__)) {
-		dbg("%s", "Invalid Serial \n");
+		dbg("%s", "Invalid Serial");
 		return;
 	}
 
@@ -1895,7 +1899,7 @@
 		return;
 	}
 
-	dbg("%s", "Entering .......... \n");
+	dbg("%s", "Entering ..........");
 
 	lData = LCR_BITS_8;
 	lStop = LCR_STOP_1;
@@ -1955,7 +1959,7 @@
 	    ~(LCR_BITS_MASK | LCR_STOP_MASK | LCR_PAR_MASK);
 	mos7840_port->shadowLCR |= (lData | lParity | lStop);
 
-	dbg("mos7840_change_port_settings mos7840_port->shadowLCR is %x\n",
+	dbg("mos7840_change_port_settings mos7840_port->shadowLCR is %x",
 	    mos7840_port->shadowLCR);
 	/* Disable Interrupts */
 	Data = 0x00;
@@ -1997,7 +2001,7 @@
 
 	if (!baud) {
 		/* pick a default, any default... */
-		dbg("%s\n", "Picked default baud...");
+		dbg("%s", "Picked default baud...");
 		baud = 9600;
 	}
 
@@ -2020,7 +2024,7 @@
 	}
 	wake_up(&mos7840_port->delta_msr_wait);
 	mos7840_port->delta_msr_cond = 1;
-	dbg("mos7840_change_port_settings mos7840_port->shadowLCR is End %x\n",
+	dbg("mos7840_change_port_settings mos7840_port->shadowLCR is End %x",
 	    mos7840_port->shadowLCR);
 
 	return;
@@ -2040,16 +2044,16 @@
 	unsigned int cflag;
 	struct usb_serial *serial;
 	struct moschip_port *mos7840_port;
-	dbg("mos7840_set_termios: START\n");
+	dbg("mos7840_set_termios: START");
 	if (mos7840_port_paranoia_check(port, __func__)) {
-		dbg("%s", "Invalid port \n");
+		dbg("%s", "Invalid port");
 		return;
 	}
 
 	serial = port->serial;
 
 	if (mos7840_serial_paranoia_check(serial, __func__)) {
-		dbg("%s", "Invalid Serial \n");
+		dbg("%s", "Invalid Serial");
 		return;
 	}
 
@@ -2063,7 +2067,7 @@
 		return;
 	}
 
-	dbg("%s\n", "setting termios - ");
+	dbg("%s", "setting termios - ");
 
 	cflag = tty->termios->c_cflag;
 
@@ -2078,7 +2082,7 @@
 	mos7840_change_port_settings(tty, mos7840_port, old_termios);
 
 	if (!mos7840_port->read_urb) {
-		dbg("%s", "URB KILLED !!!!!\n");
+		dbg("%s", "URB KILLED !!!!!");
 		return;
 	}
 
@@ -2144,7 +2148,7 @@
 
 	port = (struct usb_serial_port *)mos7840_port->port;
 	if (mos7840_port_paranoia_check(port, __func__)) {
-		dbg("%s", "Invalid port \n");
+		dbg("%s", "Invalid port");
 		return -1;
 	}
 
@@ -2189,7 +2193,7 @@
 	status = mos7840_set_uart_reg(port, MODEM_CONTROL_REGISTER, Data);
 	unlock_kernel();
 	if (status < 0) {
-		dbg("setting MODEM_CONTROL_REGISTER Failed\n");
+		dbg("setting MODEM_CONTROL_REGISTER Failed");
 		return -1;
 	}
 
@@ -2274,7 +2278,7 @@
 	int mosret = 0;
 
 	if (mos7840_port_paranoia_check(port, __func__)) {
-		dbg("%s", "Invalid port \n");
+		dbg("%s", "Invalid port");
 		return -1;
 	}
 
@@ -2374,9 +2378,8 @@
 {
 	int mos7840_num_ports = 0;
 
-	dbg("numberofendpoints: %d \n",
-	    (int)serial->interface->cur_altsetting->desc.bNumEndpoints);
-	dbg("numberofendpoints: %d \n",
+	dbg("numberofendpoints: cur %d, alt %d",
+	    (int)serial->interface->cur_altsetting->desc.bNumEndpoints,
 	    (int)serial->interface->altsetting->desc.bNumEndpoints);
 	if (serial->interface->cur_altsetting->desc.bNumEndpoints == 5) {
 		mos7840_num_ports = serial->num_ports = 2;
@@ -2385,7 +2388,7 @@
 		serial->num_bulk_out = 4;
 		mos7840_num_ports = serial->num_ports = 4;
 	}
-
+	dbg("mos7840_num_ports = %d", mos7840_num_ports);
 	return mos7840_num_ports;
 }
 
@@ -2400,22 +2403,24 @@
 	int i, status;
 
 	__u16 Data;
-	dbg("%s \n", " mos7840_startup :entering..........");
+	dbg("%s", "mos7840_startup :Entering..........");
 
 	if (!serial) {
-		dbg("%s\n", "Invalid Handler");
+		dbg("%s", "Invalid Handler");
 		return -1;
 	}
 
 	dev = serial->dev;
 
-	dbg("%s\n", "Entering...");
+	dbg("%s", "Entering...");
+	dbg("mos7840_startup: serial = %p", serial);
 
 	/* we set up the pointers to the endpoints in the mos7840_open *
 	 * function, as the structures aren't created yet.             */
 
 	/* set up port private structures */
 	for (i = 0; i < serial->num_ports; ++i) {
+		dbg("mos7840_startup: configuring port %d............", i);
 		mos7840_port = kzalloc(sizeof(struct moschip_port), GFP_KERNEL);
 		if (mos7840_port == NULL) {
 			dev_err(&dev->dev, "%s - Out of memory\n", __func__);
@@ -2473,10 +2478,10 @@
 		status = mos7840_get_reg_sync(serial->port[i],
 				 mos7840_port->ControlRegOffset, &Data);
 		if (status < 0) {
-			dbg("Reading ControlReg failed status-0x%x\n", status);
+			dbg("Reading ControlReg failed status-0x%x", status);
 			break;
 		} else
-			dbg("ControlReg Reading success val is %x, status%d\n",
+			dbg("ControlReg Reading success val is %x, status%d",
 			    Data, status);
 		Data |= 0x08;	/* setting driver done bit */
 		Data |= 0x04;	/* sp1_bit to have cts change reflect in
@@ -2486,10 +2491,10 @@
 		status = mos7840_set_reg_sync(serial->port[i],
 					 mos7840_port->ControlRegOffset, Data);
 		if (status < 0) {
-			dbg("Writing ControlReg failed(rx_disable) status-0x%x\n", status);
+			dbg("Writing ControlReg failed(rx_disable) status-0x%x", status);
 			break;
 		} else
-			dbg("ControlReg Writing success(rx_disable) status%d\n",
+			dbg("ControlReg Writing success(rx_disable) status%d",
 			    status);
 
 		/* Write default values in DCR (i.e 0x01 in DCR0, 0x05 in DCR2
@@ -2498,48 +2503,48 @@
 		status = mos7840_set_reg_sync(serial->port[i],
 			 (__u16) (mos7840_port->DcrRegOffset + 0), Data);
 		if (status < 0) {
-			dbg("Writing DCR0 failed status-0x%x\n", status);
+			dbg("Writing DCR0 failed status-0x%x", status);
 			break;
 		} else
-			dbg("DCR0 Writing success status%d\n", status);
+			dbg("DCR0 Writing success status%d", status);
 
 		Data = 0x05;
 		status = mos7840_set_reg_sync(serial->port[i],
 			 (__u16) (mos7840_port->DcrRegOffset + 1), Data);
 		if (status < 0) {
-			dbg("Writing DCR1 failed status-0x%x\n", status);
+			dbg("Writing DCR1 failed status-0x%x", status);
 			break;
 		} else
-			dbg("DCR1 Writing success status%d\n", status);
+			dbg("DCR1 Writing success status%d", status);
 
 		Data = 0x24;
 		status = mos7840_set_reg_sync(serial->port[i],
 			 (__u16) (mos7840_port->DcrRegOffset + 2), Data);
 		if (status < 0) {
-			dbg("Writing DCR2 failed status-0x%x\n", status);
+			dbg("Writing DCR2 failed status-0x%x", status);
 			break;
 		} else
-			dbg("DCR2 Writing success status%d\n", status);
+			dbg("DCR2 Writing success status%d", status);
 
 		/* write values in clkstart0x0 and clkmulti 0x20 */
 		Data = 0x0;
 		status = mos7840_set_reg_sync(serial->port[i],
 					 CLK_START_VALUE_REGISTER, Data);
 		if (status < 0) {
-			dbg("Writing CLK_START_VALUE_REGISTER failed status-0x%x\n", status);
+			dbg("Writing CLK_START_VALUE_REGISTER failed status-0x%x", status);
 			break;
 		} else
-			dbg("CLK_START_VALUE_REGISTER Writing success status%d\n", status);
+			dbg("CLK_START_VALUE_REGISTER Writing success status%d", status);
 
 		Data = 0x20;
 		status = mos7840_set_reg_sync(serial->port[i],
 					CLK_MULTI_REGISTER, Data);
 		if (status < 0) {
-			dbg("Writing CLK_MULTI_REGISTER failed status-0x%x\n",
+			dbg("Writing CLK_MULTI_REGISTER failed status-0x%x",
 			    status);
 			goto error;
 		} else
-			dbg("CLK_MULTI_REGISTER Writing success status%d\n",
+			dbg("CLK_MULTI_REGISTER Writing success status%d",
 			    status);
 
 		/* write value 0x0 to scratchpad register */
@@ -2547,11 +2552,11 @@
 		status = mos7840_set_uart_reg(serial->port[i],
 						SCRATCH_PAD_REGISTER, Data);
 		if (status < 0) {
-			dbg("Writing SCRATCH_PAD_REGISTER failed status-0x%x\n",
+			dbg("Writing SCRATCH_PAD_REGISTER failed status-0x%x",
 			    status);
 			break;
 		} else
-			dbg("SCRATCH_PAD_REGISTER Writing success status%d\n",
+			dbg("SCRATCH_PAD_REGISTER Writing success status%d",
 			    status);
 
 		/* Zero Length flag register */
@@ -2562,30 +2567,30 @@
 			status = mos7840_set_reg_sync(serial->port[i],
 				      (__u16) (ZLP_REG1 +
 				      ((__u16)mos7840_port->port_num)), Data);
-			dbg("ZLIP offset%x\n",
+			dbg("ZLIP offset %x",
 			    (__u16) (ZLP_REG1 +
 					((__u16) mos7840_port->port_num)));
 			if (status < 0) {
-				dbg("Writing ZLP_REG%d failed status-0x%x\n",
+				dbg("Writing ZLP_REG%d failed status-0x%x",
 				    i + 2, status);
 				break;
 			} else
-				dbg("ZLP_REG%d Writing success status%d\n",
+				dbg("ZLP_REG%d Writing success status%d",
 				    i + 2, status);
 		} else {
 			Data = 0xff;
 			status = mos7840_set_reg_sync(serial->port[i],
 			      (__u16) (ZLP_REG1 +
 			      ((__u16)mos7840_port->port_num) - 0x1), Data);
-			dbg("ZLIP offset%x\n",
+			dbg("ZLIP offset %x",
 			    (__u16) (ZLP_REG1 +
 				     ((__u16) mos7840_port->port_num) - 0x1));
 			if (status < 0) {
-				dbg("Writing ZLP_REG%d failed status-0x%x\n",
+				dbg("Writing ZLP_REG%d failed status-0x%x",
 				    i + 1, status);
 				break;
 			} else
-				dbg("ZLP_REG%d Writing success status%d\n",
+				dbg("ZLP_REG%d Writing success status%d",
 				    i + 1, status);
 
 		}
@@ -2599,15 +2604,16 @@
 			goto error;
 		}
 	}
+	dbg("mos7840_startup: all ports configured...........");
 
 	/* Zero Length flag enable */
 	Data = 0x0f;
 	status = mos7840_set_reg_sync(serial->port[0], ZLP_REG5, Data);
 	if (status < 0) {
-		dbg("Writing ZLP_REG5 failed status-0x%x\n", status);
+		dbg("Writing ZLP_REG5 failed status-0x%x", status);
 		goto error;
 	} else
-		dbg("ZLP_REG5 Writing success status%d\n", status);
+		dbg("ZLP_REG5 Writing success status%d", status);
 
 	/* setting configuration feature to one */
 	usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0),
@@ -2627,19 +2633,19 @@
 }
 
 /****************************************************************************
- * mos7840_shutdown
+ * mos7840_disconnect
  *	This function is called whenever the device is removed from the usb bus.
  ****************************************************************************/
 
-static void mos7840_shutdown(struct usb_serial *serial)
+static void mos7840_disconnect(struct usb_serial *serial)
 {
 	int i;
 	unsigned long flags;
 	struct moschip_port *mos7840_port;
-	dbg("%s \n", " shutdown :entering..........");
+	dbg("%s", " disconnect :entering..........");
 
 	if (!serial) {
-		dbg("%s", "Invalid Handler \n");
+		dbg("%s", "Invalid Handler");
 		return;
 	}
 
@@ -2656,14 +2662,45 @@
 			mos7840_port->zombie = 1;
 			spin_unlock_irqrestore(&mos7840_port->pool_lock, flags);
 			usb_kill_urb(mos7840_port->control_urb);
+		}
+	}
+
+	dbg("%s", "Thank u :: ");
+
+}
+
+/****************************************************************************
+ * mos7840_release
+ *	This function is called when the usb_serial structure is freed.
+ ****************************************************************************/
+
+static void mos7840_release(struct usb_serial *serial)
+{
+	int i;
+	struct moschip_port *mos7840_port;
+	dbg("%s", " release :entering..........");
+
+	if (!serial) {
+		dbg("%s", "Invalid Handler");
+		return;
+	}
+
+	/* check for the ports to be closed,close the ports and disconnect */
+
+	/* free private structure allocated for serial port  *
+	 * stop reads and writes on all ports                */
+
+	for (i = 0; i < serial->num_ports; ++i) {
+		mos7840_port = mos7840_get_port_private(serial->port[i]);
+		dbg("mos7840_port %d = %p", i, mos7840_port);
+		if (mos7840_port) {
 			kfree(mos7840_port->ctrl_buf);
 			kfree(mos7840_port->dr);
 			kfree(mos7840_port);
 		}
-		mos7840_set_port_private(serial->port[i], NULL);
 	}
 
-	dbg("%s\n", "Thank u :: ");
+	dbg("%s", "Thank u :: ");
 
 }
 
@@ -2701,7 +2738,8 @@
 	.tiocmget = mos7840_tiocmget,
 	.tiocmset = mos7840_tiocmset,
 	.attach = mos7840_startup,
-	.shutdown = mos7840_shutdown,
+	.disconnect = mos7840_disconnect,
+	.release = mos7840_release,
 	.read_bulk_callback = mos7840_bulk_in_callback,
 	.read_int_callback = mos7840_interrupt_callback,
 };
@@ -2714,7 +2752,7 @@
 {
 	int retval;
 
-	dbg("%s \n", " mos7840_init :entering..........");
+	dbg("%s", " mos7840_init :entering..........");
 
 	/* Register with the usb serial */
 	retval = usb_serial_register(&moschip7840_4port_device);
@@ -2722,14 +2760,14 @@
 	if (retval)
 		goto failed_port_device_register;
 
-	dbg("%s\n", "Entring...");
+	dbg("%s", "Entering...");
 	printk(KERN_INFO KBUILD_MODNAME ": " DRIVER_VERSION ":"
 	       DRIVER_DESC "\n");
 
 	/* Register with the usb */
 	retval = usb_register(&io_driver);
 	if (retval == 0) {
-		dbg("%s\n", "Leaving...");
+		dbg("%s", "Leaving...");
 		return 0;
 	}
 	usb_serial_deregister(&moschip7840_4port_device);
@@ -2744,13 +2782,13 @@
 static void __exit moschip7840_exit(void)
 {
 
-	dbg("%s \n", " mos7840_exit :entering..........");
+	dbg("%s", " mos7840_exit :entering..........");
 
 	usb_deregister(&io_driver);
 
 	usb_serial_deregister(&moschip7840_4port_device);
 
-	dbg("%s\n", "Entring...");
+	dbg("%s", "Entering...");
 }
 
 module_init(moschip7840_init);
diff --git a/drivers/usb/serial/omninet.c b/drivers/usb/serial/omninet.c
index 1104617..56857dd 100644
--- a/drivers/usb/serial/omninet.c
+++ b/drivers/usb/serial/omninet.c
@@ -72,7 +72,8 @@
 static int  omninet_write(struct tty_struct *tty, struct usb_serial_port *port,
 				const unsigned char *buf, int count);
 static int  omninet_write_room(struct tty_struct *tty);
-static void omninet_shutdown(struct usb_serial *serial);
+static void omninet_disconnect(struct usb_serial *serial);
+static void omninet_release(struct usb_serial *serial);
 static int omninet_attach(struct usb_serial *serial);
 
 static struct usb_device_id id_table[] = {
@@ -108,7 +109,8 @@
 	.write_room =		omninet_write_room,
 	.read_bulk_callback =	omninet_read_bulk_callback,
 	.write_bulk_callback =	omninet_write_bulk_callback,
-	.shutdown =		omninet_shutdown,
+	.disconnect =		omninet_disconnect,
+	.release =		omninet_release,
 };
 
 
@@ -345,13 +347,22 @@
 }
 
 
-static void omninet_shutdown(struct usb_serial *serial)
+static void omninet_disconnect(struct usb_serial *serial)
 {
 	struct usb_serial_port *wport = serial->port[1];
-	struct usb_serial_port *port = serial->port[0];
+
 	dbg("%s", __func__);
 
 	usb_kill_urb(wport->write_urb);
+}
+
+
+static void omninet_release(struct usb_serial *serial)
+{
+	struct usb_serial_port *port = serial->port[0];
+
+	dbg("%s", __func__);
+
 	kfree(usb_get_serial_port_data(port));
 }
 
diff --git a/drivers/usb/serial/opticon.c b/drivers/usb/serial/opticon.c
index c20480a..336bba7 100644
--- a/drivers/usb/serial/opticon.c
+++ b/drivers/usb/serial/opticon.c
@@ -463,7 +463,7 @@
 	return retval;
 }
 
-static void opticon_shutdown(struct usb_serial *serial)
+static void opticon_disconnect(struct usb_serial *serial)
 {
 	struct opticon_private *priv = usb_get_serial_data(serial);
 
@@ -471,9 +471,16 @@
 
 	usb_kill_urb(priv->bulk_read_urb);
 	usb_free_urb(priv->bulk_read_urb);
+}
+
+static void opticon_release(struct usb_serial *serial)
+{
+	struct opticon_private *priv = usb_get_serial_data(serial);
+
+	dbg("%s", __func__);
+
 	kfree(priv->bulk_in_buffer);
 	kfree(priv);
-	usb_set_serial_data(serial, NULL);
 }
 
 static int opticon_suspend(struct usb_interface *intf, pm_message_t message)
@@ -524,7 +531,8 @@
 	.close =		opticon_close,
 	.write =		opticon_write,
 	.write_room = 		opticon_write_room,
-	.shutdown =		opticon_shutdown,
+	.disconnect =		opticon_disconnect,
+	.release =		opticon_release,
 	.throttle = 		opticon_throttle,
 	.unthrottle =		opticon_unthrottle,
 	.ioctl =		opticon_ioctl,
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index a16d69f..575816e 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -43,13 +43,16 @@
 #include <linux/usb/serial.h>
 
 /* Function prototypes */
+static int  option_probe(struct usb_serial *serial,
+			const struct usb_device_id *id);
 static int  option_open(struct tty_struct *tty, struct usb_serial_port *port,
 							struct file *filp);
 static void option_close(struct usb_serial_port *port);
 static void option_dtr_rts(struct usb_serial_port *port, int on);
 
 static int  option_startup(struct usb_serial *serial);
-static void option_shutdown(struct usb_serial *serial);
+static void option_disconnect(struct usb_serial *serial);
+static void option_release(struct usb_serial *serial);
 static int  option_write_room(struct tty_struct *tty);
 
 static void option_instat_callback(struct urb *urb);
@@ -202,9 +205,9 @@
 #define NOVATELWIRELESS_PRODUCT_MC727		0x4100
 #define NOVATELWIRELESS_PRODUCT_MC950D		0x4400
 #define NOVATELWIRELESS_PRODUCT_U727		0x5010
+#define NOVATELWIRELESS_PRODUCT_MC760		0x6000
 
 /* FUTURE NOVATEL PRODUCTS */
-#define NOVATELWIRELESS_PRODUCT_EVDO_FULLSPEED	0X6000
 #define NOVATELWIRELESS_PRODUCT_EVDO_HIGHSPEED	0X6001
 #define NOVATELWIRELESS_PRODUCT_HSPA_FULLSPEED	0X7000
 #define NOVATELWIRELESS_PRODUCT_HSPA_HIGHSPEED	0X7001
@@ -305,6 +308,10 @@
 #define DLINK_PRODUCT_DWM_652			0x3e04
 
 
+/* TOSHIBA PRODUCTS */
+#define TOSHIBA_VENDOR_ID			0x0930
+#define TOSHIBA_PRODUCT_HSDPA_MINICARD		0x1302
+
 static struct usb_device_id option_ids[] = {
 	{ USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_COLT) },
 	{ USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_RICOLA) },
@@ -422,7 +429,7 @@
 	{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC950D) }, /* Novatel MC930D/MC950D */
 	{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC727) }, /* Novatel MC727/U727/USB727 */
 	{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_U727) }, /* Novatel MC727/U727/USB727 */
-	{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EVDO_FULLSPEED) }, /* Novatel EVDO product */
+	{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC760) }, /* Novatel MC760/U760/USB760 */
 	{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_FULLSPEED) }, /* Novatel HSPA product */
 	{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EVDO_EMBEDDED_FULLSPEED) }, /* Novatel EVDO Embedded product */
 	{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_EMBEDDED_FULLSPEED) }, /* Novatel HSPA Embedded product */
@@ -523,6 +530,7 @@
 	{ USB_DEVICE(BENQ_VENDOR_ID, BENQ_PRODUCT_H10) },
 	{ USB_DEVICE(DLINK_VENDOR_ID, DLINK_PRODUCT_DWM_652) },
 	{ USB_DEVICE(0x1da5, 0x4515) }, /* BenQ H20 */
+	{ USB_DEVICE(TOSHIBA_VENDOR_ID, TOSHIBA_PRODUCT_HSDPA_MINICARD) }, /* Toshiba 3G HSDPA == Novatel Expedite EU870D MiniCard */
 	{ } /* Terminating entry */
 };
 MODULE_DEVICE_TABLE(usb, option_ids);
@@ -550,6 +558,7 @@
 	.usb_driver        = &option_driver,
 	.id_table          = option_ids,
 	.num_ports         = 1,
+	.probe             = option_probe,
 	.open              = option_open,
 	.close             = option_close,
 	.dtr_rts	   = option_dtr_rts,
@@ -560,7 +569,8 @@
 	.tiocmget          = option_tiocmget,
 	.tiocmset          = option_tiocmset,
 	.attach            = option_startup,
-	.shutdown          = option_shutdown,
+	.disconnect        = option_disconnect,
+	.release           = option_release,
 	.read_int_callback = option_instat_callback,
 	.suspend           = option_suspend,
 	.resume            = option_resume,
@@ -626,6 +636,18 @@
 module_init(option_init);
 module_exit(option_exit);
 
+static int option_probe(struct usb_serial *serial,
+			const struct usb_device_id *id)
+{
+	/* D-Link DWM 652 still exposes CD-ROM emulation interface in modem mode */
+	if (serial->dev->descriptor.idVendor == DLINK_VENDOR_ID &&
+		serial->dev->descriptor.idProduct == DLINK_PRODUCT_DWM_652 &&
+		serial->interface->cur_altsetting->desc.bInterfaceClass == 0x8)
+		return -ENODEV;
+
+	return 0;
+}
+
 static void option_set_termios(struct tty_struct *tty,
 		struct usb_serial_port *port, struct ktermios *old_termios)
 {
@@ -1129,7 +1151,14 @@
 	}
 }
 
-static void option_shutdown(struct usb_serial *serial)
+static void option_disconnect(struct usb_serial *serial)
+{
+	dbg("%s", __func__);
+
+	stop_read_write_urbs(serial);
+}
+
+static void option_release(struct usb_serial *serial)
 {
 	int i, j;
 	struct usb_serial_port *port;
@@ -1137,8 +1166,6 @@
 
 	dbg("%s", __func__);
 
-	stop_read_write_urbs(serial);
-
 	/* Now free them */
 	for (i = 0; i < serial->num_ports; ++i) {
 		port = serial->port[i];
diff --git a/drivers/usb/serial/oti6858.c b/drivers/usb/serial/oti6858.c
index 7de5478..3cece27 100644
--- a/drivers/usb/serial/oti6858.c
+++ b/drivers/usb/serial/oti6858.c
@@ -159,7 +159,7 @@
 static int oti6858_tiocmset(struct tty_struct *tty, struct file *file,
 				unsigned int set, unsigned int clear);
 static int oti6858_startup(struct usb_serial *serial);
-static void oti6858_shutdown(struct usb_serial *serial);
+static void oti6858_release(struct usb_serial *serial);
 
 /* functions operating on buffers */
 static struct oti6858_buf *oti6858_buf_alloc(unsigned int size);
@@ -194,7 +194,7 @@
 	.write_room =		oti6858_write_room,
 	.chars_in_buffer =	oti6858_chars_in_buffer,
 	.attach =		oti6858_startup,
-	.shutdown =		oti6858_shutdown,
+	.release =		oti6858_release,
 };
 
 struct oti6858_private {
@@ -782,7 +782,7 @@
 }
 
 
-static void oti6858_shutdown(struct usb_serial *serial)
+static void oti6858_release(struct usb_serial *serial)
 {
 	struct oti6858_private *priv;
 	int i;
@@ -794,7 +794,6 @@
 		if (priv) {
 			oti6858_buf_free(priv->buf);
 			kfree(priv);
-			usb_set_serial_port_data(serial->port[i], NULL);
 		}
 	}
 }
diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c
index e02dc3d..ec6c132 100644
--- a/drivers/usb/serial/pl2303.c
+++ b/drivers/usb/serial/pl2303.c
@@ -878,7 +878,7 @@
 		dbg("%s - error sending break = %d", __func__, result);
 }
 
-static void pl2303_shutdown(struct usb_serial *serial)
+static void pl2303_release(struct usb_serial *serial)
 {
 	int i;
 	struct pl2303_private *priv;
@@ -890,7 +890,6 @@
 		if (priv) {
 			pl2303_buf_free(priv->buf);
 			kfree(priv);
-			usb_set_serial_port_data(serial->port[i], NULL);
 		}
 	}
 }
@@ -927,6 +926,8 @@
 	spin_lock_irqsave(&priv->lock, flags);
 	priv->line_status = data[status_idx];
 	spin_unlock_irqrestore(&priv->lock, flags);
+	if (priv->line_status & UART_BREAK_ERROR)
+		usb_serial_handle_break(port);
 	wake_up_interruptible(&priv->delta_msr_wait);
 }
 
@@ -1037,7 +1038,8 @@
 		if (line_status & UART_OVERRUN_ERROR)
 			tty_insert_flip_char(tty, 0, TTY_OVERRUN);
 		for (i = 0; i < urb->actual_length; ++i)
-			tty_insert_flip_char(tty, data[i], tty_flag);
+			if (!usb_serial_handle_sysrq_char(port, data[i]))
+				tty_insert_flip_char(tty, data[i], tty_flag);
 		tty_flip_buffer_push(tty);
 	}
 	tty_kref_put(tty);
@@ -1120,7 +1122,7 @@
 	.write_room =		pl2303_write_room,
 	.chars_in_buffer =	pl2303_chars_in_buffer,
 	.attach =		pl2303_startup,
-	.shutdown =		pl2303_shutdown,
+	.release =		pl2303_release,
 };
 
 static int __init pl2303_init(void)
diff --git a/drivers/usb/serial/sierra.c b/drivers/usb/serial/sierra.c
index 17ac34f..032f7ae 100644
--- a/drivers/usb/serial/sierra.c
+++ b/drivers/usb/serial/sierra.c
@@ -1,7 +1,10 @@
 /*
   USB Driver for Sierra Wireless
 
-  Copyright (C) 2006, 2007, 2008  Kevin Lloyd <klloyd@sierrawireless.com>
+  Copyright (C) 2006, 2007, 2008  Kevin Lloyd <klloyd@sierrawireless.com>,
+
+  Copyright (C) 2008, 2009  Elina Pasheva, Matthew Safar, Rory Filer
+			<linux@sierrawireless.com>
 
   IMPORTANT DISCLAIMER: This driver is not commercially supported by
   Sierra Wireless. Use at your own risk.
@@ -14,8 +17,8 @@
   Whom based his on the Keyspan driver by Hugh Blemings <hugh@blemings.org>
 */
 
-#define DRIVER_VERSION "v.1.3.3"
-#define DRIVER_AUTHOR "Kevin Lloyd <klloyd@sierrawireless.com>"
+#define DRIVER_VERSION "v.1.3.7"
+#define DRIVER_AUTHOR "Kevin Lloyd, Elina Pasheva, Matthew Safar, Rory Filer"
 #define DRIVER_DESC "USB Driver for Sierra Wireless USB modems"
 
 #include <linux/kernel.h>
@@ -30,10 +33,15 @@
 #define SWIMS_USB_REQUEST_SetPower	0x00
 #define SWIMS_USB_REQUEST_SetNmea	0x07
 
-#define N_IN_URB	4
-#define N_OUT_URB	4
+#define N_IN_URB	8
+#define N_OUT_URB	64
 #define IN_BUFLEN	4096
 
+#define MAX_TRANSFER		(PAGE_SIZE - 512)
+/* MAX_TRANSFER is chosen so that the VM is not stressed by
+   allocations > PAGE_SIZE and the number of packets in a page
+   is an integer; 512 is the largest possible packet on EHCI. */
+
 static int debug;
 static int nmea;
 
@@ -46,7 +54,7 @@
 static int sierra_set_power_state(struct usb_device *udev, __u16 swiState)
 {
 	int result;
-	dev_dbg(&udev->dev, "%s", __func__);
+	dev_dbg(&udev->dev, "%s\n", __func__);
 	result = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
 			SWIMS_USB_REQUEST_SetPower,	/* __u8 request      */
 			USB_TYPE_VENDOR,		/* __u8 request type */
@@ -61,7 +69,7 @@
 static int sierra_vsc_set_nmea(struct usb_device *udev, __u16 enable)
 {
 	int result;
-	dev_dbg(&udev->dev, "%s", __func__);
+	dev_dbg(&udev->dev, "%s\n", __func__);
 	result = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
 			SWIMS_USB_REQUEST_SetNmea,	/* __u8 request      */
 			USB_TYPE_VENDOR,		/* __u8 request type */
@@ -75,18 +83,22 @@
 
 static int sierra_calc_num_ports(struct usb_serial *serial)
 {
-	int result;
-	int *num_ports = usb_get_serial_data(serial);
-	dev_dbg(&serial->dev->dev, "%s", __func__);
+	int num_ports = 0;
+	u8 ifnum, numendpoints;
 
-	result = *num_ports;
+	dev_dbg(&serial->dev->dev, "%s\n", __func__);
 
-	if (result) {
-		kfree(num_ports);
-		usb_set_serial_data(serial, NULL);
-	}
+	ifnum = serial->interface->cur_altsetting->desc.bInterfaceNumber;
+	numendpoints = serial->interface->cur_altsetting->desc.bNumEndpoints;
 
-	return result;
+	/* Dummy interface present on some SKUs should be ignored */
+	if (ifnum == 0x99)
+		num_ports = 0;
+	else if (numendpoints <= 3)
+		num_ports = 1;
+	else
+		num_ports = (numendpoints-1)/2;
+	return num_ports;
 }
 
 static int is_blacklisted(const u8 ifnum,
@@ -111,7 +123,7 @@
 	int interface;
 	struct usb_interface *p_interface;
 	struct usb_host_interface *p_host_interface;
-	dev_dbg(&serial->dev->dev, "%s", __func__);
+	dev_dbg(&serial->dev->dev, "%s\n", __func__);
 
 	/* Get the interface structure pointer from the serial struct */
 	p_interface = serial->interface;
@@ -132,23 +144,12 @@
 {
 	int result = 0;
 	struct usb_device *udev;
-	int *num_ports;
 	u8 ifnum;
-	u8 numendpoints;
 
-	dev_dbg(&serial->dev->dev, "%s", __func__);
-
-	num_ports = kmalloc(sizeof(*num_ports), GFP_KERNEL);
-	if (!num_ports)
-		return -ENOMEM;
-
-	ifnum = serial->interface->cur_altsetting->desc.bInterfaceNumber;
-	numendpoints = serial->interface->cur_altsetting->desc.bNumEndpoints;
 	udev = serial->dev;
+	dev_dbg(&udev->dev, "%s\n", __func__);
 
-	/* Figure out the interface number from the serial structure */
 	ifnum = sierra_calc_interface(serial);
-
 	/*
 	 * If this interface supports more than 1 alternate
 	 * select the 2nd one
@@ -160,20 +161,6 @@
 		usb_set_interface(udev, ifnum, 1);
 	}
 
-	/* Dummy interface present on some SKUs should be ignored */
-	if (ifnum == 0x99)
-		*num_ports = 0;
-	else if (numendpoints <= 3)
-		*num_ports = 1;
-	else
-		*num_ports = (numendpoints-1)/2;
-
-	/*
-	 * save off our num_ports info so that we can use it in the
-	 * calc_num_ports callback
-	 */
-	usb_set_serial_data(serial, (void *)num_ports);
-
 	/* ifnum could have changed - by calling usb_set_interface */
 	ifnum = sierra_calc_interface(serial);
 
@@ -289,7 +276,7 @@
 	__u16 interface = 0;
 	int val = 0;
 
-	dev_dbg(&port->dev, "%s", __func__);
+	dev_dbg(&port->dev, "%s\n", __func__);
 
 	portdata = usb_get_serial_port_data(port);
 
@@ -332,7 +319,7 @@
 static void sierra_set_termios(struct tty_struct *tty,
 		struct usb_serial_port *port, struct ktermios *old_termios)
 {
-	dev_dbg(&port->dev, "%s", __func__);
+	dev_dbg(&port->dev, "%s\n", __func__);
 	tty_termios_copy_hw(tty->termios, old_termios);
 	sierra_send_setup(port);
 }
@@ -343,7 +330,7 @@
 	unsigned int value;
 	struct sierra_port_private *portdata;
 
-	dev_dbg(&port->dev, "%s", __func__);
+	dev_dbg(&port->dev, "%s\n", __func__);
 	portdata = usb_get_serial_port_data(port);
 
 	value = ((portdata->rts_state) ? TIOCM_RTS : 0) |
@@ -394,14 +381,14 @@
 	int status = urb->status;
 	unsigned long flags;
 
-	dev_dbg(&port->dev, "%s - port %d", __func__, port->number);
+	dev_dbg(&port->dev, "%s - port %d\n", __func__, port->number);
 
 	/* free up the transfer buffer, as usb_free_urb() does not do this */
 	kfree(urb->transfer_buffer);
 
 	if (status)
 		dev_dbg(&port->dev, "%s - nonzero write bulk status "
-		    "received: %d", __func__, status);
+		    "received: %d\n", __func__, status);
 
 	spin_lock_irqsave(&portdata->lock, flags);
 	--portdata->outstanding_urbs;
@@ -419,50 +406,61 @@
 	unsigned long flags;
 	unsigned char *buffer;
 	struct urb *urb;
-	int status;
+	size_t writesize = min((size_t)count, (size_t)MAX_TRANSFER);
+	int retval = 0;
+
+	/* verify that we actually have some data to write */
+	if (count == 0)
+		return 0;
 
 	portdata = usb_get_serial_port_data(port);
 
-	dev_dbg(&port->dev, "%s: write (%d chars)", __func__, count);
+	dev_dbg(&port->dev, "%s: write (%zd bytes)\n", __func__, writesize);
 
 	spin_lock_irqsave(&portdata->lock, flags);
+	dev_dbg(&port->dev, "%s - outstanding_urbs: %d\n", __func__,
+		portdata->outstanding_urbs);
 	if (portdata->outstanding_urbs > N_OUT_URB) {
 		spin_unlock_irqrestore(&portdata->lock, flags);
 		dev_dbg(&port->dev, "%s - write limit hit\n", __func__);
 		return 0;
 	}
 	portdata->outstanding_urbs++;
+	dev_dbg(&port->dev, "%s - 1, outstanding_urbs: %d\n", __func__,
+		portdata->outstanding_urbs);
 	spin_unlock_irqrestore(&portdata->lock, flags);
 
-	buffer = kmalloc(count, GFP_ATOMIC);
+	buffer = kmalloc(writesize, GFP_ATOMIC);
 	if (!buffer) {
 		dev_err(&port->dev, "out of memory\n");
-		count = -ENOMEM;
+		retval = -ENOMEM;
 		goto error_no_buffer;
 	}
 
 	urb = usb_alloc_urb(0, GFP_ATOMIC);
 	if (!urb) {
 		dev_err(&port->dev, "no more free urbs\n");
-		count = -ENOMEM;
+		retval = -ENOMEM;
 		goto error_no_urb;
 	}
 
-	memcpy(buffer, buf, count);
+	memcpy(buffer, buf, writesize);
 
-	usb_serial_debug_data(debug, &port->dev, __func__, count, buffer);
+	usb_serial_debug_data(debug, &port->dev, __func__, writesize, buffer);
 
 	usb_fill_bulk_urb(urb, serial->dev,
 			  usb_sndbulkpipe(serial->dev,
 					  port->bulk_out_endpointAddress),
-			  buffer, count, sierra_outdat_callback, port);
+			  buffer, writesize, sierra_outdat_callback, port);
+
+	/* Handle the need to send a zero length packet */
+	urb->transfer_flags |= URB_ZERO_PACKET;
 
 	/* send it down the pipe */
-	status = usb_submit_urb(urb, GFP_ATOMIC);
-	if (status) {
+	retval = usb_submit_urb(urb, GFP_ATOMIC);
+	if (retval) {
 		dev_err(&port->dev, "%s - usb_submit_urb(write bulk) failed "
-			"with status = %d\n", __func__, status);
-		count = status;
+			"with status = %d\n", __func__, retval);
 		goto error;
 	}
 
@@ -470,7 +468,7 @@
 	 * really free it when it is finished with it */
 	usb_free_urb(urb);
 
-	return count;
+	return writesize;
 error:
 	usb_free_urb(urb);
 error_no_urb:
@@ -478,8 +476,10 @@
 error_no_buffer:
 	spin_lock_irqsave(&portdata->lock, flags);
 	--portdata->outstanding_urbs;
+	dev_dbg(&port->dev, "%s - 2. outstanding_urbs: %d\n", __func__,
+		portdata->outstanding_urbs);
 	spin_unlock_irqrestore(&portdata->lock, flags);
-	return count;
+	return retval;
 }
 
 static void sierra_indat_callback(struct urb *urb)
@@ -491,33 +491,39 @@
 	unsigned char *data = urb->transfer_buffer;
 	int status = urb->status;
 
-	dbg("%s: %p", __func__, urb);
-
 	endpoint = usb_pipeendpoint(urb->pipe);
-	port =  urb->context;
+	port = urb->context;
+
+	dev_dbg(&port->dev, "%s: %p\n", __func__, urb);
 
 	if (status) {
 		dev_dbg(&port->dev, "%s: nonzero status: %d on"
-		    " endpoint %02x.", __func__, status, endpoint);
+			" endpoint %02x\n", __func__, status, endpoint);
 	} else {
 		if (urb->actual_length) {
 			tty = tty_port_tty_get(&port->port);
+
 			tty_buffer_request_room(tty, urb->actual_length);
 			tty_insert_flip_string(tty, data, urb->actual_length);
 			tty_flip_buffer_push(tty);
-			tty_kref_put(tty);
-		} else
-			dev_dbg(&port->dev, "%s: empty read urb"
-				" received", __func__);
 
-		/* Resubmit urb so we continue receiving */
-		if (port->port.count && status != -ESHUTDOWN && status != -EPERM) {
-			err = usb_submit_urb(urb, GFP_ATOMIC);
-			if (err)
-				dev_err(&port->dev, "resubmit read urb failed."
-					"(%d)\n", err);
+			tty_kref_put(tty);
+			usb_serial_debug_data(debug, &port->dev, __func__,
+				urb->actual_length, data);
+		} else {
+			dev_dbg(&port->dev, "%s: empty read urb"
+				" received\n", __func__);
 		}
 	}
+
+	/* Resubmit urb so we continue receiving */
+	if (port->port.count && status != -ESHUTDOWN && status != -EPERM) {
+		err = usb_submit_urb(urb, GFP_ATOMIC);
+		if (err)
+			dev_err(&port->dev, "resubmit read urb failed."
+				"(%d)\n", err);
+	}
+
 	return;
 }
 
@@ -529,8 +535,7 @@
 	struct sierra_port_private *portdata = usb_get_serial_port_data(port);
 	struct usb_serial *serial = port->serial;
 
-	dev_dbg(&port->dev, "%s", __func__);
-	dev_dbg(&port->dev, "%s: urb %p port %p has data %p", __func__,
+	dev_dbg(&port->dev, "%s: urb %p port %p has data %p\n", __func__,
 		urb, port, portdata);
 
 	if (status == 0) {
@@ -550,7 +555,7 @@
 					sizeof(struct usb_ctrlrequest));
 			struct tty_struct *tty;
 
-			dev_dbg(&port->dev, "%s: signal x%x", __func__,
+			dev_dbg(&port->dev, "%s: signal x%x\n", __func__,
 				signals);
 
 			old_dcd_state = portdata->dcd_state;
@@ -565,20 +570,20 @@
 				tty_hangup(tty);
 			tty_kref_put(tty);
 		} else {
-			dev_dbg(&port->dev, "%s: type %x req %x",
+			dev_dbg(&port->dev, "%s: type %x req %x\n",
 				__func__, req_pkt->bRequestType,
 				req_pkt->bRequest);
 		}
 	} else
-		dev_dbg(&port->dev, "%s: error %d", __func__, status);
+		dev_dbg(&port->dev, "%s: error %d\n", __func__, status);
 
 	/* Resubmit urb so we continue receiving IRQ data */
-	if (status != -ESHUTDOWN) {
+	if (port->port.count && status != -ESHUTDOWN && status != -ENOENT) {
 		urb->dev = serial->dev;
 		err = usb_submit_urb(urb, GFP_ATOMIC);
 		if (err)
-			dev_dbg(&port->dev, "%s: resubmit intr urb "
-				"failed. (%d)",	__func__, err);
+			dev_err(&port->dev, "%s: resubmit intr urb "
+				"failed. (%d)\n", __func__, err);
 	}
 }
 
@@ -588,7 +593,7 @@
 	struct sierra_port_private *portdata = usb_get_serial_port_data(port);
 	unsigned long flags;
 
-	dev_dbg(&port->dev, "%s - port %d", __func__, port->number);
+	dev_dbg(&port->dev, "%s - port %d\n", __func__, port->number);
 
 	/* try to give a good number back based on if we have any free urbs at
 	 * this point in time */
@@ -729,7 +734,7 @@
 
 	portdata = usb_get_serial_port_data(port);
 
-	dev_dbg(&port->dev, "%s", __func__);
+	dev_dbg(&port->dev, "%s\n", __func__);
 
 	/* Set some sane defaults */
 	portdata->rts_state = 1;
@@ -782,7 +787,7 @@
 	struct sierra_port_private *portdata;
 	int i;
 
-	dev_dbg(&serial->dev->dev, "%s", __func__);
+	dev_dbg(&serial->dev->dev, "%s\n", __func__);
 
 	/* Set Device mode to D0 */
 	sierra_set_power_state(serial->dev, 0x0000);
@@ -797,7 +802,7 @@
 		portdata = kzalloc(sizeof(*portdata), GFP_KERNEL);
 		if (!portdata) {
 			dev_dbg(&port->dev, "%s: kmalloc for "
-				"sierra_port_private (%d) failed!.",
+				"sierra_port_private (%d) failed!\n",
 				__func__, i);
 			return -ENOMEM;
 		}
@@ -809,13 +814,13 @@
 	return 0;
 }
 
-static void sierra_shutdown(struct usb_serial *serial)
+static void sierra_disconnect(struct usb_serial *serial)
 {
 	int i;
 	struct usb_serial_port *port;
 	struct sierra_port_private *portdata;
 
-	dev_dbg(&serial->dev->dev, "%s", __func__);
+	dev_dbg(&serial->dev->dev, "%s\n", __func__);
 
 	for (i = 0; i < serial->num_ports; ++i) {
 		port = serial->port[i];
@@ -848,7 +853,7 @@
 	.tiocmget          = sierra_tiocmget,
 	.tiocmset          = sierra_tiocmset,
 	.attach            = sierra_startup,
-	.shutdown          = sierra_shutdown,
+	.disconnect        = sierra_disconnect,
 	.read_int_callback = sierra_instat_callback,
 };
 
diff --git a/drivers/usb/serial/spcp8x5.c b/drivers/usb/serial/spcp8x5.c
index 8f7ed8f..3c249d8 100644
--- a/drivers/usb/serial/spcp8x5.c
+++ b/drivers/usb/serial/spcp8x5.c
@@ -356,7 +356,7 @@
 }
 
 /* call when the device plug out. free all the memory alloced by probe */
-static void spcp8x5_shutdown(struct usb_serial *serial)
+static void spcp8x5_release(struct usb_serial *serial)
 {
 	int i;
 	struct spcp8x5_private *priv;
@@ -366,7 +366,6 @@
 		if (priv) {
 			free_ringbuf(priv->buf);
 			kfree(priv);
-			usb_set_serial_port_data(serial->port[i] , NULL);
 		}
 	}
 }
@@ -1020,7 +1019,7 @@
 	.write_bulk_callback	= spcp8x5_write_bulk_callback,
 	.chars_in_buffer 	= spcp8x5_chars_in_buffer,
 	.attach 		= spcp8x5_startup,
-	.shutdown 		= spcp8x5_shutdown,
+	.release 		= spcp8x5_release,
 };
 
 static int __init spcp8x5_init(void)
diff --git a/drivers/usb/serial/symbolserial.c b/drivers/usb/serial/symbolserial.c
index 8b07ebc..6157fac 100644
--- a/drivers/usb/serial/symbolserial.c
+++ b/drivers/usb/serial/symbolserial.c
@@ -267,7 +267,7 @@
 	return retval;
 }
 
-static void symbol_shutdown(struct usb_serial *serial)
+static void symbol_disconnect(struct usb_serial *serial)
 {
 	struct symbol_private *priv = usb_get_serial_data(serial);
 
@@ -275,9 +275,16 @@
 
 	usb_kill_urb(priv->int_urb);
 	usb_free_urb(priv->int_urb);
+}
+
+static void symbol_release(struct usb_serial *serial)
+{
+	struct symbol_private *priv = usb_get_serial_data(serial);
+
+	dbg("%s", __func__);
+
 	kfree(priv->int_buffer);
 	kfree(priv);
-	usb_set_serial_data(serial, NULL);
 }
 
 static struct usb_driver symbol_driver = {
@@ -299,7 +306,8 @@
 	.attach =		symbol_startup,
 	.open =			symbol_open,
 	.close =		symbol_close,
-	.shutdown =		symbol_shutdown,
+	.disconnect =		symbol_disconnect,
+	.release =		symbol_release,
 	.throttle = 		symbol_throttle,
 	.unthrottle =		symbol_unthrottle,
 };
diff --git a/drivers/usb/serial/ti_usb_3410_5052.c b/drivers/usb/serial/ti_usb_3410_5052.c
index 42cb04c..991d823 100644
--- a/drivers/usb/serial/ti_usb_3410_5052.c
+++ b/drivers/usb/serial/ti_usb_3410_5052.c
@@ -97,7 +97,7 @@
 /* Function Declarations */
 
 static int ti_startup(struct usb_serial *serial);
-static void ti_shutdown(struct usb_serial *serial);
+static void ti_release(struct usb_serial *serial);
 static int ti_open(struct tty_struct *tty, struct usb_serial_port *port,
 		struct file *file);
 static void ti_close(struct usb_serial_port *port);
@@ -230,7 +230,7 @@
 	.id_table		= ti_id_table_3410,
 	.num_ports		= 1,
 	.attach			= ti_startup,
-	.shutdown		= ti_shutdown,
+	.release		= ti_release,
 	.open			= ti_open,
 	.close			= ti_close,
 	.write			= ti_write,
@@ -258,7 +258,7 @@
 	.id_table		= ti_id_table_5052,
 	.num_ports		= 2,
 	.attach			= ti_startup,
-	.shutdown		= ti_shutdown,
+	.release		= ti_release,
 	.open			= ti_open,
 	.close			= ti_close,
 	.write			= ti_write,
@@ -473,7 +473,7 @@
 }
 
 
-static void ti_shutdown(struct usb_serial *serial)
+static void ti_release(struct usb_serial *serial)
 {
 	int i;
 	struct ti_device *tdev = usb_get_serial_data(serial);
@@ -486,12 +486,10 @@
 		if (tport) {
 			ti_buf_free(tport->tp_write_buf);
 			kfree(tport);
-			usb_set_serial_port_data(serial->port[i], NULL);
 		}
 	}
 
 	kfree(tdev);
-	usb_set_serial_data(serial, NULL);
 }
 
 
diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c
index 1967a7e..d595aa5 100644
--- a/drivers/usb/serial/usb-serial.c
+++ b/drivers/usb/serial/usb-serial.c
@@ -141,6 +141,14 @@
 	if (serial->minor != SERIAL_TTY_NO_MINOR)
 		return_serial(serial);
 
+	serial->type->release(serial);
+
+	for (i = 0; i < serial->num_ports; ++i) {
+		port = serial->port[i];
+		if (port)
+			put_device(&port->dev);
+	}
+
 	/* If this is a "fake" port, we have to clean it up here, as it will
 	 * not get cleaned up in port_release() as it was never registered with
 	 * the driver core */
@@ -148,9 +156,8 @@
 		for (i = serial->num_ports;
 					i < serial->num_port_pointers; ++i) {
 			port = serial->port[i];
-			if (!port)
-				continue;
-			port_free(port);
+			if (port)
+				port_free(port);
 		}
 	}
 
@@ -1046,10 +1053,15 @@
 
 		dev_set_name(&port->dev, "ttyUSB%d", port->number);
 		dbg ("%s - registering %s", __func__, dev_name(&port->dev));
+		port->dev_state = PORT_REGISTERING;
 		retval = device_register(&port->dev);
-		if (retval)
+		if (retval) {
 			dev_err(&port->dev, "Error registering port device, "
 				"continuing\n");
+			port->dev_state = PORT_UNREGISTERED;
+		} else {
+			port->dev_state = PORT_REGISTERED;
+		}
 	}
 
 	usb_serial_console_init(debug, minor);
@@ -1113,10 +1125,6 @@
 	serial->disconnected = 1;
 	mutex_unlock(&serial->disc_mutex);
 
-	/* Unfortunately, many of the sub-drivers expect the port structures
-	 * to exist when their shutdown method is called, so we have to go
-	 * through this awkward two-step unregistration procedure.
-	 */
 	for (i = 0; i < serial->num_ports; ++i) {
 		port = serial->port[i];
 		if (port) {
@@ -1130,17 +1138,25 @@
 			}
 			kill_traffic(port);
 			cancel_work_sync(&port->work);
-			device_del(&port->dev);
+			if (port->dev_state == PORT_REGISTERED) {
+
+				/* Make sure the port is bound so that the
+				 * driver's port_remove method is called.
+				 */
+				if (!port->dev.driver) {
+					int rc;
+
+					port->dev.driver =
+							&serial->type->driver;
+					rc = device_bind_driver(&port->dev);
+				}
+				port->dev_state = PORT_UNREGISTERING;
+				device_del(&port->dev);
+				port->dev_state = PORT_UNREGISTERED;
+			}
 		}
 	}
-	serial->type->shutdown(serial);
-	for (i = 0; i < serial->num_ports; ++i) {
-		port = serial->port[i];
-		if (port) {
-			put_device(&port->dev);
-			serial->port[i] = NULL;
-		}
-	}
+	serial->type->disconnect(serial);
 
 	/* let the last holder of this object
 	 * cause it to be cleaned up */
@@ -1318,7 +1334,8 @@
 	set_to_generic_if_null(device, chars_in_buffer);
 	set_to_generic_if_null(device, read_bulk_callback);
 	set_to_generic_if_null(device, write_bulk_callback);
-	set_to_generic_if_null(device, shutdown);
+	set_to_generic_if_null(device, disconnect);
+	set_to_generic_if_null(device, release);
 }
 
 int usb_serial_register(struct usb_serial_driver *driver)
diff --git a/drivers/usb/serial/usb_debug.c b/drivers/usb/serial/usb_debug.c
index 6c9cbb5..6148009 100644
--- a/drivers/usb/serial/usb_debug.c
+++ b/drivers/usb/serial/usb_debug.c
@@ -15,7 +15,19 @@
 #include <linux/usb.h>
 #include <linux/usb/serial.h>
 
+#define URB_DEBUG_MAX_IN_FLIGHT_URBS	4000
 #define USB_DEBUG_MAX_PACKET_SIZE	8
+#define USB_DEBUG_BRK_SIZE		8
+static char USB_DEBUG_BRK[USB_DEBUG_BRK_SIZE] = {
+	0x00,
+	0xff,
+	0x01,
+	0xfe,
+	0x00,
+	0xfe,
+	0x01,
+	0xff,
+};
 
 static struct usb_device_id id_table [] = {
 	{ USB_DEVICE(0x0525, 0x127a) },
@@ -38,6 +50,32 @@
 	return usb_serial_generic_open(tty, port, filp);
 }
 
+/* This HW really does not support a serial break, so one will be
+ * emulated whenever the break state is set to true.
+ */
+static void usb_debug_break_ctl(struct tty_struct *tty, int break_state)
+{
+	struct usb_serial_port *port = tty->driver_data;
+	if (!break_state)
+		return;
+	usb_serial_generic_write(tty, port, USB_DEBUG_BRK, USB_DEBUG_BRK_SIZE);
+}
+
+static void usb_debug_read_bulk_callback(struct urb *urb)
+{
+	struct usb_serial_port *port = urb->context;
+
+	if (urb->actual_length == USB_DEBUG_BRK_SIZE &&
+	    memcmp(urb->transfer_buffer, USB_DEBUG_BRK,
+		   USB_DEBUG_BRK_SIZE) == 0) {
+		usb_serial_handle_break(port);
+		usb_serial_generic_resubmit_read_urb(port, GFP_ATOMIC);
+		return;
+	}
+
+	usb_serial_generic_read_bulk_callback(urb);
+}
+
 static struct usb_serial_driver debug_device = {
 	.driver = {
 		.owner =	THIS_MODULE,
@@ -46,6 +84,9 @@
 	.id_table =		id_table,
 	.num_ports =		1,
 	.open =			usb_debug_open,
+	.max_in_flight_urbs =	URB_DEBUG_MAX_IN_FLIGHT_URBS,
+	.break_ctl =		usb_debug_break_ctl,
+	.read_bulk_callback =	usb_debug_read_bulk_callback,
 };
 
 static int __init debug_init(void)
diff --git a/drivers/usb/serial/visor.c b/drivers/usb/serial/visor.c
index b15f1c0..f5d0f64 100644
--- a/drivers/usb/serial/visor.c
+++ b/drivers/usb/serial/visor.c
@@ -47,7 +47,7 @@
 static int  visor_probe(struct usb_serial *serial,
 					const struct usb_device_id *id);
 static int  visor_calc_num_ports(struct usb_serial *serial);
-static void visor_shutdown(struct usb_serial *serial);
+static void visor_release(struct usb_serial *serial);
 static void visor_write_bulk_callback(struct urb *urb);
 static void visor_read_bulk_callback(struct urb *urb);
 static void visor_read_int_callback(struct urb *urb);
@@ -202,7 +202,7 @@
 	.attach =		treo_attach,
 	.probe =		visor_probe,
 	.calc_num_ports =	visor_calc_num_ports,
-	.shutdown =		visor_shutdown,
+	.release =		visor_release,
 	.write =		visor_write,
 	.write_room =		visor_write_room,
 	.write_bulk_callback =	visor_write_bulk_callback,
@@ -227,7 +227,7 @@
 	.attach =		clie_5_attach,
 	.probe =		visor_probe,
 	.calc_num_ports =	visor_calc_num_ports,
-	.shutdown =		visor_shutdown,
+	.release =		visor_release,
 	.write =		visor_write,
 	.write_room =		visor_write_room,
 	.write_bulk_callback =	visor_write_bulk_callback,
@@ -918,7 +918,7 @@
 	return generic_startup(serial);
 }
 
-static void visor_shutdown(struct usb_serial *serial)
+static void visor_release(struct usb_serial *serial)
 {
 	struct visor_private *priv;
 	int i;
@@ -927,10 +927,7 @@
 
 	for (i = 0; i < serial->num_ports; i++) {
 		priv = usb_get_serial_port_data(serial->port[i]);
-		if (priv) {
-			usb_set_serial_port_data(serial->port[i], NULL);
-			kfree(priv);
-		}
+		kfree(priv);
 	}
 }
 
diff --git a/drivers/usb/serial/whiteheat.c b/drivers/usb/serial/whiteheat.c
index 7c7295d..8d126dd 100644
--- a/drivers/usb/serial/whiteheat.c
+++ b/drivers/usb/serial/whiteheat.c
@@ -144,7 +144,7 @@
 
 /* function prototypes for the Connect Tech WhiteHEAT serial converter */
 static int  whiteheat_attach(struct usb_serial *serial);
-static void whiteheat_shutdown(struct usb_serial *serial);
+static void whiteheat_release(struct usb_serial *serial);
 static int  whiteheat_open(struct tty_struct *tty,
 			struct usb_serial_port *port, struct file *filp);
 static void whiteheat_close(struct usb_serial_port *port);
@@ -189,7 +189,7 @@
 	.id_table =		id_table_std,
 	.num_ports =		4,
 	.attach =		whiteheat_attach,
-	.shutdown =		whiteheat_shutdown,
+	.release =		whiteheat_release,
 	.open =			whiteheat_open,
 	.close =		whiteheat_close,
 	.write =		whiteheat_write,
@@ -617,7 +617,7 @@
 }
 
 
-static void whiteheat_shutdown(struct usb_serial *serial)
+static void whiteheat_release(struct usb_serial *serial)
 {
 	struct usb_serial_port *command_port;
 	struct usb_serial_port *port;
diff --git a/drivers/usb/storage/initializers.c b/drivers/usb/storage/initializers.c
index 2dd9bd4..ec17c96 100644
--- a/drivers/usb/storage/initializers.c
+++ b/drivers/usb/storage/initializers.c
@@ -52,7 +52,7 @@
 	us->iobuf[0] = 0x1;
 	result = usb_stor_control_msg(us, us->send_ctrl_pipe,
 			0x0C, USB_RECIP_INTERFACE | USB_TYPE_VENDOR,
-			0x01, 0x0, us->iobuf, 0x1, 5*HZ);
+			0x01, 0x0, us->iobuf, 0x1, 5000);
 	US_DEBUGP("-- result is %d\n", result);
 
 	return 0;
@@ -80,14 +80,16 @@
 
 	res = usb_stor_bulk_transfer_buf(us, us->send_bulk_pipe, bcb,
 			US_BULK_CB_WRAP_LEN, &partial);
-	if(res)
-		return res;
+	if (res)
+		return -EIO;
 
 	US_DEBUGP("Getting status packet...\n");
 	res = usb_stor_bulk_transfer_buf(us, us->recv_bulk_pipe, bcs,
 			US_BULK_CS_WRAP_LEN, &partial);
+	if (res)
+		return -EIO;
 
-	return (res ? -1 : 0);
+	return 0;
 }
 
 /* This places the HUAWEI E220 devices in multi-port mode */
@@ -99,6 +101,6 @@
 				      USB_REQ_SET_FEATURE,
 				      USB_TYPE_STANDARD | USB_RECIP_DEVICE,
 				      0x01, 0x0, NULL, 0x0, 1000);
-	US_DEBUGP("usb_control_msg performing result is %d\n", result);
-	return (result ? 0 : -1);
+	US_DEBUGP("Huawei mode set result is %d\n", result);
+	return (result ? 0 : -ENODEV);
 }
diff --git a/drivers/usb/storage/option_ms.c b/drivers/usb/storage/option_ms.c
index 353f922..d41cc0a 100644
--- a/drivers/usb/storage/option_ms.c
+++ b/drivers/usb/storage/option_ms.c
@@ -37,7 +37,7 @@
 
 #define RESPONSE_LEN 1024
 
-static int option_rezero(struct us_data *us, int ep_in, int ep_out)
+static int option_rezero(struct us_data *us)
 {
 	const unsigned char rezero_msg[] = {
 	  0x55, 0x53, 0x42, 0x43, 0x78, 0x56, 0x34, 0x12,
@@ -54,10 +54,10 @@
 	if (buffer == NULL)
 		return USB_STOR_TRANSPORT_ERROR;
 
-	memcpy(buffer, rezero_msg, sizeof (rezero_msg));
+	memcpy(buffer, rezero_msg, sizeof(rezero_msg));
 	result = usb_stor_bulk_transfer_buf(us,
-			usb_sndbulkpipe(us->pusb_dev, ep_out),
-			buffer, sizeof (rezero_msg), NULL);
+			us->send_bulk_pipe,
+			buffer, sizeof(rezero_msg), NULL);
 	if (result != USB_STOR_XFER_GOOD) {
 		result = USB_STOR_XFER_ERROR;
 		goto out;
@@ -66,9 +66,15 @@
 	/* Some of the devices need to be asked for a response, but we don't
 	 * care what that response is.
 	 */
-	result = usb_stor_bulk_transfer_buf(us,
-			usb_sndbulkpipe(us->pusb_dev, ep_out),
+	usb_stor_bulk_transfer_buf(us,
+			us->recv_bulk_pipe,
 			buffer, RESPONSE_LEN, NULL);
+
+	/* Read the CSW */
+	usb_stor_bulk_transfer_buf(us,
+			us->recv_bulk_pipe,
+			buffer, 13, NULL);
+
 	result = USB_STOR_XFER_GOOD;
 
 out:
@@ -76,63 +82,75 @@
 	return result;
 }
 
+static int option_inquiry(struct us_data *us)
+{
+	const unsigned char inquiry_msg[] = {
+	  0x55, 0x53, 0x42, 0x43, 0x12, 0x34, 0x56, 0x78,
+	  0x24, 0x00, 0x00, 0x00, 0x80, 0x00, 0x06, 0x12,
+	  0x00, 0x00, 0x00, 0x24, 0x00, 0x00, 0x00, 0x00,
+	  0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
+	};
+	char *buffer;
+	int result;
+
+	US_DEBUGP("Option MS: %s", "device inquiry for vendor name\n");
+
+	buffer = kzalloc(0x24, GFP_KERNEL);
+	if (buffer == NULL)
+		return USB_STOR_TRANSPORT_ERROR;
+
+	memcpy(buffer, inquiry_msg, sizeof(inquiry_msg));
+	result = usb_stor_bulk_transfer_buf(us,
+			us->send_bulk_pipe,
+			buffer, sizeof(inquiry_msg), NULL);
+	if (result != USB_STOR_XFER_GOOD) {
+		result = USB_STOR_XFER_ERROR;
+		goto out;
+	}
+
+	result = usb_stor_bulk_transfer_buf(us,
+			us->recv_bulk_pipe,
+			buffer, 0x24, NULL);
+	if (result != USB_STOR_XFER_GOOD) {
+		result = USB_STOR_XFER_ERROR;
+		goto out;
+	}
+
+	result = memcmp(buffer+8, "Option", 6);
+
+	/* Read the CSW */
+	usb_stor_bulk_transfer_buf(us,
+			us->recv_bulk_pipe,
+			buffer, 13, NULL);
+
+out:
+	kfree(buffer);
+	return result;
+}
+
+
 int option_ms_init(struct us_data *us)
 {
-	struct usb_device *udev;
-	struct usb_interface *intf;
-	struct usb_host_interface *iface_desc;
-	struct usb_endpoint_descriptor *endpoint = NULL;
-	u8 ep_in = 0, ep_out = 0;
-	int ep_in_size = 0, ep_out_size = 0;
-	int i, result;
-
-	udev = us->pusb_dev;
-	intf = us->pusb_intf;
-
-	/* Ensure it's really a ZeroCD device; devices that are already
-	 * in modem mode return 0xFF for class, subclass, and protocol.
-	 */
-	if (udev->descriptor.bDeviceClass != 0 ||
-	    udev->descriptor.bDeviceSubClass != 0 ||
-	    udev->descriptor.bDeviceProtocol != 0)
-		return USB_STOR_TRANSPORT_GOOD;
+	int result;
 
 	US_DEBUGP("Option MS: option_ms_init called\n");
 
-	/* Find the right mass storage interface */
-	iface_desc = intf->cur_altsetting;
-	if (iface_desc->desc.bInterfaceClass != 0x8 ||
-	    iface_desc->desc.bInterfaceSubClass != 0x6 ||
-	    iface_desc->desc.bInterfaceProtocol != 0x50) {
-		US_DEBUGP("Option MS: mass storage interface not found, no action "
-		          "required\n");
-		return USB_STOR_TRANSPORT_GOOD;
-	}
-
-	/* Find the mass storage bulk endpoints */
-	for (i = 0; i < iface_desc->desc.bNumEndpoints && (!ep_in_size || !ep_out_size); ++i) {
-		endpoint = &iface_desc->endpoint[i].desc;
-
-		if (usb_endpoint_is_bulk_in(endpoint)) {
-			ep_in = usb_endpoint_num(endpoint);
-			ep_in_size = le16_to_cpu(endpoint->wMaxPacketSize);
-		} else if (usb_endpoint_is_bulk_out(endpoint)) {
-			ep_out = usb_endpoint_num(endpoint);
-			ep_out_size = le16_to_cpu(endpoint->wMaxPacketSize);
-		}
-	}
-
-	/* Can't find the mass storage endpoints */
-	if (!ep_in_size || !ep_out_size) {
-		US_DEBUGP("Option MS: mass storage endpoints not found, no action "
-		          "required\n");
-		return USB_STOR_TRANSPORT_GOOD;
-	}
+	/* Additional test for vendor information via INQUIRY,
+	 * because some vendor/product IDs are ambiguous
+	 */
+	result = option_inquiry(us);
+	if (result != 0) {
+		US_DEBUGP("Option MS: vendor is not Option or not determinable,"
+			  " no action taken\n");
+		return 0;
+	} else
+		US_DEBUGP("Option MS: this is a genuine Option device,"
+			  " proceeding\n");
 
 	/* Force Modem mode */
 	if (option_zero_cd == ZCD_FORCE_MODEM) {
 		US_DEBUGP("Option MS: %s", "Forcing Modem Mode\n");
-		result = option_rezero(us, ep_in, ep_out);
+		result = option_rezero(us);
 		if (result != USB_STOR_XFER_GOOD)
 			US_DEBUGP("Option MS: Failed to switch to modem mode.\n");
 		return -EIO;
@@ -142,6 +160,6 @@
 		          " requests it\n");
 	}
 
-	return USB_STOR_TRANSPORT_GOOD;
+	return 0;
 }
 
diff --git a/drivers/usb/storage/sierra_ms.c b/drivers/usb/storage/sierra_ms.c
index 4359a2c..4395c41 100644
--- a/drivers/usb/storage/sierra_ms.c
+++ b/drivers/usb/storage/sierra_ms.c
@@ -202,6 +202,6 @@
 complete:
 	result = device_create_file(&us->pusb_intf->dev, &dev_attr_truinst);
 
-	return USB_STOR_TRANSPORT_GOOD;
+	return 0;
 }
 
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
index 4b8b690..1b9c5dd 100644
--- a/drivers/usb/storage/unusual_devs.h
+++ b/drivers/usb/storage/unusual_devs.h
@@ -1385,7 +1385,7 @@
 UNUSUAL_DEV(  0x1186, 0x3e04, 0x0000, 0x0000,
            "D-Link",
            "USB Mass Storage",
-           US_SC_DEVICE, US_PR_DEVICE, option_ms_init, 0),
+           US_SC_DEVICE, US_PR_DEVICE, option_ms_init, US_FL_IGNORE_DEVICE),
 
 /* Reported by Kevin Lloyd <linux@sierrawireless.com>
  * Entry is needed for the initializer function override,
diff --git a/drivers/video/bw2.c b/drivers/video/bw2.c
index 1e35ba6..b0b147c 100644
--- a/drivers/video/bw2.c
+++ b/drivers/video/bw2.c
@@ -111,9 +111,7 @@
 	u32			flags;
 #define BW2_FLAG_BLANKED	0x00000001
 
-	unsigned long		physbase;
 	unsigned long		which_io;
-	unsigned long		fbsize;
 };
 
 /**
@@ -167,17 +165,15 @@
 	struct bw2_par *par = (struct bw2_par *)info->par;
 
 	return sbusfb_mmap_helper(bw2_mmap_map,
-				  par->physbase, par->fbsize,
+				  info->fix.smem_start, info->fix.smem_len,
 				  par->which_io,
 				  vma);
 }
 
 static int bw2_ioctl(struct fb_info *info, unsigned int cmd, unsigned long arg)
 {
-	struct bw2_par *par = (struct bw2_par *) info->par;
-
 	return sbusfb_ioctl_helper(cmd, arg, info,
-				   FBTYPE_SUN2BW, 1, par->fbsize);
+				   FBTYPE_SUN2BW, 1, info->fix.smem_len);
 }
 
 /*
@@ -294,7 +290,7 @@
 
 	spin_lock_init(&par->lock);
 
-	par->physbase = op->resource[0].start;
+	info->fix.smem_start = op->resource[0].start;
 	par->which_io = op->resource[0].flags & IORESOURCE_BITS;
 
 	sbusfb_fill_var(&info->var, dp, 1);
@@ -317,13 +313,13 @@
 			goto out_unmap_regs;
 	}
 
-	par->fbsize = PAGE_ALIGN(linebytes * info->var.yres);
+	info->fix.smem_len = PAGE_ALIGN(linebytes * info->var.yres);
 
 	info->flags = FBINFO_DEFAULT;
 	info->fbops = &bw2_ops;
 
 	info->screen_base = of_ioremap(&op->resource[0], 0,
-				       par->fbsize, "bw2 ram");
+				       info->fix.smem_len, "bw2 ram");
 	if (!info->screen_base)
 		goto out_unmap_regs;
 
@@ -338,12 +334,12 @@
 	dev_set_drvdata(&op->dev, info);
 
 	printk(KERN_INFO "%s: bwtwo at %lx:%lx\n",
-	       dp->full_name, par->which_io, par->physbase);
+	       dp->full_name, par->which_io, info->fix.smem_start);
 
 	return 0;
 
 out_unmap_screen:
-	of_iounmap(&op->resource[0], info->screen_base, par->fbsize);
+	of_iounmap(&op->resource[0], info->screen_base, info->fix.smem_len);
 
 out_unmap_regs:
 	of_iounmap(&op->resource[0], par->regs, sizeof(struct bw2_regs));
@@ -363,7 +359,7 @@
 	unregister_framebuffer(info);
 
 	of_iounmap(&op->resource[0], par->regs, sizeof(struct bw2_regs));
-	of_iounmap(&op->resource[0], info->screen_base, par->fbsize);
+	of_iounmap(&op->resource[0], info->screen_base, info->fix.smem_len);
 
 	framebuffer_release(info);
 
diff --git a/drivers/video/cg14.c b/drivers/video/cg14.c
index a2d1882..fe45a3b 100644
--- a/drivers/video/cg14.c
+++ b/drivers/video/cg14.c
@@ -196,9 +196,7 @@
 	u32			flags;
 #define CG14_FLAG_BLANKED	0x00000001
 
-	unsigned long		physbase;
 	unsigned long		iospace;
-	unsigned long		fbsize;
 
 	struct sbus_mmap_map	mmap_map[CG14_MMAP_ENTRIES];
 
@@ -271,7 +269,7 @@
 	struct cg14_par *par = (struct cg14_par *) info->par;
 
 	return sbusfb_mmap_helper(par->mmap_map,
-				  par->physbase, par->fbsize,
+				  info->fix.smem_start, info->fix.smem_len,
 				  par->iospace, vma);
 }
 
@@ -343,7 +341,8 @@
 
 	default:
 		ret = sbusfb_ioctl_helper(cmd, arg, info,
-					  FBTYPE_MDICOLOR, 8, par->fbsize);
+					  FBTYPE_MDICOLOR, 8,
+					  info->fix.smem_len);
 		break;
 	};
 
@@ -462,7 +461,7 @@
 			   par->cursor, sizeof(struct cg14_cursor));
 	if (info->screen_base)
 		of_iounmap(&op->resource[1],
-			   info->screen_base, par->fbsize);
+			   info->screen_base, info->fix.smem_len);
 }
 
 static int __devinit cg14_probe(struct of_device *op, const struct of_device_id *match)
@@ -488,14 +487,14 @@
 
 	linebytes = of_getintprop_default(dp, "linebytes",
 					  info->var.xres);
-	par->fbsize = PAGE_ALIGN(linebytes * info->var.yres);
+	info->fix.smem_len = PAGE_ALIGN(linebytes * info->var.yres);
 
 	if (!strcmp(dp->parent->name, "sbus") ||
 	    !strcmp(dp->parent->name, "sbi")) {
-		par->physbase = op->resource[0].start;
+		info->fix.smem_start = op->resource[0].start;
 		par->iospace = op->resource[0].flags & IORESOURCE_BITS;
 	} else {
-		par->physbase = op->resource[1].start;
+		info->fix.smem_start = op->resource[1].start;
 		par->iospace = op->resource[0].flags & IORESOURCE_BITS;
 	}
 
@@ -507,7 +506,7 @@
 				 sizeof(struct cg14_cursor), "cg14 cursor");
 
 	info->screen_base = of_ioremap(&op->resource[1], 0,
-				       par->fbsize, "cg14 ram");
+				       info->fix.smem_len, "cg14 ram");
 
 	if (!par->regs || !par->clut || !par->cursor || !info->screen_base)
 		goto out_unmap_regs;
@@ -557,7 +556,7 @@
 
 	printk(KERN_INFO "%s: cgfourteen at %lx:%lx, %dMB\n",
 	       dp->full_name,
-	       par->iospace, par->physbase,
+	       par->iospace, info->fix.smem_start,
 	       par->ramsize >> 20);
 
 	return 0;
diff --git a/drivers/video/cg3.c b/drivers/video/cg3.c
index 99f87fb..b2319fa 100644
--- a/drivers/video/cg3.c
+++ b/drivers/video/cg3.c
@@ -118,9 +118,7 @@
 #define CG3_FLAG_BLANKED	0x00000001
 #define CG3_FLAG_RDI		0x00000002
 
-	unsigned long		physbase;
 	unsigned long		which_io;
-	unsigned long		fbsize;
 };
 
 /**
@@ -231,17 +229,15 @@
 	struct cg3_par *par = (struct cg3_par *)info->par;
 
 	return sbusfb_mmap_helper(cg3_mmap_map,
-				  par->physbase, par->fbsize,
+				  info->fix.smem_start, info->fix.smem_len,
 				  par->which_io,
 				  vma);
 }
 
 static int cg3_ioctl(struct fb_info *info, unsigned int cmd, unsigned long arg)
 {
-	struct cg3_par *par = (struct cg3_par *) info->par;
-
 	return sbusfb_ioctl_helper(cmd, arg, info,
-				   FBTYPE_SUN3COLOR, 8, par->fbsize);
+				   FBTYPE_SUN3COLOR, 8, info->fix.smem_len);
 }
 
 /*
@@ -368,7 +364,7 @@
 
 	spin_lock_init(&par->lock);
 
-	par->physbase = op->resource[0].start;
+	info->fix.smem_start = op->resource[0].start;
 	par->which_io = op->resource[0].flags & IORESOURCE_BITS;
 
 	sbusfb_fill_var(&info->var, dp, 8);
@@ -382,7 +378,7 @@
 
 	linebytes = of_getintprop_default(dp, "linebytes",
 					  info->var.xres);
-	par->fbsize = PAGE_ALIGN(linebytes * info->var.yres);
+	info->fix.smem_len = PAGE_ALIGN(linebytes * info->var.yres);
 
 	par->regs = of_ioremap(&op->resource[0], CG3_REGS_OFFSET,
 			       sizeof(struct cg3_regs), "cg3 regs");
@@ -392,7 +388,7 @@
 	info->flags = FBINFO_DEFAULT;
 	info->fbops = &cg3_ops;
 	info->screen_base = of_ioremap(&op->resource[0], CG3_RAM_OFFSET,
-				       par->fbsize, "cg3 ram");
+				       info->fix.smem_len, "cg3 ram");
 	if (!info->screen_base)
 		goto out_unmap_regs;
 
@@ -418,7 +414,7 @@
 	dev_set_drvdata(&op->dev, info);
 
 	printk(KERN_INFO "%s: cg3 at %lx:%lx\n",
-	       dp->full_name, par->which_io, par->physbase);
+	       dp->full_name, par->which_io, info->fix.smem_start);
 
 	return 0;
 
@@ -426,7 +422,7 @@
 	fb_dealloc_cmap(&info->cmap);
 
 out_unmap_screen:
-	of_iounmap(&op->resource[0], info->screen_base, par->fbsize);
+	of_iounmap(&op->resource[0], info->screen_base, info->fix.smem_len);
 
 out_unmap_regs:
 	of_iounmap(&op->resource[0], par->regs, sizeof(struct cg3_regs));
@@ -447,7 +443,7 @@
 	fb_dealloc_cmap(&info->cmap);
 
 	of_iounmap(&op->resource[0], par->regs, sizeof(struct cg3_regs));
-	of_iounmap(&op->resource[0], info->screen_base, par->fbsize);
+	of_iounmap(&op->resource[0], info->screen_base, info->fix.smem_len);
 
 	framebuffer_release(info);
 
diff --git a/drivers/video/cg6.c b/drivers/video/cg6.c
index 940ec04..0d47c60 100644
--- a/drivers/video/cg6.c
+++ b/drivers/video/cg6.c
@@ -263,9 +263,7 @@
 	u32			flags;
 #define CG6_FLAG_BLANKED	0x00000001
 
-	unsigned long		physbase;
 	unsigned long		which_io;
-	unsigned long		fbsize;
 };
 
 static int cg6_sync(struct fb_info *info)
@@ -596,16 +594,14 @@
 	struct cg6_par *par = (struct cg6_par *)info->par;
 
 	return sbusfb_mmap_helper(cg6_mmap_map,
-				  par->physbase, par->fbsize,
+				  info->fix.smem_start, info->fix.smem_len,
 				  par->which_io, vma);
 }
 
 static int cg6_ioctl(struct fb_info *info, unsigned int cmd, unsigned long arg)
 {
-	struct cg6_par *par = (struct cg6_par *)info->par;
-
 	return sbusfb_ioctl_helper(cmd, arg, info,
-				   FBTYPE_SUNFAST_COLOR, 8, par->fbsize);
+				   FBTYPE_SUNFAST_COLOR, 8, info->fix.smem_len);
 }
 
 /*
@@ -631,12 +627,12 @@
 		break;
 	};
 	if (((conf >> CG6_FHC_REV_SHIFT) & CG6_FHC_REV_MASK) >= 11) {
-		if (par->fbsize <= 0x100000)
+		if (info->fix.smem_len <= 0x100000)
 			cg6_card_name = "TGX";
 		else
 			cg6_card_name = "TGX+";
 	} else {
-		if (par->fbsize <= 0x100000)
+		if (info->fix.smem_len <= 0x100000)
 			cg6_card_name = "GX";
 		else
 			cg6_card_name = "GX+";
@@ -738,7 +734,8 @@
 		of_iounmap(&op->resource[0], par->fhc, sizeof(u32));
 
 	if (info->screen_base)
-		of_iounmap(&op->resource[0], info->screen_base, par->fbsize);
+		of_iounmap(&op->resource[0], info->screen_base,
+			   info->fix.smem_len);
 }
 
 static int __devinit cg6_probe(struct of_device *op,
@@ -759,7 +756,7 @@
 
 	spin_lock_init(&par->lock);
 
-	par->physbase = op->resource[0].start;
+	info->fix.smem_start = op->resource[0].start;
 	par->which_io = op->resource[0].flags & IORESOURCE_BITS;
 
 	sbusfb_fill_var(&info->var, dp, 8);
@@ -769,11 +766,11 @@
 
 	linebytes = of_getintprop_default(dp, "linebytes",
 					  info->var.xres);
-	par->fbsize = PAGE_ALIGN(linebytes * info->var.yres);
+	info->fix.smem_len = PAGE_ALIGN(linebytes * info->var.yres);
 
 	dblbuf = of_getintprop_default(dp, "dblbuf", 0);
 	if (dblbuf)
-		par->fbsize *= 4;
+		info->fix.smem_len *= 4;
 
 	par->fbc = of_ioremap(&op->resource[0], CG6_FBC_OFFSET,
 				4096, "cgsix fbc");
@@ -792,7 +789,7 @@
 	info->fbops = &cg6_ops;
 
 	info->screen_base = of_ioremap(&op->resource[0], CG6_RAM_OFFSET,
-					par->fbsize, "cgsix ram");
+					info->fix.smem_len, "cgsix ram");
 	if (!par->fbc || !par->tec || !par->thc ||
 	    !par->bt || !par->fhc || !info->screen_base)
 		goto out_unmap_regs;
@@ -817,7 +814,7 @@
 
 	printk(KERN_INFO "%s: CGsix [%s] at %lx:%lx\n",
 	       dp->full_name, info->fix.id,
-	       par->which_io, par->physbase);
+	       par->which_io, info->fix.smem_start);
 
 	return 0;
 
diff --git a/drivers/video/leo.c b/drivers/video/leo.c
index 7c7e8c2..e145e2d 100644
--- a/drivers/video/leo.c
+++ b/drivers/video/leo.c
@@ -191,9 +191,7 @@
 	u32			flags;
 #define LEO_FLAG_BLANKED	0x00000001
 
-	unsigned long		physbase;
 	unsigned long		which_io;
-	unsigned long		fbsize;
 };
 
 static void leo_wait(struct leo_lx_krn __iomem *lx_krn)
@@ -420,16 +418,14 @@
 	struct leo_par *par = (struct leo_par *)info->par;
 
 	return sbusfb_mmap_helper(leo_mmap_map,
-				  par->physbase, par->fbsize,
+				  info->fix.smem_start, info->fix.smem_len,
 				  par->which_io, vma);
 }
 
 static int leo_ioctl(struct fb_info *info, unsigned int cmd, unsigned long arg)
 {
-	struct leo_par *par = (struct leo_par *) info->par;
-
 	return sbusfb_ioctl_helper(cmd, arg, info,
-				   FBTYPE_SUNLEO, 32, par->fbsize);
+				   FBTYPE_SUNLEO, 32, info->fix.smem_len);
 }
 
 /*
@@ -569,7 +565,7 @@
 
 	spin_lock_init(&par->lock);
 
-	par->physbase = op->resource[0].start;
+	info->fix.smem_start = op->resource[0].start;
 	par->which_io = op->resource[0].flags & IORESOURCE_BITS;
 
 	sbusfb_fill_var(&info->var, dp, 32);
@@ -577,7 +573,7 @@
 
 	linebytes = of_getintprop_default(dp, "linebytes",
 					  info->var.xres);
-	par->fbsize = PAGE_ALIGN(linebytes * info->var.yres);
+	info->fix.smem_len = PAGE_ALIGN(linebytes * info->var.yres);
 
 	par->lc_ss0_usr =
 		of_ioremap(&op->resource[0], LEO_OFF_LC_SS0_USR,
@@ -627,7 +623,7 @@
 
 	printk(KERN_INFO "%s: leo at %lx:%lx\n",
 	       dp->full_name,
-	       par->which_io, par->physbase);
+	       par->which_io, info->fix.smem_start);
 
 	return 0;
 
diff --git a/drivers/video/p9100.c b/drivers/video/p9100.c
index 7000f2c..7fa4ab0 100644
--- a/drivers/video/p9100.c
+++ b/drivers/video/p9100.c
@@ -134,9 +134,7 @@
 	u32			flags;
 #define P9100_FLAG_BLANKED	0x00000001
 
-	unsigned long		physbase;
 	unsigned long		which_io;
-	unsigned long		fbsize;
 };
 
 /**
@@ -224,18 +222,16 @@
 	struct p9100_par *par = (struct p9100_par *)info->par;
 
 	return sbusfb_mmap_helper(p9100_mmap_map,
-				  par->physbase, par->fbsize,
+				  info->fix.smem_start, info->fix.smem_len,
 				  par->which_io, vma);
 }
 
 static int p9100_ioctl(struct fb_info *info, unsigned int cmd,
 		       unsigned long arg)
 {
-	struct p9100_par *par = (struct p9100_par *) info->par;
-
 	/* Make it look like a cg3. */
 	return sbusfb_ioctl_helper(cmd, arg, info,
-				   FBTYPE_SUN3COLOR, 8, par->fbsize);
+				   FBTYPE_SUN3COLOR, 8, info->fix.smem_len);
 }
 
 /*
@@ -271,7 +267,7 @@
 	spin_lock_init(&par->lock);
 
 	/* This is the framebuffer and the only resource apps can mmap.  */
-	par->physbase = op->resource[2].start;
+	info->fix.smem_start = op->resource[2].start;
 	par->which_io = op->resource[2].flags & IORESOURCE_BITS;
 
 	sbusfb_fill_var(&info->var, dp, 8);
@@ -280,7 +276,7 @@
 	info->var.blue.length = 8;
 
 	linebytes = of_getintprop_default(dp, "linebytes", info->var.xres);
-	par->fbsize = PAGE_ALIGN(linebytes * info->var.yres);
+	info->fix.smem_len = PAGE_ALIGN(linebytes * info->var.yres);
 
 	par->regs = of_ioremap(&op->resource[0], 0,
 			       sizeof(struct p9100_regs), "p9100 regs");
@@ -290,7 +286,7 @@
 	info->flags = FBINFO_DEFAULT;
 	info->fbops = &p9100_ops;
 	info->screen_base = of_ioremap(&op->resource[2], 0,
-				       par->fbsize, "p9100 ram");
+				       info->fix.smem_len, "p9100 ram");
 	if (!info->screen_base)
 		goto out_unmap_regs;
 
@@ -311,7 +307,7 @@
 
 	printk(KERN_INFO "%s: p9100 at %lx:%lx\n",
 	       dp->full_name,
-	       par->which_io, par->physbase);
+	       par->which_io, info->fix.smem_start);
 
 	return 0;
 
@@ -319,7 +315,7 @@
 	fb_dealloc_cmap(&info->cmap);
 
 out_unmap_screen:
-	of_iounmap(&op->resource[2], info->screen_base, par->fbsize);
+	of_iounmap(&op->resource[2], info->screen_base, info->fix.smem_len);
 
 out_unmap_regs:
 	of_iounmap(&op->resource[0], par->regs, sizeof(struct p9100_regs));
@@ -340,7 +336,7 @@
 	fb_dealloc_cmap(&info->cmap);
 
 	of_iounmap(&op->resource[0], par->regs, sizeof(struct p9100_regs));
-	of_iounmap(&op->resource[2], info->screen_base, par->fbsize);
+	of_iounmap(&op->resource[2], info->screen_base, info->fix.smem_len);
 
 	framebuffer_release(info);
 
diff --git a/drivers/video/ps3fb.c b/drivers/video/ps3fb.c
index e00c1df..c0af638 100644
--- a/drivers/video/ps3fb.c
+++ b/drivers/video/ps3fb.c
@@ -32,25 +32,16 @@
 #include <linux/init.h>
 
 #include <asm/abs_addr.h>
+#include <asm/iommu.h>
 #include <asm/lv1call.h>
 #include <asm/ps3av.h>
 #include <asm/ps3fb.h>
 #include <asm/ps3.h>
+#include <asm/ps3gpu.h>
 
 
 #define DEVICE_NAME		"ps3fb"
 
-#define L1GPU_CONTEXT_ATTRIBUTE_DISPLAY_SYNC	0x101
-#define L1GPU_CONTEXT_ATTRIBUTE_DISPLAY_FLIP	0x102
-#define L1GPU_CONTEXT_ATTRIBUTE_FB_SETUP	0x600
-#define L1GPU_CONTEXT_ATTRIBUTE_FB_BLIT		0x601
-#define L1GPU_CONTEXT_ATTRIBUTE_FB_BLIT_SYNC	0x602
-
-#define L1GPU_FB_BLIT_WAIT_FOR_COMPLETION	(1ULL << 32)
-
-#define L1GPU_DISPLAY_SYNC_HSYNC		1
-#define L1GPU_DISPLAY_SYNC_VSYNC		2
-
 #define GPU_CMD_BUF_SIZE			(2 * 1024 * 1024)
 #define GPU_FB_START				(64 * 1024)
 #define GPU_IOIF				(0x0d000000UL)
@@ -462,33 +453,27 @@
 	src_offset += GPU_FB_START;
 
 	mutex_lock(&ps3_gpu_mutex);
-	status = lv1_gpu_context_attribute(ps3fb.context_handle,
-					   L1GPU_CONTEXT_ATTRIBUTE_FB_BLIT,
-					   dst_offset, GPU_IOIF + src_offset,
-					   L1GPU_FB_BLIT_WAIT_FOR_COMPLETION |
-					   (width << 16) | height,
-					   line_length);
+	status = lv1_gpu_fb_blit(ps3fb.context_handle, dst_offset,
+				 GPU_IOIF + src_offset,
+				 L1GPU_FB_BLIT_WAIT_FOR_COMPLETION |
+				 (width << 16) | height,
+				 line_length);
 	mutex_unlock(&ps3_gpu_mutex);
 
 	if (status)
-		dev_err(dev,
-			"%s: lv1_gpu_context_attribute FB_BLIT failed: %d\n",
-			__func__, status);
+		dev_err(dev, "%s: lv1_gpu_fb_blit failed: %d\n", __func__,
+			status);
 #ifdef HEAD_A
-	status = lv1_gpu_context_attribute(ps3fb.context_handle,
-					   L1GPU_CONTEXT_ATTRIBUTE_DISPLAY_FLIP,
-					   0, frame_offset, 0, 0);
+	status = lv1_gpu_display_flip(ps3fb.context_handle, 0, frame_offset);
 	if (status)
-		dev_err(dev, "%s: lv1_gpu_context_attribute FLIP failed: %d\n",
-			__func__, status);
+		dev_err(dev, "%s: lv1_gpu_display_flip failed: %d\n", __func__,
+			status);
 #endif
 #ifdef HEAD_B
-	status = lv1_gpu_context_attribute(ps3fb.context_handle,
-					   L1GPU_CONTEXT_ATTRIBUTE_DISPLAY_FLIP,
-					   1, frame_offset, 0, 0);
+	status = lv1_gpu_display_flip(ps3fb.context_handle, 1, frame_offset);
 	if (status)
-		dev_err(dev, "%s: lv1_gpu_context_attribute FLIP failed: %d\n",
-			__func__, status);
+		dev_err(dev, "%s: lv1_gpu_display_flip failed: %d\n", __func__,
+			status);
 #endif
 }
 
@@ -956,73 +941,6 @@
 }
 
 
-static int ps3fb_vsync_settings(struct gpu_driver_info *dinfo,
-				struct device *dev)
-{
-	int error;
-
-	dev_dbg(dev, "version_driver:%x\n", dinfo->version_driver);
-	dev_dbg(dev, "irq outlet:%x\n", dinfo->irq.irq_outlet);
-	dev_dbg(dev,
-		"version_gpu: %x memory_size: %x ch: %x core_freq: %d "
-		"mem_freq:%d\n",
-		dinfo->version_gpu, dinfo->memory_size, dinfo->hardware_channel,
-		dinfo->nvcore_frequency/1000000, dinfo->memory_frequency/1000000);
-
-	if (dinfo->version_driver != GPU_DRIVER_INFO_VERSION) {
-		dev_err(dev, "%s: version_driver err:%x\n", __func__,
-			dinfo->version_driver);
-		return -EINVAL;
-	}
-
-	error = ps3_irq_plug_setup(PS3_BINDING_CPU_ANY, dinfo->irq.irq_outlet,
-				   &ps3fb.irq_no);
-	if (error) {
-		dev_err(dev, "%s: ps3_alloc_irq failed %d\n", __func__, error);
-		return error;
-	}
-
-	error = request_irq(ps3fb.irq_no, ps3fb_vsync_interrupt, IRQF_DISABLED,
-			    DEVICE_NAME, dev);
-	if (error) {
-		dev_err(dev, "%s: request_irq failed %d\n", __func__, error);
-		ps3_irq_plug_destroy(ps3fb.irq_no);
-		return error;
-	}
-
-	dinfo->irq.mask = (1 << GPU_INTR_STATUS_VSYNC_1) |
-			  (1 << GPU_INTR_STATUS_FLIP_1);
-	return 0;
-}
-
-static int ps3fb_xdr_settings(u64 xdr_lpar, struct device *dev)
-{
-	int status;
-
-	status = lv1_gpu_context_iomap(ps3fb.context_handle, GPU_IOIF,
-				       xdr_lpar, ps3fb_videomemory.size, 0);
-	if (status) {
-		dev_err(dev, "%s: lv1_gpu_context_iomap failed: %d\n",
-			__func__, status);
-		return -ENXIO;
-	}
-	dev_dbg(dev, "video:%p ioif:%lx lpar:%llx size:%lx\n",
-		ps3fb_videomemory.address, GPU_IOIF, xdr_lpar,
-		ps3fb_videomemory.size);
-
-	status = lv1_gpu_context_attribute(ps3fb.context_handle,
-					   L1GPU_CONTEXT_ATTRIBUTE_FB_SETUP,
-					   xdr_lpar, GPU_CMD_BUF_SIZE,
-					   GPU_IOIF, 0);
-	if (status) {
-		dev_err(dev,
-			"%s: lv1_gpu_context_attribute FB_SETUP failed: %d\n",
-			__func__, status);
-		return -ENXIO;
-	}
-	return 0;
-}
-
 static struct fb_ops ps3fb_ops = {
 	.fb_open	= ps3fb_open,
 	.fb_release	= ps3fb_release,
@@ -1048,49 +966,18 @@
 	.accel =	FB_ACCEL_NONE,
 };
 
-static int ps3fb_set_sync(struct device *dev)
-{
-	int status;
-
-#ifdef HEAD_A
-	status = lv1_gpu_context_attribute(0x0,
-					   L1GPU_CONTEXT_ATTRIBUTE_DISPLAY_SYNC,
-					   0, L1GPU_DISPLAY_SYNC_VSYNC, 0, 0);
-	if (status) {
-		dev_err(dev,
-			"%s: lv1_gpu_context_attribute DISPLAY_SYNC failed: "
-			"%d\n",
-			__func__, status);
-		return -1;
-	}
-#endif
-#ifdef HEAD_B
-	status = lv1_gpu_context_attribute(0x0,
-					   L1GPU_CONTEXT_ATTRIBUTE_DISPLAY_SYNC,
-					   1, L1GPU_DISPLAY_SYNC_VSYNC, 0, 0);
-
-	if (status) {
-		dev_err(dev,
-			"%s: lv1_gpu_context_attribute DISPLAY_SYNC failed: "
-			"%d\n",
-			__func__, status);
-		return -1;
-	}
-#endif
-	return 0;
-}
-
 static int __devinit ps3fb_probe(struct ps3_system_bus_device *dev)
 {
 	struct fb_info *info;
 	struct ps3fb_par *par;
-	int retval = -ENOMEM;
+	int retval;
 	u64 ddr_lpar = 0;
 	u64 lpar_dma_control = 0;
 	u64 lpar_driver_info = 0;
 	u64 lpar_reports = 0;
 	u64 lpar_reports_size = 0;
 	u64 xdr_lpar;
+	struct gpu_driver_info *dinfo;
 	void *fb_start;
 	int status;
 	struct task_struct *task;
@@ -1101,8 +988,8 @@
 		return -ENOMEM;
 	}
 
-	status = ps3_open_hv_device(dev);
-	if (status) {
+	retval = ps3_open_hv_device(dev);
+	if (retval) {
 		dev_err(&dev->core, "%s: ps3_open_hv_device failed\n",
 			__func__);
 		goto err;
@@ -1116,7 +1003,24 @@
 	atomic_set(&ps3fb.ext_flip, 0);	/* for flip with vsync */
 	init_waitqueue_head(&ps3fb.wait_vsync);
 
-	ps3fb_set_sync(&dev->core);
+#ifdef HEAD_A
+	status = lv1_gpu_display_sync(0x0, 0, L1GPU_DISPLAY_SYNC_VSYNC);
+	if (status) {
+		dev_err(&dev->core, "%s: lv1_gpu_display_sync failed: %d\n",
+			__func__, status);
+		retval = -ENODEV;
+		goto err_close_device;
+	}
+#endif
+#ifdef HEAD_B
+	status = lv1_gpu_display_sync(0x0, 1, L1GPU_DISPLAY_SYNC_VSYNC);
+	if (status) {
+		dev_err(&dev->core, "%s: lv1_gpu_display_sync failed: %d\n",
+			__func__, status);
+		retval = -ENODEV;
+		goto err_close_device;
+	}
+#endif
 
 	max_ps3fb_size = _ALIGN_UP(GPU_IOIF, 256*1024*1024) - GPU_IOIF;
 	if (ps3fb_videomemory.size > max_ps3fb_size) {
@@ -1131,7 +1035,7 @@
 	if (status) {
 		dev_err(&dev->core, "%s: lv1_gpu_memory_allocate failed: %d\n",
 			__func__, status);
-		goto err;
+		goto err_close_device;
 	}
 	dev_dbg(&dev->core, "ddr:lpar:0x%llx\n", ddr_lpar);
 
@@ -1141,33 +1045,85 @@
 					  &lpar_reports, &lpar_reports_size);
 	if (status) {
 		dev_err(&dev->core,
-			"%s: lv1_gpu_context_attribute failed: %d\n", __func__,
+			"%s: lv1_gpu_context_allocate failed: %d\n", __func__,
 			status);
 		goto err_gpu_memory_free;
 	}
 
 	/* vsync interrupt */
-	ps3fb.dinfo = (void __force *)ioremap(lpar_driver_info, 128 * 1024);
-	if (!ps3fb.dinfo) {
+	dinfo = (void __force *)ioremap(lpar_driver_info, 128 * 1024);
+	if (!dinfo) {
 		dev_err(&dev->core, "%s: ioremap failed\n", __func__);
 		goto err_gpu_context_free;
 	}
 
-	retval = ps3fb_vsync_settings(ps3fb.dinfo, &dev->core);
-	if (retval)
+	ps3fb.dinfo = dinfo;
+	dev_dbg(&dev->core, "version_driver:%x\n", dinfo->version_driver);
+	dev_dbg(&dev->core, "irq outlet:%x\n", dinfo->irq.irq_outlet);
+	dev_dbg(&dev->core, "version_gpu: %x memory_size: %x ch: %x "
+		"core_freq: %d mem_freq:%d\n", dinfo->version_gpu,
+		dinfo->memory_size, dinfo->hardware_channel,
+		dinfo->nvcore_frequency/1000000,
+		dinfo->memory_frequency/1000000);
+
+	if (dinfo->version_driver != GPU_DRIVER_INFO_VERSION) {
+		dev_err(&dev->core, "%s: version_driver err:%x\n", __func__,
+			dinfo->version_driver);
+		retval = -EINVAL;
 		goto err_iounmap_dinfo;
+	}
+
+	retval = ps3_irq_plug_setup(PS3_BINDING_CPU_ANY, dinfo->irq.irq_outlet,
+				    &ps3fb.irq_no);
+	if (retval) {
+		dev_err(&dev->core, "%s: ps3_irq_plug_setup failed %d\n", __func__,
+			retval);
+		goto err_iounmap_dinfo;
+	}
+
+	retval = request_irq(ps3fb.irq_no, ps3fb_vsync_interrupt,
+			     IRQF_DISABLED, DEVICE_NAME, &dev->core);
+	if (retval) {
+		dev_err(&dev->core, "%s: request_irq failed %d\n", __func__,
+			retval);
+		goto err_destroy_plug;
+	}
+
+	dinfo->irq.mask = (1 << GPU_INTR_STATUS_VSYNC_1) |
+			  (1 << GPU_INTR_STATUS_FLIP_1);
 
 	/* Clear memory to prevent kernel info leakage into userspace */
 	memset(ps3fb_videomemory.address, 0, ps3fb_videomemory.size);
 
 	xdr_lpar = ps3_mm_phys_to_lpar(__pa(ps3fb_videomemory.address));
-	retval = ps3fb_xdr_settings(xdr_lpar, &dev->core);
-	if (retval)
+
+	status = lv1_gpu_context_iomap(ps3fb.context_handle, GPU_IOIF,
+				       xdr_lpar, ps3fb_videomemory.size,
+				       CBE_IOPTE_PP_W | CBE_IOPTE_PP_R |
+				       CBE_IOPTE_M);
+	if (status) {
+		dev_err(&dev->core, "%s: lv1_gpu_context_iomap failed: %d\n",
+			__func__, status);
+		retval = -ENXIO;
 		goto err_free_irq;
+	}
+
+	dev_dbg(&dev->core, "video:%p ioif:%lx lpar:%llx size:%lx\n",
+		ps3fb_videomemory.address, GPU_IOIF, xdr_lpar,
+		ps3fb_videomemory.size);
+
+	status = lv1_gpu_fb_setup(ps3fb.context_handle, xdr_lpar,
+				  GPU_CMD_BUF_SIZE, GPU_IOIF);
+	if (status) {
+		dev_err(&dev->core, "%s: lv1_gpu_fb_setup failed: %d\n",
+			__func__, status);
+		retval = -ENXIO;
+		goto err_context_unmap;
+	}
 
 	info = framebuffer_alloc(sizeof(struct ps3fb_par), &dev->core);
 	if (!info)
-		goto err_free_irq;
+		goto err_context_fb_close;
 
 	par = info->par;
 	par->mode_id = ~ps3fb_mode;	/* != ps3fb_mode, to trigger change */
@@ -1210,7 +1166,7 @@
 	if (retval < 0)
 		goto err_fb_dealloc;
 
-	dev->core.driver_data = info;
+	ps3_system_bus_set_drvdata(dev, info);
 
 	dev_info(info->device, "%s %s, using %u KiB of video memory\n",
 		 dev_driver_string(info->dev), dev_name(info->dev),
@@ -1232,8 +1188,14 @@
 	fb_dealloc_cmap(&info->cmap);
 err_framebuffer_release:
 	framebuffer_release(info);
+err_context_fb_close:
+	lv1_gpu_fb_close(ps3fb.context_handle);
+err_context_unmap:
+	lv1_gpu_context_iomap(ps3fb.context_handle, GPU_IOIF, xdr_lpar,
+			      ps3fb_videomemory.size, CBE_IOPTE_M);
 err_free_irq:
 	free_irq(ps3fb.irq_no, &dev->core);
+err_destroy_plug:
 	ps3_irq_plug_destroy(ps3fb.irq_no);
 err_iounmap_dinfo:
 	iounmap((u8 __force __iomem *)ps3fb.dinfo);
@@ -1241,14 +1203,16 @@
 	lv1_gpu_context_free(ps3fb.context_handle);
 err_gpu_memory_free:
 	lv1_gpu_memory_free(ps3fb.memory_handle);
+err_close_device:
+	ps3_close_hv_device(dev);
 err:
 	return retval;
 }
 
 static int ps3fb_shutdown(struct ps3_system_bus_device *dev)
 {
-	int status;
-	struct fb_info *info = dev->core.driver_data;
+	struct fb_info *info = ps3_system_bus_get_drvdata(dev);
+	u64 xdr_lpar = ps3_mm_phys_to_lpar(__pa(ps3fb_videomemory.address));
 
 	dev_dbg(&dev->core, " -> %s:%d\n", __func__, __LINE__);
 
@@ -1268,20 +1232,14 @@
 		unregister_framebuffer(info);
 		fb_dealloc_cmap(&info->cmap);
 		framebuffer_release(info);
-		info = dev->core.driver_data = NULL;
+		ps3_system_bus_set_drvdata(dev, NULL);
 	}
 	iounmap((u8 __force __iomem *)ps3fb.dinfo);
-
-	status = lv1_gpu_context_free(ps3fb.context_handle);
-	if (status)
-		dev_dbg(&dev->core, "lv1_gpu_context_free failed: %d\n",
-			status);
-
-	status = lv1_gpu_memory_free(ps3fb.memory_handle);
-	if (status)
-		dev_dbg(&dev->core, "lv1_gpu_memory_free failed: %d\n",
-			status);
-
+	lv1_gpu_fb_close(ps3fb.context_handle);
+	lv1_gpu_context_iomap(ps3fb.context_handle, GPU_IOIF, xdr_lpar,
+			      ps3fb_videomemory.size, CBE_IOPTE_M);
+	lv1_gpu_context_free(ps3fb.context_handle);
+	lv1_gpu_memory_free(ps3fb.memory_handle);
 	ps3_close_hv_device(dev);
 	dev_dbg(&dev->core, " <- %s:%d\n", __func__, __LINE__);
 
diff --git a/drivers/video/tdfxfb.c b/drivers/video/tdfxfb.c
index 89f231d..ff43c88 100644
--- a/drivers/video/tdfxfb.c
+++ b/drivers/video/tdfxfb.c
@@ -1315,7 +1315,6 @@
 
 	strlcpy(chan->adapter.name, name, sizeof(chan->adapter.name));
 	chan->adapter.owner		= THIS_MODULE;
-	chan->adapter.class		= I2C_CLASS_TV_ANALOG;
 	chan->adapter.algo_data		= &chan->algo;
 	chan->adapter.dev.parent	= dev;
 	chan->algo.setsda		= tdfxfb_i2c_setsda;
diff --git a/drivers/video/xen-fbfront.c b/drivers/video/xen-fbfront.c
index 2493f05..15502d5 100644
--- a/drivers/video/xen-fbfront.c
+++ b/drivers/video/xen-fbfront.c
@@ -384,7 +384,7 @@
 		fb_size = XENFB_DEFAULT_FB_LEN;
 	}
 
-	dev->dev.driver_data = info;
+	dev_set_drvdata(&dev->dev, info);
 	info->xbdev = dev;
 	info->irq = -1;
 	info->x1 = info->y1 = INT_MAX;
@@ -503,7 +503,7 @@
 
 static int xenfb_resume(struct xenbus_device *dev)
 {
-	struct xenfb_info *info = dev->dev.driver_data;
+	struct xenfb_info *info = dev_get_drvdata(&dev->dev);
 
 	xenfb_disconnect_backend(info);
 	xenfb_init_shared_page(info, info->fb_info);
@@ -512,7 +512,7 @@
 
 static int xenfb_remove(struct xenbus_device *dev)
 {
-	struct xenfb_info *info = dev->dev.driver_data;
+	struct xenfb_info *info = dev_get_drvdata(&dev->dev);
 
 	xenfb_disconnect_backend(info);
 	if (info->fb_info) {
@@ -621,7 +621,7 @@
 static void xenfb_backend_changed(struct xenbus_device *dev,
 				  enum xenbus_state backend_state)
 {
-	struct xenfb_info *info = dev->dev.driver_data;
+	struct xenfb_info *info = dev_get_drvdata(&dev->dev);
 	int val;
 
 	switch (backend_state) {
diff --git a/fs/befs/linuxvfs.c b/fs/befs/linuxvfs.c
index 9367b62..89cd2de 100644
--- a/fs/befs/linuxvfs.c
+++ b/fs/befs/linuxvfs.c
@@ -513,7 +513,7 @@
 {
 	struct nls_table *nls = BEFS_SB(sb)->nls;
 	int i, o;
-	wchar_t uni;
+	unicode_t uni;
 	int unilen, utflen;
 	char *result;
 	/* The utf8->nls conversion won't make the final nls string bigger
@@ -539,16 +539,16 @@
 	for (i = o = 0; i < in_len; i += utflen, o += unilen) {
 
 		/* convert from UTF-8 to Unicode */
-		utflen = utf8_mbtowc(&uni, &in[i], in_len - i);
-		if (utflen < 0) {
+		utflen = utf8_to_utf32(&in[i], in_len - i, &uni);
+		if (utflen < 0)
 			goto conv_err;
-		}
 
 		/* convert from Unicode to nls */
-		unilen = nls->uni2char(uni, &result[o], in_len - o);
-		if (unilen < 0) {
+		if (uni > MAX_WCHAR_T)
 			goto conv_err;
-		}
+		unilen = nls->uni2char(uni, &result[o], in_len - o);
+		if (unilen < 0)
+			goto conv_err;
 	}
 	result[o] = '\0';
 	*out_len = o;
@@ -619,15 +619,13 @@
 
 		/* convert from nls to unicode */
 		unilen = nls->char2uni(&in[i], in_len - i, &uni);
-		if (unilen < 0) {
+		if (unilen < 0)
 			goto conv_err;
-		}
 
 		/* convert from unicode to UTF-8 */
-		utflen = utf8_wctomb(&result[o], uni, 3);
-		if (utflen <= 0) {
+		utflen = utf32_to_utf8(uni, &result[o], 3);
+		if (utflen <= 0)
 			goto conv_err;
-		}
 	}
 
 	result[o] = '\0';
diff --git a/fs/bio.c b/fs/bio.c
index 5f80848..24c9140 100644
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -25,7 +25,6 @@
 #include <linux/module.h>
 #include <linux/mempool.h>
 #include <linux/workqueue.h>
-#include <linux/blktrace_api.h>
 #include <scsi/sg.h>		/* for struct sg_iovec */
 
 #include <trace/events/block.h>
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 0d50d49..d28d29c 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -42,6 +42,8 @@
 static struct extent_io_ops btree_extent_io_ops;
 static void end_workqueue_fn(struct btrfs_work *work);
 
+static atomic_t btrfs_bdi_num = ATOMIC_INIT(0);
+
 /*
  * end_io_wq structs are used to do processing in task context when an IO is
  * complete.  This is used during reads to verify checksums, and it is used
@@ -1342,12 +1344,25 @@
 	free_extent_map(em);
 }
 
+/*
+ * If this fails, caller must call bdi_destroy() to get rid of the
+ * bdi again.
+ */
 static int setup_bdi(struct btrfs_fs_info *info, struct backing_dev_info *bdi)
 {
-	bdi_init(bdi);
+	int err;
+
+	bdi->capabilities = BDI_CAP_MAP_COPY;
+	err = bdi_init(bdi);
+	if (err)
+		return err;
+
+	err = bdi_register(bdi, NULL, "btrfs-%d",
+				atomic_inc_return(&btrfs_bdi_num));
+	if (err)
+		return err;
+
 	bdi->ra_pages	= default_backing_dev_info.ra_pages;
-	bdi->state		= 0;
-	bdi->capabilities	= default_backing_dev_info.capabilities;
 	bdi->unplug_io_fn	= btrfs_unplug_io_fn;
 	bdi->unplug_io_data	= info;
 	bdi->congested_fn	= btrfs_congested_fn;
@@ -1569,7 +1584,8 @@
 	fs_info->sb = sb;
 	fs_info->max_extent = (u64)-1;
 	fs_info->max_inline = 8192 * 1024;
-	setup_bdi(fs_info, &fs_info->bdi);
+	if (setup_bdi(fs_info, &fs_info->bdi))
+		goto fail_bdi;
 	fs_info->btree_inode = new_inode(sb);
 	fs_info->btree_inode->i_ino = 1;
 	fs_info->btree_inode->i_nlink = 1;
@@ -1946,8 +1962,8 @@
 
 	btrfs_close_devices(fs_info->fs_devices);
 	btrfs_mapping_tree_free(&fs_info->mapping_tree);
+fail_bdi:
 	bdi_destroy(&fs_info->bdi);
-
 fail:
 	kfree(extent_root);
 	kfree(tree_root);
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index 2e177d7..4e83457 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -543,13 +543,13 @@
 			btrfs_free_log(trans, root);
 			btrfs_update_reloc_root(trans, root);
 
-			if (root->commit_root == root->node)
-				continue;
+			if (root->commit_root != root->node) {
+				free_extent_buffer(root->commit_root);
+				root->commit_root = btrfs_root_node(root);
+				btrfs_set_root_node(&root->root_item,
+						    root->node);
+			}
 
-			free_extent_buffer(root->commit_root);
-			root->commit_root = btrfs_root_node(root);
-
-			btrfs_set_root_node(&root->root_item, root->node);
 			err = btrfs_update_root(trans, fs_info->tree_root,
 						&root->root_key,
 						&root->root_item);
diff --git a/fs/debugfs/file.c b/fs/debugfs/file.c
index 33a9012..4d74fc7 100644
--- a/fs/debugfs/file.c
+++ b/fs/debugfs/file.c
@@ -67,6 +67,8 @@
 	return 0;
 }
 DEFINE_SIMPLE_ATTRIBUTE(fops_u8, debugfs_u8_get, debugfs_u8_set, "%llu\n");
+DEFINE_SIMPLE_ATTRIBUTE(fops_u8_ro, debugfs_u8_get, NULL, "%llu\n");
+DEFINE_SIMPLE_ATTRIBUTE(fops_u8_wo, NULL, debugfs_u8_set, "%llu\n");
 
 /**
  * debugfs_create_u8 - create a debugfs file that is used to read and write an unsigned 8-bit value
@@ -95,6 +97,13 @@
 struct dentry *debugfs_create_u8(const char *name, mode_t mode,
 				 struct dentry *parent, u8 *value)
 {
+	/* if there are no write bits set, make read only */
+	if (!(mode & S_IWUGO))
+		return debugfs_create_file(name, mode, parent, value, &fops_u8_ro);
+	/* if there are no read bits set, make write only */
+	if (!(mode & S_IRUGO))
+		return debugfs_create_file(name, mode, parent, value, &fops_u8_wo);
+
 	return debugfs_create_file(name, mode, parent, value, &fops_u8);
 }
 EXPORT_SYMBOL_GPL(debugfs_create_u8);
@@ -110,6 +119,8 @@
 	return 0;
 }
 DEFINE_SIMPLE_ATTRIBUTE(fops_u16, debugfs_u16_get, debugfs_u16_set, "%llu\n");
+DEFINE_SIMPLE_ATTRIBUTE(fops_u16_ro, debugfs_u16_get, NULL, "%llu\n");
+DEFINE_SIMPLE_ATTRIBUTE(fops_u16_wo, NULL, debugfs_u16_set, "%llu\n");
 
 /**
  * debugfs_create_u16 - create a debugfs file that is used to read and write an unsigned 16-bit value
@@ -138,6 +149,13 @@
 struct dentry *debugfs_create_u16(const char *name, mode_t mode,
 				  struct dentry *parent, u16 *value)
 {
+	/* if there are no write bits set, make read only */
+	if (!(mode & S_IWUGO))
+		return debugfs_create_file(name, mode, parent, value, &fops_u16_ro);
+	/* if there are no read bits set, make write only */
+	if (!(mode & S_IRUGO))
+		return debugfs_create_file(name, mode, parent, value, &fops_u16_wo);
+
 	return debugfs_create_file(name, mode, parent, value, &fops_u16);
 }
 EXPORT_SYMBOL_GPL(debugfs_create_u16);
@@ -153,6 +171,8 @@
 	return 0;
 }
 DEFINE_SIMPLE_ATTRIBUTE(fops_u32, debugfs_u32_get, debugfs_u32_set, "%llu\n");
+DEFINE_SIMPLE_ATTRIBUTE(fops_u32_ro, debugfs_u32_get, NULL, "%llu\n");
+DEFINE_SIMPLE_ATTRIBUTE(fops_u32_wo, NULL, debugfs_u32_set, "%llu\n");
 
 /**
  * debugfs_create_u32 - create a debugfs file that is used to read and write an unsigned 32-bit value
@@ -181,6 +201,13 @@
 struct dentry *debugfs_create_u32(const char *name, mode_t mode,
 				 struct dentry *parent, u32 *value)
 {
+	/* if there are no write bits set, make read only */
+	if (!(mode & S_IWUGO))
+		return debugfs_create_file(name, mode, parent, value, &fops_u32_ro);
+	/* if there are no read bits set, make write only */
+	if (!(mode & S_IRUGO))
+		return debugfs_create_file(name, mode, parent, value, &fops_u32_wo);
+
 	return debugfs_create_file(name, mode, parent, value, &fops_u32);
 }
 EXPORT_SYMBOL_GPL(debugfs_create_u32);
@@ -197,6 +224,8 @@
 	return 0;
 }
 DEFINE_SIMPLE_ATTRIBUTE(fops_u64, debugfs_u64_get, debugfs_u64_set, "%llu\n");
+DEFINE_SIMPLE_ATTRIBUTE(fops_u64_ro, debugfs_u64_get, NULL, "%llu\n");
+DEFINE_SIMPLE_ATTRIBUTE(fops_u64_wo, NULL, debugfs_u64_set, "%llu\n");
 
 /**
  * debugfs_create_u64 - create a debugfs file that is used to read and write an unsigned 64-bit value
@@ -225,15 +254,28 @@
 struct dentry *debugfs_create_u64(const char *name, mode_t mode,
 				 struct dentry *parent, u64 *value)
 {
+	/* if there are no write bits set, make read only */
+	if (!(mode & S_IWUGO))
+		return debugfs_create_file(name, mode, parent, value, &fops_u64_ro);
+	/* if there are no read bits set, make write only */
+	if (!(mode & S_IRUGO))
+		return debugfs_create_file(name, mode, parent, value, &fops_u64_wo);
+
 	return debugfs_create_file(name, mode, parent, value, &fops_u64);
 }
 EXPORT_SYMBOL_GPL(debugfs_create_u64);
 
 DEFINE_SIMPLE_ATTRIBUTE(fops_x8, debugfs_u8_get, debugfs_u8_set, "0x%02llx\n");
+DEFINE_SIMPLE_ATTRIBUTE(fops_x8_ro, debugfs_u8_get, NULL, "0x%02llx\n");
+DEFINE_SIMPLE_ATTRIBUTE(fops_x8_wo, NULL, debugfs_u8_set, "0x%02llx\n");
 
 DEFINE_SIMPLE_ATTRIBUTE(fops_x16, debugfs_u16_get, debugfs_u16_set, "0x%04llx\n");
+DEFINE_SIMPLE_ATTRIBUTE(fops_x16_ro, debugfs_u16_get, NULL, "0x%04llx\n");
+DEFINE_SIMPLE_ATTRIBUTE(fops_x16_wo, NULL, debugfs_u16_set, "0x%04llx\n");
 
 DEFINE_SIMPLE_ATTRIBUTE(fops_x32, debugfs_u32_get, debugfs_u32_set, "0x%08llx\n");
+DEFINE_SIMPLE_ATTRIBUTE(fops_x32_ro, debugfs_u32_get, NULL, "0x%08llx\n");
+DEFINE_SIMPLE_ATTRIBUTE(fops_x32_wo, NULL, debugfs_u32_set, "0x%08llx\n");
 
 /*
  * debugfs_create_x{8,16,32} - create a debugfs file that is used to read and write an unsigned {8,16,32}-bit value
@@ -256,6 +298,13 @@
 struct dentry *debugfs_create_x8(const char *name, mode_t mode,
 				 struct dentry *parent, u8 *value)
 {
+	/* if there are no write bits set, make read only */
+	if (!(mode & S_IWUGO))
+		return debugfs_create_file(name, mode, parent, value, &fops_x8_ro);
+	/* if there are no read bits set, make write only */
+	if (!(mode & S_IRUGO))
+		return debugfs_create_file(name, mode, parent, value, &fops_x8_wo);
+
 	return debugfs_create_file(name, mode, parent, value, &fops_x8);
 }
 EXPORT_SYMBOL_GPL(debugfs_create_x8);
@@ -273,6 +322,13 @@
 struct dentry *debugfs_create_x16(const char *name, mode_t mode,
 				 struct dentry *parent, u16 *value)
 {
+	/* if there are no write bits set, make read only */
+	if (!(mode & S_IWUGO))
+		return debugfs_create_file(name, mode, parent, value, &fops_x16_ro);
+	/* if there are no read bits set, make write only */
+	if (!(mode & S_IRUGO))
+		return debugfs_create_file(name, mode, parent, value, &fops_x16_wo);
+
 	return debugfs_create_file(name, mode, parent, value, &fops_x16);
 }
 EXPORT_SYMBOL_GPL(debugfs_create_x16);
@@ -290,6 +346,13 @@
 struct dentry *debugfs_create_x32(const char *name, mode_t mode,
 				 struct dentry *parent, u32 *value)
 {
+	/* if there are no write bits set, make read only */
+	if (!(mode & S_IWUGO))
+		return debugfs_create_file(name, mode, parent, value, &fops_x32_ro);
+	/* if there are no read bits set, make write only */
+	if (!(mode & S_IRUGO))
+		return debugfs_create_file(name, mode, parent, value, &fops_x32_wo);
+
 	return debugfs_create_file(name, mode, parent, value, &fops_x32);
 }
 EXPORT_SYMBOL_GPL(debugfs_create_x32);
@@ -419,7 +482,7 @@
 };
 
 /**
- * debugfs_create_blob - create a debugfs file that is used to read and write a binary blob
+ * debugfs_create_blob - create a debugfs file that is used to read a binary blob
  * @name: a pointer to a string containing the name of the file to create.
  * @mode: the permission that the file should have
  * @parent: a pointer to the parent dentry for this file.  This should be a
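
[Editor's note: a minimal sketch of how a caller benefits from the mode-based
dispatch added above. The module name, directory, and variable are illustrative,
not part of this patch.]

/* Sketch only: "my_driver", my_dir and my_counter are hypothetical. */
#include <linux/debugfs.h>
#include <linux/module.h>

static u32 my_counter;
static struct dentry *my_dir;

static int __init my_init(void)
{
	my_dir = debugfs_create_dir("my_driver", NULL);
	if (!my_dir)
		return -ENOMEM;

	/* 0444 has no write bits, so the code above selects fops_u32_ro and
	 * writes are rejected instead of silently updating the value. */
	debugfs_create_u32("counter", 0444, my_dir, &my_counter);
	return 0;
}

static void __exit my_exit(void)
{
	debugfs_remove_recursive(my_dir);
}

module_init(my_init);
module_exit(my_exit);
MODULE_LICENSE("GPL");
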
diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
index 0662ba6..d22438e 100644
--- a/fs/debugfs/inode.c
+++ b/fs/debugfs/inode.c
@@ -403,6 +403,7 @@
 		}
 		child = list_entry(parent->d_subdirs.next, struct dentry,
 				d_u.d_child);
+ next_sibling:
 
 		/*
 		 * If "child" isn't empty, walk down the tree and
@@ -417,6 +418,16 @@
 		__debugfs_remove(child, parent);
 		if (parent->d_subdirs.next == &child->d_u.d_child) {
 			/*
+			 * Try the next sibling.
+			 */
+			if (child->d_u.d_child.next != &parent->d_subdirs) {
+				child = list_entry(child->d_u.d_child.next,
+						   struct dentry,
+						   d_u.d_child);
+				goto next_sibling;
+			}
+
+			/*
 			 * Avoid infinite loop if we fail to remove
 			 * one dentry.
 			 */
diff --git a/fs/fat/cache.c b/fs/fat/cache.c
index b426022..923990e 100644
--- a/fs/fat/cache.c
+++ b/fs/fat/cache.c
@@ -241,7 +241,7 @@
 	while (*fclus < cluster) {
 		/* prevent the infinite loop of cluster chain */
 		if (*fclus > limit) {
-			fat_fs_panic(sb, "%s: detected the cluster chain loop"
+			fat_fs_error(sb, "%s: detected the cluster chain loop"
 				     " (i_pos %lld)", __func__,
 				     MSDOS_I(inode)->i_pos);
 			nr = -EIO;
@@ -252,7 +252,7 @@
 		if (nr < 0)
 			goto out;
 		else if (nr == FAT_ENT_FREE) {
-			fat_fs_panic(sb, "%s: invalid cluster chain"
+			fat_fs_error(sb, "%s: invalid cluster chain"
 				     " (i_pos %lld)", __func__,
 				     MSDOS_I(inode)->i_pos);
 			nr = -EIO;
@@ -285,7 +285,7 @@
 	if (ret < 0)
 		return ret;
 	else if (ret == FAT_ENT_EOF) {
-		fat_fs_panic(sb, "%s: request beyond EOF (i_pos %lld)",
+		fat_fs_error(sb, "%s: request beyond EOF (i_pos %lld)",
 			     __func__, MSDOS_I(inode)->i_pos);
 		return -EIO;
 	}
diff --git a/fs/fat/dir.c b/fs/fat/dir.c
index f350029..38ff75a 100644
--- a/fs/fat/dir.c
+++ b/fs/fat/dir.c
@@ -22,6 +22,19 @@
 #include <asm/uaccess.h>
 #include "fat.h"
 
+/*
+ * Maximum buffer size of short name.
+ * [(MSDOS_NAME + '.') * max one char + nul]
+ * For msdos style, ['.' (hidden) + MSDOS_NAME + '.' + nul]
+ */
+#define FAT_MAX_SHORT_SIZE	((MSDOS_NAME + 1) * NLS_MAX_CHARSET_SIZE + 1)
+/*
+ * Maximum buffer size of unicode chars from slots.
+ * [(max longname slots * 13 (size in a slot) + nul) * sizeof(wchar_t)]
+ */
+#define FAT_MAX_UNI_CHARS	((MSDOS_SLOTS - 1) * 13 + 1)
+#define FAT_MAX_UNI_SIZE	(FAT_MAX_UNI_CHARS * sizeof(wchar_t))
+
 static inline loff_t fat_make_i_pos(struct super_block *sb,
 				    struct buffer_head *bh,
 				    struct msdos_dir_entry *de)
@@ -171,7 +184,8 @@
 				unsigned char *buf, int size)
 {
 	if (sbi->options.utf8)
-		return utf8_wcstombs(buf, uni, size);
+		return utf16s_to_utf8s(uni, FAT_MAX_UNI_CHARS,
+				UTF16_HOST_ENDIAN, buf, size);
 	else
 		return uni16_to_x8(buf, uni, size, sbi->options.unicode_xlate,
 				   sbi->nls_io);
@@ -325,19 +339,6 @@
 }
 
 /*
- * Maximum buffer size of short name.
- * [(MSDOS_NAME + '.') * max one char + nul]
- * For msdos style, ['.' (hidden) + MSDOS_NAME + '.' + nul]
- */
-#define FAT_MAX_SHORT_SIZE	((MSDOS_NAME + 1) * NLS_MAX_CHARSET_SIZE + 1)
-/*
- * Maximum buffer size of unicode chars from slots.
- * [(max longname slots * 13 (size in a slot) + nul) * sizeof(wchar_t)]
- */
-#define FAT_MAX_UNI_CHARS	((MSDOS_SLOTS - 1) * 13 + 1)
-#define FAT_MAX_UNI_SIZE	(FAT_MAX_UNI_CHARS * sizeof(wchar_t))
-
-/*
  * Return values: negative -> error, 0 -> not found, positive -> found,
  * value is the total amount of slots, including the shortname entry.
  */
@@ -1334,7 +1335,7 @@
 			goto error_remove;
 		}
 		if (dir->i_size & (sbi->cluster_size - 1)) {
-			fat_fs_panic(sb, "Odd directory size");
+			fat_fs_error(sb, "Odd directory size");
 			dir->i_size = (dir->i_size + sbi->cluster_size - 1)
 				& ~((loff_t)sbi->cluster_size - 1);
 		}
diff --git a/fs/fat/fat.h b/fs/fat/fat.h
index e4d8852..adb0e72 100644
--- a/fs/fat/fat.h
+++ b/fs/fat/fat.h
@@ -17,6 +17,10 @@
 #define VFAT_SFN_CREATE_WIN95	0x0100 /* emulate win95 rule for create */
 #define VFAT_SFN_CREATE_WINNT	0x0200 /* emulate winnt rule for create */
 
+#define FAT_ERRORS_CONT		1      /* ignore error and continue */
+#define FAT_ERRORS_PANIC	2      /* panic on error */
+#define FAT_ERRORS_RO		3      /* remount r/o on error */
+
 struct fat_mount_options {
 	uid_t fs_uid;
 	gid_t fs_gid;
@@ -26,6 +30,7 @@
 	char *iocharset;          /* Charset used for filename input/display */
 	unsigned short shortname; /* flags for shortname display/create rule */
 	unsigned char name_check; /* r = relaxed, n = normal, s = strict */
+	unsigned char errors;	  /* On error: continue, panic, remount-ro */
 	unsigned short allow_utime;/* permission for setting the [am]time */
 	unsigned quiet:1,         /* set = fake successful chmods and chowns */
 		 showexec:1,      /* set = only set x bit for com/exe/bat */
@@ -316,7 +321,7 @@
 extern int fat_flush_inodes(struct super_block *sb, struct inode *i1,
 		            struct inode *i2);
 /* fat/misc.c */
-extern void fat_fs_panic(struct super_block *s, const char *fmt, ...)
+extern void fat_fs_error(struct super_block *s, const char *fmt, ...)
 	__attribute__ ((format (printf, 2, 3))) __cold;
 extern void fat_clusters_flush(struct super_block *sb);
 extern int fat_chain_add(struct inode *inode, int new_dclus, int nr_cluster);
diff --git a/fs/fat/fatent.c b/fs/fat/fatent.c
index 618f530..a810377 100644
--- a/fs/fat/fatent.c
+++ b/fs/fat/fatent.c
@@ -348,7 +348,7 @@
 
 	if (entry < FAT_START_ENT || sbi->max_cluster <= entry) {
 		fatent_brelse(fatent);
-		fat_fs_panic(sb, "invalid access to FAT (entry 0x%08x)", entry);
+		fat_fs_error(sb, "invalid access to FAT (entry 0x%08x)", entry);
 		return -EIO;
 	}
 
@@ -560,7 +560,7 @@
 			err = cluster;
 			goto error;
 		} else if (cluster == FAT_ENT_FREE) {
-			fat_fs_panic(sb, "%s: deleting FAT entry beyond EOF",
+			fat_fs_error(sb, "%s: deleting FAT entry beyond EOF",
 				     __func__);
 			err = -EIO;
 			goto error;
diff --git a/fs/fat/file.c b/fs/fat/file.c
index e955a56..b28ea64 100644
--- a/fs/fat/file.c
+++ b/fs/fat/file.c
@@ -18,106 +18,112 @@
 #include <linux/security.h>
 #include "fat.h"
 
+static int fat_ioctl_get_attributes(struct inode *inode, u32 __user *user_attr)
+{
+	u32 attr;
+
+	mutex_lock(&inode->i_mutex);
+	attr = fat_make_attrs(inode);
+	mutex_unlock(&inode->i_mutex);
+
+	return put_user(attr, user_attr);
+}
+
+static int fat_ioctl_set_attributes(struct file *file, u32 __user *user_attr)
+{
+	struct inode *inode = file->f_path.dentry->d_inode;
+	struct msdos_sb_info *sbi = MSDOS_SB(inode->i_sb);
+	int is_dir = S_ISDIR(inode->i_mode);
+	u32 attr, oldattr;
+	struct iattr ia;
+	int err;
+
+	err = get_user(attr, user_attr);
+	if (err)
+		goto out;
+
+	mutex_lock(&inode->i_mutex);
+	err = mnt_want_write(file->f_path.mnt);
+	if (err)
+		goto out_unlock_inode;
+
+	/*
+	 * ATTR_VOLUME and ATTR_DIR cannot be changed; this also
+	 * prevents the user from turning us into a VFAT
+	 * longname entry.  Also, we obviously can't set
+	 * any of the NTFS attributes in the high 24 bits.
+	 */
+	attr &= 0xff & ~(ATTR_VOLUME | ATTR_DIR);
+	/* Merge in ATTR_VOLUME and ATTR_DIR */
+	attr |= (MSDOS_I(inode)->i_attrs & ATTR_VOLUME) |
+		(is_dir ? ATTR_DIR : 0);
+	oldattr = fat_make_attrs(inode);
+
+	/* Equivalent to a chmod() */
+	ia.ia_valid = ATTR_MODE | ATTR_CTIME;
+	ia.ia_ctime = current_fs_time(inode->i_sb);
+	if (is_dir)
+		ia.ia_mode = fat_make_mode(sbi, attr, S_IRWXUGO);
+	else {
+		ia.ia_mode = fat_make_mode(sbi, attr,
+			S_IRUGO | S_IWUGO | (inode->i_mode & S_IXUGO));
+	}
+
+	/* The root directory has no attributes */
+	if (inode->i_ino == MSDOS_ROOT_INO && attr != ATTR_DIR) {
+		err = -EINVAL;
+		goto out_drop_write;
+	}
+
+	if (sbi->options.sys_immutable &&
+	    ((attr | oldattr) & ATTR_SYS) &&
+	    !capable(CAP_LINUX_IMMUTABLE)) {
+		err = -EPERM;
+		goto out_drop_write;
+	}
+
+	/*
+	 * The security check is questionable...  We single
+	 * out the RO attribute for checking by the security
+	 * module, just because it maps to a file mode.
+	 */
+	err = security_inode_setattr(file->f_path.dentry, &ia);
+	if (err)
+		goto out_drop_write;
+
+	/* This MUST be done before doing anything irreversible... */
+	err = fat_setattr(file->f_path.dentry, &ia);
+	if (err)
+		goto out_drop_write;
+
+	fsnotify_change(file->f_path.dentry, ia.ia_valid);
+	if (sbi->options.sys_immutable) {
+		if (attr & ATTR_SYS)
+			inode->i_flags |= S_IMMUTABLE;
+		else
+			inode->i_flags &= S_IMMUTABLE;
+	}
+
+	fat_save_attrs(inode, attr);
+	mark_inode_dirty(inode);
+out_drop_write:
+	mnt_drop_write(file->f_path.mnt);
+out_unlock_inode:
+	mutex_unlock(&inode->i_mutex);
+out:
+	return err;
+}
+
 int fat_generic_ioctl(struct inode *inode, struct file *filp,
 		      unsigned int cmd, unsigned long arg)
 {
-	struct msdos_sb_info *sbi = MSDOS_SB(inode->i_sb);
 	u32 __user *user_attr = (u32 __user *)arg;
 
 	switch (cmd) {
 	case FAT_IOCTL_GET_ATTRIBUTES:
-	{
-		u32 attr;
-
-		mutex_lock(&inode->i_mutex);
-		attr = fat_make_attrs(inode);
-		mutex_unlock(&inode->i_mutex);
-
-		return put_user(attr, user_attr);
-	}
+		return fat_ioctl_get_attributes(inode, user_attr);
 	case FAT_IOCTL_SET_ATTRIBUTES:
-	{
-		u32 attr, oldattr;
-		int err, is_dir = S_ISDIR(inode->i_mode);
-		struct iattr ia;
-
-		err = get_user(attr, user_attr);
-		if (err)
-			return err;
-
-		mutex_lock(&inode->i_mutex);
-
-		err = mnt_want_write(filp->f_path.mnt);
-		if (err)
-			goto up_no_drop_write;
-
-		/*
-		 * ATTR_VOLUME and ATTR_DIR cannot be changed; this also
-		 * prevents the user from turning us into a VFAT
-		 * longname entry.  Also, we obviously can't set
-		 * any of the NTFS attributes in the high 24 bits.
-		 */
-		attr &= 0xff & ~(ATTR_VOLUME | ATTR_DIR);
-		/* Merge in ATTR_VOLUME and ATTR_DIR */
-		attr |= (MSDOS_I(inode)->i_attrs & ATTR_VOLUME) |
-			(is_dir ? ATTR_DIR : 0);
-		oldattr = fat_make_attrs(inode);
-
-		/* Equivalent to a chmod() */
-		ia.ia_valid = ATTR_MODE | ATTR_CTIME;
-		ia.ia_ctime = current_fs_time(inode->i_sb);
-		if (is_dir)
-			ia.ia_mode = fat_make_mode(sbi, attr, S_IRWXUGO);
-		else {
-			ia.ia_mode = fat_make_mode(sbi, attr,
-				S_IRUGO | S_IWUGO | (inode->i_mode & S_IXUGO));
-		}
-
-		/* The root directory has no attributes */
-		if (inode->i_ino == MSDOS_ROOT_INO && attr != ATTR_DIR) {
-			err = -EINVAL;
-			goto up;
-		}
-
-		if (sbi->options.sys_immutable) {
-			if ((attr | oldattr) & ATTR_SYS) {
-				if (!capable(CAP_LINUX_IMMUTABLE)) {
-					err = -EPERM;
-					goto up;
-				}
-			}
-		}
-
-		/*
-		 * The security check is questionable...  We single
-		 * out the RO attribute for checking by the security
-		 * module, just because it maps to a file mode.
-		 */
-		err = security_inode_setattr(filp->f_path.dentry, &ia);
-		if (err)
-			goto up;
-
-		/* This MUST be done before doing anything irreversible... */
-		err = fat_setattr(filp->f_path.dentry, &ia);
-		if (err)
-			goto up;
-
-		fsnotify_change(filp->f_path.dentry, ia.ia_valid);
-		if (sbi->options.sys_immutable) {
-			if (attr & ATTR_SYS)
-				inode->i_flags |= S_IMMUTABLE;
-			else
-				inode->i_flags &= S_IMMUTABLE;
-		}
-
-		fat_save_attrs(inode, attr);
-		mark_inode_dirty(inode);
-up:
-		mnt_drop_write(filp->f_path.mnt);
-up_no_drop_write:
-		mutex_unlock(&inode->i_mutex);
-		return err;
-	}
+		return fat_ioctl_set_attributes(filp, user_attr);
 	default:
 		return -ENOTTY;	/* Inappropriate ioctl for device */
 	}
@@ -225,7 +231,7 @@
 			fatent_brelse(&fatent);
 			return 0;
 		} else if (ret == FAT_ENT_FREE) {
-			fat_fs_panic(sb,
+			fat_fs_error(sb,
 				     "%s: invalid cluster chain (i_pos %lld)",
 				     __func__, MSDOS_I(inode)->i_pos);
 			ret = -EIO;
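
[Editor's note: the refactored handlers above keep the existing
FAT_IOCTL_GET_ATTRIBUTES/FAT_IOCTL_SET_ATTRIBUTES userspace interface unchanged.
A minimal userspace sketch toggling the DOS "hidden" attribute; the file path is
a placeholder on a mounted FAT volume.]

/* Sketch only: /mnt/usb/file.txt is a placeholder path. */
#include <fcntl.h>
#include <linux/msdos_fs.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

int main(void)
{
	uint32_t attr;
	int fd = open("/mnt/usb/file.txt", O_RDONLY);

	if (fd < 0 || ioctl(fd, FAT_IOCTL_GET_ATTRIBUTES, &attr) < 0) {
		perror("get attributes");
		return 1;
	}
	attr |= ATTR_HIDDEN;	/* ATTR_VOLUME/ATTR_DIR are masked off in-kernel */
	if (ioctl(fd, FAT_IOCTL_SET_ATTRIBUTES, &attr) < 0) {
		perror("set attributes");
		return 1;
	}
	close(fd);
	return 0;
}
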
diff --git a/fs/fat/inode.c b/fs/fat/inode.c
index 51a5ecf..304b411 100644
--- a/fs/fat/inode.c
+++ b/fs/fat/inode.c
@@ -76,7 +76,7 @@
 		return 0;
 
 	if (iblock != MSDOS_I(inode)->mmu_private >> sb->s_blocksize_bits) {
-		fat_fs_panic(sb, "corrupted file size (i_pos %lld, %lld)",
+		fat_fs_error(sb, "corrupted file size (i_pos %lld, %lld)",
 			MSDOS_I(inode)->i_pos, MSDOS_I(inode)->mmu_private);
 		return -EIO;
 	}
@@ -856,6 +856,12 @@
 		seq_puts(m, ",flush");
 	if (opts->tz_utc)
 		seq_puts(m, ",tz=UTC");
+	if (opts->errors == FAT_ERRORS_CONT)
+		seq_puts(m, ",errors=continue");
+	else if (opts->errors == FAT_ERRORS_PANIC)
+		seq_puts(m, ",errors=panic");
+	else
+		seq_puts(m, ",errors=remount-ro");
 
 	return 0;
 }
@@ -868,7 +874,8 @@
 	Opt_charset, Opt_shortname_lower, Opt_shortname_win95,
 	Opt_shortname_winnt, Opt_shortname_mixed, Opt_utf8_no, Opt_utf8_yes,
 	Opt_uni_xl_no, Opt_uni_xl_yes, Opt_nonumtail_no, Opt_nonumtail_yes,
-	Opt_obsolate, Opt_flush, Opt_tz_utc, Opt_rodir, Opt_err,
+	Opt_obsolate, Opt_flush, Opt_tz_utc, Opt_rodir, Opt_err_cont,
+	Opt_err_panic, Opt_err_ro, Opt_err,
 };
 
 static const match_table_t fat_tokens = {
@@ -891,6 +898,11 @@
 	{Opt_showexec, "showexec"},
 	{Opt_debug, "debug"},
 	{Opt_immutable, "sys_immutable"},
+	{Opt_flush, "flush"},
+	{Opt_tz_utc, "tz=UTC"},
+	{Opt_err_cont, "errors=continue"},
+	{Opt_err_panic, "errors=panic"},
+	{Opt_err_ro, "errors=remount-ro"},
 	{Opt_obsolate, "conv=binary"},
 	{Opt_obsolate, "conv=text"},
 	{Opt_obsolate, "conv=auto"},
@@ -902,8 +914,6 @@
 	{Opt_obsolate, "cvf_format=%20s"},
 	{Opt_obsolate, "cvf_options=%100s"},
 	{Opt_obsolate, "posix"},
-	{Opt_flush, "flush"},
-	{Opt_tz_utc, "tz=UTC"},
 	{Opt_err, NULL},
 };
 static const match_table_t msdos_tokens = {
@@ -973,6 +983,7 @@
 	opts->numtail = 1;
 	opts->usefree = opts->nocase = 0;
 	opts->tz_utc = 0;
+	opts->errors = FAT_ERRORS_RO;
 	*debug = 0;
 
 	if (!options)
@@ -1065,6 +1076,15 @@
 		case Opt_tz_utc:
 			opts->tz_utc = 1;
 			break;
+		case Opt_err_cont:
+			opts->errors = FAT_ERRORS_CONT;
+			break;
+		case Opt_err_panic:
+			opts->errors = FAT_ERRORS_PANIC;
+			break;
+		case Opt_err_ro:
+			opts->errors = FAT_ERRORS_RO;
+			break;
 
 		/* msdos specific */
 		case Opt_dots:
diff --git a/fs/fat/misc.c b/fs/fat/misc.c
index ac39ebc..a6c2047 100644
--- a/fs/fat/misc.c
+++ b/fs/fat/misc.c
@@ -12,14 +12,19 @@
 #include "fat.h"
 
 /*
- * fat_fs_panic reports a severe file system problem and sets the file system
- * read-only. The file system can be made writable again by remounting it.
+ * fat_fs_error reports a file system problem that might indicate data
+ * corruption/inconsistency. Depending on the 'errors' mount option,
+ * either panic() is called, or the error is only reported and nothing
+ * else is done, or the filesystem is remounted read-only (default behavior).
+ * In case the file system is remounted read-only, it can be made writable
+ * again by remounting it.
  */
-void fat_fs_panic(struct super_block *s, const char *fmt, ...)
+void fat_fs_error(struct super_block *s, const char *fmt, ...)
 {
+	struct fat_mount_options *opts = &MSDOS_SB(s)->options;
 	va_list args;
 
-	printk(KERN_ERR "FAT: Filesystem panic (dev %s)\n", s->s_id);
+	printk(KERN_ERR "FAT: Filesystem error (dev %s)\n", s->s_id);
 
 	printk(KERN_ERR "    ");
 	va_start(args, fmt);
@@ -27,13 +32,14 @@
 	va_end(args);
 	printk("\n");
 
-	if (!(s->s_flags & MS_RDONLY)) {
+	if (opts->errors == FAT_ERRORS_PANIC)
+		panic("    FAT fs panic from previous error\n");
+	else if (opts->errors == FAT_ERRORS_RO && !(s->s_flags & MS_RDONLY)) {
 		s->s_flags |= MS_RDONLY;
 		printk(KERN_ERR "    File system has been set read-only\n");
 	}
 }
-
-EXPORT_SYMBOL_GPL(fat_fs_panic);
+EXPORT_SYMBOL_GPL(fat_fs_error);
 
 /* Flushes the number of free clusters on FAT32 */
 /* XXX: Need to write one per FSINFO block.  Currently only writes 1 */
@@ -124,7 +130,7 @@
 			mark_inode_dirty(inode);
 	}
 	if (new_fclus != (inode->i_blocks >> (sbi->cluster_bits - 9))) {
-		fat_fs_panic(sb, "clusters badly computed (%d != %llu)",
+		fat_fs_error(sb, "clusters badly computed (%d != %llu)",
 			     new_fclus,
 			     (llu)(inode->i_blocks >> (sbi->cluster_bits - 9)));
 		fat_cache_inval_inode(inode);
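
[Editor's note: with the errors= option wired up above, the response to
corruption is chosen at mount time rather than always remounting read-only.
A minimal userspace sketch selecting the panic behaviour; the device and
mount point are placeholders.]

/* Sketch only: /dev/sdb1 and /mnt/usb are placeholders. */
#include <stdio.h>
#include <sys/mount.h>

int main(void)
{
	/* "errors=panic" maps to FAT_ERRORS_PANIC in the option parser;
	 * "errors=continue" and "errors=remount-ro" are the other choices. */
	if (mount("/dev/sdb1", "/mnt/usb", "vfat", 0, "errors=panic") < 0) {
		perror("mount");
		return 1;
	}
	return 0;
}
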
diff --git a/fs/fat/namei_msdos.c b/fs/fat/namei_msdos.c
index 20f5228..82f88733 100644
--- a/fs/fat/namei_msdos.c
+++ b/fs/fat/namei_msdos.c
@@ -608,7 +608,7 @@
 		sinfo.bh = NULL;
 	}
 	if (corrupt < 0) {
-		fat_fs_panic(new_dir->i_sb,
+		fat_fs_error(new_dir->i_sb,
 			     "%s: Filesystem corrupted (i_pos %lld)",
 			     __func__, sinfo.i_pos);
 	}
diff --git a/fs/fat/namei_vfat.c b/fs/fat/namei_vfat.c
index b50ecbe..73471b7 100644
--- a/fs/fat/namei_vfat.c
+++ b/fs/fat/namei_vfat.c
@@ -502,11 +502,11 @@
 	if (utf8) {
 		int name_len = strlen(name);
 
-		*outlen = utf8_mbstowcs((wchar_t *)outname, name, PATH_MAX);
+		*outlen = utf8s_to_utf16s(name, PATH_MAX, (wchar_t *) outname);
 
 		/*
 		 * We stripped '.'s before and set len appropriately,
-		 * but utf8_mbstowcs doesn't care about len
+		 * but utf8s_to_utf16s doesn't care about len
 		 */
 		*outlen -= (name_len - len);
 
@@ -1030,7 +1030,7 @@
 		sinfo.bh = NULL;
 	}
 	if (corrupt < 0) {
-		fat_fs_panic(new_dir->i_sb,
+		fat_fs_error(new_dir->i_sb,
 			     "%s: Filesystem corrupted (i_pos %lld)",
 			     __func__, sinfo.i_pos);
 	}
diff --git a/fs/fcntl.c b/fs/fcntl.c
index 1ad7031..a040b76 100644
--- a/fs/fcntl.c
+++ b/fs/fcntl.c
@@ -198,15 +198,19 @@
 }
 
 static void f_modown(struct file *filp, struct pid *pid, enum pid_type type,
-                     uid_t uid, uid_t euid, int force)
+                     int force)
 {
 	write_lock_irq(&filp->f_owner.lock);
 	if (force || !filp->f_owner.pid) {
 		put_pid(filp->f_owner.pid);
 		filp->f_owner.pid = get_pid(pid);
 		filp->f_owner.pid_type = type;
-		filp->f_owner.uid = uid;
-		filp->f_owner.euid = euid;
+
+		if (pid) {
+			const struct cred *cred = current_cred();
+			filp->f_owner.uid = cred->uid;
+			filp->f_owner.euid = cred->euid;
+		}
 	}
 	write_unlock_irq(&filp->f_owner.lock);
 }
@@ -214,14 +218,13 @@
 int __f_setown(struct file *filp, struct pid *pid, enum pid_type type,
 		int force)
 {
-	const struct cred *cred = current_cred();
 	int err;
-	
+
 	err = security_file_set_fowner(filp);
 	if (err)
 		return err;
 
-	f_modown(filp, pid, type, cred->uid, cred->euid, force);
+	f_modown(filp, pid, type, force);
 	return 0;
 }
 EXPORT_SYMBOL(__f_setown);
@@ -247,7 +250,7 @@
 
 void f_delown(struct file *filp)
 {
-	f_modown(filp, NULL, PIDTYPE_PID, 0, 0, 1);
+	f_modown(filp, NULL, PIDTYPE_PID, 1);
 }
 
 pid_t f_getown(struct file *filp)
@@ -425,14 +428,20 @@
 }
 
 static void send_sigio_to_task(struct task_struct *p,
-			       struct fown_struct *fown, 
+			       struct fown_struct *fown,
 			       int fd,
 			       int reason)
 {
-	if (!sigio_perm(p, fown, fown->signum))
+	/*
+	 * F_SETSIG can change ->signum locklessly in parallel; make
+	 * sure we read it once and use the same value throughout.
+	 */
+	int signum = ACCESS_ONCE(fown->signum);
+
+	if (!sigio_perm(p, fown, signum))
 		return;
 
-	switch (fown->signum) {
+	switch (signum) {
 		siginfo_t si;
 		default:
 			/* Queue a rt signal with the appropriate fd as its
@@ -441,7 +450,7 @@
 			   delivered even if we can't queue.  Failure to
 			   queue in this case _should_ be reported; we fall
 			   back to SIGIO in that case. --sct */
-			si.si_signo = fown->signum;
+			si.si_signo = signum;
 			si.si_errno = 0;
 		        si.si_code  = reason;
 			/* Make sure we are called with one of the POLL_*
@@ -453,7 +462,7 @@
 			else
 				si.si_band = band_table[reason - POLL_IN];
 			si.si_fd    = fd;
-			if (!group_send_sig_info(fown->signum, &si, p))
+			if (!group_send_sig_info(signum, &si, p))
 				break;
 		/* fall-through: fall back on the old plain SIGIO signal */
 		case 0:
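
[Editor's note: the ACCESS_ONCE() change above is the standard idiom for a
field another task may update without locking: snapshot it once so every later
use agrees with the test. A minimal sketch of the idiom; the struct, field and
helper names are illustrative, not taken from fs/fcntl.c.]

/* Sketch only: "foo", "signum" and consume() are illustrative. */
#include <linux/compiler.h>

struct foo {
	int signum;		/* may be updated locklessly by another task */
};

static void consume(int sig)
{
	/* placeholder for whatever uses the value */
}

static void handle(struct foo *f)
{
	/* Read the racy field exactly once; every use below sees the same
	 * value even if f->signum changes concurrently. */
	int signum = ACCESS_ONCE(f->signum);

	if (!signum)
		return;
	consume(signum);
}
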
diff --git a/fs/isofs/joliet.c b/fs/isofs/joliet.c
index 92c14b8..a048de8 100644
--- a/fs/isofs/joliet.c
+++ b/fs/isofs/joliet.c
@@ -37,37 +37,6 @@
 	return (op - ascii);
 }
 
-/* Convert big endian wide character string to utf8 */
-static int
-wcsntombs_be(__u8 *s, const __u8 *pwcs, int inlen, int maxlen)
-{
-	const __u8 *ip;
-	__u8 *op;
-	int size;
-	__u16 c;
-
-	op = s;
-	ip = pwcs;
-	while ((*ip || ip[1]) && (maxlen > 0) && (inlen > 0)) {
-		c = (*ip << 8) | ip[1];
-		if (c > 0x7f) {
-			size = utf8_wctomb(op, c, maxlen);
-			if (size == -1) {
-				/* Ignore character and move on */
-				maxlen--;
-			} else {
-				op += size;
-				maxlen -= size;
-			}
-		} else {
-			*op++ = (__u8) c;
-		}
-		ip += 2;
-		inlen--;
-	}
-	return (op - s);
-}
-
 int
 get_joliet_filename(struct iso_directory_record * de, unsigned char *outname, struct inode * inode)
 {
@@ -79,8 +48,9 @@
 	nls = ISOFS_SB(inode->i_sb)->s_nls_iocharset;
 
 	if (utf8) {
-		len = wcsntombs_be(outname, de->name,
-				de->name_len[0] >> 1, PAGE_SIZE);
+		len = utf16s_to_utf8s((const wchar_t *) de->name,
+				de->name_len[0] >> 1, UTF16_BIG_ENDIAN,
+				outname, PAGE_SIZE);
 	} else {
 		len = uni16_to_x8(outname, (__be16 *) de->name,
 				de->name_len[0] >> 1, nls);
diff --git a/fs/jfs/jfs_extent.c b/fs/jfs/jfs_extent.c
index bbbd5f2..41d6045 100644
--- a/fs/jfs/jfs_extent.c
+++ b/fs/jfs/jfs_extent.c
@@ -391,6 +391,7 @@
 		}
 		XADaddress(xp, xaddr);
 		XADlength(xp, xlen);
+		XADoffset(xp, prev);
 		/*
 		 * only preserve the abnr flag within the xad flags
 		 * of the returned hint.
diff --git a/fs/ncpfs/ncplib_kernel.c b/fs/ncpfs/ncplib_kernel.c
index 97645f1..0ec6237 100644
--- a/fs/ncpfs/ncplib_kernel.c
+++ b/fs/ncpfs/ncplib_kernel.c
@@ -1113,11 +1113,13 @@
 
 		if (NCP_IS_FLAG(server, NCP_FLAG_UTF8)) {
 			int k;
+			unicode_t u;
 
-			k = utf8_mbtowc(&ec, iname, iname_end - iname);
-			if (k < 0)
+			k = utf8_to_utf32(iname, iname_end - iname, &u);
+			if (k < 0 || u > MAX_WCHAR_T)
 				return -EINVAL;
 			iname += k;
+			ec = u;
 		} else {
 			if (*iname == NCP_ESC) {
 				int k;
@@ -1214,7 +1216,7 @@
 		if (NCP_IS_FLAG(server, NCP_FLAG_UTF8)) {
 			int k;
 
-			k = utf8_wctomb(iname, ec, iname_end - iname);
+			k = utf32_to_utf8(ec, iname, iname_end - iname);
 			if (k < 0) {
 				err = -ENAMETOOLONG;
 				goto quit;
diff --git a/fs/nls/nls_base.c b/fs/nls/nls_base.c
index 9b0efda..477d37d 100644
--- a/fs/nls/nls_base.c
+++ b/fs/nls/nls_base.c
@@ -15,6 +15,7 @@
 #include <linux/errno.h>
 #include <linux/kmod.h>
 #include <linux/spinlock.h>
+#include <asm/byteorder.h>
 
 static struct nls_table default_table;
 static struct nls_table *tables = &default_table;
@@ -43,10 +44,17 @@
     {0,						       /* end of table    */}
 };
 
-int
-utf8_mbtowc(wchar_t *p, const __u8 *s, int n)
+#define UNICODE_MAX	0x0010ffff
+#define PLANE_SIZE	0x00010000
+
+#define SURROGATE_MASK	0xfffff800
+#define SURROGATE_PAIR	0x0000d800
+#define SURROGATE_LOW	0x00000400
+#define SURROGATE_BITS	0x000003ff
+
+int utf8_to_utf32(const u8 *s, int len, unicode_t *pu)
 {
-	long l;
+	unsigned long l;
 	int c0, c, nc;
 	const struct utf8_table *t;
   
@@ -57,12 +65,13 @@
 		nc++;
 		if ((c0 & t->cmask) == t->cval) {
 			l &= t->lmask;
-			if (l < t->lval)
+			if (l < t->lval || l > UNICODE_MAX ||
+					(l & SURROGATE_MASK) == SURROGATE_PAIR)
 				return -1;
-			*p = l;
+			*pu = (unicode_t) l;
 			return nc;
 		}
-		if (n <= nc)
+		if (len <= nc)
 			return -1;
 		s++;
 		c = (*s ^ 0x80) & 0xFF;
@@ -72,90 +81,133 @@
 	}
 	return -1;
 }
+EXPORT_SYMBOL(utf8_to_utf32);
 
-int
-utf8_mbstowcs(wchar_t *pwcs, const __u8 *s, int n)
+int utf32_to_utf8(unicode_t u, u8 *s, int maxlen)
 {
-	__u16 *op;
-	const __u8 *ip;
-	int size;
-
-	op = pwcs;
-	ip = s;
-	while (*ip && n > 0) {
-		if (*ip & 0x80) {
-			size = utf8_mbtowc(op, ip, n);
-			if (size == -1) {
-				/* Ignore character and move on */
-				ip++;
-				n--;
-			} else {
-				op++;
-				ip += size;
-				n -= size;
-			}
-		} else {
-			*op++ = *ip++;
-			n--;
-		}
-	}
-	return (op - pwcs);
-}
-
-int
-utf8_wctomb(__u8 *s, wchar_t wc, int maxlen)
-{
-	long l;
+	unsigned long l;
 	int c, nc;
 	const struct utf8_table *t;
-  
+
 	if (!s)
 		return 0;
-  
-	l = wc;
+
+	l = u;
+	if (l > UNICODE_MAX || (l & SURROGATE_MASK) == SURROGATE_PAIR)
+		return -1;
+
 	nc = 0;
 	for (t = utf8_table; t->cmask && maxlen; t++, maxlen--) {
 		nc++;
 		if (l <= t->lmask) {
 			c = t->shift;
-			*s = t->cval | (l >> c);
+			*s = (u8) (t->cval | (l >> c));
 			while (c > 0) {
 				c -= 6;
 				s++;
-				*s = 0x80 | ((l >> c) & 0x3F);
+				*s = (u8) (0x80 | ((l >> c) & 0x3F));
 			}
 			return nc;
 		}
 	}
 	return -1;
 }
+EXPORT_SYMBOL(utf32_to_utf8);
 
-int
-utf8_wcstombs(__u8 *s, const wchar_t *pwcs, int maxlen)
+int utf8s_to_utf16s(const u8 *s, int len, wchar_t *pwcs)
 {
-	const __u16 *ip;
-	__u8 *op;
+	u16 *op;
 	int size;
+	unicode_t u;
+
+	op = pwcs;
+	while (*s && len > 0) {
+		if (*s & 0x80) {
+			size = utf8_to_utf32(s, len, &u);
+			if (size < 0) {
+				/* Ignore character and move on */
+				size = 1;
+			} else if (u >= PLANE_SIZE) {
+				u -= PLANE_SIZE;
+				*op++ = (wchar_t) (SURROGATE_PAIR |
+						((u >> 10) & SURROGATE_BITS));
+				*op++ = (wchar_t) (SURROGATE_PAIR |
+						SURROGATE_LOW |
+						(u & SURROGATE_BITS));
+			} else {
+				*op++ = (wchar_t) u;
+			}
+			s += size;
+			len -= size;
+		} else {
+			*op++ = *s++;
+			len--;
+		}
+	}
+	return op - pwcs;
+}
+EXPORT_SYMBOL(utf8s_to_utf16s);
+
+static inline unsigned long get_utf16(unsigned c, enum utf16_endian endian)
+{
+	switch (endian) {
+	default:
+		return c;
+	case UTF16_LITTLE_ENDIAN:
+		return __le16_to_cpu(c);
+	case UTF16_BIG_ENDIAN:
+		return __be16_to_cpu(c);
+	}
+}
+
+int utf16s_to_utf8s(const wchar_t *pwcs, int len, enum utf16_endian endian,
+		u8 *s, int maxlen)
+{
+	u8 *op;
+	int size;
+	unsigned long u, v;
 
 	op = s;
-	ip = pwcs;
-	while (*ip && maxlen > 0) {
-		if (*ip > 0x7f) {
-			size = utf8_wctomb(op, *ip, maxlen);
+	while (len > 0 && maxlen > 0) {
+		u = get_utf16(*pwcs, endian);
+		if (!u)
+			break;
+		pwcs++;
+		len--;
+		if (u > 0x7f) {
+			if ((u & SURROGATE_MASK) == SURROGATE_PAIR) {
+				if (u & SURROGATE_LOW) {
+					/* Ignore character and move on */
+					continue;
+				}
+				if (len <= 0)
+					break;
+				v = get_utf16(*pwcs, endian);
+				if ((v & SURROGATE_MASK) != SURROGATE_PAIR ||
+						!(v & SURROGATE_LOW)) {
+					/* Ignore character and move on */
+					continue;
+				}
+				u = PLANE_SIZE + ((u & SURROGATE_BITS) << 10)
+						+ (v & SURROGATE_BITS);
+				pwcs++;
+				len--;
+			}
+			size = utf32_to_utf8(u, op, maxlen);
 			if (size == -1) {
 				/* Ignore character and move on */
-				maxlen--;
 			} else {
 				op += size;
 				maxlen -= size;
 			}
 		} else {
-			*op++ = (__u8) *ip;
+			*op++ = (u8) u;
+			maxlen--;
 		}
-		ip++;
 	}
-	return (op - s);
+	return op - s;
 }
+EXPORT_SYMBOL(utf16s_to_utf8s);
 
 int register_nls(struct nls_table * nls)
 {
@@ -467,9 +519,5 @@
 EXPORT_SYMBOL(unload_nls);
 EXPORT_SYMBOL(load_nls);
 EXPORT_SYMBOL(load_nls_default);
-EXPORT_SYMBOL(utf8_mbtowc);
-EXPORT_SYMBOL(utf8_mbstowcs);
-EXPORT_SYMBOL(utf8_wctomb);
-EXPORT_SYMBOL(utf8_wcstombs);
 
 MODULE_LICENSE("Dual BSD/GPL");
diff --git a/fs/nls/nls_utf8.c b/fs/nls/nls_utf8.c
index aa2c42f..0d60a44 100644
--- a/fs/nls/nls_utf8.c
+++ b/fs/nls/nls_utf8.c
@@ -15,7 +15,11 @@
 {
 	int n;
 
-	if ( (n = utf8_wctomb(out, uni, boundlen)) == -1) {
+	if (boundlen <= 0)
+		return -ENAMETOOLONG;
+
+	n = utf32_to_utf8(uni, out, boundlen);
+	if (n < 0) {
 		*out = '?';
 		return -EINVAL;
 	}
@@ -25,11 +29,14 @@
 static int char2uni(const unsigned char *rawstring, int boundlen, wchar_t *uni)
 {
 	int n;
+	unicode_t u;
 
-	if ( (n = utf8_mbtowc(uni, rawstring, boundlen)) == -1) {
+	n = utf8_to_utf32(rawstring, boundlen, &u);
+	if (n < 0 || u > MAX_WCHAR_T) {
 		*uni = 0x003f;	/* ? */
-		n = -EINVAL;
+		return -EINVAL;
 	}
+	*uni = (wchar_t) u;
 	return n;
 }
 
diff --git a/fs/ocfs2/alloc.c b/fs/ocfs2/alloc.c
index 678a067..9edcde4 100644
--- a/fs/ocfs2/alloc.c
+++ b/fs/ocfs2/alloc.c
@@ -475,6 +475,12 @@
 #define path_leaf_el(_path) ((_path)->p_node[(_path)->p_tree_depth].el)
 #define path_num_items(_path) ((_path)->p_tree_depth + 1)
 
+static int ocfs2_find_path(struct inode *inode, struct ocfs2_path *path,
+			   u32 cpos);
+static void ocfs2_adjust_rightmost_records(struct inode *inode,
+					   handle_t *handle,
+					   struct ocfs2_path *path,
+					   struct ocfs2_extent_rec *insert_rec);
 /*
  * Reset the actual path elements so that we can re-use the structure
  * to build another path. Generally, this involves freeing the buffer
@@ -1013,6 +1019,54 @@
 }
 
 /*
+ * Change the range of the branches in the rightmost path according to the leaf
+ * extent block's rightmost record.
+ */
+static int ocfs2_adjust_rightmost_branch(handle_t *handle,
+					 struct inode *inode,
+					 struct ocfs2_extent_tree *et)
+{
+	int status;
+	struct ocfs2_path *path = NULL;
+	struct ocfs2_extent_list *el;
+	struct ocfs2_extent_rec *rec;
+
+	path = ocfs2_new_path_from_et(et);
+	if (!path) {
+		status = -ENOMEM;
+		return status;
+	}
+
+	status = ocfs2_find_path(inode, path, UINT_MAX);
+	if (status < 0) {
+		mlog_errno(status);
+		goto out;
+	}
+
+	status = ocfs2_extend_trans(handle, path_num_items(path) +
+				    handle->h_buffer_credits);
+	if (status < 0) {
+		mlog_errno(status);
+		goto out;
+	}
+
+	status = ocfs2_journal_access_path(inode, handle, path);
+	if (status < 0) {
+		mlog_errno(status);
+		goto out;
+	}
+
+	el = path_leaf_el(path);
+	rec = &el->l_recs[le32_to_cpu(el->l_next_free_rec) - 1];
+
+	ocfs2_adjust_rightmost_records(inode, handle, path, rec);
+
+out:
+	ocfs2_free_path(path);
+	return status;
+}
+
+/*
  * Add an entire tree branch to our inode. eb_bh is the extent block
  * to start at, if we don't want to start the branch at the dinode
  * structure.
@@ -1038,7 +1092,7 @@
 	struct ocfs2_extent_block *eb;
 	struct ocfs2_extent_list  *eb_el;
 	struct ocfs2_extent_list  *el;
-	u32 new_cpos;
+	u32 new_cpos, root_end;
 
 	mlog_entry_void();
 
@@ -1055,6 +1109,27 @@
 
 	new_blocks = le16_to_cpu(el->l_tree_depth);
 
+	eb = (struct ocfs2_extent_block *)(*last_eb_bh)->b_data;
+	new_cpos = ocfs2_sum_rightmost_rec(&eb->h_list);
+	root_end = ocfs2_sum_rightmost_rec(et->et_root_el);
+
+	/*
+	 * If there is a gap before the root end and the real end
+	 * of the rightmost leaf block, we need to remove the gap
+	 * between new_cpos and root_end first so that the tree
+	 * is consistent after we add a new branch (it will start
+	 * from new_cpos).
+	 */
+	if (root_end > new_cpos) {
+		mlog(0, "adjust the cluster end from %u to %u\n",
+		     root_end, new_cpos);
+		status = ocfs2_adjust_rightmost_branch(handle, inode, et);
+		if (status) {
+			mlog_errno(status);
+			goto bail;
+		}
+	}
+
 	/* allocate the number of new eb blocks we need */
 	new_eb_bhs = kcalloc(new_blocks, sizeof(struct buffer_head *),
 			     GFP_KERNEL);
@@ -1071,9 +1146,6 @@
 		goto bail;
 	}
 
-	eb = (struct ocfs2_extent_block *)(*last_eb_bh)->b_data;
-	new_cpos = ocfs2_sum_rightmost_rec(&eb->h_list);
-
 	/* Note: new_eb_bhs[new_blocks - 1] is the guy which will be
 	 * linked with the rest of the tree.
 	 * conversly, new_eb_bhs[0] is the new bottommost leaf.
diff --git a/fs/ocfs2/blockcheck.c b/fs/ocfs2/blockcheck.c
index 2a947c4..a1163b8 100644
--- a/fs/ocfs2/blockcheck.c
+++ b/fs/ocfs2/blockcheck.c
@@ -22,6 +22,9 @@
 #include <linux/crc32.h>
 #include <linux/buffer_head.h>
 #include <linux/bitops.h>
+#include <linux/debugfs.h>
+#include <linux/module.h>
+#include <linux/fs.h>
 #include <asm/byteorder.h>
 
 #include <cluster/masklog.h>
@@ -222,6 +225,155 @@
 	ocfs2_hamming_fix(data, blocksize * 8, 0, fix);
 }
 
+
+/*
+ * Debugfs handling.
+ */
+
+#ifdef CONFIG_DEBUG_FS
+
+static int blockcheck_u64_get(void *data, u64 *val)
+{
+	*val = *(u64 *)data;
+	return 0;
+}
+DEFINE_SIMPLE_ATTRIBUTE(blockcheck_fops, blockcheck_u64_get, NULL, "%llu\n");
+
+static struct dentry *blockcheck_debugfs_create(const char *name,
+						struct dentry *parent,
+						u64 *value)
+{
+	return debugfs_create_file(name, S_IFREG | S_IRUSR, parent, value,
+				   &blockcheck_fops);
+}
+
+static void ocfs2_blockcheck_debug_remove(struct ocfs2_blockcheck_stats *stats)
+{
+	if (stats) {
+		debugfs_remove(stats->b_debug_check);
+		stats->b_debug_check = NULL;
+		debugfs_remove(stats->b_debug_failure);
+		stats->b_debug_failure = NULL;
+		debugfs_remove(stats->b_debug_recover);
+		stats->b_debug_recover = NULL;
+		debugfs_remove(stats->b_debug_dir);
+		stats->b_debug_dir = NULL;
+	}
+}
+
+static int ocfs2_blockcheck_debug_install(struct ocfs2_blockcheck_stats *stats,
+					  struct dentry *parent)
+{
+	int rc = -EINVAL;
+
+	if (!stats)
+		goto out;
+
+	stats->b_debug_dir = debugfs_create_dir("blockcheck", parent);
+	if (!stats->b_debug_dir)
+		goto out;
+
+	stats->b_debug_check =
+		blockcheck_debugfs_create("blocks_checked",
+					  stats->b_debug_dir,
+					  &stats->b_check_count);
+
+	stats->b_debug_failure =
+		blockcheck_debugfs_create("checksums_failed",
+					  stats->b_debug_dir,
+					  &stats->b_failure_count);
+
+	stats->b_debug_recover =
+		blockcheck_debugfs_create("ecc_recoveries",
+					  stats->b_debug_dir,
+					  &stats->b_recover_count);
+	if (stats->b_debug_check && stats->b_debug_failure &&
+	    stats->b_debug_recover)
+		rc = 0;
+
+out:
+	if (rc)
+		ocfs2_blockcheck_debug_remove(stats);
+	return rc;
+}
+#else
+static inline int ocfs2_blockcheck_debug_install(struct ocfs2_blockcheck_stats *stats,
+						 struct dentry *parent)
+{
+	return 0;
+}
+
+static inline void ocfs2_blockcheck_debug_remove(struct ocfs2_blockcheck_stats *stats)
+{
+}
+#endif  /* CONFIG_DEBUG_FS */
+
+/* Always-called wrappers for starting and stopping the debugfs files */
+int ocfs2_blockcheck_stats_debugfs_install(struct ocfs2_blockcheck_stats *stats,
+					   struct dentry *parent)
+{
+	return ocfs2_blockcheck_debug_install(stats, parent);
+}
+
+void ocfs2_blockcheck_stats_debugfs_remove(struct ocfs2_blockcheck_stats *stats)
+{
+	ocfs2_blockcheck_debug_remove(stats);
+}
+
+static void ocfs2_blockcheck_inc_check(struct ocfs2_blockcheck_stats *stats)
+{
+	u64 new_count;
+
+	if (!stats)
+		return;
+
+	spin_lock(&stats->b_lock);
+	stats->b_check_count++;
+	new_count = stats->b_check_count;
+	spin_unlock(&stats->b_lock);
+
+	if (!new_count)
+		mlog(ML_NOTICE, "Block check count has wrapped\n");
+}
+
+static void ocfs2_blockcheck_inc_failure(struct ocfs2_blockcheck_stats *stats)
+{
+	u64 new_count;
+
+	if (!stats)
+		return;
+
+	spin_lock(&stats->b_lock);
+	stats->b_failure_count++;
+	new_count = stats->b_failure_count;
+	spin_unlock(&stats->b_lock);
+
+	if (!new_count)
+		mlog(ML_NOTICE, "Checksum failure count has wrapped\n");
+}
+
+static void ocfs2_blockcheck_inc_recover(struct ocfs2_blockcheck_stats *stats)
+{
+	u64 new_count;
+
+	if (!stats)
+		return;
+
+	spin_lock(&stats->b_lock);
+	stats->b_recover_count++;
+	new_count = stats->b_recover_count;
+	spin_unlock(&stats->b_lock);
+
+	if (!new_count)
+		mlog(ML_NOTICE, "ECC recovery count has wrapped\n");
+}
+
+
+
+/*
+ * These are the low-level APIs for using the ocfs2_block_check structure.
+ */
+
 /*
  * This function generates check information for a block.
  * data is the block to be checked.  bc is a pointer to the
@@ -266,12 +418,15 @@
  * Again, the data passed in should be the on-disk endian.
  */
 int ocfs2_block_check_validate(void *data, size_t blocksize,
-			       struct ocfs2_block_check *bc)
+			       struct ocfs2_block_check *bc,
+			       struct ocfs2_blockcheck_stats *stats)
 {
 	int rc = 0;
 	struct ocfs2_block_check check;
 	u32 crc, ecc;
 
+	ocfs2_blockcheck_inc_check(stats);
+
 	check.bc_crc32e = le32_to_cpu(bc->bc_crc32e);
 	check.bc_ecc = le16_to_cpu(bc->bc_ecc);
 
@@ -282,6 +437,7 @@
 	if (crc == check.bc_crc32e)
 		goto out;
 
+	ocfs2_blockcheck_inc_failure(stats);
 	mlog(ML_ERROR,
 	     "CRC32 failed: stored: %u, computed %u.  Applying ECC.\n",
 	     (unsigned int)check.bc_crc32e, (unsigned int)crc);
@@ -292,8 +448,10 @@
 
 	/* And check the crc32 again */
 	crc = crc32_le(~0, data, blocksize);
-	if (crc == check.bc_crc32e)
+	if (crc == check.bc_crc32e) {
+		ocfs2_blockcheck_inc_recover(stats);
 		goto out;
+	}
 
 	mlog(ML_ERROR, "Fixed CRC32 failed: stored: %u, computed %u\n",
 	     (unsigned int)check.bc_crc32e, (unsigned int)crc);
@@ -366,7 +524,8 @@
  * Again, the data passed in should be the on-disk endian.
  */
 int ocfs2_block_check_validate_bhs(struct buffer_head **bhs, int nr,
-				   struct ocfs2_block_check *bc)
+				   struct ocfs2_block_check *bc,
+				   struct ocfs2_blockcheck_stats *stats)
 {
 	int i, rc = 0;
 	struct ocfs2_block_check check;
@@ -377,6 +536,8 @@
 	if (!nr)
 		return 0;
 
+	ocfs2_blockcheck_inc_check(stats);
+
 	check.bc_crc32e = le32_to_cpu(bc->bc_crc32e);
 	check.bc_ecc = le16_to_cpu(bc->bc_ecc);
 
@@ -388,6 +549,7 @@
 	if (crc == check.bc_crc32e)
 		goto out;
 
+	ocfs2_blockcheck_inc_failure(stats);
 	mlog(ML_ERROR,
 	     "CRC32 failed: stored: %u, computed %u.  Applying ECC.\n",
 	     (unsigned int)check.bc_crc32e, (unsigned int)crc);
@@ -416,8 +578,10 @@
 	/* And check the crc32 again */
 	for (i = 0, crc = ~0; i < nr; i++)
 		crc = crc32_le(crc, bhs[i]->b_data, bhs[i]->b_size);
-	if (crc == check.bc_crc32e)
+	if (crc == check.bc_crc32e) {
+		ocfs2_blockcheck_inc_recover(stats);
 		goto out;
+	}
 
 	mlog(ML_ERROR, "Fixed CRC32 failed: stored: %u, computed %u\n",
 	     (unsigned int)check.bc_crc32e, (unsigned int)crc);
@@ -448,9 +612,11 @@
 			    struct ocfs2_block_check *bc)
 {
 	int rc = 0;
+	struct ocfs2_super *osb = OCFS2_SB(sb);
 
-	if (ocfs2_meta_ecc(OCFS2_SB(sb)))
-		rc = ocfs2_block_check_validate(data, sb->s_blocksize, bc);
+	if (ocfs2_meta_ecc(osb))
+		rc = ocfs2_block_check_validate(data, sb->s_blocksize, bc,
+						&osb->osb_ecc_stats);
 
 	return rc;
 }
@@ -468,9 +634,11 @@
 				struct ocfs2_block_check *bc)
 {
 	int rc = 0;
+	struct ocfs2_super *osb = OCFS2_SB(sb);
 
-	if (ocfs2_meta_ecc(OCFS2_SB(sb)))
-		rc = ocfs2_block_check_validate_bhs(bhs, nr, bc);
+	if (ocfs2_meta_ecc(osb))
+		rc = ocfs2_block_check_validate_bhs(bhs, nr, bc,
+						    &osb->osb_ecc_stats);
 
 	return rc;
 }
diff --git a/fs/ocfs2/blockcheck.h b/fs/ocfs2/blockcheck.h
index 70ec3fe..d4b69fe 100644
--- a/fs/ocfs2/blockcheck.h
+++ b/fs/ocfs2/blockcheck.h
@@ -21,6 +21,24 @@
 #define OCFS2_BLOCKCHECK_H
 
 
+/* Count errors and error correction from blockcheck.c */
+struct ocfs2_blockcheck_stats {
+	spinlock_t b_lock;
+	u64 b_check_count;	/* Number of blocks we've checked */
+	u64 b_failure_count;	/* Number of failed checksums */
+	u64 b_recover_count;	/* Number of blocks fixed by ecc */
+
+	/*
+	 * debugfs entries, used if this is passed to
+	 * ocfs2_blockcheck_stats_debugfs_install()
+	 */
+	struct dentry *b_debug_dir;	/* Parent of the debugfs  files */
+	struct dentry *b_debug_check;	/* Exposes b_check_count */
+	struct dentry *b_debug_failure;	/* Exposes b_failure_count */
+	struct dentry *b_debug_recover;	/* Exposes b_recover_count */
+};
+
+
 /* High level block API */
 void ocfs2_compute_meta_ecc(struct super_block *sb, void *data,
 			    struct ocfs2_block_check *bc);
@@ -37,11 +55,18 @@
 void ocfs2_block_check_compute(void *data, size_t blocksize,
 			       struct ocfs2_block_check *bc);
 int ocfs2_block_check_validate(void *data, size_t blocksize,
-			       struct ocfs2_block_check *bc);
+			       struct ocfs2_block_check *bc,
+			       struct ocfs2_blockcheck_stats *stats);
 void ocfs2_block_check_compute_bhs(struct buffer_head **bhs, int nr,
 				   struct ocfs2_block_check *bc);
 int ocfs2_block_check_validate_bhs(struct buffer_head **bhs, int nr,
-				   struct ocfs2_block_check *bc);
+				   struct ocfs2_block_check *bc,
+				   struct ocfs2_blockcheck_stats *stats);
+
+/* Debug Initialization */
+int ocfs2_blockcheck_stats_debugfs_install(struct ocfs2_blockcheck_stats *stats,
+					   struct dentry *parent);
+void ocfs2_blockcheck_stats_debugfs_remove(struct ocfs2_blockcheck_stats *stats);
 
 /*
  * Hamming code functions
diff --git a/fs/ocfs2/cluster/masklog.h b/fs/ocfs2/cluster/masklog.h
index 7e72a81..696c32e 100644
--- a/fs/ocfs2/cluster/masklog.h
+++ b/fs/ocfs2/cluster/masklog.h
@@ -48,34 +48,33 @@
  * only emit the appropriage printk() when the caller passes in a constant
  * mask, as is almost always the case.
  *
- * All this bitmask nonsense is hidden from the /proc interface so that Joel
- * doesn't have an aneurism.  Reading the file gives a straight forward
- * indication of which bits are on or off:
- * 	ENTRY off
- * 	EXIT off
+ * All this bitmask nonsense is managed from the files under
+ * /sys/fs/o2cb/logmask/.  Reading the files gives a straightforward
+ * indication of which bits are allowed (allow) or denied (off/deny).
+ * 	ENTRY deny
+ * 	EXIT deny
  * 	TCP off
  * 	MSG off
  * 	SOCKET off
- * 	ERROR off
- * 	NOTICE on
+ * 	ERROR allow
+ * 	NOTICE allow
  *
  * Writing changes the state of a given bit and requires a strictly formatted
  * single write() call:
  *
- * 	write(fd, "ENTRY on", 8);
+ * 	write(fd, "allow", 5);
  *
- * would turn the entry bit on.  "1" is also accepted in the place of "on", and
- * "off" and "0" behave as expected.
+ * Echoing the allow/deny/off strings into the logmask files flips the bits
+ * on or off as expected; for example, this shell loop enables them all:
  *
- * Some trivial shell can flip all the bits on or off:
+ * log_mask="/sys/fs/o2cb/log_mask"
+ * for node in ENTRY EXIT TCP MSG SOCKET ERROR NOTICE; do
+ *	echo allow >"$log_mask"/"$node"
+ * done
  *
- * log_mask="/proc/fs/ocfs2_nodemanager/log_mask"
- * cat $log_mask | (
- * 	while read bit status; do
- * 		# $1 is "on" or "off", say
- * 		echo "$bit $1" > $log_mask
- * 	done
- * )
+ * The debugfs.ocfs2 tool can also flip the bits with the -l option:
+ *
+ * debugfs.ocfs2 -l TCP allow
  */
 
 /* for task_struct */
diff --git a/fs/ocfs2/cluster/tcp.c b/fs/ocfs2/cluster/tcp.c
index 9fbe849..334f231 100644
--- a/fs/ocfs2/cluster/tcp.c
+++ b/fs/ocfs2/cluster/tcp.c
@@ -974,7 +974,7 @@
 int o2net_send_message_vec(u32 msg_type, u32 key, struct kvec *caller_vec,
 			   size_t caller_veclen, u8 target_node, int *status)
 {
-	int ret, error = 0;
+	int ret;
 	struct o2net_msg *msg = NULL;
 	size_t veclen, caller_bytes = 0;
 	struct kvec *vec = NULL;
@@ -1015,10 +1015,7 @@
 
 	o2net_set_nst_sock_time(&nst);
 
-	ret = wait_event_interruptible(nn->nn_sc_wq,
-				       o2net_tx_can_proceed(nn, &sc, &error));
-	if (!ret && error)
-		ret = error;
+	wait_event(nn->nn_sc_wq, o2net_tx_can_proceed(nn, &sc, &ret));
 	if (ret)
 		goto out;
 
diff --git a/fs/ocfs2/dir.c b/fs/ocfs2/dir.c
index c575230..b358f3b 100644
--- a/fs/ocfs2/dir.c
+++ b/fs/ocfs2/dir.c
@@ -2900,6 +2900,8 @@
 	alloc = ocfs2_clusters_for_bytes(sb, bytes);
 	dx_alloc = 0;
 
+	down_write(&oi->ip_alloc_sem);
+
 	if (ocfs2_supports_indexed_dirs(osb)) {
 		credits += ocfs2_add_dir_index_credits(sb);
 
@@ -2940,8 +2942,6 @@
 		goto out;
 	}
 
-	down_write(&oi->ip_alloc_sem);
-
 	/*
 	 * Prepare for worst case allocation scenario of two separate
 	 * extents in the unindexed tree.
@@ -2953,7 +2953,7 @@
 	if (IS_ERR(handle)) {
 		ret = PTR_ERR(handle);
 		mlog_errno(ret);
-		goto out_sem;
+		goto out;
 	}
 
 	if (vfs_dq_alloc_space_nodirty(dir,
@@ -3172,10 +3172,8 @@
 
 	ocfs2_commit_trans(osb, handle);
 
-out_sem:
-	up_write(&oi->ip_alloc_sem);
-
 out:
+	up_write(&oi->ip_alloc_sem);
 	if (data_ac)
 		ocfs2_free_alloc_context(data_ac);
 	if (meta_ac)
@@ -3322,11 +3320,15 @@
 		brelse(new_bh);
 		new_bh = NULL;
 
+		down_write(&OCFS2_I(dir)->ip_alloc_sem);
+		drop_alloc_sem = 1;
 		dir_i_size = i_size_read(dir);
 		credits = OCFS2_SIMPLE_DIR_EXTEND_CREDITS;
 		goto do_extend;
 	}
 
+	down_write(&OCFS2_I(dir)->ip_alloc_sem);
+	drop_alloc_sem = 1;
 	dir_i_size = i_size_read(dir);
 	mlog(0, "extending dir %llu (i_size = %lld)\n",
 	     (unsigned long long)OCFS2_I(dir)->ip_blkno, dir_i_size);
@@ -3370,9 +3372,6 @@
 		credits++; /* For attaching the new dirent block to the
 			    * dx_root */
 
-	down_write(&OCFS2_I(dir)->ip_alloc_sem);
-	drop_alloc_sem = 1;
-
 	handle = ocfs2_start_trans(osb, credits);
 	if (IS_ERR(handle)) {
 		status = PTR_ERR(handle);
@@ -3435,10 +3434,10 @@
 	*new_de_bh = new_bh;
 	get_bh(*new_de_bh);
 bail:
-	if (drop_alloc_sem)
-		up_write(&OCFS2_I(dir)->ip_alloc_sem);
 	if (handle)
 		ocfs2_commit_trans(osb, handle);
+	if (drop_alloc_sem)
+		up_write(&OCFS2_I(dir)->ip_alloc_sem);
 
 	if (data_ac)
 		ocfs2_free_alloc_context(data_ac);
diff --git a/fs/ocfs2/dlmglue.c b/fs/ocfs2/dlmglue.c
index e15fc7d..6cdeaa7 100644
--- a/fs/ocfs2/dlmglue.c
+++ b/fs/ocfs2/dlmglue.c
@@ -248,6 +248,10 @@
 	.flags		= 0,
 };
 
+static struct ocfs2_lock_res_ops ocfs2_orphan_scan_lops = {
+	.flags		= LOCK_TYPE_REQUIRES_REFRESH|LOCK_TYPE_USES_LVB,
+};
+
 static struct ocfs2_lock_res_ops ocfs2_dentry_lops = {
 	.get_osb	= ocfs2_get_dentry_osb,
 	.post_unlock	= ocfs2_dentry_post_unlock,
@@ -637,6 +641,19 @@
 				   &ocfs2_nfs_sync_lops, osb);
 }
 
+static void ocfs2_orphan_scan_lock_res_init(struct ocfs2_lock_res *res,
+					    struct ocfs2_super *osb)
+{
+	struct ocfs2_orphan_scan_lvb *lvb;
+
+	ocfs2_lock_res_init_once(res);
+	ocfs2_build_lock_name(OCFS2_LOCK_TYPE_ORPHAN_SCAN, 0, 0, res->l_name);
+	ocfs2_lock_res_init_common(osb, res, OCFS2_LOCK_TYPE_ORPHAN_SCAN,
+				   &ocfs2_orphan_scan_lops, osb);
+	lvb = ocfs2_dlm_lvb(&res->l_lksb);
+	lvb->lvb_version = OCFS2_ORPHAN_LVB_VERSION;
+}
+
 void ocfs2_file_lock_res_init(struct ocfs2_lock_res *lockres,
 			      struct ocfs2_file_private *fp)
 {
@@ -2352,6 +2369,37 @@
 	mlog_exit_void();
 }
 
+int ocfs2_orphan_scan_lock(struct ocfs2_super *osb, u32 *seqno, int ex)
+{
+	struct ocfs2_lock_res *lockres;
+	struct ocfs2_orphan_scan_lvb *lvb;
+	int level = ex ? DLM_LOCK_EX : DLM_LOCK_PR;
+	int status = 0;
+
+	lockres = &osb->osb_orphan_scan.os_lockres;
+	status = ocfs2_cluster_lock(osb, lockres, level, 0, 0);
+	if (status < 0)
+		return status;
+
+	lvb = ocfs2_dlm_lvb(&lockres->l_lksb);
+	if (lvb->lvb_version == OCFS2_ORPHAN_LVB_VERSION)
+		*seqno = be32_to_cpu(lvb->lvb_os_seqno);
+	return status;
+}
+
+void ocfs2_orphan_scan_unlock(struct ocfs2_super *osb, u32 seqno, int ex)
+{
+	struct ocfs2_lock_res *lockres;
+	struct ocfs2_orphan_scan_lvb *lvb;
+	int level = ex ? DLM_LOCK_EX : DLM_LOCK_PR;
+
+	lockres = &osb->osb_orphan_scan.os_lockres;
+	lvb = ocfs2_dlm_lvb(&lockres->l_lksb);
+	lvb->lvb_version = OCFS2_ORPHAN_LVB_VERSION;
+	lvb->lvb_os_seqno = cpu_to_be32(seqno);
+	ocfs2_cluster_unlock(osb, lockres, level);
+}
+
 int ocfs2_super_lock(struct ocfs2_super *osb,
 		     int ex)
 {
@@ -2842,6 +2890,7 @@
 	ocfs2_super_lock_res_init(&osb->osb_super_lockres, osb);
 	ocfs2_rename_lock_res_init(&osb->osb_rename_lockres, osb);
 	ocfs2_nfs_sync_lock_res_init(&osb->osb_nfs_sync_lockres, osb);
+	ocfs2_orphan_scan_lock_res_init(&osb->osb_orphan_scan.os_lockres, osb);
 
 	osb->cconn = conn;
 
@@ -2878,6 +2927,7 @@
 	ocfs2_lock_res_free(&osb->osb_super_lockres);
 	ocfs2_lock_res_free(&osb->osb_rename_lockres);
 	ocfs2_lock_res_free(&osb->osb_nfs_sync_lockres);
+	ocfs2_lock_res_free(&osb->osb_orphan_scan.os_lockres);
 
 	ocfs2_cluster_disconnect(osb->cconn, hangup_pending);
 	osb->cconn = NULL;
@@ -3061,6 +3111,7 @@
 	ocfs2_simple_drop_lockres(osb, &osb->osb_super_lockres);
 	ocfs2_simple_drop_lockres(osb, &osb->osb_rename_lockres);
 	ocfs2_simple_drop_lockres(osb, &osb->osb_nfs_sync_lockres);
+	ocfs2_simple_drop_lockres(osb, &osb->osb_orphan_scan.os_lockres);
 }
 
 int ocfs2_drop_inode_locks(struct inode *inode)
diff --git a/fs/ocfs2/dlmglue.h b/fs/ocfs2/dlmglue.h
index e1fd572..31b90d7 100644
--- a/fs/ocfs2/dlmglue.h
+++ b/fs/ocfs2/dlmglue.h
@@ -62,6 +62,14 @@
 	__be32	lvb_free_entry;
 };
 
+#define OCFS2_ORPHAN_LVB_VERSION 1
+
+struct ocfs2_orphan_scan_lvb {
+	__u8	lvb_version;
+	__u8	lvb_reserved[3];
+	__be32	lvb_os_seqno;
+};
+
 /* ocfs2_inode_lock_full() 'arg_flags' flags */
 /* don't wait on recovery. */
 #define OCFS2_META_LOCK_RECOVERY	(0x01)
@@ -113,6 +121,9 @@
 		     int ex);
 void ocfs2_super_unlock(struct ocfs2_super *osb,
 			int ex);
+int ocfs2_orphan_scan_lock(struct ocfs2_super *osb, u32 *seqno, int ex);
+void ocfs2_orphan_scan_unlock(struct ocfs2_super *osb, u32 seqno, int ex);
+
 int ocfs2_rename_lock(struct ocfs2_super *osb);
 void ocfs2_rename_unlock(struct ocfs2_super *osb);
 int ocfs2_nfs_sync_lock(struct ocfs2_super *osb, int ex);
diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
index c2a87c8..07267e0 100644
--- a/fs/ocfs2/file.c
+++ b/fs/ocfs2/file.c
@@ -187,6 +187,9 @@
 	if (err)
 		goto bail;
 
+	if (datasync && !(inode->i_state & I_DIRTY_DATASYNC))
+		goto bail;
+
 	journal = osb->journal->j_journal;
 	err = jbd2_journal_force_commit(journal);
 
@@ -894,9 +897,9 @@
 	struct ocfs2_super *osb = OCFS2_SB(sb);
 	struct buffer_head *bh = NULL;
 	handle_t *handle = NULL;
-	int locked[MAXQUOTAS] = {0, 0};
-	int credits, qtype;
-	struct ocfs2_mem_dqinfo *oinfo;
+	int qtype;
+	struct dquot *transfer_from[MAXQUOTAS] = { };
+	struct dquot *transfer_to[MAXQUOTAS] = { };
 
 	mlog_entry("(0x%p, '%.*s')\n", dentry,
 	           dentry->d_name.len, dentry->d_name.name);
@@ -969,30 +972,37 @@
 
 	if ((attr->ia_valid & ATTR_UID && attr->ia_uid != inode->i_uid) ||
 	    (attr->ia_valid & ATTR_GID && attr->ia_gid != inode->i_gid)) {
-		credits = OCFS2_INODE_UPDATE_CREDITS;
+		/*
+		 * Gather pointers to quota structures so that allocation /
+		 * freeing of quota structures happens here and not inside
+		 * vfs_dq_transfer() where we have problems with lock ordering
+		 */
 		if (attr->ia_valid & ATTR_UID && attr->ia_uid != inode->i_uid
 		    && OCFS2_HAS_RO_COMPAT_FEATURE(sb,
 		    OCFS2_FEATURE_RO_COMPAT_USRQUOTA)) {
-			oinfo = sb_dqinfo(sb, USRQUOTA)->dqi_priv;
-			status = ocfs2_lock_global_qf(oinfo, 1);
-			if (status < 0)
+			transfer_to[USRQUOTA] = dqget(sb, attr->ia_uid,
+						      USRQUOTA);
+			transfer_from[USRQUOTA] = dqget(sb, inode->i_uid,
+							USRQUOTA);
+			if (!transfer_to[USRQUOTA] || !transfer_from[USRQUOTA]) {
+				status = -ESRCH;
 				goto bail_unlock;
-			credits += ocfs2_calc_qinit_credits(sb, USRQUOTA) +
-				ocfs2_calc_qdel_credits(sb, USRQUOTA);
-			locked[USRQUOTA] = 1;
+			}
 		}
 		if (attr->ia_valid & ATTR_GID && attr->ia_gid != inode->i_gid
 		    && OCFS2_HAS_RO_COMPAT_FEATURE(sb,
 		    OCFS2_FEATURE_RO_COMPAT_GRPQUOTA)) {
-			oinfo = sb_dqinfo(sb, GRPQUOTA)->dqi_priv;
-			status = ocfs2_lock_global_qf(oinfo, 1);
-			if (status < 0)
+			transfer_to[GRPQUOTA] = dqget(sb, attr->ia_gid,
+						      GRPQUOTA);
+			transfer_from[GRPQUOTA] = dqget(sb, inode->i_gid,
+							GRPQUOTA);
+			if (!transfer_to[GRPQUOTA] || !transfer_from[GRPQUOTA]) {
+				status = -ESRCH;
 				goto bail_unlock;
-			credits += ocfs2_calc_qinit_credits(sb, GRPQUOTA) +
-				   ocfs2_calc_qdel_credits(sb, GRPQUOTA);
-			locked[GRPQUOTA] = 1;
+			}
 		}
-		handle = ocfs2_start_trans(osb, credits);
+		handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS +
+					   2 * ocfs2_quota_trans_credits(sb));
 		if (IS_ERR(handle)) {
 			status = PTR_ERR(handle);
 			mlog_errno(status);
@@ -1030,12 +1040,6 @@
 bail_commit:
 	ocfs2_commit_trans(osb, handle);
 bail_unlock:
-	for (qtype = 0; qtype < MAXQUOTAS; qtype++) {
-		if (!locked[qtype])
-			continue;
-		oinfo = sb_dqinfo(sb, qtype)->dqi_priv;
-		ocfs2_unlock_global_qf(oinfo, 1);
-	}
 	ocfs2_inode_unlock(inode, 1);
 bail_unlock_rw:
 	if (size_change)
@@ -1043,6 +1047,12 @@
 bail:
 	brelse(bh);
 
+	/* Release quota pointers in case we acquired them */
+	for (qtype = 0; qtype < MAXQUOTAS; qtype++) {
+		dqput(transfer_to[qtype]);
+		dqput(transfer_from[qtype]);
+	}
+
 	if (!status && attr->ia_valid & ATTR_MODE) {
 		status = ocfs2_acl_chmod(inode);
 		if (status < 0)
diff --git a/fs/ocfs2/journal.c b/fs/ocfs2/journal.c
index a20a0f1..4a3b9e6 100644
--- a/fs/ocfs2/journal.c
+++ b/fs/ocfs2/journal.c
@@ -28,6 +28,8 @@
 #include <linux/slab.h>
 #include <linux/highmem.h>
 #include <linux/kthread.h>
+#include <linux/time.h>
+#include <linux/random.h>
 
 #define MLOG_MASK_PREFIX ML_JOURNAL
 #include <cluster/masklog.h>
@@ -52,6 +54,8 @@
 
 DEFINE_SPINLOCK(trans_inc_lock);
 
+#define ORPHAN_SCAN_SCHEDULE_TIMEOUT 300000
+
 static int ocfs2_force_read_journal(struct inode *inode);
 static int ocfs2_recover_node(struct ocfs2_super *osb,
 			      int node_num, int slot_num);
@@ -1841,6 +1845,113 @@
 	return status;
 }
 
+/*
+ * Scan timer should get fired every ORPHAN_SCAN_SCHEDULE_TIMEOUT. Add some
+ * randomness to the timeout to minimize multple nodes firing the timer at the
+ * same time.
+ */
+static inline unsigned long ocfs2_orphan_scan_timeout(void)
+{
+	unsigned long time;
+
+	get_random_bytes(&time, sizeof(time));
+	time = ORPHAN_SCAN_SCHEDULE_TIMEOUT + (time % 5000);
+	return msecs_to_jiffies(time);
+}
+
+/*
+ * ocfs2_queue_orphan_scan calls ocfs2_queue_recovery_completion for
+ * every slot, queuing a recovery of the slot on the ocfs2_wq thread. This
+ * is done to catch any orphans that are left over in orphan directories.
+ *
+ * ocfs2_queue_orphan_scan gets called every ORPHAN_SCAN_SCHEDULE_TIMEOUT
+ * seconds.  It gets an EX lock on os_lockres and checks sequence number
+ * stored in LVB. If the sequence number has changed, it means some other
+ * node has done the scan.  This node skips the scan and tracks the
+ * sequence number.  If the sequence number didn't change, it means a scan
+ * hasn't happened.  The node queues a scan and increments the
+ * sequence number in the LVB.
+ */
+void ocfs2_queue_orphan_scan(struct ocfs2_super *osb)
+{
+	struct ocfs2_orphan_scan *os;
+	int status, i;
+	u32 seqno = 0;
+
+	os = &osb->osb_orphan_scan;
+
+	status = ocfs2_orphan_scan_lock(osb, &seqno, DLM_LOCK_EX);
+	if (status < 0) {
+		if (status != -EAGAIN)
+			mlog_errno(status);
+		goto out;
+	}
+
+	if (os->os_seqno != seqno) {
+		os->os_seqno = seqno;
+		goto unlock;
+	}
+
+	for (i = 0; i < osb->max_slots; i++)
+		ocfs2_queue_recovery_completion(osb->journal, i, NULL, NULL,
+						NULL);
+	/*
+	 * We queued a recovery on the orphan slots; increment the sequence
+	 * number and update the LVB so other nodes will skip the scan for a while.
+	 */
+	seqno++;
+	os->os_count++;
+	os->os_scantime = CURRENT_TIME;
+unlock:
+	ocfs2_orphan_scan_unlock(osb, seqno, DLM_LOCK_EX);
+out:
+	return;
+}
+
+/* Worker task that gets fired every ORPHAN_SCAN_SCHEDULE_TIMEOUT milliseconds */
+void ocfs2_orphan_scan_work(struct work_struct *work)
+{
+	struct ocfs2_orphan_scan *os;
+	struct ocfs2_super *osb;
+
+	os = container_of(work, struct ocfs2_orphan_scan,
+			  os_orphan_scan_work.work);
+	osb = os->os_osb;
+
+	mutex_lock(&os->os_lock);
+	ocfs2_queue_orphan_scan(osb);
+	schedule_delayed_work(&os->os_orphan_scan_work,
+			      ocfs2_orphan_scan_timeout());
+	mutex_unlock(&os->os_lock);
+}
+
+void ocfs2_orphan_scan_stop(struct ocfs2_super *osb)
+{
+	struct ocfs2_orphan_scan *os;
+
+	os = &osb->osb_orphan_scan;
+	mutex_lock(&os->os_lock);
+	cancel_delayed_work(&os->os_orphan_scan_work);
+	mutex_unlock(&os->os_lock);
+}
+
+int ocfs2_orphan_scan_init(struct ocfs2_super *osb)
+{
+	struct ocfs2_orphan_scan *os;
+
+	os = &osb->osb_orphan_scan;
+	os->os_osb = osb;
+	os->os_count = 0;
+	os->os_scantime = CURRENT_TIME;
+	mutex_init(&os->os_lock);
+
+	INIT_DELAYED_WORK(&os->os_orphan_scan_work,
+			  ocfs2_orphan_scan_work);
+	schedule_delayed_work(&os->os_orphan_scan_work,
+			      ocfs2_orphan_scan_timeout());
+	return 0;
+}
+
 struct ocfs2_orphan_filldir_priv {
 	struct inode		*head;
 	struct ocfs2_super	*osb;
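
[Editor's note: the jitter helper above keeps cluster nodes from all waking at
once. A standalone sketch of the same calculation, mirroring
ocfs2_orphan_scan_timeout() for illustration only; the 5000 ms window is taken
from the code above.]

/* Sketch only: duplicates the in-tree helper for explanation. */
#include <linux/jiffies.h>
#include <linux/random.h>

#define ORPHAN_SCAN_SCHEDULE_TIMEOUT	300000	/* ms, from fs/ocfs2/journal.c */

static unsigned long orphan_scan_jitter_example(void)
{
	unsigned long t;

	get_random_bytes(&t, sizeof(t));
	/* Result lies in [300000, 304999] ms, i.e. roughly five minutes,
	 * so nodes wake up spread over a ~5 second window. */
	t = ORPHAN_SCAN_SCHEDULE_TIMEOUT + (t % 5000);
	return msecs_to_jiffies(t);
}
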
diff --git a/fs/ocfs2/journal.h b/fs/ocfs2/journal.h
index eb7b763..61045ee 100644
--- a/fs/ocfs2/journal.h
+++ b/fs/ocfs2/journal.h
@@ -144,6 +144,10 @@
 }
 
 /* Exported only for the journal struct init code in super.c. Do not call. */
+int ocfs2_orphan_scan_init(struct ocfs2_super *osb);
+void ocfs2_orphan_scan_stop(struct ocfs2_super *osb);
+void ocfs2_orphan_scan_exit(struct ocfs2_super *osb);
+
 void ocfs2_complete_recovery(struct work_struct *work);
 void ocfs2_wait_for_recovery(struct ocfs2_super *osb);
 
diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h
index 1386281..18c1d9e 100644
--- a/fs/ocfs2/ocfs2.h
+++ b/fs/ocfs2/ocfs2.h
@@ -47,6 +47,9 @@
 #include "ocfs2_fs.h"
 #include "ocfs2_lockid.h"
 
+/* For struct ocfs2_blockcheck_stats */
+#include "blockcheck.h"
+
 /* Most user visible OCFS2 inodes will have very few pieces of
  * metadata, but larger files (including bitmaps, etc) must be taken
  * into account when designing an access scheme. We allow a small
@@ -151,6 +154,16 @@
 #endif
 };
 
+struct ocfs2_orphan_scan {
+	struct mutex 		os_lock;
+	struct ocfs2_super 	*os_osb;
+	struct ocfs2_lock_res 	os_lockres;     /* lock to synchronize scans */
+	struct delayed_work 	os_orphan_scan_work;
+	struct timespec		os_scantime;  /* time this node ran the scan */
+	u32			os_count;      /* tracks node specific scans */
+	u32  			os_seqno;       /* tracks cluster wide scans */
+};
+
 struct ocfs2_dlm_debug {
 	struct kref d_refcnt;
 	struct dentry *d_locking_state;
@@ -295,6 +308,7 @@
 	struct ocfs2_dinode *local_alloc_copy;
 	struct ocfs2_quota_recovery *quota_rec;
 
+	struct ocfs2_blockcheck_stats osb_ecc_stats;
 	struct ocfs2_alloc_stats alloc_stats;
 	char dev_str[20];		/* "major,minor" of the device */
 
@@ -341,6 +355,8 @@
 	unsigned int			*osb_orphan_wipes;
 	wait_queue_head_t		osb_wipe_event;
 
+	struct ocfs2_orphan_scan	osb_orphan_scan;
+
 	/* used to protect metaecc calculation check of xattr. */
 	spinlock_t osb_xattr_lock;
 
diff --git a/fs/ocfs2/ocfs2_lockid.h b/fs/ocfs2/ocfs2_lockid.h
index a53ce87..fcdba09 100644
--- a/fs/ocfs2/ocfs2_lockid.h
+++ b/fs/ocfs2/ocfs2_lockid.h
@@ -48,6 +48,7 @@
 	OCFS2_LOCK_TYPE_FLOCK,
 	OCFS2_LOCK_TYPE_QINFO,
 	OCFS2_LOCK_TYPE_NFS_SYNC,
+	OCFS2_LOCK_TYPE_ORPHAN_SCAN,
 	OCFS2_NUM_LOCK_TYPES
 };
 
@@ -85,6 +86,9 @@
 		case OCFS2_LOCK_TYPE_NFS_SYNC:
 			c = 'Y';
 			break;
+		case OCFS2_LOCK_TYPE_ORPHAN_SCAN:
+			c = 'P';
+			break;
 		default:
 			c = '\0';
 	}
@@ -104,6 +108,7 @@
 	[OCFS2_LOCK_TYPE_OPEN] = "Open",
 	[OCFS2_LOCK_TYPE_FLOCK] = "Flock",
 	[OCFS2_LOCK_TYPE_QINFO] = "Quota",
+	[OCFS2_LOCK_TYPE_ORPHAN_SCAN] = "OrphanScan",
 };
 
 static inline const char *ocfs2_lock_type_string(enum ocfs2_lock_type type)
diff --git a/fs/ocfs2/quota_global.c b/fs/ocfs2/quota_global.c
index 1ed0f7c..edfa60c 100644
--- a/fs/ocfs2/quota_global.c
+++ b/fs/ocfs2/quota_global.c
@@ -421,6 +421,7 @@
 	OCFS2_DQUOT(dquot)->dq_originodes = dquot->dq_dqb.dqb_curinodes;
 	if (!dquot->dq_off) {	/* No real quota entry? */
 		/* Upgrade to exclusive lock for allocation */
+		ocfs2_qinfo_unlock(info, 0);
 		err = ocfs2_qinfo_lock(info, 1);
 		if (err < 0)
 			goto out_qlock;
@@ -435,7 +436,8 @@
 out_qlock:
 	if (ex)
 		ocfs2_qinfo_unlock(info, 1);
-	ocfs2_qinfo_unlock(info, 0);
+	else
+		ocfs2_qinfo_unlock(info, 0);
 out:
 	if (err < 0)
 		mlog_errno(err);
diff --git a/fs/ocfs2/quota_local.c b/fs/ocfs2/quota_local.c
index 07deec5..5a460fa 100644
--- a/fs/ocfs2/quota_local.c
+++ b/fs/ocfs2/quota_local.c
@@ -444,10 +444,6 @@
 
 	mlog_entry("ino=%lu type=%u", (unsigned long)lqinode->i_ino, type);
 
-	status = ocfs2_lock_global_qf(oinfo, 1);
-	if (status < 0)
-		goto out;
-
 	list_for_each_entry_safe(rchunk, next, &(rec->r_list[type]), rc_list) {
 		chunk = rchunk->rc_chunk;
 		hbh = NULL;
@@ -480,12 +476,18 @@
 				     type);
 				goto out_put_bh;
 			}
+			status = ocfs2_lock_global_qf(oinfo, 1);
+			if (status < 0) {
+				mlog_errno(status);
+				goto out_put_dquot;
+			}
+
 			handle = ocfs2_start_trans(OCFS2_SB(sb),
 						   OCFS2_QSYNC_CREDITS);
 			if (IS_ERR(handle)) {
 				status = PTR_ERR(handle);
 				mlog_errno(status);
-				goto out_put_dquot;
+				goto out_drop_lock;
 			}
 			mutex_lock(&sb_dqopt(sb)->dqio_mutex);
 			spin_lock(&dq_data_lock);
@@ -523,6 +525,8 @@
 out_commit:
 			mutex_unlock(&sb_dqopt(sb)->dqio_mutex);
 			ocfs2_commit_trans(OCFS2_SB(sb), handle);
+out_drop_lock:
+			ocfs2_unlock_global_qf(oinfo, 1);
 out_put_dquot:
 			dqput(dquot);
 out_put_bh:
@@ -537,8 +541,6 @@
 		if (status < 0)
 			break;
 	}
-	ocfs2_unlock_global_qf(oinfo, 1);
-out:
 	if (status < 0)
 		free_recovery_list(&(rec->r_list[type]));
 	mlog_exit(status);
@@ -655,6 +657,9 @@
 	struct ocfs2_quota_recovery *rec;
 	int locked = 0;
 
+	/* We don't need the lock here, and we have to acquire quota file
+	 * locks which will later depend on this lock, so drop it for now. */
+	mutex_unlock(&sb_dqopt(sb)->dqio_mutex);
 	info->dqi_maxblimit = 0x7fffffffffffffffLL;
 	info->dqi_maxilimit = 0x7fffffffffffffffLL;
 	oinfo = kmalloc(sizeof(struct ocfs2_mem_dqinfo), GFP_NOFS);
@@ -733,6 +738,7 @@
 		goto out_err;
 	}
 
+	mutex_lock(&sb_dqopt(sb)->dqio_mutex);
 	return 0;
 out_err:
 	if (oinfo) {
@@ -746,6 +752,7 @@
 		kfree(oinfo);
 	}
 	brelse(bh);
+	mutex_lock(&sb_dqopt(sb)->dqio_mutex);
 	return -1;
 }
 
diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
index 201b40a..d33767f 100644
--- a/fs/ocfs2/super.c
+++ b/fs/ocfs2/super.c
@@ -119,10 +119,12 @@
 static int ocfs2_check_volume(struct ocfs2_super *osb);
 static int ocfs2_verify_volume(struct ocfs2_dinode *di,
 			       struct buffer_head *bh,
-			       u32 sectsize);
+			       u32 sectsize,
+			       struct ocfs2_blockcheck_stats *stats);
 static int ocfs2_initialize_super(struct super_block *sb,
 				  struct buffer_head *bh,
-				  int sector_size);
+				  int sector_size,
+				  struct ocfs2_blockcheck_stats *stats);
 static int ocfs2_get_sector(struct super_block *sb,
 			    struct buffer_head **bh,
 			    int block,
@@ -207,6 +209,7 @@
 	int i;
 	struct ocfs2_cluster_connection *cconn = osb->cconn;
 	struct ocfs2_recovery_map *rm = osb->recovery_map;
+	struct ocfs2_orphan_scan *os;
 
 	out += snprintf(buf + out, len - out,
 			"%10s => Id: %-s  Uuid: %-s  Gen: 0x%X  Label: %-s\n",
@@ -308,6 +311,13 @@
 				i, osb->slot_recovery_generations[i]);
 	}
 
+	os = &osb->osb_orphan_scan;
+	out += snprintf(buf + out, len - out, "Orphan Scan=> ");
+	out += snprintf(buf + out, len - out, "Local: %u  Global: %u ",
+			os->os_count, os->os_seqno);
+	out += snprintf(buf + out, len - out, " Last Scan: %lu seconds ago\n",
+			(get_seconds() - os->os_scantime.tv_sec));
+
 	return out;
 }
 
@@ -693,7 +703,8 @@
 
 static int ocfs2_sb_probe(struct super_block *sb,
 			  struct buffer_head **bh,
-			  int *sector_size)
+			  int *sector_size,
+			  struct ocfs2_blockcheck_stats *stats)
 {
 	int status, tmpstat;
 	struct ocfs1_vol_disk_hdr *hdr;
@@ -759,7 +770,8 @@
 			goto bail;
 		}
 		di = (struct ocfs2_dinode *) (*bh)->b_data;
-		status = ocfs2_verify_volume(di, *bh, blksize);
+		memset(stats, 0, sizeof(struct ocfs2_blockcheck_stats));
+		status = ocfs2_verify_volume(di, *bh, blksize, stats);
 		if (status >= 0)
 			goto bail;
 		brelse(*bh);
@@ -965,6 +977,7 @@
 	struct ocfs2_super *osb = NULL;
 	struct buffer_head *bh = NULL;
 	char nodestr[8];
+	struct ocfs2_blockcheck_stats stats;
 
 	mlog_entry("%p, %p, %i", sb, data, silent);
 
@@ -974,13 +987,13 @@
 	}
 
 	/* probe for superblock */
-	status = ocfs2_sb_probe(sb, &bh, &sector_size);
+	status = ocfs2_sb_probe(sb, &bh, &sector_size, &stats);
 	if (status < 0) {
 		mlog(ML_ERROR, "superblock probe failed!\n");
 		goto read_super_error;
 	}
 
-	status = ocfs2_initialize_super(sb, bh, sector_size);
+	status = ocfs2_initialize_super(sb, bh, sector_size, &stats);
 	osb = OCFS2_SB(sb);
 	if (status < 0) {
 		mlog_errno(status);
@@ -1090,6 +1103,18 @@
 		goto read_super_error;
 	}
 
+	if (ocfs2_meta_ecc(osb)) {
+		status = ocfs2_blockcheck_stats_debugfs_install(
+						&osb->osb_ecc_stats,
+						osb->osb_debug_root);
+		if (status) {
+			mlog(ML_ERROR,
+			     "Unable to create blockcheck statistics "
+			     "files\n");
+			goto read_super_error;
+		}
+	}
+
 	status = ocfs2_mount_volume(sb);
 	if (osb->root_inode)
 		inode = igrab(osb->root_inode);
@@ -1760,13 +1785,8 @@
 	}
 
 	status = ocfs2_truncate_log_init(osb);
-	if (status < 0) {
+	if (status < 0)
 		mlog_errno(status);
-		goto leave;
-	}
-
-	if (ocfs2_mount_local(osb))
-		goto leave;
 
 leave:
 	if (unlock_super)
@@ -1796,6 +1816,8 @@
 
 	ocfs2_truncate_log_shutdown(osb);
 
+	ocfs2_orphan_scan_stop(osb);
+
 	/* This will disable recovery and flush any recovery work. */
 	ocfs2_recovery_exit(osb);
 
@@ -1833,6 +1855,7 @@
 	if (osb->cconn)
 		ocfs2_dlm_shutdown(osb, hangup_needed);
 
+	ocfs2_blockcheck_stats_debugfs_remove(&osb->osb_ecc_stats);
 	debugfs_remove(osb->osb_debug_root);
 
 	if (hangup_needed)
@@ -1880,7 +1903,8 @@
 
 static int ocfs2_initialize_super(struct super_block *sb,
 				  struct buffer_head *bh,
-				  int sector_size)
+				  int sector_size,
+				  struct ocfs2_blockcheck_stats *stats)
 {
 	int status;
 	int i, cbits, bbits;
@@ -1939,6 +1963,9 @@
 	atomic_set(&osb->alloc_stats.bg_allocs, 0);
 	atomic_set(&osb->alloc_stats.bg_extends, 0);
 
+	/* Copy the blockcheck stats from the superblock probe */
+	osb->osb_ecc_stats = *stats;
+
 	ocfs2_init_node_maps(osb);
 
 	snprintf(osb->dev_str, sizeof(osb->dev_str), "%u,%u",
@@ -1951,6 +1978,13 @@
 		goto bail;
 	}
 
+	status = ocfs2_orphan_scan_init(osb);
+	if (status) {
+		mlog(ML_ERROR, "Unable to initialize delayed orphan scan\n");
+		mlog_errno(status);
+		goto bail;
+	}
+
 	init_waitqueue_head(&osb->checkpoint_event);
 	atomic_set(&osb->needs_checkpoint, 0);
 
@@ -2169,7 +2203,8 @@
  */
 static int ocfs2_verify_volume(struct ocfs2_dinode *di,
 			       struct buffer_head *bh,
-			       u32 blksz)
+			       u32 blksz,
+			       struct ocfs2_blockcheck_stats *stats)
 {
 	int status = -EAGAIN;
 
@@ -2182,7 +2217,8 @@
 		    OCFS2_FEATURE_INCOMPAT_META_ECC) {
 			status = ocfs2_block_check_validate(bh->b_data,
 							    bh->b_size,
-							    &di->i_check);
+							    &di->i_check,
+							    stats);
 			if (status)
 				goto out;
 		}
diff --git a/fs/ocfs2/xattr.c b/fs/ocfs2/xattr.c
index 1563101..ba320e2 100644
--- a/fs/ocfs2/xattr.c
+++ b/fs/ocfs2/xattr.c
@@ -3154,7 +3154,7 @@
 		     le32_to_cpu(bucket_xh(bucket)->xh_entries[0].xe_name_hash));
 		if (func) {
 			ret = func(inode, bucket, para);
-			if (ret)
+			if (ret && ret != -ERANGE)
 				mlog_errno(ret);
 			/* Fall through to bucket_relse() */
 		}
@@ -3261,7 +3261,8 @@
 						  ocfs2_list_xattr_bucket,
 						  &xl);
 		if (ret) {
-			mlog_errno(ret);
+			if (ret != -ERANGE)
+				mlog_errno(ret);
 			goto out;
 		}
 
diff --git a/fs/sysfs/symlink.c b/fs/sysfs/symlink.c
index a3ba217..1d897ad 100644
--- a/fs/sysfs/symlink.c
+++ b/fs/sysfs/symlink.c
@@ -192,8 +192,11 @@
 {
 	int error = -ENOMEM;
 	unsigned long page = get_zeroed_page(GFP_KERNEL);
-	if (page)
+	if (page) {
 		error = sysfs_getlink(dentry, (char *) page); 
+		if (error < 0)
+			free_page((unsigned long)page);
+	}
 	nd_set_link(nd, error ? ERR_PTR(error) : (char *)page);
 	return NULL;
 }
diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c
index 3589eab..3260b73 100644
--- a/fs/ubifs/super.c
+++ b/fs/ubifs/super.c
@@ -1937,6 +1937,9 @@
 	err  = bdi_init(&c->bdi);
 	if (err)
 		goto out_close;
+	err = bdi_register(&c->bdi, NULL, "ubifs");
+	if (err)
+		goto out_bdi;
 
 	err = ubifs_parse_options(c, data, 0);
 	if (err)
diff --git a/include/acpi/acpixf.h b/include/acpi/acpixf.h
index 4db89e9..82ec6a3 100644
--- a/include/acpi/acpixf.h
+++ b/include/acpi/acpixf.h
@@ -47,7 +47,7 @@
 
 /* Current ACPICA subsystem version in YYYYMMDD format */
 
-#define ACPI_CA_VERSION                 0x20090320
+#define ACPI_CA_VERSION                 0x20090521
 
 #include "actypes.h"
 #include "actbl.h"
@@ -201,6 +201,8 @@
 acpi_status
 acpi_get_object_info(acpi_handle handle, struct acpi_buffer *return_buffer);
 
+acpi_status acpi_install_method(u8 *buffer);
+
 acpi_status
 acpi_get_next_object(acpi_object_type type,
 		     acpi_handle parent,
@@ -375,7 +377,7 @@
 acpi_status acpi_leave_sleep_state(u8 sleep_state);
 
 /*
- * Debug output
+ * Error/Warning output
  */
 void ACPI_INTERNAL_VAR_XFACE
 acpi_error(const char *module_name,
@@ -394,6 +396,9 @@
 acpi_info(const char *module_name,
 	  u32 line_number, const char *format, ...) ACPI_PRINTF_LIKE(3);
 
+/*
+ * Debug output
+ */
 #ifdef ACPI_DEBUG_OUTPUT
 
 void ACPI_INTERNAL_VAR_XFACE
diff --git a/include/acpi/actypes.h b/include/acpi/actypes.h
index f555d927..37ba576 100644
--- a/include/acpi/actypes.h
+++ b/include/acpi/actypes.h
@@ -429,20 +429,12 @@
 
 /* Data manipulation */
 
-#define ACPI_LOWORD(l)                  ((u16)(u32)(l))
-#define ACPI_HIWORD(l)                  ((u16)((((u32)(l)) >> 16) & 0xFFFF))
-#define ACPI_LOBYTE(l)                  ((u8)(u16)(l))
-#define ACPI_HIBYTE(l)                  ((u8)((((u16)(l)) >> 8) & 0xFF))
-
-/* Full 64-bit integer must be available on both 32-bit and 64-bit platforms */
-
-struct acpi_integer_overlay {
-	u32 lo_dword;
-	u32 hi_dword;
-};
-
-#define ACPI_LODWORD(integer)           (ACPI_CAST_PTR (struct acpi_integer_overlay, &integer)->lo_dword)
-#define ACPI_HIDWORD(integer)           (ACPI_CAST_PTR (struct acpi_integer_overlay, &integer)->hi_dword)
+#define ACPI_LOBYTE(integer)            ((u8)   (u16)(integer))
+#define ACPI_HIBYTE(integer)            ((u8) (((u16)(integer)) >> 8))
+#define ACPI_LOWORD(integer)            ((u16)  (u32)(integer))
+#define ACPI_HIWORD(integer)            ((u16)(((u32)(integer)) >> 16))
+#define ACPI_LODWORD(integer64)         ((u32)  (u64)(integer64))
+#define ACPI_HIDWORD(integer64)         ((u32)(((u64)(integer64)) >> 32))
 
 #define ACPI_SET_BIT(target,bit)        ((target) |= (bit))
 #define ACPI_CLEAR_BIT(target,bit)      ((target) &= ~(bit))
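
A quick illustration of the reworked data-manipulation macros above; the function and the values are hypothetical and not part of this patch, shown only to make the pure-cast behaviour concrete:

	static void acpi_split_demo(void)
	{
		u64 address = 0x1122334455667788ULL;
		u32 lo = ACPI_LODWORD(address);		/* 0x55667788 */
		u32 hi = ACPI_HIDWORD(address);		/* 0x11223344 */
		u16 low_word = ACPI_LOWORD(lo);		/* 0x7788 */

		printk(KERN_DEBUG "lo=%#x hi=%#x word=%#x\n", lo, hi, low_word);
	}
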
diff --git a/include/acpi/platform/acgcc.h b/include/acpi/platform/acgcc.h
index 8e2cdc5..935c5d7 100644
--- a/include/acpi/platform/acgcc.h
+++ b/include/acpi/platform/acgcc.h
@@ -62,4 +62,8 @@
  */
 #define ACPI_UNUSED_VAR __attribute__ ((unused))
 
+#ifdef _ANSI
+#define inline
+#endif
+
 #endif				/* __ACGCC_H__ */
diff --git a/include/acpi/platform/aclinux.h b/include/acpi/platform/aclinux.h
index 6d49b2a..fcb8e4b 100644
--- a/include/acpi/platform/aclinux.h
+++ b/include/acpi/platform/aclinux.h
@@ -1,11 +1,11 @@
 /******************************************************************************
  *
- * Name: aclinux.h - OS specific defines, etc.
+ * Name: aclinux.h - OS specific defines, etc. for Linux
  *
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2008, Intel Corp.
+ * Copyright (C) 2000 - 2009, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -44,10 +44,13 @@
 #ifndef __ACLINUX_H__
 #define __ACLINUX_H__
 
+/* Common (in-kernel/user-space) ACPICA configuration */
+
 #define ACPI_USE_SYSTEM_CLIBRARY
 #define ACPI_USE_DO_WHILE_0
 #define ACPI_MUTEX_TYPE             ACPI_BINARY_SEMAPHORE
 
+
 #ifdef __KERNEL__
 
 #include <linux/string.h>
@@ -63,15 +66,18 @@
 #include <linux/spinlock_types.h>
 #include <asm/current.h>
 
-/* Host-dependent types and defines */
+/* Host-dependent types and defines for in-kernel ACPICA */
 
 #define ACPI_MACHINE_WIDTH          BITS_PER_LONG
-#define acpi_cache_t                        struct kmem_cache
-#define acpi_spinlock                   spinlock_t *
 #define ACPI_EXPORT_SYMBOL(symbol)  EXPORT_SYMBOL(symbol);
 #define strtoul                     simple_strtoul
 
-#else				/* !__KERNEL__ */
+#define acpi_cache_t                        struct kmem_cache
+#define acpi_spinlock                       spinlock_t *
+#define acpi_cpu_flags                      unsigned long
+#define acpi_thread_id                      struct task_struct *
+
+#else /* !__KERNEL__ */
 
 #include <stdarg.h>
 #include <string.h>
@@ -79,6 +85,11 @@
 #include <ctype.h>
 #include <unistd.h>
 
+/* Host-dependent types and defines for user-space ACPICA */
+
+#define ACPI_FLUSH_CPU_CACHE()
+#define acpi_thread_id                      pthread_t
+
 #if defined(__ia64__) || defined(__x86_64__)
 #define ACPI_MACHINE_WIDTH          64
 #define COMPILER_DEPENDENT_INT64    long
@@ -94,17 +105,17 @@
 #define __cdecl
 #endif
 
-#define ACPI_FLUSH_CPU_CACHE()
-#endif				/* __KERNEL__ */
+#endif /* __KERNEL__ */
 
 /* Linux uses GCC */
 
 #include "acgcc.h"
 
-#define acpi_cpu_flags unsigned long
 
-#define acpi_thread_id struct task_struct *
-
+#ifdef __KERNEL__
+/*
+ * Overrides for in-kernel ACPICA
+ */
 static inline acpi_thread_id acpi_os_get_thread_id(void)
 {
 	return current;
@@ -119,30 +130,32 @@
 #include <acpi/actypes.h>
 static inline void *acpi_os_allocate(acpi_size size)
 {
-	return kmalloc(size, irqs_disabled()? GFP_ATOMIC : GFP_KERNEL);
+	return kmalloc(size, irqs_disabled() ? GFP_ATOMIC : GFP_KERNEL);
 }
+
 static inline void *acpi_os_allocate_zeroed(acpi_size size)
 {
-	return kzalloc(size, irqs_disabled()? GFP_ATOMIC : GFP_KERNEL);
+	return kzalloc(size, irqs_disabled() ? GFP_ATOMIC : GFP_KERNEL);
 }
 
 static inline void *acpi_os_acquire_object(acpi_cache_t * cache)
 {
 	return kmem_cache_zalloc(cache,
-				 irqs_disabled()? GFP_ATOMIC : GFP_KERNEL);
+		irqs_disabled() ? GFP_ATOMIC : GFP_KERNEL);
 }
 
-#define ACPI_ALLOCATE(a)	acpi_os_allocate(a)
-#define ACPI_ALLOCATE_ZEROED(a)	acpi_os_allocate_zeroed(a)
-#define ACPI_FREE(a)		kfree(a)
+#define ACPI_ALLOCATE(a)        acpi_os_allocate(a)
+#define ACPI_ALLOCATE_ZEROED(a) acpi_os_allocate_zeroed(a)
+#define ACPI_FREE(a)            kfree(a)
 
-/*
- * We need to show where it is safe to preempt execution of ACPICA
- */
-#define ACPI_PREEMPTION_POINT()		\
-	do {				\
-		if (!irqs_disabled())	\
-			cond_resched();	\
+/* Used within ACPICA to show where it is safe to preempt execution */
+
+#define ACPI_PREEMPTION_POINT() \
+	do { \
+		if (!irqs_disabled()) \
+			cond_resched(); \
 	} while (0)
 
-#endif				/* __ACLINUX_H__ */
+#endif /* __KERNEL__ */
+
+#endif /* __ACLINUX_H__ */
diff --git a/include/asm-generic/atomic64.h b/include/asm-generic/atomic64.h
new file mode 100644
index 0000000..b18ce4f
--- /dev/null
+++ b/include/asm-generic/atomic64.h
@@ -0,0 +1,42 @@
+/*
+ * Generic implementation of 64-bit atomics using spinlocks,
+ * useful on processors that don't have 64-bit atomic instructions.
+ *
+ * Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#ifndef _ASM_GENERIC_ATOMIC64_H
+#define _ASM_GENERIC_ATOMIC64_H
+
+typedef struct {
+	long long counter;
+} atomic64_t;
+
+#define ATOMIC64_INIT(i)	{ (i) }
+
+extern long long atomic64_read(const atomic64_t *v);
+extern void	 atomic64_set(atomic64_t *v, long long i);
+extern void	 atomic64_add(long long a, atomic64_t *v);
+extern long long atomic64_add_return(long long a, atomic64_t *v);
+extern void	 atomic64_sub(long long a, atomic64_t *v);
+extern long long atomic64_sub_return(long long a, atomic64_t *v);
+extern long long atomic64_dec_if_positive(atomic64_t *v);
+extern long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n);
+extern long long atomic64_xchg(atomic64_t *v, long long new);
+extern int	 atomic64_add_unless(atomic64_t *v, long long a, long long u);
+
+#define atomic64_add_negative(a, v)	(atomic64_add_return((a), (v)) < 0)
+#define atomic64_inc(v)			atomic64_add(1LL, (v))
+#define atomic64_inc_return(v)		atomic64_add_return(1LL, (v))
+#define atomic64_inc_and_test(v) 	(atomic64_inc_return(v) == 0)
+#define atomic64_sub_and_test(a, v)	(atomic64_sub_return((a), (v)) == 0)
+#define atomic64_dec(v)			atomic64_sub(1LL, (v))
+#define atomic64_dec_return(v)		atomic64_sub_return(1LL, (v))
+#define atomic64_dec_and_test(v)	(atomic64_dec_return((v)) == 0)
+#define atomic64_inc_not_zero(v) 	atomic64_add_unless((v), 1LL, 0LL)
+
+#endif  /*  _ASM_GENERIC_ATOMIC64_H  */
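
A minimal usage sketch of the spinlock-backed 64-bit atomics declared above; the counter and helper names are illustrative, and it assumes the architecture's <asm/atomic.h> pulls in this header when 64-bit atomic instructions are unavailable:

	static atomic64_t bytes_transferred = ATOMIC64_INIT(0);

	static void account_io(size_t len)
	{
		atomic64_add((long long)len, &bytes_transferred);
	}

	static long long total_bytes(void)
	{
		return atomic64_read(&bytes_transferred);
	}
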
diff --git a/include/linux/bio.h b/include/linux/bio.h
index 12737be..2a04eb5 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -590,6 +590,11 @@
 	bl->head = bl2->head;
 }
 
+static inline struct bio *bio_list_peek(struct bio_list *bl)
+{
+	return bl->head;
+}
+
 static inline struct bio *bio_list_pop(struct bio_list *bl)
 {
 	struct bio *bio = bl->head;
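
bio_list_peek() complements bio_list_pop(): it returns the head of the list without removing it. A small sketch, with an illustrative helper name:

	static void log_next_bio(struct bio_list *bl)
	{
		struct bio *bio = bio_list_peek(bl);	/* head stays on the list */

		if (bio)
			pr_debug("next bio: %u sectors\n", bio_sectors(bio));
	}
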
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 0b1a6ca..8963d91 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -926,6 +926,7 @@
 				       unsigned int alignment);
 extern void blk_queue_io_min(struct request_queue *q, unsigned int min);
 extern void blk_queue_io_opt(struct request_queue *q, unsigned int opt);
+extern void blk_set_default_limits(struct queue_limits *lim);
 extern int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
 			    sector_t offset);
 extern void disk_stack_limits(struct gendisk *disk, struct block_device *bdev,
diff --git a/include/linux/c2port.h b/include/linux/c2port.h
index 7b5a238..2a5cd86 100644
--- a/include/linux/c2port.h
+++ b/include/linux/c2port.h
@@ -10,6 +10,7 @@
  */
 
 #include <linux/device.h>
+#include <linux/kmemcheck.h>
 
 #define C2PORT_NAME_LEN			32
 
@@ -20,8 +21,10 @@
 /* Main struct */
 struct c2port_ops;
 struct c2port_device {
+	kmemcheck_bitfield_begin(flags);
 	unsigned int access:1;
 	unsigned int flash_access:1;
+	kmemcheck_bitfield_end(flags);
 
 	int id;
 	char name[C2PORT_NAME_LEN];
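
With the bit-fields wrapped in kmemcheck markers, the allocation site is expected to annotate them so kmemcheck does not warn about the padding between the bits. A sketch of that pattern; the helper name is hypothetical:

	static struct c2port_device *c2port_alloc(void)
	{
		struct c2port_device *c2dev;

		c2dev = kmalloc(sizeof(*c2dev), GFP_KERNEL);
		if (!c2dev)
			return NULL;

		/* mark the flags bitfield as initialized for kmemcheck */
		kmemcheck_annotate_bitfield(c2dev, flags);
		return c2dev;
	}
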
diff --git a/include/linux/device.h b/include/linux/device.h
index a4a7b10..ed4e39f 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -114,6 +114,8 @@
 #define BUS_NOTIFY_BOUND_DRIVER		0x00000003 /* driver bound to device */
 #define BUS_NOTIFY_UNBIND_DRIVER	0x00000004 /* driver about to be
 						      unbound */
+#define BUS_NOTIFY_UNBOUND_DRIVER	0x00000005 /* driver is unbound
+						      from the device */
 
 extern struct kset *bus_get_kset(struct bus_type *bus);
 extern struct klist *bus_get_device_klist(struct bus_type *bus);
@@ -192,6 +194,7 @@
 	struct kobject			*dev_kobj;
 
 	int (*dev_uevent)(struct device *dev, struct kobj_uevent_env *env);
+	char *(*nodename)(struct device *dev);
 
 	void (*class_release)(struct class *class);
 	void (*dev_release)(struct device *dev);
@@ -287,6 +290,7 @@
 	const char *name;
 	struct attribute_group **groups;
 	int (*uevent)(struct device *dev, struct kobj_uevent_env *env);
+	char *(*nodename)(struct device *dev);
 	void (*release)(struct device *dev);
 
 	struct dev_pm_ops *pm;
@@ -486,6 +490,7 @@
 extern int device_rename(struct device *dev, char *new_name);
 extern int device_move(struct device *dev, struct device *new_parent,
 		       enum dpm_order dpm_order);
+extern const char *device_get_nodename(struct device *dev, const char **tmp);
 
 /*
  * Root device objects for grouping under /sys/devices
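
The new nodename callbacks let a class (or device_type) suggest a device-node name to userspace. A sketch of the expected pattern, with illustrative names; the assumption is that the callback returns a kasprintf()'d path relative to /dev which the driver core later frees:

	static char *example_nodename(struct device *dev)
	{
		/* returned string is freed by the driver core */
		return kasprintf(GFP_KERNEL, "example/%s", dev_name(dev));
	}

	static struct class example_class = {
		.name		= "example",
		.nodename	= example_nodename,
	};
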
diff --git a/include/linux/eisa.h b/include/linux/eisa.h
index e61c0be..6925249 100644
--- a/include/linux/eisa.h
+++ b/include/linux/eisa.h
@@ -78,12 +78,12 @@
 /* Mimics pci.h... */
 static inline void *eisa_get_drvdata (struct eisa_device *edev)
 {
-        return edev->dev.driver_data;
+        return dev_get_drvdata(&edev->dev);
 }
 
 static inline void eisa_set_drvdata (struct eisa_device *edev, void *data)
 {
-        edev->dev.driver_data = data;
+        dev_set_drvdata(&edev->dev, data);
 }
 
 /* The EISA root device. There's rumours about machines with multiple
diff --git a/include/linux/firewire.h b/include/linux/firewire.h
new file mode 100644
index 0000000..e584b72
--- /dev/null
+++ b/include/linux/firewire.h
@@ -0,0 +1,358 @@
+#ifndef _LINUX_FIREWIRE_H
+#define _LINUX_FIREWIRE_H
+
+#include <linux/completion.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/kref.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/spinlock.h>
+#include <linux/sysfs.h>
+#include <linux/timer.h>
+#include <linux/types.h>
+#include <linux/workqueue.h>
+
+#include <asm/atomic.h>
+#include <asm/byteorder.h>
+
+#define fw_notify(s, args...) printk(KERN_NOTICE KBUILD_MODNAME ": " s, ## args)
+#define fw_error(s, args...) printk(KERN_ERR KBUILD_MODNAME ": " s, ## args)
+
+static inline void fw_memcpy_from_be32(void *_dst, void *_src, size_t size)
+{
+	u32    *dst = _dst;
+	__be32 *src = _src;
+	int i;
+
+	for (i = 0; i < size / 4; i++)
+		dst[i] = be32_to_cpu(src[i]);
+}
+
+static inline void fw_memcpy_to_be32(void *_dst, void *_src, size_t size)
+{
+	fw_memcpy_from_be32(_dst, _src, size);
+}
+#define CSR_REGISTER_BASE		0xfffff0000000ULL
+
+/* register offsets are relative to CSR_REGISTER_BASE */
+#define CSR_STATE_CLEAR			0x0
+#define CSR_STATE_SET			0x4
+#define CSR_NODE_IDS			0x8
+#define CSR_RESET_START			0xc
+#define CSR_SPLIT_TIMEOUT_HI		0x18
+#define CSR_SPLIT_TIMEOUT_LO		0x1c
+#define CSR_CYCLE_TIME			0x200
+#define CSR_BUS_TIME			0x204
+#define CSR_BUSY_TIMEOUT		0x210
+#define CSR_BUS_MANAGER_ID		0x21c
+#define CSR_BANDWIDTH_AVAILABLE		0x220
+#define CSR_CHANNELS_AVAILABLE		0x224
+#define CSR_CHANNELS_AVAILABLE_HI	0x224
+#define CSR_CHANNELS_AVAILABLE_LO	0x228
+#define CSR_BROADCAST_CHANNEL		0x234
+#define CSR_CONFIG_ROM			0x400
+#define CSR_CONFIG_ROM_END		0x800
+#define CSR_FCP_COMMAND			0xB00
+#define CSR_FCP_RESPONSE		0xD00
+#define CSR_FCP_END			0xF00
+#define CSR_TOPOLOGY_MAP		0x1000
+#define CSR_TOPOLOGY_MAP_END		0x1400
+#define CSR_SPEED_MAP			0x2000
+#define CSR_SPEED_MAP_END		0x3000
+
+#define CSR_OFFSET		0x40
+#define CSR_LEAF		0x80
+#define CSR_DIRECTORY		0xc0
+
+#define CSR_DESCRIPTOR		0x01
+#define CSR_VENDOR		0x03
+#define CSR_HARDWARE_VERSION	0x04
+#define CSR_NODE_CAPABILITIES	0x0c
+#define CSR_UNIT		0x11
+#define CSR_SPECIFIER_ID	0x12
+#define CSR_VERSION		0x13
+#define CSR_DEPENDENT_INFO	0x14
+#define CSR_MODEL		0x17
+#define CSR_INSTANCE		0x18
+#define CSR_DIRECTORY_ID	0x20
+
+struct fw_csr_iterator {
+	u32 *p;
+	u32 *end;
+};
+
+void fw_csr_iterator_init(struct fw_csr_iterator *ci, u32 *p);
+int fw_csr_iterator_next(struct fw_csr_iterator *ci, int *key, int *value);
+
+extern struct bus_type fw_bus_type;
+
+struct fw_card_driver;
+struct fw_node;
+
+struct fw_card {
+	const struct fw_card_driver *driver;
+	struct device *device;
+	struct kref kref;
+	struct completion done;
+
+	int node_id;
+	int generation;
+	int current_tlabel;
+	u64 tlabel_mask;
+	struct list_head transaction_list;
+	struct timer_list flush_timer;
+	unsigned long reset_jiffies;
+
+	unsigned long long guid;
+	unsigned max_receive;
+	int link_speed;
+	int config_rom_generation;
+
+	spinlock_t lock; /* Take this lock when handling the lists in
+			  * this struct. */
+	struct fw_node *local_node;
+	struct fw_node *root_node;
+	struct fw_node *irm_node;
+	u8 color; /* must be u8 to match the definition in struct fw_node */
+	int gap_count;
+	bool beta_repeaters_present;
+
+	int index;
+
+	struct list_head link;
+
+	/* Work struct for BM duties. */
+	struct delayed_work work;
+	int bm_retries;
+	int bm_generation;
+
+	bool broadcast_channel_allocated;
+	u32 broadcast_channel;
+	u32 topology_map[(CSR_TOPOLOGY_MAP_END - CSR_TOPOLOGY_MAP) / 4];
+};
+
+static inline struct fw_card *fw_card_get(struct fw_card *card)
+{
+	kref_get(&card->kref);
+
+	return card;
+}
+
+void fw_card_release(struct kref *kref);
+
+static inline void fw_card_put(struct fw_card *card)
+{
+	kref_put(&card->kref, fw_card_release);
+}
+
+struct fw_attribute_group {
+	struct attribute_group *groups[2];
+	struct attribute_group group;
+	struct attribute *attrs[12];
+};
+
+enum fw_device_state {
+	FW_DEVICE_INITIALIZING,
+	FW_DEVICE_RUNNING,
+	FW_DEVICE_GONE,
+	FW_DEVICE_SHUTDOWN,
+};
+
+/*
+ * Note, fw_device.generation always has to be read before fw_device.node_id.
+ * Use SMP memory barriers to ensure this.  Otherwise requests will be sent
+ * to an outdated node_id if the generation was updated in the meantime due
+ * to a bus reset.
+ *
+ * Likewise, fw-core will take care to update .node_id before .generation so
+ * that whenever fw_device.generation is current WRT the actual bus generation,
+ * fw_device.node_id is guaranteed to be current too.
+ *
+ * The same applies to fw_device.card->node_id vs. fw_device.generation.
+ *
+ * fw_device.config_rom and fw_device.config_rom_length may be accessed during
+ * the lifetime of any fw_unit belonging to the fw_device, before device_del()
+ * was called on the last fw_unit.  Alternatively, they may be accessed while
+ * holding fw_device_rwsem.
+ */
+struct fw_device {
+	atomic_t state;
+	struct fw_node *node;
+	int node_id;
+	int generation;
+	unsigned max_speed;
+	struct fw_card *card;
+	struct device device;
+
+	struct mutex client_list_mutex;
+	struct list_head client_list;
+
+	u32 *config_rom;
+	size_t config_rom_length;
+	int config_rom_retries;
+	unsigned is_local:1;
+	unsigned max_rec:4;
+	unsigned cmc:1;
+	unsigned irmc:1;
+	unsigned bc_implemented:2;
+
+	struct delayed_work work;
+	struct fw_attribute_group attribute_group;
+};
+
+static inline struct fw_device *fw_device(struct device *dev)
+{
+	return container_of(dev, struct fw_device, device);
+}
+
+static inline int fw_device_is_shutdown(struct fw_device *device)
+{
+	return atomic_read(&device->state) == FW_DEVICE_SHUTDOWN;
+}
+
+static inline struct fw_device *fw_device_get(struct fw_device *device)
+{
+	get_device(&device->device);
+
+	return device;
+}
+
+static inline void fw_device_put(struct fw_device *device)
+{
+	put_device(&device->device);
+}
+
+int fw_device_enable_phys_dma(struct fw_device *device);
+
+/*
+ * fw_unit.directory must not be accessed after device_del(&fw_unit.device).
+ */
+struct fw_unit {
+	struct device device;
+	u32 *directory;
+	struct fw_attribute_group attribute_group;
+};
+
+static inline struct fw_unit *fw_unit(struct device *dev)
+{
+	return container_of(dev, struct fw_unit, device);
+}
+
+static inline struct fw_unit *fw_unit_get(struct fw_unit *unit)
+{
+	get_device(&unit->device);
+
+	return unit;
+}
+
+static inline void fw_unit_put(struct fw_unit *unit)
+{
+	put_device(&unit->device);
+}
+
+static inline struct fw_device *fw_parent_device(struct fw_unit *unit)
+{
+	return fw_device(unit->device.parent);
+}
+
+struct ieee1394_device_id;
+
+struct fw_driver {
+	struct device_driver driver;
+	/* Called when the parent device sits through a bus reset. */
+	void (*update)(struct fw_unit *unit);
+	const struct ieee1394_device_id *id_table;
+};
+
+struct fw_packet;
+struct fw_request;
+
+typedef void (*fw_packet_callback_t)(struct fw_packet *packet,
+				     struct fw_card *card, int status);
+typedef void (*fw_transaction_callback_t)(struct fw_card *card, int rcode,
+					  void *data, size_t length,
+					  void *callback_data);
+/*
+ * Important note:  The callback must guarantee that either fw_send_response()
+ * or kfree() is called on the @request.
+ */
+typedef void (*fw_address_callback_t)(struct fw_card *card,
+				      struct fw_request *request,
+				      int tcode, int destination, int source,
+				      int generation, int speed,
+				      unsigned long long offset,
+				      void *data, size_t length,
+				      void *callback_data);
+
+struct fw_packet {
+	int speed;
+	int generation;
+	u32 header[4];
+	size_t header_length;
+	void *payload;
+	size_t payload_length;
+	dma_addr_t payload_bus;
+	u32 timestamp;
+
+	/*
+	 * This callback is called when the packet transmission has
+	 * completed; for successful transmission, the status code is
+	 * the ack received from the destination, otherwise it's a
+	 * negative errno: ENOMEM, ESTALE, ETIMEDOUT, ENODEV, EIO.
+	 * The callback can be called from tasklet context and thus
+	 * must never block.
+	 */
+	fw_packet_callback_t callback;
+	int ack;
+	struct list_head link;
+	void *driver_data;
+};
+
+struct fw_transaction {
+	int node_id; /* The generation is implied; it is always the current. */
+	int tlabel;
+	int timestamp;
+	struct list_head link;
+
+	struct fw_packet packet;
+
+	/*
+	 * The data passed to the callback is valid only during the
+	 * callback.
+	 */
+	fw_transaction_callback_t callback;
+	void *callback_data;
+};
+
+struct fw_address_handler {
+	u64 offset;
+	size_t length;
+	fw_address_callback_t address_callback;
+	void *callback_data;
+	struct list_head link;
+};
+
+struct fw_address_region {
+	u64 start;
+	u64 end;
+};
+
+extern const struct fw_address_region fw_high_memory_region;
+
+int fw_core_add_address_handler(struct fw_address_handler *handler,
+				const struct fw_address_region *region);
+void fw_core_remove_address_handler(struct fw_address_handler *handler);
+void fw_send_response(struct fw_card *card,
+		      struct fw_request *request, int rcode);
+void fw_send_request(struct fw_card *card, struct fw_transaction *t,
+		     int tcode, int destination_id, int generation, int speed,
+		     unsigned long long offset, void *payload, size_t length,
+		     fw_transaction_callback_t callback, void *callback_data);
+int fw_cancel_transaction(struct fw_card *card,
+			  struct fw_transaction *transaction);
+int fw_run_transaction(struct fw_card *card, int tcode, int destination_id,
+		       int generation, int speed, unsigned long long offset,
+		       void *payload, size_t length);
+
+#endif /* _LINUX_FIREWIRE_H */
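
Per the fw_device comment above, consumers must read ->generation before ->node_id with a read barrier in between. A sketch of a reader; the function name is illustrative, and TCODE_READ_QUADLET_REQUEST is assumed to come from <linux/firewire-constants.h>:

	static int read_quadlet(struct fw_device *device,
				unsigned long long offset, __be32 *data)
	{
		int generation, node_id;

		generation = device->generation;
		smp_rmb();	/* read generation before node_id, see comment above */
		node_id = device->node_id;

		return fw_run_transaction(device->card, TCODE_READ_QUADLET_REQUEST,
					  node_id, generation, device->max_speed,
					  offset, data, 4);
	}
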
diff --git a/include/linux/firmware.h b/include/linux/firmware.h
index c8ecf5b..d315446 100644
--- a/include/linux/firmware.h
+++ b/include/linux/firmware.h
@@ -5,7 +5,6 @@
 #include <linux/types.h>
 #include <linux/compiler.h>
 
-#define FIRMWARE_NAME_MAX 30 
 #define FW_ACTION_NOHOTPLUG 0
 #define FW_ACTION_HOTPLUG 1
 
diff --git a/include/linux/fs.h b/include/linux/fs.h
index f5ae9f1..74a5793 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -1919,8 +1919,9 @@
 
 extern struct kmem_cache *names_cachep;
 
-#define __getname()	kmem_cache_alloc(names_cachep, GFP_KERNEL)
-#define __putname(name) kmem_cache_free(names_cachep, (void *)(name))
+#define __getname_gfp(gfp)	kmem_cache_alloc(names_cachep, (gfp))
+#define __getname()		__getname_gfp(GFP_KERNEL)
+#define __putname(name)		kmem_cache_free(names_cachep, (void *)(name))
 #ifndef CONFIG_AUDITSYSCALL
 #define putname(name)   __putname(name)
 #else
diff --git a/include/linux/genhd.h b/include/linux/genhd.h
index 7cbd38d..45fc320 100644
--- a/include/linux/genhd.h
+++ b/include/linux/genhd.h
@@ -142,7 +142,7 @@
                                          * disks that can't be partitioned. */
 
 	char disk_name[DISK_NAME_LEN];	/* name of major driver */
-
+	char *(*nodename)(struct gendisk *gd);
 	/* Array of pointers to partitions indexed by partno.
 	 * Protected with matching bdev lock but stat and other
 	 * non-critical accesses use RCU.  Always access through
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index 412178a..cfdb35d 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -53,7 +53,19 @@
 #define __GFP_THISNODE	((__force gfp_t)0x40000u)/* No fallback, no policies */
 #define __GFP_RECLAIMABLE ((__force gfp_t)0x80000u) /* Page is reclaimable */
 
-#define __GFP_BITS_SHIFT 21	/* Room for 21 __GFP_FOO bits */
+#ifdef CONFIG_KMEMCHECK
+#define __GFP_NOTRACK	((__force gfp_t)0x200000u)  /* Don't track with kmemcheck */
+#else
+#define __GFP_NOTRACK	((__force gfp_t)0)
+#endif
+
+/*
+ * This may seem redundant, but it's a way of annotating false positives vs.
+ * allocations that simply cannot be supported (e.g. page tables).
+ */
+#define __GFP_NOTRACK_FALSE_POSITIVE (__GFP_NOTRACK)
+
+#define __GFP_BITS_SHIFT 22	/* Room for 22 __GFP_FOO bits */
 #define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1))
 
 /* This equals 0, but use constants in case they ever change */
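
A sketch of how __GFP_NOTRACK_FALSE_POSITIVE is meant to be used; the function and scenario are hypothetical. A buffer whose contents are written by hardware looks uninitialized to kmemcheck, so the allocation opts out of tracking:

	static void *alloc_dma_scratch(size_t size)
	{
		/* contents are filled in by the device; suppress kmemcheck warnings */
		return kmalloc(size, GFP_KERNEL | __GFP_NOTRACK_FALSE_POSITIVE);
	}
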
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index c41e812..2721f07 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -472,6 +472,20 @@
 		__tasklet_hi_schedule(t);
 }
 
+extern void __tasklet_hi_schedule_first(struct tasklet_struct *t);
+
+/*
+ * This version avoids touching any other tasklets. Needed for kmemcheck
+ * in order not to take any page faults while enqueueing this tasklet;
+ * consider VERY carefully whether you really need this or
+ * tasklet_hi_schedule()...
+ */
+static inline void tasklet_hi_schedule_first(struct tasklet_struct *t)
+{
+	if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
+		__tasklet_hi_schedule_first(t);
+}
+
 
 static inline void tasklet_disable_nosync(struct tasklet_struct *t)
 {
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index 883cd44..c5a71c3 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -97,12 +97,14 @@
 #define	KERN_INFO	"<6>"	/* informational			*/
 #define	KERN_DEBUG	"<7>"	/* debug-level messages			*/
 
+/* Use the default kernel loglevel */
+#define KERN_DEFAULT	"<d>"
 /*
  * Annotation for a "continued" line of log printout (only done after a
  * line that had no enclosing \n). Only to be used by core/arch code
  * during early bootup (a continued line is not SMP-safe otherwise).
  */
-#define	KERN_CONT	""
+#define	KERN_CONT	"<c>"
 
 extern int console_printk[];
 
@@ -406,7 +408,7 @@
  *
  * Use tracing_on/tracing_off when you want to quickly turn on or off
  * tracing. It simply enables or disables the recording of the trace events.
- * This also corresponds to the user space debugfs/tracing/tracing_on
+ * This also corresponds to the user space /sys/kernel/debug/tracing/tracing_on
  * file, which gives a means for the kernel and userspace to interact.
  * Place a tracing_off() in the kernel where you want tracing to end.
  * From user space, examine the trace, and then echo 1 > tracing_on
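
With KERN_CONT now carrying an explicit "<c>" marker, continuation lines still work the same way from the caller's point of view. A small sketch with a hypothetical helper:

	static void report_feature(bool present)
	{
		printk(KERN_INFO "feature X:");
		printk(KERN_CONT " %s\n", present ? "present" : "absent");
	}
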
diff --git a/include/linux/kmemcheck.h b/include/linux/kmemcheck.h
new file mode 100644
index 0000000..47b39b7
--- /dev/null
+++ b/include/linux/kmemcheck.h
@@ -0,0 +1,153 @@
+#ifndef LINUX_KMEMCHECK_H
+#define LINUX_KMEMCHECK_H
+
+#include <linux/mm_types.h>
+#include <linux/types.h>
+
+#ifdef CONFIG_KMEMCHECK
+extern int kmemcheck_enabled;
+
+/* The slab-related functions. */
+void kmemcheck_alloc_shadow(struct page *page, int order, gfp_t flags, int node);
+void kmemcheck_free_shadow(struct page *page, int order);
+void kmemcheck_slab_alloc(struct kmem_cache *s, gfp_t gfpflags, void *object,
+			  size_t size);
+void kmemcheck_slab_free(struct kmem_cache *s, void *object, size_t size);
+
+void kmemcheck_pagealloc_alloc(struct page *p, unsigned int order,
+			       gfp_t gfpflags);
+
+void kmemcheck_show_pages(struct page *p, unsigned int n);
+void kmemcheck_hide_pages(struct page *p, unsigned int n);
+
+bool kmemcheck_page_is_tracked(struct page *p);
+
+void kmemcheck_mark_unallocated(void *address, unsigned int n);
+void kmemcheck_mark_uninitialized(void *address, unsigned int n);
+void kmemcheck_mark_initialized(void *address, unsigned int n);
+void kmemcheck_mark_freed(void *address, unsigned int n);
+
+void kmemcheck_mark_unallocated_pages(struct page *p, unsigned int n);
+void kmemcheck_mark_uninitialized_pages(struct page *p, unsigned int n);
+void kmemcheck_mark_initialized_pages(struct page *p, unsigned int n);
+
+int kmemcheck_show_addr(unsigned long address);
+int kmemcheck_hide_addr(unsigned long address);
+
+#else
+#define kmemcheck_enabled 0
+
+static inline void
+kmemcheck_alloc_shadow(struct page *page, int order, gfp_t flags, int node)
+{
+}
+
+static inline void
+kmemcheck_free_shadow(struct page *page, int order)
+{
+}
+
+static inline void
+kmemcheck_slab_alloc(struct kmem_cache *s, gfp_t gfpflags, void *object,
+		     size_t size)
+{
+}
+
+static inline void kmemcheck_slab_free(struct kmem_cache *s, void *object,
+				       size_t size)
+{
+}
+
+static inline void kmemcheck_pagealloc_alloc(struct page *p,
+	unsigned int order, gfp_t gfpflags)
+{
+}
+
+static inline bool kmemcheck_page_is_tracked(struct page *p)
+{
+	return false;
+}
+
+static inline void kmemcheck_mark_unallocated(void *address, unsigned int n)
+{
+}
+
+static inline void kmemcheck_mark_uninitialized(void *address, unsigned int n)
+{
+}
+
+static inline void kmemcheck_mark_initialized(void *address, unsigned int n)
+{
+}
+
+static inline void kmemcheck_mark_freed(void *address, unsigned int n)
+{
+}
+
+static inline void kmemcheck_mark_unallocated_pages(struct page *p,
+						    unsigned int n)
+{
+}
+
+static inline void kmemcheck_mark_uninitialized_pages(struct page *p,
+						      unsigned int n)
+{
+}
+
+static inline void kmemcheck_mark_initialized_pages(struct page *p,
+						    unsigned int n)
+{
+}
+
+#endif /* CONFIG_KMEMCHECK */
+
+/*
+ * Bitfield annotations
+ *
+ * How to use: If you have a struct using bitfields, for example
+ *
+ *     struct a {
+ *             int x:8, y:8;
+ *     };
+ *
+ * then this should be rewritten as
+ *
+ *     struct a {
+ *             kmemcheck_bitfield_begin(flags);
+ *             int x:8, y:8;
+ *             kmemcheck_bitfield_end(flags);
+ *     };
+ *
+ * Now the "flags_begin" and "flags_end" members may be used to refer to the
+ * beginning and end, respectively, of the bitfield (and things like
+ * &x.flags_begin is allowed). As soon as the struct is allocated, the bit-
+ * fields should be annotated:
+ *
+ *     struct a *a = kmalloc(sizeof(struct a), GFP_KERNEL);
+ *     kmemcheck_annotate_bitfield(a, flags);
+ *
+ * Note: We provide the same definitions for both kmemcheck and non-
+ * kmemcheck kernels. This makes it harder to introduce accidental errors. It
+ * is also allowed to pass NULL pointers to kmemcheck_annotate_bitfield().
+ */
+#define kmemcheck_bitfield_begin(name)	\
+	int name##_begin[0];
+
+#define kmemcheck_bitfield_end(name)	\
+	int name##_end[0];
+
+#define kmemcheck_annotate_bitfield(ptr, name)				\
+	do if (ptr) {							\
+		int _n = (long) &((ptr)->name##_end)			\
+			- (long) &((ptr)->name##_begin);		\
+		BUILD_BUG_ON(_n < 0);					\
+									\
+		kmemcheck_mark_initialized(&((ptr)->name##_begin), _n);	\
+	} while (0)
+
+#define kmemcheck_annotate_variable(var)				\
+	do {								\
+		kmemcheck_mark_initialized(&(var), sizeof(var));	\
+	} while (0)							\
+
+#endif /* LINUX_KMEMCHECK_H */
diff --git a/include/linux/mg_disk.h b/include/linux/mg_disk.h
new file mode 100644
index 0000000..e11f4d9
--- /dev/null
+++ b/include/linux/mg_disk.h
@@ -0,0 +1,45 @@
+/*
+ *  include/linux/mg_disk.h
+ *
+ *  Private data for mflash platform driver
+ *
+ * (c) 2008 mGine Co.,LTD
+ * (c) 2008 unsik Kim <donari75@gmail.com>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License version 2 as
+ *  published by the Free Software Foundation.
+ */
+
+#ifndef __MG_DISK_H__
+#define __MG_DISK_H__
+
+/* name for platform device */
+#define MG_DEV_NAME "mg_disk"
+
+/* names of GPIO resource */
+#define MG_RST_PIN	"mg_rst"
+/* except for MG_BOOT_DEV, a reset-out pin should be assigned */
+#define MG_RSTOUT_PIN	"mg_rstout"
+
+/* device attribution */
+/* use mflash as boot device */
+#define MG_BOOT_DEV		(1 << 0)
+/* use mflash as storage device */
+#define MG_STORAGE_DEV		(1 << 1)
+/* same as MG_STORAGE_DEV, but bootloader already done reset sequence */
+#define MG_STORAGE_DEV_SKIP_RST	(1 << 2)
+
+/* private driver data */
+struct mg_drv_data {
+	/* disk resource */
+	u32 use_polling;
+
+	/* device attribution */
+	u32 dev_attr;
+
+	/* internally used */
+	void *host;
+};
+
+#endif
diff --git a/include/linux/miscdevice.h b/include/linux/miscdevice.h
index beb6ec9..0521177 100644
--- a/include/linux/miscdevice.h
+++ b/include/linux/miscdevice.h
@@ -41,6 +41,7 @@
 	struct list_head list;
 	struct device *parent;
 	struct device *this_device;
+	const char *devnode;
 };
 
 extern int misc_register(struct miscdevice * misc);
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index f440810..7acc843 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -98,6 +98,14 @@
 #ifdef CONFIG_WANT_PAGE_DEBUG_FLAGS
 	unsigned long debug_flags;	/* Use atomic bitops on this */
 #endif
+
+#ifdef CONFIG_KMEMCHECK
+	/*
+	 * kmemcheck wants to track the status of each byte in a page; this
+	 * is a pointer to such a status block. NULL if not tracked.
+	 */
+	void *shadow;
+#endif
 };
 
 /*
diff --git a/include/linux/nls.h b/include/linux/nls.h
index 52b1a76..d47beef 100644
--- a/include/linux/nls.h
+++ b/include/linux/nls.h
@@ -3,8 +3,23 @@
 
 #include <linux/init.h>
 
-/* unicode character */
-typedef __u16 wchar_t;
+/* Unicode has changed over the years.  Unicode code points no longer
+ * fit into 16 bits; as of Unicode 5 valid code points range from 0
+ * to 0x10ffff (17 planes, where each plane holds 65536 code points).
+ *
+ * The original decision to represent Unicode characters as 16-bit
+ * wchar_t values is now outdated.  But plane 0 still includes the
+ * most commonly used characters, so we will retain it.  The newer
+ * 32-bit unicode_t type can be used when it is necessary to
+ * represent the full Unicode character set.
+ */
+
+/* Plane-0 Unicode character */
+typedef u16 wchar_t;
+#define MAX_WCHAR_T	0xffff
+
+/* Arbitrary Unicode character */
+typedef u32 unicode_t;
 
 struct nls_table {
 	const char *charset;
@@ -21,6 +36,13 @@
 /* this value hold the maximum octet of charset */
 #define NLS_MAX_CHARSET_SIZE 6 /* for UTF-8 */
 
+/* Byte order for UTF-16 strings */
+enum utf16_endian {
+	UTF16_HOST_ENDIAN,
+	UTF16_LITTLE_ENDIAN,
+	UTF16_BIG_ENDIAN
+};
+
 /* nls.c */
 extern int register_nls(struct nls_table *);
 extern int unregister_nls(struct nls_table *);
@@ -28,10 +50,11 @@
 extern void unload_nls(struct nls_table *);
 extern struct nls_table *load_nls_default(void);
 
-extern int utf8_mbtowc(wchar_t *, const __u8 *, int);
-extern int utf8_mbstowcs(wchar_t *, const __u8 *, int);
-extern int utf8_wctomb(__u8 *, wchar_t, int);
-extern int utf8_wcstombs(__u8 *, const wchar_t *, int);
+extern int utf8_to_utf32(const u8 *s, int len, unicode_t *pu);
+extern int utf32_to_utf8(unicode_t u, u8 *s, int maxlen);
+extern int utf8s_to_utf16s(const u8 *s, int len, wchar_t *pwcs);
+extern int utf16s_to_utf8s(const wchar_t *pwcs, int len,
+		enum utf16_endian endian, u8 *s, int maxlen);
 
 static inline unsigned char nls_tolower(struct nls_table *t, unsigned char c)
 {
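
A sketch of the new conversion helpers round-tripping a single character; the helper name is illustrative, and the return convention (byte count on success, -1 on error, mirroring the old utf8_mbtowc()/utf8_wctomb() helpers) should be treated as an assumption:

	static int recode_utf8_char(const u8 *in, int inlen, u8 *out, int outlen)
	{
		unicode_t u;
		int n;

		n = utf8_to_utf32(in, inlen, &u);	/* bytes consumed, or -1 */
		if (n < 0)
			return -EINVAL;

		n = utf32_to_utf8(u, out, outlen);	/* bytes written, or -1 */
		return (n < 0) ? -EINVAL : n;
	}
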
diff --git a/include/linux/pci.h b/include/linux/pci.h
index 72698d8..8e366bb 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -124,6 +124,14 @@
 #define PCI_UNKNOWN	((pci_power_t __force) 5)
 #define PCI_POWER_ERROR	((pci_power_t __force) -1)
 
+/* Remember to update this when the list above changes! */
+extern const char *pci_power_names[];
+
+static inline const char *pci_power_name(pci_power_t state)
+{
+	return pci_power_names[1 + (int) state];
+}
+
 #define PCI_PM_D2_DELAY	200
 #define PCI_PM_D3_WAIT	10
 #define PCI_PM_BUS_WAIT	50
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index aa01d38..a3b0003 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -104,6 +104,7 @@
 #define PCI_CLASS_SERIAL_USB_UHCI	0x0c0300
 #define PCI_CLASS_SERIAL_USB_OHCI	0x0c0310
 #define PCI_CLASS_SERIAL_USB_EHCI	0x0c0320
+#define PCI_CLASS_SERIAL_USB_XHCI	0x0c0330
 #define PCI_CLASS_SERIAL_FIBER		0x0c04
 #define PCI_CLASS_SERIAL_SMBUS		0x0c05
 
diff --git a/include/linux/platform_device.h b/include/linux/platform_device.h
index b67bb5d..8dc5123 100644
--- a/include/linux/platform_device.h
+++ b/include/linux/platform_device.h
@@ -36,8 +36,8 @@
 
 extern struct resource *platform_get_resource(struct platform_device *, unsigned int, unsigned int);
 extern int platform_get_irq(struct platform_device *, unsigned int);
-extern struct resource *platform_get_resource_byname(struct platform_device *, unsigned int, char *);
-extern int platform_get_irq_byname(struct platform_device *, char *);
+extern struct resource *platform_get_resource_byname(struct platform_device *, unsigned int, const char *);
+extern int platform_get_irq_byname(struct platform_device *, const char *);
 extern int platform_add_devices(struct platform_device **, int);
 
 extern struct platform_device *platform_device_register_simple(const char *, int id,
diff --git a/include/linux/ring_buffer.h b/include/linux/ring_buffer.h
index 8670f15..29f8599 100644
--- a/include/linux/ring_buffer.h
+++ b/include/linux/ring_buffer.h
@@ -1,6 +1,7 @@
 #ifndef _LINUX_RING_BUFFER_H
 #define _LINUX_RING_BUFFER_H
 
+#include <linux/kmemcheck.h>
 #include <linux/mm.h>
 #include <linux/seq_file.h>
 
@@ -11,7 +12,10 @@
  * Don't refer to this struct directly, use functions below.
  */
 struct ring_buffer_event {
+	kmemcheck_bitfield_begin(bitfield);
 	u32		type_len:5, time_delta:27;
+	kmemcheck_bitfield_end(bitfield);
+
 	u32		array[];
 };
 
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 1bc6fae..02042e7 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -674,7 +674,7 @@
 	struct task_group *tg;
 #ifdef CONFIG_SYSFS
 	struct kobject kobj;
-	struct work_struct work;
+	struct delayed_work work;
 #endif
 #endif
 
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index fa51293..63ef24b 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -15,6 +15,7 @@
 #define _LINUX_SKBUFF_H
 
 #include <linux/kernel.h>
+#include <linux/kmemcheck.h>
 #include <linux/compiler.h>
 #include <linux/time.h>
 #include <linux/cache.h>
@@ -343,6 +344,7 @@
 		};
 	};
 	__u32			priority;
+	kmemcheck_bitfield_begin(flags1);
 	__u8			local_df:1,
 				cloned:1,
 				ip_summed:2,
@@ -353,6 +355,7 @@
 				ipvs_property:1,
 				peeked:1,
 				nf_trace:1;
+	kmemcheck_bitfield_end(flags1);
 	__be16			protocol;
 
 	void			(*destructor)(struct sk_buff *skb);
@@ -372,12 +375,16 @@
 	__u16			tc_verd;	/* traffic control verdict */
 #endif
 #endif
+
+	kmemcheck_bitfield_begin(flags2);
 #ifdef CONFIG_IPV6_NDISC_NODETYPE
 	__u8			ndisc_nodetype:2;
 #endif
 #if defined(CONFIG_MAC80211) || defined(CONFIG_MAC80211_MODULE)
 	__u8			do_not_encrypt:1;
 #endif
+	kmemcheck_bitfield_end(flags2);
+
 	/* 0/13/14 bit hole */
 
 #ifdef CONFIG_NET_DMA
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 219b8fb..2da8372 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -64,6 +64,13 @@
 
 #define SLAB_NOLEAKTRACE	0x00800000UL	/* Avoid kmemleak tracing */
 
+/* Don't track use of uninitialized memory */
+#ifdef CONFIG_KMEMCHECK
+# define SLAB_NOTRACK		0x01000000UL
+#else
+# define SLAB_NOTRACK		0x00000000UL
+#endif
+
 /* The following flags affect the page allocator grouping pages by mobility */
 #define SLAB_RECLAIM_ACCOUNT	0x00020000UL		/* Objects are reclaimable */
 #define SLAB_TEMPORARY		SLAB_RECLAIM_ACCOUNT	/* Objects are short-lived */
diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
index 713f841..850d057 100644
--- a/include/linux/slab_def.h
+++ b/include/linux/slab_def.h
@@ -16,6 +16,87 @@
 #include <linux/compiler.h>
 #include <linux/kmemtrace.h>
 
+/*
+ * struct kmem_cache
+ *
+ * manages a cache.
+ */
+
+struct kmem_cache {
+/* 1) per-cpu data, touched during every alloc/free */
+	struct array_cache *array[NR_CPUS];
+/* 2) Cache tunables. Protected by cache_chain_mutex */
+	unsigned int batchcount;
+	unsigned int limit;
+	unsigned int shared;
+
+	unsigned int buffer_size;
+	u32 reciprocal_buffer_size;
+/* 3) touched by every alloc & free from the backend */
+
+	unsigned int flags;		/* constant flags */
+	unsigned int num;		/* # of objs per slab */
+
+/* 4) cache_grow/shrink */
+	/* order of pgs per slab (2^n) */
+	unsigned int gfporder;
+
+	/* force GFP flags, e.g. GFP_DMA */
+	gfp_t gfpflags;
+
+	size_t colour;			/* cache colouring range */
+	unsigned int colour_off;	/* colour offset */
+	struct kmem_cache *slabp_cache;
+	unsigned int slab_size;
+	unsigned int dflags;		/* dynamic flags */
+
+	/* constructor func */
+	void (*ctor)(void *obj);
+
+/* 5) cache creation/removal */
+	const char *name;
+	struct list_head next;
+
+/* 6) statistics */
+#ifdef CONFIG_DEBUG_SLAB
+	unsigned long num_active;
+	unsigned long num_allocations;
+	unsigned long high_mark;
+	unsigned long grown;
+	unsigned long reaped;
+	unsigned long errors;
+	unsigned long max_freeable;
+	unsigned long node_allocs;
+	unsigned long node_frees;
+	unsigned long node_overflow;
+	atomic_t allochit;
+	atomic_t allocmiss;
+	atomic_t freehit;
+	atomic_t freemiss;
+
+	/*
+	 * If debugging is enabled, then the allocator can add additional
+	 * fields and/or padding to every object. buffer_size contains the total
+	 * object size including these internal fields, the following two
+	 * variables contain the offset to the user object and its size.
+	 */
+	int obj_offset;
+	int obj_size;
+#endif /* CONFIG_DEBUG_SLAB */
+
+	/*
+	 * We put nodelists[] at the end of kmem_cache, because we want to size
+	 * this array to nr_node_ids slots instead of MAX_NUMNODES
+	 * (see kmem_cache_init())
+	 * We still use [MAX_NUMNODES] and not [1] or [0] because cache_cache
+	 * is statically defined, so we reserve the max number of nodes.
+	 */
+	struct kmem_list3 *nodelists[MAX_NUMNODES];
+	/*
+	 * Do not add fields after nodelists[]
+	 */
+};
+
 /* Size description struct for general caches. */
 struct cache_sizes {
 	size_t		 	cs_size;
diff --git a/include/linux/stacktrace.h b/include/linux/stacktrace.h
index 1a8cecc..51efbef 100644
--- a/include/linux/stacktrace.h
+++ b/include/linux/stacktrace.h
@@ -4,6 +4,8 @@
 struct task_struct;
 
 #ifdef CONFIG_STACKTRACE
+struct task_struct;
+
 struct stack_trace {
 	unsigned int nr_entries, max_entries;
 	unsigned long *entries;
@@ -11,6 +13,7 @@
 };
 
 extern void save_stack_trace(struct stack_trace *trace);
+extern void save_stack_trace_bp(struct stack_trace *trace, unsigned long bp);
 extern void save_stack_trace_tsk(struct task_struct *tsk,
 				struct stack_trace *trace);
 
diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h
index 14df7e6..b9dc4ca 100644
--- a/include/linux/tracepoint.h
+++ b/include/linux/tracepoint.h
@@ -198,7 +198,7 @@
  *	* This is how the trace record is structured and will
  *	* be saved into the ring buffer. These are the fields
  *	* that will be exposed to user-space in
- *	* /debug/tracing/events/<*>/format.
+ *	* /sys/kernel/debug/tracing/events/<*>/format.
  *	*
  *	* The declared 'local variable' is called '__entry'
  *	*
@@ -258,7 +258,7 @@
  * tracepoint callback (this is used by programmatic plugins and
  * can also by used by generic instrumentation like SystemTap), and
  * it is also used to expose a structured trace record in
- * /debug/tracing/events/.
+ * /sys/kernel/debug/tracing/events/.
  */
 
 #define TRACE_EVENT(name, proto, args, struct, assign, print)	\
diff --git a/include/linux/usb.h b/include/linux/usb.h
index 3aa2cd1..84929e9 100644
--- a/include/linux/usb.h
+++ b/include/linux/usb.h
@@ -36,6 +36,7 @@
  *  - configs have one (often) or more interfaces;
  *  - interfaces have one (usually) or more settings;
  *  - each interface setting has zero or (usually) more endpoints.
+ *  - a SuperSpeed endpoint has a companion descriptor
  *
  * And there might be other descriptors mixed in with those.
  *
@@ -44,6 +45,19 @@
 
 struct ep_device;
 
+/* For SS devices */
+/**
+ * struct usb_host_ss_ep_comp - Valid for SuperSpeed devices only
+ * @desc: endpoint companion descriptor, wMaxPacketSize in native byteorder
+ * @extra: descriptors following this endpoint companion descriptor
+ * @extralen: how many bytes of "extra" are valid
+ */
+struct usb_host_ss_ep_comp {
+	struct usb_ss_ep_comp_descriptor	desc;
+	unsigned char				*extra;   /* Extra descriptors */
+	int					extralen;
+};
+
 /**
  * struct usb_host_endpoint - host-side endpoint descriptor and queue
  * @desc: descriptor for this endpoint, wMaxPacketSize in native byteorder
@@ -51,6 +65,7 @@
  * @hcpriv: for use by HCD; typically holds hardware dma queue head (QH)
  *	with one or more transfer descriptors (TDs) per urb
  * @ep_dev: ep_device for sysfs info
+ * @ss_ep_comp: companion descriptor information for this endpoint
  * @extra: descriptors following this endpoint in the configuration
  * @extralen: how many bytes of "extra" are valid
  * @enabled: URBs may be submitted to this endpoint
@@ -63,6 +78,7 @@
 	struct list_head		urb_list;
 	void				*hcpriv;
 	struct ep_device 		*ep_dev;	/* For sysfs info */
+	struct usb_host_ss_ep_comp	*ss_ep_comp;	/* For SS devices */
 
 	unsigned char *extra;   /* Extra descriptors */
 	int extralen;
@@ -336,7 +352,6 @@
 #ifdef CONFIG_USB_DEVICEFS
 	struct dentry *usbfs_dentry;	/* usbfs dentry entry for the bus */
 #endif
-	struct device *dev;		/* device for this bus */
 
 #if defined(CONFIG_USB_MON) || defined(CONFIG_USB_MON_MODULE)
 	struct mon_bus *mon_bus;	/* non-null when associated */
@@ -363,6 +378,7 @@
  * struct usb_device - kernel's representation of a USB device
  * @devnum: device number; address on a USB bus
  * @devpath: device ID string for use in messages (e.g., /port/...)
+ * @route: tree topology hex string for use with xHCI
  * @state: device state: configured, not attached, etc.
  * @speed: device speed: high/full/low (or error)
  * @tt: Transaction Translator info; used with low/full speed dev, highspeed hub
@@ -420,6 +436,7 @@
  * @skip_sys_resume: skip the next system resume
  * @wusb_dev: if this is a Wireless USB device, link to the WUSB
  *	specific data for the device.
+ * @slot_id: Slot ID assigned by xHCI
  *
  * Notes:
  * Usbcore drivers should not set usbdev->state directly.  Instead use
@@ -428,6 +445,7 @@
 struct usb_device {
 	int		devnum;
 	char		devpath [16];
+	u32		route;
 	enum usb_device_state	state;
 	enum usb_device_speed	speed;
 
@@ -503,6 +521,7 @@
 	unsigned skip_sys_resume:1;
 #endif
 	struct wusb_dev *wusb_dev;
+	int slot_id;
 };
 #define	to_usb_device(d) container_of(d, struct usb_device, dev)
 
@@ -869,6 +888,8 @@
  * struct usb_device_driver - identifies USB device driver to usbcore
  * @name: The driver name should be unique among USB drivers,
  *	and should normally be the same as the module name.
+ * @nodename: Callback to provide a naming hint for a possible
+ *	device node to create.
  * @probe: Called to see if the driver is willing to manage a particular
  *	device.  If it is, probe returns zero and uses dev_set_drvdata()
  *	to associate driver-specific data with the device.  If unwilling
@@ -912,6 +933,7 @@
  */
 struct usb_class_driver {
 	char *name;
+	char *(*nodename)(struct device *dev);
 	const struct file_operations *fops;
 	int minor_base;
 };
@@ -1041,7 +1063,9 @@
  * @setup_dma: For control transfers with URB_NO_SETUP_DMA_MAP set, the
  *	device driver has provided this DMA address for the setup packet.
  *	The host controller driver should use this in preference to
- *	setup_packet.
+ *	setup_packet, but the HCD may choose to ignore the address if it must
+ *	copy the setup packet into internal structures.  Therefore, setup_packet
+ *	must always point to a valid buffer.
  * @start_frame: Returns the initial frame for isochronous transfers.
  * @number_of_packets: Lists the number of ISO transfer buffers.
  * @interval: Specifies the polling interval for interrupt or isochronous
@@ -1177,6 +1201,8 @@
 	unsigned int transfer_flags;	/* (in) URB_SHORT_NOT_OK | ...*/
 	void *transfer_buffer;		/* (in) associated data buffer */
 	dma_addr_t transfer_dma;	/* (in) dma addr for transfer_buffer */
+	struct usb_sg_request *sg;	/* (in) scatter gather buffer list */
+	int num_sgs;			/* (in) number of entries in the sg list */
 	u32 transfer_buffer_length;	/* (in) data buffer length */
 	u32 actual_length;		/* (return) actual transfer length */
 	unsigned char *setup_packet;	/* (in) setup packet (control only) */
@@ -1422,8 +1448,8 @@
 	int			status;
 	size_t			bytes;
 
-	/*
-	 * members below are private: to usbcore,
+	/* private:
+	 * members below are private to usbcore,
 	 * and are not provided for driver access!
 	 */
 	spinlock_t		lock;
@@ -1558,6 +1584,9 @@
 #define err(format, arg...) printk(KERN_ERR KBUILD_MODNAME ": " \
 	format "\n" , ## arg)
 
+/* debugfs stuff */
+extern struct dentry *usb_debug_root;
+
 #endif  /* __KERNEL__ */
 
 #endif
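
The new ss_ep_comp field, together with USB_SPEED_SUPER and struct usb_ss_ep_comp_descriptor added in ch9.h below, lets host-side code look up a SuperSpeed endpoint's burst capability. A minimal sketch of how a driver might do that, assuming usbcore populates ss_ep_comp for SuperSpeed devices; the helper name is illustrative:

#include <linux/usb.h>

/* Sketch: return the bMaxBurst value advertised by a SuperSpeed
 * endpoint, or 0 when no companion descriptor is available. */
static unsigned int example_ep_max_burst(struct usb_device *udev,
					 struct usb_host_endpoint *ep)
{
	if (udev->speed != USB_SPEED_SUPER || !ep->ss_ep_comp)
		return 0;

	return ep->ss_ep_comp->desc.bMaxBurst;
}
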
diff --git a/include/linux/usb/audio.h b/include/linux/usb/audio.h
index 8cb025f..b5744bc 100644
--- a/include/linux/usb/audio.h
+++ b/include/linux/usb/audio.h
@@ -24,10 +24,75 @@
 #define USB_SUBCLASS_AUDIOCONTROL	0x01
 #define USB_SUBCLASS_AUDIOSTREAMING	0x02
 #define USB_SUBCLASS_MIDISTREAMING	0x03
+#define USB_SUBCLASS_VENDOR_SPEC	0xff
 
+/* A.5 Audio Class-Specific AC Interface Descriptor Subtypes */
+#define HEADER				0x01
+#define INPUT_TERMINAL			0x02
+#define OUTPUT_TERMINAL			0x03
+#define MIXER_UNIT			0x04
+#define SELECTOR_UNIT			0x05
+#define FEATURE_UNIT			0x06
+#define PROCESSING_UNIT			0x07
+#define EXTENSION_UNIT			0x08
+
+#define AS_GENERAL			0x01
+#define FORMAT_TYPE			0x02
+#define FORMAT_SPECIFIC			0x03
+
+#define EP_GENERAL			0x01
+
+#define MS_GENERAL			0x01
+#define MIDI_IN_JACK			0x02
+#define MIDI_OUT_JACK			0x03
+
+/* endpoint attributes */
+#define EP_ATTR_MASK			0x0c
+#define EP_ATTR_ASYNC			0x04
+#define EP_ATTR_ADAPTIVE		0x08
+#define EP_ATTR_SYNC			0x0c
+
+/* cs endpoint attributes */
+#define EP_CS_ATTR_SAMPLE_RATE		0x01
+#define EP_CS_ATTR_PITCH_CONTROL	0x02
+#define EP_CS_ATTR_FILL_MAX		0x80
+
+/* Audio Class specific Request Codes */
+#define USB_AUDIO_SET_INTF		0x21
+#define USB_AUDIO_SET_ENDPOINT		0x22
+#define USB_AUDIO_GET_INTF		0xa1
+#define USB_AUDIO_GET_ENDPOINT		0xa2
+
+#define SET_	0x00
+#define GET_	0x80
+
+#define _CUR	0x1
+#define _MIN	0x2
+#define _MAX	0x3
+#define _RES	0x4
+#define _MEM	0x5
+
+#define SET_CUR		(SET_ | _CUR)
+#define GET_CUR		(GET_ | _CUR)
+#define SET_MIN		(SET_ | _MIN)
+#define GET_MIN		(GET_ | _MIN)
+#define SET_MAX		(SET_ | _MAX)
+#define GET_MAX		(GET_ | _MAX)
+#define SET_RES		(SET_ | _RES)
+#define GET_RES		(GET_ | _RES)
+#define SET_MEM		(SET_ | _MEM)
+#define GET_MEM		(GET_ | _MEM)
+
+#define GET_STAT	0xff
+
+#define USB_AC_TERMINAL_UNDEFINED	0x100
+#define USB_AC_TERMINAL_STREAMING	0x101
+#define USB_AC_TERMINAL_VENDOR_SPEC	0x1FF
+
+/* Terminal Control Selectors */
 /* 4.3.2  Class-Specific AC Interface Descriptor */
 struct usb_ac_header_descriptor {
-	__u8  bLength;			/* 8+n */
+	__u8  bLength;			/* 8 + n */
 	__u8  bDescriptorType;		/* USB_DT_CS_INTERFACE */
 	__u8  bDescriptorSubtype;	/* USB_MS_HEADER */
 	__le16 bcdADC;			/* 0x0100 */
@@ -36,7 +101,7 @@
 	__u8  baInterfaceNr[];		/* [n] */
 } __attribute__ ((packed));
 
-#define USB_DT_AC_HEADER_SIZE(n)	(8+(n))
+#define USB_DT_AC_HEADER_SIZE(n)	(8 + (n))
 
 /* As above, but more useful for defining your own descriptors: */
 #define DECLARE_USB_AC_HEADER_DESCRIPTOR(n) 			\
@@ -50,4 +115,200 @@
 	__u8  baInterfaceNr[n];					\
 } __attribute__ ((packed))
 
+/* 4.3.2.1 Input Terminal Descriptor */
+struct usb_input_terminal_descriptor {
+	__u8  bLength;			/* in bytes: 12 */
+	__u8  bDescriptorType;		/* CS_INTERFACE descriptor type */
+	__u8  bDescriptorSubtype;	/* INPUT_TERMINAL descriptor subtype */
+	__u8  bTerminalID;		/* Constant uniquely identifying the Terminal */
+	__le16 wTerminalType;		/* USB Audio Terminal Types */
+	__u8  bAssocTerminal;		/* ID of the associated Output Terminal */
+	__u8  bNrChannels;		/* Number of logical output channels */
+	__le16 wChannelConfig;
+	__u8  iChannelNames;
+	__u8  iTerminal;
+} __attribute__ ((packed));
+
+#define USB_DT_AC_INPUT_TERMINAL_SIZE			12
+
+#define USB_AC_INPUT_TERMINAL_UNDEFINED			0x200
+#define USB_AC_INPUT_TERMINAL_MICROPHONE		0x201
+#define USB_AC_INPUT_TERMINAL_DESKTOP_MICROPHONE	0x202
+#define USB_AC_INPUT_TERMINAL_PERSONAL_MICROPHONE	0x203
+#define USB_AC_INPUT_TERMINAL_OMNI_DIR_MICROPHONE	0x204
+#define USB_AC_INPUT_TERMINAL_MICROPHONE_ARRAY		0x205
+#define USB_AC_INPUT_TERMINAL_PROC_MICROPHONE_ARRAY	0x206
+
+/* 4.3.2.2 Output Terminal Descriptor */
+struct usb_output_terminal_descriptor {
+	__u8  bLength;			/* in bytes: 9 */
+	__u8  bDescriptorType;		/* CS_INTERFACE descriptor type */
+	__u8  bDescriptorSubtype;	/* OUTPUT_TERMINAL descriptor subtype */
+	__u8  bTerminalID;		/* Constant uniquely identifying the Terminal */
+	__le16 wTerminalType;		/* USB Audio Terminal Types */
+	__u8  bAssocTerminal;		/* ID of the associated Input Terminal */
+	__u8  bSourceID;		/* ID of the connected Unit or Terminal */
+	__u8  iTerminal;
+} __attribute__ ((packed));
+
+#define USB_DT_AC_OUTPUT_TERMINAL_SIZE				9
+
+#define USB_AC_OUTPUT_TERMINAL_UNDEFINED			0x300
+#define USB_AC_OUTPUT_TERMINAL_SPEAKER				0x301
+#define USB_AC_OUTPUT_TERMINAL_HEADPHONES			0x302
+#define USB_AC_OUTPUT_TERMINAL_HEAD_MOUNTED_DISPLAY_AUDIO	0x303
+#define USB_AC_OUTPUT_TERMINAL_DESKTOP_SPEAKER			0x304
+#define USB_AC_OUTPUT_TERMINAL_ROOM_SPEAKER			0x305
+#define USB_AC_OUTPUT_TERMINAL_COMMUNICATION_SPEAKER		0x306
+#define USB_AC_OUTPUT_TERMINAL_LOW_FREQ_EFFECTS_SPEAKER		0x307
+
+/* Set bControlSize = 2 as default setting */
+#define USB_DT_AC_FEATURE_UNIT_SIZE(ch)		(7 + ((ch) + 1) * 2)
+
+/* As above, but more useful for defining your own descriptors: */
+#define DECLARE_USB_AC_FEATURE_UNIT_DESCRIPTOR(ch) 		\
+struct usb_ac_feature_unit_descriptor_##ch {			\
+	__u8  bLength;						\
+	__u8  bDescriptorType;					\
+	__u8  bDescriptorSubtype;				\
+	__u8  bUnitID;						\
+	__u8  bSourceID;					\
+	__u8  bControlSize;					\
+	__le16 bmaControls[ch + 1];				\
+	__u8  iFeature;						\
+} __attribute__ ((packed))
+
+/* 4.5.2 Class-Specific AS Interface Descriptor */
+struct usb_as_header_descriptor {
+	__u8  bLength;			/* in bytes: 7 */
+	__u8  bDescriptorType;		/* USB_DT_CS_INTERFACE */
+	__u8  bDescriptorSubtype;	/* AS_GENERAL */
+	__u8  bTerminalLink;		/* Terminal ID of connected Terminal */
+	__u8  bDelay;			/* Delay introduced by the data path */
+	__le16 wFormatTag;		/* The Audio Data Format */
+} __attribute__ ((packed));
+
+#define USB_DT_AS_HEADER_SIZE		7
+
+#define USB_AS_AUDIO_FORMAT_TYPE_I_UNDEFINED	0x0
+#define USB_AS_AUDIO_FORMAT_TYPE_I_PCM		0x1
+#define USB_AS_AUDIO_FORMAT_TYPE_I_PCM8		0x2
+#define USB_AS_AUDIO_FORMAT_TYPE_I_IEEE_FLOAT	0x3
+#define USB_AS_AUDIO_FORMAT_TYPE_I_ALAW		0x4
+#define USB_AS_AUDIO_FORMAT_TYPE_I_MULAW	0x5
+
+struct usb_as_format_type_i_continuous_descriptor {
+	__u8  bLength;			/* in bytes: 8 + (ns * 3) */
+	__u8  bDescriptorType;		/* USB_DT_CS_INTERFACE */
+	__u8  bDescriptorSubtype;	/* FORMAT_TYPE */
+	__u8  bFormatType;		/* FORMAT_TYPE_1 */
+	__u8  bNrChannels;		/* physical channels in the stream */
+	__u8  bSubframeSize;		/* bytes per audio subframe */
+	__u8  bBitResolution;
+	__u8  bSamFreqType;
+	__u8  tLowerSamFreq[3];
+	__u8  tUpperSamFreq[3];
+} __attribute__ ((packed));
+
+#define USB_AS_FORMAT_TYPE_I_CONTINUOUS_DESC_SIZE	14
+
+struct usb_as_formate_type_i_discrete_descriptor {
+	__u8  bLength;			/* in bytes: 8 + (ns * 3) */
+	__u8  bDescriptorType;		/* USB_DT_CS_INTERFACE */
+	__u8  bDescriptorSubtype;	/* FORMAT_TYPE */
+	__u8  bFormatType;		/* FORMAT_TYPE_1 */
+	__u8  bNrChannels;		/* physical channels in the stream */
+	__u8  bSubframeSize;		/* bytes per audio subframe */
+	__u8  bBitResolution;
+	__u8  bSamFreqType;
+	__u8  tSamFreq[][3];
+} __attribute__ ((packed));
+
+#define DECLARE_USB_AS_FORMAT_TYPE_I_DISCRETE_DESC(n) 		\
+struct usb_as_formate_type_i_discrete_descriptor_##n {		\
+	__u8  bLength;						\
+	__u8  bDescriptorType;					\
+	__u8  bDescriptorSubtype;				\
+	__u8  bFormatType;					\
+	__u8  bNrChannels;					\
+	__u8  bSubframeSize;					\
+	__u8  bBitResolution;					\
+	__u8  bSamFreqType;					\
+	__u8  tSamFreq[n][3];					\
+} __attribute__ ((packed))
+
+#define USB_AS_FORMAT_TYPE_I_DISCRETE_DESC_SIZE(n)	(8 + (n * 3))
+
+#define USB_AS_FORMAT_TYPE_UNDEFINED	0x0
+#define USB_AS_FORMAT_TYPE_I		0x1
+#define USB_AS_FORMAT_TYPE_II		0x2
+#define USB_AS_FORMAT_TYPE_III		0x3
+
+#define USB_AS_ENDPOINT_ASYNC		(1 << 2)
+#define USB_AS_ENDPOINT_ADAPTIVE	(2 << 2)
+#define USB_AS_ENDPOINT_SYNC		(3 << 2)
+
+struct usb_as_iso_endpoint_descriptor {
+	__u8  bLength;			/* in bytes: 7 */
+	__u8  bDescriptorType;		/* USB_DT_CS_ENDPOINT */
+	__u8  bDescriptorSubtype;	/* EP_GENERAL */
+	__u8  bmAttributes;
+	__u8  bLockDelayUnits;
+	__le16 wLockDelay;
+};
+#define USB_AS_ISO_ENDPOINT_DESC_SIZE	7
+
+#define FU_CONTROL_UNDEFINED		0x00
+#define MUTE_CONTROL			0x01
+#define VOLUME_CONTROL			0x02
+#define BASS_CONTROL			0x03
+#define MID_CONTROL			0x04
+#define TREBLE_CONTROL			0x05
+#define GRAPHIC_EQUALIZER_CONTROL	0x06
+#define AUTOMATIC_GAIN_CONTROL		0x07
+#define DELAY_CONTROL			0x08
+#define BASS_BOOST_CONTROL		0x09
+#define LOUDNESS_CONTROL		0x0a
+
+#define FU_MUTE		(1 << (MUTE_CONTROL - 1))
+#define FU_VOLUME	(1 << (VOLUME_CONTROL - 1))
+#define FU_BASS		(1 << (BASS_CONTROL - 1))
+#define FU_MID		(1 << (MID_CONTROL - 1))
+#define FU_TREBLE	(1 << (TREBLE_CONTROL - 1))
+#define FU_GRAPHIC_EQ	(1 << (GRAPHIC_EQUALIZER_CONTROL - 1))
+#define FU_AUTO_GAIN	(1 << (AUTOMATIC_GAIN_CONTROL - 1))
+#define FU_DELAY	(1 << (DELAY_CONTROL - 1))
+#define FU_BASS_BOOST	(1 << (BASS_BOOST_CONTROL - 1))
+#define FU_LOUDNESS	(1 << (LOUDNESS_CONTROL - 1))
+
+struct usb_audio_control {
+	struct list_head list;
+	const char *name;
+	u8 type;
+	int data[5];
+	int (*set)(struct usb_audio_control *con, u8 cmd, int value);
+	int (*get)(struct usb_audio_control *con, u8 cmd);
+};
+
+static inline int generic_set_cmd(struct usb_audio_control *con, u8 cmd, int value)
+{
+	con->data[cmd] = value;
+
+	return 0;
+}
+
+static inline int generic_get_cmd(struct usb_audio_control *con, u8 cmd)
+{
+	return con->data[cmd];
+}
+
+struct usb_audio_control_selector {
+	struct list_head list;
+	struct list_head control;
+	u8 id;
+	const char *name;
+	u8 type;
+	struct usb_descriptor_header *desc;
+};
+
 #endif /* __LINUX_USB_AUDIO_H */
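
The request-code and control-selector constants above are meant to be combined with struct usb_audio_control, and generic_set_cmd()/generic_get_cmd() simply cache control values in con->data[]. A sketch of wiring up a cached mute control this way; the identifiers my_mute and my_fu_controls are illustrative, not part of this header:

#include <linux/list.h>
#include <linux/usb/audio.h>

static struct usb_audio_control my_mute = {
	.list	= LIST_HEAD_INIT(my_mute.list),
	.name	= "Mute Control",
	.type	= MUTE_CONTROL,
	.set	= generic_set_cmd,	/* stores value in con->data[cmd] */
	.get	= generic_get_cmd,	/* returns con->data[cmd] */
};

static struct usb_audio_control_selector my_fu_controls = {
	.list	= LIST_HEAD_INIT(my_fu_controls.list),
	.name	= "Feature Unit",
	.type	= FEATURE_UNIT,
};

static void example_register_mute(void)
{
	INIT_LIST_HEAD(&my_fu_controls.control);
	list_add(&my_mute.list, &my_fu_controls.control);

	/* a SET_CUR(mute) request would then reduce to: */
	my_mute.set(&my_mute, _CUR, 1);
}
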
diff --git a/include/linux/usb/ch9.h b/include/linux/usb/ch9.h
index b145119..93223638 100644
--- a/include/linux/usb/ch9.h
+++ b/include/linux/usb/ch9.h
@@ -191,6 +191,8 @@
 #define USB_DT_WIRE_ADAPTER		0x21
 #define USB_DT_RPIPE			0x22
 #define USB_DT_CS_RADIO_CONTROL		0x23
+/* From the USB 3.0 spec */
+#define	USB_DT_SS_ENDPOINT_COMP		0x30
 
 /* Conventional codes for class-specific descriptors.  The convention is
  * defined in the USB "Common Class" Spec (3.11).  Individual class specs
@@ -535,6 +537,20 @@
 
 /*-------------------------------------------------------------------------*/
 
+/* USB_DT_SS_ENDPOINT_COMP: SuperSpeed Endpoint Companion descriptor */
+struct usb_ss_ep_comp_descriptor {
+	__u8  bLength;
+	__u8  bDescriptorType;
+
+	__u8  bMaxBurst;
+	__u8  bmAttributes;
+	__u16 wBytesPerInterval;
+} __attribute__ ((packed));
+
+#define USB_DT_SS_EP_COMP_SIZE		6
+
+/*-------------------------------------------------------------------------*/
+
 /* USB_DT_DEVICE_QUALIFIER: Device Qualifier descriptor */
 struct usb_qualifier_descriptor {
 	__u8  bLength;
@@ -752,6 +768,7 @@
 	USB_SPEED_LOW, USB_SPEED_FULL,		/* usb 1.1 */
 	USB_SPEED_HIGH,				/* usb 2.0 */
 	USB_SPEED_VARIABLE,			/* wireless (usb 2.5) */
+	USB_SPEED_SUPER,			/* usb 3.0 */
 };
 
 enum usb_device_state {
diff --git a/include/linux/usb/composite.h b/include/linux/usb/composite.h
index acd7b0f..4f6bb3d 100644
--- a/include/linux/usb/composite.h
+++ b/include/linux/usb/composite.h
@@ -124,6 +124,7 @@
 	void			(*suspend)(struct usb_function *);
 	void			(*resume)(struct usb_function *);
 
+	/* private: */
 	/* internals */
 	struct list_head		list;
 };
@@ -219,6 +220,7 @@
 
 	struct usb_composite_dev	*cdev;
 
+	/* private: */
 	/* internals */
 	struct list_head	list;
 	struct list_head	functions;
@@ -321,6 +323,7 @@
 
 	struct usb_configuration	*config;
 
+	/* private: */
 	/* internals */
 	struct usb_device_descriptor	desc;
 	struct list_head		configs;
diff --git a/include/linux/usb/langwell_otg.h b/include/linux/usb/langwell_otg.h
new file mode 100644
index 0000000..e115ae6
--- /dev/null
+++ b/include/linux/usb/langwell_otg.h
@@ -0,0 +1,177 @@
+/*
+ * Intel Langwell USB OTG transceiver driver
+ * Copyright (C) 2008, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ */
+
+#ifndef __LANGWELL_OTG_H__
+#define __LANGWELL_OTG_H__
+
+/* notify transceiver driver about OTG events */
+extern void langwell_update_transceiver(void);
+/* HCD register bus driver */
+extern int langwell_register_host(struct pci_driver *host_driver);
+/* HCD unregister bus driver */
+extern void langwell_unregister_host(struct pci_driver *host_driver);
+/* DCD register bus driver */
+extern int langwell_register_peripheral(struct pci_driver *client_driver);
+/* DCD unregister bus driver */
+extern void langwell_unregister_peripheral(struct pci_driver *client_driver);
+/* No silent failure, output warning message */
+extern void langwell_otg_nsf_msg(unsigned long message);
+
+#define CI_USBCMD		0x30
+#	define USBCMD_RST		BIT(1)
+#	define USBCMD_RS		BIT(0)
+#define CI_USBSTS		0x34
+#	define USBSTS_SLI		BIT(8)
+#	define USBSTS_URI		BIT(6)
+#	define USBSTS_PCI		BIT(2)
+#define CI_PORTSC1		0x74
+#	define PORTSC_PP		BIT(12)
+#	define PORTSC_LS		(BIT(11) | BIT(10))
+#	define PORTSC_SUSP		BIT(7)
+#	define PORTSC_CCS		BIT(0)
+#define CI_HOSTPC1		0xb4
+#	define HOSTPC1_PHCD		BIT(22)
+#define CI_OTGSC		0xf4
+#	define OTGSC_DPIE		BIT(30)
+#	define OTGSC_1MSE		BIT(29)
+#	define OTGSC_BSEIE		BIT(28)
+#	define OTGSC_BSVIE		BIT(27)
+#	define OTGSC_ASVIE		BIT(26)
+#	define OTGSC_AVVIE		BIT(25)
+#	define OTGSC_IDIE		BIT(24)
+#	define OTGSC_DPIS		BIT(22)
+#	define OTGSC_1MSS		BIT(21)
+#	define OTGSC_BSEIS		BIT(20)
+#	define OTGSC_BSVIS		BIT(19)
+#	define OTGSC_ASVIS		BIT(18)
+#	define OTGSC_AVVIS		BIT(17)
+#	define OTGSC_IDIS		BIT(16)
+#	define OTGSC_DPS		BIT(14)
+#	define OTGSC_1MST		BIT(13)
+#	define OTGSC_BSE		BIT(12)
+#	define OTGSC_BSV		BIT(11)
+#	define OTGSC_ASV		BIT(10)
+#	define OTGSC_AVV		BIT(9)
+#	define OTGSC_ID			BIT(8)
+#	define OTGSC_HABA		BIT(7)
+#	define OTGSC_HADP		BIT(6)
+#	define OTGSC_IDPU		BIT(5)
+#	define OTGSC_DP			BIT(4)
+#	define OTGSC_OT			BIT(3)
+#	define OTGSC_HAAR		BIT(2)
+#	define OTGSC_VC			BIT(1)
+#	define OTGSC_VD			BIT(0)
+#	define OTGSC_INTEN_MASK		(0x7f << 24)
+#	define OTGSC_INTSTS_MASK	(0x7f << 16)
+#define CI_USBMODE		0xf8
+#	define USBMODE_CM		(BIT(1) | BIT(0))
+#	define USBMODE_IDLE		0
+#	define USBMODE_DEVICE		0x2
+#	define USBMODE_HOST		0x3
+
+#define INTR_DUMMY_MASK (USBSTS_SLI | USBSTS_URI | USBSTS_PCI)
+
+struct otg_hsm {
+	/* Input */
+	int a_bus_resume;
+	int a_bus_suspend;
+	int a_conn;
+	int a_sess_vld;
+	int a_srp_det;
+	int a_vbus_vld;
+	int b_bus_resume;
+	int b_bus_suspend;
+	int b_conn;
+	int b_se0_srp;
+	int b_sess_end;
+	int b_sess_vld;
+	int id;
+
+	/* Internal variables */
+	int a_set_b_hnp_en;
+	int b_srp_done;
+	int b_hnp_enable;
+
+	/* Timeout indicator for timers */
+	int a_wait_vrise_tmout;
+	int a_wait_bcon_tmout;
+	int a_aidl_bdis_tmout;
+	int b_ase0_brst_tmout;
+	int b_bus_suspend_tmout;
+	int b_srp_res_tmout;
+
+	/* Informative variables */
+	int a_bus_drop;
+	int a_bus_req;
+	int a_clr_err;
+	int a_suspend_req;
+	int b_bus_req;
+
+	/* Output */
+	int drv_vbus;
+	int loc_conn;
+	int loc_sof;
+
+	/* Others */
+	int b_bus_suspend_vld;
+};
+
+#define TA_WAIT_VRISE	100
+#define TA_WAIT_BCON	30000
+#define TA_AIDL_BDIS	15000
+#define TB_ASE0_BRST	5000
+#define TB_SE0_SRP	2
+#define TB_SRP_RES	100
+#define TB_BUS_SUSPEND	500
+
+struct langwell_otg_timer {
+	unsigned long expires;	/* Number of count increments until timeout */
+	unsigned long count;	/* Tick counter */
+	void (*function)(unsigned long);	/* Timeout function */
+	unsigned long data;	/* Data passed to function */
+	struct list_head list;
+};
+
+struct langwell_otg {
+	struct otg_transceiver 	otg;
+	struct otg_hsm 		hsm;
+	void __iomem 		*regs;
+	unsigned 		region;
+	struct pci_driver	*host_ops;
+	struct pci_driver	*client_ops;
+	struct pci_dev		*pdev;
+	struct work_struct 	work;
+	struct workqueue_struct	*qwork;
+	spinlock_t 		lock;
+	spinlock_t 		wq_lock;
+};
+
+static inline struct langwell_otg *otg_to_langwell(struct otg_transceiver *otg)
+{
+	return container_of(otg, struct langwell_otg, otg);
+}
+
+#ifdef DEBUG
+#define otg_dbg(fmt, args...) \
+	printk(KERN_DEBUG fmt , ## args)
+#else
+#define otg_dbg(fmt, args...) \
+	do { } while (0)
+#endif /* DEBUG */
+#endif /* __LANGWELL_OTG_H__ */
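
The OTGSC bit definitions and the otg_to_langwell() helper are enough to sketch how status handling might look. The write-1-to-clear behaviour and the function name below are assumptions for illustration, not guarantees made by this header:

#include <linux/io.h>
#include <linux/usb/otg.h>
#include <linux/usb/langwell_otg.h>

/* Sketch: latch and acknowledge pending OTGSC interrupt status bits. */
static u32 example_otgsc_ack(struct otg_transceiver *xceiv)
{
	struct langwell_otg *lnw = otg_to_langwell(xceiv);
	u32 otgsc = readl(lnw->regs + CI_OTGSC);
	u32 pending = otgsc & OTGSC_INTSTS_MASK;

	/* keep the enable bits, write the status bits back to clear them */
	writel((otgsc & OTGSC_INTEN_MASK) | pending, lnw->regs + CI_OTGSC);

	if (pending & OTGSC_IDIS)
		otg_dbg("USB ID change detected\n");

	return pending;
}
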
diff --git a/include/linux/usb/langwell_udc.h b/include/linux/usb/langwell_udc.h
new file mode 100644
index 0000000..c949178
--- /dev/null
+++ b/include/linux/usb/langwell_udc.h
@@ -0,0 +1,310 @@
+/*
+ * Intel Langwell USB Device Controller driver
+ * Copyright (C) 2008-2009, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ */
+
+#ifndef __LANGWELL_UDC_H
+#define __LANGWELL_UDC_H
+
+
+/* MACRO defines */
+#define	CAP_REG_OFFSET		0x0
+#define	OP_REG_OFFSET		0x28
+
+#define	DMA_ADDR_INVALID	(~(dma_addr_t)0)
+
+#define	DQH_ALIGNMENT		2048
+#define	DTD_ALIGNMENT		64
+#define	DMA_BOUNDARY		4096
+
+#define	EP0_MAX_PKT_SIZE	64
+#define EP_DIR_IN		1
+#define EP_DIR_OUT		0
+
+#define FLUSH_TIMEOUT		1000
+#define RESET_TIMEOUT		1000
+#define SETUPSTAT_TIMEOUT	100
+#define PRIME_TIMEOUT		100
+
+
+/* device memory space registers */
+
+/* Capability Registers, BAR0 + CAP_REG_OFFSET */
+struct langwell_cap_regs {
+	/* offset: 0x0 */
+	u8	caplength;	/* offset of Operational Register */
+	u8	_reserved3;
+	u16	hciversion;	/* H: BCD encoding of host version */
+	u32	hcsparams;	/* H: host port steering logic capability */
+	u32	hccparams;	/* H: host multiple mode control capability */
+#define	HCC_LEN	BIT(17)		/* Link power management (LPM) capability */
+	u8	_reserved4[0x20-0xc];
+	/* offset: 0x20 */
+	u16	dciversion;	/* BCD encoding of device version */
+	u8	_reserved5[0x24-0x22];
+	u32	dccparams;	/* overall device controller capability */
+#define	HOSTCAP	BIT(8)		/* host capable */
+#define	DEVCAP	BIT(7)		/* device capable */
+#define DEN(d)	\
+	(((d)>>0)&0x1f)		/* bits 4:0, device endpoint number */
+} __attribute__ ((packed));
+
+
+/* Operational Registers, BAR0 + OP_REG_OFFSET */
+struct langwell_op_regs {
+	/* offset: 0x28 */
+	u32	extsts;
+#define	EXTS_TI1	BIT(4)	/* general purpose timer interrupt 1 */
+#define	EXTS_TI1TI0	BIT(3)	/* general purpose timer interrupt 0 */
+#define	EXTS_TI1UPI	BIT(2)	/* USB host periodic interrupt */
+#define	EXTS_TI1UAI	BIT(1)	/* USB host asynchronous interrupt */
+#define	EXTS_TI1NAKI	BIT(0)	/* NAK interrupt */
+	u32	extintr;
+#define	EXTI_TIE1	BIT(4)	/* general purpose timer interrupt enable 1 */
+#define	EXTI_TIE0	BIT(3)	/* general purpose timer interrupt enable 0 */
+#define	EXTI_UPIE	BIT(2)	/* USB host periodic interrupt enable */
+#define	EXTI_UAIE	BIT(1)	/* USB host asynchronous interrupt enable */
+#define	EXTI_NAKE	BIT(0)	/* NAK interrupt enable */
+	/* offset: 0x30 */
+	u32	usbcmd;
+#define	CMD_HIRD(u)	\
+	(((u)>>24)&0xf)		/* bits 27:24, host init resume duration */
+#define	CMD_ITC(u)	\
+	(((u)>>16)&0xff)	/* bits 23:16, interrupt threshold control */
+#define	CMD_PPE		BIT(15)	/* per-port change events enable */
+#define	CMD_ATDTW	BIT(14)	/* add dTD tripwire */
+#define	CMD_SUTW	BIT(13)	/* setup tripwire */
+#define	CMD_ASPE	BIT(11) /* asynchronous schedule park mode enable */
+#define	CMD_FS2		BIT(10)	/* frame list size */
+#define	CMD_ASP1	BIT(9)	/* asynchronous schedule park mode count */
+#define	CMD_ASP0	BIT(8)
+#define	CMD_LR		BIT(7)	/* light host/device controller reset */
+#define	CMD_IAA		BIT(6)	/* interrupt on async advance doorbell */
+#define	CMD_ASE		BIT(5)	/* asynchronous schedule enable */
+#define	CMD_PSE		BIT(4)	/* periodic schedule enable */
+#define	CMD_FS1		BIT(3)
+#define	CMD_FS0		BIT(2)
+#define	CMD_RST		BIT(1)	/* controller reset */
+#define	CMD_RUNSTOP	BIT(0)	/* run/stop */
+	u32	usbsts;
+#define	STS_PPCI(u)	\
+	(((u)>>16)&0xffff)	/* bits 31:16, port-n change detect */
+#define	STS_AS		BIT(15)	/* asynchronous schedule status */
+#define	STS_PS		BIT(14)	/* periodic schedule status */
+#define	STS_RCL		BIT(13)	/* reclamation */
+#define	STS_HCH		BIT(12)	/* HC halted */
+#define	STS_ULPII	BIT(10)	/* ULPI interrupt */
+#define	STS_SLI		BIT(8)	/* DC suspend */
+#define	STS_SRI		BIT(7)	/* SOF received */
+#define	STS_URI		BIT(6)	/* USB reset received */
+#define	STS_AAI		BIT(5)	/* interrupt on async advance */
+#define	STS_SEI		BIT(4)	/* system error */
+#define	STS_FRI		BIT(3)	/* frame list rollover */
+#define	STS_PCI		BIT(2)	/* port change detect */
+#define	STS_UEI		BIT(1)	/* USB error interrupt */
+#define	STS_UI		BIT(0)	/* USB interrupt */
+	u32	usbintr;
+/* bits 31:16, per-port interrupt enable */
+#define	INTR_PPCE(u)	(((u)>>16)&0xffff)
+#define	INTR_ULPIE	BIT(10)	/* ULPI enable */
+#define	INTR_SLE	BIT(8)	/* DC sleep/suspend enable */
+#define	INTR_SRE	BIT(7)	/* SOF received enable */
+#define	INTR_URE	BIT(6)	/* USB reset enable */
+#define	INTR_AAE	BIT(5)	/* interrupt on async advance enable */
+#define	INTR_SEE	BIT(4)	/* system error enable */
+#define	INTR_FRE	BIT(3)	/* frame list rollover enable */
+#define	INTR_PCE	BIT(2)	/* port change detect enable */
+#define	INTR_UEE	BIT(1)	/* USB error interrupt enable */
+#define	INTR_UE		BIT(0)	/* USB interrupt enable */
+	u32	frindex;	/* frame index */
+#define	FRINDEX_MASK	(0x3fff << 0)
+	u32	ctrldssegment;	/* not used */
+	u32	deviceaddr;
+#define USBADR_SHIFT	25
+#define	USBADR(d)	\
+	(((d)>>25)&0x7f)	/* bits 31:25, device address */
+#define USBADR_MASK	(0x7f << 25)
+#define	USBADRA		BIT(24)	/* device address advance */
+	u32	endpointlistaddr;/* endpoint list top memory address */
+/* bits 31:11, endpoint list pointer */
+#define	EPBASE(d)	(((d)>>11)&0x1fffff)
+#define	ENDPOINTLISTADDR_MASK	(0x1fffff << 11)
+	u32	ttctrl;		/* H: TT operation, not used */
+	/* offset: 0x50 */
+	u32	burstsize;	/* burst size of data movement */
+#define	TXPBURST(b)	\
+	(((b)>>8)&0xff)		/* bits 15:8, TX burst length */
+#define	RXPBURST(b)	\
+	(((b)>>0)&0xff)		/* bits 7:0, RX burst length */
+	u32	txfilltuning;	/* TX tuning */
+	u32	txttfilltuning;	/* H: TX TT tuning */
+	u32	ic_usb;		/* control the IC_USB FS/LS transceiver */
+	/* offset: 0x60 */
+	u32	ulpi_viewport;	/* indirect access to ULPI PHY */
+#define	ULPIWU		BIT(31)	/* ULPI wakeup */
+#define	ULPIRUN		BIT(30)	/* ULPI read/write run */
+#define	ULPIRW		BIT(29)	/* ULPI read/write control */
+#define	ULPISS		BIT(27)	/* ULPI sync state */
+#define	ULPIPORT(u)	\
+	(((u)>>24)&7)		/* bits 26:24, ULPI port number */
+#define	ULPIADDR(u)	\
+	(((u)>>16)&0xff)	/* bits 23:16, ULPI data address */
+#define	ULPIDATRD(u)	\
+	(((u)>>8)&0xff)		/* bits 15:8, ULPI data read */
+#define	ULPIDATWR(u)	\
+	(((u)>>0)&0xff)		/* bits 7:0, ULPI data write */
+	u8	_reserved6[0x70-0x64];
+	/* offset: 0x70 */
+	u32	configflag;	/* H: not used */
+	u32	portsc1;	/* port status */
+#define	DA(p)	\
+	(((p)>>25)&0x7f)	/* bits 31:25, device address */
+#define	PORTS_SSTS	(BIT(24) | BIT(23))	/* suspend status */
+#define	PORTS_WKOC	BIT(22)	/* wake on over-current enable */
+#define	PORTS_WKDS	BIT(21)	/* wake on disconnect enable */
+#define	PORTS_WKCN	BIT(20)	/* wake on connect enable */
+#define	PORTS_PTC(p)	(((p)>>16)&0xf)	/* bits 19:16, port test control */
+#define	PORTS_PIC	(BIT(15) | BIT(14))	/* port indicator control */
+#define	PORTS_PO	BIT(13)	/* port owner */
+#define	PORTS_PP	BIT(12)	/* port power */
+#define	PORTS_LS	(BIT(11) | BIT(10)) 	/* line status */
+#define	PORTS_SLP	BIT(9)	/* suspend using L1 */
+#define	PORTS_PR	BIT(8)	/* port reset */
+#define	PORTS_SUSP	BIT(7)	/* suspend */
+#define	PORTS_FPR	BIT(6)	/* force port resume */
+#define	PORTS_OCC	BIT(5)	/* over-current change */
+#define	PORTS_OCA	BIT(4)	/* over-current active */
+#define	PORTS_PEC	BIT(3)	/* port enable/disable change */
+#define	PORTS_PE	BIT(2)	/* port enable/disable */
+#define	PORTS_CSC	BIT(1)	/* connect status change */
+#define	PORTS_CCS	BIT(0)	/* current connect status */
+	u8	_reserved7[0xb4-0x78];
+	/* offset: 0xb4 */
+	u32	devlc;		/* control LPM and each USB port behavior */
+/* bits 31:29, parallel transceiver select */
+#define	LPM_PTS(d)	(((d)>>29)&7)
+#define	LPM_STS		BIT(28)	/* serial transceiver select */
+#define	LPM_PTW		BIT(27)	/* parallel transceiver width */
+#define	LPM_PSPD(d)	(((d)>>25)&3)	/* bits 26:25, port speed */
+#define LPM_PSPD_MASK	(BIT(26) | BIT(25))
+#define LPM_SPEED_FULL	0
+#define LPM_SPEED_LOW	1
+#define LPM_SPEED_HIGH	2
+#define	LPM_SRT		BIT(24)	/* shorten reset time */
+#define	LPM_PFSC	BIT(23)	/* port force full speed connect */
+#define	LPM_PHCD	BIT(22) /* PHY low power suspend clock disable */
+#define	LPM_STL		BIT(16)	/* STALL reply to LPM token */
+#define	LPM_BA(d)	\
+	(((d)>>1)&0x7ff)	/* bits 11:1, BmAttributes */
+#define	LPM_NYT_ACK	BIT(0)	/* NYET/ACK reply to LPM token */
+	u8	_reserved8[0xf4-0xb8];
+	/* offset: 0xf4 */
+	u32	otgsc;		/* On-The-Go status and control */
+#define	OTGSC_DPIE	BIT(30)	/* data pulse interrupt enable */
+#define	OTGSC_MSE	BIT(29)	/* 1 ms timer interrupt enable */
+#define	OTGSC_BSEIE	BIT(28)	/* B session end interrupt enable */
+#define	OTGSC_BSVIE	BIT(27)	/* B session valid interrupt enable */
+#define	OTGSC_ASVIE	BIT(26)	/* A session valid interrupt enable */
+#define	OTGSC_AVVIE	BIT(25)	/* A VBUS valid interrupt enable */
+#define	OTGSC_IDIE	BIT(24)	/* USB ID interrupt enable */
+#define	OTGSC_DPIS	BIT(22)	/* data pulse interrupt status */
+#define	OTGSC_MSS	BIT(21)	/* 1 ms timer interrupt status */
+#define	OTGSC_BSEIS	BIT(20)	/* B session end interrupt status */
+#define	OTGSC_BSVIS	BIT(19)	/* B session valid interrupt status */
+#define	OTGSC_ASVIS	BIT(18)	/* A session valid interrupt status */
+#define	OTGSC_AVVIS	BIT(17)	/* A VBUS valid interrupt status */
+#define	OTGSC_IDIS	BIT(16)	/* USB ID interrupt status */
+#define	OTGSC_DPS	BIT(14)	/* data bus pulsing status */
+#define	OTGSC_MST	BIT(13)	/* 1 ms timer toggle */
+#define	OTGSC_BSE	BIT(12)	/* B session end */
+#define	OTGSC_BSV	BIT(11)	/* B session valid */
+#define	OTGSC_ASV	BIT(10)	/* A session valid */
+#define	OTGSC_AVV	BIT(9)	/* A VBUS valid */
+#define	OTGSC_USBID	BIT(8)	/* USB ID */
+#define	OTGSC_HABA	BIT(7)	/* hw assist B-disconnect to A-connect */
+#define	OTGSC_HADP	BIT(6)	/* hw assist data pulse */
+#define	OTGSC_IDPU	BIT(5)	/* ID pullup */
+#define	OTGSC_DP	BIT(4)	/* data pulsing */
+#define	OTGSC_OT	BIT(3)	/* OTG termination */
+#define	OTGSC_HAAR	BIT(2)	/* hw assist auto reset */
+#define	OTGSC_VC	BIT(1)	/* VBUS charge */
+#define	OTGSC_VD	BIT(0)	/* VBUS discharge */
+	u32	usbmode;
+#define	MODE_VBPS	BIT(5)	/* R/W VBUS power select */
+#define	MODE_SDIS	BIT(4)	/* R/W stream disable mode */
+#define	MODE_SLOM	BIT(3)	/* R/W setup lockout mode */
+#define	MODE_ENSE	BIT(2)	/* endian select */
+#define	MODE_CM(u)	(((u)>>0)&3)	/* bits 1:0, controller mode */
+#define	MODE_IDLE	0
+#define	MODE_DEVICE	2
+#define	MODE_HOST	3
+	u8	_reserved9[0x100-0xfc];
+	/* offset: 0x100 */
+	u32	endptnak;
+#define	EPTN(e)		\
+	(((e)>>16)&0xffff)	/* bits 31:16, TX endpoint NAK */
+#define	EPRN(e)		\
+	(((e)>>0)&0xffff)	/* bits 15:0, RX endpoint NAK */
+	u32	endptnaken;
+#define	EPTNE(e)	\
+	(((e)>>16)&0xffff)	/* bits 31:16, TX endpoint NAK enable */
+#define	EPRNE(e)	\
+	(((e)>>0)&0xffff)	/* bits 15:0, RX endpoint NAK enable */
+	u32	endptsetupstat;
+#define	SETUPSTAT_MASK		(0xffff << 0)	/* bits 15:0 */
+#define EP0SETUPSTAT_MASK	1
+	u32	endptprime;
+/* bits 31:16, prime endpoint transmit buffer */
+#define	PETB(e)		(((e)>>16)&0xffff)
+/* bits 15:0, prime endpoint receive buffer */
+#define	PERB(e)		(((e)>>0)&0xffff)
+	/* offset: 0x110 */
+	u32	endptflush;
+/* bits 31:16, flush endpoint transmit buffer */
+#define	FETB(e)		(((e)>>16)&0xffff)
+/* bits 15:0, flush endpoint receive buffer */
+#define	FERB(e)		(((e)>>0)&0xffff)
+	u32	endptstat;
+/* bits 31:16, endpoint transmit buffer ready */
+#define	ETBR(e)		(((e)>>16)&0xffff)
+/* bits 15:0, endpoint receive buffer ready */
+#define	ERBR(e)		(((e)>>0)&0xffff)
+	u32	endptcomplete;
+/* bits 31:16, endpoint transmit complete event */
+#define	ETCE(e)		(((e)>>16)&0xffff)
+/* bits 15:0, endpoint receive complete event */
+#define	ERCE(e)		(((e)>>0)&0xffff)
+	/* offset: 0x11c */
+	u32	endptctrl[16];
+#define	EPCTRL_TXE	BIT(23)	/* TX endpoint enable */
+#define	EPCTRL_TXR	BIT(22)	/* TX data toggle reset */
+#define	EPCTRL_TXI	BIT(21)	/* TX data toggle inhibit */
+#define	EPCTRL_TXT(e)	(((e)>>18)&3)	/* bits 19:18, TX endpoint type */
+#define	EPCTRL_TXT_SHIFT	18
+#define	EPCTRL_TXD	BIT(17)	/* TX endpoint data source */
+#define	EPCTRL_TXS	BIT(16)	/* TX endpoint STALL */
+#define	EPCTRL_RXE	BIT(7)	/* RX endpoint enable */
+#define	EPCTRL_RXR	BIT(6)	/* RX data toggle reset */
+#define	EPCTRL_RXI	BIT(5)	/* RX data toggle inhibit */
+#define	EPCTRL_RXT(e)	(((e)>>2)&3)	/* bits 3:2, RX endpoint type */
+#define	EPCTRL_RXT_SHIFT	2	/* bits 3:2, RX endpoint type */
+#define	EPCTRL_RXD	BIT(1)	/* RX endpoint data sink */
+#define	EPCTRL_RXS	BIT(0)	/* RX endpoint STALL */
+} __attribute__ ((packed));
+
+#endif /* __LANGWELL_UDC_H */
+
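
The field-extraction macros above (USBADR(), LPM_PSPD(), PORTS_*) decode the packed operational registers. A small sketch of using them on an ioremapped register block; the pointer origin and helper name are illustrative:

#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/usb/langwell_udc.h>

/* Sketch: dump a few decoded fields from BAR0 + OP_REG_OFFSET. */
static void example_dump_state(struct langwell_op_regs __iomem *op)
{
	u32 portsc = readl(&op->portsc1);
	u32 devlc  = readl(&op->devlc);
	u32 addr   = readl(&op->deviceaddr);

	pr_debug("connect=%d suspend=%d speed=%d usb addr=%d\n",
		 !!(portsc & PORTS_CCS), !!(portsc & PORTS_SUSP),
		 (int)LPM_PSPD(devlc), (int)USBADR(addr));
}
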
diff --git a/include/linux/usb/otg.h b/include/linux/usb/otg.h
index 1aaa826..2443c0e 100644
--- a/include/linux/usb/otg.h
+++ b/include/linux/usb/otg.h
@@ -80,10 +80,10 @@
 
 /* for board-specific init logic */
 extern int otg_set_transceiver(struct otg_transceiver *);
-#ifdef CONFIG_NOP_USB_XCEIV
+
+/* sometimes transceivers are accessed only through e.g. ULPI */
 extern void usb_nop_xceiv_register(void);
 extern void usb_nop_xceiv_unregister(void);
-#endif
 
 
 /* for usb host and peripheral controller drivers */
diff --git a/include/linux/usb/r8a66597.h b/include/linux/usb/r8a66597.h
new file mode 100644
index 0000000..e9f0384
--- /dev/null
+++ b/include/linux/usb/r8a66597.h
@@ -0,0 +1,44 @@
+/*
+ * R8A66597 driver platform data
+ *
+ * Copyright (C) 2009  Renesas Solutions Corp.
+ *
+ * Author : Yoshihiro Shimoda <shimoda.yoshihiro@renesas.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ *
+ */
+
+#ifndef __LINUX_USB_R8A66597_H
+#define __LINUX_USB_R8A66597_H
+
+#define R8A66597_PLATDATA_XTAL_12MHZ	0x01
+#define R8A66597_PLATDATA_XTAL_24MHZ	0x02
+#define R8A66597_PLATDATA_XTAL_48MHZ	0x03
+
+struct r8a66597_platdata {
+	/* This callback can control port power instead of the DVSTCTR register. */
+	void (*port_power)(int port, int power);
+
+	/* (external controller only) set R8A66597_PLATDATA_XTAL_nnMHZ */
+	unsigned	xtal:2;
+
+	/* set one = 3.3V, set zero = 1.5V */
+	unsigned	vif:1;
+
+	/* set one = big endian, set zero = little endian */
+	unsigned	endian:1;
+};
+#endif
+
diff --git a/include/linux/usb/serial.h b/include/linux/usb/serial.h
index 8cdfed7..44801d2 100644
--- a/include/linux/usb/serial.h
+++ b/include/linux/usb/serial.h
@@ -15,6 +15,7 @@
 
 #include <linux/kref.h>
 #include <linux/mutex.h>
+#include <linux/sysrq.h>
 
 #define SERIAL_TTY_MAJOR	188	/* Nice legal number now */
 #define SERIAL_TTY_MINORS	254	/* loads of devices :) */
@@ -26,6 +27,13 @@
 /* parity check flag */
 #define RELEVANT_IFLAG(iflag)	(iflag & (IGNBRK|BRKINT|IGNPAR|PARMRK|INPCK))
 
+enum port_dev_state {
+	PORT_UNREGISTERED,
+	PORT_REGISTERING,
+	PORT_REGISTERED,
+	PORT_UNREGISTERING,
+};
+
 /**
  * usb_serial_port: structure for the specific ports of a device.
  * @serial: pointer back to the struct usb_serial owner of this port.
@@ -91,12 +99,17 @@
 	int			write_urb_busy;
 	__u8			bulk_out_endpointAddress;
 
+	int			tx_bytes_flight;
+	int			urbs_in_flight;
+
 	wait_queue_head_t	write_wait;
 	struct work_struct	work;
 	char			throttled;
 	char			throttle_req;
 	char			console;
+	unsigned long		sysrq; /* sysrq timeout */
 	struct device		dev;
+	enum port_dev_state	dev_state;
 };
 #define to_usb_serial_port(d) container_of(d, struct usb_serial_port, dev)
 
@@ -181,8 +194,10 @@
  *	This will be called when the struct usb_serial structure is fully
  *	set up.  Do any local initialization of the device, or any private
  *	memory structure allocation at this point in time.
- * @shutdown: pointer to the driver's shutdown function.  This will be
- *	called when the device is removed from the system.
+ * @disconnect: pointer to the driver's disconnect function.  This will be
+ *	called when the device is unplugged or unbound from the driver.
+ * @release: pointer to the driver's release function.  This will be called
+ *	when the usb_serial data structure is about to be destroyed.
  * @usb_driver: pointer to the struct usb_driver that controls this
  *	device.  This is necessary to allow dynamic ids to be added to
  *	the driver from sysfs.
@@ -207,12 +222,14 @@
 	struct device_driver	driver;
 	struct usb_driver	*usb_driver;
 	struct usb_dynids	dynids;
+	int			max_in_flight_urbs;
 
 	int (*probe)(struct usb_serial *serial, const struct usb_device_id *id);
 	int (*attach)(struct usb_serial *serial);
 	int (*calc_num_ports) (struct usb_serial *serial);
 
-	void (*shutdown)(struct usb_serial *serial);
+	void (*disconnect)(struct usb_serial *serial);
+	void (*release)(struct usb_serial *serial);
 
 	int (*port_probe)(struct usb_serial_port *port);
 	int (*port_remove)(struct usb_serial_port *port);
@@ -294,9 +311,16 @@
 extern void usb_serial_generic_write_bulk_callback(struct urb *urb);
 extern void usb_serial_generic_throttle(struct tty_struct *tty);
 extern void usb_serial_generic_unthrottle(struct tty_struct *tty);
-extern void usb_serial_generic_shutdown(struct usb_serial *serial);
+extern void usb_serial_generic_disconnect(struct usb_serial *serial);
+extern void usb_serial_generic_release(struct usb_serial *serial);
 extern int usb_serial_generic_register(int debug);
 extern void usb_serial_generic_deregister(void);
+extern void usb_serial_generic_resubmit_read_urb(struct usb_serial_port *port,
+						 gfp_t mem_flags);
+extern int usb_serial_handle_sysrq_char(struct usb_serial_port *port,
+					unsigned int ch);
+extern int usb_serial_handle_break(struct usb_serial_port *port);
+
 
 extern int usb_serial_bus_register(struct usb_serial_driver *device);
 extern void usb_serial_bus_deregister(struct usb_serial_driver *device);
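
The old single shutdown() callback is split into disconnect() (device unplugged or driver unbound, hardware may already be gone) and release() (the usb_serial structure is about to be freed). A sketch of what a converted driver's ops might look like; the names and bodies are placeholders, not a prescribed conversion:

#include <linux/module.h>
#include <linux/usb/serial.h>

static void example_disconnect(struct usb_serial *serial)
{
	/* stop I/O; the device may already be physically gone */
	usb_serial_generic_disconnect(serial);
}

static void example_release(struct usb_serial *serial)
{
	/* free driver-private state just before usb_serial is destroyed */
	usb_serial_generic_release(serial);
}

static struct usb_serial_driver example_device = {
	.driver = {
		.owner	= THIS_MODULE,
		.name	= "example",
	},
	.disconnect	= example_disconnect,
	.release	= example_release,
};
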
diff --git a/include/net/inet_sock.h b/include/net/inet_sock.h
index 20a6957..47004f3 100644
--- a/include/net/inet_sock.h
+++ b/include/net/inet_sock.h
@@ -17,6 +17,7 @@
 #define _INET_SOCK_H
 
 
+#include <linux/kmemcheck.h>
 #include <linux/string.h>
 #include <linux/types.h>
 #include <linux/jhash.h>
@@ -66,14 +67,16 @@
 	__be32			loc_addr;
 	__be32			rmt_addr;
 	__be16			rmt_port;
-	u16			snd_wscale : 4, 
-				rcv_wscale : 4, 
+	kmemcheck_bitfield_begin(flags);
+	u16			snd_wscale : 4,
+				rcv_wscale : 4,
 				tstamp_ok  : 1,
 				sack_ok	   : 1,
 				wscale_ok  : 1,
 				ecn_ok	   : 1,
 				acked	   : 1,
 				no_srccheck: 1;
+	kmemcheck_bitfield_end(flags);
 	struct ip_options	*opt;
 };
 
@@ -199,9 +202,12 @@
 static inline struct request_sock *inet_reqsk_alloc(struct request_sock_ops *ops)
 {
 	struct request_sock *req = reqsk_alloc(ops);
+	struct inet_request_sock *ireq = inet_rsk(req);
 
-	if (req != NULL)
-		inet_rsk(req)->opt = NULL;
+	if (req != NULL) {
+		kmemcheck_annotate_bitfield(ireq, flags);
+		ireq->opt = NULL;
+	}
 
 	return req;
 }
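
The pattern applied here is generic: bracket bitfields that share a storage unit with kmemcheck_bitfield_begin()/end() in the struct definition, then call kmemcheck_annotate_bitfield() right after allocating the object so partial writes to that word do not trigger false positives. A self-contained sketch of the same pattern; the struct and allocator are illustrative, not from this patch:

#include <linux/kmemcheck.h>
#include <linux/slab.h>

struct example_flags {
	int		refs;
	kmemcheck_bitfield_begin(flags);
	unsigned int	dirty:1,
			locked:1,
			urgent:1;
	kmemcheck_bitfield_end(flags);
};

static struct example_flags *example_alloc(gfp_t gfp)
{
	struct example_flags *obj = kmalloc(sizeof(*obj), gfp);

	if (obj)
		/* mark the whole bitfield word as initialized so the
		 * first single-bit write is not reported */
		kmemcheck_annotate_bitfield(obj, flags);
	return obj;
}
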
diff --git a/include/net/inet_timewait_sock.h b/include/net/inet_timewait_sock.h
index 4b8ece2..b63b80f 100644
--- a/include/net/inet_timewait_sock.h
+++ b/include/net/inet_timewait_sock.h
@@ -16,6 +16,7 @@
 #define _INET_TIMEWAIT_SOCK_
 
 
+#include <linux/kmemcheck.h>
 #include <linux/list.h>
 #include <linux/module.h>
 #include <linux/timer.h>
@@ -127,10 +128,12 @@
 	__be32			tw_rcv_saddr;
 	__be16			tw_dport;
 	__u16			tw_num;
+	kmemcheck_bitfield_begin(flags);
 	/* And these are ours. */
 	__u8			tw_ipv6only:1,
 				tw_transparent:1;
-	/* 15 bits hole, try to pack */
+	/* 14 bits hole, try to pack */
+	kmemcheck_bitfield_end(flags);
 	__u16			tw_ipv6_offset;
 	unsigned long		tw_ttd;
 	struct inet_bind_bucket	*tw_tb;
diff --git a/include/net/sock.h b/include/net/sock.h
index 010e14a..95bd3fd 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -218,9 +218,11 @@
 #define sk_hash			__sk_common.skc_hash
 #define sk_prot			__sk_common.skc_prot
 #define sk_net			__sk_common.skc_net
+	kmemcheck_bitfield_begin(flags);
 	unsigned char		sk_shutdown : 2,
 				sk_no_check : 2,
 				sk_userlocks : 4;
+	kmemcheck_bitfield_end(flags);
 	unsigned char		sk_protocol;
 	unsigned short		sk_type;
 	int			sk_rcvbuf;
diff --git a/init/Kconfig b/init/Kconfig
index fed6dc3..c4b3c6d 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -616,13 +616,13 @@
 	bool
 
 config SYSFS_DEPRECATED_V2
-	bool "Create deprecated sysfs layout for older userspace tools"
+	bool "remove sysfs features which may confuse old userspace tools"
 	depends on SYSFS
-	default y
+	default n
 	select SYSFS_DEPRECATED
 	help
 	  This option switches the layout of sysfs to the deprecated
-	  version.
+	  version. Do not use it on recent distributions.
 
 	  The current sysfs layout features a unified device tree at
 	  /sys/devices/, which is able to express a hierarchy between
diff --git a/init/do_mounts.c b/init/do_mounts.c
index dd7ee5f..093f659 100644
--- a/init/do_mounts.c
+++ b/init/do_mounts.c
@@ -231,7 +231,8 @@
 
 void __init mount_block_root(char *name, int flags)
 {
-	char *fs_names = __getname();
+	char *fs_names = __getname_gfp(GFP_KERNEL
+		| __GFP_NOTRACK_FALSE_POSITIVE);
 	char *p;
 #ifdef CONFIG_BLOCK
 	char b[BDEVNAME_SIZE];
diff --git a/init/main.c b/init/main.c
index 5e0d3f0..7756dda 100644
--- a/init/main.c
+++ b/init/main.c
@@ -65,6 +65,7 @@
 #include <linux/idr.h>
 #include <linux/ftrace.h>
 #include <linux/async.h>
+#include <linux/kmemcheck.h>
 #include <linux/kmemtrace.h>
 #include <trace/boot.h>
 
diff --git a/kernel/fork.c b/kernel/fork.c
index 4430eb1..be022c2 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -178,7 +178,7 @@
 	/* create a slab on which task_structs can be allocated */
 	task_struct_cachep =
 		kmem_cache_create("task_struct", sizeof(struct task_struct),
-			ARCH_MIN_TASKALIGN, SLAB_PANIC, NULL);
+			ARCH_MIN_TASKALIGN, SLAB_PANIC | SLAB_NOTRACK, NULL);
 #endif
 
 	/* do the arch specific task caches init */
@@ -1470,20 +1470,20 @@
 {
 	sighand_cachep = kmem_cache_create("sighand_cache",
 			sizeof(struct sighand_struct), 0,
-			SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_DESTROY_BY_RCU,
-			sighand_ctor);
+			SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_DESTROY_BY_RCU|
+			SLAB_NOTRACK, sighand_ctor);
 	signal_cachep = kmem_cache_create("signal_cache",
 			sizeof(struct signal_struct), 0,
-			SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
+			SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK, NULL);
 	files_cachep = kmem_cache_create("files_cache",
 			sizeof(struct files_struct), 0,
-			SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
+			SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK, NULL);
 	fs_cachep = kmem_cache_create("fs_cache",
 			sizeof(struct fs_struct), 0,
-			SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
+			SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK, NULL);
 	mm_cachep = kmem_cache_create("mm_struct",
 			sizeof(struct mm_struct), ARCH_MIN_MMSTRUCT_ALIGN,
-			SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
+			SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK, NULL);
 	vm_area_cachep = KMEM_CACHE(vm_area_struct, SLAB_PANIC);
 	mmap_init();
 }
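
SLAB_NOTRACK exempts a cache from kmemcheck tracking, which is what the core caches above opt into. A hedged sketch of using the same flag for a private cache; the cache name and object size are illustrative:

#include <linux/errno.h>
#include <linux/init.h>
#include <linux/slab.h>

static struct kmem_cache *example_cachep;

static int __init example_cache_init(void)
{
	example_cachep = kmem_cache_create("example_cache", 128, 0,
			SLAB_HWCACHE_ALIGN | SLAB_NOTRACK, NULL);
	return example_cachep ? 0 : -ENOMEM;
}
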
diff --git a/kernel/module.c b/kernel/module.c
index e4ab36c..215aaab 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -2899,7 +2899,7 @@
 	struct module *mod;
 	char buf[8];
 
-	printk("Modules linked in:");
+	printk(KERN_DEFAULT "Modules linked in:");
 	/* Most callers should already have preempt disabled, but make sure */
 	preempt_disable();
 	list_for_each_entry_rcu(mod, &modules, list)
diff --git a/kernel/printk.c b/kernel/printk.c
index 5052b54..b4d97b5 100644
--- a/kernel/printk.c
+++ b/kernel/printk.c
@@ -687,20 +687,35 @@
 				  sizeof(printk_buf) - printed_len, fmt, args);
 
 
+	p = printk_buf;
+
+	/* Do we have a loglevel in the string? */
+	if (p[0] == '<') {
+		unsigned char c = p[1];
+		if (c && p[2] == '>') {
+			switch (c) {
+			case '0' ... '7': /* loglevel */
+				current_log_level = c - '0';
+			/* Fallthrough - make sure we're on a new line */
+			case 'd': /* KERN_DEFAULT */
+				if (!new_text_line) {
+					emit_log_char('\n');
+					new_text_line = 1;
+				}
+			/* Fallthrough - skip the loglevel */
+			case 'c': /* KERN_CONT */
+				p += 3;
+				break;
+			}
+		}
+	}
+
 	/*
 	 * Copy the output into log_buf.  If the caller didn't provide
 	 * appropriate log level tags, we insert them here
 	 */
-	for (p = printk_buf; *p; p++) {
+	for ( ; *p; p++) {
 		if (new_text_line) {
-			/* If a token, set current_log_level and skip over */
-			if (p[0] == '<' && p[1] >= '0' && p[1] <= '7' &&
-			    p[2] == '>') {
-				current_log_level = p[1] - '0';
-				p += 3;
-				printed_len -= 3;
-			}
-
 			/* Always output the token */
 			emit_log_char('<');
 			emit_log_char(current_log_level + '0');
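
With this change the prefix parser also understands KERN_DEFAULT ("<d>", force a new line at the default level) and KERN_CONT ("<c>", continue the previous line), in addition to the numeric loglevels. A usage sketch, assuming those macros expand to the prefixes handled above:

#include <linux/kernel.h>

static void example_report(int nr)
{
	printk(KERN_DEFAULT "example: found %d widgets", nr);
	if (nr == 0)
		printk(KERN_CONT " (none usable)");
	printk(KERN_CONT "\n");
}
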
diff --git a/kernel/signal.c b/kernel/signal.c
index 809a228..d81f495 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -832,6 +832,7 @@
 {
 	struct sigpending *pending;
 	struct sigqueue *q;
+	int override_rlimit;
 
 	trace_sched_signal_send(sig, t);
 
@@ -863,9 +864,13 @@
 	   make sure at least one signal gets delivered and don't
 	   pass on the info struct.  */
 
-	q = __sigqueue_alloc(t, GFP_ATOMIC, (sig < SIGRTMIN &&
-					     (is_si_special(info) ||
-					      info->si_code >= 0)));
+	if (sig < SIGRTMIN)
+		override_rlimit = (is_si_special(info) || info->si_code >= 0);
+	else
+		override_rlimit = 0;
+
+	q = __sigqueue_alloc(t, GFP_ATOMIC | __GFP_NOTRACK_FALSE_POSITIVE,
+		override_rlimit);
 	if (q) {
 		list_add_tail(&q->list, &pending->list);
 		switch ((unsigned long) info) {
diff --git a/kernel/softirq.c b/kernel/softirq.c
index 258885a..b41fb71 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -382,6 +382,17 @@
 
 EXPORT_SYMBOL(__tasklet_hi_schedule);
 
+void __tasklet_hi_schedule_first(struct tasklet_struct *t)
+{
+	BUG_ON(!irqs_disabled());
+
+	t->next = __get_cpu_var(tasklet_hi_vec).head;
+	__get_cpu_var(tasklet_hi_vec).head = t;
+	__raise_softirq_irqoff(HI_SOFTIRQ);
+}
+
+EXPORT_SYMBOL(__tasklet_hi_schedule_first);
+
 static void tasklet_action(struct softirq_action *a)
 {
 	struct tasklet_struct *list;
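
__tasklet_hi_schedule_first() puts the tasklet at the head of the high-priority list and requires interrupts to be disabled (it BUG()s otherwise), so hardirq context is the natural caller. A sketch; the device structure and handler are illustrative, and the tasklet is assumed to have been set up with tasklet_init() elsewhere:

#include <linux/interrupt.h>

struct example_dev {
	struct tasklet_struct	rx_tasklet;
};

static irqreturn_t example_irq(int irq, void *data)
{
	struct example_dev *dev = data;

	/* hardirq handlers run with interrupts disabled, satisfying
	 * the BUG_ON(!irqs_disabled()) check in the helper */
	__tasklet_hi_schedule_first(&dev->rx_tasklet);

	return IRQ_HANDLED;
}
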
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 2ccee08..ab462b9 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -27,6 +27,7 @@
 #include <linux/security.h>
 #include <linux/ctype.h>
 #include <linux/utsname.h>
+#include <linux/kmemcheck.h>
 #include <linux/smp_lock.h>
 #include <linux/fs.h>
 #include <linux/init.h>
@@ -967,6 +968,17 @@
 		.proc_handler	= &proc_dointvec,
 	},
 #endif
+#ifdef CONFIG_KMEMCHECK
+	{
+		.ctl_name	= CTL_UNNUMBERED,
+		.procname	= "kmemcheck",
+		.data		= &kmemcheck_enabled,
+		.maxlen		= sizeof(int),
+		.mode		= 0644,
+		.proc_handler	= &proc_dointvec,
+	},
+#endif
+
 /*
  * NOTE: do not add new entries to this table unless you have read
  * Documentation/sysctl/ctl_unnumbered.txt
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index 4a13e5a..61071fe 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -147,7 +147,7 @@
 	  disabled by default and can be runtime (re-)started
 	  via:
 
-	      echo 0 > /debugfs/tracing/tracing_max_latency
+	      echo 0 > /sys/kernel/debug/tracing/tracing_max_latency
 
 	  (Note that kernel size and overhead increases with this option
 	  enabled. This option and the preempt-off timing option can be
@@ -168,7 +168,7 @@
 	  disabled by default and can be runtime (re-)started
 	  via:
 
-	      echo 0 > /debugfs/tracing/tracing_max_latency
+	      echo 0 > /sys/kernel/debug/tracing/tracing_max_latency
 
 	  (Note that kernel size and overhead increases with this option
 	  enabled. This option and the irqs-off timing option can be
@@ -261,7 +261,7 @@
 	  This tracer profiles all the likely and unlikely macros
 	  in the kernel. It will display the results in:
 
-	  /debugfs/tracing/profile_annotated_branch
+	  /sys/kernel/debug/tracing/profile_annotated_branch
 
 	  Note: this will add a significant overhead, only turn this
 	  on if you need to profile the system's use of these macros.
@@ -274,7 +274,7 @@
 	  taken in the kernel is recorded whether it hit or miss.
 	  The results will be displayed in:
 
-	  /debugfs/tracing/profile_branch
+	  /sys/kernel/debug/tracing/profile_branch
 
 	  This option also enables the likely/unlikely profiler.
 
@@ -323,7 +323,7 @@
 	select KALLSYMS
 	help
 	  This special tracer records the maximum stack footprint of the
-	  kernel and displays it in debugfs/tracing/stack_trace.
+	  kernel and displays it in /sys/kernel/debug/tracing/stack_trace.
 
 	  This tracer works by hooking into every function call that the
 	  kernel executes, and keeping a maximum stack depth value and
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 2e642b2..dc4dc70 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -10,6 +10,7 @@
 #include <linux/debugfs.h>
 #include <linux/uaccess.h>
 #include <linux/hardirq.h>
+#include <linux/kmemcheck.h>
 #include <linux/module.h>
 #include <linux/percpu.h>
 #include <linux/mutex.h>
@@ -1270,6 +1271,7 @@
 	if (tail < BUF_PAGE_SIZE) {
 		/* Mark the rest of the page with padding */
 		event = __rb_page_index(tail_page, tail);
+		kmemcheck_annotate_bitfield(event, bitfield);
 		rb_event_set_padding(event);
 	}
 
@@ -1327,6 +1329,7 @@
 		return NULL;
 
 	event = __rb_page_index(tail_page, tail);
+	kmemcheck_annotate_bitfield(event, bitfield);
 	rb_update_event(event, type, length);
 
 	/* The passed in type is zero for DATA */
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 8acd9b8..c1878bf 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -344,7 +344,7 @@
 /*
  * Copy the new maximum trace into the separate maximum-trace
  * structure. (this way the maximum trace is permanently saved,
- * for later retrieval via /debugfs/tracing/latency_trace)
+ * for later retrieval via /sys/kernel/debug/tracing/latency_trace)
  */
 static void
 __update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
@@ -2414,21 +2414,20 @@
 
 static const char readme_msg[] =
 	"tracing mini-HOWTO:\n\n"
-	"# mkdir /debug\n"
-	"# mount -t debugfs nodev /debug\n\n"
-	"# cat /debug/tracing/available_tracers\n"
+	"# mount -t debugfs nodev /sys/kernel/debug\n\n"
+	"# cat /sys/kernel/debug/tracing/available_tracers\n"
 	"wakeup preemptirqsoff preemptoff irqsoff function sched_switch nop\n\n"
-	"# cat /debug/tracing/current_tracer\n"
+	"# cat /sys/kernel/debug/tracing/current_tracer\n"
 	"nop\n"
-	"# echo sched_switch > /debug/tracing/current_tracer\n"
-	"# cat /debug/tracing/current_tracer\n"
+	"# echo sched_switch > /sys/kernel/debug/tracing/current_tracer\n"
+	"# cat /sys/kernel/debug/tracing/current_tracer\n"
 	"sched_switch\n"
-	"# cat /debug/tracing/trace_options\n"
+	"# cat /sys/kernel/debug/tracing/trace_options\n"
 	"noprint-parent nosym-offset nosym-addr noverbose\n"
-	"# echo print-parent > /debug/tracing/trace_options\n"
-	"# echo 1 > /debug/tracing/tracing_enabled\n"
-	"# cat /debug/tracing/trace > /tmp/trace.txt\n"
-	"# echo 0 > /debug/tracing/tracing_enabled\n"
+	"# echo print-parent > /sys/kernel/debug/tracing/trace_options\n"
+	"# echo 1 > /sys/kernel/debug/tracing/tracing_enabled\n"
+	"# cat /sys/kernel/debug/tracing/trace > /tmp/trace.txt\n"
+	"# echo 0 > /sys/kernel/debug/tracing/tracing_enabled\n"
 ;
 
 static ssize_t
diff --git a/kernel/user.c b/kernel/user.c
index 850e0ba..2c000e7 100644
--- a/kernel/user.c
+++ b/kernel/user.c
@@ -75,21 +75,6 @@
 	put_user_ns(up->user_ns);
 }
 
-static struct user_struct *uid_hash_find(uid_t uid, struct hlist_head *hashent)
-{
-	struct user_struct *user;
-	struct hlist_node *h;
-
-	hlist_for_each_entry(user, h, hashent, uidhash_node) {
-		if (user->uid == uid) {
-			atomic_inc(&user->__count);
-			return user;
-		}
-	}
-
-	return NULL;
-}
-
 #ifdef CONFIG_USER_SCHED
 
 static void sched_destroy_user(struct user_struct *up)
@@ -119,6 +104,23 @@
 
 #if defined(CONFIG_USER_SCHED) && defined(CONFIG_SYSFS)
 
+static struct user_struct *uid_hash_find(uid_t uid, struct hlist_head *hashent)
+{
+	struct user_struct *user;
+	struct hlist_node *h;
+
+	hlist_for_each_entry(user, h, hashent, uidhash_node) {
+		if (user->uid == uid) {
+			/* possibly resurrect an "almost deleted" object */
+			if (atomic_inc_return(&user->__count) == 1)
+				cancel_delayed_work(&user->work);
+			return user;
+		}
+	}
+
+	return NULL;
+}
+
 static struct kset *uids_kset; /* represents the /sys/kernel/uids/ directory */
 static DEFINE_MUTEX(uids_mutex);
 
@@ -283,12 +285,12 @@
 	return uids_user_create(&root_user);
 }
 
-/* work function to remove sysfs directory for a user and free up
+/* delayed work function to remove sysfs directory for a user and free up
  * corresponding structures.
  */
 static void cleanup_user_struct(struct work_struct *w)
 {
-	struct user_struct *up = container_of(w, struct user_struct, work);
+	struct user_struct *up = container_of(w, struct user_struct, work.work);
 	unsigned long flags;
 	int remove_user = 0;
 
@@ -297,15 +299,12 @@
 	 */
 	uids_mutex_lock();
 
-	local_irq_save(flags);
-
-	if (atomic_dec_and_lock(&up->__count, &uidhash_lock)) {
+	spin_lock_irqsave(&uidhash_lock, flags);
+	if (atomic_read(&up->__count) == 0) {
 		uid_hash_remove(up);
 		remove_user = 1;
-		spin_unlock_irqrestore(&uidhash_lock, flags);
-	} else {
-		local_irq_restore(flags);
 	}
+	spin_unlock_irqrestore(&uidhash_lock, flags);
 
 	if (!remove_user)
 		goto done;
@@ -331,16 +330,28 @@
  */
 static void free_user(struct user_struct *up, unsigned long flags)
 {
-	/* restore back the count */
-	atomic_inc(&up->__count);
 	spin_unlock_irqrestore(&uidhash_lock, flags);
-
-	INIT_WORK(&up->work, cleanup_user_struct);
-	schedule_work(&up->work);
+	INIT_DELAYED_WORK(&up->work, cleanup_user_struct);
+	schedule_delayed_work(&up->work, msecs_to_jiffies(1000));
 }
 
 #else	/* CONFIG_USER_SCHED && CONFIG_SYSFS */
 
+static struct user_struct *uid_hash_find(uid_t uid, struct hlist_head *hashent)
+{
+	struct user_struct *user;
+	struct hlist_node *h;
+
+	hlist_for_each_entry(user, h, hashent, uidhash_node) {
+		if (user->uid == uid) {
+			atomic_inc(&user->__count);
+			return user;
+		}
+	}
+
+	return NULL;
+}
+
 int uids_sysfs_init(void) { return 0; }
 static inline int uids_user_create(struct user_struct *up) { return 0; }
 static inline void uids_mutex_lock(void) { }
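
The reworked uid_hash_find()/free_user() pair implements delayed free with resurrection: the final put schedules cleanup after a one-second grace period, and a lookup that bumps the refcount from 0 back to 1 cancels the pending work. A generic sketch of the same idea; the object type and timeout are illustrative, and a real implementation also needs a lock around the final check, as uidhash_lock is used above:

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <asm/atomic.h>

struct example_obj {
	atomic_t		count;
	struct delayed_work	work;
};

static void example_cleanup(struct work_struct *w)
{
	struct example_obj *obj =
		container_of(w, struct example_obj, work.work);

	if (atomic_read(&obj->count) == 0)	/* nobody resurrected it */
		kfree(obj);
}

static void example_put(struct example_obj *obj)
{
	if (atomic_dec_and_test(&obj->count)) {
		INIT_DELAYED_WORK(&obj->work, example_cleanup);
		schedule_delayed_work(&obj->work, msecs_to_jiffies(1000));
	}
}

static struct example_obj *example_get(struct example_obj *obj)
{
	/* going 0 -> 1 means we raced with example_put(); cancel the
	 * pending cleanup to resurrect the object */
	if (atomic_inc_return(&obj->count) == 1)
		cancel_delayed_work(&obj->work);
	return obj;
}
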
diff --git a/lib/Kconfig b/lib/Kconfig
index 9960be0..bb1326d 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -194,4 +194,10 @@
 config NLATTR
 	bool
 
+#
+# Generic 64-bit atomic support is selected if needed
+#
+config GENERIC_ATOMIC64
+       bool
+
 endmenu
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 116a350..6b0c2d8a 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -300,7 +300,7 @@
 
 config DEBUG_SLAB
 	bool "Debug slab memory allocations"
-	depends on DEBUG_KERNEL && SLAB
+	depends on DEBUG_KERNEL && SLAB && !KMEMCHECK
 	help
 	  Say Y here to have the kernel do limited verification on memory
 	  allocation as well as poisoning memory on free to catch use of freed
@@ -312,7 +312,7 @@
 
 config SLUB_DEBUG_ON
 	bool "SLUB debugging on by default"
-	depends on SLUB && SLUB_DEBUG
+	depends on SLUB && SLUB_DEBUG && !KMEMCHECK
 	default n
 	help
 	  Boot with debugging on by default. SLUB boots by default with
@@ -996,3 +996,5 @@
 source "samples/Kconfig"
 
 source "lib/Kconfig.kgdb"
+
+source "lib/Kconfig.kmemcheck"
diff --git a/lib/Kconfig.kmemcheck b/lib/Kconfig.kmemcheck
new file mode 100644
index 0000000..603c81b
--- /dev/null
+++ b/lib/Kconfig.kmemcheck
@@ -0,0 +1,91 @@
+config HAVE_ARCH_KMEMCHECK
+	bool
+
+menuconfig KMEMCHECK
+	bool "kmemcheck: trap use of uninitialized memory"
+	depends on DEBUG_KERNEL
+	depends on !X86_USE_3DNOW
+	depends on SLUB || SLAB
+	depends on !CC_OPTIMIZE_FOR_SIZE
+	depends on !FUNCTION_TRACER
+	select FRAME_POINTER
+	select STACKTRACE
+	default n
+	help
+	  This option enables tracing of dynamically allocated kernel memory
+	  to see if memory is used before it has been given an initial value.
+	  Be aware that this requires half of your memory for bookkeeping and
+	  will insert extra code at *every* read and write to tracked memory
+	  thus slow down the kernel code (but user code is unaffected).
+
+	  The kernel may be started with kmemcheck=0 or kmemcheck=1 to disable
+	  or enable kmemcheck at boot-time. If the kernel is started with
+	  kmemcheck=0, the large memory and CPU overhead is not incurred.
+
+choice
+	prompt "kmemcheck: default mode at boot"
+	depends on KMEMCHECK
+	default KMEMCHECK_ONESHOT_BY_DEFAULT
+	help
+	  This option controls the default behaviour of kmemcheck when the
+	  kernel boots and no kmemcheck= parameter is given.
+
+config KMEMCHECK_DISABLED_BY_DEFAULT
+	bool "disabled"
+	depends on KMEMCHECK
+
+config KMEMCHECK_ENABLED_BY_DEFAULT
+	bool "enabled"
+	depends on KMEMCHECK
+
+config KMEMCHECK_ONESHOT_BY_DEFAULT
+	bool "one-shot"
+	depends on KMEMCHECK
+	help
+	  In one-shot mode, only the first error detected is reported before
+	  kmemcheck is disabled.
+
+endchoice
+
+config KMEMCHECK_QUEUE_SIZE
+	int "kmemcheck: error queue size"
+	depends on KMEMCHECK
+	default 64
+	help
+	  Select the maximum number of errors to store in the queue. Since
+	  errors can occur virtually anywhere and in any context, we need a
+	  temporary storage area which is guaranteed not to generate any
+	  other faults. The queue will be emptied as soon as a tasklet may
+	  be scheduled. If the queue is full, new error reports will be
+	  lost.
+
+config KMEMCHECK_SHADOW_COPY_SHIFT
+	int "kmemcheck: shadow copy size (5 => 32 bytes, 6 => 64 bytes)"
+	depends on KMEMCHECK
+	range 2 8
+	default 5
+	help
+	  Select the number of shadow bytes to save along with each entry of
+	  the queue. These bytes indicate what parts of an allocation are
+	  initialized, uninitialized, etc. and will be displayed when an
+	  error is detected to help the debugging of a particular problem.
+
+config KMEMCHECK_PARTIAL_OK
+	bool "kmemcheck: allow partially uninitialized memory"
+	depends on KMEMCHECK
+	default y
+	help
+	  This option works around certain GCC optimizations that produce
+	  32-bit reads from 16-bit variables where the upper 16 bits are
+	  thrown away afterwards. This may of course also hide some real
+	  bugs.
+
+config KMEMCHECK_BITOPS_OK
+	bool "kmemcheck: allow bit-field manipulation"
+	depends on KMEMCHECK
+	default n
+	help
+	  This option silences warnings that would be generated for bit-field
+	  accesses where not all the bits are initialized at the same time.
+	  This may also hide some real bugs.
+
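
An illustrative C sketch of the access pattern the KMEMCHECK_PARTIAL_OK help text above describes; the struct and field names are made up and not part of this patch:

	#include <linux/types.h>

	struct pkt {
		u16 len;	/* initialized by the driver */
		u16 pad;	/* never written */
	};

	static u32 read_len(const struct pkt *p)
	{
		/* GCC may implement this 16-bit load as a 32-bit load that
		 * also covers 'pad' and then discards the upper half;
		 * KMEMCHECK_PARTIAL_OK keeps such reads from being reported. */
		return p->len;
	}
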
diff --git a/lib/Makefile b/lib/Makefile
index 34c5c0e..8e9bcf9 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -95,6 +95,8 @@
 
 obj-$(CONFIG_GENERIC_CSUM) += checksum.o
 
+obj-$(CONFIG_GENERIC_ATOMIC64) += atomic64.o
+
 hostprogs-y	:= gen_crc32table
 clean-files	:= crc32table.h
 
diff --git a/lib/atomic64.c b/lib/atomic64.c
new file mode 100644
index 0000000..c5e7255
--- /dev/null
+++ b/lib/atomic64.c
@@ -0,0 +1,175 @@
+/*
+ * Generic implementation of 64-bit atomics using spinlocks,
+ * useful on processors that don't have 64-bit atomic instructions.
+ *
+ * Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#include <linux/types.h>
+#include <linux/cache.h>
+#include <linux/spinlock.h>
+#include <linux/init.h>
+#include <asm/atomic.h>
+
+/*
+ * We use a hashed array of spinlocks to provide exclusive access
+ * to each atomic64_t variable.  Since this is expected to be used on
+ * systems with small numbers of CPUs (<= 4 or so), we use a
+ * relatively small array of 16 spinlocks to avoid wasting too much
+ * memory on the spinlock array.
+ */
+#define NR_LOCKS	16
+
+/*
+ * Ensure each lock is in a separate cacheline.
+ */
+static union {
+	spinlock_t lock;
+	char pad[L1_CACHE_BYTES];
+} atomic64_lock[NR_LOCKS] __cacheline_aligned_in_smp;
+
+static inline spinlock_t *lock_addr(const atomic64_t *v)
+{
+	unsigned long addr = (unsigned long) v;
+
+	addr >>= L1_CACHE_SHIFT;
+	addr ^= (addr >> 8) ^ (addr >> 16);
+	return &atomic64_lock[addr & (NR_LOCKS - 1)].lock;
+}
+
+long long atomic64_read(const atomic64_t *v)
+{
+	unsigned long flags;
+	spinlock_t *lock = lock_addr(v);
+	long long val;
+
+	spin_lock_irqsave(lock, flags);
+	val = v->counter;
+	spin_unlock_irqrestore(lock, flags);
+	return val;
+}
+
+void atomic64_set(atomic64_t *v, long long i)
+{
+	unsigned long flags;
+	spinlock_t *lock = lock_addr(v);
+
+	spin_lock_irqsave(lock, flags);
+	v->counter = i;
+	spin_unlock_irqrestore(lock, flags);
+}
+
+void atomic64_add(long long a, atomic64_t *v)
+{
+	unsigned long flags;
+	spinlock_t *lock = lock_addr(v);
+
+	spin_lock_irqsave(lock, flags);
+	v->counter += a;
+	spin_unlock_irqrestore(lock, flags);
+}
+
+long long atomic64_add_return(long long a, atomic64_t *v)
+{
+	unsigned long flags;
+	spinlock_t *lock = lock_addr(v);
+	long long val;
+
+	spin_lock_irqsave(lock, flags);
+	val = v->counter += a;
+	spin_unlock_irqrestore(lock, flags);
+	return val;
+}
+
+void atomic64_sub(long long a, atomic64_t *v)
+{
+	unsigned long flags;
+	spinlock_t *lock = lock_addr(v);
+
+	spin_lock_irqsave(lock, flags);
+	v->counter -= a;
+	spin_unlock_irqrestore(lock, flags);
+}
+
+long long atomic64_sub_return(long long a, atomic64_t *v)
+{
+	unsigned long flags;
+	spinlock_t *lock = lock_addr(v);
+	long long val;
+
+	spin_lock_irqsave(lock, flags);
+	val = v->counter -= a;
+	spin_unlock_irqrestore(lock, flags);
+	return val;
+}
+
+long long atomic64_dec_if_positive(atomic64_t *v)
+{
+	unsigned long flags;
+	spinlock_t *lock = lock_addr(v);
+	long long val;
+
+	spin_lock_irqsave(lock, flags);
+	val = v->counter - 1;
+	if (val >= 0)
+		v->counter = val;
+	spin_unlock_irqrestore(lock, flags);
+	return val;
+}
+
+long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n)
+{
+	unsigned long flags;
+	spinlock_t *lock = lock_addr(v);
+	long long val;
+
+	spin_lock_irqsave(lock, flags);
+	val = v->counter;
+	if (val == o)
+		v->counter = n;
+	spin_unlock_irqrestore(lock, flags);
+	return val;
+}
+
+long long atomic64_xchg(atomic64_t *v, long long new)
+{
+	unsigned long flags;
+	spinlock_t *lock = lock_addr(v);
+	long long val;
+
+	spin_lock_irqsave(lock, flags);
+	val = v->counter;
+	v->counter = new;
+	spin_unlock_irqrestore(lock, flags);
+	return val;
+}
+
+int atomic64_add_unless(atomic64_t *v, long long a, long long u)
+{
+	unsigned long flags;
+	spinlock_t *lock = lock_addr(v);
+	int ret = 1;
+
+	spin_lock_irqsave(lock, flags);
+	if (v->counter != u) {
+		v->counter += a;
+		ret = 0;
+	}
+	spin_unlock_irqrestore(lock, flags);
+	return ret;
+}
+
+static int init_atomic64_lock(void)
+{
+	int i;
+
+	for (i = 0; i < NR_LOCKS; ++i)
+		spin_lock_init(&atomic64_lock[i].lock);
+	return 0;
+}
+
+pure_initcall(init_atomic64_lock);
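
For reference, a minimal usage sketch of the atomic64_t API implemented above; the counter and helper names are illustrative and not part of this patch, and ATOMIC64_INIT is assumed to come from the accompanying asm/atomic.h. On architectures that select GENERIC_ATOMIC64 these calls go through the hashed spinlocks; elsewhere they map to native 64-bit instructions:

	#include <asm/atomic.h>

	/* 64-bit statistics counter that is safe to update from any context. */
	static atomic64_t bytes_transferred = ATOMIC64_INIT(0);

	static long long account_bytes(long long len)
	{
		/* Add and return the new total without tearing on 32-bit CPUs. */
		return atomic64_add_return(len, &bytes_transferred);
	}

	static long long total_bytes(void)
	{
		return atomic64_read(&bytes_transferred);
	}
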
diff --git a/lib/kobject.c b/lib/kobject.c
index bacf6fe..b512b74 100644
--- a/lib/kobject.c
+++ b/lib/kobject.c
@@ -793,11 +793,16 @@
 				struct kobject *parent_kobj)
 {
 	struct kset *kset;
+	int retval;
 
 	kset = kzalloc(sizeof(*kset), GFP_KERNEL);
 	if (!kset)
 		return NULL;
-	kobject_set_name(&kset->kobj, name);
+	retval = kobject_set_name(&kset->kobj, name);
+	if (retval) {
+		kfree(kset);
+		return NULL;
+	}
 	kset->uevent_ops = uevent_ops;
 	kset->kobj.parent = parent_kobj;
 
diff --git a/mm/Kconfig b/mm/Kconfig
index 97d2c88..c948d4c 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -128,11 +128,11 @@
 config MEMORY_HOTPLUG
 	bool "Allow for memory hot-add"
 	depends on SPARSEMEM || X86_64_ACPI_NUMA
-	depends on HOTPLUG && !HIBERNATION && ARCH_ENABLE_MEMORY_HOTPLUG
+	depends on HOTPLUG && !(HIBERNATION && !S390) && ARCH_ENABLE_MEMORY_HOTPLUG
 	depends on (IA64 || X86 || PPC64 || SUPERH || S390)
 
 comment "Memory hotplug is currently incompatible with Software Suspend"
-	depends on SPARSEMEM && HOTPLUG && HIBERNATION
+	depends on SPARSEMEM && HOTPLUG && HIBERNATION && !S390
 
 config MEMORY_HOTPLUG_SPARSE
 	def_bool y
diff --git a/mm/Kconfig.debug b/mm/Kconfig.debug
index bb01e29..aa99fd1 100644
--- a/mm/Kconfig.debug
+++ b/mm/Kconfig.debug
@@ -2,6 +2,7 @@
 	bool "Debug page memory allocations"
 	depends on DEBUG_KERNEL && ARCH_SUPPORTS_DEBUG_PAGEALLOC
 	depends on !HIBERNATION || !PPC && !SPARC
+	depends on !KMEMCHECK
 	---help---
 	  Unmap pages from the kernel linear mapping after free_pages().
 	  This results in a large slowdown, but helps to find certain types
diff --git a/mm/Makefile b/mm/Makefile
index cf76be7..5e0bd64 100644
--- a/mm/Makefile
+++ b/mm/Makefile
@@ -28,6 +28,7 @@
 obj-$(CONFIG_PAGE_POISONING) += debug-pagealloc.o
 obj-$(CONFIG_SLAB) += slab.o
 obj-$(CONFIG_SLUB) += slub.o
+obj-$(CONFIG_KMEMCHECK) += kmemcheck.o
 obj-$(CONFIG_FAILSLAB) += failslab.o
 obj-$(CONFIG_MEMORY_HOTPLUG) += memory_hotplug.o
 obj-$(CONFIG_FS_XIP) += filemap_xip.o
diff --git a/mm/bounce.c b/mm/bounce.c
index 4ebe3ea..a2b76a5 100644
--- a/mm/bounce.c
+++ b/mm/bounce.c
@@ -13,7 +13,6 @@
 #include <linux/init.h>
 #include <linux/hash.h>
 #include <linux/highmem.h>
-#include <linux/blktrace_api.h>
 #include <asm/tlbflush.h>
 
 #include <trace/events/block.h>
diff --git a/mm/highmem.c b/mm/highmem.c
index 68eb1d9..25878cc 100644
--- a/mm/highmem.c
+++ b/mm/highmem.c
@@ -26,7 +26,6 @@
 #include <linux/init.h>
 #include <linux/hash.h>
 #include <linux/highmem.h>
-#include <linux/blktrace_api.h>
 #include <asm/tlbflush.h>
 
 /*
diff --git a/mm/kmemcheck.c b/mm/kmemcheck.c
new file mode 100644
index 0000000..fd814fd
--- /dev/null
+++ b/mm/kmemcheck.c
@@ -0,0 +1,122 @@
+#include <linux/gfp.h>
+#include <linux/mm_types.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/kmemcheck.h>
+
+void kmemcheck_alloc_shadow(struct page *page, int order, gfp_t flags, int node)
+{
+	struct page *shadow;
+	int pages;
+	int i;
+
+	pages = 1 << order;
+
+	/*
+	 * With kmemcheck enabled, we need to allocate a memory area for the
+	 * shadow bits as well.
+	 */
+	shadow = alloc_pages_node(node, flags | __GFP_NOTRACK, order);
+	if (!shadow) {
+		if (printk_ratelimit())
+			printk(KERN_ERR "kmemcheck: failed to allocate "
+				"shadow bitmap\n");
+		return;
+	}
+
+	for (i = 0; i < pages; ++i)
+		page[i].shadow = page_address(&shadow[i]);
+
+	/*
+	 * Mark it as non-present for the MMU so that our accesses to
+	 * this memory will trigger a page fault and let us analyze
+	 * the memory accesses.
+	 */
+	kmemcheck_hide_pages(page, pages);
+}
+
+void kmemcheck_free_shadow(struct page *page, int order)
+{
+	struct page *shadow;
+	int pages;
+	int i;
+
+	if (!kmemcheck_page_is_tracked(page))
+		return;
+
+	pages = 1 << order;
+
+	kmemcheck_show_pages(page, pages);
+
+	shadow = virt_to_page(page[0].shadow);
+
+	for (i = 0; i < pages; ++i)
+		page[i].shadow = NULL;
+
+	__free_pages(shadow, order);
+}
+
+void kmemcheck_slab_alloc(struct kmem_cache *s, gfp_t gfpflags, void *object,
+			  size_t size)
+{
+	/*
+	 * Has already been memset(), which initializes the shadow for us
+	 * as well.
+	 */
+	if (gfpflags & __GFP_ZERO)
+		return;
+
+	/* No need to initialize the shadow of a non-tracked slab. */
+	if (s->flags & SLAB_NOTRACK)
+		return;
+
+	if (!kmemcheck_enabled || gfpflags & __GFP_NOTRACK) {
+		/*
+		 * Allow non-tracked objects to be allocated from
+		 * tracked caches. Note, however, that these objects
+		 * will still get page faults on access; they just
+		 * won't ever be flagged as uninitialized. If page
+		 * faults are not acceptable, the slab cache itself
+		 * should be marked NOTRACK.
+		 */
+		kmemcheck_mark_initialized(object, size);
+	} else if (!s->ctor) {
+		/*
+		 * New objects should be marked uninitialized before
+		 * they're returned to the caller.
+		 */
+		kmemcheck_mark_uninitialized(object, size);
+	}
+}
+
+void kmemcheck_slab_free(struct kmem_cache *s, void *object, size_t size)
+{
+	/* TODO: RCU freeing is unsupported for now; hide false positives. */
+	if (!s->ctor && !(s->flags & SLAB_DESTROY_BY_RCU))
+		kmemcheck_mark_freed(object, size);
+}
+
+void kmemcheck_pagealloc_alloc(struct page *page, unsigned int order,
+			       gfp_t gfpflags)
+{
+	int pages;
+
+	if (gfpflags & (__GFP_HIGHMEM | __GFP_NOTRACK))
+		return;
+
+	pages = 1 << order;
+
+	/*
+	 * NOTE: We choose to track GFP_ZERO pages too; in fact, they
+	 * can become uninitialized by copying uninitialized memory
+	 * into them.
+	 */
+
+	/* XXX: Can use zone->node for node? */
+	kmemcheck_alloc_shadow(page, order, gfpflags, -1);
+
+	if (gfpflags & __GFP_ZERO)
+		kmemcheck_mark_initialized_pages(page, pages);
+	else
+		kmemcheck_mark_uninitialized_pages(page, pages);
+}
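
For allocation sites that cannot tolerate the tracking page faults described in kmemcheck_slab_alloc() above, a hedged sketch of opting out; the cache name, object size and helper names are illustrative:

	#include <linux/errno.h>
	#include <linux/gfp.h>
	#include <linux/init.h>
	#include <linux/slab.h>

	static struct kmem_cache *fast_cache;

	static int __init fast_cache_init(void)
	{
		/* SLAB_NOTRACK: no shadow is allocated for this cache, so its
		 * objects never fault and are never checked. */
		fast_cache = kmem_cache_create("fast_cache", 256, 0,
					       SLAB_NOTRACK, NULL);
		return fast_cache ? 0 : -ENOMEM;
	}

	static void *grab_untracked_page(void)
	{
		/* Page allocations can skip tracking per call site instead. */
		return (void *)__get_free_page(GFP_KERNEL | __GFP_NOTRACK);
	}
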
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 2f457a7..a5f3c27 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -23,6 +23,7 @@
 #include <linux/bootmem.h>
 #include <linux/compiler.h>
 #include <linux/kernel.h>
+#include <linux/kmemcheck.h>
 #include <linux/module.h>
 #include <linux/suspend.h>
 #include <linux/pagevec.h>
@@ -558,6 +559,8 @@
 	int bad = 0;
 	int clearMlocked = PageMlocked(page);
 
+	kmemcheck_free_shadow(page, order);
+
 	for (i = 0 ; i < (1 << order) ; ++i)
 		bad += free_pages_check(page + i);
 	if (bad)
@@ -1020,6 +1023,8 @@
 	unsigned long flags;
 	int clearMlocked = PageMlocked(page);
 
+	kmemcheck_free_shadow(page, 0);
+
 	if (PageAnon(page))
 		page->mapping = NULL;
 	if (free_pages_check(page))
@@ -1076,6 +1081,16 @@
 
 	VM_BUG_ON(PageCompound(page));
 	VM_BUG_ON(!page_count(page));
+
+#ifdef CONFIG_KMEMCHECK
+	/*
+	 * Split shadow pages too, because free(page[0]) would
+	 * otherwise free the whole shadow.
+	 */
+	if (kmemcheck_page_is_tracked(page))
+		split_page(virt_to_page(page[0].shadow), order);
+#endif
+
 	for (i = 1; i < (1 << order); i++)
 		set_page_refcounted(page + i);
 }
@@ -1828,7 +1843,10 @@
 		dump_stack();
 		show_mem();
 	}
+	return page;
 got_pg:
+	if (kmemcheck_enabled)
+		kmemcheck_pagealloc_alloc(page, order, gfp_mask);
 	return page;
 
 }
diff --git a/mm/slab.c b/mm/slab.c
index 744ab9a..f257d4d 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -114,6 +114,7 @@
 #include	<linux/rtmutex.h>
 #include	<linux/reciprocal_div.h>
 #include	<linux/debugobjects.h>
+#include	<linux/kmemcheck.h>
 
 #include	<asm/cacheflush.h>
 #include	<asm/tlbflush.h>
@@ -179,13 +180,13 @@
 			 SLAB_STORE_USER | \
 			 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
 			 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
-			 SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE)
+			 SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE | SLAB_NOTRACK)
 #else
 # define CREATE_MASK	(SLAB_HWCACHE_ALIGN | \
 			 SLAB_CACHE_DMA | \
 			 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
 			 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
-			 SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE)
+			 SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE | SLAB_NOTRACK)
 #endif
 
 /*
@@ -380,87 +381,6 @@
 	MAKE_LIST((cachep), (&(ptr)->slabs_free), slabs_free, nodeid);	\
 	} while (0)
 
-/*
- * struct kmem_cache
- *
- * manages a cache.
- */
-
-struct kmem_cache {
-/* 1) per-cpu data, touched during every alloc/free */
-	struct array_cache *array[NR_CPUS];
-/* 2) Cache tunables. Protected by cache_chain_mutex */
-	unsigned int batchcount;
-	unsigned int limit;
-	unsigned int shared;
-
-	unsigned int buffer_size;
-	u32 reciprocal_buffer_size;
-/* 3) touched by every alloc & free from the backend */
-
-	unsigned int flags;		/* constant flags */
-	unsigned int num;		/* # of objs per slab */
-
-/* 4) cache_grow/shrink */
-	/* order of pgs per slab (2^n) */
-	unsigned int gfporder;
-
-	/* force GFP flags, e.g. GFP_DMA */
-	gfp_t gfpflags;
-
-	size_t colour;			/* cache colouring range */
-	unsigned int colour_off;	/* colour offset */
-	struct kmem_cache *slabp_cache;
-	unsigned int slab_size;
-	unsigned int dflags;		/* dynamic flags */
-
-	/* constructor func */
-	void (*ctor)(void *obj);
-
-/* 5) cache creation/removal */
-	const char *name;
-	struct list_head next;
-
-/* 6) statistics */
-#if STATS
-	unsigned long num_active;
-	unsigned long num_allocations;
-	unsigned long high_mark;
-	unsigned long grown;
-	unsigned long reaped;
-	unsigned long errors;
-	unsigned long max_freeable;
-	unsigned long node_allocs;
-	unsigned long node_frees;
-	unsigned long node_overflow;
-	atomic_t allochit;
-	atomic_t allocmiss;
-	atomic_t freehit;
-	atomic_t freemiss;
-#endif
-#if DEBUG
-	/*
-	 * If debugging is enabled, then the allocator can add additional
-	 * fields and/or padding to every object. buffer_size contains the total
-	 * object size including these internal fields, the following two
-	 * variables contain the offset to the user object and its size.
-	 */
-	int obj_offset;
-	int obj_size;
-#endif
-	/*
-	 * We put nodelists[] at the end of kmem_cache, because we want to size
-	 * this array to nr_node_ids slots instead of MAX_NUMNODES
-	 * (see kmem_cache_init())
-	 * We still use [MAX_NUMNODES] and not [1] or [0] because cache_cache
-	 * is statically defined, so we reserve the max number of nodes.
-	 */
-	struct kmem_list3 *nodelists[MAX_NUMNODES];
-	/*
-	 * Do not add fields after nodelists[]
-	 */
-};
-
 #define CFLGS_OFF_SLAB		(0x80000000UL)
 #define	OFF_SLAB(x)	((x)->flags & CFLGS_OFF_SLAB)
 
@@ -1704,7 +1624,7 @@
 	if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
 		flags |= __GFP_RECLAIMABLE;
 
-	page = alloc_pages_exact_node(nodeid, flags, cachep->gfporder);
+	page = alloc_pages_exact_node(nodeid, flags | __GFP_NOTRACK, cachep->gfporder);
 	if (!page)
 		return NULL;
 
@@ -1717,6 +1637,16 @@
 			NR_SLAB_UNRECLAIMABLE, nr_pages);
 	for (i = 0; i < nr_pages; i++)
 		__SetPageSlab(page + i);
+
+	if (kmemcheck_enabled && !(cachep->flags & SLAB_NOTRACK)) {
+		kmemcheck_alloc_shadow(page, cachep->gfporder, flags, nodeid);
+
+		if (cachep->ctor)
+			kmemcheck_mark_uninitialized_pages(page, nr_pages);
+		else
+			kmemcheck_mark_unallocated_pages(page, nr_pages);
+	}
+
 	return page_address(page);
 }
 
@@ -1729,6 +1659,8 @@
 	struct page *page = virt_to_page(addr);
 	const unsigned long nr_freed = i;
 
+	kmemcheck_free_shadow(page, cachep->gfporder);
+
 	if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
 		sub_zone_page_state(page_zone(page),
 				NR_SLAB_RECLAIMABLE, nr_freed);
@@ -3404,6 +3336,9 @@
 	kmemleak_alloc_recursive(ptr, obj_size(cachep), 1, cachep->flags,
 				 flags);
 
+	if (likely(ptr))
+		kmemcheck_slab_alloc(cachep, flags, ptr, obj_size(cachep));
+
 	if (unlikely((flags & __GFP_ZERO) && ptr))
 		memset(ptr, 0, obj_size(cachep));
 
@@ -3464,6 +3399,9 @@
 				 flags);
 	prefetchw(objp);
 
+	if (likely(objp))
+		kmemcheck_slab_alloc(cachep, flags, objp, obj_size(cachep));
+
 	if (unlikely((flags & __GFP_ZERO) && objp))
 		memset(objp, 0, obj_size(cachep));
 
@@ -3580,6 +3518,8 @@
 	kmemleak_free_recursive(objp, cachep->flags);
 	objp = cache_free_debugcheck(cachep, objp, __builtin_return_address(0));
 
+	kmemcheck_slab_free(cachep, objp, obj_size(cachep));
+
 	/*
 	 * Skip calling cache_free_alien() when the platform is not numa.
 	 * This will avoid cache misses that happen while accessing slabp (which
diff --git a/mm/slub.c b/mm/slub.c
index 1c95077..2701419 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -18,6 +18,7 @@
 #include <linux/proc_fs.h>
 #include <linux/seq_file.h>
 #include <linux/kmemtrace.h>
+#include <linux/kmemcheck.h>
 #include <linux/cpu.h>
 #include <linux/cpuset.h>
 #include <linux/kmemleak.h>
@@ -147,7 +148,7 @@
 		SLAB_TRACE | SLAB_DESTROY_BY_RCU | SLAB_NOLEAKTRACE)
 
 #define SLUB_MERGE_SAME (SLAB_DEBUG_FREE | SLAB_RECLAIM_ACCOUNT | \
-		SLAB_CACHE_DMA)
+		SLAB_CACHE_DMA | SLAB_NOTRACK)
 
 #ifndef ARCH_KMALLOC_MINALIGN
 #define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long)
@@ -1071,6 +1072,8 @@
 {
 	int order = oo_order(oo);
 
+	flags |= __GFP_NOTRACK;
+
 	if (node == -1)
 		return alloc_pages(flags, order);
 	else
@@ -1098,6 +1101,24 @@
 
 		stat(get_cpu_slab(s, raw_smp_processor_id()), ORDER_FALLBACK);
 	}
+
+	if (kmemcheck_enabled
+		&& !(s->flags & (SLAB_NOTRACK | DEBUG_DEFAULT_FLAGS)))
+	{
+		int pages = 1 << oo_order(oo);
+
+		kmemcheck_alloc_shadow(page, oo_order(oo), flags, node);
+
+		/*
+		 * Objects from caches that have a constructor don't get
+		 * cleared when they're allocated, so we need to do it here.
+		 */
+		if (s->ctor)
+			kmemcheck_mark_uninitialized_pages(page, pages);
+		else
+			kmemcheck_mark_unallocated_pages(page, pages);
+	}
+
 	page->objects = oo_objects(oo);
 	mod_zone_page_state(page_zone(page),
 		(s->flags & SLAB_RECLAIM_ACCOUNT) ?
@@ -1171,6 +1192,8 @@
 		__ClearPageSlubDebug(page);
 	}
 
+	kmemcheck_free_shadow(page, compound_order(page));
+
 	mod_zone_page_state(page_zone(page),
 		(s->flags & SLAB_RECLAIM_ACCOUNT) ?
 		NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE,
@@ -1626,7 +1649,9 @@
 	if (unlikely((gfpflags & __GFP_ZERO) && object))
 		memset(object, 0, objsize);
 
+	kmemcheck_slab_alloc(s, gfpflags, object, c->objsize);
 	kmemleak_alloc_recursive(object, objsize, 1, s->flags, gfpflags);
+
 	return object;
 }
 
@@ -1759,6 +1784,7 @@
 	kmemleak_free_recursive(x, s->flags);
 	local_irq_save(flags);
 	c = get_cpu_slab(s, smp_processor_id());
+	kmemcheck_slab_free(s, object, c->objsize);
 	debug_check_no_locks_freed(object, c->objsize);
 	if (!(s->flags & SLAB_DEBUG_OBJECTS))
 		debug_check_no_obj_freed(object, c->objsize);
@@ -2633,7 +2659,8 @@
 
 	if (!s || !text || !kmem_cache_open(s, flags, text,
 			realsize, ARCH_KMALLOC_MINALIGN,
-			SLAB_CACHE_DMA|__SYSFS_ADD_DEFERRED, NULL)) {
+			SLAB_CACHE_DMA|SLAB_NOTRACK|__SYSFS_ADD_DEFERRED,
+			NULL)) {
 		kfree(s);
 		kfree(text);
 		goto unlock_out;
@@ -2727,9 +2754,10 @@
 
 static void *kmalloc_large_node(size_t size, gfp_t flags, int node)
 {
-	struct page *page = alloc_pages_node(node, flags | __GFP_COMP,
-						get_order(size));
+	struct page *page;
 
+	flags |= __GFP_COMP | __GFP_NOTRACK;
+	page = alloc_pages_node(node, flags, get_order(size));
 	if (page)
 		return page_address(page);
 	else
@@ -4412,6 +4440,8 @@
 		*p++ = 'a';
 	if (s->flags & SLAB_DEBUG_FREE)
 		*p++ = 'F';
+	if (!(s->flags & SLAB_NOTRACK))
+		*p++ = 't';
 	if (p != name + 1)
 		*p++ = '-';
 	p += sprintf(p, "%07d", s->size);
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 1a94a30..5c93435 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -39,6 +39,7 @@
 #include <linux/module.h>
 #include <linux/types.h>
 #include <linux/kernel.h>
+#include <linux/kmemcheck.h>
 #include <linux/mm.h>
 #include <linux/interrupt.h>
 #include <linux/in.h>
@@ -201,6 +202,8 @@
 	skb->data = data;
 	skb_reset_tail_pointer(skb);
 	skb->end = skb->tail + size;
+	kmemcheck_annotate_bitfield(skb, flags1);
+	kmemcheck_annotate_bitfield(skb, flags2);
 	/* make sure we initialize shinfo sequentially */
 	shinfo = skb_shinfo(skb);
 	atomic_set(&shinfo->dataref, 1);
@@ -217,6 +220,8 @@
 		struct sk_buff *child = skb + 1;
 		atomic_t *fclone_ref = (atomic_t *) (child + 1);
 
+		kmemcheck_annotate_bitfield(child, flags1);
+		kmemcheck_annotate_bitfield(child, flags2);
 		skb->fclone = SKB_FCLONE_ORIG;
 		atomic_set(fclone_ref, 1);
 
@@ -635,6 +640,9 @@
 		n = kmem_cache_alloc(skbuff_head_cache, gfp_mask);
 		if (!n)
 			return NULL;
+
+		kmemcheck_annotate_bitfield(n, flags1);
+		kmemcheck_annotate_bitfield(n, flags2);
 		n->fclone = SKB_FCLONE_UNAVAILABLE;
 	}
 
diff --git a/net/core/sock.c b/net/core/sock.c
index 06e26b7..b0ba569 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -945,6 +945,8 @@
 		sk = kmalloc(prot->obj_size, priority);
 
 	if (sk != NULL) {
+		kmemcheck_annotate_bitfield(sk, flags);
+
 		if (security_sk_alloc(sk, family, priority))
 			goto out_free;
 
diff --git a/net/ipv4/inet_timewait_sock.c b/net/ipv4/inet_timewait_sock.c
index 68a8d89..61283f9 100644
--- a/net/ipv4/inet_timewait_sock.c
+++ b/net/ipv4/inet_timewait_sock.c
@@ -9,6 +9,7 @@
  */
 
 #include <linux/kernel.h>
+#include <linux/kmemcheck.h>
 #include <net/inet_hashtables.h>
 #include <net/inet_timewait_sock.h>
 #include <net/ip.h>
@@ -120,6 +121,8 @@
 	if (tw != NULL) {
 		const struct inet_sock *inet = inet_sk(sk);
 
+		kmemcheck_annotate_bitfield(tw, flags);
+
 		/* Give us an identity. */
 		tw->tw_daddr	    = inet->daddr;
 		tw->tw_rcv_saddr    = inet->rcv_saddr;
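
A sketch of the bitfield-annotation pattern used in the two hunks above, applied to a hypothetical structure; the struct, field and group names are made up, while kmemcheck_bitfield_begin/end and kmemcheck_annotate_bitfield() are the markers this series adds in <linux/kmemcheck.h>:

	#include <linux/kmemcheck.h>
	#include <linux/slab.h>

	struct foo {
		/* Adjacent bitfields share bytes, so kmemcheck cannot track
		 * them bit by bit; the whole group is annotated instead. */
		kmemcheck_bitfield_begin(flags);
		unsigned int	active:1,
				pinned:1;
		kmemcheck_bitfield_end(flags);
		void		*data;
	};

	static struct foo *foo_alloc(gfp_t gfp)
	{
		struct foo *f = kmalloc(sizeof(*f), gfp);

		if (f)
			/* Suppress false positives from the read-modify-write
			 * cycles that bitfield stores generate. */
			kmemcheck_annotate_bitfield(f, flags);
		return f;
	}
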
diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
index a9b3a6f..656cbd1 100644
--- a/net/iucv/af_iucv.c
+++ b/net/iucv/af_iucv.c
@@ -1,11 +1,12 @@
 /*
- *  linux/net/iucv/af_iucv.c
- *
  *  IUCV protocol stack for Linux on zSeries
  *
- *  Copyright 2006 IBM Corporation
+ *  Copyright IBM Corp. 2006, 2009
  *
  *  Author(s):	Jennifer Hunt <jenhunt@us.ibm.com>
+ *		Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
+ *  PM functions:
+ *		Ursula Braun <ursula.braun@de.ibm.com>
  */
 
 #define KMSG_COMPONENT "af_iucv"
@@ -90,6 +91,122 @@
        memcpy(&dst[8], src, 8);
 }
 
+static int afiucv_pm_prepare(struct device *dev)
+{
+#ifdef CONFIG_PM_DEBUG
+	printk(KERN_WARNING "afiucv_pm_prepare\n");
+#endif
+	return 0;
+}
+
+static void afiucv_pm_complete(struct device *dev)
+{
+#ifdef CONFIG_PM_DEBUG
+	printk(KERN_WARNING "afiucv_pm_complete\n");
+#endif
+	return;
+}
+
+/**
+ * afiucv_pm_freeze() - Freeze PM callback
+ * @dev:	AFIUCV dummy device
+ *
+ * Sever all established IUCV communication paths
+ */
+static int afiucv_pm_freeze(struct device *dev)
+{
+	struct iucv_sock *iucv;
+	struct sock *sk;
+	struct hlist_node *node;
+	int err = 0;
+
+#ifdef CONFIG_PM_DEBUG
+	printk(KERN_WARNING "afiucv_pm_freeze\n");
+#endif
+	read_lock(&iucv_sk_list.lock);
+	sk_for_each(sk, node, &iucv_sk_list.head) {
+		iucv = iucv_sk(sk);
+		skb_queue_purge(&iucv->send_skb_q);
+		skb_queue_purge(&iucv->backlog_skb_q);
+		switch (sk->sk_state) {
+		case IUCV_SEVERED:
+		case IUCV_DISCONN:
+		case IUCV_CLOSING:
+		case IUCV_CONNECTED:
+			if (iucv->path) {
+				err = iucv_path_sever(iucv->path, NULL);
+				iucv_path_free(iucv->path);
+				iucv->path = NULL;
+			}
+			break;
+		case IUCV_OPEN:
+		case IUCV_BOUND:
+		case IUCV_LISTEN:
+		case IUCV_CLOSED:
+		default:
+			break;
+		}
+	}
+	read_unlock(&iucv_sk_list.lock);
+	return err;
+}
+
+/**
+ * afiucv_pm_restore_thaw() - Thaw and restore PM callback
+ * @dev:	AFIUCV dummy device
+ *
+ * Socket clean-up after freeze
+ */
+static int afiucv_pm_restore_thaw(struct device *dev)
+{
+	struct iucv_sock *iucv;
+	struct sock *sk;
+	struct hlist_node *node;
+
+#ifdef CONFIG_PM_DEBUG
+	printk(KERN_WARNING "afiucv_pm_restore_thaw\n");
+#endif
+	read_lock(&iucv_sk_list.lock);
+	sk_for_each(sk, node, &iucv_sk_list.head) {
+		iucv = iucv_sk(sk);
+		switch (sk->sk_state) {
+		case IUCV_CONNECTED:
+			sk->sk_err = EPIPE;
+			sk->sk_state = IUCV_DISCONN;
+			sk->sk_state_change(sk);
+			break;
+		case IUCV_DISCONN:
+		case IUCV_SEVERED:
+		case IUCV_CLOSING:
+		case IUCV_LISTEN:
+		case IUCV_BOUND:
+		case IUCV_OPEN:
+		default:
+			break;
+		}
+	}
+	read_unlock(&iucv_sk_list.lock);
+	return 0;
+}
+
+static struct dev_pm_ops afiucv_pm_ops = {
+	.prepare = afiucv_pm_prepare,
+	.complete = afiucv_pm_complete,
+	.freeze = afiucv_pm_freeze,
+	.thaw = afiucv_pm_restore_thaw,
+	.restore = afiucv_pm_restore_thaw,
+};
+
+static struct device_driver af_iucv_driver = {
+	.owner = THIS_MODULE,
+	.name = "afiucv",
+	.bus  = &iucv_bus,
+	.pm   = &afiucv_pm_ops,
+};
+
+/* dummy device used as trigger for PM functions */
+static struct device *af_iucv_dev;
+
 /**
  * iucv_msg_length() - Returns the length of an iucv message.
  * @msg:	Pointer to struct iucv_message, MUST NOT be NULL
@@ -1556,8 +1673,30 @@
 	err = sock_register(&iucv_sock_family_ops);
 	if (err)
 		goto out_proto;
+	/* establish dummy device */
+	err = driver_register(&af_iucv_driver);
+	if (err)
+		goto out_sock;
+	af_iucv_dev = kzalloc(sizeof(struct device), GFP_KERNEL);
+	if (!af_iucv_dev) {
+		err = -ENOMEM;
+		goto out_driver;
+	}
+	dev_set_name(af_iucv_dev, "af_iucv");
+	af_iucv_dev->bus = &iucv_bus;
+	af_iucv_dev->parent = iucv_root;
+	af_iucv_dev->release = (void (*)(struct device *))kfree;
+	af_iucv_dev->driver = &af_iucv_driver;
+	err = device_register(af_iucv_dev);
+	if (err)
+		goto out_driver;
+
 	return 0;
 
+out_driver:
+	driver_unregister(&af_iucv_driver);
+out_sock:
+	sock_unregister(PF_IUCV);
 out_proto:
 	proto_unregister(&iucv_proto);
 out_iucv:
@@ -1568,6 +1707,8 @@
 
 static void __exit afiucv_exit(void)
 {
+	device_unregister(af_iucv_dev);
+	driver_unregister(&af_iucv_driver);
 	sock_unregister(PF_IUCV);
 	proto_unregister(&iucv_proto);
 	iucv_unregister(&af_iucv_handler, 0);
diff --git a/net/iucv/iucv.c b/net/iucv/iucv.c
index 61e8038..c833481d 100644
--- a/net/iucv/iucv.c
+++ b/net/iucv/iucv.c
@@ -1,7 +1,8 @@
 /*
  * IUCV base infrastructure.
  *
- * Copyright 2001, 2006 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ * Copyright IBM Corp. 2001, 2009
+ *
  * Author(s):
  *    Original source:
  *	Alan Altmark (Alan_Altmark@us.ibm.com)	Sept. 2000
@@ -10,6 +11,8 @@
  *	Fritz Elfert (elfert@de.ibm.com, felfert@millenux.com)
  *    Rewritten for af_iucv:
  *	Martin Schwidefsky <schwidefsky@de.ibm.com>
+ *    PM functions:
+ *	Ursula Braun (ursula.braun@de.ibm.com)
  *
  * Documentation used:
  *    The original source
@@ -45,6 +48,7 @@
 #include <linux/err.h>
 #include <linux/device.h>
 #include <linux/cpu.h>
+#include <linux/reboot.h>
 #include <net/iucv/iucv.h>
 #include <asm/atomic.h>
 #include <asm/ebcdic.h>
@@ -75,9 +79,24 @@
 	return 0;
 }
 
+static int iucv_pm_prepare(struct device *);
+static void iucv_pm_complete(struct device *);
+static int iucv_pm_freeze(struct device *);
+static int iucv_pm_thaw(struct device *);
+static int iucv_pm_restore(struct device *);
+
+static struct dev_pm_ops iucv_pm_ops = {
+	.prepare = iucv_pm_prepare,
+	.complete = iucv_pm_complete,
+	.freeze = iucv_pm_freeze,
+	.thaw = iucv_pm_thaw,
+	.restore = iucv_pm_restore,
+};
+
 struct bus_type iucv_bus = {
 	.name = "iucv",
 	.match = iucv_bus_match,
+	.pm = &iucv_pm_ops,
 };
 EXPORT_SYMBOL(iucv_bus);
 
@@ -147,6 +166,7 @@
 	IUCV_RESUME = 14,
 	IUCV_SEVER = 15,
 	IUCV_SETMASK = 16,
+	IUCV_SETCONTROLMASK = 17,
 };
 
 /*
@@ -364,6 +384,18 @@
 	parm->set_mask.ipmask = 0xf8;
 	iucv_call_b2f0(IUCV_SETMASK, parm);
 
+	/*
+	 * Enable all iucv control interrupts.
+	 * ipmask contains bits for the different interrupts
+	 *	0x80 - Flag to allow pending connections interrupts
+	 *	0x40 - Flag to allow connection complete interrupts
+	 *	0x20 - Flag to allow connection severed interrupts
+	 *	0x10 - Flag to allow connection quiesced interrupts
+	 *	0x08 - Flag to allow connection resumed interrupts
+	 */
+	memset(parm, 0, sizeof(union iucv_param));
+	parm->set_mask.ipmask = 0xf8;
+	iucv_call_b2f0(IUCV_SETCONTROLMASK, parm);
 	/* Set indication that iucv interrupts are allowed for this cpu. */
 	cpu_set(cpu, iucv_irq_cpumask);
 }
@@ -389,6 +421,31 @@
 }
 
 /**
+ * iucv_block_cpu_almost
+ * @data: unused
+ *
+ * Allow connection-severed interrupts only on this cpu.
+ */
+static void iucv_block_cpu_almost(void *data)
+{
+	int cpu = smp_processor_id();
+	union iucv_param *parm;
+
+	/* Allow iucv control interrupts only */
+	parm = iucv_param_irq[cpu];
+	memset(parm, 0, sizeof(union iucv_param));
+	parm->set_mask.ipmask = 0x08;
+	iucv_call_b2f0(IUCV_SETMASK, parm);
+	/* Allow iucv-severed interrupt only */
+	memset(parm, 0, sizeof(union iucv_param));
+	parm->set_mask.ipmask = 0x20;
+	iucv_call_b2f0(IUCV_SETCONTROLMASK, parm);
+
+	/* Clear indication that iucv interrupts are allowed for this cpu. */
+	cpu_clear(cpu, iucv_irq_cpumask);
+}
+
+/**
  * iucv_declare_cpu
  * @data: unused
  *
@@ -758,6 +815,28 @@
 }
 EXPORT_SYMBOL(iucv_unregister);
 
+static int iucv_reboot_event(struct notifier_block *this,
+			     unsigned long event, void *ptr)
+{
+	int i, rc;
+
+	get_online_cpus();
+	on_each_cpu(iucv_block_cpu, NULL, 1);
+	preempt_disable();
+	for (i = 0; i < iucv_max_pathid; i++) {
+		if (iucv_path_table[i])
+			rc = iucv_sever_pathid(i, NULL);
+	}
+	preempt_enable();
+	put_online_cpus();
+	iucv_disable();
+	return NOTIFY_DONE;
+}
+
+static struct notifier_block iucv_reboot_notifier = {
+	.notifier_call = iucv_reboot_event,
+};
+
 /**
  * iucv_path_accept
  * @path: address of iucv path structure
@@ -777,6 +856,10 @@
 	int rc;
 
 	local_bh_disable();
+	if (!cpu_isset(smp_processor_id(), iucv_buffer_cpumask)) {
+		rc = -EIO;
+		goto out;
+	}
 	/* Prepare parameter block. */
 	parm = iucv_param[smp_processor_id()];
 	memset(parm, 0, sizeof(union iucv_param));
@@ -792,6 +875,7 @@
 		path->msglim = parm->ctrl.ipmsglim;
 		path->flags = parm->ctrl.ipflags1;
 	}
+out:
 	local_bh_enable();
 	return rc;
 }
@@ -821,6 +905,10 @@
 
 	spin_lock_bh(&iucv_table_lock);
 	iucv_cleanup_queue();
+	if (!cpu_isset(smp_processor_id(), iucv_buffer_cpumask)) {
+		rc = -EIO;
+		goto out;
+	}
 	parm = iucv_param[smp_processor_id()];
 	memset(parm, 0, sizeof(union iucv_param));
 	parm->ctrl.ipmsglim = path->msglim;
@@ -855,6 +943,7 @@
 			rc = -EIO;
 		}
 	}
+out:
 	spin_unlock_bh(&iucv_table_lock);
 	return rc;
 }
@@ -876,12 +965,17 @@
 	int rc;
 
 	local_bh_disable();
+	if (!cpu_isset(smp_processor_id(), iucv_buffer_cpumask)) {
+		rc = -EIO;
+		goto out;
+	}
 	parm = iucv_param[smp_processor_id()];
 	memset(parm, 0, sizeof(union iucv_param));
 	if (userdata)
 		memcpy(parm->ctrl.ipuser, userdata, sizeof(parm->ctrl.ipuser));
 	parm->ctrl.ippathid = path->pathid;
 	rc = iucv_call_b2f0(IUCV_QUIESCE, parm);
+out:
 	local_bh_enable();
 	return rc;
 }
@@ -903,12 +997,17 @@
 	int rc;
 
 	local_bh_disable();
+	if (!cpu_isset(smp_processor_id(), iucv_buffer_cpumask)) {
+		rc = -EIO;
+		goto out;
+	}
 	parm = iucv_param[smp_processor_id()];
 	memset(parm, 0, sizeof(union iucv_param));
 	if (userdata)
 		memcpy(parm->ctrl.ipuser, userdata, sizeof(parm->ctrl.ipuser));
 	parm->ctrl.ippathid = path->pathid;
 	rc = iucv_call_b2f0(IUCV_RESUME, parm);
+out:
 	local_bh_enable();
 	return rc;
 }
@@ -927,6 +1026,10 @@
 	int rc;
 
 	preempt_disable();
+	if (!cpu_isset(smp_processor_id(), iucv_buffer_cpumask)) {
+		rc = -EIO;
+		goto out;
+	}
 	if (iucv_active_cpu != smp_processor_id())
 		spin_lock_bh(&iucv_table_lock);
 	rc = iucv_sever_pathid(path->pathid, userdata);
@@ -934,6 +1037,7 @@
 	list_del_init(&path->list);
 	if (iucv_active_cpu != smp_processor_id())
 		spin_unlock_bh(&iucv_table_lock);
+out:
 	preempt_enable();
 	return rc;
 }
@@ -956,6 +1060,10 @@
 	int rc;
 
 	local_bh_disable();
+	if (!cpu_isset(smp_processor_id(), iucv_buffer_cpumask)) {
+		rc = -EIO;
+		goto out;
+	}
 	parm = iucv_param[smp_processor_id()];
 	memset(parm, 0, sizeof(union iucv_param));
 	parm->purge.ippathid = path->pathid;
@@ -967,6 +1075,7 @@
 		msg->audit = (*(u32 *) &parm->purge.ipaudit) >> 8;
 		msg->tag = parm->purge.ipmsgtag;
 	}
+out:
 	local_bh_enable();
 	return rc;
 }
@@ -1043,6 +1152,10 @@
 	if (msg->flags & IUCV_IPRMDATA)
 		return iucv_message_receive_iprmdata(path, msg, flags,
 						     buffer, size, residual);
+	if (!cpu_isset(smp_processor_id(), iucv_buffer_cpumask)) {
+		rc = -EIO;
+		goto out;
+	}
 	parm = iucv_param[smp_processor_id()];
 	memset(parm, 0, sizeof(union iucv_param));
 	parm->db.ipbfadr1 = (u32)(addr_t) buffer;
@@ -1058,6 +1171,7 @@
 		if (residual)
 			*residual = parm->db.ipbfln1f;
 	}
+out:
 	return rc;
 }
 EXPORT_SYMBOL(__iucv_message_receive);
@@ -1111,6 +1225,10 @@
 	int rc;
 
 	local_bh_disable();
+	if (!cpu_isset(smp_processor_id(), iucv_buffer_cpumask)) {
+		rc = -EIO;
+		goto out;
+	}
 	parm = iucv_param[smp_processor_id()];
 	memset(parm, 0, sizeof(union iucv_param));
 	parm->db.ippathid = path->pathid;
@@ -1118,6 +1236,7 @@
 	parm->db.iptrgcls = msg->class;
 	parm->db.ipflags1 = (IUCV_IPTRGCLS | IUCV_IPFGMID | IUCV_IPFGPID);
 	rc = iucv_call_b2f0(IUCV_REJECT, parm);
+out:
 	local_bh_enable();
 	return rc;
 }
@@ -1145,6 +1264,10 @@
 	int rc;
 
 	local_bh_disable();
+	if (!cpu_isset(smp_processor_id(), iucv_buffer_cpumask)) {
+		rc = -EIO;
+		goto out;
+	}
 	parm = iucv_param[smp_processor_id()];
 	memset(parm, 0, sizeof(union iucv_param));
 	if (flags & IUCV_IPRMDATA) {
@@ -1162,6 +1285,7 @@
 		parm->db.iptrgcls = msg->class;
 	}
 	rc = iucv_call_b2f0(IUCV_REPLY, parm);
+out:
 	local_bh_enable();
 	return rc;
 }
@@ -1190,6 +1314,10 @@
 	union iucv_param *parm;
 	int rc;
 
+	if (!cpu_isset(smp_processor_id(), iucv_buffer_cpumask)) {
+		rc = -EIO;
+		goto out;
+	}
 	parm = iucv_param[smp_processor_id()];
 	memset(parm, 0, sizeof(union iucv_param));
 	if (flags & IUCV_IPRMDATA) {
@@ -1212,6 +1340,7 @@
 	rc = iucv_call_b2f0(IUCV_SEND, parm);
 	if (!rc)
 		msg->id = parm->db.ipmsgid;
+out:
 	return rc;
 }
 EXPORT_SYMBOL(__iucv_message_send);
@@ -1272,6 +1401,10 @@
 	int rc;
 
 	local_bh_disable();
+	if (!cpu_isset(smp_processor_id(), iucv_buffer_cpumask)) {
+		rc = -EIO;
+		goto out;
+	}
 	parm = iucv_param[smp_processor_id()];
 	memset(parm, 0, sizeof(union iucv_param));
 	if (flags & IUCV_IPRMDATA) {
@@ -1297,6 +1430,7 @@
 	rc = iucv_call_b2f0(IUCV_SEND, parm);
 	if (!rc)
 		msg->id = parm->db.ipmsgid;
+out:
 	local_bh_enable();
 	return rc;
 }
@@ -1687,6 +1821,130 @@
 	spin_unlock(&iucv_queue_lock);
 }
 
+static int iucv_pm_prepare(struct device *dev)
+{
+	int rc = 0;
+
+#ifdef CONFIG_PM_DEBUG
+	printk(KERN_INFO "iucv_pm_prepare\n");
+#endif
+	if (dev->driver && dev->driver->pm && dev->driver->pm->prepare)
+		rc = dev->driver->pm->prepare(dev);
+	return rc;
+}
+
+static void iucv_pm_complete(struct device *dev)
+{
+#ifdef CONFIG_PM_DEBUG
+	printk(KERN_INFO "iucv_pm_complete\n");
+#endif
+	if (dev->driver && dev->driver->pm && dev->driver->pm->complete)
+		dev->driver->pm->complete(dev);
+}
+
+/**
+ * iucv_path_table_empty() - determine if iucv path table is empty
+ *
+ * Returns 0 if there are still iucv paths defined
+ *	   1 if there are no iucv paths defined
+ */
+int iucv_path_table_empty(void)
+{
+	int i;
+
+	for (i = 0; i < iucv_max_pathid; i++) {
+		if (iucv_path_table[i])
+			return 0;
+	}
+	return 1;
+}
+
+/**
+ * iucv_pm_freeze() - Freeze PM callback
+ * @dev:	iucv-based device
+ *
+ * disable iucv interrupts
+ * invoke callback function of the iucv-based driver
+ * shut down iucv if no iucv paths are established anymore
+ */
+static int iucv_pm_freeze(struct device *dev)
+{
+	int cpu;
+	int rc = 0;
+
+#ifdef CONFIG_PM_DEBUG
+	printk(KERN_WARNING "iucv_pm_freeze\n");
+#endif
+	for_each_cpu_mask_nr(cpu, iucv_irq_cpumask)
+		smp_call_function_single(cpu, iucv_block_cpu_almost, NULL, 1);
+	if (dev->driver && dev->driver->pm && dev->driver->pm->freeze)
+		rc = dev->driver->pm->freeze(dev);
+	if (iucv_path_table_empty())
+		iucv_disable();
+	return rc;
+}
+
+/**
+ * iucv_pm_thaw() - Thaw PM callback
+ * @dev:	iucv-based device
+ *
+ * make iucv ready for use again: allocate path table, declare interrupt buffers
+ *				  and enable iucv interrupts
+ * invoke callback function of the iucv-based driver
+ */
+static int iucv_pm_thaw(struct device *dev)
+{
+	int rc = 0;
+
+#ifdef CONFIG_PM_DEBUG
+	printk(KERN_WARNING "iucv_pm_thaw\n");
+#endif
+	if (!iucv_path_table) {
+		rc = iucv_enable();
+		if (rc)
+			goto out;
+	}
+	if (cpus_empty(iucv_irq_cpumask)) {
+		if (iucv_nonsmp_handler)
+			/* enable interrupts on one cpu */
+			iucv_allow_cpu(NULL);
+		else
+			/* enable interrupts on all cpus */
+			iucv_setmask_mp();
+	}
+	if (dev->driver && dev->driver->pm && dev->driver->pm->thaw)
+		rc = dev->driver->pm->thaw(dev);
+out:
+	return rc;
+}
+
+/**
+ * iucv_pm_restore() - Restore PM callback
+ * @dev:	iucv-based device
+ *
+ * make iucv ready for use again: allocate path table, declare interrupt buffers
+ *				  and enable iucv interrupts
+ * invoke callback function of the iucv-based driver
+ */
+static int iucv_pm_restore(struct device *dev)
+{
+	int rc = 0;
+
+#ifdef CONFIG_PM_DEBUG
+	printk(KERN_WARNING "iucv_pm_restore %p\n", iucv_path_table);
+#endif
+	if (cpus_empty(iucv_irq_cpumask)) {
+		rc = iucv_query_maxconn();
+		rc = iucv_enable();
+		if (rc)
+			goto out;
+	}
+	if (dev->driver && dev->driver->pm && dev->driver->pm->restore)
+		rc = dev->driver->pm->restore(dev);
+out:
+	return rc;
+}
+
 /**
  * iucv_init
  *
@@ -1740,15 +1998,20 @@
 	rc = register_hotcpu_notifier(&iucv_cpu_notifier);
 	if (rc)
 		goto out_free;
+	rc = register_reboot_notifier(&iucv_reboot_notifier);
+	if (rc)
+		goto out_cpu;
 	ASCEBC(iucv_error_no_listener, 16);
 	ASCEBC(iucv_error_no_memory, 16);
 	ASCEBC(iucv_error_pathid, 16);
 	iucv_available = 1;
 	rc = bus_register(&iucv_bus);
 	if (rc)
-		goto out_cpu;
+		goto out_reboot;
 	return 0;
 
+out_reboot:
+	unregister_reboot_notifier(&iucv_reboot_notifier);
 out_cpu:
 	unregister_hotcpu_notifier(&iucv_cpu_notifier);
 out_free:
@@ -1783,6 +2046,7 @@
 	list_for_each_entry_safe(p, n, &iucv_work_queue, list)
 		kfree(p);
 	spin_unlock_irq(&iucv_queue_lock);
+	unregister_reboot_notifier(&iucv_reboot_notifier);
 	unregister_hotcpu_notifier(&iucv_cpu_notifier);
 	for_each_possible_cpu(cpu) {
 		kfree(iucv_param_irq[cpu]);
diff --git a/samples/Kconfig b/samples/Kconfig
index b75d28c..428b065 100644
--- a/samples/Kconfig
+++ b/samples/Kconfig
@@ -26,7 +26,8 @@
 	  This builds trace event example modules.
 
 config SAMPLE_KOBJECT
-	tristate "Build kobject examples"
+	tristate "Build kobject examples -- loadable modules only"
+	depends on m
 	help
 	  This config option will allow you to build a number of
 	  different kobject sample modules showing how to use kobjects,
diff --git a/samples/firmware_class/firmware_sample_driver.c b/samples/firmware_class/firmware_sample_driver.c
deleted file mode 100644
index 219a298..0000000
--- a/samples/firmware_class/firmware_sample_driver.c
+++ /dev/null
@@ -1,121 +0,0 @@
-/*
- * firmware_sample_driver.c -
- *
- * Copyright (c) 2003 Manuel Estrada Sainz
- *
- * Sample code on how to use request_firmware() from drivers.
- *
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/device.h>
-#include <linux/string.h>
-#include <linux/firmware.h>
-
-static struct device ghost_device = {
-	.bus_id    = "ghost0",
-};
-
-
-static void sample_firmware_load(char *firmware, int size)
-{
-	u8 buf[size+1];
-	memcpy(buf, firmware, size);
-	buf[size] = '\0';
-	printk(KERN_INFO "firmware_sample_driver: firmware: %s\n", buf);
-}
-
-static void sample_probe_default(void)
-{
-	/* uses the default method to get the firmware */
-	const struct firmware *fw_entry;
-	int retval;
-
-	printk(KERN_INFO "firmware_sample_driver: "
-		"a ghost device got inserted :)\n");
-
-	retval = request_firmware(&fw_entry, "sample_driver_fw", &ghost_device);
-	if (retval) {
-		printk(KERN_ERR
-		       "firmware_sample_driver: Firmware not available\n");
-		return;
-	}
-
-	sample_firmware_load(fw_entry->data, fw_entry->size);
-
-	release_firmware(fw_entry);
-
-	/* finish setting up the device */
-}
-
-static void sample_probe_specific(void)
-{
-	int retval;
-	/* Uses some specific hotplug support to get the firmware from
-	 * userspace  directly into the hardware, or via some sysfs file */
-
-	/* NOTE: This currently doesn't work */
-
-	printk(KERN_INFO "firmware_sample_driver: "
-		"a ghost device got inserted :)\n");
-
-	retval = request_firmware(NULL, "sample_driver_fw", &ghost_device);
-	if (retval) {
-		printk(KERN_ERR
-		       "firmware_sample_driver: Firmware load failed\n");
-		return;
-	}
-
-	/* request_firmware blocks until userspace finished, so at
-	 * this point the firmware should be already in the device */
-
-	/* finish setting up the device */
-}
-
-static void sample_probe_async_cont(const struct firmware *fw, void *context)
-{
-	if (!fw) {
-		printk(KERN_ERR
-		       "firmware_sample_driver: firmware load failed\n");
-		return;
-	}
-
-	printk(KERN_INFO "firmware_sample_driver: device pointer \"%s\"\n",
-	       (char *)context);
-	sample_firmware_load(fw->data, fw->size);
-}
-
-static void sample_probe_async(void)
-{
-	/* Let's say that I can't sleep */
-	int error;
-	error = request_firmware_nowait(THIS_MODULE, FW_ACTION_NOHOTPLUG,
-					"sample_driver_fw", &ghost_device,
-					"my device pointer",
-					sample_probe_async_cont);
-	if (error)
-		printk(KERN_ERR "firmware_sample_driver:"
-		       " request_firmware_nowait failed\n");
-}
-
-static int __init sample_init(void)
-{
-	device_initialize(&ghost_device);
-	/* since there is no real hardware insertion I just call the
-	 * sample probe functions here */
-	sample_probe_specific();
-	sample_probe_default();
-	sample_probe_async();
-	return 0;
-}
-
-static void __exit sample_exit(void)
-{
-}
-
-module_init(sample_init);
-module_exit(sample_exit);
-
-MODULE_LICENSE("GPL");
diff --git a/samples/firmware_class/firmware_sample_firmware_class.c b/samples/firmware_class/firmware_sample_firmware_class.c
deleted file mode 100644
index e6cf7a4..0000000
--- a/samples/firmware_class/firmware_sample_firmware_class.c
+++ /dev/null
@@ -1,204 +0,0 @@
-/*
- * firmware_sample_firmware_class.c -
- *
- * Copyright (c) 2003 Manuel Estrada Sainz
- *
- * NOTE: This is just a probe of concept, if you think that your driver would
- * be well served by this mechanism please contact me first.
- *
- * DON'T USE THIS CODE AS IS
- *
- */
-
-#include <linux/device.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/timer.h>
-#include <linux/slab.h>
-#include <linux/string.h>
-#include <linux/firmware.h>
-
-
-MODULE_AUTHOR("Manuel Estrada Sainz");
-MODULE_DESCRIPTION("Hackish sample for using firmware class directly");
-MODULE_LICENSE("GPL");
-
-static inline struct class_device *to_class_dev(struct kobject *obj)
-{
-	return container_of(obj, struct class_device, kobj);
-}
-
-static inline
-struct class_device_attribute *to_class_dev_attr(struct attribute *_attr)
-{
-	return container_of(_attr, struct class_device_attribute, attr);
-}
-
-struct firmware_priv {
-	char fw_id[FIRMWARE_NAME_MAX];
-	s32 loading:2;
-	u32 abort:1;
-};
-
-static ssize_t firmware_loading_show(struct class_device *class_dev, char *buf)
-{
-	struct firmware_priv *fw_priv = class_get_devdata(class_dev);
-	return sprintf(buf, "%d\n", fw_priv->loading);
-}
-
-static ssize_t firmware_loading_store(struct class_device *class_dev,
-				      const char *buf, size_t count)
-{
-	struct firmware_priv *fw_priv = class_get_devdata(class_dev);
-	int prev_loading = fw_priv->loading;
-
-	fw_priv->loading = simple_strtol(buf, NULL, 10);
-
-	switch (fw_priv->loading) {
-	case -1:
-		/* abort load an panic */
-		break;
-	case 1:
-		/* setup load */
-		break;
-	case 0:
-		if (prev_loading == 1) {
-			/* finish load and get the device back to working
-			 * state */
-		}
-		break;
-	}
-
-	return count;
-}
-static CLASS_DEVICE_ATTR(loading, 0644,
-			 firmware_loading_show, firmware_loading_store);
-
-static ssize_t firmware_data_read(struct kobject *kobj,
-				  struct bin_attribute *bin_attr,
-				  char *buffer, loff_t offset, size_t count)
-{
-	struct class_device *class_dev = to_class_dev(kobj);
-	struct firmware_priv *fw_priv = class_get_devdata(class_dev);
-
-	/* read from the devices firmware memory */
-
-	return count;
-}
-static ssize_t firmware_data_write(struct kobject *kobj,
-				   struct bin_attribute *bin_attr,
-				   char *buffer, loff_t offset, size_t count)
-{
-	struct class_device *class_dev = to_class_dev(kobj);
-	struct firmware_priv *fw_priv = class_get_devdata(class_dev);
-
-	/* write to the devices firmware memory */
-
-	return count;
-}
-static struct bin_attribute firmware_attr_data = {
-	.attr = {.name = "data", .mode = 0644},
-	.size = 0,
-	.read = firmware_data_read,
-	.write = firmware_data_write,
-};
-static int fw_setup_class_device(struct class_device *class_dev,
-				 const char *fw_name,
-				 struct device *device)
-{
-	int retval;
-	struct firmware_priv *fw_priv;
-
-	fw_priv = kzalloc(sizeof(struct firmware_priv),	GFP_KERNEL);
-	if (!fw_priv) {
-		retval = -ENOMEM;
-		goto out;
-	}
-
-	memset(class_dev, 0, sizeof(*class_dev));
-
-	strncpy(fw_priv->fw_id, fw_name, FIRMWARE_NAME_MAX);
-	fw_priv->fw_id[FIRMWARE_NAME_MAX-1] = '\0';
-
-	strncpy(class_dev->class_id, device->bus_id, BUS_ID_SIZE);
-	class_dev->class_id[BUS_ID_SIZE-1] = '\0';
-	class_dev->dev = device;
-
-	class_dev->class = &firmware_class;
-	class_set_devdata(class_dev, fw_priv);
-	retval = class_device_register(class_dev);
-	if (retval) {
-		printk(KERN_ERR "%s: class_device_register failed\n",
-		       __func__);
-		goto error_free_fw_priv;
-	}
-
-	retval = sysfs_create_bin_file(&class_dev->kobj, &firmware_attr_data);
-	if (retval) {
-		printk(KERN_ERR "%s: sysfs_create_bin_file failed\n",
-		       __func__);
-		goto error_unreg_class_dev;
-	}
-
-	retval = class_device_create_file(class_dev,
-					  &class_device_attr_loading);
-	if (retval) {
-		printk(KERN_ERR "%s: class_device_create_file failed\n",
-		       __func__);
-		goto error_remove_data;
-	}
-
-	goto out;
-
-error_remove_data:
-	sysfs_remove_bin_file(&class_dev->kobj, &firmware_attr_data);
-error_unreg_class_dev:
-	class_device_unregister(class_dev);
-error_free_fw_priv:
-	kfree(fw_priv);
-out:
-	return retval;
-}
-static void fw_remove_class_device(struct class_device *class_dev)
-{
-	struct firmware_priv *fw_priv = class_get_devdata(class_dev);
-
-	class_device_remove_file(class_dev, &class_device_attr_loading);
-	sysfs_remove_bin_file(&class_dev->kobj, &firmware_attr_data);
-	class_device_unregister(class_dev);
-}
-
-static struct class_device *class_dev;
-
-static struct device my_device = {
-	.bus_id    = "my_dev0",
-};
-
-static int __init firmware_sample_init(void)
-{
-	int error;
-
-	device_initialize(&my_device);
-	class_dev = kmalloc(sizeof(struct class_device), GFP_KERNEL);
-	if (!class_dev)
-		return -ENOMEM;
-
-	error = fw_setup_class_device(class_dev, "my_firmware_image",
-				      &my_device);
-	if (error) {
-		kfree(class_dev);
-		return error;
-	}
-	return 0;
-
-}
-static void __exit firmware_sample_exit(void)
-{
-	struct firmware_priv *fw_priv = class_get_devdata(class_dev);
-	fw_remove_class_device(class_dev);
-	kfree(fw_priv);
-	kfree(class_dev);
-}
-
-module_init(firmware_sample_init);
-module_exit(firmware_sample_exit);
diff --git a/scripts/recordmcount.pl b/scripts/recordmcount.pl
index 91033e6..7109e2b 100755
--- a/scripts/recordmcount.pl
+++ b/scripts/recordmcount.pl
@@ -226,6 +226,26 @@
     if ($is_module eq "0") {
         $cc .= " -mconstant-gp";
     }
+} elsif ($arch eq "sparc64") {
+    # In the objdump output there are giblets like:
+    # 0000000000000000 <igmp_net_exit-0x18>:
+    # Some data blobs get emitted into the text section before
+    # the first instructions and the first real symbols.  We
+    # don't want to match those, so we use '\w' to match only
+    # plain symbol names and not those that also include hex
+    # offsets inside the '<>' brackets.  The generic
+    # function_regex setting could safely use this too.
+    $function_regex = "^([0-9a-fA-F]+)\\s+<(\\w*?)>:";
+
+    # Sparc64 calls '_mcount' instead of plain 'mcount'.
+    $mcount_regex = "^\\s*([0-9a-fA-F]+):.*\\s_mcount\$";
+
+    $alignment = 8;
+    $type = ".xword";
+    $ld .= " -m elf64_sparc";
+    $cc .= " -m64";
+    $objcopy .= " -O elf64-sparc";
 } else {
     die "Arch $arch is not supported with CONFIG_FTRACE_MCOUNT_RECORD";
 }
diff --git a/scripts/tracing/draw_functrace.py b/scripts/tracing/draw_functrace.py
index 902f9a9..db40fa0 100644
--- a/scripts/tracing/draw_functrace.py
+++ b/scripts/tracing/draw_functrace.py
@@ -12,10 +12,9 @@
 
 Usage:
 	Be sure that you have CONFIG_FUNCTION_TRACER
-	# mkdir /debugfs
-	# mount -t debug debug /debug
-	# echo function > /debug/tracing/current_tracer
-	$ cat /debug/tracing/trace_pipe > ~/raw_trace_func
+	# mount -t debugfs nodev /sys/kernel/debug
+	# echo function > /sys/kernel/debug/tracing/current_tracer
+	$ cat /sys/kernel/debug/tracing/trace_pipe > ~/raw_trace_func
 	Wait some time, but not too much; the script is a bit slow.
 	Break the pipe (Ctrl + Z)
 	$ scripts/draw_functrace.py < raw_trace_func > draw_functrace
diff --git a/sound/pci/ctxfi/ctatc.c b/sound/pci/ctxfi/ctatc.c
index 80fb2ba..b0adc80 100644
--- a/sound/pci/ctxfi/ctatc.c
+++ b/sound/pci/ctxfi/ctatc.c
@@ -259,7 +259,6 @@
 	int n_amixer = apcm->substream->runtime->channels, i = 0;
 	int device = apcm->substream->pcm->device;
 	unsigned int pitch;
-	unsigned long flags;
 
 	if (NULL != apcm->src) {
 		/* Prepared pcm playback */
@@ -311,10 +310,10 @@
 	src = apcm->src;
 	for (i = 0; i < n_amixer; i++) {
 		amixer = apcm->amixers[i];
-		spin_lock_irqsave(&atc->atc_lock, flags);
+		mutex_lock(&atc->atc_mutex);
 		amixer->ops->setup(amixer, &src->rsc,
 					INIT_VOL, atc->pcm[i+device*2]);
-		spin_unlock_irqrestore(&atc->atc_lock, flags);
+		mutex_unlock(&atc->atc_mutex);
 		src = src->ops->next_interleave(src);
 		if (NULL == src)
 			src = apcm->src;
@@ -865,7 +864,6 @@
 spdif_passthru_playback_setup(struct ct_atc *atc, struct ct_atc_pcm *apcm)
 {
 	struct dao *dao = container_of(atc->daios[SPDIFOO], struct dao, daio);
-	unsigned long flags;
 	unsigned int rate = apcm->substream->runtime->rate;
 	unsigned int status;
 	int err;
@@ -885,7 +883,7 @@
 		return -ENOENT;
 	}
 
-	spin_lock_irqsave(&atc->atc_lock, flags);
+	mutex_lock(&atc->atc_mutex);
 	dao->ops->get_spos(dao, &status);
 	if (((status >> 24) & IEC958_AES3_CON_FS) != iec958_con_fs) {
 		status &= ((~IEC958_AES3_CON_FS) << 24);
@@ -895,7 +893,7 @@
 	}
 	if ((rate != atc->pll_rate) && (32000 != rate))
 		err = atc_pll_init(atc, rate);
-	spin_unlock_irqrestore(&atc->atc_lock, flags);
+	mutex_unlock(&atc->atc_mutex);
 
 	return err;
 }
@@ -908,7 +906,6 @@
 	struct dao *dao;
 	int err;
 	int i;
-	unsigned long flags;
 
 	if (NULL != apcm->src)
 		return 0;
@@ -934,13 +931,13 @@
 			src = apcm->src;
 	}
 	/* Connect to SPDIFOO */
-	spin_lock_irqsave(&atc->atc_lock, flags);
+	mutex_lock(&atc->atc_mutex);
 	dao = container_of(atc->daios[SPDIFOO], struct dao, daio);
 	amixer = apcm->amixers[0];
 	dao->ops->set_left_input(dao, &amixer->rsc);
 	amixer = apcm->amixers[1];
 	dao->ops->set_right_input(dao, &amixer->rsc);
-	spin_unlock_irqrestore(&atc->atc_lock, flags);
+	mutex_unlock(&atc->atc_mutex);
 
 	ct_timer_prepare(apcm->timer);
 
@@ -1088,7 +1085,6 @@
 
 static int atc_spdif_out_passthru(struct ct_atc *atc, unsigned char state)
 {
-	unsigned long flags;
 	struct dao_desc da_dsc = {0};
 	struct dao *dao;
 	int err;
@@ -1096,7 +1092,7 @@
 	struct rsc *rscs[2] = {NULL};
 	unsigned int spos = 0;
 
-	spin_lock_irqsave(&atc->atc_lock, flags);
+	mutex_lock(&atc->atc_mutex);
 	dao = container_of(atc->daios[SPDIFOO], struct dao, daio);
 	da_dsc.msr = state ? 1 : atc->msr;
 	da_dsc.passthru = state ? 1 : 0;
@@ -1114,7 +1110,7 @@
 	}
 	dao->ops->set_spos(dao, spos);
 	dao->ops->commit_write(dao);
-	spin_unlock_irqrestore(&atc->atc_lock, flags);
+	mutex_unlock(&atc->atc_mutex);
 
 	return err;
 }
@@ -1572,7 +1568,7 @@
 	atc->msr = msr;
 	atc->chip_type = chip_type;
 
-	spin_lock_init(&atc->atc_lock);
+	mutex_init(&atc->atc_mutex);
 
 	/* Find card model */
 	err = atc_identify_card(atc);
diff --git a/sound/pci/ctxfi/ctatc.h b/sound/pci/ctxfi/ctatc.h
index a033472..9fe620ea 100644
--- a/sound/pci/ctxfi/ctatc.h
+++ b/sound/pci/ctxfi/ctatc.h
@@ -19,7 +19,7 @@
 #define CTATC_H
 
 #include <linux/types.h>
-#include <linux/spinlock_types.h>
+#include <linux/mutex.h>
 #include <linux/pci.h>
 #include <linux/timer.h>
 #include <sound/core.h>
@@ -90,7 +90,7 @@
 	void (*unmap_audio_buffer)(struct ct_atc *atc, struct ct_atc_pcm *apcm);
 	unsigned long (*get_ptp_phys)(struct ct_atc *atc, int index);
 
-	spinlock_t atc_lock;
+	struct mutex atc_mutex;
 
 	int (*pcm_playback_prepare)(struct ct_atc *atc,
 				    struct ct_atc_pcm *apcm);
diff --git a/sound/pci/ctxfi/cttimer.c b/sound/pci/ctxfi/cttimer.c
index 779c6c3..93b0aed 100644
--- a/sound/pci/ctxfi/cttimer.c
+++ b/sound/pci/ctxfi/cttimer.c
@@ -180,7 +180,7 @@
  *
  * call this inside the lock and irq disabled
  */
-static int ct_xfitimer_reprogram(struct ct_timer *atimer)
+static int ct_xfitimer_reprogram(struct ct_timer *atimer, int can_update)
 {
 	struct ct_timer_instance *ti;
 	unsigned int min_intr = (unsigned int)-1;
@@ -216,6 +216,8 @@
 			ti->frag_count = div_u64((u64)pos * CT_TIMER_FREQ +
 						 rate - 1, rate);
 		}
+		if (ti->need_update && !can_update)
+			min_intr = 0; /* pending to the next irq */
 		if (ti->frag_count < min_intr)
 			min_intr = ti->frag_count;
 	}
@@ -235,7 +237,7 @@
 
 	spin_lock_irqsave(&atimer->list_lock, flags);
 	list_for_each_entry(ti, &atimer->instance_head, instance_list) {
-		if (ti->need_update) {
+		if (ti->running && ti->need_update) {
 			ti->need_update = 0;
 			ti->apcm->interrupt(ti->apcm);
 		}
@@ -252,7 +254,7 @@
 	spin_lock_irqsave(&atimer->lock, flags);
 	atimer->irq_handling = 1;
 	do {
-		update = ct_xfitimer_reprogram(atimer);
+		update = ct_xfitimer_reprogram(atimer, 1);
 		spin_unlock(&atimer->lock);
 		if (update)
 			ct_xfitimer_check_period(atimer);
@@ -265,6 +267,7 @@
 static void ct_xfitimer_prepare(struct ct_timer_instance *ti)
 {
 	ti->frag_count = ti->substream->runtime->period_size;
+	ti->running = 0;
 	ti->need_update = 0;
 }
 
@@ -273,7 +276,6 @@
 static void ct_xfitimer_update(struct ct_timer *atimer)
 {
 	unsigned long flags;
-	int update;
 
 	spin_lock_irqsave(&atimer->lock, flags);
 	if (atimer->irq_handling) {
@@ -284,10 +286,8 @@
 	}
 
 	ct_xfitimer_irq_stop(atimer);
-	update = ct_xfitimer_reprogram(atimer);
+	ct_xfitimer_reprogram(atimer, 0);
 	spin_unlock_irqrestore(&atimer->lock, flags);
-	if (update)
-		ct_xfitimer_check_period(atimer);
 }
 
 static void ct_xfitimer_start(struct ct_timer_instance *ti)
@@ -298,6 +298,8 @@
 	spin_lock_irqsave(&atimer->lock, flags);
 	if (list_empty(&ti->running_list))
 		atimer->wc = ct_xfitimer_get_wc(atimer);
+	ti->running = 1;
+	ti->need_update = 0;
 	list_add(&ti->running_list, &atimer->running_head);
 	spin_unlock_irqrestore(&atimer->lock, flags);
 	ct_xfitimer_update(atimer);
@@ -310,7 +312,7 @@
 
 	spin_lock_irqsave(&atimer->lock, flags);
 	list_del_init(&ti->running_list);
-	ti->need_update = 0;
+	ti->running = 0;
 	spin_unlock_irqrestore(&atimer->lock, flags);
 	ct_xfitimer_update(atimer);
 }
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 337d2a5..d22b260 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -9014,6 +9014,8 @@
 		ALC888_ACER_ASPIRE_4930G),
 	SND_PCI_QUIRK(0x1025, 0x0145, "Acer Aspire 8930G",
 		ALC888_ACER_ASPIRE_8930G),
+	SND_PCI_QUIRK(0x1025, 0x0146, "Acer Aspire 6935G",
+		ALC888_ACER_ASPIRE_8930G),
 	SND_PCI_QUIRK(0x1025, 0x0157, "Acer X3200", ALC883_AUTO),
 	SND_PCI_QUIRK(0x1025, 0x0158, "Acer AX1700-U3700A", ALC883_AUTO),
 	SND_PCI_QUIRK(0x1025, 0x015e, "Acer Aspire 6930G",
diff --git a/sound/pci/intel8x0.c b/sound/pci/intel8x0.c
index 173bebf..8aa5687 100644
--- a/sound/pci/intel8x0.c
+++ b/sound/pci/intel8x0.c
@@ -356,8 +356,6 @@
         unsigned int position;
 	unsigned int pos_shift;
 	unsigned int last_pos;
-	unsigned long last_pos_jiffies;
-	unsigned int jiffy_to_bytes;
         int frags;
         int lvi;
         int lvi_frag;
@@ -844,7 +842,6 @@
 	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
 		val = ICH_IOCE | ICH_STARTBM;
 		ichdev->last_pos = ichdev->position;
-		ichdev->last_pos_jiffies = jiffies;
 		break;
 	case SNDRV_PCM_TRIGGER_SUSPEND:
 		ichdev->suspended = 1;
@@ -1048,7 +1045,6 @@
 			ichdev->pos_shift = (runtime->sample_bits > 16) ? 2 : 1;
 	}
 	snd_intel8x0_setup_periods(chip, ichdev);
-	ichdev->jiffy_to_bytes = (runtime->rate * 4 * ichdev->pos_shift) / HZ;
 	return 0;
 }
 
@@ -1073,19 +1069,23 @@
 		    ptr1 == igetword(chip, ichdev->reg_offset + ichdev->roff_picb))
 			break;
 	} while (timeout--);
+	ptr = ichdev->last_pos;
 	if (ptr1 != 0) {
 		ptr1 <<= ichdev->pos_shift;
 		ptr = ichdev->fragsize1 - ptr1;
 		ptr += position;
-		ichdev->last_pos = ptr;
-		ichdev->last_pos_jiffies = jiffies;
-	} else {
-		ptr1 = jiffies - ichdev->last_pos_jiffies;
-		if (ptr1)
-			ptr1 -= 1;
-		ptr = ichdev->last_pos + ptr1 * ichdev->jiffy_to_bytes;
-		ptr %= ichdev->size;
+		if (ptr < ichdev->last_pos) {
+			unsigned int pos_base, last_base;
+			pos_base = position / ichdev->fragsize1;
+			last_base = ichdev->last_pos / ichdev->fragsize1;
+			/* another sanity check; ptr1 can go back to full
+			 * before the base position is updated
+			 */
+			if (pos_base == last_base)
+				ptr = ichdev->last_pos;
+		}
 	}
+	ichdev->last_pos = ptr;
 	spin_unlock(&chip->reg_lock);
 	if (ptr >= ichdev->size)
 		return 0;
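The intel8x0 hunk above drops the jiffies-based position estimation and instead refuses to report a pointer that jumps backwards while the hardware is still in the same fragment as the last report (per the hunk's own comment, the per-fragment counter can momentarily read back as full before the base position advances). A compact sketch of that guard, with illustrative parameter names:

/* Return the position to report: keep the previous one if the new value
 * moved backwards without the fragment index changing. */
static unsigned int guard_backward_jump(unsigned int new_pos,
					unsigned int last_pos,
					unsigned int base_pos,
					unsigned int fragsize)
{
	if (new_pos < last_pos &&
	    base_pos / fragsize == last_pos / fragsize)
		return last_pos;	/* suspicious jump: stay put */
	return new_pos;
}

The caller then stores the returned value as the new last position and wraps it to 0 once it reaches the buffer size, as the hunk above does.
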
diff --git a/sound/soc/codecs/ssm2602.c b/sound/soc/codecs/ssm2602.c
index 1fc4c8e..c550750 100644
--- a/sound/soc/codecs/ssm2602.c
+++ b/sound/soc/codecs/ssm2602.c
@@ -375,10 +375,6 @@
 	struct snd_soc_codec *codec = socdev->card->codec;
 	struct ssm2602_priv *ssm2602 = codec->private_data;
 
-	if (ssm2602->master_substream == substream)
-		ssm2602->master_substream = ssm2602->slave_substream;
-
-	ssm2602->slave_substream = NULL;
 	/* deactivate */
 	if (!codec->active)
 		ssm2602_write(codec, SSM2602_ACTIVE, 0);
diff --git a/sound/soc/codecs/wm8903.c b/sound/soc/codecs/wm8903.c
index d8a9222..e8d2e3e 100644
--- a/sound/soc/codecs/wm8903.c
+++ b/sound/soc/codecs/wm8903.c
@@ -1257,22 +1257,18 @@
 	int div;
 } bclk_divs[] = {
 	{  10,  0 },
-	{  15,  1 },
 	{  20,  2 },
 	{  30,  3 },
 	{  40,  4 },
 	{  50,  5 },
-	{  55,  6 },
 	{  60,  7 },
 	{  80,  8 },
 	{ 100,  9 },
-	{ 110, 10 },
 	{ 120, 11 },
 	{ 160, 12 },
 	{ 200, 13 },
 	{ 220, 14 },
 	{ 240, 15 },
-	{ 250, 16 },
 	{ 300, 17 },
 	{ 320, 18 },
 	{ 440, 19 },
diff --git a/sound/soc/pxa/magician.c b/sound/soc/pxa/magician.c
index c89a3cd..326955d 100644
--- a/sound/soc/pxa/magician.c
+++ b/sound/soc/pxa/magician.c
@@ -184,7 +184,7 @@
 
 	/* set cpu DAI configuration */
 	ret = snd_soc_dai_set_fmt(cpu_dai, SND_SOC_DAIFMT_DSP_A |
-			SND_SOC_DAIFMT_IB_IF | SND_SOC_DAIFMT_CBS_CFS);
+			SND_SOC_DAIFMT_NB_IF | SND_SOC_DAIFMT_CBS_CFS);
 	if (ret < 0)
 		return ret;
 
diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c
index 3f44150..1d70829 100644
--- a/sound/soc/soc-core.c
+++ b/sound/soc/soc-core.c
@@ -1389,6 +1389,9 @@
 	snprintf(codec->card->longname, sizeof(codec->card->longname),
 		 "%s (%s)", card->name, codec->name);
 
+	/* Make sure all DAPM widgets are instantiated */
+	snd_soc_dapm_new_widgets(codec);
+
 	ret = snd_card_register(codec->card);
 	if (ret < 0) {
 		printk(KERN_ERR "asoc: failed to register soundcard for %s\n",
diff --git a/sound/sound_core.c b/sound/sound_core.c
index 2b302bb..12522e6 100644
--- a/sound/sound_core.c
+++ b/sound/sound_core.c
@@ -27,6 +27,11 @@
 MODULE_AUTHOR("Alan Cox");
 MODULE_LICENSE("GPL");
 
+static char *sound_nodename(struct device *dev)
+{
+	return kasprintf(GFP_KERNEL, "snd/%s", dev_name(dev));
+}
+
 static int __init init_soundcore(void)
 {
 	int rc;
@@ -41,6 +46,8 @@
 		return PTR_ERR(sound_class);
 	}
 
+	sound_class->nodename = sound_nodename;
+
 	return 0;
 }
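The sound_core hunks above use the class->nodename hook so that sound device nodes are requested under a "snd/" subdirectory. A minimal sketch of the same hook for a hypothetical class "foo" (everything here except the nodename field and the kasprintf() usage, both shown in the hunks above, is made up for illustration):

#include <linux/device.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>

static struct class *foo_class;

/* Return a path relative to /dev; udev/devtmpfs can then create the node
 * under a "foo/" subdirectory, analogous to "snd/" above. */
static char *foo_nodename(struct device *dev)
{
	return kasprintf(GFP_KERNEL, "foo/%s", dev_name(dev));
}

static int __init foo_init(void)
{
	foo_class = class_create(THIS_MODULE, "foo");
	if (IS_ERR(foo_class))
		return PTR_ERR(foo_class);
	foo_class->nodename = foo_nodename;
	return 0;
}
module_init(foo_init);
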
 
diff --git a/sound/usb/usbquirks.h b/sound/usb/usbquirks.h
index f0f7624..f6f201e 100644
--- a/sound/usb/usbquirks.h
+++ b/sound/usb/usbquirks.h
@@ -1989,7 +1989,7 @@
 	USB_DEVICE(0x0ccd, 0x0028),
 	.driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {
 		.vendor_name = "TerraTec",
-		.product_name = "Aureon 5.1 MkII",
+		.product_name = "Aureon5.1MkII",
 		.ifnum = QUIRK_NO_INTERFACE
 	}
 },